hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfc48e212278e6e7c38127a59bb5495990cc736
| 207
|
py
|
Python
|
World/Object/Item/Constants/Material.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | 10
|
2019-06-29T19:24:52.000Z
|
2021-02-21T22:45:57.000Z
|
World/Object/Item/Constants/Material.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | 4
|
2019-08-15T07:03:36.000Z
|
2021-06-02T13:01:25.000Z
|
World/Object/Item/Constants/Material.py
|
sundayz/idewave-core
|
5bdb88892173c9c3e8c85f431cf9b5dbd9f23941
|
[
"Apache-2.0"
] | 8
|
2019-06-30T22:47:48.000Z
|
2021-02-20T19:21:30.000Z
|
from enum import Enum
class Material(Enum):
    """Item material type constants.

    The integer value of each member is the material identifier used by
    the surrounding item code.
    """
    METAL = 1
    WOOD = 2
    LIQUID = 3
    JEWELRY = 4
    CHAIN = 5
    PLATE = 6
    CLOTH = 7
    LEATHER = 8
| 14.785714
| 21
| 0.42029
|
acfc4912aabca63e11b1afa946a218530156dced
| 735
|
py
|
Python
|
jactorch/functional/probability.py
|
dapatil211/Jacinle
|
7638a46dc06223a1871d88f92aade644883f40a0
|
[
"MIT"
] | 114
|
2018-01-25T04:44:07.000Z
|
2022-03-09T14:33:42.000Z
|
third_party/Jacinle/jactorch/functional/probability.py
|
dair-iitd/1oML_workdir
|
37117de4abf1774548786e9534c90977d67091d8
|
[
"Apache-2.0"
] | 7
|
2018-05-08T17:02:24.000Z
|
2022-02-09T23:44:06.000Z
|
third_party/Jacinle/jactorch/functional/probability.py
|
dair-iitd/1oML_workdir
|
37117de4abf1774548786e9534c90977d67091d8
|
[
"Apache-2.0"
] | 268
|
2018-04-08T10:54:35.000Z
|
2022-03-01T07:10:02.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : probability.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 02/04/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
# Public API of this module.
__all__ = ['normalize_prob', 'check_prob_normalization']
def normalize_prob(a, dim=-1):
    """Divide `a` by its 1-norm along dimension `dim` so it sums to 1."""
    total = a.sum(dim=dim, keepdim=True)
    return a / total
def check_prob_normalization(p, dim=-1, atol=1e-5):
    """Check that `p` is normalized (sums to 1) along dimension `dim`.

    :param p: probability tensor.
    :param dim: dimension along which probabilities should sum to 1.
    :param atol: absolute tolerance of the check.
    :raises AssertionError: if any slice is not normalized. Raised
        explicitly (not via ``assert``) so the check survives ``python -O``.
    """
    tot = p.sum(dim=dim)
    # Same tolerance band as the original element-wise test:
    # 1 - atol < tot < 1 + atol. Avoids the deprecated `.data` access and
    # the boolean-multiplication trick of the original implementation.
    if not bool(((tot - 1).abs() < atol).all()):
        raise AssertionError('Probability normalization check failed.')
| 29.4
| 87
| 0.661224
|
acfc49eba45a61ef22dc2c1b84acab98ca249ec0
| 10,344
|
py
|
Python
|
kervi/hal/motor_controller.py
|
kervi/kervi-core
|
3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23
|
[
"MIT"
] | null | null | null |
kervi/hal/motor_controller.py
|
kervi/kervi-core
|
3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23
|
[
"MIT"
] | null | null | null |
kervi/hal/motor_controller.py
|
kervi/kervi-core
|
3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23
|
[
"MIT"
] | null | null | null |
#Copyright 2017 Tim Wentlau.
#Distributed under the MIT License. See LICENSE in root of project.
import time
from kervi.core.utility.thread import KerviThread
from kervi.values import *
from kervi.controllers import Controller
class _MotorNumOutOfBoundsError(Exception):
def __init__(self, device_name, motor):
super(_MotorNumOutOfBoundsError, self).__init__(
'{0} Exception: Motor num out of Bounds, motor={1}'.format(device_name, motor)
)
class DCMotor(object):
    """Thin read-only wrapper around a DC motor's speed input value."""

    def __init__(self, motor):
        # The controller hands over its speed input value object; keep a
        # reference so callers can read/observe it via `speed`.
        self._speed = motor

    @property
    def speed(self):
        """The underlying speed value object for this motor."""
        return self._speed
class DCMotorControllerBase(Controller):
    """Base controller exposing one speed input per DC motor.

    Concrete controllers implement :meth:`_set_speed` and :meth:`stop_all`.
    """

    def __init__(self, controller_id, device_name, num_motors):
        Controller.__init__(self, controller_id, device_name + "-DC motors")
        self._num_motors = num_motors
        self._device_name = device_name
        # One numeric speed input per motor, named motor_0 .. motor_{n-1}.
        for motor in range(0, num_motors):
            self.inputs.add("motor_" + str(motor), "Motor " + str(motor), NumberValue)

    def __getitem__(self, motor):
        # Validate first so an invalid index raises the domain error
        # instead of a bare KeyError (consistent with the servo controller).
        self._validate_motor(motor)
        return DCMotor(self.inputs["motor_" + str(motor)])

    def _validate_motor(self, motor):
        # Motors are zero-indexed, so index == num_motors is out of range;
        # the original `>` comparison let it through.
        if motor < 0 or motor >= self._num_motors:
            raise _MotorNumOutOfBoundsError(self._device_name, motor)

    @property
    def device_name(self):
        """Motor controller device name"""
        return self._device_name

    @property
    def num_motors(self):
        """Number of DC motors this motor controller can handle"""
        return self._num_motors

    def input_changed(self, changed_input):
        # Forward input changes to the hardware-specific speed setter.
        self._set_speed(changed_input.index, changed_input.value)

    def _set_speed(self, motor, speed):
        """
        Change the speed of a motor on the controller.
        :param motor: The motor to change.
        :type motor: ``int``
        :param speed: Speed from -100 to +100, 0 is stop
        :type speed: ``int``
        """
        raise NotImplementedError

    def stop_all(self):
        """Stops all motors connected to the motor controller"""
        raise NotImplementedError
class _StepperRunThread(KerviThread):
    """Background thread that keeps a stepper motor moving at a set speed."""

    def __init__(self, motor, speed):
        KerviThread.__init__(self)
        self.motor = motor
        self.enabled = False
        self.speed = speed

    def _step(self):
        # Presumably invoked repeatedly by the KerviThread run loop -- TODO
        # confirm against kervi.core.utility.thread.KerviThread.
        if not self.enabled:
            return
        if self.speed > 0:
            self.motor.step(1)
        elif self.speed < 0:
            self.motor.step(-1)
        else:
            # Zero speed: de-energise the coils instead of stepping.
            self.motor.release()
class StepperMotor(object):
    """One stepper motor attached to a stepper motor controller.

    Holds stepping style and timing configuration; the actual coil
    signalling is delegated to the owning controller via ``_step`` /
    ``_release``.
    """

    # Stepping styles.
    SINGLE = 1
    DOUBLE = 2
    INTERLEAVE = 3
    MICROSTEP = 4
    MICROSTEPS = 8  # micro steps per full step
    FORWARD = 1

    def __init__(self, device, motor):
        self._device = device
        self._motor = motor
        self._step_style = self.SINGLE
        self.stepping_counter = 0
        self.current_step = 0
        self.stepper_thread = None
        self._max_rpm = 60
        self._steps = 200  # full steps per revolution
        # Fastest permitted seconds-per-step at max rpm.
        self._min_interval = 60.0 / (self._max_rpm * self._steps)
        self._step_interval = self._min_interval

    @property
    def max_rpm(self):
        """Maximum revolutions per minute."""
        # Bug fix: the original returned self._steps here.
        return self._max_rpm

    @max_rpm.setter
    def max_rpm(self, value):
        # NOTE(review): _min_interval is not recomputed on change -- confirm
        # whether setters should refresh it.
        self._max_rpm = value

    @property
    def steps(self):
        """Full steps per revolution."""
        return self._steps

    @steps.setter
    def steps(self, value):
        self._steps = value

    @property
    def min_interval(self):
        """Smallest seconds-per-step interval allowed at max rpm."""
        return self._min_interval

    @property
    def step_style(self):
        """Default stepping style used when step() gets no explicit style."""
        return self._step_style

    @step_style.setter
    def step_style(self, value):
        self._step_style = value

    @property
    def step_interval(self):
        # NOTE(review): reading this property resets stepping_counter -- a
        # side effect kept for backwards compatibility.
        self.stepping_counter = 0
        return self._step_interval

    @step_interval.setter
    def step_interval(self, value):
        self._step_interval = value

    def _step(self, direction, style=None):
        # Bug fix: propagate the controller's return value -- step() assigns
        # it to `lateststep` and loops on it in MICROSTEP mode, so dropping
        # it (as the original did) made that loop spin forever.
        return self._device._step(self._motor, direction, style)

    def _release(self):
        self._device._release(self._motor)

    def set_speed(self, speed):
        """Configure the step interval for `speed` and create the run thread.

        :param speed: percentage of max speed; sign selects direction.
        """
        if not self.stepper_thread:
            # NOTE(review): speed == 0 raises ZeroDivisionError here and a
            # negative speed yields a negative interval -- confirm intended
            # input range before guarding.
            interval = (100/speed) * self.min_interval
            self.step_interval = interval
            self.stepper_thread = _StepperRunThread(self, speed)

    def release(self):
        """De-energise the motor coils."""
        self._release()

    def step(self, steps, step_style=None):
        """Move `steps` steps (negative steps reverse the direction).

        :param steps: number of steps; sign selects direction.
        :param step_style: one of SINGLE/DOUBLE/INTERLEAVE/MICROSTEP,
            defaults to :attr:`step_style`.
        """
        s_per_s = self._step_interval
        lateststep = 0
        if steps > 0:
            direction = 1
        else:
            direction = 0
        steps = abs(steps)
        if not step_style:
            step_style = self.step_style
        if step_style == self.INTERLEAVE:
            # Interleaved half-steps: twice the steps per revolution.
            s_per_s = s_per_s / 2.0
        if step_style == self.MICROSTEP:
            s_per_s /= self.MICROSTEPS
            steps *= self.MICROSTEPS
        #print("{} sec per step".format(s_per_s))
        for _ in range(steps):
            lateststep = self._step(direction, step_style)
            time.sleep(s_per_s)
        if step_style == self.MICROSTEP:
            # this is an edge case, if we are in between full steps, lets just keep going
            # so we end on a full step
            # NOTE(review): assumes the controller's _step returns the current
            # microstep position (0..MICROSTEPS) -- confirm in device drivers.
            while (lateststep != 0) and (lateststep != self.MICROSTEPS):
                lateststep = self._step(direction, step_style)
                time.sleep(s_per_s)
class StepperMotorControllerBase(object):
    """Base controller for stepper motors.

    Concrete controllers implement ``_step``, ``_release``, ``_set_speed``
    and ``stop_all``.
    """

    def __init__(self, controller_id, device_name, num_motors):
        self._num_motors = num_motors
        self._device_name = device_name

    def __getitem__(self, motor):
        # Validate the index before handing out a StepperMotor (consistent
        # with the servo controller's __getitem__).
        self._validate_motor(motor)
        return StepperMotor(self, motor)

    def _validate_motor(self, motor):
        # Motors are zero-indexed, so index == num_motors is out of range;
        # the original `>` comparison let it through.
        if motor < 0 or motor >= self._num_motors:
            raise _MotorNumOutOfBoundsError(self._device_name, motor)

    @property
    def device_name(self):
        """Motor controller device name"""
        return self._device_name

    @property
    def num_motors(self):
        """Number of stepper motors this motor controller can handle"""
        return self._num_motors

    def _step(self, motor, direction, style):
        """Advance `motor` one step in `direction` using `style`.

        Signature fixed to match the ``_step(motor, direction, style)`` call
        made from :class:`StepperMotor` (the original omitted ``direction``).
        """
        raise NotImplementedError

    def _release(self, motor):
        """Release (de-energise) the coils of `motor`."""
        raise NotImplementedError

    def _set_speed(self, motor, speed):
        """
        Change the speed of a motor on the controller.
        :param motor: The motor to change.
        :type motor: ``int``
        :param speed: Speed from -100 to +100, 0 is stop
        :type speed: ``int``
        """
        raise NotImplementedError

    def stop_all(self):
        """Stops all motors connected to the motor controller"""
        raise NotImplementedError
class ServoMotor(object):
    """A single servo on a servo controller, driven through a NumberValue."""

    def __init__(self, device, motor):
        self._device = device
        self._motor = motor
        # Position input exposed to the kervi framework; we observe it so
        # value changes are pushed to the hardware via set_position().
        self._position = NumberValue(
            "Servo " + str(motor),
            input_id="servo_" + str(motor),
            parent=self._device,
            index=motor
        )
        self._position.add_observer(self)
        # Calibration factors forwarded to the controller on positioning.
        self._adjust_max = 0
        self._adjust_min = 0
        self._adjust_center = 0

    @property
    def position(self):
        """The NumberValue holding this servo's commanded position."""
        return self._position

    @property
    def adjust_min(self):
        """Calibration factor for the minimum position."""
        return self._adjust_min

    @adjust_min.setter
    def adjust_min(self, value):
        self._adjust_min = value

    @property
    def adjust_max(self):
        """Calibration factor for the maximum position."""
        return self._adjust_max

    @adjust_max.setter
    def adjust_max(self, value):
        self._adjust_max = value

    @property
    def adjust_center(self):
        """Calibration factor for the center position."""
        return self._adjust_center

    @adjust_center.setter
    def adjust_center(self, value):
        self._adjust_center = value

    def kervi_value_changed(self, input, value):
        # Observer callback fired by the position NumberValue.
        self.set_position(value)

    def set_position(self, position):
        # NOTE(review): this calls `_set_position` on the controller, but
        # ServoMotorControllerBase declares `_set_servo` -- confirm which
        # name concrete device drivers actually implement.
        self._device._set_position(self._motor, position, self.adjust_min, self.adjust_max, self.adjust_center)
class ServoMotorControllerBase(Controller):
    """Base controller for servo motors.

    Concrete controllers implement :meth:`_set_servo` and :meth:`stop_all`.
    """

    def __init__(self, controller_id, device_name, num_motors):
        # Fixed copy-paste: this is the servo controller, not "-DC motors".
        Controller.__init__(self, controller_id, device_name + "-servo motors")
        self._num_motors = num_motors
        self._device_name = device_name

    def __getitem__(self, motor):
        self._validate_motor(motor)
        return ServoMotor(self, motor)

    def _validate_motor(self, motor):
        # Motors are zero-indexed, so index == num_motors is out of range;
        # the original `>` comparison let it through.
        if motor < 0 or motor >= self._num_motors:
            raise _MotorNumOutOfBoundsError(self._device_name, motor)

    @property
    def device_name(self):
        """Motor controller device name"""
        return self._device_name

    @property
    def num_motors(self):
        """Number of servo motors this motor controller can handle"""
        return self._num_motors

    def _set_position(self, motor, position, adjust_min=0, adjust_max=0, adjust_center=0):
        """Bridge for :meth:`ServoMotor.set_position`, which calls this name.

        Delegates to :meth:`_set_servo`; device drivers may override either.
        """
        return self._set_servo(motor, position, adjust_min, adjust_max, adjust_center)

    def _set_servo(self, motor, position, adjust_min=0, adjust_max=0, adjust_center=0):
        """
        Change the angle of a servo on the controller.
        :param motor: The motor to change.
        :type motor: ``int``
        :param position: position from -100 to +100, 0 is neutral
        :type position: ``int``
        :param adjust_min: factor to adjust min position. Value should be between -1 and 1
        :type adjust_min: ``float``
        :param adjust_max: factor to adjust max position. Value should be between -1 and 1.
        :type adjust_max: ``float``
        :param adjust_center: factor to adjust center position. Value should be between -1 and 1
        :type adjust_center: ``float``
        """
        raise NotImplementedError

    def stop_all(self):
        """Stops all motors connected to the motor controller"""
        raise NotImplementedError
class MotorControllerBoard(object):
    """Groups the DC, stepper and servo controllers that make up one board."""

    def __init__(self, board_id, device_name, dc_controller=None, stepper_controller=None, servo_controller=None):
        self.board_id = board_id
        # Any controller type the board does not supply falls back to an
        # inert zero-motor base controller.
        self._dc = dc_controller or DCMotorControllerBase(board_id + ".dc_motors", device_name, 0)
        self._stepper = stepper_controller or StepperMotorControllerBase(board_id + ".stepper_motors", device_name, 0)
        self._servo = servo_controller or ServoMotorControllerBase(board_id + ".servo_motors", device_name, 0)

    @property
    def dc_motors(self):
        """The DC motor controller of this board."""
        return self._dc

    @property
    def servo_motors(self):
        """The servo motor controller of this board."""
        return self._servo

    @property
    def stepper_motors(self):
        """The stepper motor controller of this board."""
        return self._stepper
| 28.03252
| 114
| 0.62993
|
acfc4b910dc909708a7dd94c29c840e00ce6db54
| 25,279
|
py
|
Python
|
train.py
|
HaydenFaulkner/Tennis
|
a120dc2c9d5e471ffeb43863da13f59de772932e
|
[
"MIT"
] | 20
|
2019-08-05T10:09:56.000Z
|
2021-12-25T05:40:14.000Z
|
train.py
|
HaydenFaulkner/Tennis
|
a120dc2c9d5e471ffeb43863da13f59de772932e
|
[
"MIT"
] | 3
|
2018-03-22T13:48:43.000Z
|
2021-06-10T06:17:56.000Z
|
train.py
|
HaydenFaulkner/Tennis
|
a120dc2c9d5e471ffeb43863da13f59de772932e
|
[
"MIT"
] | 4
|
2020-11-04T03:38:20.000Z
|
2021-03-30T08:50:52.000Z
|
"""Train script"""
from absl import app, flags
from absl.flags import FLAGS
import logging
import multiprocessing
import mxnet as mx
import numpy as np
import os
import sys
from tensorboardX import SummaryWriter
import time
from tqdm import tqdm
import warnings
from mxnet import gluon
from mxnet import autograd as ag
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model
from gluoncv.utils.metrics.accuracy import Accuracy
from models.vision.definitions import CNNRNN, FrameModel, TwoStreamModel, TemporalPooling
from dataset import TennisSet
from metrics.vision import PRF1
from models.vision.rdnet.r21d import get_r21d
# from utils import frames_to_video
from utils.transforms import TwoStreamNormalize
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'

# ---------------------------------------------------------------------------
# Command-line flags (absl): model selection, data sampling, optimisation and
# run-mode options. Post-processed at the top of main() (list flags are
# parsed as strings and converted there).
# ---------------------------------------------------------------------------
flags.DEFINE_string('backbone', 'resnet18_v2',
                    'Backbone CNN name: resnet18_v1')
flags.DEFINE_string('backbone_from_id', None,
                    'Load a backbone model from a model_id, used for Temporal Pooling with fine-tuned CNN')
flags.DEFINE_bool('freeze_backbone', False,
                  'Freeze the backbone model')
flags.DEFINE_string('model_id', '0000',
                    'model identification string')
flags.DEFINE_string('split_id', '02',
                    'split identification string, 01: single test vid; 02: all videos have test sections')
flags.DEFINE_integer('log_interval', 100,
                     'Logging mini-batch interval.')
flags.DEFINE_integer('data_shape', 512, #224,
                     'The width and height for the input image to be cropped to.')
flags.DEFINE_list('every', '1, 1, 1',
                  'Use only every this many frames: [train, val, test] splits')
flags.DEFINE_list('balance', 'True, False, False',
                  'Balance the play/not class samples: [train, val, test] splits')
flags.DEFINE_integer('window', 1,
                     'Temporal window size of frames')
flags.DEFINE_integer('padding', 1,
                     'Frame*every + and - padding around the marked event boundaries: [train, val, test] splits')
flags.DEFINE_integer('stride', 1,
                     'Temporal stride of samples within a window')
flags.DEFINE_integer('batch_size', 64,
                     'Batch size for detection: higher faster, but more memory intensive.')
flags.DEFINE_integer('epochs', 20,
                     'How many training epochs to complete')
flags.DEFINE_integer('num_gpus', 1,
                     'Number of GPUs to use')
flags.DEFINE_integer('num_workers', -1,
                     'The number of workers should be picked so that it’s equal to number of cores on your machine '
                     'for max parallelization. If this number is bigger than your number of cores it will use up '
                     'a bunch of extra CPU memory. -1 is auto.')
flags.DEFINE_float('lr', 0.001,
                   'Learning rate.')
flags.DEFINE_float('lr_factor', 0.75,
                   'lr factor.')
flags.DEFINE_list('lr_steps', '10, 20',
                  'Epochs at which learning rate factor applied.')
flags.DEFINE_float('momentum', 0.9,
                   'momentum.')
flags.DEFINE_float('wd', 0.0001,
                   'weight decay.')
flags.DEFINE_bool('vis', False,
                  'Visualise testing results')
flags.DEFINE_bool('save_feats', False,
                  'save CNN features as npy files')
flags.DEFINE_string('feats_model', None,
                    'load CNN features as npy files from this model')
flags.DEFINE_string('flow', '',
                    'How to use flow, "" for none, "only" for no rgb, "sixc" for six channel inp, "twos" for twostream')
flags.DEFINE_string('temp_pool', None,
                    'mean, max or gru.')
flags.DEFINE_integer('max_batches', -1, # for 0031
                     'Only do this many batches then break')
def main(_argv):
    """Entry point: build datasets and model, train (unless pooling-only), test.

    Pipeline: flag post-processing -> logging/tensorboard setup -> dataset and
    dataloader construction -> model construction/restore -> optional feature
    extraction -> training -> testing of the best checkpoint.
    """
    # Convert the comma-separated list flags into their real types.
    FLAGS.every = [int(s) for s in FLAGS.every]
    FLAGS.balance = [True if s.lower() == 'true' or s.lower() == 't' else False for s in FLAGS.balance]
    FLAGS.lr_steps = [int(s) for s in FLAGS.lr_steps]
    if FLAGS.num_workers < 0:
        FLAGS.num_workers = multiprocessing.cpu_count()
    ctx = [mx.gpu(i) for i in range(FLAGS.num_gpus)] if FLAGS.num_gpus > 0 else [mx.cpu()]
    # Set up logging
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_file_path = os.path.join('models', 'vision', 'experiments', FLAGS.model_id, 'log.txt')
    log_dir = os.path.dirname(log_file_path)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    fh = logging.FileHandler(log_file_path)
    logger.addHandler(fh)
    key_flags = FLAGS.get_key_flags_for_module(sys.argv[0])
    logging.info('\n'.join(f.serialize() for f in key_flags))
    # set up tensorboard summary writer
    tb_sw = SummaryWriter(log_dir=os.path.join(log_dir, 'tb'), comment=FLAGS.model_id)
    feat_sub_dir = None  # NOTE(review): unused in this function
    # Data augmentation, will do in dataset incase window>1 and need to be applied image-wise
    jitter_param = 0.4
    lighting_param = 0.1
    transform_train = None
    transform_test = None
    balance_train = True
    if FLAGS.feats_model is None:
        # Training directly on frames: augment train, centre-crop test.
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop(FLAGS.data_shape),
            transforms.RandomFlipLeftRight(),
            transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                         saturation=jitter_param),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(FLAGS.data_shape + 32),
            transforms.CenterCrop(FLAGS.data_shape),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        # Flow inputs get the two-stream normalisation and no augmentation.
        if bool(FLAGS.flow):
            transform_test = transforms.Compose([
                transforms.Resize(FLAGS.data_shape + 32),
                transforms.CenterCrop(FLAGS.data_shape),
                TwoStreamNormalize()
            ])
            transform_train = transform_test
        if FLAGS.save_feats:
            # Feature extraction must see every sample deterministically.
            balance_train = False
            transform_train = transform_test
        if FLAGS.window > 1:
            transform_train = transform_test
    # Load datasets
    if FLAGS.temp_pool not in ['max', 'mean']:
        train_set = TennisSet(split='train', transform=transform_train, every=FLAGS.every[0], padding=FLAGS.padding,
                              stride=FLAGS.stride, window=FLAGS.window, model_id=FLAGS.model_id, split_id=FLAGS.split_id,
                              balance=balance_train, flow=bool(FLAGS.flow), feats_model=FLAGS.feats_model, save_feats=FLAGS.save_feats)
        logging.info(train_set)
    val_set = TennisSet(split='val', transform=transform_test, every=FLAGS.every[1], padding=FLAGS.padding,
                        stride=FLAGS.stride, window=FLAGS.window, model_id=FLAGS.model_id, split_id=FLAGS.split_id,
                        balance=False, flow=bool(FLAGS.flow), feats_model=FLAGS.feats_model, save_feats=FLAGS.save_feats)
    logging.info(val_set)
    test_set = TennisSet(split='test', transform=transform_test, every=FLAGS.every[2], padding=FLAGS.padding,
                         stride=FLAGS.stride, window=FLAGS.window, model_id=FLAGS.model_id, split_id=FLAGS.split_id,
                         balance=False, flow=bool(FLAGS.flow), feats_model=FLAGS.feats_model, save_feats=FLAGS.save_feats)
    logging.info(test_set)
    # Data Loaders
    if FLAGS.temp_pool not in ['max', 'mean']:
        train_data = gluon.data.DataLoader(train_set, batch_size=FLAGS.batch_size,
                                           shuffle=True, num_workers=FLAGS.num_workers)
    val_data = gluon.data.DataLoader(val_set, batch_size=FLAGS.batch_size,
                                     shuffle=False, num_workers=FLAGS.num_workers)
    test_data = gluon.data.DataLoader(test_set, batch_size=FLAGS.batch_size,
                                      shuffle=False, num_workers=FLAGS.num_workers)
    # Define Model
    model = None
    if FLAGS.feats_model is None:
        if FLAGS.backbone == 'rdnet':
            backbone_net = get_r21d(num_layers=34, n_classes=400, t=8, pretrained=True).features
        else:
            if FLAGS.flow == 'sixc':
                backbone_net = get_model(FLAGS.backbone, pretrained=False).features  # 6 channel input, don't want pretraind
            else:
                backbone_net = get_model(FLAGS.backbone, pretrained=True).features
        if FLAGS.flow in ['twos', 'only']:
            if FLAGS.flow == 'only':
                backbone_net = None
            flow_net = get_model(FLAGS.backbone, pretrained=True).features  # todo orig exp was not pretrained flow
            model = TwoStreamModel(backbone_net, flow_net, len(train_set.classes))
        elif FLAGS.backbone == 'rdnet':
            model = FrameModel(backbone_net, len(train_set.classes), swap=True)
        else:
            model = FrameModel(backbone_net, len(train_set.classes))
    elif FLAGS.temp_pool in ['max', 'mean']:
        backbone_net = get_model(FLAGS.backbone, pretrained=True).features
        model = FrameModel(backbone_net, len(test_set.classes))
    if FLAGS.window > 1:  # Time Distributed RNN
        # Optionally restore a fine-tuned backbone from another experiment.
        if FLAGS.backbone_from_id and model is not None:
            if os.path.exists(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id)):
                files = os.listdir(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id))
                files = [f for f in files if f[-7:] == '.params']
                if len(files) > 0:
                    files = sorted(files, reverse=True)  # put latest model first
                    model_name = files[0]
                    model.load_parameters(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id, model_name))
                    logging.info('Loaded backbone params: {}'.format(os.path.join('models', 'vision', 'experiments',
                                                                                  FLAGS.backbone_from_id, model_name)))
        if FLAGS.freeze_backbone and model is not None:
            for param in model.collect_params().values():
                param.grad_req = 'null'
        if FLAGS.temp_pool in ['gru', 'lstm']:
            model = CNNRNN(model, num_classes=len(test_set.classes), type=FLAGS.temp_pool, hidden_size=128)
        elif FLAGS.temp_pool in ['mean', 'max']:
            pass
        else:
            assert FLAGS.backbone == 'rdnet'  # ensure 3d net
            assert FLAGS.window in [8, 32]
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        model.initialize()
    num_channels = 3
    if bool(FLAGS.flow):
        num_channels = 6
    # Print a parameter summary for the expected input shape.
    if FLAGS.feats_model is None:
        if FLAGS.window == 1:
            logging.info(model.summary(mx.nd.ndarray.ones(shape=(1,
                         num_channels, FLAGS.data_shape, FLAGS.data_shape))))
        else:
            logging.info(model.summary(mx.nd.ndarray.ones(shape=(1, FLAGS.window,
                         num_channels, FLAGS.data_shape, FLAGS.data_shape))))
    else:
        if FLAGS.window == 1:
            logging.info(model.summary(mx.nd.ndarray.ones(shape=(1, 4096))))
        elif FLAGS.temp_pool not in ['max', 'mean']:
            logging.info(model.summary(mx.nd.ndarray.ones(shape=(1, FLAGS.window, 4096))))
    model.collect_params().reset_ctx(ctx)
    model.hybridize()
    if FLAGS.save_feats:
        # Feature-extraction mode: load the best checkpoint, dump features
        # for all three splits, and exit.
        best_score = -1
        best_epoch = -1
        with open(os.path.join('models', 'vision', 'experiments', FLAGS.model_id, 'scores.txt'), 'r') as f:
            lines = f.readlines()
            lines = [line.rstrip().split() for line in lines]
            for ep, sc in lines:
                if float(sc) > best_score:
                    best_epoch = int(ep)
                    best_score = float(sc)
        logging.info('Testing best model from Epoch %d with score of %f' % (best_epoch, best_score))
        model.load_parameters(os.path.join('models', 'vision', 'experiments', FLAGS.model_id, "{:04d}.params".format(best_epoch)))
        logging.info('Loaded model params: {}'.format(
            os.path.join('models', 'vision', 'experiments', FLAGS.model_id, "{:04d}.params".format(best_epoch))))
        # NOTE(review): train_data/train_set are undefined here when
        # temp_pool is 'max'/'mean' -- this would raise NameError; confirm
        # save_feats is never combined with those pooling modes.
        for data, sett in zip([train_data, val_data, test_data], [train_set, val_set, test_set]):
            save_features(model, data, sett, ctx)
        return
    # Resume from the latest checkpoint of this experiment, if any.
    start_epoch = 0
    if os.path.exists(os.path.join('models', 'vision', 'experiments', FLAGS.model_id)):
        files = os.listdir(os.path.join('models', 'vision', 'experiments', FLAGS.model_id))
        files = [f for f in files if f[-7:] == '.params']
        if len(files) > 0:
            files = sorted(files, reverse=True)  # put latest model first
            model_name = files[0]
            start_epoch = int(model_name.split('.')[0]) + 1
            model.load_parameters(os.path.join('models', 'vision', 'experiments', FLAGS.model_id, model_name), ctx=ctx)
            logging.info('Loaded model params: {}'.format(os.path.join('models', 'vision', 'experiments', FLAGS.model_id, model_name)))
    # Setup the optimiser
    trainer = gluon.Trainer(model.collect_params(), 'sgd',
                            {'learning_rate': FLAGS.lr, 'momentum': FLAGS.momentum, 'wd': FLAGS.wd})
    # Setup Metric/s
    metrics = [Accuracy(label_names=test_set.classes),
               mx.metric.TopKAccuracy(5, label_names=test_set.classes),
               Accuracy(name='accuracy_no', label_names=test_set.classes[1:], ignore_labels=[0]),
               Accuracy(name='accuracy_o', label_names=test_set.classes[0],
                        ignore_labels=list(range(1, len(test_set.classes)))),
               PRF1(label_names=test_set.classes)]
    val_metrics = [Accuracy(label_names=test_set.classes),
                   mx.metric.TopKAccuracy(5, label_names=test_set.classes),
                   Accuracy(name='accuracy_no', label_names=test_set.classes[1:], ignore_labels=[0]),
                   Accuracy(name='accuracy_o', label_names=test_set.classes[0],
                            ignore_labels=list(range(1, len(test_set.classes)))),
                   PRF1(label_names=test_set.classes)]
    test_metrics = [Accuracy(label_names=test_set.classes),
                    mx.metric.TopKAccuracy(5, label_names=test_set.classes),
                    Accuracy(name='accuracy_no', label_names=test_set.classes[1:], ignore_labels=[0]),
                    Accuracy(name='accuracy_o', label_names=test_set.classes[0],
                             ignore_labels=list(range(1, len(test_set.classes)))),
                    PRF1(label_names=test_set.classes)]
    # Setup Loss/es
    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
    if FLAGS.temp_pool not in ['max', 'mean']:
        model = train_model(model, train_set, train_data, metrics, val_set, val_data, val_metrics, trainer, loss_fn, start_epoch, ctx, tb_sw)
    # model training complete, test it
    if FLAGS.temp_pool not in ['max', 'mean']:
        mod_path = os.path.join('models', 'vision', 'experiments', FLAGS.model_id)
    else:
        mod_path = os.path.join('models', 'vision', 'experiments', FLAGS.feats_model)
    # Pick the checkpoint with the best validation score from scores.txt.
    best_score = -1
    best_epoch = -1
    with open(os.path.join(mod_path, 'scores.txt'), 'r') as f:
        lines = f.readlines()
        lines = [line.rstrip().split() for line in lines]
        for ep, sc in lines:
            if float(sc) > best_score:
                best_epoch = int(ep)
                best_score = float(sc)
    logging.info('Testing best model from Epoch %d with score of %f' % (best_epoch, best_score))
    model.load_parameters(os.path.join(mod_path, "{:04d}.params".format(best_epoch)))
    logging.info('Loaded model params: {}'.format(os.path.join(mod_path, "{:04d}.params".format(best_epoch))))
    if FLAGS.temp_pool in ['max', 'mean']:
        assert FLAGS.backbone_from_id or FLAGS.feats_model  # if we doing temporal pooling ensure that we have loaded a pretrained net
        # NOTE(review): `FLAGS.feats_model!=None` works but `is not None`
        # would be the idiomatic comparison here.
        model = TemporalPooling(model, pool=FLAGS.temp_pool, num_classes=0, feats=FLAGS.feats_model!=None)
    tic = time.time()
    _ = test_model(model, test_data, test_set, test_metrics, ctx, vis=FLAGS.vis)
    # Print confusion matrices (metric index 4 is PRF1, which exposes .mat).
    if FLAGS.temp_pool not in ['max', 'mean']:
        str_ = 'Train set:'
        for i in range(len(train_set.classes)):
            str_ += '\n'
            for j in range(len(train_set.classes)):
                str_ += str(metrics[4].mat[i, j]) + '\t'
        print(str_)
    str_ = 'Test set:'
    for i in range(len(test_set.classes)):
        str_ += '\n'
        for j in range(len(test_set.classes)):
            str_ += str(test_metrics[4].mat[i, j]) + '\t'
    print(str_)
    # Summarise all test metrics in one log line.
    str_ = '[Finished] '
    for metric in test_metrics:
        result = metric.get()
        if not isinstance(result, list):
            result = [result]
        for res in result:
            str_ += ', Test_{}={:.3f}'.format(res[0], res[1])
        metric.reset()
    str_ += ' # Samples: {}, Time Taken: {:.1f}'.format(len(test_set), time.time() - tic)
    logging.info(str_)
    # logging.info("Cleaning up, making test videos.")
    # for video in os.listdir(test_set.output_dir):
    #     frames_to_video(os.path.join(test_set.output_dir, video), os.path.join(test_set.output_dir, video[:-4]),
    #                     fps=int(25/FLAGS.every[2]))
    #     shutil.rmtree(os.path.join(test_set.output_dir, video))
def train_model(model, train_set, train_data, metrics, val_set, val_data, val_metrics, trainer, loss_fn, start_epoch, ctx, tb_sw=None):
    """Train `model` on `train_data`, validating and checkpointing each epoch.

    :param model: the gluon block to train (modified in place).
    :param train_set: training dataset (used for len() and class names).
    :param train_data: training DataLoader.
    :param metrics: list of training metrics, updated per batch.
    :param val_set: validation dataset.
    :param val_data: validation DataLoader.
    :param val_metrics: list of validation metrics (index 4 exposes .mat).
    :param trainer: gluon Trainer wrapping the model parameters.
    :param loss_fn: loss applied per device sub-batch.
    :param start_epoch: first epoch index (supports resuming).
    :param ctx: list of mx contexts to spread each batch over.
    :param tb_sw: optional tensorboardX SummaryWriter.
    :return: the trained model.
    """
    if FLAGS.epochs-start_epoch > 0:
        # Training loop
        lr_counter = 0
        num_batches = int(len(train_set)/FLAGS.batch_size)
        for epoch in range(start_epoch, FLAGS.epochs):  # loop over epochs
            logging.info('[Starting Epoch {}]'.format(epoch))
            # Apply every scheduled decay up to this epoch. The bounds-checked
            # `while` fixes two bugs in the original `==` test: an IndexError
            # once lr_counter ran past the end of FLAGS.lr_steps, and silently
            # skipped decays when resuming from beyond a scheduled step.
            while lr_counter < len(FLAGS.lr_steps) and epoch >= FLAGS.lr_steps[lr_counter]:
                trainer.set_learning_rate(trainer.learning_rate*FLAGS.lr_factor)
                lr_counter += 1
            tic = time.time()
            train_sum_loss = 0
            for metric in metrics:
                metric.reset()
            for i, batch in enumerate(train_data):  # loop over batches
                if FLAGS.max_batches > 0 and i > FLAGS.max_batches:
                    break
                btic = time.time()
                # split data across devices
                data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
                labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
                sum_losses = []
                outputs = []
                with ag.record():
                    for ix, x in enumerate(data):  # loop over devices
                        output = model(x)
                        outputs.append(output)
                        sum_losses.append(loss_fn(output, labels[ix]))
                    ag.backward(sum_losses)
                # step the optimizer
                trainer.step(FLAGS.batch_size)
                # store the epoch loss sums - avg loss across batch (avg across devices)
                train_sum_loss += sum([l.mean().asscalar() for l in sum_losses]) / len(sum_losses)
                # update metric
                for metric in metrics:
                    metric.update(labels, outputs)
                # logging
                if FLAGS.log_interval and not (i + 1) % FLAGS.log_interval:
                    str_ = '[Epoch {}][Batch {}/{}], LR: {:.2E}, Speed: {:.3f} samples/sec'.format(
                        epoch, i, num_batches, trainer.learning_rate, FLAGS.batch_size / (time.time() - btic))
                    # Average over the (i + 1) batches seen so far: the
                    # original divided by i and crashed with
                    # ZeroDivisionError when log_interval == 1.
                    str_ += ', {}={:.3f}'.format("loss:", train_sum_loss/((i + 1)*FLAGS.batch_size))
                    if tb_sw:
                        tb_sw.add_scalar(tag='Training_loss',
                                         scalar_value=train_sum_loss/((i + 1)*FLAGS.batch_size),
                                         global_step=(epoch * len(train_data) + i))
                    for metric in metrics:
                        result = metric.get()
                        if not isinstance(result, list):
                            result = [result]
                        for res in result:
                            str_ += ', {}={:.3f}'.format(res[0], res[1])
                            if tb_sw:
                                tb_sw.add_scalar(tag='Training_{}'.format(res[0]),
                                                 scalar_value=float(res[1]),
                                                 global_step=(epoch * len(train_data) + i))
                    logging.info(str_)
            # Format end of epoch logging string getting metrics along the way
            str_ = '[Epoch {}]'.format(epoch)
            for metric in metrics:
                result = metric.get()
                if not isinstance(result, list):
                    result = [result]
                for res in result:
                    str_ += ', Train_{}={:.3f}'.format(res[0], res[1])
            str_ += ', loss: {:.3f}'.format(train_sum_loss / len(train_data))
            # Validate after every epoch.
            vtic = time.time()
            _ = test_model(model, val_data, val_set, val_metrics, ctx)
            str_2 = 'Val set:'
            for i in range(len(train_set.classes)):
                str_2 += '\n'
                for j in range(len(train_set.classes)):
                    str_2 += str(val_metrics[4].mat[i, j]) + '\t'
            print(str_2)
            for metric in val_metrics:
                result = metric.get()
                if not isinstance(result, list):
                    result = [result]
                for res in result:
                    str_ += ', Val_{}={:.3f}'.format(res[0], res[1])
                    if tb_sw:
                        tb_sw.add_scalar(tag='Val_{}'.format(res[0]),
                                         scalar_value=float(res[1]),
                                         global_step=(epoch * len(train_data)))
                    # Append the model-selection score for this epoch.
                    if res[0] == 'AVG_NB_f1':
                        with open(os.path.join('models', 'vision', 'experiments', FLAGS.model_id, 'scores.txt'), 'a') as f:
                            f.write(str(epoch)+'\t'+str(float(res[1]))+'\n')
                metric.reset()
            str_ += ', Epoch Time: {:.1f}, Val Time: {:.1f}'.format(time.time() - tic, time.time() - vtic)
            logging.info(str_)
            # Checkpoint parameters every epoch.
            model.save_parameters(os.path.join('models', 'vision', 'experiments', FLAGS.model_id, "{:04d}.params".format(epoch)))
    return model
# Testing/Validation function
def test_model(net, loader, dataset, metrics, ctx, vis=False):
    """Run `net` over `loader`, updating `metrics` in place.

    :param net: the model to evaluate.
    :param loader: DataLoader yielding (data, labels, sample indices).
    :param dataset: dataset providing save_sample() for visualisation.
    :param metrics: list of metrics to update per batch.
    :param ctx: list of mx contexts to spread each batch over.
    :param vis: when True, save per-sample visualisations via the dataset.
    :return: the (updated) metrics list.
    """
    for batch in tqdm(loader, total=len(loader), desc='Testing'):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        idxs = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0, even_split=False)
        outputs = [net(x) for x in data]
        for metric in metrics:
            metric.update(labels, outputs)
        if vis:
            # save the images with labels
            for di in range(len(outputs)):  # loop over devices
                # Use a fresh name here: the original rebound `idxs`,
                # clobbering the per-device list and breaking the second
                # device's iteration in multi-GPU runs.
                sample_ids = [int(idx) for idx in idxs[di].asnumpy()]
                output = [o.asnumpy() for o in outputs[di]]
                if isinstance(outputs[0], list) or isinstance(outputs[0], tuple):
                    for si in range(len(sample_ids)):  # loop over samples
                        dataset.save_sample(sample_ids[si], [o[si] for o in output])
                else:
                    for si in range(len(sample_ids)):  # loop over samples
                        dataset.save_sample(sample_ids[si], output[si])
    return metrics
def save_features(net, loader, dataset, ctx):
    """Extract backbone features for every sample and cache them as .npy files.

    Samples whose feature file already exists on disk are skipped, so the
    function is safe to re-run after an interruption.
    """
    for batch in tqdm(loader, desc='saving features', total=int(len(dataset)/FLAGS.batch_size)):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        # labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        idxs = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0, even_split=False)
        for xi, x in enumerate(data):  # one slice per device
            feat = net.backbone(x)  # forward through the backbone only, no head
            feat = feat.asnumpy()
            idxsi = idxs[xi].asnumpy()
            for i in range(len(idxsi)):
                feat_path = dataset.save_feature_path(idxsi[i])
                if not os.path.exists(feat_path):  # don't overwrite cached features
                    os.makedirs(os.path.dirname(feat_path), exist_ok=True)
                    np.save(feat_path, feat[i])
                    print("Saving %s" % feat_path)
if __name__ == '__main__':
    # Run the absl app; swallow the SystemExit that app.run raises on normal
    # exit so the interpreter shuts down cleanly.
    try:
        app.run(main)
    except SystemExit:
        pass
| 45.712477
| 141
| 0.588314
|
acfc4d3233a46617557bd58e3da52af5f59fb4f7
| 683
|
py
|
Python
|
app/core/migrations/0002_tag.py
|
llduyll10/Django_Advance
|
77954031a094cf9494f55009e5cb0ba8f996a490
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
llduyll10/Django_Advance
|
77954031a094cf9494f55009e5cb0ba8f996a490
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
llduyll10/Django_Advance
|
77954031a094cf9494f55009e5cb0ba8f996a490
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-03-13 03:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Tag model with a name and a
    # foreign key to the owning user (cascade-deleted with the user).
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.458333
| 118
| 0.616398
|
acfc4d3f3dc46c3807e6144303874080fda312b4
| 304
|
py
|
Python
|
lib/models/backbones/test_mode.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 245
|
2019-11-29T02:55:25.000Z
|
2022-03-30T07:30:18.000Z
|
lib/models/backbones/test_mode.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 24
|
2019-11-29T10:05:00.000Z
|
2022-03-30T07:16:06.000Z
|
lib/models/backbones/test_mode.py
|
FishLiuabc/centerpose
|
555d753cd82693476f91f78c53aa4147f5a83015
|
[
"MIT"
] | 45
|
2019-11-29T05:12:02.000Z
|
2022-03-21T02:20:36.000Z
|
import torch
from darknet import darknet53
from hardnet import hardnet
# Smoke test: build each backbone on the GPU and run one 1x3x512x512 forward pass.
model = hardnet(19).cuda()  # HarDNet with 19 output classes
inputs = torch.randn((1,3,512,512)).cuda()
outs = model(inputs)
print(outs.shape)
model = darknet53(0,1,2).cuda()  # Darknet-53 backbone
inputs = torch.randn((1,3,512,512)).cuda()
outs = model(inputs)
print(outs.shape)
| 13.818182
| 42
| 0.703947
|
acfc4e0048c114ba0fa9c5586c4ff328228fe86e
| 1,407
|
py
|
Python
|
nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 7
|
2017-02-17T08:54:26.000Z
|
2022-03-10T20:57:23.000Z
|
nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 1
|
2016-04-25T15:07:09.000Z
|
2016-04-25T15:07:09.000Z
|
nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 2
|
2017-09-23T16:22:00.000Z
|
2019-08-01T14:18:52.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..featuredetection import DilateMask
def test_DilateMask_inputs():
    # Auto-generated spec test (see file header): verify that every trait of
    # DilateMask.input_spec carries exactly the metadata recorded below.
    input_map = dict(
        args=dict(argstr='%s', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        ignore_exception=dict(
            deprecated='1.0.0',
            nohash=True,
            usedefault=True,
        ),
        inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', ),
        inputVolume=dict(argstr='--inputVolume %s', ),
        lowerThreshold=dict(argstr='--lowerThreshold %f', ),
        outputVolume=dict(
            argstr='--outputVolume %s',
            hash_files=False,
        ),
        sizeStructuralElement=dict(argstr='--sizeStructuralElement %d', ),
        terminal_output=dict(
            deprecated='1.0.0',
            nohash=True,
        ),
    )
    inputs = DilateMask.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_DilateMask_outputs():
    # Auto-generated spec test: verify the output trait metadata of DilateMask.
    output_map = dict(outputVolume=dict(), )
    outputs = DilateMask.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 32.72093
| 74
| 0.602701
|
acfc4e0b1fb1928416d3c2bf6c0ddc831f1bed75
| 5,063
|
py
|
Python
|
promoterz/evolutionHooks.py
|
mczero80/japonicus
|
d183f24a7e1d0e52052f4c6e5e82604d9e7700d3
|
[
"MIT"
] | 229
|
2018-01-05T13:32:52.000Z
|
2021-12-18T00:57:49.000Z
|
promoterz/evolutionHooks.py
|
mczero80/japonicus
|
d183f24a7e1d0e52052f4c6e5e82604d9e7700d3
|
[
"MIT"
] | 142
|
2018-01-04T23:39:28.000Z
|
2019-12-14T16:38:24.000Z
|
promoterz/evolutionHooks.py
|
mczero80/japonicus
|
d183f24a7e1d0e52052f4c6e5e82604d9e7700d3
|
[
"MIT"
] | 95
|
2018-01-06T05:35:23.000Z
|
2021-12-13T16:42:22.000Z
|
#!/bin/python
from deap import base, tools
from copy import deepcopy
import random
import promoterz.supplement.age
import promoterz.supplement.PRoFIGA
import promoterz.supplement.phenotypicDivergence
import itertools
# population as last positional argument, to blend with toolbox;
def immigrateHoF(HallOfFame, population):
    """Inject one hall-of-fame individual into the population.

    The chosen elite is deep-copied and its fitness invalidated so it will
    be re-evaluated. The population list is mutated in place and returned;
    an empty hall of fame leaves it untouched.
    """
    if not HallOfFame.items:
        return population
    # Pick a single elite, copy it, and wipe its cached fitness values.
    elite = deepcopy(random.choice(HallOfFame))
    del elite.fitness.values
    population.append(elite)
    return population
def immigrateRandom(populate, nb_range, population):  # (populate function)
    """Append a randomly-sized batch of fresh individuals to the population.

    `populate` is a factory taking a count; `nb_range` is an inclusive
    (low, high) pair for random.randint. Mutates and returns `population`.
    """
    count = random.randint(nb_range[0], nb_range[1])
    newcomers = populate(count)
    population.extend(newcomers)
    return population
def filterAwayWorst(population, N=5):
    """Return the population with its N worst individuals removed.

    Fix: the original ignored the `N` parameter and always removed a
    hard-coded 5 individuals.
    """
    aliveSize = len(population) - N
    population = tools.selBest(population, aliveSize)
    return population
def filterAwayThreshold(locale, Threshold, min_nb_inds):
    """Drop individuals whose first fitness value is not above Threshold."""
    def aboveThreshold(ind):
        return ind.fitness.values[0] > Threshold
    populationFilter(locale, aboveThreshold, min_nb_inds)
def filterAwayTradeCounts(locale, ThresholdRange, min_nb_inds):
    """Keep only individuals whose trade count lies within ThresholdRange."""
    low, high = ThresholdRange

    def tradecountFilter(ind):
        return low <= ind.trades <= high

    populationFilter(locale, tradecountFilter, min_nb_inds)
def filterAwayRoundtripDuration(locale, ThresholdRange, min_nb_inds):
    """Keep only individuals whose average exposure (hours) is in ThresholdRange."""
    low, high = ThresholdRange

    def exposureFilter(ind):
        return low <= ind.averageExposure <= high

    populationFilter(locale, exposureFilter, min_nb_inds)
def populationFilter(locale, filterFunction, min_nb_inds):
    # Keep only individuals passing filterFunction; if too many are removed,
    # randomly re-admit some of the removed ones so the population does not
    # collapse below min_nb_inds.
    newPopulation = [
        ind for ind in locale.population if filterFunction(ind)
    ]
    removed = [ind for ind in locale.population if ind not in newPopulation]
    # NOTE(review): NBreturn is computed from the *pre-filter* population size,
    # so it is <= 0 whenever len(locale.population) >= min_nb_inds; presumably
    # len(newPopulation) was intended — confirm before changing.
    NBreturn = min(min_nb_inds - len(locale.population),
                   min_nb_inds)
    NBreturn = max(0, NBreturn)
    if NBreturn and removed:
        for k in range(NBreturn):
            if removed:
                newPopulation.append(removed.pop(random.randrange(0,
                                                 len(removed))))
    locale.population = newPopulation
def evaluatePopulation(locale):
    # Evaluate (in parallel) every individual whose fitness is invalid and
    # write the resulting fitness values back. Returns the number evaluated.
    individues_to_simulate = [ind for ind in locale.population
                              if not ind.fitness.valid]
    fitnesses = locale.World.parallel.starmap(
        locale.extratools.Evaluate, zip(individues_to_simulate)
    )
    for i, fit in zip(range(len(individues_to_simulate)), fitnesses):
        individues_to_simulate[i].fitness.values = fit
    return len(individues_to_simulate)
def getLocaleEvolutionToolbox(World, locale):
    # Build a DEAP toolbox wiring the per-locale evolution operators:
    # hall-of-fame/random immigration, population filters, age bookkeeping,
    # phenotypic-divergence tracking and fitness evaluation.
    toolbox = base.Toolbox()
    toolbox.register("ImmigrateHoF", immigrateHoF, locale.HallOfFame)
    toolbox.register("ImmigrateRandom", immigrateRandom, World.tools.population)
    toolbox.register("filterThreshold", filterAwayThreshold, locale)
    toolbox.register("filterTrades", filterAwayTradeCounts, locale)
    toolbox.register("filterExposure", filterAwayRoundtripDuration, locale)
    toolbox.register('ageZero', promoterz.supplement.age.ageZero)
    toolbox.register(
        'populationAges',
        promoterz.supplement.age.populationAges,
        World.conf.generation.ageBoundaries,
    )
    toolbox.register(
        'populationPD',
        promoterz.supplement.phenotypicDivergence.populationPhenotypicDivergence,
        World.tools.constructPhenotype,
    )
    toolbox.register('evaluatePopulation', evaluatePopulation)
    return toolbox
def getGlobalToolbox(representationModule):
    # GLOBAL FUNCTION TO GET GLOBAL TBX UNDER DEVELOPMENT;
    # NOTE(review): this references `creator`, `genconf`, `initPromoterMap`
    # and `Attributes`, none of which are defined or imported in this module,
    # so calling it as-is would raise NameError — finish or remove.
    toolbox = base.Toolbox()
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create(
        "Individual",
        list,
        fitness=creator.FitnessMax,
        PromoterMap=None,
        Strategy=genconf.Strategy,
    )
    toolbox.register("mate", representationModule.crossover)
    toolbox.register("mutate", representationModule.mutate)
    PromoterMap = initPromoterMap(Attributes)
    toolbox.register("newind", initInd, creator.Individual, PromoterMap)
    toolbox.register("population", tools.initRepeat, list, toolbox.newind)
    toolbox.register("constructPhenotype", representationModule.constructPhenotype)
    return toolbox
def getFitness(individual):
    """Return the sum of the individual's weighted fitness values.

    Fix: the original computed the sum but never returned it, so every call
    yielded None. NOTE(review): sibling helpers read ind.fitness.wvalues
    while this reads individual.wvalues directly — confirm which attribute
    is intended.
    """
    return sum(individual.wvalues)
def selectCriteria(ind):
    """Scalar selection key: sum of an individual's weighted fitness values."""
    total = sum(ind.fitness.wvalues)
    return total
def selBest(individuals, number):
    """Return the top `number` individuals ranked by selectCriteria (descending)."""
    ordered = list(individuals)
    ordered.sort(key=selectCriteria, reverse=True)
    return ordered[:number]
def Tournament(individuals, finalselect, tournsize):
    """Select `finalselect` individuals via tournaments of size `tournsize`.

    Each round samples `tournsize` aspirants at random and keeps the best.
    Fix: the original took max() over the *whole* population instead of the
    sampled aspirants, so every tournament returned the single global best
    and the selection pressure parameter had no effect.
    """
    chosen = []
    for _ in range(finalselect):
        aspirants = tools.selRandom(individuals, tournsize)
        chosen.append(max(aspirants, key=selectCriteria))
    return chosen
| 31.842767
| 83
| 0.704918
|
acfc4ff770a188064e51eb5ed914123265a2bfe3
| 2,816
|
py
|
Python
|
backend/server/apps/endpoints/migrations/0001_initial.py
|
lovedeepkaursaini/ml_api_lt
|
4af98b6d229fdfcbd2eec56526b6faedd86bf1e3
|
[
"MIT"
] | null | null | null |
backend/server/apps/endpoints/migrations/0001_initial.py
|
lovedeepkaursaini/ml_api_lt
|
4af98b6d229fdfcbd2eec56526b6faedd86bf1e3
|
[
"MIT"
] | null | null | null |
backend/server/apps/endpoints/migrations/0001_initial.py
|
lovedeepkaursaini/ml_api_lt
|
4af98b6d229fdfcbd2eec56526b6faedd86bf1e3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2020-05-19 04:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Endpoint, MLAlgorithm,
    # MLRequest and MLAlgorithmStatus models for the ML-serving app.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Endpoint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('owner', models.CharField(max_length=128)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='MLAlgorithm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=1000)),
                ('code', models.CharField(max_length=50000)),
                ('version', models.CharField(max_length=128)),
                ('owner', models.CharField(max_length=128)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('parent_endpoint', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='endpoints.Endpoint')),
            ],
        ),
        migrations.CreateModel(
            name='MLRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('input_data', models.CharField(max_length=10000)),
                ('full_response', models.CharField(max_length=10000)),
                ('response', models.CharField(max_length=10000)),
                ('feedback', models.CharField(blank=True, max_length=10000, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='endpoints.MLAlgorithm')),
            ],
        ),
        migrations.CreateModel(
            name='MLAlgorithmStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(max_length=128)),
                ('active', models.BooleanField()),
                ('created_by', models.CharField(max_length=128)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='endpoints.MLAlgorithm')),
            ],
        ),
    ]
| 46.163934
| 155
| 0.575284
|
acfc50065bd5a3988611269078485018a9338875
| 20
|
py
|
Python
|
tiote/utils/__init__.py
|
burhan/tiote
|
e8512fb15333d519282cf11f882e183e91af95c6
|
[
"MIT"
] | 1
|
2019-06-27T13:05:32.000Z
|
2019-06-27T13:05:32.000Z
|
tiote/utils/__init__.py
|
burhan/tiote
|
e8512fb15333d519282cf11f882e183e91af95c6
|
[
"MIT"
] | null | null | null |
tiote/utils/__init__.py
|
burhan/tiote
|
e8512fb15333d519282cf11f882e183e91af95c6
|
[
"MIT"
] | null | null | null |
import fns, qry, htm
| 20
| 20
| 0.75
|
acfc5011b6d6b6eebf14515d9ab5a25bd14fad20
| 308
|
py
|
Python
|
app/controllers/config/index.py
|
ctxis/crackerjack
|
f354cda0e1c6688527018361f24abf2114a77939
|
[
"MIT"
] | 237
|
2020-03-26T12:39:24.000Z
|
2022-03-28T04:46:22.000Z
|
app/controllers/config/index.py
|
FDlucifer/crackerjack
|
f828aeb7bef6788a179b2c617f9121e317a0eea1
|
[
"MIT"
] | 18
|
2020-03-31T14:59:00.000Z
|
2022-03-19T08:37:07.000Z
|
app/controllers/config/index.py
|
FDlucifer/crackerjack
|
f828aeb7bef6788a179b2c617f9121e317a0eea1
|
[
"MIT"
] | 61
|
2020-03-29T13:53:54.000Z
|
2022-03-22T12:36:14.000Z
|
from . import bp
from flask_login import login_required, current_user
from flask import redirect, url_for
@bp.route('/', methods=['GET'])
@login_required
def index():
    """Config landing page: admins go to general settings, everyone else to profile."""
    target = 'config.general' if current_user.admin else 'config.profile'
    return redirect(url_for(target))
| 23.692308
| 52
| 0.711039
|
acfc502bd0caa01eab499dc5850d6a008f341bb8
| 772
|
py
|
Python
|
Python/Roads/CLUS_ReSymbolizeCLUSRoads.py
|
bcgov/clus
|
e0d4e49f031126ee40f36b338651b9fddc180f8a
|
[
"Apache-2.0"
] | 27
|
2018-07-26T23:05:54.000Z
|
2022-03-15T22:55:46.000Z
|
Python/Roads/CLUS_ReSymbolizeCLUSRoads.py
|
ElizabethKleynhans/clus
|
a02aef861712ab62bb5b5877208a138e0074e365
|
[
"Apache-2.0"
] | 41
|
2018-04-25T19:31:29.000Z
|
2022-03-28T17:08:36.000Z
|
Python/Roads/CLUS_ReSymbolizeCLUSRoads.py
|
ElizabethKleynhans/clus
|
a02aef861712ab62bb5b5877208a138e0074e365
|
[
"Apache-2.0"
] | 10
|
2018-04-25T17:25:10.000Z
|
2022-02-16T21:53:23.000Z
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: mwfowler
#
# Created: 11/05/2018
# Copyright: (c) mwfowler 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy
import os
def ReSymbolizeCLUSRoads():
    # Re-apply the symbology of the layer named "Symbology" to every
    # CLUS_IntRoads_TSA* layer in the active data frame (ArcPy, Python 2).
    mxd = arcpy.mapping.MapDocument("CURRENT")
    df = mxd.activeDataFrame
    for lyrs in arcpy.mapping.ListLayers(mxd, "Symbology", df):
        print "Got Symbology Layer {0}".format(lyrs)
        symLyr = lyrs
    # NOTE(review): symLyr stays unbound if no "Symbology" layer exists, and
    # the loop below would then raise NameError — confirm the layer is
    # guaranteed to be present in the map document.
    for lyr in arcpy.mapping.ListLayers(mxd, "CLUS_IntRoads_TSA*", df):
        print lyr.name
        arcpy.ApplySymbologyFromLayer_management(lyr, symLyr)
    arcpy.RefreshTOC()
| 21.444444
| 80
| 0.514249
|
acfc51bae7ea6d8fedcaec471d9660d5bf94eff3
| 338
|
py
|
Python
|
tests/test_wostools.py
|
EmPiFree/python-wostools
|
39963dc2b23190737c59eef895f0498e61c0ae73
|
[
"MIT"
] | null | null | null |
tests/test_wostools.py
|
EmPiFree/python-wostools
|
39963dc2b23190737c59eef895f0498e61c0ae73
|
[
"MIT"
] | null | null | null |
tests/test_wostools.py
|
EmPiFree/python-wostools
|
39963dc2b23190737c59eef895f0498e61c0ae73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `wostools` package."""
from wostools import CollectionLazy
def test_collection():
    """
    End-to-end smoke test: every article parsed from the bundled Web of
    Science export exposes a non-empty title (TI) field.
    """
    collection = CollectionLazy('docs/examples/bit-pattern-savedrecs.txt')
    for article in collection.articles:
        assert article.TI
| 74
| 0.662722
|
acfc524c361d6f74bbe0620b311aeb348bf72c19
| 6,224
|
py
|
Python
|
roboservos.py
|
rogerioth/BreezyCreate2
|
23fa7f1634daa49318fc18c81e76d8d15c99d64b
|
[
"MIT"
] | null | null | null |
roboservos.py
|
rogerioth/BreezyCreate2
|
23fa7f1634daa49318fc18c81e76d8d15c99d64b
|
[
"MIT"
] | null | null | null |
roboservos.py
|
rogerioth/BreezyCreate2
|
23fa7f1634daa49318fc18c81e76d8d15c99d64b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import time
import math
import smbus
import sys
import queue
import threading
# ============================================================================
# Raspi PCA9685 16-Channel PWM Servo Driver
# ============================================================================
class PCA9685:
    """Minimal driver for the PCA9685 16-channel I2C PWM controller.

    Talks to the chip on I2C bus 1 via smbus. Servo pulse widths are given
    in microseconds and assume a 50 Hz PWM period (20000 us).
    """
    # Registers/etc.
    __SUBADR1            = 0x02
    __SUBADR2            = 0x03
    __SUBADR3            = 0x04
    __MODE1              = 0x00
    __PRESCALE           = 0xFE
    __LED0_ON_L          = 0x06
    __LED0_ON_H          = 0x07
    __LED0_OFF_L         = 0x08
    __LED0_OFF_H         = 0x09
    __ALLLED_ON_L        = 0xFA
    __ALLLED_ON_H        = 0xFB
    __ALLLED_OFF_L       = 0xFC
    __ALLLED_OFF_H       = 0xFD

    def __init__(self, address=0x40, debug=False):
        self.bus = smbus.SMBus(1)
        self.address = address
        self.debug = debug
        if (self.debug):
            print("Reseting PCA9685")
        self.write(self.__MODE1, 0x00)

    def write(self, reg, value):
        "Writes an 8-bit value to the specified register/address"
        self.bus.write_byte_data(self.address, reg, value)
        if (self.debug):
            print("I2C: Write 0x%02X to register 0x%02X" % (value, reg))

    def read(self, reg):
        "Read an unsigned byte from the I2C device"
        result = self.bus.read_byte_data(self.address, reg)
        if (self.debug):
            print("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" % (self.address, result & 0xFF, reg))
        return result

    def setPWMFreq(self, freq):
        "Sets the PWM frequency"
        # prescale = round(25MHz / (4096 * freq)) - 1, per the datasheet.
        prescaleval = 25000000.0    # 25MHz
        prescaleval /= 4096.0       # 12-bit
        prescaleval /= float(freq)
        prescaleval -= 1.0
        if (self.debug):
            print("Setting PWM frequency to %d Hz" % freq)
            print("Estimated pre-scale: %d" % prescaleval)
        prescale = math.floor(prescaleval + 0.5)
        if (self.debug):
            print("Final pre-scale: %d" % prescale)
        # The prescale register can only be written while the chip sleeps.
        oldmode = self.read(self.__MODE1)
        newmode = (oldmode & 0x7F) | 0x10        # sleep
        self.write(self.__MODE1, newmode)        # go to sleep
        self.write(self.__PRESCALE, int(math.floor(prescale)))
        self.write(self.__MODE1, oldmode)
        time.sleep(0.005)
        self.write(self.__MODE1, oldmode | 0x80)

    def setPWM(self, channel, on, off):
        "Sets a single PWM channel"
        self.write(self.__LED0_ON_L+4*channel, on & 0xFF)
        self.write(self.__LED0_ON_H+4*channel, on >> 8)
        self.write(self.__LED0_OFF_L+4*channel, off & 0xFF)
        self.write(self.__LED0_OFF_H+4*channel, off >> 8)
        if (self.debug):
            print("channel: %d  LED_ON: %d LED_OFF: %d" % (channel,on,off))

    def setServoPulse(self, channel, pulse):
        "Sets the Servo Pulse,The PWM frequency must be 50HZ"
        pulse = pulse*4096/20000        #PWM frequency is 50HZ,the period is 20000us
        self.setPWM(channel, 0, int(pulse))

    def allServos(self, pulse):
        """Set every one of the 16 channels to the same servo pulse.

        Fix: the original referenced a global `pwm` instead of `self` (a
        NameError unless a module-level `pwm` happened to exist), and
        range(0, 15) skipped channel 15 of this 16-channel chip.
        """
        for channel in range(16):
            self.setServoPulse(channel, pulse)
class Servo:
    """One PWM servo channel with a calibrated pulse range and default position."""

    __SERVO_STEP = 10  # pulse increment (us) used by the range sweep test

    def __init__(self, servo_id, range_max, range_min, name, default_value, controller):
        self.range_max = range_max
        self.range_min = range_min
        self.servo_id = servo_id
        self.name = name
        self.default_value = default_value
        self.currentValue = default_value
        self.controller = controller

    def servoOffset(self, offset):
        """Shift the current pulse by `offset`, clamped to [range_min, range_max]."""
        target = self.currentValue + offset
        self.currentValue = min(self.range_max, max(self.range_min, target))

    def syncServoValue(self):
        """Push the current pulse value out to the hardware channel."""
        self.controller.pwm.setServoPulse(self.servo_id, self.currentValue)

    def servoTestRange(self):
        """Sweep the servo across its full range, then return it to default."""
        print(f"Performing servo test on {self.name} - Channel {self.servo_id} - Max: {self.range_max} - Min: {self.range_min}")
        step = self.__SERVO_STEP
        for pulse in range(self.range_min, self.range_max, step):
            self.controller.pwm.setServoPulse(self.servo_id, pulse)
            time.sleep(0.02)
        for pulse in range(self.range_max, self.range_min, -step):
            self.controller.pwm.setServoPulse(self.servo_id, pulse)
            time.sleep(0.02)
        self.currentValue = self.default_value
        self.syncServoValue()
self.syncServoValue()
class ServoController:
    # Owns the PCA9685 and one Servo per joint of the arm/camera rig.
    # NOTE: constructing this class has hardware side effects — it centres
    # every servo at its default pulse, then runs a full range sweep on each.
    def __init__(self):
        self.pwm = PCA9685(0x40, debug=False)
        self.pwm.setPWMFreq(50)  # 50 Hz: standard analog-servo PWM period
        # create all motors
        # Per-servo calibration: (channel, max pulse, min pulse, name, default pulse)
        self.base_v_angle    = Servo(13, 2280,  570, "base_v_angle",   1660, controller=self) # ok
        self.arm_v_angle     = Servo(12, 1510,  530, "arm_v_angle",    1010, controller=self) # ok
        self.base_rotation   = Servo( 0, 2210,  520, "base_rotation",   870, controller=self) # ok
        self.claw            = Servo( 1, 1460, 1020, "claw",           1260, controller=self) # ok
        self.camera_turret_v = Servo( 7, 2490, 1340, "camera_turret_v", 2280, controller=self) # ok
        self.camera_turret_h = Servo( 6, 2410,  620, "camera_turret_h", 1440, controller=self) # ok
        self.claw_angle      = Servo(15, 2180,  710, "claw_angle",     1520, controller=self) # ok
        self.claw_rotation   = Servo(14, 2240,  510, "claw_rotation",  1220, controller=self) # ok
        self.allMotors = [ self.base_v_angle,
                            self.arm_v_angle,
                            self.base_rotation,
                            self.claw,
                            self.camera_turret_v,
                            self.camera_turret_h,
                            self.claw_angle,
                            self.claw_rotation
                        ]
        # Move everything to its default position, then exercise each servo.
        for servo in self.allMotors:
            servo.syncServoValue()
        time.sleep(1)
        for servo in self.allMotors:
            servo.servoTestRange()
            time.sleep(0.5)
time.sleep(0.5)
if __name__=='__main__':
    # Entry point: constructing the controller centres all servos and runs a
    # range sweep on each (see ServoController.__init__). The commented-out
    # code below is an earlier manual single-servo test harness.
    # # test args: servoId, max, min
    # print(sys.argv)
    # servoId = int(sys.argv[1])
    # servoMax = int(sys.argv[2])
    # servoMin = int(sys.argv[3])
    controller = ServoController()
    # pwm = PCA9685(0x40, debug=False)
    # pwm.setPWMFreq(50)
    # while True:
    #    # setServoPulse(2,2500)
    #    for i in range(500,2500,10):
    #        #pwm.setServoPulse(0,i)
    #        pwm.allServos(i)
    #        time.sleep(0.02)
    #    for i in range(2500,500,-10):
    #        #pwm.setServoPulse(0,i)
    #        pwm.allServos(i)
    #        time.sleep(0.02)
| 33.106383
| 124
| 0.611022
|
acfc5275bf64de1a1f976a87fc0d8e0f5176400f
| 2,584
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/live/apis/AddCustomLiveStreamSnapshotTemplateRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/live/apis/AddCustomLiveStreamSnapshotTemplateRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/live/apis/AddCustomLiveStreamSnapshotTemplateRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class AddCustomLiveStreamSnapshotTemplateRequest(JDCloudRequest):
    """
    Add a custom live-stream snapshot (screenshot) template.
    """

    def __init__(self, parameters, header=None, version="v1"):
        super(AddCustomLiveStreamSnapshotTemplateRequest, self).__init__(
            '/snapshotCustoms:template', 'POST', header, version)
        self.parameters = parameters
class AddCustomLiveStreamSnapshotTemplateParameters(object):
    # Parameter container for AddCustomLiveStreamSnapshotTemplateRequest.
    def __init__(self, format, fillType, snapshotInterval, saveMode, saveBucket, template):
        """
        :param format: Snapshot image format.
          - Allowed values: jpg, png (case-insensitive).
        :param fillType: How to handle a snapshot whose aspect ratio does not
          match the configured width/height:
          1: stretch to the configured size
          2: pad with black
          3: pad with white
          4: pad with Gaussian blur
          - 1 stretches to the exact configured width/height
          - 2/3/4 scale proportionally and pad (or crop) the remainder as configured
        :param snapshotInterval: Snapshot period.
          - Range: [5, 3600]
          - Unit: seconds
        :param saveMode: Storage mode.
          1: overwrite (keep only the latest snapshot)
          2: sequential (keep every snapshot)
        :param saveBucket: Target storage bucket.
        :param template: Custom snapshot-template name:
          - Allowed characters: digits, letters, hyphen ("-") and underscore ("_");
            must not start or end with a special character ("-");
            at most 50 characters.
          - <b>NOTE: must not duplicate an existing template name.</b>
        """
        self.format = format
        self.width = None
        self.height = None
        self.fillType = fillType
        self.snapshotInterval = snapshotInterval
        self.saveMode = saveMode
        self.saveBucket = saveBucket
        self.saveEndpoint = None
        self.template = template

    def setWidth(self, width):
        """
        :param width: (Optional) Snapshot width.
          - Range: [8, 8192]
          - If only one of (width, height) is set, the other is scaled
            proportionally; if neither is set, the source stream size is used.
        """
        self.width = width

    def setHeight(self, height):
        """
        :param height: (Optional) Snapshot height.
          - Range: [8, 8192]
          - If only one of (width, height) is set, the other is scaled
            proportionally; if neither is set, the source stream size is used.
        """
        self.height = height

    def setSaveEndpoint(self, saveEndpoint):
        """
        :param saveEndpoint: (Optional) Storage endpoint address.
        """
        self.saveEndpoint = saveEndpoint
| 25.333333
| 91
| 0.666022
|
acfc534b336ce2053dcca3e95e09109a7b161038
| 2,400
|
py
|
Python
|
proxy.py
|
rsnlnd/fmovies-api
|
2d84c0ceb81d80e0b1d73d1814659f6f5f345870
|
[
"MIT"
] | 6
|
2021-06-13T05:13:20.000Z
|
2022-03-26T05:17:06.000Z
|
proxy.py
|
rsnlnd/fmovies-api
|
2d84c0ceb81d80e0b1d73d1814659f6f5f345870
|
[
"MIT"
] | null | null | null |
proxy.py
|
rsnlnd/fmovies-api
|
2d84c0ceb81d80e0b1d73d1814659f6f5f345870
|
[
"MIT"
] | 5
|
2021-07-06T19:51:07.000Z
|
2022-02-07T13:34:48.000Z
|
try:
import requests
from bs4 import BeautifulSoup
import random
except:
print(" Library Not Found !")
class Random_Proxy(object):
    """Scrape a random HTTPS proxy from sslproxies.org and issue requests through it."""

    def __init__(self):
        self.__url = 'https://www.sslproxies.org/'
        self.__headers = {
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-US,en;q=0.8',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Referer': 'http://www.wikipedia.org/',
            'Connection': 'keep-alive',
        }
        self.random_ip = []
        self.random_port = []

    def __random_proxy(self):
        """
        Private helper: scrape the proxy list and return a requests-style
        proxy dict ({'https': 'ip:port'}) for one randomly chosen entry.
        """
        r = requests.get(url=self.__url, headers=self.__headers)
        soup = BeautifulSoup(r.text, 'html.parser')
        # Fix: reset the cached lists so repeated calls don't accumulate
        # stale entries from previous scrapes.
        self.random_ip = []
        self.random_port = []
        # Get the Random IP Address (every 8th table cell is an IP)
        for x in soup.findAll('td')[::8]:
            self.random_ip.append(x.get_text())
        # Get Their Port
        for y in soup.findAll('td')[1::8]:
            self.random_port.append(y.get_text())
        # Zip together
        z = list(zip(self.random_ip, self.random_port))
        # Fix: the original used random.randint(0, len(z) - 50), which raised
        # ValueError whenever fewer than 50 proxies were listed and could
        # never pick the last 50 entries; choose uniformly over the whole list.
        ip_random = z[random.randrange(len(z))]
        # convert Tuple into String and formart IP and PORT Address
        ip_random_string = "{}:{}".format(ip_random[0], ip_random[1])
        # Create a Proxy
        proxy = {'https': ip_random_string}
        # return Proxy
        return proxy

    def Proxy_Request(self, request_type='get', url='', **kwargs):
        """
        :param request_type: GET, POST, PUT
        :param url: URL from which you want to do webscrapping
        :param kwargs: any other parameter you pass
        :return: Return Response
        """
        # Retry with a fresh proxy until one answers within the timeout.
        while True:
            try:
                proxy = self.__random_proxy()
                print("Using Proxy {}".format(proxy))
                r = requests.request(request_type, url, proxies=proxy, headers=self.__headers, timeout=5, **kwargs)
                return r
            except Exception:
                # Fix: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit, making the loop unkillable.
                pass
| 32.876712
| 149
| 0.579583
|
acfc54303b4a6a96d5eae030829c1319f2030974
| 32
|
py
|
Python
|
bot/__init__.py
|
RPITechTalks/BoilerplateDiscordBot
|
d85606d2becef83b9ecd39e49c5efbbcdceff3c3
|
[
"MIT"
] | 1
|
2020-09-21T14:09:41.000Z
|
2020-09-21T14:09:41.000Z
|
bot/__init__.py
|
Prasantacharya/Stonk-bot
|
54fcad38b69239c597c6f6727185de0bc627a827
|
[
"MIT"
] | null | null | null |
bot/__init__.py
|
Prasantacharya/Stonk-bot
|
54fcad38b69239c597c6f6727185de0bc627a827
|
[
"MIT"
] | 1
|
2021-01-30T20:57:20.000Z
|
2021-01-30T20:57:20.000Z
|
#from .bot import BakedBeansBot
| 16
| 31
| 0.8125
|
acfc545ecee8881c6f473d5fb797c10ee3a23bb4
| 4,243
|
py
|
Python
|
aiida_tcod/cli/cmd_deposit.py
|
fawzi/aiida-tcod
|
29e1f1469c3dfef00a15e427418f121b8caacd2d
|
[
"MIT"
] | null | null | null |
aiida_tcod/cli/cmd_deposit.py
|
fawzi/aiida-tcod
|
29e1f1469c3dfef00a15e427418f121b8caacd2d
|
[
"MIT"
] | 1
|
2020-03-19T20:13:46.000Z
|
2020-03-19T20:13:46.000Z
|
aiida_tcod/cli/cmd_deposit.py
|
fawzi/aiida-tcod
|
29e1f1469c3dfef00a15e427418f121b8caacd2d
|
[
"MIT"
] | 2
|
2019-04-29T14:09:46.000Z
|
2019-05-23T16:53:35.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
This module provides deposit functionality to all data types
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import click
from aiida.cmdline.params import options, types
# Click options shared by every `deposit` CLI command; applied to a command
# function via the deposit_options decorator defined below.
DEPOSIT_OPTIONS = [
    options.CODE(help='Code to be used for the deposition.'),
    options.COMPUTER(help='Computer to be used for deposition.'),
    click.option(
        '-d', '--database', type=click.Choice(['tcod']), default='tcod', help='Label of the database for deposition.'),
    click.option(
        '--deposition-type',
        type=click.Choice(['published', 'prepublication', 'personal']),
        default='published',
        help='Type of the deposition.'),
    click.option('-u', '--username', type=click.STRING, default=None, help="Depositor's username."),
    click.option('-p', '--password', is_flag=True, default=False, help="Prompt for depositor's password."),
    click.option('--user-email', type=click.STRING, default=None, help="Depositor's e-mail address."),
    click.option('--title', type=click.STRING, default=None, help="Title of the publication."),
    click.option('--author-name', type=click.STRING, default=None, help="Full name of the publication author."),
    click.option('--author-email', type=click.STRING, default=None, help="E-mail address of the publication author."),
    click.option('--url', type=click.STRING, default=None, help="URL of the deposition API."),
    click.option(
        '--parameters',
        type=types.DataParamType(sub_classes=('aiida.data:dict',)),
        help="Dict to be exported alongside the to be deposited Data node. By default, if "
        "the node originates from a calculation with single Dict in the output, aforementioned "
        "Dict is picked automatically. Instead, the option is used in the case the calculation produces "
        "more than a single instance of Dict."),
    click.option(
        '--replace', type=click.INT, default=None, help="ID of the structure to be redeposited (replaced), if any."),
    click.option(
        '-m',
        '--message',
        type=click.STRING,
        default=None,
        help="Description of the change (relevant for redepositions only)."),
    click.option(
        '--reduce-symmetry/--no-reduce-symmetry',
        'reduce_symmetry',
        is_flag=True,
        default=None,
        help='Do (default) or do not perform symmetry reduction.'),
    click.option(
        '--dump-aiida-database/--no-dump-aiida-database',
        'dump_aiida_database',
        is_flag=True,
        default=None,
        help='Export (default) or do not export AiiDA database to the CIF file.'),
    click.option(
        '--exclude-external-contents/--no-exclude-external-contents',
        'exclude_external_contents',
        is_flag=True,
        default=None,
        help='Do not (default) or do save the contents for external resources even if URIs are provided'),
    click.option(
        '--gzip/--no-gzip', 'gzip', is_flag=True, default=None, help='Do or do not (default) gzip large files.'),
    click.option(
        '--gzip-threshold',
        type=click.INT,
        default=None,
        help="Specify the minimum size of exported file which should be gzipped."),
]
def deposit_options(func):
    """Decorator: apply every option in DEPOSIT_OPTIONS to `func`.

    Options are applied in reverse so that they appear in declaration
    order in the generated --help output.
    """
    decorated = func
    for option in reversed(DEPOSIT_OPTIONS):
        decorated = option(decorated)
    return decorated
def data_deposit_tcod(node, deposit_type, parameters=None, **kwargs):
    """
    Deposition plugin for TCOD.

    Thin wrapper around aiida.tools.dbexporters.tcod.deposit; the import is
    kept local so the plugin loads lazily, and all keyword arguments are
    forwarded unchanged.
    """
    from aiida.tools.dbexporters.tcod import deposit
    return deposit(node, deposit_type, parameters=parameters, **kwargs)
| 46.119565
| 119
| 0.621494
|
acfc548b15fb2938eabc64a8d39beb37ca332a8f
| 60,911
|
py
|
Python
|
wip/TextAugment.py
|
mapama247/muliwai
|
1aac0f5e9283a7b335e8746b335c9b2cfa0a4946
|
[
"Apache-2.0"
] | 6
|
2022-02-08T14:38:47.000Z
|
2022-03-27T06:51:07.000Z
|
wip/TextAugment.py
|
ianyu93/muliwai
|
22ff672ad0aaf5a1f0d813ffe1b8e84fdab6f40e
|
[
"Apache-2.0"
] | 16
|
2022-01-13T01:49:37.000Z
|
2022-03-09T12:27:08.000Z
|
wip/TextAugment.py
|
ianyu93/muliwai
|
22ff672ad0aaf5a1f0d813ffe1b8e84fdab6f40e
|
[
"Apache-2.0"
] | 6
|
2022-01-12T20:56:06.000Z
|
2022-02-26T19:50:35.000Z
|
"""
Copyright, 2021-2022 Ontocord, LLC, All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import fsspec
from tqdm import tqdm
import difflib
import langid
import json
import random
import logging
from typing import List, Dict, Optional
import torch
from torch.nn.functional import cosine_similarity
import spacy
from nltk.corpus import stopwords
from ontology.ontology_manager import OntologyManager
from datasets import load_dataset
from transformers import (
AutoTokenizer,
M2M100ForConditionalGeneration,
M2M100Tokenizer,
MarianMTModel,
pipeline,
)
from sentence_transformers import SentenceTransformer
import qg_pipeline
from utils import (
rulebase,
banned_words,
hf_ner_model_map,
mariam_mt,
LoggingHandler,
get_oscar_urls,
download_urls,
CharManager,
get_docs,
badwords as badwords_ac_dc,
stopwords as stopwords_ac_dc,
)
# torch.cuda.empty_cache()
# labse = SentenceTransformer("sentence-transformers/LaBSE").half().eval().cuda()
# qg = qg_pipeline.pipeline("multitask-qa-qg")
#
# ontology_manager = None # OntologyManager(target_lang='en') #target_lang=target_lang
# translation_pipelines = {}
# ner_model_name2pipelines = {}
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
class TextAugment:
    """Multilingual NER tagging / text-augmentation pipeline.

    Combines M2M100 / MarianMT translation, spacy and Huggingface NER
    pipelines, ontology lookup and LaBSE sentence similarity to tag text
    (and carry tags across languages via back-translation).
    """
    # don't add a space for junk chars
    # Longest known stopword per CJK language; bounds the substring scan in
    # check_good_sentence, since these languages are not space-separated.
    max_stoword_len_zh = max([len(a) for a in stopwords_ac_dc.get('zh', [''])])
    max_stoword_len_ko = max([len(a) for a in stopwords_ac_dc.get('ko', [''])])
    max_stoword_len_ja = max([len(a) for a in stopwords_ac_dc.get('ja', [''])])
def __init__(self,
ontology_manager: OntologyManager = None,
translation_pipelines: Dict = None,
ner_model_name2pipelines: Dict = None,
qg=None,
production_mode: bool = False):
self.ontology_manager = ontology_manager
self.qg = qg
self.banned_words = banned_words
self.hf_ner_model_map = hf_ner_model_map
self.translation_pipelines = translation_pipelines if translation_pipelines else {}
self.ner_model_name2pipelines = ner_model_name2pipelines if ner_model_name2pipelines else {}
self.strip_chars = CharManager.strip_chars
self.punc_char = CharManager.punc_char
self.special_char = CharManager.special_char
self.junk = CharManager.junk
# models
# labse model from sentence transformers
self.labse = SentenceTransformer("sentence-transformers/LaBSE").half().eval().cuda()
# spacy
self.en_spacy_nlp = spacy.load('en_core_web_sm')
# m2m models for translation
self.m2m_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").eval().half().cuda()
self.m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
# TODO: OntologyManager init has changed
if production_mode: # use the below for production usage. the above is for testing.
if not self.ontology_manager: self.ontology_manager = OntologyManager() # src_lang=src_lang
logging.info("Finished Loading TextAugment")
def check_good_sentence(self, s, src_lang, stopwords, stopword_ratio_cutoff=0.06, bannedwords=None, badwords=None,
badword_ratio_cutoff=0.15, junk_ratio=0.16, max_badword_len=5):
# basic dejunk
# for badwords, only filter out if the ratio is exceeded AND there exists one banned word
if bannedwords is None:
bannedwords = self.banned_words.get(src_lang, self.banned_words['default'])
default_bannedwords = self.banned_words['default']
s = s.lower().strip()
if not s: return False
# print ([s2 for s2 in s if s2 in self.junk])
# print (len([s2 for s2 in s if s2 in self.junk]), len(s))
jr = len([s2 for s2 in s if s2 in self.junk]) / len(s)
if jr >= junk_ratio:
return False
if src_lang in ("ja", "ko", "zh"):
sArr = s
else:
sArr = [s2.strip(self.special_char) for s2 in s.lower().split() if s2.strip(self.special_char)]
if len(sArr) == 0:
return False
# stopword check
if stopwords is not None:
# TODO: catch multi word with spaces
# print ('sw', len([s2 for s2 in sArr if s2 in stopwords])/len(sArr))
if src_lang not in ("ja", "ko", "zh") and len([s2 for s2 in sArr if s2 in stopwords]) / len(
sArr) < stopword_ratio_cutoff:
return False
if src_lang in ("ja", "ko", "zh"):
if src_lang == "zh":
max_stoword = self.max_stoword_len_zh
elif src_lang == "ko":
max_stoword = self.max_stoword_len_ko
elif src_lang == "ja":
max_stoword = self.max_stoword_len_ja
len_s = len(s)
stop_cnt = 0
total_cnt = 0
for i in range(len_s):
for j in range(i + 1, min(len_s, i + max_stoword)):
if s[i:j] in stopwords:
stop_cnt += 1
total_cnt += 1
# print ('stopword', (stop_cnt/total_cnt) )
if (stop_cnt / total_cnt) < stopword_ratio_cutoff:
return False
if badwords is not None:
# print ('bw', len([s2 for s2 in sArr if s2 in badwords])/len(sArr))
if src_lang not in ("ja", "ko", "zh") and len([s2 for s2 in sArr if s2 in badwords]) / len(
sArr) > badword_ratio_cutoff:
if any(s2 for s2 in sArr if s2 in bannedwords) or any(s2 for s2 in sArr if s2 in default_bannedwords):
return False
if src_lang in ("ja", "ko", "zh"):
badword_ratio_cutoff /= 100
len_s = len(s)
bad_cnt = 0
total_cnt = 0
for i in range(len_s):
for j in range(i + 1, min(len_s, i + max_badword_len)):
if s[i:j] in badwords:
bad_cnt += 1
total_cnt += 1
if (bad_cnt / total_cnt) > badword_ratio_cutoff:
for bword in bannedwords:
if bword in s:
return False
for bword in default_bannedwords:
if bword in s:
return False
# langid check
try:
lang = langid.classify(s)[0]
except:
return True
return lang == src_lang
    def generate_questions(self, batch, default_answers=[]):
        """Generate QA pairs for each chunk with self.qg and derive pseudo-NER
        labels from the leading question word (who/where/when/why/how/...).

        NOTE(review): ``stopwords_hash`` referenced below is not defined
        anywhere in this module's visible scope -- presumably an English
        stopword lookup; confirm before exercising this path.
        NOTE(review): ``default_answers=[]`` is a mutable default; it is only
        rebound here (never mutated in place), so it is harmless but fragile.

        :param batch: iterable of chunk dicts, each carrying a 'text' key
        :param default_answers: extra candidate answers accumulated while the
            question generator runs over successive chunks
        """
        answers = {}

        i = 0
        allqa = []
        for chunk in batch:
            text = chunk['text']
            answers1 = {}
            # ti = time.time()
            # normalize spacing/punctuation before question generation
            text = text.replace("U.S.", "US").replace("\n", " ").replace(",", " , ").replace(" ", " ").strip().replace(
                " , ", ", ")  # replace(" He ", " Lincoln ").replace(" he ", " Lincoln ").replace(" him ", " Lincoln ")
            aHash = self.qg(text)  # , default_answers=default_answers)
            allqa.append(aHash)
            default_answers = list(set([a['answer'] for a in aHash] + default_answers))
            print(aHash)
            # for aHash1 in aHash:
            #    extraction = vis.parse(list(dep_parser(aHash1['question']).sents)[0], aHash1['answer'])
            #    print (extraction.arg1, '*', extraction.rel, '*', extraction.arg2)

            # map each answer to a label derived from the question word
            for aHash1 in aHash:
                if answers.get(aHash1['answer'].lower()) or answers1.get(aHash1['answer'].lower()):
                    continue
                if len(aHash1['answer'].split()) > 10:
                    # clip very long answers to their first 10 tokens
                    aHash1['answer'] = " ".join(aHash1['answer'].split()[:10])
                i += 1
                quest = aHash1['question'].lower().strip("?").replace("'s", " 's").replace(" ", " ").split()
                label = ""
                # TODO, use spacy_en to get NER and only fall back to "who", "when", "where" to determine ner if we find nothing
                # "who ...?" with a plural-looking answer -> organization
                if quest[0] == "who" and aHash1['answer'][-1] == 's':
                    label = "organization_" + str(i)
                    if "'s" in quest:
                        # possessive: label by the word after "'s"
                        for j in range(len(quest)):
                            if j > 0 and quest[j - 1] == "'s":
                                label = quest[j] + "_" + str(i)
                                break
                    for a in aHash1['answer'].lower().split():
                        if a not in stopwords_hash:
                            answers[a] = label
                elif quest[0] == "who":
                    label = "person_" + str(i)
                    if "'s" in quest:
                        for j in range(len(quest)):
                            if j > 0 and quest[j - 1] == "'s":
                                label = quest[j] + "_" + str(i)
                                break
                    for a in aHash1['answer'].lower().split():
                        if a not in stopwords_hash:
                            answers[a] = label
                elif quest[0] == "where":
                    label = "location_" + str(i)
                elif quest[0] == "when":
                    label = "date_or_time_" + str(i)
                elif quest[0] == "why":
                    label = "reason_" + str(i)
                elif quest[0] == "how" and quest[1] in ("much", "many"):
                    label = "quantity_" + str(i)
                elif quest[0] == "how":
                    label = "method_" + str(i)
                elif quest[0] in ("which", "what") and quest[1] not in stopwords_hash:
                    label = quest[1] + "_" + str(i)
                elif "'s" in quest:
                    for j in range(len(quest)):
                        if j > 0 and quest[j - 1] == "'s":
                            label = quest[j] + "_" + str(i)
                            break
                if label:
                    answers[aHash1['answer'].lower()] = label

            # for b in a['answer'].lower().split():
            #    answers[b] = label
        print(answers)

        # second pass: rewrite each question with the discovered labels and
        # build a compact description per answer
        for aHash in allqa:
            answers1 = {}
            for aHash1 in aHash:
                if answers1.get(aHash1['answer'].lower()):
                    continue
                quest = " " + aHash1['question'].lower().strip("?").replace("'s", " 's").replace(" ", " ") + " "
                q_type = quest[0]
                agent = []
                answer_keys = list(answers.keys())
                # substitute longest answers first to avoid partial overlaps
                answer_keys.sort(key=lambda k: len(k), reverse=True)
                for a in answer_keys:
                    if " " + a + " " in quest:
                        quest = quest.replace(" " + a + " ", " " + answers[a] + " ")
                    elif " " + a + ", " in quest:
                        quest = quest.replace(" " + a + ", ", " " + answers[a] + ", ")
                quest = quest.split()
                # print (quest)
                qtype = []
                if answers.get(aHash1['answer'].lower()):
                    if answers.get(aHash1['answer'].lower()).split("_")[0] == "person":
                        qtype = ["is", "who"]
                if not qtype and quest[0] in ("when", "where", "why", "how"):  # , "which"
                    qtype = [quest[0]]
                    if quest[0] == "how" and quest[1] in ("much", "many"):
                        qtype = qtype + [quest[1]]

                # label=[q for q in quest if (q not in ("much", "many",) and not stopwords_hash.get(q) and q not in answers)]#+qtype
                # keep non-numeric, non-article tokens as the description
                label = [q for q in quest if (q[0] not in "0123456789") and (q not in ("the", "a", "an"))]
                if len(label) > 10:
                    label = label[:10]
                answers1[aHash1['answer'].lower()] = " ".join(label)
        print(answers1)
@staticmethod
def get_aligned_text(sent1, sent2, src_lang):
"""
Given two sentences, find blocks of text that match and that don't match.
return the blocks, and a matching score.
Used to extract NER from original language sentence.
"""
if src_lang in ("ja", "ko", "zh"):
# splitting on spaces doesn't always work because some languages aren't space separated
sep = ""
else:
sep = " "
sent1 = sent1.split()
sent2 = sent2.split()
aMatch = difflib.SequenceMatcher(None, sent1, sent2)
score = aMatch.ratio()
blocks = aMatch.get_matching_blocks()
blocks2 = []
prevEndA = 0
prevEndB = 0
matchLen = 0
nonMatchLen = 0
# print (blocks)
for blockI in range(len(blocks)):
if blockI > 0 or (blockI == 0 and (blocks[blockI][0] != 0 or blocks[blockI][1] != 0)):
blocks2.append(
[sep.join(sent1[prevEndA:blocks[blockI][0]]), sep.join(sent2[prevEndB:blocks[blockI][1]]), 0])
nonMatchLen += max(blocks[blockI][0] - prevEndA, blocks[blockI][1] - prevEndB)
if blocks[blockI][2] != 0:
blocks2.append([sep.join(sent1[blocks[blockI][0]:blocks[blockI][0] + blocks[blockI][2]]),
sep.join(sent2[blocks[blockI][1]:blocks[blockI][1] + blocks[blockI][2]]), 1])
prevEndA = blocks[blockI][0] + blocks[blockI][2]
prevEndB = blocks[blockI][1] + blocks[blockI][2]
matchLen += blocks[blockI][2]
# score = float(matchLen+1)/float(nonMatchLen+1)
return (blocks2, score)
def do_translations(self, texts, src_lang='en', target_lang='hi', batch_size=16, do_mariam_mt=False):
if not do_mariam_mt:
try:
self.m2m_tokenizer.src_lang = src_lang
target_lang_bos_token = self.m2m_tokenizer.get_lang_id(target_lang)
translations = []
for src_text_list in tqdm(self.batch(texts, batch_size)):
try:
batch = self.m2m_tokenizer(src_text_list, return_tensors="pt", padding=True,
truncation=True).to('cuda')
except:
break
gen = self.m2m_model.generate(**batch, forced_bos_token_id=target_lang_bos_token,
no_repeat_ngram_size=4, ) #
outputs = self.m2m_tokenizer.batch_decode(gen, skip_special_tokens=True)
translations.extend(outputs)
return translations
except:
pass
translations = []
# mariam_mt = self.mariam_mt
model_name = mariam_mt.get((src_lang, target_lang))
mt_pipeline = None
if model_name is not None and model_name not in self.translation_pipelines:
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name).half().eval().cuda()
mt_pipeline = pipeline("translation", model=model, tokenizer=tokenizer, device=0)
self.translation_pipelines[model_name] = mt_pipeline
if not mt_pipeline:
raise RuntimeError(
"no translation pipeline") # we could do multi-step translation where there are no pairs
mt_pipeline = self.translation_pipelines[model_name]
for src_text_list in tqdm(self.batch(texts, batch_size)):
outputs = [t['translation_text'] for t in mt_pipeline(src_text_list)]
translations.extend(outputs)
return translations
@staticmethod
def cjk_detect(texts):
# korean
if re.search("[\uac00-\ud7a3]", texts):
return "ko"
# japanese
if re.search("[\u3040-\u30ff]", texts):
return "ja"
# chinese
if re.search("[\u4e00-\u9FFF]", texts):
return "zh"
return None
@staticmethod
def batch(lst, n):
"""Generate batches"""
lst = list(lst)
for i in range(0, len(lst), n):
yield lst[i: i + n]
@staticmethod
def convert_uniform_label(label: str) -> str:
if label in ('STREET_ADDRESS',):
label = 'STREET_ADDRESS'
elif label in ('PUBLIC_FIGURE',):
label = 'PUBLIC_FIGURE'
elif label in ('NAME', 'PER', 'PERSON'):
label = 'PERSON'
elif label in ('LOCATION', 'LOC', 'GPE'):
label = 'GPE'
elif label in ('ORGANIZATION', 'ORG'):
label = 'ORG'
elif label in ('AGE',):
label = 'AGE'
elif label in ('NORP',):
label = 'NORP'
elif label in ('BIO', 'SYMPTOM_AND_DISEASE', 'DISEASE'):
label = 'DISEASE'
elif label in ('PATIENT_ID', 'GOVT_ID'):
label = 'GOVT_ID'
elif label in ('DATE'):
label = 'DATE'
elif label in ('USER_ID',):
label = 'USER_ID'
elif label in ('MISC',) and '@' in ner_result['word']:
label = 'USER_ID'
elif label in ('TRANSPORTATION', 'TRANS', 'TRANSPORT'):
label = 'TRANSPORTATION'
else:
logging.warning(f"can not match label: {label}, set as MISC")
label = 'MISC'
return label
def hf_ner(self, hf_pipeline, src_lang, docs, chunks, stopwords=None, weight=1.5):
"""
run the text through a Huggingface ner pipeline.
any tags found by this method will be weighted by the weight param
TODO: use the predicted value of the logits to further weight prediction
"""
if stopwords is None:
stopwords = set(ac_dc_stopwords.get(src_lang, []))
offset_key = f'{src_lang}_offset'
text_key = f'{src_lang}_text'
ner_key = f'{src_lang}_ner'
results_arr = hf_pipeline([chunk[text_key] for chunk in chunks])
results_arr2 = []
offset = 0
for chunk, results in zip(chunks, results_arr):
text = chunk[text_key]
_id = chunk['id']
ner = docs[_id][ner_key] = docs[_id].get(ner_key, {})
offset = chunk[offset_key]
len_text = len(text)
results = [ner_result for ner_result in results if ner_result['word'] not in ("[UNK]", "<unk>")]
if not results:
results_arr2.append([])
continue
results2 = []
if results[0]['start'] is not None:
results.sort(key=lambda a: a['start'])
else:
results.sort(key=lambda a: a['index'])
i = 0
for ner_result in results:
ner_result['word'] = word = ner_result['word'].rstrip('@@')
ner_result['start'] = text.index(word, i)
i = ner_result['start'] + 1
ner_result['end'] = ner_result['start'] + len(word)
for ner_result in results:
start = ner_result['start']
if not self.cjk_detect(text[ner_result['start']:ner_result['end']]):
if text[start] not in self.strip_chars:
for j in range(1, start):
if start - j == -1 or text[start - j] in self.strip_chars:
start = max(start - j, 0)
break
end = ner_result['end']
if end < len_text and text[end] != ' ':
end += len(text[end:].split(' ', 1)[0])
else:
start = ner_result['start']
end = ner_result['end']
while text[start] in self.strip_chars:
start += 1
if start >= end: break
end = start + len(text[start:end].strip(self.strip_chars))
ner_result['word'] = text[start:end]
ner_result['start'] = start + offset
ner_result['end'] = end + offset
if results2 and results2[-1]['end'] > ner_result['start']:
continue
results2.append(ner_result)
results_arr2.append(results2)
results_arr = results_arr2
for chunk, results in zip(chunks, results_arr):
_id = chunk['id']
ner = docs[_id][ner_key]
text = docs[_id][text_key]
len_text = len(text)
results = [ner_result for ner_result in results if ner_result['word'] not in ("[UNK]", "<unk>")]
if not results: continue
prev_word = [0, 0]
prev_label = None
prev_word2 = ""
for ner_result in results:
start = ner_result['start']
if start is None:
prev_word2 = ""
continue
end = ner_result['end']
if text[start:end] != ner_result['word']:
print('offset mismatch', text[start:end], ner_result['word'])
if "-" in ner_result['entity']:
_, label = ner_result['entity'].split('-')
else:
label = ner_result['entity']
label = self.convert_uniform_label(label)
if prev_label is not None:
if not ner_result['entity'].startswith('B-') and label == prev_label and (
prev_word[1] >= start - 5):
prev_word[1] = max(prev_word[1], end)
prev_word2 = prev_word2 + " " + ner_result['word']
else:
if ner_result['entity'].startswith('B-'):
if prev_word[1] > start:
prev_word[1] = start
if prev_word[0] != prev_word[1]:
ner_word = text[prev_word[0]:prev_word[1]]
# if ner_word != prev_word2:
# print (ner_word, '**', prev_word2)
# ner_word.strip(self.strip_chars)
mention = (ner_word, prev_word[0], prev_word[1])
if ner_word and ner_word.lower() not in stopwords:
aHash = ner.get(mention, {})
aHash[prev_label] = aHash.get(prev_label, 0) + weight * (1.0 + len(ner_word) / 100)
ner[mention] = aHash
prev_word = [start, end]
prev_word2 = ner_result['word']
elif prev_label is None:
prev_word = [start, end]
prev_word2 = ner_result['word']
prev_label = label
if prev_label is not None and prev_word[0] != prev_word[1]:
ner_word = text[prev_word[0]:prev_word[1]]
# if ner_word != prev_word2:
# print (ner_word, '**', prev_word2)
mention = (ner_word, prev_word[0], prev_word[1])
if ner_word and ner_word.lower() not in stopwords:
aHash = ner.get(mention, {})
aHash[prev_label] = aHash.get(prev_label, 0) + weight * (1.0 + len(ner_word) / 100)
ner[mention] = aHash
def _spacy_pipeline(self, lang: str=None):
spacy_nlp = None
try:
spacy_nlp = spacy.load(f"{lang}_core_web_sm")
except:
logging.error(f"Failed to load spacy pipeline for {lang} at {lang}_core_web_sm")
finally:
return spacy_nlp
def spacy_ner(self, docs, nlp, stopwords, spacy_weight, src_lang, extra_weight=1.0):
"""
Use the spacy models to create mentions w/ NER
"""
if not nlp:
return
if stopwords is None:
stopwords = set(ac_dc_stopwords.get(src_lang, []))
offset_key = f'{src_lang}_offset'
text_key = f'{src_lang}_text'
ner_key = f'{src_lang}_ner'
for doc in docs.values():
ner = doc[ner_key] = doc.get(ner_key, {})
text = doc[text_key]
doc = nlp(text)
entities = list(doc.ents)
ents = [(entity.text, entity.label_ if (
entity.label_ in ('PERSON', 'GPE', 'ORG', 'NORP') and 'http:' not in entity.text) else 'MISC')
for entity in entities]
i = 0
for ner_word, label in ents:
ner_word = ner_word.strip(self.strip_chars)
if ner_word and ner_word.lower() not in stopwords:
if not self.cjk_detect(ner_word):
if ner_word not in text: continue
i += text[i:].index(ner_word)
ner_word = text[i:].split(" ", 1)[0]
ner_word = ner_word.strip(self.strip_chars)
if ner_word.lower() not in stopwords:
mention = (ner_word, i, i + len(ner_word))
aHash = ner.get(mention, {})
aHash[label] = aHash.get(label, 0) + spacy_weight * (1.0 + len(ner_word) / 100) * extra_weight
ner[mention] = aHash
    def trim_to_prefer_person(self, docs, chunks, prob=100):
        """Downsample *docs*/*chunks*, keeping docs that mention people,
        govt ids or user ids, and a ~1/prob random sample of the rest.

        Docs with no `*_ner` key at all are always kept. Returns the
        original inputs unchanged when nothing would be dropped (or when
        everything would be).

        :param docs: dict of doc-id -> doc dict
        :param chunks: list of chunk dicts belonging to those docs
        :param prob: keep a non-person doc with probability 1/(prob+1)
        :return: (docs, chunks) possibly filtered
        """
        # downsample to mostly docs with mentions of people, govt_id and email
        # if there were no ner set, then don't downsample the doc
        len_docs = len(docs)
        do_ids = []
        for _id, doc in docs.items():
            if not any(key for key in doc if key.endswith('_ner')):
                # no NER was run on this doc: always keep it
                do_ids.append(_id)
                continue
            found_ner = False
            for key in list(doc.keys()):
                if doc.get('has_person'):
                    do_ids.append(_id)
                    break
                if key.endswith('_ner'):
                    if not found_ner:
                        found_ner = doc[key] != {}
                    ner = doc[key]
                    for aHash in ner.values():
                        if isinstance(aHash, int):
                            continue
                        if 'PUBLIC_FIGURE' in aHash or 'PERSON' in aHash or 'GOVT_ID' in aHash or 'USER_ID' in aHash:
                            doc['has_person'] = True
                            do_ids.append(_id)
                            break
            # NOTE: _id may be appended more than once here; do_ids is
            # de-duplicated via set() below, so this is harmless
            if doc.get('has_person'):
                do_ids.append(_id)
            elif not doc.get('has_person') and random.randint(0, prob) == 0:
                do_ids.append(_id)
        do_ids = set(do_ids)
        chunks2 = [chunk for chunk in chunks if chunk['id'] in do_ids]
        docs2 = dict([(doc['id'], doc) for doc in docs.values() if doc['id'] in do_ids])
        if len(docs2) == 0 or len_docs == len(docs2):
            # nothing (or everything) was trimmed: return inputs untouched
            return docs, chunks
        logging.info(f'trim_to_prefer_person {str((len_docs - len(docs2)) / len_docs)}')
        return docs2, chunks2
    def _split_text_into_chunks(self,
                                src_lang: str,
                                src_is_cjk: bool,
                                doc: Dict,
                                batch_window: int,
                                sep: str,
                                chunks: List[Dict],
                                ) -> None:
        """Split doc's `{src_lang}_text` into chunks of at most ~batch_window
        tokens (characters for CJK), preferring to break at punctuation.

        Appends chunk dicts (text, doc id, character offset) to both *chunks*
        and doc['chunks'], and rewrites doc's text as the re-joined tokens.

        :param src_lang: language code used to build the doc keys
        :param src_is_cjk: True when the text is not space-separated
        :param doc: doc dict; `{src_lang}_text` and 'chunks' are updated in place
        :param batch_window: soft maximum number of tokens per chunk
        :param sep: token joiner ("" for CJK, " " otherwise)
        :param chunks: output list the new chunk dicts are appended to
        """
        offset = 0
        text = []
        # CJK text is iterated per character; other text per whitespace token
        textarr = doc[f'{src_lang}_text'] if src_is_cjk else doc[f'{src_lang}_text'].split()
        for t in textarr:
            punc_found = [punc for punc in t if punc in self.punc_char]
            # a token with interior punctuation followed by an uppercase letter
            # is likely two sentences fused together -- split it in two
            if punc_found and t[-1] not in self.punc_char and t[0] not in "0123456789" and t[0] == t[0].lower():
                w = t[t.index(punc_found[0]) + 1]
                if w == w.upper():
                    t, t1 = t.split(punc_found[0], 1)
                    t = t + punc_found[0] + (" " if src_is_cjk else "")
                    text.append(t)
                    text.append(t1)
                    continue
            text.append(t)
        text[0] = text[0].lstrip()
        text[-1] = text[-1].rstrip()
        doc[f'{src_lang}_text'] = sep.join(text)
        len_text = len(text)
        while len_text > batch_window:
            # scan forward from the window edge for a punctuation boundary
            for j in range(batch_window - 1, len_text):
                if (src_is_cjk and text[j] in self.punc_char) or (
                        not src_is_cjk and text[j][-1] in self.punc_char):
                    break
            text_str = sep.join(text[:j + 1])
            chunks.append({f'{src_lang}_text': text_str, 'id': doc['id'], f'{src_lang}_offset': offset})
            doc['chunks'].append(chunks[-1])
            # +1 accounts for the space joiner consumed between chunks
            offset += len(text_str) + (0 if src_is_cjk else 1)
            text = text[j + 1:]
            len_text = len(text)
        if text:
            # emit whatever remains as the final chunk
            text_str = sep.join(text)
            chunks.append({f'{src_lang}_text': text_str, 'id': doc['id'], f'{src_lang}_offset': offset})
            doc['chunks'].append(chunks[-1])
def process_ner_chunks_with_trans(self,
src_lang,
docs,
chunks,
target_lang=None,
do_spacy=True,
do_hf_ner=True,
do_ontology=True,
do_backtrans=False,
do_regex=True,
do_cleanup=True,
batch_size=5,
batch_window=70,
ontology_weight=0.9,
spacy_weight=1.25,
hf_ner_weight=1.0,
backtrans_weight=0.9,
do_postprocessing_after_backtrans=False,
do_docs_trim=False):
if target_lang is None:
target_lang = src_lang
do_backtrans = False
backtrans_weight = 1.0
if target_lang == src_lang and do_backtrans:
do_backtrans = False
logging.warning(f"Warning src_lang==target_lang={src_lang} but do_backtrans=={True}")
logging.info(f"Set do_backtrans=={False}")
stopwords1 = set(stopwords_ac_dc[src_lang])
stopwords2 = set(stopwords_ac_dc[target_lang])
ner_pipelines = []
# init spacy pipeline
spacy_nlp = self._spacy_pipeline(target_lang) if do_spacy else None
# init hf ner pipelines
if do_hf_ner:
for model_name, model_cls, hf_ner_weight2 in self.hf_ner_model_map.get(target_lang, []):
if model_name not in self.ner_model_name2pipelines:
logging.info(f"adding {model_name} into ner_model_name2pipelines")
# TODO: specific tf or pytroch in hf_ner_model_map to avoid error
ner_pipeline = None
try:
ner_pipeline = pipeline("ner", model=model_name,
tokenizer=(model_name, {"use_fast": True},), device=0)
except:
ner_pipeline = pipeline("ner", model=model_name,
tokenizer=(model_name, {"use_fast": True},), framework="tf",
device=0)
finally:
self.ner_model_name2pipelines[model_name] = ner_pipeline
ner_pipelines.append((self.ner_model_name2pipelines[model_name], hf_ner_weight2))
target_is_cjk = target_lang in ('zh', 'ko', 'ja')
src_is_cjk = src_lang in ('zh', 'ko', 'ja')
if do_backtrans:
# translate from src_lang to target_lang and do ner in target_lang. translation also acts as an error check and additional ner.
# we check to see if the already tagged items in src lang should have scores for tags increased or are common words in target lang and should not be tagged.
# we also add new labels for items that are already tagged in src_lang.
sep = "" if src_is_cjk else " "
if src_is_cjk:
lbracket = "[["
rbracket = "]]"
else:
lbracket = "["
rbracket = "]"
for chunk in chunks:
text = chunk[f'{src_lang}_text'].replace("(", "{").replace(")", "}")
_id = chunk['id']
offset = chunk[f'{src_lang}_offset']
doc = docs[_id]
offset_end = offset + len(text)
if f'{src_lang}_items' not in doc:
doc[f'{src_lang}_items'] = list(doc.get(f'{src_lang}_ner', {}).keys())
doc[f'{src_lang}_items'].sort(key=lambda a: a[1])
i = 0
for idx, key in enumerate(doc[f'{src_lang}_items']):
if key[1] < offset:
continue
if key[2] > offset_end:
break
if len(key[0]) < 4 and not self.cjk_detect(key[0]):
if " " + key[0] + " " in text[i:]:
j = text.index(" " + key[0] + " ", i)
text = text[:j] + (text[j:].replace(" " + key[0] + " ", f" **{idx}** ", 1))
i = j
else:
if key[0] in text[i:]:
j = text.index(key[0], i)
text = text[:j] + (text[j:].replace(key[0], f" **{idx}** ", 1))
i = j
chunk[f'{src_lang}_tmpl_text'] = text
src_items_sorted = list(enumerate(doc[f'{src_lang}_items']))
src_items_sorted.sort(key=lambda a: len(a[1][0]))
for chunk in chunks:
text = chunk[f'{src_lang}_tmpl_text']
_id = chunk['id']
doc = docs[_id]
for idx, key in src_items_sorted:
if len(key[0]) < 5 and not self.cjk_detect(key[0]):
text = text.replace(" " + key[0] + " ", f" **{idx}** ")
else:
text = text.replace(key[0], f" **{idx}** ")
chunk[f'{src_lang}_tmpl_text'] = text
for chunk in chunks:
text = chunk[f'{src_lang}_tmpl_text']
_id = chunk['id']
doc = docs[_id]
for idx, key in enumerate(doc[f'{src_lang}_items']):
text = text.replace(f" **{idx}** ", f" {idx} {lbracket} {key[0]} {rbracket}")
chunk[f'{src_lang}_tmpl_text'] = text.replace(" ", " ")
# print ('*****', chunks2)
chunks2 = [chunk[f'{src_lang}_tmpl_text'] for chunk in chunks]
text2 = self.do_translations(chunks2, src_lang=src_lang, target_lang=target_lang, batch_size=batch_size)
for chunk, trans_text in zip(chunks, text2):
# langid check
try:
lang = langid.classify(trans_text)[0]
except:
lang = target_lang
if lang == target_lang:
chunk[f'{target_lang}_text'] = trans_text.lstrip(" .").replace(rbracket, "]").replace(lbracket,
"[").replace(
"}", ")").replace("{", "(")
else:
chunk[f'{target_lang}_text'] = " . . . "
all_embed = self.labse.encode(chunks2, convert_to_tensor=True)
all_trans_embed = self.labse.encode([chunk[f'{target_lang}_text'] for chunk in chunks],
convert_to_tensor=True)
similarity = cosine_similarity(all_embed, all_trans_embed, dim=1)
for chunk, sim_score in zip(chunks, similarity):
trans_text = chunk[f'{target_lang}_text']
sim_score = sim_score.item()
print(sim_score, '**', trans_text, '**', chunk[f'{src_lang}_tmpl_text'])
_id = chunk['id']
doc = docs[_id]
if sim_score < 0.75:
trans_text = chunk[f'{target_lang}_text'] = " . . . "
if doc.get(f'{target_lang}_text', ""):
chunk[f'{target_lang}_offset'] = len(doc.get(f'{target_lang}_text', "")) + 1
else:
chunk[f'{target_lang}_offset'] = 0
doc[f'{target_lang}_text'] = (doc.get(f'{target_lang}_text', "") + " " + trans_text).strip()
chunk[f'{src_lang}_2_{target_lang}_sim'] = 0.0
continue
chunk[f'{src_lang}_2_{target_lang}_sim'] = sim_score
len_items = len(doc[f'{src_lang}_items'])
doc[f'{target_lang}_2_{src_lang}_ner'] = doc.get(f'{target_lang}_2_{src_lang}_ner', {})
while "[" in trans_text:
before, after = trans_text.split("[", 1)
before = before.strip()
after = after.strip()
before_arr = before.split()
if "]" not in after or not before_arr:
trans_text = before + sep + after
continue
idx = before_arr[-1]
ent, after = after.split("]", 1)
ent = ent.strip()
try:
idx = int(idx)
except:
idx = None
if idx is not None and idx < len_items:
before = " ".join(before_arr[:-1])
key = doc[f'{src_lang}_items'][idx]
ent_lower = ent.lower()
if ent_lower in stopwords2:
# reduce weight of target labels if this is translated into an en stopword
if key in doc[f'{src_lang}_ner']:
aHash = doc[f'{src_lang}_ner'][key]
for key in list(aHash.keys()):
aHash[key] /= 2.0
else:
vals = list(doc[f'{src_lang}_ner'][key].keys())
ent = ent.strip(self.strip_chars)
doc[f'{target_lang}_2_{src_lang}_ner'][ent] = idx
trans_text = before + " " + ent + " " + after
trans_text = chunk[f'{target_lang}_text'] = trans_text.replace(" ", "").strip()
if doc.get(f'{target_lang}_text', ""):
chunk[f'{target_lang}_offset'] = len(doc.get(f'{target_lang}_text', "")) + 1
else:
chunk[f'{target_lang}_offset'] = 0
doc[f'{target_lang}_text'] = (doc.get(f'{target_lang}_text', "") + " " + trans_text).strip()
if do_regex:
pass # TBD
if do_ontology:
# ontology - context independent - there are some bugs in disease detection which needs to be fixed
for doc in docs.values():
doc[f'{target_lang}_ner'] = ner = doc.get(f'{target_lang}_ner', {})
if target_lang == 'en':
chunk2ner = self.ontology_manager.tokenize(doc[f'{target_lang}_text'])['chunk2ner']
onto_items = []
for c, label in chunk2ner.items():
ner_word = c[0].replace(" ", "").replace("_", "").replace("_", "") if self.cjk_detect(c[0]) else \
c[0].replace("_", " ").replace("_", " ").rstrip(self.strip_chars)
if ner_word.lower() not in stopwords2:
if not self.cjk_detect(ner_word) and label in (
'PERSON', 'PUBLIC_FIGURE', 'ORG') and " " not in ner_word: continue
onto_items.append(((ner_word, c[1], c[1] + len(ner_word)), label))
for ner_mention, label in list(set(onto_items)):
aHash = ner.get(ner_mention, {})
aHash[label] = aHash.get(label, 0) + ontology_weight * (
1.0 + len(ner_mention[0]) / 100) * backtrans_weight
ner[ner_mention] = aHash
if do_spacy:
if spacy_nlp:
# spacy
self.spacy_ner(docs, spacy_nlp, stopwords2, spacy_weight, target_lang, extra_weight=backtrans_weight)
if do_hf_ner:
# transformer
for ner_pipeline, hf_ner_weight2 in ner_pipelines:
for a_batch in self.batch(chunks, batch_size):
self.hf_ner(ner_pipeline, target_lang, docs, a_batch, stopwords=stopwords2,
weight=hf_ner_weight * backtrans_weight * hf_ner_weight2)
if do_cleanup:
# do some cleanups. we don't want any ner that are just short numbers, stopwords or single characters.
for _id, doc in docs.items():
ner = doc[f'{target_lang}_ner']
for key in list(doc[f'{target_lang}_ner'].keys()):
ner_word = key[0]
try:
if len(ner_word) < 4 and float(ner_word):
print("deleting ", ner_word)
del doc[f'{target_lang}_ner'][key]
continue
except:
pass
if ner_word.lower() in stopwords2 or (not self.cjk_detect(ner_word) and len(ner_word) <= 1):
print("deleting ", ner_word)
del doc[f'{target_lang}_ner'][key]
# increase weight of src ner items if the target translations indicate it's an NER
if target_lang != src_lang:
for doc in docs.values():
ner = doc[f'{target_lang}_ner']
target2src_ner = doc.get(f'{target_lang}_2_{src_lang}_ner', {})
for ent, idx in target2src_ner.items():
key = doc[f'{src_lang}_items'][idx]
# NOTE that this is an unordered match
ner_match = [key2 for key2 in ner if ent == key2[0]]
if not ner_match and len(ent) > 3:
ner_match = [key2 for key2 in ner if (ent in key2[0] or (len(key2[0]) > 3 and key2[0] in ent))]
if ner_match:
if key in doc[f'{src_lang}_ner']:
aHash = doc[f'{src_lang}_ner'][key]
all_labels = []
for key2 in ner_match:
all_labels.extend(list(ner[key2].keys()))
all_labels = set(all_labels)
found = False
for label in list(aHash.keys()):
if label in all_labels or 'MISC' in all_labels:
aHash[label] *= 1.1
print('increasing ', key, label, aHash[label])
found = True
if not found:
print('not found', key, all_labels)
if do_docs_trim:
docs, chunks = self.trim_to_prefer_person(docs, chunks)
if do_backtrans and target_lang != src_lang:
# backtrans from src_lang to target_lang back to src_lang allows us to catch more NER using target lang NER tools.
# then we tag in target_lang those items we haven't already found, and tranlsate back to match the original text.
# NOTE: We do not modify the original text, but only use backtrans to do NER tagging and other analysis.
sep = "" if target_is_cjk else " "
if target_is_cjk:
lbracket = "[["
rbracket = "]]"
else:
lbracket = "["
rbracket = "]"
for chunk in chunks:
_id = chunk['id']
text = chunk[f'{target_lang}_text'].replace("[", "{").replace("(", "{").replace(")", "}").replace("]",
"}")
offset = chunk[f'{target_lang}_offset']
doc = docs[_id]
offset_end = offset + len(text)
if f'{target_lang}_items' not in doc:
doc[f'{target_lang}_items'] = list(doc.get(f'{target_lang}_ner', {}).keys())
doc[f'{target_lang}_items'].sort(key=lambda a: a[1])
i = 0
for idx, key in enumerate(doc[f'{target_lang}_items']):
if key[1] < offset:
continue
if key[2] > offset_end:
break
if len(key[0]) < 5 and not self.cjk_detect(key[0]):
if " " + key[0] + " " in text[i:]:
j = text.index(" " + key[0] + " ", i)
text = text[:j] + (text[j:].replace(" " + key[0] + " ", f" **{idx}** ", 1))
i = j
else:
if key[0] in text[i:]:
j = text.index(key[0], i)
text = text[:j] + (text[j:].replace(key[0], f" **{idx}** ", 1))
i = j
chunk[f'{target_lang}_tmpl_text'] = text
target_items_sorted = list(enumerate(doc[f'{target_lang}_items']))
target_items_sorted.sort(key=lambda a: len(a[1][0]))
for chunk in chunks:
text = chunk[f'{target_lang}_tmpl_text']
_id = chunk['id']
doc = docs[_id]
for idx, key in target_items_sorted:
if len(key[0]) < 5 and not self.cjk_detect(key[0]):
text = text.replace(" " + key[0] + " ", f" **{idx}** ")
else:
text = text.replace(key[0], f" **{idx}** ")
chunk[f'{target_lang}_tmpl_text'] = text
for chunk in chunks:
text = chunk[f'{target_lang}_text']
_id = chunk['id']
doc = docs[_id]
for idx, key in enumerate(doc[f'{target_lang}_items']):
text = text.replace(f" **{idx}** ", f" {idx} {lbracket} {key[0]} {rbracket}")
chunk[f'{target_lang}_tmpl_text'] = text.replace(" ", " ")
backtrans_text = self.do_translations([chunk[f'{target_lang}_tmpl_text'] for chunk in chunks],
src_lang=target_lang, target_lang=src_lang, batch_size=batch_size)
for chunk, trans_text in zip(chunks, backtrans_text):
# langid check
try:
lang = langid.classify(trans_text)[0]
except:
lang = target_lang
if lang == target_lang:
chunk[f'{src_lang}_text_backtrans_from_{target_lang}'] = trans_text.lstrip(" .").replace(rbracket,
"]").replace(
lbracket, "[").replace("}", ")").replace("{", "(")
else:
chunk[f'{src_lang}_text_backtrans_from_{target_lang}'] = " . . . "
# TODO: do similiarty test?
for chunk, trans_text in zip(chunks, backtrans_text):
_id = chunk['id']
doc = docs[_id]
orig_text = chunk[f'{src_lang}_text']
trans_text = chunk[f'{src_lang}_text_backtrans_from_{target_lang}']
items = doc[f'{target_lang}_items']
len_items = len(items)
doc[f'{src_lang}_2_{target_lang}_backtrans_ner'] = ner = doc.get(
f'{src_lang}_2_{target_lang}_backtrans_ner', {})
pos = 0
blocks, score = self.get_aligned_text(orig_text, trans_text, src_lang)
prev_t = None
prev_o = None
ner_word = ""
ent2 = ""
idx = None
for o, t, _ in blocks:
before = after = ""
if "]" in t:
ner_word = ""
ent2 = ""
t_arr = t.split("]")
before = sep.join(t_arr[-1:])
after = t_arr[-1]
before = before.strip()
if not before:
continue
idx = before.split()[-1]
try:
idx = int(idx)
except:
idx = None
if prev_t and prev_t.strip():
idx = prev_t.strip().split()[-1]
try:
idx = int(idx)
except:
idx = None
pass
if idx is not None and idx < len_items:
ner_word += o
if after:
ent2 = after.split("[", 1)[0]
else:
ent2 += t.split("[", 1)[0]
if "[" in t:
key = items[idx]
if key in ner:
ner_word = ner_word.strip(self.strip_chars)
ent2 = ent2.strip(self.strip_chars)
if ent2 in ner_word:
ner_word = ent2
else:
if src_is_cjk:
ent2arr = list(ent2)
ner_wordarr = list(ner_word)
else:
ent2arr = ent2.split()
ner_wordarr = ner_word.split()
len_ent2arr = len(ent2arr)
found = False
if len_ent2arr > 3:
ent3 = sep.join(ent2arr[:3])
if ent3 in new_word:
new_word = ner_word[ner_word.index(ent3):]
found = True
if not found:
if len_ent2arr < len(new_wordarr):
new_word = sep.join(new_wordarr[-len_ent2arr:])
if ner_word and ner_word.lower() not in stopwords1:
i = orig_text[pos:].index(ner_word)
start = pos + i
len_nerword = len(ner_word)
pos = start + len_nerword
mention = (ner_word, offset + start, offset + start + len_nerword)
aHash = ner.get(mention, {})
for label in ner[key]:
print(f'found new mention from {target_lang}', mention, label)
aHash[label] = aHash.get(label, 0) + ner[key][label]
ner[mention] = aHash
idx = None
ner_word = ""
ent2 = ""
prev_o, prev_t = o, t
# increase the src_lang ner score if we already matched this ner in src_lang or there was a partial match
for doc in docs.values():
bner = doc[f'{src_lang}_2_{target_lang}_backtrans_ner']
ner = doc[f'{src_lang}_ner']
for key, aHash in bner.items():
if key in ner: continue
ent = key[0]
ner_match = [key2 for key2 in ner if ent == key2[0]]
if not ner_match and len(ent) > 3:
ner_match = [key2 for key2 in ner if (ent in key2[0] or (len(key2[0]) > 3 and key2[0] in ent))]
all_keys = []
for key2 in ner_match:
all_keys.extend(list(ner[key2].keys()))
all_keys = set(all_keys)
for label in list(aHash.keys()):
if label in all_keys or 'MISC' in all_keys:
aHash[label] *= 1.1
print('increasing in backtrans ', key, label, aHash[label])
for key, aHash1 in bner.items():
ner[key] = aHash2 = ner.get(key, {})
for key2 in aHash1:
aHash2[key2] = aHash2.get(key2, 0.0) + aHash1[key2]
if do_postprocessing_after_backtrans:
pass
if do_cleanup:
# do some cleanups. we don't want any ner that are just short numbers, stopwords or single characters.
for _id, doc in docs.items():
ner = doc[f'{src_lang}_ner']
for key in list(doc[f'{src_lang}_ner'].keys()):
ner_word = key[0]
try:
if len(ner_word) < 4 and float(ner_word):
print("deleting ", ner_word)
del doc[f'{src_lang}_ner'][key]
continue
except:
pass
if ner_word.lower() in stopwords1 or (not self.cjk_detect(ner_word) and len(ner_word) <= 1):
print("deleting ", ner_word)
del doc[f'{src_lang}_ner'][key]
return docs, chunks
def process_ner(self,
src_lang: str = None,
docs=None,
do_spacy=True,
do_hf_ner=True,
do_ontology=True,
do_backtrans=False,
do_cleanup=True,
do_regex=True,
batch_size=5,
batch_window=70,
ontology_weight=0.9,
spacy_weight=1.25,
hf_ner_weight=1.5,
backtrans_weight=0.9,
do_docs_trim=True,
do_postprocessing_after_backtrans=False,
trim_bad_sentence=False,
cutoff=None,
target_lang='en'):
src_is_cjk = src_lang in ('zh', 'ko', 'ja')
sep = "" if src_is_cjk else " "
domain = 'custom'
if docs is None:
docs, domain = get_docs(src_lang=src_lang)
elif isinstance(docs, str):
docs = [{f'{src_lang}_text': docs}]
elif isinstance(docs, list):
if isinstance(docs[0], dict):
docs = docs
else:
docs = [{f'{src_lang}_text': t} for t in docs]
# for testing only
if cutoff is not None and len(docs) > cutoff:
docs = docs[:cutoff]
len_docs = len(docs)
for doc in docs:
if 'text' in doc:
doc[f'{src_lang}_text'] = doc['text']
del doc['text']
badwords1 = set([s for s in badwords_ac_dc.get(src_lang, []) if len(s) < 5])
stopwords1 = set(stopwords_ac_dc.get(src_lang, []))
if trim_bad_sentence:
docs = [doc for doc in docs if
self.check_good_sentence(doc[f'{src_lang}_text'], src_lang, stopwords=stopwords1, badwords=badwords1)]
logging.info(f'trimmed junk {str((len_docs - len(docs)) / len_docs)}')
chunks = []
for _id, doc in enumerate(docs):
if 'id' not in doc:
doc['id'] = str(_id)
doc.setdefault('id', str(_id))
doc[f'{src_lang}_text'] = doc[f'{src_lang}_text'].replace("[", "(").replace("]",")") # we use [] as special chars
doc['lang'] = src_lang
doc['domain'] = domain
doc['chunks'] = []
self._split_text_into_chunks(src_lang=src_lang, src_is_cjk=src_is_cjk, doc=doc, batch_window=batch_window, sep=sep, chunks=chunks)
docs = dict([(doc['id'], doc) for doc in docs])
if do_docs_trim:
docs2, chunks2 = self.trim_to_prefer_person(docs, chunks)
do_docs_trim = len(docs2) == len(docs)
docs, chunks = docs2, chunks2
# we do this here because we don't want to trim ner items that are considered empty.
# we should probably fix trim_to_prefer_person to not do any trimming if all ner's are empty
for doc in docs.values():
doc[f'{src_lang}_ner'] = doc.get(f'{src_lang}_ner', {})
# do ner processing in src_lang
docs2, chunks2 = self.process_ner_chunks_with_trans(
src_lang,
docs,
chunks,
do_spacy=do_spacy,
do_hf_ner=do_hf_ner,
do_ontology=do_ontology,
do_backtrans=False,
do_regex=do_regex,
do_cleanup=do_cleanup,
batch_size=batch_size,
ontology_weight=ontology_weight,
spacy_weight=spacy_weight,
hf_ner_weight=hf_ner_weight,
backtrans_weight=backtrans_weight,
do_postprocessing_after_backtrans=False,
do_docs_trim=do_docs_trim)
if do_docs_trim:
do_docs_trim = len(docs2) == len(docs)
docs, chunks = docs2, chunks2
if do_backtrans:
logging.info(f"Doing backtrans with src_lang={src_lang} to target_lang={target_lang}")
# do ner processing in target language with optional backtrans
docs2, chunks2 = self.process_ner_chunks_with_trans(
src_lang,
docs,
chunks,
target_lang=target_lang,
do_spacy=do_spacy,
do_hf_ner=do_hf_ner,
do_ontology=do_ontology,
do_backtrans=do_backtrans,
do_regex=do_regex,
do_cleanup=do_cleanup,
batch_size=batch_size,
ontology_weight=ontology_weight,
spacy_weight=spacy_weight,
hf_ner_weight=hf_ner_weight,
backtrans_weight=backtrans_weight,
do_postprocessing_after_backtrans=True,
do_docs_trim=do_docs_trim)
docs, chunks = docs2, chunks2
logging.info('Writing docs into out.jsonl')
with open('out.jsonl', 'w', encoding='utf-8') as file:
for k, doc in docs.items():
file.write(f'{doc}\n')
logging.info('Writing chunks into chunk.jsonl')
with open('chunk.jsonl', 'w', encoding='utf-8') as file:
for chunk in chunks:
file.write(f'{chunk}\n')
return docs, chunks
| 47.108275
| 168
| 0.476778
|
acfc54e12a11b736e70889f76cd3c1b69ba59f9a
| 2,787
|
py
|
Python
|
containers/fibonacci.py
|
brandonsrho57/oop_containers
|
c0529ef1e8db51cb179120acc2fb329db8e70532
|
[
"MIT"
] | null | null | null |
containers/fibonacci.py
|
brandonsrho57/oop_containers
|
c0529ef1e8db51cb179120acc2fb329db8e70532
|
[
"MIT"
] | null | null | null |
containers/fibonacci.py
|
brandonsrho57/oop_containers
|
c0529ef1e8db51cb179120acc2fb329db8e70532
|
[
"MIT"
] | null | null | null |
################################################
# example fibonacci number code;
# you do not have to modify this code in any way
################################################
def fibs(n):
    '''
    Return a list of the first n fibonacci numbers.

    The whole sequence is materialized, so this uses O(n) memory.
    '''
    if n == 1:
        return [1]
    sequence = [1, 1]
    for _ in range(n - 2):
        sequence.append(sequence[-1] + sequence[-2])
    return sequence
def fib_bad(n):
    '''
    Compute the n-th fibonacci number the wasteful way:
    materialize the first n numbers (O(n) memory) and keep only the last.
    '''
    whole_sequence = fibs(n)
    return whole_sequence[-1]
def fib(n):
    '''
    Compute the n-th fibonacci number in O(n) time and O(1) memory
    (1-indexed: fib(1) == fib(2) == 1).

    Bug fix: the original looped ``range(n - 1)``, advancing one step too
    far, so it returned fib(n + 1) for every n >= 2 (e.g. fib(5) was 8
    while fib_bad(5) == fibs(5)[-1] == 5).
    '''
    if n < 2:
        return 1
    f0, f1 = 1, 1
    for _ in range(n - 2):
        f0, f1 = f1, f0 + f1
    return f1
#################################################
# fibonacci number code using generators;
# you will need to implement the functions below
#################################################
class Fib:
    '''
    Lazily represents the fibonacci numbers using O(1) memory.

    >>> list(Fib(5))
    [1, 1, 2, 3, 5]
    '''
    def __init__(self, n=None):
        # n bounds the number of values yielded; None means unbounded.
        self.n = n

    def __iter__(self):
        return FibIter(self.n)

    def __repr__(self):
        return 'Fib()' if self.n is None else 'Fib(' + str(self.n) + ')'
class FibIter:
    '''
    Iterator helper for the Fib class; yields fibonacci numbers in O(1) memory.

    Produces the first ``n`` values, or runs forever when ``n`` is None.
    '''
    def __init__(self, n):
        self.n = n    # stop after this many values (None = infinite)
        self.i = 0    # how many values have been produced so far
        self.f0 = 1   # second-to-last value produced
        self.f1 = 1   # last value produced
        self.f2 = None

    def __iter__(self):
        # Iterators should be iterable themselves (returning self) so that
        # FibIter works directly in a for-loop / list(); the original class
        # lacked this method.
        return self

    def __next__(self):
        if self.n is not None and self.n <= self.i:
            raise StopIteration
        if self.i < 2:
            # The first two fibonacci numbers are both 1.
            self.i += 1
            return 1
        self.i += 1
        self.f2 = self.f0 + self.f1
        self.f0 = self.f1
        self.f1 = self.f2
        return self.f2
def fib_yield(n=None):
    '''
    Return a generator over the first n fibonacci numbers.
    If n is None, the generator is infinite.

    Refactor: the original duplicated the entire loop body across the
    n-is-None and bounded branches; a single loop with one termination
    test is equivalent and keeps the two paths from drifting apart.
    '''
    produced = 0
    f0, f1 = 0, 1
    while n is None or produced < n:
        # After this step f0 holds the next fibonacci number.
        f0, f1 = f1, f0 + f1
        produced += 1
        yield f0
| 21.438462
| 57
| 0.451381
|
acfc5569f342ac5918de1724e08a593dd6661283
| 2,408
|
py
|
Python
|
homeassistant/components/coronavirus/sensor.py
|
CantankerousBullMoose/core
|
2178e27fb4c62271d4872e16838331defed82226
|
[
"Apache-2.0"
] | 1
|
2021-03-12T20:46:40.000Z
|
2021-03-12T20:46:40.000Z
|
homeassistant/components/coronavirus/sensor.py
|
CantankerousBullMoose/core
|
2178e27fb4c62271d4872e16838331defed82226
|
[
"Apache-2.0"
] | 51
|
2020-08-03T07:30:44.000Z
|
2022-03-22T06:02:42.000Z
|
homeassistant/components/coronavirus/sensor.py
|
CantankerousBullMoose/core
|
2178e27fb4c62271d4872e16838331defed82226
|
[
"Apache-2.0"
] | 2
|
2021-03-22T21:42:48.000Z
|
2021-04-12T12:26:39.000Z
|
"""Sensor platform for the Corona virus."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import get_coordinator
from .const import ATTRIBUTION, OPTION_WORLDWIDE
# Maps each case-count field exposed by the coordinator to the
# Material Design icon used for the corresponding sensor entity.
SENSORS = {
    "confirmed": "mdi:emoticon-neutral-outline",
    "current": "mdi:emoticon-sad-outline",
    "recovered": "mdi:emoticon-happy-outline",
    "deaths": "mdi:emoticon-cry-outline",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Create one CoronavirusSensor per tracked statistic for the configured country."""
    coordinator = await get_coordinator(hass)
    country = config_entry.data["country"]
    sensors = (
        CoronavirusSensor(coordinator, country, info_type) for info_type in SENSORS
    )
    async_add_entities(sensors)
class CoronavirusSensor(CoordinatorEntity):
    """Sensor reporting one Coronavirus case statistic for one country."""

    # Class-level defaults; both are shadowed by instance attributes in
    # __init__ and override the base Entity properties.
    name = None
    unique_id = None

    def __init__(self, coordinator, country, info_type):
        """Initialize coronavirus sensor."""
        super().__init__(coordinator)
        if country == OPTION_WORLDWIDE:
            display = "Worldwide"
        else:
            display = coordinator.data[country].country
        self.name = f"{display} Coronavirus {info_type}"
        self.unique_id = f"{country}-{info_type}"
        self.country = country
        self.info_type = info_type

    @property
    def available(self):
        """Return if sensor is available."""
        if not self.coordinator.last_update_success:
            return False
        return self.country == OPTION_WORLDWIDE or self.country in self.coordinator.data

    @property
    def state(self):
        """State of the sensor."""
        data = self.coordinator.data
        if self.country != OPTION_WORLDWIDE:
            return getattr(data[self.country], self.info_type)
        # Worldwide: add up every country's figure, skipping missing values.
        values = (getattr(case, self.info_type) for case in data.values())
        return sum(value for value in values if value is not None)

    @property
    def icon(self):
        """Return the icon."""
        return SENSORS[self.info_type]

    @property
    def unit_of_measurement(self):
        """Return unit of measurement."""
        return "people"

    @property
    def extra_state_attributes(self):
        """Return device attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
| 30.481013
| 86
| 0.649502
|
acfc557cd7599a1177b9c80fe197541f859ffee7
| 2,613
|
py
|
Python
|
preDeal/lstm_v2_bak.py
|
xjtushilei/pCVR
|
29bb49be537baf90f5ab4678630dae3b7fffb94a
|
[
"Apache-2.0"
] | 32
|
2017-06-06T08:08:23.000Z
|
2021-04-11T06:04:31.000Z
|
preDeal/lstm_v2_bak.py
|
wangmengsd/pCVR
|
0070620d4387f545e0fecd9497ce183180f9f63f
|
[
"Apache-2.0"
] | 1
|
2018-04-11T01:34:03.000Z
|
2018-04-11T01:58:13.000Z
|
preDeal/lstm_v2_bak.py
|
wangmengsd/pCVR
|
0070620d4387f545e0fecd9497ce183180f9f63f
|
[
"Apache-2.0"
] | 12
|
2017-07-12T01:28:35.000Z
|
2019-06-23T14:29:01.000Z
|
# Script: train an LSTM on preprocessed pCVR data and write predictions to CSV.
import pickle
import numpy
import tensorflow
from keras import layers, optimizers
from keras.models import Sequential
from utils import *
from keras import backend as K
K.clear_session()
# Load the pickled (x, y, test_id, test) tuple produced by the preprocessing step.
print("开始反序列化加载数据...")
with open('get_data_v2.data', 'rb') as train_data_file:
    data = pickle.loads(train_data_file.read())
print("加载结束...")
(x, y, test_id, test) = data
# print(test_id[2])
# First 90% is training data, last 10% is held out for validation.
split_at = len(x) - len(x) // 10
(x_train, x_val) = x[:split_at], x[split_at:]
(y_train, y_val) = y[:split_at], y[split_at:]
# Per-sample weights: rows with line[0] == 1 get weight 1, the rest 100 —
# presumably up-weighting the rare class of one-hot labels; TODO confirm
# which index is the positive class.
sample_weight = []
for line in y_train:
    if line[0] == 1:
        sample_weight.append(1)
    else:
        sample_weight.append(100)
sample_weight = numpy.array(sample_weight)
print('sample_weight_shape:', sample_weight.shape)
# print(x_val)
print('x-shape:', x_val.shape)
print('y-shape:', y_val.shape)
print('test_id-shape:', test_id.shape)
print('test-shape:', test.shape)
print("设置显卡信息...")
# Let TensorFlow grow GPU memory on demand instead of grabbing it all up front.
config = tensorflow.ConfigProto()
config.gpu_options.allow_growth = True
session = tensorflow.Session(config=config)
print("开始构建模型...")
# Build the model: BatchNorm -> LSTM -> BatchNorm -> LSTM -> Dense stack -> sigmoid.
model_name = 'lstm'
HIDDEN_SIZE = 256
BATCH_SIZE = 1500
# LSTM = layers.LSTM
model = Sequential()
print('首层输入维度是:', x_val.shape[2])
# shape of (len_of_sequences, nb_of_features)
model.add(layers.normalization.BatchNormalization(input_shape=(1, x_val.shape[2])))
model.add(layers.LSTM(HIDDEN_SIZE, input_shape=(1, x_val.shape[2]), return_sequences=True))
model.add(layers.normalization.BatchNormalization())
model.add(layers.LSTM(HIDDEN_SIZE // 2))
model.add(layers.Dense(HIDDEN_SIZE))
model.add(layers.normalization.BatchNormalization())
model.add(layers.Dense(HIDDEN_SIZE))
model.add(layers.Dense(2))
model.add(layers.Activation('sigmoid'))
sgd = optimizers.SGD(lr=1, clipvalue=0.5)
# NOTE(review): Nadam is configured but never passed to compile() below —
# presumably left over from an experiment; sgd is the active optimizer.
Nadam=optimizers.Nadam(lr=0.00002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.00004)
model.compile(loss='binary_crossentropy',
              optimizer=sgd,
              metrics=['binary_accuracy'])
model.summary()
print("开始训练模型...")
model.fit(x_train, y_train,
          batch_size=BATCH_SIZE,
          # sample_weight=sample_weight,
          epochs=100,
          validation_data=(x_val, y_val))
model.save('lstm_v2.h5')
print("开始预测模型...")
# Predict on the full test set; column 1 is used as the positive-class probability.
predict = model.predict(test[0:], batch_size=5000, verbose=1)
print(predict[:, 1:50])
print("开始将预测结果写入csv...")
# Write one (instanceID, prob) row per test sample.
with open('lstm_v2_bak_submission.csv', 'w') as file:
    file.write('instanceID,prob\n')
    index = 0
    for one in predict[:, 1:]:
        file.write(str(test_id[index]) + ',' + str(one[0]) + '\n')
        index += 1
print("结束...")
| 29.033333
| 99
| 0.70532
|
acfc55ce056743c3806c6db7bf12e51c4b754f85
| 769
|
py
|
Python
|
experiment_sbb_with_two_categories.py
|
erelsgl/auctions
|
d1a55eaacb337ecebeaf5f57a67bcd6a9d4411eb
|
[
"MIT"
] | 1
|
2021-11-20T19:27:45.000Z
|
2021-11-20T19:27:45.000Z
|
experiment_sbb_with_two_categories.py
|
dvirg/auctions
|
da706f3d11b9582c7f811de9f50b96b43ac8cbd0
|
[
"MIT"
] | null | null | null |
experiment_sbb_with_two_categories.py
|
dvirg/auctions
|
da706f3d11b9582c7f811de9f50b96b43ac8cbd0
|
[
"MIT"
] | 1
|
2020-09-09T07:20:04.000Z
|
2020-09-09T07:20:04.000Z
|
#!python3
"""
Simulation experiment for our AAAI 2020 paper, with recipes of size 2.
Comparing McAfee's double auction to our SBB auctions.
Since: 2019-11
Author: Erel Segal-Halevi
"""
from experiment import experiment
from ascending_auction_protocol import budget_balanced_ascending_auction
# Output CSV collecting the simulation results.
results_file = "results/experiment_sbb_with_two_categories.csv"
# Number of randomized market instances per configuration.
iterations = 50000
# Recipe (1, k): every deal combines one buyer with k sellers; buyer values
# scale with k so that a deal can be profitable.
for num_of_sellers_per_deal in (2,4,8,16):
    experiment(results_file,budget_balanced_ascending_auction, "SBB Ascending Prices", recipe=(1,num_of_sellers_per_deal),
               value_ranges = [(1,1000*num_of_sellers_per_deal),(-1000,-1)],
               nums_of_agents = (2, 3, 4, 5, 10, 15, 25, 50, 100, 500, 1000),
               num_of_iterations = iterations
               )
| 32.041667
| 122
| 0.721717
|
acfc576ead5a62b55c2fa9c484cbe50ac6f8f2fe
| 1,211
|
py
|
Python
|
app/core/models.py
|
nuggysantoso/recipe-app-api
|
20a68f4ec3ff2490d7bbe4548eef006a14ada927
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
nuggysantoso/recipe-app-api
|
20a68f4ec3ff2490d7bbe4548eef006a14ada927
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
nuggysantoso/recipe-app-api
|
20a68f4ec3ff2490d7bbe4548eef006a14ada927
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
# Create your models here.
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of username."""

    def create_user(self, email, password=None, **extra_fields):
        """Create, save and return a new user.

        Raises:
            ValueError: if no email address is given.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        # Consistency fix: this method used the ``self.db`` property while
        # create_superuser used ``self._db``; both now use ``self._db``, the
        # conventional Django manager pattern.
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create and save a new superuser with staff and superuser flags set."""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username."""
    # Email doubles as the unique login identifier (see USERNAME_FIELD below).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    # Controls access to the Django admin site.
    is_staff = models.BooleanField(default=False)
    objects = UserManager()
    USERNAME_FIELD = 'email'
| 33.638889
| 90
| 0.683732
|
acfc58089876cd47e3d01e4e49a2da9be592e90b
| 3,860
|
py
|
Python
|
tests/analyses/milhdbk217f/milhdbk217fn2_integration_test.py
|
weibullguy/ramstk
|
3ec41d7e2933045a7a8028aed6c6b04365495095
|
[
"BSD-3-Clause"
] | 4
|
2018-08-26T09:11:36.000Z
|
2019-05-24T12:01:02.000Z
|
tests/analyses/milhdbk217f/milhdbk217fn2_integration_test.py
|
weibullguy/ramstk
|
3ec41d7e2933045a7a8028aed6c6b04365495095
|
[
"BSD-3-Clause"
] | 52
|
2018-08-24T12:51:22.000Z
|
2020-12-28T04:59:42.000Z
|
tests/analyses/milhdbk217f/milhdbk217fn2_integration_test.py
|
weibullguy/ramstk
|
3ec41d7e2933045a7a8028aed6c6b04365495095
|
[
"BSD-3-Clause"
] | 1
|
2018-10-11T07:57:55.000Z
|
2018-10-11T07:57:55.000Z
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.analyses.milhdbk217f.milhdbk217f_integration_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for the milhdbk217f class."""
# Third Party Imports
import pytest
from pubsub import pub
# RAMSTK Package Imports
from ramstk.analyses.milhdbk217f import milhdbk217f
@pytest.mark.integration
@pytest.mark.usefixtures("test_attributes")
@pytest.mark.parametrize("hazard_rate_method_id", [1, 2, 3])
def test_do_calculate_active_hazard_rate(hazard_rate_method_id, test_attributes):
    """do_calculate_active_hazard_rate() should return the component attribute dict
    with updated values on success."""
    test_attributes["hazard_rate_method_id"] = hazard_rate_method_id
    # Pubsub listener checking the hazard rate expected for each method id
    # (the id -> expected value map is inlined in the assertion below).
    def on_message(attributes):
        assert isinstance(attributes, dict)
        assert (
            attributes["hazard_rate_active"]
            == {1: 0.00036, 2: 0.07457229625679276, 3: 0.0}[
                attributes["hazard_rate_method_id"]
            ]
        )
        print(
            f"\033[36m\n\tsucceed_predict_reliability topic was broadcast for "
            f"hazard rate method {attributes['hazard_rate_method_id']}."
        )
    # NOTE(review): if the success topic is never published, on_message never
    # runs and the test passes vacuously — confirm the prediction always emits.
    pub.subscribe(on_message, "succeed_predict_reliability")
    milhdbk217f.do_predict_active_hazard_rate(**test_attributes)
@pytest.mark.integration
@pytest.mark.usefixtures("test_attributes")
def test_do_calculate_active_hazard_rate_negative_input(test_attributes):
    """do_calculate_active_hazard_rate() should raise a ZeroDivisionError when passed a
    negative input for various components."""
    # Deliberately poison the attributes (None element count / power) for a
    # category 2 / subcategory 2 part so the prediction fails.
    test_attributes["category_id"] = 2
    test_attributes["subcategory_id"] = 2
    test_attributes["n_elements"] = None
    test_attributes["power_operating"] = None
    test_attributes["type_id"] = 4
    test_attributes["hazard_rate_method_id"] = 2
    # Listener asserting the exact failure message broadcast on the fail topic.
    def on_message(error_message):
        assert error_message == (
            "Failed to predict MIL-HDBK-217F hazard rate for "
            "hardware ID 12; one or more inputs has a "
            "negative or missing value. Hardware item "
            "category ID=2, subcategory ID=2, rated "
            "power=0.75, number of elements=None."
        )
        print(
            "\033[35m\n\tfail_predict_reliability topic was broadcast on negative "
            "input."
        )
    # NOTE(review): if the fail topic never broadcasts, on_message never runs
    # and the test passes vacuously.
    pub.subscribe(on_message, "fail_predict_reliability")
    milhdbk217f.do_predict_active_hazard_rate(**test_attributes)
@pytest.mark.integration
@pytest.mark.usefixtures("test_attributes")
def test_do_calculate_active_hazard_rate_zero_input(test_attributes):
    """do_calculate_active_hazard_rate() should raise a ZeroDivisionError when passed
    an input equal to 0.0 for various components."""
    # Zero operating voltages for a category 4 / subcategory 12 part trigger
    # the zero-division failure path in the prediction.
    test_attributes["category_id"] = 4
    test_attributes["subcategory_id"] = 12
    test_attributes["voltage_ac_operating"] = 0.0
    test_attributes["voltage_dc_operating"] = 0.0
    test_attributes["hazard_rate_method_id"] = 2
    # Listener asserting the exact failure message broadcast on the fail topic.
    def on_message(error_message):
        assert error_message == (
            "Failed to predict MIL-HDBK-217F hazard rate for hardware ID 12; one or "
            "more inputs has a value of 0.0. Hardware item category ID=4, subcategory "
            "ID=12, operating ac voltage=0.0, operating DC voltage=0.0, operating "
            "temperature=45.0, temperature rise=10.0, rated maximum temperature=105.0, "
            "feature size=1.5, surface area=1.5, and item weight=0.5."
        )
        print(
            "\033[35m\n\tfail_predict_reliability topic was broadcast on zero "
            "division."
        )
    # NOTE(review): if the fail topic never broadcasts, on_message never runs
    # and the test passes vacuously.
    pub.subscribe(on_message, "fail_predict_reliability")
    milhdbk217f.do_predict_active_hazard_rate(**test_attributes)
| 37.475728
| 88
| 0.698187
|
acfc585db30d32a019bb96b8f71ad5a223b1e6bf
| 1,310
|
py
|
Python
|
losses/proto_loss.py
|
AKI-maggie/adapted_deep_embeddings
|
7f58900cf31398f7bb23295c1d4854f1ae941bcd
|
[
"MIT"
] | 24
|
2018-10-19T07:48:16.000Z
|
2021-06-29T08:44:16.000Z
|
losses/proto_loss.py
|
AKI-maggie/adapted_deep_embeddings
|
7f58900cf31398f7bb23295c1d4854f1ae941bcd
|
[
"MIT"
] | 8
|
2019-01-12T02:55:13.000Z
|
2022-03-11T23:32:01.000Z
|
losses/proto_loss.py
|
garyCC227/thesis
|
a93c5061c09fa1a42d54053cd82e71cef447e4b8
|
[
"MIT"
] | 10
|
2018-12-04T11:39:01.000Z
|
2020-11-10T06:57:35.000Z
|
'''
Adapted from Jake Snell's implementation (https://github.com/jakesnell/prototypical-networks)
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def prototypical_networks_loss(prototypes, query, num_query_per_class, target_indices):
    """Episode loss/accuracy for Prototypical Networks (Snell et al.).

    Classifies each query embedding by a softmax over negative squared
    Euclidean distances to the class prototypes.

    Args:
        prototypes: per-class prototype embeddings; assumed shape (m, d)
            where m is the number of classes — TODO confirm against caller.
        query: query embeddings, assumed shape (m * num_query_per_class, d)
            grouped by class — TODO confirm ordering.
        num_query_per_class: number of query examples per class.
        target_indices: int32 labels; compared against predictions shaped
            (m, num_query_per_class).

    Returns:
        (loss, accuracy, num_correct) tensors.
    """
    m = tf.shape(prototypes)[0]
    # Broadcast query (q, 1, d) against prototypes (1, m, d) to get all
    # pairwise squared distances, shape (q, m).
    prototypes = tf.expand_dims(prototypes, 0)
    query = tf.expand_dims(query, 1)
    dist = tf.reduce_sum(tf.pow(query - prototypes, 2), 2)
    # Per-query log-probability of each class; reshape to
    # (class, query-within-class, class-probabilities).
    log_prob = tf.nn.log_softmax(-dist)
    log_prob = tf.reshape(log_prob, shape=[m, num_query_per_class, -1])
    # Build (class, query, class) index triples selecting each query's own
    # class log-probability from log_prob.
    idx1 = tf.reshape(tf.range(0, m), shape=[m, 1, 1])
    idx1 = tf.reshape(tf.tile(idx1, multiples=(1, num_query_per_class, 1)), shape=[-1])
    idx1 = tf.expand_dims(idx1, 1)
    idx2 = tf.expand_dims(tf.tile(tf.range(0, num_query_per_class), multiples=(m,)), 1)
    indices = tf.concat([idx1, idx2, idx1], axis=1)
    # Mean negative log-likelihood of the true class over all queries.
    loss = tf.squeeze(tf.gather_nd(-log_prob, indices))
    loss = tf.reduce_mean(tf.reshape(loss, shape=[-1]))
    # Hard predictions, then accuracy statistics against the targets.
    y_hat = tf.cast(tf.argmax(log_prob, 2), tf.int32)
    correct = tf.equal(y_hat, target_indices)
    num_correct = tf.reduce_sum(tf.cast(correct, tf.int32))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    return loss, accuracy, num_correct
| 34.473684
| 93
| 0.707634
|
acfc5a25b9fe57b3ac86407b72d71ef53efe3dce
| 3,569
|
py
|
Python
|
linearKF/linearKF.py
|
raghuramshankar/soc-estimation-of-li-ion-batteries
|
0450757e1bb968cb742b6cea73c4523ef81f75fa
|
[
"MIT"
] | null | null | null |
linearKF/linearKF.py
|
raghuramshankar/soc-estimation-of-li-ion-batteries
|
0450757e1bb968cb742b6cea73c4523ef81f75fa
|
[
"MIT"
] | null | null | null |
linearKF/linearKF.py
|
raghuramshankar/soc-estimation-of-li-ion-batteries
|
0450757e1bb968cb742b6cea73c4523ef81f75fa
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
class linearKF():
    """Textbook scalar linear Kalman filter demonstration.

    Simulates the plant x_k = x_{k-1} + u_{k-1} + w with noisy measurement
    y_k = x_k + v for ``N`` steps, while the KF tracks the state; the true
    state, estimate, and error covariance are stored for plotting.

    Bug fix: ``postpross`` previously plotted over the module-level global
    ``N`` (defined only inside the __main__ guard), so the class could not
    be used stand-alone; it now uses ``self.N``.
    """

    def __init__(self, N):
        # number of iterations
        self.N = N
        # process noise covariance
        self.sigmaW = 1
        # sensor noise covariance
        self.sigmaV = 1
        # plant definition matrices:
        #   x_k = 1*x_{k-1} + 1*u_{k-1}
        #   y_k = 1*x_{k-1} + 0*u_{k-1}
        self.A = 1
        self.B = 1
        self.C = 1
        self.D = 0
        # true system initial state
        self.xTrue = 100
        # kalman filter initial estimate
        self.xHat = 0
        # kalman filter initial covariance
        self.sigmaX = 0
        # initial driving input
        self.u = 0
        # reserve storage for logged trajectories
        self.xStore = np.zeros((np.size(self.xTrue), self.N + 1))
        self.xStore[:, 0] = self.xTrue
        self.xHatStore = np.zeros((np.size(self.xHat), self.N))
        self.sigmaXStore = np.zeros((np.size(self.xHat) ** 2, self.N))

    def genInputMeasurement(self, k):
        """Advance the true plant one step and generate a noisy measurement."""
        self.u = 0.5 * np.random.randn(1) + np.cos(k / np.pi)
        try:
            # Matrix path: Cholesky factor shapes the process noise when
            # sigmaW is a covariance matrix.
            w = np.transpose(np.linalg.cholesky(self.sigmaW)) * \
                np.random.randn(np.size(self.xTrue))
            v = self.sigmaV * np.random.randn(np.size(self.C * self.xTrue))
        except np.linalg.LinAlgError:
            # Scalar path (narrowed from a bare except): sigmaW is a plain
            # number, so Cholesky is not applicable.
            w = self.sigmaW * np.random.randn(np.size(self.xTrue))
            v = self.sigmaV * np.random.randn(self.C * np.size(self.xTrue))
        self.yTrue = self.C * self.xTrue + self.D * self.u + v
        self.xTrue = self.A * self.xTrue + self.B * self.u + w

    def iterKF(self):
        """Run the Kalman filter for N steps, logging state and covariance."""
        for k in range(self.N):
            '''KF step 1a: state estimate time update'''
            self.xHat = self.A * self.xHat + self.B * self.u
            '''KF step 1b: error covariance time update'''
            self.sigmaX = self.A * self.sigmaX * \
                np.transpose(self.A) + self.sigmaW
            '''generate input and measurement'''
            self.genInputMeasurement(k)
            '''KF step 1c: estimate system output'''
            self.yHat = self.C * self.xHat + self.D * self.u
            '''KF step 2a: compute kalman gain'''
            sigmaY = self.C * self.sigmaX * np.transpose(self.C) + self.sigmaV
            L = self.sigmaX * np.transpose(self.C) / sigmaY
            '''KF step 2b: state estimate measurement update'''
            self.xHat = self.xHat + L * (self.yTrue - self.yHat)
            '''KF step 2c: error covariance measurement update'''
            self.sigmaX = self.sigmaX - L * sigmaY * np.transpose(L)
            '''store information'''
            self.xStore[:, k + 1] = self.xTrue
            self.xHatStore[:, k] = self.xHat
            self.sigmaXStore[:, k] = self.sigmaX

    def postpross(self):
        """Plot true state, estimate, and 3-sigma-style bounds over all steps."""
        fig = plt.figure()
        f = fig.add_subplot(111)
        # Fixed: iterate over self.N instead of the global N.
        f.plot(range(self.N), self.xStore[0, 1:], 'k+', label='True')
        f.plot(range(self.N), self.xHatStore[0, :], 'b', label='Estimate')
        f.plot(range(self.N), self.xHatStore[0, :] + np.sqrt(3) *
               self.sigmaXStore[0, :], 'g--', label='Upper bound')
        f.plot(range(self.N), self.xHatStore[0, :] - np.sqrt(3) *
               self.sigmaXStore[0, :], 'g--', label='Lower bound')
        f.set_xlabel('Iteration')
        f.set_ylabel('State')
        f.set_title('Linear kalman filter generic model')
        f.legend()
        plt.grid(True)
        plt.show()
if __name__ == '__main__':
    # Run a 200-iteration demonstration of the filter and plot the result.
    # N stays a module-level global here on purpose.
    N = 200
    demo_filter = linearKF(N)
    demo_filter.iterKF()
    demo_filter.postpross()
| 34.317308
| 78
| 0.537966
|
acfc5a318c42394567004fb195beb6f90f9fddfc
| 598
|
py
|
Python
|
test/test1.py
|
1621740748/stock-pandas
|
fe8b741e19311235efe901c63c1a8ee81d112680
|
[
"MIT"
] | 1
|
2020-04-03T09:30:10.000Z
|
2020-04-03T09:30:10.000Z
|
test/test1.py
|
1621740748/stock-pandas
|
fe8b741e19311235efe901c63c1a8ee81d112680
|
[
"MIT"
] | null | null | null |
test/test1.py
|
1621740748/stock-pandas
|
fe8b741e19311235efe901c63c1a8ee81d112680
|
[
"MIT"
] | null | null | null |
from stock_pandas import StockDataFrame
import pandas as pd

# KDJ crossover backtest over tencent.csv:
# buy when %K crosses above %D (golden cross),
# sell when %K crosses below %D (dead cross), accumulating the P&L.
tt = pd.read_csv('tencent.csv')
stock = StockDataFrame(tt, date_column='time_key')
d = stock['kdj.d']
k = stock['kdj.k']
buy = 0
sell = 0
# Renamed from `sum`, which shadowed the builtin.
total = 0
for i in range(1, len(d)):
    if k[i - 1] <= d[i - 1] and k[i] >= d[i]:
        # Golden cross: record the close price as the buy price.
        print("buy:")
        buy = tt.iloc[i]["close"]
    elif k[i - 1] >= d[i - 1] and k[i] <= d[i]:
        # Dead cross: sell at the close and book the round-trip difference.
        # NOTE(review): a sell before any buy books `sell - 0` — confirm
        # whether that is intended.
        sell = tt.iloc[i]["close"]
        print("sell:")
        print("diff:" + str(sell - buy))
        total = total + (sell - buy)
print("total:" + str(total))
| 24.916667
| 49
| 0.576923
|
acfc5aa9c7daa7e9805269513e5a8636bf99645b
| 9,809
|
py
|
Python
|
functions.py
|
notpeople249/Telegram_VC_Bot
|
2f1cac97072ef54913609b49cd2ae80a8dbb9a4e
|
[
"MIT"
] | null | null | null |
functions.py
|
notpeople249/Telegram_VC_Bot
|
2f1cac97072ef54913609b49cd2ae80a8dbb9a4e
|
[
"MIT"
] | null | null | null |
functions.py
|
notpeople249/Telegram_VC_Bot
|
2f1cac97072ef54913609b49cd2ae80a8dbb9a4e
|
[
"MIT"
] | null | null | null |
import asyncio
import functools
import os
import aiofiles
import ffmpeg
import youtube_dl
from aiohttp import ClientSession
from PIL import Image, ImageDraw, ImageFont
from pyrogram import Client
from pyrogram.types import Message
from pyrogram.raw.types import InputGroupCall
from pyrogram.raw.functions.channels import GetFullChannel
from pyrogram.raw.functions.phone import EditGroupCallTitle
from Python_ARQ import ARQ
from db import db
# Load either the real config (config.py, if present) or the bundled sample;
# both star-imports are expected to define HEROKU, API_ID, API_HASH,
# ARQ_API, ARQ_API_KEY, and friends.
is_config = os.path.exists("config.py")
if is_config:
    from config import *
else:
    from sample_config import *
# On Heroku a Pyrogram session string is used instead of a session file.
if HEROKU:
    if is_config:
        from config import SESSION_STRING
    # NOTE(review): this elif is always true when reached; a plain else
    # would be equivalent.
    elif not is_config:
        from sample_config import SESSION_STRING
app = Client(
    SESSION_STRING if HEROKU else "tgvc", api_id=API_ID, api_hash=API_HASH
)
# Shared HTTP session and ARQ API client used throughout the bot.
session = ClientSession()
arq = ARQ(ARQ_API, ARQ_API_KEY, session)
# Theme names accepted by the theme-changing commands.
themes = ["darkred", "lightred", "green", "purple", "skyblue", "dark", "black"]
def get_theme(chat_id) -> str:
    """Return the theme stored for *chat_id*, defaulting to "purple".

    Lazily creates the chat's settings dict and theme entry in ``db``.
    """
    if chat_id not in db:
        db[chat_id] = {}
    chat_settings = db[chat_id]
    if "theme" not in chat_settings:
        chat_settings["theme"] = "purple"
    return chat_settings["theme"]
def change_theme(name: str, chat_id):
    """Persist *name* as the theme for *chat_id* in ``db``."""
    if chat_id not in db:
        db[chat_id] = {}
    settings = db[chat_id]
    if "theme" not in settings:
        settings["theme"] = "green"  # placeholder, immediately overwritten
    settings["theme"] = name
# Default streaming service, taken from the config when valid.
def get_default_service() -> str:
    """Return DEFAULT_SERVICE from the config if it names a known service.

    Falls back to "youtube" when DEFAULT_SERVICE is undefined or invalid.
    """
    known_services = ("youtube", "deezer", "saavn")
    try:
        requested = DEFAULT_SERVICE.lower()
    except NameError:  # DEFAULT_SERVICE not defined in the config at all
        return "youtube"
    return requested if requested in known_services else "youtube"
async def pause_skip_watcher(message: Message, duration: int, chat_id: int):
    """Keep *message* visible for *duration* seconds, honouring skip/pause.

    Polls db[chat_id] every 0.1 s: a "skipped" flag deletes the message and
    returns early; a "paused" flag suspends the countdown until cleared.
    """
    if "skipped" not in db[chat_id]:
        db[chat_id]["skipped"] = False
    if "paused" not in db[chat_id]:
        db[chat_id]["paused"] = False
    # duration * 10 iterations of 0.1 s sleeps ~= `duration` seconds total.
    for _ in range(duration * 10):
        if db[chat_id]["skipped"]:
            db[chat_id]["skipped"] = False
            return await message.delete()
        if db[chat_id]["paused"]:
            # Sleep-poll while playback stays paused; paused time does not
            # count against the remaining duration.
            while db[chat_id]["paused"]:
                await asyncio.sleep(0.1)
                continue
        await asyncio.sleep(0.1)
    db[chat_id]["skipped"] = False
async def change_vc_title(title: str, chat_id):
    """Rename the active group (voice chat) call of *chat_id* to *title*.

    Uses raw MTProto calls; raises if no call is active or the session
    lacks the required admin rights (callers catch this).
    """
    peer = await app.resolve_peer(chat_id)
    chat = await app.send(GetFullChannel(channel=peer))
    data = EditGroupCallTitle(call=chat.full_chat.call, title=title)
    await app.send(data)
def transcode(filename: str, chat_id: str):
    """Convert *filename* to raw 48 kHz stereo s16le PCM, then delete it.

    Writes input<chat_id>.raw (overwriting any existing file). Blocking;
    callers run it in an executor.
    """
    raw_path = f"input{chat_id}.raw"
    pipeline = ffmpeg.input(filename)
    pipeline = pipeline.output(
        raw_path,
        format="s16le",
        acodec="pcm_s16le",
        ac=2,
        ar="48k",
        loglevel="error",
    )
    pipeline.overwrite_output().run()
    os.remove(filename)
# Download song
async def download_and_transcode_song(url, chat_id):
    """Download *url* to <chat_id>.mp3, then transcode it to raw PCM.

    Nothing happens on a non-200 response. The blocking ffmpeg transcode
    runs in the default executor so the event loop is not stalled.
    """
    song = f"{chat_id}.mp3"
    async with session.get(url) as resp:
        if resp.status == 200:
            f = await aiofiles.open(song, mode="wb")
            await f.write(await resp.read())
            await f.close()
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(
                None, functools.partial(transcode, song, chat_id)
            )
# Convert seconds to mm:ss
def convert_seconds(seconds: int):
    """Format *seconds* as "MM:SS"; any whole hours are discarded."""
    seconds %= 24 * 3600
    minutes, seconds = divmod(seconds % 3600, 60)
    return f"{minutes:02d}:{seconds:02d}"
# Convert hh:mm:ss to seconds
def time_to_seconds(time):
    """Convert "hh:mm:ss" / "mm:ss" / "ss" (or an int) to total seconds."""
    total = 0
    for part in str(time).split(":"):
        total = total * 60 + int(part)  # Horner's scheme in base 60
    return total
# Change image size
def changeImageSize(maxWidth: int, maxHeight: int, image):
    """Return *image* resized (aspect ratio NOT preserved) to the target box."""
    original_width, original_height = image.size
    scaled_width = int((maxWidth / original_width) * original_width)
    scaled_height = int((maxHeight / original_height) * original_height)
    return image.resize((scaled_width, scaled_height))
# Generate cover for youtube
async def generate_cover(
    requested_by, title, views_or_artist, duration, thumbnail, chat_id
):
    """Compose the now-playing cover image and return its file path.

    Downloads *thumbnail*, alpha-blends the chat's theme overlay on top,
    draws the track metadata, and (best effort) renames the active voice
    chat to the track title. Intermediate files are deleted; the returned
    final<chat_id>.png is the caller's responsibility to remove.
    """
    async with session.get(thumbnail) as resp:
        if resp.status == 200:
            f = await aiofiles.open(f"background{chat_id}.png", mode="wb")
            await f.write(await resp.read())
            await f.close()
    background = f"./background{chat_id}.png"
    final = f"final{chat_id}.png"
    temp = f"temp{chat_id}.png"
    # Scale both layers to 1280x720, then composite overlay over thumbnail.
    image1 = Image.open(background)
    image2 = Image.open(f"etc/foreground_{get_theme(chat_id)}.png")
    image3 = changeImageSize(1280, 720, image1)
    image4 = changeImageSize(1280, 720, image2)
    image5 = image3.convert("RGBA")
    image6 = image4.convert("RGBA")
    Image.alpha_composite(image5, image6).save(temp)
    img = Image.open(temp)
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype("etc/font.otf", 32)
    # Metadata lines at fixed positions matching the overlay artwork.
    draw.text((190, 550), f"Title: {title}", (255, 255, 255), font=font)
    draw.text((190, 590), f"Duration: {duration}", (255, 255, 255), font=font)
    draw.text(
        (190, 630),
        f"Views/Artist: {views_or_artist}",
        (255, 255, 255),
        font=font,
    )
    draw.text(
        (190, 670), f"Requested By: {requested_by}", (255, 255, 255), font=font
    )
    img.save(final)
    os.remove(temp)
    os.remove(background)
    # Renaming the VC needs admin rights; warn the chat instead of failing.
    try:
        await change_vc_title(title, chat_id)
    except Exception:
        await app.send_message(chat_id, text="[ERROR]: FAILED TO EDIT VC TITLE, MAKE ME ADMIN.")
        pass
    return final
# Deezer
async def deezer(requested_by, query, message: Message):
    """Search Deezer for *query*, play the top hit and show a cover card.

    Downloads/transcodes the track and builds the cover concurrently,
    posts the cover photo, then waits out the song's duration (honouring
    pause/skip) before deleting the card.
    """
    m = await message.reply_text(
        f"__**Searching for {query} on Deezer.**__", quote=False
    )
    songs = await arq.deezer(query, 1)
    if not songs.ok:
        return await m.edit(songs.result)
    songs = songs.result
    title = songs[0].title
    # Keep the raw length in seconds; `duration` is the "MM:SS" display form.
    # FIX: the original later re-read it as songs[0]["duration"] (dict
    # subscription), inconsistent with the attribute access used everywhere
    # else for these results.
    duration_seconds = int(songs[0].duration)
    duration = convert_seconds(duration_seconds)
    thumbnail = songs[0].thumbnail
    artist = songs[0].artist
    url = songs[0].url
    await m.edit("__**Downloading And Transcoding.**__")
    cover, _ = await asyncio.gather(
        generate_cover(
            requested_by, title, artist, duration, thumbnail, message.chat.id
        ),
        download_and_transcode_song(url, message.chat.id),
    )
    await m.delete()
    caption = (
        f"🏷 **Name:** [{title[:45]}]({url})\n⏳ **Duration:** {duration}\n"
        + f"🎧 **Requested By:** {message.from_user.mention}\n📡 **Platform:** Deezer"
    )
    m = await message.reply_photo(
        photo=cover,
        caption=caption,
    )
    os.remove(cover)
    await pause_skip_watcher(m, duration_seconds, message.chat.id)
    await m.delete()
# saavn
async def saavn(requested_by, query, message):
    """Search JioSaavn for *query*, play the top hit and show a cover card.

    Mirrors deezer(): download + transcode concurrently with cover
    generation, post the cover, then wait out the track's duration
    (honouring pause/skip) before deleting the card.
    """
    m = await message.reply_text(
        f"__**Searching for {query} on JioSaavn.**__", quote=False
    )
    songs = await arq.saavn(query)
    if not songs.ok:
        return await m.edit(songs.result)
    songs = songs.result
    sname = songs[0].song
    slink = songs[0].media_url
    ssingers = songs[0].singers
    sthumb = songs[0].image
    sduration = songs[0].duration
    sduration_converted = convert_seconds(int(sduration))
    await m.edit("__**Downloading And Transcoding.**__")
    # Build the cover image and fetch/transcode the audio concurrently.
    cover, _ = await asyncio.gather(
        generate_cover(
            requested_by,
            sname,
            ssingers,
            sduration_converted,
            sthumb,
            message.chat.id,
        ),
        download_and_transcode_song(slink, message.chat.id),
    )
    await m.delete()
    caption = (
        f"🏷 **Name:** {sname[:45]}\n⏳ **Duration:** {sduration_converted}\n"
        + f"🎧 **Requested By:** {message.from_user.mention}\n📡 **Platform:** JioSaavn"
    )
    m = await message.reply_photo(
        photo=cover,
        caption=caption,
    )
    os.remove(cover)
    duration = int(sduration)
    # Keep the cover message up for the whole track, then remove it.
    await pause_skip_watcher(m, duration, message.chat.id)
    await m.delete()
# Youtube
async def youtube(requested_by, query, message):
    """Search YouTube for *query*, play the top result and show a cover card.

    Tracks of 30 minutes or longer are rejected. Audio is fetched with
    youtube_dl and transcoded to raw PCM in an executor thread.
    """
    ydl_opts = {"format": "bestaudio", "quiet": True}
    m = await message.reply_text(
        f"__**Searching for {query} on YouTube.**__", quote=False
    )
    results = await arq.youtube(query)
    if not results.ok:
        return await m.edit(results.result)
    results = results.result
    link = f"https://youtube.com{results[0].url_suffix}"
    title = results[0].title
    thumbnail = results[0].thumbnails[0]
    duration = results[0].duration
    views = results[0].views
    # Hard limit: refuse anything 30 minutes (1800 s) or longer.
    if time_to_seconds(duration) >= 1800:
        return await m.edit("__**Bruh! Only songs within 30 Mins.**__")
    await m.edit("__**Processing Thumbnail.**__")
    cover = await generate_cover(
        requested_by, title, views, duration, thumbnail, message.chat.id
    )
    await m.edit("__**Downloading Music.**__")
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        info_dict = ydl.extract_info(link, download=False)
        audio_file = ydl.prepare_filename(info_dict)
        ydl.process_info(info_dict)
    await m.edit("__**Transcoding.**__")
    # youtube_dl saves under its own name; normalise to audio<chat_id>.webm
    # so concurrent chats do not collide.
    song = f"audio{message.chat.id}.webm"
    os.rename(audio_file, song)
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        None, functools.partial(transcode, song, message.chat.id)
    )
    await m.delete()
    caption = (
        f"🏷 **Name:** [{title[:45]}]({link})\n⏳ **Duration:** {duration}\n"
        + f"🎧 **Requested By:** {message.from_user.mention}\n📡 **Platform:** YouTube"
    )
    m = await message.reply_photo(
        photo=cover,
        caption=caption,
    )
    os.remove(cover)
    duration = int(time_to_seconds(duration))
    await pause_skip_watcher(m, duration, message.chat.id)
    await m.delete()
| 30.274691
| 96
| 0.635233
|
acfc5cbf7be63f11aef63b2e1fb2c1625bac45be
| 2,692
|
py
|
Python
|
qiskit_machine_learning/datasets/wine.py
|
gabrieleagl/qiskit-machine-learning
|
a38e1e8bd044d6993361fad6741131531ab6ef4b
|
[
"Apache-2.0"
] | null | null | null |
qiskit_machine_learning/datasets/wine.py
|
gabrieleagl/qiskit-machine-learning
|
a38e1e8bd044d6993361fad6741131531ab6ef4b
|
[
"Apache-2.0"
] | null | null | null |
qiskit_machine_learning/datasets/wine.py
|
gabrieleagl/qiskit-machine-learning
|
a38e1e8bd044d6993361fad6741131531ab6ef4b
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
wine dataset
"""
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from qiskit.exceptions import MissingOptionalLibraryError
def wine(training_size, test_size, n, plot_data=False):
    """Return the Wine dataset reduced to *n* PCA features, scaled to [-1, 1].

    The split uses a fixed random_state for reproducibility. Returns
    (sample_train, training_input, test_input, class_labels), where the two
    dicts map class label -> per-class sample arrays.
    """
    class_labels = [r'A', r'B', r'C']
    features, targets = datasets.load_wine(return_X_y=True)
    sample_train, sample_test, label_train, label_test = train_test_split(
        features, targets, test_size=test_size, random_state=7)
    # Standardize to zero mean / unit variance, fitted on the training split.
    scaler = StandardScaler().fit(sample_train)
    sample_train = scaler.transform(sample_train)
    sample_test = scaler.transform(sample_test)
    # Project onto the first n principal components (one per qubit).
    pca = PCA(n_components=n).fit(sample_train)
    sample_train = pca.transform(sample_train)
    sample_test = pca.transform(sample_test)
    # Rescale every feature into the interval [-1, +1].
    combined = np.append(sample_train, sample_test, axis=0)
    minmax = MinMaxScaler((-1, 1)).fit(combined)
    sample_train = minmax.transform(sample_train)
    sample_test = minmax.transform(sample_test)
    # Take at most training_size / test_size samples per class.
    training_input = {label: sample_train[label_train == index][:training_size]
                      for index, label in enumerate(class_labels)}
    test_input = {label: sample_test[label_test == index][:test_size]
                  for index, label in enumerate(class_labels)}
    if plot_data:
        try:
            import matplotlib.pyplot as plt
        except ImportError as ex:
            raise MissingOptionalLibraryError(
                libname='Matplotlib',
                name='wine',
                pip_install='pip install matplotlib') from ex
        # Scatter the first two PCA components, one colour per class.
        for index in range(0, 3):
            plt.scatter(sample_train[label_train == index, 0][:training_size],
                        sample_train[label_train == index, 1][:training_size])
        plt.title("PCA dim. reduced Wine dataset")
        plt.show()
    return sample_train, training_input, test_input, class_labels
| 38.457143
| 78
| 0.697994
|
acfc5e109ab5262485c963c8ca103907d96436f8
| 3,982
|
py
|
Python
|
GPL/traffic_profiles/trex/trex-astf-ethip4tcp-4096h.py
|
nidhyanandhan/csit
|
2156583b4e66f2c3c35903c854b1823b76a4e9a6
|
[
"Apache-2.0"
] | null | null | null |
GPL/traffic_profiles/trex/trex-astf-ethip4tcp-4096h.py
|
nidhyanandhan/csit
|
2156583b4e66f2c3c35903c854b1823b76a4e9a6
|
[
"Apache-2.0"
] | null | null | null |
GPL/traffic_profiles/trex/trex-astf-ethip4tcp-4096h.py
|
nidhyanandhan/csit
|
2156583b4e66f2c3c35903c854b1823b76a4e9a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Traffic profile for T-rex advanced stateful (astf) traffic generator.
Traffic profile:
- Two streams sent in directions 0 --> 1 (client -> server, requests) and
1 --> 0 (server -> client, responses) at the same time.
- Packet: ETH / IP / TCP
- Direction 0 --> 1:
- Source IP address range: 192.168.0.0 - 192.168.15.255
- Destination IP address range: 20.0.0.0 - 20.0.15.255
- Direction 1 --> 0:
- Source IP address range: destination IP address from packet received
on port 1
- Destination IP address range: source IP address from packet received
on port 1
"""
from trex.astf.api import *
from profile_trex_astf_base_class import TrafficProfileBaseClass
class TrafficProfile(TrafficProfileBaseClass):
    """Traffic profile."""
    def __init__(self, **kwargs):
        """Initialization and setting of profile parameters."""
        # NOTE(review): naming TrafficProfileBaseClass here skips that class's
        # own __init__ and invokes its parent's instead -- confirm intended
        # (a plain super().__init__() would run the base initializer).
        super(TrafficProfileBaseClass, self).__init__()
        # IPs used in packet headers.
        self.p1_src_start_ip = u"192.168.0.0"
        self.p1_src_end_ip = u"192.168.15.255"
        self.p1_dst_start_ip = u"20.0.0.0"
        self.p1_dst_end_ip = u"20.0.15.255"
        # Headers length; not used in this profile, just for the record of
        # header length for TCP packet with 0B payload
        self.headers_size = 58 # 14B l2 + 20B ipv4 + 24B tcp incl. 4B options
        # Delay for keeping tcp sessions active
        self.delay = 2000000 # delay 2s (2,000,000 usec)
    def define_profile(self):
        """Define profile to be used by advanced stateful traffic generator.
        This method MUST return:
            return ip_gen, templates, None
        :returns: IP generator and profile templates for ASTFProfile().
        :rtype: tuple
        """
        # client commands
        prog_c = ASTFProgram()
        # send syn
        prog_c.connect()
        # receive syn-ack (0B sent in tcp syn-ack packet) and send ack
        prog_c.recv(0)
        # wait defined time, then send fin-ack
        prog_c.delay(self.delay)
        # server commands
        prog_s = ASTFProgram()
        # receive syn, send syn-ack
        prog_s.accept()
        # receive fin-ack, send ack + fin-ack
        prog_s.wait_for_peer_close()
        # ip generators; "seq" walks both ranges sequentially
        ip_gen_c = ASTFIPGenDist(
            ip_range=[self.p1_src_start_ip, self.p1_src_end_ip],
            distribution=u"seq"
        )
        ip_gen_s = ASTFIPGenDist(
            ip_range=[self.p1_dst_start_ip, self.p1_dst_end_ip],
            distribution=u"seq"
        )
        ip_gen = ASTFIPGen(
            glob=ASTFIPGenGlobal(ip_offset=u"0.0.0.1"),
            dist_client=ip_gen_c,
            dist_server=ip_gen_s
        )
        # server association: match server flows on TCP port 8080
        s_assoc = ASTFAssociation(rules=ASTFAssociationRule(port=8080))
        # template
        temp_c = ASTFTCPClientTemplate(
            program=prog_c,
            ip_gen=ip_gen,
            limit=258048, # TODO: set via input parameter
            port=8080
        )
        temp_s = ASTFTCPServerTemplate(program=prog_s, assoc=s_assoc)
        template = ASTFTemplate(client_template=temp_c, server_template=temp_s)
        return ip_gen, template, None
def register():
    """Register this traffic profile to T-Rex.
    Do not change this function.
    :return: Traffic Profiles.
    :rtype: Object
    """
    # Entry point looked up by T-Rex when the profile module is loaded.
    return TrafficProfile()
| 32.909091
| 79
| 0.645907
|
acfc602fbba87afabbd22cd2c32465965d6675fe
| 1,299
|
py
|
Python
|
src/spaceone/monitoring/error/data_source.py
|
xellos00/monitoring
|
deb5363a2152e7b3f85a08d27bdede0e00023824
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/monitoring/error/data_source.py
|
xellos00/monitoring
|
deb5363a2152e7b3f85a08d27bdede0e00023824
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/monitoring/error/data_source.py
|
xellos00/monitoring
|
deb5363a2152e7b3f85a08d27bdede0e00023824
|
[
"Apache-2.0"
] | null | null | null |
from spaceone.core.error import *
# Domain-specific exceptions for data-source operations. Each _message is a
# str.format template filled in with keyword arguments at raise time.
class ERROR_INVALID_PLUGIN_VERSION(ERROR_INVALID_ARGUMENT):
    _message = 'Plugin version is invalid. (plugin_id = {plugin_id}, version = {version})'
class ERROR_SUPPORTED_SECRETS_NOT_EXISTS(ERROR_INVALID_ARGUMENT):
    _message = 'There are no secrets that support plugins. (plugin_id = {plugin_id}, provider = {provider})'
class ERROR_RESOURCE_SECRETS_NOT_EXISTS(ERROR_INVALID_ARGUMENT):
    _message = 'There are no secrets in the resources. (resource_id = {resource_id})'
class ERROR_NOT_ALLOWED_PLUGIN_ID(ERROR_INVALID_ARGUMENT):
    _message = 'Changing plugin_id is not allowed. (old_plugin_id = {old_plugin_id}, new_plugin_id = {new_plugin_id})'
# NOTE(review): the double underscore in the next class name looks like a
# typo, but renaming it would break any module importing it.
class ERROR__PLUGIN_VERSION_NOT_EXISTS(ERROR_INVALID_ARGUMENT):
    _message = 'There is no plugin version with given version info. (old_version = {old_version}, new_version = {new_version})'
class ERROR_WRONG_PLUGIN_SETTINGS(ERROR_BASE):
    _message = "The plugin settings is incorrect. (key = {key})"
class ERROR_INVALID_PLUGIN_OPTIONS(ERROR_INTERNAL_API):
    _message = 'The options received from the plugin is invalid. (reason = {reason})'
class ERROR_DATA_SOURCE_STATE_DISABLED(ERROR_INVALID_ARGUMENT):
    _message = 'Data source state is disabled. (data_source_id = {data_source_id})'
| 38.205882
| 127
| 0.7806
|
acfc606928b8b7fef586bd1e2dbf5ab6282b328f
| 2,030
|
py
|
Python
|
example/module/python_loss.py
|
ranxian/mxnet-gpups
|
f03261cf2e2aa1a87c436913624b86bb3fbec6a4
|
[
"Apache-2.0"
] | null | null | null |
example/module/python_loss.py
|
ranxian/mxnet-gpups
|
f03261cf2e2aa1a87c436913624b86bb3fbec6a4
|
[
"Apache-2.0"
] | null | null | null |
example/module/python_loss.py
|
ranxian/mxnet-gpups
|
f03261cf2e2aa1a87c436913624b86bb3fbec6a4
|
[
"Apache-2.0"
] | null | null | null |
# pylint: skip-file
import numpy as np
import mxnet as mx
import numba
import logging
# We use numba.jit to implement the loss gradient.
@numba.jit
def mc_hinge_grad(scores, labels):
    """Gradient of the multi-class hinge loss.

    For each sample, subgradient is -1 at the true class and +1 at the
    highest-margin violating class.

    :param scores: NDArray of shape (n, num_classes) with raw scores.
    :param labels: NDArray of shape (n,) with the true class ids.
    :return: numpy array of the same shape as *scores*.
    """
    scores = scores.asnumpy()
    # FIX: labels from the data iterator may be float32; cast to an integer
    # dtype so they are valid numpy indices below.
    labels = labels.asnumpy().astype(np.int64)
    n, _ = scores.shape
    grad = np.zeros_like(scores)
    for i in range(n):
        # Margins: 1 + s_j - s_y, zeroed at the true class.
        margin = 1 + scores[i] - scores[i, labels[i]]
        margin[labels[i]] = 0
        ind_pred = margin.argmax()
        grad[i, labels[i]] -= 1
        grad[i, ind_pred] += 1
    return grad
if __name__ == '__main__':
    n_epoch = 10
    batch_size = 100
    # Set num_gpu > 0 to train on GPUs instead of the CPU.
    num_gpu = 0
    contexts = mx.context.cpu() if num_gpu < 1 else [mx.context.gpu(i) for i in range(num_gpu)]
    # build a MLP module
    data = mx.symbol.Variable('data')
    fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
    act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu")
    fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64)
    act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu")
    fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)
    mlp = mx.mod.Module(fc3, context=contexts)
    # Chain the MLP with the custom Python hinge loss defined above.
    loss = mx.mod.PythonLossModule(grad_func=mc_hinge_grad)
    mod = mx.mod.SequentialModule() \
        .add(mlp) \
        .add(loss, take_labels=True, auto_wiring=True)
    # MNIST iterators; expects the raw idx files under ./data.
    train_dataiter = mx.io.MNISTIter(
        image="data/train-images-idx3-ubyte",
        label="data/train-labels-idx1-ubyte",
        data_shape=(784,),
        batch_size=batch_size, shuffle=True, flat=True, silent=False, seed=10)
    val_dataiter = mx.io.MNISTIter(
        image="data/t10k-images-idx3-ubyte",
        label="data/t10k-labels-idx1-ubyte",
        data_shape=(784,),
        batch_size=batch_size, shuffle=True, flat=True, silent=False)
    logging.basicConfig(level=logging.DEBUG)
    mod.fit(train_dataiter, eval_data=val_dataiter,
            optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
            num_epoch=n_epoch)
| 33.278689
| 95
| 0.635961
|
acfc6163675d56d71f3cf3f8a88f16319da423ba
| 1,540
|
py
|
Python
|
datasets/CLS/factory.py
|
zhangzhengde0225/SwinTrack
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
[
"MIT"
] | 143
|
2021-12-03T02:33:36.000Z
|
2022-03-29T00:01:48.000Z
|
datasets/CLS/factory.py
|
zhangzhengde0225/SwinTrack
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
[
"MIT"
] | 33
|
2021-12-03T10:32:05.000Z
|
2022-03-31T02:13:55.000Z
|
datasets/CLS/factory.py
|
zhangzhengde0225/SwinTrack
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
[
"MIT"
] | 24
|
2021-12-04T06:46:42.000Z
|
2022-03-30T07:57:47.000Z
|
from datasets.base.factory import DatasetFactory
from datasets.base.image.dataset import ImageDataset
from datasets.types.specialized_dataset import SpecializedImageDatasetType
from datasets.base.image.filter.func import apply_filters_on_image_dataset_
from datasets.CLS.dataset import ImageClassificationDataset_MemoryMapped
from typing import List
__all__ = ['ImageClassificationDatasetFactory']
class ImageClassificationDatasetFactory(DatasetFactory):
    """Factory producing memory-mapped image-classification datasets."""
    def __init__(self, seeds: list):
        # Configure the generic DatasetFactory for the classification
        # specialization, both for filtering and for the final dataset type.
        super().__init__(seeds, ImageDataset,
                         SpecializedImageDatasetType.Classification,
                         apply_filters_on_image_dataset_,
                         SpecializedImageDatasetType.Classification,
                         ImageClassificationDataset_MemoryMapped)

    def construct(self, filters: list = None, cache_base_format: bool = True, dump_human_readable: bool = False) -> List[ImageClassificationDataset_MemoryMapped]:
        """Build the specialized (classification) datasets."""
        return super().construct(filters, cache_base_format, dump_human_readable)

    def construct_as_base_interface(self, filters=None, make_cache=False, dump_human_readable=False) -> List[ImageDataset]:
        """Build datasets exposed through the generic image-dataset interface."""
        return super().construct_as_base_interface(filters, make_cache, dump_human_readable)
| 64.166667
| 162
| 0.705195
|
acfc6165e66e507b31662eb98f49b726525f351b
| 3,515
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/afipiaspp5210.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/afipiaspp5210.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/afipiaspp5210.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Afipia sp. P5210.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def AfipiaSpP5210(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Afipia sp. P5210 graph.
    The graph is automatically retrieved from the STRING repository.
    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True,
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.0
            - homology.v11.5
            - physical.links.v11.0
            - physical.links.v11.5
            - links.v11.0
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.
    Returns
    -----------------------
    Instance of Afipia sp. P5210 graph.
    References
    ---------------------
    Please cite the following if you use the data:
    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Delegate to the generic retriever; the trailing () builds the Graph.
    return AutomaticallyRetrievedGraph(
        graph_name="AfipiaSpP5210",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| 32.546296
| 223
| 0.672831
|
acfc61a84e406db933ff9ede4bc91736a2e32009
| 3,591
|
py
|
Python
|
app/views.py
|
Alex0216/tempLogger
|
220bee884216840942249ed849c063000c520410
|
[
"MIT"
] | null | null | null |
app/views.py
|
Alex0216/tempLogger
|
220bee884216840942249ed849c063000c520410
|
[
"MIT"
] | null | null | null |
app/views.py
|
Alex0216/tempLogger
|
220bee884216840942249ed849c063000c520410
|
[
"MIT"
] | null | null | null |
from app import app
from flask import Flask, render_template, g, request, jsonify
import sqlite3
from dateutil.parser import parse
from datetime import datetime, timedelta
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use.

    The connection is cached on Flask's ``g`` object as ``_database``.
    """
    if getattr(g, '_database', None) is None:
        g._database = sqlite3.connect('templog.db')
    return g._database
# FIX: the decorator was commented out, so the SQLite connection opened by
# get_db() was never closed at the end of a request (connection leak).
@app.teardown_appcontext
def close_connection(exception):
    """Close the per-request SQLite connection on app-context teardown."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
@app.route('/')
@app.route('/index')
def index():
    """Render the dashboard page.

    FIX: the original queried the last 24 h of samples and built
    temperature/humidity/timestamp lists, but never passed any of them to
    the template -- render_template('index.html') took no context, so the
    query was dead work on every page load. The template fetches its data
    through the /_get_data AJAX endpoint instead.
    """
    return render_template('index.html')
@app.route('/_get_data')
def get_data():
    """Return temperature/humidity samples between startDate and endDate.

    NOTE(review): when either query parameter is missing, the view falls
    through and implicitly returns None (a 500 in Flask) -- confirm this
    is intended.
    """
    if request.args.get('startDate') and request.args.get('endDate'):
        startDate = request.args["startDate"]
        endDate = request.args["endDate"]
        print(parse(startDate))
        print(parse(endDate))
        fromDate = parse(startDate)
        toDate = parse(endDate)
        cur = get_db().cursor()
        cur.execute("SELECT * FROM temphum WHERE timestamp BETWEEN ? AND ? ORDER BY timestamp", (fromDate.isoformat(), toDate.isoformat()))
        rows = cur.fetchall()
        # Newest sample first.
        rows.reverse()
        temperatures = [round(x[1],2) for x in rows]
        humidities = [round(x[2],2) for x in rows]
        timestamps = [x[0] for x in rows] #strftime("%Y-%b-%dT%H:%M")
        print(timestamps)
        return jsonify(timestamps=timestamps, temperatures=temperatures, humidities=humidities)
@app.route('/_get_humidityMinMax')
def get_humidityMinMax():
    """Return per-day min/max humidity between startDate and endDate.

    NOTE(review): when either query parameter is missing, the view falls
    through and implicitly returns None (a 500 in Flask) -- confirm this
    is intended.
    """
    if request.args.get('startDate') and request.args.get('endDate'):
        startDate = request.args["startDate"]
        endDate = request.args["endDate"]
        fromDate = parse(startDate)
        toDate = parse(endDate)
        cur = get_db().cursor()
        cur.execute("SELECT Date(timestamp) as Day , MAX(humidity) AS MaxHum, MIN(humidity) AS MinHum from temphum WHERE timestamp BETWEEN ? AND ? GROUP BY Day ORDER BY timestamp", (fromDate.isoformat(), toDate.isoformat()))
        rows = cur.fetchall()
        # Newest day first.
        rows.reverse()
        maxHums = [round(x[1], 2) for x in rows]
        minHums = [round(x[2], 2) for x in rows]
        timestamps = [x[0] for x in rows] #strftime("%Y-%b-%dT%H:%M")
        print(timestamps)
        return jsonify(timestamps=timestamps, maxHums=maxHums, minHums=minHums)
@app.route('/_get_temperaturesMinMax')
def get_temperaturesMinMax():
    """Return per-day min/max temperature between startDate and endDate.

    NOTE(review): when either query parameter is missing, the view falls
    through and implicitly returns None (a 500 in Flask) -- confirm this
    is intended.
    """
    if request.args.get('startDate') and request.args.get('endDate'):
        startDate = request.args["startDate"]
        endDate = request.args["endDate"]
        fromDate = parse(startDate)
        toDate = parse(endDate)
        cur = get_db().cursor()
        cur.execute("SELECT Date(timestamp) as Day , MAX(temp) AS MaxHum, MIN(temp) AS MinHum from temphum WHERE timestamp BETWEEN ? AND ? GROUP BY Day ORDER BY timestamp", (fromDate.isoformat(), toDate.isoformat()))
        rows = cur.fetchall()
        # Newest day first.
        rows.reverse()
        maxTemps = [round(x[1], 2) for x in rows]
        minTemps = [round(x[2], 2) for x in rows]
        timestamps = [x[0] for x in rows] #strftime("%Y-%b-%dT%H:%M")
        print(timestamps)
        return jsonify(timestamps=timestamps, maxTemps=maxTemps, minTemps=minTemps)
| 41.755814
| 224
| 0.645781
|
acfc6240d7593c81d67e64a990403dabbf775235
| 42,172
|
py
|
Python
|
position/views_admin.py
|
user512/WeVoteServer
|
df209ac61dc7d79a69388e0e3c61d13dd09afae6
|
[
"MIT"
] | null | null | null |
position/views_admin.py
|
user512/WeVoteServer
|
df209ac61dc7d79a69388e0e3c61d13dd09afae6
|
[
"MIT"
] | null | null | null |
position/views_admin.py
|
user512/WeVoteServer
|
df209ac61dc7d79a69388e0e3c61d13dd09afae6
|
[
"MIT"
] | null | null | null |
# position/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import positions_import_from_master_server, refresh_cached_position_info_for_election, \
refresh_positions_with_candidate_details_for_election, \
refresh_positions_with_contest_office_details_for_election, \
refresh_positions_with_contest_measure_details_for_election
from .models import ANY_STANCE, PositionEntered, PositionForFriends, PositionListManager, PERCENT_RATING
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign, CandidateCampaignManager
from config.base import get_environment_variable
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.db.models import Q
from election.models import ElectionManager
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception
from measure.controllers import push_contest_measure_data_to_other_table_caches
from office.controllers import push_contest_office_data_to_other_table_caches
from office.models import ContestOfficeManager
from organization.models import OrganizationManager
from politician.models import PoliticianManager
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
from django.http import HttpResponse
import json
# Sentinel speaker_type used when an organization's type is not yet known.
UNKNOWN = 'U'
POSITIONS_SYNC_URL = get_environment_variable("POSITIONS_SYNC_URL") # positionsSyncOut
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
def positions_sync_out_view(request): # positionsSyncOut
    """Export all public positions for one election as a JSON list.

    Consumed by other We Vote servers during sync. Requires the
    google_civic_election_id GET parameter; returns an error payload when
    it is missing or the query yields nothing.
    """
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    if not positive_value_exists(google_civic_election_id):
        json_data = {
            'success': False,
            'status': 'POSITION_LIST_CANNOT_BE_RETURNED-ELECTION_ID_REQUIRED'
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    stance_we_are_looking_for = ANY_STANCE
    try:
        # Only return public positions
        position_list_query = PositionEntered.objects.order_by('date_entered')
        # As of Aug 2018 we are no longer using PERCENT_RATING
        position_list_query = position_list_query.exclude(stance__iexact=PERCENT_RATING)
        position_list_query = position_list_query.filter(google_civic_election_id=google_civic_election_id)
        # SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
        if stance_we_are_looking_for != ANY_STANCE:
            # If we passed in the stance "ANY" it means we want to not filter down the list
            position_list_query = position_list_query.filter(stance__iexact=stance_we_are_looking_for)
        # convert datetime to str for date_entered and date_last_changed columns
        position_list_query = position_list_query.extra(
            select={'date_entered': "to_char(date_entered, 'YYYY-MM-DD HH24:MI:SS')"})
        position_list_query = position_list_query.extra(
            select={'date_last_changed': "to_char(date_last_changed, 'YYYY-MM-DD HH24:MI:SS')"})
        position_list_dict = position_list_query.values(
            'we_vote_id', 'ballot_item_display_name', 'ballot_item_image_url_https',
            'ballot_item_twitter_handle', 'speaker_display_name',
            'speaker_image_url_https', 'speaker_twitter_handle', 'date_entered',
            'date_last_changed', 'organization_we_vote_id', 'voter_we_vote_id',
            'public_figure_we_vote_id', 'google_civic_election_id', 'state_code',
            'vote_smart_rating_id', 'vote_smart_time_span', 'vote_smart_rating',
            'vote_smart_rating_name', 'contest_office_we_vote_id', 'race_office_level',
            'candidate_campaign_we_vote_id', 'google_civic_candidate_name',
            'politician_we_vote_id', 'contest_measure_we_vote_id', 'speaker_type', 'stance',
            'statement_text', 'statement_html', 'twitter_followers_count', 'more_info_url', 'from_scraper',
            'organization_certified', 'volunteer_certified', 'voter_entering_position',
            'tweet_source_id', 'twitter_user_entered_position', 'is_private_citizen')
        if position_list_dict:
            position_list_json = list(position_list_dict)
            return HttpResponse(json.dumps(position_list_json), content_type='application/json')
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)
    # Fall-through: the query failed or returned no rows.
    json_data = {
        'success': False,
        'status': 'POSITION_LIST_MISSING'
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
@login_required
def positions_import_from_master_server_view(request):
    """
    Pull position records for one election from the master We Vote server.

    Requires 'admin' authority and a google_civic_election_id, refuses to run
    on the master server itself, and reports the import result counts back to
    the sync dashboard.
    """
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'admin'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # Never let the master server try to sync from itself.
    if WE_VOTE_SERVER_ROOT_URL in POSITIONS_SYNC_URL:
        messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
                                                      "this is the Master We Vote Server.")
        return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')

    # Both exit paths below land on the same dashboard URL, so build it once.
    dashboard_url = reverse('admin_tools:sync_dashboard', args=()) + \
        "?google_civic_election_id=" + str(google_civic_election_id) + \
        "&state_code=" + str(state_code)

    if not positive_value_exists(google_civic_election_id):
        messages.add_message(request, messages.INFO, 'Google civic election id is required for Positions import.')
        return HttpResponseRedirect(dashboard_url)

    results = positions_import_from_master_server(request, google_civic_election_id)

    if results['success']:
        messages.add_message(request, messages.INFO, 'Positions import completed. '
                                                     'Saved: {saved}, Updated: {updated}, '
                                                     'Duplicates skipped: '
                                                     '{duplicates_removed}, '
                                                     'Not processed: {not_processed}'
                                                     ''.format(saved=results['saved'],
                                                               updated=results['updated'],
                                                               duplicates_removed=results['duplicates_removed'],
                                                               not_processed=results['not_processed']))
    else:
        messages.add_message(request, messages.ERROR, results['status'])

    return HttpResponseRedirect(dashboard_url)
def update_position_list_with_speaker_type(position_list):
    """
    Backfill speaker_type and twitter_followers_count on each position from
    its linked organization.

    Organization lookups are cached per organization we_vote_id so each
    organization is retrieved at most once.  A position is saved only when at
    least one of the two fields was actually filled in.
    """
    organization_manager = OrganizationManager()
    cached_organizations = {}  # organization we_vote_id -> organization object
    for position_entry in position_list:
        org_we_vote_id = position_entry.organization_we_vote_id
        organization = cached_organizations.get(org_we_vote_id)
        if organization is None:
            lookup = organization_manager.retrieve_organization_from_we_vote_id(org_we_vote_id)
            if lookup['organization_found']:
                organization = lookup['organization']
                cached_organizations[org_we_vote_id] = organization
        if organization is not None:
            speaker_type = organization.organization_type
            twitter_followers_count = organization.twitter_followers_count
        else:
            # No organization available for this position; leave defaults.
            speaker_type = UNKNOWN
            twitter_followers_count = 0
        needs_save = False
        if speaker_type != UNKNOWN:
            position_entry.speaker_type = speaker_type
            needs_save = True
        if positive_value_exists(twitter_followers_count):
            position_entry.twitter_followers_count = twitter_followers_count
            needs_save = True
        if needs_save:
            position_entry.save()
    return True
def update_position_list_with_contest_office_info(position_list):
    """
    Backfill candidate, contest office, and politician id fields on each
    position from its linked candidate record.

    Candidate and politician lookups are cached by we_vote_id so each record
    is retrieved at most once per call.  A position is saved only when at
    least one field was filled in.  Always returns True.
    """
    candidate_manager = CandidateCampaignManager()
    candidate_dict = {}  # cache: candidate we_vote_id -> candidate object
    politician_manager = PoliticianManager()
    politician_dict = {}  # cache: politician we_vote_id -> politician object
    for one_position in position_list:
        # Defaults mean "nothing found"; positive_value_exists() below treats
        # 0 / '' as missing, so those fields are then left untouched.
        candidate_campaign_id = 0
        contest_office_we_vote_id = ''
        contest_office_id = 0
        politician_we_vote_id = ''
        politician_id = 0
        position_change = False
        if one_position.candidate_campaign_we_vote_id in candidate_dict:
            # Candidate already retrieved earlier in this loop — use cache.
            candidate = candidate_dict[one_position.candidate_campaign_we_vote_id]
            candidate_campaign_id = candidate.id
            contest_office_we_vote_id = candidate.contest_office_we_vote_id
            contest_office_id = candidate.contest_office_id
            politician_we_vote_id = candidate.politician_we_vote_id
            politician_id = candidate.politician_id
        else:
            results = candidate_manager.retrieve_candidate_campaign_from_we_vote_id(
                one_position.candidate_campaign_we_vote_id)
            if results['candidate_campaign_found']:
                candidate = results['candidate_campaign']
                candidate_dict[one_position.candidate_campaign_we_vote_id] = candidate
                candidate_campaign_id = candidate.id
                contest_office_we_vote_id = candidate.contest_office_we_vote_id
                contest_office_id = candidate.contest_office_id
                politician_we_vote_id = candidate.politician_we_vote_id
                politician_id = candidate.politician_id
        # Copy over whichever values were found, tracking whether anything
        # changed so we can avoid a needless save().
        if positive_value_exists(candidate_campaign_id):
            one_position.candidate_campaign_id = candidate_campaign_id
            position_change = True
        if positive_value_exists(contest_office_we_vote_id):
            one_position.contest_office_we_vote_id = contest_office_we_vote_id
            position_change = True
        if positive_value_exists(contest_office_id):
            one_position.contest_office_id = contest_office_id
            position_change = True
        if positive_value_exists(politician_we_vote_id):
            one_position.politician_we_vote_id = politician_we_vote_id
            position_change = True
        if positive_value_exists(politician_id):
            one_position.politician_id = politician_id
            position_change = True
        elif positive_value_exists(politician_we_vote_id):
            # The candidate carried a politician we_vote_id but no numeric id:
            # look up the politician_id (cached per we_vote_id).
            if politician_we_vote_id in politician_dict:
                politician = politician_dict[politician_we_vote_id]
                one_position.politician_id = politician.id
                position_change = True
            else:
                results = politician_manager.retrieve_politician(0, politician_we_vote_id)
                if results['politician_found']:
                    politician = results['politician']
                    politician_dict[politician_we_vote_id] = politician
                    one_position.politician_id = politician.id
                    position_change = True
        if position_change:
            one_position.save()
    return True
@login_required
def position_list_view(request):
    """
    We actually don't want to see PositionForFriends entries in this view

    Admin page listing public (PositionEntered) and friends-only
    (PositionForFriends) positions for one election (or all upcoming
    elections).  As a side effect it also backfills speaker_type and contest
    office info on positions of the selected election before listing.
    Each list is capped at 50 rows for display.
    :param request:
    :return:
    """
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'partner_organization', 'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    show_all_elections = request.GET.get('show_all_elections', False)
    state_code = request.GET.get('state_code', '')
    position_search = request.GET.get('position_search', '')

    election_manager = ElectionManager()
    office_manager = ContestOfficeManager()
    google_civic_election_id_list = []
    # Choose the election set for the dropdown and for limiting the search.
    if positive_value_exists(show_all_elections):
        results = election_manager.retrieve_elections()
        election_list = results['election_list']
    else:
        results = election_manager.retrieve_upcoming_elections()
        election_list = results['election_list']
        # Make sure we always include the current election in the election_list, even if it is older
        if positive_value_exists(google_civic_election_id):
            this_election_found = False
            for one_election in election_list:
                if convert_to_int(one_election.google_civic_election_id) == convert_to_int(google_civic_election_id):
                    this_election_found = True
                    break
            if not this_election_found:
                results = election_manager.retrieve_election(google_civic_election_id)
                if results['election_found']:
                    one_election = results['election']
                    election_list.append(one_election)
    for one_election in election_list:
        google_civic_election_id_list.append(one_election.google_civic_election_id)

    # Make sure all positions in this election have a speaker_type
    public_position_list_clean_count = 0
    friend_position_list_clean_count = 0
    if positive_value_exists(google_civic_election_id):
        public_position_list_clean_query = PositionEntered.objects.all()
        public_position_list_clean_query = public_position_list_clean_query.filter(
            google_civic_election_id=google_civic_election_id,
            speaker_type=UNKNOWN,
        )
        public_position_list_clean_count_query = public_position_list_clean_query
        public_position_list_clean_count = public_position_list_clean_count_query.count()
        public_position_list_clean = list(public_position_list_clean_count_query)
        update_position_list_with_speaker_type(public_position_list_clean)

        friend_position_list_clean_query = PositionForFriends.objects.all()
        friend_position_list_clean_query = friend_position_list_clean_query.filter(
            google_civic_election_id=google_civic_election_id,
            speaker_type=UNKNOWN,
        )
        friend_position_list_clean_count_query = friend_position_list_clean_query
        friend_position_list_clean_count = friend_position_list_clean_count_query.count()
        friend_position_list_clean = list(friend_position_list_clean_count_query)
        update_position_list_with_speaker_type(friend_position_list_clean)

    # Make sure all candidate-related positions in this election have a contest_office information and politician info
    public_position_list_candidate_clean_count = 0
    friend_position_list_candidate_clean_count = 0
    if positive_value_exists(google_civic_election_id):
        # Candidate positions that have a candidate we_vote_id but no office.
        public_position_list_candidate_clean_query = PositionEntered.objects.all()
        public_position_list_candidate_clean_query = public_position_list_candidate_clean_query.filter(
            google_civic_election_id=google_civic_election_id,
        )
        public_position_list_candidate_clean_query = public_position_list_candidate_clean_query.exclude(
            Q(candidate_campaign_we_vote_id__isnull=True) | Q(candidate_campaign_we_vote_id=""))
        public_position_list_candidate_clean_query = public_position_list_candidate_clean_query.filter(
            Q(contest_office_we_vote_id__isnull=True) | Q(contest_office_we_vote_id=""))
        public_position_list_candidate_clean_count_query = public_position_list_candidate_clean_query
        public_position_list_candidate_clean_count = public_position_list_candidate_clean_count_query.count()
        public_position_list_candidate_clean = list(public_position_list_candidate_clean_count_query)
        update_position_list_with_contest_office_info(public_position_list_candidate_clean)

        friend_position_list_candidate_clean_query = PositionForFriends.objects.all()
        friend_position_list_candidate_clean_query = friend_position_list_candidate_clean_query.filter(
            google_civic_election_id=google_civic_election_id,
        )
        friend_position_list_candidate_clean_query = friend_position_list_candidate_clean_query.exclude(
            Q(candidate_campaign_we_vote_id__isnull=True) | Q(candidate_campaign_we_vote_id=""))
        friend_position_list_candidate_clean_query = friend_position_list_candidate_clean_query.filter(
            Q(contest_office_we_vote_id__isnull=True) | Q(contest_office_we_vote_id=""))
        friend_position_list_candidate_clean_count_query = friend_position_list_candidate_clean_query
        friend_position_list_candidate_clean_count = friend_position_list_candidate_clean_count_query.count()
        friend_position_list_candidate_clean = list(friend_position_list_candidate_clean_count_query)
        update_position_list_with_contest_office_info(friend_position_list_candidate_clean)

    # Publicly visible positions
    public_position_list_query = PositionEntered.objects.order_by('-id')  # This order_by is temp
    # As of Aug 2018 we are no longer using PERCENT_RATING
    public_position_list_query = public_position_list_query.exclude(stance__iexact=PERCENT_RATING)
    if positive_value_exists(google_civic_election_id):
        # Also include positions for offices "visiting" this election.
        office_visiting_list_we_vote_ids = office_manager.fetch_office_visiting_list_we_vote_ids(
            host_google_civic_election_id_list=[google_civic_election_id])
        public_position_list_query = public_position_list_query.filter(
            Q(google_civic_election_id=google_civic_election_id) |
            Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))
    elif positive_value_exists(show_all_elections):
        # Return offices from all elections
        pass
    else:
        # Limit this search to upcoming_elections only
        office_visiting_list_we_vote_ids = office_manager.fetch_office_visiting_list_we_vote_ids(
            host_google_civic_election_id_list=google_civic_election_id_list)
        public_position_list_query = public_position_list_query.filter(
            Q(google_civic_election_id__in=google_civic_election_id_list) |
            Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))

    if positive_value_exists(position_search):
        # Each search word must match at least one field (words are AND-ed,
        # fields within a word are OR-ed).
        search_words = position_search.split()
        for one_word in search_words:
            filters = []

            new_filter = Q(state_code__icontains=one_word)
            filters.append(new_filter)

            new_filter = Q(we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(candidate_campaign_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(contest_measure_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(contest_office_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(organization_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(voter_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(google_civic_measure_title__icontains=one_word)
            filters.append(new_filter)

            new_filter = Q(speaker_display_name__icontains=one_word)
            filters.append(new_filter)

            new_filter = Q(ballot_item_display_name__icontains=one_word)
            filters.append(new_filter)

            if len(filters):
                final_filters = filters.pop()

                # ...and "OR" the remaining items in the list
                for item in filters:
                    final_filters |= item

                public_position_list_query = public_position_list_query.filter(final_filters)

    public_position_list_count_query = public_position_list_query
    public_position_list_count = public_position_list_count_query.count()

    # Count how many of the matching positions carry commentary text.
    public_position_list_comments_count_query = public_position_list_query
    public_position_list_comments_count_query = public_position_list_comments_count_query.exclude(
        (Q(statement_text__isnull=True) | Q(statement_text__exact='')))
    public_position_list_comments_count = public_position_list_comments_count_query.count()

    public_position_list_query = public_position_list_query[:50]
    public_position_list = list(public_position_list_query)

    # Friends-only visible positions
    friends_only_position_list_query = PositionForFriends.objects.order_by('-id')  # This order_by is temp
    # As of Aug 2018 we are no longer using PERCENT_RATING
    friends_only_position_list_query = friends_only_position_list_query.exclude(stance__iexact=PERCENT_RATING)
    if positive_value_exists(google_civic_election_id):
        office_visiting_list_we_vote_ids = office_manager.fetch_office_visiting_list_we_vote_ids(
            host_google_civic_election_id_list=[google_civic_election_id])
        friends_only_position_list_query = friends_only_position_list_query.filter(
            Q(google_civic_election_id=google_civic_election_id) |
            Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))
    elif positive_value_exists(show_all_elections):
        # Return offices from all elections
        pass
    else:
        # Limit this search to upcoming_elections only
        office_visiting_list_we_vote_ids = office_manager.fetch_office_visiting_list_we_vote_ids(
            host_google_civic_election_id_list=google_civic_election_id_list)
        friends_only_position_list_query = friends_only_position_list_query.filter(
            Q(google_civic_election_id__in=google_civic_election_id_list) |
            Q(contest_office_we_vote_id__in=office_visiting_list_we_vote_ids))

    if positive_value_exists(position_search):
        # Same word-by-word search as above, plus contest_office_name which
        # exists on PositionForFriends search here.
        search_words = position_search.split()
        for one_word in search_words:
            filters = []

            new_filter = Q(state_code__icontains=one_word)
            filters.append(new_filter)

            new_filter = Q(we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(candidate_campaign_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(contest_measure_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(contest_office_name__icontains=one_word)
            filters.append(new_filter)

            new_filter = Q(contest_office_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(organization_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(voter_we_vote_id__iexact=one_word)
            filters.append(new_filter)

            new_filter = Q(google_civic_measure_title__icontains=one_word)
            filters.append(new_filter)

            new_filter = Q(speaker_display_name__icontains=one_word)
            filters.append(new_filter)

            new_filter = Q(ballot_item_display_name__icontains=one_word)
            filters.append(new_filter)

            if len(filters):
                final_filters = filters.pop()

                # ...and "OR" the remaining items in the list
                for item in filters:
                    final_filters |= item

                friends_only_position_list_query = friends_only_position_list_query.filter(final_filters)

    friends_only_position_list_count_query = friends_only_position_list_query
    friends_only_position_list_comments_count_query = friends_only_position_list_query
    friends_only_position_list_count = friends_only_position_list_count_query.count()
    friends_only_position_list_comments_count_query = friends_only_position_list_comments_count_query.exclude(
        (Q(statement_text__isnull=True) | Q(statement_text__exact='')))
    friends_only_position_list_comments_count = friends_only_position_list_comments_count_query.count()

    friends_only_position_list_query = friends_only_position_list_query[:50]
    friends_only_position_list = list(friends_only_position_list_query)

    position_list = public_position_list + friends_only_position_list

    # Report counts and any backfill activity to the admin.
    messages.add_message(
        request, messages.INFO,
        str(public_position_list_count) + ' public positions found ' +
        '(' + str(public_position_list_comments_count) + ' with commentary). ' +
        str(friends_only_position_list_count) + ' friends-only positions found ' +
        '(' + str(friends_only_position_list_comments_count) + ' with commentary). '
    )

    if public_position_list_clean_count or friend_position_list_clean_count:
        messages.add_message(
            request, messages.INFO,
            str(public_position_list_clean_count) + ' public positions updated with speaker_type. ' +
            str(friend_position_list_clean_count) + ' friends-only positions updated with speaker_type. '
        )

    if public_position_list_candidate_clean_count or friend_position_list_candidate_clean_count:
        messages.add_message(
            request, messages.INFO,
            str(public_position_list_candidate_clean_count) + ' public positions updated with office info. ' +
            str(friend_position_list_candidate_clean_count) + ' friends-only positions updated with office info. '
        )

    template_values = {
        'messages_on_stage': messages_on_stage,
        'position_list': position_list,
        'position_search': position_search,
        'election_list': election_list,
        'google_civic_election_id': google_civic_election_id,
        'show_all_elections': show_all_elections,
        'state_code': state_code,
    }
    return render(request, 'position/position_list.html', template_values)
@login_required
def position_new_view(request):
    """Render an empty position edit form for creating a new position."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    template_values = {
        'messages_on_stage': get_messages(request),
    }
    return render(request, 'position/position_edit.html', template_values)
@login_required
def position_edit_view(request, position_we_vote_id):
    """
    Render the edit form for one PositionEntered record.

    If the record cannot be found the empty form is rendered instead (so a
    new position can be entered).  Duplicate records are logged rather than
    silently ignored.
    :param request:
    :param position_we_vote_id: we_vote_id of the position to edit
    :return: rendered position_edit.html
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    position_on_stage_found = False
    try:
        position_on_stage = PositionEntered.objects.get(we_vote_id=position_we_vote_id)
        position_on_stage_found = True
    except PositionEntered.MultipleObjectsReturned as e:
        # Log the data problem instead of swallowing it, consistent with
        # position_summary_view's handling of the same condition.
        handle_record_found_more_than_one_exception(e, logger=logger)
    except PositionEntered.DoesNotExist:
        # This is fine, create new
        pass

    if position_on_stage_found:
        template_values = {
            'messages_on_stage': messages_on_stage,
            'position': position_on_stage,
        }
    else:
        template_values = {
            'messages_on_stage': messages_on_stage,
        }
    return render(request, 'position/position_edit.html', template_values)
@login_required
def position_edit_process_view(request):  # TODO DALE I don't think this is in use, but needs to be updated
    """
    Process the new or edit position forms

    Updates an existing PositionEntered identified by position_we_vote_id,
    or creates a new one when no match is found, then redirects back to the
    position list.
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    position_we_vote_id = request.POST.get('position_we_vote_id')
    position_name = request.POST['position_name']
    twitter_handle = request.POST['twitter_handle']
    position_website = request.POST['position_website']

    # Check to see if this position is already being used anywhere
    position_on_stage_found = False
    try:
        position_query = PositionEntered.objects.filter(we_vote_id=position_we_vote_id)
        if len(position_query):
            position_on_stage = position_query[0]
            position_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)

    try:
        if position_on_stage_found:
            # Update
            position_on_stage.position_name = position_name
            position_on_stage.twitter_handle = twitter_handle
            position_on_stage.position_website = position_website
            position_on_stage.save()
            messages.add_message(request, messages.INFO, 'PositionEntered updated.')
        else:
            # Create new.  BUG FIX: the original constructed a CandidateCampaign
            # here, saving position form fields onto the wrong model; this is a
            # position view, so create a PositionEntered.
            # NOTE(review): field names (position_name, etc.) are assumed to
            # exist on PositionEntered — confirm against the model definition.
            position_on_stage = PositionEntered(
                position_name=position_name,
                twitter_handle=twitter_handle,
                position_website=position_website,
            )
            position_on_stage.save()
            messages.add_message(request, messages.INFO, 'New position saved.')
    except Exception as e:
        handle_record_not_saved_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR, 'Could not save position.')

    return HttpResponseRedirect(reverse('position:position_list', args=()))
@login_required
def position_summary_view(request, position_we_vote_id):
    """Show the read-only summary page for one PositionEntered record."""
    # admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'partner_organization', 'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    template_values = {
        'messages_on_stage': get_messages(request),
    }
    try:
        position_on_stage = PositionEntered.objects.get(we_vote_id=position_we_vote_id)
        template_values['position'] = position_on_stage
    except PositionEntered.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except PositionEntered.DoesNotExist:
        # This is fine, create new
        pass
    return render(request, 'position/position_summary.html', template_values)
@login_required
def refresh_cached_position_info_for_election_view(request):
    """
    Re-cache denormalized position info for one election, then report the
    public and friends-only update counts and return to the position list.
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')

    results = refresh_cached_position_info_for_election(
        google_civic_election_id=google_civic_election_id, state_code=state_code)

    messages.add_message(
        request, messages.INFO,
        'public_positions_updated: {public_positions_updated}, '
        'friends_only_positions_updated: {friends_only_positions_updated}'
        ''.format(public_positions_updated=results['public_positions_updated'],
                  friends_only_positions_updated=results['friends_only_positions_updated']))

    return HttpResponseRedirect(reverse('position:position_list', args=()) +
                                '?google_civic_election_id=' + str(google_civic_election_id) +
                                '&state_code=' + str(state_code))
@login_required
def refresh_positions_with_candidate_details_for_election_view(request):
    """
    Refresh Positions with candidate details

    Runs the refresh for one election, reports the refreshed-position count
    (or the error status), and returns to the candidate list.
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')

    results = refresh_positions_with_candidate_details_for_election(
        google_civic_election_id=google_civic_election_id, state_code=state_code)
    if results['success']:
        messages.add_message(
            request, messages.INFO,
            "Social media retrieved. Positions refreshed: {update_all_positions_results_count},"
            .format(update_all_positions_results_count=results['positions_updated_count']))
    else:
        messages.add_message(request, messages.INFO, results['status'])

    return HttpResponseRedirect(reverse('candidate:candidate_list', args=()) +
                                '?google_civic_election_id=' + str(google_civic_election_id) +
                                '&state_code=' + str(state_code))
@login_required
def refresh_positions_with_contest_office_details_for_election_view(request):
    """
    Refresh positions with contest office details

    When a specific office id / we_vote_id is supplied, push that office's
    data into the position cache; otherwise refresh all positions for the
    election.  Redirects to the office summary (single-office case without an
    election id) or the office list.
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    contest_office_id = request.GET.get('office_id', 0)
    contest_office_we_vote_id = request.GET.get('office_we_vote_id', '')

    if positive_value_exists(contest_office_id):
        results = push_contest_office_data_to_other_table_caches(contest_office_id)
    elif positive_value_exists(contest_office_we_vote_id):
        results = push_contest_office_data_to_other_table_caches(contest_office_we_vote_id)
    else:
        # The original elif/else branches were byte-identical, so a single
        # branch suffices — election scoping is handled by the callee.
        results = refresh_positions_with_contest_office_details_for_election(
            google_civic_election_id=google_civic_election_id, state_code=state_code)

    if not results['success']:
        messages.add_message(request, messages.INFO, results['status'])
    else:
        positions_updated_count = results['positions_updated_count']
        messages.add_message(request, messages.INFO,
                             "Social media retrieved. Positions refreshed: {update_all_positions_results_count},"
                             .format(update_all_positions_results_count=positions_updated_count))

    # Only the single-office case without an election id goes to the summary
    # page; everything else (including the original duplicated else branch)
    # goes back to the office list.
    if positive_value_exists(contest_office_id) and not positive_value_exists(google_civic_election_id):
        return HttpResponseRedirect(reverse('office:office_summary', args=(contest_office_id,)))
    return HttpResponseRedirect(reverse('office:office_list', args=()) +
                                '?google_civic_election_id=' + str(google_civic_election_id) +
                                '&state_code=' + str(state_code))
@login_required
def refresh_positions_with_contest_measure_details_for_election_view(request):
    """
    Refresh positions with contest measure details

    When a specific measure id / we_vote_id is supplied, push that measure's
    data into the position cache; otherwise refresh all positions for the
    election.  Redirects to the measure summary (single-measure case without
    an election id) or the measure list.
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    contest_measure_id = request.GET.get('measure_id', 0)
    contest_measure_we_vote_id = request.GET.get('measure_we_vote_id', '')

    if positive_value_exists(contest_measure_id):
        results = push_contest_measure_data_to_other_table_caches(contest_measure_id)
    elif positive_value_exists(contest_measure_we_vote_id):
        results = push_contest_measure_data_to_other_table_caches(contest_measure_we_vote_id)
    else:
        # The original elif/else branches were byte-identical, so a single
        # branch suffices — election scoping is handled by the callee.
        results = refresh_positions_with_contest_measure_details_for_election(
            google_civic_election_id=google_civic_election_id, state_code=state_code)

    if not results['success']:
        messages.add_message(request, messages.INFO, results['status'])
    else:
        positions_updated_count = results['positions_updated_count']
        messages.add_message(request, messages.INFO,
                             "Social media retrieved. Positions refreshed: {update_all_positions_results_count},"
                             .format(update_all_positions_results_count=positions_updated_count))

    # Only the single-measure case without an election id goes to the summary
    # page; everything else (including the original duplicated else branch)
    # goes back to the measure list.
    if positive_value_exists(contest_measure_id) and not positive_value_exists(google_civic_election_id):
        return HttpResponseRedirect(reverse('measure:measure_summary', args=(contest_measure_id,)))
    return HttpResponseRedirect(reverse('measure:measure_list', args=()) +
                                '?google_civic_election_id=' + str(google_civic_election_id) +
                                '&state_code=' + str(state_code))
@login_required
def relink_candidates_measures_view(request):
    """Placeholder admin action — the relinking logic has not been built yet."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages.add_message(request, messages.INFO, 'TO BE BUILT: relink_candidates_measures_view')
    return HttpResponseRedirect(reverse('position:position_list', args=()))
@login_required
def position_delete_process_view(request):
    """
    Delete a position

    Looks up the PositionEntered by we_vote_id, deletes it, and redirects to
    the owning organization's position list when one is known, otherwise to
    the main position list.
    :param request:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    position_we_vote_id = request.GET.get('position_we_vote_id', '')
    google_civic_election_id = request.GET.get('google_civic_election_id', 0)

    # Retrieve this position
    position_on_stage_found = False
    position_on_stage = PositionEntered()
    organization_id = 0
    try:
        position_query = PositionEntered.objects.filter(we_vote_id=position_we_vote_id)
        if len(position_query):
            position_on_stage = position_query[0]
            organization_id = position_on_stage.organization_id
            position_on_stage_found = True
    except Exception as e:
        # Log the lookup failure — the original caught the exception but
        # never recorded it, matching siblings that use this handler.
        handle_record_not_found_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR, 'Could not find position -- exception.')

    if not position_on_stage_found:
        messages.add_message(request, messages.ERROR, 'Could not find position.')
        return HttpResponseRedirect(reverse('position:position_list', args=()) +
                                    "?google_civic_election_id=" + str(google_civic_election_id))

    try:
        # position_on_stage_found is guaranteed True here because of the early
        # return above, so the original re-check (and its unreachable else
        # branch) has been removed.
        position_on_stage.delete()
        messages.add_message(request, messages.INFO, 'Position deleted.')
        if positive_value_exists(organization_id):
            return HttpResponseRedirect(reverse('organization:organization_position_list',
                                                args=(organization_id,)) +
                                        "?google_civic_election_id=" + str(google_civic_election_id))
    except Exception as e:
        handle_record_not_saved_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR, 'Could not save position.')

    return HttpResponseRedirect(reverse('position:position_list', args=()) +
                                "?google_civic_election_id=" + str(google_civic_election_id))
| 49.555817
| 118
| 0.718415
|
acfc62cad2951fc8a86195b7c88e4e9f0592960b
| 434
|
py
|
Python
|
introcs/ch1/exp_q/2/1_2_30.py
|
ujuc/introcs-py
|
81878e70978ff04bdff839de74b05c62d9fe026d
|
[
"MIT"
] | 1
|
2020-07-01T02:11:00.000Z
|
2020-07-01T02:11:00.000Z
|
introcs/ch1/exp_q/2/1_2_30.py
|
ujuc/introcs-py
|
81878e70978ff04bdff839de74b05c62d9fe026d
|
[
"MIT"
] | null | null | null |
introcs/ch1/exp_q/2/1_2_30.py
|
ujuc/introcs-py
|
81878e70978ff04bdff839de74b05c62d9fe026d
|
[
"MIT"
] | 1
|
2020-07-01T02:11:01.000Z
|
2020-07-01T02:11:01.000Z
|
import math
import sys
from introcs.stdlib import stdio
# Great-circle distance between two points given in degrees on the command
# line.  The spherical law of cosines gives the central angle; multiplying
# its degree measure by 60 converts to arc-minutes (nautical miles).
# NOTE(review): from the formula, args 1/3 act as latitudes and 2/4 as
# longitudes — confirm against the exercise statement.
lat1 = math.radians(float(sys.argv[1]))
lon1 = math.radians(float(sys.argv[2]))
lat2 = math.radians(float(sys.argv[3]))
lon2 = math.radians(float(sys.argv[4]))

central_angle = math.acos(
    math.sin(lat1) * math.sin(lat2)
    + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)
)
delta = 60 * math.degrees(central_angle)
stdio.writeln(f"delta: {delta}")
| 17.36
| 81
| 0.66129
|
acfc6304c0c054726fff6de484a180b631570f02
| 1,361
|
py
|
Python
|
commentparser.py
|
Almenon/reddit_episode_bot
|
6dfcde9f0ffd64d3c3174b50feca0d4007a14295
|
[
"MIT"
] | 10
|
2015-07-17T05:37:56.000Z
|
2021-12-13T06:57:06.000Z
|
commentparser.py
|
Almenon/reddit_episode_bot
|
6dfcde9f0ffd64d3c3174b50feca0d4007a14295
|
[
"MIT"
] | 16
|
2015-09-28T03:51:48.000Z
|
2020-07-15T05:18:44.000Z
|
commentparser.py
|
Almenon/reddit_episode_bot
|
6dfcde9f0ffd64d3c3174b50feca0d4007a14295
|
[
"MIT"
] | 1
|
2018-02-23T16:08:00.000Z
|
2018-02-23T16:08:00.000Z
|
__author__ = 'Almenon'

from re import compile, search
import logging

logging.basicConfig(level="DEBUG")  # set level to INFO to get rid of debug messages

# Matches requests like "/u/the_episode_bot pokemon S01E06" (bot mention and
# title are optional).  Groups: 1 = show title (or None), 2 = leading 's'/'e'
# marker, 3 = first number, 4 = second number.
regex = compile(r"(?:\/u\/the_episode_bot[ ,:]*(\S+(?:\ \S+\b)*)[ ,]+)?" # bot's username followed by title (optional)
                r"(s|e)[a-s]*\ *?(\d+)[\: ,_\[\]\-x]*" # season or episode followed by optional seperator
                r"(?:s|e)[a-s]*\ *?(\d+)") # season/episode

# example match: /u/the_episode_bot pokemon S01E06


class ParseError(Exception):
    """Raised when a comment does not contain a recognizable episode request."""
    pass


def parse(request):
    """Parse a comment into ``(show, season, episode)``.

    :param request: raw comment text, any case
    :return: tuple of (show title or None, season string, episode string)
    :raises ParseError: if the comment does not match the expected format
    """
    # Lazy %-style args so the message is only built if the level is enabled.
    logging.info("request: %s", request)

    # PARSE REQUEST
    request = request.lower()
    show_season_episode = search(regex, request)
    if show_season_episode is None:
        raise ParseError("request does not contain correct format")

    if show_season_episode.group(1) is not None: show = show_season_episode.group(1)
    else: show = None

    # BUG FIX: the original compared with ``is 'e'`` — an identity check
    # against a string literal, which is never reliably true for a value
    # returned by ``match.group`` — so "episode first" requests were parsed
    # with season and episode swapped.  Use equality instead.
    if show_season_episode.group(2) == 'e':  # format is episode season
        episode = show_season_episode.group(3)
        season = show_season_episode.group(4)
    else:  # format is season episode
        season = show_season_episode.group(3)
        episode = show_season_episode.group(4)

    return show, season, episode

# testing
# try:
#     answer = parse("1 :: Beers in S01E05")
#     print(answer)
# except ParseError as e:
#     print(e)
| 32.404762
| 119
| 0.648053
|
acfc63a4136909f60d39b51213b2f91f1bb09024
| 9,433
|
py
|
Python
|
tester_script.py
|
dorazhao99/revise-tool
|
7912ce314b98e5bd272746222db2385ec2930789
|
[
"MIT"
] | 76
|
2020-08-17T12:52:25.000Z
|
2022-03-24T17:07:07.000Z
|
tester_script.py
|
dorazhao99/revise-tool
|
7912ce314b98e5bd272746222db2385ec2930789
|
[
"MIT"
] | 20
|
2020-09-10T18:30:03.000Z
|
2022-01-24T22:46:45.000Z
|
tester_script.py
|
dorazhao99/revise-tool
|
7912ce314b98e5bd272746222db2385ec2930789
|
[
"MIT"
] | 20
|
2020-07-28T18:00:39.000Z
|
2022-01-22T11:00:16.000Z
|
# #!/usr/bin/env python3
import sys
import importlib
import torchvision.transforms as transforms
import torch
import os
import random
import numpy as np
import cv2
from operator import itemgetter
import pickle
# Number of random examples sampled for each spot-check in validate_dataset.
NUM_EXS=5
def validate_dataset(dataset):
    """Smoke-test a dataset class for the interface this tool expects.

    Instantiates ``dataset`` with a small torchvision transform, then checks
    (printing errors rather than raising) that the instance exposes:
    ``image_ids``, ``categories``, ``scene_mapping``,
    ``supercategories_to_names``, ``labels_to_names``, ``__len__``, and
    ``__getitem__`` returning ``(image, [image_anns, gender_info,
    [country, lat_lng], file_path, scene_group])``.  Finally writes NUM_EXS
    random example images into tester_script_out/ and prints their
    annotations for manual inspection.

    :param dataset: the dataset *class* (not an instance); it must accept a
        transform as its single constructor argument.
    """
    # set up dataset
    try:
        print('starting setup')
        t = transforms.Compose([
            transforms.CenterCrop(10),
            transforms.ToTensor(),
        ])
        ds = dataset(t)
        print('end setup')
    except Exception as e:
        print('ERROR: Initialization failed before testing:', e)
        sys.exit()
    print('testing imageids')
    # testing image_ids
    try:
        ds.image_ids
    except AttributeError:
        print('ERROR: self.image_ids is a required field.')
    if not isinstance(ds.image_ids, list):
        print('---- Image_ids must be of type: list ----')
        print('ERROR: Currently of type:', type(ds.image_ids))
    else:
        print('\n--- Number of images ---')
        print(str(len(ds.image_ids)), '\n')
        # NOTE(review): sampled here but overwritten below before use.
        rand_inds = random.sample( range(len(ds.image_ids) - 1), NUM_EXS )
    # testing categories
    try:
        ds.categories
    except AttributeError:
        print('ERROR: self.categories is a required field.')
    if not isinstance(ds.categories, list):
        print('--- Categories must be type: list ---')
        print('ERROR: Currently of type:', type(ds.categories), '\n')
    else:
        print('---- Total number of labels in the dataset ---')
        print( str(len(ds.categories)), '\n')
    # testing scene mappings (optional field: missing attribute is tolerated)
    try:
        if not isinstance(ds.scene_mapping, dict):
            print('--- Scene_mapping must be type: dict ---')
            print('ERROR: Currently of type:', type(ds.scene_mapping), '\n')
    except AttributeError:
        pass
    # testing supercategories_to_names
    try:
        if not isinstance(ds.supercategories_to_names, dict):
            print('ERROR: self.supercategories_to_names must be type: dict \n')
    except AttributeError:
        print('ERROR: self.supercategories_to_names is a required field.')
        print('Please set self.categories_to_names = DEFAULT_GROUPINGS_TO_NAMES \n')
    # testing labels_to_names
    try:
        ds.labels_to_names
    except AttributeError:
        print('ERROR: self.labels_to_names is a required field.')
    if not isinstance(ds.labels_to_names, dict):
        print('---- Labels_to_names must be type: dict ---')
        print('ERROR: Currently of type:', type(ds.labels_to_names), '\n')
    else:
        print('---', str(NUM_EXS), 'random examples of [label] -> [supercategory] ---' )
        len_labels = len(ds.labels_to_names) if len(ds.labels_to_names)!=0 else len(ds.categories)
        rand_inds = random.sample( range(len_labels - 1), NUM_EXS )
        for rand_ind in rand_inds:
            try:
                supercat = ds.group_mapping( list(ds.labels_to_names.items())[rand_ind][0] )
                supercat_name = ds.supercategories_to_names[supercat]
                print( ds.labels_to_names[ds.categories[rand_ind]], '->', supercat_name)
                scflag=True
            except Exception as e:
                print( ds.labels_to_names[ds.categories[rand_ind]] )
                scflag=False
        # NOTE(review): only the last sampled label's success is checked here.
        if not scflag: print('ERROR: self.supercategories not set up correctly so supercategories not displayed.')
        print('\n')
    # testing __len__:
    try:
        ds.__len__()
        if ds.__len__() != len(ds.image_ids):
            print('ERROR: self.__len__() must be equal to length of self.image_ids')
            print('self.__len__() returns', ds.__len__(), '/ length of self.image_ids =', len(ds.image_ids), '\n')
    except AttributeError:
        print('ERROR: self.__len__() is a required method.\n')
    # testing __getitem__ and from_path
    rand_ind = random.randint(0, ds.__len__()-1)
    try:
        x = ds.__getitem__(rand_ind)
    except AttributeError:
        print('ERROR: self.__getitem___() is a required method.\n')
    x = ds.__getitem__(rand_ind)
    if len(x) != 2:
        print('ERROR: self.__getitem__() must return a tuple of length: 2')
        print('Return value should be in form (image, annotations)\n')
        sys.exit()
    img, anns = x
    if not isinstance(anns, list):
        print('ERROR: Annotations must be of type: list\n')
        sys.exit()
    if len(anns) != 5:
        print('ERROR: self.__getitem__() should return annotations of length: 5')
        print('Annotations must be a list containing [image_anns, gender_info, [country, lat_lng], file_path, scene_group]\n')
        sys.exit()
    labels, att, geo, fp, scn = anns
    # NOTE(review): a single-annotation image skips these checks — possibly
    # this was meant to be ``>= 1``; confirm before changing.
    if len(labels) > 1:
        if not isinstance(labels[0], dict) or labels[0].get('label', None)==None:
            print('ERROR: image_anns must be a list of dicts. If there are >0 dicts, must contain keyword \'label\' \n')
        for label in labels:
            wrong_bbox=None
            if label.get('bbox', None):
                for coord in label['bbox']: wrong_bbox=label['bbox'] if (coord<0 or coord>1) else None
            if wrong_bbox:
                print('ERROR: All bounding box numbers must be scaled between 0 and 1. Got bounding box: ', end='')
                for coord in wrong_bbox:
                    print('%.2f, ' % coord, end='')
                print('\n')
    if att and not att[0]:
        print('ERROR: If no attribute annotations, must be an empty list i.e. [], got:', att, '\n')
    if att and not isinstance(att[0], list):
        print('ERROR: Attribute annotation must be in a list, got:', att[0], '\n')
    elif att and isinstance(att[0], list):
        if len(att)==2 and len(att[0])!=len(att[1]): print('ERROR: length of annotation list is not equal to length of bbox list.\n')
        try:
            for a in att[0]:
                if a >= len(ds.attribute_names): print('ERROR: attribute annotation out of index for given self.attribute_names. Got value:', a, '\n')
        except Exception as e:
            print(e)
            print('ERROR: self.attribute_names is a required for attribute annotations.')
    if len(geo) !=1 and len(geo) != 2:
        print('ERROR: geography info must be a list of length: 1 in the form [None] or [country] or length: 2 in the form [country, lat_lng] \n')
    if len(geo) ==2 and (not isinstance(geo[1], dict) or len(geo[1])!= 2 or not geo[1].get('lat') or not geo[1].get('lng')):
        print('ERROR: lat_lng in [country, lat_lng] must be of type:dict with 2 keys: \'lat\' and \'lng\' \n')
    # Dump NUM_EXS random examples with their annotations for eyeballing.
    rand_inds = random.sample( range(ds.__len__() - 1), NUM_EXS )
    print('--- View folder "tester_script_out" for images ----')
    for i in range(NUM_EXS):
        img, anns = ds.__getitem__(rand_inds[i])
        # CHW tensor -> HWC array for cv2.
        img = img.permute(1, 2, 0).numpy()
        cv2.imwrite('tester_script_out/example_' + str(i) + '.jpg', img) #need to generalize
        print('--- Annotations for example_' + str(i) + '.jpg in tester_script_out ---')
        labels, attribute, geography, filepath, scene = anns
        if not labels or len(labels) == 0:
            print('Label: No annotations for this image', end='')
        for label in labels:
            curr_label = ds.labels_to_names.get(label['label'], label['label']) if len(ds.labels_to_names)!=0 else label['label']
            print('Label:', str(curr_label), end='')
            if label.get('bbox'):
                print(', bbox: ', end='')
                for coord in label['bbox']:
                    print("%.2f, " % coord, end='')
            print('')
        if not attribute or not attribute[0]: print('Attribute: No annotation for this image', end='')
        else:
            print('Attribute: ', end='')
            atts = attribute[0]
            bboxs = attribute[1] if len(att)>1 else None
            for i in range(len(atts)):
                print(str(ds.attribute_names[atts[i]]), end='')
                if bboxs:
                    print(', bbox: ', end='')
                    for coord in bboxs[i]:
                        print('%.2f, ' % coord, end='')
            print('')
        for geo in geography:
            print('Geography:', geo) if geo else print('Geography: No annotations for this image')
        print('Filepath:', filepath)
        if not scene or not scene[0]:
            print('Scene group: No annotations for this image')
        else:
            try:
                info = pickle.load(open('util_files/places_scene_info.pkl', 'rb'))
                idx_to_scene = info['idx_to_scene']
                idx_to_scenegroup = info['idx_to_scenegroup']
                for scn in scene:
                    print('Scene group: ', idx_to_scenegroup[scn])
            except Exception as e:
                print('ERROR: Must have file util_files/places_scene_info.pkl for scene mapping.')
                print('Exception: ', e)
                for scn in scene:
                    print('Scene group: ', scn)
        print('\n')
if __name__ == "__main__":
    # Usage: tester_script.py [DatasetName], where DatasetName is a class
    # defined in the local datasets.py module.
    if len(sys.argv) != 2:
        print("usage: tester_script.py [DatasetName]")
        sys.exit()
    filename = 'datasets'
    module = sys.argv[1]
    try:
        # Resolve the dataset class by name from datasets.py.
        dataset = getattr(importlib.import_module(filename), module)
    except AttributeError as e:
        print('error: no class', module, 'in file datasets.py')
        sys.exit()
    validate_dataset(dataset)
| 38.03629
| 150
| 0.587512
|
acfc64d31c318a7da92d7c776160f900b4897930
| 1,583
|
py
|
Python
|
piston/web/__init__.py
|
ausbitbank/piston
|
b571a44c71aec637df75bb012fd8d203ffbef18a
|
[
"MIT"
] | null | null | null |
piston/web/__init__.py
|
ausbitbank/piston
|
b571a44c71aec637df75bb012fd8d203ffbef18a
|
[
"MIT"
] | null | null | null |
piston/web/__init__.py
|
ausbitbank/piston
|
b571a44c71aec637df75bb012fd8d203ffbef18a
|
[
"MIT"
] | null | null | null |
import re
from ..utils import strfdelta, strfage
from ..storage import configStorage as configStore
from .app import app, socketio
from ..steem import SteemConnector
from . import views, assets
import logging
# Package-level logger.
log = logging.getLogger(__name__)
# Module-level Steem handle obtained from the connector at import time;
# NOTE(review): presumably a shared singleton connection — confirm against
# SteemConnector before relying on that.
steem = SteemConnector().getSteem()
# BUG FIX: Python only honours the lowercase ``__all__`` for
# ``from package import *``; the original ``__ALL__`` spelling had no effect.
__all__ = [
    "app",
    "assets",
    "forms",
    "socketio",
    "views",
]
@app.template_filter('age')
def _jinja2_filter_age(date, fmt=None):
    """Render *date* as a human-readable age string (``age`` filter)."""
    rendered = strfage(date, fmt)
    return rendered
@app.template_filter('excert')
def _jinja2_filter_datetime(data):
    """Return the first 100 space-separated words of a post body."""
    return " ".join(data.split(" ")[:100])
@app.template_filter('parseBody')
def _jinja2_filter_parseBody(body):
    """Pre-process a post body before display: blank out lines that are
    bare image URLs (jpg/png/gif)."""
    stripped = re.sub(
        r"^(https?:.*/(.*\.(jpg|png|gif))\??.*)",
        r"\n\n",
        body, flags=re.MULTILINE)
    return stripped
@app.template_filter('currency')
def _jinja2_filter_currency(value):
    """Format a crypto-token amount with thousands separators and
    three decimal places.

    :param float value: The amount to format as string
    """
    formatted = "{:,.3f}".format(value)
    return formatted
def run(port, host):
    """Run the Flask app under SocketIO's development server."""
    debug_enabled = configStore.get("web:debug")
    socketio.run(app, debug=debug_enabled, host=host, port=port)
    # FIXME: Don't use .run()
    # from gevent.wsgi import WSGIServer
    # from yourapplication import app
    # http_server = WSGIServer(('', 5000), app)
    # http_server.serve_forever()
| 22.942029
| 58
| 0.624131
|
acfc690ab3fdb6cb112e3dbc2f083a4cfd29cfc6
| 4,963
|
py
|
Python
|
pandas/core/groupby/numba_.py
|
luftwurzel/pandas
|
8980af7ce9d98713b0f8792e38f0fe43088e8780
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T01:38:03.000Z
|
2022-03-29T01:38:03.000Z
|
pandas/core/groupby/numba_.py
|
luftwurzel/pandas
|
8980af7ce9d98713b0f8792e38f0fe43088e8780
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T01:26:58.000Z
|
2022-03-18T01:26:58.000Z
|
pandas/core/groupby/numba_.py
|
luftwurzel/pandas
|
8980af7ce9d98713b0f8792e38f0fe43088e8780
|
[
"BSD-3-Clause"
] | 1
|
2022-03-22T11:50:25.000Z
|
2022-03-22T11:50:25.000Z
|
"""Common utilities for Numba operations with groupby ops"""
from __future__ import annotations
import functools
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
)
import numpy as np
from pandas._typing import Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
NumbaUtilError,
jit_user_function,
)
def validate_udf(func: Callable) -> None:
    """
    Validate user defined function for ops when using Numba with groupby ops.

    The first signature arguments should include:

    def f(values, index, ...):
        ...

    Parameters
    ----------
    func : function, default False
        user defined function

    Returns
    -------
    None

    Raises
    ------
    NumbaUtilError
    """
    if not callable(func):
        raise NotImplementedError(
            "Numba engine can only be used with a single function."
        )
    signature_args = list(inspect.signature(func).parameters.keys())
    required_args = ["values", "index"]
    n_required = len(required_args)
    has_expected_prefix = (
        len(signature_args) >= n_required
        and signature_args[:n_required] == required_args
    )
    if not has_expected_prefix:
        raise NumbaUtilError(
            f"The first {n_required} arguments to {func.__name__} must be "
            f"{required_args}"
        )
@functools.lru_cache(maxsize=None)
def generate_numba_agg_func(
    func: Callable[..., Scalar],
    nopython: bool,
    nogil: bool,
    parallel: bool,
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
    """
    Generate a numba jitted agg function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby agg function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    func : function
        function to be applied to each group and will be JITed
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    numba_func = jit_user_function(func, nopython, nogil, parallel)
    # numba is an optional pandas dependency: import lazily at call time, but
    # keep a plain import under TYPE_CHECKING so annotations resolve.
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_agg(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_columns: int,
        *args: Any,
    ) -> np.ndarray:
        # begin/end are parallel arrays of per-group start/stop row offsets.
        assert len(begin) == len(end)
        num_groups = len(begin)
        # One scalar result per (group, column).
        result = np.empty((num_groups, num_columns))
        for i in numba.prange(num_groups):
            group_index = index[begin[i] : end[i]]
            for j in numba.prange(num_columns):
                group = values[begin[i] : end[i], j]
                result[i, j] = numba_func(group, group_index, *args)
        return result

    return group_agg
@functools.lru_cache(maxsize=None)
def generate_numba_transform_func(
    func: Callable[..., np.ndarray],
    nopython: bool,
    nogil: bool,
    parallel: bool,
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:
    """
    Generate a numba jitted transform function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby transform function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    func : function
        function to be applied to each window and will be JITed
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    numba_func = jit_user_function(func, nopython, nogil, parallel)
    # numba is an optional pandas dependency: import lazily at call time, but
    # keep a plain import under TYPE_CHECKING so annotations resolve.
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_transform(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_columns: int,
        *args: Any,
    ) -> np.ndarray:
        # begin/end are parallel arrays of per-group start/stop row offsets.
        assert len(begin) == len(end)
        num_groups = len(begin)
        # Transform keeps one row per input row (unlike the agg variant).
        result = np.empty((len(values), num_columns))
        for i in numba.prange(num_groups):
            group_index = index[begin[i] : end[i]]
            for j in numba.prange(num_columns):
                group = values[begin[i] : end[i], j]
                result[begin[i] : end[i], j] = numba_func(group, group_index, *args)
        return result

    return group_transform
| 27.269231
| 86
| 0.634294
|
acfc69127052efb37e690117059352a9995845f1
| 2,111
|
py
|
Python
|
Python/gui-tk.py
|
festusdrakon/Minesweeper
|
ea128d132f66c89328777e54f973b141f9a77f75
|
[
"MIT"
] | null | null | null |
Python/gui-tk.py
|
festusdrakon/Minesweeper
|
ea128d132f66c89328777e54f973b141f9a77f75
|
[
"MIT"
] | 3
|
2020-10-31T17:29:13.000Z
|
2020-10-31T18:22:58.000Z
|
Python/gui-tk.py
|
festusdrakon/Minesweeper
|
ea128d132f66c89328777e54f973b141f9a77f75
|
[
"MIT"
] | 1
|
2020-10-31T17:33:52.000Z
|
2020-10-31T17:33:52.000Z
|
"""
Minesweeper written in python 2.7 with Tkinter for UI
"""
# The included modules and their functions
from Tkinter import *
from random import randint
from time import time, sleep
# Constants
flag = u"\u26F3"  # U+26F3 flag glyph; NOTE(review): unused in the visible code
mine = 'X'  # character used to render a mine
shades = ['#322', '#422', '#522', '#622', '#722', '#822', '#922', '#a22', '#b22']  # red shades, dark to bright
size = 10, 10  # board dimensions (rows, columns)
H, W = 500, 500  # canvas height and width in pixels
S = 40  # tile edge length in pixels

root = Tk()
root.title(string = 'Minesweeper')
root.config(bg = '#122')
# Graph = Frame(root, height = 180, width = 500)
# Graph.grid(row = 4, column = 0, columnspan = 6)
#
# m,h,w = 10,180, 400
#
# alpha = [chr(b) for b in range(65, 91)]+[chr(b) for b in range(48,58)]
#
# canvas = Canvas(root, height = h, width = w, bg = "#000")
#
# for a,b in enumerate(alpha):
# canvas.create_text((a+2.25)*m, h-10, text = b, fill = "#fa5", font = "Helvetica 8")
#
# canvas.pack()
# freq = {'0':0.1, '1':0.2, '2':0.3, '3':0.4, '4':0.5, '5':0.6, '6':0.7, '7':0.8, '8':0.9}
# for (b,c) in freq.items():
# if ord(b)>64:
# a = ord(b)-65
# else:
# a = ord(b)-22
# # Replacing old bars with new data bars
# canvas.delete(b)
# canvas.create_rectangle((a+2)*m, h-20, (a+2.5)*m, h-20-(c*150), fill = shades[int(b)], tag = b)
# canvas.update()
# sleep(1)
class tile:
    """One board cell, backed by a Tk Button on the root window.

    NOTE(review): ``clicked`` is an empty stub and the button is never
    gridded/packed in the visible code — this class appears unfinished.
    """
    def __init__(self, coords):
        # Board position of this tile (as passed by the caller).
        self.xy = coords
        self.widget = Button(root, text = ' ', command = self.clicked)
        self.widget.config(font = 'Georgia 12', width = 2, height = 2)
    def clicked(self):
        # Button-press callback; not implemented yet.
        pass
# Window header label.
title = Label(root, text = "Minesweeper",
            font = 'Arial 15', height = 3,
            fg = 'Azure', bg = '#122')
title.grid(row = 0, column = 0)
# Frame + canvas that hold the drawn board.
div = Frame(root, height = H, width = W)
div.grid(row = 1, column = 0)
view = Canvas(div, height = H, width = W, bg = '#122')
view.pack()
# Build an empty size[0] x size[1] character grid for the board state.
grid = list()
for a in range(size[0]):
    grid.append(list())
    for b in range(size[1]):
        grid[a].append(' ')
# Draw one filled square per tile; update()+sleep animates the reveal.
for a in range(size[0]):
    for b in range(size[1]):
        x1, y1 = 50 + (b*S), 50 + (a*S)
        view.create_rectangle(x1, y1, x1+S, y1+S, fill = shades[8])
        view.update()
        sleep(0.2)
root.mainloop()
| 27.415584
| 101
| 0.555661
|
acfc6a8584c7f26cf0ed9e46b175d08ae123eaec
| 266
|
py
|
Python
|
attractions/api/serializers.py
|
isnardsilva/django-attractions-api
|
feade087d840b72b603d2a4bf538b8c362aa91bd
|
[
"MIT"
] | 1
|
2021-12-31T12:59:49.000Z
|
2021-12-31T12:59:49.000Z
|
attractions/api/serializers.py
|
isnardsilva/django-attractions-api
|
feade087d840b72b603d2a4bf538b8c362aa91bd
|
[
"MIT"
] | null | null | null |
attractions/api/serializers.py
|
isnardsilva/django-attractions-api
|
feade087d840b72b603d2a4bf538b8c362aa91bd
|
[
"MIT"
] | null | null | null |
from rest_framework.serializers import ModelSerializer
from core.models import Attraction
class AttractionSerializer(ModelSerializer):
    """DRF serializer exposing the public fields of the Attraction model."""
    class Meta:
        model = Attraction
        fields = ['id', 'name', 'description', 'opening_hours', 'minimum_age', 'photo']
| 38
| 87
| 0.736842
|
acfc6bfa59fb4881fcf1161b5b9364200fb83c3b
| 2,803
|
py
|
Python
|
vanstein/interpreter/vs_exceptions.py
|
ArsenArsen/Vanstein
|
a1ad5ff3e52a724e97f1d0e6a45a7c4c1e190a3d
|
[
"MIT"
] | null | null | null |
vanstein/interpreter/vs_exceptions.py
|
ArsenArsen/Vanstein
|
a1ad5ff3e52a724e97f1d0e6a45a7c4c1e190a3d
|
[
"MIT"
] | null | null | null |
vanstein/interpreter/vs_exceptions.py
|
ArsenArsen/Vanstein
|
a1ad5ff3e52a724e97f1d0e6a45a7c4c1e190a3d
|
[
"MIT"
] | null | null | null |
"""
Vanstein exception system.
This hijacks default exceptions to make them work with CPython code.
"""
from forbiddenfruit import curse
from vanstein.context import _VSContext
from vanstein.decorators import native_invoke
class _VSTraceback(object):
    """
    Duck-typed stand-in for a CPython traceback object.

    Wraps a context and exposes the ``tb_*`` attributes that traceback
    consumers expect; contexts double as frame objects here.
    """
    def __init__(self, root: _VSContext):
        # Context this traceback entry describes.
        self._root = root

    @property
    def tb_frame(self) -> '_VSContext':
        # A context also implements the frame interface.
        return self._root

    @property
    def tb_lasti(self):
        return self._root.f_lasti

    @property
    def tb_lineno(self):
        return self._root.f_lineno

    @property
    def tb_next(self):
        nxt = self._root.next_ctx
        return None if nxt is None else type(self)(nxt)
def get_traceback(self):
    """
    Hijacked item for `__traceback__`.
    This will overwrite `__traceback__` on the Exception class.
    """
    try:
        # Return our hijacked traceback (``_tb`` is set by safe_raise).
        return self._tb
    except AttributeError:
        # Explicit super() call otherwise CPython freaks out.
        try:
            return super(BaseException, self).__traceback__
        except AttributeError:
            return None

# forbiddenfruit.curse patches the builtin BaseException in place, so every
# exception's ``__traceback__`` attribute now resolves through get_traceback.
curse(BaseException, "__traceback__", property(get_traceback))
@native_invoke
def create_traceback(ctx: _VSContext) -> _VSTraceback:
    """
    Creates a traceback object from a context.

    Follows the ``prev_ctx`` links from *ctx* down to the root context and
    wraps that root in a :class:`_VSTraceback`.

    :param ctx: The context to use.
    """
    root = ctx
    while root.prev_ctx is not None:
        root = root.prev_ctx
    return _VSTraceback(root)
# Now we've done that, define the `safe_raise` function.
@native_invoke
def safe_raise(ctx: _VSContext, exception: BaseException):
    """
    Attempts to "safely" raise an exception into the context.

    If the exception is being handled by a `try` block, it will automatically move the pointer to the Except block
    that is consistent with it.

    Otherwise, it will attempt to bubble it out of the stack.

    :param ctx: The context to raise into.
    :param exception: The exception to raise.
    :return: The context.
    """
    # Create a traceback for this exception.  ``_tb`` is what the cursed
    # BaseException.__traceback__ property (get_traceback) reads back.
    exception._tb = create_traceback(ctx)
    # Inject the exception.
    ctx.inject_exception(exception)

    return ctx
@native_invoke
def get_ordered_call_stack(start_ctx: _VSContext):
    """
    Gets the ordered call stack from the start context.

    Walks the ``prev_ctx`` chain from *start_ctx* down to the root context,
    collecting every frame, and returns them ordered oldest-first.

    :param start_ctx: The context to start from (the newest frame).
    :return: list of contexts, root (oldest) first.
    """
    frames = []
    ctx = start_ctx
    # BUG FIX: the original loop tested ``next_ctx`` while stepping
    # ``prev_ctx``, which either collected nothing or dereferenced None;
    # walk the prev chain consistently instead.
    while ctx is not None:
        frames.append(ctx)
        ctx = ctx.prev_ctx
    # Frames were collected newest-first; reverse so the root comes first
    # (the original had this comment but never performed the reversal).
    frames.reverse()
    return frames
| 24.80531
| 114
| 0.672851
|
acfc6c644f8ada477c8eb5b53d055f41a47ee202
| 501
|
py
|
Python
|
sem 5 Programs/Skill Development Language/prac4.py
|
abdulmuiz313/Third-Year_Lab_Assignment
|
b9e1a026051d4988fca726530e71d9889b42a0d8
|
[
"MIT"
] | 3
|
2020-04-25T13:06:40.000Z
|
2021-12-16T10:37:16.000Z
|
sem 5 Programs/Skill Development Language/prac4.py
|
abdulmuiz313/Third-Year_Lab_Assignment
|
b9e1a026051d4988fca726530e71d9889b42a0d8
|
[
"MIT"
] | null | null | null |
sem 5 Programs/Skill Development Language/prac4.py
|
abdulmuiz313/Third-Year_Lab_Assignment
|
b9e1a026051d4988fca726530e71d9889b42a0d8
|
[
"MIT"
] | 4
|
2020-10-23T16:32:35.000Z
|
2021-09-21T03:41:30.000Z
|
# Simple guess-the-word game for the fixed word "GOOGLE": the player gets
# six single-letter guesses; matched letters are revealed in guessword.
word="GOOGLE"
guessword=[]
word_len=len(word)
a="_"
c=0
for i in range(word_len):
    guessword.append("_")
print(guessword)
j=0
while (j<6):
    x=input("Enter your guess")
    # BUG FIX: str.upper() returns a new string; the original discarded the
    # result, so lowercase guesses could never match the uppercase word.
    x = x.upper()
    print(x)
    for i in range(word_len):
        if word[i]==x:
            guessword[i]=x
    print(guessword)
    j+=1
# Any remaining underscore means the word was not fully revealed.
for i in range(word_len):
    if(guessword[i]==a):
        c=1
#if str(guessword) not in word:
    #flag=1
if(c==1):
    print("WELL TRIED BUT YOU GUESS WRONG WORD😒")
else:
    print(" CONGRATULATIONS YOU WON THE GAME🙂")
| 12.525
| 46
| 0.636727
|
acfc6ddb4d25b941a12674eb234d76d10af03ac0
| 3,336
|
py
|
Python
|
EDI/Task2/script/main.py
|
KKowalewski24/Reports
|
4702f29a2626f19ffb11801acf2ccd5764793482
|
[
"MIT"
] | null | null | null |
EDI/Task2/script/main.py
|
KKowalewski24/Reports
|
4702f29a2626f19ffb11801acf2ccd5764793482
|
[
"MIT"
] | null | null | null |
EDI/Task2/script/main.py
|
KKowalewski24/Reports
|
4702f29a2626f19ffb11801acf2ccd5764793482
|
[
"MIT"
] | null | null | null |
import glob
import os
import pathlib
import shutil
import subprocess
import sys
import webbrowser
from typing import List
"""
* Building project `python main.py -r`
* Opening pdf file `python main.py -o`
* Cleaning project `python main.py -c`
* Building options
Check types - `python main.py -t`
"""
# VAR ------------------------------------------------------------------------ #
PDF_LATEX = "pdflatex"  # LaTeX compiler executable
TEX_FILENAME = "EDI_Task2_Karwowski_Kowalewski"  # report name without extension
PDF = ".pdf"
TEX = ".tex"
SOURCE_DIRECTORY = "src"  # where the .tex sources live
DIRECTORIES = ["auxil", "out"]  # [0] = aux files, [1] = build output
# Glob patterns of generated artifacts removed by clean_project.
# NOTE(review): "*.lot" appears twice — harmless but likely a typo.
FILE_EXTENSIONS = [
    "*.aux", "*.bcf", "*.locode", "*.lof", "*.log",
    "*.lot", "*.lot", "*.pdf", "*.out", "*.xml", "*.gz", "*.toc"
]
# MAIN ----------------------------------------------------------------------- #
def main() -> None:
    """Dispatch the CLI sub-command from argv, then print the footer."""
    if len(sys.argv) == 2:
        command = sys.argv[1]
        if command in ("clean", "-c"):
            clean_project()
        elif command in ("run", "-r"):
            build_project()
        elif command in ("open", "-o"):
            open_generated_pdf()

    display_finish()
# DEF ------------------------------------------------------------------------ #
def clean_project() -> None:
    """Remove the build directories and generated files under src/."""
    go_to_parent_directory(get_current_directory())

    try:
        remove_directories(DIRECTORIES)
    except FileNotFoundError:
        print("Some directories may not be found!")

    go_to_child_directory(SOURCE_DIRECTORY)
    try:
        # Delete every generated artifact matching the known glob patterns.
        for it in FILE_EXTENSIONS:
            remove_files(glob.glob(it))
    except FileNotFoundError:
        print("Some files may not be found!")
def open_generated_pdf() -> None:
    """Open the compiled PDF from the out/ directory in the default viewer."""
    go_to_parent_directory(get_current_directory())
    go_to_child_directory(DIRECTORIES[1])
    webbrowser.open(TEX_FILENAME + PDF)
def build_project() -> None:
    """Compile the LaTeX sources in src/ into a PDF."""
    go_to_parent_directory(get_current_directory())
    go_to_child_directory(SOURCE_DIRECTORY)
    # Run pdflatex twice — presumably so cross-references / table of contents
    # resolve on the second pass; confirm before changing.
    generate_pdf()
    generate_pdf()
def generate_pdf() -> None:
    """Invoke pdflatex on the report, writing output to out/ and aux to auxil/."""
    subprocess.call(
        [PDF_LATEX,
         "-file-line-error",
         "-interaction=nonstopmode",
         "-synctex=1",
         "-output-format=pdf",
         "-output-directory=../" + DIRECTORIES[1],
         # NOTE(review): -aux-directory is a MiKTeX-specific option — confirm
         # the target TeX distribution supports it.
         "-aux-directory=../" + DIRECTORIES[0],
         TEX_FILENAME + TEX],
        shell=True
    )
# UTIL ----------------------------------------------------------------------- #
def remove_files(filenames: List[str]) -> None:
    """Delete each named file; raises FileNotFoundError if one is missing."""
    for name in filenames:
        os.remove(name)
def remove_directories(directory_names: List[str]) -> None:
    """Recursively delete each named directory tree."""
    for name in directory_names:
        shutil.rmtree(name)
def get_current_directory():
    """Return the process's current working directory as a Path."""
    current = os.getcwd()
    return pathlib.Path(current)
def go_to_parent_directory(directory) -> None:
    """Change the working directory to *directory*'s parent."""
    parent = directory.parent
    os.chdir(parent)
def go_to_child_directory(directory: str) -> None:
    """Descend from the current directory into the child named *directory*."""
    os.chdir(directory)
def check_types() -> None:
    """Run the mypy static type checker over the current directory."""
    mypy_command = ["mypy", "."]
    subprocess.call(mypy_command)
def display_finish() -> None:
    """Print the closing three-line banner."""
    separator = "------------------------------------------------------------------------"
    print(separator)
    print("FINISHED")
    print(separator)
# __MAIN__ ------------------------------------------------------------------- #
if __name__ == "__main__":
    # "typing"/-t runs only the mypy check; every other command is
    # dispatched by main().
    if len(sys.argv) == 2 and (sys.argv[1] == "typing" or sys.argv[1] == "-t"):
        check_types()
    else:
        main()
| 25.082707
| 85
| 0.52488
|
acfc6f9ac8c83300abebca89f1259260d16ec22a
| 77,280
|
py
|
Python
|
sympy/geometry/line.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | 1
|
2022-01-17T12:38:24.000Z
|
2022-01-17T12:38:24.000Z
|
sympy/geometry/line.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/geometry/line.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | null | null | null |
"""Line-like geometrical entities.
Contains
========
LinearEntity
Line
Ray
Segment
LinearEntity2D
Line2D
Ray2D
Segment2D
LinearEntity3D
Line3D
Ray3D
Segment3D
"""
from sympy.core.containers import Tuple
from sympy.core.evalf import N
from sympy.core.expr import Expr
from sympy.core.numbers import Rational, oo
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import _symbol, Dummy, uniquely_named_symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (_pi_coeff as pi_coeff, acos, tan, atan2)
from .entity import GeometryEntity, GeometrySet
from .exceptions import GeometryError
from .point import Point, Point3D
from .util import find, intersection
from sympy.logic.boolalg import And
from sympy.matrices import Matrix
from sympy.sets.sets import Intersection
from sympy.simplify.simplify import simplify
from sympy.solvers.solveset import linear_coeffs
from sympy.utilities.decorator import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import Undecidable, filldedent
import random
class LinearEntity(GeometrySet):
"""A base class for all linear entities (Line, Ray and Segment)
in n-dimensional Euclidean space.
Attributes
==========
ambient_dimension
direction
length
p1
p2
points
Notes
=====
This is an abstract class and is not meant to be instantiated.
See Also
========
sympy.geometry.entity.GeometryEntity
"""
def __new__(cls, p1, p2=None, **kwargs):
p1, p2 = Point._normalize_dimension(p1, p2)
if p1 == p2:
# sometimes we return a single point if we are not given two unique
# points. This is done in the specific subclass
raise ValueError(
"%s.__new__ requires two unique Points." % cls.__name__)
if len(p1) != len(p2):
raise ValueError(
"%s.__new__ requires two Points of equal dimension." % cls.__name__)
return GeometryEntity.__new__(cls, p1, p2, **kwargs)
def __contains__(self, other):
"""Return a definitive answer or else raise an error if it cannot
be determined that other is on the boundaries of self."""
result = self.contains(other)
if result is not None:
return result
else:
raise Undecidable(
"Cannot decide whether '%s' contains '%s'" % (self, other))
def _span_test(self, other):
"""Test whether the point `other` lies in the positive span of `self`.
A point x is 'in front' of a point y if x.dot(y) >= 0. Return
-1 if `other` is behind `self.p1`, 0 if `other` is `self.p1` and
and 1 if `other` is in front of `self.p1`."""
if self.p1 == other:
return 0
rel_pos = other - self.p1
d = self.direction
if d.dot(rel_pos) > 0:
return 1
return -1
@property
def ambient_dimension(self):
"""A property method that returns the dimension of LinearEntity
object.
Parameters
==========
p1 : LinearEntity
Returns
=======
dimension : integer
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> l1 = Line(p1, p2)
>>> l1.ambient_dimension
2
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0, 0), Point(1, 1, 1)
>>> l1 = Line(p1, p2)
>>> l1.ambient_dimension
3
"""
return len(self.p1)
def angle_between(l1, l2):
"""Return the non-reflex angle formed by rays emanating from
the origin with directions the same as the direction vectors
of the linear entities.
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
angle : angle in radians
Notes
=====
From the dot product of vectors v1 and v2 it is known that:
``dot(v1, v2) = |v1|*|v2|*cos(A)``
where A is the angle formed between the two vectors. We can
get the directional vectors of the two lines and readily
find the angle between the two using the above formula.
See Also
========
is_perpendicular, Ray2D.closing_angle
Examples
========
>>> from sympy import Line
>>> e = Line((0, 0), (1, 0))
>>> ne = Line((0, 0), (1, 1))
>>> sw = Line((1, 1), (0, 0))
>>> ne.angle_between(e)
pi/4
>>> sw.angle_between(e)
3*pi/4
To obtain the non-obtuse angle at the intersection of lines, use
the ``smallest_angle_between`` method:
>>> sw.smallest_angle_between(e)
pi/4
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(-1, 2, 0)
>>> l1, l2 = Line3D(p1, p2), Line3D(p2, p3)
>>> l1.angle_between(l2)
acos(-sqrt(2)/3)
>>> l1.smallest_angle_between(l2)
acos(sqrt(2)/3)
"""
if not isinstance(l1, LinearEntity) and not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
v1, v2 = l1.direction, l2.direction
return acos(v1.dot(v2)/(abs(v1)*abs(v2)))
def smallest_angle_between(l1, l2):
"""Return the smallest angle formed at the intersection of the
lines containing the linear entities.
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
angle : angle in radians
See Also
========
angle_between, is_perpendicular, Ray2D.closing_angle
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(0, 4), Point(2, -2)
>>> l1, l2 = Line(p1, p2), Line(p1, p3)
>>> l1.smallest_angle_between(l2)
pi/4
See Also
========
angle_between, Ray2D.closing_angle
"""
if not isinstance(l1, LinearEntity) and not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
v1, v2 = l1.direction, l2.direction
return acos(abs(v1.dot(v2))/(abs(v1)*abs(v2)))
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the Line.
Parameters
==========
parameter : str, optional
The name of the parameter which will be used for the parametric
point. The default value is 't'. When this parameter is 0, the
first point used to define the line will be returned, and when
it is 1 the second point will be returned.
Returns
=======
point : Point
Raises
======
ValueError
When ``parameter`` already appears in the Line's definition.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(1, 0), Point(5, 3)
>>> l1 = Line(p1, p2)
>>> l1.arbitrary_point()
Point2D(4*t + 1, 3*t)
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(1, 0, 0), Point3D(5, 3, 1)
>>> l1 = Line3D(p1, p2)
>>> l1.arbitrary_point()
Point3D(4*t + 1, 3*t, t)
"""
t = _symbol(parameter, real=True)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('''
Symbol %s already appears in object
and cannot be used as a parameter.
''' % t.name))
# multiply on the right so the variable gets
# combined with the coordinates of the point
return self.p1 + (self.p2 - self.p1)*t
@staticmethod
def are_concurrent(*lines):
"""Is a sequence of linear entities concurrent?
Two or more linear entities are concurrent if they all
intersect at a single point.
Parameters
==========
lines : a sequence of linear entities.
Returns
=======
True : if the set of linear entities intersect in one point
False : otherwise.
See Also
========
sympy.geometry.util.intersection
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(3, 5)
>>> p3, p4 = Point(-2, -2), Point(0, 2)
>>> l1, l2, l3 = Line(p1, p2), Line(p1, p3), Line(p1, p4)
>>> Line.are_concurrent(l1, l2, l3)
True
>>> l4 = Line(p2, p3)
>>> Line.are_concurrent(l2, l3, l4)
False
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(3, 5, 2)
>>> p3, p4 = Point3D(-2, -2, -2), Point3D(0, 2, 1)
>>> l1, l2, l3 = Line3D(p1, p2), Line3D(p1, p3), Line3D(p1, p4)
>>> Line3D.are_concurrent(l1, l2, l3)
True
>>> l4 = Line3D(p2, p3)
>>> Line3D.are_concurrent(l2, l3, l4)
False
"""
common_points = Intersection(*lines)
if common_points.is_FiniteSet and len(common_points) == 1:
return True
return False
    def contains(self, other):
        """Subclasses should implement this method and should return
        True if other is on the boundaries of self;
        False if not on the boundaries of self;
        None if a determination cannot be made."""
        # Abstract hook: each concrete LinearEntity subclass (Line, Ray,
        # Segment) provides its own containment test.
        raise NotImplementedError()
@property
def direction(self):
"""The direction vector of the LinearEntity.
Returns
=======
p : a Point; the ray from the origin to this point is the
direction of `self`
Examples
========
>>> from sympy.geometry import Line
>>> a, b = (1, 1), (1, 3)
>>> Line(a, b).direction
Point2D(0, 2)
>>> Line(b, a).direction
Point2D(0, -2)
This can be reported so the distance from the origin is 1:
>>> Line(b, a).direction.unit
Point2D(0, -1)
See Also
========
sympy.geometry.point.Point.unit
"""
return self.p2 - self.p1
    def intersection(self, other):
        """The intersection with another geometrical entity.
        Parameters
        ==========
        o : Point or LinearEntity
        Returns
        =======
        intersection : list of geometrical entities
        See Also
        ========
        sympy.geometry.point.Point
        Examples
        ========
        >>> from sympy import Point, Line, Segment
        >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(7, 7)
        >>> l1 = Line(p1, p2)
        >>> l1.intersection(p3)
        [Point2D(7, 7)]
        >>> p4, p5 = Point(5, 0), Point(0, 3)
        >>> l2 = Line(p4, p5)
        >>> l1.intersection(l2)
        [Point2D(15/8, 15/8)]
        >>> p6, p7 = Point(0, 5), Point(2, 6)
        >>> s1 = Segment(p6, p7)
        >>> l1.intersection(s1)
        []
        >>> from sympy import Point3D, Line3D, Segment3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(7, 7, 7)
        >>> l1 = Line3D(p1, p2)
        >>> l1.intersection(p3)
        [Point3D(7, 7, 7)]
        >>> l1 = Line3D(Point3D(4,19,12), Point3D(5,25,17))
        >>> l2 = Line3D(Point3D(-3, -15, -19), direction_ratio=[2,8,8])
        >>> l1.intersection(l2)
        [Point3D(1, 1, -3)]
        >>> p6, p7 = Point3D(0, 5, 2), Point3D(2, 6, 3)
        >>> s1 = Segment3D(p6, p7)
        >>> l1.intersection(s1)
        []
        """
        # Helper: overlap of two collinear rays (empty, a point, a ray,
        # or a segment between the two sources).
        def intersect_parallel_rays(ray1, ray2):
            if ray1.direction.dot(ray2.direction) > 0:
                # rays point in the same direction
                # so return the one that is "in front"
                return [ray2] if ray1._span_test(ray2.p1) >= 0 else [ray1]
            else:
                # rays point in opposite directions
                st = ray1._span_test(ray2.p1)
                if st < 0:
                    return []
                elif st == 0:
                    return [ray2.p1]
                return [Segment(ray1.p1, ray2.p1)]
        # Helper: overlap of a collinear ray and segment; _span_test tells
        # on which side of the ray's source each segment endpoint lies.
        def intersect_parallel_ray_and_segment(ray, seg):
            st1, st2 = ray._span_test(seg.p1), ray._span_test(seg.p2)
            if st1 < 0 and st2 < 0:
                return []
            elif st1 >= 0 and st2 >= 0:
                return [seg]
            elif st1 >= 0:  # st2 < 0:
                return [Segment(ray.p1, seg.p1)]
            else:  # st1 < 0 and st2 >= 0:
                return [Segment(ray.p1, seg.p2)]
        # Helper: overlap of two collinear segments.
        def intersect_parallel_segments(seg1, seg2):
            if seg1.contains(seg2):
                return [seg2]
            if seg2.contains(seg1):
                return [seg1]
            # direct the segments so they're oriented the same way
            if seg1.direction.dot(seg2.direction) < 0:
                seg2 = Segment(seg2.p2, seg2.p1)
            # order the segments so seg1 is "behind" seg2
            if seg1._span_test(seg2.p1) < 0:
                seg1, seg2 = seg2, seg1
            if seg2._span_test(seg1.p2) < 0:
                return []
            return [Segment(seg2.p1, seg1.p2)]
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if other.is_Point:
            if self.contains(other):
                return [other]
            else:
                return []
        elif isinstance(other, LinearEntity):
            # break into cases based on whether
            # the lines are parallel, non-parallel intersecting, or skew
            pts = Point._normalize_dimension(self.p1, self.p2, other.p1, other.p2)
            rank = Point.affine_rank(*pts)
            if rank == 1:
                # we're collinear
                if isinstance(self, Line):
                    return [other]
                if isinstance(other, Line):
                    return [self]
                if isinstance(self, Ray) and isinstance(other, Ray):
                    return intersect_parallel_rays(self, other)
                if isinstance(self, Ray) and isinstance(other, Segment):
                    return intersect_parallel_ray_and_segment(self, other)
                if isinstance(self, Segment) and isinstance(other, Ray):
                    return intersect_parallel_ray_and_segment(other, self)
                if isinstance(self, Segment) and isinstance(other, Segment):
                    return intersect_parallel_segments(self, other)
            elif rank == 2:
                # we're in the same plane
                l1 = Line(*pts[:2])
                l2 = Line(*pts[2:])
                # check to see if we're parallel. If we are, we can't
                # be intersecting, since the collinear case was already
                # handled
                if l1.direction.is_scalar_multiple(l2.direction):
                    return []
                # find the intersection as if everything were lines
                # by solving the equation t*d + p1 == s*d' + p1'
                m = Matrix([l1.direction, -l2.direction]).transpose()
                v = Matrix([l2.p1 - l1.p1]).transpose()
                # we cannot use m.solve(v) because that only works for square matrices
                m_rref, pivots = m.col_insert(2, v).rref(simplify=True)
                # rank == 2 ensures we have 2 pivots, but let's check anyway
                if len(pivots) != 2:
                    raise GeometryError("Failed when solving Mx=b when M={} and b={}".format(m, v))
                coeff = m_rref[0, 2]
                line_intersection = l1.direction*coeff + self.p1
                # if we're both lines, we can skip a containment check
                if isinstance(self, Line) and isinstance(other, Line):
                    return [line_intersection]
                if ((isinstance(self, Line) or
                     self.contains(line_intersection)) and
                        other.contains(line_intersection)):
                    return [line_intersection]
                return []
            else:
                # we're skew
                return []
        # Fall back to the other entity's intersection logic (e.g. curves).
        return other.intersection(self)
def is_parallel(l1, l2):
"""Are two linear entities parallel?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are parallel,
False : otherwise.
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> p3, p4 = Point(3, 4), Point(6, 7)
>>> l1, l2 = Line(p1, p2), Line(p3, p4)
>>> Line.is_parallel(l1, l2)
True
>>> p5 = Point(6, 6)
>>> l3 = Line(p3, p5)
>>> Line.is_parallel(l1, l3)
False
>>> from sympy import Point3D, Line3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(3, 4, 5)
>>> p3, p4 = Point3D(2, 1, 1), Point3D(8, 9, 11)
>>> l1, l2 = Line3D(p1, p2), Line3D(p3, p4)
>>> Line3D.is_parallel(l1, l2)
True
>>> p5 = Point3D(6, 6, 6)
>>> l3 = Line3D(p3, p5)
>>> Line3D.is_parallel(l1, l3)
False
"""
if not isinstance(l1, LinearEntity) and not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
return l1.direction.is_scalar_multiple(l2.direction)
def is_perpendicular(l1, l2):
"""Are two linear entities perpendicular?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are perpendicular,
False : otherwise.
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(-1, 1)
>>> l1, l2 = Line(p1, p2), Line(p1, p3)
>>> l1.is_perpendicular(l2)
True
>>> p4 = Point(5, 3)
>>> l3 = Line(p1, p4)
>>> l1.is_perpendicular(l3)
False
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(-1, 2, 0)
>>> l1, l2 = Line3D(p1, p2), Line3D(p2, p3)
>>> l1.is_perpendicular(l2)
False
>>> p4 = Point3D(5, 3, 7)
>>> l3 = Line3D(p1, p4)
>>> l1.is_perpendicular(l3)
False
"""
if not isinstance(l1, LinearEntity) and not isinstance(l2, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
return S.Zero.equals(l1.direction.dot(l2.direction))
def is_similar(self, other):
"""
Return True if self and other are contained in the same line.
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 1), Point(3, 4), Point(2, 3)
>>> l1 = Line(p1, p2)
>>> l2 = Line(p1, p3)
>>> l1.is_similar(l2)
True
"""
l = Line(self.p1, self.p2)
return l.contains(other)
@property
def length(self):
"""
The length of the line.
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(3, 5)
>>> l1 = Line(p1, p2)
>>> l1.length
oo
"""
return S.Infinity
@property
def p1(self):
"""The first defining point of a linear entity.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.p1
Point2D(0, 0)
"""
return self.args[0]
@property
def p2(self):
"""The second defining point of a linear entity.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.p2
Point2D(5, 3)
"""
return self.args[1]
def parallel_line(self, p):
"""Create a new Line parallel to this linear entity which passes
through the point `p`.
Parameters
==========
p : Point
Returns
=======
line : Line
See Also
========
is_parallel
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
>>> l1 = Line(p1, p2)
>>> l2 = l1.parallel_line(p3)
>>> p3 in l2
True
>>> l1.is_parallel(l2)
True
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(2, 3, 4), Point3D(-2, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> l2 = l1.parallel_line(p3)
>>> p3 in l2
True
>>> l1.is_parallel(l2)
True
"""
p = Point(p, dim=self.ambient_dimension)
return Line(p, p + self.direction)
def perpendicular_line(self, p):
"""Create a new Line perpendicular to this linear entity which passes
through the point `p`.
Parameters
==========
p : Point
Returns
=======
line : Line
See Also
========
sympy.geometry.line.LinearEntity.is_perpendicular, perpendicular_segment
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
>>> l1 = Line(p1, p2)
>>> l2 = l1.perpendicular_line(p3)
>>> p3 in l2
True
>>> l1.is_perpendicular(l2)
True
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(2, 3, 4), Point3D(-2, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> l2 = l1.perpendicular_line(p3)
>>> p3 in l2
True
>>> l1.is_perpendicular(l2)
True
"""
p = Point(p, dim=self.ambient_dimension)
if p in self:
p = p + self.direction.orthogonal_direction
return Line(p, self.projection(p))
def perpendicular_segment(self, p):
"""Create a perpendicular line segment from `p` to this line.
The enpoints of the segment are ``p`` and the closest point in
the line containing self. (If self is not a line, the point might
not be in self.)
Parameters
==========
p : Point
Returns
=======
segment : Segment
Notes
=====
Returns `p` itself if `p` is on this linear entity.
See Also
========
perpendicular_line
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, 2)
>>> l1 = Line(p1, p2)
>>> s1 = l1.perpendicular_segment(p3)
>>> l1.is_perpendicular(s1)
True
>>> p3 in s1
True
>>> l1.perpendicular_segment(Point(4, 0))
Segment2D(Point2D(4, 0), Point2D(2, 2))
>>> from sympy import Point3D, Line3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, 2, 0)
>>> l1 = Line3D(p1, p2)
>>> s1 = l1.perpendicular_segment(p3)
>>> l1.is_perpendicular(s1)
True
>>> p3 in s1
True
>>> l1.perpendicular_segment(Point3D(4, 0, 0))
Segment3D(Point3D(4, 0, 0), Point3D(4/3, 4/3, 4/3))
"""
p = Point(p, dim=self.ambient_dimension)
if p in self:
return p
l = self.perpendicular_line(p)
# The intersection should be unique, so unpack the singleton
p2, = Intersection(Line(self.p1, self.p2), l)
return Segment(p, p2)
@property
def points(self):
"""The two points used to define this linear entity.
Returns
=======
points : tuple of Points
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 11)
>>> l1 = Line(p1, p2)
>>> l1.points
(Point2D(0, 0), Point2D(5, 11))
"""
return (self.p1, self.p2)
    def projection(self, other):
        """Project a point, line, ray, or segment onto this linear entity.
        Parameters
        ==========
        other : Point or LinearEntity (Line, Ray, Segment)
        Returns
        =======
        projection : Point or LinearEntity (Line, Ray, Segment)
            The return type matches the type of the parameter ``other``.
        Raises
        ======
        GeometryError
            When method is unable to perform projection.
        Notes
        =====
        A projection involves taking the two points that define
        the linear entity and projecting those points onto a
        Line and then reforming the linear entity using these
        projections.
        A point P is projected onto a line L by finding the point
        on L that is closest to P. This point is the intersection
        of L and the line perpendicular to L that passes through P.
        See Also
        ========
        sympy.geometry.point.Point, perpendicular_line
        Examples
        ========
        >>> from sympy import Point, Line, Segment, Rational
        >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(Rational(1, 2), 0)
        >>> l1 = Line(p1, p2)
        >>> l1.projection(p3)
        Point2D(1/4, 1/4)
        >>> p4, p5 = Point(10, 0), Point(12, 1)
        >>> s1 = Segment(p4, p5)
        >>> l1.projection(s1)
        Segment2D(Point2D(5, 5), Point2D(13/2, 13/2))
        >>> p1, p2, p3 = Point(0, 0, 1), Point(1, 1, 2), Point(2, 0, 1)
        >>> l1 = Line(p1, p2)
        >>> l1.projection(p3)
        Point3D(2/3, 2/3, 5/3)
        >>> p4, p5 = Point(10, 0, 1), Point(12, 1, 3)
        >>> s1 = Segment(p4, p5)
        >>> l1.projection(s1)
        Segment3D(Point3D(10/3, 10/3, 13/3), Point3D(5, 5, 6))
        """
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        # Project a single point: shift to put self.p1 at the origin,
        # project onto the direction vector, then shift back.
        def proj_point(p):
            return Point.project(p - self.p1, self.direction) + self.p1
        if isinstance(other, Point):
            return proj_point(other)
        elif isinstance(other, LinearEntity):
            p1, p2 = proj_point(other.p1), proj_point(other.p2)
            # test to see if we're degenerate
            if p1 == p2:
                return p1
            projected = other.__class__(p1, p2)
            # Clip the projected entity against self (e.g. a segment
            # projected onto a ray may only partially overlap it).
            projected = Intersection(self, projected)
            # if we happen to have intersected in only a point, return that
            if projected.is_FiniteSet and len(projected) == 1:
                # projected is a set of size 1, so unpack it in `a`
                a, = projected
                return a
            # order args so projection is in the same direction as self
            if self.direction.dot(projected.direction) < 0:
                p1, p2 = projected.args
                projected = projected.func(p2, p1)
            return projected
        raise GeometryError(
            "Do not know how to project %s onto %s" % (other, self))
def random_point(self, seed=None):
"""A random point on a LinearEntity.
Returns
=======
point : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line, Ray, Segment
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> line = Line(p1, p2)
>>> r = line.random_point(seed=42) # seed value is optional
>>> r.n(3)
Point2D(-0.72, -0.432)
>>> r in line
True
>>> Ray(p1, p2).random_point(seed=42).n(3)
Point2D(0.72, 0.432)
>>> Segment(p1, p2).random_point(seed=42).n(3)
Point2D(3.2, 1.92)
"""
if seed is not None:
rng = random.Random(seed)
else:
rng = random
t = Dummy()
pt = self.arbitrary_point(t)
if isinstance(self, Ray):
v = abs(rng.gauss(0, 1))
elif isinstance(self, Segment):
v = rng.random()
elif isinstance(self, Line):
v = rng.gauss(0, 1)
else:
raise NotImplementedError('unhandled line type')
return pt.subs(t, Rational(v))
    def bisectors(self, other):
        """Returns the perpendicular lines which pass through the intersections
        of self and other that are in the same plane.
        Parameters
        ==========
        line : Line3D
        Returns
        =======
        list: two Line instances
        Examples
        ========
        >>> from sympy.geometry import Point3D, Line3D
        >>> r1 = Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0))
        >>> r2 = Line3D(Point3D(0, 0, 0), Point3D(0, 1, 0))
        >>> r1.bisectors(r2)
        [Line3D(Point3D(0, 0, 0), Point3D(1, 1, 0)), Line3D(Point3D(0, 0, 0), Point3D(1, -1, 0))]
        """
        if not isinstance(other, LinearEntity):
            raise GeometryError("Expecting LinearEntity, not %s" % other)
        l1, l2 = self, other
        # make sure dimensions match or else a warning will rise from
        # intersection calculation
        if l1.p1.ambient_dimension != l2.p1.ambient_dimension:
            if isinstance(l1, Line2D):
                # ensure l1 is the higher-dimensional entity so that
                # normalization pads l2's points up to l1's dimension
                l1, l2 = l2, l1
            _, p1 = Point._normalize_dimension(l1.p1, l2.p1, on_morph='ignore')
            _, p2 = Point._normalize_dimension(l1.p2, l2.p2, on_morph='ignore')
            l2 = Line(p1, p2)
        point = intersection(l1, l2)
        # Three cases: Lines may intersect in a point, may be equal or may not intersect.
        if not point:
            raise GeometryError("The lines do not intersect")
        else:
            pt = point[0]
            if isinstance(pt, Line):
                # Intersection is a line because both lines are coincident
                return [self]
        # The bisectors lie along the sum and difference of the unit
        # direction vectors, anchored at the intersection point.
        d1 = l1.direction.unit
        d2 = l2.direction.unit
        bis1 = Line(pt, pt + d1 + d2)
        bis2 = Line(pt, pt + d1 - d2)
        return [bis1, bis2]
class Line(LinearEntity):
    """An infinite line in space.
    A 2D line is declared with two distinct points, point and slope, or
    an equation. A 3D line may be defined with a point and a direction ratio.
    Parameters
    ==========
    p1 : Point
    p2 : Point
    slope : SymPy expression
    direction_ratio : list
    equation : equation of a line
    Notes
    =====
    `Line` will automatically subclass to `Line2D` or `Line3D` based
    on the dimension of `p1`. The `slope` argument is only relevant
    for `Line2D` and the `direction_ratio` argument is only relevant
    for `Line3D`.
    See Also
    ========
    sympy.geometry.point.Point
    sympy.geometry.line.Line2D
    sympy.geometry.line.Line3D
    Examples
    ========
    >>> from sympy import Point, Eq
    >>> from sympy.geometry import Line, Segment
    >>> from sympy.abc import x, y, a, b
    >>> L = Line(Point(2,3), Point(3,5))
    >>> L
    Line2D(Point2D(2, 3), Point2D(3, 5))
    >>> L.points
    (Point2D(2, 3), Point2D(3, 5))
    >>> L.equation()
    -2*x + y + 1
    >>> L.coefficients
    (-2, 1, 1)
    Instantiate with keyword ``slope``:
    >>> Line(Point(0, 0), slope=0)
    Line2D(Point2D(0, 0), Point2D(1, 0))
    Instantiate with another linear object
    >>> s = Segment((0, 0), (0, 1))
    >>> Line(s).equation()
    x
    The line corresponding to an equation in the form `ax + by + c = 0`,
    can be entered:
    >>> Line(3*x + y + 18)
    Line2D(Point2D(0, -18), Point2D(1, -21))
    If `x` or `y` has a different name, then they can be specified, too,
    as a string (to match the name) or symbol:
    >>> Line(Eq(3*a + b, -18), x='a', y=b)
    Line2D(Point2D(0, -18), Point2D(1, -21))
    """
    def __new__(cls, *args, **kwargs):
        # A single Expr/Eq argument is interpreted as a 2D line equation
        # a*x + b*y + c = 0; otherwise args are points/linear entities.
        if len(args) == 1 and isinstance(args[0], (Expr, Eq)):
            # `missing` is a symbol name guaranteed not to clash with any
            # symbol in the equation; used when x or y is absent.
            missing = uniquely_named_symbol('?', args).name
            if not kwargs:
                x = 'x'
                y = 'y'
            else:
                x = kwargs.pop('x', missing)
                y = kwargs.pop('y', missing)
            if kwargs:
                raise ValueError('expecting only x and y as keywords')
            equation = args[0]
            if isinstance(equation, Eq):
                equation = equation.lhs - equation.rhs
            # Resolve a name/symbol to the matching symbol in the equation,
            # or fall back to the `missing` placeholder if not present.
            def find_or_missing(x):
                try:
                    return find(x, equation)
                except ValueError:
                    return missing
            x = find_or_missing(x)
            y = find_or_missing(y)
            a, b, c = linear_coeffs(equation, x, y)
            if b:
                return Line((0, -c/b), slope=-a/b)
            if a:
                # vertical line: x = -c/a
                return Line((-c/a, 0), slope=oo)
            raise ValueError('not found in equation: %s' % (set('xy') - {x, y}))
        else:
            if len(args) > 0:
                p1 = args[0]
                if len(args) > 1:
                    p2 = args[1]
                else:
                    p2 = None
                if isinstance(p1, LinearEntity):
                    if p2:
                        raise ValueError('If p1 is a LinearEntity, p2 must be None.')
                    dim = len(p1.p1)
                else:
                    p1 = Point(p1)
                    dim = len(p1)
                # NOTE(review): `or` short-circuits here whenever p2 is not
                # None, so the `isinstance(...) and p2.ambient_dimension != dim`
                # clause is never the deciding factor — presumably `and` was
                # intended for the first test; verify against upstream.
                if p2 is not None or isinstance(p2, Point) and p2.ambient_dimension != dim:
                    p2 = Point(p2)
                # Dispatch to the dimension-specific subclass when possible.
                if dim == 2:
                    return Line2D(p1, p2, **kwargs)
                elif dim == 3:
                    return Line3D(p1, p2, **kwargs)
            return LinearEntity.__new__(cls, p1, p2, **kwargs)
    def contains(self, other):
        """
        Return True if `other` is on this Line, or False otherwise.
        Examples
        ========
        >>> from sympy import Line,Point
        >>> p1, p2 = Point(0, 1), Point(3, 4)
        >>> l = Line(p1, p2)
        >>> l.contains(p1)
        True
        >>> l.contains((0, 1))
        True
        >>> l.contains((0, 0))
        False
        >>> a = (0, 0, 0)
        >>> b = (1, 1, 1)
        >>> c = (2, 2, 2)
        >>> l1 = Line(a, b)
        >>> l2 = Line(b, a)
        >>> l1 == l2
        False
        >>> l1 in l2
        True
        """
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        # A point is on the line iff it is collinear with the two defining
        # points; a linear entity is contained iff all four points are.
        if isinstance(other, Point):
            return Point.is_collinear(other, self.p1, self.p2)
        if isinstance(other, LinearEntity):
            return Point.is_collinear(self.p1, self.p2, other.p1, other.p2)
        return False
    def distance(self, other):
        """
        Finds the shortest distance between a line and a point.
        Raises
        ======
        NotImplementedError is raised if `other` is not a Point
        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2 = Point(0, 0), Point(1, 1)
        >>> s = Line(p1, p2)
        >>> s.distance(Point(-1, 1))
        sqrt(2)
        >>> s.distance((-1, 2))
        3*sqrt(2)/2
        >>> p1, p2 = Point(0, 0, 0), Point(1, 1, 1)
        >>> s = Line(p1, p2)
        >>> s.distance(Point(-1, 1, 1))
        2*sqrt(6)/3
        >>> s.distance((-1, 1, 1))
        2*sqrt(6)/3
        """
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if self.contains(other):
            return S.Zero
        # Distance from a point to a line is the length of the
        # perpendicular dropped from the point onto the line.
        return self.perpendicular_segment(other).length
    @deprecated(useinstead="equals", issue=12860, deprecated_since_version="1.0")
    def equal(self, other):
        # Deprecated alias kept for backward compatibility.
        return self.equals(other)
    def equals(self, other):
        """Returns True if self and other are the same mathematical entities"""
        if not isinstance(other, Line):
            return False
        # Two lines coincide iff all four defining points are collinear.
        return Point.is_collinear(self.p1, other.p1, self.p2, other.p2)
    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of line. Gives
        values that will produce a line that is +/- 5 units long (where a
        unit is the distance between the two points that define the line).
        Parameters
        ==========
        parameter : str, optional
            Default value is 't'.
        Returns
        =======
        plot_interval : list (plot interval)
            [parameter, lower_bound, upper_bound]
        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2 = Point(0, 0), Point(5, 3)
        >>> l1 = Line(p1, p2)
        >>> l1.plot_interval()
        [t, -5, 5]
        """
        t = _symbol(parameter, real=True)
        return [t, -5, 5]
class Ray(LinearEntity):
    """A Ray is a semi-line in the space with a source point and a direction.
    Parameters
    ==========
    p1 : Point
        The source of the Ray
    p2 : Point or radian value
        This point determines the direction in which the Ray propagates.
        If given as an angle it is interpreted in radians with the positive
        direction being ccw.
    Attributes
    ==========
    source
    See Also
    ========
    sympy.geometry.line.Ray2D
    sympy.geometry.line.Ray3D
    sympy.geometry.point.Point
    sympy.geometry.line.Line
    Notes
    =====
    `Ray` will automatically subclass to `Ray2D` or `Ray3D` based on the
    dimension of `p1`.
    Examples
    ========
    >>> from sympy import Point, pi
    >>> from sympy.geometry import Ray
    >>> r = Ray(Point(2, 3), Point(3, 5))
    >>> r
    Ray2D(Point2D(2, 3), Point2D(3, 5))
    >>> r.points
    (Point2D(2, 3), Point2D(3, 5))
    >>> r.source
    Point2D(2, 3)
    >>> r.xdirection
    oo
    >>> r.ydirection
    oo
    >>> r.slope
    2
    >>> Ray(Point(0, 0), angle=pi/4).slope
    1
    """
    def __new__(cls, p1, p2=None, **kwargs):
        p1 = Point(p1)
        # p2 may be None (direction given via the `angle` kwarg in Ray2D);
        # only normalize dimensions when a second point was supplied.
        if p2 is not None:
            p1, p2 = Point._normalize_dimension(p1, Point(p2))
        dim = len(p1)
        # Dispatch to the dimension-specific subclass when possible.
        if dim == 2:
            return Ray2D(p1, p2, **kwargs)
        elif dim == 3:
            return Ray3D(p1, p2, **kwargs)
        return LinearEntity.__new__(cls, p1, p2, **kwargs)
    def _svg(self, scale_factor=1., fill_color="#66cc99"):
        """Returns SVG path element for the LinearEntity.
        Parameters
        ==========
        scale_factor : float
            Multiplication factor for the SVG stroke-width. Default is 1.
        fill_color : str, optional
            Hex string for fill color. Default is "#66cc99".
        """
        # Evaluate the endpoints numerically and draw a marker-terminated
        # path from the source toward the direction point.
        verts = (N(self.p1), N(self.p2))
        coords = ["{},{}".format(p.x, p.y) for p in verts]
        path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
        return (
            '<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
            'stroke-width="{0}" opacity="0.6" d="{1}" '
            'marker-start="url(#markerCircle)" marker-end="url(#markerArrow)"/>'
            ).format(2.*scale_factor, path, fill_color)
    def contains(self, other):
        """
        Is other GeometryEntity contained in this Ray?
        Examples
        ========
        >>> from sympy import Ray,Point,Segment
        >>> p1, p2 = Point(0, 0), Point(4, 4)
        >>> r = Ray(p1, p2)
        >>> r.contains(p1)
        True
        >>> r.contains((1, 1))
        True
        >>> r.contains((1, 3))
        False
        >>> s = Segment((1, 1), (2, 2))
        >>> r.contains(s)
        True
        >>> s = Segment((1, 2), (2, 5))
        >>> r.contains(s)
        False
        >>> r1 = Ray((2, 2), (3, 3))
        >>> r.contains(r1)
        True
        >>> r1 = Ray((2, 2), (3, 5))
        >>> r.contains(r1)
        False
        """
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if isinstance(other, Point):
            if Point.is_collinear(self.p1, self.p2, other):
                # if we're in the direction of the ray, our
                # direction vector dot the ray's direction vector
                # should be non-negative
                return bool((self.p2 - self.p1).dot(other - self.p1) >= S.Zero)
            return False
        elif isinstance(other, Ray):
            if Point.is_collinear(self.p1, self.p2, other.p1, other.p2):
                # NOTE(review): this reports True for any same-direction
                # collinear ray without checking that other's source lies on
                # self (e.g. Ray((0,0),(1,1)) vs Ray((-1,-1),(0,0))) —
                # verify whether `other.p1 in self` should also be required.
                return bool((self.p2 - self.p1).dot(other.p2 - other.p1) > S.Zero)
            return False
        elif isinstance(other, Segment):
            # A segment is contained iff both of its endpoints are.
            return other.p1 in self and other.p2 in self
        # No other known entity can be contained in a Ray
        return False
    def distance(self, other):
        """
        Finds the shortest distance between the ray and a point.
        Raises
        ======
        NotImplementedError is raised if `other` is not a Point
        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2 = Point(0, 0), Point(1, 1)
        >>> s = Ray(p1, p2)
        >>> s.distance(Point(-1, -1))
        sqrt(2)
        >>> s.distance((-1, 2))
        3*sqrt(2)/2
        >>> p1, p2 = Point(0, 0, 0), Point(1, 1, 2)
        >>> s = Ray(p1, p2)
        >>> s
        Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 2))
        >>> s.distance(Point(-1, -1, 2))
        4*sqrt(3)/3
        >>> s.distance((-1, -1, 2))
        4*sqrt(3)/3
        """
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if self.contains(other):
            return S.Zero
        # Project onto the full line: if the foot of the perpendicular is on
        # the ray, that's the closest point; otherwise the source is closest.
        proj = Line(self.p1, self.p2).projection(other)
        if self.contains(proj):
            return abs(other - proj)
        else:
            return abs(other - self.source)
    def equals(self, other):
        """Returns True if self and other are the same mathematical entities"""
        if not isinstance(other, Ray):
            return False
        # Same ray iff same source and other's direction point is on self.
        return self.source == other.source and other.p2 in self
    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of the Ray. Gives
        values that will produce a ray that is 10 units long (where a unit is
        the distance between the two points that define the ray).
        Parameters
        ==========
        parameter : str, optional
            Default value is 't'.
        Returns
        =======
        plot_interval : list
            [parameter, lower_bound, upper_bound]
        Examples
        ========
        >>> from sympy import Ray, pi
        >>> r = Ray((0, 0), angle=pi/4)
        >>> r.plot_interval()
        [t, 0, 10]
        """
        t = _symbol(parameter, real=True)
        return [t, 0, 10]
    @property
    def source(self):
        """The point from which the ray emanates.
        See Also
        ========
        sympy.geometry.point.Point
        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2 = Point(0, 0), Point(4, 1)
        >>> r1 = Ray(p1, p2)
        >>> r1.source
        Point2D(0, 0)
        >>> p1, p2 = Point(0, 0, 0), Point(4, 1, 5)
        >>> r1 = Ray(p2, p1)
        >>> r1.source
        Point3D(4, 1, 5)
        """
        return self.p1
class Segment(LinearEntity):
"""A line segment in space.
Parameters
==========
p1 : Point
p2 : Point
Attributes
==========
length : number or SymPy expression
midpoint : Point
See Also
========
sympy.geometry.line.Segment2D
sympy.geometry.line.Segment3D
sympy.geometry.point.Point
sympy.geometry.line.Line
Notes
=====
If 2D or 3D points are used to define `Segment`, it will
be automatically subclassed to `Segment2D` or `Segment3D`.
Examples
========
>>> from sympy import Point
>>> from sympy.geometry import Segment
>>> Segment((1, 0), (1, 1)) # tuples are interpreted as pts
Segment2D(Point2D(1, 0), Point2D(1, 1))
>>> s = Segment(Point(4, 3), Point(1, 1))
>>> s.points
(Point2D(4, 3), Point2D(1, 1))
>>> s.slope
2/3
>>> s.length
sqrt(13)
>>> s.midpoint
Point2D(5/2, 2)
>>> Segment((1, 0, 0), (1, 1, 1)) # tuples are interpreted as pts
Segment3D(Point3D(1, 0, 0), Point3D(1, 1, 1))
>>> s = Segment(Point(4, 3, 9), Point(1, 1, 7)); s
Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7))
>>> s.points
(Point3D(4, 3, 9), Point3D(1, 1, 7))
>>> s.length
sqrt(17)
>>> s.midpoint
Point3D(5/2, 2, 8)
"""
def __new__(cls, p1, p2, **kwargs):
p1, p2 = Point._normalize_dimension(Point(p1), Point(p2))
dim = len(p1)
if dim == 2:
return Segment2D(p1, p2, **kwargs)
elif dim == 3:
return Segment3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
    def contains(self, other):
        """
        Is the other GeometryEntity contained within this Segment?
        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2 = Point(0, 1), Point(3, 4)
        >>> s = Segment(p1, p2)
        >>> s2 = Segment(p2, p1)
        >>> s.contains(s2)
        True
        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 1, 1), Point3D(3, 4, 5)
        >>> s = Segment3D(p1, p2)
        >>> s2 = Segment3D(p2, p1)
        >>> s.contains(s2)
        True
        >>> s.contains((p1 + p2)/2)
        True
        """
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if isinstance(other, Point):
            if Point.is_collinear(other, self.p1, self.p2):
                if isinstance(self, Segment2D):
                    # if it is collinear and is in the bounding box of the
                    # segment then it must be on the segment
                    # vert is True for vertical segments (1/slope == 0),
                    # False for non-vertical, and may be indeterminate
                    # for symbolic coordinates.
                    vert = (1/self.slope).equals(0)
                    if vert is False:
                        isin = (self.p1.x - other.x)*(self.p2.x - other.x) <= 0
                        if isin in (True, False):
                            return isin
                    if vert is True:
                        isin = (self.p1.y - other.y)*(self.p2.y - other.y) <= 0
                        if isin in (True, False):
                            return isin
                # use the triangle inequality
                d1, d2 = other - self.p1, other - self.p2
                d = self.p2 - self.p1
                # without the call to simplify, SymPy cannot tell that an expression
                # like (a+b)*(a/2+b/2) is always non-negative. If it cannot be
                # determined, raise an Undecidable error
                try:
                    # the triangle inequality says that |d1|+|d2| >= |d| and is strict
                    # only if other lies in the line segment
                    return bool(simplify(Eq(abs(d1) + abs(d2) - abs(d), 0)))
                except TypeError:
                    raise Undecidable("Cannot determine if {} is in {}".format(other, self))
        if isinstance(other, Segment):
            # A segment is contained iff both of its endpoints are.
            return other.p1 in self and other.p2 in self
        return False
def equals(self, other):
"""Returns True if self and other are the same mathematical entities"""
return isinstance(other, self.func) and list(
ordered(self.args)) == list(ordered(other.args))
    def distance(self, other):
        """
        Finds the shortest distance between a line segment and a point.
        Raises
        ======
        NotImplementedError is raised if `other` is not a Point
        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2 = Point(0, 1), Point(3, 4)
        >>> s = Segment(p1, p2)
        >>> s.distance(Point(10, 15))
        sqrt(170)
        >>> s.distance((0, 12))
        sqrt(73)
        >>> from sympy import Point3D, Segment3D
        >>> p1, p2 = Point3D(0, 0, 3), Point3D(1, 1, 4)
        >>> s = Segment3D(p1, p2)
        >>> s.distance(Point3D(10, 15, 12))
        sqrt(341)
        >>> s.distance((10, 15, 12))
        sqrt(341)
        """
        if not isinstance(other, GeometryEntity):
            other = Point(other, dim=self.ambient_dimension)
        if isinstance(other, Point):
            vp1 = other - self.p1
            vp2 = other - self.p2
            # The signs of the projections onto the segment direction tell
            # whether the point's foot of perpendicular lies between the
            # endpoints, beyond p2, or before p1.
            dot_prod_sign_1 = self.direction.dot(vp1) >= 0
            dot_prod_sign_2 = self.direction.dot(vp2) <= 0
            if dot_prod_sign_1 and dot_prod_sign_2:
                # foot of perpendicular is inside the segment: distance to
                # the supporting line is the answer
                return Line(self.p1, self.p2).distance(other)
            if dot_prod_sign_1 and not dot_prod_sign_2:
                # closest point is the endpoint p2
                return abs(vp2)
            if not dot_prod_sign_1 and dot_prod_sign_2:
                # closest point is the endpoint p1
                return abs(vp1)
        raise NotImplementedError()
@property
def length(self):
"""The length of the line segment.
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 0), Point(4, 3)
>>> s1 = Segment(p1, p2)
>>> s1.length
5
>>> from sympy import Point3D, Segment3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
>>> s1 = Segment3D(p1, p2)
>>> s1.length
sqrt(34)
"""
return Point.distance(self.p1, self.p2)
@property
def midpoint(self):
"""The midpoint of the line segment.
See Also
========
sympy.geometry.point.Point.midpoint
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 0), Point(4, 3)
>>> s1 = Segment(p1, p2)
>>> s1.midpoint
Point2D(2, 3/2)
>>> from sympy import Point3D, Segment3D
>>> p1, p2 = Point3D(0, 0, 0), Point3D(4, 3, 3)
>>> s1 = Segment3D(p1, p2)
>>> s1.midpoint
Point3D(2, 3/2, 3/2)
"""
return Point.midpoint(self.p1, self.p2)
    def perpendicular_bisector(self, p=None):
        """The perpendicular bisector of this segment.
        If no point is specified or the point specified is not on the
        bisector then the bisector is returned as a Line. Otherwise a
        Segment is returned that joins the point specified and the
        intersection of the bisector and the segment.
        Parameters
        ==========
        p : Point
        Returns
        =======
        bisector : Line or Segment
        See Also
        ========
        LinearEntity.perpendicular_segment
        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2, p3 = Point(0, 0), Point(6, 6), Point(5, 1)
        >>> s1 = Segment(p1, p2)
        >>> s1.perpendicular_bisector()
        Line2D(Point2D(3, 3), Point2D(-3, 9))
        >>> s1.perpendicular_bisector(p3)
        Segment2D(Point2D(5, 1), Point2D(3, 3))
        """
        # The bisector is the line through the midpoint, perpendicular to
        # the segment.
        l = self.perpendicular_line(self.midpoint)
        if p is not None:
            p2 = Point(p, dim=self.ambient_dimension)
            # Only return a Segment when p actually lies on the bisector;
            # otherwise fall through and return the full Line.
            if p2 in l:
                return Segment(p2, self.midpoint)
        return l
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Segment gives
values that will produce the full segment in a plot.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Segment
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> s1 = Segment(p1, p2)
>>> s1.plot_interval()
[t, 0, 1]
"""
t = _symbol(parameter, real=True)
return [t, 0, 1]
class LinearEntity2D(LinearEntity):
    """A base class for all linear entities (line, ray and segment)
    in a 2-dimensional Euclidean space.
    Attributes
    ==========
    p1
    p2
    coefficients
    slope
    points
    Notes
    =====
    This is an abstract class and is not meant to be instantiated.
    See Also
    ========
    sympy.geometry.entity.GeometryEntity
    """
    @property
    def bounds(self):
        """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
        rectangle for the geometric figure.
        """
        verts = self.points
        xs = [p.x for p in verts]
        ys = [p.y for p in verts]
        return (min(xs), min(ys), max(xs), max(ys))
    def perpendicular_line(self, p):
        """Create a new Line perpendicular to this linear entity which passes
        through the point `p`.
        Parameters
        ==========
        p : Point
        Returns
        =======
        line : Line
        See Also
        ========
        sympy.geometry.line.LinearEntity.is_perpendicular, perpendicular_segment
        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
        >>> l1 = Line(p1, p2)
        >>> l2 = l1.perpendicular_line(p3)
        >>> p3 in l2
        True
        >>> l1.is_perpendicular(l2)
        True
        """
        p = Point(p, dim=self.ambient_dimension)
        # any two lines in R^2 intersect, so blindly making
        # a line through p in an orthogonal direction will work
        return Line(p, p + self.direction.orthogonal_direction)
    @property
    def slope(self):
        """The slope of this linear entity, or infinity if vertical.
        Returns
        =======
        slope : number or SymPy expression
        See Also
        ========
        coefficients
        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2 = Point(0, 0), Point(3, 5)
        >>> l1 = Line(p1, p2)
        >>> l1.slope
        5/3
        >>> p3 = Point(0, 4)
        >>> l2 = Line(p1, p3)
        >>> l2.slope
        oo
        """
        # slope = dy/dx computed from the difference of the two points;
        # a zero x-difference means a vertical line.
        d1, d2 = (self.p1 - self.p2).args
        if d1 == 0:
            return S.Infinity
        return simplify(d2/d1)
class Line2D(LinearEntity2D, Line):
    """An infinite line in space 2D.
    A line is declared with two distinct points or a point and slope
    as defined using keyword `slope`.
    Parameters
    ==========
    p1 : Point
    pt : Point
    slope : SymPy expression
    See Also
    ========
    sympy.geometry.point.Point
    Examples
    ========
    >>> from sympy import Point
    >>> from sympy.geometry import Line, Segment
    >>> L = Line(Point(2,3), Point(3,5))
    >>> L
    Line2D(Point2D(2, 3), Point2D(3, 5))
    >>> L.points
    (Point2D(2, 3), Point2D(3, 5))
    >>> L.equation()
    -2*x + y + 1
    >>> L.coefficients
    (-2, 1, 1)
    Instantiate with keyword ``slope``:
    >>> Line(Point(0, 0), slope=0)
    Line2D(Point2D(0, 0), Point2D(1, 0))
    Instantiate with another linear object
    >>> s = Segment((0, 0), (0, 1))
    >>> Line(s).equation()
    x
    """
    def __new__(cls, p1, pt=None, slope=None, **kwargs):
        # A LinearEntity argument donates its own two defining points.
        if isinstance(p1, LinearEntity):
            if pt is not None:
                raise ValueError('When p1 is a LinearEntity, pt should be None')
            p1, pt = Point._normalize_dimension(*p1.args, dim=2)
        else:
            p1 = Point(p1, dim=2)
        if pt is not None and slope is None:
            try:
                p2 = Point(pt, dim=2)
            except (NotImplementedError, TypeError, ValueError):
                raise ValueError(filldedent('''
                    The 2nd argument was not a valid Point.
                    If it was a slope, enter it with keyword "slope".
                    '''))
        elif slope is not None and pt is None:
            # Build a second point from p1 and the given slope.
            slope = sympify(slope)
            if slope.is_finite is False:
                # when infinite slope, don't change x
                dx = 0
                dy = 1
            else:
                # go over 1 up slope
                dx = 1
                dy = slope
            # XXX avoiding simplification by adding to coords directly
            p2 = Point(p1.x + dx, p1.y + dy, evaluate=False)
        else:
            raise ValueError('A 2nd Point or keyword "slope" must be used.')
        return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
    def _svg(self, scale_factor=1., fill_color="#66cc99"):
        """Returns SVG path element for the LinearEntity.
        Parameters
        ==========
        scale_factor : float
            Multiplication factor for the SVG stroke-width.  Default is 1.
        fill_color : str, optional
            Hex string for fill color. Default is "#66cc99".
        """
        # N() evaluates the points numerically so the SVG coordinates
        # are plain floats.
        verts = (N(self.p1), N(self.p2))
        coords = ["{},{}".format(p.x, p.y) for p in verts]
        path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
        return (
            '<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
            'stroke-width="{0}" opacity="0.6" d="{1}" '
            'marker-start="url(#markerReverseArrow)" marker-end="url(#markerArrow)"/>'
            ).format(2.*scale_factor, path, fill_color)
    @property
    def coefficients(self):
        """The coefficients (`a`, `b`, `c`) for `ax + by + c = 0`.
        See Also
        ========
        sympy.geometry.line.Line2D.equation
        Examples
        ========
        >>> from sympy import Point, Line
        >>> from sympy.abc import x, y
        >>> p1, p2 = Point(0, 0), Point(5, 3)
        >>> l = Line(p1, p2)
        >>> l.coefficients
        (-3, 5, 0)
        >>> p3 = Point(x, y)
        >>> l2 = Line(p1, p3)
        >>> l2.coefficients
        (-y, x, 0)
        """
        p1, p2 = self.points
        # Vertical and horizontal lines get canonical (1, 0, -x) /
        # (0, 1, -y) forms; the general case uses the two-point formula.
        if p1.x == p2.x:
            return (S.One, S.Zero, -p1.x)
        elif p1.y == p2.y:
            return (S.Zero, S.One, -p1.y)
        return tuple([simplify(i) for i in
                      (self.p1.y - self.p2.y,
                       self.p2.x - self.p1.x,
                       self.p1.x*self.p2.y - self.p1.y*self.p2.x)])
    def equation(self, x='x', y='y'):
        """The equation of the line: ax + by + c.
        Parameters
        ==========
        x : str, optional
            The name to use for the x-axis, default value is 'x'.
        y : str, optional
            The name to use for the y-axis, default value is 'y'.
        Returns
        =======
        equation : SymPy expression
        See Also
        ========
        sympy.geometry.line.Line2D.coefficients
        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2 = Point(1, 0), Point(5, 3)
        >>> l1 = Line(p1, p2)
        >>> l1.equation()
        -3*x + 4*y + 3
        """
        x = _symbol(x, real=True)
        y = _symbol(y, real=True)
        p1, p2 = self.points
        # Short-circuit axis-parallel lines to a single-variable equation.
        if p1.x == p2.x:
            return x - p1.x
        elif p1.y == p2.y:
            return y - p1.y
        a, b, c = self.coefficients
        return a*x + b*y + c
class Ray2D(LinearEntity2D, Ray):
    """
    A Ray is a semi-line in the space with a source point and a direction.
    Parameters
    ==========
    p1 : Point
        The source of the Ray
    p2 : Point or radian value
        This point determines the direction in which the Ray propagates.
        If given as an angle it is interpreted in radians with the positive
        direction being ccw.
    Attributes
    ==========
    source
    xdirection
    ydirection
    See Also
    ========
    sympy.geometry.point.Point, Line
    Examples
    ========
    >>> from sympy import Point, pi
    >>> from sympy.geometry import Ray
    >>> r = Ray(Point(2, 3), Point(3, 5))
    >>> r
    Ray2D(Point2D(2, 3), Point2D(3, 5))
    >>> r.points
    (Point2D(2, 3), Point2D(3, 5))
    >>> r.source
    Point2D(2, 3)
    >>> r.xdirection
    oo
    >>> r.ydirection
    oo
    >>> r.slope
    2
    >>> Ray(Point(0, 0), angle=pi/4).slope
    1
    """
    def __new__(cls, p1, pt=None, angle=None, **kwargs):
        p1 = Point(p1, dim=2)
        if pt is not None and angle is None:
            try:
                p2 = Point(pt, dim=2)
            except (NotImplementedError, TypeError, ValueError):
                raise ValueError(filldedent('''
                    The 2nd argument was not a valid Point; if
                    it was meant to be an angle it should be
                    given with keyword "angle".'''))
            if p1 == p2:
                raise ValueError('A Ray requires two distinct points.')
        elif angle is not None and pt is None:
            # we need to know if the angle is an odd multiple of pi/2
            # pi_coeff extracts c from angle == c*pi, when possible
            c = pi_coeff(sympify(angle))
            p2 = None
            if c is not None:
                if c.is_Rational:
                    # Axis-aligned directions get exact unit offsets so no
                    # trig evaluation is needed.
                    if c.q == 2:
                        if c.p == 1:
                            p2 = p1 + Point(0, 1)
                        elif c.p == 3:
                            p2 = p1 + Point(0, -1)
                    elif c.q == 1:
                        if c.p == 0:
                            p2 = p1 + Point(1, 0)
                        elif c.p == 1:
                            p2 = p1 + Point(-1, 0)
                if p2 is None:
                    c *= S.Pi
            else:
                c = angle % (2*S.Pi)
            if not p2:
                # General case: pick a second point one "unit square" away
                # in the quadrant selected by the (possibly symbolic) angle.
                m = 2*c/S.Pi
                left = And(1 < m, m < 3)  # is it in quadrant 2 or 3?
                x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True))
                y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True))
                p2 = p1 + Point(x, y)
        else:
            raise ValueError('A 2nd point or keyword "angle" must be used.')
        return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
    @property
    def xdirection(self):
        """The x direction of the ray.
        Positive infinity if the ray points in the positive x direction,
        negative infinity if the ray points in the negative x direction,
        or 0 if the ray is vertical.
        See Also
        ========
        ydirection
        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, -1)
        >>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
        >>> r1.xdirection
        oo
        >>> r2.xdirection
        0
        """
        if self.p1.x < self.p2.x:
            return S.Infinity
        elif self.p1.x == self.p2.x:
            return S.Zero
        else:
            return S.NegativeInfinity
    @property
    def ydirection(self):
        """The y direction of the ray.
        Positive infinity if the ray points in the positive y direction,
        negative infinity if the ray points in the negative y direction,
        or 0 if the ray is horizontal.
        See Also
        ========
        xdirection
        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(-1, -1), Point(-1, 0)
        >>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
        >>> r1.ydirection
        -oo
        >>> r2.ydirection
        0
        """
        if self.p1.y < self.p2.y:
            return S.Infinity
        elif self.p1.y == self.p2.y:
            return S.Zero
        else:
            return S.NegativeInfinity
    def closing_angle(r1, r2):
        """Return the angle by which r2 must be rotated so it faces the same
        direction as r1.

        Note: ``r1`` here plays the role of ``self``.
        Parameters
        ==========
        r1 : Ray2D
        r2 : Ray2D
        Returns
        =======
        angle : angle in radians (ccw angle is positive)
        See Also
        ========
        LinearEntity.angle_between
        Examples
        ========
        >>> from sympy import Ray, pi
        >>> r1 = Ray((0, 0), (1, 0))
        >>> r2 = r1.rotate(-pi/2)
        >>> angle = r1.closing_angle(r2); angle
        pi/2
        >>> r2.rotate(angle).direction.unit == r1.direction.unit
        True
        >>> r2.closing_angle(r1)
        -pi/2
        """
        if not all(isinstance(r, Ray2D) for r in (r1, r2)):
            # although the direction property is defined for
            # all linear entities, only the Ray is truly a
            # directed object
            raise TypeError('Both arguments must be Ray2D objects.')
        # atan2(y, x) of each direction vector gives each ray's heading
        a1 = atan2(*list(reversed(r1.direction.args)))
        a2 = atan2(*list(reversed(r2.direction.args)))
        if a1*a2 < 0:
            # normalize both angles into [0, 2*pi) when their signs differ
            a1 = 2*S.Pi + a1 if a1 < 0 else a1
            a2 = 2*S.Pi + a2 if a2 < 0 else a2
        return a1 - a2
class Segment2D(LinearEntity2D, Segment):
    """A line segment in 2D space.
    Parameters
    ==========
    p1 : Point
    p2 : Point
    Attributes
    ==========
    length : number or SymPy expression
    midpoint : Point
    See Also
    ========
    sympy.geometry.point.Point, Line
    Examples
    ========
    >>> from sympy import Point
    >>> from sympy.geometry import Segment
    >>> Segment((1, 0), (1, 1)) # tuples are interpreted as pts
    Segment2D(Point2D(1, 0), Point2D(1, 1))
    >>> s = Segment(Point(4, 3), Point(1, 1)); s
    Segment2D(Point2D(4, 3), Point2D(1, 1))
    >>> s.points
    (Point2D(4, 3), Point2D(1, 1))
    >>> s.slope
    2/3
    >>> s.length
    sqrt(13)
    >>> s.midpoint
    Point2D(5/2, 2)
    """
    def __new__(cls, p1, p2, **kwargs):
        p1 = Point(p1, dim=2)
        p2 = Point(p2, dim=2)
        # A degenerate segment collapses to the point itself.
        if p1 == p2:
            return p1
        return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
    def _svg(self, scale_factor=1., fill_color="#66cc99"):
        """Returns SVG path element for the LinearEntity.
        Parameters
        ==========
        scale_factor : float
            Multiplication factor for the SVG stroke-width.  Default is 1.
        fill_color : str, optional
            Hex string for fill color. Default is "#66cc99".
        """
        # N() evaluates the endpoints numerically for plain-float SVG coords.
        verts = (N(self.p1), N(self.p2))
        coords = ["{},{}".format(p.x, p.y) for p in verts]
        path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
        return (
            '<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
            'stroke-width="{0}" opacity="0.6" d="{1}" />'
            ).format(2.*scale_factor, path, fill_color)
class LinearEntity3D(LinearEntity):
    """An base class for all linear entities (line, ray and segment)
    in a 3-dimensional Euclidean space.
    Attributes
    ==========
    p1
    p2
    direction_ratio
    direction_cosine
    points
    Notes
    =====
    This is a base class and is not meant to be instantiated.
    """
    def __new__(cls, p1, p2, **kwargs):
        p1 = Point3D(p1, dim=3)
        p2 = Point3D(p2, dim=3)
        if p1 == p2:
            # if it makes sense to return a Point, handle in subclass
            raise ValueError(
                "%s.__new__ requires two unique Points." % cls.__name__)
        return GeometryEntity.__new__(cls, p1, p2, **kwargs)
    # All 3D linear entities live in a 3-dimensional ambient space.
    ambient_dimension = 3
    @property
    def direction_ratio(self):
        """The direction ratio of a given line in 3D.
        See Also
        ========
        sympy.geometry.line.Line3D.equation
        Examples
        ========
        >>> from sympy import Point3D, Line3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
        >>> l = Line3D(p1, p2)
        >>> l.direction_ratio
        [5, 3, 1]
        """
        p1, p2 = self.points
        return p1.direction_ratio(p2)
    @property
    def direction_cosine(self):
        """The normalized direction ratio of a given line in 3D.
        See Also
        ========
        sympy.geometry.line.Line3D.equation
        Examples
        ========
        >>> from sympy import Point3D, Line3D
        >>> p1, p2 = Point3D(0, 0, 0), Point3D(5, 3, 1)
        >>> l = Line3D(p1, p2)
        >>> l.direction_cosine
        [sqrt(35)/7, 3*sqrt(35)/35, sqrt(35)/35]
        >>> sum(i**2 for i in _)
        1
        """
        p1, p2 = self.points
        return p1.direction_cosine(p2)
class Line3D(LinearEntity3D, Line):
    """An infinite 3D line in space.
    A line is declared with two distinct points or a point and direction_ratio
    as defined using keyword `direction_ratio`.
    Parameters
    ==========
    p1 : Point3D
    pt : Point3D
    direction_ratio : list
    See Also
    ========
    sympy.geometry.point.Point3D
    sympy.geometry.line.Line
    sympy.geometry.line.Line2D
    Examples
    ========
    >>> from sympy import Point3D
    >>> from sympy.geometry import Line3D
    >>> L = Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
    >>> L
    Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
    >>> L.points
    (Point3D(2, 3, 4), Point3D(3, 5, 1))
    """
    def __new__(cls, p1, pt=None, direction_ratio=(), **kwargs):
        # A LinearEntity3D argument donates its own two defining points.
        if isinstance(p1, LinearEntity3D):
            if pt is not None:
                raise ValueError('if p1 is a LinearEntity, pt must be None.')
            p1, pt = p1.args
        else:
            p1 = Point(p1, dim=3)
        if pt is not None and len(direction_ratio) == 0:
            pt = Point(pt, dim=3)
        elif len(direction_ratio) == 3 and pt is None:
            # Build the second point by offsetting p1 along the ratio.
            pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
                         p1.z + direction_ratio[2])
        else:
            raise ValueError('A 2nd Point or keyword "direction_ratio" must '
                             'be used.')
        return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
    def equation(self, x='x', y='y', z='z', k=None):
        """Return the equations that define the line in 3D.
        Parameters
        ==========
        x : str, optional
            The name to use for the x-axis, default value is 'x'.
        y : str, optional
            The name to use for the y-axis, default value is 'y'.
        z : str, optional
            The name to use for the z-axis, default value is 'z'.
        Returns
        =======
        equation : Tuple of simultaneous equations
        Examples
        ========
        >>> from sympy import Point3D, Line3D, solve
        >>> from sympy.abc import x, y, z
        >>> p1, p2 = Point3D(1, 0, 0), Point3D(5, 3, 0)
        >>> l1 = Line3D(p1, p2)
        >>> eq = l1.equation(x, y, z); eq
        (-3*x + 4*y + 3, z)
        >>> solve(eq.subs(z, 0), (x, y, z))
        {x: 4*y/3 + 1}
        """
        # 'k' is a deprecated parameter; passing it only triggers a warning.
        if k is not None:
            SymPyDeprecationWarning(
                            feature="equation() no longer needs 'k'",
                            issue=13742,
                            deprecated_since_version="1.2").warn()
        from sympy.solvers.solvers import solve
        x, y, z, k = [_symbol(i, real=True) for i in (x, y, z, 'k')]
        p1, p2 = self.points
        d1, d2, d3 = p1.direction_ratio(p2)
        x1, y1, z1 = p1
        # Parametric form: (x, y, z) = p1 + k*(d1, d2, d3)
        eqs = [-d1*k + x - x1, -d2*k + y - y1, -d3*k + z - z1]
        # eliminate k from equations by solving first eq with k for k
        for i, e in enumerate(eqs):
            if e.has(k):
                kk = solve(eqs[i], k)[0]
                eqs.pop(i)
                break
        return Tuple(*[i.subs(k, kk).as_numer_denom()[0] for i in eqs])
class Ray3D(LinearEntity3D, Ray):
    """
    A Ray is a semi-line in the space with a source point and a direction.
    Parameters
    ==========
    p1 : Point3D
        The source of the Ray
    p2 : Point or a direction vector
    direction_ratio: Determines the direction in which the Ray propagates.
    Attributes
    ==========
    source
    xdirection
    ydirection
    zdirection
    See Also
    ========
    sympy.geometry.point.Point3D, Line3D
    Examples
    ========
    >>> from sympy import Point3D
    >>> from sympy.geometry import Ray3D
    >>> r = Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r
    Ray3D(Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r.points
    (Point3D(2, 3, 4), Point3D(3, 5, 0))
    >>> r.source
    Point3D(2, 3, 4)
    >>> r.xdirection
    oo
    >>> r.ydirection
    oo
    >>> r.direction_ratio
    [1, 2, -4]
    """
    def __new__(cls, p1, pt=None, direction_ratio=(), **kwargs):
        # A LinearEntity3D argument donates its own two defining points.
        if isinstance(p1, LinearEntity3D):
            if pt is not None:
                raise ValueError('If p1 is a LinearEntity, pt must be None')
            p1, pt = p1.args
        else:
            p1 = Point(p1, dim=3)
        if pt is not None and len(direction_ratio) == 0:
            pt = Point(pt, dim=3)
        elif len(direction_ratio) == 3 and pt is None:
            # Build the second point by offsetting p1 along the ratio.
            pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
                         p1.z + direction_ratio[2])
        else:
            raise ValueError(filldedent('''
                A 2nd Point or keyword "direction_ratio" must be used.
            '''))
        return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
    @property
    def xdirection(self):
        """The x direction of the ray.
        Positive infinity if the ray points in the positive x direction,
        negative infinity if the ray points in the negative x direction,
        or 0 if the ray is vertical.
        See Also
        ========
        ydirection
        Examples
        ========
        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, -1, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.xdirection
        oo
        >>> r2.xdirection
        0
        """
        if self.p1.x < self.p2.x:
            return S.Infinity
        elif self.p1.x == self.p2.x:
            return S.Zero
        else:
            return S.NegativeInfinity
    @property
    def ydirection(self):
        """The y direction of the ray.
        Positive infinity if the ray points in the positive y direction,
        negative infinity if the ray points in the negative y direction,
        or 0 if the ray is horizontal.
        See Also
        ========
        xdirection
        Examples
        ========
        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.ydirection
        -oo
        >>> r2.ydirection
        0
        """
        if self.p1.y < self.p2.y:
            return S.Infinity
        elif self.p1.y == self.p2.y:
            return S.Zero
        else:
            return S.NegativeInfinity
    @property
    def zdirection(self):
        """The z direction of the ray.
        Positive infinity if the ray points in the positive z direction,
        negative infinity if the ray points in the negative z direction,
        or 0 if the ray is parallel to the xy-plane.
        See Also
        ========
        xdirection, ydirection
        Examples
        ========
        >>> from sympy import Point3D, Ray3D
        >>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(-1, -1, -1), Point3D(-1, 0, 0)
        >>> r1, r2 = Ray3D(p1, p2), Ray3D(p1, p3)
        >>> r1.zdirection
        -oo
        >>> r2.zdirection
        0
        """
        if self.p1.z < self.p2.z:
            return S.Infinity
        elif self.p1.z == self.p2.z:
            return S.Zero
        else:
            return S.NegativeInfinity
class Segment3D(LinearEntity3D, Segment):
    """A line segment in a 3D space.
    Parameters
    ==========
    p1 : Point3D
    p2 : Point3D
    Attributes
    ==========
    length : number or SymPy expression
    midpoint : Point3D
    See Also
    ========
    sympy.geometry.point.Point3D, Line3D
    Examples
    ========
    >>> from sympy import Point3D
    >>> from sympy.geometry import Segment3D
    >>> Segment3D((1, 0, 0), (1, 1, 1)) # tuples are interpreted as pts
    Segment3D(Point3D(1, 0, 0), Point3D(1, 1, 1))
    >>> s = Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7)); s
    Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7))
    >>> s.points
    (Point3D(4, 3, 9), Point3D(1, 1, 7))
    >>> s.length
    sqrt(17)
    >>> s.midpoint
    Point3D(5/2, 2, 8)
    """
    def __new__(cls, p1, p2, **kwargs):
        # Coerce both arguments (tuples, sequences, Points) to 3D Points.
        p1, p2 = (Point(p, dim=3) for p in (p1, p2))
        # A degenerate segment collapses to the point itself.
        if p1 == p2:
            return p1
        return LinearEntity3D.__new__(cls, p1, p2, **kwargs)
| 27.689
| 112
| 0.51131
|
acfc706c38f0803c0975c29507a35f5e88215bb1
| 13,638
|
py
|
Python
|
nova/virt/libvirt/firewall.py
|
bopopescu/nova_audit
|
1cd2901802f82d39411adfa04cf2f432ff3bf280
|
[
"Apache-2.0"
] | 1
|
2020-02-21T19:19:11.000Z
|
2020-02-21T19:19:11.000Z
|
nova/virt/libvirt/firewall.py
|
bopopescu/nova_audit
|
1cd2901802f82d39411adfa04cf2f432ff3bf280
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/libvirt/firewall.py
|
bopopescu/nova_audit
|
1cd2901802f82d39411adfa04cf2f432ff3bf280
|
[
"Apache-2.0"
] | 1
|
2020-07-24T09:44:17.000Z
|
2020-07-24T09:44:17.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import tpool
from oslo.config import cfg
from nova.cloudpipe import pipelib
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
from nova.virt import netutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
# Lazily imported in NWFilterFirewall.__init__ so this module can still be
# loaded on hosts without the libvirt Python bindings.
libvirt = None
class NWFilterFirewall(base_firewall.FirewallDriver):
    """
    This class implements a network filtering mechanism by using
    libvirt's nwfilter.
    all instances get a filter ("nova-base") applied. This filter
    provides some basic security such as protection against MAC
    spoofing, IP spoofing, and ARP spoofing.
    """

    def __init__(self, virtapi, get_connection, **kwargs):
        super(NWFilterFirewall, self).__init__(virtapi)
        global libvirt
        if libvirt is None:
            try:
                libvirt = __import__('libvirt')
            except ImportError:
                # Deliberately non-fatal: the driver degrades rather than
                # preventing the module from importing.
                LOG.warn(_("Libvirt module could not be loaded. "
                           "NWFilterFirewall will not work correctly."))
        # Callable returning a (possibly tpool-proxied) libvirt connection.
        self._libvirt_get_connection = get_connection
        # True once the one-time static filter set has been defined.
        self.static_filters_configured = False
        self.handle_security_groups = False

    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter."""
        pass

    def _get_connection(self):
        return self._libvirt_get_connection()
    # Property so each use re-fetches a fresh/valid connection.
    _conn = property(_get_connection)

    @staticmethod
    def nova_no_nd_reflection_filter():
        """
        This filter protects false positives on IPv6 Duplicate Address
        Detection(DAD).
        """
        return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
                  <!-- no nd reflection -->
                  <!-- drop if destination mac is v6 mcast mac addr and
                       we sent it. -->
                  <rule action='drop' direction='in'>
                      <mac dstmacaddr='33:33:00:00:00:00'
                           dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
                  </rule>
                  </filter>'''

    @staticmethod
    def nova_dhcp_filter():
        """The standard allow-dhcp-server filter is an <ip> one, so it uses
           ebtables to allow traffic through. Without a corresponding rule in
           iptables, it'll get blocked anyway."""
        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
                    <rule action='accept' direction='out'
                          priority='100'>
                      <udp srcipaddr='0.0.0.0'
                           dstipaddr='255.255.255.255'
                           srcportstart='68'
                           dstportstart='67'/>
                    </rule>
                    <rule action='accept' direction='in'
                          priority='100'>
                      <udp srcipaddr='$DHCPSERVER'
                           srcportstart='67'
                           dstportstart='68'/>
                    </rule>
                  </filter>'''

    def setup_basic_filtering(self, instance, network_info):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
        LOG.info(_('Called setup_basic_filtering in nwfilter'),
                 instance=instance)

        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return

        LOG.info(_('Ensuring static filters'), instance=instance)
        self._ensure_static_filters()

        # DHCP is allowed as soon as any subnet on any VIF has a DHCP server.
        allow_dhcp = False
        for vif in network_info:
            if not vif['network'] or not vif['network']['subnets']:
                continue
            for subnet in vif['network']['subnets']:
                if subnet.get_meta('dhcp_server'):
                    allow_dhcp = True
                    break

        base_filter = self.get_base_filter_list(instance, allow_dhcp)
        # One per-NIC filter referencing the shared base filters.
        for vif in network_info:
            self._define_filter(self._get_instance_filter_xml(instance,
                                                              base_filter,
                                                              vif))

    def _get_instance_filter_parameters(self, vif):
        # Builds the <parameter .../> elements substituted into the
        # filterref ($IP, $DHCPSERVER, $RASERVER, $PROJNET...).
        parameters = []

        def format_parameter(parameter, value):
            return ("<parameter name='%s' value='%s'/>" % (parameter, value))

        network = vif['network']
        if not vif['network'] or not vif['network']['subnets']:
            return parameters

        v4_subnets = [s for s in network['subnets'] if s['version'] == 4]
        v6_subnets = [s for s in network['subnets'] if s['version'] == 6]

        for subnet in v4_subnets:
            for ip in subnet['ips']:
                parameters.append(format_parameter('IP', ip['address']))

            dhcp_server = subnet.get_meta('dhcp_server')
            if dhcp_server:
                parameters.append(format_parameter('DHCPSERVER', dhcp_server))

        if CONF.use_ipv6:
            for subnet in v6_subnets:
                gateway = subnet.get('gateway')
                if gateway:
                    # RA traffic is matched against the gateway's /128.
                    ra_server = gateway['address'] + "/128"
                    parameters.append(format_parameter('RASERVER', ra_server))
        if CONF.allow_same_net_traffic:
            for subnet in v4_subnets:
                ipv4_cidr = subnet['cidr']
                net, mask = netutils.get_net_and_mask(ipv4_cidr)
                parameters.append(format_parameter('PROJNET', net))
                parameters.append(format_parameter('PROJMASK', mask))
            if CONF.use_ipv6:
                for subnet in v6_subnets:
                    ipv6_cidr = subnet['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    parameters.append(format_parameter('PROJNET6', net))
                    parameters.append(format_parameter('PROJMASK6', prefix))
        return parameters

    def _get_instance_filter_xml(self, instance, filters, vif):
        # Per-NIC filter name is derived from the MAC with ':' stripped.
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self._instance_filter_name(instance, nic_id)
        parameters = self._get_instance_filter_parameters(vif)
        xml = '''<filter name='%s' chain='root'>''' % instance_filter_name
        for f in filters:
            xml += '''<filterref filter='%s'>''' % f
            xml += ''.join(parameters)
            xml += '</filterref>'
        xml += '</filter>'
        return xml

    def get_base_filter_list(self, instance, allow_dhcp):
        """
        Obtain a list of base filters to apply to an instance.
        The return value should be a list of strings, each
        specifying a filter name.  Subclasses can override this
        function to add additional filters as needed.  Additional
        filters added to the list must also be correctly defined
        within the subclass.
        """
        if pipelib.is_vpn_image(instance['image_ref']):
            base_filter = 'nova-vpn'
        elif allow_dhcp:
            base_filter = 'nova-base'
        else:
            base_filter = 'nova-nodhcp'
        return [base_filter]

    def _ensure_static_filters(self):
        """Static filters are filters that have no need to be IP aware.
        There is no configuration or tuneability of these filters, so they
        can be set up once and forgotten about.
        """
        if self.static_filters_configured:
            return

        filter_set = ['no-mac-spoofing',
                      'no-ip-spoofing',
                      'no-arp-spoofing']
        # nova-no-nd-reflection is defined first so the containers below
        # can reference it.
        self._define_filter(self.nova_no_nd_reflection_filter)
        filter_set.append('nova-no-nd-reflection')
        self._define_filter(self._filter_container('nova-nodhcp', filter_set))
        filter_set.append('allow-dhcp-server')
        self._define_filter(self._filter_container('nova-base', filter_set))
        self._define_filter(self._filter_container('nova-vpn',
                                                   ['allow-dhcp-server']))
        self._define_filter(self.nova_dhcp_filter)

        self.static_filters_configured = True

    def _filter_container(self, name, filters):
        # A container filter just references other filters by name.
        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
                 name,
                 ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
        return xml

    def _define_filter(self, xml):
        # Accepts either an XML string or a zero-arg callable producing one.
        if callable(xml):
            xml = xml()
        # execute in a native thread and block current greenthread until done
        if not CONF.libvirt_nonblocking:
            # NOTE(maoy): the original implementation is to have the API called
            # in the thread pool no matter what.
            tpool.execute(self._conn.nwfilterDefineXML, xml)
        else:
            # NOTE(maoy): self._conn is an eventlet.tpool.Proxy object
            self._conn.nwfilterDefineXML(xml)

    def unfilter_instance(self, instance, network_info):
        """Clear out the nwfilter rules."""
        instance_name = instance['name']
        for vif in network_info:
            nic_id = vif['address'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)

            try:
                _nw = self._conn.nwfilterLookupByName(instance_filter_name)
                _nw.undefine()
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # This happens when the instance filter is still in
                    # use (ie. when the instance has not terminated properly)
                    raise
                # Any other error (typically "not found") is logged and
                # ignored: nothing to undefine.
                LOG.debug(_('The nwfilter(%s) is not found.'),
                          instance_filter_name, instance=instance)

    def _define_filters(self, filter_name, filter_children):
        self._define_filter(self._filter_container(filter_name,
                                                   filter_children))

    @staticmethod
    def _instance_filter_name(instance, nic_id=None):
        # nic_id (MAC without colons) makes the name unique per NIC.
        if not nic_id:
            return 'nova-instance-%s' % (instance['name'])
        return 'nova-instance-%s-%s' % (instance['name'], nic_id)

    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists."""
        for vif in network_info:
            nic_id = vif['address'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            try:
                self._conn.nwfilterLookupByName(instance_filter_name)
            except libvirt.libvirtError:
                name = instance['name']
                LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
                            '%(name)s is not found.'),
                          {'instance_filter_name': instance_filter_name,
                           'name': name},
                          instance=instance)
                return False
        return True
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
    # Combines the generic iptables driver with a libvirt NWFilter layer
    # for the basic anti-spoofing filters.
    def __init__(self, virtapi, execute=None, **kwargs):
        super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
        self.nwfilter = NWFilterFirewall(virtapi, kwargs['get_connection'])

    def setup_basic_filtering(self, instance, network_info):
        """Set up provider rules and basic NWFilter."""
        self.nwfilter.setup_basic_filtering(instance, network_info)
        # self.basically_filtered presumably comes from the base class --
        # it gates the one-time provider-rule refresh; TODO confirm.
        if not self.basically_filtered:
            LOG.debug(_('iptables firewall: Setup Basic Filtering'),
                      instance=instance)
            self.refresh_provider_fw_rules()
            self.basically_filtered = True

    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter."""
        pass

    def unfilter_instance(self, instance, network_info):
        # NOTE(salvatore-orlando):
        # Overriding base class method for applying nwfilter operation
        if self.instances.pop(instance['id'], None):
            # NOTE(vish): use the passed info instead of the stored info
            self.network_infos.pop(instance['id'])
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
            self.nwfilter.unfilter_instance(instance, network_info)
        else:
            LOG.info(_('Attempted to unfilter instance which is not '
                       'filtered'), instance=instance)

    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists."""
        return self.nwfilter.instance_filter_exists(instance, network_info)
| 41.078313
| 79
| 0.595615
|
acfc710f4e804b443c9997a4d22aef945b7591cb
| 54,981
|
py
|
Python
|
hoggorm/plsr1.py
|
Mohamed0gad/hoggorm
|
4debdb49a8d1d8858abb783be2ad67ffc96fd3ab
|
[
"BSD-2-Clause"
] | null | null | null |
hoggorm/plsr1.py
|
Mohamed0gad/hoggorm
|
4debdb49a8d1d8858abb783be2ad67ffc96fd3ab
|
[
"BSD-2-Clause"
] | null | null | null |
hoggorm/plsr1.py
|
Mohamed0gad/hoggorm
|
4debdb49a8d1d8858abb783be2ad67ffc96fd3ab
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Import necessary modules
import numpy as np
import numpy.linalg as npla
import hoggorm.statTools as st
import hoggorm.cross_val as cv
class nipalsPLS1:
"""
This class carries out partial least squares regression (PLSR) for two
arrays using NIPALS algorithm. The y array is univariate, which is why
PLS1 is applied.
PARAMETERS
----------
arrX : numpy array
This is X in the PLS1 model. Number and order of objects (rows) must match those of ``arrY``.
vecy : numpy array
This is y in the PLS1 model. Number and order of objects (rows) must match those of ``arrX``.
numComp : int, optional
An integer that defines how many components are to be computed. If not provided, the maximum possible number of components is used.
Xstand : boolean, optional
Defines whether variables in ``arrX`` are to be standardised/scaled or centered.
False : columns of ``arrX`` are mean centred (default)
``Xstand = False``
        True : columns of ``arrX`` are mean centred and divided by their own standard deviation
``Xstand = True``
Ystand : boolean, optional
Defines whether ``vecy`` is to be standardised/scaled or centered.
False : ``vecy`` is to be mean centred (default)
``Ystand = False``
        True : ``vecy`` is to be mean centred and divided by its own standard deviation
``Ystand = True``
cvType : list, optional
The list defines cross validation settings when computing the PCA model. Note if `cvType` is not provided, cross validation will not be performed and as such cross validation results will not be available. Choose cross validation type from the following:
loo : leave one out / a.k.a. full cross validation (default)
``cvType = ["loo"]``
KFold : leave out one fold or segment
``cvType = ["KFold", numFolds]``
numFolds: int
Number of folds or segments
lolo : leave one label out
``cvType = ["lolo", labelsList]``
labelsList: list
                Sequence of labels. Must be same length as number of rows in ``arrX`` and ``arrY``. Leaves out objects with same label.
RETURNS
-------
class
A class that contains the PLS1 model and computational results
EXAMPLES
--------
    First import the hoggorm package
>>> import hoggorm as ho
Import your data into a numpy array.
>>> np.shape(my_X_data)
(14, 292)
>>> np.shape(my_y_data)
(14, 1)
Examples of how to compute a PLS1 model using different settings for the input parameters.
>>> model = ho.nipalsPLS1(arrX=my_X_data, vecy=my_y_data, numComp=5)
>>> model = ho.nipalsPLS1(arrX=my_X_data, vecy=my_y_data)
>>> model = ho.nipalsPLS1(arrX=my_X_data, vecy=my_y_data, numComp=3, Ystand=True)
>>> model = ho.nipalsPLS1(arrX=my_X_data, vecy=my_y_data, Xstand=False, Ystand=True)
>>> model = ho.nipalsPLS1(arrX=my_X_data, vecy=my_y_data, cvType=["loo"])
>>> model = ho.nipalsPLS1(arrX=my_X_data, vecy=my_y_data, cvType=["KFold", 7])
    >>> model = ho.nipalsPLS1(arrX=my_X_data, vecy=my_y_data, cvType=["lolo", [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]])
    Examples of how to extract results from the PLS1 model.
>>> X_scores = model.X_scores()
>>> X_loadings = model.X_loadings()
>>> y_loadings = model.Y_loadings()
>>> X_cumulativeCalibratedExplainedVariance_allVariables = model.X_cumCalExplVar_indVar()
>>> Y_cumulativeValidatedExplainedVariance_total = model.Y_cumCalExplVar()
"""
    def __init__(self, arrX, vecy, numComp=3, Xstand=False, Ystand=False, cvType=["loo"]):
        """
        Fit the PLS1 model.

        Pre-processes X and y as requested (centring or autoscaling), caps
        the number of components at what the data and the chosen
        cross-validation scheme allow, runs the NIPALS PLS1 algorithm on the
        full data, and - unless ``cvType`` is None - refits the model on
        every cross-validation segment to collect validated results.

        All results are stored as instance attributes and exposed through
        the accessor methods of this class.

        NOTE(review): the class docstring says the maximum possible number
        of components is used when ``numComp`` is not provided, but the
        actual default is 3; only an explicit ``numComp=None`` triggers the
        maximum. Also note the mutable default ``cvType=["loo"]`` is shared
        across calls; it is only read here, never mutated.
        """
        # ===============================================================================
        #         Check what is provided by user for PLS1
        # ===============================================================================
        # Define X and y within class such that the data can be accessed from
        # all attributes in class.
        self.arrX_input = arrX
        self.vecy_input = vecy
        # Check whether cvType is provided. If NOT (i.e. cvType is None), no
        # cross validation is carried out.
        self.cvType = cvType
        # Define maximum number of components to compute depending on whether
        # cross validation was selected or not.
        if isinstance(self.cvType, type(None)):
            maxNumPC = min(np.shape(self.arrX_input))
        else:
            # Depict the number of components that are possible to compute based
            # on size of data set (#rows, #cols), type of cross validation (i.e.
            # size of CV segments)
            numObj = np.shape(self.arrX_input)[0]
            # Compute the sizes of training sets in CV
            if self.cvType[0] == "loo":
                cvComb = cv.LeaveOneOut(numObj)
            elif self.cvType[0] == "KFold":
                cvComb = cv.KFold(numObj, k=self.cvType[1])
            elif self.cvType[0] == "lolo":
                cvComb = cv.LeaveOneLabelOut(self.cvType[1])
            else:
                # NOTE(review): an unknown cvType only prints a message here;
                # cvComb is then undefined and a NameError follows below.
                print('Requested form of cross validation is not available')
                pass
            # First devide into combinations of training and test sets. Collect
            # sizes of training sets, since this also may limit the number of
            # components that can be computed.
            segSizes = []
            for train_index, test_index in cvComb:
                x_train, x_test = cv.split(train_index, test_index, self.arrX_input)
                y_train, y_test = cv.split(train_index, test_index, self.vecy_input)
                # NOTE(review): assumes sum(train_index) equals the
                # training-set size — verify against hoggorm.cross_val.
                segSizes.append(numObj - sum(train_index))
            # Compute the max number of components based on only object size
            maxN = numObj - max(segSizes) - 1
            # Choose whatever is smaller, number of variables or maxN
            # NOTE(review): uses the raw parameter arrX here rather than
            # self.arrX_input; they are the same object, so behaviour is
            # unchanged.
            maxNumPC = min(np.shape(arrX)[1], maxN)
        # Now set the number of components that is possible to compute.
        if numComp is None:
            self.numPC = maxNumPC
        else:
            if numComp > maxNumPC:
                self.numPC = maxNumPC
            else:
                self.numPC = numComp
        # Pre-process data according to user request.
        # -------------------------------------------
        # Check whether standardisation of X and Y are requested by user. If
        # NOT, then X and y are centred by default.
        self.Xstand = Xstand
        self.ystand = Ystand
        # Standardise X if requested by user, otherwise center X.
        if self.Xstand:
            Xmeans = np.average(self.arrX_input, axis=0)
            Xstd = np.std(self.arrX_input, axis=0, ddof=1)
            self.arrX = (self.arrX_input - Xmeans) / Xstd
        else:
            Xmeans = np.average(self.arrX_input, axis=0)
            self.arrX = self.arrX_input - Xmeans
        # Standardise Y if requested by user, otherwise center Y.
        if self.ystand:
            vecyMean = np.average(self.vecy_input)
            yStd = np.std(self.vecy_input, ddof=1)
            self.vecy = (self.vecy_input - vecyMean) / yStd
        else:
            vecyMean = np.average(self.vecy_input)
            self.vecy = self.vecy_input - vecyMean
        # Before PLS1 NIPALS algorithm starts initiate dictionaries and lists
        # in which results are stored.
        self.x_scoresList = []
        self.x_loadingsList = []
        self.y_scoresList = []
        self.y_loadingsList = []
        self.x_loadingWeightsList = []
        self.coeffList = []
        self.Y_residualsList = [self.vecy]
        self.X_residualsList = [self.arrX]
        # ===============================================================================
        # Here PLS1 NIPALS algorithm starts
        # ===============================================================================
        X_new = self.arrX.copy()
        y_new = self.vecy.copy()
        # Compute j number of components
        for j in range(self.numPC):
            # Module 7: STEP 1 — loading weights w = X'y / ||X'y||
            w_num = np.dot(np.transpose(X_new), y_new)
            w_denom = npla.norm(w_num)
            w = w_num / w_denom
            # Module 7: STEP 2 — X scores t = Xw
            t = np.dot(X_new, w)
            # Module 7: STEP 3
            # NOTE: c_hat (in Module 7 paper) = q (here in code) ==> Yloadings
            q_num = np.dot(np.transpose(t), y_new)
            q_denom = np.dot(np.transpose(t), t)
            q = q_num / q_denom
            # Module 7: STEP 4 — X loadings p = X't / t't
            p_num = np.dot(np.transpose(X_new), t)
            p_denom = np.dot(np.transpose(t), t)
            p = p_num / p_denom
            # Module 7: STEP 5 — deflate X and y by the current component
            X_old = X_new.copy()
            X_new = X_old - np.dot(t, np.transpose(p))
            y_old = y_new.copy()
            y_new = y_old - t*q
            # Collect vectors t, p, u, q, w and
            self.x_scoresList.append(t.reshape(-1))
            self.x_loadingsList.append(p.reshape(-1))
            self.y_loadingsList.append(q.reshape(-1))
            self.x_loadingWeightsList.append(w.reshape(-1))
            # Collect residuals
            self.Y_residualsList.append(y_new)
            self.X_residualsList.append(X_new)
        # Construct T, P, U, Q and W from lists of vectors
        self.arrT = np.array(np.transpose(self.x_scoresList))
        self.arrP = np.array(np.transpose(self.x_loadingsList))
        self.arrQ = np.array(np.transpose(self.y_loadingsList))
        self.arrW = np.array(np.transpose(self.x_loadingWeightsList))
        # ========== COMPUTATIONS FOR X ==========
        # ---------------------------------------------------------------------
        # Create a list holding arrays of Xhat predicted calibration after each
        # component. Xhat is computed with Xhat = T*P'
        self.calXpredList = []
        # Compute Xhat for 1 and more components (cumulatively).
        for ind in range(1,self.numPC+1):
            part_arrT = self.arrT[:,0:ind]
            part_arrP = self.arrP[:,0:ind]
            predXcal = np.dot(part_arrT, np.transpose(part_arrP))
            # Undo the pre-processing so Xhat lives in the original units.
            if self.Xstand:
                Xhat = (predXcal * Xstd) + Xmeans
            else:
                Xhat = predXcal + Xmeans
            self.calXpredList.append(Xhat)
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Construct a dictionary that holds predicted X (Xhat) from calibration
        # for each number of components.
        self.calXpredDict = {}
        for ind, item in enumerate(self.calXpredList):
            self.calXpredDict[ind+1] = item
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Collect all PRESSE for individual variables in a dictionary.
        # Keys represent number of component.
        self.PRESSEdict_indVar_X = {}
        # Compute PRESS for calibration / estimation with zero components
        PRESSE_0_indVar_X = np.sum(np.square(st.center(self.arrX_input)), axis=0)
        self.PRESSEdict_indVar_X[0] = PRESSE_0_indVar_X
        # Compute PRESS for each Xhat for 1, 2, 3, etc number of components
        # and compute explained variance
        for ind, Xhat in enumerate(self.calXpredList):
            diffX = self.arrX_input - Xhat
            PRESSE_indVar_X = np.sum(np.square(diffX), axis=0)
            self.PRESSEdict_indVar_X[ind+1] = PRESSE_indVar_X
        # Now store all PRESSE values into an array. Then compute MSEE and
        # RMSEE.
        self.PRESSEarr_indVar_X = np.array(list(self.PRESSEdict_indVar_X.values()))
        self.MSEEarr_indVar_X = self.PRESSEarr_indVar_X / np.shape(self.arrX_input)[0]
        self.RMSEEarr_indVar_X = np.sqrt(self.MSEEarr_indVar_X)
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Compute explained variance for each variable in X using the
        # MSEE for each variable. Also collect PRESSE, MSEE, RMSEE in
        # their respective dictionaries for each variable. Keys represent
        # now variables and NOT components as above with
        # self.PRESSEdict_indVar_X
        self.cumCalExplVarXarr_indVar = np.zeros(np.shape(self.MSEEarr_indVar_X))
        MSEE_0_indVar_X = self.MSEEarr_indVar_X[0,:]
        for ind, MSEE_indVar_X in enumerate(self.MSEEarr_indVar_X):
            explVar = (MSEE_0_indVar_X - MSEE_indVar_X) / MSEE_0_indVar_X * 100
            self.cumCalExplVarXarr_indVar[ind] = explVar
        self.PRESSE_indVar_X = {}
        self.MSEE_indVar_X = {}
        self.RMSEE_indVar_X = {}
        self.cumCalExplVarX_indVar = {}
        for ind in range(np.shape(self.PRESSEarr_indVar_X)[1]):
            self.PRESSE_indVar_X[ind] = self.PRESSEarr_indVar_X[:,ind]
            self.MSEE_indVar_X[ind] = self.MSEEarr_indVar_X[:,ind]
            self.RMSEE_indVar_X[ind] = self.RMSEEarr_indVar_X[:,ind]
            self.cumCalExplVarX_indVar[ind] = self.cumCalExplVarXarr_indVar[:,ind]
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Collect total PRESSE across all variables in a dictionary. Also,
        # compute total calibrated explained variance in X.
        self.PRESSE_total_dict_X = {}
        self.PRESSE_total_list_X = np.sum(self.PRESSEarr_indVar_X, axis=1)
        for ind, PRESSE_X in enumerate(self.PRESSE_total_list_X):
            self.PRESSE_total_dict_X[ind] = PRESSE_X
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Collect total MSEE across all variables in a dictionary. Also,
        # compute total validated explained variance in X.
        self.MSEE_total_dict_X = {}
        self.MSEE_total_list_X = np.sum(self.MSEEarr_indVar_X, axis=1) / np.shape(self.arrX_input)[1]
        MSEE_0_X = self.MSEE_total_list_X[0]
        # Compute total calibrated explained variance in X
        self.XcumCalExplVarList = []
        if not self.Xstand:
            for ind, MSEE_X in enumerate(self.MSEE_total_list_X):
                perc = (MSEE_0_X - MSEE_X) / MSEE_0_X * 100
                self.MSEE_total_dict_X[ind] = MSEE_X
                self.XcumCalExplVarList.append(perc)
        else:
            # NOTE(review): in the standardised branch MSEE_total_dict_X is
            # left empty — presumably intentional, since each variable then
            # contributes equally; confirm against hoggorm upstream.
            self.XcumCalExplVarArr = np.average(self.cumCalExplVarXarr_indVar, axis=1)
            self.XcumCalExplVarList = list(self.XcumCalExplVarArr)
        # Construct list with total validated explained variance in X
        self.XcalExplVarList = []
        for ind, item in enumerate(self.XcumCalExplVarList):
            if ind == len(self.XcumCalExplVarList)-1:
                break
            explVarComp = self.XcumCalExplVarList[ind+1] - self.XcumCalExplVarList[ind]
            self.XcalExplVarList.append(explVarComp)
        # Construct a dictionary that holds predicted X (Xhat) from calibration
        # for each number of components.
        # NOTE(review): this rebuilds calXpredDict identically to the loop
        # further above; harmless duplication.
        self.calXpredDict = {}
        for ind, item in enumerate(self.calXpredList):
            self.calXpredDict[ind+1] = item
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Compute total RMSEE and store values in a dictionary and list.
        self.RMSEE_total_dict_X = {}
        self.RMSEE_total_list_X = np.sqrt(self.MSEE_total_list_X)
        for ind, RMSEE_X in enumerate(self.RMSEE_total_list_X):
            self.RMSEE_total_dict_X[ind] = RMSEE_X
        # ---------------------------------------------------------------------
        # ========== COMPUTATIONS FOR y ============
        # --------------------------------------------------------------------
        # Create a list holding arrays of yhat predicted calibration after each
        # component. yhat is computed with Yhat = T*Q'
        self.calYpredList = []
        for ind in range(1, self.numPC+1):
            x_scores = self.arrT[:,0:ind]
            y_loadings = self.arrQ[:,0:ind]
            # Depending on whether Y was standardised or not compute Yhat
            # accordingly.
            if self.ystand:
                yhat_stand = np.dot(x_scores, np.transpose(y_loadings))
                yhat = (yhat_stand * yStd.reshape(1,-1)) + vecyMean.reshape(1,-1)
            else:
                yhat = np.dot(x_scores, np.transpose(y_loadings)) + vecyMean
            self.calYpredList.append(yhat)
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Construct a dictionary that holds predicted Y (Yhat) from calibration
        # for each number of components.
        self.calYpredDict = {}
        for ind, item in enumerate(self.calYpredList):
            self.calYpredDict[ind+1] = item
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Collect PRESSE for each PC in a dictionary.
        # Keys represent number of component.
        self.PRESSE_total_dict = {}
        self.MSEE_total_dict = {}
        # Compute PRESS and MSEE for calibration / estimation with zero
        # components
        PRESSE_0 = np.sum(np.square(st.center(self.vecy_input)))
        self.PRESSE_total_dict[0] = PRESSE_0
        MSEE_0 = PRESSE_0 / np.shape(self.vecy_input)[0]
        self.MSEE_total_dict[0] = MSEE_0
        # Compute PRESS and MSEE for each Yhat for 1, 2, 3, etc number of
        # components and compute explained variance
        for ind, yhat in enumerate(self.calYpredList):
            diffy = self.vecy_input - yhat
            PRESSE = np.sum(np.square(diffy))
            self.PRESSE_total_dict[ind+1] = PRESSE
            self.MSEE_total_dict[ind+1] = PRESSE / np.shape(self.vecy_input)[0]
        # Compute total calibrated explained variance in Y
        # NOTE(review): this is a dict view, not a list; it is re-iterated
        # below, which is valid in Python 3.
        self.MSEE_total_list = self.MSEE_total_dict.values()
        self.YcumCalExplVarList = []
        for ind, MSEE in enumerate(self.MSEE_total_list):
            perc = (MSEE_0 - MSEE) / MSEE_0 * 100
            self.YcumCalExplVarList.append(perc)
        # Construct list with total validated explained variance in Y
        self.YcalExplVarList = []
        for ind, item in enumerate(self.YcumCalExplVarList):
            if ind == len(self.YcumCalExplVarList)-1:
                break
            explVarComp = self.YcumCalExplVarList[ind+1] - self.YcumCalExplVarList[ind]
            self.YcalExplVarList.append(explVarComp)
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        # Compute total RMSEP and store values in a dictionary and list.
        self.RMSEE_total_dict = {}
        self.RMSEE_total_list = np.sqrt(list(self.MSEE_total_list))
        for ind, RMSEE in enumerate(self.RMSEE_total_list):
            self.RMSEE_total_dict[ind] = RMSEE
        # ---------------------------------------------------------------------
        # ---------------------------------------------------------------------
        self.PRESSEarr = np.array(list(self.PRESSE_total_dict.values()))
        self.MSEEarr = np.array(list(self.MSEE_total_dict.values()))
        self.RMSEEarr = np.array(list(self.RMSEE_total_dict.values()))
        # ---------------------------------------------------------------------
        # ===============================================================================
        # Here starts the cross validation process of PLS1
        # ===============================================================================
        # Check whether cross validation is required by user. If required,
        # check what kind and build training and test sets thereafter.
        if self.cvType is not None:
            numObj = np.shape(self.vecy)[0]
            # NOTE(review): the print() calls below look like debug leftovers.
            if self.cvType[0] == "loo":
                print("loo")
                cvComb = cv.LeaveOneOut(numObj)
            elif self.cvType[0] == "KFold":
                print("KFold")
                cvComb = cv.KFold(numObj, k=self.cvType[1])
            elif self.cvType[0] == "lolo":
                print("lolo")
                cvComb = cv.LeaveOneLabelOut(self.cvType[1])
            else:
                print('Requested form of cross validation is not available')
                pass
            # Collect predicted y (i.e. yhat) for each CV segment in a
            # dictionary according to numer of PC
            self.valYpredDict = {}
            for ind in range(1, self.numPC+1):
                self.valYpredDict[ind] = np.zeros(np.shape(self.vecy_input))
            # Construct a dictionary that holds predicted X (Xhat) from
            # validation for each number of components.
            self.valXpredDict = {}
            for ind in range(1, self.numPC+1):
                self.valXpredDict[ind] = np.zeros(np.shape(self.arrX_input))
            # Collect train and test set in dictionaries for each PC and put
            # them in this list.
            self.cvTrainAndTestDataList = []
            # Collect: validation X scores T, validation X loadings P,
            # validation Y scores U, validation Y loadings Q,
            # validation X loading weights W and scores regression coefficients C
            # in lists for each PC
            # NOTE(review): val_arrUlist is created but never appended to.
            self.val_arrTlist = []
            self.val_arrPlist = []
            self.val_arrUlist = []
            self.val_arrQlist = []
            self.val_arrWlist = []
            # Per-object train means, used for the zero-component baselines.
            all_ytm = np.zeros(np.shape(self.vecy_input))
            all_xtm = np.zeros(np.shape(self.arrX_input))
            # First devide into combinations of training and test sets
            for train_index, test_index in cvComb:
                x_train, x_test = cv.split(train_index, test_index, self.arrX_input)
                y_train, y_test = cv.split(train_index, test_index, self.vecy_input)
                subDict = {}
                subDict['x train'] = x_train
                subDict['x test'] = x_test
                subDict['y train'] = y_train
                subDict['y test'] = y_test
                self.cvTrainAndTestDataList.append(subDict)
                # Collect X scores and Y loadings vectors from each iterations step
                val_x_scoresList = []
                val_x_loadingsList = []
                # val_y_scoresList = []
                val_y_loadingsList = []
                val_x_loadingWeightsList = []
                # Standardise X if requested by user, otherwise center X.
                if self.Xstand:
                    x_train_means = np.average(x_train, axis=0)
                    x_train_std = np.std(x_train, axis=0, ddof=1)
                    X_new = (x_train - x_train_means) / x_train_std
                else:
                    x_train_means = np.average(x_train, axis=0)
                    X_new = x_train - x_train_means
                # Standardise y if requested by user, otherwise center y.
                if self.ystand:
                    y_train_means = np.average(y_train)
                    y_train_std = np.std(y_train, ddof=1)
                    y_new = (y_train - y_train_means) / y_train_std
                else:
                    y_train_means = np.average(y_train)
                    y_new = y_train - y_train_means
                # Compute j number of components (same NIPALS steps as the
                # calibration fit above, applied to the training segment).
                for j in range(self.numPC):
                    # Module 7: STEP 1
                    w_num = np.dot(np.transpose(X_new), y_new)
                    w_denom = npla.norm(w_num)
                    w = w_num / w_denom
                    # Module 7: STEP 2
                    t = np.dot(X_new, w)
                    # Module 7: STEP 3
                    q_num = np.dot(np.transpose(t), y_new)
                    q_denom = np.dot(np.transpose(t), t)
                    q = q_num / q_denom
                    # Module 7: STEP 4
                    p_num = np.dot(np.transpose(X_new), t)
                    p_denom = np.dot(np.transpose(t), t)
                    p = p_num / p_denom
                    # Module 7: STEP 5
                    X_old = X_new.copy()
                    X_new = X_old - np.dot(t, np.transpose(p))
                    y_old = y_new.copy()
                    y_new = y_old - t*q
                    # Collect vectors t, p, u, q, w and
                    val_x_scoresList.append(t.reshape(-1))
                    val_x_loadingsList.append(p.reshape(-1))
                    val_y_loadingsList.append(q.reshape(-1))
                    val_x_loadingWeightsList.append(w.reshape(-1))
                # Construct T, P, U, Q and W from lists of vectors
                val_arrT = np.array(np.transpose(val_x_scoresList))
                val_arrP = np.array(np.transpose(val_x_loadingsList))
                val_arrQ = np.array(np.transpose(val_y_loadingsList))
                val_arrW = np.array(np.transpose(val_x_loadingWeightsList))
                self.val_arrTlist.append(val_arrT)
                self.val_arrPlist.append(val_arrP)
                self.val_arrQlist.append(val_arrQ)
                self.val_arrWlist.append(val_arrW)
                # Give vector y_train_mean the correct dimension in
                # numpy, so matrix multiplication will be possible
                # i.e from dimension (x,) to (1,x)
                ytm = y_train_means.reshape(1,-1)
                xtm = x_train_means.reshape(1,-1)
                # NOTE(review): this loop re-assigns the same rows once per
                # test object; a single assignment would suffice.
                for ind_test in range(np.shape(y_test)[0]):
                    all_ytm[test_index,] = ytm
                    all_xtm[test_index,] = xtm
                # 'Module 7: Partial least squares regression I' - section 7.2
                # Prediction for PLS2.
                if self.Xstand:
                    x_new = (x_test - x_train_means) / x_train_std
                else:
                    x_new = x_test - x_train_means
                t_list = []
                for ind in range(self.numPC):
                    # Module 8: Prediction STEP 1
                    # ---------------------------
                    t = np.dot(x_new, val_arrW[:,ind]).reshape(-1,1)
                    # Module 8: Prediction STEP 2
                    # ---------------------------
                    p = val_arrP[:,ind].reshape(-1,1)
                    x_old = x_new
                    x_new = x_old - np.dot(t,np.transpose(p))
                    # Generate a vector t that gets longer by one element with
                    # each PC
                    t_list.append(t)
                    t_vec = np.hstack(t_list)
                    # Get relevant part of Q for specific number of PC's
                    part_val_arrP = val_arrP[:,0:ind+1]
                    part_val_arrQ = val_arrQ[0,0:ind+1]
                    # Module 8: Prediction STEP 3
                    # ---------------------------
                    # First compute yhat
                    if self.ystand:
                        tCQ = np.dot(t_vec,part_val_arrQ) * y_train_std.reshape(1,-1)
                    else:
                        tCQ = np.dot(t_vec,part_val_arrQ)
                    yhat = np.transpose(ytm + tCQ)
                    self.valYpredDict[ind+1][test_index,] = yhat
                    # Then compute Xhat
                    if self.Xstand:
                        tP = np.dot(t_vec,np.transpose(part_val_arrP)) * x_train_std.reshape(1,-1)
                    else:
                        tP = np.dot(t_vec, np.transpose(part_val_arrP))
                    xhat = xtm + tP
                    self.valXpredDict[ind+1][test_index,] = xhat
            # ========== COMPUTATIONS FOR y ============
            # -----------------------------------------------------------------
            # Convert vectors from CV segments stored in lists into matrices
            # for each PC
            self.valYpredList = list(self.valYpredDict.values())
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Collect all PRESSCV for individual variables in a dictionary.
            # Keys represent number of component.
            self.PRESSCV_total_dict = {}
            self.MSECV_total_dict = {}
            # Compute PRESS for validation (zero components: predict with the
            # segment-wise training means collected in all_ytm).
            PRESSCV_0 = np.sum(np.square(self.vecy_input-all_ytm), axis=0)
            self.PRESSCV_total_dict[0] = PRESSCV_0
            MSECV_0 = PRESSCV_0 / np.shape(self.vecy_input)[0]
            self.MSECV_total_dict[0] = list(MSECV_0)[0]
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Compute PRESSCV and MSECV for each Yhat for 1, 2, 3, etc number
            # of components and compute explained variance
            for ind, yhat in enumerate(self.valYpredList):
                diffy = self.vecy_input - yhat
                PRESSCV = np.sum(np.square(diffy))
                self.PRESSCV_total_dict[ind+1] = PRESSCV
                self.MSECV_total_dict[ind+1] = PRESSCV / np.shape(self.vecy_input)[0]
            # Compute total validated explained variance in Y
            self.MSECV_total_list = self.MSECV_total_dict.values()
            self.YcumValExplVarList = []
            for ind, MSECV in enumerate(self.MSECV_total_list):
                perc = (MSECV_0 - MSECV) / MSECV_0 * 100
                self.YcumValExplVarList.append(perc[0])
            # Construct list with total validated explained variance in Y
            self.YvalExplVarList = []
            for ind, item in enumerate(self.YcumValExplVarList):
                if ind == len(self.YcumValExplVarList)-1:
                    break
                explVarComp = self.YcumValExplVarList[ind+1] - self.YcumValExplVarList[ind]
                self.YvalExplVarList.append(explVarComp)
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Compute total RMSECV and store values in a dictionary and list.
            self.RMSECV_total_dict = {}
            self.RMSECV_total_list = np.sqrt(list(self.MSECV_total_list))
            for ind, RMSECV in enumerate(self.RMSECV_total_list):
                self.RMSECV_total_dict[ind] = RMSECV
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            self.PRESSCVarr = np.array(list(self.PRESSCV_total_dict.values()))
            self.MSECVarr = np.array(list(self.MSECV_total_dict.values()))
            self.RMSECVarr = np.array(list(self.RMSECV_total_dict.values()))
            # -----------------------------------------------------------------
            # ========== COMPUTATIONS FOR X ============
            # -----------------------------------------------------------------
            # Convert vectors from CV segments stored in lists into matrices
            # for each PC
            self.valXpredList = self.valXpredDict.values()
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Collect all PRESSCV for individual variables in X in a dictionary.
            # Keys represent number of component.
            self.PRESSCVdict_indVar_X = {}
            # First compute PRESSCV for zero components
            PRESSCV_0_indVar_X = np.sum(np.square(self.arrX_input-all_xtm), axis=0)
            self.PRESSCVdict_indVar_X[0] = PRESSCV_0_indVar_X
            # Compute PRESS for each Xhat for 1, 2, 3, etc number of components
            # and compute explained variance
            for ind, Xhat in enumerate(self.valXpredList):
                diffX = self.arrX_input - Xhat
                PRESSCV_indVar_X = np.sum(np.square(diffX), axis=0)
                self.PRESSCVdict_indVar_X[ind+1] = PRESSCV_indVar_X
            # Now store all PRESSE values into an array. Then compute MSEE and
            # RMSEE.
            self.PRESSCVarr_indVar_X = np.array(list(self.PRESSCVdict_indVar_X.values()))
            self.MSECVarr_indVar_X = self.PRESSCVarr_indVar_X / np.shape(self.arrX_input)[0]
            self.RMSECVarr_indVar_X = np.sqrt(self.MSECVarr_indVar_X)
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Compute explained variance for each variable in X using the
            # MSEP for each variable. Also collect PRESS, MSECV, RMSECV in
            # their respective dictionaries for each variable. Keys represent
            # now variables and NOT components as above with
            # self.PRESSdict_indVar
            self.cumValExplVarXarr_indVar = np.zeros(np.shape(self.MSECVarr_indVar_X))
            MSECV_0_indVar_X = self.MSECVarr_indVar_X[0,:]
            for ind, MSECV_indVar_X in enumerate(self.MSECVarr_indVar_X):
                explVar = (MSECV_0_indVar_X - MSECV_indVar_X) / MSECV_0_indVar_X * 100
                self.cumValExplVarXarr_indVar[ind] = explVar
            self.PRESSCV_indVar_X = {}
            self.MSECV_indVar_X = {}
            self.RMSECV_indVar_X = {}
            self.cumValExplVarX_indVar = {}
            for ind in range(np.shape(self.PRESSCVarr_indVar_X)[1]):
                self.PRESSCV_indVar_X[ind] = self.PRESSCVarr_indVar_X[:,ind]
                self.MSECV_indVar_X[ind] = self.MSECVarr_indVar_X[:,ind]
                self.RMSECV_indVar_X[ind] = self.RMSECVarr_indVar_X[:,ind]
                self.cumValExplVarX_indVar[ind] = self.cumValExplVarXarr_indVar[:,ind]
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Collect total PRESSCV across all variables in a dictionary.
            self.PRESSCV_total_dict_X = {}
            self.PRESSCV_total_list_X = np.sum(self.PRESSCVarr_indVar_X, axis=1)
            for ind, PRESSCV_X in enumerate(self.PRESSCV_total_list_X):
                self.PRESSCV_total_dict_X[ind] = PRESSCV_X
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Collect total MSECV across all variables in a dictionary. Also,
            # compute total validated explained variance in X.
            self.MSECV_total_dict_X = {}
            self.MSECV_total_list_X = np.sum(self.MSECVarr_indVar_X, axis=1) / np.shape(self.arrX_input)[1]
            MSECV_0_X = self.MSECV_total_list_X[0]
            # Compute total validated explained variance in X
            self.XcumValExplVarList = []
            if not self.Xstand:
                for ind, MSECV_X in enumerate(self.MSECV_total_list_X):
                    perc = (MSECV_0_X - MSECV_X) / MSECV_0_X * 100
                    self.MSECV_total_dict_X[ind] = MSECV_X
                    self.XcumValExplVarList.append(perc)
            else:
                self.XcumValExplVarArr = np.average(self.cumValExplVarXarr_indVar, axis=1)
                self.XcumValExplVarList = list(self.XcumValExplVarArr)
            # Construct list with total validated explained variance in X in
            # each component
            self.XvalExplVarList = []
            for ind, item in enumerate(self.XcumValExplVarList):
                if ind == len(self.XcumValExplVarList)-1:
                    break
                explVarComp = self.XcumValExplVarList[ind+1] - self.XcumValExplVarList[ind]
                self.XvalExplVarList.append(explVarComp)
            # -----------------------------------------------------------------
            # -----------------------------------------------------------------
            # Compute total RMSECV and store values in a dictionary and list.
            self.RMSECV_total_dict_X = {}
            self.RMSECV_total_list_X = np.sqrt(self.MSECV_total_list_X)
            for ind, RMSECV_X in enumerate(self.RMSECV_total_list_X):
                self.RMSECV_total_dict_X[ind] = RMSECV_X
            # -----------------------------------------------------------------
def modelSettings(self):
"""
Returns a dictionary holding settings under which PLS1 was run.
"""
settingsDict = {}
settingsDict['numComp'] = self.numPC
settingsDict['X'] = self.arrX_input
settingsDict['y'] = self.vecy_input
settingsDict['arrX'] = self.arrX
settingsDict['arrY'] = self.vecy
settingsDict['Xstand'] = self.Xstand
settingsDict['ystand'] = self.ystand
settingsDict['cv type'] = self.cvType
return settingsDict
def X_means(self):
"""
Returns array holding the column means of X.
"""
return np.average(self.arrX_input, axis=0).reshape(1,-1)
def X_scores(self):
"""
Returns array holding scores of array X. First column holds scores
for component 1, second column holds scores for component 2, etc.
"""
return self.arrT
def X_loadings(self):
"""
Returns array holding loadings of array X. Rows represent variables
and columns represent components. First column holds loadings for
component 1, second column holds scores for component 2, etc.
"""
return self.arrP
def X_loadingWeights(self):
"""
Returns an array holding X loadings weights.
"""
return self.arrW
def X_corrLoadings(self):
"""
Returns array holding correlation loadings of array X. First column
holds correlation loadings for component 1, second column holds
correlation loadings for component 2, etc.
"""
# Creates empty matrix for correlation loadings
arr_XcorrLoadings = np.zeros((np.shape(self.arrT)[1], np.shape(self.arrP)[0]), float)
# Compute correlation loadings:
# For each component in score matrix
for PC in range(np.shape(self.arrT)[1]):
PCscores = self.arrT[:, PC]
# For each variable/attribute in original matrix (not meancentered)
for var in range(np.shape(self.arrX)[1]):
origVar = self.arrX[:, var]
corrs = np.corrcoef(PCscores, origVar)
arr_XcorrLoadings[PC, var] = corrs[0,1]
self.arr_XcorrLoadings = np.transpose(arr_XcorrLoadings)
return self.arr_XcorrLoadings
def X_residuals(self):
"""
Returns a dictionary holding the residual arrays for array X after
each computed component. Dictionary key represents order of component.
"""
# Create empty dictionary that will hold residuals
X_residualsDict = {}
# Fill dictionary with residuals arrays from residuals list
for ind, item in enumerate(self.X_residualsList):
X_residualsDict[ind] = item
return X_residualsDict
def X_calExplVar(self):
"""
Returns a list holding the calibrated explained variance for
each component. First number in list is for component 1, second number
for component 2, etc.
"""
return self.XcalExplVarList
def X_cumCalExplVar_indVar(self):
"""
Returns an array holding the cumulative calibrated explained variance
for each variable in X after each component. First row represents zero
components, second row represents one component, third row represents
two components, etc. Columns represent variables.
"""
return self.cumCalExplVarXarr_indVar
def X_cumCalExplVar(self):
"""
Returns a list holding the cumulative calibrated explained variance
for array X after each component.
"""
return self.XcumCalExplVarList
def X_predCal(self):
"""
Returns a dictionary holding the predicted arrays Xhat from
calibration after each computed component. Dictionary key represents
order of component.
"""
return self.calXpredDict
def X_PRESSE_indVar(self):
"""
Returns array holding PRESSE for each individual variable in X
acquired through calibration after each computed component. First row
is PRESSE for zero components, second row for component 1, third row
for component 2, etc.
"""
return self.PRESSEarr_indVar_X
def X_PRESSE(self):
"""
Returns array holding PRESSE across all variables in X acquired
through calibration after each computed component. First row is PRESSE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSE_total_list_X
def X_MSEE_indVar(self):
"""
Returns an array holding MSEE for each variable in array X acquired
through calibration after each computed component. First row holds MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEEarr_indVar_X
def X_MSEE(self):
"""
Returns an array holding MSEE across all variables in X acquired
through calibration after each computed component. First row is MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEE_total_list_X
def X_RMSEE_indVar(self):
"""
Returns an array holding RMSEE for each variable in array X acquired
through calibration after each component. First row holds RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEEarr_indVar_X
def X_RMSEE(self):
"""
Returns an array holding RMSEE across all variables in X acquired
through calibration after each computed component. First row is RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEE_total_list_X
def X_valExplVar(self):
"""
Returns a list holding the validated explained variance for X after
each component. First number in list is for component 1, second number
for component 2, third number for component 3, etc.
"""
return self.XvalExplVarList
def X_cumValExplVar_indVar(self):
"""
Returns an array holding the cumulative validated explained variance
for each variable in X after each component. First row represents
zero components, second row represents component 1, third row for
compnent 2, etc. Columns represent variables.
"""
return self.cumValExplVarXarr_indVar
def X_cumValExplVar(self):
"""
Returns a list holding the cumulative validated explained variance
for array X after each component. First number represents zero
components, second number represents component 1, etc.
"""
return self.XcumValExplVarList
def X_predVal(self):
"""
Returns dictionary holding arrays of predicted Xhat after each
component from validation. Dictionary key represents order of
component.
"""
return self.valXpredDict
def X_PRESSCV_indVar(self):
"""
Returns array holding PRESSCV for each individual variable in X
acquired through cross validation after each computed component. First
row is PRESSCV for zero components, second row for component 1, third
row for component 2, etc.
"""
return self.PRESSCVarr_indVar_X
def X_PRESSCV(self):
"""
Returns an array holding PRESSCV across all variables in X acquired
through cross validation after each computed component. First row is
PRESSCV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSCV_total_list_X
def X_MSECV_indVar(self):
"""
Returns an arrary holding MSECV for each variable in X acquired through
cross validation. First row is MSECV for zero components, second row
for component 1, etc.
"""
return self.MSECVarr_indVar_X
def X_MSECV(self):
"""
Returns an array holding MSECV across all variables in X acquired
through cross validation after each computed component. First row is
MSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSECV_total_list_X
def X_RMSECV_indVar(self):
"""
Returns an arrary holding RMSECV for each variable in X acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSECVarr_indVar_X
def X_RMSECV(self):
"""
Returns an array holding RMSECV across all variables in X acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSECV_total_list_X
def X_scores_predict(self, Xnew, numComp=None):
"""
Returns array of X scores from new X data using the exsisting model.
Rows represent objects and columns represent components.
"""
if numComp == None:
numComp = self.numPC
assert numComp <= self.numPC, ValueError('Maximum numComp = ' + str(self.numPC))
assert numComp > -1, ValueError('numComp must be >= 0')
# First pre-process new X data accordingly
if self.Xstand:
x_new = (Xnew - np.average(self.arrX_input, axis=0)) / np.std(self.arrX_input, ddof=1)
else:
x_new = (Xnew - np.average(self.arrX_input, axis=0))
# x_new* W*inv(P'W)
return np.dot(x_new, np.dot(self.arrW[:,0:numComp], np.linalg.inv(np.dot(np.transpose(self.arrP[:,0:numComp]), self.arrW[:,0:numComp]))))
def Y_means(self):
"""
Returns an array holding the mean of vector y.
"""
return np.average(self.vecy_input)
def Y_scores(self):
"""
Returns scores of array Y (NOT IMPLEMENTED)
"""
print("Not implemented")
return None
def Y_loadings(self):
"""
Returns an array holding loadings of vector y. Columns represent
components. First column for component 1, second columns for
component 2, etc.
"""
return self.arrQ
def Y_corrLoadings(self):
"""
Returns an array holding correlation loadings of vector y. Columns
represent components. First column for component 1, second columns for
component 2, etc.
"""
# Creates empty matrix for correlation loadings
arr_ycorrLoadings = np.zeros((np.shape(self.arrT)[1], np.shape(self.arrQ)[0]), float)
# Compute correlation loadings:
# For each PC in score matrix
for PC in range(np.shape(self.arrT)[1]):
PCscores = self.arrT[:, PC]
# For each variable/attribute in original matrix (not meancentered)
for var in range(np.shape(self.vecy)[1]):
origVar = self.vecy[:, var]
corrs = np.corrcoef(PCscores, origVar)
arr_ycorrLoadings[PC, var] = corrs[0,1]
self.arr_ycorrLoadings = np.transpose(arr_ycorrLoadings)
return self.arr_ycorrLoadings
def Y_residuals(self):
"""
Returns list of arrays holding residuals of vector y after each
component.
"""
Y_residualsDict = {}
for ind, item in enumerate(self.Y_residualsList):
Y_residualsDict[ind] = item
return Y_residualsDict
def Y_calExplVar(self):
"""
Returns list holding calibrated explained variance for each component
in vector y.
"""
return self.YcalExplVarList
def Y_cumCalExplVar(self):
"""
Returns a list holding the calibrated explained variance for
each component. First number represent zero components, second number
one component, etc.
"""
return self.YcumCalExplVarList
def Y_predCal(self):
"""
Returns dictionary holding arrays of predicted yhat after each component
from calibration. Dictionary key represents order of components.
"""
return self.calYpredDict
def Y_PRESSE(self):
"""
Returns an array holding PRESSE for y acquired through calibration
after each computed component. First row is PRESSE for zero components,
second row component 1, third row for component 2, etc.
"""
return self.PRESSEarr
def Y_MSEE(self):
"""
Returns an array holding MSEE of vector y acquired through
calibration after each component. First row holds MSEE for zero
components, second row component 1, third row for component 2, etc.
"""
return self.MSEEarr
def Y_RMSEE(self):
"""
Returns an array holding RMSEE of vector y acquired through calibration
after each computed component. First row is RMSEE for zero
components, second row component 1, third row for component 2, etc.
"""
return self.RMSEEarr
def Y_valExplVar(self):
"""
Returns list holding validated explained variance for each component in
vector y.
"""
return self.YvalExplVarList
def Y_cumValExplVar(self):
"""
Returns list holding cumulative validated explained variance in
vector y.
"""
return self.YcumValExplVarList
def Y_predVal(self):
"""
Returns dictionary holding arrays of predicted yhat after each
component from validation. Dictionary key represents order of component.
"""
return self.valYpredDict
def Y_PRESSCV(self):
"""
Returns an array holding PRESSECV for Y acquired through cross
validation after each computed component. First row is PRESSECV for
zero components, second row component 1, third row for component 2,
etc.
"""
return self.PRESSCVarr
def Y_MSECV(self):
"""
Returns an array holding MSECV of vector y acquired through cross
validation after each computed component. First row is MSECV for
zero components, second row component 1, third row for component 2, etc.
"""
return self.MSECVarr
def Y_RMSECV(self):
"""
Returns an array holding RMSECV for vector y acquired through cross
validation after each computed component. First row is RMSECV for zero
components, second row component 1, third row for component 2, etc.
"""
return self.RMSECVarr
def regressionCoefficients(self, numComp=1):
"""
Returns regression coefficients from the fitted model using all
available samples and a chosen number of components.
"""
assert numComp <= self.numPC, ValueError('Maximum numComp = ' + str(self.numPC))
assert numComp > -1, ValueError('numComp must be >= 0')
# B = W*inv(P'W)*Q'
if self.ystand:
return np.dot(np.dot(self.arrW[:, 0:numComp],
np.linalg.inv(np.dot(np.transpose(self.arrP[:, 0:numComp]), self.arrW[:, 0:numComp]))),
np.transpose(self.arrQ[:, 0:numComp])) * np.std(self.vecy_input, ddof=1, axis=0).reshape(1, -1)
else:
return np.dot(np.dot(self.arrW[:, 0:numComp],
np.linalg.inv(np.dot(np.transpose(self.arrP[:, 0:numComp]), self.arrW[:, 0:numComp]))),
np.transpose(self.arrQ[:, 0:numComp]))
def Y_predict(self, Xnew, numComp=1):
"""
Return predicted yhat from new measurements X.
"""
assert numComp <= self.numPC, ValueError('Maximum numComp = ' + str(self.numPC))
assert numComp > -1, ValueError('numComp must be >= 0')
# First pre-process new X data accordingly
if self.Xstand:
x_new = (Xnew - np.average(self.arrX_input, axis=0)) / np.std(self.arrX_input, ddof=1, axis=0)
else:
x_new = (Xnew - np.average(self.arrX_input, axis=0))
return np.dot(x_new, self.regressionCoefficients(numComp)) + np.mean(self.vecy_input)
def cvTrainAndTestData(self):
"""
Returns a list consisting of dictionaries holding training and test
sets.
"""
return self.cvTrainAndTestDataList
def corrLoadingsEllipses(self):
"""
Returns coordinates of ellipses that represent 50% and 100% expl.
variance in correlation loadings plot.
"""
# Create range for ellipses
t = np.arange(0.0, 2*np.pi, 0.01)
# Compuing the outer circle (100 % expl. variance)
xcords100perc = np.cos(t)
ycords100perc = np.sin(t)
# Computing inner circle
xcords50perc = 0.707 * np.cos(t)
ycords50perc = 0.707 * np.sin(t)
# Collect ellipse coordinates in dictionary
ellipses = {}
ellipses['x50perc'] = xcords50perc
ellipses['y50perc'] = ycords50perc
ellipses['x100perc'] = xcords100perc
ellipses['y100perc'] = ycords100perc
return ellipses
| 38.7463
| 262
| 0.552445
|
acfc7285a8c5868037a028f8cd11f270861f4a15
| 10,965
|
py
|
Python
|
cloudroast/compute/api/config_drive/test_config_drive_reboot_hard.py
|
bhushan5/cloudroast
|
60e05df96fe50b3bc511ee1cf1c818329d4360a1
|
[
"Apache-2.0"
] | null | null | null |
cloudroast/compute/api/config_drive/test_config_drive_reboot_hard.py
|
bhushan5/cloudroast
|
60e05df96fe50b3bc511ee1cf1c818329d4360a1
|
[
"Apache-2.0"
] | null | null | null |
cloudroast/compute/api/config_drive/test_config_drive_reboot_hard.py
|
bhushan5/cloudroast
|
60e05df96fe50b3bc511ee1cf1c818329d4360a1
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
from cloudcafe.common.tools.datagen import rand_name
from cloudroast.compute.fixtures import ComputeFixture
from cloudcafe.compute.common.types import NovaServerRebootTypes
class RebootServerHardTests(ComputeFixture):
    # Integration tests: verify config-drive contents and OpenStack metadata
    # survive a HARD reboot of a server built with config_drive=True.
    @classmethod
    def setUpClass(cls):
        """
        Perform actions that setup the necessary resources for testing
        The following data is generated during this set up:
            - A dictionary of metadata with the values:
                {'meta_key_1': 'meta_value_1',
                 'meta_key_2': 'meta_value_2'}
            - A list of files containing a file with the path '/test.txt' and
              the contents 'This is a config drive test file.'
            - User data contents 'My user data'
        The following resources are created during this set up:
            - A keypair with a random name starting with 'key'
            - A server with the following settings:
                - The config_drive is set to True
                - The keypair previously created
                - Files to be injected at server creation including the
                  '/test.txt' data previously generated
                - The user data previously generated
                - The metadata previously created
                - Remaining values required for creating a server will come
                  from test configuration.
        The following actions are performed during this set up:
            - A remote instance client is set up for the previously created
              server
            - The config drive is mounted at the base path set during test
              configuration on the previously created server
            - Using the remote client, the config drive user data is recorded
            - Using the remote client, the size of the config drive is recorded
            - The OpenStack metadata of the previously created server is
              recorded prior to reboot
            - The previously created server is hard rebooted
            - A fresh remote instance client is set up for the rebooted server
            - A fresh remote instance client is set up again for the rebooted
              server
            - The config drive is mounted at the base path set during test
              configuration on the rebooted server
            - Using the remote instance client, it is determined whether the
              directory '/openstack/content' is present at the base path to
              mount set during test configuration
        """
        super(RebootServerHardTests, cls).setUpClass()
        # set variables
        cls.metadata = {'meta_key_1': 'meta_value_1',
                        'meta_key_2': 'meta_value_2'}
        cls.file_contents = 'This is a config drive test file.'
        # Personality files and user data must be base64-encoded for the API.
        files = [{'path': '/test.txt', 'contents': base64.b64encode(
            cls.file_contents)}]
        cls.user_data_contents = "My user data"
        user_data = base64.b64encode(cls.user_data_contents)
        cls.key = cls.keypairs_client.create_keypair(rand_name("key")).entity
        cls.resources.add(
            cls.key.name, cls.keypairs_client.delete_keypair)
        # build server
        response = cls.server_behaviors.create_active_server(
            config_drive=True, personality=files, user_data=user_data,
            metadata=cls.metadata, key_name=cls.key.name)
        cls.server = response.entity
        cls.resources.add(cls.server.id, cls.servers_client.delete_server)
        cls.user_data_filepath = '{0}/openstack/latest/user_data'.format(
            cls.config_drive_config.base_path_to_mount)
        remote_client = cls.server_behaviors.get_remote_instance_client(
            cls.server, cls.servers_config, key=cls.key.private_key)
        # Mount config drive
        cls.config_drive_behaviors.mount_config_drive(
            server=cls.server, servers_config=cls.servers_config,
            key=cls.key.private_key,
            source_path=cls.config_drive_config.mount_source_path,
            destination_path=cls.config_drive_config.base_path_to_mount)
        # Record pre-reboot state: injected user data, drive size, metadata.
        cls.user_data = remote_client.get_file_details(
            file_path=cls.user_data_filepath).content
        cls.kb_size = remote_client.get_directory_details(
            cls.config_drive_config.base_path_to_mount)
        cls.openstack_meta_before_reboot = (
            cls.config_drive_behaviors.get_openstack_metadata(
                cls.server, cls.servers_config, key=cls.key.private_key,
                filepath=cls.config_drive_config.openstack_meta_filepath))
        # reboot server
        cls.server_behaviors.reboot_and_await(
            cls.server.id, NovaServerRebootTypes.HARD)
        remote_client = cls.server_behaviors.get_remote_instance_client(
            cls.server, config=cls.servers_config, key=cls.key.private_key)
        # Mount config drive
        # NOTE(review): user_data_filepath and remote_client are re-created
        # below although they were already set up above — looks redundant;
        # confirm before cleaning up.
        cls.user_data_filepath = '{0}/openstack/latest/user_data'.format(
            cls.config_drive_config.base_path_to_mount)
        remote_client = cls.server_behaviors.get_remote_instance_client(
            cls.server, cls.servers_config, key=cls.key.private_key)
        cls.config_drive_behaviors.mount_config_drive(
            server=cls.server, servers_config=cls.servers_config,
            key=cls.key.private_key,
            source_path=cls.config_drive_config.mount_source_path,
            destination_path=cls.config_drive_config.base_path_to_mount)
        cls.dir_openstack_content_present = remote_client.is_directory_present(
            directory_path='{0}/openstack/content'.format(
                cls.config_drive_config.base_path_to_mount))
    def test_directory_present_after_hard_reboot(self):
        """
        Verify that the 'openstack' directory is present after a hard reboot
        Validate that after the server is hard rebooted that the variable
        showing whether the directory of OpenStack content is present is True.
        The following assertions occur:
            - The 'dir_openstack_content_present' variable is True
        """
        self.assertTrue(
            self.dir_openstack_content_present,
            msg="Directory openstack is not present")
    def test_hard_reboot_openstack_metadata(self):
        """
        OpenStack metadata should remain consistent through a hard reboot
        Get the OpenStack metadata of the server created and hard rebooted
        during test setup. Validate that the metadata values after a reboot
        matches the metadata that was recorded during test set up before the
        reboot. Validate that the metadata contains select key value pairs.
        The following assertions occur:
            - The metadata recorded during test set up prior to the reboot is
              equal to the metadata found on the server after the reboot.
            - The availability_zone value in the OpenStack metadata is not None
            - The hostname value in the OpenStack metadata is not None
            - The launch index value in the OpenStack metadata is not None
            - The server name value in the OpenStack metadata after reboot is
              equal to the server name value in the OpenStack metadata prior to
              reboot
            - The value of 'meta_key_1' in the OpenStack metadata is equal to
              'meta_value_1'
            - The value of 'meta_key_2' in the OpenStack metadata is equal to
              'meta_value_2'
            - The public key value in the OpenStack metadata after reboot is
              equal to the public key value in the OpenStack metadata prior to
              reboot
            - The uuid value in the OpenStack metadata after reboot is
              equal to the uuid value in the OpenStack metadata prior to
              reboot
        """
        message = "Expected {0} to be {1}, was {2}."
        self.openstack_meta_after_reboot = (
            self.config_drive_behaviors.get_openstack_metadata(
                self.server, self.servers_config, key=self.key.private_key,
                filepath=self.config_drive_config.openstack_meta_filepath))
        self.assertEqual(
            self.openstack_meta_after_reboot,
            self.openstack_meta_before_reboot,
            msg="Meta data does not match")
        openstack_meta = self.openstack_meta_after_reboot
        # NOTE(review): the failure message references self.server.admin_pass
        # while the assertion compares before/after metadata admin_pass —
        # message may mislead on failure; confirm intent.
        self.assertEqual(
            self.openstack_meta_before_reboot.admin_pass,
            self.openstack_meta_after_reboot.admin_pass,
            msg=message.format(
                'Password mismatch',
                self.server.admin_pass,
                openstack_meta.admin_pass))
        self.assertIsNotNone(
            self.openstack_meta_after_reboot.availability_zone,
            msg="availability_zone was not set in the response")
        self.assertIsNotNone(
            self.openstack_meta_after_reboot.hostname,
            msg="hostname was not set in the response")
        self.assertIsNotNone(
            self.openstack_meta_after_reboot.launch_index,
            msg="launch_index was not set in the response")
        self.assertEqual(
            self.openstack_meta_before_reboot.name,
            self.openstack_meta_after_reboot.name,
            msg=message.format(
                'server name',
                self.openstack_meta_before_reboot.name,
                self.openstack_meta_after_reboot.name))
        self.assertEqual(self.openstack_meta_after_reboot.meta.get(
            'meta_key_1'), 'meta_value_1')
        self.assertEqual(self.openstack_meta_after_reboot.meta.get(
            'meta_key_2'), 'meta_value_2')
        self.assertEqual(
            getattr(self.openstack_meta_before_reboot.public_keys,
                    self.key.name),
            getattr(self.openstack_meta_after_reboot.public_keys,
                    self.key.name),
            msg=message.format(
                'key do not match',
                self.openstack_meta_before_reboot.public_keys,
                self.openstack_meta_after_reboot.public_keys))
        self.assertEqual(self.openstack_meta_before_reboot.uuid,
                         self.openstack_meta_after_reboot.uuid,
                         msg=message.format(
                             'server id does not match',
                             self.openstack_meta_before_reboot.uuid,
                             self.openstack_meta_after_reboot.uuid))
| 48.517699
| 79
| 0.659371
|
acfc736c266f5e40e23b19b8620ba8daefc67644
| 210
|
py
|
Python
|
label_generation.py
|
hex-plex/Gait-Occlusion-GAN
|
2ebc217cd1ff38af6c38257952888fab337c100c
|
[
"MIT"
] | 1
|
2021-07-22T09:06:54.000Z
|
2021-07-22T09:06:54.000Z
|
label_generation.py
|
hex-plex/Gait-Occlusion-GAN
|
2ebc217cd1ff38af6c38257952888fab337c100c
|
[
"MIT"
] | null | null | null |
label_generation.py
|
hex-plex/Gait-Occlusion-GAN
|
2ebc217cd1ff38af6c38257952888fab337c100c
|
[
"MIT"
] | null | null | null |
import numpy as np
from gait import *
import cv2
import os
import pickle
# Fit (or re-fit, given override=True) the k-means model for the chosen
# subject/sequence, run the supervision step on it, and fetch labels only
# when supervision reports success.
# NOTE(review): kmean_train / supervision / fetch_labels come from the
# star-import of the local `gait` module — their contracts are not visible
# here; confirm against that module.
kmeans = kmean_train(subject='001',choice='bg-01',override=True)
ret = supervision(kmeans,override=True)
if ret:
    a = fetch_labels()
| 19.090909
| 64
| 0.747619
|
acfc73c2d94d76068d7e1c7e353e58ae8843bd30
| 7,280
|
py
|
Python
|
colossalai/logging/logger.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 1,630
|
2021-10-30T01:00:27.000Z
|
2022-03-31T23:02:41.000Z
|
colossalai/logging/logger.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 166
|
2021-10-30T01:03:01.000Z
|
2022-03-31T14:19:07.000Z
|
colossalai/logging/logger.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 253
|
2021-10-30T06:10:29.000Z
|
2022-03-31T13:30:06.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import colossalai
import logging
from pathlib import Path
from typing import Union, List
import inspect
from colossalai.context.parallel_mode import ParallelMode
try:
    # Prefer rich's handler for colourised, markup-aware console logging
    # with pretty tracebacks when `rich` is installed.
    from rich.logging import RichHandler
    _FORMAT = 'colossalai - %(name)s - %(levelname)s: %(message)s'
    logging.basicConfig(level=logging.INFO,
                        format=_FORMAT,
                        handlers=[RichHandler(show_path=False, markup=True, rich_tracebacks=True)])
except ImportError:
    # Fall back to the stock logging configuration when rich is unavailable.
    _FORMAT = 'colossalai - %(name)s - %(levelname)s: %(message)s'
    logging.basicConfig(level=logging.INFO, format=_FORMAT)
class DistributedLogger:
    """This is a distributed event logger class essentially based on :class:`logging`.
    Args:
        name (str): The name of the logger.
    Note:
        The parallel_mode used in ``info``, ``warning``, ``debug`` and ``error``
        should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    # Registry of logger singletons, keyed by name; populated by __init__.
    __instances = dict()
    @staticmethod
    def get_instance(name: str):
        """Get the unique single logger instance based on name.
        Args:
            name (str): The name of the logger.
        Returns:
            DistributedLogger: A DistributedLogger object
        """
        if name in DistributedLogger.__instances:
            return DistributedLogger.__instances[name]
        else:
            # The constructor registers the new logger in __instances.
            logger = DistributedLogger(name=name)
            return logger
    def __init__(self, name):
        if name in DistributedLogger.__instances:
            raise Exception(
                'Logger with the same name has been created, you should use colossalai.logging.get_dist_logger')
        else:
            self._name = name
            self._logger = logging.getLogger(name)
            DistributedLogger.__instances[name] = self
    @staticmethod
    def __get_call_info():
        # Returns (filename, line number, function name) of the caller two
        # frames up, i.e. the user code that invoked info/warning/etc.
        stack = inspect.stack()
        # stack[1] gives previous function ('info' in our case)
        # stack[2] gives before previous function and so on
        fn = stack[2][1]
        ln = stack[2][2]
        func = stack[2][3]
        return fn, ln, func
    @staticmethod
    def _check_valid_logging_level(level: str):
        # Guard: only the four supported textual levels are accepted.
        assert level in ['INFO', 'DEBUG', 'WARNING', 'ERROR'], 'found invalid logging level'
    def set_level(self, level: str) -> None:
        """Set the logging level
        Args:
            level (str): Can only be INFO, DEBUG, WARNING and ERROR.
        """
        self._check_valid_logging_level(level)
        self._logger.setLevel(getattr(logging, level))
    def log_to_file(self, path: Union[str, Path], mode: str = 'a', level: str = 'INFO', suffix: str = None) -> None:
        """Save the logs to file
        Args:
            path (A string or pathlib.Path object): The file to save the log.
            mode (str): The mode to write log into the file.
            level (str): Can only be INFO, DEBUG, WARNING and ERROR.
            suffix (str): The suffix string of log's name.
        """
        assert isinstance(path, (str, Path)), \
            f'expected argument path to be type str or Path, but got {type(path)}'
        self._check_valid_logging_level(level)
        if isinstance(path, str):
            path = Path(path)
        # create log directory
        path.mkdir(parents=True, exist_ok=True)
        # set the default file name if path is a directory
        # Rank defaults to 0 when the global parallel context has not been
        # initialized yet (e.g. logging before colossalai.launch).
        if not colossalai.core.global_context.is_initialized(ParallelMode.GLOBAL):
            rank = 0
        else:
            rank = colossalai.core.global_context.get_global_rank()
        if suffix is not None:
            log_file_name = f'rank_{rank}_{suffix}.log'
        else:
            log_file_name = f'rank_{rank}.log'
        path = path.joinpath(log_file_name)
        # add file handler
        file_handler = logging.FileHandler(path, mode)
        file_handler.setLevel(getattr(logging, level))
        formatter = logging.Formatter(_FORMAT)
        file_handler.setFormatter(formatter)
        self._logger.addHandler(file_handler)
    def _log(self,
             level,
             message: str,
             parallel_mode: ParallelMode = ParallelMode.GLOBAL,
             ranks: List[int] = None) -> None:
        # Emit on every process when ranks is None; otherwise only on
        # processes whose local rank (within parallel_mode) is listed.
        if ranks is None:
            getattr(self._logger, level)(message)
        else:
            local_rank = colossalai.core.global_context.get_local_rank(parallel_mode)
            if local_rank in ranks:
                getattr(self._logger, level)(message)
    def info(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log an info message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        # First line records the caller's file:line function, then the message.
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('info', message_prefix, parallel_mode, ranks)
        self._log('info', message, parallel_mode, ranks)
    def warning(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log a warning message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('warning', message_prefix, parallel_mode, ranks)
        self._log('warning', message, parallel_mode, ranks)
    def debug(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log a debug message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('debug', message_prefix, parallel_mode, ranks)
        self._log('debug', message, parallel_mode, ranks)
    def error(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log an error message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('error', message_prefix, parallel_mode, ranks)
        self._log('error', message, parallel_mode, ranks)
| 38.518519
| 120
| 0.621291
|
acfc7464439a5acfcb9fdb6899481427420aca10
| 11,412
|
py
|
Python
|
python/paddle/fluid/input.py
|
slf12/Paddle
|
fa43d74a3a16ac696db5dc893c9a7b1c6913dc85
|
[
"Apache-2.0"
] | 1
|
2020-05-02T00:00:20.000Z
|
2020-05-02T00:00:20.000Z
|
python/paddle/fluid/input.py
|
slf12/Paddle
|
fa43d74a3a16ac696db5dc893c9a7b1c6913dc85
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/input.py
|
slf12/Paddle
|
fa43d74a3a16ac696db5dc893c9a7b1c6913dc85
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import warnings
from .framework import Variable, in_dygraph_mode
from .layer_helper import LayerHelper
from .data_feeder import check_variable_and_dtype, check_dtype
__all__ = ['one_hot', 'embedding']
def one_hot(input, depth, allow_out_of_range=False):
    """
    Convert each id in ``input`` into a one-hot vector of length ``depth``.

    The output appends a ``depth``-sized dimension to the input shape; the
    position matching each id holds 1.0 and every other position holds 0.0.

    Args:
        input(Variable): Tensor or LoDTensor of ids with at least one
            dimension. The data type is int32 or int64.
        depth(int|Variable): Size of the one-hot dimension; typically the
            dictionary size when the ids are word ids.
        allow_out_of_range(bool): When True, ids outside ``[0, depth)``
            produce all-zero rows; when False (default) such ids raise an
            :code:`Illegal value` error.

    Returns:
        Variable: The one-hot representation of ``input``, with dtype float32.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            label = fluid.data(name="label", shape=[4], dtype="int64")
            one_hot_label = fluid.one_hot(input=label, depth=4)
    """
    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot_v2')
    helper = LayerHelper("one_hot_v2", **locals())
    one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
    # In dygraph mode, or when depth is a plain Python int, depth travels as
    # an op attribute; a tensor depth is wired in as an extra op input.
    if in_dygraph_mode() or not isinstance(depth, Variable):
        inputs = {'X': input}
        attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range}
    else:
        depth.stop_gradient = True
        inputs = {'X': input, 'depth_tensor': depth}
        attrs = {'allow_out_of_range': allow_out_of_range}
    helper.append_op(
        type="one_hot_v2",
        inputs=inputs,
        attrs=attrs,
        outputs={'Out': one_hot_out},
        stop_gradient=True)
    return one_hot_out
def embedding(input,
              size,
              is_sparse=False,
              is_distributed=False,
              padding_idx=None,
              param_attr=None,
              dtype='float32'):
    """
    Look up the embedding vectors for the ids given in ``input``.

    A 2-D embedding matrix of shape ``size`` (vocab_size, emb_size) is created
    and the output is produced by appending an emb_size dimension to the last
    dimension of the input shape.

    **Note:** every id in ``input`` must satisfy :math:`0 <= id < size[0]`,
    otherwise an exception is raised.

    Args:
        input(Variable): Tensor or LoDTensor of int64 ids, each in
            :math:`[0, size[0])`.
        size(tuple|list): Two elements: the dictionary size and the embedding
            vector size.
        is_sparse(bool): Use sparse gradient updates for the backward pass.
            Faster, but unsupported by some optimizers (Adadelta, Adamax,
            DecayedAdagrad, Ftrl, Lamb, LarsMomentum). Default: False.
        is_distributed(bool): Store the embedding matrix distributed across
            machines (multi-machine CPU training only). Default: False.
        padding_idx(int|long|None): A value in ``[-vocab_size, vocab_size)``;
            negative values are converted to ``vocab_size + padding_idx``.
            Lookups of this id yield all-zero vectors that are not updated
            during training. ``None`` disables padding. Default: None.
        param_attr(ParamAttr): Weight parameter property; lets user-defined or
            pre-trained vectors be loaded via a NumpyArrayInitializer whose
            array shape matches ``size``. Default: None.
        dtype(str|core.VarDesc.VarType): Output data type, float32 or float64.
            Default: float32.

    Returns:
        Variable: Embedding Tensor or LoDTensor with data type ``dtype``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            data = fluid.data(name='x', shape=[None, 10], dtype='int64')
            # plain lookup table
            emb_1 = fluid.embedding(input=data, size=[128, 64])
            # pre-trained word vectors
            weight_data = np.random.random(size=(128, 100))
            w_param_attrs = fluid.ParamAttr(
                name="emb_weight",
                learning_rate=0.5,
                initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
                trainable=True)
            emb_2 = fluid.embedding(input=data, size=(128, 100),
                                    param_attr=w_param_attrs, dtype='float32')
    """
    # LayerHelper captures locals(); it must run before any new local bindings.
    helper = LayerHelper('embedding', **locals())
    check_variable_and_dtype(input, 'input', ['int64'], 'fluid.embedding')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                'fluid.embedding')
    # Prefetch remotely only for sparse, non-distributed tables.
    remote_prefetch = is_sparse and (not is_distributed)
    if remote_prefetch:
        assert is_sparse is True and is_distributed is False
    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
    tmp = helper.create_variable_for_type_inference(dtype)
    # Normalize padding_idx: None -> -1 (disabled); negative -> wrap around.
    if padding_idx is None:
        padding_idx = -1
    elif padding_idx < 0:
        padding_idx = size[0] + padding_idx
    helper.append_op(
        type='lookup_table_v2',
        inputs={'Ids': input,
                'W': w},
        outputs={'Out': tmp},
        attrs={
            'is_sparse': is_sparse,
            'is_distributed': is_distributed,
            'remote_prefetch': remote_prefetch,
            'padding_idx': padding_idx
        })
    return tmp
| 43.892308
| 121
| 0.604364
|
acfc747853f9bc083d4a9ee980793be3f14d84e9
| 50,936
|
py
|
Python
|
python/mediumlevelil.py
|
carsonharmon/binaryninja-api
|
f7ad332ad69d370aa29cd54f4c7307da4d9173e2
|
[
"MIT"
] | null | null | null |
python/mediumlevelil.py
|
carsonharmon/binaryninja-api
|
f7ad332ad69d370aa29cd54f4c7307da4d9173e2
|
[
"MIT"
] | null | null | null |
python/mediumlevelil.py
|
carsonharmon/binaryninja-api
|
f7ad332ad69d370aa29cd54f4c7307da4d9173e2
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018-2020 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
import struct
# Binary Ninja components
import binaryninja
from binaryninja import _binaryninjacore as core
from binaryninja.enums import MediumLevelILOperation, InstructionTextTokenType, ILBranchDependence, DataFlowQueryOption
from binaryninja import basicblock #required for MediumLevelILBasicBlock argument
from binaryninja import function
from binaryninja import types
from binaryninja import lowlevelil
# 2-3 compatibility
from binaryninja import range
class SSAVariable(object):
	"""Identifies one SSA definition of a variable: the variable plus an integer version."""

	def __init__(self, var, version):
		self._var = var
		self._version = version

	def __repr__(self):
		return "<ssa %s version %d>" % (repr(self._var), self._version)

	def __eq__(self, other):
		if isinstance(other, self.__class__):
			return self._var == other.var and self._version == other.version
		return NotImplemented

	def __ne__(self, other):
		eq = self.__eq__(other)
		if eq is NotImplemented:
			return eq
		return not eq

	def __hash__(self):
		return hash((self._var, self._version))

	@property
	def var(self):
		"""The underlying (non-SSA) variable."""
		return self._var

	@var.setter
	def var(self, value):
		self._var = value

	@property
	def version(self):
		"""The SSA version number."""
		return self._version

	@version.setter
	def version(self, value):
		self._version = value
class MediumLevelILLabel(object):
	"""Thin wrapper around a core BNMediumLevelILLabel handle.

	When no handle is supplied a fresh label is allocated and initialized
	via the core API; otherwise the given handle is adopted as-is.
	"""
	def __init__(self, handle = None):
		if handle is not None:
			self.handle = handle
		else:
			self.handle = (core.BNMediumLevelILLabel * 1)()
			core.BNMediumLevelILInitLabel(self.handle)
class MediumLevelILOperationAndSize(object):
	"""Pairs a MediumLevelILOperation with an operand size in bytes (0 = no size)."""

	def __init__(self, operation, size):
		self._operation = operation
		self._size = size

	def __repr__(self):
		if self._size != 0:
			return "<%s %d>" % (self._operation.name, self._size)
		return "<%s>" % self._operation.name

	def __eq__(self, other):
		# Compares equal to a bare operation, or to another operation+size pair.
		if isinstance(other, MediumLevelILOperation):
			return other == self._operation
		if isinstance(other, self.__class__):
			return self._size == other.size and self._operation == other.operation
		return NotImplemented

	def __ne__(self, other):
		eq = self.__eq__(other)
		if eq is NotImplemented:
			return eq
		return not eq

	def __hash__(self):
		return hash((self._operation, self._size))

	@property
	def operation(self):
		"""The MLIL operation."""
		return self._operation

	@property
	def size(self):
		"""Operand size in bytes; 0 when the operation carries no size."""
		return self._size
class MediumLevelILInstruction(object):
	"""
	``class MediumLevelILInstruction`` Medium Level Intermediate Language Instructions are infinite length tree-based
	instructions. Tree-based instructions use infix notation with the left hand operand being the destination operand.
	Infix notation is thus more natural to read than other notations (e.g. x86 ``mov eax, 0`` vs. MLIL ``eax = 0``).
	"""
	# Operand schema for every MLIL operation: maps the operation to an ordered
	# list of (attribute_name, operand_type) pairs. ``__init__`` walks this list
	# to decode the raw core operand slots. Operand type codes used below:
	#   "int"/"float"          - immediate value stored in one slot
	#   "expr"                 - sub-expression index (decoded recursively)
	#   "var"/"var_ssa"        - variable identifier (+ SSA version slot)
	#   "var_ssa_dest_and_src" - one variable with separate dest/src versions
	#   "intrinsic"            - architecture intrinsic index
	#   "*_list"/"target_map"  - variable-length operand list fetched from core
	ILOperations = {
		MediumLevelILOperation.MLIL_NOP: [],
		MediumLevelILOperation.MLIL_SET_VAR: [("dest", "var"), ("src", "expr")],
		MediumLevelILOperation.MLIL_SET_VAR_FIELD: [("dest", "var"), ("offset", "int"), ("src", "expr")],
		MediumLevelILOperation.MLIL_SET_VAR_SPLIT: [("high", "var"), ("low", "var"), ("src", "expr")],
		MediumLevelILOperation.MLIL_LOAD: [("src", "expr")],
		MediumLevelILOperation.MLIL_LOAD_STRUCT: [("src", "expr"), ("offset", "int")],
		MediumLevelILOperation.MLIL_STORE: [("dest", "expr"), ("src", "expr")],
		MediumLevelILOperation.MLIL_STORE_STRUCT: [("dest", "expr"), ("offset", "int"), ("src", "expr")],
		MediumLevelILOperation.MLIL_VAR: [("src", "var")],
		MediumLevelILOperation.MLIL_VAR_FIELD: [("src", "var"), ("offset", "int")],
		MediumLevelILOperation.MLIL_VAR_SPLIT: [("high", "var"), ("low", "var")],
		MediumLevelILOperation.MLIL_ADDRESS_OF: [("src", "var")],
		MediumLevelILOperation.MLIL_ADDRESS_OF_FIELD: [("src", "var"), ("offset", "int")],
		MediumLevelILOperation.MLIL_CONST: [("constant", "int")],
		MediumLevelILOperation.MLIL_CONST_PTR: [("constant", "int")],
		MediumLevelILOperation.MLIL_EXTERN_PTR: [("constant", "int"), ("offset", "int")],
		MediumLevelILOperation.MLIL_FLOAT_CONST: [("constant", "float")],
		MediumLevelILOperation.MLIL_IMPORT: [("constant", "int")],
		MediumLevelILOperation.MLIL_ADD: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_ADC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
		MediumLevelILOperation.MLIL_SUB: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_SBB: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
		MediumLevelILOperation.MLIL_AND: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_OR: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_XOR: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_LSL: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_LSR: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_ASR: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_ROL: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_RLC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
		MediumLevelILOperation.MLIL_ROR: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_RRC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
		MediumLevelILOperation.MLIL_MUL: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_MULU_DP: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_MULS_DP: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_DIVU: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_DIVU_DP: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_DIVS: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_DIVS_DP: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_MODU: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_MODU_DP: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_MODS: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_MODS_DP: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_NEG: [("src", "expr")],
		MediumLevelILOperation.MLIL_NOT: [("src", "expr")],
		MediumLevelILOperation.MLIL_SX: [("src", "expr")],
		MediumLevelILOperation.MLIL_ZX: [("src", "expr")],
		MediumLevelILOperation.MLIL_LOW_PART: [("src", "expr")],
		MediumLevelILOperation.MLIL_JUMP: [("dest", "expr")],
		MediumLevelILOperation.MLIL_JUMP_TO: [("dest", "expr"), ("targets", "target_map")],
		MediumLevelILOperation.MLIL_RET_HINT: [("dest", "expr")],
		MediumLevelILOperation.MLIL_CALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
		MediumLevelILOperation.MLIL_CALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
		MediumLevelILOperation.MLIL_CALL_OUTPUT: [("dest", "var_list")],
		MediumLevelILOperation.MLIL_CALL_PARAM: [("src", "var_list")],
		MediumLevelILOperation.MLIL_RET: [("src", "expr_list")],
		MediumLevelILOperation.MLIL_NORET: [],
		MediumLevelILOperation.MLIL_IF: [("condition", "expr"), ("true", "int"), ("false", "int")],
		MediumLevelILOperation.MLIL_GOTO: [("dest", "int")],
		MediumLevelILOperation.MLIL_CMP_E: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_NE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_SLT: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_ULT: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_SLE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_ULE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_SGE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_UGE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_SGT: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_CMP_UGT: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_TEST_BIT: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_BOOL_TO_INT: [("src", "expr")],
		MediumLevelILOperation.MLIL_ADD_OVERFLOW: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_SYSCALL: [("output", "var_list"), ("params", "expr_list")],
		MediumLevelILOperation.MLIL_SYSCALL_UNTYPED: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
		MediumLevelILOperation.MLIL_TAILCALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
		MediumLevelILOperation.MLIL_TAILCALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
		MediumLevelILOperation.MLIL_BP: [],
		MediumLevelILOperation.MLIL_TRAP: [("vector", "int")],
		MediumLevelILOperation.MLIL_INTRINSIC: [("output", "var_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
		MediumLevelILOperation.MLIL_INTRINSIC_SSA: [("output", "var_ssa_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
		MediumLevelILOperation.MLIL_FREE_VAR_SLOT: [("dest", "var")],
		MediumLevelILOperation.MLIL_FREE_VAR_SLOT_SSA: [("prev", "var_ssa_dest_and_src")],
		MediumLevelILOperation.MLIL_UNDEF: [],
		MediumLevelILOperation.MLIL_UNIMPL: [],
		MediumLevelILOperation.MLIL_UNIMPL_MEM: [("src", "expr")],
		MediumLevelILOperation.MLIL_FADD: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FSUB: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FMUL: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FDIV: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FSQRT: [("src", "expr")],
		MediumLevelILOperation.MLIL_FNEG: [("src", "expr")],
		MediumLevelILOperation.MLIL_FABS: [("src", "expr")],
		MediumLevelILOperation.MLIL_FLOAT_TO_INT: [("src", "expr")],
		MediumLevelILOperation.MLIL_INT_TO_FLOAT: [("src", "expr")],
		MediumLevelILOperation.MLIL_FLOAT_CONV: [("src", "expr")],
		MediumLevelILOperation.MLIL_ROUND_TO_INT: [("src", "expr")],
		MediumLevelILOperation.MLIL_FLOOR: [("src", "expr")],
		MediumLevelILOperation.MLIL_CEIL: [("src", "expr")],
		MediumLevelILOperation.MLIL_FTRUNC: [("src", "expr")],
		MediumLevelILOperation.MLIL_FCMP_E: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FCMP_NE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FCMP_LT: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FCMP_LE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FCMP_GE: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FCMP_GT: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FCMP_O: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_FCMP_UO: [("left", "expr"), ("right", "expr")],
		MediumLevelILOperation.MLIL_SET_VAR_SSA: [("dest", "var_ssa"), ("src", "expr")],
		MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
		MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa"), ("src", "expr")],
		MediumLevelILOperation.MLIL_SET_VAR_ALIASED: [("prev", "var_ssa_dest_and_src"), ("src", "expr")],
		MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
		MediumLevelILOperation.MLIL_VAR_SSA: [("src", "var_ssa")],
		MediumLevelILOperation.MLIL_VAR_SSA_FIELD: [("src", "var_ssa"), ("offset", "int")],
		MediumLevelILOperation.MLIL_VAR_ALIASED: [("src", "var_ssa")],
		MediumLevelILOperation.MLIL_VAR_ALIASED_FIELD: [("src", "var_ssa"), ("offset", "int")],
		MediumLevelILOperation.MLIL_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa")],
		MediumLevelILOperation.MLIL_CALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
		MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
		MediumLevelILOperation.MLIL_SYSCALL_SSA: [("output", "expr"), ("params", "expr_list"), ("src_memory", "int")],
		MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
		MediumLevelILOperation.MLIL_TAILCALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
		MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
		MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA: [("dest_memory", "int"), ("dest", "var_ssa_list")],
		MediumLevelILOperation.MLIL_CALL_PARAM_SSA: [("src_memory", "int"), ("src", "var_ssa_list")],
		MediumLevelILOperation.MLIL_LOAD_SSA: [("src", "expr"), ("src_memory", "int")],
		MediumLevelILOperation.MLIL_LOAD_STRUCT_SSA: [("src", "expr"), ("offset", "int"), ("src_memory", "int")],
		MediumLevelILOperation.MLIL_STORE_SSA: [("dest", "expr"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
		MediumLevelILOperation.MLIL_STORE_STRUCT_SSA: [("dest", "expr"), ("offset", "int"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
		MediumLevelILOperation.MLIL_VAR_PHI: [("dest", "var_ssa"), ("src", "var_ssa_list")],
		MediumLevelILOperation.MLIL_MEM_PHI: [("dest_memory", "int"), ("src_memory", "int_list")]
	}
	def __init__(self, func, expr_index, instr_index=None):
		"""Decode the MLIL expression at ``expr_index`` within ``func``.

		:param func: the owning MediumLevelILFunction
		:param expr_index: core expression index to decode
		:param instr_index: instruction index; looked up from the core when None

		Walks ``ILOperations[operation]`` with a cursor ``i`` over the raw
		operand slots. Each decoded value is appended to ``self._operands``
		and also exposed as an instance attribute under its schema name.
		"""
		instr = core.BNGetMediumLevelILByIndex(func.handle, expr_index)
		self._function = func
		self._expr_index = expr_index
		if instr_index is None:
			self._instr_index = core.BNGetMediumLevelILInstructionForExpr(func.handle, expr_index)
		else:
			self._instr_index = instr_index
		self._operation = MediumLevelILOperation(instr.operation)
		self._size = instr.size
		self._address = instr.address
		self._source_operand = instr.sourceOperand
		operands = MediumLevelILInstruction.ILOperations[instr.operation]
		self._operands = []
		i = 0  # cursor into the raw operand slots; some operand types consume extra slots
		for operand in operands:
			name, operand_type = operand
			if operand_type == "int":
				value = instr.operands[i]
				# Reinterpret the unsigned 64-bit slot as a signed integer.
				value = (value & ((1 << 63) - 1)) - (value & (1 << 63))
			elif operand_type == "float":
				# Bit-cast the slot to an IEEE float of the instruction's size;
				# other sizes are left as the raw integer.
				if instr.size == 4:
					value = struct.unpack("f", struct.pack("I", instr.operands[i] & 0xffffffff))[0]
				elif instr.size == 8:
					value = struct.unpack("d", struct.pack("Q", instr.operands[i]))[0]
				else:
					value = instr.operands[i]
			elif operand_type == "expr":
				# Recursively decode the sub-expression.
				value = MediumLevelILInstruction(func, instr.operands[i])
			elif operand_type == "intrinsic":
				value = lowlevelil.ILIntrinsic(func.arch, instr.operands[i])
			elif operand_type == "var":
				value = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
			elif operand_type == "var_ssa":
				# Two slots: variable identifier then SSA version.
				var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
				version = instr.operands[i + 1]
				i += 1
				value = SSAVariable(var, version)
			elif operand_type == "var_ssa_dest_and_src":
				# Three slots: variable id, destination version, source version.
				# The destination SSAVariable is appended to _operands and also
				# published as ``self.dest``; ``value`` (bound under the schema
				# name, e.g. "prev") carries the source version.
				var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
				dest_version = instr.operands[i + 1]
				src_version = instr.operands[i + 2]
				i += 2
				self._operands.append(SSAVariable(var, dest_version))
				#TODO: documentation for dest
				self.dest = SSAVariable(var, dest_version)
				value = SSAVariable(var, src_version)
			elif operand_type == "int_list":
				# NOTE(review): unlike the other list operand types below, this
				# branch does not advance ``i`` an extra slot; int_list only
				# appears as the final operand (MLIL_MEM_PHI) so no later operand
				# is misaligned — confirm against the core operand encoding.
				count = ctypes.c_ulonglong()
				operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
				value = []
				for j in range(count.value):
					value.append(operand_list[j])
				core.BNMediumLevelILFreeOperandList(operand_list)
			elif operand_type == "var_list":
				count = ctypes.c_ulonglong()
				operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
				i += 1  # list operands occupy two raw slots
				value = []
				for j in range(count.value):
					value.append(function.Variable.from_identifier(self._function.source_function, operand_list[j]))
				core.BNMediumLevelILFreeOperandList(operand_list)
			elif operand_type == "var_ssa_list":
				count = ctypes.c_ulonglong()
				operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
				i += 1  # list operands occupy two raw slots
				value = []
				# Entries are (variable id, version) pairs, flattened.
				for j in range(count.value // 2):
					var_id = operand_list[j * 2]
					var_version = operand_list[(j * 2) + 1]
					value.append(SSAVariable(function.Variable.from_identifier(self._function.source_function,
						var_id), var_version))
				core.BNMediumLevelILFreeOperandList(operand_list)
			elif operand_type == "expr_list":
				count = ctypes.c_ulonglong()
				operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
				i += 1  # list operands occupy two raw slots
				value = []
				for j in range(count.value):
					value.append(MediumLevelILInstruction(func, operand_list[j]))
				core.BNMediumLevelILFreeOperandList(operand_list)
			elif operand_type == "target_map":
				count = ctypes.c_ulonglong()
				operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
				i += 1  # list operands occupy two raw slots
				value = {}
				# Entries are (value, target) pairs, flattened.
				for j in range(count.value // 2):
					key = operand_list[j * 2]
					target = operand_list[(j * 2) + 1]
					value[key] = target
				core.BNMediumLevelILFreeOperandList(operand_list)
			self._operands.append(value)
			self.__dict__[name] = value
			i += 1
def __str__(self):
tokens = self.tokens
if tokens is None:
return "invalid"
result = ""
for token in tokens:
result += token.text
return result
def __repr__(self):
return "<il: %s>" % str(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self._expr_index == other.expr_index
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index < other.expr_index
def __le__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index <= other.expr_index
def __gt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index > other.expr_index
def __ge__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index >= other.expr_index
def __hash__(self):
return hash((self._instr_index, self._function))
	@property
	def tokens(self):
		"""MLIL tokens (read-only).

		Returns the token list for this expression, or None if the core call
		fails. When this expression is the root expression of its instruction
		(its expr index matches the instruction's), the full instruction text
		is used; otherwise only the sub-expression text is rendered.
		"""
		count = ctypes.c_ulonglong()
		tokens = ctypes.POINTER(core.BNInstructionTextToken)()
		if ((self._instr_index is not None) and (self._function.source_function is not None) and
			(self._expr_index == core.BNGetMediumLevelILIndexForInstruction(self._function.handle, self._instr_index))):
			if not core.BNGetMediumLevelILInstructionText(self._function.handle, self._function.source_function.handle,
				self._function.arch.handle, self._instr_index, tokens, count):
				return None
		else:
			if not core.BNGetMediumLevelILExprText(self._function.handle, self._function.arch.handle,
				self._expr_index, tokens, count):
				return None
		result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
		core.BNFreeInstructionText(tokens, count.value)
		return result
@property
def il_basic_block(self):
"""IL basic block object containing this expression (read-only) (only available on finalized functions)"""
return MediumLevelILBasicBlock(self._function.source_function.view, core.BNGetMediumLevelILBasicBlockForInstruction(self._function.handle, self._instr_index), self._function)
@property
def ssa_form(self):
"""SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.ssa_form,
core.BNGetMediumLevelILSSAExprIndex(self._function.handle, self._expr_index))
@property
def non_ssa_form(self):
"""Non-SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.non_ssa_form,
core.BNGetMediumLevelILNonSSAExprIndex(self._function.handle, self._expr_index))
@property
def value(self):
"""Value of expression if constant or a known value (read-only)"""
value = core.BNGetMediumLevelILExprValue(self._function.handle, self._expr_index)
result = function.RegisterValue(self._function.arch, value)
return result
@property
def possible_values(self):
"""Possible values of expression using path-sensitive static data flow analysis (read-only)"""
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, None, 0)
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
@property
def branch_dependence(self):
"""Set of branching instructions that must take the true or false path to reach this instruction"""
count = ctypes.c_ulonglong()
deps = core.BNGetAllMediumLevelILBranchDependence(self._function.handle, self._instr_index, count)
result = {}
for i in range(0, count.value):
result[deps[i].branch] = ILBranchDependence(deps[i].dependence)
core.BNFreeILBranchDependenceList(deps)
return result
@property
def low_level_il(self):
"""Low level IL form of this expression"""
expr = self._function.get_low_level_il_expr_index(self._expr_index)
if expr is None:
return None
return lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr)
@property
def llil(self):
"""Alias for low_level_il"""
return self.low_level_il
@property
def llils(self):
exprs = self.function.get_low_level_il_expr_indexes(self.expr_index)
result = []
for expr in exprs:
result.append(lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr))
return result
@property
def high_level_il(self):
"""High level IL form of this expression"""
expr = self._function.get_high_level_il_expr_index(self._expr_index)
if expr is None:
return None
return binaryninja.highlevelil.HighLevelILInstruction(self._function.high_level_il, expr)
@property
def hlil(self):
"""Alias for high_level_il"""
return self.high_level_il
@property
def ssa_memory_version(self):
"""Version of active memory contents in SSA form for this instruction"""
return core.BNGetMediumLevelILSSAMemoryVersionAtILInstruction(self._function.handle, self._instr_index)
@property
def prefix_operands(self):
"""All operands in the expression tree in prefix order"""
result = [MediumLevelILOperationAndSize(self._operation, self._size)]
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.prefix_operands
else:
result.append(operand)
return result
@property
def postfix_operands(self):
"""All operands in the expression tree in postfix order"""
result = []
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.postfix_operands
else:
result.append(operand)
result.append(MediumLevelILOperationAndSize(self._operation, self._size))
return result
	@property
	def vars_written(self):
		"""List of variables written by instruction"""
		# Single-destination assignments (including SSA/aliased forms and phi).
		if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
			MediumLevelILOperation.MLIL_SET_VAR_SSA, MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
			MediumLevelILOperation.MLIL_SET_VAR_ALIASED, MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD,
			MediumLevelILOperation.MLIL_VAR_PHI]:
			return [self.dest]
		# Split assignments write both halves.
		elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA]:
			return [self.high, self.low]
		# Typed calls: output is already a var_list.
		elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL]:
			return self.output
		# Untyped/SSA calls: output is a MLIL_CALL_OUTPUT* expression; recurse into it.
		elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
			MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA,
			MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA,
			MediumLevelILOperation.MLIL_TAILCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
			return self.output.vars_written
		# MLIL_CALL_OUTPUT*: dest is itself the list of written variables.
		elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
			return self.dest
		# All other operations write no variables.
		return []
	@property
	def vars_read(self):
		"""List of variables read by instruction"""
		# Assignments: only the right-hand side is a read.
		if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
			MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SSA,
			MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA, MediumLevelILOperation.MLIL_SET_VAR_ALIASED]:
			return self.src.vars_read
		# Partial-field SSA writes also read the previous SSA value.
		elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
			MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD]:
			return [self.prev] + self.src.vars_read
		# Typed calls: reads come from each parameter expression.
		elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL,
			MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_SSA]:
			result = []
			for param in self.params:
				result += param.vars_read
			return result
		# Untyped calls: params is a single MLIL_CALL_PARAM* expression; recurse.
		elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
			MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
			return self.params.vars_read
		# Parameter lists and phi: src is itself the list of read variables.
		elif self._operation in [MediumLevelILOperation.MLIL_CALL_PARAM, MediumLevelILOperation.MLIL_CALL_PARAM_SSA,
			MediumLevelILOperation.MLIL_VAR_PHI]:
			return self.src
		# Call outputs are pure writes.
		elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
			return []
		# Default: collect variables directly referenced, recursing into sub-expressions.
		result = []
		for operand in self._operands:
			if (isinstance(operand, function.Variable)) or (isinstance(operand, SSAVariable)):
				result.append(operand)
			elif isinstance(operand, MediumLevelILInstruction):
				result += operand.vars_read
		return result
@property
def expr_type(self):
"""Type of expression"""
result = core.BNGetMediumLevelILExprType(self._function.handle, self._expr_index)
if result.type:
platform = None
if self._function.source_function:
platform = self._function.source_function.platform
return types.Type(result.type, platform = platform, confidence = result.confidence)
return None
def get_possible_values(self, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_possible_values(self, ssa_var, options = []):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleSSAVarValues(self._function.handle, var_data, ssa_var.version,
self._instr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_version(self, var):
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
return core.BNGetMediumLevelILSSAVarVersionAtILInstruction(self._function.handle, var_data, self._instr_index)
	def get_var_for_reg(self, reg):
		"""Return the Variable mapped to architecture register ``reg`` at this instruction."""
		reg = self._function.arch.get_reg_index(reg)
		result = core.BNGetMediumLevelILVariableForRegisterAtInstruction(self._function.handle, reg, self._instr_index)
		return function.Variable(self._function.source_function, result.type, result.index, result.storage)
	def get_var_for_flag(self, flag):
		"""Return the Variable mapped to architecture flag ``flag`` at this instruction."""
		flag = self._function.arch.get_flag_index(flag)
		result = core.BNGetMediumLevelILVariableForFlagAtInstruction(self._function.handle, flag, self._instr_index)
		return function.Variable(self._function.source_function, result.type, result.index, result.storage)
	def get_var_for_stack_location(self, offset):
		"""Return the Variable at stack location ``offset`` at this instruction."""
		result = core.BNGetMediumLevelILVariableForStackLocationAtInstruction(self._function.handle, offset, self._instr_index)
		return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_reg_value(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAtInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_reg_value_after(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAfterInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_reg_values(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAtInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_reg_values_after(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAfterInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_flag_value(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAtInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_flag_value_after(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAfterInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_flag_values(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAtInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_flag_values_after(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAfterInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_stack_contents(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_stack_contents_after(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_stack_contents(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_stack_contents_after(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
	def get_branch_dependence(self, branch_instr):
		"""Return the ILBranchDependence describing how this instruction depends on the branch at instruction index ``branch_instr``."""
		return ILBranchDependence(core.BNGetMediumLevelILBranchDependence(self._function.handle, self._instr_index, branch_instr))
	@property
	def function(self):
		"""MediumLevelILFunction containing this instruction (read-only)"""
		return self._function
	@property
	def expr_index(self):
		"""Expression index within the IL function (read-only)"""
		return self._expr_index
	@property
	def instr_index(self):
		"""Instruction index within the IL function (read-only)"""
		return self._instr_index
	@property
	def operation(self):
		"""MediumLevelILOperation of this expression (read-only)"""
		return self._operation
	@property
	def size(self):
		"""Size of the operation (read-only)"""
		return self._size
	@property
	def address(self):
		"""Address associated with this instruction (read-only)"""
		return self._address
	@property
	def source_operand(self):
		"""Operand slot of the parent expression this came from (read-only)"""
		return self._source_operand
	@property
	def operands(self):
		"""Operands of this expression (read-only)"""
		return self._operands
class MediumLevelILExpr(object):
	"""
	``class MediumLevelILExpr`` hold the index of IL Expressions.
	.. note:: This class shouldn't be instantiated directly. Rather the helper members of MediumLevelILFunction should be \
	used instead.
	"""
	def __init__(self, index):
		# Simple value holder; the index refers into a MediumLevelILFunction.
		self._index = index
	@property
	def index(self):
		"""Expression index wrapped by this object."""
		return self._index
	@index.setter
	def index(self, value):
		self._index = value
class MediumLevelILFunction(object):
	"""
	``class MediumLevelILFunction`` contains the list of MediumLevelILExpr objects that make up a binaryninja.function. MediumLevelILExpr
	objects can be added to the MediumLevelILFunction by calling :func:`append` and passing the result of the various class
	methods which return MediumLevelILExpr objects.
	"""
	def __init__(self, arch = None, handle = None, source_func = None):
		"""Wrap an existing core IL function ``handle``, or create a new one for ``source_func``."""
		self._arch = arch
		self._source_function = source_func
		if handle is not None:
			# Wrapping an existing core object: fill in whichever of arch /
			# source function were not supplied by querying the core.
			self.handle = core.handle_of_type(handle, core.BNMediumLevelILFunction)
			if self._source_function is None:
				self._source_function = binaryninja.function.Function(handle = core.BNGetMediumLevelILOwnerFunction(self.handle))
			if self._arch is None:
				self._arch = self._source_function.arch
		else:
			# Creating a fresh IL function: an owning source function is required.
			if self._source_function is None:
				self.handle = None
				raise ValueError("IL functions must be created with an associated function")
			if self._arch is None:
				self._arch = self._source_function.arch
			func_handle = self._source_function.handle
			self.handle = core.BNCreateMediumLevelILFunction(arch.handle, func_handle)
	def __del__(self):
		# handle may be None when __init__ raised before acquiring it.
		if self.handle is not None:
			core.BNFreeMediumLevelILFunction(self.handle)
	def __repr__(self):
		arch = self.source_function.arch
		if arch:
			return "<mlil func: %s@%#x>" % (arch.name, self.source_function.start)
		else:
			return "<mlil func: %#x>" % self.source_function.start
	def __len__(self):
		# Number of IL instructions in this function.
		return int(core.BNGetMediumLevelILInstructionCount(self.handle))
	def __eq__(self, other):
		# Equality is identity of the underlying core object.
		if not isinstance(other, self.__class__):
			return NotImplemented
		return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
	def __ne__(self, other):
		if not isinstance(other, self.__class__):
			return NotImplemented
		return not (self == other)
	def __hash__(self):
		return hash(('MLIL', self._source_function))
	def __getitem__(self, i):
		"""Index by instruction number (negative allowed) or by MediumLevelILExpr."""
		if isinstance(i, slice) or isinstance(i, tuple):
			raise IndexError("expected integer instruction index")
		if isinstance(i, MediumLevelILExpr):
			return MediumLevelILInstruction(self, i.index)
		# for backwards compatibility
		if isinstance(i, MediumLevelILInstruction):
			return i
		if i < -len(self) or i >= len(self):
			raise IndexError("index out of range")
		if i < 0:
			i = len(self) + i
		return MediumLevelILInstruction(self, core.BNGetMediumLevelILIndexForInstruction(self.handle, i), i)
	def __setitem__(self, i, j):
		raise IndexError("instruction modification not implemented")
	def __iter__(self):
		"""Iterate the function's MediumLevelILBasicBlock objects."""
		count = ctypes.c_ulonglong()
		blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
		view = None
		if self._source_function is not None:
			view = self._source_function.view
		try:
			for i in range(0, count.value):
				yield MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self)
		finally:
			# Free the core list even if the consumer abandons the generator.
			core.BNFreeBasicBlockList(blocks, count.value)
	@property
	def current_address(self):
		"""Current IL Address (read/write)"""
		return core.BNMediumLevelILGetCurrentAddress(self.handle)
	@current_address.setter
	def current_address(self, value):
		core.BNMediumLevelILSetCurrentAddress(self.handle, self._arch.handle, value)
	def set_current_address(self, value, arch = None):
		"""Set the current IL address, optionally for a specific ``arch`` (defaults to the function's)."""
		if arch is None:
			arch = self._arch
		core.BNMediumLevelILSetCurrentAddress(self.handle, arch.handle, value)
	@property
	def basic_blocks(self):
		"""list of MediumLevelILBasicBlock objects (read-only)"""
		count = ctypes.c_ulonglong()
		blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
		result = []
		view = None
		if self._source_function is not None:
			view = self._source_function.view
		for i in range(0, count.value):
			result.append(MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self))
		core.BNFreeBasicBlockList(blocks, count.value)
		return result
	@property
	def instructions(self):
		"""A generator of mlil instructions of the current function"""
		for block in self.basic_blocks:
			for i in block:
				yield i
	@property
	def ssa_form(self):
		"""Medium level IL in SSA form (read-only)"""
		result = core.BNGetMediumLevelILSSAForm(self.handle)
		if not result:
			return None
		return MediumLevelILFunction(self._arch, result, self._source_function)
	@property
	def non_ssa_form(self):
		"""Medium level IL in non-SSA (default) form (read-only)"""
		result = core.BNGetMediumLevelILNonSSAForm(self.handle)
		if not result:
			return None
		return MediumLevelILFunction(self._arch, result, self._source_function)
	@property
	def low_level_il(self):
		"""Low level IL for this function"""
		result = core.BNGetLowLevelILForMediumLevelIL(self.handle)
		if not result:
			return None
		return lowlevelil.LowLevelILFunction(self._arch, result, self._source_function)
	@property
	def llil(self):
		"""Alias for low_level_il"""
		return self.low_level_il
	@property
	def high_level_il(self):
		"""High level IL for this medium level IL."""
		result = core.BNGetHighLevelILForMediumLevelIL(self.handle)
		if not result:
			return None
		return binaryninja.highlevelil.HighLevelILFunction(self._arch, result, self._source_function)
	@property
	def hlil(self):
		"""Alias for high_level_il"""
		return self.high_level_il
	def get_instruction_start(self, addr, arch = None):
		"""Return the IL instruction index starting at ``addr``, or None when no instruction starts there."""
		if arch is None:
			arch = self._arch
		result = core.BNMediumLevelILGetInstructionStart(self.handle, arch.handle, addr)
		# The core signals "not found" with an out-of-range index.
		if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
			return None
		return result
	def expr(self, operation, a = 0, b = 0, c = 0, d = 0, e = 0, size = 0):
		"""Create a MediumLevelILExpr with ``operation`` (name or enum) and up to five raw operands."""
		if isinstance(operation, str):
			operation = MediumLevelILOperation[operation]
		elif isinstance(operation, MediumLevelILOperation):
			operation = operation.value
		return MediumLevelILExpr(core.BNMediumLevelILAddExpr(self.handle, operation, size, a, b, c, d, e))
	def append(self, expr):
		"""
		``append`` adds the MediumLevelILExpr ``expr`` to the current MediumLevelILFunction.
		:param MediumLevelILExpr expr: the MediumLevelILExpr to add to the current MediumLevelILFunction
		:return: number of MediumLevelILExpr in the current function
		:rtype: int
		"""
		return core.BNMediumLevelILAddInstruction(self.handle, expr.index)
	def goto(self, label):
		"""
		``goto`` returns a goto expression which jumps to the provided MediumLevelILLabel.
		:param MediumLevelILLabel label: Label to jump to
		:return: the MediumLevelILExpr that jumps to the provided label
		:rtype: MediumLevelILExpr
		"""
		return MediumLevelILExpr(core.BNMediumLevelILGoto(self.handle, label.handle))
	def if_expr(self, operand, t, f):
		"""
		``if_expr`` returns the ``if`` expression which depending on condition ``operand`` jumps to the MediumLevelILLabel
		``t`` when the condition expression ``operand`` is non-zero and ``f`` when it's zero.
		:param MediumLevelILExpr operand: comparison expression to evaluate.
		:param MediumLevelILLabel t: Label for the true branch
		:param MediumLevelILLabel f: Label for the false branch
		:return: the MediumLevelILExpr for the if expression
		:rtype: MediumLevelILExpr
		"""
		return MediumLevelILExpr(core.BNMediumLevelILIf(self.handle, operand.index, t.handle, f.handle))
	def mark_label(self, label):
		"""
		``mark_label`` assigns a MediumLevelILLabel to the current IL address.
		:param MediumLevelILLabel label:
		:rtype: None
		"""
		core.BNMediumLevelILMarkLabel(self.handle, label.handle)
	def add_label_list(self, labels):
		"""
		``add_label_list`` returns a label list expression for the given list of MediumLevelILLabel objects.
		:param labels: the list of MediumLevelILLabel to get a label list expression from
		:type labels: list(MediumLevelILLabel)
		:return: the label list expression
		:rtype: MediumLevelILExpr
		"""
		label_list = (ctypes.POINTER(core.BNMediumLevelILLabel) * len(labels))()
		for i in range(len(labels)):
			label_list[i] = labels[i].handle
		return MediumLevelILExpr(core.BNMediumLevelILAddLabelList(self.handle, label_list, len(labels)))
	def add_operand_list(self, operands):
		"""
		``add_operand_list`` returns an operand list expression for the given list of integer operands.
		:param operands: list of operand numbers
		:type operands: list(int)
		:return: an operand list expression
		:rtype: MediumLevelILExpr
		"""
		operand_list = (ctypes.c_ulonglong * len(operands))()
		for i in range(len(operands)):
			operand_list[i] = operands[i]
		return MediumLevelILExpr(core.BNMediumLevelILAddOperandList(self.handle, operand_list, len(operands)))
	def finalize(self):
		"""
		``finalize`` ends the function and computes the list of basic blocks.
		:rtype: None
		"""
		core.BNFinalizeMediumLevelILFunction(self.handle)
	def get_ssa_instruction_index(self, instr):
		"""Map a non-SSA instruction index ``instr`` to its SSA-form index."""
		return core.BNGetMediumLevelILSSAInstructionIndex(self.handle, instr)
	def get_non_ssa_instruction_index(self, instr):
		"""Map an SSA instruction index ``instr`` to its non-SSA-form index."""
		return core.BNGetMediumLevelILNonSSAInstructionIndex(self.handle, instr)
	def get_ssa_var_definition(self, ssa_var):
		"""Return the MediumLevelILInstruction defining ``ssa_var``, or None when there is no definition."""
		var_data = core.BNVariable()
		var_data.type = ssa_var.var.source_type
		var_data.index = ssa_var.var.index
		var_data.storage = ssa_var.var.storage
		result = core.BNGetMediumLevelILSSAVarDefinition(self.handle, var_data, ssa_var.version)
		# Out-of-range index means "no definition".
		if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
			return None
		return self[result]
	def get_ssa_memory_definition(self, version):
		"""Return the MediumLevelILInstruction defining memory ``version``, or None."""
		result = core.BNGetMediumLevelILSSAMemoryDefinition(self.handle, version)
		if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
			return None
		return self[result]
	def get_ssa_var_uses(self, ssa_var):
		"""Return the list of MediumLevelILInstruction objects that use ``ssa_var``."""
		count = ctypes.c_ulonglong()
		var_data = core.BNVariable()
		var_data.type = ssa_var.var.source_type
		var_data.index = ssa_var.var.index
		var_data.storage = ssa_var.var.storage
		instrs = core.BNGetMediumLevelILSSAVarUses(self.handle, var_data, ssa_var.version, count)
		result = []
		for i in range(0, count.value):
			result.append(self[instrs[i]])
		core.BNFreeILInstructionList(instrs)
		return result
	def get_ssa_memory_uses(self, version):
		"""Return the list of MediumLevelILInstruction objects that use memory ``version``."""
		count = ctypes.c_ulonglong()
		instrs = core.BNGetMediumLevelILSSAMemoryUses(self.handle, version, count)
		result = []
		for i in range(0, count.value):
			result.append(self[instrs[i]])
		core.BNFreeILInstructionList(instrs)
		return result
	def is_ssa_var_live(self, ssa_var):
		"""
		``is_ssa_var_live`` determines if ``ssa_var`` is live at any point in the function
		:param SSAVariable ssa_var: the SSA variable to query
		:return: whether the variable is live at any point in the function
		:rtype: bool
		"""
		var_data = core.BNVariable()
		var_data.type = ssa_var.var.source_type
		var_data.index = ssa_var.var.index
		var_data.storage = ssa_var.var.storage
		return core.BNIsMediumLevelILSSAVarLive(self.handle, var_data, ssa_var.version)
	def get_var_definitions(self, var):
		"""Return the list of MediumLevelILInstruction objects that define ``var``."""
		count = ctypes.c_ulonglong()
		var_data = core.BNVariable()
		var_data.type = var.source_type
		var_data.index = var.index
		var_data.storage = var.storage
		instrs = core.BNGetMediumLevelILVariableDefinitions(self.handle, var_data, count)
		result = []
		for i in range(0, count.value):
			result.append(self[instrs[i]])
		core.BNFreeILInstructionList(instrs)
		return result
	def get_var_uses(self, var):
		"""Return the list of MediumLevelILInstruction objects that use ``var``."""
		count = ctypes.c_ulonglong()
		var_data = core.BNVariable()
		var_data.type = var.source_type
		var_data.index = var.index
		var_data.storage = var.storage
		instrs = core.BNGetMediumLevelILVariableUses(self.handle, var_data, count)
		result = []
		for i in range(0, count.value):
			result.append(self[instrs[i]])
		core.BNFreeILInstructionList(instrs)
		return result
	def get_ssa_var_value(self, ssa_var):
		"""Return the RegisterValue of ``ssa_var`` as determined by dataflow analysis."""
		var_data = core.BNVariable()
		var_data.type = ssa_var.var.source_type
		var_data.index = ssa_var.var.index
		var_data.storage = ssa_var.var.storage
		value = core.BNGetMediumLevelILSSAVarValue(self.handle, var_data, ssa_var.version)
		result = function.RegisterValue(self._arch, value)
		return result
	def get_low_level_il_instruction_index(self, instr):
		"""Map MLIL instruction index ``instr`` to the corresponding LLIL SSA instruction index, or None."""
		low_il = self.low_level_il
		if low_il is None:
			return None
		low_il = low_il.ssa_form
		if low_il is None:
			return None
		result = core.BNGetLowLevelILInstructionIndex(self.handle, instr)
		if result >= core.BNGetLowLevelILInstructionCount(low_il.handle):
			return None
		return result
	def get_low_level_il_expr_index(self, expr):
		"""Map MLIL expression index ``expr`` to the corresponding LLIL SSA expression index, or None."""
		low_il = self.low_level_il
		if low_il is None:
			return None
		low_il = low_il.ssa_form
		if low_il is None:
			return None
		result = core.BNGetLowLevelILExprIndex(self.handle, expr)
		if result >= core.BNGetLowLevelILExprCount(low_il.handle):
			return None
		return result
	def get_low_level_il_expr_indexes(self, expr):
		"""Return all LLIL expression indexes corresponding to MLIL expression index ``expr``."""
		count = ctypes.c_ulonglong()
		exprs = core.BNGetLowLevelILExprIndexes(self.handle, expr, count)
		result = []
		for i in range(0, count.value):
			result.append(exprs[i])
		core.BNFreeILInstructionList(exprs)
		return result
	def get_high_level_il_instruction_index(self, instr):
		"""Map MLIL instruction index ``instr`` to the corresponding HLIL instruction index, or None."""
		high_il = self.high_level_il
		if high_il is None:
			return None
		result = core.BNGetHighLevelILInstructionIndex(self.handle, instr)
		if result >= core.BNGetHighLevelILInstructionCount(high_il.handle):
			return None
		return result
	def get_high_level_il_expr_index(self, expr):
		"""Map MLIL expression index ``expr`` to the corresponding HLIL expression index, or None."""
		high_il = self.high_level_il
		if high_il is None:
			return None
		result = core.BNGetHighLevelILExprIndex(self.handle, expr)
		if result >= core.BNGetHighLevelILExprCount(high_il.handle):
			return None
		return result
	def create_graph(self, settings = None):
		"""Return a CoreFlowGraph of this IL function, optionally rendered with DisassemblySettings ``settings``."""
		if settings is not None:
			settings_obj = settings.handle
		else:
			settings_obj = None
		return binaryninja.flowgraph.CoreFlowGraph(core.BNCreateMediumLevelILFunctionGraph(self.handle, settings_obj))
	@property
	def arch(self):
		"""Architecture of this IL function (read/write)"""
		return self._arch
	@arch.setter
	def arch(self, value):
		self._arch = value
	@property
	def source_function(self):
		"""Function this IL was generated from (read/write)"""
		return self._source_function
	@source_function.setter
	def source_function(self, value):
		self._source_function = value
class MediumLevelILBasicBlock(basicblock.BasicBlock):
	"""Basic block whose members are MediumLevelILInstruction objects of ``il_function``."""
	def __init__(self, view, handle, owner):
		super(MediumLevelILBasicBlock, self).__init__(handle, view)
		self.il_function = owner
	def __repr__(self):
		arch = self.arch
		if arch:
			return "<mlil block: %s@%d-%d>" % (arch.name, self.start, self.end)
		else:
			return "<mlil block: %d-%d>" % (self.start, self.end)
	def __iter__(self):
		# start/end are instruction indexes into the owning IL function;
		# end is exclusive here.
		for idx in range(self.start, self.end):
			yield self.il_function[idx]
	def __getitem__(self, idx):
		size = self.end - self.start
		# NOTE(review): idx == size passes this bounds check and resolves to
		# self.il_function[self.end], one past this block's last instruction —
		# confirm whether the check should be ``idx >= size``.
		if idx > size or idx < -size:
			raise IndexError("list index is out of range")
		if idx >= 0:
			return self.il_function[idx + self.start]
		else:
			return self.il_function[self.end + idx]
	def __hash__(self):
		return hash((self.start, self.end, self.il_function))
	def __contains__(self, instruction):
		if type(instruction) != MediumLevelILInstruction or instruction.il_basic_block != self:
			return False
		# NOTE(review): this range test is inclusive of self.end, while
		# __iter__ treats end as exclusive — verify intended boundary.
		if instruction.instr_index >= self.start and instruction.instr_index <= self.end:
			return True
		else:
			return False
	def _create_instance(self, handle, view):
		"""Internal method by super to instantiate child instances"""
		return MediumLevelILBasicBlock(view, handle, self.il_function)
	@property
	def il_function(self):
		"""MediumLevelILFunction that owns this block (read/write)"""
		return self._il_function
	@il_function.setter
	def il_function(self, value):
		self._il_function = value
| 39.981162
| 176
| 0.741185
|
acfc7621eaecb0b049fe0899827b957d8af9a9ac
| 23,919
|
py
|
Python
|
velo_payments/models/__init__.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
velo_payments/models/__init__.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
velo_payments/models/__init__.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. 
Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from velo_payments.models.accepted_payment_v3 import AcceptedPaymentV3
from velo_payments.models.access_token_response import AccessTokenResponse
from velo_payments.models.access_token_validation_request import AccessTokenValidationRequest
from velo_payments.models.auth_response import AuthResponse
from velo_payments.models.auto_top_up_config import AutoTopUpConfig
from velo_payments.models.auto_top_up_config2 import AutoTopUpConfig2
from velo_payments.models.category import Category
from velo_payments.models.challenge import Challenge
from velo_payments.models.challenge2 import Challenge2
from velo_payments.models.company import Company
from velo_payments.models.company2 import Company2
from velo_payments.models.create_funding_account_request_v2 import CreateFundingAccountRequestV2
from velo_payments.models.create_individual import CreateIndividual
from velo_payments.models.create_individual2 import CreateIndividual2
from velo_payments.models.create_individual_name import CreateIndividualName
from velo_payments.models.create_payee import CreatePayee
from velo_payments.models.create_payee2 import CreatePayee2
from velo_payments.models.create_payee_address import CreatePayeeAddress
from velo_payments.models.create_payee_address2 import CreatePayeeAddress2
from velo_payments.models.create_payees_csv_request import CreatePayeesCSVRequest
from velo_payments.models.create_payees_csv_request2 import CreatePayeesCSVRequest2
from velo_payments.models.create_payees_csv_response import CreatePayeesCSVResponse
from velo_payments.models.create_payees_csv_response2 import CreatePayeesCSVResponse2
from velo_payments.models.create_payees_csv_response_rejected_csv_rows import CreatePayeesCSVResponseRejectedCsvRows
from velo_payments.models.create_payees_request import CreatePayeesRequest
from velo_payments.models.create_payees_request2 import CreatePayeesRequest2
from velo_payments.models.create_payment_channel import CreatePaymentChannel
from velo_payments.models.create_payment_channel2 import CreatePaymentChannel2
from velo_payments.models.create_payor_link_request import CreatePayorLinkRequest
from velo_payments.models.create_payout_request_v3 import CreatePayoutRequestV3
from velo_payments.models.create_webhook_request import CreateWebhookRequest
from velo_payments.models.debit_event import DebitEvent
from velo_payments.models.debit_event_all_of import DebitEventAllOf
from velo_payments.models.debit_status_changed import DebitStatusChanged
from velo_payments.models.debit_status_changed_all_of import DebitStatusChangedAllOf
from velo_payments.models.error import Error
from velo_payments.models.error_data import ErrorData
from velo_payments.models.error_response import ErrorResponse
from velo_payments.models.failed_payee import FailedPayee
from velo_payments.models.failed_payee2 import FailedPayee2
from velo_payments.models.failed_submission import FailedSubmission
from velo_payments.models.failed_submission2 import FailedSubmission2
from velo_payments.models.funding_account_response import FundingAccountResponse
from velo_payments.models.funding_account_response2 import FundingAccountResponse2
from velo_payments.models.funding_account_type import FundingAccountType
from velo_payments.models.funding_audit import FundingAudit
from velo_payments.models.funding_event import FundingEvent
from velo_payments.models.funding_event_type import FundingEventType
from velo_payments.models.funding_payor_status_audit_response import FundingPayorStatusAuditResponse
from velo_payments.models.funding_request_v1 import FundingRequestV1
from velo_payments.models.funding_request_v2 import FundingRequestV2
from velo_payments.models.funding_request_v3 import FundingRequestV3
from velo_payments.models.fx_summary import FxSummary
from velo_payments.models.fx_summary_v3 import FxSummaryV3
from velo_payments.models.get_fundings_response import GetFundingsResponse
from velo_payments.models.get_fundings_response_links import GetFundingsResponseLinks
from velo_payments.models.get_payee_list_response import GetPayeeListResponse
from velo_payments.models.get_payee_list_response2 import GetPayeeListResponse2
from velo_payments.models.get_payee_list_response_company import GetPayeeListResponseCompany
from velo_payments.models.get_payee_list_response_company2 import GetPayeeListResponseCompany2
from velo_payments.models.get_payee_list_response_individual import GetPayeeListResponseIndividual
from velo_payments.models.get_payee_list_response_individual2 import GetPayeeListResponseIndividual2
from velo_payments.models.get_payments_for_payout_response_v3 import GetPaymentsForPayoutResponseV3
from velo_payments.models.get_payments_for_payout_response_v3_page import GetPaymentsForPayoutResponseV3Page
from velo_payments.models.get_payments_for_payout_response_v3_summary import GetPaymentsForPayoutResponseV3Summary
from velo_payments.models.get_payments_for_payout_response_v4 import GetPaymentsForPayoutResponseV4
from velo_payments.models.get_payments_for_payout_response_v4_summary import GetPaymentsForPayoutResponseV4Summary
from velo_payments.models.get_payout_statistics import GetPayoutStatistics
from velo_payments.models.get_payouts_response import GetPayoutsResponse
from velo_payments.models.get_payouts_response_v3 import GetPayoutsResponseV3
from velo_payments.models.get_payouts_response_v3_links import GetPayoutsResponseV3Links
from velo_payments.models.get_payouts_response_v3_page import GetPayoutsResponseV3Page
from velo_payments.models.individual import Individual
from velo_payments.models.individual2 import Individual2
from velo_payments.models.individual_name import IndividualName
from velo_payments.models.inline_response400 import InlineResponse400
from velo_payments.models.inline_response401 import InlineResponse401
from velo_payments.models.inline_response403 import InlineResponse403
from velo_payments.models.inline_response404 import InlineResponse404
from velo_payments.models.inline_response409 import InlineResponse409
from velo_payments.models.inline_response412 import InlineResponse412
from velo_payments.models.invitation_status import InvitationStatus
from velo_payments.models.invitation_status2 import InvitationStatus2
from velo_payments.models.invite_payee_request import InvitePayeeRequest
from velo_payments.models.invite_payee_request2 import InvitePayeeRequest2
from velo_payments.models.invite_user_request import InviteUserRequest
from velo_payments.models.kyc_state import KycState
from velo_payments.models.link_for_response import LinkForResponse
from velo_payments.models.list_funding_accounts_response import ListFundingAccountsResponse
from velo_payments.models.list_funding_accounts_response2 import ListFundingAccountsResponse2
from velo_payments.models.list_payments_response_v3 import ListPaymentsResponseV3
from velo_payments.models.list_payments_response_v3_page import ListPaymentsResponseV3Page
from velo_payments.models.list_payments_response_v4 import ListPaymentsResponseV4
from velo_payments.models.list_source_account_response import ListSourceAccountResponse
from velo_payments.models.list_source_account_response_links import ListSourceAccountResponseLinks
from velo_payments.models.list_source_account_response_page import ListSourceAccountResponsePage
from velo_payments.models.list_source_account_response_v2 import ListSourceAccountResponseV2
from velo_payments.models.list_source_account_response_v2_links import ListSourceAccountResponseV2Links
from velo_payments.models.list_source_account_response_v3 import ListSourceAccountResponseV3
from velo_payments.models.list_source_account_response_v3_links import ListSourceAccountResponseV3Links
from velo_payments.models.localisation_details import LocalisationDetails
from velo_payments.models.mfa_details import MFADetails
from velo_payments.models.mfa_type import MFAType
from velo_payments.models.name import Name
from velo_payments.models.name2 import Name2
from velo_payments.models.notification import Notification
from velo_payments.models.notifications import Notifications
from velo_payments.models.notifications2 import Notifications2
from velo_payments.models.ofac_status import OfacStatus
from velo_payments.models.onboarded_status import OnboardedStatus
from velo_payments.models.onboarded_status2 import OnboardedStatus2
from velo_payments.models.onboarding_status_changed import OnboardingStatusChanged
from velo_payments.models.page_for_response import PageForResponse
from velo_payments.models.page_resource_funding_payor_status_audit_response_funding_payor_status_audit_response import PageResourceFundingPayorStatusAuditResponseFundingPayorStatusAuditResponse
from velo_payments.models.paged_payee_invitation_status_response import PagedPayeeInvitationStatusResponse
from velo_payments.models.paged_payee_invitation_status_response2 import PagedPayeeInvitationStatusResponse2
from velo_payments.models.paged_payee_invitation_status_response_page import PagedPayeeInvitationStatusResponsePage
from velo_payments.models.paged_payee_response import PagedPayeeResponse
from velo_payments.models.paged_payee_response2 import PagedPayeeResponse2
from velo_payments.models.paged_payee_response_links import PagedPayeeResponseLinks
from velo_payments.models.paged_payee_response_page import PagedPayeeResponsePage
from velo_payments.models.paged_payee_response_summary import PagedPayeeResponseSummary
from velo_payments.models.paged_payments_response_v3 import PagedPaymentsResponseV3
from velo_payments.models.paged_user_response import PagedUserResponse
from velo_payments.models.paged_user_response_links import PagedUserResponseLinks
from velo_payments.models.paged_user_response_page import PagedUserResponsePage
from velo_payments.models.password_request import PasswordRequest
from velo_payments.models.payable_issue import PayableIssue
from velo_payments.models.payable_issue2 import PayableIssue2
from velo_payments.models.payable_status_changed import PayableStatusChanged
from velo_payments.models.payee_address import PayeeAddress
from velo_payments.models.payee_address2 import PayeeAddress2
from velo_payments.models.payee_delta import PayeeDelta
from velo_payments.models.payee_delta2 import PayeeDelta2
from velo_payments.models.payee_delta_response import PayeeDeltaResponse
from velo_payments.models.payee_delta_response2 import PayeeDeltaResponse2
from velo_payments.models.payee_delta_response2_links import PayeeDeltaResponse2Links
from velo_payments.models.payee_delta_response_links import PayeeDeltaResponseLinks
from velo_payments.models.payee_delta_response_page import PayeeDeltaResponsePage
from velo_payments.models.payee_detail_response import PayeeDetailResponse
from velo_payments.models.payee_detail_response2 import PayeeDetailResponse2
from velo_payments.models.payee_details_changed import PayeeDetailsChanged
from velo_payments.models.payee_event import PayeeEvent
from velo_payments.models.payee_event_all_of import PayeeEventAllOf
from velo_payments.models.payee_event_all_of_reasons import PayeeEventAllOfReasons
from velo_payments.models.payee_invitation_status_response import PayeeInvitationStatusResponse
from velo_payments.models.payee_invitation_status_response2 import PayeeInvitationStatusResponse2
from velo_payments.models.payee_payor_ref import PayeePayorRef
from velo_payments.models.payee_payor_ref_v3 import PayeePayorRefV3
from velo_payments.models.payee_type import PayeeType
from velo_payments.models.payee_user_self_update_request import PayeeUserSelfUpdateRequest
from velo_payments.models.payment_audit_currency import PaymentAuditCurrency
from velo_payments.models.payment_audit_currency_v3 import PaymentAuditCurrencyV3
from velo_payments.models.payment_channel_country import PaymentChannelCountry
from velo_payments.models.payment_channel_rule import PaymentChannelRule
from velo_payments.models.payment_channel_rules_response import PaymentChannelRulesResponse
from velo_payments.models.payment_delta import PaymentDelta
from velo_payments.models.payment_delta_response import PaymentDeltaResponse
from velo_payments.models.payment_delta_response_v1 import PaymentDeltaResponseV1
from velo_payments.models.payment_delta_v1 import PaymentDeltaV1
from velo_payments.models.payment_event import PaymentEvent
from velo_payments.models.payment_event_all_of import PaymentEventAllOf
from velo_payments.models.payment_event_response import PaymentEventResponse
from velo_payments.models.payment_event_response_v3 import PaymentEventResponseV3
from velo_payments.models.payment_instruction_v3 import PaymentInstructionV3
from velo_payments.models.payment_rails import PaymentRails
from velo_payments.models.payment_rejected_or_returned import PaymentRejectedOrReturned
from velo_payments.models.payment_rejected_or_returned_all_of import PaymentRejectedOrReturnedAllOf
from velo_payments.models.payment_response_v3 import PaymentResponseV3
from velo_payments.models.payment_response_v4 import PaymentResponseV4
from velo_payments.models.payment_response_v4_payout import PaymentResponseV4Payout
from velo_payments.models.payment_status_changed import PaymentStatusChanged
from velo_payments.models.payment_status_changed_all_of import PaymentStatusChangedAllOf
from velo_payments.models.payment_v3 import PaymentV3
from velo_payments.models.payor_address import PayorAddress
from velo_payments.models.payor_address_v2 import PayorAddressV2
from velo_payments.models.payor_aml_transaction import PayorAmlTransaction
from velo_payments.models.payor_aml_transaction_v3 import PayorAmlTransactionV3
from velo_payments.models.payor_branding_response import PayorBrandingResponse
from velo_payments.models.payor_create_api_key_request import PayorCreateApiKeyRequest
from velo_payments.models.payor_create_api_key_response import PayorCreateApiKeyResponse
from velo_payments.models.payor_create_application_request import PayorCreateApplicationRequest
from velo_payments.models.payor_email_opt_out_request import PayorEmailOptOutRequest
from velo_payments.models.payor_links_response import PayorLinksResponse
from velo_payments.models.payor_links_response_links import PayorLinksResponseLinks
from velo_payments.models.payor_links_response_payors import PayorLinksResponsePayors
from velo_payments.models.payor_logo_request import PayorLogoRequest
from velo_payments.models.payor_v1 import PayorV1
from velo_payments.models.payor_v2 import PayorV2
from velo_payments.models.payout_company_v3 import PayoutCompanyV3
from velo_payments.models.payout_individual_v3 import PayoutIndividualV3
from velo_payments.models.payout_name_v3 import PayoutNameV3
from velo_payments.models.payout_payee_v3 import PayoutPayeeV3
from velo_payments.models.payout_payor import PayoutPayor
from velo_payments.models.payout_payor_ids import PayoutPayorIds
from velo_payments.models.payout_principal import PayoutPrincipal
from velo_payments.models.payout_status import PayoutStatus
from velo_payments.models.payout_status_v3 import PayoutStatusV3
from velo_payments.models.payout_summary_audit import PayoutSummaryAudit
from velo_payments.models.payout_summary_audit_v3 import PayoutSummaryAuditV3
from velo_payments.models.payout_summary_response_v3 import PayoutSummaryResponseV3
from velo_payments.models.payout_type import PayoutType
from velo_payments.models.ping import Ping
from velo_payments.models.query_batch_response import QueryBatchResponse
from velo_payments.models.query_batch_response2 import QueryBatchResponse2
from velo_payments.models.quote_fx_summary_v3 import QuoteFxSummaryV3
from velo_payments.models.quote_response_v3 import QuoteResponseV3
from velo_payments.models.region_v2 import RegionV2
from velo_payments.models.register_sms_request import RegisterSmsRequest
from velo_payments.models.rejected_payment_v3 import RejectedPaymentV3
from velo_payments.models.resend_token_request import ResendTokenRequest
from velo_payments.models.reset_password_request import ResetPasswordRequest
from velo_payments.models.role import Role
from velo_payments.models.role_update_request import RoleUpdateRequest
from velo_payments.models.self_mfa_type_unregister_request import SelfMFATypeUnregisterRequest
from velo_payments.models.self_update_password_request import SelfUpdatePasswordRequest
from velo_payments.models.set_notifications_request import SetNotificationsRequest
from velo_payments.models.source_account_response import SourceAccountResponse
from velo_payments.models.source_account_response_v2 import SourceAccountResponseV2
from velo_payments.models.source_account_response_v3 import SourceAccountResponseV3
from velo_payments.models.source_account_summary import SourceAccountSummary
from velo_payments.models.source_account_summary_v3 import SourceAccountSummaryV3
from velo_payments.models.source_account_type import SourceAccountType
from velo_payments.models.source_account_v3 import SourceAccountV3
from velo_payments.models.source_event import SourceEvent
from velo_payments.models.supported_countries_response import SupportedCountriesResponse
from velo_payments.models.supported_countries_response_v2 import SupportedCountriesResponseV2
from velo_payments.models.supported_country import SupportedCountry
from velo_payments.models.supported_country_v2 import SupportedCountryV2
from velo_payments.models.supported_currency_response_v2 import SupportedCurrencyResponseV2
from velo_payments.models.supported_currency_v2 import SupportedCurrencyV2
from velo_payments.models.transfer_request import TransferRequest
from velo_payments.models.transfer_request2 import TransferRequest2
from velo_payments.models.transmission_type import TransmissionType
from velo_payments.models.transmission_types import TransmissionTypes
from velo_payments.models.transmission_types2 import TransmissionTypes2
from velo_payments.models.unregister_mfa_request import UnregisterMFARequest
from velo_payments.models.update_payee_details_request import UpdatePayeeDetailsRequest
from velo_payments.models.update_payee_details_request2 import UpdatePayeeDetailsRequest2
from velo_payments.models.update_remote_id_request import UpdateRemoteIdRequest
from velo_payments.models.update_remote_id_request2 import UpdateRemoteIdRequest2
from velo_payments.models.update_webhook_request import UpdateWebhookRequest
from velo_payments.models.user_details_update_request import UserDetailsUpdateRequest
from velo_payments.models.user_info import UserInfo
from velo_payments.models.user_response import UserResponse
from velo_payments.models.user_status import UserStatus
from velo_payments.models.user_type import UserType
from velo_payments.models.user_type2 import UserType2
from velo_payments.models.validate_password_response import ValidatePasswordResponse
from velo_payments.models.watchlist_status import WatchlistStatus
from velo_payments.models.watchlist_status2 import WatchlistStatus2
from velo_payments.models.webhook_response import WebhookResponse
from velo_payments.models.webhooks_response import WebhooksResponse
from velo_payments.models.withdraw_payment_request import WithdrawPaymentRequest
| 88.261993
| 4,651
| 0.891843
|
acfc762c91b59ab8e0a7b2fff96bc1cd6d794ace
| 1,741
|
py
|
Python
|
tests/mlir/rllib_ppo_smoke_test.py
|
lipovsek/CompilerGym
|
444b82fdf304949e6b8cea23c61f6477b419e3a1
|
[
"MIT"
] | null | null | null |
tests/mlir/rllib_ppo_smoke_test.py
|
lipovsek/CompilerGym
|
444b82fdf304949e6b8cea23c61f6477b419e3a1
|
[
"MIT"
] | null | null | null |
tests/mlir/rllib_ppo_smoke_test.py
|
lipovsek/CompilerGym
|
444b82fdf304949e6b8cea23c61f6477b419e3a1
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import warnings
import gym
import numpy as np
import pytest
import torch
from flaky import flaky
from ray.rllib.agents.ppo import PPOTrainer
from ray.tune.registry import register_env
from compiler_gym.wrappers.mlir import make_mlir_rl_wrapper_env
from tests.test_main import main
# Ray emits DeprecationWarnings at import time from its own transitive
# imports; suppress them only for the duration of this import.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    import ray
@flaky(max_runs=3, min_passes=1)
@pytest.mark.filterwarnings(
    "ignore:`np\\.bool` is a deprecated alias for the builtin `bool`\\.",
    "ignore:Mean of empty slice",
    "ignore::ResourceWarning",
    "ignore:using `dtype=` in comparisons is only useful for `dtype=object`",
)
def test_rllib_ppo_smoke():
    """Smoke test: run one PPO training iteration on the MLIR RL env."""
    # Make sure no ray state leaks in from a previous test.
    ray.shutdown()

    # Seed every RNG source used during training for reproducibility.
    seed = 123
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(seed)

    ray.init(local_mode=True)  # Runs PPO training in the same process
    register_env(
        "mlir_rl_env-v0",
        lambda env_config: make_mlir_rl_wrapper_env(env=gym.make("mlir-v0")),
    )

    # Deliberately tiny network and batch sizes: this only checks that the
    # training loop runs end to end, not that it learns anything.
    trainer_config = {
        "env": "mlir_rl_env-v0",
        "framework": "torch",
        "model": {
            "fcnet_hiddens": [2, 2],
            "fcnet_activation": "relu",
        },
        "num_workers": 0,  # local worker only
        "train_batch_size": 2,
        "sgd_minibatch_size": 1,
        "num_sgd_iter": 1,
        "rollout_fragment_length": 2,
    }
    PPOTrainer(config=trainer_config).train()

    ray.shutdown()
# Allow running this test module directly via the project's test runner.
if __name__ == "__main__":
    main()
| 27.634921
| 77
| 0.67892
|
acfc7648754ecff29fbfcdca9a18b9577e1792ac
| 8,406
|
py
|
Python
|
envelopes/views.py
|
audiolion/Envelopes-api
|
f9e93521dc8cb5fd8169551192e1d98f6a359c7c
|
[
"MIT"
] | null | null | null |
envelopes/views.py
|
audiolion/Envelopes-api
|
f9e93521dc8cb5fd8169551192e1d98f6a359c7c
|
[
"MIT"
] | 4
|
2019-10-18T15:54:05.000Z
|
2021-06-01T22:07:16.000Z
|
envelopes/views.py
|
audiolion/Envelopes-API
|
f9e93521dc8cb5fd8169551192e1d98f6a359c7c
|
[
"MIT"
] | null | null | null |
# Third Party Library Imports
from apistar import Response, http
from apistar.backends.django_orm import Session
from apistar.interfaces import Auth
from django.core.exceptions import ObjectDoesNotExist
# Local Imports
from . import schemas
from .forms import AccountForm, CategoryForm, EnvelopeForm, TransactionForm
# Serializers shared by the view functions below; `id` is internal-only.
account_schema = schemas.Account(exclude=('id',))
category_schema = schemas.Category(exclude=('id',))
envelope_schema = schemas.Envelope(exclude=('id',))
# BUG FIX: this was schemas.Envelope (copy-paste), which serialized
# Transaction objects with the wrong schema.
transaction_schema = schemas.Transaction(exclude=('id',))
def retrieve(queryset):
    """Resolve a queryset to a single object.

    Returns a dict with keys 'obj', 'error' and 'exception'. On success
    'obj' holds the single matching row and 'error' is False. On failure
    'error' is True and 'exception' carries the raised exception, or None
    when the queryset was simply empty (mapped to 404 by handle_error).
    """
    try:
        if queryset.exists():
            return {'obj': queryset.get(), 'error': False, 'exception': None}
    # ObjectDoesNotExist is an Exception subclass and both original
    # branches returned the identical payload, so one handler suffices.
    except Exception as exc:
        return {'obj': None, 'error': True, 'exception': exc}
    # Empty queryset: error with no exception.
    return {'obj': queryset, 'error': True, 'exception': None}
def handle_error(props):
    """Translate a failed `retrieve` result into an HTTP error response."""
    # An exception means the lookup itself blew up (400); no exception
    # means the queryset was merely empty (404).
    status, message = (400, 'Bad request') if props['exception'] else (404, 'Not found')
    return Response({'message': message}, status=status)
def list_accounts(request: http.Request, auth: Auth, session: Session):
    """List every account owned by the authenticated user."""
    owned = session.Account.objects.filter(owner=auth.user['id'])
    return account_schema.dump(owned, many=True).data
def get_account(request: http.Request, auth: Auth, session: Session, uuid):
    """Fetch one of the authenticated user's accounts by UUID."""
    owned = session.Account.objects.filter(uuid=uuid).filter(owner=auth.user['id'])
    result = retrieve(owned)
    if result['error']:
        return handle_error(result)
    payload, errors = account_schema.dump(result['obj'])
    return Response(errors, status=400) if errors else payload
def create_account(request: http.Request, auth: Auth, session: Session, data: http.RequestData):
    """Create an account, defaulting the owner to the authenticated user."""
    account_schema.context['session'] = session
    if hasattr(data, 'get') and not data.get('owner'):
        data['owner'] = auth.user['id']
    loaded, errors = account_schema.load(data)
    if not errors:
        loaded.save()
        return Response(account_schema.dump(loaded).data, status=201)
    return Response(errors, status=400)
def update_account(request: http.Request, auth: Auth, session: Session, data: http.RequestData, uuid):  # noqa; E501
    """Update one of the authenticated user's accounts by UUID."""
    owned = session.Account.objects.filter(uuid=uuid).filter(owner=auth.user['id'])
    result = retrieve(owned)
    if result['error']:
        return handle_error(result)
    form = AccountForm(data, instance=result['obj'])
    if not form.is_valid():
        return Response(form.errors, status=400)
    return account_schema.dump(form.save()).data
def delete_account(request: http.Request, auth: Auth, session: Session, uuid):
    """Delete one of the authenticated user's accounts by UUID."""
    owned = session.Account.objects.filter(uuid=uuid).filter(owner=auth.user['id'])
    result = retrieve(owned)
    if result['error']:
        return handle_error(result)
    result['obj'].delete()
    return Response(None, status=204)
def list_envelopes(request: http.Request, auth: Auth, session: Session):
    """List every envelope under an account owned by the user."""
    owned = session.Envelope.objects.filter(account__owner_id=auth.user['id'])
    return envelope_schema.dump(owned, many=True).data
def get_envelope(request: http.Request, auth: Auth, session: Session, uuid):
    """Fetch one of the authenticated user's envelopes by UUID."""
    owned = session.Envelope.objects.filter(uuid=uuid).filter(account__owner_id=auth.user['id'])
    result = retrieve(owned)
    if result['error']:
        return handle_error(result)
    payload, errors = envelope_schema.dump(result['obj'])
    return Response(errors, status=400) if errors else payload
def create_envelope(request: http.Request, auth: Auth, session: Session, data: http.RequestData):
    """Create an envelope, defaulting the creator to the authenticated user."""
    envelope_schema.context['session'] = session
    if hasattr(data, 'get') and not data.get('creator'):
        data['creator'] = auth.user['id']
    loaded, errors = envelope_schema.load(data)
    if not errors:
        loaded.save()
        return Response(envelope_schema.dump(loaded).data, status=201)
    return Response(errors, status=400)
def update_envelope(request: http.Request, auth: Auth, session: Session, data: http.RequestData, uuid):  # noqa; E501
    """Update one of the authenticated user's envelopes by UUID."""
    owned = session.Envelope.objects.filter(uuid=uuid).filter(account__owner_id=auth.user['id'])
    result = retrieve(owned)
    if result['error']:
        return handle_error(result)
    form = EnvelopeForm(data, instance=result['obj'])
    if not form.is_valid():
        return Response(form.errors, status=400)
    return envelope_schema.dump(form.save()).data
def delete_envelope(request: http.Request, auth: Auth, session: Session, uuid):
    """Delete one of the authenticated user's envelopes by UUID.

    Returns 204 on success; 400/404 via handle_error otherwise.
    """
    # Consistency fix: get_envelope/update_envelope scope ownership with
    # account__owner_id; this function used account__owner. Both resolve
    # to the same SQL for an FK pk value, but the lookups should match.
    queryset = session.Envelope.objects.filter(uuid=uuid).filter(account__owner_id=auth.user['id'])
    props = retrieve(queryset)
    if props['error']:
        return handle_error(props)
    props['obj'].delete()
    return Response(None, status=204)
def list_categories(request: http.Request, auth: Auth, session: Session):
    """List all categories (categories are global, not per-user)."""
    return category_schema.dump(session.Category.objects.all(), many=True).data
def get_category(request: http.Request, auth: Auth, session: Session, name):
    """Fetch a category by its unique name."""
    result = retrieve(session.Category.objects.filter(name=name))
    if result['error']:
        return handle_error(result)
    payload, errors = category_schema.dump(result['obj'])
    return Response(errors, status=400) if errors else payload
def create_category(request: http.Request, auth: Auth, session: Session, data: http.RequestData):
    """Create a new category from the request payload."""
    category_schema.context['session'] = session
    loaded, errors = category_schema.load(data)
    if not errors:
        loaded.save()
        return Response(category_schema.dump(loaded).data, status=201)
    return Response(errors, status=400)
def update_category(request: http.Request, auth: Auth, session: Session, data: http.RequestData, name):  # noqa; E501
    """Update a category identified by its name."""
    result = retrieve(session.Category.objects.filter(name=name))
    if result['error']:
        return handle_error(result)
    form = CategoryForm(data, instance=result['obj'])
    if not form.is_valid():
        return Response(form.errors, status=400)
    return category_schema.dump(form.save()).data
def delete_category(request: http.Request, auth: Auth, session: Session, name):
    """Delete a category identified by its name."""
    result = retrieve(session.Category.objects.filter(name=name))
    if result['error']:
        return handle_error(result)
    result['obj'].delete()
    return Response(None, status=204)
def list_transactions(request: http.Request, auth: Auth, session: Session):
    """List all transactions."""
    return transaction_schema.dump(session.Transaction.objects.all(), many=True).data
def get_transaction(request: http.Request, auth: Auth, session: Session, friendly_id):
    """Fetch a transaction by its friendly identifier."""
    result = retrieve(session.Transaction.objects.filter(friendly_id=friendly_id))
    if result['error']:
        return handle_error(result)
    payload, errors = transaction_schema.dump(result['obj'])
    return Response(errors, status=400) if errors else payload
def create_transaction(request: http.Request, auth: Auth, session: Session, data: http.RequestData):  # noqa; E501
    """Create a new transaction from the request payload."""
    transaction_schema.context['session'] = session
    loaded, errors = transaction_schema.load(data)
    if not errors:
        loaded.save()
        return Response(transaction_schema.dump(loaded).data, status=201)
    return Response(errors, status=400)
def update_transaction(request: http.Request, auth: Auth, session: Session, data: http.RequestData, friendly_id):  # noqa; E501
    """Update a transaction identified by its friendly identifier."""
    result = retrieve(session.Transaction.objects.filter(friendly_id=friendly_id))
    if result['error']:
        return handle_error(result)
    form = TransactionForm(data, instance=result['obj'])
    if not form.is_valid():
        return Response(form.errors, status=400)
    return transaction_schema.dump(form.save()).data
def delete_transaction(request: http.Request, auth: Auth, session: Session, friendly_id):
    """Delete a transaction identified by its friendly identifier."""
    result = retrieve(session.Transaction.objects.filter(friendly_id=friendly_id))
    if result['error']:
        return handle_error(result)
    result['obj'].delete()
    return Response(None, status=204)
| 37.526786
| 127
| 0.707709
|
acfc774734fa5c3ba5c0924a7e1e8a35abd62bf9
| 12,845
|
py
|
Python
|
stackdio/api/formulas/models.py
|
hdmillerdr/stackdio
|
84be621705031d147e104369399b872d5093ef64
|
[
"Apache-2.0"
] | 9
|
2015-12-18T22:44:55.000Z
|
2022-02-07T19:34:44.000Z
|
stackdio/api/formulas/models.py
|
hdmillerdr/stackdio
|
84be621705031d147e104369399b872d5093ef64
|
[
"Apache-2.0"
] | 77
|
2015-01-12T17:49:38.000Z
|
2017-02-24T17:57:46.000Z
|
stackdio/api/formulas/models.py
|
hdmillerdr/stackdio
|
84be621705031d147e104369399b872d5093ef64
|
[
"Apache-2.0"
] | 11
|
2015-01-23T15:50:19.000Z
|
2022-02-07T19:34:45.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
import os
from collections import OrderedDict
from shutil import rmtree
import salt.config
import six
import yaml
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from django.dispatch import receiver
from django_extensions.db.models import TimeStampedModel, TitleSlugDescriptionModel
from model_utils.models import StatusModel
from stackdio.api.formulas import utils
from stackdio.core.models import SearchQuerySet
logger = logging.getLogger(__name__)
@six.python_2_unicode_compatible
class FormulaVersion(models.Model):
    """Pins a formula at a specific version for an arbitrary owning object,
    linked through a generic foreign key."""

    class Meta:
        default_permissions = ()

    # Generic relation: any model instance may own a FormulaVersion.
    content_type = models.ForeignKey('contenttypes.ContentType')
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()

    # The formula being pinned and the version string to pin it at.
    formula = models.ForeignKey('formulas.Formula')
    version = models.CharField('Formula Version', max_length=100)

    def __str__(self):
        return six.text_type('{}, version {}'.format(self.formula, self.version))
class StatusDetailModel(StatusModel):
    """Abstract StatusModel that also carries a free-form detail message
    alongside the current status."""

    # Human-readable explanation accompanying the current status.
    status_detail = models.TextField(blank=True)

    class Meta:
        abstract = True
        default_permissions = ()

    def set_status(self, status, detail=''):
        """Set the status and its detail text, then persist.

        Returns the result of save() (None under stock Django).
        """
        self.status = status
        self.status_detail = detail
        return self.save()
# Model-level permissions assignable on the Formula model as a whole.
_formula_model_permissions = (
    'create',
    'admin',
)

# Object-level permissions assignable on individual Formula instances.
_formula_object_permissions = (
    'view',
    'update',
    'delete',
    'admin',
)
class FormulaQuerySet(SearchQuerySet):
    """
    QuerySet for formulas whose search also matches against each formula's
    components (sls_path, title, description) in addition to the indexed
    fields below.
    """
    searchable_fields = ('title', 'description', 'uri')

    def search(self, query):
        """Return the distinct union of field matches and component matches.

        The component scan runs in Python over every formula, so it is
        O(formulas x components); acceptable for the expected catalog size.
        """
        result = super(FormulaQuerySet, self).search(query)

        # Hoist the case-fold out of the per-component loop.
        needle = query.lower()

        # Collect ids of formulas with at least one matching component.
        # A set avoids duplicate ids; `break` stops at the first hit.
        matching_ids = set()
        for formula in self.all():
            for sls_path, component in formula.components(formula.default_version).items():
                if (needle in sls_path.lower()
                        or needle in component['title'].lower()
                        or needle in component['description'].lower()):
                    matching_ids.add(formula.id)
                    break

        component_matches = self.filter(id__in=matching_ids)

        # BUG FIX: this used Python `or`, which returned only `result`
        # whenever the field search was non-empty, silently dropping all
        # component matches. The comment's intent ("Or them together") is
        # a queryset union, i.e. the `|` operator.
        return (result | component_matches).distinct()
@six.python_2_unicode_compatible
class Formula(TimeStampedModel, TitleSlugDescriptionModel):
    """
    The intention here is to be able to install an entire formula along
    with identifying the individual components that may be installed
    with the formula. For example, a simple formula like EPEL may
    only have one SLS file that sets up the EPEL repository, while a
    more complicated formula for Hadoop should be able to install
    the complete set of Hadoop components on a single machine or
    install the individual NameNode, DataNode, etc on to individual
    machines.

    The problem right now is that salt formula (a packaging convention
    defined by saltstack) doesn't make it easy to pick out those
    individual components. So, to make things easier for stackd.io
    we're proposing a SPECFILE that would allow formulas to define
    a mapping of their components, with a name, description, etc. Then,
    when stackd.io is importing a formula from an external repository,
    we can read that SPECFILE and build up the entries in the database
    for allowing users to choose the entire formula or the installable
    components in the formula. For more on Formula and the packaging
    please see the following link:

        http://docs.saltstack.com/topics/conventions/formulas.html

    The SPECFILE we're proposing is simple, it maps the top-level
    formula to all of its individual components that may be installed
    separately. Formula must still be written in a way that these
    componenets are useful across multiple machines. The SPECFILE
    is a YAML file that looks like:

    formula_name:                   # the top-level formula identifier
        name: <string>              # a human-readable name of the formula
        description: <string>       # a description of the formula
        root_path: <string>         # the root directory of the formula
        components:                 # a list of components that may be
                                    # installed separately
            - name: <string>
              description: <string>
              sls_path: <string>    # the path to the SLS for this component
                                    # using standard stal dot notation

            - name: <string>
              description: <string>
              sls_path: <string>

            ...
            more components
            ...

    ##
    # Example to install CDH4 or its components
    ##
    name: CDH4
    description: Formula to install a complete CDH4 system on a
                 single machine, or use the individual components to
                 install them on separate machines for a distributed
                 Hadoop system.
    root_path: cdh4
    components:
        - name: Hadoop
          description: This component installs the entire Hadoop system.
          sls_path: cdh4.hadoop
        - name: Hadoop NameNode
          description: The NameNode component of the CDH4 formula.
          sls_path: cdh4.hadoop.namenode
        - name: Hadoop DataNode
          description: The DataNode component of the CDH4 formula.
          sls_path: cdh4.hadoop.datanode
        - name: HBase
          description: This component installs the entire HBase system.
          sls_path: cdh4.hbase
        - name: HBase Master
          description: The Master component of the CDH4 formula.
          sls_path: cdh4.hbase.master
        - name: HBase RegionServer
          description: The RegionServer component of the CDH4 formula.
          sls_path: cdh4.hbase.regionserver
    """
    model_permissions = _formula_model_permissions
    object_permissions = _formula_object_permissions

    class Meta:
        ordering = ['title']

        default_permissions = tuple(set(_formula_model_permissions + _formula_object_permissions))

    objects = FormulaQuerySet.as_manager()

    # uri to the repository for this formula
    uri = models.URLField('Repository URI', unique=True)

    # All components in this formula should start with this prefix
    root_path = models.CharField('Root Path', max_length=64)

    # Private key used when the repository requires SSH authentication.
    ssh_private_key = models.TextField('SSH Private Key', blank=True)

    def __str__(self):
        return six.text_type('{} ({})'.format(self.title, self.uri))

    def get_root_dir(self):
        """Return this formula's on-disk storage directory, creating it if needed."""
        root_dir = os.path.join(
            settings.FILE_STORAGE_DIRECTORY,
            'formulas',
            six.text_type(self.pk),
        )
        if not os.path.exists(root_dir):
            os.makedirs(root_dir)
        return root_dir

    def get_gitfs(self):
        """Build the gitfs object backing this formula's repository."""
        return utils.get_gitfs(self.uri, self.ssh_private_key, self)

    def get_valid_versions(self):
        """Return the list of usable versions (gitfs environments)."""
        gitfs = self.get_gitfs()
        return gitfs.envs()

    @property
    def default_version(self):
        # 'base' is the standard salt gitfs environment name.
        return 'base'

    def _read_specfile(self, version):
        """Load and parse the SPECFILE for the given version (or the default).

        Shared by components() and properties(). If the cached gitfs path is
        missing, the repository is updated once and the lookup retried.
        """
        gitfs = self.get_gitfs()
        version = version or self.default_version
        fnd = gitfs.find_file('SPECFILE', tgt_env=version)
        if not os.path.exists(fnd['path']):
            # If the path doesn't exist, update & retry
            gitfs.update()
            fnd = gitfs.find_file('SPECFILE', tgt_env=version)
        with open(fnd['path']) as f:
            return yaml.safe_load(f)

    def components(self, version=None):
        """Return an OrderedDict of sls_path -> component metadata, sorted by title.

        NOTE(review): assumes every SPECFILE component entry has 'title',
        'description', and 'sls_path' keys — a malformed SPECFILE raises
        KeyError here; confirm whether validation happens upstream.
        """
        yaml_data = self._read_specfile(version)

        ret = OrderedDict()

        # Create a map of sls_path -> component pairs
        sorted_components = sorted(yaml_data.get('components', []), key=lambda x: x['title'])

        for component in sorted_components:
            ret[component['sls_path']] = OrderedDict((
                ('title', component['title']),
                ('description', component['description']),
                ('sls_path', component['sls_path']),
            ))

        return ret

    @classmethod
    def all_components(cls, version_map=None):
        """Map every known sls_path to the list of formulas providing it.

        :param version_map: optional {formula: version} overrides; formulas
            absent from the map use their default version.
        """
        version_map = version_map or {}

        ret = {}
        for formula in cls.objects.all():
            if formula in version_map:
                # Use the specified version
                components = formula.components(version_map[formula])
            else:
                # Otherwise use the default version
                components = formula.components(formula.default_version)

            for component in components:
                ret.setdefault(component, []).append(formula)

        return ret

    def properties(self, version=None):
        """Return the SPECFILE's pillar_defaults mapping (empty dict if absent)."""
        return self._read_specfile(version).get('pillar_defaults', {})
@six.python_2_unicode_compatible
class FormulaComponent(TimeStampedModel):
    """
    An extension of an existing FormulaComponent to add additional metadata
    for those components based on this blueprint. In particular, this is how
    we track the order in which the formula should be provisioned in a
    blueprint.
    """

    class Meta:
        verbose_name_plural = 'formula components'
        ordering = ['order']

        default_permissions = ()

    # The formula component we're extending
    formula = models.ForeignKey('formulas.Formula')
    sls_path = models.CharField(max_length=255)

    # The host definition / cloud account this formula component is associated with
    content_type = models.ForeignKey('contenttypes.ContentType')
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()

    # The order in which the component should be provisioned
    order = models.IntegerField('Order', default=0)

    def __str__(self):
        return six.text_type('{0}:{1}'.format(
            self.sls_path,
            self.content_object,
        ))

    def _get_full_component(self):
        """Fetch and cache this component's SPECFILE entry (title/description/sls_path).

        The SPECFILE read goes through gitfs, so the result is memoized on the
        instance to avoid repeated repository lookups.
        """
        if not hasattr(self, '_full_component'):
            version = self.formula.default_version
            self._full_component = self.formula.components(version)[self.sls_path]
        return self._full_component

    @property
    def title(self):
        # Human-readable title from the formula's SPECFILE.
        return self._get_full_component()['title']

    @property
    def description(self):
        # Description from the formula's SPECFILE.
        return self._get_full_component()['description']

    def get_metadata_for_host(self, host):
        """
        Get the current status of a given host
        """
        return self.metadatas.filter(host=host).order_by('-modified').first()
##
# Signal events and handlers
##
@receiver(models.signals.post_save, sender=Formula)
def formula_post_save(sender, instance, **kwargs):
    """
    Refresh the formula's git filesystem right after a Formula row is saved.

    Running the sync in a post-save handler lets the POST request that created
    the formula return immediately while the repository update happens here.
    """
    gitfs = instance.get_gitfs()
    gitfs.update()
@receiver(models.signals.post_delete, sender=Formula)
def cleanup_formula(sender, instance, **kwargs):
    """
    Utility method to clean up the cloned formula repository when
    the formula is deleted.

    Removes both the formula's file-storage directory and its salt gitfs
    cache directory, if they exist.
    """
    # Remove the formula's on-disk storage directory.
    repos_dir = instance.get_root_dir()
    if os.path.isdir(repos_dir):
        rmtree(repos_dir)

    # Remove the salt-master gitfs cache for this formula.
    # NOTE(review): salt / settings / rmtree are presumably imported above
    # this chunk of the file — verify.
    opts = salt.config.client_config(settings.STACKDIO_CONFIG.salt_master_config)
    gitfs_cachedir = os.path.join(opts['cachedir'],
                                  'stackdio',
                                  'formulas',
                                  six.text_type(instance.id))
    if os.path.isdir(gitfs_cachedir):
        rmtree(gitfs_cachedir)
| 33.981481
| 98
| 0.6587
|
acfc778903e18638e197dff48a42b5af3062cef7
| 2,020
|
py
|
Python
|
legacy/code/emp/novel_samples.py
|
GLOMICON/emp
|
c1f752d1ae4c009328bbdcecf9666dbd4dac39b6
|
[
"BSD-3-Clause"
] | 1
|
2020-01-30T15:06:26.000Z
|
2020-01-30T15:06:26.000Z
|
legacy/code/emp/novel_samples.py
|
https-github-com-peadrakw-p1/emp
|
3bc8984ec56c3fcc93e8241ac4cdb88177772cf6
|
[
"BSD-3-Clause"
] | null | null | null |
legacy/code/emp/novel_samples.py
|
https-github-com-peadrakw-p1/emp
|
3bc8984ec56c3fcc93e8241ac4cdb88177772cf6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2013, The QIIME Project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.7.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "jai.rideout@gmail.com"
__status__ = "Development"
"""Contains functions used in the novel_samples.py script."""
from collections import defaultdict
from operator import itemgetter
from biom.parse import parse_biom_table
from cogent.parse.fasta import MinimalFastaParser
def compute_sample_novelty(table_fs, rep_set_f, verbose=False):
    """Compute per-sample novelty across one or more BIOM tables.

    An observation (OTU) is "novel" for a table if its id is absent from the
    reference rep-set FASTA. For each sample, tallies the sequence counts that
    fall in novel vs. known observations.

    :param table_fs: iterable of open BIOM table file handles
    :param rep_set_f: open FASTA file handle of reference sequences
    :param verbose: if True, print progress after each table
    :returns: list of (sample_id, num_novel_observations, percent_novel_seqs)
        tuples, sorted by num_novel_observations descending
    """
    # Build the reference id set once; it is invariant across tables.
    ref_otus = set(seq_id.split()[0] for seq_id, _ in
                   MinimalFastaParser(rep_set_f))

    # {sample_id: [novel_count, known_count, [novel_obs_ids]]}
    sample_novelty = defaultdict(lambda: [0, 0, []])

    tables_processed = 0
    for table_f in table_fs:
        table = parse_biom_table(table_f)
        novel_obs = set(table.ObservationIds) - ref_otus

        for counts, obs_id, _ in table.iterObservations():
            if obs_id in novel_obs:
                for sid, count in zip(table.SampleIds, counts):
                    if count > 0:
                        sample_novelty[sid][0] += count
                        sample_novelty[sid][2].append(obs_id)
            else:
                for sid, count in zip(table.SampleIds, counts):
                    sample_novelty[sid][1] += count

        tables_processed += 1
        if verbose:
            # Parenthesized form works under both Python 2 and 3.
            print("Processed %d table(s)." % tables_processed)

    results = []
    for sid, (novel_count, known_count, novel_obs_ids) in \
            sample_novelty.items():
        total = known_count + novel_count
        # Guard against an all-zero-count sample (would divide by zero).
        percent_novel_seqs = (novel_count / total) * 100 if total else 0.0

        # Create a set first in case a sample in multiple tables has the same
        # novel observations.
        num_new_obs = len(set(novel_obs_ids))
        results.append((sid, num_new_obs, percent_novel_seqs))

    return sorted(results, reverse=True, key=itemgetter(1))
| 34.237288
| 78
| 0.650495
|
acfc77c3c3361e06685c723f891d360005992267
| 57,952
|
py
|
Python
|
simpletransformers/classification/classification_model.py
|
taranais/simpletransformers
|
36b2519cad5d8beed1f1726fa9b1163eb52286f0
|
[
"Apache-2.0"
] | 1
|
2021-08-05T02:21:09.000Z
|
2021-08-05T02:21:09.000Z
|
simpletransformers/classification/classification_model.py
|
taranais/simpletransformers
|
36b2519cad5d8beed1f1726fa9b1163eb52286f0
|
[
"Apache-2.0"
] | null | null | null |
simpletransformers/classification/classification_model.py
|
taranais/simpletransformers
|
36b2519cad5d8beed1f1726fa9b1163eb52286f0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import os
import random
import warnings
from multiprocessing import cpu_count
from dataclasses import asdict
import numpy as np
from scipy.stats import mode, pearsonr
from sklearn.metrics import (
confusion_matrix,
label_ranking_average_precision_score,
matthews_corrcoef,
mean_squared_error,
)
from tqdm.auto import tqdm, trange
from tqdm.contrib import tenumerate
import pandas as pd
import torch
from simpletransformers.classification.classification_utils import InputExample, convert_examples_to_features
from simpletransformers.classification.transformer_models.albert_model import AlbertForSequenceClassification
from simpletransformers.classification.transformer_models.bert_model import BertForSequenceClassification
from simpletransformers.classification.transformer_models.camembert_model import CamembertForSequenceClassification
from simpletransformers.classification.transformer_models.distilbert_model import DistilBertForSequenceClassification
from simpletransformers.classification.transformer_models.flaubert_model import FlaubertForSequenceClassification
from simpletransformers.classification.transformer_models.roberta_model import RobertaForSequenceClassification
from simpletransformers.classification.transformer_models.xlm_model import XLMForSequenceClassification
from simpletransformers.classification.transformer_models.xlm_roberta_model import XLMRobertaForSequenceClassification
from simpletransformers.classification.transformer_models.xlnet_model import XLNetForSequenceClassification
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import ClassificationArgs
from simpletransformers.classification.classification_utils import LazyClassificationDataset
from simpletransformers.custom_models.models import ElectraForSequenceClassification
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertTokenizer,
BertConfig,
BertTokenizer,
CamembertConfig,
CamembertTokenizer,
DistilBertConfig,
DistilBertTokenizer,
ElectraConfig,
ElectraTokenizer,
FlaubertConfig,
FlaubertTokenizer,
LongformerConfig,
LongformerForSequenceClassification,
LongformerTokenizer,
MobileBertConfig,
MobileBertTokenizer,
MobileBertForSequenceClassification,
RobertaConfig,
RobertaTokenizer,
XLMConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
# wandb is an optional dependency; record availability so callers can warn
# instead of crashing when a wandb_project is configured without the package.
try:
    import wandb

    wandb_available = True
except ImportError:
    wandb_available = False

logger = logging.getLogger(__name__)
class ClassificationModel:
    def __init__(
        self, model_type, model_name, num_labels=None, weight=None, args=None, use_cuda=True, cuda_device=-1, **kwargs,
    ):

        """
        Initializes a ClassificationModel model.

        Args:
            model_type: The type of model (bert, xlnet, xlm, roberta, distilbert)
            model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
            num_labels (optional): The number of labels or classes in the dataset.
            weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.
            args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
            use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
            cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
            **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
        """  # noqa: ignore flake8"

        # Registry mapping model_type -> (config class, model class, tokenizer class).
        MODEL_CLASSES = {
            "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
            "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
            "camembert": (CamembertConfig, CamembertForSequenceClassification, CamembertTokenizer),
            "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
            "electra": (ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer),
            "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
            "longformer": (LongformerConfig, LongformerForSequenceClassification, LongformerTokenizer),
            "mobilebert": (MobileBertConfig, MobileBertForSequenceClassification, MobileBertTokenizer),
            "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
            "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
            "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
            "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
        }

        # Load saved args (if model_name is a saved checkpoint dir), then layer
        # on any user-supplied overrides.
        self.args = self._load_model_args(model_name)

        if isinstance(args, dict):
            self.args.update_from_dict(args)
        elif isinstance(args, ClassificationArgs):
            self.args = args

        # wandb sweep support: flatten the sweep config into args.
        if "sweep_config" in kwargs:
            sweep_config = kwargs.pop("sweep_config")
            sweep_values = {key: value["value"] for key, value in sweep_config.as_dict().items() if key != "_wandb"}
            self.args.update_from_dict(sweep_values)

        # Seed every RNG source for reproducibility when requested.
        if self.args.manual_seed:
            random.seed(self.args.manual_seed)
            np.random.seed(self.args.manual_seed)
            torch.manual_seed(self.args.manual_seed)
            if self.args.n_gpu > 0:
                torch.cuda.manual_seed_all(self.args.manual_seed)

        # Reconcile labels_list, labels_map, and num_labels; build defaults
        # (binary classification) when nothing is specified.
        if self.args.labels_list:
            if num_labels:
                assert num_labels == len(self.args.labels_list)
            if self.args.labels_map:
                try:
                    assert list(self.args.labels_map.keys()) == self.args.labels_list
                except AssertionError:
                    assert [int(key) for key in list(self.args.labels_map.keys())] == self.args.labels_list
                    self.args.labels_map = {int(key): value for key, value in self.args.labels_map.items()}
            else:
                self.args.labels_map = {label: i for i, label in enumerate(self.args.labels_list)}
        else:
            len_labels_list = 2 if not num_labels else num_labels
            self.args.labels_list = [i for i in range(len_labels_list)]

        config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]

        if num_labels:
            self.config = config_class.from_pretrained(model_name, num_labels=num_labels, **self.args.config)
            self.num_labels = num_labels
        else:
            self.config = config_class.from_pretrained(model_name, **self.args.config)
            self.num_labels = self.config.num_labels
        self.weight = weight

        # Resolve the execution device; fail loudly if CUDA was requested but
        # is unavailable, rather than silently training on CPU.
        if use_cuda:
            if torch.cuda.is_available():
                if cuda_device == -1:
                    self.device = torch.device("cuda")
                else:
                    self.device = torch.device(f"cuda:{cuda_device}")
            else:
                raise ValueError(
                    "'use_cuda' set to True when cuda is unavailable."
                    " Make sure CUDA is available or set use_cuda=False."
                )
        else:
            self.device = "cpu"

        # Instantiate the model, passing class weights through when provided.
        if self.weight:
            self.model = model_class.from_pretrained(
                model_name, config=self.config, weight=torch.Tensor(self.weight).to(self.device), **kwargs,
            )
        else:
            self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)

        self.results = {}

        # fp16 (apex) only applies on GPU.
        if not use_cuda:
            self.args.fp16 = False

        self.tokenizer = tokenizer_class.from_pretrained(model_name, do_lower_case=self.args.do_lower_case, **kwargs)

        self.args.model_name = model_name
        self.args.model_type = model_type

        # These tokenizers are known to break under multiprocessing feature
        # conversion, so force it off.
        if model_type in ["camembert", "xlmroberta"]:
            warnings.warn(
                f"use_multiprocessing automatically disabled as {model_type}"
                " fails when using multiprocessing for feature conversion."
            )
            self.args.use_multiprocessing = False

        if self.args.wandb_project and not wandb_available:
            warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
            self.args.wandb_project = None
    def train_model(
        self,
        train_df,
        multi_label=False,
        output_dir=None,
        show_running_loss=True,
        args=None,
        eval_df=None,
        verbose=True,
        **kwargs,
    ):
        """
        Trains the model using 'train_df'

        Args:
            train_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
            the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be trained on this Dataframe.
            output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
            show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
            args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
            eval_df (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
            **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
                    A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.

        Returns:
            None
        """  # noqa: ignore flake8"

        if args:
            self.args.update_from_dict(args)

        if self.args.silent:
            show_running_loss = False

        # Fail fast on inconsistent configuration before any heavy setup.
        if self.args.evaluate_during_training and eval_df is None:
            raise ValueError(
                "evaluate_during_training is enabled but eval_df is not specified."
                " Pass eval_df to model.train_model() if using evaluate_during_training."
            )

        if not output_dir:
            output_dir = self.args.output_dir

        if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
            raise ValueError(
                "Output directory ({}) already exists and is not empty."
                " Use --overwrite_output_dir to overcome.".format(output_dir)
            )

        self._move_model_to_device()

        # Build the training dataset: either a lazily-read file, or an
        # in-memory DataFrame converted to InputExamples.
        if isinstance(train_df, str) and self.args.lazy_loading:
            if self.args.sliding_window:
                raise ValueError("Lazy loading cannot be used with sliding window.")
            train_dataset = LazyClassificationDataset(train_df, self.tokenizer, self.args)
        else:
            if self.args.lazy_loading:
                raise ValueError("Input must be given as a path to a file when using lazy loading")
            if "text" in train_df.columns and "labels" in train_df.columns:
                # Single-text classification with named columns.
                train_examples = [
                    InputExample(i, text, None, label)
                    for i, (text, label) in enumerate(zip(train_df["text"].astype(str), train_df["labels"]))
                ]
            elif "text_a" in train_df.columns and "text_b" in train_df.columns:
                # Sentence-pair classification.
                train_examples = [
                    InputExample(i, text_a, text_b, label)
                    for i, (text_a, text_b, label) in enumerate(
                        zip(train_df["text_a"].astype(str), train_df["text_b"].astype(str), train_df["labels"])
                    )
                ]
            else:
                warnings.warn(
                    "Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
                )
                train_examples = [
                    InputExample(i, text, None, label)
                    for i, (text, label) in enumerate(zip(train_df.iloc[:, 0], train_df.iloc[:, 1]))
                ]
            train_dataset = self.load_and_cache_examples(train_examples, verbose=verbose)

        train_sampler = RandomSampler(train_dataset)
        # NOTE(review): num_workers=14 is hard-coded here rather than taken
        # from args — likely unintentional; confirm against the args class.
        train_dataloader = DataLoader(
            train_dataset, sampler=train_sampler, batch_size=self.args.train_batch_size, num_workers=14
        )

        os.makedirs(output_dir, exist_ok=True)

        global_step, tr_loss = self.train(
            train_dataloader,
            output_dir,
            multi_label=multi_label,
            show_running_loss=show_running_loss,
            eval_df=eval_df,
            verbose=verbose,
            **kwargs,
        )

        # model_to_save = self.model.module if hasattr(self.model, "module") else self.model
        # model_to_save.save_pretrained(output_dir)
        # self.tokenizer.save_pretrained(output_dir)
        # torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
        self._save_model(model=self.model)

        if verbose:
            logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_type, output_dir))
    def train(
        self,
        train_dataloader,
        output_dir,
        multi_label=False,
        show_running_loss=True,
        eval_df=None,
        verbose=True,
        **kwargs,
    ):
        """
        Trains the model on train_dataset.

        Utility function to be used by the train_model() method. Not intended to be used directly.

        Returns:
            (global_step, average training loss) — also returned early when
            early stopping triggers.
        """

        model = self.model
        args = self.args

        tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
        # Total number of optimizer steps across all epochs.
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        no_decay = ["bias", "LayerNorm.weight"]

        optimizer_grouped_parameters = []
        custom_parameter_names = set()

        # Explicit user-defined parameter groups take priority; record their
        # parameter names so later groups don't double-count them.
        for group in self.args.custom_parameter_groups:
            params = group.pop("params")
            custom_parameter_names.update(params)
            param_group = {**group}
            param_group["params"] = [p for n, p in model.named_parameters() if n in params]
            optimizer_grouped_parameters.append(param_group)

        # Per-layer overrides: each layer yields a decay and a no-decay group.
        for group in self.args.custom_layer_parameters:
            layer_number = group.pop("layer")
            layer = f"layer.{layer_number}."
            group_d = {**group}
            group_nd = {**group}
            group_nd["weight_decay"] = 0.0
            params_d = []
            params_nd = []
            for n, p in model.named_parameters():
                if n not in custom_parameter_names and layer in n:
                    if any(nd in n for nd in no_decay):
                        params_nd.append(p)
                    else:
                        params_d.append(p)
                    custom_parameter_names.add(n)
            group_d["params"] = params_d
            group_nd["params"] = params_nd

            optimizer_grouped_parameters.append(group_d)
            optimizer_grouped_parameters.append(group_nd)

        # Everything not claimed above goes into the default decay/no-decay groups.
        if not self.args.train_custom_parameters_only:
            optimizer_grouped_parameters.extend(
                [
                    {
                        "params": [
                            p
                            for n, p in model.named_parameters()
                            if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
                        ],
                        "weight_decay": args.weight_decay,
                    },
                    {
                        "params": [
                            p
                            for n, p in model.named_parameters()
                            if n not in custom_parameter_names and any(nd in n for nd in no_decay)
                        ],
                        "weight_decay": 0.0,
                    },
                ]
            )

        # warmup_ratio only applies when warmup_steps was left at 0.
        warmup_steps = math.ceil(t_total * args.warmup_ratio)
        args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps

        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

        if args.fp16:
            try:
                from apex import amp
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")

            model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)

        global_step = 0
        tr_loss, logging_loss = 0.0, 0.0
        model.zero_grad()
        train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
        epoch_number = 0
        best_eval_metric = None
        early_stopping_counter = 0
        steps_trained_in_current_epoch = 0
        epochs_trained = 0
        current_loss = "Initializing"

        # Resume support: if model_name points at a "checkpoint-<step>[-epoch-<n>]"
        # directory, recover global_step and how many epochs/steps to skip.
        if args.model_name and os.path.exists(args.model_name):
            try:
                # set global_step to gobal_step of last saved checkpoint from model path
                checkpoint_suffix = args.model_name.split("/")[-1].split("-")
                if len(checkpoint_suffix) > 2:
                    checkpoint_suffix = checkpoint_suffix[1]
                else:
                    checkpoint_suffix = checkpoint_suffix[-1]
                global_step = int(checkpoint_suffix)
                epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
                steps_trained_in_current_epoch = global_step % (
                    len(train_dataloader) // args.gradient_accumulation_steps
                )

                logger.info(" Continuing training from checkpoint, will skip to saved global_step")
                logger.info(" Continuing training from epoch %d", epochs_trained)
                logger.info(" Continuing training from global step %d", global_step)
                logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
            except ValueError:
                logger.info(" Starting fine-tuning.")

        if args.evaluate_during_training:
            training_progress_scores = self._create_training_progress_scores(multi_label, **kwargs)

        if args.wandb_project:
            wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
            wandb.watch(self.model)

        model.train()
        for _ in train_iterator:
            # Skip whole epochs already covered by the resumed checkpoint.
            if epochs_trained > 0:
                epochs_trained -= 1
                continue
            train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
            batch_iterator = tqdm(
                train_dataloader,
                desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
                disable=args.silent,
                mininterval=0,
            )
            for step, batch in enumerate(batch_iterator):
                # Skip batches already covered within the resumed epoch.
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue
                inputs = self._get_inputs_dict(batch)
                outputs = model(**inputs)
                # model outputs are always tuple in pytorch-transformers (see doc)
                loss = outputs[0]

                if args.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu parallel training

                current_loss = loss.item()

                if show_running_loss:
                    batch_iterator.set_description(
                        f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
                    )

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    # torch.nn.utils.clip_grad_norm_(
                    #     amp.master_params(optimizer), args.max_grad_norm
                    # )
                else:
                    loss.backward()
                    # torch.nn.utils.clip_grad_norm_(
                    #     model.parameters(), args.max_grad_norm
                    # )

                tr_loss += loss.item()
                # Only step the optimizer once per accumulation window.
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                    optimizer.step()
                    scheduler.step()  # Update learning rate schedule
                    model.zero_grad()
                    global_step += 1

                    if args.logging_steps > 0 and global_step % args.logging_steps == 0:
                        # Log metrics
                        tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                        tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                        logging_loss = tr_loss
                        if args.wandb_project:
                            wandb.log(
                                {
                                    "Training loss": current_loss,
                                    "lr": scheduler.get_lr()[0],
                                    "global_step": global_step,
                                }
                            )

                    if args.save_steps > 0 and global_step % args.save_steps == 0:
                        # Save model checkpoint
                        output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))

                        self._save_model(output_dir_current, optimizer, scheduler, model=model)

                    if args.evaluate_during_training and (
                        args.evaluate_during_training_steps > 0
                        and global_step % args.evaluate_during_training_steps == 0
                    ):
                        # Only evaluate when single GPU otherwise metrics may not average well
                        results, _, _ = self.eval_model(
                            eval_df,
                            verbose=verbose and args.evaluate_during_training_verbose,
                            silent=args.evaluate_during_training_silent,
                            wandb_log=False,
                            **kwargs,
                        )
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)

                        output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))

                        if args.save_eval_checkpoints:
                            self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)

                        training_progress_scores["global_step"].append(global_step)
                        training_progress_scores["train_loss"].append(current_loss)
                        for key in results:
                            training_progress_scores[key].append(results[key])
                        report = pd.DataFrame(training_progress_scores)
                        report.to_csv(
                            os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
                        )

                        if args.wandb_project:
                            wandb.log(self._get_last_metrics(training_progress_scores))

                        # Track the best metric seen so far; save the best model
                        # and apply early stopping based on the configured delta.
                        if not best_eval_metric:
                            best_eval_metric = results[args.early_stopping_metric]
                            self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
                        if best_eval_metric and args.early_stopping_metric_minimize:
                            if best_eval_metric - results[args.early_stopping_metric] > args.early_stopping_delta:
                                best_eval_metric = results[args.early_stopping_metric]
                                self._save_model(
                                    args.best_model_dir, optimizer, scheduler, model=model, results=results
                                )
                                early_stopping_counter = 0
                            else:
                                if args.use_early_stopping:
                                    if early_stopping_counter < args.early_stopping_patience:
                                        early_stopping_counter += 1
                                        if verbose:
                                            logger.info(f" No improvement in {args.early_stopping_metric}")
                                            logger.info(f" Current step: {early_stopping_counter}")
                                            logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                                    else:
                                        if verbose:
                                            logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                            logger.info(" Training terminated.")
                                            train_iterator.close()
                                        return global_step, tr_loss / global_step
                        else:
                            if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
                                best_eval_metric = results[args.early_stopping_metric]
                                self._save_model(
                                    args.best_model_dir, optimizer, scheduler, model=model, results=results
                                )
                                early_stopping_counter = 0
                            else:
                                if args.use_early_stopping:
                                    if early_stopping_counter < args.early_stopping_patience:
                                        early_stopping_counter += 1
                                        if verbose:
                                            logger.info(f" No improvement in {args.early_stopping_metric}")
                                            logger.info(f" Current step: {early_stopping_counter}")
                                            logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                                    else:
                                        if verbose:
                                            logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                            logger.info(" Training terminated.")
                                            train_iterator.close()
                                        return global_step, tr_loss / global_step

            epoch_number += 1
            output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))

            if args.save_model_every_epoch or args.evaluate_during_training:
                os.makedirs(output_dir_current, exist_ok=True)

            if args.save_model_every_epoch:
                self._save_model(output_dir_current, optimizer, scheduler, model=model)

            if args.evaluate_during_training:
                results, _, _ = self.eval_model(
                    eval_df,
                    verbose=verbose and args.evaluate_during_training_verbose,
                    silent=args.evaluate_during_training_silent,
                    wandb_log=False,
                    **kwargs,
                )

                # NOTE(review): unlike the other call sites, this save omits
                # model=model (only optimizer/scheduler/results are saved) —
                # confirm whether that is intentional.
                self._save_model(output_dir_current, optimizer, scheduler, results=results)

                training_progress_scores["global_step"].append(global_step)
                training_progress_scores["train_loss"].append(current_loss)
                for key in results:
                    training_progress_scores[key].append(results[key])
                report = pd.DataFrame(training_progress_scores)
                report.to_csv(os.path.join(args.output_dir, "training_progress_scores.csv"), index=False)

                if args.wandb_project:
                    wandb.log(self._get_last_metrics(training_progress_scores))

                # Epoch-level best-model tracking and (optional) early stopping;
                # early stopping here additionally requires early_stopping_consider_epochs.
                if not best_eval_metric:
                    best_eval_metric = results[args.early_stopping_metric]
                    self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
                if best_eval_metric and args.early_stopping_metric_minimize:
                    if best_eval_metric - results[args.early_stopping_metric] > args.early_stopping_delta:
                        best_eval_metric = results[args.early_stopping_metric]
                        self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
                        early_stopping_counter = 0
                    else:
                        if args.use_early_stopping and args.early_stopping_consider_epochs:
                            if early_stopping_counter < args.early_stopping_patience:
                                early_stopping_counter += 1
                                if verbose:
                                    logger.info(f" No improvement in {args.early_stopping_metric}")
                                    logger.info(f" Current step: {early_stopping_counter}")
                                    logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                            else:
                                if verbose:
                                    logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                    logger.info(" Training terminated.")
                                    train_iterator.close()
                                return global_step, tr_loss / global_step
                else:
                    if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
                        best_eval_metric = results[args.early_stopping_metric]
                        self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
                        early_stopping_counter = 0
                    else:
                        if args.use_early_stopping and args.early_stopping_consider_epochs:
                            if early_stopping_counter < args.early_stopping_patience:
                                early_stopping_counter += 1
                                if verbose:
                                    logger.info(f" No improvement in {args.early_stopping_metric}")
                                    logger.info(f" Current step: {early_stopping_counter}")
                                    logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                            else:
                                if verbose:
                                    logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                    logger.info(" Training terminated.")
                                    train_iterator.close()
                                return global_step, tr_loss / global_step

        return global_step, tr_loss / global_step
def eval_model(
self, eval_df, multi_label=False, output_dir=None, verbose=True, silent=False, wandb_log=True, **kwargs
):
"""
Evaluates the model on eval_df. Saves results to output_dir.
Args:
eval_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be evaluated on this Dataframe.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
wandb_log: If True, evaluation results will be logged to wandb.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results.
model_outputs: List of model outputs for each row in eval_df
wrong_preds: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
self._move_model_to_device()
result, model_outputs, wrong_preds = self.evaluate(
eval_df, output_dir, multi_label=multi_label, verbose=verbose, silent=silent, wandb_log=wandb_log, **kwargs
)
self.results.update(result)
if verbose:
logger.info(self.results)
return result, model_outputs, wrong_preds
    def evaluate(
        self, eval_df, output_dir, multi_label=False, prefix="", verbose=True, silent=False, wandb_log=True, **kwargs
    ):
        """
        Evaluates the model on eval_df.
        Utility function to be used by the eval_model() method. Not intended to be used directly.
        """
        model = self.model
        args = self.args
        eval_output_dir = output_dir
        results = {}
        # With lazy loading enabled, eval_df must be a path to a file that is streamed from disk.
        if isinstance(eval_df, str) and self.args.lazy_loading:
            eval_dataset = LazyClassificationDataset(eval_df, self.tokenizer, self.args)
            eval_examples = None
        else:
            if self.args.lazy_loading:
                raise ValueError("Input must be given as a path to a file when using lazy loading")
            # Build InputExamples from the DataFrame; the column layout decides
            # between single-sentence and sentence-pair inputs.
            if "text" in eval_df.columns and "labels" in eval_df.columns:
                eval_examples = [
                    InputExample(i, text, None, label)
                    for i, (text, label) in enumerate(zip(eval_df["text"].astype(str), eval_df["labels"]))
                ]
            elif "text_a" in eval_df.columns and "text_b" in eval_df.columns:
                eval_examples = [
                    InputExample(i, text_a, text_b, label)
                    for i, (text_a, text_b, label) in enumerate(
                        zip(eval_df["text_a"].astype(str), eval_df["text_b"].astype(str), eval_df["labels"])
                    )
                ]
            else:
                warnings.warn(
                    "Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
                )
                eval_examples = [
                    InputExample(i, text, None, label)
                    for i, (text, label) in enumerate(zip(eval_df.iloc[:, 0], eval_df.iloc[:, 1]))
                ]
            # Sliding window expands each example into several windows;
            # window_counts records how many windows each example produced.
            if args.sliding_window:
                eval_dataset, window_counts = self.load_and_cache_examples(
                    eval_examples, evaluate=True, verbose=verbose, silent=silent
                )
            else:
                eval_dataset = self.load_and_cache_examples(
                    eval_examples, evaluate=True, verbose=verbose, silent=silent
                )
        os.makedirs(eval_output_dir, exist_ok=True)
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        model.eval()
        for batch in tqdm(eval_dataloader, disable=args.silent or silent, desc="Running Evaluation"):
            # batch = tuple(t.to(device) for t in batch)
            with torch.no_grad():
                inputs = self._get_inputs_dict(batch)
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                if multi_label:
                    logits = logits.sigmoid()
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels across batches as numpy arrays.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.sliding_window:
            # Regroup the flat window-level predictions back into one list per example.
            count = 0
            window_ranges = []
            for n_windows in window_counts:
                window_ranges.append([count, count + n_windows])
                count += n_windows
            preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]
            # Keep a single gold label per example (the label of its first window).
            out_label_ids = [
                out_label_ids[i] for i in range(len(out_label_ids)) if i in [window[0] for window in window_ranges]
            ]
            model_outputs = preds
            preds = [np.argmax(pred, axis=1) for pred in preds]
            final_preds = []
            for pred_row in preds:
                # Majority vote over the example's windows; an exact tie falls back to args.tie_value.
                mode_pred, counts = mode(pred_row)
                if len(counts) > 1 and counts[0] == counts[1]:
                    final_preds.append(args.tie_value)
                else:
                    final_preds.append(mode_pred[0])
            preds = np.array(final_preds)
        elif not multi_label and args.regression is True:
            # Regression: the raw (squeezed) model outputs are the predictions.
            preds = np.squeeze(preds)
            model_outputs = preds
        else:
            model_outputs = preds
            if not multi_label:
                preds = np.argmax(preds, axis=1)
        result, wrong = self.compute_metrics(preds, out_label_ids, eval_examples, **kwargs)
        result["eval_loss"] = eval_loss
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(result.keys()):
                writer.write("{} = {}\n".format(key, str(result[key])))
        # Optionally log ROC / precision-recall curves and a confusion matrix to Weights & Biases.
        if self.args.wandb_project and wandb_log and not multi_label:
            wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
            if not args.labels_map:
                self.args.labels_map = {i: i for i in range(self.num_labels)}
            labels_list = sorted(list(self.args.labels_map.keys()))
            inverse_labels_map = {value: key for key, value in self.args.labels_map.items()}
            truth = [inverse_labels_map[out] for out in out_label_ids]
            # ROC
            wandb.log({"roc": wandb.plots.ROC(truth, model_outputs, labels_list)})
            # Precision Recall
            wandb.log({"pr": wandb.plots.precision_recall(truth, model_outputs, labels_list)})
            # Confusion Matrix
            wandb.sklearn.plot_confusion_matrix(
                truth, [inverse_labels_map[np.argmax(out)] for out in model_outputs], labels=labels_list,
            )
        return results, model_outputs, wrong
    def load_and_cache_examples(
        self, examples, evaluate=False, no_cache=False, multi_label=False, verbose=True, silent=False
    ):
        """
        Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.
        Utility function for train() and eval() methods. Not intended to be used directly.
        """
        process_count = self.args.process_count
        tokenizer = self.tokenizer
        args = self.args
        if not no_cache:
            no_cache = args.no_cache
        if not multi_label and args.regression:
            output_mode = "regression"
        else:
            output_mode = "classification"
        if not no_cache:
            os.makedirs(self.args.cache_dir, exist_ok=True)
        mode = "dev" if evaluate else "train"
        # Cache key encodes mode, model type, sequence length, label count and
        # example count, so a changed configuration does not reuse a stale cache.
        cached_features_file = os.path.join(
            args.cache_dir,
            "cached_{}_{}_{}_{}_{}".format(
                mode, args.model_type, args.max_seq_length, self.num_labels, len(examples),
            ),
        )
        if os.path.exists(cached_features_file) and (
            (not args.reprocess_input_data and not no_cache)
            or (mode == "dev" and args.use_cached_eval_features and not no_cache)
        ):
            features = torch.load(cached_features_file)
            if verbose:
                logger.info(f" Features loaded from cache at {cached_features_file}")
        else:
            if verbose:
                logger.info(" Converting to features started. Cache is not used.")
                if args.sliding_window:
                    logger.info(" Sliding window enabled")
            # If labels_map is defined, then labels need to be replaced with ints
            if self.args.labels_map:
                for example in examples:
                    if multi_label:
                        example.label = [self.args.labels_map[label] for label in example.label]
                    else:
                        example.label = self.args.labels_map[example.label]
            # Model-type-specific token layout (CLS position, extra SEP, padding side/ids).
            features = convert_examples_to_features(
                examples,
                args.max_seq_length,
                tokenizer,
                output_mode,
                # XLNet has a CLS token at the end
                cls_token_at_end=bool(args.model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                # RoBERTa uses an extra separator b/w pairs of sentences,
                # cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
                sep_token_extra=bool(args.model_type in ["roberta", "camembert", "xlmroberta", "longformer"]),
                # PAD on the left for XLNet
                pad_on_left=bool(args.model_type in ["xlnet"]),
                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
                process_count=process_count,
                multi_label=multi_label,
                silent=args.silent or silent,
                use_multiprocessing=args.use_multiprocessing,
                sliding_window=args.sliding_window,
                flatten=not evaluate,
                stride=args.stride,
                add_prefix_space=bool(args.model_type in ["roberta", "camembert", "xlmroberta", "longformer"]),
                args=args,
            )
            if verbose and args.sliding_window:
                logger.info(f" {len(features)} features created from {len(examples)} samples.")
            if not no_cache:
                torch.save(features, cached_features_file)
        if args.sliding_window and evaluate:
            # Normalize to list-of-lists (one inner list of windows per example),
            # record each example's window count, then flatten for tensor construction.
            features = [
                [feature_set] if not isinstance(feature_set, list) else feature_set for feature_set in features
            ]
            window_counts = [len(sample) for sample in features]
            features = [feature for feature_set in features for feature in feature_set]
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        # Regression keeps float labels; classification uses integer class ids.
        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.sliding_window and evaluate:
            return dataset, window_counts
        else:
            return dataset
def compute_metrics(self, preds, labels, eval_examples=None, multi_label=False, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
preds: Model predictions
labels: Ground truth labels
eval_examples: List of examples on which evaluation was performed
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results. (Matthews correlation coefficient, tp, tn, fp, fn)
wrong: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
assert len(preds) == len(labels)
extra_metrics = {}
for metric, func in kwargs.items():
extra_metrics[metric] = func(labels, preds)
mismatched = labels != preds
if eval_examples:
wrong = [i for (i, v) in zip(eval_examples, mismatched) if v.any()]
else:
wrong = ["NA"]
if multi_label:
label_ranking_score = label_ranking_average_precision_score(labels, preds)
return {**{"LRAP": label_ranking_score}, **extra_metrics}, wrong
elif self.args.regression:
return {**extra_metrics}, wrong
mcc = matthews_corrcoef(labels, preds)
if self.model.num_labels == 2:
tn, fp, fn, tp = confusion_matrix(labels, preds, labels=[0, 1]).ravel()
return (
{**{"mcc": mcc, "tp": tp, "tn": tn, "fp": fp, "fn": fn}, **extra_metrics},
wrong,
)
else:
return {**{"mcc": mcc}, **extra_metrics}, wrong
    def predict(self, to_predict, multi_label=False):
        """
        Performs predictions on a list of text.
        Args:
            to_predict: A python list of text (str) to be sent to the model for prediction.
        Returns:
            preds: A python list of the predictions (0 or 1) for each text.
            model_outputs: A python list of the raw model outputs for each text.
        """
        model = self.model
        args = self.args
        self._move_model_to_device()
        # Examples require some label value; use a placeholder drawn from labels_map when one is set.
        dummy_label = 0 if not self.args.labels_map else next(iter(self.args.labels_map.keys()))
        if multi_label:
            eval_examples = [
                InputExample(i, text, None, [dummy_label for i in range(self.num_labels)])
                for i, text in enumerate(to_predict)
            ]
        else:
            # A list element that is itself a list is treated as a (text_a, text_b) sentence pair.
            if isinstance(to_predict[0], list):
                eval_examples = [InputExample(i, text[0], text[1], dummy_label) for i, text in enumerate(to_predict)]
            else:
                eval_examples = [InputExample(i, text, None, dummy_label) for i, text in enumerate(to_predict)]
        if args.sliding_window:
            eval_dataset, window_counts = self.load_and_cache_examples(eval_examples, evaluate=True, no_cache=True)
        else:
            eval_dataset = self.load_and_cache_examples(
                eval_examples, evaluate=True, multi_label=multi_label, no_cache=True
            )
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        # When the config requests hidden states, also collect the embedding
        # outputs and every layer's hidden states alongside the logits.
        if self.config.output_hidden_states:
            for batch in tqdm(eval_dataloader, disable=args.silent, desc="Running Prediction"):
                model.eval()
                # batch = tuple(t.to(device) for t in batch)
                with torch.no_grad():
                    inputs = self._get_inputs_dict(batch)
                    outputs = model(**inputs)
                    tmp_eval_loss, logits = outputs[:2]
                    embedding_outputs, layer_hidden_states = outputs[2][0], outputs[2][1:]
                    if multi_label:
                        logits = logits.sigmoid()
                    eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if preds is None:
                    preds = logits.detach().cpu().numpy()
                    out_label_ids = inputs["labels"].detach().cpu().numpy()
                    all_layer_hidden_states = np.array([state.detach().cpu().numpy() for state in layer_hidden_states])
                    all_embedding_outputs = embedding_outputs.detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                    out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
                    all_layer_hidden_states = np.append(
                        all_layer_hidden_states,
                        np.array([state.detach().cpu().numpy() for state in layer_hidden_states]),
                        axis=1,
                    )
                    all_embedding_outputs = np.append(
                        all_embedding_outputs, embedding_outputs.detach().cpu().numpy(), axis=0
                    )
        else:
            for batch in tqdm(eval_dataloader, disable=args.silent):
                model.eval()
                # batch = tuple(t.to(device) for t in batch)
                with torch.no_grad():
                    inputs = self._get_inputs_dict(batch)
                    outputs = model(**inputs)
                    tmp_eval_loss, logits = outputs[:2]
                    if multi_label:
                        logits = logits.sigmoid()
                    eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if preds is None:
                    preds = logits.detach().cpu().numpy()
                    out_label_ids = inputs["labels"].detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                    out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.sliding_window:
            # Regroup window-level logits per original text, then majority-vote
            # across windows; an exact tie falls back to args.tie_value.
            count = 0
            window_ranges = []
            for n_windows in window_counts:
                window_ranges.append([count, count + n_windows])
                count += n_windows
            preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]
            model_outputs = preds
            preds = [np.argmax(pred, axis=1) for pred in preds]
            final_preds = []
            for pred_row in preds:
                mode_pred, counts = mode(pred_row)
                if len(counts) > 1 and counts[0] == counts[1]:
                    final_preds.append(args.tie_value)
                else:
                    final_preds.append(mode_pred[0])
            preds = np.array(final_preds)
        elif not multi_label and args.regression is True:
            preds = np.squeeze(preds)
            model_outputs = preds
        else:
            model_outputs = preds
            if multi_label:
                # Per-label thresholding; args.threshold is either one float or a list with one value per label.
                if isinstance(args.threshold, list):
                    threshold_values = args.threshold
                    preds = [
                        [self._threshold(pred, threshold_values[i]) for i, pred in enumerate(example)]
                        for example in preds
                    ]
                else:
                    preds = [[self._threshold(pred, args.threshold) for pred in example] for example in preds]
            else:
                preds = np.argmax(preds, axis=1)
        if self.args.labels_map:
            # Map integer class ids back to the user's original label values.
            inverse_labels_map = {value: key for key, value in self.args.labels_map.items()}
            preds = [inverse_labels_map[pred] for pred in preds]
        if self.config.output_hidden_states:
            return preds, model_outputs, all_embedding_outputs, all_layer_hidden_states
        else:
            return preds, model_outputs
def _threshold(self, x, threshold):
if x >= threshold:
return 1
return 0
    def _move_model_to_device(self):
        # Place the wrapped model onto the device selected at construction time (self.device).
        self.model.to(self.device)
def _get_inputs_dict(self, batch):
if isinstance(batch[0], dict):
inputs = {key: value.squeeze().to(self.device) for key, value in batch[0].items()}
inputs["labels"] = batch[1].to(self.device)
else:
batch = tuple(t.to(self.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
# XLM, DistilBERT and RoBERTa don't use segment_ids
if self.args.model_type != "distilbert":
inputs["token_type_ids"] = batch[2] if self.args.model_type in ["bert", "xlnet", "albert"] else None
return inputs
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _create_training_progress_scores(self, multi_label, **kwargs):
extra_metrics = {key: [] for key in kwargs}
if multi_label:
training_progress_scores = {
"global_step": [],
"LRAP": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
if self.model.num_labels == 2:
training_progress_scores = {
"global_step": [],
"tp": [],
"tn": [],
"fp": [],
"fn": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
elif self.model.num_labels == 1:
training_progress_scores = {
"global_step": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
training_progress_scores = {
"global_step": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
return training_progress_scores
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
if model and not self.args.no_save:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args.save_optimizer_and_scheduler:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
self._save_model_args(output_dir)
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
    def _save_model_args(self, output_dir):
        # Serialize self.args into output_dir, creating the directory first if needed.
        os.makedirs(output_dir, exist_ok=True)
        self.args.save(output_dir)
def _load_model_args(self, input_dir):
args = ClassificationArgs()
args.load(input_dir)
return args
def get_named_parameters(self):
return [n for n, p in self.model.named_parameters()]
| 45.811858
| 214
| 0.580601
|
acfc7a04593afb73fd0a34685525d5f6f4139961
| 11,703
|
py
|
Python
|
awx/main/tests/functional/api/test_unified_jobs_stdout.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx/main/tests/functional/api/test_unified_jobs_stdout.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx/main/tests/functional/api/test_unified_jobs_stdout.py
|
AmadeusITGroup/awx
|
74af187568b06fcf2ac6d7ed05c4f1c5934346eb
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
# -*- coding: utf-8 -*-
import base64
import json
import re
from datetime import datetime
from django.conf import settings
from django.utils.encoding import smart_str
from unittest import mock
import pytest
from awx.api.versioning import reverse
from awx.main.models import (
Job,
JobEvent,
AdHocCommand,
AdHocCommandEvent,
Project,
ProjectUpdate,
ProjectUpdateEvent,
InventoryUpdate,
InventorySource,
InventoryUpdateEvent,
SystemJob,
SystemJobEvent,
)
def _mk_project_update(created=None):
    """Save a bare Project and return an (unsaved) ProjectUpdate attached to it."""
    extra_fields = {'created': created} if created else {}
    parent_project = Project()
    parent_project.save()
    return ProjectUpdate(project=parent_project, **extra_fields)
def _mk_inventory_update(created=None):
    """Save an ec2 InventorySource and return an (unsaved) InventoryUpdate for it.

    Fix: the update's ``source`` previously read ``'e2'``, which does not match
    the parent ``InventorySource(source='ec2')``; the two are kept consistent.
    """
    kwargs = {}
    if created:
        kwargs['created'] = created
    source = InventorySource(source='ec2')
    source.save()
    iu = InventoryUpdate(inventory_source=source, source='ec2', **kwargs)
    return iu
@pytest.mark.django_db
@pytest.mark.parametrize(
    'Parent, Child, relation, view',
    [
        [Job, JobEvent, 'job', 'api:job_stdout'],
        [AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
        [_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
        [_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
    ],
)
def test_text_stdout(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
    # Three sequential stdout events should come back as three plain-text lines.
    parent = Parent()
    parent.save()
    for line_no in range(3):
        event_fields = {relation: parent, 'stdout': 'Testing {}\n'.format(line_no), 'start_line': line_no}
        Child(**event_fields).save()
    stdout_url = reverse(view, kwargs={'pk': parent.pk}) + '?format=txt'
    response = get(stdout_url, user=admin, expect=200)
    expected_lines = ['Testing %d' % line_no for line_no in range(3)]
    assert smart_str(response.content).splitlines() == expected_lines
@pytest.mark.django_db
@pytest.mark.parametrize(
    'Parent, Child, relation, view',
    [
        [Job, JobEvent, 'job', 'api:job_stdout'],
        [AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
        [_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
        [_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
    ],
)
@pytest.mark.parametrize('download', [True, False])
def test_ansi_stdout_filtering(sqlite_copy_expert, Parent, Child, relation, view, download, get, admin):
    parent = Parent()
    parent.save()
    for line_no in range(3):
        colored_line = '\x1B[0;36mTesting {}\x1B[0m\n'.format(line_no)
        Child(**{relation: parent, 'stdout': colored_line, 'start_line': line_no}).save()
    base_url = reverse(view, kwargs={'pk': parent.pk})
    # ansi codes in ?format=txt should get filtered
    txt_fmt = "?format={}".format("txt_download" if download else "txt")
    response = get(base_url + txt_fmt, user=admin, expect=200)
    assert smart_str(response.content).splitlines() == ['Testing %d' % line_no for line_no in range(3)]
    if download:
        assert response.has_header('Content-Disposition')
    else:
        assert not response.has_header('Content-Disposition')
    # ask for ansi and you'll get it
    ansi_fmt = "?format={}".format("ansi_download" if download else "ansi")
    response = get(base_url + ansi_fmt, user=admin, expect=200)
    assert smart_str(response.content).splitlines() == ['\x1B[0;36mTesting %d\x1B[0m' % line_no for line_no in range(3)]
    if download:
        assert response.has_header('Content-Disposition')
    else:
        assert not response.has_header('Content-Disposition')
@pytest.mark.django_db
@pytest.mark.parametrize(
    'Parent, Child, relation, view',
    [
        [Job, JobEvent, 'job', 'api:job_stdout'],
        [AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
        [_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
        [_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
    ],
)
def test_colorized_html_stdout(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
    parent = Parent()
    parent.save()
    for line_no in range(3):
        colored_line = '\x1B[0;36mTesting {}\x1B[0m\n'.format(line_no)
        Child(**{relation: parent, 'stdout': colored_line, 'start_line': line_no}).save()
    html_url = reverse(view, kwargs={'pk': parent.pk}) + '?format=html'
    response = get(html_url, user=admin, expect=200)
    rendered = smart_str(response.content)
    # the cyan ansi colour must be translated to a stylesheet rule plus spans
    assert '.ansi36 { color: #2dbaba; }' in rendered
    for line_no in range(3):
        assert '<span class="ansi36">Testing {}</span>'.format(line_no) in rendered
@pytest.mark.django_db
@pytest.mark.parametrize(
    'Parent, Child, relation, view',
    [
        [Job, JobEvent, 'job', 'api:job_stdout'],
        [AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
        [_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
        [_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
    ],
)
def test_stdout_line_range(sqlite_copy_expert, Parent, Child, relation, view, get, admin):
    parent = Parent()
    parent.save()
    for line_no in range(20):
        Child(**{relation: parent, 'stdout': 'Testing {}\n'.format(line_no), 'start_line': line_no}).save()
    # start_line is inclusive, end_line exclusive
    ranged_url = reverse(view, kwargs={'pk': parent.pk}) + '?format=html&start_line=5&end_line=10'
    response = get(ranged_url, user=admin, expect=200)
    expected_lines = ['Testing %d' % line_no for line_no in range(5, 10)]
    assert re.findall('Testing [0-9]+', smart_str(response.content)) == expected_lines
@pytest.mark.django_db
def test_text_stdout_from_system_job_events(sqlite_copy_expert, get, admin):
    # SystemJob stdout is assembled from SystemJobEvent rows keyed on the job's creation time.
    created = datetime.utcnow()
    system_job = SystemJob(created=created)
    system_job.save()
    for line_no in range(3):
        SystemJobEvent(system_job=system_job, stdout='Testing {}\n'.format(line_no), start_line=line_no, job_created=created).save()
    detail_url = reverse('api:system_job_detail', kwargs={'pk': system_job.pk})
    response = get(detail_url, user=admin, expect=200)
    assert smart_str(response.data['result_stdout']).splitlines() == ['Testing %d' % line_no for line_no in range(3)]
@pytest.mark.django_db
def test_text_stdout_with_max_stdout(sqlite_copy_expert, get, admin):
    created = datetime.utcnow()
    system_job = SystemJob(created=created)
    system_job.save()
    # one byte past the display cutoff forces the "too large" placeholder message
    total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
    oversized_stdout = 'X' * total_bytes
    SystemJobEvent(system_job=system_job, stdout=oversized_stdout, start_line=0, job_created=created).save()
    detail_url = reverse('api:system_job_detail', kwargs={'pk': system_job.pk})
    response = get(detail_url, user=admin, expect=200)
    expected_message = (
        'Standard Output too large to display ({actual} bytes), only download '
        'supported for sizes over {max} bytes.'.format(actual=total_bytes, max=settings.STDOUT_MAX_BYTES_DISPLAY)
    )
    assert response.data['result_stdout'] == expected_message
@pytest.mark.django_db
@pytest.mark.parametrize(
    'Parent, Child, relation, view',
    [
        [Job, JobEvent, 'job', 'api:job_stdout'],
        [AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
        [_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
        [_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
    ],
)
@pytest.mark.parametrize('fmt', ['txt', 'ansi'])
@mock.patch('awx.main.redact.UriCleaner.SENSITIVE_URI_PATTERN', mock.Mock(**{'search.return_value': None}))  # really slow for large strings
def test_max_bytes_display(sqlite_copy_expert, Parent, Child, relation, view, fmt, get, admin):
    created = datetime.utcnow()
    parent = Parent(created=created)
    parent.save()
    total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
    oversized_stdout = 'X' * total_bytes
    Child(**{relation: parent, 'stdout': oversized_stdout, 'start_line': 0, 'job_created': created}).save()
    base_url = reverse(view, kwargs={'pk': parent.pk})
    # inline display formats are replaced with a placeholder message...
    response = get(base_url + '?format={}'.format(fmt), user=admin, expect=200)
    expected_message = (
        'Standard Output too large to display ({actual} bytes), only download '
        'supported for sizes over {max} bytes.'.format(actual=total_bytes, max=settings.STDOUT_MAX_BYTES_DISPLAY)
    )
    assert smart_str(response.content) == expected_message
    # ...but the download formats still return the full stdout
    response = get(base_url + '?format={}_download'.format(fmt), user=admin, expect=200)
    assert smart_str(response.content) == oversized_stdout
@pytest.mark.django_db
@pytest.mark.parametrize('Cls, view', [[_mk_project_update, 'api:project_update_stdout'], [_mk_inventory_update, 'api:inventory_update_stdout']])
@pytest.mark.parametrize('fmt', ['txt', 'ansi', 'txt_download', 'ansi_download'])
def test_legacy_result_stdout_text_fallback(Cls, view, fmt, get, admin):
    # older versions of stored raw stdout in a raw text blob at
    # main_unifiedjob.result_stdout_text; this test ensures that fallback
    # works properly if no job events exist
    legacy_job = Cls()
    legacy_job.save()
    legacy_job.result_stdout_text = 'LEGACY STDOUT!'
    legacy_job.save()
    stdout_url = reverse(view, kwargs={'pk': legacy_job.pk})
    response = get(stdout_url + '?format={}'.format(fmt), user=admin, expect=200)
    assert smart_str(response.content) == 'LEGACY STDOUT!'
@pytest.mark.django_db
@pytest.mark.parametrize('Cls, view', [[_mk_project_update, 'api:project_update_stdout'], [_mk_inventory_update, 'api:inventory_update_stdout']])
@pytest.mark.parametrize('fmt', ['txt', 'ansi'])
@mock.patch('awx.main.redact.UriCleaner.SENSITIVE_URI_PATTERN', mock.Mock(**{'search.return_value': None}))  # really slow for large strings
def test_legacy_result_stdout_with_max_bytes(Cls, view, fmt, get, admin):
    legacy_job = Cls()
    legacy_job.save()
    total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
    oversized_stdout = 'X' * total_bytes
    legacy_job.result_stdout_text = oversized_stdout
    legacy_job.save()
    stdout_url = reverse(view, kwargs={'pk': legacy_job.pk})
    # inline display is truncated to the placeholder message...
    response = get(stdout_url + '?format={}'.format(fmt), user=admin, expect=200)
    expected_message = (
        'Standard Output too large to display ({actual} bytes), only download '
        'supported for sizes over {max} bytes.'.format(actual=total_bytes, max=settings.STDOUT_MAX_BYTES_DISPLAY)
    )
    assert smart_str(response.content) == expected_message
    # ...while the download variants return the full legacy blob
    response = get(stdout_url + '?format={}'.format(fmt + '_download'), user=admin, expect=200)
    assert smart_str(response.content) == oversized_stdout
@pytest.mark.django_db
@pytest.mark.parametrize(
    'Parent, Child, relation, view',
    [
        [Job, JobEvent, 'job', 'api:job_stdout'],
        [AdHocCommand, AdHocCommandEvent, 'ad_hoc_command', 'api:ad_hoc_command_stdout'],
        [_mk_project_update, ProjectUpdateEvent, 'project_update', 'api:project_update_stdout'],
        [_mk_inventory_update, InventoryUpdateEvent, 'inventory_update', 'api:inventory_update_stdout'],
    ],
)
@pytest.mark.parametrize('fmt', ['txt', 'ansi', 'txt_download', 'ansi_download'])
def test_text_with_unicode_stdout(sqlite_copy_expert, Parent, Child, relation, view, get, admin, fmt):
    # Non-ASCII stdout must survive every text/ansi (inline and download) format.
    parent = Parent()
    parent.save()
    for line_no in range(3):
        Child(**{relation: parent, 'stdout': u'オ{}\n'.format(line_no), 'start_line': line_no}).save()
    stdout_url = reverse(view, kwargs={'pk': parent.pk}) + '?format=' + fmt
    response = get(stdout_url, user=admin, expect=200)
    assert smart_str(response.content).splitlines() == ['オ%d' % line_no for line_no in range(3)]
@pytest.mark.django_db
def test_unicode_with_base64_ansi(sqlite_copy_expert, get, admin):
    created = datetime.utcnow()
    unicode_job = Job(created=created)
    unicode_job.save()
    for line_no in range(3):
        JobEvent(job=unicode_job, stdout='オ{}\n'.format(line_no), start_line=line_no, job_created=created).save()
    b64_url = reverse('api:job_stdout', kwargs={'pk': unicode_job.pk}) + '?format=json&content_encoding=base64'
    response = get(b64_url, user=admin, expect=200)
    # the stdout arrives base64-encoded inside a JSON envelope
    decoded = base64.b64decode(json.loads(smart_str(response.content))['content'])
    assert smart_str(decoded).splitlines() == ['オ%d' % line_no for line_no in range(3)]
| 41.796429
| 145
| 0.691532
|
acfc7a3188faf44f7ec672d430c3bf4e84b95638
| 2,979
|
py
|
Python
|
msdnet/stoppingcriterion.py
|
dani-lbnl/msdnet
|
20f503322524ceb340379448f1778a58bb1f9a18
|
[
"MIT"
] | 24
|
2019-08-24T06:42:51.000Z
|
2021-10-09T14:27:51.000Z
|
msdnet/stoppingcriterion.py
|
dani-lbnl/msdnet
|
20f503322524ceb340379448f1778a58bb1f9a18
|
[
"MIT"
] | 12
|
2019-07-31T06:56:19.000Z
|
2020-12-05T18:08:54.000Z
|
msdnet/stoppingcriterion.py
|
dani-lbnl/msdnet
|
20f503322524ceb340379448f1778a58bb1f9a18
|
[
"MIT"
] | 11
|
2019-09-17T02:39:24.000Z
|
2022-03-30T21:28:35.000Z
|
#-----------------------------------------------------------------------
#Copyright 2020 Centrum Wiskunde & Informatica, Amsterdam
#
#Author: Daniel M. Pelt
#Contact: D.M.Pelt@cwi.nl
#Website: http://dmpelt.github.io/msdnet/
#License: MIT
#
#This file is part of MSDNet, a Python implementation of the
#Mixed-Scale Dense Convolutional Neural Network.
#-----------------------------------------------------------------------
"""Module for defining a stopping criterion for training."""
import abc
import time
class StoppingCriterion(abc.ABC):
    """Abstract interface that decides when training should stop.

    Subclasses implement check(); reset() may be overridden to (re)initialize
    any internal state before training starts.
    """

    @abc.abstractmethod
    def check(self, ntrainimages, better_val):
        """Decide whether to continue training or not.

        :param ntrainimages: number of training images processed since last check
        :param better_val: whether a better validation loss was found since last check
        :returns: True if training has to be stopped, False otherwise
        """
        pass

    def reset(self):
        """(Re)initialize the criterion; the default implementation keeps no state."""
        pass
class NeverStop(StoppingCriterion):
    """Criterion that never fires: training continues until the process is killed."""

    def check(self, ntrainimages, better_val):
        # There is no stopping condition at all.
        return False
class NonImprovingValidationSteps(StoppingCriterion):
'''Stop after a chosen number of non-improving validation steps.
'''
def __init__(self, maxsteps):
'''
:param maxsteps: Maximum number of non-improving validation steps.
'''
self.max = maxsteps
def reset(self):
self.cur = 0
def check(self, ntrainimages, better_val):
if better_val:
self.cur = 0
return False
else:
self.cur += 1
if self.cur >= self.max:
return True
else:
return False
class MaxTime(StoppingCriterion):
'''Stop after a certain number of hours of training.
'''
def __init__(self, hours):
'''
:param hours: number of hours to train.
'''
self.s = hours*60*60
def reset(self):
self.starttime = time.monotonic()
def check(self, ntrainimages, better_val):
if time.monotonic() - self.starttime > self.s:
return True
else:
return False
class MaxEpochs(StoppingCriterion):
'''Stop after a certain number of epochs.
'''
def __init__(self, epochsize, maxepochs):
'''
:param epochsize: number of training images in each epoch.
:param maxepochs: number of epochs to train.
'''
self.esize = epochsize
self.max = maxepochs
def reset(self):
self.nims = 0
def check(self, ntrainimages, better_val):
self.nims += ntrainimages
if self.nims/self.esize >= self.max:
return True
else:
return False
| 27.583333
| 88
| 0.583753
|
acfc7a55cc1f7a079c0bce7ebd92f0b7a8d0ab89
| 3,073
|
py
|
Python
|
benchmarks/utils/helper.py
|
kmaehashi/chainer-benchmark
|
7af2005d71253d236f7f239119d7130f22b26bb4
|
[
"MIT"
] | 9
|
2018-04-09T10:26:45.000Z
|
2019-07-13T11:31:49.000Z
|
benchmarks/utils/helper.py
|
chainer/chainer-benchmark
|
8d0c8f5052b5e2a85ad522ff48899ffc9a2bfafb
|
[
"MIT"
] | 19
|
2018-04-09T10:35:12.000Z
|
2018-08-30T08:49:40.000Z
|
benchmarks/utils/helper.py
|
chainer/chainer-benchmark
|
8d0c8f5052b5e2a85ad522ff48899ffc9a2bfafb
|
[
"MIT"
] | 2
|
2018-04-09T10:26:53.000Z
|
2019-03-20T01:35:26.000Z
|
from functools import wraps
import inspect
import cupy
def _is_func(target):
return inspect.ismethod(target) or inspect.isfunction(target)
def sync(target):
"""Decorator to perform CPU/GPU synchronization.
This decorator can be applied to both classes and functions.
"""
if isinstance(target, type):
klass = target
members = inspect.getmembers(klass, predicate=_is_func)
for (name, func) in members:
if not (name == 'setup' or name.startswith('time_')):
continue
setattr(klass, name, _synchronized_func(func))
return klass
elif _is_func(target):
return _synchronized_func(target)
else:
raise TypeError('cannot apply decorator to {}'.format(target))
def _synchronized_func(func):
@wraps(func)
def _wrap_func(*args, **kwargs):
event = cupy.cuda.stream.Event()
event.record()
event.synchronize()
func(*args, **kwargs)
event = cupy.cuda.stream.Event()
event.record()
event.synchronize()
return _wrap_func
def parameterize(args):
"""Class decorator to parameterize the benchmark.
Pass the list of pair of parameter name and values. Each parameter
value will be passed as the function argument when benchmark runs.
See the example below for the usage.
>>> @parameterize([
... ('batchsize', [32, 64, 128]),
... ('n_gpus', [1, 2]),
... ])
... class MyBenchmark(object):
... def time_all(self, batchsize, n_gpus):
... ...
Parameters cannot be sparse due to the limitation of ASV.
"""
def _wrap_class(klass):
"""Wraps the given class.
Internally, this function utilizes the parameterization feature of
ASV, i.e., set `params` and `param_names` attribute of the class.
`params` is a list of list of parameters, and `param_names` is a list
of parameter names. `params[i]` is a list of parameters for parameter
named `param_names[i]` where `i` is an index.
"""
assert isinstance(klass, type)
params = [arg[1] for arg in args]
param_names = [arg[0] for arg in args]
orig_params = getattr(klass, 'params', [])
orig_param_names = getattr(klass, 'param_names', [])
if 0 < len(orig_params):
# ASV allows specifying list of parameters (instead of list of
# list of parameters) if only one parameter axis is given.
if not isinstance(orig_params[0], (tuple, list)):
orig_params = [orig_params]
if len(orig_param_names) == 0:
orig_param_names = ['param']
assert len(orig_param_names) == 1
else:
assert len(orig_param_names) == 0
params += orig_params
param_names += orig_param_names
assert len(params) == len(param_names)
setattr(klass, 'params', params)
setattr(klass, 'param_names', param_names)
return klass
return _wrap_class
| 30.127451
| 77
| 0.61178
|
acfc7b6bf85f614d5247f0c576fe96caeb04e0a0
| 7,835
|
py
|
Python
|
worker/__main__.py
|
shield-h2020/dare-workers
|
2293bef65d42912468da53bfc7bb506793516a78
|
[
"Apache-2.0"
] | null | null | null |
worker/__main__.py
|
shield-h2020/dare-workers
|
2293bef65d42912468da53bfc7bb506793516a78
|
[
"Apache-2.0"
] | null | null | null |
worker/__main__.py
|
shield-h2020/dare-workers
|
2293bef65d42912468da53bfc7bb506793516a78
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Listen to a particular topic of the Kafka cluster and write incoming records to a
Hive table, via Spark2 Streaming module.
'''
import json
import logging
import os
import pipelines
import sys
from argparse import ArgumentParser, HelpFormatter
from pyspark import SparkContext
from pyspark.sql import HiveContext
from pyspark.streaming import StreamingContext
from utils import authenticate, get_logger
from version import __version__
def parse_args():
'''
Parse command-line options found in 'args' (default: sys.argv[1:]).
:returns: On success, a namedtuple of Values instances.
'''
parser = ArgumentParser(
prog='worker',
description='Streaming Worker reads from Kafka cluster and writes to Hive '
'through the Spark2 streaming module.',
epilog='END',
formatter_class=lambda prog: HelpFormatter(prog, max_help_position=40, width=80),
usage='worker [OPTIONS]... -t <pipeline> --topic <topic> -p <partition>')
# .................................set optional arguments
parser._optionals.title = 'Optional Arguments'
parser.add_argument('-a', '--app-name', metavar='STRING',
help='name of the Spark Job to display on the cluster web UI')
parser.add_argument('-b', '--batch-duration', default=30, type=int, metavar='INTEGER',
help='time interval (in seconds) at which streaming data will be divided into batches')
parser.add_argument('-c', '--config-file', metavar='FILE', type=file,
help='path of configuration file', default=os.path.expanduser('~/.worker.json'))
parser.add_argument('-g', '--group-id', metavar='STRING',
help='name of the consumer group to join for dynamic partition assignment')
parser.add_argument('-l', '--log-level', default='INFO', metavar='STRING',
help='determine the level of the logger')
parser.add_argument('-v', '--version', action='version', version='%(prog)s {0}'
.format(__version__))
# .................................set required arguments
required = parser.add_argument_group('Required Arguments')
required.add_argument('-p', '--partition', required=True, metavar='INTEGER',
help='partition number to consume')
required.add_argument('--topic', required=True, metavar='STRING',
help='topic to listen for new messages')
required.add_argument('-t', '--type', choices=pipelines.__all__, required=True,
help='type of the data that will be ingested', metavar='STRING')
return parser.parse_args()
def store(rdd, hsc, table, topic, schema=None, segtype='segments', index=0):
'''
Interface for saving the content of the streaming :class:`DataFrame` out into
Hive storage.
:param rdd : The content as a :class:`pyspark.RDD` of :class:`Row`.
:param hsc : A variant of Spark SQL that integrates with data stored in Hive.
:param table : The specified table in Hive database.
:param topic : Name of the topic to listen for incoming segments.
:param schema : The schema of this :class:`DataFrame` as a
:class:`pyspark.sql.types.StructType`.
:param segtype: The type of the received segments.
:param index : Index of array that identify the segment.
'''
logger = logging.getLogger('SHIELD.WORKER.STREAMING')
if rdd.isEmpty():
logger.info(' ---- LISTENING KAFKA TOPIC: {0} ---- '.format(topic))
return
hsc.setConf('hive.exec.dynamic.partition', 'true')
hsc.setConf('hive.exec.dynamic.partition.mode', 'nonstrict')
logger.info('Received {0} from topic. [Rows: {1}]'.format(segtype, rdd.count()))
logger.info('Create distributed collection for partition "{0}".'
.format(rdd.first()[index]))
df = hsc.createDataFrame(rdd, schema)
df.write.format('parquet').mode('append').insertInto(table)
logger.info(' **** REGISTRATION COMPLETED **** ')
class StreamingWorker:
'''
StreamingWorker is responsible for listening to a particular topic of the Kafka
cluster, consuming incoming messages and writing data to the Hive database.
:param app_name : A name for your job, to display on the cluster web UI.
:param batch_duration: The time interval (in seconds) at which streaming data will be
divided into batches.
'''
def __init__(self, app_name, batch_duration):
self._logger = logging.getLogger('SHIELD.WORKER.STREAMING')
self._logger.info('Initializing Streaming Worker...')
# .............................connect to Spark cluster
self._context = SparkContext(appName=app_name)
self._logger.info('Connect to Spark Cluster as job "{0}" and broadcast variables'
' on it.'.format(app_name))
# .............................create a new streaming context
self._streaming = StreamingContext(self._context, batchDuration=batch_duration)
self._logger.info('Streaming data will be divided into batches of {0} seconds.'
.format(batch_duration))
# .............................read from ``hive-site.xml``
self._hive_context = HiveContext(self._context)
self._logger.info('Read Hive\'s configuration to integrate with data stored in it.')
def start(self, datatype, db_name, zk_quorum, group_id, topic, partition):
'''
Start the ingestion.
:param datatype : Type of the data that will be ingested.
:param db_name : Name of the database in Hive, where the data will be stored.
:param zk_quorum: Zookeeper quorum (host[:port],...).
:param group_id : The group id for this consumer.
:param topic : Topic to listen for new messages.
:param partition: Partition number to consume.
'''
import worker.pipelines
module = getattr(worker.pipelines, datatype)
table = '{0}.{1}'.format(db_name, datatype)
dstream = module.create_dstream(self._streaming, zk_quorum, group_id,
{ topic: partition })
schema = module.struct_type()
dstream.map(lambda x: module.stream_parser(x))\
.filter(lambda x: bool(x))\
.foreachRDD(lambda x:
store(x, self._hive_context, table, topic, schema,
module.SEGTYPE, module.INDEX))
self._streaming.start()
self._logger.info('Start the execution of the streams.')
self._streaming.awaitTermination()
@classmethod
def run(cls, **kwargs):
'''
Main command-line entry point.
:param cls: The class as implicit first argument.
'''
try:
conf = json.loads(kwargs.pop('config_file').read())
topic = kwargs.pop('topic')
# .........................set up logger
get_logger('SHIELD.WORKER', kwargs.pop('log_level'))
# .........................check kerberos authentication
if os.getenv('KRB_AUTH'):
authenticate(conf['kerberos'])
# .........................instantiate StreamingWorker
worker = cls(kwargs.pop('app_name') or topic, kwargs.pop('batch_duration'))
# .........................start StreamingWorker
worker.start(
kwargs.pop('type'),
conf['database'],
conf['zkQuorum'],
kwargs.pop('group_id') or topic,
topic,
int(kwargs.pop('partition')))
except SystemExit: raise
except:
sys.excepthook(*sys.exc_info())
sys.exit(1)
if __name__ == '__main__': StreamingWorker.run(**parse_args().__dict__)
| 39.97449
| 95
| 0.615571
|
acfc7c143e0fbcd179b632b66d3cde3713e2e164
| 17,700
|
py
|
Python
|
pyiron_atomistics/gpaw/pyiron_ase.py
|
dgehringer/pyiron_atomistics
|
f8f2d573a483e802c8e5840998a0769378b95e31
|
[
"BSD-3-Clause"
] | null | null | null |
pyiron_atomistics/gpaw/pyiron_ase.py
|
dgehringer/pyiron_atomistics
|
f8f2d573a483e802c8e5840998a0769378b95e31
|
[
"BSD-3-Clause"
] | 1
|
2022-02-24T09:51:10.000Z
|
2022-02-24T09:51:10.000Z
|
pyiron_atomistics/gpaw/pyiron_ase.py
|
srmnitc/pyiron_atomistics
|
2c8052b082f2c4fb6f6291ac2b1f801ea7ab1567
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from ase import Atoms
from ase.constraints import dict2constraint
import copy
import importlib
import numpy as np
from pyiron_atomistics.atomistics.job.interactive import GenericInteractive
from pyiron_atomistics.atomistics.structure.atoms import pyiron_to_ase, Atoms as PAtoms
try:
from ase.cell import Cell
except ImportError:
Cell = None
__author__ = "Jan Janssen"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "janssen@mpie.de"
__status__ = "development"
__date__ = "Sep 1, 2018"
def ase_structure_todict(structure):
atoms_dict = {
"symbols": structure.get_chemical_symbols(),
"positions": structure.get_positions(),
"pbc": structure.get_pbc(),
"celldisp": structure.get_celldisp(),
"constraint": [c.todict() for c in structure.constraints],
"info": copy.deepcopy(structure.info),
}
if Cell is not None:
atoms_dict["cell"] = structure.get_cell().todict()
else:
atoms_dict["cell"] = structure.get_cell()
if structure.has("tags"):
atoms_dict["tags"] = structure.get_tags()
if structure.has("masses"):
atoms_dict["masses"] = structure.get_masses()
if structure.has("momenta"):
atoms_dict["momenta"] = structure.get_momenta()
if structure.has("initial_magmoms"):
atoms_dict["magmoms"] = structure.get_initial_magnetic_moments()
if structure.has("initial_charges"):
atoms_dict["charges"] = structure.get_initial_charges()
if structure.calc is not None:
calculator_dict = structure.calc.todict()
calculator_dict["calculator_class"] = (
str(structure.calc.__class__).replace("'", " ").split()[1]
)
calculator_dict["label"] = structure.calc.label
atoms_dict["calculator"] = calculator_dict
return atoms_dict
def ase_calculator_fromdict(class_path, class_dict):
module_loaded = importlib.import_module(".".join(class_path.split(".")[:-1]))
module_class = getattr(module_loaded, class_path.split(".")[-1])
return module_class(**class_dict)
def ase_structure_fromdict(atoms_dict):
def cell_fromdict(celldict):
celldict.pop("pbc", None)
if Cell is not None:
return Cell(**celldict)
else:
return celldict
atoms_dict_copy = copy.deepcopy(atoms_dict)
if "calculator" in atoms_dict_copy.keys():
calculator_dict = atoms_dict_copy["calculator"]
calculator_class = calculator_dict["calculator_class"]
del calculator_dict["calculator_class"]
atoms_dict_copy["calculator"] = ase_calculator_fromdict(
calculator_class, calculator_dict
)
if "constraint" in atoms_dict_copy.keys():
atoms_dict_copy["constraint"] = [
dict2constraint(const_dict) for const_dict in atoms_dict_copy["constraint"]
]
atoms_dict_copy["cell"] = cell_fromdict(celldict=atoms_dict_copy["cell"])
atoms = Atoms(**atoms_dict_copy)
if atoms.calc is not None:
atoms.calc.read(atoms.calc.label)
return atoms
class AseJob(GenericInteractive):
def __init__(self, project, job_name):
super(AseJob, self).__init__(project, job_name)
self.__name__ = "AseJob"
self.__version__ = (
None # Reset the version number to the executable is set automatically
)
@property
def structure(self):
return GenericInteractive.structure.fget(self)
@structure.setter
def structure(self, structure):
if isinstance(structure, PAtoms):
structure = pyiron_to_ase(structure)
GenericInteractive.structure.fset(self, structure)
def to_hdf(self, hdf=None, group_name=None):
super(AseJob, self).to_hdf(hdf=hdf, group_name=group_name)
with self.project_hdf5.open("input") as hdf_input:
hdf_input["structure"] = ase_structure_todict(self._structure)
def from_hdf(self, hdf=None, group_name=None):
super(AseJob, self).from_hdf(hdf=hdf, group_name=group_name)
with self.project_hdf5.open("input") as hdf_input:
self.structure = ase_structure_fromdict(hdf_input["structure"])
def run_static(self):
pre_run_mode = self.server.run_mode
self.server.run_mode.interactive = True
self.run_if_interactive()
self.interactive_close()
self.server.run_mode = pre_run_mode
def run_if_interactive(self):
if self.structure.calc is None:
self.set_calculator()
super(AseJob, self).run_if_interactive()
self.interactive_collect()
def set_calculator(self):
raise NotImplementedError(
"The _set_calculator function is not implemented for this code."
)
def interactive_structure_setter(self, structure):
self.structure.calc.calculate(structure)
def interactive_positions_setter(self, positions):
self.structure.positions = positions
def interactive_initialize_interface(self):
self.status.running = True
self._structure.calc.set_label(self.working_directory + "/")
self._interactive_library = True
def interactive_close(self):
if self.interactive_is_activated():
super(AseJob, self).interactive_close()
with self.project_hdf5.open("output") as h5:
if "interactive" in h5.list_groups():
for key in h5["interactive"].list_nodes():
h5["generic/" + key] = h5["interactive/" + key]
def interactive_forces_getter(self):
return self.structure.get_forces()
def interactive_pressures_getter(self):
return -self.structure.get_stress(voigt=False)
def interactive_energy_pot_getter(self):
return self.structure.get_potential_energy()
def interactive_energy_tot_getter(self):
return self.structure.get_potential_energy()
def interactive_indices_getter(self):
element_lst = sorted(list(set(self.structure.get_chemical_symbols())))
return np.array(
[element_lst.index(el) for el in self.structure.get_chemical_symbols()]
)
def interactive_positions_getter(self):
return self.structure.positions.copy()
def interactive_steps_getter(self):
return len(self.interactive_cache[list(self.interactive_cache.keys())[0]])
def interactive_time_getter(self):
return self.interactive_steps_getter()
def interactive_volume_getter(self):
return self.structure.get_volume()
def interactive_cells_getter(self):
return self.structure.cell.copy()
def write_input(self):
pass
def collect_output(self):
pass
def run_if_scheduler(self):
self._create_working_directory()
super(AseJob, self).run_if_scheduler()
def interactive_index_organizer(self):
index_merge_lst = self._interactive_species_lst.tolist() + list(
np.unique(self._structure_current.get_chemical_symbols())
)
el_lst = sorted(set(index_merge_lst), key=index_merge_lst.index)
current_structure_index = [
el_lst.index(el) for el in self._structure_current.get_chemical_symbols()
]
previous_structure_index = [
el_lst.index(el) for el in self._structure_previous.get_chemical_symbols()
]
if not np.array_equal(
np.array(current_structure_index),
np.array(previous_structure_index),
):
self._logger.debug("Generic library: indices changed!")
self.interactive_indices_setter(self._structure_current.indices)
def _get_structure(self, frame=-1, wrap_atoms=True):
if (
self.server.run_mode.interactive
or self.server.run_mode.interactive_non_modal
):
# Warning: We only copy symbols, positions and cell information - no tags.
if self.output.indices is not None and len(self.output.indices) != 0:
indices = self.output.indices[frame]
else:
return None
if len(self._interactive_species_lst) == 0:
el_lst = list(np.unique(self._structure_current.get_chemical_symbols()))
else:
el_lst = self._interactive_species_lst.tolist()
if indices is not None:
if wrap_atoms:
positions = self.output.positions[frame]
else:
if len(self.output.unwrapped_positions) > max([frame, 0]):
positions = self.output.unwrapped_positions[frame]
else:
positions = (
self.output.positions[frame]
+ self.output.total_displacements[frame]
)
atoms = Atoms(
symbols=np.array([el_lst[el] for el in indices]),
positions=positions,
cell=self.output.cells[frame],
pbc=self.structure.pbc,
)
# Update indicies to match the indicies in the cache.
atoms.indices = indices
return atoms
else:
return None
else:
if (
self.get("output/generic/cells") is not None
and len(self.get("output/generic/cells")) != 0
):
return super()._get_structure(frame=frame, wrap_atoms=wrap_atoms)
else:
return None
class AseAdapter(object):
def __init__(self, ham, fast_mode=False):
self._ham = ham
self._fast_mode = fast_mode
if self._ham.server.run_mode.interactive and fast_mode:
self.interactive_cache = {
"velocities": [],
"energy_kin": [],
"momenta": [],
"positions": [],
"energy_tot": [],
"energy_pot": [],
}
self._ham.run()
self._ham.interactive_cache = {}
elif self._ham.server.run_mode.interactive:
self.interactive_cache = {"velocities": [], "energy_kin": [], "momenta": []}
self.constraints = []
try:
self.arrays = {
"positions": self._ham.structure.positions.copy(),
"numbers": self._ham.structure.numbers,
}
except AttributeError:
self.arrays = {
"positions": self._ham.structure.positions.copy(),
"numbers": self._ham.structure.get_atomic_numbers(),
}
@property
def communicator(self):
return None
def get_masses(self):
return np.array(self._ham.structure.get_masses())
def get_positions(self):
return self.arrays["positions"]
def set_positions(self, positions):
self.arrays["positions"] = positions
def get_forces(self, md=True):
if self._fast_mode:
self._ham.interactive_positions_setter(self.arrays["positions"])
self.interactive_cache["positions"].append(self.arrays["positions"])
self._ham.interactive_execute()
self.interactive_cache["energy_pot"].append(
self._ham.interactive_energy_pot_getter()
)
return np.array(self._ham.interactive_forces_getter())
else:
self._ham.structure.positions = self.arrays["positions"]
if self._ham.server.run_mode.interactive:
self._ham.run()
else:
self._ham.run(delete_existing_job=True)
return self._ham.output.forces[-1]
def interactive_close(self):
self._ham.interactive_store_in_cache(
"velocities", self.interactive_cache["velocities"]
)
self._ham.interactive_store_in_cache(
"energy_kin", self.interactive_cache["energy_kin"]
)
if self._fast_mode:
self._ham.interactive_store_in_cache(
"positions", self.interactive_cache["positions"]
)
self._ham.interactive_store_in_cache(
"energy_pot", self.interactive_cache["energy_pot"][::2]
)
self._ham.interactive_store_in_cache(
"energy_tot",
(
np.array(self.interactive_cache["energy_pot"][::2])
+ np.array(self.interactive_cache["energy_kin"])
).tolist(),
)
else:
self._ham.interactive_store_in_cache(
"energy_tot",
(
np.array(self._ham.output.energy_pot)[::2]
+ np.array(self.interactive_cache["energy_kin"])
).tolist(),
)
self._ham.interactive_close()
def get_number_of_atoms(self):
return self._ham.structure.get_number_of_atoms()
# ASE functions
def get_kinetic_energy(self):
"""Get the kinetic energy."""
momenta = self.arrays.get("momenta")
if momenta is None:
return 0.0
return 0.5 * np.vdot(momenta, self.get_velocities())
def set_momenta(self, momenta, apply_constraint=True):
"""Set momenta."""
if apply_constraint and len(self.constraints) > 0 and momenta is not None:
momenta = np.array(momenta) # modify a copy
for constraint in self.constraints:
if hasattr(constraint, "adjust_momenta"):
constraint.adjust_momenta(self, momenta)
self.set_array("momenta", momenta, float, (3,))
self.interactive_cache["velocities"].append(self.get_velocities())
self.interactive_cache["energy_kin"].append(self.get_kinetic_energy())
def set_velocities(self, velocities):
"""Set the momenta by specifying the velocities."""
self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)
def get_momenta(self):
"""Get array of momenta."""
if "momenta" in self.arrays:
return self.arrays["momenta"].copy()
else:
return np.zeros((len(self), 3))
def set_array(self, name, a, dtype=None, shape=None):
"""Update array.
If *shape* is not *None*, the shape of *a* will be checked.
If *a* is *None*, then the array is deleted."""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError(
"Array has wrong shape %s != %s." % (a.shape, b.shape)
)
b[:] = a
def get_angular_momentum(self):
"""Get total angular momentum with respect to the center of mass."""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
return np.cross(positions, self.get_momenta()).sum(0)
def new_array(self, name, a, dtype=None, shape=None):
"""Add new array.
If *shape* is not *None*, the shape of *a* will be checked."""
if dtype is not None:
a = np.array(a, dtype, order="C")
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
if not a.flags["C_CONTIGUOUS"]:
a = np.ascontiguousarray(a)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError("Array has wrong length: %d != %d." % (len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError(
"Array has wrong shape %s != %s." % (a.shape, (a.shape[0:1] + shape))
)
self.arrays[name] = a
def has(self, name):
"""Check for existence of array.
name must be one of: 'tags', 'momenta', 'masses', 'initial_magmoms',
'initial_charges'."""
# XXX extend has to calculator properties
return name in self.arrays
def get_center_of_mass(self, scaled=False):
"""Get the center of mass.
If scaled=True the center of mass in scaled coordinates
is returned."""
m = self.get_masses()
com = np.dot(m, self.arrays["positions"]) / m.sum()
if scaled:
if self._fast_mode:
return np.linalg.solve(self._ham.structure.cells[-1].T, com)
else:
return np.linalg.solve(self._ham.output.cells[-1].T, com)
else:
return com
def get_velocities(self):
"""Get array of velocities."""
momenta = self.arrays.get("momenta")
if momenta is None:
return None
m = self.get_masses()
# m = self.arrays.get('masses')
# if m is None:
# m = atomic_masses[self.arrays['numbers']]
return momenta / m.reshape(-1, 1)
def __len__(self):
return len(self._ham.structure)
| 36.494845
| 108
| 0.601695
|
acfc7cde8c5d81ebc3cce5db77af0f6df1b39f05
| 320
|
py
|
Python
|
Lab 1 - Forward chaining- backward chaining and goal trees/run.py
|
nuhman/MIT-OCW-6.034-Artificial-Intelligence
|
59ab288b5571d51d22ac98e748cdb223f9f08e74
|
[
"MIT"
] | 1
|
2017-04-12T14:40:08.000Z
|
2017-04-12T14:40:08.000Z
|
Lab 1 - Forward chaining- backward chaining and goal trees/run.py
|
nuhman/MIT-OCW-6.034-Artificial-Intelligence
|
59ab288b5571d51d22ac98e748cdb223f9f08e74
|
[
"MIT"
] | null | null | null |
Lab 1 - Forward chaining- backward chaining and goal trees/run.py
|
nuhman/MIT-OCW-6.034-Artificial-Intelligence
|
59ab288b5571d51d22ac98e748cdb223f9f08e74
|
[
"MIT"
] | null | null | null |
from production import IF, AND, OR, NOT, THEN, DELETE, forward_chain
theft_rule = IF('you have (?x)',
THEN( 'i have (?x)' ),
DELETE( 'you have (?x)' ))
data = ( 'you have apple','you have orange','you have pear' )
print(forward_chain([theft_rule], data, verbose=True))
| 32
| 70
| 0.5625
|
acfc7d1e7f3212881d470f6ad0be0a0f92467e2d
| 271
|
py
|
Python
|
mayan/apps/converter/runtime.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/converter/runtime.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 15
|
2017-12-18T14:58:07.000Z
|
2021-03-01T20:05:05.000Z
|
mayan/apps/converter/runtime.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
from __future__ import unicode_literals
import logging
from django.utils.module_loading import import_string
from .settings import setting_graphics_backend
logger = logging.getLogger(__name__)
backend = converter_class = import_string(setting_graphics_backend.value)
| 24.636364
| 73
| 0.859779
|
acfc7deba3f994d5db5d6b5454a3489d62fc2373
| 628
|
py
|
Python
|
kwpbar/demo.py
|
Kwpolska/kwpbar.py
|
41152301a6c7d0a7eb1229d2ce0ec09596bb3e8d
|
[
"BSD-3-Clause"
] | 4
|
2015-11-11T11:22:12.000Z
|
2021-02-15T00:29:04.000Z
|
kwpbar/demo.py
|
Kwpolska/kwpbar.py
|
41152301a6c7d0a7eb1229d2ce0ec09596bb3e8d
|
[
"BSD-3-Clause"
] | null | null | null |
kwpbar/demo.py
|
Kwpolska/kwpbar.py
|
41152301a6c7d0a7eb1229d2ce0ec09596bb3e8d
|
[
"BSD-3-Clause"
] | 2
|
2017-04-29T13:26:47.000Z
|
2021-02-15T01:51:46.000Z
|
# -*- encoding: utf-8 -*-
# KwPBar for Python 0.1.0
# A progress bar for Python.
# Copyright © 2015-2018, Chris Warrick.
# See /LICENSE for licensing information.
"""
Demo module.
:Copyright: © 2015-2018, Chris Warrick.
:License: BSD (see /LICENSE).
"""
import sys
import kwpbar
import time
__all__ = ('demo')
def demo(): # pragma: no cover
"""Run the demo."""
if len(sys.argv) == 1:
runs = 5
else:
runs = int(sys.argv[1])
for i in range(0, runs + 1):
kwpbar.pbar(i, runs)
time.sleep(1)
kwpbar.erase_pbar()
if __name__ == '__main__': # pragma: no cover
demo()
| 16.972973
| 46
| 0.595541
|
acfc7e9447668218693c301517d2251d9a370192
| 6,794
|
py
|
Python
|
tnt/torchnet/logger/tensorboardmeterlogger.py
|
jozhang97/Side-tuning
|
dea345691fb7ee0230150fe56ddd644efdffa6ac
|
[
"MIT"
] | 56
|
2020-01-12T05:45:59.000Z
|
2022-03-17T15:04:15.000Z
|
tnt/torchnet/logger/tensorboardmeterlogger.py
|
jozhang97/Side-tuning
|
dea345691fb7ee0230150fe56ddd644efdffa6ac
|
[
"MIT"
] | 7
|
2020-01-28T23:14:45.000Z
|
2022-02-10T01:56:48.000Z
|
tnt/torchnet/logger/tensorboardmeterlogger.py
|
jozhang97/Side-tuning
|
dea345691fb7ee0230150fe56ddd644efdffa6ac
|
[
"MIT"
] | 2
|
2020-02-29T14:51:23.000Z
|
2020-03-07T03:23:27.000Z
|
import os
from . import MeterLogger
from .. import meter as Meter
import numpy as np
import torch
import functools
IS_IMPORTED_TENSORBOARDX = False
try:
import tensorboardX
IS_IMPORTED_TENSORBOARDX = True
except:
pass
class TensorboardMeterLogger(MeterLogger):
''' A class to package and visualize meters.
Args:
log_dir: Directory to write events to (log_dir/env)
env: Tensorboard environment to log to.
plotstylecombined: Whether to plot curves in the same window.
loggers: All modes: defaults to ['train', 'val']. If plotstylecombined, these will be superimposed in one plot.
'''
def __init__(self, env, log_dir=None, plotstylecombined=True, loggers=('train', 'val')):
super().__init__(modes=loggers)
self.env = env
self.log_dir = os.path.join(log_dir, env)
self.logger = {}
self.writer = {}
for logger in loggers:
self.logger[logger] = {}
self.writer[logger] = tensorboardX.SummaryWriter(logdir=self.log_dir + "-{}".format(logger))
self.metername_to_ptype = {}
self.plotstylecombined = plotstylecombined
def __addlogger(self, meter, ptype, kwargs={}):
for key in self.writer.keys():
self.metername_to_ptype[meter] = ptype
if ptype == 'stacked_line':
raise NotImplementedError("stacked_line not yet implemented for TensorboardX meter")
elif ptype == 'line':
if self.plotstylecombined:
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_scalar, tag=meter)
else:
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_scalar, tag=meter)
elif ptype == 'image':
if self.plotstylecombined:
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_image, tag=meter)
else:
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_image, tag=meter)
elif ptype == 'histogram':
if self.plotstylecombined:
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_histogram, tag=meter)
else:
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_histogram, tag=meter)
elif ptype == 'heatmap':
raise NotImplementedError("heatmap not yet implemented for TensorboardX meter")
elif ptype == 'text':
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_text, tag=meter)
elif ptype == 'video':
for key in self.writer.keys():
self.logger[key][meter] = functools.partial(self.writer[key].add_video, tag=meter, **kwargs)
def add_meter(self, meter_name, meter, ptype=None, kwargs={}):
super().add_meter(meter_name, meter)
if ptype: # Use `ptype` for manually selecting the plot type
self.__addlogger(meter_name, ptype, kwargs)
elif isinstance(meter, Meter.ClassErrorMeter):
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.mAPMeter):
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.AUCMeter):
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.ConfusionMeter):
self.__addlogger(meter_name, 'heatmap')
elif isinstance(meter, Meter.MSEMeter):
self.__addlogger(meter_name, 'line')
elif type(meter) == Meter.ValueSummaryMeter:
self.__addlogger(meter_name, 'line')
elif isinstance(meter, Meter.MultiValueSummaryMeter):
self.__addlogger(meter_name, 'stacked_line')
else:
raise NotImplementedError("Unknown meter type (and pytpe): {} ({})".format(type(meter), ptype))
def reset_meter(self, iepoch, mode='train', meterlist=None):
self.timer.reset()
for meter_name, meter in self.meter[mode].items():
if meterlist is not None and meter_name not in meterlist:
continue
val = self.meter[mode][meter_name].value()
val = val[0] if isinstance(val, (list, tuple)) else val
should_reset_and_continue = False
if isinstance(val, str) or val is None:
should_reset_and_continue = (val is None)
elif isinstance(val, np.ndarray):
should_reset_and_continue = np.isnan(val).any()
elif isinstance(val, torch.Tensor):
should_reset_and_continue = torch.isnan(val).any()
else:
should_reset_and_continue = np.isnan(val)
if should_reset_and_continue:
self.meter[mode][meter_name].reset()
continue
if isinstance(meter, Meter.ConfusionMeter):
self.logger[mode][meter_name].log(val, global_step=iepoch)
elif 'image' == self.metername_to_ptype[meter_name]:
try:
self.logger[mode][meter_name](img_tensor=val, global_step=iepoch)
except ValueError as e:
print(f'trouble logging {meter_name} {e}')
print('probably due to fake 0 data the data is all at 0')
elif 'histogram' == self.metername_to_ptype[meter_name]:
try:
self.logger[mode][meter_name](values=val, global_step=iepoch)
except ValueError as e:
print(f'trouble logging {meter_name} {e}')
print('probably due to fake 0 data the data is all at 0')
elif 'text' == self.metername_to_ptype[meter_name]:
if val is not None:
self.logger[mode][meter_name](text_string=val, global_step=iepoch)
elif 'video' == self.metername_to_ptype[meter_name]:
if val is not None:
self.logger[mode][meter_name](vid_tensor=val, global_step=iepoch)
elif isinstance(self.meter[mode][meter_name], Meter.MultiValueSummaryMeter):
self.logger[mode][meter_name](scalar_val=np.array(np.cumsum(val), global_step=iepoch)) # keep mean
else:
self.logger[mode][meter_name](scalar_value=val, global_step=iepoch)
self.meter[mode][meter_name].reset()
def flush(self):
for k in self.writer:
self.writer[k].flush()
| 46.534247
| 119
| 0.603326
|
acfc80477b1e9bb603aff2ad8baf07135b716dc2
| 236
|
py
|
Python
|
dsc/modules/simulate/changepoint.py
|
stephenslab/eb-linreg-dsc
|
f84cadb65b635c89ceeeaa9c780f73265e5dfc31
|
[
"MIT"
] | null | null | null |
dsc/modules/simulate/changepoint.py
|
stephenslab/eb-linreg-dsc
|
f84cadb65b635c89ceeeaa9c780f73265e5dfc31
|
[
"MIT"
] | null | null | null |
dsc/modules/simulate/changepoint.py
|
stephenslab/eb-linreg-dsc
|
f84cadb65b635c89ceeeaa9c780f73265e5dfc31
|
[
"MIT"
] | null | null | null |
# DSC simulation module: generate a changepoint (trend-filtering) regression
# data set with a train/test split.
# NOTE(review): `dims`, `sfix`, `snr`, `basis_k`, `signal` and `bfix` are not
# defined in this file — presumably injected by the DSC runner as module
# parameters; verify against the .dsc configuration.
import simulate
# Unpack sample size (n), number of predictors (p) and number of causal
# predictors (s) from the DSC-supplied settings.
n, p, s = simulate.parse_input_params (dims, sfix = sfix)
# Changepoint design matrix plus responses, held-out test split, true
# coefficients and noise level.
X, y, Xtest, ytest, beta, sigma = simulate.changepoint_predictors (n, p, s, snr, k = basis_k, signal = signal, seed = None, bfix = bfix, center_sticky = True)
| 39.333333
| 158
| 0.699153
|
acfc80de3786d10471eb35b4ca946d9abda83559
| 3,896
|
py
|
Python
|
mapchete/cli/default/convert.py
|
ashutoshkumarjha/mapchete
|
2eb37a886c09563eb17b1102d78d9de5e8d1141c
|
[
"MIT"
] | 161
|
2016-02-20T15:18:13.000Z
|
2022-03-28T11:55:32.000Z
|
mapchete/cli/default/convert.py
|
ashutoshkumarjha/mapchete
|
2eb37a886c09563eb17b1102d78d9de5e8d1141c
|
[
"MIT"
] | 387
|
2015-08-12T07:16:56.000Z
|
2022-03-30T14:27:12.000Z
|
mapchete/cli/default/convert.py
|
ashutoshkumarjha/mapchete
|
2eb37a886c09563eb17b1102d78d9de5e8d1141c
|
[
"MIT"
] | 20
|
2016-02-22T12:51:54.000Z
|
2022-01-30T22:54:08.000Z
|
import click
from rasterio.dtypes import dtype_ranges
from rasterio.enums import Resampling
from rasterio.rio.options import creation_options
import tilematrix
import tqdm
import mapchete
from mapchete import commands
from mapchete.cli import options
from mapchete.formats import available_output_formats
# Names of all registered mapchete output drivers; used below to build the
# --output-format choice list.
OUTPUT_FORMATS = available_output_formats()
def _validate_bidx(ctx, param, bidx):
if bidx:
try:
return list(map(int, bidx.split(",")))
except ValueError:
raise click.BadParameter("band indexes must be positive integer values")
# CLI entry point: convert an existing tile directory (or other geodata) into
# another output pyramid/format. Option plumbing is declared via the shared
# `options` module plus the click options below.
@click.command(help="Convert outputs or other geodata.")
@options.arg_tiledir
@options.arg_output
@options.opt_zoom
@options.opt_bounds
@options.opt_bounds_crs
@options.opt_area
@options.opt_area_crs
@options.opt_point
@options.opt_point_crs
@click.option(
    "--clip-geometry",
    "-c",
    type=click.Path(exists=True),
    help="Clip output by geometry.",
)
@click.option("--bidx", callback=_validate_bidx, help="Band indexes to copy.")
@click.option(
    "--output-pyramid",
    type=click.Choice(tilematrix._conf.PYRAMID_PARAMS.keys()),
    help="Output pyramid to write to.",
)
@click.option(
    "--output-metatiling",
    type=click.INT,
    help="Output metatiling.",
)
@click.option(
    "--output-format",
    type=click.Choice(OUTPUT_FORMATS),
    help="Output format.",
)
@click.option(
    "--output-dtype",
    type=click.Choice(dtype_ranges.keys()),
    help="Output data type (for raster output only).",
)
@click.option(
    "--output-geometry-type",
    type=click.STRING,
    help="Output geometry type (for vector output only).",
)
@creation_options
@click.option(
    "--scale-ratio",
    type=click.FLOAT,
    default=1.0,
    help="Scaling factor (for raster output only).",
)
@click.option(
    "--scale-offset",
    type=click.FLOAT,
    default=0.0,
    help="Scaling offset (for raster output only).",
)
@options.opt_resampling_method
@click.option(
    "--overviews", is_flag=True, help="Generate overviews (single GTiff output only)."
)
@click.option(
    "--overviews-resampling-method",
    type=click.Choice([it.name for it in Resampling if it.value in range(8)]),
    default="cubic_spline",
    help="Resampling method used for overviews. (default: cubic_spline)",
)
# NOTE(review): help text below contains a typo ("verviews" -> "overviews");
# left unchanged here since it is a runtime string.
@click.option(
    "--cog",
    is_flag=True,
    help="Write a valid COG. This will automatically generate verviews. (GTiff only)",
)
@options.opt_overwrite
@options.opt_verbose
@options.opt_no_pbar
@options.opt_debug
@options.opt_workers
@options.opt_multi
@options.opt_concurrency
@options.opt_logfile
@options.opt_vrt
@options.opt_idx_out_dir
def convert(
    tiledir,
    output,
    *args,
    vrt=False,
    idx_out_dir=None,
    debug=False,
    no_pbar=False,
    verbose=False,
    logfile=None,
    **kwargs,
):
    """Run the conversion as a lazy job, draining it through a tqdm progress
    bar, then optionally build VRT index files for the written output.

    All remaining CLI options are forwarded to commands.convert() /
    commands.index() via ``*args`` / ``**kwargs``.
    """
    with mapchete.Timer() as t:
        # as_iterator=True returns a lazy job; tasks execute while we iterate.
        job = commands.convert(
            tiledir,
            output,
            *args,
            as_iterator=True,
            msg_callback=tqdm.tqdm.write if verbose else None,
            **kwargs,
        )
        if not len(job):
            return
        # Drain the iterator to actually execute all tasks.
        list(
            tqdm.tqdm(
                job,
                unit="task",
                disable=debug or no_pbar,
            )
        )
        tqdm.tqdm.write(f"processing {tiledir} finished in {t}")
    if vrt:
        tqdm.tqdm.write("creating VRT(s)")
        list(
            tqdm.tqdm(
                commands.index(
                    output,
                    *args,
                    vrt=vrt,
                    idx_out_dir=idx_out_dir,
                    as_iterator=True,
                    msg_callback=tqdm.tqdm.write if verbose else None,
                    **kwargs,
                ),
                unit="tile",
                disable=debug or no_pbar,
            )
        )
        tqdm.tqdm.write(f"index(es) creation for {tiledir} finished")
| 25.298701
| 86
| 0.627053
|
acfc81a71809dc3867a956872e6d65e23ad4d58d
| 4,222
|
py
|
Python
|
fetch/range/resources/long-wav.py
|
BasixKOR/wpt
|
aa27d567c10dcdb2aea6884d5155dfaaa177a800
|
[
"BSD-3-Clause"
] | null | null | null |
fetch/range/resources/long-wav.py
|
BasixKOR/wpt
|
aa27d567c10dcdb2aea6884d5155dfaaa177a800
|
[
"BSD-3-Clause"
] | 112
|
2021-09-27T14:39:02.000Z
|
2022-03-30T14:26:35.000Z
|
fetch/range/resources/long-wav.py
|
clopez/wpt
|
4ba8a4a1f41e166289c0a7feaa5665e1385e90f3
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This generates a 30 minute silent wav, and is capable of
responding to Range requests.
"""
import time
import re
import struct
from wptserve.utils import isomorphic_decode
def create_wav_header(sample_rate, bit_depth, channels, duration):
    """Build the canonical 44-byte RIFF/WAVE header for PCM audio.

    ``duration`` is in seconds; the declared data-chunk size is
    duration * sample_rate * channels * (bit_depth / 8) bytes.
    """
    bytes_per_sample = bit_depth // 8
    block_align = bytes_per_sample * channels
    byte_rate = sample_rate * block_align
    data_size = duration * byte_rate
    # Single struct layout for the whole header:
    #   RIFF chunk id+size, 'WAVE' form type,
    #   'fmt ' subchunk (size 16, PCM format, channel/rate fields),
    #   'data' subchunk id+size.
    return struct.pack(
        '<4sL4s4sLHHLLHH4sL',
        b'RIFF',
        36 + data_size,
        b'WAVE',
        b'fmt ',
        16,           # Subchunk1Size (PCM)
        1,            # AudioFormat: PCM
        channels,
        sample_rate,
        byte_rate,
        block_align,
        bit_depth,
        b'data',
        data_size,
    )
def main(request, response):
    """wptserve handler: stream a silent PCM WAV, honoring byte-Range requests.

    Responds 206 with a Content-Range when a `bytes=start-end` Range header is
    present, otherwise streams the whole resource. Optionally records (via the
    server stash) whether a Range header / which Accept-Encoding was received,
    for later collection by stash-take.py.

    NOTE(review): the module docstring claims a 30 minute wav but `duration`
    below is 5 minutes — confirm which is intended.
    """
    if request.method == u"OPTIONS":
        response.status = (404, b"Not Found")
        response.headers.set(b"Content-Type", b"text/plain")
        return b"Preflight not accepted"
    response.headers.set(b"Content-Type", b"audio/wav")
    response.headers.set(b"Accept-Ranges", b"bytes")
    response.headers.set(b"Cache-Control", b"no-cache")
    response.headers.set(b"Access-Control-Allow-Origin", request.headers.get(b'Origin', b''))
    range_header = request.headers.get(b'Range', b'')
    # Only simple single ranges of the form "bytes=<start>-<end>" are supported.
    range_header_match = range_header and re.search(r'^bytes=(\d*)-(\d*)$', isomorphic_decode(range_header))
    range_received_key = request.GET.first(b'range-received-key', b'')
    accept_encoding_key = request.GET.first(b'accept-encoding-key', b'')
    if range_received_key and range_header:
        # Remove any current value
        request.server.stash.take(range_received_key, b'/fetch/range/')
        # This is later collected using stash-take.py
        request.server.stash.put(range_received_key, u'range-header-received', b'/fetch/range/')
    if accept_encoding_key:
        # Remove any current value
        request.server.stash.take(
            accept_encoding_key,
            b'/fetch/range/'
        )
        # This is later collected using stash-take.py
        request.server.stash.put(
            accept_encoding_key,
            isomorphic_decode(request.headers.get(b'Accept-Encoding', b'')),
            b'/fetch/range/'
        )
    # Audio details
    sample_rate = 8000
    bit_depth = 8
    channels = 1
    duration = 60 * 5
    total_length = int((sample_rate * bit_depth * channels * duration) / 8)
    bytes_remaining_to_send = total_length
    initial_write = b''
    if range_header_match:
        response.status = 206
        start, end = range_header_match.groups()
        start = int(start)
        end = int(end) if end else 0
        if end:
            # Range end is inclusive.
            bytes_remaining_to_send = (end + 1) - start
        else:
            bytes_remaining_to_send = total_length - start
        wav_header = create_wav_header(sample_rate, bit_depth, channels, duration)
        # If the requested range overlaps the 44-byte header, serve the
        # header tail first (clipped to the requested length).
        if start < len(wav_header):
            initial_write = wav_header[start:]
            if bytes_remaining_to_send < len(initial_write):
                initial_write = initial_write[0:bytes_remaining_to_send]
        content_range = b"bytes %d-%d/%d" % (start, end or total_length - 1, total_length)
        response.headers.set(b"Content-Range", content_range)
    else:
        initial_write = create_wav_header(sample_rate, bit_depth, channels, duration)
    response.headers.set(b"Content-Length", bytes_remaining_to_send)
    response.write_status_headers()
    response.writer.write(initial_write)
    bytes_remaining_to_send -= len(initial_write)
    # Stream silence (zero bytes) in one-second chunks until the range is
    # satisfied or the client disconnects.
    while bytes_remaining_to_send > 0:
        to_send = b'\x00' * min(bytes_remaining_to_send, sample_rate)
        bytes_remaining_to_send -= len(to_send)
        if not response.writer.write(to_send):
            break
        # Throttle the stream
        time.sleep(0.5)
| 31.274074
| 108
| 0.651587
|
acfc81fafa36f62a4292afa44bd832ee1ca74591
| 2,167
|
py
|
Python
|
pypy/interpreter/test/test_module.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 1
|
2019-05-27T00:58:46.000Z
|
2019-05-27T00:58:46.000Z
|
pypy/interpreter/test/test_module.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/interpreter/test/test_module.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
from pypy.interpreter.module import Module
class TestModule:
    """Interpreter-level checks for the Module object, driven through a pypy
    object space fixture."""

    def test_name(self, space):
        wrap = space.wrap
        mod = Module(space, wrap('m'))
        w_mod = wrap(mod)
        assert space.eq_w(space.getattr(w_mod, wrap('__name__')), wrap('m'))

    def test_attr(self, space):
        wrap = space.wrap
        w_mod = wrap(Module(space, wrap('m')))
        # NOTE: the original mixes the `space` fixture and `self.space` here;
        # preserved as-is.
        self.space.setattr(w_mod, wrap('x'), wrap(15))
        assert space.eq_w(space.getattr(w_mod, wrap('x')), wrap(15))
        space.delattr(w_mod, wrap('x'))
        space.raises_w(space.w_AttributeError,
                       space.delattr, w_mod, wrap('x'))
class AppTest_ModuleObject:
    """Application-level tests (run inside the pypy object space, where
    `raises` and `skip` are injected helpers)."""
    def test_attr(self):
        # Module attributes support the full get/set/del protocol, but
        # __dict__ itself is read-only.
        m = __import__('__builtin__')
        m.x = 15
        assert m.x == 15
        assert getattr(m, 'x') == 15
        setattr(m, 'x', 23)
        assert m.x == 23
        assert getattr(m, 'x') == 23
        del m.x
        raises(AttributeError, getattr, m, 'x')
        m.x = 15
        delattr(m, 'x')
        raises(AttributeError, getattr, m, 'x')
        raises(AttributeError, delattr, m, 'x')
        raises(TypeError, setattr, m, '__dict__', {})
    def test_docstring(self):
        # type(sys) is the module type; second constructor arg is the docstring.
        import sys
        foo = type(sys)('foo')
        assert foo.__name__ == 'foo'
        assert foo.__doc__ is None
        bar = type(sys)('bar','docstring')
        assert bar.__doc__ == 'docstring'
    def test___file__(self):
        import sys, os
        if not hasattr(sys, "pypy_objspaceclass"):
            skip("need PyPy for sys.__file__ checking")
        assert sys.__file__
        assert os.path.basename(sys.__file__) == 'sys'
    def test_repr(self):
        # Module repr differs for built-in, file-backed and anonymous modules.
        import sys
        r = repr(sys)
        assert r == "<module 'sys' (built-in)>"
        import _exceptions # known to be in pypy/lib
        r = repr(_exceptions)
        assert r.startswith("<module '_exceptions' from ") and \
           'pypy/lib/_exceptions.py' in r and \
           r.endswith('>')
        nofile = type(_exceptions)('nofile', 'foo')
        assert repr(nofile) == "<module 'nofile' from ?>"
        m = type(_exceptions).__new__(type(_exceptions))
        assert repr(m).startswith("<module '?'")
| 32.343284
| 68
| 0.550531
|
acfc82173602097ae303e5e3a1a7b841a20b2f97
| 4,194
|
py
|
Python
|
DQMOffline/Configuration/python/autoDQM.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
DQMOffline/Configuration/python/autoDQM.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
DQMOffline/Configuration/python/autoDQM.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
# Mapping of DQM sequence alias -> [step1: DQM sequences, step2: post-DQM
# step, step3: harvesting/certification sequences]. Entries prefixed with '@'
# refer recursively to other aliases in this same dict.
autoDQM = { 'common' : ['DQMOfflineCommon+@L1TMon',
                        'PostDQMOffline',
                        'DQMHarvestCommon+DQMCertCommon+@L1TMon'],
            'commonSiStripZeroBias' : ['DQMOfflineCommonSiStripZeroBias',
                                       'PostDQMOffline',
                                       'DQMHarvestCommonSiStripZeroBias+DQMCertCommon'],
            'trackingOnlyDQM' : ['DQMOfflineTracking',
                                 'PostDQMOffline',
                                 'DQMHarvestTracking'],
            'pixelTrackingOnlyDQM': ['DQMOfflinePixelTracking',
                                     'PostDQMOffline',
                                     'DQMHarvestPixelTracking'],
            'outerTracker': ['DQMOuterTracker',
                             'PostDQMOffline',
                             'DQMHarvestOuterTracker'],
            'lumi': ['DQMOfflineLumi',
                     'PostDQMOffline',
                     'DQMHarvestLumi'],
            'muon': ['DQMOfflineMuon',
                     'PostDQMOffline',
                     'DQMHarvestMuon+DQMCertMuon'],
            'hcal': ['DQMOfflineHcal',
                     'PostDQMOffline',
                     'DQMHarvestHcal'],
            'hcal2': ['HcalDQMOfflineSequence',
                      'PostDQMOffline',
                      'HcalDQMOfflinePostProcessor'],
            'jetmet': ['DQMOfflineJetMET',
                       'PostDQMOffline',
                       'DQMHarvestJetMET+DQMCertJetMET'],
            'ecal': ['DQMOfflineEcal',
                     'PostDQMOffline',
                     'DQMHarvestEcal+DQMCertEcal'],
            'egamma': ['DQMOfflineEGamma',
                       'PostDQMOffline',
                       'DQMHarvestEGamma'],
            'btag': ['DQMOfflineBTag',
                     'PostDQMOffline',
                     'DQMHarvestBTag'],
            'L1TMon': ['DQMOfflineL1TMonitoring',
                       'PostDQMOffline',
                       'DQMHarvestL1TMonitoring'],
            'L1TEgamma': ['DQMOfflineL1TEgamma',
                          'PostDQMOffline',
                          'DQMHarvestL1TEgamma'],
            'L1TMuon': ['DQMOfflineL1TMuon',
                        'PostDQMOffline',
                        'DQMHarvestL1TMuon'],
            'HLTMon': ['HLTMonitoring',
                       'PostDQMOffline',
                       'HLTMonitoringClient'],
            'HLTMonPA' : ['HLTMonitoringPA', 'PostDQMOffline', 'HLTMonitoringClientPA'],
            # Composite aliases used for express / prompt reconstruction.
            'express': ['@commonSiStripZeroBias+@muon+@hcal+@jetmet+@ecal',
                        'PostDQMOffline',
                        '@commonSiStripZeroBias+@muon+@hcal+@jetmet+@ecal'],
            'allForPrompt': ['@common+@muon+@hcal+@jetmet+@ecal+@egamma',
                             'PostDQMOffline',
                             '@common+@muon+@hcal+@jetmet+@ecal+@egamma'],
            'miniAODDQM': ['DQMOfflineMiniAOD',
                           'PostDQMOfflineMiniAOD',
                           'DQMHarvestMiniAOD'],
            'nanoAODDQM': ['DQMOfflineNanoAOD',
                           'PostDQMOffline',
                           'DQMHarvestNanoAOD'],
            'standardDQM': ['DQMOffline',
                            'PostDQMOffline',
                            'dqmHarvesting'],
            'ExtraHLT': ['DQMOfflineExtraHLT',
                         'PostDQMOffline',
                         'dqmHarvestingExtraHLT'],
            'standardDQMFakeHLT': ['DQMOfflineFakeHLT',
                                   'PostDQMOffline',
                                   'dqmHarvestingFakeHLT'],
            'liteDQMHI': ['liteDQMOfflineHeavyIons',
                          'PostDQMOffline',
                          'dqmHarvesting']
            }
_phase2_allowed = ['trackingOnlyDQM','outerTracker','muon','hcal','hcal2','egamma']
# Phase-2 alias: concatenate the step-1 (DQM) and step-3 (harvest) sequences
# of each allowed alias; step 2 is always the common PostDQMOffline step.
autoDQM['phase2'] = [
    '+'.join(autoDQM[alias][0] for alias in _phase2_allowed),
    'PostDQMOffline',
    '+'.join(autoDQM[alias][2] for alias in _phase2_allowed),
]
| 49.928571
| 89
| 0.437291
|
acfc8564ecf0fd539cbe399197dc7db78b67648b
| 4,042
|
py
|
Python
|
huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/delete_certificate_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/delete_certificate_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/delete_certificate_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteCertificateRequest:
    """Request model for the WAF DeleteCertificate API (auto-generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'enterprise_project_id': 'str',
        'certificate_id': 'str'
    }
    attribute_map = {
        'enterprise_project_id': 'enterprise_project_id',
        'certificate_id': 'certificate_id'
    }
    def __init__(self, enterprise_project_id=None, certificate_id=None):
        """DeleteCertificateRequest - a model defined in huaweicloud sdk.

        :param enterprise_project_id: optional enterprise project ID.
        :param certificate_id: ID of the certificate to delete (required by
            the API; always assigned, even when None).
        """
        self._enterprise_project_id = None
        self._certificate_id = None
        self.discriminator = None
        if enterprise_project_id is not None:
            self.enterprise_project_id = enterprise_project_id
        self.certificate_id = certificate_id
    @property
    def enterprise_project_id(self):
        """Gets the enterprise_project_id of this DeleteCertificateRequest.

        Enterprise project ID.

        :return: The enterprise_project_id of this DeleteCertificateRequest.
        :rtype: str
        """
        return self._enterprise_project_id
    @enterprise_project_id.setter
    def enterprise_project_id(self, enterprise_project_id):
        """Sets the enterprise_project_id of this DeleteCertificateRequest.

        Enterprise project ID.

        :param enterprise_project_id: The enterprise_project_id of this DeleteCertificateRequest.
        :type: str
        """
        self._enterprise_project_id = enterprise_project_id
    @property
    def certificate_id(self):
        """Gets the certificate_id of this DeleteCertificateRequest.

        Certificate ID.

        :return: The certificate_id of this DeleteCertificateRequest.
        :rtype: str
        """
        return self._certificate_id
    @certificate_id.setter
    def certificate_id(self, certificate_id):
        """Sets the certificate_id of this DeleteCertificateRequest.

        Certificate ID.

        :param certificate_id: The certificate_id of this DeleteCertificateRequest.
        :type: str
        """
        self._certificate_id = certificate_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes listed in sensitive_list.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force utf-8 default encoding before serializing.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteCertificateRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.666667
| 97
| 0.599208
|
acfc87773fa0021ee867e8d6ab23c47dccef403c
| 8,179
|
py
|
Python
|
keystone/tests/test_injection.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/test_injection.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/test_injection.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import uuid
from keystone.common import dependency
class TestDependencyInjection(testtools.TestCase):
    """Tests for keystone.common.dependency's provider/consumer registry.

    `@dependency.provider(name)` registers an instance under `name` when it is
    constructed; `@dependency.requires(name)` / `@dependency.optional(name)`
    inject that instance as an attribute on consumers. The registry is global,
    so it is reset after every test.
    """
    def setUp(self):
        super(TestDependencyInjection, self).setUp()
        # The dependency registry is module-global state; always reset it.
        self.addCleanup(dependency.reset)
    def test_dependency_injection(self):
        """Multiple named providers are injected onto a single consumer."""
        class Interface(object):
            def do_work(self):
                assert False
        @dependency.provider('first_api')
        class FirstImplementation(Interface):
            def do_work(self):
                return True
        @dependency.provider('second_api')
        class SecondImplementation(Interface):
            def do_work(self):
                return True
        @dependency.requires('first_api', 'second_api')
        class Consumer(object):
            def do_work_with_dependencies(self):
                assert self.first_api.do_work()
                assert self.second_api.do_work()
        # initialize dependency providers
        first_api = FirstImplementation()
        second_api = SecondImplementation()
        # ... sometime later, initialize a dependency consumer
        consumer = Consumer()
        # the expected dependencies should be available to the consumer
        self.assertIs(consumer.first_api, first_api)
        self.assertIs(consumer.second_api, second_api)
        self.assertIsInstance(consumer.first_api, Interface)
        self.assertIsInstance(consumer.second_api, Interface)
        consumer.do_work_with_dependencies()
    def test_dependency_provider_configuration(self):
        """Provider construction arguments are visible through the consumer."""
        @dependency.provider('api')
        class Configurable(object):
            def __init__(self, value=None):
                self.value = value
            def get_value(self):
                return self.value
        @dependency.requires('api')
        class Consumer(object):
            def get_value(self):
                return self.api.get_value()
        # initialize dependency providers
        api = Configurable(value=True)
        # ... sometime later, initialize a dependency consumer
        consumer = Consumer()
        # the expected dependencies should be available to the consumer
        self.assertIs(consumer.api, api)
        self.assertIsInstance(consumer.api, Configurable)
        self.assertTrue(consumer.get_value())
    def test_dependency_consumer_configuration(self):
        """Consumers may take their own constructor arguments."""
        @dependency.provider('api')
        class Provider(object):
            def get_value(self):
                return True
        @dependency.requires('api')
        class Configurable(object):
            def __init__(self, value=None):
                self.value = value
            def get_value(self):
                if self.value:
                    return self.api.get_value()
        # initialize dependency providers
        api = Provider()
        # ... sometime later, initialize a dependency consumer
        consumer = Configurable(value=True)
        # the expected dependencies should be available to the consumer
        self.assertIs(consumer.api, api)
        self.assertIsInstance(consumer.api, Provider)
        self.assertTrue(consumer.get_value())
    def test_inherited_dependency(self):
        """Subclass consumers inherit (and extend) the parent's dependencies."""
        class Interface(object):
            def do_work(self):
                assert False
        @dependency.provider('first_api')
        class FirstImplementation(Interface):
            def do_work(self):
                return True
        @dependency.provider('second_api')
        class SecondImplementation(Interface):
            def do_work(self):
                return True
        @dependency.requires('first_api')
        class ParentConsumer(object):
            def do_work_with_dependencies(self):
                assert self.first_api.do_work()
        @dependency.requires('second_api')
        class ChildConsumer(ParentConsumer):
            def do_work_with_dependencies(self):
                assert self.second_api.do_work()
                super(ChildConsumer, self).do_work_with_dependencies()
        # initialize dependency providers
        first_api = FirstImplementation()
        second_api = SecondImplementation()
        # ... sometime later, initialize a dependency consumer
        consumer = ChildConsumer()
        # dependencies should be naturally inherited
        self.assertEqual(
            ParentConsumer._dependencies,
            set(['first_api']))
        self.assertEqual(
            ChildConsumer._dependencies,
            set(['first_api', 'second_api']))
        self.assertEqual(
            consumer._dependencies,
            set(['first_api', 'second_api']))
        # the expected dependencies should be available to the consumer
        self.assertIs(consumer.first_api, first_api)
        self.assertIs(consumer.second_api, second_api)
        self.assertIsInstance(consumer.first_api, Interface)
        self.assertIsInstance(consumer.second_api, Interface)
        consumer.do_work_with_dependencies()
    def test_unresolvable_dependency(self):
        """Requiring a name no provider registers raises after resolution."""
        @dependency.requires(uuid.uuid4().hex)
        class Consumer(object):
            pass
        def for_test():
            Consumer()
            dependency.resolve_future_dependencies()
        self.assertRaises(dependency.UnresolvableDependencyException, for_test)
    def test_circular_dependency(self):
        """Mutually-dependent providers resolve once both are registered."""
        p1_name = uuid.uuid4().hex
        p2_name = uuid.uuid4().hex
        @dependency.provider(p1_name)
        @dependency.requires(p2_name)
        class P1(object):
            pass
        @dependency.provider(p2_name)
        @dependency.requires(p1_name)
        class P2(object):
            pass
        p1 = P1()
        p2 = P2()
        dependency.resolve_future_dependencies()
        self.assertIs(getattr(p1, p2_name), p2)
        self.assertIs(getattr(p2, p1_name), p1)
    def test_reset(self):
        # Can reset the registry of providers.
        p_id = uuid.uuid4().hex
        @dependency.provider(p_id)
        class P(object):
            pass
        p_inst = P()
        self.assertIs(dependency.REGISTRY[p_id], p_inst)
        dependency.reset()
        self.assertFalse(dependency.REGISTRY)
    def test_optional_dependency_not_provided(self):
        """Optional dependencies resolve to None when nothing provides them."""
        requirement_name = uuid.uuid4().hex
        @dependency.optional(requirement_name)
        class C1(object):
            pass
        c1_inst = C1()
        dependency.resolve_future_dependencies()
        self.assertIsNone(getattr(c1_inst, requirement_name))
    def test_optional_dependency_provided(self):
        """Optional dependencies resolve to the provider when one exists."""
        requirement_name = uuid.uuid4().hex
        @dependency.optional(requirement_name)
        class C1(object):
            pass
        @dependency.provider(requirement_name)
        class P1(object):
            pass
        c1_inst = C1()
        p1_inst = P1()
        dependency.resolve_future_dependencies()
        self.assertIs(getattr(c1_inst, requirement_name), p1_inst)
    def test_optional_and_required(self):
        """Optional and required dependencies may be mixed on one class."""
        p1_name = uuid.uuid4().hex
        p2_name = uuid.uuid4().hex
        optional_name = uuid.uuid4().hex
        @dependency.provider(p1_name)
        @dependency.requires(p2_name)
        @dependency.optional(optional_name)
        class P1(object):
            pass
        @dependency.provider(p2_name)
        @dependency.requires(p1_name)
        class P2(object):
            pass
        p1 = P1()
        p2 = P2()
        dependency.resolve_future_dependencies()
        self.assertIs(getattr(p1, p2_name), p2)
        self.assertIs(getattr(p2, p1_name), p1)
        self.assertIsNone(getattr(p1, optional_name))
| 30.518657
| 79
| 0.632229
|
acfc87e0f3b20b604ca11666b0414692e55920a7
| 4,254
|
py
|
Python
|
uer/calc_map_mrr.py
|
nju-websoft/TSQA
|
d0b3f0c3a5e55a46fc5d281cae09597aa7f76e2e
|
[
"Apache-2.0"
] | 12
|
2020-12-19T05:26:49.000Z
|
2022-03-30T13:20:46.000Z
|
uer/calc_map_mrr.py
|
nju-websoft/TSQA
|
d0b3f0c3a5e55a46fc5d281cae09597aa7f76e2e
|
[
"Apache-2.0"
] | null | null | null |
uer/calc_map_mrr.py
|
nju-websoft/TSQA
|
d0b3f0c3a5e55a46fc5d281cae09597aa7f76e2e
|
[
"Apache-2.0"
] | null | null | null |
import json
def ranking(outputs, all_t_count_map=None):
    """Compute average precision (map_) and reciprocal rank (mrr_) for one group.

    `outputs` holds parallel lists: `prob` (rows whose second column is the
    score) and `target` (1 = relevant, 0 = not). Candidates are ranked by
    score, descending (stable for ties). When `all_t_count_map` is given, the
    AP denominator is taken from it (keyed by `outputs['guid']`) instead of
    the number of positives seen here. With zero relevant items both metrics
    are 0.0; if relevant items exist but none appear in the ranking, mrr_
    stays at its -1.0 sentinel (original behavior, preserved).
    """
    scores = [row[1] for row in outputs['prob']]
    labels = outputs['target']
    assert len(scores) == len(labels)
    relevant_total = sum(1 for lab in labels if lab == 1)
    # Stable descending sort by score, exactly like the original list sort.
    ranked = sorted(zip(scores, labels), key=lambda pair: pair[0], reverse=True)
    ap_sum = 0.0
    first_hit_rr = -1.0
    hits = 0
    for rank, (_, label) in enumerate(ranked, start=1):
        if label == 1:
            hits += 1
            ap_sum += float(hits) / rank
            if first_hit_rr == -1:
                first_hit_rr = 1.0 / rank
    if all_t_count_map:
        relevant_total = all_t_count_map[outputs['guid']]
    if relevant_total == 0:
        return {'map_': 0.0, 'mrr_': 0.0}
    return {'map_': ap_sum / relevant_total, 'mrr_': first_hit_rr}
def get_all_t_count(test_file):
    """Count positive rows (label column == '1') per guid in a test TSV.

    Returns a dict guid -> positive count; guids with no positives map to 0.
    """
    counts = {}
    for row in _read_tsv(test_file):
        guid, label = row[3], row[2]
        counts.setdefault(guid, 0)
        if label == '1':
            counts[guid] += 1
    return counts
def _read_tsv(input_file, _format=str, is_result=False):
f = open(input_file)
lines = f.read().split('\n')
ret = []
for line in lines:
if 'text_a' in line and 'text_b' in line:
continue
if line.strip() == '':
continue
ll = line.split('\t')
if is_result:
ll = ll[:2]
ret.append(list(map(lambda x: _format(x), ll)))
return ret
def write_line(lines, output):
    """Write one ranked candidate group to ``output``.

    ``lines`` is a list of (raw_tsv_row, score) pairs sharing the same guid
    (column 3 of the raw row). Emits the guid, a dashed separator, one
    reordered line per candidate (label, text_b, full raw row, score) and a
    trailing blank-line pair.
    """
    guid = lines[0][0].split('\t')[3]
    output.write(guid + '\n' + '-' * 20 + '\n')
    for raw, score in lines:
        fields = raw.split('\t')
        output.write('\t'.join([fields[2], fields[1], raw, str(score)]) + '\n')
        output.flush()
    output.write('\n\n')
def cacl(result_file=None, test_file=None, use_all_t_count=False, result_output=None, fold=None):
    """Compute mean AP (MAP) and mean RR (MRR) over per-question groups.

    ``result_file`` holds one score row per candidate (parsed as floats,
    first two columns); ``test_file`` holds the matching candidate rows with
    the gold label in column 2 and the question guid in column 3. Consecutive
    rows sharing a guid form one ranking group.

    Side effects: writes per-guid (map, mrr) pairs to
    't{fold}_no_entities.json' in the working directory and, if
    ``result_output`` is given, the ranked candidates per group to that path.

    Returns:
        (mean_map, mean_mrr) averaged over all groups.
    """
    results = _read_tsv(result_file, float, is_result=True)
    tests = _read_tsv(test_file)
    if len(results) != len(tests):
        print('result file: ' + result_file)
        print('test file: ' + test_file)
        print('results len: ' + str(len(results)))
        print('test len: ' + str(len(tests)))
    assert len(results) == len(tests)

    guid = tests[0][3]
    prob = []
    target = []
    line = []
    t_count = 0
    map_sum = 0.0
    mrr_sum = 0.0
    t_result = f't{fold}_no_entities.json'
    tt_result = {}
    all_t_count_map = get_all_t_count(test_file) if use_all_t_count else None
    output_rank = open(result_output, 'w', encoding='utf-8') if result_output else None

    def _flush_group(group_guid, group_prob, group_target, group_line):
        # Rank-log one finished group and return its {'map_', 'mrr_'} dict.
        group_line.sort(key=lambda x: x[1], reverse=True)
        if output_rank:
            write_line(group_line, output_rank)
        outputs = {'guid': group_guid, 'prob': group_prob, 'target': group_target}
        return ranking(outputs, all_t_count_map=all_t_count_map)

    for i, result in enumerate(results):
        if tests[i][3] == guid:
            prob.append(result)
            target.append(int(tests[i][2]))
            line.append(('\t'.join(tests[i]), result[1]))
        else:
            mm = _flush_group(guid, prob, target, line)
            t_count += 1
            map_sum += mm['map_']
            mrr_sum += mm['mrr_']
            tt_result[guid] = (mm['map_'], mm['mrr_'])
            guid = tests[i][3]
            prob = [result]
            target = [int(tests[i][2])]
            line = [('\t'.join(tests[i]), result[1])]

    # BUG FIX: the final group previously reused the *previous* group's stale
    # `outputs` dict (and raised NameError when the input held only a single
    # group); rebuild it from the last accumulated prob/target instead.
    mm = _flush_group(guid, prob, target, line)
    t_count += 1
    map_sum += mm['map_']
    mrr_sum += mm['mrr_']
    tt_result[guid] = (mm['map_'], mm['mrr_'])

    # Also fixed: both output files are now closed deterministically.
    with open(t_result, 'w') as output:
        output.write(json.dumps(tt_result))
    if output_rank:
        output_rank.close()
    return map_sum / t_count, mrr_sum / t_count
| 29.748252
| 97
| 0.546309
|
acfc882a5569b14f11468c6a24612bda47cc8a0a
| 4,938
|
py
|
Python
|
misc/global_conv.py
|
kwrobert/nanowire
|
2dd003e9e1898b2ae9f78a261b3ce1406ec717b8
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
misc/global_conv.py
|
kwrobert/nanowire
|
2dd003e9e1898b2ae9f78a261b3ce1406ec717b8
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
misc/global_conv.py
|
kwrobert/nanowire
|
2dd003e9e1898b2ae9f78a261b3ce1406ec717b8
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-06-23T01:26:44.000Z
|
2019-06-23T01:26:44.000Z
|
import glob
import re
import os
import argparse as ap
import matplotlib
# Enables saving plots over ssh
try:
os.environ['DISPLAY']
except KeyError:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
def analyze(path, thresh, local, show):
    """Find, per parameter value, the minimum number of basis terms whose
    error falls below ``thresh``; write the results to a .dat file in
    ``path`` and save (optionally show) a plot.

    Parameters
    ----------
    path : str
        Base directory containing per-parameter subdirectories with
        ``localerror_*.dat`` / ``globalerror_*.dat`` files.
    thresh : float
        Maximum acceptable error.
    local : bool
        If True analyze local-error files, otherwise global-error files.
    show : bool
        Display the plot interactively after saving it.
    """
    # Get all error files via recursive globbing.
    err_kind = 'local' if local else 'global'
    glob_str = os.path.join(path, '**', '%serror_*.dat' % err_kind)
    err_files = glob.glob(glob_str, recursive=True)
    # For each file, find the first number of basis terms within the
    # error bound.
    data_pairs = []
    for f in err_files:
        # The parameter value is encoded in the directory name.
        dirpath = os.path.dirname(f)
        param_dir = os.path.split(dirpath)[-1]
        # Raw string avoids invalid-escape warnings; matches a float with
        # an optional exponent (e.g. "1.5e-7").
        m = re.search(r'[0-9]*\.[0-9]*[eE]?[-+]?[0-9]*', param_dir)
        x_val = float(m.group(0))
        # Find the first number of basis terms within the threshold and
        # store True because it converged.
        with open(f, 'r') as err:
            tup = None
            lines = err.readlines()
            for line in lines:
                data = line.split(',')
                if float(data[-1]) < thresh:
                    tup = (x_val, int(data[0]), True)
                    data_pairs.append(tup)
                    break
            # If the error is never within the threshold, use the highest
            # available number of terms and store False (not converged).
            if not tup:
                data = lines[-1].split(',')
                data_pairs.append((x_val, int(data[0]), False))
    # Sort by parameter value so the plot line runs left to right.
    data_pairs.sort(key=itemgetter(0))
    # Write out to file.
    out = 'minimum_basis_terms_%s_t%s.dat' % (err_kind, str(thresh))
    with open(os.path.join(path, out), 'w') as minf:
        minf.write('# frequency,numterms\n')
        for pair in data_pairs:
            minf.write('%E,%i\n' % (pair[0], int(pair[1])))
    x_vals, min_terms, converged = zip(*data_pairs)
    # Split the points into converged / not-converged for color coding.
    conv_x = [x for x, c in zip(x_vals, converged) if c]
    conv_minterms = [t for t, c in zip(min_terms, converged) if c]
    noconv_x = [x for x, c in zip(x_vals, converged) if not c]
    noconv_minterms = [t for t, c in zip(min_terms, converged) if not c]
    # Plot.
    plt.figure()
    plt.plot(x_vals, min_terms, 'b-')
    plt.plot(conv_x, conv_minterms, 'bo', label='Converged')
    plt.plot(noconv_x, noconv_minterms, 'ro', label='Not Converged')
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Number of Fourier Terms')
    plt.title('Lower Bound on Basis Terms,Threshold = %s' % str(thresh))
    xlow, xhigh, ylow, yhigh = plt.axis()
    # Pad the y axis so markers at the extremes stay visible.
    plt.ylim(ylow - 10, yhigh + 10)
    plt.legend(loc='best')
    plot_name = 'minimum_basis_terms_%serror_t%s.pdf' % (err_kind, str(thresh))
    plt.savefig(os.path.join(path, plot_name))
    if show:
        plt.show()
def main():
    """Command-line entry point: walk the sweep directory tree and run the
    minimum-basis-terms analysis wherever error files are found."""
    parser = ap.ArgumentParser(description="""Plots the minimum required basis terms to achieve a
    desired accuracy against an independent parameter""")
    parser.add_argument('path',type=str,help="Base directory path for the sorted sweep")
    parser.add_argument('-t','--threshold',type=float,default=.01,help="Maximum error threshold")
    parser.add_argument('-l','--local',action='store_true',default=False,help="""Use local error""")
    parser.add_argument('-s','--show',action='store_true',default=False,help="""Show plots
    interactively""")
    args = parser.parse_args()
    path = args.path
    if not os.path.isdir(path):
        print("Specified path does not exist")
        quit()
    # Raw strings: these are regex patterns matched against file names.
    if args.local:
        print('Using local error')
        file_reg = r'localerror_.+\.dat'
    else:
        print('Assuming global error')
        file_reg = r'globalerror_.+\.dat'
    excludes = [os.path.join(path, 'comp_struct')]
    for root, dirs, files in os.walk(path, topdown=False):
        # Skip nodes whose parent has already been analyzed.
        if os.path.split(root)[0] not in excludes:
            # If any file in this directory is an error file, go up one
            # directory and perform the analysis.
            if any(re.search(file_reg, f) for f in files):
                base = os.path.split(root)[0]
                print('Analyzing error in %s' % base)
                analyze(base, args.threshold, args.local, args.show)
                # Exclude the parent of the basis-term sweep so we don't
                # repeat the analysis for every basis-term directory that
                # contains an error file.
                excludes.append(base)


if __name__ == '__main__':
    main()
| 40.47541
| 100
| 0.624747
|
acfc887322e730e99a6fea6c66551386f953ec61
| 23,803
|
py
|
Python
|
mkt/developers/tests/test_views_versions.py
|
acidburn0zzz/zamboni
|
780fbeb99e240a569a72a1c15410f49b76b3807c
|
[
"BSD-3-Clause"
] | 1
|
2017-07-14T19:22:39.000Z
|
2017-07-14T19:22:39.000Z
|
mkt/developers/tests/test_views_versions.py
|
Acidburn0zzz/zamboni
|
780fbeb99e240a569a72a1c15410f49b76b3807c
|
[
"BSD-3-Clause"
] | 6
|
2021-02-02T23:08:48.000Z
|
2021-09-08T02:47:17.000Z
|
mkt/developers/tests/test_views_versions.py
|
Acidburn0zzz/zamboni
|
780fbeb99e240a569a72a1c15410f49b76b3807c
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import mock
from nose.tools import eq_
import os
from pyquery import PyQuery as pq
from django.conf import settings
import amo
import amo.tests
from amo.tests import req_factory_factory
from addons.models import Addon, AddonUser
from devhub.models import ActivityLog, AppLog
from editors.models import EscalationQueue, EditorSubscription
from files.models import File
from users.models import UserProfile
from versions.models import Version
from mkt.comm.models import CommunicationNote
from mkt.developers.models import PreloadTestPlan
from mkt.developers.views import preload_submit, status
from mkt.site.fixtures import fixture
from mkt.submit.tests.test_views import BasePackagedAppTest
class TestVersion(amo.tests.TestCase):
    """Tests for the developer-hub 'versions' page of a hosted app."""
    fixtures = fixture('group_admin', 'user_999', 'user_admin',
                       'user_admin_group', 'webapp_337141')
    def setUp(self):
        # These views require authentication; log in as the admin fixture.
        self.client.login(username='admin@mozilla.com', password='password')
        self.webapp = self.get_webapp()
        self.url = self.webapp.get_dev_url('versions')
    def get_webapp(self):
        # Fetch a fresh copy of the fixture app from the database.
        return Addon.objects.get(id=337141)
    def test_nav_link(self):
        # The "Versions" item should be the selected nav entry on this page.
        r = self.client.get(self.url)
        eq_(pq(r.content)('.edit-addon-nav li.selected a').attr('href'),
            self.url)
    def test_items(self):
        # Hosted (non-packaged) apps show the status and delete widgets but
        # no version list and no per-version delete modal.
        doc = pq(self.client.get(self.url).content)
        eq_(doc('#version-status').length, 1)
        eq_(doc('#version-list').length, 0)
        eq_(doc('#delete-addon').length, 1)
        eq_(doc('#modal-delete').length, 1)
        eq_(doc('#modal-disable').length, 1)
        eq_(doc('#modal-delete-version').length, 0)
    def test_delete_link(self):
        # Hard "Delete App" link should be visible for only incomplete apps.
        self.webapp.update(status=amo.STATUS_NULL)
        doc = pq(self.client.get(self.url).content)
        eq_(doc('#delete-addon').length, 1)
        eq_(doc('#modal-delete').length, 1)
    def test_pending(self):
        # Pending apps show the pending status but no rejection notice.
        self.webapp.update(status=amo.STATUS_PENDING)
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        doc = pq(r.content)
        eq_(doc('#version-status .status-pending').length, 1)
        eq_(doc('#rejection').length, 0)
    def test_public(self):
        # The fixture app starts out public.
        eq_(self.webapp.status, amo.STATUS_PUBLIC)
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        doc = pq(r.content)
        eq_(doc('#version-status .status-public').length, 1)
        eq_(doc('#rejection').length, 0)
    def test_blocked(self):
        # Blocked apps show the blocked status and an explanatory message.
        self.webapp.update(status=amo.STATUS_BLOCKED)
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        doc = pq(r.content)
        eq_(doc('#version-status .status-blocked').length, 1)
        eq_(doc('#rejection').length, 0)
        assert 'blocked by a site administrator' in doc.text()
    def test_rejected(self):
        # Reject the app, verify the rejection notice, then resubmit via
        # the dev-hub form and check app/file statuses flip back to pending.
        comments = "oh no you di'nt!!"
        amo.set_user(UserProfile.objects.get(username='admin'))
        amo.log(amo.LOG.REJECT_VERSION, self.webapp,
                self.webapp.current_version, user_id=999,
                details={'comments': comments, 'reviewtype': 'pending'})
        self.webapp.update(status=amo.STATUS_REJECTED)
        (self.webapp.versions.latest()
             .all_files[0].update(status=amo.STATUS_DISABLED))
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        doc = pq(r.content)('#version-status')
        eq_(doc('.status-rejected').length, 1)
        eq_(doc('#rejection').length, 1)
        # The reviewer's rejection comments are surfaced to the developer.
        eq_(doc('#rejection blockquote').text(), comments)
        my_reply = 'fixed just for u, brah'
        r = self.client.post(self.url, {'notes': my_reply,
                                        'resubmit-app': ''})
        self.assertRedirects(r, self.url, 302)
        webapp = self.get_webapp()
        eq_(webapp.status, amo.STATUS_PENDING,
            'Reapplied apps should get marked as pending')
        eq_(webapp.versions.latest().all_files[0].status, amo.STATUS_PENDING,
            'Files for reapplied apps should get marked as pending')
        # The resubmission must also be recorded in the activity log.
        action = amo.LOG.WEBAPP_RESUBMIT
        assert AppLog.objects.filter(
            addon=webapp, activity_log__action=action.id).exists(), (
            "Didn't find `%s` action in logs." % action.short)
    def test_no_ratings_no_resubmit(self):
        # With the 'iarc' switch on, resubmission requires at least one
        # content rating: 403 without one, redirect once a rating exists.
        self.create_switch('iarc')
        self.webapp.update(status=amo.STATUS_REJECTED)
        r = self.client.post(self.url, {'notes': 'lol',
                                        'resubmit-app': ''})
        eq_(r.status_code, 403)
        self.webapp.content_ratings.create(ratings_body=0, rating=0)
        r = self.client.post(self.url, {'notes': 'lol',
                                        'resubmit-app': ''})
        self.assert3xx(r, self.webapp.get_dev_url('versions'))
    def test_comm_thread_after_resubmission(self):
        # Resubmitting should create a single communication note that
        # contains the developer's reply (comm dashboard enabled).
        self.create_switch('comm-dashboard')
        self.webapp.update(status=amo.STATUS_REJECTED)
        amo.set_user(UserProfile.objects.get(username='admin'))
        (self.webapp.versions.latest()
             .all_files[0].update(status=amo.STATUS_DISABLED))
        my_reply = 'no give up'
        self.client.post(self.url, {'notes': my_reply,
                                    'resubmit-app': ''})
        notes = CommunicationNote.objects.all()
        eq_(notes.count(), 1)
        eq_(notes[0].body, my_reply)
    def test_rejected_packaged(self):
        # Same rejection-display checks as test_rejected, but for a
        # packaged app (no resubmission step here).
        self.webapp.update(is_packaged=True)
        comments = "oh no you di'nt!!"
        amo.set_user(UserProfile.objects.get(username='admin'))
        amo.log(amo.LOG.REJECT_VERSION, self.webapp,
                self.webapp.current_version, user_id=999,
                details={'comments': comments, 'reviewtype': 'pending'})
        self.webapp.update(status=amo.STATUS_REJECTED)
        (self.webapp.versions.latest()
             .all_files[0].update(status=amo.STATUS_DISABLED))
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        doc = pq(r.content)('#version-status')
        eq_(doc('.status-rejected').length, 1)
        eq_(doc('#rejection').length, 1)
        eq_(doc('#rejection blockquote').text(), comments)
@mock.patch('mkt.webapps.tasks.update_cached_manifests.delay', new=mock.Mock)
class TestAddVersion(BasePackagedAppTest):
    """Tests for uploading a new version of a packaged app."""
    def setUp(self):
        super(TestAddVersion, self).setUp()
        self.app = amo.tests.app_factory(
            complete=True, is_packaged=True, version_kw=dict(version='1.0'))
        self.url = self.app.get_dev_url('versions')
        self.user = UserProfile.objects.get(username='regularuser')
        AddonUser.objects.create(user=self.user, addon=self.app)
    def _post(self, expected_status=200):
        # Submit the prepared upload (from BasePackagedAppTest) as a new
        # version and assert on the response's HTTP status.
        res = self.client.post(self.url, {'upload': self.upload.pk,
                                          'upload-version': ''})
        eq_(res.status_code, expected_status)
        return res
    def test_post(self):
        # A fresh upload becomes the latest version, with a pending file.
        self.app.current_version.update(version='0.9',
                                        created=self.days_ago(1))
        self._post(302)
        version = self.app.versions.latest()
        eq_(version.version, '1.0')
        eq_(version.all_files[0].status, amo.STATUS_PENDING)
    def test_post_subscribers(self):
        # Same test as above, but add a suscriber. We only want to make sure
        # we are not causing a traceback because of that.
        reviewer = UserProfile.objects.create(email='foo@example.com')
        self.grant_permission(reviewer, 'Apps:Review')
        EditorSubscription.objects.create(addon=self.app, user=reviewer)
        self.app.current_version.update(version='0.9',
                                        created=self.days_ago(1))
        self._post(302)
        version = self.app.versions.latest()
        eq_(version.version, '1.0')
        eq_(version.all_files[0].status, amo.STATUS_PENDING)
    def test_unique_version(self):
        # Uploading a duplicate version number should surface a form error.
        res = self._post(200)
        self.assertFormError(res, 'upload_form', 'upload',
                             'Version 1.0 already exists')
    def test_pending_on_new_version(self):
        # Test app rejection, then new version, updates app status to pending.
        self.app.current_version.update(version='0.9',
                                        created=self.days_ago(1))
        self.app.update(status=amo.STATUS_REJECTED)
        files = File.objects.filter(version__addon=self.app)
        files.update(status=amo.STATUS_DISABLED)
        self._post(302)
        self.app.reload()
        version = self.app.versions.latest()
        eq_(version.version, '1.0')
        eq_(version.all_files[0].status, amo.STATUS_PENDING)
        eq_(self.app.status, amo.STATUS_PENDING)
    @mock.patch('mkt.developers.views.run_validator')
    def test_prefilled_features(self, run_validator_):
        # Features reported by the validator should come pre-checked on
        # the newly uploaded version.
        run_validator_.return_value = '{"feature_profile": ["apps", "audio"]}'
        self.app.current_version.update(version='0.9',
                                        created=self.days_ago(1))
        # All features should be disabled.
        features = self.app.current_version.features.to_dict()
        eq_(any(features.values()), False)
        self._post(302)
        # In this new version we should be prechecked new ones.
        features = self.app.versions.latest().features.to_dict()
        for key, feature in features.iteritems():
            eq_(feature, key in ('has_apps', 'has_audio'))
    def test_blocklist_on_new_version(self):
        # Test app blocked, then new version, doesn't update app status, and
        # app shows up in escalation queue.
        self.app.current_version.update(version='0.9',
                                        created=self.days_ago(1))
        self.app.update(status=amo.STATUS_BLOCKED)
        files = File.objects.filter(version__addon=self.app)
        files.update(status=amo.STATUS_DISABLED)
        self._post(302)
        version = self.app.versions.latest()
        eq_(version.version, '1.0')
        eq_(version.all_files[0].status, amo.STATUS_PENDING)
        self.app.update_status()
        eq_(self.app.status, amo.STATUS_BLOCKED)
        assert EscalationQueue.objects.filter(addon=self.app).exists(), (
            'App not in escalation queue')
    def test_new_version_when_incomplete(self):
        # An incomplete app becomes pending once a new version is uploaded.
        self.app.current_version.update(version='0.9',
                                        created=self.days_ago(1))
        self.app.update(status=amo.STATUS_NULL)
        files = File.objects.filter(version__addon=self.app)
        files.update(status=amo.STATUS_DISABLED)
        self._post(302)
        self.app.reload()
        version = self.app.versions.latest()
        eq_(version.version, '1.0')
        eq_(version.all_files[0].status, amo.STATUS_PENDING)
        eq_(self.app.status, amo.STATUS_PENDING)
    def test_vip_app_added_to_escalation_queue(self):
        # VIP apps are always escalated on new-version upload.
        self.app.current_version.update(version='0.9',
                                        created=self.days_ago(1))
        self.app.update(vip_app=True)
        self._post(302)
        assert EscalationQueue.objects.filter(addon=self.app).exists(), (
            'VIP App not in escalation queue')
class TestVersionPackaged(amo.tests.WebappTestCase):
    """Tests for the versions page and version deletion of a packaged app."""
    fixtures = fixture('user_999', 'webapp_337141')
    def setUp(self):
        super(TestVersionPackaged, self).setUp()
        # Log in as the app's owner (fixture account).
        assert self.client.login(username='steamcube@mozilla.com',
                                 password='password')
        self.app.update(is_packaged=True)
        self.app = self.get_app()
        self.url = self.app.get_dev_url('versions')
        self.delete_url = self.app.get_dev_url('versions.delete')
    def test_items_packaged(self):
        # Packaged apps additionally show the version list and the
        # per-version delete modal.
        doc = pq(self.client.get(self.url).content)
        eq_(doc('#version-status').length, 1)
        eq_(doc('#version-list').length, 1)
        eq_(doc('#delete-addon').length, 1)
        eq_(doc('#modal-delete').length, 1)
        eq_(doc('#modal-disable').length, 1)
        eq_(doc('#modal-delete-version').length, 1)
    def test_version_list_packaged(self):
        self.app.update(is_packaged=True)
        amo.tests.version_factory(addon=self.app, version='2.0',
                                  file_kw=dict(status=amo.STATUS_PENDING))
        self.app = self.get_app()
        doc = pq(self.client.get(self.url).content)
        eq_(doc('#version-status').length, 1)
        eq_(doc('#version-list tbody tr').length, 2)
        # 1 pending and 1 public.
        eq_(doc('#version-list span.status-pending').length, 1)
        eq_(doc('#version-list span.status-public').length, 1)
        # Check version strings and order of versions.
        eq_(map(lambda x: x.text, doc('#version-list h4 a')),
            ['2.0', '1.0'])
        # There should be 2 delete buttons.
        eq_(doc('#version-list a.delete-version.button').length, 2)
        # Check download url.
        eq_(doc('#version-list a.download').eq(0).attr('href'),
            self.app.versions.all()[0].all_files[0].get_url_path(''))
        eq_(doc('#version-list a.download').eq(1).attr('href'),
            self.app.versions.all()[1].all_files[0].get_url_path(''))
    def test_delete_version(self):
        # Use a version string containing markup to verify XSS escaping,
        # then delete the version and check soft-deletion semantics.
        version = self.app.versions.latest()
        version.update(version='<script>alert("xss")</script>')
        res = self.client.get(self.url)
        assert not '<script>alert(' in res.content
        # NOTE(review): this assert contradicts the one just above; the
        # original presumably checked for the HTML-escaped form
        # ('<script>alert(') and the escaping was lost in transit —
        # confirm against upstream before relying on this test.
        assert '<script>alert(' in res.content
        # Now do the POST to delete.
        res = self.client.post(self.delete_url, dict(version_id=version.pk),
                               follow=True)
        assert not Version.objects.filter(pk=version.pk).exists()
        eq_(ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id)
                               .count(), 1)
        # Since this was the last version, the app status should be incomplete.
        eq_(self.get_app().status, amo.STATUS_NULL)
        # Check xss in success flash message.
        assert not '<script>alert(' in res.content
        # NOTE(review): same contradiction as above — likely meant the
        # escaped markup; verify against upstream.
        assert '<script>alert(' in res.content
        # Test that the soft deletion works pretty well.
        eq_(self.app.versions.count(), 0)
        # We can't use `.reload()` :(
        version = Version.with_deleted.filter(addon=self.app)
        eq_(version.count(), 1)
        # Test that the status of the "deleted" version is STATUS_DELETED.
        eq_(str(version[0].status[0]),
            str(amo.MKT_STATUS_CHOICES[amo.STATUS_DELETED]))
    def test_anonymous_delete_redirects(self):
        # Anonymous users are bounced to the login page, not served a 403.
        self.client.logout()
        version = self.app.versions.latest()
        res = self.client.post(self.delete_url, dict(version_id=version.pk))
        self.assertLoginRedirects(res, self.delete_url)
    def test_non_author_no_delete_for_you(self):
        # A logged-in user who is not an author of the app gets a 403.
        self.client.logout()
        assert self.client.login(username='regular@mozilla.com',
                                 password='password')
        version = self.app.versions.latest()
        res = self.client.post(self.delete_url, dict(version_id=version.pk))
        eq_(res.status_code, 403)
    @mock.patch.object(Version, 'delete')
    def test_roles_and_delete(self, mock_version):
        # Only owners and developers may delete a version; iterate over
        # every defined author role and check both outcomes.
        user = UserProfile.objects.get(email='regular@mozilla.com')
        addon_user = AddonUser.objects.create(user=user, addon=self.app)
        allowed = [amo.AUTHOR_ROLE_OWNER, amo.AUTHOR_ROLE_DEV]
        for role in [r[0] for r in amo.AUTHOR_CHOICES]:
            self.client.logout()
            addon_user.role = role
            addon_user.save()
            assert self.client.login(username='regular@mozilla.com',
                                     password='password')
            version = self.app.versions.latest()
            res = self.client.post(self.delete_url,
                                   dict(version_id=version.pk))
            if role in allowed:
                self.assert3xx(res, self.url)
                assert mock_version.called, ('Unexpected: `Version.delete` '
                                             'should have been called.')
                mock_version.reset_mock()
            else:
                eq_(res.status_code, 403)
    def test_cannot_delete_blocked(self):
        # Versions whose file is blocked may not be deleted by the dev.
        v = self.app.versions.latest()
        f = v.all_files[0]
        f.update(status=amo.STATUS_BLOCKED)
        res = self.client.post(self.delete_url, dict(version_id=v.pk))
        eq_(res.status_code, 403)
    def test_dev_cannot_blocklist(self):
        # Developers cannot blocklist their own app.
        url = self.app.get_dev_url('blocklist')
        res = self.client.post(url)
        eq_(res.status_code, 403)
    @mock.patch('lib.crypto.packaged.os.unlink', new=mock.Mock)
    def test_admin_can_blocklist(self):
        # Blocklisting by an admin adds one (blocked) version and flips the
        # app status to STATUS_BLOCKED.
        self.grant_permission(UserProfile.objects.get(username='regularuser'),
                              'Apps:Configure')
        assert self.client.login(username='regular@mozilla.com',
                                 password='password')
        v_count = self.app.versions.count()
        url = self.app.get_dev_url('blocklist')
        res = self.client.post(url)
        self.assert3xx(res, self.app.get_dev_url('versions'))
        app = self.app.reload()
        eq_(app.versions.count(), v_count + 1)
        eq_(app.status, amo.STATUS_BLOCKED)
        eq_(app.versions.latest().files.latest().status, amo.STATUS_BLOCKED)
class TestPreloadSubmit(amo.tests.TestCase):
    """Tests for submitting a preload test plan (PDF/XLS) for an app."""
    fixtures = fixture('group_admin', 'user_admin', 'user_admin_group',
                       'webapp_337141')
    def setUp(self):
        self.create_switch('preload-apps')
        self.user = UserProfile.objects.get(username='admin')
        self.login(self.user)
        self.webapp = Addon.objects.get(id=337141)
        self.url = self.webapp.get_dev_url('versions')
        self.home_url = self.webapp.get_dev_url('preload_home')
        self.submit_url = self.webapp.get_dev_url('preload_submit')
        # Sample upload fixtures living next to this test module.
        path = os.path.dirname(os.path.abspath(__file__))
        self.test_pdf = path + '/files/test.pdf'
        self.test_xls = path + '/files/test.xls'
    def _submit_pdf(self):
        # Build a POST request carrying the PDF test plan and invoke the
        # view function directly.
        # NOTE(review): the file handle is never explicitly closed; a
        # `with` block around the request/submit would be tidier (the
        # handle must stay open while preload_submit runs).
        f = open(self.test_pdf, 'r')
        req = req_factory_factory(self.submit_url, user=self.user, post=True,
                                  data={'agree': True, 'test_plan': f})
        return preload_submit(req, self.webapp.slug)
    def test_get_200(self):
        # Both preload pages should render for an authenticated admin.
        eq_(self.client.get(self.home_url).status_code, 200)
        eq_(self.client.get(self.submit_url).status_code, 200)
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_preload_on_status_page(self, noop1, noop2):
        # Before submission the status page links to preload_home and shows
        # "not submitted"; afterwards it links to preload_submit.
        req = req_factory_factory(self.url, user=self.user)
        r = status(req, self.webapp.slug)
        doc = pq(r.content)
        eq_(doc('#preload .listing-footer a').attr('href'),
            self.webapp.get_dev_url('preload_home'))
        assert doc('#preload .not-submitted')
        self._submit_pdf()
        req = req_factory_factory(self.url, user=self.user)
        r = status(req, self.webapp.slug)
        doc = pq(r.content)
        eq_(doc('#preload .listing-footer a').attr('href'),
            self.webapp.get_dev_url('preload_submit'))
        assert doc('#preload .submitted')
    def _assert_submit(self, endswith, content_type, save_mock):
        # Shared assertions: a PreloadTestPlan row exists with the expected
        # filename/timestamp, and save_test_plan was called with the file,
        # a generated name, and the app.
        test_plan = PreloadTestPlan.objects.get()
        eq_(test_plan.addon, self.webapp)
        assert test_plan.filename.startswith('test_plan_')
        assert test_plan.filename.endswith(endswith)
        self.assertCloseToNow(test_plan.last_submission)
        eq_(save_mock.call_args[0][0].content_type, content_type)
        assert save_mock.call_args[0][1].startswith('test_plan')
        eq_(save_mock.call_args[0][2], self.webapp)
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_submit_pdf(self, noop, save_mock):
        r = self._submit_pdf()
        self.assert3xx(r, self.url)
        self._assert_submit('pdf', 'application/pdf', save_mock)
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_submit_xls(self, noop, save_mock):
        f = open(self.test_xls, 'r')
        req = req_factory_factory(self.submit_url, user=self.user, post=True,
                                  data={'agree': True, 'test_plan': f})
        r = preload_submit(req, self.webapp.slug)
        self.assert3xx(r, self.url)
        self._assert_submit('xls', 'application/vnd.ms-excel', save_mock)
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_submit_bad_file(self, noop, save_mock):
        # Submitting a non-PDF/XLS file (this .py file) is rejected with a
        # form error and nothing is saved.
        f = open(os.path.abspath(__file__), 'r')
        req = req_factory_factory(self.submit_url, user=self.user, post=True,
                                  data={'agree': True, 'test_plan': f})
        r = preload_submit(req, self.webapp.slug)
        eq_(r.status_code, 200)
        eq_(PreloadTestPlan.objects.count(), 0)
        assert not save_mock.called
        assert ('Invalid file type.' in
                pq(r.content)('.test_plan .errorlist').text())
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_submit_no_file(self, noop, save_mock):
        # Omitting the file entirely yields a "required" form error.
        req = req_factory_factory(self.submit_url, user=self.user, post=True,
                                  data={'agree': True})
        r = preload_submit(req, self.webapp.slug)
        eq_(r.status_code, 200)
        eq_(PreloadTestPlan.objects.count(), 0)
        assert not save_mock.called
        assert 'required' in pq(r.content)('.test_plan .errorlist').text()
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_submit_no_agree(self, noop, save_mock):
        # The agreement checkbox is mandatory.
        f = open(self.test_xls, 'r')
        req = req_factory_factory(self.submit_url, user=self.user, post=True,
                                  data={'test_plan': f})
        r = preload_submit(req, self.webapp.slug)
        eq_(r.status_code, 200)
        eq_(PreloadTestPlan.objects.count(), 0)
        assert not save_mock.called
        assert 'required' in pq(r.content)('.agree .errorlist').text()
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_submit_multiple_status(self, noop, save_mock):
        # A second submission disables the earlier plan; the status page
        # links to the newest one.
        f = open(self.test_xls, 'r')
        req = req_factory_factory(self.submit_url, user=self.user, post=True,
                                  data={'test_plan': f, 'agree': True})
        preload_submit(req, self.webapp.slug)
        self._submit_pdf()
        eq_(PreloadTestPlan.objects.count(), 2)
        xls = PreloadTestPlan.objects.get(filename__contains='xls')
        pdf = PreloadTestPlan.objects.get(filename__contains='pdf')
        eq_(xls.status, amo.STATUS_DISABLED)
        eq_(pdf.status, amo.STATUS_PUBLIC)
        # Check the link points to most recent one.
        req = req_factory_factory(self.url, user=self.user)
        r = status(req, self.webapp.slug)
        doc = pq(r.content)
        eq_(doc('.test-plan-download').attr('href'),
            pdf.preload_test_plan_url)
    @mock.patch.object(settings, 'PREINSTALL_TEST_PLAN_LATEST',
                       datetime.datetime.now() + datetime.timedelta(days=1))
    @mock.patch('mkt.developers.views.save_test_plan')
    @mock.patch('mkt.developers.views.messages')
    def test_outdated(self, noop, save_mock):
        # A plan submitted before PREINSTALL_TEST_PLAN_LATEST (mocked to
        # tomorrow) is flagged as outdated on the status page.
        self._submit_pdf()
        req = req_factory_factory(self.url, user=self.user)
        r = status(req, self.webapp.slug)
        doc = pq(r.content)
        assert doc('.outdated')
| 42.734291
| 79
| 0.621938
|
acfc8987ea1ac4fe87c465109ceca3bd6ef81de0
| 437
|
py
|
Python
|
botw-modkit/resource.py
|
BravelyPeculiar/botw-modkit
|
3d3c2d3d491cbd63f330d210f391f353c38b96fc
|
[
"MIT"
] | 1
|
2020-08-16T18:49:53.000Z
|
2020-08-16T18:49:53.000Z
|
botw-modkit/resource.py
|
BravelyPeculiar/botw-modkit
|
3d3c2d3d491cbd63f330d210f391f353c38b96fc
|
[
"MIT"
] | null | null | null |
botw-modkit/resource.py
|
BravelyPeculiar/botw-modkit
|
3d3c2d3d491cbd63f330d210f391f353c38b96fc
|
[
"MIT"
] | null | null | null |
class ResourceManager:
    """Registry of Resource objects, unique by name.

    Resources are created lazily: asking for an unknown name registers
    and returns a fresh Resource.
    """
    def __init__(self):
        # Flat list of Resource instances; lookups scan linearly by name.
        self.resources = []
    def get_resource(self, name):
        """Return the resource named ``name``, creating and registering it
        on first request."""
        for res in self.resources:
            if res.name == name:
                return res
        # Not found: create, register, and return a new resource.
        # (The original used a for/else here; without a `break` the `else`
        # clause is misleading -- plain fall-through is clearer and
        # behaves identically.)
        new_res = Resource(name)
        self.resources.append(new_res)
        return new_res
class Resource:
    """A named resource with an optional data payload."""
    def __init__(self, name, data=None):
        # `data` stays None until the resource is populated.
        self.name = name
        self.data = data
    def __repr__(self):
        # Debug-friendly representation (added; purely additive).
        return '%s(name=%r, data=%r)' % (type(self).__name__,
                                         self.name, self.data)
| 25.705882
| 42
| 0.549199
|
acfc8a75fb15acd433afa5d30ac6445a658373bf
| 944
|
py
|
Python
|
tests/values_test.py
|
mfouesneau/vaex
|
d0f38b4beee381744df7e02a37c329581e8bd70e
|
[
"MIT"
] | 2
|
2020-12-01T09:41:54.000Z
|
2020-12-13T14:10:19.000Z
|
tests/values_test.py
|
mfouesneau/vaex
|
d0f38b4beee381744df7e02a37c329581e8bd70e
|
[
"MIT"
] | null | null | null |
tests/values_test.py
|
mfouesneau/vaex
|
d0f38b4beee381744df7e02a37c329581e8bd70e
|
[
"MIT"
] | null | null | null |
import pytest
from common import *
def test_values(ds_local):
    """Check that ``.values`` matches ``evaluate()`` for single columns
    and for column subsets of a local dataset."""
    ds = ds_local
    assert ds['x'].values.tolist() == ds.evaluate('x').tolist()
    assert ds['name'].values.tolist() == ds.evaluate('name').tolist()
    assert ds['obj'].values.tolist() == ds.evaluate('obj').tolist()
    assert ds[['x', 'y']].values.tolist() == np.array([ds.evaluate('x'), ds.evaluate('y')]).T.tolist()
    assert ds[['x', 'y']].values.shape == (len(ds), 2)
    assert ds[['m']].values[9][0] == 77777.0
    assert ds[['m', 'x']].values[9][0] == 77777
    # The missing values are included. This may not be the correct behaviour
    # BUG FIX: the original line used `assert a, b` -- the comma makes `b`
    # the assertion *message*, so only the truthiness of `a` was checked.
    # Compare with `==` so the values are actually verified.
    assert ds[['x', 'y', 'nm']].values.tolist() == np.array([ds.evaluate('x'), ds.evaluate('y'), ds.evaluate('nm')]).T.tolist()
@pytest.mark.skip(reason='TOFIX: obj is now recognized as str')
def test_object_column_values(ds_local):
    """Selecting an object column together with regular ones must raise."""
    ds = ds_local
    columns = ['x', 'name', 'nm', 'obj']
    with pytest.raises(ValueError):
        _ = ds[columns].values
| 41.043478
| 125
| 0.617585
|
acfc8aa1ff9bcb1ccaa78356feb344df5de7dcc9
| 2,209
|
py
|
Python
|
wprevents/admin/tests/test_views.py
|
arroway/wprevents
|
5ed14c0e85c1a6463a0e72c94836fae81fbe3fda
|
[
"BSD-3-Clause"
] | 1
|
2015-02-07T10:04:48.000Z
|
2015-02-07T10:04:48.000Z
|
wprevents/admin/tests/test_views.py
|
yvan-sraka/wprevents
|
03f95150fe7c09338c3a17e00a4b85febef87789
|
[
"BSD-3-Clause"
] | 6
|
2015-02-07T10:08:38.000Z
|
2021-06-06T13:17:24.000Z
|
wprevents/admin/tests/test_views.py
|
yvan-sraka/wprevents
|
03f95150fe7c09338c3a17e00a4b85febef87789
|
[
"BSD-3-Clause"
] | 4
|
2015-01-20T19:48:31.000Z
|
2017-04-08T22:10:52.000Z
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from wprevents.events.models import Event
# Credentials for the throwaway superuser created in ViewsTest.setUp().
test_username = 'admin_tester'
test_password = 'abcd1234'
class ViewsTest(TestCase):
    """Smoke tests for the events admin-section views."""
    fixtures = ['events_test_data.json']
    def setUp(self):
        # Admin views require a privileged account.
        User.objects.create_superuser(username=test_username, email='test@mozilla.org', password=test_password)
    # Events
    def test_view_admin_events(self):
        """should list events in admin section"""
        self.client.login(username=test_username, password=test_password)
        response = self.client.get(reverse('admin_event_list'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'events.html')
    # Delete and dedupe
    def test_view_admin_events_delete_logged_in(self):
        """should delete/dedupe an event when logged in"""
        self.client.login(username=test_username, password=test_password)
        response = self.client.post(reverse('admin_event_ajax_delete'), { 'id': 1 }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Event.objects.filter(pk=1).exists(), False)
    def test_view_admin_events_delete_not_logged_out(self):
        """should not delete/dedupe an event when not logged in (redirects
        to login and leaves the event untouched)"""
        response = self.client.post(reverse('admin_event_ajax_delete'), { 'id': 1 }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Event.objects.filter(pk=1).exists(), True)
    # Spaces
    def test_view_admin_spaces(self):
        """should list spaces in admin section"""
        self.client.login(username=test_username, password=test_password)
        response = self.client.get(reverse('admin_spaces_all'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'spaces.html')
    # Functional areas
    def test_view_admin_areas(self):
        """should list functional areas in admin section"""
        self.client.login(username=test_username, password=test_password)
        response = self.client.get(reverse('admin_area_list'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'areas.html')
| 35.063492
| 120
| 0.757356
|
acfc8dee310c6ffb2c7002d2119ed1600a9755b4
| 856
|
py
|
Python
|
dob.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | 1
|
2021-06-07T07:55:28.000Z
|
2021-06-07T07:55:28.000Z
|
dob.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | null | null | null |
dob.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | null | null | null |
# Python Program To Create Dob Class Within Person Class
'''
Function Name : Create Date Of Birth Within Person Class
Function Date : 17 Sep 2020
Function Author : Prasad Dangare
Input : String,Integer
Output : String,Integer
'''
class Person:
    """A person with a name and a date of birth (held by the inner Dob
    class).

    Generalized: name and date components are now parameters; the
    defaults preserve the original hard-coded example values, so
    ``Person()`` behaves exactly as before.
    """
    def __init__(self, name='Prasad', dd=9, mm=3, yy=2002):
        self.name = name
        # Inner-class instance holding the date-of-birth components.
        self.db = self.Dob(dd, mm, yy)
    def display(self):
        """Print the person's name."""
        print('Name = ', self.name)
    # This Is Inner Class
    class Dob:
        """Date of birth as day/month/year components."""
        def __init__(self, dd=9, mm=3, yy=2002):
            self.dd = dd
            self.mm = mm
            self.yy = yy
        def display(self):
            """Print the date of birth as dd/mm/yyyy."""
            print('Dob = {}/{}/{}'.format(self.dd, self.mm, self.yy))
# Demo: construct a Person, print the name, then print the date of birth
# via the inner Dob instance.
# Creating Person Class Object
p = Person()
p.display()
# Create Inner Class Object
x = p.db
x.display()
| 23.135135
| 70
| 0.518692
|
acfc8e0688d055c082de68c77fa95d53703abc32
| 3,148
|
py
|
Python
|
nltk/corpus/reader/indian.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
nltk/corpus/reader/indian.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
nltk/corpus/reader/indian.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
# Natural Language Toolkit: Indian Language POS-Tagged Corpus Reader
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Indian Language POS-Tagged Corpus
Collected by A Kumaran, Microsoft Research, India
Distributed with permission
Contents:
- Bangla: IIT Kharagpur
- Hindi: Microsoft Research India
- Marathi: IIT Bombay
- Telugu: IIIT Hyderabad
"""
from nltk.tag import str2tuple, map_tag
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class IndianCorpusReader(CorpusReader):
    """
    List of words, one per line. Blank lines are ignored.
    """

    def _tag_mapper(self, tagset):
        """Return a function mapping this corpus's tags to ``tagset``, or
        None when no mapping is needed.

        Replaces the former inline ``lambda`` assignments (PEP 8 E731).
        """
        if tagset and tagset != self._tagset:
            def mapper(t):
                return map_tag(self._tagset, tagset, t)
            return mapper
        return None

    def _views(self, fileids, tagged, group_by_sent, tag_mapping_function=None):
        """Concatenate IndianCorpusView instances over ``fileids``.

        Shared by words/sents/tagged_words/tagged_sents, which previously
        repeated this body four times.
        """
        return concat(
            [
                IndianCorpusView(fileid, enc, tagged, group_by_sent,
                                 tag_mapping_function)
                for (fileid, enc) in self.abspaths(fileids, True)
            ]
        )

    def words(self, fileids=None):
        return self._views(fileids, False, False)

    def tagged_words(self, fileids=None, tagset=None):
        return self._views(fileids, True, False, self._tag_mapper(tagset))

    def sents(self, fileids=None):
        return self._views(fileids, False, True)

    def tagged_sents(self, fileids=None, tagset=None):
        return self._views(fileids, True, True, self._tag_mapper(tagset))

    def raw(self, fileids=None):
        """Return the concatenated raw text of the given files."""
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, str):
            fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])
class IndianCorpusView(StreamBackedCorpusView):
    # Corpus view over a file where each line is one sentence of
    # "word_TAG" tokens; lines starting with '<' are markup and yield nothing.
    def __init__(
        self, corpus_file, encoding, tagged, group_by_sent, tag_mapping_function=None
    ):
        # tagged: yield (word, tag) tuples rather than bare words.
        # group_by_sent: yield one list per sentence rather than a flat token stream.
        # tag_mapping_function: optional per-token tagset conversion callable.
        self._tagged = tagged
        self._group_by_sent = group_by_sent
        self._tag_mapping_function = tag_mapping_function
        StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
    def read_block(self, stream):
        # Consume exactly one line per call: StreamBackedCorpusView records
        # stream offsets per block, so the read amount must stay consistent.
        line = stream.readline()
        if line.startswith("<"):
            # Markup/header line: contributes no tokens.
            return []
        # Split "word_TAG" tokens into (word, tag) pairs.
        sent = [str2tuple(word, sep="_") for word in line.split()]
        if self._tag_mapping_function:
            sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent]
        if not self._tagged:
            sent = [w for (w, t) in sent]
        if self._group_by_sent:
            return [sent]
        else:
            return sent
| 30.862745
| 85
| 0.60737
|
acfc8f5d10db251029d4c39c28c2874a8024d401
| 55,760
|
py
|
Python
|
sklearn/utils/validation.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T18:06:44.000Z
|
2021-05-25T18:06:44.000Z
|
sklearn/utils/validation.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn/utils/validation.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | null | null | null |
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# Sylvain Marie
# License: BSD 3 clause
from functools import wraps
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from inspect import signature, isclass, Parameter
# mypy error: Module 'numpy.core.numeric' has no attribute 'ComplexWarning'
from numpy.core.numeric import ComplexWarning # type: ignore
import joblib
from contextlib import suppress
from .fixes import _object_dtype_isnan, parse_version
from .. import get_config as _get_config
from ..exceptions import PositiveSpectrumWarning
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
def _deprecate_positional_args(func=None, *, version="1.1 (renaming of 0.26)"):
"""Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
func : callable, default=None
Function to check arguments on.
version : callable, default="1.1 (renaming of 0.26)"
The version when positional arguments will result in error.
"""
def _inner_deprecate_positional_args(f):
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args <= 0:
return f(*args, **kwargs)
# extra_args > 0
args_msg = ['{}={}'.format(name, arg)
for name, arg in zip(kwonly_args[:extra_args],
args[-extra_args:])]
args_msg = ", ".join(args_msg)
warnings.warn(f"Pass {args_msg} as keyword args. From version "
f"{version} passing these as positional arguments "
"will result in an error", FutureWarning)
kwargs.update(zip(sig.parameters, args))
return f(**kwargs)
return inner_f
if func is not None:
return _inner_deprecate_positional_args(func)
return _inner_deprecate_positional_args
def _assert_all_finite(X, allow_nan=False, msg_dtype=None):
    """Like assert_all_finite, but only for ndarray."""
    # validation is also imported in extmath
    from .extmath import _safe_accumulator_op
    # Global escape hatch: users who promised finite data via
    # sklearn.set_config(assume_finite=True) skip all checks.
    if _get_config()['assume_finite']:
        return
    X = np.asanyarray(X)
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in sum method. The sum is also calculated
    # safely to reduce dtype induced overflows.
    is_float = X.dtype.kind in 'fc'
    if is_float and (np.isfinite(_safe_accumulator_op(np.sum, X))):
        pass
    elif is_float:
        msg_err = "Input contains {} or a value too large for {!r}."
        if (allow_nan and np.isinf(X).any() or
                not allow_nan and not np.isfinite(X).all()):
            type_err = 'infinity' if allow_nan else 'NaN, infinity'
            # NOTE: `msg_err.format` and the parenthesised arguments below are
            # split across lines but still parse as a single method call.
            raise ValueError(
                    msg_err.format
                    (type_err,
                     msg_dtype if msg_dtype is not None else X.dtype)
            )
    # for object dtype data, we only check for NaNs (GH-13254)
    elif X.dtype == np.dtype('object') and not allow_nan:
        if _object_dtype_isnan(X).any():
            raise ValueError("Input contains NaN")
def assert_all_finite(X, *, allow_nan=False):
    """Throw a ValueError if X contains NaN or infinity.
    Parameters
    ----------
    X : {ndarray, sparse matrix}
    allow_nan : bool, default=False
    """
    # For sparse input only the explicitly stored entries need checking.
    values = X.data if sp.issparse(X) else X
    _assert_all_finite(values, allow_nan)
def as_float_array(X, *, copy=True, force_all_finite=True):
    """Convert an array-like into a float array.
    The result dtype is np.float32 or np.float64 depending on the input
    dtype. Depending on ``copy``, the input may be returned unchanged.
    Parameters
    ----------
    X : {array-like, sparse matrix}
    copy : bool, default=True
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.
    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in X:
        - True: force all values of X to be finite.
        - False: accept np.inf, np.nan, pd.NA in X.
        - 'allow-nan': accept only np.nan and pd.NA; values cannot be
          infinite.
    Returns
    -------
    XT : {ndarray, sparse matrix}
        An array of type float.
    """
    is_plain_ndarray = isinstance(X, np.ndarray) and not isinstance(X, np.matrix)
    if not is_plain_ndarray and not sp.issparse(X):
        # np.matrix and generic array-likes go through full validation with a
        # forced float64 dtype.
        return check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                           dtype=np.float64, copy=copy,
                           force_all_finite=force_all_finite, ensure_2d=False)
    if X.dtype in [np.float32, np.float64]:
        # Already a supported float dtype: at most copy.
        if sp.issparse(X):
            return X.copy() if copy else X
        if not copy:
            return X
        # Preserve the existing memory layout when copying.
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C')
    # Integer/boolean data: choose a float width that will not lose precision.
    small_int = X.dtype.kind in 'uib' and X.dtype.itemsize <= 4
    return X.astype(np.float32 if small_int else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like."""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = (
"Unable to find the number of features from X of type "
f"{type_name}"
)
if not hasattr(X, '__len__') and not hasattr(X, 'shape'):
if not hasattr(X, '__array__'):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, 'shape'):
if not hasattr(X.shape, '__len__') or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings or dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += (f" where the samples are of type "
f"{type(first_sample).__qualname__}")
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
def _num_samples(x):
"""Return number of samples in array-like x."""
message = 'Expected sequence or array-like, got %s' % type(x)
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError(message)
if hasattr(x, 'shape') and x.shape is not None:
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x.shape[0], numbers.Integral):
return x.shape[0]
try:
return len(x)
except TypeError as type_error:
raise TypeError(message) from type_error
def check_memory(memory):
    """Check that ``memory`` is joblib.Memory-like.
    joblib.Memory-like means that ``memory`` can be converted into a
    joblib.Memory instance (typically a str denoting the ``location``)
    or has the same interface (has a ``cache`` method).
    Parameters
    ----------
    memory : None, str or object with the joblib.Memory interface
    Returns
    -------
    memory : object with the joblib.Memory interface
    Raises
    ------
    ValueError
        If ``memory`` is not joblib.Memory-like.
    """
    if memory is None or isinstance(memory, str):
        # joblib 0.12 renamed the constructor argument ``cachedir`` to
        # ``location``; select the keyword matching the installed version.
        if parse_version(joblib.__version__) < parse_version('0.12'):
            memory = joblib.Memory(cachedir=memory, verbose=0)
        else:
            memory = joblib.Memory(location=memory, verbose=0)
    elif not hasattr(memory, 'cache'):
        # Duck-typed memory objects only need to expose a ``cache`` method.
        raise ValueError("'memory' should be None, a string or have the same"
                         " interface as joblib.Memory."
                         " Got memory='{}' instead.".format(memory))
    return memory
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.
    Checks whether all objects in arrays have the same shape or length.
    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    # ``None`` entries (e.g. optional sample weights) are skipped.
    lengths = [_num_samples(X) for X in arrays if X is not None]
    if len(set(lengths)) > 1:
        raise ValueError("Found input variables with inconsistent numbers of"
                         " samples: %r" % [int(n) for n in lengths])
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse matrix} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
def indexable(*iterables):
    """Make arrays indexable for cross-validation.
    Checks consistent length, passes through None, and ensures that
    everything can be indexed: sparse matrices are converted to CSR and
    non-indexable iterables to arrays.
    Parameters
    ----------
    *iterables : {lists, dataframes, ndarrays, sparse matrices}
        List of objects to ensure sliceability.
    """
    converted = [_make_indexable(item) for item in iterables]
    # All inputs must agree on the number of samples.
    check_consistent_length(*converted)
    return converted
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite, accept_large_sparse):
    """Convert a sparse matrix to a given format.
    Checks the sparse format of spmatrix and converts if necessary.
    Parameters
    ----------
    spmatrix : sparse matrix
        Input to validate and convert.
    accept_sparse : str, bool or list/tuple of str
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
        not in the allowed format, it will be converted to the first listed
        format. True allows the input to be any format. False means
        that a sparse matrix input will raise an error.
    dtype : str, type or None
        Data type of result. If None, the dtype of the input is preserved.
    copy : bool
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : bool or 'allow-nan'
        Whether to raise an error on np.inf, np.nan, pd.NA in X. The
        possibilities are:
        - True: Force all values of X to be finite.
        - False: accepts np.inf, np.nan, pd.NA in X.
        - 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
          be infinite.
        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.
        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`
    Returns
    -------
    spmatrix_converted : sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if dtype is None:
        dtype = spmatrix.dtype
    # Track whether a format conversion happened: asformat() already copies,
    # so an explicit copy is only forced when no conversion occurred.
    changed_format = False
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]
    # Indices dtype validation
    _check_large_sparse(spmatrix, accept_large_sparse)
    if accept_sparse is False:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    elif isinstance(accept_sparse, (list, tuple)):
        if len(accept_sparse) == 0:
            raise ValueError("When providing 'accept_sparse' "
                             "as a tuple or list, it must contain at "
                             "least one string value.")
        # ensure correct sparse format
        if spmatrix.format not in accept_sparse:
            # create new with correct sparse
            spmatrix = spmatrix.asformat(accept_sparse[0])
            changed_format = True
    elif accept_sparse is not True:
        # any other type
        raise ValueError("Parameter 'accept_sparse' should be a string, "
                         "boolean or list of strings. You provided "
                         "'accept_sparse={}'.".format(accept_sparse))
    if dtype != spmatrix.dtype:
        # convert dtype
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy
        spmatrix = spmatrix.copy()
    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # Formats without a flat .data buffer (e.g. DOK) cannot be
            # scanned cheaply; warn instead of failing.
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format, stacklevel=2)
        else:
            # Only the explicitly stored entries can be non-finite.
            _assert_all_finite(spmatrix.data,
                               allow_nan=force_all_finite == 'allow-nan')
    return spmatrix
def _ensure_no_complex_data(array):
if hasattr(array, 'dtype') and array.dtype is not None \
and hasattr(array.dtype, 'kind') and array.dtype.kind == "c":
raise ValueError("Complex data not supported\n"
"{}\n".format(array))
def check_array(array, accept_sparse=False, *, accept_large_sparse=True,
                dtype="numeric", order=None, copy=False, force_all_finite=True,
                ensure_2d=True, allow_nd=False, ensure_min_samples=1,
                ensure_min_features=1, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is checked to be a non-empty 2D array containing
    only finite values. If the dtype of the array is object, attempt
    converting to float, raising on failure.
    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : str, bool or list/tuple of str, default=False
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
    accept_large_sparse : bool, default=True
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse=False will cause it to be accepted
        only if its indices are stored with a 32-bit dtype.
        .. versionadded:: 0.20
    dtype : 'numeric', type, list of type or None, default='numeric'
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : {'F', 'C'} or None, default=None
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.
    copy : bool, default=False
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:
        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.
        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.
        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`
    ensure_2d : bool, default=True
        Whether to raise a value error if array is not 2D.
    allow_nd : bool, default=False
        Whether to allow array.ndim > 2.
    ensure_min_samples : int, default=1
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int, default=1
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    estimator : str or estimator instance, default=None
        If passed, include the name of the estimator in warning messages.
    Returns
    -------
    array_converted : object
        The converted and validated array.
    """
    # store reference to original array to check if copy is needed when
    # function returns
    array_orig = array
    # store whether originally we wanted numeric dtype
    dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None
    # check if the object contains several dtypes (typically a pandas
    # DataFrame), and store them. If not, store None.
    dtypes_orig = None
    has_pd_integer_array = False
    if hasattr(array, "dtypes") and hasattr(array.dtypes, '__array__'):
        # throw warning if columns are sparse. If all columns are sparse, then
        # array.sparse exists and sparsity will be perserved (later).
        with suppress(ImportError):
            from pandas.api.types import is_sparse
            if (not hasattr(array, 'sparse') and
                    array.dtypes.apply(is_sparse).any()):
                warnings.warn(
                    "pandas.DataFrame with sparse columns found."
                    "It will be converted to a dense numpy array."
                )
        dtypes_orig = list(array.dtypes)
        # pandas boolean dtype __array__ interface coerces bools to objects
        for i, dtype_iter in enumerate(dtypes_orig):
            if dtype_iter.kind == 'b':
                dtypes_orig[i] = np.dtype(object)
            elif dtype_iter.name.startswith(("Int", "UInt")):
                # name looks like an Integer Extension Array, now check for
                # the dtype
                with suppress(ImportError):
                    from pandas import (Int8Dtype, Int16Dtype,
                                        Int32Dtype, Int64Dtype,
                                        UInt8Dtype, UInt16Dtype,
                                        UInt32Dtype, UInt64Dtype)
                    if isinstance(dtype_iter, (Int8Dtype, Int16Dtype,
                                               Int32Dtype, Int64Dtype,
                                               UInt8Dtype, UInt16Dtype,
                                               UInt32Dtype, UInt64Dtype)):
                        has_pd_integer_array = True
        # A common numpy dtype is only derivable when every column has a
        # plain numpy dtype (extension dtypes are excluded).
        if all(isinstance(dtype, np.dtype) for dtype in dtypes_orig):
            dtype_orig = np.result_type(*dtypes_orig)
    # Resolve the requested dtype into a concrete numpy dtype (or None).
    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None
    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]
    if has_pd_integer_array:
        # If there are any pandas integer extension arrays,
        array = array.astype(dtype)
    if force_all_finite not in (True, False, 'allow-nan'):
        raise ValueError('force_all_finite should be a bool or "allow-nan"'
                         '. Got {!r} instead'.format(force_all_finite))
    # Resolve the estimator name used in error messages.
    if estimator is not None:
        if isinstance(estimator, str):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""
    # When all dataframe columns are sparse, convert to a sparse array
    if hasattr(array, 'sparse') and array.ndim > 1:
        # DataFrame.sparse only supports `to_coo`
        array = array.sparse.to_coo()
        if array.dtype == np.dtype('object'):
            unique_dtypes = set(
                [dt.subtype.name for dt in array_orig.dtypes]
            )
            if len(unique_dtypes) > 1:
                raise ValueError(
                    "Pandas DataFrame with mixed sparse extension arrays "
                    "generated a sparse matrix with object dtype which "
                    "can not be converted to a scipy sparse matrix."
                    "Sparse extension arrays should all have the same "
                    "numeric type.")
    if sp.issparse(array):
        # Sparse path: format/dtype/finiteness handled by the helper.
        _ensure_no_complex_data(array)
        array = _ensure_sparse_format(array, accept_sparse=accept_sparse,
                                      dtype=dtype, copy=copy,
                                      force_all_finite=force_all_finite,
                                      accept_large_sparse=accept_large_sparse)
    else:
        # If np.array(..) gives ComplexWarning, then we convert the warning
        # to an error. This is needed because specifying a non complex
        # dtype to the function converts complex to real dtype,
        # thereby passing the test made in the lines following the scope
        # of warnings context manager.
        with warnings.catch_warnings():
            try:
                warnings.simplefilter('error', ComplexWarning)
                if dtype is not None and np.dtype(dtype).kind in 'iu':
                    # Conversion float -> int should not contain NaN or
                    # inf (numpy#14412). We cannot use casting='safe' because
                    # then conversion float -> int would be disallowed.
                    array = np.asarray(array, order=order)
                    if array.dtype.kind == 'f':
                        _assert_all_finite(array, allow_nan=False,
                                           msg_dtype=dtype)
                    array = array.astype(dtype, casting="unsafe", copy=False)
                else:
                    array = np.asarray(array, order=order, dtype=dtype)
            except ComplexWarning as complex_warning:
                raise ValueError("Complex data not supported\n"
                                 "{}\n".format(array)) from complex_warning
        # It is possible that the np.array(..) gave no warning. This happens
        # when no dtype conversion happened, for example dtype = None. The
        # result is that np.array(..) produces an array of complex dtype
        # and we need to catch and raise exception for such cases.
        _ensure_no_complex_data(array)
        if ensure_2d:
            # If input is scalar raise error
            if array.ndim == 0:
                raise ValueError(
                    "Expected 2D array, got scalar array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array))
            # If input is 1D raise error
            if array.ndim == 1:
                raise ValueError(
                    "Expected 2D array, got 1D array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array))
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind in "OUSV":
            warnings.warn(
                "Arrays of bytes/strings is being converted to decimal "
                "numbers if dtype='numeric'. This behavior is deprecated in "
                "0.24 and will be removed in 1.1 (renaming of 0.26). Please "
                "convert your data to numeric values explicitly instead.",
                FutureWarning, stacklevel=2
            )
            try:
                array = array.astype(np.float64)
            except ValueError as e:
                raise ValueError(
                    "Unable to convert array of bytes/strings "
                    "into decimal numbers with dtype='numeric'") from e
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. %s expected <= 2."
                             % (array.ndim, estimator_name))
        if force_all_finite:
            _assert_all_finite(array,
                               allow_nan=force_all_finite == 'allow-nan')
    # Size checks apply to both the sparse and dense paths.
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required%s."
                             % (n_samples, array.shape, ensure_min_samples,
                                context))
    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required%s."
                             % (n_features, array.shape, ensure_min_features,
                                context))
    # Honour copy=True even when no conversion above produced a new buffer.
    if copy and np.may_share_memory(array, array_orig):
        array = np.array(array, dtype=dtype, order=order)
    return array
def _check_large_sparse(X, accept_large_sparse=False):
"""Raise a ValueError if X has 64bit indices and accept_large_sparse=False
"""
if not accept_large_sparse:
supported_indices = ["int32"]
if X.getformat() == "coo":
index_keys = ['col', 'row']
elif X.getformat() in ["csr", "csc", "bsr"]:
index_keys = ['indices', 'indptr']
else:
return
for key in index_keys:
indices_datatype = getattr(X, key).dtype
if (indices_datatype not in supported_indices):
raise ValueError("Only sparse matrices with 32-bit integer"
" indices are accepted. Got %s indices."
% indices_datatype)
def check_X_y(X, y, accept_sparse=False, *, accept_large_sparse=True,
              dtype="numeric", order=None, copy=False, force_all_finite=True,
              ensure_2d=True, allow_nd=False, multi_output=False,
              ensure_min_samples=1, ensure_min_features=1, y_numeric=False,
              estimator=None):
    """Input validation for standard estimators.
    Checks X and y for consistent length, enforces X to be 2D and y 1D. By
    default, X is checked to be non-empty and containing only finite values.
    Standard input checks are also applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2D and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.
    Parameters
    ----------
    X : {ndarray, list, sparse matrix}
        Input data.
    y : {ndarray, list, sparse matrix}
        Labels.
    accept_sparse : str, bool or list of str, default=False
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
    accept_large_sparse : bool, default=True
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse will cause it to be accepted only
        if its indices are stored with a 32-bit dtype.
        .. versionadded:: 0.20
    dtype : 'numeric', type, list of type or None, default='numeric'
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : {'F', 'C'}, default=None
        Whether an array will be forced to be fortran or c-style.
    copy : bool, default=False
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
        does not influence whether y can have np.inf, np.nan, pd.NA values.
        The possibilities are:
        - True: Force all values of X to be finite.
        - False: accepts np.inf, np.nan, pd.NA in X.
        - 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
          be infinite.
        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.
        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`
    ensure_2d : bool, default=True
        Whether to raise a value error if X is not 2D.
    allow_nd : bool, default=False
        Whether to allow X.ndim > 2.
    multi_output : bool, default=False
        Whether to allow 2D y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.
    ensure_min_samples : int, default=1
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).
    ensure_min_features : int, default=1
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.
    y_numeric : bool, default=False
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.
    estimator : str or estimator instance, default=None
        If passed, include the name of the estimator in warning messages.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    y_converted : object
        The converted and validated y.
    """
    if y is None:
        raise ValueError("y cannot be None")
    # Validate X first: its error messages reference the estimator and are
    # the most informative for the user.
    X = check_array(X, accept_sparse=accept_sparse,
                    accept_large_sparse=accept_large_sparse,
                    dtype=dtype, order=order, copy=copy,
                    force_all_finite=force_all_finite,
                    ensure_2d=ensure_2d, allow_nd=allow_nd,
                    ensure_min_samples=ensure_min_samples,
                    ensure_min_features=ensure_min_features,
                    estimator=estimator)
    if multi_output:
        # 2-D / sparse targets allowed; finiteness still enforced.
        y = check_array(y, accept_sparse='csr', force_all_finite=True,
                        ensure_2d=False, dtype=None)
    else:
        # Single-output targets must be 1-D (column vectors are ravelled
        # with a warning) and finite.
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
        if y_numeric and y.dtype.kind == 'O':
            y = y.astype(np.float64)
    check_consistent_length(X, y)
    return X, y
def column_or_1d(y, *, warn=False):
    """Ravel a 1d array or a column vector; raise for any other shape.
    Parameters
    ----------
    y : array-like
    warn : bool, default=False
        To control display of warnings.
    Returns
    -------
    y : ndarray
    """
    y = np.asarray(y)
    shape = np.shape(y)
    ndim = len(shape)
    if ndim == 1:
        return np.ravel(y)
    if ndim == 2 and shape[1] == 1:
        # A column vector is accepted but flagged, since most estimators
        # expect a flat (n_samples,) target.
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    raise ValueError(
        "y should be a 1d array, "
        "got an array of shape {} instead.".format(shape))
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.
    Parameters
    ----------
    seed : None, int or instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.
    """
    # The module-level singleton backs np.random.* convenience functions.
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    # An existing generator is passed through untouched.
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Check whether the estimator's fit method accepts the given parameter.
    Parameters
    ----------
    estimator : object
        An estimator to inspect.
    parameter : str
        The searched parameter.
    Returns
    -------
    is_parameter: bool
        Whether the parameter was found to be a named parameter of the
        estimator's fit method.
    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> has_fit_parameter(SVC(), "sample_weight")
    True
    """
    fit_params = signature(estimator.fit).parameters
    return parameter in fit_params
def check_symmetric(array, *, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Ensure *array* is 2D, square and symmetric within *tol*.

    A non-symmetric input is replaced by its symmetrized average
    ``(A + A.T) / 2``; a warning or an exception can be raised first.

    Parameters
    ----------
    array : {ndarray, sparse matrix}
        Square 2D input; anything else raises ``ValueError``.
    tol : float, default=1e-10
        Absolute tolerance for the comparison with the transpose.
    raise_warning : bool, default=True
        Warn when the input had to be symmetrized.
    raise_exception : bool, default=False
        Raise ``ValueError`` instead of symmetrizing.

    Returns
    -------
    array_sym : {ndarray, sparse matrix}
        The input, or its symmetrized version. For sparse input,
        duplicate entries are summed and zeros eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))
    if sp.issparse(array):
        diff = array - array.T
        # `data` only exists on csr/csc/coo formats.
        if diff.format not in ('csr', 'csc', 'coo'):
            diff = diff.tocsr()
        is_symmetric = bool(np.all(abs(diff.data) < tol))
    else:
        is_symmetric = np.allclose(array, array.T, atol=tol)
    if is_symmetric:
        return array
    if raise_exception:
        raise ValueError("Array must be symmetric")
    if raise_warning:
        warnings.warn("Array is not symmetric, and will be converted "
                      "to symmetric by average with its transpose.",
                      stacklevel=2)
    if sp.issparse(array):
        # Symmetrize, then convert back to the input's sparse format.
        back_to_format = 'to' + array.format
        return getattr(0.5 * (array + array.T), back_to_format)()
    return 0.5 * (array + array.T)
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
    """Raise ``NotFittedError`` unless *estimator* looks fitted.

    Intended for internal use in estimators' own predict / transform
    methods. An estimator counts as fitted when the requested
    *attributes* exist or, when *attributes* is None, when it carries at
    least one attribute ending with a single trailing underscore.

    Parameters
    ----------
    estimator : estimator instance
        Instance to check.
    attributes : str, list or tuple of str, default=None
        Attribute name(s) whose presence proves fitting, e.g.
        ``["coef_", "estimator_", ...]`` or ``"coef_"``. When None, any
        attribute ending with "_" but not starting with "__" qualifies.
    msg : str, default=None
        Custom error message; a ``%(name)s`` placeholder is replaced with
        the estimator's class name.
    all_or_any : callable, {all, any}, default=all
        Whether all or any of *attributes* must exist.

    Returns
    -------
    None

    Raises
    ------
    NotFittedError
        When the fitted attributes are missing.
    TypeError
        When a class (not an instance) or a non-estimator is passed.
    """
    if isclass(estimator):
        raise TypeError("{} is a class, not an instance.".format(estimator))
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this estimator.")
    if not hasattr(estimator, 'fit'):
        raise TypeError("%s is not an estimator instance." % (estimator))
    if attributes is not None:
        if not isinstance(attributes, (list, tuple)):
            attributes = [attributes]
        fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])
    else:
        # Fall back to the trailing-underscore convention.
        fitted = [name for name in vars(estimator)
                  if name.endswith("_") and not name.startswith("__")]
    if not fitted:
        raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
    """Raise ``ValueError`` when *X* contains a negative value.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data to inspect.
    whom : str
        Name of the caller, interpolated into the error message.
    """
    if sp.issparse(X):
        # Avoid X.min() on sparse input, since it would also sort the
        # indices; inspect the raw data buffer instead.
        if X.format in ('lil', 'dok'):
            X = X.tocsr()
        minimum = X.data.min() if X.data.size else 0
    else:
        minimum = X.min()
    if minimum < 0:
        raise ValueError("Negative values in data passed to %s" % whom)
def check_scalar(x, name, target_type, *, min_val=None, max_val=None):
    """Validate a scalar parameter's type and value range.

    Parameters
    ----------
    x : object
        The scalar to validate.
    name : str
        Parameter name used in error messages.
    target_type : type or tuple
        Acceptable type(s) for *x*.
    min_val : float or int, default=None
        Inclusive lower bound; None means unbounded below.
    max_val : float or int, default=None
        Inclusive upper bound; None means unbounded above.

    Raises
    ------
    TypeError
        When *x* is not an instance of *target_type*.
    ValueError
        When *x* violates the given bounds.
    """
    if not isinstance(x, target_type):
        raise TypeError(f'`{name}` must be an instance of {target_type}, '
                        f'not {type(x)}.')
    if min_val is not None and x < min_val:
        raise ValueError(f'`{name}`= {x}, must be >= {min_val}.')
    if max_val is not None and x > max_val:
        raise ValueError(f'`{name}`= {x}, must be <= {max_val}.')
def _check_psd_eigenvalues(lambdas, enable_warnings=False):
"""Check the eigenvalues of a positive semidefinite (PSD) matrix.
Checks the provided array of PSD matrix eigenvalues for numerical or
conditioning issues and returns a fixed validated version. This method
should typically be used if the PSD matrix is user-provided (e.g. a
Gram matrix) or computed using a user-provided dissimilarity metric
(e.g. kernel function), or if the decomposition process uses approximation
methods (randomized SVD, etc.).
It checks for three things:
- that there are no significant imaginary parts in eigenvalues (more than
1e-5 times the maximum real part). If this check fails, it raises a
``ValueError``. Otherwise all non-significant imaginary parts that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
- that eigenvalues are not all negative. If this check fails, it raises a
``ValueError``
- that there are no significant negative eigenvalues with absolute value
more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest
positive eigenvalue in double (simple) precision. If this check fails,
it raises a ``ValueError``. Otherwise all negative eigenvalues that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
Finally, all the positive eigenvalues that are too small (with a value
smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to
zero. This operation is traced with a ``PositiveSpectrumWarning`` when
``enable_warnings=True``.
Parameters
----------
lambdas : array-like of shape (n_eigenvalues,)
Array of eigenvalues to check / fix.
enable_warnings : bool, default=False
When this is set to ``True``, a ``PositiveSpectrumWarning`` will be
raised when there are imaginary parts, negative eigenvalues, or
extremely small non-zero eigenvalues. Otherwise no warning will be
raised. In both cases, imaginary parts, negative eigenvalues, and
extremely small non-zero eigenvalues will be set to zero.
Returns
-------
lambdas_fixed : ndarray of shape (n_eigenvalues,)
A fixed validated copy of the array of eigenvalues.
Examples
--------
>>> _check_psd_eigenvalues([1, 2]) # nominal case
array([1, 2])
>>> _check_psd_eigenvalues([5, 5j]) # significant imag part
Traceback (most recent call last):
...
ValueError: There are significant imaginary parts in eigenvalues (1
of the maximum real part). Either the matrix is not PSD, or there was
an issue while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part
array([5., 0.])
>>> _check_psd_eigenvalues([-5, -1]) # all negative
Traceback (most recent call last):
...
ValueError: All eigenvalues are negative (maximum is -1). Either the
matrix is not PSD, or there was an issue while computing the
eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -1]) # significant negative
Traceback (most recent call last):
...
ValueError: There are significant negative eigenvalues (0.2 of the
maximum positive). Either the matrix is not PSD, or there was an issue
while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative
array([5., 0.])
>>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small)
array([5., 0.])
"""
lambdas = np.array(lambdas)
is_double_precision = lambdas.dtype == np.float64
# note: the minimum value available is
# - single-precision: np.finfo('float32').eps = 1.2e-07
# - double-precision: np.finfo('float64').eps = 2.2e-16
# the various thresholds used for validation
# we may wish to change the value according to precision.
significant_imag_ratio = 1e-5
significant_neg_ratio = 1e-5 if is_double_precision else 5e-3
significant_neg_value = 1e-10 if is_double_precision else 1e-6
small_pos_ratio = 1e-12 if is_double_precision else 2e-7
# Check that there are no significant imaginary parts
if not np.isreal(lambdas).all():
max_imag_abs = np.abs(np.imag(lambdas)).max()
max_real_abs = np.abs(np.real(lambdas)).max()
if max_imag_abs > significant_imag_ratio * max_real_abs:
raise ValueError(
"There are significant imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not PSD, or "
"there was an issue while computing the eigendecomposition "
"of the matrix."
% (max_imag_abs / max_real_abs))
# warn about imaginary parts being removed
if enable_warnings:
warnings.warn("There are imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not"
" PSD, or there was an issue while computing the "
"eigendecomposition of the matrix. Only the real "
"parts will be kept."
% (max_imag_abs / max_real_abs),
PositiveSpectrumWarning)
# Remove all imaginary parts (even if zero)
lambdas = np.real(lambdas)
# Check that there are no significant negative eigenvalues
max_eig = lambdas.max()
if max_eig < 0:
raise ValueError("All eigenvalues are negative (maximum is %g). "
"Either the matrix is not PSD, or there was an "
"issue while computing the eigendecomposition of "
"the matrix." % max_eig)
else:
min_eig = lambdas.min()
if (min_eig < -significant_neg_ratio * max_eig
and min_eig < -significant_neg_value):
raise ValueError("There are significant negative eigenvalues (%g"
" of the maximum positive). Either the matrix is "
"not PSD, or there was an issue while computing "
"the eigendecomposition of the matrix."
% (-min_eig / max_eig))
elif min_eig < 0:
# Remove all negative values and warn about it
if enable_warnings:
warnings.warn("There are negative eigenvalues (%g of the "
"maximum positive). Either the matrix is not "
"PSD, or there was an issue while computing the"
" eigendecomposition of the matrix. Negative "
"eigenvalues will be replaced with 0."
% (-min_eig / max_eig),
PositiveSpectrumWarning)
lambdas[lambdas < 0] = 0
# Check for conditioning (small positive non-zeros)
too_small_lambdas = (0 < lambdas) & (lambdas < small_pos_ratio * max_eig)
if too_small_lambdas.any():
if enable_warnings:
warnings.warn("Badly conditioned PSD matrix spectrum: the largest "
"eigenvalue is more than %g times the smallest. "
"Small eigenvalues will be replaced with 0."
"" % (1 / small_pos_ratio),
PositiveSpectrumWarning)
lambdas[too_small_lambdas] = 0
return lambdas
def _check_sample_weight(sample_weight, X, dtype=None, copy=False):
    """Validate *sample_weight*, expanding ``None`` / a scalar to an array.

    Note that ``sample_weight=None`` yields an array of ones, so callers
    that need to distinguish "no weights" should guard the call with
    ``if sample_weight is not None:``.

    Parameters
    ----------
    sample_weight : {ndarray, Number or None}, shape (n_samples,)
        Raw sample weights.
    X : {ndarray, list, sparse matrix}
        Data whose first dimension fixes the expected number of weights.
    dtype : dtype, default=None
        dtype of the validated result. ``None`` preserves an array
        input's dtype; any dtype other than float32/float64 is coerced
        to float64.
    copy : bool, default=False
        When True, force a copy of the input weights.

    Returns
    -------
    sample_weight : ndarray of shape (n_samples,)
        Validated, "C"-contiguous sample weights.
    """
    n_samples = _num_samples(X)
    if dtype is not None and dtype not in (np.float32, np.float64):
        # Normalize any exotic request to double precision.
        dtype = np.float64
    if sample_weight is None:
        return np.ones(n_samples, dtype=dtype)
    if isinstance(sample_weight, numbers.Number):
        return np.full(n_samples, sample_weight, dtype=dtype)
    if dtype is None:
        dtype = [np.float64, np.float32]
    sample_weight = check_array(
        sample_weight, accept_sparse=False, ensure_2d=False, dtype=dtype,
        order="C", copy=copy
    )
    if sample_weight.ndim != 1:
        raise ValueError("Sample weights must be 1D array or scalar")
    if sample_weight.shape != (n_samples,):
        raise ValueError("sample_weight.shape == {}, expected {}!"
                         .format(sample_weight.shape, (n_samples,)))
    return sample_weight
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9):
"""Check allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-7
Relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
"""
if sp.issparse(x) and sp.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
return (np.array_equal(x.indices, y.indices) and
np.array_equal(x.indptr, y.indptr) and
np.allclose(x.data, y.data, rtol=rtol, atol=atol))
elif not sp.issparse(x) and not sp.issparse(y):
return np.allclose(x, y, rtol=rtol, atol=atol)
raise ValueError("Can only compare two sparse matrices, not a sparse "
"matrix and an array")
def _check_fit_params(X, fit_params, indices=None):
    """Validate parameters passed at ``fit`` time and align them with *X*.

    Values whose sample count matches ``X`` are made indexable and sliced
    with *indices*; everything else is passed through unchanged (see
    https://github.com/scikit-learn/scikit-learn/issues/15805 for the
    backward-compatibility rationale).

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data array.
    fit_params : dict
        Parameters passed at fit.
    indices : array-like of shape (n_samples,), default=None
        Indices to select when a parameter has the same size as ``X``.

    Returns
    -------
    fit_params_validated : dict
        Validated parameters; array-like values support indexing.
    """
    from . import _safe_indexing
    validated = {}
    for key, value in fit_params.items():
        if (_is_arraylike(value)
                and _num_samples(value) == _num_samples(X)):
            # Sample-aligned parameters must support indexing
            # (e.g. for cross-validation splits).
            validated[key] = _safe_indexing(_make_indexable(value), indices)
        else:
            # Non-indexable pass-through (for now, for backward compat).
            validated[key] = value
    return validated
| 38.323024
| 79
| 0.615495
|
acfc903c335ef9236af6d0e0de71678b908f6705
| 854
|
py
|
Python
|
var/spack/repos/builtin/packages/r-findpython/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2020-10-15T01:08:42.000Z
|
2021-10-18T01:28:18.000Z
|
var/spack/repos/builtin/packages/r-findpython/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17
|
2018-09-20T18:32:50.000Z
|
2019-12-04T16:58:12.000Z
|
var/spack/repos/builtin/packages/r-findpython/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5
|
2019-07-30T09:42:14.000Z
|
2021-01-25T05:39:20.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFindpython(RPackage):
    """Package designed to find an acceptable python binary."""
    # Upstream project page and CRAN tarball locations; list_url covers
    # archived releases that have left the main CRAN index.
    homepage = "https://github.com/trevorld/findpython"
    url = "https://cloud.r-project.org/src/contrib/findpython_1.0.3.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/findpython"
    # Known releases, pinned by the sha256 checksum of the CRAN tarball.
    version('1.0.5', sha256='3e9a21988cb78833769b02680d128a0cc01bcb41aa9c9725ab1742f349759145')
    version('1.0.4', sha256='a58fb46d53d3bdea1e00b2f4f9bdb5e98be9329ea9d8e2fe150d91462e6bccfd')
    version('1.0.3', sha256='5486535ae2f0a123b630d8eabf93a61b730765f55dfcc8ef4f6e56e7c49408f8')
    # The package's whole purpose is to locate a python interpreter, so
    # one must be available at run time.
    depends_on('python', type='run')
| 40.666667
| 95
| 0.761124
|
acfc9065875199e688aab91f13d104d3eed8678f
| 1,002
|
py
|
Python
|
tests/ufirebase_test.py
|
esbullington/cct
|
1a89d346c76236a9710177a208730584ecb65c02
|
[
"MIT"
] | null | null | null |
tests/ufirebase_test.py
|
esbullington/cct
|
1a89d346c76236a9710177a208730584ecb65c02
|
[
"MIT"
] | null | null | null |
tests/ufirebase_test.py
|
esbullington/cct
|
1a89d346c76236a9710177a208730584ecb65c02
|
[
"MIT"
] | null | null | null |
# Append base path
# add project base directory to python path
# so we can access both cct and test dir
import usys
usys.path.append('.')
# show micropython path
print("Path: ")
print(usys.path)
from cct.google.ufirebase import Firebase
from tests.test_tools import assert_with_msg
import gc
import time
import os
# Enable automatic garbage collection before doing network I/O.
gc.enable()
# Connection settings come from the environment so that credentials are
# never committed to the repository.
database = os.getenv("FIREBASE_DATABASE")
service_account = os.getenv("FIREBASE_SA")
keyfile_location = os.getenv("FIREBASE_KEYFILE")
fb = Firebase(database, service_account, keyfile_location)
# --- round-trip a single value: put, read back, verify ---
fb.put("test_entry", "wwwzzzyyy")
v = fb.get("test_entry")
assert_with_msg(v == "wwwzzzyyy", "Firebase get failed")
# --- delete the value and confirm it is gone ---
fb.delete("test_entry")
not_found = fb.get("test_entry")
assert_with_msg(not_found is None, "Firebase delete failed")
# --- list handling: clear leftovers, append two items, fetch, verify ---
fb.delete("test_list")
fb.append_to_list("test_list", "a")
fb.append_to_list("test_list", "b")
xs = fb.get_list("test_list")
# Sort so the check below does not depend on Firebase's list ordering.
xs.sort()
# NOTE(review): unlike the calls above, this assert passes no failure
# message — confirm assert_with_msg's msg parameter is optional.
assert_with_msg(xs[1] == "b")
fb.delete("test_list")
print("ufirebase tests finished successfully")
| 21.319149
| 60
| 0.753493
|
acfc90676383363fafb40c0663b9ebe893331ebf
| 1,207
|
py
|
Python
|
List-Based Collections/new.py
|
Emmaka9/data-structure-and-algorithms
|
dd9933c277561f217b1cbad0e62e568cd2e8684d
|
[
"Apache-2.0"
] | null | null | null |
List-Based Collections/new.py
|
Emmaka9/data-structure-and-algorithms
|
dd9933c277561f217b1cbad0e62e568cd2e8684d
|
[
"Apache-2.0"
] | null | null | null |
List-Based Collections/new.py
|
Emmaka9/data-structure-and-algorithms
|
dd9933c277561f217b1cbad0e62e568cd2e8684d
|
[
"Apache-2.0"
] | null | null | null |
# (Removed a large block of commented-out "delete element from array" sample
# code that was kept here as dead text; recover it from git history if needed.)
print('-' * 20)


def print_nums(N, line_gap=True):
    """Recursively print 1..N (or just 0 for N == 0) on a single line.

    Numbers after the first are separated by two spaces, matching the
    ``print(' ', N)`` default-separator behavior.

    Parameters
    ----------
    N : int
        Largest number to print; must be >= 0 (AssertionError otherwise).
    line_gap : bool, default=True
        True on the outermost call so the line ends with a newline;
        recursive calls pass False to keep everything on one line.
    """
    assert N >= 0
    end = "\n" if line_gap else ""
    if N <= 1:
        # Base case: 0 or 1 is printed directly.
        print(N, end=end)
    else:
        # Print 1..N-1 first, then append N.
        print_nums(N - 1, line_gap=False)
        print(' ', N, end=end)


print_nums(5)
| 19.786885
| 41
| 0.517813
|
acfc923a3703d8d86a260298137727f5337efbad
| 3,236
|
py
|
Python
|
profiles_project/settings.py
|
panavenue/profiles-rest-api
|
44940a0d14944a304468f44244d91dcbba7b701c
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
panavenue/profiles-rest-api
|
44940a0d14944a304468f44244d91dcbba7b701c
|
[
"MIT"
] | 6
|
2020-06-06T01:39:58.000Z
|
2022-02-10T09:16:54.000Z
|
profiles_project/settings.py
|
panavenue/profiles-rest-api
|
44940a0d14944a304468f44244d91dcbba7b701c
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6hck-xeg@j_0$gr#$pqdj_k@gg0-c183j8^3mg+^06o_zg#q&s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.68254
| 91
| 0.699938
|
acfc923c20a3b804f0cc67c17ab3868e811776b1
| 3,233
|
py
|
Python
|
topo_processor/metadata/metadata_validators/metadata_validator_stac.py
|
linz/processor-aerial-imagery
|
cf6425560cea381278fe7857865e3d9158b28d7e
|
[
"MIT"
] | null | null | null |
topo_processor/metadata/metadata_validators/metadata_validator_stac.py
|
linz/processor-aerial-imagery
|
cf6425560cea381278fe7857865e3d9158b28d7e
|
[
"MIT"
] | 15
|
2020-07-21T04:56:31.000Z
|
2020-09-21T06:28:57.000Z
|
topo_processor/metadata/metadata_validators/metadata_validator_stac.py
|
linz/processor-aerial-imagery
|
cf6425560cea381278fe7857865e3d9158b28d7e
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import warnings
from typing import Any, Dict, Union
import fsspec
import jsonschema_rs
import pystac.validation
from linz_logger import get_log
from pystac.errors import STACValidationError
from topo_processor.stac.collection import Collection
from topo_processor.stac.item import Item
from topo_processor.stac.iter_errors_validator import IterErrorsValidator
from .metadata_validator import MetadataValidator
class MetadataValidatorStac(MetadataValidator):
    """Validate STAC Items and Collections against their JSON schemas."""

    name = "validator.stac"
    # Compiled JSON schemas keyed by schema URI; class-level so the cache
    # is shared by every validator instance.
    validator_cache: Dict[str, Any] = {}

    def get_validator_from_uri(self, schema_uri: str) -> Any:
        """Return the compiled JSON schema for *schema_uri*, memoized."""
        if schema_uri not in self.validator_cache:
            with fsspec.open(schema_uri, "rt") as f:
                self.validator_cache[schema_uri] = jsonschema_rs.JSONSchema.from_str(f.read())
        return self.validator_cache[schema_uri]

    def is_applicable(self, stac_object: Union[Item, Collection]) -> bool:
        """This validator applies to every STAC object."""
        return True

    def validate_metadata(self, item: Item) -> None:
        """Validate *item* via pystac; raise ``STACValidationError`` on failure."""
        if isinstance(pystac.validation.RegisteredValidator.get_validator(), IterErrorsValidator):
            # The iterating validator reports each problem as a warning;
            # collect them all and re-raise as one error.
            with warnings.catch_warnings(record=True) as caught:
                item.create_stac().validate()
                combined = ""
                for warning in caught:
                    combined = combined + ", " + str(warning.message)
                if caught:
                    raise STACValidationError(message=f"Not valid STAC: {combined}")
        else:
            try:
                item.create_stac().validate()
            except STACValidationError as e:
                raise STACValidationError(message=f"Not valid STAC: {e}")

    def validate_metadata_with_report(self, stac_object: Union[Item, Collection]) -> Dict[str, list[str]]:
        """Validate *stac_object* against its core schema and all extensions.

        Returns
        -------
        Dict[str, list[str]]
            Error report mapping each failing schema URI to its list of
            error messages; empty when everything validates.
        """
        report: Dict[str, list[str]] = {}
        if isinstance(stac_object, Collection):
            # Collections are validated together with all of their items,
            # plus the summaries generated over them.
            stac_collection = stac_object.create_stac()
            for item_key in stac_object.items:
                stac_collection.add_item(stac_object.items[item_key].create_stac())
            stac_object.generate_summaries(stac_collection)
            stac_dict = stac_collection.to_dict(include_self_link=False)
        else:
            stac_dict = stac_object.create_stac().to_dict(include_self_link=False)
        for schema_uri in [stac_object.schema] + stac_dict["stac_extensions"]:
            get_log().trace(f"{self.name}:validate_metadata_with_report", stacId=stac_dict["id"], schema=schema_uri)
            schema = self.get_validator_from_uri(schema_uri)
            messages = []
            for error in schema.iter_errors(stac_dict):
                messages.append(error.message)
                get_log().warn(f"{self.name}:validate_metadata_with_report", stacId=stac_dict["id"], error=error.message)
            if messages:
                report[schema_uri] = messages
        return report
| 38.951807
| 121
| 0.660377
|
acfc94becc9eab72afc7612e28dd90f5fe28cd28
| 2,160
|
py
|
Python
|
tests/check_consistent.py
|
olmokramer/typeshed
|
1c8c6ec7d90e86380d9a732c09213392e3af1fdf
|
[
"Apache-2.0"
] | 1
|
2018-10-03T12:35:12.000Z
|
2018-10-03T12:35:12.000Z
|
tests/check_consistent.py
|
olmokramer/typeshed
|
1c8c6ec7d90e86380d9a732c09213392e3af1fdf
|
[
"Apache-2.0"
] | null | null | null |
tests/check_consistent.py
|
olmokramer/typeshed
|
1c8c6ec7d90e86380d9a732c09213392e3af1fdf
|
[
"Apache-2.0"
] | 1
|
2018-10-03T12:35:06.000Z
|
2018-10-03T12:35:06.000Z
|
#!/usr/bin/env python3
# Symlinks are bad on Windows, so we cannot use them in typeshed.
# This checks that certain files are duplicated exactly.

import os
import filecmp

# Groups of stub files that must stay byte-for-byte identical, because
# typeshed cannot use symlinks to share them.
consistent_files = [
    {'stdlib/2/builtins.pyi', 'stdlib/2/__builtin__.pyi'},
    {'stdlib/2/SocketServer.pyi', 'stdlib/3/socketserver.pyi'},
    {'stdlib/2/os2emxpath.pyi', 'stdlib/2/posixpath.pyi', 'stdlib/2/ntpath.pyi', 'stdlib/2/macpath.pyi'},
    {'stdlib/2and3/pyexpat/__init__.pyi', 'stdlib/2and3/xml/parsers/expat/__init__.pyi'},
    {'stdlib/2and3/pyexpat/errors.pyi', 'stdlib/2and3/xml/parsers/expat/errors.pyi'},
    {'stdlib/2and3/pyexpat/model.pyi', 'stdlib/2and3/xml/parsers/expat/model.pyi'},
    # 'stdlib/3/posixpath.pyi' was listed twice here; set literals collapse
    # duplicates anyway, so the redundant entry has been dropped.
    {'stdlib/3/ntpath.pyi', 'stdlib/3/posixpath.pyi', 'stdlib/3/macpath.pyi'},
    {'stdlib/3/enum.pyi', 'third_party/3/enum.pyi'},
    {'stdlib/2/os/path.pyi', 'stdlib/3/os/path.pyi'},
    {'stdlib/3/unittest/mock.pyi', 'third_party/2and3/mock.pyi'},
    {'stdlib/3/concurrent/__init__.pyi', 'third_party/2/concurrent/__init__.pyi'},
    {'stdlib/3/concurrent/futures/__init__.pyi', 'third_party/2/concurrent/futures/__init__.pyi'},
    {'stdlib/3/concurrent/futures/_base.pyi', 'third_party/2/concurrent/futures/_base.pyi'},
    {'stdlib/3/concurrent/futures/thread.pyi', 'third_party/2/concurrent/futures/thread.pyi'},
    {'stdlib/3/concurrent/futures/process.pyi', 'third_party/2/concurrent/futures/process.pyi'},
]


def main():
    """Fail when any .pyi file is a symlink or a duplicated group diverges."""
    # 1) No .pyi file anywhere in the tree may be a symlink.
    #    ('dirs' renamed from 'dir', which shadowed the builtin.)
    files = [os.path.join(root, file)
             for root, dirs, files in os.walk('.')
             for file in files]
    no_symlink = 'You cannot use symlinks in typeshed, please copy {} to its link.'
    for file in files:
        _, ext = os.path.splitext(file)
        if ext == '.pyi' and os.path.islink(file):
            raise ValueError(no_symlink.format(file))
    # 2) Every member of a group must match the group's first file exactly.
    for file1, *others in consistent_files:
        f1 = os.path.join(os.getcwd(), file1)
        for file2 in others:
            f2 = os.path.join(os.getcwd(), file2)
            if not filecmp.cmp(f1, f2):
                raise ValueError('File {f1} does not match file {f2}. Please copy it to {f2}'.format(f1=file1, f2=file2))


if __name__ == '__main__':
    main()
| 50.232558
| 121
| 0.675926
|
acfc950eb29889ab84ee4697b947d1432e8f6370
| 7,537
|
py
|
Python
|
humble/eclipse-cyclonedds-report/perf_tool/perf_tool/resulter.py
|
kydos/TSC-RMW-Reports
|
b9fa8afe0aef554f00e6708f524e27cfdea88232
|
[
"MIT"
] | 15
|
2020-12-30T02:31:42.000Z
|
2022-02-09T12:07:16.000Z
|
humble/eclipse-cyclonedds-report/perf_tool/perf_tool/resulter.py
|
kydos/TSC-RMW-Reports
|
b9fa8afe0aef554f00e6708f524e27cfdea88232
|
[
"MIT"
] | 10
|
2021-08-16T14:46:26.000Z
|
2022-01-19T20:05:09.000Z
|
humble/eclipse-cyclonedds-report/perf_tool/perf_tool/resulter.py
|
kydos/TSC-RMW-Reports
|
b9fa8afe0aef554f00e6708f524e27cfdea88232
|
[
"MIT"
] | 7
|
2021-03-15T15:17:24.000Z
|
2022-03-13T08:00:02.000Z
|
import numpy as np
from enum import Enum
def verify_numerical_sequence(data, subtype, operation):
    """Convert *data* to *subtype*, reject non-finite values, and aggregate.

    Parameters
    ----------
    data : iterable
        Raw (typically string) measurement values.
    subtype : type
        Target numeric type, e.g. ``int`` or ``float``.
    operation : callable
        Aggregation applied to the converted list (``np.sum``, ``np.mean``, ...).

    Returns
    -------
    subtype or None
        ``subtype(operation(converted))``, or ``None`` when any entry
        fails to convert or is NaN / infinite.
    """
    try:
        converted = [subtype(v) for v in data]
    except (TypeError, ValueError):
        # Non-numeric entry: signal invalid data instead of crashing.
        # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit.)
        return None
    for value in converted:
        if np.isnan(value) or np.isinf(value):
            return None
    return subtype(operation(converted))
def test_results(data, host, subs):
    """Aggregate one experiment's raw per-run measurements into a summary.

    Parameters
    ----------
    data : dict
        Maps measurement names to lists of per-run string values; must
        contain the keys aggregated below plus 'ru_maxrss'.
    host : str
        Host name; a host containing 'mac' gets its RSS converted from
        bytes (see inline comment).
    subs : int
        Number of subscribers, used to normalize throughput.

    Returns
    -------
    dict or None
        Summary statistics, or ``None`` when too many rows are empty or
        any column contains non-numeric / non-finite entries.
    """
    data = data.copy()
    # Rows where nothing was received are treated as empty and dropped.
    empty_rows = [i for i, recv in enumerate(data['received'])
                  if int(recv.strip()) == 0]
    if len(empty_rows) > 0.75 * len(data['received']):
        # More than 75% of the entries are empty: reject the experiment.
        return None
    for key in data.keys():
        column = data[key]
        for row in reversed(empty_rows):
            del column[row]
        data[key] = column
    received = verify_numerical_sequence(data['received'], int, np.sum)
    if not received:  # None (invalid) or 0 (nothing received at all)
        return None
    # (field, target type, aggregation) for every remaining plain column.
    plan = (
        ('sent', int, np.sum),
        ('lost', int, np.sum),
        ('data_received', int, np.sum),
        ('latency_min', float, np.min),
        ('latency_max', float, np.max),
        ('latency_mean', float, np.mean),
        ('latency_variance', float, np.mean),
        ('cpu_usage', float, np.mean),
    )
    agg = {}
    for field, subtype, op in plan:
        value = verify_numerical_sequence(data[field], subtype, op)
        if value is None:
            return None
        agg[field] = value
    # Only the final run's peak RSS is used; scaled below.
    ram_usage = verify_numerical_sequence([data['ru_maxrss'][-1]], float, np.mean)
    if ram_usage is None:
        return None
    ram_usage /= 1000.0
    if 'mac' in host.lower():
        # for some reason getrusage() on mac returns bytes
        ram_usage /= 1024.0
    runs = len(data['data_received'])
    return {
        'received': received,
        'sent': agg['sent'],
        'lost': agg['lost'],
        'data_received': agg['data_received'],
        'latency_min': agg['latency_min'],
        'latency_max': agg['latency_max'],
        'latency_mean': agg['latency_mean'],
        'latency_variance': agg['latency_variance'],
        'jitter': np.sqrt(agg['latency_variance'] + 0.000001),
        'cpu_usage': agg['cpu_usage'],
        'rel_cpu_usage': agg['cpu_usage'] / float(received),
        'ram_usage': ram_usage,
        'length': runs,
        'throughput': agg['data_received'] / 1024.0 / 1024.0 / float(runs) / float(subs)
    }
class Status(Enum):
    """Outcome of a per-metric CycloneDDS vs FastRTPS comparison."""
    CYCLONEDDS_WIN = 1
    FASTRTPS_WIN = 2
    FASTRTPS_FAIL = 3
    CYCLONEDDS_FAIL = 4


# Added to divisors so a metric of exactly 0 cannot raise ZeroDivisionError.
_EPSILON = 0.000001

# Metrics where a smaller value wins, and where a larger value wins.
_LOWER_IS_BETTER = ('cpu_usage', 'rel_cpu_usage', 'ram_usage', 'latency_mean', 'jitter')
_HIGHER_IS_BETTER = ('throughput',)


def _fail_result(status):
    """Build the comparison dict for a run where one implementation failed.

    Every metric is marked with *status* and every '<metric>_diff' is 0,
    matching the shape of a successful comparison result.
    """
    result = {}
    for var in _LOWER_IS_BETTER + _HIGHER_IS_BETTER:
        result[var] = status.value
        result[f"{var}_diff"] = 0
    return result


def make_comparison(cyclonedds, fastrtps):
    """Compare two test-result dicts metric by metric.

    Parameters
    ----------
    cyclonedds, fastrtps : dict or None
        Summarized results as produced by test_results(); None means the
        corresponding implementation's run failed.

    Returns
    -------
    dict
        For each metric, a Status value and a '<metric>_diff' ratio of the
        loser's value to the winner's value (always >= 1 for valid runs).
    """
    if cyclonedds is None:
        return _fail_result(Status.CYCLONEDDS_FAIL)
    if fastrtps is None:
        return _fail_result(Status.FASTRTPS_FAIL)
    result = {}
    for var in _LOWER_IS_BETTER:
        if cyclonedds[var] < fastrtps[var]:
            result[var] = Status.CYCLONEDDS_WIN.value
            result[f"{var}_diff"] = fastrtps[var] / (cyclonedds[var] + _EPSILON)
        else:
            result[var] = Status.FASTRTPS_WIN.value
            result[f"{var}_diff"] = cyclonedds[var] / (fastrtps[var] + _EPSILON)
    for var in _HIGHER_IS_BETTER:
        if cyclonedds[var] > fastrtps[var]:
            result[var] = Status.CYCLONEDDS_WIN.value
            result[f"{var}_diff"] = cyclonedds[var] / (fastrtps[var] + _EPSILON)
        else:
            result[var] = Status.FASTRTPS_WIN.value
            result[f"{var}_diff"] = fastrtps[var] / (cyclonedds[var] + _EPSILON)
    return result
def boolify(x):
    """Coerce *x* to bool.

    Booleans pass through unchanged; any other value is treated as a string
    and is True only for '1' or 'true' (case-insensitive, surrounding
    whitespace ignored).
    """
    # isinstance is the idiomatic type check (type(x) == bool also matched
    # only exact bool, so behavior is unchanged).
    if isinstance(x, bool):
        return x
    return x.lower().strip() in ['1', 'true']
def process_results(database):
    """Flatten database['raw'] into the list database['processed'].

    database['raw'] is a nested dict keyed (in order) by
    host -> mode -> topic -> rate -> subs -> zero_copy -> reliable -> transient,
    with per-implementation raw samples at the leaves.  Each leaf is
    summarized with test_results() and the cyclonedds/fastrtps pair is
    scored with make_comparison().  Leaves where every implementation
    failed are dropped.
    """
    data = database["raw"]
    results = []
    # This monstrous iteration will ensure that our results are ordered nicely
    for host, hdata in sorted(data.items()):
        for mode, mdata in sorted(hdata.items()):
            for topic, tdata in sorted(mdata.items()):
                # rate/subs keys are numeric strings; sort numerically, not
                # lexicographically.  zero_copy/reliable/transient are
                # bool-ish strings, ordered False before True via boolify.
                for rate, rtdata in sorted(tdata.items(), key=lambda x: int(x[0])):
                    for subs, sdata in sorted(rtdata.items(), key=lambda x: int(x[0])):
                        for zero_copy, zcdata in sorted(sdata.items(), key=lambda x: boolify(x[0])):
                            for reliable, rldata in sorted(zcdata.items(), key=lambda x: boolify(x[0])):
                                for transient, trdata in sorted(rldata.items(), key=lambda x: boolify(x[0])):
                                    # Missing implementations become None so
                                    # make_comparison can mark them as failed.
                                    cyclonedds = test_results(trdata["cyclonedds"], host, subs) if "cyclonedds" in trdata else None
                                    fastrtps = test_results(trdata["fastrtps"], host, subs) if "fastrtps" in trdata else None
                                    raw = test_results(trdata["raw_cyclonedds"], host, subs) if "raw_cyclonedds" in trdata else None
                                    if cyclonedds is None and fastrtps is None and raw is None:
                                        # No point saving it if both fail
                                        continue
                                    results.append({
                                        "host": host,
                                        "mode": mode,
                                        "topic": topic,
                                        "rate": int(rate),
                                        "subs": int(subs),
                                        "zero_copy": boolify(zero_copy),
                                        "reliable": boolify(reliable),
                                        "transient": boolify(transient),
                                        "cyclonedds": cyclonedds,
                                        "fastrtps": fastrtps,
                                        "raw": raw,
                                        "comparison": make_comparison(cyclonedds, fastrtps)
                                    })
    database["processed"] = results
| 37.685
| 133
| 0.537614
|
acfc9977fc869dde75dbba7784900bab31ee1dd7
| 38,263
|
py
|
Python
|
sphinx/ext/napoleon/docstring.py
|
shimizukawa/sphinx
|
359fc3c7998e057bdb7884f20f5745efd53da49a
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/ext/napoleon/docstring.py
|
shimizukawa/sphinx
|
359fc3c7998e057bdb7884f20f5745efd53da49a
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/ext/napoleon/docstring.py
|
shimizukawa/sphinx
|
359fc3c7998e057bdb7884f20f5745efd53da49a
|
[
"BSD-2-Clause"
] | null | null | null |
"""
sphinx.ext.napoleon.docstring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Classes for docstring parsing and formatting.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import inspect
import re
from functools import partial
from typing import Any, Callable, Dict, List, Tuple, Type, Union
from sphinx.application import Sphinx
from sphinx.config import Config as SphinxConfig
from sphinx.ext.napoleon.iterators import modify_iter
from sphinx.locale import _
# A reST directive marker, e.g. ".. note::".
_directive_regex = re.compile(r'\.\. \S+::')
# A Google-style section header: a line of words/spaces ending in a colon.
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
# A Google-style "name (type)" field; group 1 = name, group 2 = type.
_google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)')
# A NumPy-style section underline such as "-----" or "=====".
_numpy_section_regex = re.compile(r'^[=\-`:\'"~^_*+#<>]{2,}\s*$')
# A single colon that is not part of a "::" pair.
_single_colon_regex = re.compile(r'(?<!:):(?!:)')
# A :role:`target` cross-reference or a ``literal`` span; colons inside
# these must not be treated as field separators.
_xref_or_code_regex = re.compile(
    r'((?::(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)|'
    r'(?:``.+``))')
# A reST bullet list item marker: "*", "+" or "-".
_bullet_list_regex = re.compile(r'^(\*|\+|\-)(\s+\S|\s*$)')
# A reST enumerated list item marker: "1.", "(a)", "iv.", "#.", etc.
_enumerated_list_regex = re.compile(
    r'^(?P<paren>\()?'
    r'(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])'
    r'(?(paren)\)|\.)(\s+\S|\s*$)')
class GoogleDocstring:
    """Convert Google style docstrings to reStructuredText.
    Parameters
    ----------
    docstring : :obj:`str` or :obj:`list` of :obj:`str`
        The docstring to parse, given either as a string or split into
        individual lines.
    config: :obj:`sphinx.ext.napoleon.Config` or :obj:`sphinx.config.Config`
        The configuration settings to use. If not given, defaults to the
        config object on `app`; or if `app` is not given defaults to the
        a new :class:`sphinx.ext.napoleon.Config` object.
    Other Parameters
    ----------------
    app : :class:`sphinx.application.Sphinx`, optional
        Application object representing the Sphinx process.
    what : :obj:`str`, optional
        A string specifying the type of the object to which the docstring
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : :obj:`str`, optional
        The fully qualified name of the object.
    obj : module, class, exception, function, method, or attribute
        The object to which the docstring belongs.
    options : :class:`sphinx.ext.autodoc.Options`, optional
        The options given to the directive: an object with attributes
        inherited_members, undoc_members, show_inheritance and noindex that
        are True if the flag option of same name was given to the auto
        directive.
    Example
    -------
    >>> from sphinx.ext.napoleon import Config
    >>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
    >>> docstring = '''One line summary.
    ...
    ... Extended description.
    ...
    ... Args:
    ...   arg1(int): Description of `arg1`
    ...   arg2(str): Description of `arg2`
    ... Returns:
    ...   str: Description of return value.
    ... '''
    >>> print(GoogleDocstring(docstring, config))
    One line summary.
    <BLANKLINE>
    Extended description.
    <BLANKLINE>
    :param arg1: Description of `arg1`
    :type arg1: int
    :param arg2: Description of `arg2`
    :type arg2: str
    <BLANKLINE>
    :returns: Description of return value.
    :rtype: str
    <BLANKLINE>
    """
    # Matches ':role:`name`' cross-references or a bare 'name' identifier.
    _name_rgx = re.compile(r"^\s*((?::(?P<role>\S+):)?`(?P<name>~?[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>~?[a-zA-Z0-9_.-]+))\s*", re.X)
    def __init__(self, docstring: Union[str, List[str]], config: SphinxConfig = None,
                 app: Sphinx = None, what: str = '', name: str = '',
                 obj: Any = None, options: Any = None) -> None:
        self._config = config
        self._app = app
        if not self._config:
            from sphinx.ext.napoleon import Config
            self._config = self._app.config if self._app else Config()  # type: ignore
        if not what:
            # Infer the object type when the caller did not supply one.
            if inspect.isclass(obj):
                what = 'class'
            elif inspect.ismodule(obj):
                what = 'module'
            elif callable(obj):
                what = 'function'
            else:
                what = 'object'
        self._what = what
        self._name = name
        self._obj = obj
        self._opt = options
        if isinstance(docstring, str):
            lines = docstring.splitlines()
        else:
            lines = docstring
        # Peekable iterator over the right-stripped docstring lines.
        self._line_iter = modify_iter(lines, modifier=lambda s: s.rstrip())
        self._parsed_lines = []  # type: List[str]
        self._is_in_section = False
        self._section_indent = 0
        if not hasattr(self, '_directive_sections'):
            self._directive_sections = []  # type: List[str]
        if not hasattr(self, '_sections'):
            # Maps lower-cased section names to their parser methods.
            self._sections = {
                'args': self._parse_parameters_section,
                'arguments': self._parse_parameters_section,
                'attention': partial(self._parse_admonition, 'attention'),
                'attributes': self._parse_attributes_section,
                'caution': partial(self._parse_admonition, 'caution'),
                'danger': partial(self._parse_admonition, 'danger'),
                'error': partial(self._parse_admonition, 'error'),
                'example': self._parse_examples_section,
                'examples': self._parse_examples_section,
                'hint': partial(self._parse_admonition, 'hint'),
                'important': partial(self._parse_admonition, 'important'),
                'keyword args': self._parse_keyword_arguments_section,
                'keyword arguments': self._parse_keyword_arguments_section,
                'methods': self._parse_methods_section,
                'note': partial(self._parse_admonition, 'note'),
                'notes': self._parse_notes_section,
                'other parameters': self._parse_other_parameters_section,
                'parameters': self._parse_parameters_section,
                'return': self._parse_returns_section,
                'returns': self._parse_returns_section,
                'raises': self._parse_raises_section,
                'references': self._parse_references_section,
                'see also': self._parse_see_also_section,
                'tip': partial(self._parse_admonition, 'tip'),
                'todo': partial(self._parse_admonition, 'todo'),
                'warning': partial(self._parse_admonition, 'warning'),
                'warnings': partial(self._parse_admonition, 'warning'),
                'warns': self._parse_warns_section,
                'yield': self._parse_yields_section,
                'yields': self._parse_yields_section,
            }  # type: Dict[str, Callable]
        self._load_custom_sections()
        self._parse()
    def __str__(self) -> str:
        """Return the parsed docstring in reStructuredText format.
        Returns
        -------
        unicode
            Unicode version of the docstring.
        """
        return '\n'.join(self.lines())
    def lines(self) -> List[str]:
        """Return the parsed lines of the docstring in reStructuredText format.
        Returns
        -------
        list(str)
            The lines of the docstring in a list.
        """
        return self._parsed_lines
    def _consume_indented_block(self, indent: int = 1) -> List[str]:
        """Consume lines until a section break or a line dedented past *indent*."""
        lines = []
        line = self._line_iter.peek()
        while(not self._is_section_break() and
              (not line or self._is_indented(line, indent))):
            lines.append(next(self._line_iter))
            line = self._line_iter.peek()
        return lines
    def _consume_contiguous(self) -> List[str]:
        """Consume non-empty lines up to the next section header."""
        lines = []
        while (self._line_iter.has_next() and
               self._line_iter.peek() and
               not self._is_section_header()):
            lines.append(next(self._line_iter))
        return lines
    def _consume_empty(self) -> List[str]:
        """Consume and return any leading blank lines."""
        lines = []
        line = self._line_iter.peek()
        while self._line_iter.has_next() and not line:
            lines.append(next(self._line_iter))
            line = self._line_iter.peek()
        return lines
    def _consume_field(self, parse_type: bool = True, prefer_type: bool = False
                       ) -> Tuple[str, str, List[str]]:
        """Consume one 'name (type): description' field and its continuation lines."""
        line = next(self._line_iter)
        before, colon, after = self._partition_field_on_colon(line)
        _name, _type, _desc = before, '', after
        if parse_type:
            match = _google_typed_arg_regex.match(before)
            if match:
                _name = match.group(1)
                _type = match.group(2)
        _name = self._escape_args_and_kwargs(_name)
        if prefer_type and not _type:
            # e.g. a Raises entry where the sole token is the exception type.
            _type, _name = _name, _type
        indent = self._get_indent(line) + 1
        _descs = [_desc] + self._dedent(self._consume_indented_block(indent))
        # Recursively process the description so nested sections are handled.
        _descs = self.__class__(_descs, self._config).lines()
        return _name, _type, _descs
    def _consume_fields(self, parse_type: bool = True, prefer_type: bool = False
                        ) -> List[Tuple[str, str, List[str]]]:
        """Consume every field in the current section."""
        self._consume_empty()
        fields = []
        while not self._is_section_break():
            _name, _type, _desc = self._consume_field(parse_type, prefer_type)
            if _name or _type or _desc:
                fields.append((_name, _type, _desc,))
        return fields
    def _consume_inline_attribute(self) -> Tuple[str, List[str]]:
        """Consume a one-line 'type: description' attribute docstring."""
        line = next(self._line_iter)
        _type, colon, _desc = self._partition_field_on_colon(line)
        if not colon or not _desc:
            # No real type prefix: treat the whole line as the description.
            _type, _desc = _desc, _type
            _desc += colon
        _descs = [_desc] + self._dedent(self._consume_to_end())
        _descs = self.__class__(_descs, self._config).lines()
        return _type, _descs
    def _consume_returns_section(self) -> List[Tuple[str, str, List[str]]]:
        """Consume a Returns section as a single (name, type, desc) field."""
        lines = self._dedent(self._consume_to_next_section())
        if lines:
            before, colon, after = self._partition_field_on_colon(lines[0])
            _name, _type, _desc = '', '', lines
            if colon:
                if after:
                    _desc = [after] + lines[1:]
                else:
                    _desc = lines[1:]
                _type = before
            _desc = self.__class__(_desc, self._config).lines()
            return [(_name, _type, _desc,)]
        else:
            return []
    def _consume_usage_section(self) -> List[str]:
        """Consume a Usage section verbatim (dedented)."""
        lines = self._dedent(self._consume_to_next_section())
        return lines
    def _consume_section_header(self) -> str:
        """Consume the header line, normalizing recognized 'Section:' headers."""
        section = next(self._line_iter)
        stripped_section = section.strip(':')
        if stripped_section.lower() in self._sections:
            section = stripped_section
        return section
    def _consume_to_end(self) -> List[str]:
        """Consume all remaining lines."""
        lines = []
        while self._line_iter.has_next():
            lines.append(next(self._line_iter))
        return lines
    def _consume_to_next_section(self) -> List[str]:
        """Consume lines up to (and trailing blanks after) the next section."""
        self._consume_empty()
        lines = []
        while not self._is_section_break():
            lines.append(next(self._line_iter))
        return lines + self._consume_empty()
    def _dedent(self, lines: List[str], full: bool = False) -> List[str]:
        """Strip the common leading indent (or all indentation when *full*)."""
        if full:
            return [line.lstrip() for line in lines]
        else:
            min_indent = self._get_min_indent(lines)
            return [line[min_indent:] for line in lines]
    def _escape_args_and_kwargs(self, name: str) -> str:
        """Escape trailing underscores and */** markers so reST renders them."""
        if name.endswith('_'):
            name = name[:-1] + r'\_'
        if name[:2] == '**':
            return r'\*\*' + name[2:]
        elif name[:1] == '*':
            return r'\*' + name[1:]
        else:
            return name
    def _fix_field_desc(self, desc: List[str]) -> List[str]:
        """Normalize a field description so lists and literal blocks render."""
        if self._is_list(desc):
            desc = [''] + desc
        elif desc[0].endswith('::'):
            desc_block = desc[1:]
            indent = self._get_indent(desc[0])
            block_indent = self._get_initial_indent(desc_block)
            if block_indent > indent:
                desc = [''] + desc
            else:
                desc = ['', desc[0]] + self._indent(desc_block, 4)
        return desc
    def _format_admonition(self, admonition: str, lines: List[str]) -> List[str]:
        """Render *lines* as a '.. <admonition>::' directive."""
        lines = self._strip_empty(lines)
        if len(lines) == 1:
            return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
        elif lines:
            lines = self._indent(self._dedent(lines), 3)
            return ['.. %s::' % admonition, ''] + lines + ['']
        else:
            return ['.. %s::' % admonition, '']
    def _format_block(self, prefix: str, lines: List[str], padding: str = None) -> List[str]:
        """Prefix the first line with *prefix*; pad continuation lines to align."""
        if lines:
            if padding is None:
                padding = ' ' * len(prefix)
            result_lines = []
            for i, line in enumerate(lines):
                if i == 0:
                    result_lines.append((prefix + line).rstrip())
                elif line:
                    result_lines.append(padding + line)
                else:
                    result_lines.append('')
            return result_lines
        else:
            return [prefix]
    def _format_docutils_params(self, fields: List[Tuple[str, str, List[str]]],
                                field_role: str = 'param', type_role: str = 'type'
                                ) -> List[str]:
        """Render fields as ':param name:' / ':type name:' field lists."""
        lines = []
        for _name, _type, _desc in fields:
            _desc = self._strip_empty(_desc)
            if any(_desc):
                _desc = self._fix_field_desc(_desc)
                field = ':%s %s: ' % (field_role, _name)
                lines.extend(self._format_block(field, _desc))
            else:
                lines.append(':%s %s:' % (field_role, _name))
            if _type:
                lines.append(':%s %s: %s' % (type_role, _name, _type))
        return lines + ['']
    def _format_field(self, _name: str, _type: str, _desc: List[str]) -> List[str]:
        """Render one '**name** (*type*) -- description' field body."""
        _desc = self._strip_empty(_desc)
        has_desc = any(_desc)
        separator = ' -- ' if has_desc else ''
        if _name:
            if _type:
                if '`' in _type:
                    # Type already contains markup; don't wrap it in emphasis.
                    field = '**%s** (%s)%s' % (_name, _type, separator)
                else:
                    field = '**%s** (*%s*)%s' % (_name, _type, separator)
            else:
                field = '**%s**%s' % (_name, separator)
        elif _type:
            if '`' in _type:
                field = '%s%s' % (_type, separator)
            else:
                field = '*%s*%s' % (_type, separator)
        else:
            field = ''
        if has_desc:
            _desc = self._fix_field_desc(_desc)
            if _desc[0]:
                return [field + _desc[0]] + _desc[1:]
            else:
                return [field] + _desc
        else:
            return [field]
    def _format_fields(self, field_type: str, fields: List[Tuple[str, str, List[str]]]
                       ) -> List[str]:
        """Render fields under a ':Field Type:' label (bulleted when several)."""
        field_type = ':%s:' % field_type.strip()
        padding = ' ' * len(field_type)
        multi = len(fields) > 1
        lines = []  # type: List[str]
        for _name, _type, _desc in fields:
            field = self._format_field(_name, _type, _desc)
            if multi:
                if lines:
                    lines.extend(self._format_block(padding + ' * ', field))
                else:
                    lines.extend(self._format_block(field_type + ' * ', field))
            else:
                lines.extend(self._format_block(field_type + ' ', field))
        if lines and lines[-1]:
            lines.append('')
        return lines
    def _get_current_indent(self, peek_ahead: int = 0) -> int:
        """Indent of the next non-empty line, or 0 at end of input."""
        line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
        while line != self._line_iter.sentinel:
            if line:
                return self._get_indent(line)
            peek_ahead += 1
            line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
        return 0
    def _get_indent(self, line: str) -> int:
        """Number of leading whitespace characters in *line*."""
        for i, s in enumerate(line):
            if not s.isspace():
                return i
        return len(line)
    def _get_initial_indent(self, lines: List[str]) -> int:
        """Indent of the first non-empty line, or 0."""
        for line in lines:
            if line:
                return self._get_indent(line)
        return 0
    def _get_min_indent(self, lines: List[str]) -> int:
        """Smallest indent across all non-empty lines, or 0."""
        min_indent = None
        for line in lines:
            if line:
                indent = self._get_indent(line)
                if min_indent is None:
                    min_indent = indent
                elif indent < min_indent:
                    min_indent = indent
        return min_indent or 0
    def _indent(self, lines: List[str], n: int = 4) -> List[str]:
        """Indent every line by *n* spaces."""
        return [(' ' * n) + line for line in lines]
    def _is_indented(self, line: str, indent: int = 1) -> bool:
        """True if *line* starts with at least *indent* whitespace characters."""
        for i, s in enumerate(line):
            if i >= indent:
                return True
            elif not s.isspace():
                return False
        return False
    def _is_list(self, lines: List[str]) -> bool:
        """True if *lines* begins a bullet/enumerated list or an indented block."""
        if not lines:
            return False
        if _bullet_list_regex.match(lines[0]):
            return True
        if _enumerated_list_regex.match(lines[0]):
            return True
        if len(lines) < 2 or lines[0].endswith('::'):
            return False
        indent = self._get_indent(lines[0])
        next_indent = indent
        for line in lines[1:]:
            if line:
                next_indent = self._get_indent(line)
                break
        return next_indent > indent
    def _is_section_header(self) -> bool:
        """True if the next line starts a known section or directive."""
        section = self._line_iter.peek().lower()
        match = _google_section_regex.match(section)
        if match and section.strip(':') in self._sections:
            header_indent = self._get_indent(section)
            section_indent = self._get_current_indent(peek_ahead=1)
            # A real section's body is indented deeper than its header.
            return section_indent > header_indent
        elif self._directive_sections:
            if _directive_regex.match(section):
                for directive_section in self._directive_sections:
                    if section.startswith(directive_section):
                        return True
        return False
    def _is_section_break(self) -> bool:
        """True at end of input, at a new section, or when dedenting out of one."""
        line = self._line_iter.peek()
        return (not self._line_iter.has_next() or
                self._is_section_header() or
                (self._is_in_section and
                    line and
                    not self._is_indented(line, self._section_indent)))
    def _load_custom_sections(self) -> None:
        """Register user-configured sections from napoleon_custom_sections."""
        if self._config.napoleon_custom_sections is not None:
            for entry in self._config.napoleon_custom_sections:
                if isinstance(entry, str):
                    # if entry is just a label, add to sections list,
                    # using generic section logic.
                    self._sections[entry.lower()] = self._parse_custom_generic_section
                else:
                    # otherwise, assume entry is container;
                    # [0] is new section, [1] is the section to alias.
                    # in the case of key mismatch, just handle as generic section.
                    self._sections[entry[0].lower()] = \
                        self._sections.get(entry[1].lower(),
                                           self._parse_custom_generic_section)
    def _parse(self) -> None:
        """Main loop: translate the docstring into self._parsed_lines."""
        self._parsed_lines = self._consume_empty()
        if self._name and self._what in ('attribute', 'data', 'property'):
            # Implicit stop using StopIteration no longer allowed in
            # Python 3.7; see PEP 479
            res = []  # type: List[str]
            try:
                res = self._parse_attribute_docstring()
            except StopIteration:
                pass
            self._parsed_lines.extend(res)
            return
        while self._line_iter.has_next():
            if self._is_section_header():
                try:
                    section = self._consume_section_header()
                    self._is_in_section = True
                    self._section_indent = self._get_current_indent()
                    if _directive_regex.match(section):
                        lines = [section] + self._consume_to_next_section()
                    else:
                        lines = self._sections[section.lower()](section)
                finally:
                    self._is_in_section = False
                    self._section_indent = 0
            else:
                if not self._parsed_lines:
                    lines = self._consume_contiguous() + self._consume_empty()
                else:
                    lines = self._consume_to_next_section()
            self._parsed_lines.extend(lines)
    def _parse_admonition(self, admonition: str, section: str) -> List[str]:
        """Render the section body as a '.. <admonition>::' directive."""
        lines = self._consume_to_next_section()
        return self._format_admonition(admonition, lines)
    def _parse_attribute_docstring(self) -> List[str]:
        """Parse an attribute's own docstring ('type: description')."""
        _type, _desc = self._consume_inline_attribute()
        lines = self._format_field('', '', _desc)
        if _type:
            lines.extend(['', ':type: %s' % _type])
        return lines
    def _parse_attributes_section(self, section: str) -> List[str]:
        """Render Attributes as :ivar:/:vartype: or '.. attribute::' blocks."""
        lines = []
        for _name, _type, _desc in self._consume_fields():
            if self._config.napoleon_use_ivar:
                _name = self._qualify_name(_name, self._obj)
                field = ':ivar %s: ' % _name
                lines.extend(self._format_block(field, _desc))
                if _type:
                    lines.append(':vartype %s: %s' % (_name, _type))
            else:
                lines.append('.. attribute:: ' + _name)
                if self._opt and 'noindex' in self._opt:
                    lines.append(' :noindex:')
                if _type:
                    lines.extend(self._indent([':type: %s' % _type], 3))
                lines.append('')
                fields = self._format_field('', '', _desc)
                lines.extend(self._indent(fields, 3))
                lines.append('')
        if self._config.napoleon_use_ivar:
            lines.append('')
        return lines
    def _parse_examples_section(self, section: str) -> List[str]:
        """Render Example(s) with a localized label."""
        labels = {
            'example': _('Example'),
            'examples': _('Examples'),
        }
        use_admonition = self._config.napoleon_use_admonition_for_examples
        label = labels.get(section.lower(), section)
        return self._parse_generic_section(label, use_admonition)
    def _parse_custom_generic_section(self, section: str) -> List[str]:
        # for now, no admonition for simple custom sections
        return self._parse_generic_section(section, False)
    def _parse_usage_section(self, section: str) -> List[str]:
        """Render Usage as a literal python code block."""
        header = ['.. rubric:: Usage:', '']
        block = ['.. code-block:: python', '']
        lines = self._consume_usage_section()
        lines = self._indent(lines, 3)
        return header + block + lines + ['']
    def _parse_generic_section(self, section: str, use_admonition: bool) -> List[str]:
        """Render a section as an admonition or a rubric heading."""
        lines = self._strip_empty(self._consume_to_next_section())
        lines = self._dedent(lines)
        if use_admonition:
            header = '.. admonition:: %s' % section
            lines = self._indent(lines, 3)
        else:
            header = '.. rubric:: %s' % section
        if lines:
            return [header, ''] + lines + ['']
        else:
            return [header, '']
    def _parse_keyword_arguments_section(self, section: str) -> List[str]:
        """Render Keyword Arguments as :keyword: fields or a field list."""
        fields = self._consume_fields()
        if self._config.napoleon_use_keyword:
            return self._format_docutils_params(
                fields,
                field_role="keyword",
                type_role="kwtype")
        else:
            return self._format_fields(_('Keyword Arguments'), fields)
    def _parse_methods_section(self, section: str) -> List[str]:
        """Render Methods as '.. method::' directives."""
        lines = []  # type: List[str]
        for _name, _type, _desc in self._consume_fields(parse_type=False):
            lines.append('.. method:: %s' % _name)
            if self._opt and 'noindex' in self._opt:
                lines.append(' :noindex:')
            if _desc:
                lines.extend([''] + self._indent(_desc, 3))
            lines.append('')
        return lines
    def _parse_notes_section(self, section: str) -> List[str]:
        """Render Notes, optionally as an admonition."""
        use_admonition = self._config.napoleon_use_admonition_for_notes
        return self._parse_generic_section(_('Notes'), use_admonition)
    def _parse_other_parameters_section(self, section: str) -> List[str]:
        """Render Other Parameters as a field list."""
        return self._format_fields(_('Other Parameters'), self._consume_fields())
    def _parse_parameters_section(self, section: str) -> List[str]:
        """Render Parameters as :param: fields or a field list."""
        fields = self._consume_fields()
        if self._config.napoleon_use_param:
            return self._format_docutils_params(fields)
        else:
            return self._format_fields(_('Parameters'), fields)
    def _parse_raises_section(self, section: str) -> List[str]:
        """Render Raises entries as ':raises Type: description' fields."""
        fields = self._consume_fields(parse_type=False, prefer_type=True)
        lines = []  # type: List[str]
        for _name, _type, _desc in fields:
            m = self._name_rgx.match(_type)
            if m and m.group('name'):
                _type = m.group('name')
            _type = ' ' + _type if _type else ''
            _desc = self._strip_empty(_desc)
            _descs = ' ' + '\n    '.join(_desc) if any(_desc) else ''
            lines.append(':raises%s:%s' % (_type, _descs))
        if lines:
            lines.append('')
        return lines
    def _parse_references_section(self, section: str) -> List[str]:
        """Render References, optionally as an admonition."""
        use_admonition = self._config.napoleon_use_admonition_for_references
        return self._parse_generic_section(_('References'), use_admonition)
    def _parse_returns_section(self, section: str) -> List[str]:
        """Render Returns as :returns:/:rtype: fields (bulleted if several)."""
        fields = self._consume_returns_section()
        multi = len(fields) > 1
        if multi:
            use_rtype = False
        else:
            use_rtype = self._config.napoleon_use_rtype
        lines = []  # type: List[str]
        for _name, _type, _desc in fields:
            if use_rtype:
                field = self._format_field(_name, '', _desc)
            else:
                field = self._format_field(_name, _type, _desc)
            if multi:
                if lines:
                    lines.extend(self._format_block(' * ', field))
                else:
                    lines.extend(self._format_block(':returns: * ', field))
            else:
                lines.extend(self._format_block(':returns: ', field))
                if _type and use_rtype:
                    lines.extend([':rtype: %s' % _type, ''])
        if lines and lines[-1]:
            lines.append('')
        return lines
    def _parse_see_also_section(self, section: str) -> List[str]:
        """Render See Also as a seealso admonition."""
        return self._parse_admonition('seealso', section)
    def _parse_warns_section(self, section: str) -> List[str]:
        """Render Warns as a field list."""
        return self._format_fields(_('Warns'), self._consume_fields())
    def _parse_yields_section(self, section: str) -> List[str]:
        """Render Yields as a field list."""
        fields = self._consume_returns_section()
        return self._format_fields(_('Yields'), fields)
    def _partition_field_on_colon(self, line: str) -> Tuple[str, str, str]:
        """Split *line* on the first single colon outside xrefs/literals."""
        before_colon = []
        after_colon = []
        colon = ''
        found_colon = False
        for i, source in enumerate(_xref_or_code_regex.split(line)):
            if found_colon:
                after_colon.append(source)
            else:
                m = _single_colon_regex.search(source)
                # Odd indices are xref/literal spans; never split inside them.
                if (i % 2) == 0 and m:
                    found_colon = True
                    colon = source[m.start(): m.end()]
                    before_colon.append(source[:m.start()])
                    after_colon.append(source[m.end():])
                else:
                    before_colon.append(source)
        return ("".join(before_colon).strip(),
                colon,
                "".join(after_colon).strip())
    def _qualify_name(self, attr_name: str, klass: "Type") -> str:
        """Prefix *attr_name* with '~Class.' unless it is already qualified."""
        if klass and '.' not in attr_name:
            if attr_name.startswith('~'):
                attr_name = attr_name[1:]
            try:
                q = klass.__qualname__
            except AttributeError:
                q = klass.__name__
            return '~%s.%s' % (q, attr_name)
        return attr_name
    def _strip_empty(self, lines: List[str]) -> List[str]:
        """Remove leading and trailing blank lines."""
        if lines:
            start = -1
            for i, line in enumerate(lines):
                if line:
                    start = i
                    break
            if start == -1:
                lines = []
            end = -1
            for i in reversed(range(len(lines))):
                line = lines[i]
                if line:
                    end = i
                    break
            if start > 0 or end + 1 < len(lines):
                lines = lines[start:end + 1]
        return lines
class NumpyDocstring(GoogleDocstring):
    """Convert NumPy style docstrings to reStructuredText.
    Parameters
    ----------
    docstring : :obj:`str` or :obj:`list` of :obj:`str`
        The docstring to parse, given either as a string or split into
        individual lines.
    config: :obj:`sphinx.ext.napoleon.Config` or :obj:`sphinx.config.Config`
        The configuration settings to use. If not given, defaults to the
        config object on `app`; or if `app` is not given defaults to the
        a new :class:`sphinx.ext.napoleon.Config` object.
    Other Parameters
    ----------------
    app : :class:`sphinx.application.Sphinx`, optional
        Application object representing the Sphinx process.
    what : :obj:`str`, optional
        A string specifying the type of the object to which the docstring
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : :obj:`str`, optional
        The fully qualified name of the object.
    obj : module, class, exception, function, method, or attribute
        The object to which the docstring belongs.
    options : :class:`sphinx.ext.autodoc.Options`, optional
        The options given to the directive: an object with attributes
        inherited_members, undoc_members, show_inheritance and noindex that
        are True if the flag option of same name was given to the auto
        directive.
    Example
    -------
    >>> from sphinx.ext.napoleon import Config
    >>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
    >>> docstring = '''One line summary.
    ...
    ... Extended description.
    ...
    ... Parameters
    ... ----------
    ... arg1 : int
    ...     Description of `arg1`
    ... arg2 : str
    ...     Description of `arg2`
    ... Returns
    ... -------
    ... str
    ...     Description of return value.
    ... '''
    >>> print(NumpyDocstring(docstring, config))
    One line summary.
    <BLANKLINE>
    Extended description.
    <BLANKLINE>
    :param arg1: Description of `arg1`
    :type arg1: int
    :param arg2: Description of `arg2`
    :type arg2: str
    <BLANKLINE>
    :returns: Description of return value.
    :rtype: str
    <BLANKLINE>
    Methods
    -------
    __str__()
        Return the parsed docstring in reStructuredText format.
        Returns
        -------
        str
            UTF-8 encoded version of the docstring.
    __unicode__()
        Return the parsed docstring in reStructuredText format.
        Returns
        -------
        unicode
            Unicode version of the docstring.
    lines()
        Return the parsed lines of the docstring in reStructuredText format.
        Returns
        -------
        list(str)
            The lines of the docstring in a list.
    """
    def __init__(self, docstring: Union[str, List[str]], config: SphinxConfig = None,
                 app: Sphinx = None, what: str = '', name: str = '',
                 obj: Any = None, options: Any = None) -> None:
        # NumPy style additionally treats '.. index::' directives as sections.
        self._directive_sections = ['.. index::']
        super().__init__(docstring, config, app, what, name, obj, options)
    def _consume_field(self, parse_type: bool = True, prefer_type: bool = False
                       ) -> Tuple[str, str, List[str]]:
        """Consume one 'name : type' field and its indented description."""
        line = next(self._line_iter)
        if parse_type:
            _name, _, _type = self._partition_field_on_colon(line)
        else:
            _name, _type = line, ''
        _name, _type = _name.strip(), _type.strip()
        _name = self._escape_args_and_kwargs(_name)
        if prefer_type and not _type:
            _type, _name = _name, _type
        indent = self._get_indent(line) + 1
        _desc = self._dedent(self._consume_indented_block(indent))
        _desc = self.__class__(_desc, self._config).lines()
        return _name, _type, _desc
    def _consume_returns_section(self) -> List[Tuple[str, str, List[str]]]:
        """NumPy Returns entries are fields whose sole token is the type."""
        return self._consume_fields(prefer_type=True)
    def _consume_section_header(self) -> str:
        """Consume the section title and its underline."""
        section = next(self._line_iter)
        if not _directive_regex.match(section):
            # Consume the header underline
            next(self._line_iter)
        return section
    def _is_section_break(self) -> bool:
        """Like the base class, but two blank lines also end a section."""
        line1, line2 = self._line_iter.peek(2)
        return (not self._line_iter.has_next() or
                self._is_section_header() or
                ['', ''] == [line1, line2] or
                (self._is_in_section and
                    line1 and
                    not self._is_indented(line1, self._section_indent)))
    def _is_section_header(self) -> bool:
        """True if the next two lines are a known title plus an underline."""
        section, underline = self._line_iter.peek(2)
        section = section.lower()
        if section in self._sections and isinstance(underline, str):
            return bool(_numpy_section_regex.match(underline))
        elif self._directive_sections:
            if _directive_regex.match(section):
                for directive_section in self._directive_sections:
                    if section.startswith(directive_section):
                        return True
        return False
    def _parse_see_also_section(self, section: str) -> List[str]:
        """Try numpydoc-style parsing; fall back to a plain admonition."""
        lines = self._consume_to_next_section()
        try:
            return self._parse_numpydoc_see_also_section(lines)
        except ValueError:
            return self._format_admonition('seealso', lines)
    def _parse_numpydoc_see_also_section(self, content: List[str]) -> List[str]:
        """
        Derived from the NumpyDoc implementation of _parse_see_also.
        See Also
        --------
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3
        """
        items = []
        def parse_item_name(text: str) -> Tuple[str, str]:
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                if g[1] is None:
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ValueError("%s is not a item name" % text)
        def push_item(name: str, rest: List[str]) -> None:
            """Record (name, description, role) and reset the description buffer."""
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]
        current_func = None
        rest = []  # type: List[str]
        for line in content:
            if not line.strip():
                continue
            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                # 'name : description' form.
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                # Unindented line: either a comma-separated name list or one name.
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        if func.strip():
                            push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                # Indented continuation of the current description.
                rest.append(line.strip())
        push_item(current_func, rest)
        if not items:
            return []
        # Map object types to the sphinx role used for the cross-reference.
        roles = {
            'method': 'meth',
            'meth': 'meth',
            'function': 'func',
            'func': 'func',
            'class': 'class',
            'exception': 'exc',
            'exc': 'exc',
            'object': 'obj',
            'obj': 'obj',
            'module': 'mod',
            'mod': 'mod',
            'data': 'data',
            'constant': 'const',
            'const': 'const',
            'attribute': 'attr',
            'attr': 'attr'
        }
        if self._what is None:
            func_role = 'obj'
        else:
            func_role = roles.get(self._what, '')
        lines = []  # type: List[str]
        last_had_desc = True
        for func, desc, role in items:
            if role:
                link = ':%s:`%s`' % (role, func)
            elif func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                link = "`%s`_" % func
            if desc or last_had_desc:
                lines += ['']
                lines += [link]
            else:
                # No description: chain onto the previous line.
                lines[-1] += ", %s" % link
            if desc:
                lines += self._indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        lines += ['']
        return self._format_admonition('seealso', lines)
| 36.933398
| 93
| 0.545592
|
acfc9999ff6ab8bab908d85ee5910be1baa6607a
| 2,801
|
py
|
Python
|
evaluate/eval_cylib/test_cython.py
|
mondrasovic/reid_baseline_syncbn
|
3d21a786fb1a0519caaa0572c649f750036689b5
|
[
"MIT"
] | 1
|
2022-01-05T15:42:44.000Z
|
2022-01-05T15:42:44.000Z
|
evaluate/eval_cylib/test_cython.py
|
mondrasovic/reid_baseline_syncbn
|
3d21a786fb1a0519caaa0572c649f750036689b5
|
[
"MIT"
] | null | null | null |
evaluate/eval_cylib/test_cython.py
|
mondrasovic/reid_baseline_syncbn
|
3d21a786fb1a0519caaa0572c649f750036689b5
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import sys
import os.path as osp
import timeit
import numpy as np
# Make the repository root importable so `torchreid` resolves when this
# script is run from inside evaluate/eval_cylib/.
sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../..')
from torchreid.eval_metrics import evaluate
"""
Test the speed of cython-based evaluation code. The speed improvements
can be much bigger when using the real reid data, which contains a larger
amount of query and gallery images.
Note: you might encounter the following error:
'AssertionError: Error: all query identities do not appear in gallery'.
This is normal because the inputs are random numbers. Just try again.
"""
# Benchmark: time evaluate() with and without the Cython kernels on random
# fixtures, under both the market1501 and cuhk03 metrics.
print('*** Compare running time ***')
# timeit executes the setup string in a fresh namespace, so the imports and
# random fixtures must be repeated here rather than reusing module scope.
setup = '''
import sys
import os.path as osp
import numpy as np
sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../..')
from torchreid.eval_metrics import evaluate
num_q = 30
num_g = 300
max_rank = 5
distmat = np.random.rand(num_q, num_g) * 20
q_pids = np.random.randint(0, num_q, size=num_q)
g_pids = np.random.randint(0, num_g, size=num_g)
q_camids = np.random.randint(0, 5, size=num_q)
g_camids = np.random.randint(0, 5, size=num_g)
'''
print('=> Using market1501\'s metric')
pytime = timeit.timeit(
    'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)',
    setup=setup,
    number=20
)
cytime = timeit.timeit(
    'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)',
    setup=setup,
    number=20
)
print('Python time: {} s'.format(pytime))
print('Cython time: {} s'.format(cytime))
print('Cython is {} times faster than python\n'.format(pytime / cytime))
print('=> Using cuhk03\'s metric')
pytime = timeit.timeit(
    'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)',
    setup=setup,
    number=20
)
cytime = timeit.timeit(
    'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)',
    setup=setup,
    number=20
)
print('Python time: {} s'.format(pytime))
print('Cython time: {} s'.format(cytime))
print('Cython is {} times faster than python\n'.format(pytime / cytime))
# NOTE(review): the block below is deliberately disabled; it cross-checks
# that the Python and Cython paths return the same mAP/CMC values.
"""
print("=> Check precision")
num_q = 30
num_g = 300
max_rank = 5
distmat = np.random.rand(num_q, num_g) * 20
q_pids = np.random.randint(0, num_q, size=num_q)
g_pids = np.random.randint(0, num_g, size=num_g)
q_camids = np.random.randint(0, 5, size=num_q)
g_camids = np.random.randint(0, 5, size=num_g)
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)
print("Python:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)
print("Cython:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
"""
| 32.952941
| 113
| 0.695466
|
acfc9a0d53fc3165149119995c179bc82c54f412
| 230
|
py
|
Python
|
custom/icds_reports/migrations/0041_added_num_anc_visits_to_agg_awc_montlhy_view.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
custom/icds_reports/migrations/0041_added_num_anc_visits_to_agg_awc_montlhy_view.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | 1
|
2021-06-02T04:45:16.000Z
|
2021-06-02T04:45:16.000Z
|
custom/icds_reports/migrations/0041_added_num_anc_visits_to_agg_awc_montlhy_view.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 1.11.12 on 2018-04-13 9:25
from django.db import migrations
class Migration(migrations.Migration):
    """Migration 0041 for icds_reports (num_anc_visits / agg_awc monthly view).

    NOTE(review): `operations` is empty, so this migration makes no schema
    change itself; presumably the view update happens elsewhere — confirm.
    """
    dependencies = [
        ('icds_reports', '0040_add_days_ration_column'),
    ]
    operations = []
| 17.692308
| 56
| 0.682609
|
acfc9a35460003dba3af4da4a8a3deb4e939f172
| 1,667
|
py
|
Python
|
cutde/backend.py
|
tbenthompson/cutde
|
7b263e2ac16de536d23d8e4d3165705e2ec9a31f
|
[
"MIT"
] | 17
|
2018-05-11T01:48:47.000Z
|
2022-03-31T22:41:41.000Z
|
cutde/backend.py
|
tbenthompson/cutde
|
7b263e2ac16de536d23d8e4d3165705e2ec9a31f
|
[
"MIT"
] | 17
|
2021-04-05T20:59:44.000Z
|
2022-03-17T13:21:10.000Z
|
cutde/backend.py
|
tbenthompson/cutde
|
7b263e2ac16de536d23d8e4d3165705e2ec9a31f
|
[
"MIT"
] | 6
|
2021-04-05T19:44:49.000Z
|
2022-03-31T22:42:40.000Z
|
# noqa: F401
import logging
import os
import numpy as np
# Backend selection: try CUDA first, then OpenCL, then fall back to the
# bundled C++ backend. Each backend module exports the same six names, so
# the rest of cutde stays backend-agnostic.
try:
    if os.environ.get("CUTDE_USE_BACKEND", "cuda") != "cuda":
        # The CUTDE_USE_BACKEND environment variable overrides the normal
        # choice of backend.
        # This can be helpful for testing purposes when it might be nice to run
        # with OpenCL or C++ even if CUDA is installed.
        raise ImportError
    from cutde.cuda import (  # noqa: F401
        empty,
        get,
        load_module,
        max_block_size,
        to,
        zeros,
    )
    which_backend = "cuda"
except ImportError:
    try:
        if os.environ.get("CUTDE_USE_BACKEND", "opencl") != "opencl":
            # Same override mechanism as above, for the OpenCL choice.
            raise ImportError
        from cutde.opencl import (  # noqa: F401
            empty,
            get,
            load_module,
            max_block_size,
            to,
            zeros,
        )
        which_backend = "opencl"
    except ImportError:
        # Last resort: the C++ backend.
        from cutde.cpp import (  # noqa: F401
            empty,
            get,
            load_module,
            max_block_size,
            to,
            zeros,
        )
        which_backend = "cpp"
logger = logging.getLogger(__name__)
logger.debug(f'cutde is using the "{which_backend}" backend')
def np_to_c_type(t):
    """Map a numpy float dtype to the matching C scalar type name.

    Returns "float" for float32 and "double" for float64; any other
    dtype yields None (preserving the original implicit-None behavior).
    """
    known = ((np.float32, "float"), (np.float64, "double"))
    for np_dtype, c_name in known:
        if t == np_dtype:
            return c_name
    return None
def intervals(length, step_size):
    """Partition [0, length) into consecutive (start, end) pairs of width
    step_size; the final pair is clipped so its end equals length.
    """
    result = []
    start = 0
    # Invariant: `start` is the beginning of the next (unclipped) interval.
    while start < length:
        result.append((start, min(start + step_size, length)))
        start += step_size
    return result
| 22.527027
| 79
| 0.563287
|
acfc9a948911ac518bb88397752f239309e1cbe4
| 383
|
py
|
Python
|
demo/special_iter.py
|
1987539447/start-python
|
06ee5eb30e7395cd8432e8e33d7209fa855f4ad9
|
[
"Apache-2.0"
] | null | null | null |
demo/special_iter.py
|
1987539447/start-python
|
06ee5eb30e7395cd8432e8e33d7209fa855f4ad9
|
[
"Apache-2.0"
] | null | null | null |
demo/special_iter.py
|
1987539447/start-python
|
06ee5eb30e7395cd8432e8e33d7209fa855f4ad9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# FileName:special_iter.py
# -*- coding: utf-8 -*-
class Fib(object):
    """Iterator over Fibonacci numbers, stopping once a value exceeds 1000."""

    def __init__(self):
        # `a` holds the value just yielded, `b` the one to yield next.
        self.a, self.b = 0, 1

    def __iter__(self):
        # The instance is its own iterator.
        return self

    def __next__(self):
        # Advance one step; stop as soon as the new value passes the cap.
        value = self.b
        self.b = self.a + self.b
        self.a = value
        if value > 1000:
            raise StopIteration
        return value
# Demo: print every Fibonacci number up to the iterator's 1000 cutoff.
for n in Fib():
    print(n)
| 16.652174
| 48
| 0.545692
|
acfc9b571934953de3d299141d69a8ec55968bf0
| 2,157
|
py
|
Python
|
pipelines/pipeline/var_processing.py
|
Wiredcraft/pipelines
|
2d8db414ada5d39f648ae995a714f572806ab345
|
[
"MIT"
] | 121
|
2016-05-24T03:32:14.000Z
|
2021-12-16T12:12:12.000Z
|
pipelines/pipeline/var_processing.py
|
Wiredcraft/pipelines
|
2d8db414ada5d39f648ae995a714f572806ab345
|
[
"MIT"
] | 83
|
2016-05-06T08:21:34.000Z
|
2022-02-10T12:45:52.000Z
|
pipelines/pipeline/var_processing.py
|
Wiredcraft/pipelines
|
2d8db414ada5d39f648ae995a714f572806ab345
|
[
"MIT"
] | 11
|
2016-05-13T09:44:42.000Z
|
2022-03-02T07:41:54.000Z
|
import logging
from dotmap import DotMap
import jinja2
import yaml
import json
log = logging.getLogger('pipelines')
def substitute_variables(pipeline_context, obj):
    """Recursively render every string in *obj* as a Jinja2 template.

    pipeline_context: dict (or DotMap) of variables. Everything under its
        'vars' key is promoted to the top level so templates can reference
        variables without a 'vars.' prefix.
    obj: arbitrarily nested structure of dicts/lists/tuples/strings; every
        string (dict keys included) is rendered against the context.

    Returns a new structure of the same shape with all strings substituted.
    """
    if isinstance(pipeline_context, DotMap):
        pipeline_context = pipeline_context.toDict()
    # Pull everything from "vars" to root. Guard against a missing/empty
    # 'vars' key: dict.update(None) would raise TypeError.
    pipeline_context.update(pipeline_context.get('vars') or {})
    jinja_env = jinja2.Environment()
    jinja_env.filters['to_json'] = json.dumps

    def to_yaml(value):  # renamed from `input` to avoid shadowing the builtin
        return yaml.dump(value, default_flow_style=False)

    jinja_env.filters['to_yaml'] = to_yaml

    def replace_vars_func(token):
        # Render one string as a template against the flattened context.
        template = jinja_env.from_string(token)
        return template.render(**pipeline_context)

    return _loop_strings(replace_vars_func, obj)
def _loop_strings(func, obj):
    """Recursively apply *func* to every string inside *obj*.

    Dicts have both keys and values processed; lists and tuples come back
    as lists; any other object is returned unchanged.
    """
    if isinstance(obj, basestring):
        return func(obj)
    if isinstance(obj, dict):
        return dict(
            (_loop_strings(func, key), _loop_strings(func, value))
            for key, value in obj.items()
        )
    if isinstance(obj, (list, tuple)):
        return [_loop_strings(func, element) for element in obj]
    return obj
if __name__ == '__main__':
    # Ad-hoc smoke test: exercises substitution over nested dicts/lists,
    # the to_json/to_yaml filters, template expressions in both keys and
    # values, and a jinja {% if %} block.
    # NOTE(review): `vars` shadows the builtin of the same name.
    vars = {
        'vars': {
            'var1': 11,
            'var_2': 'var2works',
            'var_3': 'var 3 also works',
            'nested': {
                'n1': 'nestedWorks'
            }
        }
    }
    obj = {
        'testnorm': '11 22',
        'testvar1': '{{var1}}',
        'testjson': '{{ nested | to_json }}',
        'testyaml': '{{ nested | to_yaml }}',
        'testvar2': '--{{ var_2 }}',
        'testvar3': '{{ var_3}}jj',
        'test:{{var1}}': '{{var1}}',
        'testlist': ['norm', '{{var1}}', '{{var_2}}', 'norm2'],
        'testdict': {
            '{{var1}}': 'vvv',
            'd2': '{{var1}}',
            'nested': ['nest1', 'nestvar{{var_2}}']
        },
        'test1': 'nestedTest: {{ nested.n1 }}',
        'if test': 'should say ok: {% if var1 %} ok {% else %} TEST FAIL!! {% endif %}'
    }
    # Wrap in DotMap to exercise the DotMap branch of substitute_variables.
    vars = DotMap(vars)
    res = substitute_variables(vars, obj)
    # Python 2 print statement.
    print json.dumps(res, indent=2)
| 28.012987
| 97
| 0.568382
|
acfc9b57525169a120d26e9e90e6636bff2d349e
| 21,245
|
py
|
Python
|
Emotif/motif_clustering.py
|
YichaoOU/Emotif_Alpha
|
1b98f82fe359ab5f16056b288b6a39694962992c
|
[
"MIT"
] | null | null | null |
Emotif/motif_clustering.py
|
YichaoOU/Emotif_Alpha
|
1b98f82fe359ab5f16056b288b6a39694962992c
|
[
"MIT"
] | null | null | null |
Emotif/motif_clustering.py
|
YichaoOU/Emotif_Alpha
|
1b98f82fe359ab5f16056b288b6a39694962992c
|
[
"MIT"
] | null | null | null |
from __future__ import division
import os
import sys
import argparse
import shutil
import re
from algo import greedy
from algo import ILP
from algo import branch_cut
from algo import required_cover
from utils import *
from utils import Tomtom
from utils import general_utils
def run_rami_welch_clustering(jid, confDict):
	"""Cluster motifs by Tomtom similarity, run greedy selection per cluster,
	and assemble "super motif" hit files from clusters that pass coverage
	thresholds. Returns the output folder name.

	NOTE(review): several names used below are not defined in this module or
	this function: `inputPwmFile`, `motifSelectDefaultConfFile`,
	`foreFastaFile`, `backFastaFile`, `resultsDirName`. As written this will
	raise NameError at runtime — confirm whether these were meant to come
	from confDict (e.g. `allPwmFile` is read but never used).
	NOTE(review): the unconditional exit() near the end makes the final
	`return output_folder` unreachable.
	"""
	pos_seq = confDict['input']['pos_seq']
	neg_seq = confDict['input']['neg_seq']
	output_folder = jid + '_rami_welch_motif_clustering'
	pos_mhit = confDict['input']['pos_mhit']
	neg_mhit = confDict['input']['neg_mhit']
	allPwmFile = confDict['input']['pwm_file']
	fileList = []
	#parameters
	#max negative coverage like 20% or 10%
	maxNeg = confDict['prefilter']['maxnegcov']
	maxNeg = maxNeg * 100
	#min positive coverage like 20% or 10%
	minPos = confDict['prefilter']['minposcov']
	# NOTE(review): this overwrites minPos with maxNeg * 100; the symmetric
	# code above suggests `minPos = minPos * 100` was intended — confirm.
	minPos = maxNeg * 100
	# minPos = 20
	#max negative coverage per super motif
	maxNegSuper = confDict['rami_welch']['maxnegcov']
	maxNegSuper = maxNegSuper * 100
	# maxNegSuper = 100
	#min positive coverage per super motif
	minPosSuper = confDict['rami_welch']['minposcov']
	minPosSuper = minPosSuper * 100
	# minPosSuper = 0
	#tomtom match e-value
	tomEValue = confDict['rami_welch']['evalue']
	tomStrand = confDict['rami_welch']['strand']
	# tomEValue = 0.0005
	# tomStrand = '+'
	#name of job for pca plot
	jobDesc = 'test'
	#find number of seqs in the fasta file
	foreTotalNumSeqs = general_utils.findNumSeqs(pos_seq)
	backTotalNumSeqs = general_utils.findNumSeqs(neg_seq)
	print 'num fore seqs:', foreTotalNumSeqs, 'num back seqs:', backTotalNumSeqs
	#read the hit files and get a dict of motif names and hits
	foreMotifDict = readHitFile(pos_mhit)
	backMotifDict = readHitFile(neg_mhit)
	#write to a file the coverage of the selected motifs,
	allMotifInfoFileName = jid + '_all_motifs_info.csv'
	writeMotifInfo(foreMotifDict, backMotifDict, foreTotalNumSeqs, backTotalNumSeqs, allMotifInfoFileName)
	fileList.append(allMotifInfoFileName)
	#filter the motifs based on their hits and make a new filtered PWM file
	filtMotifList = filterMotifs(foreMotifDict, backMotifDict, maxNeg, minPos, foreTotalNumSeqs, backTotalNumSeqs)
	print 'len filtmotif list:', len(filtMotifList)
	#create a filtered PWM file
	filtPwmFileName = jid + '_filt_PWM.pwm'
	# NOTE(review): `inputPwmFile` is undefined here; `allPwmFile` (read
	# above and otherwise unused) was probably intended.
	writeFiltPwmFile(inputPwmFile, filtPwmFileName, filtMotifList)
	fileList.append(filtPwmFileName)
	#create the motif logos for all the filtered motifs
	allMotifLogoFolderName = jid + '_all_logos'
	general_utils.makeMotifLogoFromPwm(filtPwmFileName, allMotifLogoFolderName)
	fileList.append(allMotifLogoFolderName)
	#run tomtom on the list of filtered motifs
	tomtomDict = {}
	#dict that stores parameters for the tomtom tool
	tomParaDict = {}
	tomParaDict['evalue'] = tomEValue
	#tomtom output directory name
	tomtomOutDir = jid + '_Tomtom'
	#we are comparing the motifs against each other, so use the same PWM file
	Tomtom.callTomtom(filtPwmFileName, filtPwmFileName, tomParaDict, tomtomOutDir)
	fileList.append(tomtomOutDir)
	#parse the tomtom output file and return a dict between motif Name and list[] of motifs that match it
	tomtomDict = Tomtom.parse(tomtomOutDir+'/tomtom.txt')
	#2 dimensional dict for e value
	tomEvalDict = parseTomEval(tomtomOutDir+'/tomtom.txt', tomStrand)
	#write the set of motif clusters info to a file, sort the motifs on fore cove and the top motifs is considered a seed motif
	setsFileName = jid + '_motif_sets_1.csv'
	setsFile = open(setsFileName, 'wb')
	selectList = []
	#dict between group ID and motif group object
	groupObjDict = {}
	groupId = 1
	# Greedy clustering: motifs sorted by foreground hit count; the best
	# unclaimed motif seeds a group and claims its unclaimed Tomtom matches.
	for key, value in sorted(foreMotifDict.iteritems(), key = lambda (k,v):len(v), reverse=True):
		motifName = key
		#check if motif passed the filtering or not
		if motifName not in filtMotifList:
			continue
		#check if motif already was selected as a first motif or not
		if motifName in selectList:
			continue
		seqList = foreMotifDict[motifName]
		#if motifName not in foreMotifDict:
			#continue
		setsFile.write('\n\nfirst motif:' + motifName + '\n')
		matchList = []
		selectList.append(motifName)
		if motifName in tomtomDict:
			for val in tomtomDict[motifName]:
				if val not in foreMotifDict:
					continue
				if val in selectList:
					continue
				setsFile.write(val + '\n')
				matchList.append(val)
				selectList.append(val)
		groupObjDict[groupId] = MotifGroup(groupId, motifName, matchList)
		groupId += 1
	setsFile.close()
	fileList.append(setsFileName)
	print 'num of motif groups:', len(groupObjDict)
	#file that stores and comapres the stats
	statsFileName = jid + '_group_stats.csv'
	fileList.append(statsFileName)
	statsFile = open(statsFileName, 'wb')
	statsFile.write('Motif,Group_num,clusterSize,reducedCluster,foreground_cov(%),fore_numSeqs,background_cov(%),back_numSeqs\n')
	#create the logo group folders for each motif group
	#folder list for the group logos
	allGroupLogoFolder = jid + '_groups_logos'
	os.makedirs(allGroupLogoFolder)
	allGroupLogoFileList = []
	#create a folder to store the groups hit files and coverage results
	allGroupCovFolder = jid + '_groups_coverage'
	os.makedirs(allGroupCovFolder)
	allGroupCovList = []
	#file for motif match e-values
	simEvalueFileName = jid + '_similarity_evalues.csv'
	fileList.append(simEvalueFileName)
	simEvalueFile = open(simEvalueFileName, 'wb')
	simEvalueFile.write('seedMotif,similarMotif,E-value\n')
	#file for motif hits of the combined cov selected motifs; super motifs
	foreSuperMotifHitFileName = jid + '_fore_superMotif_hits'
	foreSuperMotifHitFile = open(foreSuperMotifHitFileName, 'wb')
	backSuperMotifHitFileName = jid + '_back_superMotif_hits'
	backSuperMotifHitFile = open(backSuperMotifHitFileName, 'wb')
	fileList.append(foreSuperMotifHitFileName)
	fileList.append(backSuperMotifHitFileName)
	#loop thru the groups
	# NOTE(review): the loop variable reuses the name `groupId` from the
	# clustering pass above.
	for groupId in sorted(groupObjDict.iterkeys()):
		groupObj = groupObjDict[groupId]
		#if len(groupObj.simList) == 0:
			#continue
		print 'group Id:', groupId,'seedMotif:', groupObj.seedMotif,'num matches:', len(groupObj.simList)
		groupLogoFolder = jid + '_group_' + str(groupId)
		#create the folder
		os.makedirs(groupLogoFolder)
		#copy the first motif logo to this folder
		# NOTE(review): os.system does not raise on a failed `cp`, so these
		# try/except blocks only catch errors building the command string.
		try:
			#MEME logos changes any - to _ , so replace that character, also any '.' in the name is changed to '_' as well
			tmpName = groupObj.seedMotif
			if re.search(r'-', tmpName):
				tmpName = tmpName.replace('-', '_')
			if tmpName.count('.') > 0:
				tmpName = tmpName.replace('.', '_')
			cmd = 'cp ./' + allMotifLogoFolderName + '/logo' + tmpName + '.png ' + groupLogoFolder
			os.system(cmd)
		except:
			print 'copy logos failed'
			exit()
		#copy the similar motif logos
		for simMotif in groupObj.simList:
			tmpName = simMotif
			if re.search(r'-', tmpName):
				tmpName = tmpName.replace('-', '_')
			if tmpName.count('.') > 0:
				tmpName = tmpName.replace('.', '_')
			try:
				cmd = 'cp ' + allMotifLogoFolderName + '/logo' + tmpName + '.png ' + groupLogoFolder
				os.system(cmd)
			except:
				print 'copy similar logos failed'
				exit()
		allGroupLogoFileList.append(groupLogoFolder)
		###write the match e-values
		simEvalueFile.write(groupObj.seedMotif + '\n')
		for simMotif in groupObj.simList:
			eValue = tomEvalDict[groupObj.seedMotif][simMotif]
			simEvalueFile.write(',' + simMotif + ',' + str(eValue) + '\n')
		####################################################################
		#make a hits file per group to run motif selection on it
		#create the folder
		####################################################################
		groupCovFolder = jid + '_group_' + str(groupId) + '_cov'
		os.makedirs(groupCovFolder)
		covFolderList = []
		foreGroupHitFileName = jid + '_group_' + str(groupId) + '_fore_hits'
		backGroupHitFileName = jid + '_group_' + str(groupId) + '_back_hits'
		writeGroupHits(foreGroupHitFileName, backGroupHitFileName, groupObj.seedMotif, groupObj, foreMotifDict, backMotifDict)
		covFolderList.append(foreGroupHitFileName)
		covFolderList.append(backGroupHitFileName)
		#********************* motif selection on motif similarity groups ********************#
		#call the greedy algthm on the group hit files
		jobID = jid + '_Greedy_group_' + str(groupId)
		groupConfFileName = jid + '_confFile_group_' + str(groupId) + '.conf'
		# NOTE(review): `inputPwmFile`, `motifSelectDefaultConfFile`,
		# `foreFastaFile` and `backFastaFile` are undefined in this scope.
		callGreedySelect_per_group(groupObj.seedMotif, groupObj.simList, foreGroupHitFileName, backGroupHitFileName, inputPwmFile, groupConfFileName, motifSelectDefaultConfFile, foreFastaFile, backFastaFile, jobID)
		covFolderList.append(groupConfFileName)
		covFolderList.append(jobID)
		#read the depth results file to get how many motifs produced and their coverage
		covForeCumCov, covBackCumCov, greedyMotifList = readGreedyResults(jobID, statsFile, foreGroupHitFileName, groupId)
		print 'group ID:', groupId, 'greedy list:', greedyMotifList
		#read the greedyMotifList and make a new hit file
		#create a new hit file based on these requirements
		foreGroupSelectHitFileName = jid + '_pos.mhit'
		foreGroupSelectHitFile = open(foreGroupSelectHitFileName, 'wb')
		covFolderList.append(foreGroupSelectHitFileName)
		backGroupSelectHitFileName = jid + '_neg.mhit'
		backGroupSelectHitFile = open(backGroupSelectHitFileName, 'wb')
		covFolderList.append(backGroupSelectHitFileName)
		makeHitFile(foreGroupSelectHitFile, backGroupSelectHitFile, foreMotifDict, backMotifDict, greedyMotifList)
		foreGroupSelectHitFile.close()
		backGroupSelectHitFile.close()
		#read the group cov selected hit file and add it to the super motif all hit file. This will be the hit file of the super motifs
		#check how much was the fore and back cum coverage and add if pass the check
		if covForeCumCov >= minPosSuper and covBackCumCov <= maxNegSuper:
			writeHitsToSuperMotifFile(foreGroupSelectHitFileName, foreSuperMotifHitFile)
			writeHitsToSuperMotifFile(backGroupSelectHitFileName, backSuperMotifHitFile)
		#move the group cov files and folders
		for outFile in covFolderList:
			shutil.move(outFile, groupCovFolder)
		allGroupCovList.append(groupCovFolder)
		##break outer loop
		#break
	#close the sim e value file
	simEvalueFile.close()
	statsFile.close()
	#move the group logo folders
	for outFile in allGroupLogoFileList:
		shutil.move(outFile, allGroupLogoFolder)
	fileList.append(allGroupLogoFolder)
	#move the cov folders
	for outFile in allGroupCovList:
		shutil.move(outFile, allGroupCovFolder)
	fileList.append(allGroupCovFolder)
	foreSuperMotifHitFile.close()
	backSuperMotifHitFile.close()
	#move files to results folder
	# NOTE(review): `resultsDirName` is undefined in this scope.
	for outFile in fileList:
		shutil.move(outFile, resultsDirName)
	# NOTE(review): unconditional exit() — the return below never runs.
	exit()
	return output_folder
class MotifGroup:
	"""One motif-similarity cluster: a seed motif plus its Tomtom matches."""
	def __init__(self, groupId, seedMotif, simList):
		"""Store the cluster's identity and membership.

		groupId: ranking-based identifier (1 = best foreground coverage)
		seedMotif: cluster representative with the highest fore coverage
		simList: motifs judged similar to the seed
		"""
		# Plain value holders; no behavior beyond attribute storage.
		self.groupId = groupId
		self.seedMotif = seedMotif
		self.simList = simList
def parseTomEval(inFile, strand):
	"""Parse a Tomtom `tomtom.txt` file into a two-level dict:
	matchDict[query][target] = E-value, with self-matches excluded and only
	the first E-value kept per (query, target) pair.

	NOTE(review): the `strand` parameter is effectively ignored — the local
	name is overwritten by `strand = lineSplit[9]` in the loop and the
	strand filter is commented out. Confirm whether it should be applied.
	"""
	#dict between query and target Ids
	matchDict = {}
	#read the tomtom.txt file and see if any known are found
	with open(inFile, 'rb') as handler:
		for line in handler:
			line = line.strip()
			# Skip the header row (contains 'Query').
			if re.search(r'Query', line):
				continue
			#skip if the target is reverse compliment
			lineSplit = line.split('\t')
			#when making the dict, the targets are usually motif IDs as digits so check them, otherwise they are strings/names
			if lineSplit[0].isdigit():
				query = int(lineSplit[0])
			else:
				query = lineSplit[0]
			if lineSplit[1].isdigit():
				target = int(lineSplit[1])
			else:
				target = lineSplit[1]
			eVal = float(lineSplit[4])
			# Parsed but currently unused.
			overlap = int(lineSplit[6])
			strand = lineSplit[9]
			#if strand != '+':
				#continue
			if query not in matchDict:
				matchDict[query] = {}
			if target not in matchDict[query] and target != query:
				matchDict[query][target]=eVal
	#for query in matchDict:
		#print 'query:', query
		#for target in matchDict[query]:
			#print '\ttarget:', target,'eval:', matchDict[query][target]
	return matchDict
def readHitFile(hitFile):
"""Read the hit file and make a dict between motifnames and their seqs """
motifDict = {}
motifName = ''
with open(hitFile, 'rb') as handler:
for line in handler:
line = line.strip()
if re.search(r'>', line):
motifName = line[1:]
if motifName not in motifDict:
motifDict[motifName] = []
continue
motifDict[motifName].append(line)
print 'len of motifDict:', len(motifDict)
return motifDict
def writeMotifInfo(foreMotifDict, backMotifDict, totalNumPosSeqs, totalNumNegSeqs, outFileName):
	"""Write a CSV of per-motif coverage:
	Motif,PosCov,PosNumSeqs,NegCov,NegNumSeqs (coverage in %, 1 decimal).
	"""
	outFile = open(outFileName, 'wb')
	outFile.write('Motif,PosCov,PosNumSeqs,NegCov,NegNumSeqs\n')
	for motifName in foreMotifDict:
		posNumSeqs = len(foreMotifDict[motifName])
		posCov = round(100 * (posNumSeqs / totalNumPosSeqs), 1)
		negHits = backMotifDict.get(motifName)
		if negHits is None:
			# Motif absent from the background hit file.
			negNumSeqs = 0
			negCov = 0
		else:
			negNumSeqs = len(negHits)
			negCov = round(100 * (negNumSeqs / totalNumNegSeqs), 1)
		row = [motifName, str(posCov), str(posNumSeqs), str(negCov), str(negNumSeqs)]
		outFile.write(','.join(row) + '\n')
	outFile.close()
def filterMotifs(foreMotifDict, backMotifDict, maxNeg, minPos, foreTotalNumSeqs, backTotalNumSeqs):
"""filter the motifs based on fore and back coverage """
filtMotifList = []
for motifName in foreMotifDict:
posNumSeqs = len(foreMotifDict[motifName])
posCov = round( 100*(posNumSeqs/foreTotalNumSeqs), 1)
if motifName in backMotifDict:
negNumSeqs = len(backMotifDict[motifName])
negCov = round( 100*(negNumSeqs/backTotalNumSeqs), 1)
else:
negCov = 0
#check the cov
if posCov >= minPos and negCov < maxNeg:
filtMotifList.append(motifName)
print 'maxNeg:', maxNeg, 'minPos:', minPos, 'num of filtered motifs:', len(filtMotifList)
return filtMotifList
def writeFiltPwmFile(inputPwmFile, filtPwmFileName, filtMotifList):
	"""Copy only the motifs named in filtMotifList from inputPwmFile into a
	new MEME-format PWM file, writing a fresh MEME 4.4 header first.

	`flag` tracks whether lines are inside a kept motif record: it is set
	when a MOTIF header for a kept motif is seen and cleared at the next
	non-kept MOTIF header.
	"""
	filtPwmFile = open(filtPwmFileName, 'wb')
	filtPwmFile.write('MEME version 4.4\nALPHABET= ACGT\nstrands: + -\nBackground letter frequencies (from web form):\nA 0.25000 C 0.25000 G 0.25000 T 0.25000\n\n')
	flag = 0
	count = 0
	with open(inputPwmFile, 'rb') as handler:
		for line in handler:
			line = line.strip()
			if re.search(r'MOTIF', line):
				split = line.split()
				# Motif name is the second token of the MOTIF line.
				motifName = split[1]
				if motifName not in filtMotifList:
					flag = 0
					continue
				else:
					filtPwmFile.write(line + '\n')
					flag = 1
					count += 1
					continue
			if flag == 1:
				filtPwmFile.write(line + '\n')
	print 'count:', count
	filtPwmFile.close()
def writeGroupHits(foreGroupHitFileName, backGroupHitFileName, seedMotif, groupObj, foreMotifDict, backMotifDict):
	"""Write per-group foreground/background hit files: a '>' header per
	motif followed by one sequence name per line, seed motif first, then
	each similar motif in groupObj.simList. Background entries are written
	only for motifs present in backMotifDict.
	"""
	def _dump(handle, motif, hitDict):
		# Emit one sequence name per line for *motif*.
		for seqName in hitDict[motif]:
			handle.write(seqName + '\n')

	foreHandle = open(foreGroupHitFileName, 'wb')
	backHandle = open(backGroupHitFileName, 'wb')
	# Seed motif first: headers, then its hits.
	foreHandle.write('>' + seedMotif + '\n')
	if seedMotif in backMotifDict:
		backHandle.write('>' + seedMotif + '\n')
	_dump(foreHandle, seedMotif, foreMotifDict)
	if seedMotif in backMotifDict:
		_dump(backHandle, seedMotif, backMotifDict)
	# Then every similar motif in the group.
	for simMotif in groupObj.simList:
		foreHandle.write('>' + simMotif + '\n')
		_dump(foreHandle, simMotif, foreMotifDict)
		if simMotif in backMotifDict:
			backHandle.write('>' + simMotif + '\n')
			_dump(backHandle, simMotif, backMotifDict)
	foreHandle.close()
	backHandle.close()
def writeGreedySelectConfFile(foreHitFileName, backHitFileName, pwmFileName, groupConfFileName, defaultConfFileName, foreFastaFile, backFastaFile):
	"""Copy the default conf file to groupConfFileName, overriding the
	input-file entries with this group's files. All other lines are copied
	through (stripped, one per line).
	"""
	# Patterns are tried in this order; the first match decides the override.
	overrides = (
		(r'fore_testing_file=', 'fore_testing_file=' + foreFastaFile),
		(r'back_testing_file=', 'back_testing_file=' + backFastaFile),
		(r'pwm_file=', 'pwm_file=' + pwmFileName),
		(r'foremotifhits_file=', 'foremotifhits_file=' + foreHitFileName),
		(r'backmotifhits_file=', 'backmotifhits_file=' + backHitFileName),
	)
	outFile = open(groupConfFileName, 'wb')
	with open(defaultConfFileName, 'rb') as handler:
		for line in handler:
			line = line.strip()
			for pattern, replacement in overrides:
				if re.search(pattern, line):
					outFile.write(replacement + '\n')
					break
			else:
				# No override matched: copy the line unchanged.
				outFile.write(line + '\n')
	outFile.close()
def callGreedySelect_per_group(seedMotif, simList, foreGroupHitFileName, backGroupHitFileName, pwmFileName, groupConfFileName, defaultConfFileName, foreFastaFile, backFastaFile, jobID):
	"""Write a per-group conf file and shell out to the external greedy
	motif-selection script for one similarity group.

	seedMotif: the seed motif of the group (currently unused here)
	simList: list of motifs similar to the seed motif (currently unused here)

	NOTE(review): `flag` is assigned but never used; the script path is
	hard-coded to a user home directory; arguments are interpolated into the
	shell command unquoted (consider subprocess with an argument list).
	"""
	#make the conf file
	writeGreedySelectConfFile(foreGroupHitFileName, backGroupHitFileName, pwmFileName, groupConfFileName, defaultConfFileName, foreFastaFile, backFastaFile)
	flag = 1
	cmd = 'python /home/rami/Documents/seq_cov/main.py -jid ' + jobID + ' -confFile ' + groupConfFileName
	print 'CMD:', cmd
	os.system(cmd)
def readGreedyResults(jobID, statsFile, groupForeFileName, groupCount):
	""" read the depth results file to get how many motifs produced and their coverage

	Scans the jobID folder for *_depth_results files, collects the selected
	motif names and cumulative sequence counts, appends one CSV row to
	statsFile, and returns (final fore cumulative coverage, final back
	cumulative coverage, motif name list).

	NOTE(review): `split`, `covLineCount`, `covForeCumCov`, `covBackCumCov`
	and `mName` leak out of the loops; if no *_depth_results file exists (or
	the file has no data rows) this raises NameError/UnboundLocalError. The
	"last line" reads below depend on `split` still holding the final data
	row after the inner loop — do not reorder.
	"""
	#find how many motifs in the combined list
	totalNumMotifs = 0
	with open(groupForeFileName, 'rb') as handler:
		for line in handler:
			if re.search(r'>', line):
				totalNumMotifs += 1
	motifNameList = []
	cumForeNumSeqs = 0
	cumBackNumSeqs = 0
	for fileName in os.listdir(jobID):
		if re.search(r'_depth_results', fileName):
			covLineCount = 0
			flag = 1
			with open(jobID + '/' + fileName, 'rb') as handler:
				for line in handler:
					# Skip blank lines and the header rows.
					if not line.strip() or re.search(r'foreground', line) or re.search(r'Depth', line):
						continue
					line = line.strip()
					covLineCount += 1
					split = line.split(',')
					motifNameList.append(split[0])
					cumForeNumSeqs += int(split[3])
					cumBackNumSeqs += int(split[7])
					#to get the seed motif name
					if flag == 1:
						mName = split[0]
						flag=0
			#process the last line
			covForeCumCov = float(split[4])
			covBackCumCov = float(split[8])
			#print 'cov numMotifs:', covLineCount, 'cumCov:', covForeCumCov, 'backCov:', covBackCumCov
	#print info to stats file
	statsFile.write(mName + ',' + str(groupCount) + ',' + str(totalNumMotifs) + ',' + str(covLineCount) + ',' + str(covForeCumCov) + ',' + str(cumForeNumSeqs) + ','
		+ str(covBackCumCov) + ',' + str(cumBackNumSeqs) +'\n')
	return covForeCumCov, covBackCumCov, motifNameList
def makeHitFile(foreFinalHitFile, backFinalHitFile, foreMotifDict, backMotifDict, greedyMotifList):
	"""Merge the hits of the greedy-selected motifs into single foreground/
	background records written to the already-open file handles.

	Only the first motif's name becomes the '>' header; hits of every motif
	in greedyMotifList are appended beneath it, de-duplicated while keeping
	first-seen order. The background header is written only when the first
	motif appears in backMotifDict.
	"""
	headerDone = False
	foreSeen = []
	backSeen = []
	for motifName in greedyMotifList:
		if not headerDone:
			# Header carries only the top-ranked motif's name.
			foreFinalHitFile.write('>' + motifName + '\n')
			if motifName in backMotifDict:
				backFinalHitFile.write('>' + motifName + '\n')
			headerDone = True
		for seqName in foreMotifDict[motifName]:
			if seqName not in foreSeen:
				foreFinalHitFile.write(seqName + '\n')
				foreSeen.append(seqName)
		if motifName in backMotifDict:
			for seqName in backMotifDict[motifName]:
				if seqName not in backSeen:
					backFinalHitFile.write(seqName + '\n')
					backSeen.append(seqName)
def writeHitsToSuperMotifFile(inHitFile, foreSuperMotifHitFile):
	"""Append the entire contents of the hit file at *inHitFile* onto the
	already-open super-motif hit file handle."""
	with open(inHitFile, 'rb') as handler:
		foreSuperMotifHitFile.write(handler.read())
def callGreedySelect_super(foreSuperMotifHitFileName, backSuperMotifHitFileName, inputPwmFile, superConfFileName, defaultConfFileName, foreFastaFile, backFastaFile, jobID):
	"""Write a conf file for the combined super-motif hit files and shell
	out to the external greedy motif-selection script.

	NOTE(review): hard-coded user path and unquoted shell interpolation of
	arguments — confirm the path and consider subprocess with an argument
	list.
	"""
	#make the conf file
	writeGreedySelectConfFile(foreSuperMotifHitFileName, backSuperMotifHitFileName, inputPwmFile, superConfFileName, defaultConfFileName, foreFastaFile, backFastaFile)
	cmd = 'python /home/rami/Documents/seq_cov/main.py -jid ' + jobID + ' -confFile ' + superConfFileName
	print 'CMD:', cmd
	os.system(cmd)
| 35.765993
| 209
| 0.706802
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.