max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
setup.py | inmagik/django-jsoneditor | 2 | 6613551 | <gh_stars>1-10
import os
from setuptools import setup
# Publish the project README as the package's long description on PyPI.
readme_path = os.path.join(os.path.dirname(__file__), 'README.md')
with open(readme_path) as readme_file:
    README = readme_file.read()

setup(
    name='django-jsoneditor',
    version='0.0.1',
    url='https://github.com/inmagik/django-jsoneditor',
    description="JSON editor fields and widgets",
    long_description=README,
    license="MIT",
    author="<NAME>",
    author_email="<EMAIL>",
    install_requires=[
        'Django >=1.8',
    ],
    packages=['jsoneditor'],
    # Package data is picked up from source control rather than listed
    # explicitly via package_data.
    include_package_data=True,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
    ],
)
| import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    # The README becomes the long description shown on PyPI.
    README = readme.read()
setup(
    name='django-jsoneditor',
    version='0.0.1',
    url='https://github.com/inmagik/django-jsoneditor',
    # Minimum supported Django version for this package.
    install_requires=[
        'Django >=1.8',
    ],
    description="JSON editor fields and widgets",
    long_description=README,
    license="MIT",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=['jsoneditor'],
    #package_dir={'jsoneditor': 'jsoneditor'},
    include_package_data = True, # include everything in source control
    #package_data={'jsoneditor': ['*.py','contrib/*.py','tests/*.py','tests/templates/*.html']},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Programming Language :: Python']
) | en | 0.511293 | #package_dir={'jsoneditor': 'jsoneditor'}, # include everything in source control #package_data={'jsoneditor': ['*.py','contrib/*.py','tests/*.py','tests/templates/*.html']}, | 1.312126 | 1 |
emloop/hooks/accumulate_variables.py | iterait/cxflow | 3 | 6613552 | """
Module with batch data accumulating hook.
"""
import typing
from collections import defaultdict
from . import AbstractHook
from ..types import Batch
class AccumulateVariables(AbstractHook):
    """
    Accumulate the specified variables allowing their aggregation after each epoch.

    The hook itself does not utilize the accumulated variables. It is meant to be inherited from. The child hook
    will have the accumulated variables available in ``self._accumulator`` after each epoch.

    The data are accumulated in a form of nested mapping
    ``stream_name`` -> ``variable_name`` -> ``Iterable``[``values``].

    .. warning::
        This hook should not be used directly as it does nothing on its own.
    """

    def __init__(self, variables: typing.Iterable[str], **kwargs):
        """
        Create new AccumulateVariables hook.

        :param variables: collection of variable names to be accumulated
        """
        super().__init__(**kwargs)
        # Materialize the names eagerly: a one-shot iterator (e.g. a generator)
        # would otherwise be exhausted after the first batch and every later
        # batch would silently accumulate nothing.
        self._variables = list(variables)
        self._accumulator = None
        self._reset_accumulator()

    def _reset_accumulator(self):
        """Set the accumulator to an empty double-index :py:class:`collections.defaultdict`."""
        self._accumulator = defaultdict(lambda: defaultdict(list))

    def after_batch(self, stream_name: str, batch_data: Batch):
        """
        Extend the accumulated variables with the given batch data.

        :param stream_name: stream name; e.g. ``train`` or any other...
        :param batch_data: batch data = stream sources + model outputs
        :raise KeyError: if the variables to be aggregated are missing
        :raise TypeError: if the variable value is not iterable (e.g. it is only a scalar)
        """
        for variable in self._variables:
            if variable in batch_data:
                value = batch_data[variable]
                if not hasattr(value, '__iter__'):
                    raise TypeError('Variable `{}` to be accumulated is not iterable.'.format(variable))
                self._accumulator[stream_name][variable] += list(value)
            else:
                raise KeyError('Variable `{}` to be accumulated was not found in the batch data. '
                               'Available variables are `{}`.'.format(variable, batch_data.keys()))

    def after_epoch(self, **_):
        """Reset the accumulator after each epoch."""
        self._reset_accumulator()
| """
Module with batch data accumulating hook.
"""
import typing
from collections import defaultdict
from . import AbstractHook
from ..types import Batch
class AccumulateVariables(AbstractHook):
    """
    Collect selected batch variables so they can be aggregated once per epoch.

    This hook performs no aggregation itself; it is a base class. Subclasses
    find the collected values in ``self._accumulator`` after every epoch, as a
    nested mapping ``stream_name`` -> ``variable_name`` -> list of values.

    .. warning::
        Do not use this hook directly — on its own it only gathers data.
    """

    def __init__(self, variables: typing.Iterable[str], **kwargs):
        """
        Create new AccumulateVariables hook.

        :param variables: collection of variable names to be logged
        """
        super().__init__(**kwargs)
        self._variables = variables
        self._accumulator = None
        self._reset_accumulator()

    def _reset_accumulator(self):
        """Replace the accumulator with a fresh two-level :py:class:`collections.defaultdict`."""
        self._accumulator = defaultdict(lambda: defaultdict(list))

    def after_batch(self, stream_name: str, batch_data: Batch):
        """
        Append this batch's values for every tracked variable.

        :param stream_name: stream name; e.g. ``train`` or any other...
        :param batch_data: batch data = stream sources + model outputs
        :raise KeyError: if a tracked variable is absent from the batch
        :raise TypeError: if a tracked variable's value is not iterable (e.g. a scalar)
        """
        for name in self._variables:
            # Fail loudly when a requested variable is missing from the batch.
            if name not in batch_data:
                raise KeyError('Variable `{}` to be accumulated was not found in the batch data. '
                               'Available variables are `{}`.'.format(name, batch_data.keys()))
            values = batch_data[name]
            if not hasattr(values, '__iter__'):
                raise TypeError('Variable `{}` to be accumulated is not iterable.'.format(name))
            self._accumulator[stream_name][name].extend(values)

    def after_epoch(self, **_):
        """Drop the accumulated data at the end of every epoch."""
        self._reset_accumulator()
| en | 0.784459 | Module with batch data accumulating hook. Accumulate the specified variables allowing their aggregation after each epoch. The hook itself does not utilize the accumulated variables. It is meant to be inherited from. The child hook will have the accumulated variables available in ``self._accumulator`` after each epoch. The data are accumulated in a form of nested mapping ``stream_name`` -> ``variable_name`` -> ``Iterable``[``values``]. .. warning:: This hook should not be used directly as it does nothing on its own. Create new AccumulateVariables hook. :param variables: collection of variable names to be logged Set the accumulator to an empty double-index :py:class:`collections.defaultdict`. Extend the accumulated variables with the given batch data. :param stream_name: stream name; e.g. ``train`` or any other... :param batch_data: batch data = stream sources + model outputs :raise KeyError: if the variables to be aggregated are missing :raise TypeError: if the variable value is not iterable (e.g. it is only a scalar) Reset the accumulator after each epoch. | 3.194823 | 3 |
_old/server/oas/apps.py | chris-ch/myledger-online-bookkeeping | 0 | 6613553 | <gh_stars>0
from django.apps import AppConfig
class OasConfig(AppConfig):
    """Django application configuration for the ``oas`` app."""

    # Dotted module path Django uses to locate and register this application.
    name = 'oas'
| from django.apps import AppConfig
class OasConfig(AppConfig):
name = 'oas' | none | 1 | 1.303073 | 1 | |
main.py | Harnoorsingh5/blood_cell_recoginition | 1 | 6613554 | <gh_stars>1-10
"""
Code Flow
1 -> Main method is called, main()
2 -> Inside the main method, the data object is initialised. Head to the Data constructor (Data.py) to know more (just the constructor)
3 -> A flag is used to test or train
4 -> In Training, A checkpoint is created to save the progress of the model
5 -> Then the model is defined using height, width of image of dimension height * weight * 3, 3 -> RGB
6 -> Note we are using 20_4 model so just head to this model
7 -> First Feature learning is done using consecutive steps of Conv2d, Batch Normalization and Dropout
8 -> After Feature Learning Classification is done and layers are added like input, hidden and output
9 -> Then the Loss Function is Defined RMSprop and cross entropy
10 -> Now the model is trained using fit_generator method, batch by batch
11 -> In testing all the results are evaluated using evaluate_generator method, gives out a measure of performance (accuracy)
"""
import os
from datetime import datetime
import numpy as np
import tensorflow as tf
# Fix both NumPy's and TensorFlow's random seeds so training runs are reproducible.
my_random_seed = 1337
np.random.seed(my_random_seed)
tf.random.set_seed(my_random_seed)
# tf.set_random_seed(my_random_seed)
# Intentionally added step to avoid tensorflow Error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning log spam
from keras.models import Model, load_model
from keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation
from keras.optimizers import Adam, Adadelta, Adagrad, RMSprop
from keras.callbacks import ModelCheckpoint
from keras import regularizers
from data import Data
# this_path = os.path.dirname(os.path.abspath(__file__))
# Base directory used below to resolve the `resources` checkpoint folder.
this_path = os.path.abspath('')
def get_model(out_ht, out_wd, model_id):
    """Build and compile one of the experimental CNN classifiers.

    :param out_ht: input image height in pixels
    :param out_wd: input image width in pixels
    :param model_id: string key selecting one of the architecture variants below
    :return: a compiled Keras ``Model`` (4-way softmax, categorical cross-entropy)

    NOTE(review): if ``model_id`` matches no branch — or matches '100_3',
    whose body is only ``pass`` — ``x`` is never assigned and ``outputs = x``
    at the bottom raises; confirm callers only pass implemented ids.
    """
    inputs = Input(shape=(out_ht, out_wd, 3))
    # Input is used to instantiate a Keras tensor. A Keras tensor is a tensor object from the underlying
    # backend (TensorFlow in out case), which we augment with certain attributes that allow us to build a
    # Keras model just by knowing the inputs and outputs of the model.
    # shape => height/2 , width/2, 3 Here 3 -> RGB
    # Note -> Since we are using 20_4 model for use, directly head to case where model_id = 20_4, line_no: 221
    if model_id == '0':
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        x = Dense(128, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '1':
        # Ran for 100 epochs: Shows overfitting. best validation accuracy: 78%
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(8, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '2_0':
        # L2 regularization
        # It does slow down the overfitting but validation accuracy gets stuck at ~60%
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l2())(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l2())(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu', kernel_regularizer=regularizers.l2())(x)
        x = Dense(8, activation='relu', kernel_regularizer=regularizers.l2())(x)
        x = Dense(4, activation='softmax', kernel_regularizer=regularizers.l2())(x)
    elif model_id == '2_1':
        # L1 regularization
        # Accuracy of training and validation got stuck at 25%
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l1())(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l1())(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu', kernel_regularizer=regularizers.l1())(x)
        x = Dense(8, activation='relu', kernel_regularizer=regularizers.l1())(x)
        x = Dense(4, activation='softmax', kernel_regularizer=regularizers.l1())(x)
    elif model_id == '3_0':
        # Have dropout
        # No overfitting. training loss was still decreasing. train acc: 70%, val_acc: 75%
        # Need more epochs
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '3_1':
        # Batch normalization
        # Could not prevent from overfitting. Train acc: 93% val acc 70%
        x = Conv2D(4, 5, strides=(4, 4), padding='same')(inputs)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Flatten()(x)
        x = Dense(16)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(8)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '3_2':
        # Batch normalization + Dropout
        # Faster convergence. Has overvitting. train acc 82% val acc 66%
        # NOTE(review): unlike '3_1' there is no second Conv2D here — the two
        # Activation/BatchNorm/Dropout stacks follow a single conv. Looks like
        # an accidental omission; confirm before reusing this variant.
        x = Conv2D(4, 5, strides=(4, 4), padding='same')(inputs)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16)(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Dense(8)(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '10_0':
        # 3_0 with more epochs
        # No overfitting. train acc: 70%, val_acc: 75%
        # It gets hard to get more gains beyond it
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        # x = Dense(8, activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_0':
        # Reducing the stride on conv layers
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        # x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_1':
        # 20_0 with dropout
        # Achieves 88% val accuracy in ~100 epochs
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_2':
        # Increase model complexity with Dropout
        # 88% val_acc in 80 epochs
        # 95% val_acc in 200 epochs
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_3':
        # Reduce the kernel size from 5 to 3
        # val acc is lower than with kernel 5
        x = Conv2D(4, 3, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 3, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 3, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    # --------------------------------------
    # Just Focus Here
    # --------------------------------------
    elif model_id == '20_4':
        # 20_2 with BatchNorm for faster convergence
        # Gives 97% accuracy. Model saved as model_20_4_e1000.h5
        # In Conv2d -> 2D Convolution Layer, This layer creates a convolution kernel that is
        # convolved with the layer input to produce a tensor of outputs
        # 1st Argument, Filters -> The number of output channels i.e. 16
        # 2nd Argument, Kernel Size -> 5, always keep it odd for better performance
        # 3rd Argument, Strides -> (2, 2), Look into doc for better understanding
        # 4th Argument, Padding -> Same, Look into doc for better understanding
        # 5th Argument, Activation -> Relu activation function, Rectified Linear Unit
        # Batch normalization layer -> Normalize the activations of the previous layer at each batch
        # applies a transformation that maintains the mean activation close to 0 and
        # the activation standard deviation close to 1. Detailed explaination in Google Doc
        # Dropout is a technique used to prevent a model from overfitting.
        # --------------------- Feature Learning Starts ---------------------------
        # Input Layer
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        # I Think this is by mistake written twice by the author repeated twice!
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        # --------------------- Feature Learning Ends ---------------------------
        # --------------------- Classification Starts ---------------------------
        # Pooling
        x = Flatten()(x)
        # In our neural network, we are using 3 Hidden layers of 32, 16 and 8 dimension.
        # The Dense is used to specify the fully connected layer.
        # The arguments of Dense are output dimension which are 32
        # First Hidden Layer
        x = Dense(32, activation='relu')(x)
        x = Dropout(0.2)(x)
        # Second Hidden Layer
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        # Third Hidden Layer
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        # Output Layer
        # The output Layer for the case of multiclass classification takes softmax as activation function.
        x = Dense(4, activation='softmax')(x)
        # --------------------- Classification Ends ---------------------------
    elif model_id == '100_0':
        # A low capacity model
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_1':
        # A low capacity model with dropout to show that capacity isn't enough
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_2':
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(8, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_3':
        # 100_2 with Dropout
        # NOTE(review): this placeholder leaves `x` unassigned, so selecting
        # '100_3' raises at `outputs = x` below — use '20_2' instead.
        pass # Same as 20_2
    elif model_id == '100_4':
        # 100_3 with BatchNormaliation
        # 20_2 with BatchNorm for faster convergence
        # Gives 97% accuracy. Model saved as model_20_4_e1000.h5
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id in ('100_5_0', '100_5_1', '100_5_2'):
        # Effect of dropout amount
        if model_id == '100_5_0':
            dropout = 0.1
        elif model_id == '100_5_1':
            dropout = 0.2
        elif model_id == '100_5_2':
            dropout = 0.3
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id in ('100_6_0', '100_6_1', '100_6_2', '100_6_3'):
        dropout = 0.2
        # Effect of optimizers
        if model_id == '100_6_0':
            opt = Adam()
        elif model_id == '100_6_1':
            opt = Adadelta()
        elif model_id == '100_6_2':
            opt = Adagrad()
        elif model_id == '100_6_3':
            opt = RMSprop()
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
        # This branch compiles with its selected optimizer and returns early,
        # bypassing the shared RMSprop compile at the bottom of the function.
        outputs = x
        m = Model(inputs=inputs, outputs=outputs)
        print(m.summary())
        m.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
        return m
    elif model_id in ('100_7_0', '100_7_1', '100_7_2'):
        # Effect of activation function
        dropout = 0.2
        if model_id == '100_7_0':
            act_fn = 'sigmoid'
        elif model_id == '100_7_1':
            act_fn = 'tanh'
        elif model_id == '100_7_2':
            act_fn = 'relu'
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation=act_fn)(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id in ('100_8_0', '100_8_1'):
        # Effect of Conv filter size
        dropout = 0.2
        act_fn = 'relu'
        if model_id == '100_8_0':
            filter_size = 3 # 3x3
        elif model_id == '100_8_1':
            filter_size = 5 # 5x5
        x = Conv2D(16, filter_size, strides=(2, 2), padding='same', activation=act_fn)(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_9_0':
        # This could be the best model based on hyperparameters experimentation
        # Nope: overfits slightly faster than validation loss
        dropout = 0.1
        act_fn = 'tanh'
        filter_size = 5
        opt = Adam()
        x = Conv2D(16, filter_size, strides=(2, 2), padding='same', activation=act_fn)(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
        # Early return: this branch compiles with Adam instead of RMSprop.
        outputs = x
        m = Model(inputs=inputs, outputs=outputs)
        print(m.summary())
        m.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
        return m
    outputs = x
    m = Model(inputs=inputs, outputs=outputs)
    print(m.summary())
    # RMS Prop is an optimizer (Root Mean Square).
    # Optimizers are algorithms or methods used to change the attributes
    # of your neural network such as weights and learning rate in order to reduce the losses
    opt = RMSprop()
    # Categorical_crossentropy -> specifies that we have multiple classes
    # Metrics -> used to specify the way we want to judge the performance of our neural network, via accuracy in out case
    m.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return m
def main():
    """Entry point: train the configured model(s) or evaluate a saved checkpoint.

    Flip ``train_flag`` to switch between training every id in ``model_list``
    and evaluating a previously saved ``20_4`` checkpoint on all three splits.
    """
    batch_size = 128
    # epochs = 1000
    epochs = 10
    # model_list = ['100_4']
    model_list = ['20_4']

    # Directory where model checkpoints (.h5 files) are written and read.
    resource_dir = os.path.join(this_path, 'resources')
    os.makedirs(resource_dir, exist_ok=True)

    try:
        data = Data(batch_size)
    except Data.DataInitError as e:
        print('Failed to initialize Data instance.\n{:s}'.format(str(e)))
        return

    # True -> train; False -> evaluate a saved model.
    train_flag = True
    if train_flag:
        for model_id in model_list:
            # Output file for this model's best weights.
            model_path = os.path.join(resource_dir, model_id + '_model.h5')
            # Snapshot the model whenever validation loss improves, so a crash
            # or early stop still leaves the best weights on disk.
            cb_save = ModelCheckpoint(model_path, monitor='val_loss', verbose=0, save_best_only=True)
            m = get_model(data.out_ht, data.out_wd, model_id)
            # Train batch-by-batch from the Python generators supplied by Data.
            # steps_per_epoch is ceil(num_samples / batch_size).
            m.fit_generator(data.get_batch('TRAIN'),
                            steps_per_epoch=data.steps_per_epoch,
                            epochs=epochs,
                            validation_data=data.get_batch('VALIDATION'),
                            validation_steps=data.validation_steps,
                            shuffle=False,
                            callbacks=[cb_save])
    else:
        print("Inside Testing ^_^")
        # Load the saved 20_4 checkpoint and report its performance.
        model_path = os.path.join(resource_dir, '20_4_model.h5')
        m = load_model(model_path)
        # evaluate_generator predicts on the generator's inputs and compares
        # against its labels, returning [loss, accuracy] per split.
        eval_out = m.evaluate_generator(data.get_batch('TRAIN'),
                                        steps=data.test_steps)
        print('Train error: ', eval_out)
        eval_out = m.evaluate_generator(data.get_batch('VALIDATION'),
                                        steps=data.test_steps)
        print('Validation error: ', eval_out)
        eval_out = m.evaluate_generator(data.get_batch('TEST'),
                                        steps=data.test_steps)
        print('Test error: ', eval_out)
# Script entry point: run training/evaluation only when executed directly.
if __name__ == '__main__':
    main()
| """
Code Flow
1 -> Main method is called, main()
2 -> Inside main method, data object are initialised. Head to Data Constructor(Data.py) to know more(Just Constructor)
3 -> A flag is used to test or train
4 -> In Training, A checkpoint is created to save the progress of the model
5 -> Then the model is defined using height, width of image of dimension height * weight * 3, 3 -> RGB
6 -> Note we are using 20_4 model so just head to this model
7 -> First Feature learning is done using consecutive steps of Conv2d, Batch Normalization and Dropout
8 -> After Feature Learning Classification is done and layers are added like input, hidden and output
9 -> Then the Loss Function is Defined RMSprop and cross entropy
10 -> Now the model is trained using fit_generator method, batch by batch
11 -> In testing all the results are evaluated using evaluate_generator method, gives out a measure of performance (accuracy)
"""
import os
from datetime import datetime
import numpy as np
import tensorflow as tf
my_random_seed = 1337
np.random.seed(my_random_seed)
tf.random.set_seed(my_random_seed)
# tf.set_random_seed(my_random_seed)
# Intentsionally added step to avoid tensorflow Error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Model, load_model
from keras.layers import Input, Conv2D, Dense, Flatten, Dropout, BatchNormalization, Activation
from keras.optimizers import Adam, Adadelta, Adagrad, RMSprop
from keras.callbacks import ModelCheckpoint
from keras import regularizers
from data import Data
# this_path = os.path.dirname(os.path.abspath(__file__))
this_path = os.path.abspath('')
def get_model(out_ht, out_wd, model_id):
    """Build and compile a Keras CNN classifier for (out_ht, out_wd, 3) RGB input.

    Each ``model_id`` string selects one of the architectures tried during the
    hyperparameter experiments; every variant ends in a 4-way softmax.
    Returns a compiled ``keras.Model``.  Most branches fall through to the
    common RMSprop + categorical-crossentropy compile at the bottom; the
    '100_6_*' and '100_9_0' branches compile with their own optimizer and
    return early.

    NOTE(review): an unknown ``model_id`` leaves ``x`` unbound and raises
    NameError at the ``outputs = x`` line — callers must pass a known id.
    """
    inputs = Input(shape=(out_ht, out_wd, 3))
    # Input is used to instantiate a Keras tensor. A Keras tensor is a tensor object from the underlying
    # backend (TensorFlow in out case), which we augment with certain attributes that allow us to build a
    # Keras model just by knowing the inputs and outputs of the model.
    # shape => height/2 , width/2, 3  Here 3 -> RGB
    # Note -> Since we are using 20_4 model for use, directly head to case where model_id = 20_4, line_no: 221
    if model_id == '0':
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        x = Dense(128, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '1':
        # Ran for 100 epochs: Shows overfitting. best validation accuracy: 78%
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(8, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '2_0':
        # L2 regularization
        # It does slow down the overfitting but validation accuracy gets stuck at ~60%
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l2())(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l2())(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu', kernel_regularizer=regularizers.l2())(x)
        x = Dense(8, activation='relu', kernel_regularizer=regularizers.l2())(x)
        x = Dense(4, activation='softmax', kernel_regularizer=regularizers.l2())(x)
    elif model_id == '2_1':
        # L1 regularization
        # Accuracy of training and validation got stuck at 25%
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l1())(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu', kernel_regularizer=regularizers.l1())(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu', kernel_regularizer=regularizers.l1())(x)
        x = Dense(8, activation='relu', kernel_regularizer=regularizers.l1())(x)
        x = Dense(4, activation='softmax', kernel_regularizer=regularizers.l1())(x)
    elif model_id == '3_0':
        # Have dropout
        # No overfitting. training loss was still decreasing. train acc: 70%, val_acc: 75%
        # Need more epochs
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '3_1':
        # Batch normalization
        # Could not prevent from overfitting. Train acc: 93% val acc 70%
        x = Conv2D(4, 5, strides=(4, 4), padding='same')(inputs)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Flatten()(x)
        x = Dense(16)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(8)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '3_2':
        # Batch normalization + Dropout
        # Faster convergence. Has overvitting. train acc 82% val acc 66%
        # NOTE(review): unlike '3_1', this branch has only ONE Conv2D — the
        # second Activation/BN/Dropout stack is applied without a conv in
        # between.  Preserved as-is; confirm against the original experiment.
        x = Conv2D(4, 5, strides=(4, 4), padding='same')(inputs)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16)(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Dense(8)(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '10_0':
        # 3_0 with more epochs
        # No overfitting. train acc: 70%, val_acc: 75%
        # It gets hard to get more gains beyond it
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        # x = Dense(8, activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_0':
        # Reducing the stride on conv layers
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        # x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        # x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_1':
        # 20_0 with dropout
        # Achieves 88% val accuracy in ~100 epochs
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_2':
        # Increase model complexity with Dropout
        # 88% val_acc in 80 epochs
        # 95% val_acc in 200 epochs
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '20_3':
        # Reduce the kernel size from 5 to 3
        # val acc is lower than with kernel 5
        x = Conv2D(4, 3, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 3, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 3, strides=(2, 2), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    # --------------------------------------
    # Just Focus Here
    # --------------------------------------
    elif model_id == '20_4':
        # 20_2 with BatchNorm for faster convergence
        # Gives 97% accuracy. Model saved as model_20_4_e1000.h5
        # In Conv2d -> 2D Convolution Layer, This layer creates a convolution kernel that is
        # convolved with the layer input to produce a tensor of outputs
        # 1st Argument, Filters -> The number of output channels i.e. 16
        # 2nd Argument, Kernel Size -> 5, always keep it odd for better performance
        # 3rd Argument, Strides -> (2, 2), Look into doc for better understanding
        # 4th Argument, Padding -> Same, Look into doc for better understanding
        # 5th Argument, Activation -> Relu activation function, Rectified Linear Unit
        # Batch normalization layer -> Normalize the activations of the previous layer at each batch
        # applies a transformation that maintains the mean activation close to 0 and
        # the activation standard deviation close to 1. Detailed explaination in Google Doc
        # Dropout is a technique used to prevent a model from overfitting.
        # --------------------- Feature Learning Starts ---------------------------
        # Input Layer
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        # I Think this is by mistake written twice by the author repeated twice!
        # (kept byte-identical so the saved model_20_4_e1000.h5 weights still load)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        # --------------------- Feature Learning Ends ---------------------------
        # --------------------- Classification Starts ---------------------------
        # Pooling
        x = Flatten()(x)
        # In our neural network, we are using 3 Hidden layers of 32, 16 and 8 dimension.
        # The Dense is used to specify the fully connected layer.
        # The arguments of Dense are output dimension which are 32
        # First Hidden Layer
        x = Dense(32, activation='relu')(x)
        x = Dropout(0.2)(x)
        # Second Hidden Layer
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        # Third Hidden Layer
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        # Output Layer
        # The output Layer for the case of multiclass classification takes softmax as activation function.
        x = Dense(4, activation='softmax')(x)
        # --------------------- Classification Ends ---------------------------
    elif model_id == '100_0':
        # A low capacity model
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_1':
        # A low capacity model with dropout to show that capacity isn't enough
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(inputs)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(4, 4), padding='same', activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_2':
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(8, activation='relu')(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_3':
        # 100_2 with Dropout
        pass # Same as 20_2
    elif model_id == '100_4':
        # 100_3 with BatchNormaliation
        # 20_2 with BatchNorm for faster convergence
        # Gives 97% accuracy. Model saved as model_20_4_e1000.h5
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(0.2)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id in ('100_5_0', '100_5_1', '100_5_2'):
        # Effect of dropout amount
        if model_id == '100_5_0':
            dropout = 0.1
        elif model_id == '100_5_1':
            dropout = 0.2
        elif model_id == '100_5_2':
            dropout = 0.3
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id in ('100_6_0', '100_6_1', '100_6_2', '100_6_3'):
        dropout = 0.2
        # Effect of optimizers
        if model_id == '100_6_0':
            opt = Adam()
        elif model_id == '100_6_1':
            opt = Adadelta()
        elif model_id == '100_6_2':
            opt = Adagrad()
        elif model_id == '100_6_3':
            opt = RMSprop()
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
        # Early return: this branch compiles with its branch-specific optimizer.
        outputs = x
        m = Model(inputs=inputs, outputs=outputs)
        print(m.summary())
        m.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
        return m
    elif model_id in ('100_7_0', '100_7_1', '100_7_2'):
        # Effect of activation function
        dropout = 0.2
        if model_id == '100_7_0':
            act_fn = 'sigmoid'
        elif model_id == '100_7_1':
            act_fn = 'tanh'
        elif model_id == '100_7_2':
            act_fn = 'relu'
        x = Conv2D(16, 5, strides=(2, 2), padding='same', activation=act_fn)(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, 5, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, 5, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id in ('100_8_0', '100_8_1'):
        # Effect of Conv filter size
        dropout = 0.2
        act_fn = 'relu'
        if model_id == '100_8_0':
            filter_size = 3 # 3x3
        elif model_id == '100_8_1':
            filter_size = 5 # 5x5
        x = Conv2D(16, filter_size, strides=(2, 2), padding='same', activation=act_fn)(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
    elif model_id == '100_9_0':
        # This could be the best model based on hyperparameters experimentation
        # Nope: overfits slightly faster than validation loss
        dropout = 0.1
        act_fn = 'tanh'
        filter_size = 5
        opt = Adam()
        x = Conv2D(16, filter_size, strides=(2, 2), padding='same', activation=act_fn)(inputs)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(8, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Conv2D(4, filter_size, strides=(2, 2), padding='same', activation=act_fn)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
        x = Flatten()(x)
        x = Dense(32, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(16, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(8, activation=act_fn)(x)
        x = Dropout(dropout)(x)
        x = Dense(4, activation='softmax')(x)
        # Early return: this branch compiles with Adam instead of RMSprop.
        outputs = x
        m = Model(inputs=inputs, outputs=outputs)
        print(m.summary())
        m.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
        return m
    # Common tail for every branch that did not return early.
    outputs = x
    m = Model(inputs=inputs, outputs=outputs)
    print(m.summary())
    # RMS Prop is an optimizer (Root Mean Square).
    # Optimizers are algorithms or methods used to change the attributes
    # of your neural network such as weights and learning rate in order to reduce the losses
    opt = RMSprop()
    # Categorical_crossentropy -> specifies that we have multiple classes
    # Metrics -> used to specify the way we want to judge the performance of our neural network, via accuracy in out case
    m.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return m
def main():
    """Train or evaluate the models listed in ``model_list``.

    With ``trainFlag`` True, every model id in ``model_list`` is built via
    get_model() and trained with fit_generator, saving the best weights
    (by validation loss) to ``resources/<id>_model.h5``.  With ``trainFlag``
    False, a previously saved 20_4 model is loaded and its [loss, accuracy]
    is printed for the TRAIN / VALIDATION / TEST splits.
    """
    batch_size = 128
    # epochs = 1000
    epochs = 10
    # model_list = ['100_4']
    model_list = ['20_4']
    resource_dir = os.path.join(this_path, 'resources')
    os.makedirs(resource_dir, exist_ok=True)
    try:
        data = Data(batch_size)
    except Data.DataInitError as e:
        print('Failed to initialize Data instance.\n{:s}'.format(str(e)))
        return
    trainFlag = True
    if trainFlag:  # Training
        for model_id in model_list:
            # model_path -> file that will hold the best weights for this model
            model_path = os.path.join(resource_dir, model_id + '_model.h5')
            # Checkpoint callback: keep only the snapshot with the best val_loss,
            # so a crash mid-training does not lose the best model seen so far.
            cb_save = ModelCheckpoint(model_path, monitor='val_loss', verbose=0, save_best_only=True)
            m = get_model(data.out_ht, data.out_wd, model_id)
            # fit_generator trains the model on data produced batch-by-batch
            # by the Data generator; validation runs at the end of each epoch.
            m.fit_generator(data.get_batch('TRAIN'),
                            steps_per_epoch=data.steps_per_epoch,
                            epochs=epochs,
                            validation_data=data.get_batch('VALIDATION'),
                            validation_steps=data.validation_steps,
                            shuffle=False,
                            callbacks=[cb_save])
    else:  # Testing
        print("Inside Testing ^_^")
        model_path = os.path.join(resource_dir, '20_4_model.h5')
        m = load_model(model_path)
        # evaluate_generator predicts on each generator and compares against
        # its labels, returning [loss, accuracy] per split.
        eval_out = m.evaluate_generator(data.get_batch('TRAIN'),
                                        steps=data.test_steps)
        print('Train error: ', eval_out)
        eval_out = m.evaluate_generator(data.get_batch('VALIDATION'),
                                        steps=data.test_steps)
        print('Validation error: ', eval_out)
        eval_out = m.evaluate_generator(data.get_batch('TEST'),
                                        steps=data.test_steps)
        print('Test error: ', eval_out)
if __name__ == '__main__':
    main()
scripts/baseutil/bio/gtf_manager.py | vanya-antonov/ivanya_python_lib | 0 | 6613555 | #!/usr/bin/env python3
# $Id: gtf_manager.py 2905 2018-08-07 15:42:08Z antonov $
###
# <NAME> (<EMAIL>)
#
import sys, os, re
from pprint import pprint
from subprocess import Popen, PIPE
###
# CONSTANTS
###
# SUBROUTINES
def run(opts):
    """Dispatch the requested action from the parsed command-line options."""
    todo = opts['todo']
    if todo == 'make_long_chimerome':
        make_long_chimerome(opts['arg1'], opts)
    else:
        usage("Unknown TODO = '%s'" % todo)
def make_long_chimerome(gtf_fn, opts=None):
    """Read a sorted GTF file and print one long "chimeric" transcript per gene.

    Exon lines are grouped by gene_id -- the input must be sorted so that
    all exons of a gene are adjacent (see usage()).  Each completed group is
    merged and printed via _print_long_chimeric_trx().

    Fixes vs. the original:
      * sys.stderr.write() was called printf-style with two arguments,
        which raises TypeError -- messages are now %-formatted.
      * the result of line.rstrip() was discarded (a no-op); the trailing
        newline is now actually stripped.
      * the mutable default argument ``opts={}`` was replaced with None.
      * the sortedness check uses an explicit raise instead of ``assert``
        (asserts are stripped under ``python -O``).

    Raises ValueError if the file is not sorted by gene_id.
    NOTE: preserves the original behaviour of calling sys.exit() at the end.
    """
    keys = ['chr', 'c2', 'c3', 'start', 'end', 'c6', 'strand', 'c8', 'info']
    (cur_exons, processed_genes) = ([], set())
    gene_re = re.compile(r'gene_id\s*"(.+?)"')  # hoisted out of the loop
    with open(gtf_fn) as f:
        for line in f:
            if line == '\n':
                continue
            line = line.rstrip('\n')  # remove \n
            vals = line.split('\t')
            if len(vals) != len(keys):
                sys.stderr.write("Wrong line: '%s'" % line)
                continue
            # chr1  FANTOM6  exon  91421  91629  .  -  .  gene_id "ENSG00000225880"; transcript_id "FTMT20100027365.C1";
            exon = dict(zip(keys, vals))
            gene_mo = gene_re.search(exon['info'])
            if gene_mo is None:
                sys.stderr.write("Can't find gene_id in string '%s'" % exon['info'])
                continue
            exon['gene_id'] = gene_mo.group(1)
            if (len(cur_exons) == 0) or (cur_exons[0]['gene_id'] == exon['gene_id']):
                cur_exons.append(exon)
            else:
                # Exons of the next gene have begun: flush the current gene.
                sys.stderr.write("\rGenerating chimera for '%s' with %d exons... " % (cur_exons[0]['gene_id'], len(cur_exons)))
                _print_long_chimeric_trx(cur_exons)
                processed_genes.add(cur_exons[0]['gene_id'])
                if exon['gene_id'] in processed_genes:
                    raise ValueError("The input file is not sorted: gene '%s' is among already processed genes!" % exon['gene_id'])
                cur_exons = [exon]
    # Flush the last gene in the file.
    _print_long_chimeric_trx(cur_exons)
    sys.exit()
def _print_long_chimeric_trx(all_exons):
if( len(all_exons) == 0 ):
return
bed_txt = ''
for exon in all_exons:
# Print something like 'chr1 1018110 1018979 CATG00000000002 . +'
vals = [exon[k] for k in ['chr', 'start', 'end', 'gene_id', 'c8', 'strand']]
bed_txt += "\t".join(vals) + "\n"
# https://stackoverflow.com/a/8475367/310453
proc = Popen(['bedtools', 'merge', '-s'], stdin=PIPE, stdout=PIPE)
out_bytes = proc.communicate( bytes(bed_txt, 'utf-8') )[0]
# bed2gff
gene_id = all_exons[0]['gene_id']
info_str = 'gene_id "%s"; transcript_id "%s"' % (gene_id, gene_id)
for line in out_bytes.decode("utf-8").split("\n"):
vals = line.split()
if len(vals) == 0:
continue
elif len(vals) != 4:
sys.stderr.write("Wrong line in the bedtools output: '%s'" % line)
continue
# line = 'chr1 4873173 4873320 +'
(chrom, left, right, strand) = vals
print("\t".join([chrom, 'chimerome', 'exon', left, right, '.', strand, '.', info_str]))
def usage(msg = ''):
    """Write the help text to stderr, optionally prefixed with *msg*."""
    script = os.path.basename(sys.argv[0])
    # Interpolate explicitly instead of via locals() so the template's
    # required names are obvious at a glance.
    help_text = '''%(msg)s
DESCRIPTION:
<TODO>
* make_long_chimerome <ANNOTATION.gtf> > <CHIMEROME.gtf>
- Requirements: bedtools (v2.26.0) and gffread (0.9.9)
- The input file must be sorted by chrom, the gene_id, then start:
sort -k1,1 -k10,10 -k4,4n F6_CAT.transcript.gtf > F6_CAT.transcript.SORTED.gtf
USAGE:
%(script)s [OPTIONS] <TODO> <ARG1> <ARG2> ...
OPTIONS:
--silent\n''' % {'msg': msg, 'script': script}
    sys.stderr.write(help_text)
###
# Parse command line arguments
# Show the help text and bail out when no TODO action was supplied.
if len(sys.argv) < 2:
    usage()
    sys.exit()
###
#my $START_TIME = time;
run({
    'todo' : sys.argv[1],
    # BUG FIX: the original guard was ``len(sys.argv) >= 2``, which made
    # ``sys.argv[2]`` raise IndexError whenever only the TODO was given.
    'arg1' : sys.argv[2] if len(sys.argv) > 2 else '',
});
#warn "\nElapsed time: ".(time-$START_TIME)." sec\n" if !$SILENT;
###
| #!/usr/bin/env python3
# $Id: gtf_manager.py 2905 2018-08-07 15:42:08Z antonov $
###
# <NAME> (<EMAIL>)
#
import sys, os, re
from pprint import pprint
from subprocess import Popen, PIPE
###
# CONSTANTS
###
# SUBROUTINES
def run(opts):
if opts['todo'] == 'make_long_chimerome':
make_long_chimerome(opts['arg1'], opts)
else:
usage("Unknown TODO = '%s'" % opts['todo'])
def make_long_chimerome(gtf_fn, opts={}):
keys = ['chr', 'c2', 'c3', 'start', 'end', 'c6', 'strand', 'c8', 'info']
(cur_exons, processed_genes) = ([], {})
with open(gtf_fn) as f:
for line in f:
if line == '\n':
continue
line.rstrip() # remove \n
vals = line.split('\t')
if len(vals) != len(keys):
sys.stderr.write("Wrong line: '%s'", line)
continue
# chr1 FANTOM6 exon 91421 91629 . - . gene_id "ENSG00000225880"; transcript_id "FTMT20100027365.C1";
exon = dict(zip(keys, vals))
gene_mo = re.compile(r'gene_id\s*"(.+?)"').search(exon['info'])
if gene_mo == None:
sys.stderr.write("Can't find gene_id in string '%s'", exon['info'])
continue
exon['gene_id'] = gene_mo.group(1)
if (len(cur_exons) == 0) or (cur_exons[0]['gene_id'] == exon['gene_id']):
cur_exons.append( exon )
else:
# Exons of the next gene have begun
sys.stderr.write("\rGenerating chimera for '%s' with %d exons... " % (cur_exons[0]['gene_id'], len(cur_exons)) )
_print_long_chimeric_trx(cur_exons)
processed_genes[ cur_exons[0]['gene_id'] ] = 1
assert exon['gene_id'] not in processed_genes.keys(), "The input file is not sorted: gene '%s' is among already processed genes!" % exon['gene_id']
cur_exons = [ exon ]
# END: for line in f
_print_long_chimeric_trx(cur_exons)
# END: with open(gtf_fn) as f
sys.exit()
def _print_long_chimeric_trx(all_exons):
    """Merge one gene's exons with 'bedtools merge -s' and print GTF exon lines."""
    if not all_exons:
        return
    # Build BED-like input lines: 'chr start end gene_id score strand'.
    bed_lines = []
    for exon in all_exons:
        bed_lines.append("\t".join(exon[k] for k in ('chr', 'start', 'end', 'gene_id', 'c8', 'strand')))
    bed_txt = "\n".join(bed_lines) + "\n"
    # https://stackoverflow.com/a/8475367/310453
    proc = Popen(['bedtools', 'merge', '-s'], stdin=PIPE, stdout=PIPE)
    merged_bytes = proc.communicate(bed_txt.encode('utf-8'))[0]
    # bed2gff: all merged intervals share the gene id as transcript id.
    gene_id = all_exons[0]['gene_id']
    info_str = 'gene_id "%s"; transcript_id "%s"' % (gene_id, gene_id)
    for merged_line in merged_bytes.decode("utf-8").split("\n"):
        fields = merged_line.split()
        if not fields:
            continue
        if len(fields) != 4:
            sys.stderr.write("Wrong line in the bedtools output: '%s'" % merged_line)
            continue
        # merged_line looks like 'chr1 4873173 4873320 +'
        chrom, left, right, strand = fields
        print("\t".join([chrom, 'chimerome', 'exon', left, right, '.', strand, '.', info_str]))
def usage(msg=''):
    """Print the help text to stderr.

    msg -- optional error message shown above the usage summary.
    """
    script = os.path.basename(sys.argv[0])
    # Format with an explicit mapping instead of the fragile '% locals()'
    # trick, which silently couples the template to local variable names.
    sys.stderr.write('''%(msg)s
DESCRIPTION:
<TODO>
* make_long_chimerome <ANNOTATION.gtf> > <CHIMEROME.gtf>
- Requirements: bedtools (v2.26.0) and gffread (0.9.9)
- The input file must be sorted by chrom, the gene_id, then start:
sort -k1,1 -k10,10 -k4,4n F6_CAT.transcript.gtf > F6_CAT.transcript.SORTED.gtf
USAGE:
%(script)s [OPTIONS] <TODO> <ARG1> <ARG2> ...
OPTIONS:
--silent\n''' % {'msg': msg, 'script': script})
###
# Parse command line arguments
if len(sys.argv) < 2:
    usage()
    sys.exit()
###
run({
    'todo': sys.argv[1],
    # BUG FIX: sys.argv[2] only exists when len(sys.argv) >= 3; the old
    # 'len(sys.argv) >= 2' test raised IndexError when ARG1 was omitted.
    'arg1': sys.argv[2] if len(sys.argv) >= 3 else '',
})
###
| en | 0.2587 | #!/usr/bin/env python3 # $Id: gtf_manager.py 2905 2018-08-07 15:42:08Z antonov $ ### # <NAME> (<EMAIL>) # ### # CONSTANTS ### # SUBROUTINES # remove \n # chr1 FANTOM6 exon 91421 91629 . - . gene_id "ENSG00000225880"; transcript_id "FTMT20100027365.C1"; # Exons of the next gene have begun # END: for line in f # END: with open(gtf_fn) as f # Print something like 'chr1 1018110 1018979 CATG00000000002 . +' # https://stackoverflow.com/a/8475367/310453 # bed2gff # line = 'chr1 4873173 4873320 +' %(msg)s DESCRIPTION: <TODO> * make_long_chimerome <ANNOTATION.gtf> > <CHIMEROME.gtf> - Requirements: bedtools (v2.26.0) and gffread (0.9.9) - The input file must be sorted by chrom, the gene_id, then start: sort -k1,1 -k10,10 -k4,4n F6_CAT.transcript.gtf > F6_CAT.transcript.SORTED.gtf USAGE: %(script)s [OPTIONS] <TODO> <ARG1> <ARG2> ... OPTIONS: --silent\n ### # Parse command line arguments ### #my $START_TIME = time; #warn "\nElapsed time: ".(time-$START_TIME)." sec\n" if !$SILENT; ### | 2.406411 | 2 |
Module2/assignment4.py | jatraug/Dataclass | 0 | 6613556 | import pandas as pd
# TODO: Load up the table, and extract the dataset
# out of it. If you're having issues with this, look
# carefully at the sample code provided in the reading
#
# .. your code here ..
df = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2', header=None)[0]
##print(df.dtype)
# TODO: Rename the columns so that they are similar to the
# column definitions provided to you on the website.
# Be careful and don't accidentially use any names twice.
#
# .. your code here ..
df.columns= ['RK', 'PLAYER', 'TEAM', 'GP', 'G', 'A', 'PTS', '+/-', 'PIM', 'PTS/G', 'SOG', 'PCT', 'GWG', 'PPG', 'PPA', 'SHG', 'SHA']
#print(df)
df1 = df.drop(df.index[[0,1]])
print(df1)
# TODO: Get rid of any row that has at least 4 NANs in it,
# e.g. that do not contain player points statistics
#
# .. your code here ..
df2 = df1.dropna(axis=0, thresh=4)
print(df2)
# TODO: At this point, look through your dataset by printing
# it. There probably still are some erroneous rows in there.
# What indexing command(s) can you use to select all rows
# EXCEPT those rows?
#
# .. your code here ..
print(df2['RK'])
#print(df2.loc['RK'])
#iris.ix[iris['sepal length (cm)'] >= 5]
df3 = df2[df2.RK != 'RK']
print(df3)
df4 = df3.drop(labels=['RK'],axis=1)
print(df4)
# TODO: Get rid of the 'RK' column
#
# .. your code here ..
# TODO: Ensure there are no holes in your index by resetting
# it. By the way, don't store the original index
#
# .. your code here ..
df4 = df4.reset_index()
print(df4)
# TODO: Check the data type of all columns, and ensure those
# that should be numeric are numeric
#
# .. your code here ..
print (df4.dtypes)
# TODO: Your dataframe is now ready! Use the appropriate
# commands to answer the questions on the course lab page.
#
# .. your code here ..
print(df4.PCT.unique())
| import pandas as pd
# Load up the table from ESPN and extract the dataset (first table on the page).
df = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2', header=None)[0]

# Rename the columns so that they match the column definitions on the website.
df.columns = ['RK', 'PLAYER', 'TEAM', 'GP', 'G', 'A', 'PTS', '+/-', 'PIM', 'PTS/G', 'SOG', 'PCT', 'GWG', 'PPG', 'PPA', 'SHG', 'SHA']

# The first two rows are scraped header junk, not player data.
df1 = df.drop(df.index[[0, 1]])
print(df1)

# Drop rows that do not contain player points statistics.
# NOTE(review): thresh=4 keeps rows with >= 4 non-NaN values (it does not
# literally mean "drop rows with >= 4 NaNs") -- confirm against the lab spec.
df2 = df1.dropna(axis=0, thresh=4)
print(df2)

# The table repeats its header every screenful: those rows carry the literal
# string 'RK' in the RK column, so select every row EXCEPT them.
print(df2['RK'])
df3 = df2[df2.RK != 'RK']
print(df3)

# Get rid of the 'RK' column.
df4 = df3.drop(labels=['RK'], axis=1)
print(df4)

# Ensure there are no holes in the index.  BUG FIX: the plain reset_index()
# stored the original index as a new 'index' column, contradicting the
# requirement "don't store the original index"; drop=True discards it.
df4 = df4.reset_index(drop=True)
print(df4)

# Ensure the statistic columns (everything except PLAYER and TEAM) are
# numeric; the scrape delivers them as strings.
numeric_cols = df4.columns.drop(['PLAYER', 'TEAM'])
df4[numeric_cols] = df4[numeric_cols].apply(pd.to_numeric, errors='coerce')
print(df4.dtypes)

# The dataframe is now ready; e.g. inspect the distinct shooting percentages.
print(df4.PCT.unique())
| en | 0.861598 | # TODO: Load up the table, and extract the dataset # out of it. If you're having issues with this, look # carefully at the sample code provided in the reading # # .. your code here .. ##print(df.dtype) # TODO: Rename the columns so that they are similar to the # column definitions provided to you on the website. # Be careful and don't accidentially use any names twice. # # .. your code here .. #print(df) # TODO: Get rid of any row that has at least 4 NANs in it, # e.g. that do not contain player points statistics # # .. your code here .. # TODO: At this point, look through your dataset by printing # it. There probably still are some erroneous rows in there. # What indexing command(s) can you use to select all rows # EXCEPT those rows? # # .. your code here .. #print(df2.loc['RK']) #iris.ix[iris['sepal length (cm)'] >= 5] # TODO: Get rid of the 'RK' column # # .. your code here .. # TODO: Ensure there are no holes in your index by resetting # it. By the way, don't store the original index # # .. your code here .. # TODO: Check the data type of all columns, and ensure those # that should be numeric are numeric # # .. your code here .. # TODO: Your dataframe is now ready! Use the appropriate # commands to answer the questions on the course lab page. # # .. your code here .. | 3.390272 | 3 |
time_util/time_util.py | fenglwh/timeutil | 0 | 6613557 | from _typeshed import StrPath
import datetime
class Period():
def __init__(self,start,stop):
if (isinstance(start,datetime.datetime) and isinstance(stop,datetime.datetime)) or (isinstance(start,datetime.date) and isinstance(stop,datetime.datet)):
self.start = start
self.stop = stop
else:
raise Exception("The start and stop parameter is not datetime or date instance")
def as_datetime(self):
if isinstance(self.start,datetime.date):
self.start = datetime.datetime(self.start)
if isinstance(self.stop,datetime.date):
self.stop = datetime.datetime(self.stop)+datetime.timedelta(hours=23,minutes=59,seconds=59,milliseconds=59,microseconds=59)
return self
def as_date(self):
if isinstance(self.start,datetime.datetime):
self.start = datetime.date(self.start)
if isinstance(self.stop,datetime.datetime):
self.stop = datetime.date(self.stop)
return self
def by_year(year):
first_day = datetime.date(year=year)
return first_day, first_day+datetime.timedelta(year=1)-datetime.timedelta(days=1)
def by_month(year,month):
first_day = datetime.date(year=year,month=month)
return first_day, first_day+datetime.timedelta(month=1)-datetime.timedelta(days=1)
def by_week(year,week):
first_day =datetime.date(year=year)
time_start= first_day+datetime.timedelta(weeks=(week-1),days=1)
time_stop=first_day+datetime.timedelta(6)
return time_start, time_stop
def yestoday(current=datetime.date.today()):
return current-datetime.timedelta(days=1)
def last_week(current):
today=datetime.date.today()
return today-datetime.timedelta(days=7), today-datetime.timedelta(days=7)
def this_week():
pass
def next_week():
pass
def last_month():
pass
def last_year():
pass
def tomorrow():
return datetime.date.today()+datetime.timedelta(days=1)
def the_day_next_tomorrow():
return datetime.date.today()+datetime.timedelta(days=2) | from _typeshed import StrPath
import datetime
class Period():
    """An inclusive [start, stop] time interval.

    Both endpoints must be datetime.date or datetime.datetime instances
    (note that datetime.datetime is a subclass of datetime.date).
    """

    def __init__(self, start, stop):
        # BUG FIX: the original validation referenced the misspelled
        # attribute 'datetime.datet', so constructing a Period from two
        # plain dates always raised AttributeError.
        if isinstance(start, datetime.date) and isinstance(stop, datetime.date):
            self.start = start
            self.stop = stop
        else:
            raise Exception("The start and stop parameter is not datetime or date instance")

    def as_datetime(self):
        """Normalise both endpoints to datetimes, expanding a date *stop* to end-of-day."""
        # BUG FIX: datetime.datetime(date_obj) is not a valid constructor call;
        # use datetime.combine() instead.
        if not isinstance(self.start, datetime.datetime):
            self.start = datetime.datetime.combine(self.start, datetime.time.min)
        if not isinstance(self.stop, datetime.datetime):
            # End of day (23:59:59.999999); the original tried to add an
            # odd 23:59:59.059059 offset via timedelta.
            self.stop = datetime.datetime.combine(self.stop, datetime.time.max)
        return self

    def as_date(self):
        """Normalise both endpoints to plain dates (dropping time-of-day)."""
        # BUG FIX: datetime.date(datetime_obj) is not a valid constructor
        # call; use the .date() accessor.
        if isinstance(self.start, datetime.datetime):
            self.start = self.start.date()
        if isinstance(self.stop, datetime.datetime):
            self.stop = self.stop.date()
        return self
def by_year(year):
    """Return (first_day, last_day) date objects covering calendar *year*."""
    # BUG FIX: datetime.date() requires year, month AND day, and
    # timedelta has no 'year' argument -- the original raised TypeError.
    first_day = datetime.date(year, 1, 1)
    last_day = datetime.date(year + 1, 1, 1) - datetime.timedelta(days=1)
    return first_day, last_day
def by_month(year, month):
    """Return (first_day, last_day) date objects covering *month* of *year*."""
    # BUG FIX: datetime.date() requires a day and timedelta has no 'month'
    # argument -- compute the last day from the first day of the next month.
    first_day = datetime.date(year, month, 1)
    if month == 12:
        next_month = datetime.date(year + 1, 1, 1)
    else:
        next_month = datetime.date(year, month + 1, 1)
    return first_day, next_month - datetime.timedelta(days=1)
def by_week(year, week):
    """Return (start, stop) dates of the *week*-th 7-day window of *year*.

    Preserves the original convention of starting week 1 on Jan 2
    (Jan 1 + 1 day) -- TODO confirm this offset is intended.
    """
    # BUG FIX: datetime.date() requires month and day.
    first_day = datetime.date(year, 1, 1)
    time_start = first_day + datetime.timedelta(weeks=week - 1, days=1)
    # BUG FIX: the stop was anchored to Jan 1 instead of the week's start,
    # so every week after the first ended before it began.
    time_stop = time_start + datetime.timedelta(days=6)
    return time_start, time_stop
def yestoday(current=None):
    """Return the day before *current* (default: today).

    BUG FIX: the old default 'current=datetime.date.today()' was evaluated
    once at import time, so long-running processes got a stale 'today'.
    """
    if current is None:
        current = datetime.date.today()
    return current - datetime.timedelta(days=1)
def last_week(current=None):
    """Return (monday, sunday) of the calendar week before *current*.

    BUG FIX: the original ignored its argument and returned the same date
    (today - 7 days) twice, which is not a usable interval.  This version
    returns the Monday-to-Sunday span of the previous week -- TODO confirm
    the intended week convention.
    """
    if current is None:
        current = datetime.date.today()
    start = current - datetime.timedelta(days=current.weekday() + 7)
    return start, start + datetime.timedelta(days=6)
def this_week():
    # TODO: not implemented -- presumably should return the (start, stop)
    # span of the current week; verify the intended convention.
    pass

def next_week():
    # TODO: not implemented -- presumably the week after the current one.
    pass

def last_month():
    # TODO: not implemented -- presumably (start, stop) of the previous
    # calendar month.
    pass

def last_year():
    # TODO: not implemented -- presumably (start, stop) of the previous
    # calendar year.
    pass
def tomorrow():
    """Return tomorrow's date (today plus one day)."""
    one_day = datetime.timedelta(days=1)
    return datetime.date.today() + one_day
def the_day_next_tomorrow():
return datetime.date.today()+datetime.timedelta(days=2) | none | 1 | 3.46467 | 3 | |
aws_lambda_typing/responses/s3_batch.py | curekoshimizu/aws-lambda-typing | 0 | 6613558 | #!/usr/bin/env python
import typing
class S3BatchResponseResult(typing.TypedDict):
"""
S3BatchRequestTask
Attributes:
----------
taskId: str
resultCode: str
resultString: str
"""
taskId: str
resultCode: str
resultString: str
class S3BatchResponse(typing.TypedDict):
"""
S3BatchResponse https://docs.aws.amazon.com/lambda/latest/dg/services-s3-batch.html
Attributes:
----------
invocationSchemaVersion: str
treatMissingKeysAs: typing.Literal['Succeeded', 'TemporaryFailure', 'PermanentFailure']
invocationId: str
results: typing.List[:py:class:`S3BatchResponseResult`]
"""
invocationSchemaVersion: str
treatMissingKeysAs: typing.Literal['Succeeded', 'TemporaryFailure', 'PermanentFailure']
invocationId: str
results: typing.List[S3BatchResponseResult]
| #!/usr/bin/env python
import typing
class S3BatchResponseResult(typing.TypedDict):
    """
    S3BatchResponseResult

    Per-task result entry reported back to S3 Batch Operations.
    (The original docstring mislabelled this type as 'S3BatchRequestTask'.)

    Attributes:
    ----------
    taskId: str
        Identifier of the task this result corresponds to.
    resultCode: str
        Outcome of the task -- presumably one of 'Succeeded',
        'TemporaryFailure' or 'PermanentFailure'; confirm against AWS docs.
    resultString: str
        Free-form message recorded in the job report.
    """

    taskId: str
    resultCode: str
    resultString: str
class S3BatchResponse(typing.TypedDict):
    """
    Response a Lambda function returns to S3 Batch Operations.
    S3BatchResponse https://docs.aws.amazon.com/lambda/latest/dg/services-s3-batch.html

    Attributes:
    ----------
    invocationSchemaVersion: str
        Schema version echoed from the invocation event.
    treatMissingKeysAs: typing.Literal['Succeeded', 'TemporaryFailure', 'PermanentFailure']
        Result code assumed for tasks missing from `results`.
    invocationId: str
        Invocation id echoed from the invocation event.
    results: typing.List[:py:class:`S3BatchResponseResult`]
        One result entry per task in the invocation.
    """

    invocationSchemaVersion: str
    treatMissingKeysAs: typing.Literal['Succeeded', 'TemporaryFailure', 'PermanentFailure']
    invocationId: str
    results: typing.List[S3BatchResponseResult]
| en | 0.391996 | #!/usr/bin/env python S3BatchRequestTask Attributes: ---------- taskId: str resultCode: str resultString: str S3BatchResponse https://docs.aws.amazon.com/lambda/latest/dg/services-s3-batch.html Attributes: ---------- invocationSchemaVersion: str treatMissingKeysAs: typing.Literal['Succeeded', 'TemporaryFailure', 'PermanentFailure'] invocationId: str results: typing.List[:py:class:`S3BatchResponseResult`] | 2.235281 | 2 |
labdrivers/attocube.py | cdoolin/labdrivers | 0 | 6613559 | import serial
#def linspace(a, b, n):
# dx = (float(b) - float(a)) / (n - 1.)
# x = a
# while n > 0:
# yield x
# x += dx
# n -= 1
def stepto(a, b, d):
d = abs(d) if b > a else -abs(d)
while abs(b - a) > abs(d):
a += d
yield a
yield b
class Attocube(object):
def __init__(self, port):
self.port = port
self.ser = serial.Serial(port, timeout=1)
if self.ask("echo off") is not '':
print("attocube controller not working")
def ask(self, q):
# give command to attocube controller and get response.
self.ser.write(q + "\r\n")
resp = ""
last = ""
while True:
last = self.ser.readline()
if last == "OK\r\n":
# message finished OK
return resp
elif last == "ERROR\r\n":
return "error"
elif last == "":
# serialport timedout
return "timeout"
else:
# record the response
resp += last
def get_offset(self, axis):
return float(self.ask("geta %d" % int(axis)).split()[2])
def step_up(self, axis):
self.ask("stepu %d" % int(axis))
def step_down(self, axis):
self.ask("stepd %d" % int(axis))
def set_offset(self, axis, volt):
self.ask("seta %d %f" % (axis, volt))
def slideto(self, axis, offset, dx=.1):
for offset in stepto(self.get_offset(axis), offset, dx):
self.set_offset(axis, offset)
if __name__ == "__main__":
port = raw_input("serial port: ")
a = Attocube(port)
import IPython
IPython.embed()
| import serial
#def linspace(a, b, n):
# dx = (float(b) - float(a)) / (n - 1.)
# x = a
# while n > 0:
# yield x
# x += dx
# n -= 1
def stepto(a, b, d):
    """Yield values stepping from *a* toward *b* in increments of |d|, ending exactly at *b*."""
    step = abs(d) if b > a else -abs(d)
    current = a
    while abs(b - current) > abs(step):
        current += step
        yield current
    yield b
class Attocube(object):
    """Minimal serial-port driver for an attocube positioner controller.

    NOTE(review): uses text-mode serial writes and raw_input elsewhere in
    the file, so this module targets Python 2 with pyserial.
    """

    def __init__(self, port):
        self.port = port
        self.ser = serial.Serial(port, timeout=1)
        # Disable command echo; an empty response means the controller
        # acknowledged.  BUG FIX: the original used "is not ''", which
        # compares identity rather than equality and only worked by
        # accident of string interning.
        if self.ask("echo off") != '':
            print("attocube controller not working")

    def ask(self, q):
        """Send command *q* to the controller and return its response text.

        Returns the accumulated response lines, or the sentinel strings
        'error' (controller reported ERROR) / 'timeout' (serial read
        timed out).
        """
        self.ser.write(q + "\r\n")
        resp = ""
        while True:
            line = self.ser.readline()
            if line == "OK\r\n":
                # message finished OK
                return resp
            elif line == "ERROR\r\n":
                return "error"
            elif line == "":
                # serial port timed out
                return "timeout"
            else:
                # record the response
                resp += line

    def get_offset(self, axis):
        """Return the DC offset voltage of *axis* (parsed from 'geta' output)."""
        return float(self.ask("geta %d" % int(axis)).split()[2])

    def step_up(self, axis):
        """Issue a single step in the positive direction on *axis*."""
        self.ask("stepu %d" % int(axis))

    def step_down(self, axis):
        """Issue a single step in the negative direction on *axis*."""
        self.ask("stepd %d" % int(axis))

    def set_offset(self, axis, volt):
        """Set the DC offset voltage of *axis* to *volt*."""
        self.ask("seta %d %f" % (axis, volt))

    def slideto(self, axis, offset, dx=.1):
        """Ramp the offset voltage of *axis* smoothly to *offset* in steps of *dx*."""
        # Renamed the loop variable: the original reused 'offset', shadowing
        # the target parameter inside the loop body.
        for volt in stepto(self.get_offset(axis), offset, dx):
            self.set_offset(axis, volt)
if __name__ == "__main__":
    # NOTE(review): raw_input implies Python 2; port is e.g. 'COM3' or '/dev/ttyUSB0'.
    port = raw_input("serial port: ")
    a = Attocube(port)
    # Drop into an interactive shell with the controller bound to 'a'.
    import IPython
    IPython.embed()
| en | 0.65481 | #def linspace(a, b, n): # dx = (float(b) - float(a)) / (n - 1.) # x = a # while n > 0: # yield x # x += dx # n -= 1 # give command to attocube controller and get response. # message finished OK # serialport timedout # record the response | 3.214887 | 3 |
examples/python/estimate_matrix.py | KonScanner/transitionMatrix | 46 | 6613560 | # encoding: utf-8
# (c) 2017-2021 Open Risk, all rights reserved
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
"""
An end-to-end example of estimating a credit rating matrix from historical data using two different estimators
"""
import pprint as pp
import pandas as pd
from scipy.linalg import expm
import transitionMatrix as tm
from transitionMatrix.estimators.aalen_johansen_estimator import AalenJohansenEstimator
from transitionMatrix.estimators.cohort_estimator import CohortEstimator
from transitionMatrix.statespaces.statespace import StateSpace
from transitionMatrix.utils import transitions_summary
from transitionMatrix.utils.converters import to_canonical
# Load the data into a pandas frame
input_data = pd.read_csv('../../datasets/rating_data.csv')
print('> Transitions Summary Input Data')
pp.pprint(transitions_summary(input_data))
# Infer and describe state space
myState = StateSpace(transition_data=input_data)
myState.describe()
print('> The order of states is not important for estimation but it is important for presentation!')
# Convert format to canonical form
canonical_data = to_canonical(input_data)
# Group the data into temporal cohorts
print(80 * '=')
cohort_data, cohort_intervals = tm.utils.bin_timestamps(input_data, cohorts=5, remove_stale=True)
print('Intervals : ', cohort_intervals)
print('> Transitions Summary Cohorted Data')
pp.pprint(transitions_summary(cohort_data))
myEstimator = CohortEstimator(states=myState, cohort_bounds=cohort_intervals, ci={'method': 'goodman', 'alpha': 0.05})
myEstimator.fit(cohort_data)
myMatrix = tm.TransitionMatrix(myEstimator.average_matrix, states=myState)
myMatrix.print_matrix(accuracy=3, format_type='Standard', labels=False)
myEstimator2 = AalenJohansenEstimator(states=myState)
labels = {'Time': 'Time', 'From': 'From', 'To': 'To', 'ID': 'ID'}
etm, times = myEstimator2.fit(canonical_data, labels=labels)
myMatrix2 = tm.TransitionMatrix(etm[:, :, -1])
G = myMatrix2.generator()
oneyear = tm.TransitionMatrix(expm(0.2 * G))
oneyear.print_matrix(accuracy=3)
def main():
print("Done")
if __name__ == "__main__":
main()
| # encoding: utf-8
# (c) 2017-2021 Open Risk, all rights reserved
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
"""
An end-to-end example of estimating a credit rating matrix from historical data using two different estimators
"""
import pprint as pp
import pandas as pd
from scipy.linalg import expm
import transitionMatrix as tm
from transitionMatrix.estimators.aalen_johansen_estimator import AalenJohansenEstimator
from transitionMatrix.estimators.cohort_estimator import CohortEstimator
from transitionMatrix.statespaces.statespace import StateSpace
from transitionMatrix.utils import transitions_summary
from transitionMatrix.utils.converters import to_canonical
# Load the data into a pandas frame
input_data = pd.read_csv('../../datasets/rating_data.csv')
print('> Transitions Summary Input Data')
pp.pprint(transitions_summary(input_data))
# Infer and describe state space
myState = StateSpace(transition_data=input_data)
myState.describe()
print('> The order of states is not important for estimation but it is important for presentation!')
# Convert format to canonical form
canonical_data = to_canonical(input_data)
# Group the data into temporal cohorts
print(80 * '=')
cohort_data, cohort_intervals = tm.utils.bin_timestamps(input_data, cohorts=5, remove_stale=True)
print('Intervals : ', cohort_intervals)
print('> Transitions Summary Cohorted Data')
pp.pprint(transitions_summary(cohort_data))
# Estimator 1: discrete-time cohort estimator with Goodman confidence intervals
myEstimator = CohortEstimator(states=myState, cohort_bounds=cohort_intervals, ci={'method': 'goodman', 'alpha': 0.05})
myEstimator.fit(cohort_data)
myMatrix = tm.TransitionMatrix(myEstimator.average_matrix, states=myState)
myMatrix.print_matrix(accuracy=3, format_type='Standard', labels=False)
# Estimator 2: continuous-time Aalen-Johansen estimator on the canonical data
myEstimator2 = AalenJohansenEstimator(states=myState)
labels = {'Time': 'Time', 'From': 'From', 'To': 'To', 'ID': 'ID'}
etm, times = myEstimator2.fit(canonical_data, labels=labels)
# Take the cumulative transition matrix at the final observation time.
myMatrix2 = tm.TransitionMatrix(etm[:, :, -1])
G = myMatrix2.generator()
# NOTE(review): 0.2 presumably rescales the generator to one of the five
# cohort intervals so the two estimates are comparable -- TODO confirm.
oneyear = tm.TransitionMatrix(expm(0.2 * G))
oneyear.print_matrix(accuracy=3)
def main():
    """Print a completion marker; the example itself runs at import time above."""
    done_msg = "Done"
    print(done_msg)


if __name__ == "__main__":
    main()
| en | 0.878581 | # encoding: utf-8 # (c) 2017-2021 Open Risk, all rights reserved # # TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included # in the source distribution of TransitionMatrix. This is notwithstanding any licenses of # third-party software included in this distribution. You may not use this file except in # compliance with the License. # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific language governing permissions and # limitations under the License. An end-to-end example of estimating a credit rating matrix from historical data using two different estimators # Load the data into a pandas frame # Infer and describe state space # Convert format to canonical form # Group the data into temporal cohorts | 1.986112 | 2 |
models/pytorch/weights.py | luigy-mach/Keras-OneClassAnomalyDetection | 118 | 6613561 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
__weights_dict = dict()
def load_weights(weight_file):
    """Load a numpy-pickled dict of layer weights from *weight_file*.

    Returns None when no weight file is given.  Falls back to byte-string
    decoding for archives pickled under Python 2.
    """
    if weight_file is None:  # BUG FIX: was '== None'
        return None
    try:
        # allow_pickle=True is required for object arrays on numpy >= 1.16.3.
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.
        weights_dict = np.load(weight_file, encoding='bytes', allow_pickle=True).item()
    return weights_dict
class KitModel(nn.Module):
def __init__(self, weight_file):
super(KitModel, self).__init__()
global __weights_dict
__weights_dict = load_weights(weight_file)
self.Conv1 = self.__conv(2, name='Conv1', in_channels=3, out_channels=16, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=False)
self.bn_Conv1 = self.__batch_normalization(2, 'bn_Conv1', num_features=16, eps=0.0010000000474974513, momentum=0.0)
self.expanded_conv_depthwise = self.__conv(2, name='expanded_conv_depthwise', in_channels=16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), groups=16, bias=False)
self.expanded_conv_depthwise_BN = self.__batch_normalization(2, 'expanded_conv_depthwise_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
self.expanded_conv_project = self.__conv(2, name='expanded_conv_project', in_channels=16, out_channels=8, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.expanded_conv_project_BN = self.__batch_normalization(2, 'expanded_conv_project_BN', num_features=8, eps=0.0010000000474974513, momentum=0.0)
self.block_1_expand = self.__conv(2, name='block_1_expand', in_channels=8, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_1_expand_BN = self.__batch_normalization(2, 'block_1_expand_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
self.block_1_depthwise = self.__conv(2, name='block_1_depthwise', in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(2, 2), groups=48, bias=False)
self.block_1_depthwise_BN = self.__batch_normalization(2, 'block_1_depthwise_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
self.block_1_project = self.__conv(2, name='block_1_project', in_channels=48, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_1_project_BN = self.__batch_normalization(2, 'block_1_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
self.block_2_expand = self.__conv(2, name='block_2_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_2_expand_BN = self.__batch_normalization(2, 'block_2_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_2_depthwise = self.__conv(2, name='block_2_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=96, bias=False)
self.block_2_depthwise_BN = self.__batch_normalization(2, 'block_2_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_2_project = self.__conv(2, name='block_2_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_2_project_BN = self.__batch_normalization(2, 'block_2_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
self.block_3_expand = self.__conv(2, name='block_3_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_3_expand_BN = self.__batch_normalization(2, 'block_3_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_3_depthwise = self.__conv(2, name='block_3_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(2, 2), groups=96, bias=False)
self.block_3_depthwise_BN = self.__batch_normalization(2, 'block_3_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_3_project = self.__conv(2, name='block_3_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_3_project_BN = self.__batch_normalization(2, 'block_3_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
self.block_4_expand = self.__conv(2, name='block_4_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_4_expand_BN = self.__batch_normalization(2, 'block_4_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_4_depthwise = self.__conv(2, name='block_4_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=96, bias=False)
self.block_4_depthwise_BN = self.__batch_normalization(2, 'block_4_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_4_project = self.__conv(2, name='block_4_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_4_project_BN = self.__batch_normalization(2, 'block_4_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
self.block_5_expand = self.__conv(2, name='block_5_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_5_expand_BN = self.__batch_normalization(2, 'block_5_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_5_depthwise = self.__conv(2, name='block_5_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=96, bias=False)
self.block_5_depthwise_BN = self.__batch_normalization(2, 'block_5_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_5_project = self.__conv(2, name='block_5_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_5_project_BN = self.__batch_normalization(2, 'block_5_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
self.block_6_expand = self.__conv(2, name='block_6_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_6_expand_BN = self.__batch_normalization(2, 'block_6_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_6_depthwise = self.__conv(2, name='block_6_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(2, 2), groups=96, bias=False)
self.block_6_depthwise_BN = self.__batch_normalization(2, 'block_6_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
self.block_6_project = self.__conv(2, name='block_6_project', in_channels=96, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_6_project_BN = self.__batch_normalization(2, 'block_6_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
self.block_7_expand = self.__conv(2, name='block_7_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_7_expand_BN = self.__batch_normalization(2, 'block_7_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_7_depthwise = self.__conv(2, name='block_7_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
self.block_7_depthwise_BN = self.__batch_normalization(2, 'block_7_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_7_project = self.__conv(2, name='block_7_project', in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_7_project_BN = self.__batch_normalization(2, 'block_7_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
self.block_8_expand = self.__conv(2, name='block_8_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_8_expand_BN = self.__batch_normalization(2, 'block_8_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_8_depthwise = self.__conv(2, name='block_8_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
self.block_8_depthwise_BN = self.__batch_normalization(2, 'block_8_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_8_project = self.__conv(2, name='block_8_project', in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_8_project_BN = self.__batch_normalization(2, 'block_8_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
self.block_9_expand = self.__conv(2, name='block_9_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_9_expand_BN = self.__batch_normalization(2, 'block_9_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_9_depthwise = self.__conv(2, name='block_9_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
self.block_9_depthwise_BN = self.__batch_normalization(2, 'block_9_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_9_project = self.__conv(2, name='block_9_project', in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_9_project_BN = self.__batch_normalization(2, 'block_9_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
self.block_10_expand = self.__conv(2, name='block_10_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_10_expand_BN = self.__batch_normalization(2, 'block_10_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_10_depthwise = self.__conv(2, name='block_10_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
self.block_10_depthwise_BN = self.__batch_normalization(2, 'block_10_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
self.block_10_project = self.__conv(2, name='block_10_project', in_channels=192, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_10_project_BN = self.__batch_normalization(2, 'block_10_project_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
self.block_11_expand = self.__conv(2, name='block_11_expand', in_channels=48, out_channels=288, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_11_expand_BN = self.__batch_normalization(2, 'block_11_expand_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
self.block_11_depthwise = self.__conv(2, name='block_11_depthwise', in_channels=288, out_channels=288, kernel_size=(3, 3), stride=(1, 1), groups=288, bias=False)
self.block_11_depthwise_BN = self.__batch_normalization(2, 'block_11_depthwise_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
self.block_11_project = self.__conv(2, name='block_11_project', in_channels=288, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_11_project_BN = self.__batch_normalization(2, 'block_11_project_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
self.block_12_expand = self.__conv(2, name='block_12_expand', in_channels=48, out_channels=288, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_12_expand_BN = self.__batch_normalization(2, 'block_12_expand_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
self.block_12_depthwise = self.__conv(2, name='block_12_depthwise', in_channels=288, out_channels=288, kernel_size=(3, 3), stride=(1, 1), groups=288, bias=False)
self.block_12_depthwise_BN = self.__batch_normalization(2, 'block_12_depthwise_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
self.block_12_project = self.__conv(2, name='block_12_project', in_channels=288, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_12_project_BN = self.__batch_normalization(2, 'block_12_project_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
self.block_13_expand = self.__conv(2, name='block_13_expand', in_channels=48, out_channels=288, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_13_expand_BN = self.__batch_normalization(2, 'block_13_expand_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
self.block_13_depthwise = self.__conv(2, name='block_13_depthwise', in_channels=288, out_channels=288, kernel_size=(3, 3), stride=(2, 2), groups=288, bias=False)
self.block_13_depthwise_BN = self.__batch_normalization(2, 'block_13_depthwise_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
self.block_13_project = self.__conv(2, name='block_13_project', in_channels=288, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_13_project_BN = self.__batch_normalization(2, 'block_13_project_BN', num_features=80, eps=0.0010000000474974513, momentum=0.0)
self.block_14_expand = self.__conv(2, name='block_14_expand', in_channels=80, out_channels=480, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_14_expand_BN = self.__batch_normalization(2, 'block_14_expand_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
self.block_14_depthwise = self.__conv(2, name='block_14_depthwise', in_channels=480, out_channels=480, kernel_size=(3, 3), stride=(1, 1), groups=480, bias=False)
self.block_14_depthwise_BN = self.__batch_normalization(2, 'block_14_depthwise_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
self.block_14_project = self.__conv(2, name='block_14_project', in_channels=480, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_14_project_BN = self.__batch_normalization(2, 'block_14_project_BN', num_features=80, eps=0.0010000000474974513, momentum=0.0)
self.block_15_expand = self.__conv(2, name='block_15_expand', in_channels=80, out_channels=480, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_15_expand_BN = self.__batch_normalization(2, 'block_15_expand_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
self.block_15_depthwise = self.__conv(2, name='block_15_depthwise', in_channels=480, out_channels=480, kernel_size=(3, 3), stride=(1, 1), groups=480, bias=False)
self.block_15_depthwise_BN = self.__batch_normalization(2, 'block_15_depthwise_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
self.block_15_project = self.__conv(2, name='block_15_project', in_channels=480, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_15_project_BN = self.__batch_normalization(2, 'block_15_project_BN', num_features=80, eps=0.0010000000474974513, momentum=0.0)
self.block_16_expand = self.__conv(2, name='block_16_expand', in_channels=80, out_channels=480, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_16_expand_BN = self.__batch_normalization(2, 'block_16_expand_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
self.block_16_depthwise = self.__conv(2, name='block_16_depthwise', in_channels=480, out_channels=480, kernel_size=(3, 3), stride=(1, 1), groups=480, bias=False)
self.block_16_depthwise_BN = self.__batch_normalization(2, 'block_16_depthwise_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
self.block_16_project = self.__conv(2, name='block_16_project', in_channels=480, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.block_16_project_BN = self.__batch_normalization(2, 'block_16_project_BN', num_features=160, eps=0.0010000000474974513, momentum=0.0)
self.Conv_1 = self.__conv(2, name='Conv_1', in_channels=160, out_channels=1280, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
self.Conv_1_bn = self.__batch_normalization(2, 'Conv_1_bn', num_features=1280, eps=0.0010000000474974513, momentum=0.0)
    def forward(self, x):
        """Run the network on input ``x`` and return pooled features.

        The graph is a stem conv followed by a chain of
        expand -> depthwise -> project stages (each conv followed by
        BatchNorm, with ReLU6 activations on expand/depthwise outputs),
        with elementwise-add skip connections (``*_add``), then a final
        1x1 conv to 1280 channels, global average pooling, and a flatten
        to shape (batch, 1280).

        Stride-2 depthwise/stem convs are preceded by an asymmetric
        F.pad of (0, 1, 0, 1) while stride-1 depthwise convs use a
        symmetric (1, 1, 1, 1) pad — presumably emulating TensorFlow
        'SAME' padding (this file looks converter-generated; confirm
        before relying on that interpretation).
        """
        # --- stem: asymmetric pad, stride-2 conv, BN, ReLU6 ---
        Conv1_pad = F.pad(x, (0, 1, 0, 1), mode = 'constant', value = 0)
        Conv1 = self.Conv1(Conv1_pad)
        bn_Conv1 = self.bn_Conv1(Conv1)
        Conv1_relu = F.relu6(bn_Conv1)
        expanded_conv_depthwise_pad = F.pad(Conv1_relu, (1, 1, 1, 1))
        expanded_conv_depthwise = self.expanded_conv_depthwise(expanded_conv_depthwise_pad)
        expanded_conv_depthwise_BN = self.expanded_conv_depthwise_BN(expanded_conv_depthwise)
        expanded_conv_depthwise_relu = F.relu6(expanded_conv_depthwise_BN)
        expanded_conv_project = self.expanded_conv_project(expanded_conv_depthwise_relu)
        expanded_conv_project_BN = self.expanded_conv_project_BN(expanded_conv_project)
        # --- inverted-residual chain; every "*_add" below is an identity
        # skip connection between project outputs of matching shape ---
        block_1_expand = self.block_1_expand(expanded_conv_project_BN)
        block_1_expand_BN = self.block_1_expand_BN(block_1_expand)
        block_1_expand_relu = F.relu6(block_1_expand_BN)
        block_1_pad = F.pad(block_1_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_1_depthwise = self.block_1_depthwise(block_1_pad)
        block_1_depthwise_BN = self.block_1_depthwise_BN(block_1_depthwise)
        block_1_depthwise_relu = F.relu6(block_1_depthwise_BN)
        block_1_project = self.block_1_project(block_1_depthwise_relu)
        block_1_project_BN = self.block_1_project_BN(block_1_project)
        block_2_expand = self.block_2_expand(block_1_project_BN)
        block_2_expand_BN = self.block_2_expand_BN(block_2_expand)
        block_2_expand_relu = F.relu6(block_2_expand_BN)
        block_2_depthwise_pad = F.pad(block_2_expand_relu, (1, 1, 1, 1))
        block_2_depthwise = self.block_2_depthwise(block_2_depthwise_pad)
        block_2_depthwise_BN = self.block_2_depthwise_BN(block_2_depthwise)
        block_2_depthwise_relu = F.relu6(block_2_depthwise_BN)
        block_2_project = self.block_2_project(block_2_depthwise_relu)
        block_2_project_BN = self.block_2_project_BN(block_2_project)
        block_2_add = block_1_project_BN + block_2_project_BN
        block_3_expand = self.block_3_expand(block_2_add)
        block_3_expand_BN = self.block_3_expand_BN(block_3_expand)
        block_3_expand_relu = F.relu6(block_3_expand_BN)
        block_3_pad = F.pad(block_3_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_3_depthwise = self.block_3_depthwise(block_3_pad)
        block_3_depthwise_BN = self.block_3_depthwise_BN(block_3_depthwise)
        block_3_depthwise_relu = F.relu6(block_3_depthwise_BN)
        block_3_project = self.block_3_project(block_3_depthwise_relu)
        block_3_project_BN = self.block_3_project_BN(block_3_project)
        block_4_expand = self.block_4_expand(block_3_project_BN)
        block_4_expand_BN = self.block_4_expand_BN(block_4_expand)
        block_4_expand_relu = F.relu6(block_4_expand_BN)
        block_4_depthwise_pad = F.pad(block_4_expand_relu, (1, 1, 1, 1))
        block_4_depthwise = self.block_4_depthwise(block_4_depthwise_pad)
        block_4_depthwise_BN = self.block_4_depthwise_BN(block_4_depthwise)
        block_4_depthwise_relu = F.relu6(block_4_depthwise_BN)
        block_4_project = self.block_4_project(block_4_depthwise_relu)
        block_4_project_BN = self.block_4_project_BN(block_4_project)
        block_4_add = block_3_project_BN + block_4_project_BN
        block_5_expand = self.block_5_expand(block_4_add)
        block_5_expand_BN = self.block_5_expand_BN(block_5_expand)
        block_5_expand_relu = F.relu6(block_5_expand_BN)
        block_5_depthwise_pad = F.pad(block_5_expand_relu, (1, 1, 1, 1))
        block_5_depthwise = self.block_5_depthwise(block_5_depthwise_pad)
        block_5_depthwise_BN = self.block_5_depthwise_BN(block_5_depthwise)
        block_5_depthwise_relu = F.relu6(block_5_depthwise_BN)
        block_5_project = self.block_5_project(block_5_depthwise_relu)
        block_5_project_BN = self.block_5_project_BN(block_5_project)
        block_5_add = block_4_add + block_5_project_BN
        block_6_expand = self.block_6_expand(block_5_add)
        block_6_expand_BN = self.block_6_expand_BN(block_6_expand)
        block_6_expand_relu = F.relu6(block_6_expand_BN)
        block_6_pad = F.pad(block_6_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_6_depthwise = self.block_6_depthwise(block_6_pad)
        block_6_depthwise_BN = self.block_6_depthwise_BN(block_6_depthwise)
        block_6_depthwise_relu = F.relu6(block_6_depthwise_BN)
        block_6_project = self.block_6_project(block_6_depthwise_relu)
        block_6_project_BN = self.block_6_project_BN(block_6_project)
        block_7_expand = self.block_7_expand(block_6_project_BN)
        block_7_expand_BN = self.block_7_expand_BN(block_7_expand)
        block_7_expand_relu = F.relu6(block_7_expand_BN)
        block_7_depthwise_pad = F.pad(block_7_expand_relu, (1, 1, 1, 1))
        block_7_depthwise = self.block_7_depthwise(block_7_depthwise_pad)
        block_7_depthwise_BN = self.block_7_depthwise_BN(block_7_depthwise)
        block_7_depthwise_relu = F.relu6(block_7_depthwise_BN)
        block_7_project = self.block_7_project(block_7_depthwise_relu)
        block_7_project_BN = self.block_7_project_BN(block_7_project)
        block_7_add = block_6_project_BN + block_7_project_BN
        block_8_expand = self.block_8_expand(block_7_add)
        block_8_expand_BN = self.block_8_expand_BN(block_8_expand)
        block_8_expand_relu = F.relu6(block_8_expand_BN)
        block_8_depthwise_pad = F.pad(block_8_expand_relu, (1, 1, 1, 1))
        block_8_depthwise = self.block_8_depthwise(block_8_depthwise_pad)
        block_8_depthwise_BN = self.block_8_depthwise_BN(block_8_depthwise)
        block_8_depthwise_relu = F.relu6(block_8_depthwise_BN)
        block_8_project = self.block_8_project(block_8_depthwise_relu)
        block_8_project_BN = self.block_8_project_BN(block_8_project)
        block_8_add = block_7_add + block_8_project_BN
        block_9_expand = self.block_9_expand(block_8_add)
        block_9_expand_BN = self.block_9_expand_BN(block_9_expand)
        block_9_expand_relu = F.relu6(block_9_expand_BN)
        block_9_depthwise_pad = F.pad(block_9_expand_relu, (1, 1, 1, 1))
        block_9_depthwise = self.block_9_depthwise(block_9_depthwise_pad)
        block_9_depthwise_BN = self.block_9_depthwise_BN(block_9_depthwise)
        block_9_depthwise_relu = F.relu6(block_9_depthwise_BN)
        block_9_project = self.block_9_project(block_9_depthwise_relu)
        block_9_project_BN = self.block_9_project_BN(block_9_project)
        block_9_add = block_8_add + block_9_project_BN
        block_10_expand = self.block_10_expand(block_9_add)
        block_10_expand_BN = self.block_10_expand_BN(block_10_expand)
        block_10_expand_relu = F.relu6(block_10_expand_BN)
        block_10_depthwise_pad = F.pad(block_10_expand_relu, (1, 1, 1, 1))
        block_10_depthwise = self.block_10_depthwise(block_10_depthwise_pad)
        block_10_depthwise_BN = self.block_10_depthwise_BN(block_10_depthwise)
        block_10_depthwise_relu = F.relu6(block_10_depthwise_BN)
        block_10_project = self.block_10_project(block_10_depthwise_relu)
        block_10_project_BN = self.block_10_project_BN(block_10_project)
        block_11_expand = self.block_11_expand(block_10_project_BN)
        block_11_expand_BN = self.block_11_expand_BN(block_11_expand)
        block_11_expand_relu = F.relu6(block_11_expand_BN)
        block_11_depthwise_pad = F.pad(block_11_expand_relu, (1, 1, 1, 1))
        block_11_depthwise = self.block_11_depthwise(block_11_depthwise_pad)
        block_11_depthwise_BN = self.block_11_depthwise_BN(block_11_depthwise)
        block_11_depthwise_relu = F.relu6(block_11_depthwise_BN)
        block_11_project = self.block_11_project(block_11_depthwise_relu)
        block_11_project_BN = self.block_11_project_BN(block_11_project)
        block_11_add = block_10_project_BN + block_11_project_BN
        block_12_expand = self.block_12_expand(block_11_add)
        block_12_expand_BN = self.block_12_expand_BN(block_12_expand)
        block_12_expand_relu = F.relu6(block_12_expand_BN)
        block_12_depthwise_pad = F.pad(block_12_expand_relu, (1, 1, 1, 1))
        block_12_depthwise = self.block_12_depthwise(block_12_depthwise_pad)
        block_12_depthwise_BN = self.block_12_depthwise_BN(block_12_depthwise)
        block_12_depthwise_relu = F.relu6(block_12_depthwise_BN)
        block_12_project = self.block_12_project(block_12_depthwise_relu)
        block_12_project_BN = self.block_12_project_BN(block_12_project)
        block_12_add = block_11_add + block_12_project_BN
        block_13_expand = self.block_13_expand(block_12_add)
        block_13_expand_BN = self.block_13_expand_BN(block_13_expand)
        block_13_expand_relu = F.relu6(block_13_expand_BN)
        block_13_pad = F.pad(block_13_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_13_depthwise = self.block_13_depthwise(block_13_pad)
        block_13_depthwise_BN = self.block_13_depthwise_BN(block_13_depthwise)
        block_13_depthwise_relu = F.relu6(block_13_depthwise_BN)
        block_13_project = self.block_13_project(block_13_depthwise_relu)
        block_13_project_BN = self.block_13_project_BN(block_13_project)
        block_14_expand = self.block_14_expand(block_13_project_BN)
        block_14_expand_BN = self.block_14_expand_BN(block_14_expand)
        block_14_expand_relu = F.relu6(block_14_expand_BN)
        block_14_depthwise_pad = F.pad(block_14_expand_relu, (1, 1, 1, 1))
        block_14_depthwise = self.block_14_depthwise(block_14_depthwise_pad)
        block_14_depthwise_BN = self.block_14_depthwise_BN(block_14_depthwise)
        block_14_depthwise_relu = F.relu6(block_14_depthwise_BN)
        block_14_project = self.block_14_project(block_14_depthwise_relu)
        block_14_project_BN = self.block_14_project_BN(block_14_project)
        block_14_add = block_13_project_BN + block_14_project_BN
        block_15_expand = self.block_15_expand(block_14_add)
        block_15_expand_BN = self.block_15_expand_BN(block_15_expand)
        block_15_expand_relu = F.relu6(block_15_expand_BN)
        block_15_depthwise_pad = F.pad(block_15_expand_relu, (1, 1, 1, 1))
        block_15_depthwise = self.block_15_depthwise(block_15_depthwise_pad)
        block_15_depthwise_BN = self.block_15_depthwise_BN(block_15_depthwise)
        block_15_depthwise_relu = F.relu6(block_15_depthwise_BN)
        block_15_project = self.block_15_project(block_15_depthwise_relu)
        block_15_project_BN = self.block_15_project_BN(block_15_project)
        block_15_add = block_14_add + block_15_project_BN
        block_16_expand = self.block_16_expand(block_15_add)
        block_16_expand_BN = self.block_16_expand_BN(block_16_expand)
        block_16_expand_relu = F.relu6(block_16_expand_BN)
        block_16_depthwise_pad = F.pad(block_16_expand_relu, (1, 1, 1, 1))
        block_16_depthwise = self.block_16_depthwise(block_16_depthwise_pad)
        block_16_depthwise_BN = self.block_16_depthwise_BN(block_16_depthwise)
        block_16_depthwise_relu = F.relu6(block_16_depthwise_BN)
        block_16_project = self.block_16_project(block_16_depthwise_relu)
        block_16_project_BN = self.block_16_project_BN(block_16_project)
        # --- head: 1x1 conv to 1280 channels, global average pool, flatten ---
        Conv_1 = self.Conv_1(block_16_project_BN)
        Conv_1_bn = self.Conv_1_bn(Conv_1)
        out_relu = F.relu6(Conv_1_bn)
        global_average_pooling2d_1 = F.avg_pool2d(input = out_relu, kernel_size = out_relu.size()[2:])
        global_average_pooling2d_1_flatten = global_average_pooling2d_1.view(global_average_pooling2d_1.size(0), -1)
        return global_average_pooling2d_1_flatten
@staticmethod
def __batch_normalization(dim, name, **kwargs):
if dim == 1: layer = nn.BatchNorm1d(**kwargs)
elif dim == 2: layer = nn.BatchNorm2d(**kwargs)
elif dim == 3: layer = nn.BatchNorm3d(**kwargs)
else: raise NotImplementedError()
if 'scale' in __weights_dict[name]:
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
else:
layer.weight.data.fill_(1)
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
else:
layer.bias.data.fill_(0)
layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
return layer
@staticmethod
def __conv(dim, name, **kwargs):
if dim == 1: layer = nn.Conv1d(**kwargs)
elif dim == 2: layer = nn.Conv2d(**kwargs)
elif dim == 3: layer = nn.Conv3d(**kwargs)
else: raise NotImplementedError()
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
return layer
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
__weights_dict = dict()
def load_weights(weight_file):
    """Load a pickled numpy weights dictionary from *weight_file*.

    The file is expected to hold a dict wrapped in a 0-d object array
    (as produced by ``np.save`` on a dict); ``.item()`` unwraps it.
    Returns None when no file is given.

    ``allow_pickle=True`` is required on NumPy >= 1.16.3 to load object
    arrays; without it both load attempts raise ValueError.  The fallback
    with ``encoding='bytes'`` handles dictionaries pickled under Python 2.
    """
    if weight_file is None:
        return None
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except Exception:
        # Best-effort retry for Python-2-era pickles (raises if that
        # fails too, rather than silently returning garbage).
        weights_dict = np.load(weight_file, encoding='bytes', allow_pickle=True).item()
    return weights_dict
class KitModel(nn.Module):
    def __init__(self, weight_file):
        """Build the network graph and load pretrained weights.

        Every conv / batch-norm layer is created through the ``__conv`` /
        ``__batch_normalization`` helpers, which copy the matching numpy
        arrays out of the weights dictionary as each layer is built.

        NOTE(review): the layer names and the expand/depthwise/project
        stage pattern match Keras's MobileNetV2 at a reduced channel
        width — this file looks auto-generated by a model converter;
        confirm before editing constants by hand.
        """
        super(KitModel, self).__init__()
        # Inside a class body "__weights_dict" is name-mangled to
        # "_KitModel__weights_dict", so this creates/assigns a mangled
        # module-level global — the same one the (equally mangled)
        # references in the helper methods read.  The plain module-level
        # "__weights_dict = dict()" is never touched by these methods.
        global __weights_dict
        __weights_dict = load_weights(weight_file)
        # --- stem: 3->16, stride 2 ---
        self.Conv1 = self.__conv(2, name='Conv1', in_channels=3, out_channels=16, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=False)
        self.bn_Conv1 = self.__batch_normalization(2, 'bn_Conv1', num_features=16, eps=0.0010000000474974513, momentum=0.0)
        # --- first (non-expanded) depthwise block: 16 -> 8 ---
        self.expanded_conv_depthwise = self.__conv(2, name='expanded_conv_depthwise', in_channels=16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), groups=16, bias=False)
        self.expanded_conv_depthwise_BN = self.__batch_normalization(2, 'expanded_conv_depthwise_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
        self.expanded_conv_project = self.__conv(2, name='expanded_conv_project', in_channels=16, out_channels=8, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.expanded_conv_project_BN = self.__batch_normalization(2, 'expanded_conv_project_BN', num_features=8, eps=0.0010000000474974513, momentum=0.0)
        # --- blocks 1-2: project width 16 (block 1 depthwise has stride 2) ---
        self.block_1_expand = self.__conv(2, name='block_1_expand', in_channels=8, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_1_expand_BN = self.__batch_normalization(2, 'block_1_expand_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
        self.block_1_depthwise = self.__conv(2, name='block_1_depthwise', in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(2, 2), groups=48, bias=False)
        self.block_1_depthwise_BN = self.__batch_normalization(2, 'block_1_depthwise_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
        self.block_1_project = self.__conv(2, name='block_1_project', in_channels=48, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_1_project_BN = self.__batch_normalization(2, 'block_1_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
        self.block_2_expand = self.__conv(2, name='block_2_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_2_expand_BN = self.__batch_normalization(2, 'block_2_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_2_depthwise = self.__conv(2, name='block_2_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=96, bias=False)
        self.block_2_depthwise_BN = self.__batch_normalization(2, 'block_2_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_2_project = self.__conv(2, name='block_2_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_2_project_BN = self.__batch_normalization(2, 'block_2_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
        # --- blocks 3-5: project width 16 (block 3 depthwise has stride 2) ---
        self.block_3_expand = self.__conv(2, name='block_3_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_3_expand_BN = self.__batch_normalization(2, 'block_3_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_3_depthwise = self.__conv(2, name='block_3_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(2, 2), groups=96, bias=False)
        self.block_3_depthwise_BN = self.__batch_normalization(2, 'block_3_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_3_project = self.__conv(2, name='block_3_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_3_project_BN = self.__batch_normalization(2, 'block_3_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
        self.block_4_expand = self.__conv(2, name='block_4_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_4_expand_BN = self.__batch_normalization(2, 'block_4_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_4_depthwise = self.__conv(2, name='block_4_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=96, bias=False)
        self.block_4_depthwise_BN = self.__batch_normalization(2, 'block_4_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_4_project = self.__conv(2, name='block_4_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_4_project_BN = self.__batch_normalization(2, 'block_4_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
        self.block_5_expand = self.__conv(2, name='block_5_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_5_expand_BN = self.__batch_normalization(2, 'block_5_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_5_depthwise = self.__conv(2, name='block_5_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=96, bias=False)
        self.block_5_depthwise_BN = self.__batch_normalization(2, 'block_5_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_5_project = self.__conv(2, name='block_5_project', in_channels=96, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_5_project_BN = self.__batch_normalization(2, 'block_5_project_BN', num_features=16, eps=0.0010000000474974513, momentum=0.0)
        # --- blocks 6-9: project width 32 (block 6 depthwise has stride 2) ---
        self.block_6_expand = self.__conv(2, name='block_6_expand', in_channels=16, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_6_expand_BN = self.__batch_normalization(2, 'block_6_expand_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_6_depthwise = self.__conv(2, name='block_6_depthwise', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(2, 2), groups=96, bias=False)
        self.block_6_depthwise_BN = self.__batch_normalization(2, 'block_6_depthwise_BN', num_features=96, eps=0.0010000000474974513, momentum=0.0)
        self.block_6_project = self.__conv(2, name='block_6_project', in_channels=96, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_6_project_BN = self.__batch_normalization(2, 'block_6_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
        self.block_7_expand = self.__conv(2, name='block_7_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_7_expand_BN = self.__batch_normalization(2, 'block_7_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_7_depthwise = self.__conv(2, name='block_7_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
        self.block_7_depthwise_BN = self.__batch_normalization(2, 'block_7_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_7_project = self.__conv(2, name='block_7_project', in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_7_project_BN = self.__batch_normalization(2, 'block_7_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
        self.block_8_expand = self.__conv(2, name='block_8_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_8_expand_BN = self.__batch_normalization(2, 'block_8_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_8_depthwise = self.__conv(2, name='block_8_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
        self.block_8_depthwise_BN = self.__batch_normalization(2, 'block_8_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_8_project = self.__conv(2, name='block_8_project', in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_8_project_BN = self.__batch_normalization(2, 'block_8_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
        self.block_9_expand = self.__conv(2, name='block_9_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_9_expand_BN = self.__batch_normalization(2, 'block_9_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_9_depthwise = self.__conv(2, name='block_9_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
        self.block_9_depthwise_BN = self.__batch_normalization(2, 'block_9_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_9_project = self.__conv(2, name='block_9_project', in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_9_project_BN = self.__batch_normalization(2, 'block_9_project_BN', num_features=32, eps=0.0010000000474974513, momentum=0.0)
        # --- blocks 10-12: project width 48 ---
        self.block_10_expand = self.__conv(2, name='block_10_expand', in_channels=32, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_10_expand_BN = self.__batch_normalization(2, 'block_10_expand_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_10_depthwise = self.__conv(2, name='block_10_depthwise', in_channels=192, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=192, bias=False)
        self.block_10_depthwise_BN = self.__batch_normalization(2, 'block_10_depthwise_BN', num_features=192, eps=0.0010000000474974513, momentum=0.0)
        self.block_10_project = self.__conv(2, name='block_10_project', in_channels=192, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_10_project_BN = self.__batch_normalization(2, 'block_10_project_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
        self.block_11_expand = self.__conv(2, name='block_11_expand', in_channels=48, out_channels=288, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_11_expand_BN = self.__batch_normalization(2, 'block_11_expand_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
        self.block_11_depthwise = self.__conv(2, name='block_11_depthwise', in_channels=288, out_channels=288, kernel_size=(3, 3), stride=(1, 1), groups=288, bias=False)
        self.block_11_depthwise_BN = self.__batch_normalization(2, 'block_11_depthwise_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
        self.block_11_project = self.__conv(2, name='block_11_project', in_channels=288, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_11_project_BN = self.__batch_normalization(2, 'block_11_project_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
        self.block_12_expand = self.__conv(2, name='block_12_expand', in_channels=48, out_channels=288, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_12_expand_BN = self.__batch_normalization(2, 'block_12_expand_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
        self.block_12_depthwise = self.__conv(2, name='block_12_depthwise', in_channels=288, out_channels=288, kernel_size=(3, 3), stride=(1, 1), groups=288, bias=False)
        self.block_12_depthwise_BN = self.__batch_normalization(2, 'block_12_depthwise_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
        self.block_12_project = self.__conv(2, name='block_12_project', in_channels=288, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_12_project_BN = self.__batch_normalization(2, 'block_12_project_BN', num_features=48, eps=0.0010000000474974513, momentum=0.0)
        # --- blocks 13-15: project width 80 (block 13 depthwise has stride 2) ---
        self.block_13_expand = self.__conv(2, name='block_13_expand', in_channels=48, out_channels=288, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_13_expand_BN = self.__batch_normalization(2, 'block_13_expand_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
        self.block_13_depthwise = self.__conv(2, name='block_13_depthwise', in_channels=288, out_channels=288, kernel_size=(3, 3), stride=(2, 2), groups=288, bias=False)
        self.block_13_depthwise_BN = self.__batch_normalization(2, 'block_13_depthwise_BN', num_features=288, eps=0.0010000000474974513, momentum=0.0)
        self.block_13_project = self.__conv(2, name='block_13_project', in_channels=288, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_13_project_BN = self.__batch_normalization(2, 'block_13_project_BN', num_features=80, eps=0.0010000000474974513, momentum=0.0)
        self.block_14_expand = self.__conv(2, name='block_14_expand', in_channels=80, out_channels=480, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_14_expand_BN = self.__batch_normalization(2, 'block_14_expand_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
        self.block_14_depthwise = self.__conv(2, name='block_14_depthwise', in_channels=480, out_channels=480, kernel_size=(3, 3), stride=(1, 1), groups=480, bias=False)
        self.block_14_depthwise_BN = self.__batch_normalization(2, 'block_14_depthwise_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
        self.block_14_project = self.__conv(2, name='block_14_project', in_channels=480, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_14_project_BN = self.__batch_normalization(2, 'block_14_project_BN', num_features=80, eps=0.0010000000474974513, momentum=0.0)
        self.block_15_expand = self.__conv(2, name='block_15_expand', in_channels=80, out_channels=480, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_15_expand_BN = self.__batch_normalization(2, 'block_15_expand_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
        self.block_15_depthwise = self.__conv(2, name='block_15_depthwise', in_channels=480, out_channels=480, kernel_size=(3, 3), stride=(1, 1), groups=480, bias=False)
        self.block_15_depthwise_BN = self.__batch_normalization(2, 'block_15_depthwise_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
        self.block_15_project = self.__conv(2, name='block_15_project', in_channels=480, out_channels=80, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_15_project_BN = self.__batch_normalization(2, 'block_15_project_BN', num_features=80, eps=0.0010000000474974513, momentum=0.0)
        # --- block 16: project width 160 ---
        self.block_16_expand = self.__conv(2, name='block_16_expand', in_channels=80, out_channels=480, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_16_expand_BN = self.__batch_normalization(2, 'block_16_expand_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
        self.block_16_depthwise = self.__conv(2, name='block_16_depthwise', in_channels=480, out_channels=480, kernel_size=(3, 3), stride=(1, 1), groups=480, bias=False)
        self.block_16_depthwise_BN = self.__batch_normalization(2, 'block_16_depthwise_BN', num_features=480, eps=0.0010000000474974513, momentum=0.0)
        self.block_16_project = self.__conv(2, name='block_16_project', in_channels=480, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.block_16_project_BN = self.__batch_normalization(2, 'block_16_project_BN', num_features=160, eps=0.0010000000474974513, momentum=0.0)
        # --- head: final 1x1 conv to 1280 features ---
        self.Conv_1 = self.__conv(2, name='Conv_1', in_channels=160, out_channels=1280, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.Conv_1_bn = self.__batch_normalization(2, 'Conv_1_bn', num_features=1280, eps=0.0010000000474974513, momentum=0.0)
    def forward(self, x):
        """Forward pass of this machine-generated MobileNetV2 network.

        Args:
            x: input batch -- presumably NCHW image data; confirm against
               the Conv1 definition in __init__ (out of view here).

        Returns:
            The globally average-pooled feature map, flattened to
            (batch, 1280).
        """
        # Stem: pad right/bottom by one pixel, then Conv1 -> BN -> ReLU6.
        Conv1_pad = F.pad(x, (0, 1, 0, 1), mode = 'constant', value = 0)
        Conv1 = self.Conv1(Conv1_pad)
        bn_Conv1 = self.bn_Conv1(Conv1)
        Conv1_relu = F.relu6(bn_Conv1)
        # First bottleneck ('expanded_conv'): depthwise -> project only --
        # this block has no 1x1 expansion stage.
        expanded_conv_depthwise_pad = F.pad(Conv1_relu, (1, 1, 1, 1))
        expanded_conv_depthwise = self.expanded_conv_depthwise(expanded_conv_depthwise_pad)
        expanded_conv_depthwise_BN = self.expanded_conv_depthwise_BN(expanded_conv_depthwise)
        expanded_conv_depthwise_relu = F.relu6(expanded_conv_depthwise_BN)
        expanded_conv_project = self.expanded_conv_project(expanded_conv_depthwise_relu)
        expanded_conv_project_BN = self.expanded_conv_project_BN(expanded_conv_project)
        # Blocks 1-16: expand (1x1) -> depthwise (3x3) -> project (1x1),
        # each conv followed by BN; ReLU6 follows expand and depthwise only
        # (linear bottleneck after project). The asymmetric (0, 1, 0, 1)
        # pads before blocks 1/3/6/13 presumably emulate TF 'SAME' padding
        # for stride-2 stages -- TODO confirm against __init__.
        block_1_expand = self.block_1_expand(expanded_conv_project_BN)
        block_1_expand_BN = self.block_1_expand_BN(block_1_expand)
        block_1_expand_relu = F.relu6(block_1_expand_BN)
        block_1_pad = F.pad(block_1_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_1_depthwise = self.block_1_depthwise(block_1_pad)
        block_1_depthwise_BN = self.block_1_depthwise_BN(block_1_depthwise)
        block_1_depthwise_relu = F.relu6(block_1_depthwise_BN)
        block_1_project = self.block_1_project(block_1_depthwise_relu)
        block_1_project_BN = self.block_1_project_BN(block_1_project)
        block_2_expand = self.block_2_expand(block_1_project_BN)
        block_2_expand_BN = self.block_2_expand_BN(block_2_expand)
        block_2_expand_relu = F.relu6(block_2_expand_BN)
        block_2_depthwise_pad = F.pad(block_2_expand_relu, (1, 1, 1, 1))
        block_2_depthwise = self.block_2_depthwise(block_2_depthwise_pad)
        block_2_depthwise_BN = self.block_2_depthwise_BN(block_2_depthwise)
        block_2_depthwise_relu = F.relu6(block_2_depthwise_BN)
        block_2_project = self.block_2_project(block_2_depthwise_relu)
        block_2_project_BN = self.block_2_project_BN(block_2_project)
        # Residual shortcut: element-wise sum with the block's input tensor.
        block_2_add = block_1_project_BN + block_2_project_BN
        block_3_expand = self.block_3_expand(block_2_add)
        block_3_expand_BN = self.block_3_expand_BN(block_3_expand)
        block_3_expand_relu = F.relu6(block_3_expand_BN)
        block_3_pad = F.pad(block_3_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_3_depthwise = self.block_3_depthwise(block_3_pad)
        block_3_depthwise_BN = self.block_3_depthwise_BN(block_3_depthwise)
        block_3_depthwise_relu = F.relu6(block_3_depthwise_BN)
        block_3_project = self.block_3_project(block_3_depthwise_relu)
        block_3_project_BN = self.block_3_project_BN(block_3_project)
        block_4_expand = self.block_4_expand(block_3_project_BN)
        block_4_expand_BN = self.block_4_expand_BN(block_4_expand)
        block_4_expand_relu = F.relu6(block_4_expand_BN)
        block_4_depthwise_pad = F.pad(block_4_expand_relu, (1, 1, 1, 1))
        block_4_depthwise = self.block_4_depthwise(block_4_depthwise_pad)
        block_4_depthwise_BN = self.block_4_depthwise_BN(block_4_depthwise)
        block_4_depthwise_relu = F.relu6(block_4_depthwise_BN)
        block_4_project = self.block_4_project(block_4_depthwise_relu)
        block_4_project_BN = self.block_4_project_BN(block_4_project)
        block_4_add = block_3_project_BN + block_4_project_BN
        block_5_expand = self.block_5_expand(block_4_add)
        block_5_expand_BN = self.block_5_expand_BN(block_5_expand)
        block_5_expand_relu = F.relu6(block_5_expand_BN)
        block_5_depthwise_pad = F.pad(block_5_expand_relu, (1, 1, 1, 1))
        block_5_depthwise = self.block_5_depthwise(block_5_depthwise_pad)
        block_5_depthwise_BN = self.block_5_depthwise_BN(block_5_depthwise)
        block_5_depthwise_relu = F.relu6(block_5_depthwise_BN)
        block_5_project = self.block_5_project(block_5_depthwise_relu)
        block_5_project_BN = self.block_5_project_BN(block_5_project)
        block_5_add = block_4_add + block_5_project_BN
        block_6_expand = self.block_6_expand(block_5_add)
        block_6_expand_BN = self.block_6_expand_BN(block_6_expand)
        block_6_expand_relu = F.relu6(block_6_expand_BN)
        block_6_pad = F.pad(block_6_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_6_depthwise = self.block_6_depthwise(block_6_pad)
        block_6_depthwise_BN = self.block_6_depthwise_BN(block_6_depthwise)
        block_6_depthwise_relu = F.relu6(block_6_depthwise_BN)
        block_6_project = self.block_6_project(block_6_depthwise_relu)
        block_6_project_BN = self.block_6_project_BN(block_6_project)
        block_7_expand = self.block_7_expand(block_6_project_BN)
        block_7_expand_BN = self.block_7_expand_BN(block_7_expand)
        block_7_expand_relu = F.relu6(block_7_expand_BN)
        block_7_depthwise_pad = F.pad(block_7_expand_relu, (1, 1, 1, 1))
        block_7_depthwise = self.block_7_depthwise(block_7_depthwise_pad)
        block_7_depthwise_BN = self.block_7_depthwise_BN(block_7_depthwise)
        block_7_depthwise_relu = F.relu6(block_7_depthwise_BN)
        block_7_project = self.block_7_project(block_7_depthwise_relu)
        block_7_project_BN = self.block_7_project_BN(block_7_project)
        block_7_add = block_6_project_BN + block_7_project_BN
        block_8_expand = self.block_8_expand(block_7_add)
        block_8_expand_BN = self.block_8_expand_BN(block_8_expand)
        block_8_expand_relu = F.relu6(block_8_expand_BN)
        block_8_depthwise_pad = F.pad(block_8_expand_relu, (1, 1, 1, 1))
        block_8_depthwise = self.block_8_depthwise(block_8_depthwise_pad)
        block_8_depthwise_BN = self.block_8_depthwise_BN(block_8_depthwise)
        block_8_depthwise_relu = F.relu6(block_8_depthwise_BN)
        block_8_project = self.block_8_project(block_8_depthwise_relu)
        block_8_project_BN = self.block_8_project_BN(block_8_project)
        block_8_add = block_7_add + block_8_project_BN
        block_9_expand = self.block_9_expand(block_8_add)
        block_9_expand_BN = self.block_9_expand_BN(block_9_expand)
        block_9_expand_relu = F.relu6(block_9_expand_BN)
        block_9_depthwise_pad = F.pad(block_9_expand_relu, (1, 1, 1, 1))
        block_9_depthwise = self.block_9_depthwise(block_9_depthwise_pad)
        block_9_depthwise_BN = self.block_9_depthwise_BN(block_9_depthwise)
        block_9_depthwise_relu = F.relu6(block_9_depthwise_BN)
        block_9_project = self.block_9_project(block_9_depthwise_relu)
        block_9_project_BN = self.block_9_project_BN(block_9_project)
        block_9_add = block_8_add + block_9_project_BN
        block_10_expand = self.block_10_expand(block_9_add)
        block_10_expand_BN = self.block_10_expand_BN(block_10_expand)
        block_10_expand_relu = F.relu6(block_10_expand_BN)
        block_10_depthwise_pad = F.pad(block_10_expand_relu, (1, 1, 1, 1))
        block_10_depthwise = self.block_10_depthwise(block_10_depthwise_pad)
        block_10_depthwise_BN = self.block_10_depthwise_BN(block_10_depthwise)
        block_10_depthwise_relu = F.relu6(block_10_depthwise_BN)
        block_10_project = self.block_10_project(block_10_depthwise_relu)
        block_10_project_BN = self.block_10_project_BN(block_10_project)
        block_11_expand = self.block_11_expand(block_10_project_BN)
        block_11_expand_BN = self.block_11_expand_BN(block_11_expand)
        block_11_expand_relu = F.relu6(block_11_expand_BN)
        block_11_depthwise_pad = F.pad(block_11_expand_relu, (1, 1, 1, 1))
        block_11_depthwise = self.block_11_depthwise(block_11_depthwise_pad)
        block_11_depthwise_BN = self.block_11_depthwise_BN(block_11_depthwise)
        block_11_depthwise_relu = F.relu6(block_11_depthwise_BN)
        block_11_project = self.block_11_project(block_11_depthwise_relu)
        block_11_project_BN = self.block_11_project_BN(block_11_project)
        block_11_add = block_10_project_BN + block_11_project_BN
        block_12_expand = self.block_12_expand(block_11_add)
        block_12_expand_BN = self.block_12_expand_BN(block_12_expand)
        block_12_expand_relu = F.relu6(block_12_expand_BN)
        block_12_depthwise_pad = F.pad(block_12_expand_relu, (1, 1, 1, 1))
        block_12_depthwise = self.block_12_depthwise(block_12_depthwise_pad)
        block_12_depthwise_BN = self.block_12_depthwise_BN(block_12_depthwise)
        block_12_depthwise_relu = F.relu6(block_12_depthwise_BN)
        block_12_project = self.block_12_project(block_12_depthwise_relu)
        block_12_project_BN = self.block_12_project_BN(block_12_project)
        block_12_add = block_11_add + block_12_project_BN
        block_13_expand = self.block_13_expand(block_12_add)
        block_13_expand_BN = self.block_13_expand_BN(block_13_expand)
        block_13_expand_relu = F.relu6(block_13_expand_BN)
        block_13_pad = F.pad(block_13_expand_relu, (0, 1, 0, 1), mode = 'constant', value = 0)
        block_13_depthwise = self.block_13_depthwise(block_13_pad)
        block_13_depthwise_BN = self.block_13_depthwise_BN(block_13_depthwise)
        block_13_depthwise_relu = F.relu6(block_13_depthwise_BN)
        block_13_project = self.block_13_project(block_13_depthwise_relu)
        block_13_project_BN = self.block_13_project_BN(block_13_project)
        block_14_expand = self.block_14_expand(block_13_project_BN)
        block_14_expand_BN = self.block_14_expand_BN(block_14_expand)
        block_14_expand_relu = F.relu6(block_14_expand_BN)
        block_14_depthwise_pad = F.pad(block_14_expand_relu, (1, 1, 1, 1))
        block_14_depthwise = self.block_14_depthwise(block_14_depthwise_pad)
        block_14_depthwise_BN = self.block_14_depthwise_BN(block_14_depthwise)
        block_14_depthwise_relu = F.relu6(block_14_depthwise_BN)
        block_14_project = self.block_14_project(block_14_depthwise_relu)
        block_14_project_BN = self.block_14_project_BN(block_14_project)
        block_14_add = block_13_project_BN + block_14_project_BN
        block_15_expand = self.block_15_expand(block_14_add)
        block_15_expand_BN = self.block_15_expand_BN(block_15_expand)
        block_15_expand_relu = F.relu6(block_15_expand_BN)
        block_15_depthwise_pad = F.pad(block_15_expand_relu, (1, 1, 1, 1))
        block_15_depthwise = self.block_15_depthwise(block_15_depthwise_pad)
        block_15_depthwise_BN = self.block_15_depthwise_BN(block_15_depthwise)
        block_15_depthwise_relu = F.relu6(block_15_depthwise_BN)
        block_15_project = self.block_15_project(block_15_depthwise_relu)
        block_15_project_BN = self.block_15_project_BN(block_15_project)
        block_15_add = block_14_add + block_15_project_BN
        block_16_expand = self.block_16_expand(block_15_add)
        block_16_expand_BN = self.block_16_expand_BN(block_16_expand)
        block_16_expand_relu = F.relu6(block_16_expand_BN)
        block_16_depthwise_pad = F.pad(block_16_expand_relu, (1, 1, 1, 1))
        block_16_depthwise = self.block_16_depthwise(block_16_depthwise_pad)
        block_16_depthwise_BN = self.block_16_depthwise_BN(block_16_depthwise)
        block_16_depthwise_relu = F.relu6(block_16_depthwise_BN)
        block_16_project = self.block_16_project(block_16_depthwise_relu)
        block_16_project_BN = self.block_16_project_BN(block_16_project)
        # Head: 1x1 conv to 1280 channels, ReLU6, global average pooling
        # over the remaining spatial dims, then flatten to (batch, channels).
        Conv_1 = self.Conv_1(block_16_project_BN)
        Conv_1_bn = self.Conv_1_bn(Conv_1)
        out_relu = F.relu6(Conv_1_bn)
        global_average_pooling2d_1 = F.avg_pool2d(input = out_relu, kernel_size = out_relu.size()[2:])
        global_average_pooling2d_1_flatten = global_average_pooling2d_1.view(global_average_pooling2d_1.size(0), -1)
        return global_average_pooling2d_1_flatten
    @staticmethod
    def __batch_normalization(dim, name, **kwargs):
        """Create an nn.BatchNorm{1,2,3}d layer and load its parameters.

        Parameters come from the module-level weights dict keyed by *name*:
        'scale' -> weight and 'bias' -> bias (each optional, defaulting to
        1 resp. 0), plus the mandatory 'mean'/'var' running statistics.

        NOTE(review): '__weights_dict' is name-mangled inside this class;
        this presumably works because the (out-of-view) loader creates the
        global under the same mangled name -- confirm against the full file.
        """
        if dim == 1: layer = nn.BatchNorm1d(**kwargs)
        elif dim == 2: layer = nn.BatchNorm2d(**kwargs)
        elif dim == 3: layer = nn.BatchNorm3d(**kwargs)
        else: raise NotImplementedError()
        if 'scale' in __weights_dict[name]:
            layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
        else:
            layer.weight.data.fill_(1)
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        else:
            layer.bias.data.fill_(0)
        # Running statistics are always expected to be present.
        layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
        layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
        return layer
    @staticmethod
    def __conv(dim, name, **kwargs):
        """Create an nn.Conv{1,2,3}d layer and load its pretrained weights.

        Weights come from the module-level weights dict keyed by *name*;
        'bias' is optional (the conv layers above are built with bias=False).
        """
        if dim == 1: layer = nn.Conv1d(**kwargs)
        elif dim == 2: layer = nn.Conv2d(**kwargs)
        elif dim == 3: layer = nn.Conv3d(**kwargs)
        else: raise NotImplementedError()
        # In-place copy_ keeps the parameter objects registered on the layer.
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer
| none | 1 | 2.483349 | 2 | |
src/Python/1-100/24.ListSwapPairs.py | Peefy/PeefyLeetCode | 2 | 6613562 | # Definition for singly-linked list.
class ListNode:
    """One node of a singly linked list."""
    def __init__(self, x):
        self.val = x  # stored value
        self.next = None  # successor node; None marks the list tail
class Solution:
    """LeetCode 24 -- swap every pair of adjacent nodes in a linked list."""

    def swapPairs(self, head):
        """Swap adjacent node pairs and return the new head.

        :type head: ListNode
        :rtype: ListNode
        """
        # Fewer than two nodes remaining: nothing left to swap.
        if head is None or head.next is None:
            return head
        first, second = head, head.next
        # Recurse on the rest of the list, then splice the swapped pair
        # in front of the already-processed remainder.
        first.next = self.swapPairs(second.next)
        second.next = first
        return second
if __name__ == "__main__":
    # Ad-hoc demo: build the list 1 -> 2 -> 3 -> 4 and swap adjacent pairs.
    # The print below shows the default object repr of the new head node
    # (ListNode defines no __repr__), not the list contents.
    solution = Solution()
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(4)
print(solution.swapPairs(head)) | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is not None and head.next is not None:
head.next.next = self.swapPairs(head.next.next)
second = head.next
head.next = second.next
second.next = head
return second
return head
if __name__ == "__main__":
solution = Solution()
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
print(solution.swapPairs(head)) | en | 0.548726 | # Definition for singly-linked list. :type head: ListNode :rtype: ListNode | 3.806587 | 4 |
repokid/tests/test_logging.py | rezamt/repokid | 0 | 6613563 | from mock import patch
from repokid.utils.logging import JSONFormatter
class MockRecord(object):
    """Stand-in for logging.LogRecord with fixed, predictable field values."""

    def __init__(self, message):
        self.message = message
        # 2020-01-15T22:57:09 UTC, expressed in epoch seconds.
        self.created = 1579129029
        self.levelname = "INFO"
        self.name = "repokid_test"
        self.process = 12345
        self.threadName = "MainThread"
        self.exc_info = None
        self.filename = "hack_the_planet.py"
        self.funcName = "exploit"
        self.lineno = 42

    def getMessage(self):
        """Mimic logging.LogRecord.getMessage (no %-interpolation here)."""
        return self.message
class TestLogging(object):
    """Tests for repokid's JSONFormatter output."""
    formatter = JSONFormatter()
    # Pin the hostname so the expected JSON below is machine-independent.
    formatter.hostname = "test_host"
    def test_format(self):
        """A plain record is serialized to a flat JSON object."""
        record = MockRecord("Hi there!")
        result = self.formatter.format(record)
        expected = """{"time": "2020-01-15T22:57:09", "level": "INFO", "name": "repokid_test", "message": "Hi there!", "process": 12345, "thread": "MainThread", "hostname": "test_host", "filename": "hack_the_planet.py", "function": "exploit", "lineNo": 42}"""  # noqa: E501
        assert result == expected
    def test_format_with_exception(self):
        """A record with exc_info gains 'exception' and 'traceback' fields."""
        record = MockRecord("Hi there!")
        record.exc_info = (
            AttributeError,
            AttributeError("you did a wrong thing"),
            None,
        )
        # Stub traceback.format_exc -- there is no real active exception.
        with patch("traceback.format_exc", return_value="this is totally a traceback"):
            result = self.formatter.format(record)
        expected = """{"time": "2020-01-15T22:57:09", "level": "INFO", "name": "repokid_test", "message": "Hi there!", "process": 12345, "thread": "MainThread", "hostname": "test_host", "filename": "hack_the_planet.py", "function": "exploit", "lineNo": 42, "exception": "AttributeError: you did a wrong thing", "traceback": "this is totally a traceback"}"""  # noqa: E501
        assert result == expected
| from mock import patch
from repokid.utils.logging import JSONFormatter
class MockRecord(object):
def __init__(self, message):
self.created = 1579129029
self.levelname = "INFO"
self.name = "repokid_test"
self.message = message
self.process = 12345
self.threadName = "MainThread"
self.exc_info = None
self.filename = "hack_the_planet.py"
self.funcName = "exploit"
self.lineno = 42
def getMessage(self):
return self.message
class TestLogging(object):
formatter = JSONFormatter()
formatter.hostname = "test_host"
def test_format(self):
record = MockRecord("Hi there!")
result = self.formatter.format(record)
expected = """{"time": "2020-01-15T22:57:09", "level": "INFO", "name": "repokid_test", "message": "Hi there!", "process": 12345, "thread": "MainThread", "hostname": "test_host", "filename": "hack_the_planet.py", "function": "exploit", "lineNo": 42}""" # noqa: E501
assert result == expected
def test_format_with_exception(self):
record = MockRecord("Hi there!")
record.exc_info = (
AttributeError,
AttributeError("you did a wrong thing"),
None,
)
with patch("traceback.format_exc", return_value="this is totally a traceback"):
result = self.formatter.format(record)
expected = """{"time": "2020-01-15T22:57:09", "level": "INFO", "name": "repokid_test", "message": "Hi there!", "process": 12345, "thread": "MainThread", "hostname": "test_host", "filename": "hack_the_planet.py", "function": "exploit", "lineNo": 42, "exception": "AttributeError: you did a wrong thing", "traceback": "this is totally a traceback"}""" # noqa: E501
assert result == expected
| en | 0.470639 | {"time": "2020-01-15T22:57:09", "level": "INFO", "name": "repokid_test", "message": "Hi there!", "process": 12345, "thread": "MainThread", "hostname": "test_host", "filename": "hack_the_planet.py", "function": "exploit", "lineNo": 42} # noqa: E501 {"time": "2020-01-15T22:57:09", "level": "INFO", "name": "repokid_test", "message": "Hi there!", "process": 12345, "thread": "MainThread", "hostname": "test_host", "filename": "hack_the_planet.py", "function": "exploit", "lineNo": 42, "exception": "AttributeError: you did a wrong thing", "traceback": "this is totally a traceback"} # noqa: E501 | 2.841055 | 3 |
1_beginner/chapter2/solutions/print_data_types.py | code4tomorrow/Python | 4 | 6613564 | <filename>1_beginner/chapter2/solutions/print_data_types.py
# Print Data Types
# Come up with 3 examples each of
# floating numbers, integers, and strings and print them.
# floats: includes a whole-valued float (32.0) and a negative one
print(1.56)
print(32.0)
print(-35.25)
# integers: positive, zero, and negative
print(25)
print(0)
print(-1)
# strings: quoted text, including an embedded apostrophe
print("Tahiti, it's a magical place")
print("May the Force be with you")
print("Hey guys")
| <filename>1_beginner/chapter2/solutions/print_data_types.py
# Print Data Types
# Come up with 3 examples each of
# floating numbers, integers, and strings and print them.
# floats
print(1.56)
print(32.0)
print(-35.25)
# integers
print(25)
print(0)
print(-1)
# strings
print("Tahiti, it's a magical place")
print("May the Force be with you")
print("Hey guys")
| en | 0.817164 | # Print Data Types # Come up with 3 examples each of # floating numbers, integers, and strings and print them. # floats # integers # strings | 4.406107 | 4 |
Cards/views/files.py | vabene1111/LearningCards | 1 | 6613565 | <gh_stars>1-10
from io import BytesIO
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from xhtml2pdf import pisa
from Cards.helper import course_helper
from Cards.models import Course
def render_to_pdf(template_src, context_dict=None):
    """Render a Django template to a PDF HTTP response via xhtml2pdf.

    Args:
        template_src: template name/path resolvable by Django's loader.
        context_dict: optional template context; defaults to an empty dict.

    Returns:
        HttpResponse with content_type 'application/pdf', or None when
        xhtml2pdf reports a generation error (pdf.err) -- callers must
        handle the None case.
    """
    # Use a None sentinel instead of a mutable default argument ({}),
    # which would be shared across all calls.
    if context_dict is None:
        context_dict = {}
    template = get_template(template_src)
    html = template.render(context_dict)
    result = BytesIO()
    pdf = pisa.pisaDocument(BytesIO(html.encode("utf8")), result, encoding='utf8')
    if not pdf.err:
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    return None
def export_course(request, pk):
    """Export a course's questions as a PDF (404 if the course is missing).

    NOTE(review): render_to_pdf may return None on PDF-generation errors;
    this view would then return None instead of a response -- TODO confirm
    whether that case needs explicit handling.
    """
    course = get_object_or_404(Course, pk=pk)
    # format='object' -- presumably yields chapter objects; see course_helper.
    chapters = course_helper.get_chapters(course, format='object')
    return render_to_pdf('export_question_pdf.html', {'course': course, 'chapters': chapters})
| from io import BytesIO
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from xhtml2pdf import pisa
from Cards.helper import course_helper
from Cards.models import Course
def render_to_pdf(template_src, context_dict={}):
template = get_template(template_src)
html = template.render(context_dict)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("utf8")), result, encoding='utf8')
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return None
def export_course(request, pk):
course = get_object_or_404(Course, pk=pk)
chapters = course_helper.get_chapters(course, format='object')
return render_to_pdf('export_question_pdf.html', {'course': course, 'chapters': chapters}) | none | 1 | 2.152099 | 2 | |
supporting-layer/uiux-authoring-tool/accounts/apps.py | taqdirali/Mining-Minds | 42 | 6613566 | """
# UI/UX Authoring Tool
# @license http://www.apache.org/licenses/LICENSE-2.0
# Author @ <NAME>
"""
from django.apps import AppConfig
class AccountsConfig(AppConfig):
    """Django application configuration for the 'accounts' app."""
    name = 'accounts'
| """
# UI/UX Authoring Tool
# @license http://www.apache.org/licenses/LICENSE-2.0
# Author @ <NAME>
"""
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'accounts'
| en | 0.537743 | # UI/UX Authoring Tool # @license http://www.apache.org/licenses/LICENSE-2.0 # Author @ <NAME> | 1.566265 | 2 |
src/python/backends/py/runtime/state/queue.py | andyjost/Sprite | 1 | 6613567 | import collections
__all__ = ['Queue']
class Queue(collections.deque):
    """A deque tagged with an optional identifier (``sid``).

    The tag is passed as the keyword argument ``sid`` and survives both
    ``copy()`` and ``copy.copy()``.
    """

    def __init__(self, *args, **kwds):
        # Strip our keyword before handing the rest to deque.
        self.sid = kwds.pop('sid', None)
        super().__init__(*args, **kwds)

    def __copy__(self):
        # deque.__copy__ preserves contents and maxlen; re-attach the tag.
        duplicate = super().__copy__()
        duplicate.sid = self.sid
        return duplicate

    def copy(self):
        """Return a shallow copy that keeps the ``sid`` tag."""
        return self.__copy__()
| import collections
__all__ = ['Queue']
class Queue(collections.deque):
def __init__(self, *args, **kwds):
sid = kwds.pop('sid', None)
collections.deque.__init__(self, *args, **kwds)
self.sid = sid
def __copy__(self):
cp = super(Queue, self).__copy__()
cp.sid = self.sid
return cp
def copy(self):
return self.__copy__()
| none | 1 | 3.015526 | 3 | |
python-flask-mysql/app.py | Mikael3001/BCC2 | 0 | 6613568 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in this URI -- move
# them into environment variables or a config file before sharing/deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://felipebasnun:<EMAIL>/felipebasnun$primeiro'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # disable modification-tracking overhead
db = SQLAlchemy(app)
# Relational (the database side)
# Object-oriented (the Python side)
# ORM = Object Relational Mapping
# (Portuguese: Mapeamento Objeto Relacional)
class Usuario(db.Model):
    """ORM model for an application user."""
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    nome = db.Column(db.String(80), unique=True, nullable=False)  # unique user name
    email = db.Column(db.String(120), unique=True, nullable=False)  # unique e-mail address
@app.route('/')
def index():
    """Root endpoint; returns a static greeting."""
    return 'Hello world!'
@app.route('/add/<nnome>/<nemail>')
def add(nnome, nemail):
    """Create a new user from the URL path segments.

    NOTE(review): no input validation or duplicate handling -- repeating a
    nome/email violates the unique constraints declared on Usuario and the
    commit will fail with a server error; TODO confirm desired behavior.
    """
    novousuario = Usuario(nome=nnome, email=nemail)
    db.session.add(novousuario)
    db.session.commit()
    return "Foi"
@app.route('/listaTudo')
def listaTudo():
    """List every user as an HTML fragment, one '<br>'-terminated line each."""
    usuarios = Usuario.query.all()
    # str.join avoids the quadratic cost of repeated string concatenation;
    # the produced text is identical to the previous += loop.
    return ''.join(
        'Nome: ' + usuario.nome + ' email: ' + usuario.email + '<br>'
        for usuario in usuarios
    )
@app.route('/qualEmail/<nnome>')
def busca(nnome):
    """Return the e-mail address of the user with the given name.

    NOTE(review): .first() returns None when no user matches, so
    quem.email raises AttributeError (HTTP 500) -- consider a 404 instead.
    """
    quem = Usuario.query.filter_by(nome=nnome).first()
    return quem.email
@app.route('/delete/<nnome>')
def delete(nnome):
    """Delete the user with the given name.

    NOTE(review): .first() returns None when no user matches, and passing
    None to db.session.delete fails with a server error -- consider a 404.
    """
    quem = Usuario.query.filter_by(nome=nnome).first()
    db.session.delete(quem)
    db.session.commit()
    return "Deletei"
@app.route('/atualiza/<nnomeAntigo>/<nnome>')
def atualiza(nnomeAntigo, nnome):
    """Rename the user 'nnomeAntigo' to 'nnome'.

    NOTE(review): crashes with AttributeError (HTTP 500) when no user named
    nnomeAntigo exists -- consider returning a 404 instead.
    """
    quem = Usuario.query.filter_by(nome=nnomeAntigo).first()
    quem.nome = nnome
    db.session.add(quem)
    db.session.commit()
    return "Atualizei"
# Create database tables for all models declared above (skips existing ones).
db.create_all()
| from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://felipebasnun:<EMAIL>/felipebasnun$primeiro'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Relacional
# Orientada a Objeto
# ORM = Object Relational Mapping
# Mapeamento Objeto Relacional
class Usuario(db.Model):
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
@app.route('/')
def index():
return 'Hello world!'
@app.route('/add/<nnome>/<nemail>')
def add(nnome, nemail):
novousuario = Usuario(nome=nnome, email=nemail)
db.session.add(novousuario)
db.session.commit()
return "Foi"
@app.route('/listaTudo')
def listaTudo():
usuarios = Usuario.query.all()
resposta = ''
for usuario in usuarios:
resposta = resposta + 'Nome: '+usuario.nome+' email: '+usuario.email+'<br>'
return resposta
@app.route('/qualEmail/<nnome>')
def busca(nnome):
quem = Usuario.query.filter_by(nome=nnome).first()
return quem.email
@app.route('/delete/<nnome>')
def delete(nnome):
quem = Usuario.query.filter_by(nome=nnome).first()
db.session.delete(quem)
db.session.commit()
return "Deletei"
@app.route('/atualiza/<nnomeAntigo>/<nnome>')
def atualiza(nnomeAntigo, nnome):
quem = Usuario.query.filter_by(nome=nnomeAntigo).first()
quem.nome = nnome
db.session.add(quem)
db.session.commit()
return "Atualizei"
db.create_all()
| es | 0.360641 | # Relacional # Orientada a Objeto # ORM = Object Relational Mapping # Mapeamento Objeto Relacional | 3.165369 | 3 |
scripts/balancing/visualizer/visualizing/model.py | Lesstat/osmgraphing | 14 | 6613569 | <reponame>Lesstat/osmgraphing<filename>scripts/balancing/visualizer/visualizing/model.py
import os
import filecmp
import numpy as np
import csv
from visualizing.simulating import Simulation
class GlobalData():
    """Aggregate statistics that span every iteration of a simulation."""

    def __init__(self):
        # Lazily filled by fill(); stays None until an iteration ran.
        self._max_workload = None

    @property
    def max_workload(self):
        """Largest workload seen over all iterations (None if never filled)."""
        return self._max_workload

    @staticmethod
    def fill(sim: Simulation):
        """Walk every iteration of *sim* and record the global maximum."""
        result = GlobalData()
        data = Data(result)
        for _ in range(sim.num_iter):
            data.prepare_new_iteration(sim=sim)
            current_max = data.workloads.max
            if result._max_workload is None or current_max > result._max_workload:
                result._max_workload = current_max
        return result
class Values():
    '''
    A raw list of numbers plus lazily computed summary statistics.

    Assigning to ``raw`` invalidates every cached statistic; they are
    recomputed on first access.  Mutating the list in place (for example
    ``values.raw.append(...)``) does NOT invalidate the cache.
    '''

    def __init__(self):
        self.raw = []

    @property
    def raw(self):
        return self._raw

    @property
    def raw_nz(self):
        '''The raw values with all non-positive entries filtered out.'''
        return [value for value in self._raw if value > 0.0]

    @raw.setter
    def raw(self, new_raw):
        self._raw = new_raw
        # Drop every cached statistic; recomputed lazily below.
        self._min = self._max = self._center = self._mean = self._std = None

    @property
    def min(self):
        if self._min is None:
            self._min = np.min(self._raw)
        return self._min

    @property
    def center(self):
        '''Midpoint between min and max.'''
        if self._center is None:
            self._center = (self.min + self.max) / 2.0
        return self._center

    @property
    def max(self):
        if self._max is None:
            self._max = np.max(self._raw)
        return self._max

    @property
    def mean(self):
        if self._mean is None:
            self._mean = np.mean(self._raw)
        return self._mean

    @property
    def std(self):
        '''Population standard deviation (numpy default, ddof=0).'''
        if self._std is None:
            self._std = np.std(self._raw)
        return self._std
class Data():
    '''
    Per-iteration simulation data: edge midpoints, lengths, lane counts,
    and the workloads of the current and previous iteration.
    '''

    def __init__(self, global_data):
        self._iteration = -1
        self._lats = Values()
        self._lons = Values()
        self._kilometers = Values()
        self._lane_counts = Values()
        self._old_workloads = Values()
        self._workloads = Values()
        self._delta_workloads = None
        self._global_data = global_data

    def prepare_new_iteration(self, sim: Simulation):
        '''Advance to the next iteration and (re)load its workload data.'''
        self._iteration += 1
        # Swap the raw lists so the current workloads become 'old' and the
        # previous 'old' list is recycled as a cleared buffer for the new
        # values.  Assigning to .raw also resets the cached statistics.
        tmp = self.old_workloads.raw
        self.old_workloads.raw = self.workloads.raw
        self.workloads.raw = tmp
        self.workloads.raw.clear()
        self._delta_workloads = None
        if self.iteration == 0:
            # Edge meta-data is identical across iterations (see
            # check_for_equal_edge_files), so it is read only once.
            # self.check_for_equal_edge_files(sim=sim)
            self.read_in_edge_info(sim=sim)
        self.read_in_workloads(sim=sim)

    def path_to_edge_info(self, iteration=None):
        '''Relative path to the edge-info CSV of the given iteration.'''
        if iteration is None:
            iteration = self.iteration
        return os.path.join(f'{iteration}', 'stats', 'edges-info.csv')

    def path_to_abs_workloads(self, iteration=None):
        '''Relative path to the absolute-workloads CSV of the given iteration.'''
        if iteration is None:
            iteration = self.iteration
        # BUGFIX: honor the explicit iteration argument instead of always
        # falling back to self.iteration.
        return os.path.join(f'{iteration}', 'stats', 'abs_workloads.csv')

    def path_to_new_metrics(self, iteration=None):
        '''Relative path to the new-metrics CSV of the given iteration.'''
        if iteration is None:
            iteration = self.iteration
        # BUGFIX: same as above -- the argument was previously ignored.
        return os.path.join(f'{iteration}', 'stats', 'new_metrics.csv')

    @property
    def global_data(self):
        return self._global_data

    @property
    def iteration(self):
        return self._iteration

    @property
    def lats(self):
        # Latitude of each edge's midpoint, in edge order.
        return self._lats

    @property
    def lons(self):
        # Longitude of each edge's midpoint, in edge order.
        return self._lons

    @property
    def kilometers(self):
        return self._kilometers

    @property
    def lane_counts(self):
        return self._lane_counts

    def volume(self, edge_idx: int) -> float:
        '''
        Capacity estimate of an edge.

        Nagel-Schreckenberg model: one vehicle occupies 7.5 m, so an edge
        holds length/0.0075 vehicles per lane (at least one vehicle).
        '''
        num_vehicles = max(1.0, self._kilometers.raw[edge_idx] / 0.0075)
        return num_vehicles * self._lane_counts.raw[edge_idx]

    def volumes(self) -> list:
        '''Volume of every edge, in edge order.'''
        return list(map(self.volume, range(len(self._kilometers.raw))))

    @property
    def old_workloads(self):
        return self._old_workloads

    @property
    def workloads(self):
        return self._workloads

    def sorted_lon_lat_workloads(self):
        '''(lon, lat, workload) triples, ascending by workload.'''
        return np.array(sorted(
            ([lon, lat, load] for lon, lat, load in zip(
                self.lons.raw, self.lats.raw, self.workloads.raw)),
            key=lambda entry: entry[2]
        ))

    def sorted_lon_lat_deltas(self):
        '''(lon, lat, delta-workload) triples, ascending by delta.'''
        return np.array(sorted(
            ([lon, lat, delta] for lon, lat, delta in zip(
                self.lons.raw, self.lats.raw, self.delta_workloads.raw)),
            key=lambda entry: entry[2]
        ))

    def abs_sorted_lon_lat_deltas(self):
        '''(lon, lat, delta-workload) triples, ascending by |delta|.'''
        return np.array(sorted(
            ([lon, lat, delta] for lon, lat, delta in zip(
                self.lons.raw, self.lats.raw, self.delta_workloads.raw)),
            key=lambda entry: abs(entry[2])
        ))

    @property
    def delta_workloads(self):
        '''Per-edge workload difference (new - old), computed lazily.'''
        if self._delta_workloads is None:
            self._delta_workloads = Values()
            self._delta_workloads.raw = [
                new - old
                for new, old in zip(self.workloads.raw, self.old_workloads.raw)
            ]
        return self._delta_workloads

    def check_for_equal_edge_files(self, sim: Simulation):
        '''
        Verify that every iteration's edge-info file is byte-identical.

        If this fails, the rows of edges from iteration `i` don't fit to
        the rows of edges from iteration `i+1`.
        '''
        last_file = os.path.join(
            sim.results_dir,
            self.path_to_edge_info(0)
        )
        for i in range(1, sim.num_iter):
            next_file = os.path.join(
                sim.results_dir,
                self.path_to_edge_info(i)
            )
            if not filecmp.cmp(last_file, next_file, shallow=False):
                raise RuntimeError(
                    f'The edge-info {i} isn\'t equal to edge-info {i-1}.'
                )
            last_file = next_file

    def read_in_edge_info(self, sim: Simulation):
        '''Read static per-edge data (midpoint, length, lanes), edge-id order.'''
        coords_csv_path = os.path.join(
            f'{sim.results_dir}',
            self.path_to_edge_info()
        )
        with open(coords_csv_path, mode='r') as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=' ')
            # Read unsorted rows, reducing each edge to its midpoint.
            edges_info = []
            for row in csv_reader:
                edge_id = int(row['edge-id'])
                src_lat = float(row['src-lat'])
                src_lon = float(row['src-lon'])
                dst_lat = float(row['dst-lat'])
                dst_lon = float(row['dst-lon'])
                kilometers = float(row['kilometers'])
                lane_count = float(row['lane-count'])
                edges_info.append((
                    edge_id,
                    (src_lat + dst_lat) / 2.0,
                    (src_lon + dst_lon) / 2.0,
                    kilometers,
                    lane_count
                ))
            # Sort by edge-id so all per-edge lists share the same order.
            edges_info.sort(key=lambda edge_info: edge_info[0])
            for (
                _edge_id, mid_lat, mid_lon, kilometers, lane_count
            ) in edges_info:
                self.lats.raw.append(mid_lat)
                self.lons.raw.append(mid_lon)
                self.kilometers.raw.append(kilometers)
                self.lane_counts.raw.append(lane_count)

    def read_in_workloads(self, sim: Simulation):
        '''Read this iteration's absolute workloads, sorted by edge-id.'''
        workloads_csv_path = os.path.join(
            f'{sim.results_dir}',
            self.path_to_abs_workloads()
        )
        unsorted_values = []
        with open(workloads_csv_path, mode='r') as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=' ')
            for row in csv_reader:
                unsorted_values.append((
                    int(row['edge-id']),
                    int(row['num_routes'])
                ))
        # Sort by edge-id to match the order of the edge-info lists.
        unsorted_values.sort(key=lambda val: val[0])
        # Absolute route counts; normalization by self.volume() was
        # disabled in the original code and stays disabled here.
        for _edge_id, value in unsorted_values:
            self.workloads.raw.append(value)

    def _read_in_new_metrics(self, sim: Simulation):
        '''Read this iteration's new metrics into workloads, edge-id order.'''
        workloads_csv_path = os.path.join(
            sim.results_dir,
            self.path_to_new_metrics()
        )
        unsorted_values = []
        with open(workloads_csv_path, mode='r') as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=' ')
            for row in csv_reader:
                unsorted_values.append((
                    int(row['edge-id']),
                    float(row['new_metrics'])
                ))
        # Sort by edge-id to match the order of the edge-info lists.
        unsorted_values.sort(key=lambda val: val[0])
        for (_edge_id, value) in unsorted_values:
            self.workloads.raw.append(value)
| import os
import filecmp
import numpy as np
import csv
from visualizing.simulating import Simulation
class GlobalData():
def __init__(self):
self._max_workload = None
@property
def max_workload(self):
return self._max_workload
@staticmethod
def fill(sim: Simulation):
global_data = GlobalData()
data = Data(global_data)
for i in range(sim.num_iter):
data.prepare_new_iteration(sim=sim)
if global_data._max_workload is None:
global_data._max_workload = data.workloads.max
else:
if data.workloads.max > global_data._max_workload:
global_data._max_workload = data.workloads.max
return global_data
class Values():
    '''
    Lazily computed summary statistics over a list of raw numbers.

    Assigning to ``raw`` invalidates every cached statistic; each one is
    computed on first access and memoised afterwards.
    '''

    def __init__(self):
        self.raw = []

    @property
    def raw(self):
        # the underlying list of values
        return self._raw

    @property
    def raw_nz(self):
        # only the strictly positive entries of ``raw``
        return [w for w in self._raw if w > 0.0]

    @raw.setter
    def raw(self, new_raw):
        # replacing the data drops all memoised statistics
        self._raw = new_raw
        self._min = None
        self._max = None
        self._center = None
        self._mean = None
        self._std = None

    def _memo(self, attr, compute):
        # generic lazy-evaluation helper for the cached statistics
        if getattr(self, attr) is None:
            setattr(self, attr, compute(self._raw))
        return getattr(self, attr)

    @property
    def min(self):
        return self._memo('_min', np.min)

    @property
    def center(self):
        # midpoint of the value range, derived from min and max
        if self._center is None:
            self._center = (self.min + self.max) / 2.0
        return self._center

    @property
    def max(self):
        return self._memo('_max', np.max)

    @property
    def mean(self):
        return self._memo('_mean', np.mean)

    @property
    def std(self):
        return self._memo('_std', np.std)
class Data():
    '''
    Per-iteration edge data of a simulation run.

    Holds coordinates, lengths and lane counts of all edges plus the
    workload values of the current and the previous iteration.
    '''
    def __init__(self, global_data):
        # -1 so that the first prepare_new_iteration() call yields iteration 0
        self._iteration = -1
        self._lats = Values()            # mid-point latitudes per edge
        self._lons = Values()            # mid-point longitudes per edge
        self._kilometers = Values()      # edge lengths in kilometers
        self._lane_counts = Values()     # lanes per edge
        self._old_workloads = Values()   # workloads of the previous iteration
        self._workloads = Values()       # workloads of the current iteration
        self._delta_workloads = None     # lazily computed (new - old) workloads
        self._global_data = global_data
def prepare_new_iteration(self, sim: Simulation):
    '''Advance to the next iteration and (re)load its data from disk.'''
    self._iteration += 1
    # reset all current data: swap the two workload containers so that the
    # current values become the old ones, then clear the reused list
    tmp = self.old_workloads.raw
    self.old_workloads.raw = self.workloads.raw
    self.workloads.raw = tmp
    self.workloads.raw.clear()
    self._delta_workloads = None
    # continue TODO
    if self.iteration == 0:
        # edge geometry is identical in every iteration, so read it only once
        # self.check_for_equal_edge_files(sim=sim)
        self.read_in_edge_info(sim=sim)
    self.read_in_workloads(sim=sim)
def path_to_edge_info(self, iteration=None):
    """Relative path of the edge-info CSV for *iteration* (default: current)."""
    chosen = self.iteration if iteration is None else iteration
    return os.path.join(str(chosen), 'stats', 'edges-info.csv')
def path_to_abs_workloads(self, iteration=None):
    """Relative path of the absolute-workloads CSV for *iteration*.

    Defaults to the current iteration when *iteration* is None.
    """
    if iteration is None:
        iteration = self.iteration
    # BUG FIX: the path previously hard-coded self.iteration, silently
    # ignoring an explicitly passed iteration argument.
    return os.path.join(f'{iteration}', 'stats', 'abs_workloads.csv')
def path_to_new_metrics(self, iteration=None):
    """Relative path of the new-metrics CSV for *iteration*.

    Defaults to the current iteration when *iteration* is None.
    """
    if iteration is None:
        iteration = self.iteration
    # BUG FIX: the path previously hard-coded self._iteration, silently
    # ignoring an explicitly passed iteration argument.
    return os.path.join(f'{iteration}', 'stats', 'new_metrics.csv')
@property
def global_data(self):
    # GlobalData instance shared across all iterations
    return self._global_data

@property
def iteration(self):
    # index of the currently loaded iteration (-1 before the first load)
    return self._iteration

@property
def lats(self):
    # mid-point latitudes per edge
    return self._lats

@property
def lons(self):
    # mid-point longitudes per edge
    return self._lons

@property
def kilometers(self):
    # edge lengths in kilometers
    return self._kilometers

@property
def lane_counts(self):
    # number of lanes per edge
    return self._lane_counts

def volume(self, edge_idx: int) -> float:
    '''
    Vehicle capacity of the edge at *edge_idx*.

    Based on the Nagel-Schreckenberg model (7.5 m = 0.0075 km per vehicle),
    clamped to at least one vehicle and scaled by the lane count.
    '''
    num_vehicles = max(1.0, self._kilometers.raw[edge_idx] / 0.0075)
    return num_vehicles * self._lane_counts.raw[edge_idx]

def volumes(self) -> list:
    # capacities of all edges, in the same order as the raw data lists
    # (annotation corrected: this returns a list, not a float)
    return list(map(self.volume, range(len(self._kilometers.raw))))

@property
def old_workloads(self):
    # workloads of the previous iteration
    return self._old_workloads

@property
def workloads(self):
    # workloads of the current iteration
    return self._workloads
def sorted_lon_lat_workloads(self):
    # rows of [lon, lat, workload], sorted ascending by workload
    return np.array(sorted(
        list(map(list, zip(
            self.lons.raw,
            self.lats.raw,
            self.workloads.raw
        ))),
        key=lambda x: x[2]
    ))

def sorted_lon_lat_deltas(self):
    # rows of [lon, lat, delta-workload], sorted ascending by delta
    return np.array(sorted(
        list(map(list, zip(
            self.lons.raw,
            self.lats.raw,
            self.delta_workloads.raw
        ))),
        key=lambda x: x[2]
    ))

def abs_sorted_lon_lat_deltas(self):
    # rows of [lon, lat, delta-workload], sorted by the deltas' magnitude
    return np.array(sorted(
        list(map(list, zip(
            self.lons.raw,
            self.lats.raw,
            self.delta_workloads.raw
        ))),
        key=lambda x: abs(x[2])
    ))
@property
def delta_workloads(self):
    # lazily computed element-wise difference: current minus previous workloads
    if self._delta_workloads is None:
        self._delta_workloads = Values()
        for new, old in zip(self.workloads.raw, self.old_workloads.raw):
            self._delta_workloads.raw.append(new - old)
    return self._delta_workloads
def check_for_equal_edge_files(self, sim: Simulation):
    '''
    If this is not successful, the rows of edges from iteration `i`
    don't fit to the rows of edges from iteration `i+1`.

    Compares the edge-info files of consecutive iterations and raises
    RuntimeError on the first mismatch.
    '''
    last_file = os.path.join(
        sim.results_dir,
        self.path_to_edge_info(0)
    )
    for i in range(1, sim.num_iter):
        next_file = os.path.join(
            sim.results_dir,
            self.path_to_edge_info(i)
        )
        # shallow=False forces a content comparison, not just an os.stat() one
        if not filecmp.cmp(last_file, next_file, shallow=False):
            raise RuntimeError(
                f'The edge-info {i} isn\'t equal to edge-info {i-1}.'
            )
        last_file = next_file
def read_in_edge_info(self, sim: Simulation):
    '''Read per-edge geometry: midpoint coordinates, length and lane count.

    Rows are sorted by edge-id before being appended, so every raw-data
    list is indexed by sorted edge position.
    '''
    coords_csv_path = os.path.join(
        f'{sim.results_dir}',
        self.path_to_edge_info()
    )
    with open(coords_csv_path, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=' ')
        # read unsorted data
        edges_info = []
        for row in csv_reader:
            edge_id = int(row['edge-id'])
            src_lat = float(row['src-lat'])
            src_lon = float(row['src-lon'])
            dst_lat = float(row['dst-lat'])
            dst_lon = float(row['dst-lon'])
            kilometers = float(row['kilometers'])
            lane_count = float(row['lane-count'])
            edges_info.append((
                edge_id,
                (src_lat + dst_lat) / 2.0,   # midpoint latitude
                (src_lon + dst_lon) / 2.0,   # midpoint longitude
                kilometers,
                lane_count
            ))
        # sort by edge-id and add data
        edges_info.sort(key=lambda edge_info: edge_info[0])
        # add sorted data
        for (
            _edge_id, mid_lat, mid_lon, kilometers, lane_count
        ) in edges_info:
            self.lats.raw.append(mid_lat)
            self.lons.raw.append(mid_lon)
            self.kilometers.raw.append(kilometers)
            self.lane_counts.raw.append(lane_count)
def read_in_workloads(self, sim: Simulation):
    """Load the absolute workloads of the current iteration.

    Reads the space-delimited CSV, sorts its rows by edge-id and appends
    the workload of every edge to ``self.workloads.raw`` so that list
    index equals sorted edge position.
    """
    workloads_csv_path = os.path.join(
        f'{sim.results_dir}',
        self.path_to_abs_workloads()
    )
    # read unsorted (edge-id, num_routes) pairs
    unsorted_values = []
    with open(workloads_csv_path, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=' ')
        for row in csv_reader:
            unsorted_values.append((
                int(row['edge-id']),
                int(row['num_routes'])
            ))
    # sort by edge-id, then append in that order
    # (cleanup: removed the unused enumerate() wrapper and the dead
    # commented-out normalisation by self.volume())
    unsorted_values.sort(key=lambda val: val[0])
    for (_edge_id, value) in unsorted_values:
        self.workloads.raw.append(value)
def _read_in_new_metrics(self, sim: Simulation):
workloads_csv_path = os.path.join(
sim.results_dir,
self.path_to_new_metrics()
)
# read unsorted data
unsorted_values = []
with open(workloads_csv_path, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=' ')
for row in csv_reader:
unsorted_values.append((
int(row['edge-id']),
float(row['new_metrics'])
))
# sort by edge-id and add data
unsorted_values.sort(key=lambda val: val[0])
# add sorted data
for (_edge_id, value) in unsorted_values:
self.workloads.raw.append(value) | en | 0.629177 | Just a struct of values Just a struct of values # reset all current data # continue TODO # self.check_for_equal_edge_files(sim=sim) It's used for hopefully greater numbers Nagel-Schreckenberg-Model: 7.5 m per vehicle If this is not successful, the rows of edges from iteration `i` don't fit to the rows of edges from iteration `i+1`. # read unsorted data # sort by edge-id and add data # add sorted data # read unsorted data # sort by edge-id and add data # add sorted data # self.workloads.raw.append(value / self.volume(edge_idx)) # read unsorted data # sort by edge-id and add data # add sorted data | 3.093101 | 3 |
github/errors.py | ShineyDev/github.py | 17 | 6613570 | <filename>github/errors.py
import graphql
class ClientError(graphql.client.ClientError):
    # Re-export of the base graphql client error under this package's namespace.
    __doc__ = graphql.client.ClientError.__doc__
    __slots__ = ()


class ClientResponseError(graphql.client.ClientResponseError, ClientError):
    # Base for all response-derived errors; docs inherited from graphql.
    __doc__ = graphql.client.ClientResponseError.__doc__
    __slots__ = ()


class ClientResponseHTTPError(graphql.client.ClientResponseHTTPError, ClientResponseError):
    # Base for HTTP-level response errors; docs inherited from graphql.
    __doc__ = graphql.client.ClientResponseHTTPError.__doc__
    __slots__ = ()
class ClientResponseHTTPUnauthorizedError(ClientResponseHTTPError):
    """
    Represents an HTTP 401 response.

    Attributes
    ----------
    message: :class:`str`
        The error message.
    response: :class:`aiohttp.ClientResponse`
        The client response.
    data: Optional[:class:`dict`]
        The response data.
    """

    __slots__ = ()


class ClientResponseGraphQLError(graphql.client.ClientResponseGraphQLError, ClientResponseError):
    # Base for all GraphQL-level response errors; docs inherited from graphql.
    __doc__ = graphql.client.ClientResponseGraphQLError.__doc__
    __slots__ = ()


class ClientResponseGraphQLForbiddenError(ClientResponseGraphQLError):
    """
    Represents a GraphQL ``"FORBIDDEN"`` response.

    Attributes
    ----------
    message: :class:`str`
        The error message.
    response: :class:`aiohttp.ClientResponse`
        The client response.
    data: :class:`dict`
        The response data.
    """

    __slots__ = ()


class ClientResponseGraphQLInternalError(ClientResponseGraphQLError):
    """
    Represents a GraphQL ``"INTERNAL"`` response.

    Attributes
    ----------
    message: :class:`str`
        The error message.
    response: :class:`aiohttp.ClientResponse`
        The client response.
    data: :class:`dict`
        The response data.
    """

    __slots__ = ()


class ClientResponseGraphQLNotFoundError(ClientResponseGraphQLError):
    """
    Represents a GraphQL ``"NOT_FOUND"`` response.

    Attributes
    ----------
    message: :class:`str`
        The error message.
    response: :class:`aiohttp.ClientResponse`
        The client response.
    data: :class:`dict`
        The response data.
    """

    __slots__ = ()


class ClientResponseGraphQLUnprocessableError(ClientResponseGraphQLError):
    """
    Represents a GraphQL ``"UNPROCESSABLE"`` response.

    Attributes
    ----------
    message: :class:`str`
        The error message.
    response: :class:`aiohttp.ClientResponse`
        The client response.
    data: :class:`dict`
        The response data.
    """

    __slots__ = ()


class ClientResponseGraphQLValidationError(graphql.client.ClientResponseGraphQLValidationError, ClientResponseGraphQLError):
    """
    Represents a GraphQL response that failed internal data validation.

    Attributes
    ----------
    message: :class:`str`
        The error message.
    response: :class:`aiohttp.ClientResponse`
        The client response.
    data: :class:`dict`
        The response data.
    """

    __slots__ = ()
# Maps an HTTP status code (int) or a GraphQL error type (str) to the
# exception class raised for it.
_response_error_map = {
    401: ClientResponseHTTPUnauthorizedError,
    "FORBIDDEN": ClientResponseGraphQLForbiddenError,
    "INTERNAL": ClientResponseGraphQLInternalError,
    "NOT_FOUND": ClientResponseGraphQLNotFoundError,
    "UNPROCESSABLE": ClientResponseGraphQLUnprocessableError,
}
class ClientDeprecationWarning(DeprecationWarning):
    """
    Represents a :exc:`DeprecationWarning` from the GraphQL client.
    """

    __slots__ = ()


class ServerDeprecationWarning(DeprecationWarning):
    """
    Represents a :exc:`DeprecationWarning` from the GraphQL server.
    """

    __slots__ = ()
# Names exported by ``from github.errors import *``.
__all__ = [
    "ClientError",
    "ClientResponseError",
    "ClientResponseHTTPError",
    "ClientResponseHTTPUnauthorizedError",
    "ClientResponseGraphQLError",
    "ClientResponseGraphQLForbiddenError",
    "ClientResponseGraphQLInternalError",
    "ClientResponseGraphQLNotFoundError",
    "ClientResponseGraphQLUnprocessableError",
    "ClientResponseGraphQLValidationError",
    "ClientDeprecationWarning",
    "ServerDeprecationWarning",
]
| <filename>github/errors.py
import graphql
class ClientError(graphql.client.ClientError):
__doc__ = graphql.client.ClientError.__doc__
__slots__ = ()
class ClientResponseError(graphql.client.ClientResponseError, ClientError):
__doc__ = graphql.client.ClientResponseError.__doc__
__slots__ = ()
class ClientResponseHTTPError(graphql.client.ClientResponseHTTPError, ClientResponseError):
__doc__ = graphql.client.ClientResponseHTTPError.__doc__
__slots__ = ()
class ClientResponseHTTPUnauthorizedError(ClientResponseHTTPError):
"""
Represents an HTTP 401 response.
Attributes
----------
message: :class:`str`
The error message.
response: :class:`aiohttp.ClientResponse`
The client response.
data: Optional[:class:`dict`]
The response data.
"""
__slots__ = ()
class ClientResponseGraphQLError(graphql.client.ClientResponseGraphQLError, ClientResponseError):
__doc__ = graphql.client.ClientResponseGraphQLError.__doc__
__slots__ = ()
class ClientResponseGraphQLForbiddenError(ClientResponseGraphQLError):
"""
Represents a GraphQL ``"FORBIDDEN"`` response.
Attributes
----------
message: :class:`str`
The error message.
response: :class:`aiohttp.ClientResponse`
The client response.
data: :class:`dict`
The response data.
"""
__slots__ = ()
class ClientResponseGraphQLInternalError(ClientResponseGraphQLError):
"""
Represents a GraphQL ``"INTERNAL"`` response.
Attributes
----------
message: :class:`str`
The error message.
response: :class:`aiohttp.ClientResponse`
The client response.
data: :class:`dict`
The response data.
"""
__slots__ = ()
class ClientResponseGraphQLNotFoundError(ClientResponseGraphQLError):
"""
Represents a GraphQL ``"NOT_FOUND"`` response.
Attributes
----------
message: :class:`str`
The error message.
response: :class:`aiohttp.ClientResponse`
The client response.
data: :class:`dict`
The response data.
"""
__slots__ = ()
class ClientResponseGraphQLUnprocessableError(ClientResponseGraphQLError):
"""
Represents a GraphQL ``"UNPROCESSABLE"`` response.
Attributes
----------
message: :class:`str`
The error message.
response: :class:`aiohttp.ClientResponse`
The client response.
data: :class:`dict`
The response data.
"""
__slots__ = ()
class ClientResponseGraphQLValidationError(graphql.client.ClientResponseGraphQLValidationError, ClientResponseGraphQLError):
"""
Represents a GraphQL response that failed internal data validation.
Attributes
----------
message: :class:`str`
The error message.
response: :class:`aiohttp.ClientResponse`
The client response.
data: :class:`dict`
The response data.
"""
__slots__ = ()
_response_error_map = {
401: ClientResponseHTTPUnauthorizedError,
"FORBIDDEN": ClientResponseGraphQLForbiddenError,
"INTERNAL": ClientResponseGraphQLInternalError,
"NOT_FOUND": ClientResponseGraphQLNotFoundError,
"UNPROCESSABLE": ClientResponseGraphQLUnprocessableError,
}
class ClientDeprecationWarning(DeprecationWarning):
"""
Represents a :exc:`DeprecationWarning` from the GraphQL client.
"""
__slots__ = ()
class ServerDeprecationWarning(DeprecationWarning):
"""
Represents a :exc:`DeprecationWarning` from the GraphQL server.
"""
__slots__ = ()
__all__ = [
"ClientError",
"ClientResponseError",
"ClientResponseHTTPError",
"ClientResponseHTTPUnauthorizedError",
"ClientResponseGraphQLError",
"ClientResponseGraphQLForbiddenError",
"ClientResponseGraphQLInternalError",
"ClientResponseGraphQLNotFoundError",
"ClientResponseGraphQLUnprocessableError",
"ClientResponseGraphQLValidationError",
"ClientDeprecationWarning",
"ServerDeprecationWarning",
]
| en | 0.381659 | Represents an HTTP 401 response. Attributes ---------- message: :class:`str` The error message. response: :class:`aiohttp.ClientResponse` The client response. data: Optional[:class:`dict`] The response data. Represents a GraphQL ``"FORBIDDEN"`` response. Attributes ---------- message: :class:`str` The error message. response: :class:`aiohttp.ClientResponse` The client response. data: :class:`dict` The response data. Represents a GraphQL ``"INTERNAL"`` response. Attributes ---------- message: :class:`str` The error message. response: :class:`aiohttp.ClientResponse` The client response. data: :class:`dict` The response data. Represents a GraphQL ``"NOT_FOUND"`` response. Attributes ---------- message: :class:`str` The error message. response: :class:`aiohttp.ClientResponse` The client response. data: :class:`dict` The response data. Represents a GraphQL ``"UNPROCESSABLE"`` response. Attributes ---------- message: :class:`str` The error message. response: :class:`aiohttp.ClientResponse` The client response. data: :class:`dict` The response data. Represents a GraphQL response that failed internal data validation. Attributes ---------- message: :class:`str` The error message. response: :class:`aiohttp.ClientResponse` The client response. data: :class:`dict` The response data. Represents a :exc:`DeprecationWarning` from the GraphQL client. Represents a :exc:`DeprecationWarning` from the GraphQL server. | 2.376309 | 2 |
setup.py | wildgeece96/prowav | 0 | 6613571 | <gh_stars>0
from setuptools import setup

# runtime dependencies of the prowav package
requires = [
    "scipy>=1.3.0", "numpy>=1.16.1", "librosa>=0.6.3", "wavio>=0.0.4",
    "joblib", "EMD-signal", "tqdm"
]

# Use the README as the PyPI long description; read it as UTF-8 explicitly.
# BUG FIX: relying on the platform default encoding breaks the build on
# systems whose locale encoding cannot decode the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name='prowav',
    version='0.6',
    description='The package for preprocessing wave data',
    url='https://github.com/wildgeece96/prowav',
    author='Soh',
    author_email='<EMAIL>',
    license='MIT',
    keywords='wave mfcc fft',
    packages=[
        "prowav",
    ],
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=requires,
    classifiers=[
        'Programming Language :: Python :: 3.6',
    ],
)
| from setuptools import setup
requires = [
"scipy>=1.3.0", "numpy>=1.16.1", "librosa>=0.6.3", "wavio>=0.0.4",
"joblib", "EMD-signal", "tqdm"
]
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='prowav',
version='0.6',
description='The package for preprocessing wave data',
url='https://github.com/wildgeece96/prowav',
author='Soh',
author_email='<EMAIL>',
license='MIT',
keywords='wave mfcc fft',
packages=[
"prowav",
],
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=requires,
classifiers=[
'Programming Language :: Python :: 3.6',
],
) | none | 1 | 1.303251 | 1 | |
project/sema2/views.py | eorygen/sema2_web | 0 | 6613572 | <reponame>eorygen/sema2_web
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.views.generic import TemplateView, View, RedirectView
import jwt
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from sema2 import tasks
from sema2.api import ProgramSerializer, ProgramVersionSerializer, AnswerSetSerializer
from sema2.models import Program, ProgramInvite, ProgramParticipantState, ProgramParticipantBridge, AnswerSet
import tokens
class HomeRedirectView(View):
    """Send 'sema_admin' users to the program list, everyone else home."""

    def get(self, request):
        is_admin = request.user.groups.filter(name='sema_admin').exists()
        target = 'program-list' if is_admin else 'home'
        return HttpResponseRedirect(redirect_to=reverse(target))
class HomeView(TemplateView):
    """Participant landing page."""
    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        # show a welcome banner when '?welcome=...' is present in the URL
        # (presumably appended by the invite-confirmation flow -- verify)
        context['show_welcome'] = self.request.GET.get('welcome', False)
        return context
class ProgramListView(TemplateView):
    # Program overview page (content rendered client-side).
    template_name = 'program_list.html'


class ProgramRedirectView(View):
    # Redirects a bare program URL to that program's dashboard.
    def get(self, request, program_id):
        return HttpResponseRedirect(redirect_to=reverse('dashboard', kwargs={'program_id': program_id}))
class ProgramDashboardView(TemplateView):
    """Dashboard of a single program; only admins of that program may view it."""
    template_name = 'program_dashboard.html'

    def dispatch(self, request, *args, **kwargs):
        # access control: the user must be authenticated AND an admin of this
        # program; everything else bounces back to the program list
        user = request.user
        try:
            program = Program.objects.get(pk=kwargs['program_id'])
            is_admin = program.admins.filter(pk=user.pk).exists()
        except Program.DoesNotExist:
            program = None
        if not user.is_authenticated() or not program or not is_admin:
            return HttpResponseRedirect(redirect_to=reverse('program-list'))
        return super(ProgramDashboardView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(ProgramDashboardView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        # '?v=<revision>' selects a specific program version, default: latest
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        return context
# NOTE(review): the eight views below repeat the same program/program-version
# context boilerplate; extracting a shared mixin (e.g. a ProgramContextMixin
# providing get_context_data) would remove the duplication.


class ProgramParticipantsView(TemplateView):
    # Participant management page of a program.
    template_name = 'program_participants.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramParticipantsView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        # '?v=<revision>' selects a specific program version, default: latest
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        return context


class ProgramAdminsView(TemplateView):
    # Admin management page of a program.
    template_name = 'program_admins.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramAdminsView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        return context


class ProgramQuestionSetsView(TemplateView):
    # Question-set editor page of a program.
    template_name = 'program_question_sets.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramQuestionSetsView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        return context


class ProgramSurveysView(TemplateView):
    # Survey editor page of a program.
    template_name = 'program_surveys.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramSurveysView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        return context


class ProgramSchedulesView(TemplateView):
    # Schedule editor page of a program.
    template_name = 'program_schedules.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramSchedulesView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        return context


class ProgramResponsesView(TemplateView):
    # Response list page of a program.
    template_name = 'program_responses.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramResponsesView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        # pagination ('?p='), sorting ('?s=') and per-user filter ('?u=')
        # state carried through the query string
        context['cur_page'] = self.request.GET.get('p', 1)
        context['sort_by'] = self.request.GET.get('s', '')
        context['filtered_user_id'] = self.request.GET.get('u', -1)
        return context


class ProgramResponseView(TemplateView):
    # Detail page of a single answer set.
    template_name = 'program_response.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramResponseView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        answer_set = AnswerSet.objects.get(pk=kwargs['set_id'])
        context['answer_set_json'] = JSONRenderer().render(AnswerSetSerializer(answer_set).data)
        # pagination state ('?p=') so "back" returns to the right list page
        context['cur_page'] = self.request.GET.get('p', 1)
        return context


class ProgramActivityView(TemplateView):
    # Activity overview page of a program.
    template_name = 'program_activity.html'

    def get_context_data(self, **kwargs):
        context = super(ProgramActivityView, self).get_context_data(**kwargs)
        program = Program.objects.get(pk=kwargs['program_id'])
        context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
        version = self.request.GET.get('v', None)
        program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
        context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
        return context
class MailTest(APIView):
    # NOTE(review): debug endpoint -- returns a confirmation token for an
    # arbitrary invite id without any authentication; remove or
    # permission-guard before production use.
    def get(self, request):
        invite, token = tasks.generate_confirmation_token(request.GET.get('id'))
        return Response({'token': token})


class MailTest2(APIView):
    # NOTE(review): leftover mail-sending experiment; the commented-out code
    # below should be deleted once it is no longer needed.
    def get(self, request):
        # from django.core.mail import send_mail
        # send_mail('Subject here', 'Here is the message.', '<EMAIL>', ['<EMAIL>'], fail_silently=False)
        # tasks.send_participant_invite(1)
        return Response({})
class ConfirmInvite(View):
    """Landing view for invite-confirmation links.

    The URL carries a JWT whose payload references a ProgramInvite.  A valid
    token confirms the invite and redirects to a welcome URL; an invalid
    token yields HTTP 403.
    """
    def get(self, request, confirmation_token, *args, **kwargs):
        try:
            payload = jwt.decode(confirmation_token, key=settings.JWT_SECRET)
            invitation_id = payload['invitation_id']
            try:
                # Create a new user from the invite
                invitation = ProgramInvite.objects.get(pk=invitation_id)
                url = tasks.confirm_invite_and_get_welcome_url(invitation)
                # invites are single-use: drop the row once confirmed
                invitation.delete()
                return HttpResponseRedirect(redirect_to=url)
            except ProgramInvite.DoesNotExist:
                # already confirmed (or never existed) -> just go home
                return HttpResponseRedirect(redirect_to=reverse('home'))
        except jwt.InvalidTokenError:
            return HttpResponse(status=403)
class InitialSetup(View):
    # NOTE(review): unauthenticated one-shot bootstrap endpoint (configures
    # the Site object and auth groups); it should be protected or removed
    # after the first deployment.
    def get(self, request):
        site = Site.objects.all().first()
        if settings.DEBUG:
            site.domain = 'exo:8000'
            site.name = 'Development'
        else:
            site.domain = 'sema-survey.com'
            site.name = 'SEMA'
        site.save()
        from django.contrib.auth.models import Group
        if Group.objects.all().count() == 0:
            Group.objects.create(
                name='sema_participant'
            )
            Group.objects.create(
                name='sema_admin'
            )
        # ensure the default 'admin' account is in the sema_admin group
        # (raises User.DoesNotExist if that account was never created;
        # NOTE(review): indentation reconstructed -- confirm these lines
        # belong outside the group-creation branch)
        admin = User.objects.get(username='admin')
        g = Group.objects.get(name='sema_admin')
        g.user_set.add(admin)
        admin.save()
        return HttpResponse("Ok")
| from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.views.generic import TemplateView, View, RedirectView
import jwt
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from sema2 import tasks
from sema2.api import ProgramSerializer, ProgramVersionSerializer, AnswerSetSerializer
from sema2.models import Program, ProgramInvite, ProgramParticipantState, ProgramParticipantBridge, AnswerSet
import tokens
class HomeRedirectView(View):
def get(self, request):
if request.user.groups.filter(name='sema_admin').exists():
return HttpResponseRedirect(redirect_to=reverse('program-list'))
else:
return HttpResponseRedirect(redirect_to=reverse('home'))
class HomeView(TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['show_welcome'] = self.request.GET.get('welcome', False)
return context
class ProgramListView(TemplateView):
template_name = 'program_list.html'
class ProgramRedirectView(View):
def get(self, request, program_id):
return HttpResponseRedirect(redirect_to=reverse('dashboard', kwargs={'program_id': program_id}))
class ProgramDashboardView(TemplateView):
template_name = 'program_dashboard.html'
def dispatch(self, request, *args, **kwargs):
user = request.user
try:
program = Program.objects.get(pk=kwargs['program_id'])
is_admin = program.admins.filter(pk=user.pk).exists()
except Program.DoesNotExist:
program = None
if not user.is_authenticated() or not program or not is_admin:
return HttpResponseRedirect(redirect_to=reverse('program-list'))
return super(ProgramDashboardView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ProgramDashboardView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
return context
class ProgramParticipantsView(TemplateView):
template_name = 'program_participants.html'
def get_context_data(self, **kwargs):
context = super(ProgramParticipantsView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
return context
class ProgramAdminsView(TemplateView):
template_name = 'program_admins.html'
def get_context_data(self, **kwargs):
context = super(ProgramAdminsView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
return context
class ProgramQuestionSetsView(TemplateView):
template_name = 'program_question_sets.html'
def get_context_data(self, **kwargs):
context = super(ProgramQuestionSetsView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
return context
class ProgramSurveysView(TemplateView):
template_name = 'program_surveys.html'
def get_context_data(self, **kwargs):
context = super(ProgramSurveysView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
return context
class ProgramSchedulesView(TemplateView):
template_name = 'program_schedules.html'
def get_context_data(self, **kwargs):
context = super(ProgramSchedulesView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
return context
class ProgramResponsesView(TemplateView):
template_name = 'program_responses.html'
def get_context_data(self, **kwargs):
context = super(ProgramResponsesView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
context['cur_page'] = self.request.GET.get('p', 1)
context['sort_by'] = self.request.GET.get('s', '')
context['filtered_user_id'] = self.request.GET.get('u', -1)
return context
class ProgramResponseView(TemplateView):
template_name = 'program_response.html'
def get_context_data(self, **kwargs):
context = super(ProgramResponseView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
answer_set = AnswerSet.objects.get(pk=kwargs['set_id'])
context['answer_set_json'] = JSONRenderer().render(AnswerSetSerializer(answer_set).data)
context['cur_page'] = self.request.GET.get('p', 1)
return context
class ProgramActivityView(TemplateView):
template_name = 'program_activity.html'
def get_context_data(self, **kwargs):
context = super(ProgramActivityView, self).get_context_data(**kwargs)
program = Program.objects.get(pk=kwargs['program_id'])
context['program_json'] = JSONRenderer().render(ProgramSerializer(program).data)
version = self.request.GET.get('v', None)
program_version = program.versions.get(revision_number=version) if version else program.versions.all().order_by('-pk').first()
context['program_version_json'] = JSONRenderer().render(ProgramVersionSerializer(program_version).data)
return context
class MailTest(APIView):
def get(self, request):
invite, token = tasks.generate_confirmation_token(request.GET.get('id'))
return Response({'token': token})
class MailTest2(APIView):
def get(self, request):
# from django.core.mail import send_mail
# send_mail('Subject here', 'Here is the message.', '<EMAIL>', ['<EMAIL>'], fail_silently=False)
# tasks.send_participant_invite(1)
return Response({})
class ConfirmInvite(View):
def get(self, request, confirmation_token, *args, **kwargs):
try:
payload = jwt.decode(confirmation_token, key=settings.JWT_SECRET)
invitation_id = payload['invitation_id']
try:
# Create a new user from the invite
invitation = ProgramInvite.objects.get(pk=invitation_id)
url = tasks.confirm_invite_and_get_welcome_url(invitation)
invitation.delete()
return HttpResponseRedirect(redirect_to=url)
except ProgramInvite.DoesNotExist:
return HttpResponseRedirect(redirect_to=reverse('home'))
except jwt.InvalidTokenError:
return HttpResponse(status=403)
class InitialSetup(View):
def get(self, request):
site = Site.objects.all().first()
if settings.DEBUG:
site.domain = 'exo:8000'
site.name = 'Development'
else:
site.domain = 'sema-survey.com'
site.name = 'SEMA'
site.save()
from django.contrib.auth.models import Group
if Group.objects.all().count() == 0:
Group.objects.create(
name='sema_participant'
)
Group.objects.create(
name='sema_admin'
)
admin = User.objects.get(username='admin')
g = Group.objects.get(name='sema_admin')
g.user_set.add(admin)
admin.save()
return HttpResponse("Ok") | en | 0.52883 | # from django.core.mail import send_mail # send_mail('Subject here', 'Here is the message.', '<EMAIL>', ['<EMAIL>'], fail_silently=False) # tasks.send_participant_invite(1) # Create a new user from the invite | 1.95741 | 2 |
uni_ticket/migrations/0020_auto_20190424_1144.py | biotech2021/uniTicket | 15 | 6613573 | <gh_stars>10-100
# Generated by Django 2.1.7 on 2019-04-24 09:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('uni_ticket', '0019_task_priority'),
]
operations = [
migrations.AlterModelOptions(
name='tickethistory',
options={'ordering': ['ticket', '-modified'], 'verbose_name': 'Cronologia Stati Ticket', 'verbose_name_plural': 'Cronologia Stati Ticket'},
),
migrations.RemoveField(
model_name='taskhistory',
name='employee',
),
migrations.AddField(
model_name='taskhistory',
name='modified_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
| # Generated by Django 2.1.7 on 2019-04-24 09:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('uni_ticket', '0019_task_priority'),
]
operations = [
migrations.AlterModelOptions(
name='tickethistory',
options={'ordering': ['ticket', '-modified'], 'verbose_name': 'Cronologia Stati Ticket', 'verbose_name_plural': 'Cronologia Stati Ticket'},
),
migrations.RemoveField(
model_name='taskhistory',
name='employee',
),
migrations.AddField(
model_name='taskhistory',
name='modified_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
] | en | 0.699198 | # Generated by Django 2.1.7 on 2019-04-24 09:44 | 1.597775 | 2 |
tests/pretrain_enwsik8.py | iliasprc/IDPMetagenome | 0 | 6613574 | # constants
import argparse
import datetime
import gzip
import json
import os
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
# d = torchtext.datasets.EnWik9(root='.dataenwik9', split=('train', ))
#
# exit()
parser = argparse.ArgumentParser(description='PyTorch Language Model')
parser.add_argument('--dataset', type=str, default='enwik8')
parser.add_argument('--data', type=str,
default='/home/iliask/PycharmProjects/MScThesis/data/',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='Reformer',
help='type of net (RNN_TANH, RNN_RELU, LSTM, GRU, Transformer,Reformer)')
parser.add_argument('--n_hashes', type=int, default=4)
parser.add_argument('--nhead', type=int, default=46,
help='the number of heads in the encoder/decoder of the transformer model')
parser.add_argument('--emsize', type=int, default=128,
help='size of word embeddings')
parser.add_argument('--depth', type=int, default=6, help='number of layers')
parser.add_argument('--gradient_steps', type=int, default=32)
parser.add_argument('--causal', action='store_true', default=False)
parser.add_argument('--tied_connections', action='store_true', default=False)
parser.add_argument('--kmeans', action='store_true', default=False)
parser.add_argument('--full_attention', action='store_true', default=False)
parser.add_argument('--seqlen', type=int, default=1024,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true', default=True,
help='tie the word embedding and softmax weights')
parser.add_argument('--lr', type=float, default=1e-4,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=1.0,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=5,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=4, metavar='N',
help='batch size')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true', default=True,
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--dry-run', action='store_true',
help='verify the code and the model')
parser.add_argument('--cpkt_dir', type=str, default='./cpktsenwik8',
help='checkpoint directory')
args = parser.parse_args()
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
GRADIENT_ACCUMULATE_EVERY = args.gradient_steps
LEARNING_RATE = args.lr
VALIDATE_EVERY = 10000
GENERATE_EVERY = 2500
SEQ_LEN = args.seqlen
GENERATE_LENGTH = SEQ_LEN
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
def select_model(args, name, n_classes, pretrained=False):
dim = args.emsize
if name == 'idptransformer':
from models.transformer import IDPTransformer
return IDPTransformer(dim=dim, blocks=args.depth, heads=args.nhead, dim_head=None, dim_linear_block=dim * 2,
dropout=0.1,
prenorm=False, classes=n_classes)
elif name == 'idpcct':
from models.transformer import IDP_cct
return IDP_cct(dim=dim, blocks=args.depth, heads=args.nhead, dim_head=None, dim_linear_block=dim * 2,
dropout=0.2,
prenorm=False, classes=n_classes)
name = 'idpcct'
model = select_model(args, 'idpcct', 256)
if use_cuda:
model.cuda()
time_string = datetime.datetime.now().strftime("%d_%m_%Y_%H.%M.%S")
pathdir = os.path.join(args.cpkt_dir, time_string, name)
# prepare enwik8 data
writer = SummaryWriter(pathdir + '/runs')
with gzip.open(args.data + 'enwik8.gz') as file:
X = np.fromstring(file.read(int(95e6)), dtype=np.uint8)
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq # .cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)
len_epoch = len(train_loader) * BATCH_SIZE
print(len(train_loader))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
from models.utils import Cosine_LR_Scheduler
scheduler = Cosine_LR_Scheduler(
optim,
warmup_epochs=3, warmup_lr=0,
num_epochs=EPOCHS, base_lr=LEARNING_RATE, final_lr=1e-5,
iter_per_epoch=len(train_loader) // GRADIENT_ACCUMULATE_EVERY,
constant_predictor_lr=True # see the end of section 4.2 predictor
)
print(model)
# training
best_loss = 1000
idx = 0
for i in range(EPOCHS):
model.train()
criterion = torch.nn.CrossEntropyLoss()
trainloss = 0
for idx, data in enumerate(train_loader):
# data=data.unsqueeze(-1)
target = data[:, 1:].to(device)
data = data[:, 0:-1].to(device)
# print(data.shape)
output = model(data)
b, t, _ = output.shape
output = output.view(b * t, -1)
target = target.reshape(-1)
# print(output.shape,target.shape)
loss = criterion(output, target)
writer_step = (i - 1) * len_epoch + idx
writer.add_scalar('Train/Loss', loss.item(), writer_step)
# print(f'Train loss {trainloss / (idx + 1)} batch {idx}/ {len(train_loader)}')
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
trainloss += loss.item()
# if idx % VALIDATE_EVERY
if idx % GRADIENT_ACCUMULATE_EVERY == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
scheduler.step()
optim.step()
optim.zero_grad()
if idx % 1000 == 0:
print(f'Train loss {trainloss / (idx + 1)} batch {idx}/ {len(train_loader)}')
if idx % VALIDATE_EVERY == 0:
print(f'Train loss {trainloss / (idx + 1)} batch {idx}/ {len(train_loader)}')
model.eval()
valloss = 0
with torch.no_grad():
for validx, data in enumerate(val_loader):
target = data[:, 1:].to(device)
data = data[:, 0:-1].to(device)
# print(data.shape)
output = model(data)
b, t, _ = output.shape
output = output.view(b * t, -1)
target = target.reshape(-1)
# print(output.shape, target.shape)
loss = criterion(output, target)
writer.add_scalar('Val/Loss', loss.item(), writer_step)
valloss += loss.item()
print(f'VAL LOSS {valloss / validx} ')
if valloss < best_loss:
print('BEST'
)
best_loss = valloss
torch.save(model.state_dict(),
pathdir + f'/bestmodel.pth')
with open(pathdir + '/commandline_args.txt', 'w') as f:
json.dump(args.__dict__, f, indent=2)
best_loss = valloss
torch.save(model.state_dict(),
pathdir + f'/lastmodel.pth')
model.train()
| # constants
import argparse
import datetime
import gzip
import json
import os
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
# d = torchtext.datasets.EnWik9(root='.dataenwik9', split=('train', ))
#
# exit()
parser = argparse.ArgumentParser(description='PyTorch Language Model')
parser.add_argument('--dataset', type=str, default='enwik8')
parser.add_argument('--data', type=str,
default='/home/iliask/PycharmProjects/MScThesis/data/',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='Reformer',
help='type of net (RNN_TANH, RNN_RELU, LSTM, GRU, Transformer,Reformer)')
parser.add_argument('--n_hashes', type=int, default=4)
parser.add_argument('--nhead', type=int, default=46,
help='the number of heads in the encoder/decoder of the transformer model')
parser.add_argument('--emsize', type=int, default=128,
help='size of word embeddings')
parser.add_argument('--depth', type=int, default=6, help='number of layers')
parser.add_argument('--gradient_steps', type=int, default=32)
parser.add_argument('--causal', action='store_true', default=False)
parser.add_argument('--tied_connections', action='store_true', default=False)
parser.add_argument('--kmeans', action='store_true', default=False)
parser.add_argument('--full_attention', action='store_true', default=False)
parser.add_argument('--seqlen', type=int, default=1024,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true', default=True,
help='tie the word embedding and softmax weights')
parser.add_argument('--lr', type=float, default=1e-4,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=1.0,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=5,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=4, metavar='N',
help='batch size')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true', default=True,
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--dry-run', action='store_true',
help='verify the code and the model')
parser.add_argument('--cpkt_dir', type=str, default='./cpktsenwik8',
help='checkpoint directory')
args = parser.parse_args()
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
GRADIENT_ACCUMULATE_EVERY = args.gradient_steps
LEARNING_RATE = args.lr
VALIDATE_EVERY = 10000
GENERATE_EVERY = 2500
SEQ_LEN = args.seqlen
GENERATE_LENGTH = SEQ_LEN
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
def select_model(args, name, n_classes, pretrained=False):
dim = args.emsize
if name == 'idptransformer':
from models.transformer import IDPTransformer
return IDPTransformer(dim=dim, blocks=args.depth, heads=args.nhead, dim_head=None, dim_linear_block=dim * 2,
dropout=0.1,
prenorm=False, classes=n_classes)
elif name == 'idpcct':
from models.transformer import IDP_cct
return IDP_cct(dim=dim, blocks=args.depth, heads=args.nhead, dim_head=None, dim_linear_block=dim * 2,
dropout=0.2,
prenorm=False, classes=n_classes)
name = 'idpcct'
model = select_model(args, 'idpcct', 256)
if use_cuda:
model.cuda()
time_string = datetime.datetime.now().strftime("%d_%m_%Y_%H.%M.%S")
pathdir = os.path.join(args.cpkt_dir, time_string, name)
# prepare enwik8 data
writer = SummaryWriter(pathdir + '/runs')
with gzip.open(args.data + 'enwik8.gz') as file:
X = np.fromstring(file.read(int(95e6)), dtype=np.uint8)
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq # .cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)
len_epoch = len(train_loader) * BATCH_SIZE
print(len(train_loader))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
from models.utils import Cosine_LR_Scheduler
scheduler = Cosine_LR_Scheduler(
optim,
warmup_epochs=3, warmup_lr=0,
num_epochs=EPOCHS, base_lr=LEARNING_RATE, final_lr=1e-5,
iter_per_epoch=len(train_loader) // GRADIENT_ACCUMULATE_EVERY,
constant_predictor_lr=True # see the end of section 4.2 predictor
)
print(model)
# training
best_loss = 1000
idx = 0
for i in range(EPOCHS):
model.train()
criterion = torch.nn.CrossEntropyLoss()
trainloss = 0
for idx, data in enumerate(train_loader):
# data=data.unsqueeze(-1)
target = data[:, 1:].to(device)
data = data[:, 0:-1].to(device)
# print(data.shape)
output = model(data)
b, t, _ = output.shape
output = output.view(b * t, -1)
target = target.reshape(-1)
# print(output.shape,target.shape)
loss = criterion(output, target)
writer_step = (i - 1) * len_epoch + idx
writer.add_scalar('Train/Loss', loss.item(), writer_step)
# print(f'Train loss {trainloss / (idx + 1)} batch {idx}/ {len(train_loader)}')
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
trainloss += loss.item()
# if idx % VALIDATE_EVERY
if idx % GRADIENT_ACCUMULATE_EVERY == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
scheduler.step()
optim.step()
optim.zero_grad()
if idx % 1000 == 0:
print(f'Train loss {trainloss / (idx + 1)} batch {idx}/ {len(train_loader)}')
if idx % VALIDATE_EVERY == 0:
print(f'Train loss {trainloss / (idx + 1)} batch {idx}/ {len(train_loader)}')
model.eval()
valloss = 0
with torch.no_grad():
for validx, data in enumerate(val_loader):
target = data[:, 1:].to(device)
data = data[:, 0:-1].to(device)
# print(data.shape)
output = model(data)
b, t, _ = output.shape
output = output.view(b * t, -1)
target = target.reshape(-1)
# print(output.shape, target.shape)
loss = criterion(output, target)
writer.add_scalar('Val/Loss', loss.item(), writer_step)
valloss += loss.item()
print(f'VAL LOSS {valloss / validx} ')
if valloss < best_loss:
print('BEST'
)
best_loss = valloss
torch.save(model.state_dict(),
pathdir + f'/bestmodel.pth')
with open(pathdir + '/commandline_args.txt', 'w') as f:
json.dump(args.__dict__, f, indent=2)
best_loss = valloss
torch.save(model.state_dict(),
pathdir + f'/lastmodel.pth')
model.train()
| en | 0.434241 | # constants # d = torchtext.datasets.EnWik9(root='.dataenwik9', split=('train', )) # # exit() # helpers # instantiate model # prepare enwik8 data # .cuda() # optimizer # see the end of section 4.2 predictor # training # data=data.unsqueeze(-1) # print(data.shape) # print(output.shape,target.shape) # print(f'Train loss {trainloss / (idx + 1)} batch {idx}/ {len(train_loader)}') # if idx % VALIDATE_EVERY # print(data.shape) # print(output.shape, target.shape) | 2.242185 | 2 |
py_lock.py | markmumba/password_locker | 0 | 6613575 | <filename>py_lock.py
import random
import string
import pyperclip
class personas:
personas_list = []
def __init__(self, username, password):
self.username = username
self.password = password
def save_persona(self):
personas.personas_list.append(self)
class profiles:
profiles_list = []
@classmethod
def confirm_persona(cls, username, password):
active_persona = ''
for persona in personas.personas_list:
if(persona.username == username and persona.password == password):
active_persona == persona.username
return active_persona
def __init__(self, app, username, password):
self.app = app
self.username = username
self.password = password
def save_profile(self):
profiles.profiles_list.append(self)
def delete_profile(self):
profiles.profiles_list.remove(self)
@classmethod
def search_profile(cls, app):
for profile in cls.profiles_list:
if profile.app == app:
return profile
@classmethod
def profile_exist(cls, app):
for profile in cls.profiles_list:
if profile.app == app:
return True
return False
@classmethod
def display_profile(cls):
return cls.profiles_list
def gen_password():
chars = char = string.ascii_uppercase+string.ascii_lowercase+string.digits
length = 9
print('here is are your password:')
password = ''.join(random.choice(chars) for _ in range(-1, length))
print(password)
return password
| <filename>py_lock.py
import random
import string
import pyperclip
class personas:
personas_list = []
def __init__(self, username, password):
self.username = username
self.password = password
def save_persona(self):
personas.personas_list.append(self)
class profiles:
profiles_list = []
@classmethod
def confirm_persona(cls, username, password):
active_persona = ''
for persona in personas.personas_list:
if(persona.username == username and persona.password == password):
active_persona == persona.username
return active_persona
def __init__(self, app, username, password):
self.app = app
self.username = username
self.password = password
def save_profile(self):
profiles.profiles_list.append(self)
def delete_profile(self):
profiles.profiles_list.remove(self)
@classmethod
def search_profile(cls, app):
for profile in cls.profiles_list:
if profile.app == app:
return profile
@classmethod
def profile_exist(cls, app):
for profile in cls.profiles_list:
if profile.app == app:
return True
return False
@classmethod
def display_profile(cls):
return cls.profiles_list
def gen_password():
chars = char = string.ascii_uppercase+string.ascii_lowercase+string.digits
length = 9
print('here is are your password:')
password = ''.join(random.choice(chars) for _ in range(-1, length))
print(password)
return password
| none | 1 | 3.29073 | 3 | |
programming-laboratory-I/pmk6/grep.py | MisaelAugusto/computer-science | 0 | 6613576 | <reponame>MisaelAugusto/computer-science<gh_stars>0
# coding: utf-8
# Aluno: <NAME>
# Matrícula: 117110525
# Problema: Grep
palavra_chave = raw_input()
N = int(raw_input())
for i in range(N):
frase = raw_input()
for j in range(len(frase) - 2):
palavra = frase[j] + frase[j + 1] + frase[j + 2]
if palavra == palavra_chave:
print frase
break
| # coding: utf-8
# Aluno: <NAME>
# Matrícula: 117110525
# Problema: Grep
palavra_chave = raw_input()
N = int(raw_input())
for i in range(N):
frase = raw_input()
for j in range(len(frase) - 2):
palavra = frase[j] + frase[j + 1] + frase[j + 2]
if palavra == palavra_chave:
print frase
break | en | 0.249412 | # coding: utf-8 # Aluno: <NAME> # Matrícula: 117110525 # Problema: Grep | 3.619893 | 4 |
tml/rules/__init__.py | translationexchange/tml-python | 1 | 6613577 | # encoding: UTF-8
"""
# Translation rules
#
# Copyright (c) 2015, Translation Exchange, Inc.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = '<EMAIL>'
from .engine import RulesEngine, Error as EngineError
from .functions import SUPPORTED_FUNCTIONS
from .parser import parse
DEFAULT_ENGINE = RulesEngine(SUPPORTED_FUNCTIONS) # default engine
class ContextRules(object):
""" Case of rules """
def __init__(self, choices, default, engine = None):
""" .ctor
choices ((conditions, operations)[]): list of instructions to engine
default (list): default engine instruction, will be executed if each condition is False
engine (RulesEngine): engine to execute insructions
"""
self.choices = choices
self.default = default
self.engine = engine if engine else DEFAULT_ENGINE
def apply(self, data):
""" Apply rule for data """
for conditions, operations in self.choices:
if (self.engine.execute(conditions, data)):
# if data is under conditions execute operations:
return self.engine.execute(operations, data)
# Defalt:
return self.engine.execute(self.default, data)
@classmethod
def from_rules(cls, rules, default = None):
""" Build case from rules
Args:
rules (dict): view API response contexts.*.rules or cases.*.rules
"""
ret = cls([], ['quote', default])
for key in rules:
rule = rules[key]
operation = '(quote %s)' % key
if 'conditions' in rule:
# has conditions:
ret._append(rules[key]['conditions'], operation)
else:
# no conditions - default:
ret.default = parse(operation)
return ret
def _append(self, condition, operation):
self.choices.append((parse(condition), parse(operation)))
| # encoding: UTF-8
"""
# Translation rules
#
# Copyright (c) 2015, Translation Exchange, Inc.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = '<EMAIL>'
from .engine import RulesEngine, Error as EngineError
from .functions import SUPPORTED_FUNCTIONS
from .parser import parse
DEFAULT_ENGINE = RulesEngine(SUPPORTED_FUNCTIONS) # default engine
class ContextRules(object):
""" Case of rules """
def __init__(self, choices, default, engine = None):
""" .ctor
choices ((conditions, operations)[]): list of instructions to engine
default (list): default engine instruction, will be executed if each condition is False
engine (RulesEngine): engine to execute insructions
"""
self.choices = choices
self.default = default
self.engine = engine if engine else DEFAULT_ENGINE
def apply(self, data):
""" Apply rule for data """
for conditions, operations in self.choices:
if (self.engine.execute(conditions, data)):
# if data is under conditions execute operations:
return self.engine.execute(operations, data)
# Defalt:
return self.engine.execute(self.default, data)
@classmethod
def from_rules(cls, rules, default = None):
""" Build case from rules
Args:
rules (dict): view API response contexts.*.rules or cases.*.rules
"""
ret = cls([], ['quote', default])
for key in rules:
rule = rules[key]
operation = '(quote %s)' % key
if 'conditions' in rule:
# has conditions:
ret._append(rules[key]['conditions'], operation)
else:
# no conditions - default:
ret.default = parse(operation)
return ret
def _append(self, condition, operation):
self.choices.append((parse(condition), parse(operation)))
| en | 0.774122 | # encoding: UTF-8 # Translation rules # # Copyright (c) 2015, Translation Exchange, Inc. # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # default engine Case of rules .ctor choices ((conditions, operations)[]): list of instructions to engine default (list): default engine instruction, will be executed if each condition is False engine (RulesEngine): engine to execute insructions Apply rule for data # if data is under conditions execute operations: # Defalt: Build case from rules Args: rules (dict): view API response contexts.*.rules or cases.*.rules # has conditions: # no conditions - default: | 2.296136 | 2 |
tests/unit/test_rest_client.py | windies21/loopchain | 105 | 6613578 | import os
import pytest
from loopchain.baseservice import RestClient, RestMethod
from loopchain.blockchain.types import Hash32, ExternalAddress
from loopchain.blockchain.transactions import TransactionBuilder, TransactionSerializer, TransactionVersioner
from loopchain.crypto.signature import Signer
class TestRestClient:
@pytest.fixture
def rest_client(self):
client = RestClient()
client._target = request_target
return client
@pytest.mark.parametrize("rest_method", RestMethod)
def test_url(self, rest_client: RestClient, rest_method: RestMethod):
url = rest_client.create_url(rest_client._target, rest_method)
assert url == request_urls[rest_method]
@pytest.mark.parametrize("rest_method", RestMethod)
def test_params(self, rest_client: RestClient, rest_method: RestMethod):
params = rest_client.create_params(rest_method, request_params[rest_method])
params.pop('id', None)
assert params == request_params_results[rest_method]
tv = TransactionVersioner()
tb = TransactionBuilder.new(version="0x2", type_=None, versioner=tv)
tb.signer = Signer.new()
tb.to_address = ExternalAddress(os.urandom(20))
tb.fee = 10
tb.value = 1000
tb.nonce = 123
request_tx2 = tb.build()
request_tx2_param = TransactionSerializer.new("0x2", None, tv).to_raw_data(request_tx2)
request_tx2_param["from_"] = request_tx2_param.pop("from")
tb = TransactionBuilder.new(version="0x3", type_=None, versioner=tv)
tb.step_limit = 1000000
tb.value = 100000
tb.signer = Signer.new()
tb.to_address = ExternalAddress(os.urandom(20))
tb.nid = 3
tb.nonce = 1
tb.data = "test"
tb.data_type = "message"
request_tx3 = tb.build()
request_tx3_param = TransactionSerializer.new("0x3", None, tv).to_raw_data(request_tx3)
request_tx3_param["from_"] = request_tx3_param.pop("from")
request_target = "https://fakewallet.icon.foundation:443"
request_urls = {
RestMethod.GetChannelInfos: request_target + "/api/node/icon_dex",
RestMethod.GetBlockByHeight: request_target + "/api/node/icon_dex",
RestMethod.Status: request_target + "/api/v1/status/peer",
RestMethod.GetLastBlock: request_target + "/api/v3/icon_dex",
RestMethod.GetReps: request_target + "/api/v3/icon_dex",
RestMethod.SendTransaction2: request_target + "/api/v2",
RestMethod.SendTransaction3: request_target + "/api/v3/icon_dex"
}
request_params = {
RestMethod.GetChannelInfos: RestMethod.GetChannelInfos.value.params,
RestMethod.GetBlockByHeight: RestMethod.GetBlockByHeight.value.params("100"),
RestMethod.Status: RestMethod.Status.value.params,
RestMethod.GetLastBlock: RestMethod.GetLastBlock.value.params,
RestMethod.GetReps: RestMethod.GetReps.value.params(Hash32.new().hex_0x()),
RestMethod.SendTransaction2: RestMethod.SendTransaction2.value.params(**request_tx2_param),
RestMethod.SendTransaction3: RestMethod.SendTransaction3.value.params(**request_tx3_param)
}
request_tx2_param["from"] = request_tx2_param.pop("from_")
request_tx3_param["from"] = request_tx3_param.pop("from_")
request_params_results = {
RestMethod.GetChannelInfos: {'jsonrpc': '2.0', 'method': 'node_getChannelInfos'},
RestMethod.GetBlockByHeight: {'jsonrpc': '2.0', 'method': 'node_getBlockByHeight', 'params': {'height': '100'}},
RestMethod.Status: {'channel': 'icon_dex'},
RestMethod.GetLastBlock: {'jsonrpc': '2.0', 'method': 'icx_getLastBlock'},
RestMethod.GetReps: {'jsonrpc': '2.0', 'method': 'rep_getListByHash', 'params': {'repsHash': Hash32.new().hex_0x()}},
RestMethod.SendTransaction2: {'jsonrpc': '2.0', 'method': 'icx_sendTransaction', 'params': request_tx2_param},
RestMethod.SendTransaction3: {'jsonrpc': '2.0', 'method': 'icx_sendTransaction', 'params': request_tx3_param}
}
| import os
import pytest
from loopchain.baseservice import RestClient, RestMethod
from loopchain.blockchain.types import Hash32, ExternalAddress
from loopchain.blockchain.transactions import TransactionBuilder, TransactionSerializer, TransactionVersioner
from loopchain.crypto.signature import Signer
class TestRestClient:
    """Unit tests for RestClient's URL and JSON-RPC parameter construction."""

    @pytest.fixture
    def rest_client(self):
        # A client pointed at the fake target; no connection is opened here.
        client = RestClient()
        client._target = request_target
        return client

    @pytest.mark.parametrize("rest_method", RestMethod)
    def test_url(self, rest_client: RestClient, rest_method: RestMethod):
        # every REST method must resolve to its expected endpoint URL
        url = rest_client.create_url(rest_client._target, rest_method)
        assert url == request_urls[rest_method]

    @pytest.mark.parametrize("rest_method", RestMethod)
    def test_params(self, rest_client: RestClient, rest_method: RestMethod):
        params = rest_client.create_params(rest_method, request_params[rest_method])
        # 'id' is randomized per request, so drop it before comparing
        params.pop('id', None)
        assert params == request_params_results[rest_method]
# Shared transaction versioner used to build and serialize the fixtures below.
tv = TransactionVersioner()

# Build a random v2 transfer transaction.
tb = TransactionBuilder.new(version="0x2", type_=None, versioner=tv)
tb.signer = Signer.new()
tb.to_address = ExternalAddress(os.urandom(20))
tb.fee = 10
tb.value = 1000
tb.nonce = 123
request_tx2 = tb.build()
# Serialize it; rename "from" -> "from_" so the dict can be **-splatted
# ("from" is a reserved word and cannot be used as a keyword argument).
request_tx2_param = TransactionSerializer.new("0x2", None, tv).to_raw_data(request_tx2)
request_tx2_param["from_"] = request_tx2_param.pop("from")

# Build a random v3 message transaction.
tb = TransactionBuilder.new(version="0x3", type_=None, versioner=tv)
tb.step_limit = 1000000
tb.value = 100000
tb.signer = Signer.new()
tb.to_address = ExternalAddress(os.urandom(20))
tb.nid = 3
tb.nonce = 1
tb.data = "test"
tb.data_type = "message"
request_tx3 = tb.build()
request_tx3_param = TransactionSerializer.new("0x3", None, tv).to_raw_data(request_tx3)
request_tx3_param["from_"] = request_tx3_param.pop("from")

# Fake wallet endpoint used by the fixtures; nothing is actually contacted.
request_target = "https://fakewallet.icon.foundation:443"

# Expected endpoint URL for every REST method (compared in test_url).
request_urls = {
    RestMethod.GetChannelInfos: request_target + "/api/node/icon_dex",
    RestMethod.GetBlockByHeight: request_target + "/api/node/icon_dex",
    RestMethod.Status: request_target + "/api/v1/status/peer",
    RestMethod.GetLastBlock: request_target + "/api/v3/icon_dex",
    RestMethod.GetReps: request_target + "/api/v3/icon_dex",
    RestMethod.SendTransaction2: request_target + "/api/v2",
    RestMethod.SendTransaction3: request_target + "/api/v3/icon_dex"
}

# Input parameter objects fed to RestClient.create_params (compared in test_params).
request_params = {
    RestMethod.GetChannelInfos: RestMethod.GetChannelInfos.value.params,
    RestMethod.GetBlockByHeight: RestMethod.GetBlockByHeight.value.params("100"),
    RestMethod.Status: RestMethod.Status.value.params,
    RestMethod.GetLastBlock: RestMethod.GetLastBlock.value.params,
    RestMethod.GetReps: RestMethod.GetReps.value.params(Hash32.new().hex_0x()),
    RestMethod.SendTransaction2: RestMethod.SendTransaction2.value.params(**request_tx2_param),
    RestMethod.SendTransaction3: RestMethod.SendTransaction3.value.params(**request_tx3_param)
}

# Restore the JSON-RPC spelling "from" for the expected payload dicts below.
request_tx2_param["from"] = request_tx2_param.pop("from_")
request_tx3_param["from"] = request_tx3_param.pop("from_")

# Expected serialized payloads per method; the randomized 'id' field is
# removed by the test before comparison.
request_params_results = {
    RestMethod.GetChannelInfos: {'jsonrpc': '2.0', 'method': 'node_getChannelInfos'},
    RestMethod.GetBlockByHeight: {'jsonrpc': '2.0', 'method': 'node_getBlockByHeight', 'params': {'height': '100'}},
    RestMethod.Status: {'channel': 'icon_dex'},
    RestMethod.GetLastBlock: {'jsonrpc': '2.0', 'method': 'icx_getLastBlock'},
    RestMethod.GetReps: {'jsonrpc': '2.0', 'method': 'rep_getListByHash', 'params': {'repsHash': Hash32.new().hex_0x()}},
    RestMethod.SendTransaction2: {'jsonrpc': '2.0', 'method': 'icx_sendTransaction', 'params': request_tx2_param},
    RestMethod.SendTransaction3: {'jsonrpc': '2.0', 'method': 'icx_sendTransaction', 'params': request_tx3_param}
}
| none | 1 | 1.963393 | 2 | |
synthesizer/prep_emo.py | fujiaxiang/Real-Time-Voice-Cloning | 0 | 6613579 | <reponame>fujiaxiang/Real-Time-Voice-Cloning
import os
import sys
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from encoder.model import SpeakerEncoder
from encoder.emo_models import EmoEncoder
from encoder.train_emo import collate_fn
from encoder.data_objects.iemocap_dataset import IemocapDataset
def create_embeddings(model, loader, enc_type='speaker'):
    """Run `model` over every batch in `loader` and collect embeddings.

    Returns a list with one numpy array of embeddings per batch.  For
    `enc_type == 'speaker'` the model is expected to return the embedding
    directly; otherwise (emotion encoder) an (embedding, extra) pair.
    Uses the module-level `device` for inference.
    """
    batch_embeddings = []
    model.eval()
    with torch.no_grad():
        for _uttid, features, _labels, _texts, lengths in tqdm(loader):
            packed = pack_padded_sequence(
                features.to(device), lengths.cpu(),
                batch_first=True, enforce_sorted=False)
            output = model(packed)
            embeds = output if enc_type == 'speaker' else output[0]
            batch_embeddings.append(embeds.cpu().detach().numpy())
    return batch_embeddings
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

speaker_enc_path = Path("encoder/saved_models/pretrained.pt")
# emotion_enc_path = Path("encoder/saved_models/test2_backups/test2_bak_180000.pt")
emotion_enc_path = Path("encoder/saved_models/transfer_1_backups/transfer_1_bak_1670000.pt")

# Restore the pretrained speaker encoder.
# NOTE(review): the second device argument is pinned to CPU — presumably the
# loss-computation device; confirm against SpeakerEncoder's constructor.
speaker_enc = SpeakerEncoder(device, torch.device("cpu"))
checkpoint = torch.load(speaker_enc_path, device)
speaker_enc.load_state_dict(checkpoint["model_state"])

# Restore the fine-tuned emotion encoder.
emotion_enc = EmoEncoder(device)
checkpoint = torch.load(emotion_enc_path, device)
emotion_enc.load_state_dict(checkpoint["model_state"])

output_dir = Path("data/iemocap/synthesizer")
output_dir.mkdir(parents=True, exist_ok=True)

# IEMOCAP metadata CSVs, one per data split.
data = {
    'train': "iemocap_meta_train.csv",
    'dev': "iemocap_meta_dev.csv",
    'test': "iemocap_meta_test.csv",
}

for env, meta in data.items():
    print("Env: ", env)
    dataset = IemocapDataset(Path(meta))
    loader = DataLoader(
        dataset,
        batch_size=64,
        shuffle=False,  # preserve dataset order so embeddings align with metadata
        num_workers=os.cpu_count() - 1 if sys.platform.startswith('linux') else 0,
        collate_fn=collate_fn
    )

    # Embed every utterance with both encoders and save one .npy per split.
    print("Creating speaker embeddings...")
    speaker_embeds = create_embeddings(speaker_enc, loader)
    speaker_embeds = np.concatenate(speaker_embeds)
    out_fpath = output_dir.joinpath(f'speaker_enc_{env}' + '.npy')
    np.save(out_fpath, speaker_embeds)

    print("Creating emotion embeddings...")
    emotion_embeds = create_embeddings(emotion_enc, loader, 'emotion')
    emotion_embeds = np.concatenate(emotion_embeds)
    out_fpath = output_dir.joinpath(f'emotion_enc_{env}' + '.npy')
    np.save(out_fpath, emotion_embeds)
# python -m synthesizer.prep_emo
| import os
import sys
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from encoder.model import SpeakerEncoder
from encoder.emo_models import EmoEncoder
from encoder.train_emo import collate_fn
from encoder.data_objects.iemocap_dataset import IemocapDataset
def create_embeddings(model, loader, enc_type='speaker'):
    """Run `model` over every batch in `loader` and collect embeddings.

    Returns a list with one numpy array of embeddings per batch.  For
    `enc_type == 'speaker'` the model is expected to return the embedding
    directly; otherwise (emotion encoder) an (embedding, extra) pair.
    Uses the module-level `device` for inference.
    """
    batch_embeddings = []
    model.eval()
    with torch.no_grad():
        for _uttid, features, _labels, _texts, lengths in tqdm(loader):
            packed = pack_padded_sequence(
                features.to(device), lengths.cpu(),
                batch_first=True, enforce_sorted=False)
            output = model(packed)
            embeds = output if enc_type == 'speaker' else output[0]
            batch_embeddings.append(embeds.cpu().detach().numpy())
    return batch_embeddings
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

speaker_enc_path = Path("encoder/saved_models/pretrained.pt")
# emotion_enc_path = Path("encoder/saved_models/test2_backups/test2_bak_180000.pt")
emotion_enc_path = Path("encoder/saved_models/transfer_1_backups/transfer_1_bak_1670000.pt")

# Restore the pretrained speaker encoder.
# NOTE(review): the second device argument is pinned to CPU — presumably the
# loss-computation device; confirm against SpeakerEncoder's constructor.
speaker_enc = SpeakerEncoder(device, torch.device("cpu"))
checkpoint = torch.load(speaker_enc_path, device)
speaker_enc.load_state_dict(checkpoint["model_state"])

# Restore the fine-tuned emotion encoder.
emotion_enc = EmoEncoder(device)
checkpoint = torch.load(emotion_enc_path, device)
emotion_enc.load_state_dict(checkpoint["model_state"])

output_dir = Path("data/iemocap/synthesizer")
output_dir.mkdir(parents=True, exist_ok=True)

# IEMOCAP metadata CSVs, one per data split.
data = {
    'train': "iemocap_meta_train.csv",
    'dev': "iemocap_meta_dev.csv",
    'test': "iemocap_meta_test.csv",
}

for env, meta in data.items():
    print("Env: ", env)
    dataset = IemocapDataset(Path(meta))
    loader = DataLoader(
        dataset,
        batch_size=64,
        shuffle=False,  # preserve dataset order so embeddings align with metadata
        num_workers=os.cpu_count() - 1 if sys.platform.startswith('linux') else 0,
        collate_fn=collate_fn
    )

    # Embed every utterance with both encoders and save one .npy per split.
    print("Creating speaker embeddings...")
    speaker_embeds = create_embeddings(speaker_enc, loader)
    speaker_embeds = np.concatenate(speaker_embeds)
    out_fpath = output_dir.joinpath(f'speaker_enc_{env}' + '.npy')
    np.save(out_fpath, speaker_embeds)

    print("Creating emotion embeddings...")
    emotion_embeds = create_embeddings(emotion_enc, loader, 'emotion')
    emotion_embeds = np.concatenate(emotion_embeds)
    out_fpath = output_dir.joinpath(f'emotion_enc_{env}' + '.npy')
    np.save(out_fpath, emotion_embeds)
# python -m synthesizer.prep_emo | en | 0.227732 | # emotion_enc_path = Path("encoder/saved_models/test2_backups/test2_bak_180000.pt") # python -m synthesizer.prep_emo | 2.184698 | 2 |
code/installation/THP/tools/split_spritesheet.py | CreativeInquiry/TeenieHarrisProject | 0 | 6613580 | <reponame>CreativeInquiry/TeenieHarrisProject<filename>code/installation/THP/tools/split_spritesheet.py
# Split a tall grayscale sprite sheet into horizontal strips of at most
# H pixel rows each, written out as separate PNG files.
# NOTE(review): the trailing text after the final statement is residue
# from a dataset join, not program code.
import cv2
im = cv2.imread("thumbs_64x64.png",0)  # flag 0: load as single-channel grayscale
h,w = im.shape
H = 10000  # strip height in pixel rows
for i in range(0,h,H):
    print(i)  # progress: top row of the strip being written
    cv2.imwrite("thumbs_64x64_"+str(i)+"-"+str(i+H)+".png",im[i:i+H]) | import cv2
# Split a tall grayscale sprite sheet into horizontal strips of at most
# H pixel rows each, written out as separate PNG files.
# NOTE(review): the trailing text after the final statement is residue
# from a dataset join, not program code.
im = cv2.imread("thumbs_64x64.png",0)  # flag 0: load as single-channel grayscale
h,w = im.shape
H = 10000  # strip height in pixel rows
for i in range(0,h,H):
    print(i)  # progress: top row of the strip being written
    cv2.imwrite("thumbs_64x64_"+str(i)+"-"+str(i+H)+".png",im[i:i+H]) | none | 1 | 2.484214 | 2 |
python/pypoly2tri/utils.py | popupcad/code_pypoly2tri | 1 | 6613581 | <filename>python/pypoly2tri/utils.py
# -*- coding: utf-8 -*-
'''
Written by <NAME> and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
'''
#from enum import Enum
import math
def enum(**enums):
    """Build a simple enumeration class exposing each keyword argument as a
    class attribute (lightweight substitute for the stdlib `enum` module)."""
    return type('Enum', (), dict(enums))
# 3/4 of pi radians (135 degrees)
PI_3div4 = 3 * math.pi / 4
# tolerance below which a floating-point determinant is treated as zero
EPSILON = 1e-12

# winding classification codes for an ordered point triple
Orientation = enum(CW=101, CCW=102, COLLINEAR=103)
def Orient2d(pa, pb, pc):
    """Classify the winding of the ordered point triple (pa, pb, pc).

    Returns Orientation.COLLINEAR when the cross-product determinant is
    within EPSILON of zero, Orientation.CCW when it is positive, and
    Orientation.CW otherwise.
    """
    det = (pa.x - pc.x) * (pb.y - pc.y) - (pa.y - pc.y) * (pb.x - pc.x)
    if -EPSILON < det < EPSILON:
        return Orientation.COLLINEAR
    return Orientation.CCW if det > 0 else Orientation.CW
def InScanArea(pa, pb, pc, pd):
    """Return True when pd lies inside the scan area of (pa, pb, pc).

    Both cross products, (pa - pd) x (pb - pd) and (pc - pd) x (pa - pd),
    must strictly exceed EPSILON for the point to qualify.
    """
    dax, day = pa.x - pd.x, pa.y - pd.y
    dbx, dby = pb.x - pd.x, pb.y - pd.y
    if dax * dby - dbx * day <= EPSILON:
        return False

    dcx, dcy = pc.x - pd.x, pc.y - pd.y
    if dcx * day - dax * dcy <= EPSILON:
        return False

    return True
| <filename>python/pypoly2tri/utils.py
# -*- coding: utf-8 -*-
'''
Written by <NAME> and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
'''
#from enum import Enum
import math
def enum(**enums):
    """Build a simple enumeration class exposing each keyword argument as a
    class attribute (lightweight substitute for the stdlib `enum` module)."""
    return type('Enum', (), dict(enums))
# 3/4 of pi radians (135 degrees)
PI_3div4 = 3 * math.pi / 4
# tolerance below which a floating-point determinant is treated as zero
EPSILON = 1e-12

# winding classification codes for an ordered point triple
Orientation = enum(CW=101, CCW=102, COLLINEAR=103)
def Orient2d(pa, pb, pc):
    """Classify the winding of the ordered point triple (pa, pb, pc).

    Returns Orientation.COLLINEAR when the cross-product determinant is
    within EPSILON of zero, Orientation.CCW when it is positive, and
    Orientation.CW otherwise.
    """
    det = (pa.x - pc.x) * (pb.y - pc.y) - (pa.y - pc.y) * (pb.x - pc.x)
    if -EPSILON < det < EPSILON:
        return Orientation.COLLINEAR
    return Orientation.CCW if det > 0 else Orientation.CW
def InScanArea(pa, pb, pc, pd):
    """Return True when pd lies inside the scan area of (pa, pb, pc).

    Both cross products, (pa - pd) x (pb - pd) and (pc - pd) x (pa - pd),
    must strictly exceed EPSILON for the point to qualify.
    """
    dax, day = pa.x - pd.x, pa.y - pd.y
    dbx, dby = pb.x - pd.x, pb.y - pd.y
    if dax * dby - dbx * day <= EPSILON:
        return False

    dcx, dcy = pc.x - pd.x, pc.y - pd.y
    if dcx * day - dax * dcy <= EPSILON:
        return False

    return True
| en | 0.641105 | # -*- coding: utf-8 -*- Written by <NAME> and CONTRIBUTORS Email: danaukes<at>asu.edu. Please see LICENSE for full license. #from enum import Enum | 2.625976 | 3 |
rlplay/algo/returns.py | ivannz/rlplay | 4 | 6613582 | import numpy
import torch
# NOTE: returns (baselined or not) and advantage estimates are treated as
# constants (not differentiated through) in policy-gradient objectives.
def npy_returns(rew, fin, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""Discounted (optionally importance-weighted) on-policy returns.

    Works backwards through the trajectory with the recursion

        G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}}

    where rew[t] = r_{t+1}, fin[t] = d_{t+1} and \rho_t is the clipped
    importance ratio \min\{e^{\omega_t}, \bar\rho\} (identically one when
    `omega` is None).  `bootstrap` seeds the return past the final step.
    """
    # broadcast the done-mask (and the ratios) over any extra reward dims
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    done = fin.reshape(*fin.shape, *pad)

    rho = None
    if omega is not None:
        # \rho_t = \min\{ \bar\rho, \exp \omega_t \}
        rho = numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))
        rho = rho.reshape(*rho.shape, *pad)

    out = numpy.zeros_like(rew)
    future = numpy.zeros(rew.shape[1:], dtype=rew[-1].dtype)
    future[...] = bootstrap  # G past the horizon
    for t in range(rew.shape[0] - 1, -1, -1):
        disc = numpy.where(done[t], 0., gamma * future)
        if rho is not None:
            disc = disc * rho[t]
        out[t] = rew[t] + disc
        future = out[t]

    return out
def npy_multistep(
    rew,
    fin,
    val,
    *,
    gamma,
    n_lookahead=None,
    bootstrap=0.,
    omega=None,
    r_bar=None,
):
    r"""Compute the h-lookahead multistep returns bootstrapped with values.

    G_t \approx \sum_{j=0}^{h-1}
            \gamma^j \Bigl( \prod_{s=1}^j \omega_{t+s} \Bigr) r_{t+j+1}
        + \gamma^h \Bigl( \prod_{s=1}^h \omega_{t+s} \Bigr) v_{t+h}

    Parameters
    ----------
    rew, fin : rew[t] is r_{t+1} and fin[t] is the done flag d_{t+1}.
    val : value estimates v(s_t) for t = 0..T-1.
    gamma : discount factor.
    n_lookahead : lookahead horizon h; defaults to the full trajectory.
    bootstrap : value estimate past the final step; a scalar or any array
        broadcastable to one trailing step `(1, *rew.shape[1:])`.
    omega, r_bar : optional log importance ratios and their clip level.
    """
    # The h-step lookahead bootstrapped value estimate is obtained by
    # iterating v_{j+1} = r + \gamma F v_j with (F x)(t) = 1_{\neg d_{t+1}} x(t+1):
    #   v_h = \sum_{j=0}^{h-1} \gamma^j F^j r + F^h v
    n_steps, *shape = rew.shape
    n_lookahead = n_lookahead or n_steps

    # eff[t] = \gamma \rho_t 1_{\neg d_{t+1}}, t=0..T-1
    eff = numpy.where(fin, 0., gamma)
    if omega is not None:
        # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
        eff *= numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))

    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    eff = eff.reshape(*eff.shape, *trailing)

    # out[t] = v(s_t) for t < T and the bootstrap value at t = T
    # BUGFIX: `numpy.concatenate([val, bootstrap])` required the caller to
    # pass an array bootstrap, so the documented scalar default (0.)
    # crashed; broadcast the bootstrap to one trailing step instead.
    tail = numpy.broadcast_to(numpy.asarray(bootstrap), (1, *shape))
    out = numpy.concatenate([val, tail], axis=0)

    for _ in range(n_lookahead):
        # out[t] <- r_{t+1} + eff[t] * out[t+1]; out[-1] is kept intact
        out[:-1] = rew + eff * out[1:]

    # do not cut off incomplete returns
    return out[:-1]
def npy_deltas(rew, fin, val, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""One-step TD errors, optionally importance weighted:

        \delta_t = \rho_t \bigl( r_{t+1}
                   + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) \bigr)

    with rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t) and
    `bootstrap` standing in for v(s_T) past the final step.  \rho_t is
    the clipped ratio \min\{e^{\omega_t}, \bar\rho\} (one when `omega`
    is None).
    """
    # broadcast the done-mask over any extra trailing reward dims
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    done = fin.reshape(*fin.shape, *pad)

    # next-state values: shift `val` left by one and bootstrap the last step
    nxt = numpy.empty_like(rew)
    nxt[:-1] = val[1:]
    nxt[-1:] = bootstrap

    delta = numpy.where(done, 0., nxt)
    delta *= gamma
    delta += rew
    delta -= val

    if omega is not None:
        # \rho_t = \min\{ \bar\rho, \exp \omega_t \}
        rho = numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))
        delta *= rho.reshape(*rho.shape, *pad)

    return delta
def npy_gae(rew, fin, val, *, gamma, C, bootstrap=0.):
    r"""Compute the Generalized Advantage Estimator (C is `lambda`).

    \delta^v_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)

    A_t = \delta^v_t + (\gamma \lambda) \delta^v_{t+1}
                     + (\gamma \lambda)^2 \delta^v_{t+2} + ...
        = \delta^v_t + \gamma \lambda A_{t+1} 1_{\neg d_{t+1}}

    Here rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t), and
    `bootstrap` stands in for v(s_T) one step past the trajectory.
    """
    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *trailing)

    n_steps, *shape = rew.shape
    # gae_t keeps one extra all-zero step so gae_t[-j-1] can always read
    # the "next" advantage gae_t[-j]
    gae_t = numpy.zeros((1 + n_steps, *shape), dtype=rew[-1].dtype)
    delta = numpy.zeros(shape, dtype=rew[-1].dtype)
    # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t)
    #  t is -j, t+1 is -j-1 (j=1..T)
    for j in range(1, n_steps + 1):
        # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        # `where=~fin` leaves the masked entries of `delta` untouched,
        # which is why `delta` is explicitly zeroed at the loop's end.
        # numpy.multiply(bootstrap, gamma, out=delta)
        # numpy.putmask(delta, fin[-j], 0.)
        numpy.multiply(bootstrap, gamma, out=delta, where=~fin[-j])
        bootstrap = val[-j]  # v(s_t) is next iter's bootstrap
        delta += rew[-j] - bootstrap

        # A_t = \delta_t + \lambda \gamma A_{t+1} 1_{\neg d_{t+1}}
        numpy.multiply(gae_t[-j], C * gamma, out=gae_t[-j-1], where=~fin[-j])
        gae_t[-j-1] += delta

        # reset delta for the next conditional multiply
        delta[:] = 0

    return gae_t[:-1]
def npy_vtrace(rew, fin, val, omega, *, gamma, r_bar, c_bar, bootstrap=0.):
    r"""Compute the V-trace value estimates ($n \to \infty$ limit):

    \delta^v_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)

    # \delta^v_s = 0 for all s \geq t if d_t = \top
    # \hat{v}^n_s = 0 for all s \geq t if d_t = \top
    \hat{v}^n_t
        = v(s_t) + \sum_{j=t}^{t+n-1} \gamma^{j-t}
                   \delta^v_j \rho_j \prod_{p=t}^{j-1} c_p
        = v(s_t) + \gamma c_t \bigl( \hat{v}^n_{t+1} - v(s_{t+1}) \bigr)
                 + \rho_t \delta^v_t
                 - \gamma^n
                   \delta^v_{t+n} \rho_{t+n} \prod_{p=t}^{t+n-1} c_p

    \hat{v}^\infty_t
        = v(s_t) + \rho_t \delta^v_t + \gamma c_t \bigl(
            \hat{v}^\infty_{t+1} - v(s_{t+1}) \bigr) 1_{\neg d_{t+1}}

    where $c_j = \min\{e^\omega_j, \bar{c} \}$ and $
        \rho_j = \min\{e^\omega_j, \bar{\rho} \}
    $, $\omega_t = \log \pi(a_t \mid x_t) - \log \mu(a_t \mid x_t)$, and
    $\mu$ is the behavior policy, while $\pi$ is the target policy.

    Let $
        \hat{a}_t := \hat{v}^\infty_{t+1} - v(s_{t+1}
    $, then
        \hat{a}_t
            = \rho_t \delta^v_t
            + \gamma c_t \hat{a}_{t+1} 1_{\neg d_{t+1}}
    """
    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *trailing)

    # clamp(max=a) is the same is min(..., a)
    # clipped ratios: \rho scales the td-error, c decays the trace
    rho = numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))
    see = numpy.minimum(numpy.exp(omega), c_bar or float('+inf'))

    # V-trace uses importance weights to correct for off-policy PG
    n_steps, *shape = rew.shape
    # a_hat keeps one extra all-zero step so a_hat[-j-1] can read a_hat[-j]
    a_hat = numpy.zeros((1 + n_steps, *shape), dtype=rew[-1].dtype)
    delta = numpy.zeros(shape, dtype=rew[-1].dtype)
    # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t)
    #  t is -j, t+1 is -j-1 (j=1..T)
    rho = rho.reshape(*rho.shape, *trailing)
    see = see.reshape(*see.shape, *trailing)
    for j in range(1, n_steps + 1):
        # \rho_t \bigl( r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) \bigr)
        # `where=~fin` leaves masked entries of `delta` untouched, hence
        # the explicit `delta[:] = 0` reset at the end of each iteration
        numpy.multiply(bootstrap, gamma, out=delta, where=~fin[-j])
        delta += rew[-j] - val[-j]
        delta *= rho[-j]

        # A_t = \rho_t \delta_t + \c_t \gamma A_{t+1} 1_{\neg d_{t+1}}
        numpy.multiply(a_hat[-j], gamma, out=a_hat[-j-1], where=~fin[-j])
        a_hat[-j-1] *= see[-j]
        a_hat[-j-1] += delta

        # reset delta for the next conditional multiply
        bootstrap = val[-j]  # v(s_t) is next iter's bootstrap
        delta[:] = 0

    # value targets: \hat{v}_t = v(s_t) + \hat{a}_t
    return a_hat[:-1] + val
@torch.no_grad()
def pyt_returns(rew, fin, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""Compute the importance weighted present-value estimate:

        G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}}

    rew[t] = r_{t+1} and fin[t] = d_{t+1}; `bootstrap` seeds the return
    one step past the trajectory.  \rho_t = \min\{\bar\rho, e^{\omega_t}\}
    is the clipped importance ratio (identically one when `omega` is None).
    """
    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *trailing)

    bootstrap = torch.as_tensor(bootstrap)

    # v(s_t) ~ G_t = r_{t+1} + \gamma G_{t+1} 1_{\neg d_{t+1}}
    #  r_{t+1}, s_{t+1} \sim p(r, s, \mid s_t, a_t), a_t \sim \pi(a \mid s_t)
    #  d_{t+1} indicates if $s_{t+1}$ is terminal
    if omega is not None:
        # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
        rho = omega.exp().clamp_(max=r_bar or float('+inf'))
        rho = rho.reshape(*rho.shape, *trailing)

    n_steps, *shape = rew.shape
    G_t = rew.new_zeros((1 + n_steps, *shape))
    G_t[-1].copy_(bootstrap)  # bootstrap of \approx (r_{H+k+1})_{k\geq 0}
    for j in range(1, n_steps + 1):
        # G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}}
        # XXX G[-j] is G_{t+1} and G[-j-1] is G_t, and G_t[-j-1] is all zeros
        if omega is not None:
            G_t[-j-1].addcmul_(G_t[-j], rho[-j], value=gamma)
        else:
            G_t[-j-1].add_(G_t[-j], alpha=gamma)
        G_t[-j-1].masked_fill_(fin[-j], 0.)
        G_t[-j-1].add_(rew[-j])  # add the received reward r_{t+1}

    return G_t[:-1]
@torch.no_grad()
def pyt_multistep(
    rew,
    fin,
    val,
    *,
    gamma,
    n_lookahead=None,
    bootstrap=0.,
    omega=None,
    r_bar=None,
):
    r"""Compute the h-lookahead multistep returns bootstrapped with values
    (torch analogue of `npy_multistep`):

        G_t \approx \sum_{j=0}^{h-1} \gamma^j r_{t+j+1} \prod_{s=1}^j \omega_{t+s}
                  + \gamma^h v_{t+h} \prod_{s=1}^h \omega_{t+s}

    rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t); `bootstrap`
    stands in for v(s_T); `omega` holds optional log importance ratios
    clipped at `r_bar`.
    """
    # v(s_t) ~ G_t = r_{t+1} + \gamma G_{t+1} 1_{\neg d_{t+1}}
    #  r_{t+1}, s_{t+1} \sim p(r, s, \mid s_t, a_t), a_t \sim \pi(a \mid s_t)
    #  d_{t+1} indicates if $s_{t+1}$ is terminal
    n_steps, *shape = rew.shape
    n_lookahead = n_lookahead or n_steps

    # eff[t] = (~fin[t]) * gamma = 1_{\neg d_{t+1}} \gamma, t=0..T-1
    eff = rew.new_full(fin.shape, gamma).masked_fill_(fin, 0.)
    if omega is not None:
        # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
        eff.mul_(omega.exp().clamp_(max=r_bar or float('+inf')))

    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    eff = eff.reshape(*eff.shape, *trailing)

    # a double buffer for the intermediate calculations is autodiff-friendly
    # XXX for autodiff it is better to make `val` have n_steps+1 length
    #  with its last value being the bootstrap
    out = val.new_zeros((2, n_steps + 1, *shape))
    out[:, -1:].copy_(torch.as_tensor(bootstrap))
    out[:, :-1].copy_(val)

    j = 0  # index into double buffer that we read from
    for _ in range(n_lookahead):
        # out[1-j, t] = rew[t] + eff[t] * out[j, t+1], t=0..T-1
        # out[1-j, :-1] = torch.addcmul(rew, eff, out[j, 1:])
        torch.addcmul(rew, eff, out[j, 1:], out=out[1-j, :-1])
        # flip the buffer: the freshly written half becomes the source
        j = 1 - j

    # do not cut off incomplete returns
    return out[j, :-1]
@torch.no_grad()
def pyt_deltas(rew, fin, val, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""One-step TD errors, optionally importance weighted:

        \delta_t = \rho_t \bigl( r_{t+1}
                   + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) \bigr)

    with rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t) and
    `bootstrap` standing in for v(s_T); \rho_t is the clipped ratio
    (one when `omega` is None).
    """
    # broadcast the done-mask over any extra trailing reward dims
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    done = fin.reshape(*fin.shape, *pad)

    # next-state values: shift `val` left by one and bootstrap the last step
    nxt = torch.empty_like(rew).copy_(torch.as_tensor(bootstrap))
    nxt[:-1] = val[1:]

    # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
    delta = nxt.masked_fill(done, 0.).mul_(gamma).add_(rew).sub_(val)
    if omega is None:
        return delta

    # \rho_t = \min\{ \bar\rho, \exp \omega_t \}
    rho = omega.exp().clamp_(max=r_bar or float('+inf'))
    return delta.mul_(rho.reshape(*rho.shape, *pad))
@torch.no_grad()
def pyt_gae(rew, fin, val, *, gamma, C, bootstrap=0.):
    r"""Torch analogue of `npy_gae`: the Generalized Advantage Estimator
    (C is `lambda`),

        \delta^v_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        A_t = \delta^v_t + \gamma \lambda A_{t+1} 1_{\neg d_{t+1}}

    with rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t) and
    `bootstrap` standing in for v(s_T) past the last step.
    """
    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *trailing)

    n_steps, *shape = rew.shape
    bootstrap = torch.as_tensor(bootstrap)
    # gae_t keeps one extra all-zero step so gae_t[-j-1] can read gae_t[-j]
    gae_t, delta = rew.new_zeros((1 + n_steps, *shape)), rew.new_zeros(shape)
    # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t)
    #  t is -j, t+1 is -j-1 (j=1..T)
    for j in range(1, n_steps + 1):
        # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        delta.add_(bootstrap, alpha=gamma).masked_fill_(fin[-j], 0.)
        delta.add_(rew[-j]).sub_(val[-j])  # add r_{t+1} - v(s_t)

        # A_t = \delta_t + \lambda \gamma A_{t+1} 1_{\neg d_{t+1}}
        gae_t[-j-1].add_(gae_t[-j], alpha=C * gamma).masked_fill_(fin[-j], 0.)
        gae_t[-j-1].add_(delta)

        bootstrap = val[-j]  # v(s_t) is next iter's bootstrap
        delta.zero_()  # delta buffer is reused across iterations

    return gae_t[:-1]
@torch.no_grad()
def pyt_vtrace(rew, fin, val, *, gamma, bootstrap=0., omega=None, r_bar, c_bar):
    r"""Torch analogue of `npy_vtrace`: V-trace value targets in the
    $n \to \infty$ limit,

        \hat{a}_t = \rho_t \delta^v_t + \gamma c_t \hat{a}_{t+1} 1_{\neg d_{t+1}}
        \hat{v}^\infty_t = v(s_t) + \hat{a}_t

    with \rho_t = \min\{\bar\rho, e^{\omega_t}\},
    c_t = \min\{\bar{c}, e^{\omega_t}\} and
    \omega_t = \log \pi_t(a_t) - \log \mu_t(a_t).

    rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t); `bootstrap`
    stands in for v(s_T).  When `omega` is None the data is treated as
    on-policy (log-ratio identically zero).
    """
    # BUGFIX: the declared default `omega=None` used to crash on `None.exp()`;
    # interpret a missing omega as the on-policy zero log-ratio instead.
    if omega is None:
        omega = torch.zeros_like(fin, dtype=rew.dtype)

    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *trailing)

    n_steps, *shape = rew.shape
    bootstrap = torch.as_tensor(bootstrap)

    # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
    rho = omega.exp().clamp_(max=r_bar or float('+inf'))
    rho = rho.reshape(*rho.shape, *trailing)

    # c_t = \min\{ \bar{c}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
    see = omega.exp().clamp_(max=c_bar or float('+inf'))
    see = see.reshape(*see.shape, *trailing)

    # a_hat keeps one extra all-zero step so a_hat[-j-1] can read a_hat[-j]
    a_hat, delta = rew.new_zeros((1 + n_steps, *shape)), rew.new_zeros(shape)
    # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t)
    #  t is -j, t+1 is -j-1 (j=1..T)
    for j in range(1, n_steps + 1):
        # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        delta.add_(bootstrap, alpha=gamma).masked_fill_(fin[-j], 0.)
        delta.add_(rew[-j]).sub_(val[-j])  # add r_{t+1} - v(s_t)

        # A_t = \rho_t \delta_t + \gamma \c_t A_{t+1} 1_{\neg d_{t+1}}
        a_hat[-j-1].addcmul_(a_hat[-j], see[-j], value=gamma)
        a_hat[-j-1].masked_fill_(fin[-j], 0.)
        a_hat[-j-1].addcmul_(delta, rho[-j])

        bootstrap = val[-j]  # v(s_t) is next iter's bootstrap
        delta.zero_()  # delta buffer is reused across iterations

    # value targets: \hat{v}_t = v(s_t) + \hat{a}_t
    return a_hat[:-1] + val
| import numpy
import torch
# NOTE: returns (baselined or not) and advantage estimates are treated as
# constants (not differentiated through) in policy-gradient objectives.
def npy_returns(rew, fin, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""Discounted (optionally importance-weighted) on-policy returns.

    Works backwards through the trajectory with the recursion

        G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}}

    where rew[t] = r_{t+1}, fin[t] = d_{t+1} and \rho_t is the clipped
    importance ratio \min\{e^{\omega_t}, \bar\rho\} (identically one when
    `omega` is None).  `bootstrap` seeds the return past the final step.
    """
    # broadcast the done-mask (and the ratios) over any extra reward dims
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    done = fin.reshape(*fin.shape, *pad)

    rho = None
    if omega is not None:
        # \rho_t = \min\{ \bar\rho, \exp \omega_t \}
        rho = numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))
        rho = rho.reshape(*rho.shape, *pad)

    out = numpy.zeros_like(rew)
    future = numpy.zeros(rew.shape[1:], dtype=rew[-1].dtype)
    future[...] = bootstrap  # G past the horizon
    for t in range(rew.shape[0] - 1, -1, -1):
        disc = numpy.where(done[t], 0., gamma * future)
        if rho is not None:
            disc = disc * rho[t]
        out[t] = rew[t] + disc
        future = out[t]

    return out
def npy_multistep(
    rew,
    fin,
    val,
    *,
    gamma,
    n_lookahead=None,
    bootstrap=0.,
    omega=None,
    r_bar=None,
):
    r"""Compute the h-lookahead multistep returns bootstrapped with values.

    G_t \approx \sum_{j=0}^{h-1}
            \gamma^j \Bigl( \prod_{s=1}^j \omega_{t+s} \Bigr) r_{t+j+1}
        + \gamma^h \Bigl( \prod_{s=1}^h \omega_{t+s} \Bigr) v_{t+h}

    Parameters
    ----------
    rew, fin : rew[t] is r_{t+1} and fin[t] is the done flag d_{t+1}.
    val : value estimates v(s_t) for t = 0..T-1.
    gamma : discount factor.
    n_lookahead : lookahead horizon h; defaults to the full trajectory.
    bootstrap : value estimate past the final step; a scalar or any array
        broadcastable to one trailing step `(1, *rew.shape[1:])`.
    omega, r_bar : optional log importance ratios and their clip level.
    """
    # The h-step lookahead bootstrapped value estimate is obtained by
    # iterating v_{j+1} = r + \gamma F v_j with (F x)(t) = 1_{\neg d_{t+1}} x(t+1):
    #   v_h = \sum_{j=0}^{h-1} \gamma^j F^j r + F^h v
    n_steps, *shape = rew.shape
    n_lookahead = n_lookahead or n_steps

    # eff[t] = \gamma \rho_t 1_{\neg d_{t+1}}, t=0..T-1
    eff = numpy.where(fin, 0., gamma)
    if omega is not None:
        # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
        eff *= numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))

    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    eff = eff.reshape(*eff.shape, *trailing)

    # out[t] = v(s_t) for t < T and the bootstrap value at t = T
    # BUGFIX: `numpy.concatenate([val, bootstrap])` required the caller to
    # pass an array bootstrap, so the documented scalar default (0.)
    # crashed; broadcast the bootstrap to one trailing step instead.
    tail = numpy.broadcast_to(numpy.asarray(bootstrap), (1, *shape))
    out = numpy.concatenate([val, tail], axis=0)

    for _ in range(n_lookahead):
        # out[t] <- r_{t+1} + eff[t] * out[t+1]; out[-1] is kept intact
        out[:-1] = rew + eff * out[1:]

    # do not cut off incomplete returns
    return out[:-1]
def npy_deltas(rew, fin, val, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""One-step TD errors, optionally importance weighted:

        \delta_t = \rho_t \bigl( r_{t+1}
                   + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) \bigr)

    with rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t) and
    `bootstrap` standing in for v(s_T) past the final step.  \rho_t is
    the clipped ratio \min\{e^{\omega_t}, \bar\rho\} (one when `omega`
    is None).
    """
    # broadcast the done-mask over any extra trailing reward dims
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    done = fin.reshape(*fin.shape, *pad)

    # next-state values: shift `val` left by one and bootstrap the last step
    nxt = numpy.empty_like(rew)
    nxt[:-1] = val[1:]
    nxt[-1:] = bootstrap

    delta = numpy.where(done, 0., nxt)
    delta *= gamma
    delta += rew
    delta -= val

    if omega is not None:
        # \rho_t = \min\{ \bar\rho, \exp \omega_t \}
        rho = numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))
        delta *= rho.reshape(*rho.shape, *pad)

    return delta
def npy_gae(rew, fin, val, *, gamma, C, bootstrap=0.):
    r"""Compute the Generalized Advantage Estimator (C is `lambda`).

    \delta^v_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)

    A_t = \delta^v_t + (\gamma \lambda) \delta^v_{t+1}
                     + (\gamma \lambda)^2 \delta^v_{t+2} + ...
        = \delta^v_t + \gamma \lambda A_{t+1} 1_{\neg d_{t+1}}

    Here rew[t] = r_{t+1}, fin[t] = d_{t+1}, val[t] = v(s_t), and
    `bootstrap` stands in for v(s_T) one step past the trajectory.
    """
    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *trailing)

    n_steps, *shape = rew.shape
    # gae_t keeps one extra all-zero step so gae_t[-j-1] can always read
    # the "next" advantage gae_t[-j]
    gae_t = numpy.zeros((1 + n_steps, *shape), dtype=rew[-1].dtype)
    delta = numpy.zeros(shape, dtype=rew[-1].dtype)
    # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t)
    #  t is -j, t+1 is -j-1 (j=1..T)
    for j in range(1, n_steps + 1):
        # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        # `where=~fin` leaves the masked entries of `delta` untouched,
        # which is why `delta` is explicitly zeroed at the loop's end.
        # numpy.multiply(bootstrap, gamma, out=delta)
        # numpy.putmask(delta, fin[-j], 0.)
        numpy.multiply(bootstrap, gamma, out=delta, where=~fin[-j])
        bootstrap = val[-j]  # v(s_t) is next iter's bootstrap
        delta += rew[-j] - bootstrap

        # A_t = \delta_t + \lambda \gamma A_{t+1} 1_{\neg d_{t+1}}
        numpy.multiply(gae_t[-j], C * gamma, out=gae_t[-j-1], where=~fin[-j])
        gae_t[-j-1] += delta

        # reset delta for the next conditional multiply
        delta[:] = 0

    return gae_t[:-1]
def npy_vtrace(rew, fin, val, omega, *, gamma, r_bar, c_bar, bootstrap=0.):
    r"""Compute the V-trace value estimates ($n \to \infty$ limit):

    \delta^v_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)

    # \delta^v_s = 0 for all s \geq t if d_t = \top
    # \hat{v}^n_s = 0 for all s \geq t if d_t = \top
    \hat{v}^n_t
        = v(s_t) + \sum_{j=t}^{t+n-1} \gamma^{j-t}
                   \delta^v_j \rho_j \prod_{p=t}^{j-1} c_p
        = v(s_t) + \gamma c_t \bigl( \hat{v}^n_{t+1} - v(s_{t+1}) \bigr)
                 + \rho_t \delta^v_t
                 - \gamma^n
                   \delta^v_{t+n} \rho_{t+n} \prod_{p=t}^{t+n-1} c_p

    \hat{v}^\infty_t
        = v(s_t) + \rho_t \delta^v_t + \gamma c_t \bigl(
            \hat{v}^\infty_{t+1} - v(s_{t+1}) \bigr) 1_{\neg d_{t+1}}

    where $c_j = \min\{e^\omega_j, \bar{c} \}$ and $
        \rho_j = \min\{e^\omega_j, \bar{\rho} \}
    $, $\omega_t = \log \pi(a_t \mid x_t) - \log \mu(a_t \mid x_t)$, and
    $\mu$ is the behavior policy, while $\pi$ is the target policy.

    Let $
        \hat{a}_t := \hat{v}^\infty_{t+1} - v(s_{t+1}
    $, then
        \hat{a}_t
            = \rho_t \delta^v_t
            + \gamma c_t \hat{a}_{t+1} 1_{\neg d_{t+1}}
    """
    # add extra trailing unitary dims for broadcasting
    trailing = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *trailing)

    # clamp(max=a) is the same is min(..., a)
    # clipped ratios: \rho scales the td-error, c decays the trace
    rho = numpy.minimum(numpy.exp(omega), r_bar or float('+inf'))
    see = numpy.minimum(numpy.exp(omega), c_bar or float('+inf'))

    # V-trace uses importance weights to correct for off-policy PG
    n_steps, *shape = rew.shape
    # a_hat keeps one extra all-zero step so a_hat[-j-1] can read a_hat[-j]
    a_hat = numpy.zeros((1 + n_steps, *shape), dtype=rew[-1].dtype)
    delta = numpy.zeros(shape, dtype=rew[-1].dtype)
    # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t)
    #  t is -j, t+1 is -j-1 (j=1..T)
    rho = rho.reshape(*rho.shape, *trailing)
    see = see.reshape(*see.shape, *trailing)
    for j in range(1, n_steps + 1):
        # \rho_t \bigl( r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) \bigr)
        # `where=~fin` leaves masked entries of `delta` untouched, hence
        # the explicit `delta[:] = 0` reset at the end of each iteration
        numpy.multiply(bootstrap, gamma, out=delta, where=~fin[-j])
        delta += rew[-j] - val[-j]
        delta *= rho[-j]

        # A_t = \rho_t \delta_t + \c_t \gamma A_{t+1} 1_{\neg d_{t+1}}
        numpy.multiply(a_hat[-j], gamma, out=a_hat[-j-1], where=~fin[-j])
        a_hat[-j-1] *= see[-j]
        a_hat[-j-1] += delta

        # reset delta for the next conditional multiply
        bootstrap = val[-j]  # v(s_t) is next iter's bootstrap
        delta[:] = 0

    # value targets: \hat{v}_t = v(s_t) + \hat{a}_t
    return a_hat[:-1] + val
@torch.no_grad()
def pyt_returns(rew, fin, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""Present value of the future rewards, optionally importance weighted:

        G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}}

    `rew[t]` and `fin[t]` hold r_{t+1} and the terminal flag d_{t+1}; when
    `omega` (log importance ratios) is given, the recursion is corrected by
    the truncated ratio \rho_t = \min\{\bar\rho, e^{\omega_t}\}.
    """
    # pad `fin` (and the ratios) with unitary dims so they broadcast over
    # any trailing dims of `rew`
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *pad)
    bootstrap = torch.as_tensor(bootstrap)
    weights = None
    if omega is not None:
        # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
        weights = omega.exp().clamp_(max=r_bar or float('+inf'))
        weights = weights.reshape(*weights.shape, *pad)
    horizon, *batch = rew.shape
    returns = rew.new_zeros((horizon + 1, *batch))
    returns[-1].copy_(bootstrap)  # stands in for the tail beyond the trace
    # backward recursion over t = horizon-1 .. 0
    for t in range(horizon - 1, -1, -1):
        nxt = returns[t + 1]
        if weights is None:
            returns[t].add_(nxt, alpha=gamma)
        else:
            returns[t].addcmul_(nxt, weights[t], value=gamma)
        # cut the discounted tail at episode boundaries, then add r_{t+1}
        returns[t].masked_fill_(fin[t], 0.).add_(rew[t])
    return returns[:-1]
@torch.no_grad()
def pyt_multistep(
    rew,
    fin,
    val,
    *,
    gamma,
    n_lookahead=None,
    bootstrap=0.,
    omega=None,
    r_bar=None,
):
    r"""h-step lookahead returns bootstrapped with the value estimates:

        v^{(h)}_t \approx \sum_{j=0}^{h-1} \gamma^j
                \Bigl( \prod_{s=1}^j \rho_{t+s} \Bigr) r_{t+j+1}
            + \gamma^h \Bigl( \prod_{s=1}^h \rho_{t+s} \Bigr) v(s_{t+h})

    where episode ends cut the recursion via 1_{\neg d}, and \rho are the
    truncated importance ratios (all ones when `omega` is None).
    `n_lookahead=None` (or 0) means a full-horizon lookahead.
    """
    horizon, *batch = rew.shape
    lookahead = n_lookahead or horizon
    # carry[t] = \gamma \rho_{t+1} 1_{\neg d_{t+1}} -- per-step carry factor
    carry = rew.new_full(fin.shape, gamma).masked_fill_(fin, 0.)
    if omega is not None:
        # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
        carry.mul_(omega.exp().clamp_(max=r_bar or float('+inf')))
    # pad with unitary dims so `carry` broadcasts over trailing dims of `rew`
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    carry = carry.reshape(*carry.shape, *pad)
    # cur[t] = v(s_t) for t < horizon, cur[horizon] = bootstrap v(s_T)
    cur = val.new_zeros((horizon + 1, *batch))
    cur[-1:].copy_(torch.as_tensor(bootstrap))
    cur[:-1].copy_(val)
    tail = cur[-1:]  # the bootstrap row is never rewritten
    for _ in range(lookahead):
        # one Bellman backup: v_{k+1}(t) = r_{t+1} + carry[t] v_k(t+1)
        cur = torch.cat([torch.addcmul(rew, carry, cur[1:]), tail])
    # do not cut off incomplete returns near the end of the trace
    return cur[:-1]
@torch.no_grad()
def pyt_deltas(rew, fin, val, *, gamma, bootstrap=0., omega=None, r_bar=None):
    r"""Importance weighted one-step td-error estimates:

        \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)

    optionally scaled by the truncated ratio \rho_t when `omega` is given.
    """
    # pad with unitary dims so `fin`/`rho` broadcast over trailing dims
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *pad)
    # nxt[t] = v(s_{t+1}); the bootstrap stands in for v(s_T)
    nxt = torch.empty_like(rew)
    nxt[-1].copy_(torch.as_tensor(bootstrap))
    nxt[:-1].copy_(val[1:])
    # zero the successor value at terminal transitions
    nxt = torch.where(fin, torch.zeros_like(nxt), nxt)
    # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
    delta = rew + gamma * nxt - val
    if omega is not None:
        # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
        rho = torch.exp(omega).clamp(max=r_bar or float('+inf'))
        delta = delta * rho.reshape(*rho.shape, *pad)
    return delta
@torch.no_grad()
def pyt_gae(rew, fin, val, *, gamma, C, bootstrap=0.):
    r"""Generalized Advantage Estimator (`C` is the \lambda of GAE):

        \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        A_t = \delta_t + \gamma \lambda A_{t+1} 1_{\neg d_{t+1}}
    """
    # pad with unitary dims so `fin` broadcasts over trailing dims of `rew`
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *pad)
    horizon, *batch = rew.shape
    nxt_val = torch.as_tensor(bootstrap)  # v(s_{t+1}) of the step at hand
    adv = rew.new_zeros((horizon + 1, *batch))
    # rew[t], fin[t], val[t] hold r_{t+1}, d_{t+1} and v(s_t); go backwards
    for t in range(horizon - 1, -1, -1):
        keep = ~fin[t]  # 1_{\neg d_{t+1}}
        # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        delta = rew[t] + gamma * keep * nxt_val - val[t]
        # A_t = \delta_t + \gamma \lambda A_{t+1} 1_{\neg d_{t+1}}
        adv[t] = delta + (C * gamma) * keep * adv[t + 1]
        nxt_val = val[t]  # v(s_t) bootstraps the next iteration
    return adv[:-1]
@torch.no_grad()
def pyt_vtrace(rew, fin, val, *, gamma, bootstrap=0., omega=None, r_bar, c_bar):
    r"""V-trace value targets ($n \to \infty$ limit, IMPALA):

        \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        \hat{a}_t = \rho_t \delta_t + \gamma c_t \hat{a}_{t+1} 1_{\neg d_{t+1}}
        \hat{v}_t = v(s_t) + \hat{a}_t

    with truncated importance ratios \rho_t = \min\{\bar\rho, e^{\omega_t}\}
    and c_t = \min\{\bar c, e^{\omega_t}\}, where \omega_t is the behavior-to-
    target log-ratio. `omega` is mandatory here (unlike the `returns`/`deltas`
    siblings there is no on-policy fallback); `r_bar`/`c_bar` of None or 0
    mean no truncation.
    """
    if omega is None:
        # fail loudly instead of with an obscure AttributeError below --
        # v-trace without the log importance ratios is meaningless
        raise ValueError("pyt_vtrace requires the log importance ratios `omega`")
    # pad with unitary dims so flags and ratios broadcast over trailing dims
    pad = (1,) * max(rew.ndim - fin.ndim, 0)
    fin = fin.reshape(*fin.shape, *pad)
    ratio = omega.exp()
    # \rho_t caps the td-error term, c_t caps the propagated trace
    rho = ratio.clamp(max=r_bar or float('+inf')).reshape(*ratio.shape, *pad)
    see = ratio.clamp(max=c_bar or float('+inf')).reshape(*ratio.shape, *pad)
    horizon, *batch = rew.shape
    nxt_val = torch.as_tensor(bootstrap)  # v(s_{t+1}) of the step at hand
    a_hat = rew.new_zeros((horizon + 1, *batch))
    # rew[t], fin[t], val[t] hold r_{t+1}, d_{t+1} and v(s_t); go backwards
    for t in range(horizon - 1, -1, -1):
        keep = ~fin[t]  # 1_{\neg d_{t+1}}
        # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
        delta = rew[t] + gamma * keep * nxt_val - val[t]
        # \hat{a}_t = \rho_t \delta_t + \gamma c_t \hat{a}_{t+1} 1_{\neg d_{t+1}}
        a_hat[t] = rho[t] * delta + gamma * keep * see[t] * a_hat[t + 1]
        nxt_val = val[t]  # v(s_t) bootstraps the next iteration
    # \hat{v}_t = v(s_t) + \hat{a}_t
    return a_hat[:-1] + val
| en | 0.620397 | # returns, baselined or not, or advantage estimates are not diff-able in PG Compute the on-policy returns (the present value of the future rewards). G_t = r_{t+1} + \gamma \omega_{t+1} r_{t+2} + \gamma^2 \omega_{t+1} \omega_{t+2} r_{t+3} + ... = \sum_{j\geq t} r_{j+1} \gamma^{j-t} \prod_{s=t+1}^j \omega_s = r_{t+1} + \gamma \omega_{t+1} G_{t+1} # add extra trailing unitary dims for broadcasting # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} # rew[t], fin[t] is r_{t+1} and d_{t+1} # get G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}} # XXX G_t[-j-1] is all zeros Compute the h-lookahead multistep returns bootstrapped with values. G_t = r_{t+1} + \gamma \omega_{t+1} r_{t+2} + \gamma^2 \omega_{t+1} \omega_{t+2} r_{t+3} + ... = \sum_{j\geq t} r_{j+1} \gamma^{j-t} \prod_{s=t+1}^j \omega_s = \sum_{j=0}^{h-1} \gamma^j r_{t+j+1} \prod_{s=1}^j \omega_{t+s} + \gamma^h \prod_{s=1}^h \omega_{t+s} G_{t+h} \approx \sum_{j=0}^{h-1} \gamma^j \Bigl( \prod_{s=1}^j \omega_{t+s} \Bigr) r_{t+j+1} + \gamma^h \Bigl( \prod_{s=1}^h \omega_{t+s} \Bigr) v_{t+h} # r(t) = rew[t] = r_{t+1}, ditto for d = fin, # v(t) = val[t] if t < T, bsv if t=T, 0 o/w # let op F be def-nd as # (F x)(t) := \omega_{t+1} d_{t+1} x(t+1) = d[t] * x[t+1] # then the m-step lookahead bootstrapped value estimate is # v_0 = v, v_{j+1} = r + \gamma F v_j, j=0..h-1 # or after unrolling: # v_h = \sum_{j=0}^{h-1} \gamma^j F^j r + F^h v # XXX this function has at most the same complexity as `npy_returns` # for `n_lookahead = None`. 
# eff[t] = (~fin[t]) * gamma = 1_{\neg d_{t+1}} \gamma, t=0..T-1 # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} # add extra trailing unitary dims for broadcasting # out[t] = val[t] = v(s_t), t=0..T-1, out[T] = bsv = v(s_T) # compute the multistep returns by shifting t to t-1 repeatedly # assume bsv has len = 1 # XXX no need for double buffering # out[t] = rew[t] + eff[t] * out[t+1], t=0..T-1 # = r_{t+1} + \gamma 1_{\neg d_{t+1}} v(s_{t+1}) # out[-1] is to be kept intact! # do not cut off incomplete returns Compute the importance weighted td-error estimates: \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # \delta^v_s = 0 for all s \geq t if d_t = \top # add extra trailing unitary dims for broadcasting # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # `.putmask` is weird with broadcasting # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} Compute the Generalized Advantage Estimator (C is `lambda`). \delta^v_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) A_t = \delta^v_t + (\gamma \lambda) \delta^v_{t+1} + (\gamma \lambda)^2 \delta^v_{t+2} + ... = \delta^v_t + \gamma \lambda A_{t+1} 1_{\neg d_{t+1}} # add extra trailing unitary dims for broadcasting # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t) # t is -j, t+1 is -j-1 (j=1..T) # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # numpy.multiply(bootstrap, gamma, out=delta) # numpy.putmask(delta, fin[-j], 0.) 
# v(s_t) is next iter's bootstrap # A_t = \delta_t + \lambda \gamma A_{t+1} 1_{\neg d_{t+1}} # reset delta for the next conditional multiply Compute the V-trace value estimates ($n \to \infty$ limit): \delta^v_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # \delta^v_s = 0 for all s \geq t if d_t = \top # \hat{v}^n_s = 0 for all s \geq t if d_t = \top \hat{v}^n_t = v(s_t) + \sum_{j=t}^{t+n-1} \gamma^{j-t} \delta^v_j \rho_j \prod_{p=t}^{j-1} c_p = v(s_t) + \gamma c_t \bigl( \hat{v}^n_{t+1} - v(s_{t+1}) \bigr) + \rho_t \delta^v_t - \gamma^n \delta^v_{t+n} \rho_{t+n} \prod_{p=t}^{t+n-1} c_p \hat{v}^\infty_t = v(s_t) + \rho_t \delta^v_t + \gamma c_t \bigl( \hat{v}^\infty_{t+1} - v(s_{t+1}) \bigr) 1_{\neg d_{t+1}} where $c_j = \min\{e^\omega_j, \bar{c} \}$ and $ \rho_j = \min\{e^\omega_j, \bar{\rho} \} $, $\omega_t = \log \pi(a_t \mid x_t) - \log \mu(a_t \mid x_t)$, and $\mu$ is the behavior policy, while $\pi$ is the target policy. Let $ \hat{a}_t := \hat{v}^\infty_{t+1} - v(s_{t+1} $, then \hat{a}_t = \rho_t \delta^v_t + \gamma c_t \hat{a}_{t+1} 1_{\neg d_{t+1}} # add extra trailing unitary dims for broadcasting # clamp(max=a) is the same is min(..., a) # V-trace uses importance weights to correct for off-policy PG # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t) # t is -j, t+1 is -j-1 (j=1..T) # \rho_t \bigl( r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) \bigr) # A_t = \rho_t \delta_t + \c_t \gamma A_{t+1} 1_{\neg d_{t+1}} # reset delta for the next conditional multiply # v(s_t) is next iter's bootstrap Compute the importance weighted present-value estimate: G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}} # add extra trailing unitary dims for broadcasting # v(s_t) ~ G_t = r_{t+1} + \gamma G_{t+1} 1_{\neg d_{t+1}} # r_{t+1}, s_{t+1} \sim p(r, s, \mid s_t, a_t), a_t \sim \pi(a \mid s_t) # d_{t+1} indicates if $s_{t+1}$ is terminal # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} # bootstrap of \approx (r_{H+k+1})_{k\geq 0} 
# G_t = \rho_t \delta_t + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}} # XXX G[-j] is G_{t+1} and G[-j-1] is G_t, and G_t[-j-1] is all zeros # add the received reward r_{t+1} Compute the importance weighted present-value estimate: G_t = r_{t+1} + \gamma \rho_t G_{t+1} 1_{\neg d_{t+1}} # v(s_t) ~ G_t = r_{t+1} + \gamma G_{t+1} 1_{\neg d_{t+1}} # r_{t+1}, s_{t+1} \sim p(r, s, \mid s_t, a_t), a_t \sim \pi(a \mid s_t) # d_{t+1} indicates if $s_{t+1}$ is terminal # eff[t] = (~fin[t]) * gamma = 1_{\neg d_{t+1}} \gamma, t=0..T-1 # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} # add extra trailing unitary dims for broadcasting # a double buffer for the intermediate calculations is autodiff-friendly # XXX for autodiff it is better to make `val` have n_steps+1 length # with its last value being the bootstrap # index into double buffer that we read from # out[1-j, t] = rew[t] + eff[t] * out[j, t+1], t=0..T-1 # out[1-j, :-1] = torch.addcmul(rew, eff, out[j, 1:]) # flip the buffer # do not cut off incomplete returns Compute the importance weighted td-error estimates: \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # \delta^v_s = 0 for all s \geq t if d_t = \top # add extra trailing unitary dims for broadcasting # a_hat[t] = val[t+1] # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} # add extra trailing unitary dims for broadcasting # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t) # t is -j, t+1 is -j-1 (j=1..T) # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # add r_{t+1} - v(s_t) # A_t = \delta_t + \lambda \gamma A_{t+1} 1_{\neg d_{t+1}} # v(s_t) is next iter's bootstrap # add extra trailing unitary dims for broadcasting # raise NotImplementedError # \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} # c_t = \min\{ \bar{c}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \} # rew[t], fin[t], val[t] is r_{t+1}, d_{t+1} and v(s_t) # t is -j, t+1 is -j-1 
(j=1..T) # \delta_t = r_{t+1} + \gamma v(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t) # add r_{t+1} - v(s_t) # A_t = \rho_t \delta_t + \gamma \c_t A_{t+1} 1_{\neg d_{t+1}} # v(s_t) is next iter's bootstrap | 1.818312 | 2 |
python/dolfinx_mpc/assemble_matrix.py | cmaurini/dolfinx_mpc | 0 | 6613583 | # Copyright (C) 2020-2021 <NAME>
#
# This file is part of DOLFINX_MPC
#
# SPDX-License-Identifier: MIT
from typing import Sequence, Union
import dolfinx.fem as _fem
import dolfinx.cpp as _cpp
from dolfinx_mpc import cpp
from petsc4py import PETSc as _PETSc
from .multipointconstraint import MultiPointConstraint
def assemble_matrix(form: _fem.FormMetaClass,
constraint: Union[MultiPointConstraint,
Sequence[MultiPointConstraint]],
bcs: Sequence[_fem.DirichletBCMetaClass] = [],
diagval: _PETSc.ScalarType = 1,
A: _PETSc.Mat = None) -> _PETSc.Mat:
"""
Assemble a compiled DOLFINx bilinear form into a PETSc matrix with corresponding multi point constraints
and Dirichlet boundary conditions.
Parameters
----------
form
The compiled bilinear variational form
constraint
The multi point constraint
bcs
Sequence of Dirichlet boundary conditions
diagval
Value to set on the diagonal of the matrix (Default 1)
A
PETSc matrix to assemble into (optional)
Returns
-------
_PETSc.Mat
The assembled bi-linear form
"""
if not isinstance(constraint, Sequence):
assert(form.function_spaces[0] == form.function_spaces[1])
constraint = (constraint, constraint)
# Generate matrix with MPC sparsity pattern
if A is None:
A = cpp.mpc.create_matrix(form, constraint[0]._cpp_object,
constraint[1]._cpp_object)
A.zeroEntries()
# Assemble matrix in C++
cpp.mpc.assemble_matrix(A, form, constraint[0]._cpp_object,
constraint[1]._cpp_object, bcs, diagval)
# Add one on diagonal for Dirichlet boundary conditions
if form.function_spaces[0].id == form.function_spaces[1].id:
A.assemblyBegin(_PETSc.Mat.AssemblyType.FLUSH)
A.assemblyEnd(_PETSc.Mat.AssemblyType.FLUSH)
_cpp.fem.petsc.insert_diagonal(A, form.function_spaces[0], bcs, diagval)
A.assemble()
return A
def create_sparsity_pattern(form: _fem.FormMetaClass,
mpc: Union[MultiPointConstraint,
Sequence[MultiPointConstraint]]):
"""
Create sparsity-pattern for MPC given a compiled DOLFINx form
Parameters
----------
form
The form
mpc
For square forms, the MPC. For rectangular forms a list of 2 MPCs on
axis 0 & 1, respectively
"""
if not isinstance(mpc, list):
mpc = [mpc, mpc]
assert len(mpc) == 2
for mpc_ in mpc:
mpc_._not_finalized()
return cpp.mpc.create_sparsity_pattern(form, mpc[0]._cpp_object,
mpc[1]._cpp_object)
def create_matrix_nest(
a: Sequence[Sequence[_fem.FormMetaClass]],
constraints: Sequence[MultiPointConstraint]):
"""
Create a PETSc matrix of type "nest" with appropriate sparsity pattern
given the provided multi points constraints
Parameters
----------
a
The compiled bilinear variational form provided in a rank 2 list
constraints
An ordered list of multi point constraints
"""
assert len(constraints) == len(a)
A_ = [[None for _ in range(len(a[0]))] for _ in range(len(a))]
for i, a_row in enumerate(a):
for j, a_block in enumerate(a_row):
if a[i][j] is None:
continue
A_[i][j] = cpp.mpc.create_matrix(
a[i][j], constraints[i]._cpp_object, constraints[j]._cpp_object)
A = _PETSc.Mat().createNest(
A_, comm=constraints[0].function_space.mesh.comm)
return A
def assemble_matrix_nest(
A: _PETSc.Mat,
a: Sequence[Sequence[_fem.FormMetaClass]],
constraints: Sequence[MultiPointConstraint],
bcs: Sequence[_fem.DirichletBCMetaClass] = [],
diagval: _PETSc.ScalarType = 1):
"""
Assemble a compiled DOLFINx bilinear form into a PETSc matrix of type
"nest" with corresponding multi point constraints and Dirichlet boundary
conditions.
Parameters
----------
a
The compiled bilinear variational form provided in a rank 2 list
constraints
An ordered list of multi point constraints
bcs
Sequence of Dirichlet boundary conditions
diagval
Value to set on the diagonal of the matrix (Default 1)
A
PETSc matrix to assemble into
"""
for i, a_row in enumerate(a):
for j, a_block in enumerate(a_row):
if a_block is not None:
Asub = A.getNestSubMatrix(i, j)
assemble_matrix(
a_block, (constraints[i], constraints[j]),
bcs=bcs, diagval=diagval, A=Asub)
| # Copyright (C) 2020-2021 <NAME>
#
# This file is part of DOLFINX_MPC
#
# SPDX-License-Identifier: MIT
from typing import Sequence, Union
import dolfinx.fem as _fem
import dolfinx.cpp as _cpp
from dolfinx_mpc import cpp
from petsc4py import PETSc as _PETSc
from .multipointconstraint import MultiPointConstraint
def assemble_matrix(form: _fem.FormMetaClass,
constraint: Union[MultiPointConstraint,
Sequence[MultiPointConstraint]],
bcs: Sequence[_fem.DirichletBCMetaClass] = [],
diagval: _PETSc.ScalarType = 1,
A: _PETSc.Mat = None) -> _PETSc.Mat:
"""
Assemble a compiled DOLFINx bilinear form into a PETSc matrix with corresponding multi point constraints
and Dirichlet boundary conditions.
Parameters
----------
form
The compiled bilinear variational form
constraint
The multi point constraint
bcs
Sequence of Dirichlet boundary conditions
diagval
Value to set on the diagonal of the matrix (Default 1)
A
PETSc matrix to assemble into (optional)
Returns
-------
_PETSc.Mat
The assembled bi-linear form
"""
if not isinstance(constraint, Sequence):
assert(form.function_spaces[0] == form.function_spaces[1])
constraint = (constraint, constraint)
# Generate matrix with MPC sparsity pattern
if A is None:
A = cpp.mpc.create_matrix(form, constraint[0]._cpp_object,
constraint[1]._cpp_object)
A.zeroEntries()
# Assemble matrix in C++
cpp.mpc.assemble_matrix(A, form, constraint[0]._cpp_object,
constraint[1]._cpp_object, bcs, diagval)
# Add one on diagonal for Dirichlet boundary conditions
if form.function_spaces[0].id == form.function_spaces[1].id:
A.assemblyBegin(_PETSc.Mat.AssemblyType.FLUSH)
A.assemblyEnd(_PETSc.Mat.AssemblyType.FLUSH)
_cpp.fem.petsc.insert_diagonal(A, form.function_spaces[0], bcs, diagval)
A.assemble()
return A
def create_sparsity_pattern(form: _fem.FormMetaClass,
mpc: Union[MultiPointConstraint,
Sequence[MultiPointConstraint]]):
"""
Create sparsity-pattern for MPC given a compiled DOLFINx form
Parameters
----------
form
The form
mpc
For square forms, the MPC. For rectangular forms a list of 2 MPCs on
axis 0 & 1, respectively
"""
if not isinstance(mpc, list):
mpc = [mpc, mpc]
assert len(mpc) == 2
for mpc_ in mpc:
mpc_._not_finalized()
return cpp.mpc.create_sparsity_pattern(form, mpc[0]._cpp_object,
mpc[1]._cpp_object)
def create_matrix_nest(
a: Sequence[Sequence[_fem.FormMetaClass]],
constraints: Sequence[MultiPointConstraint]):
"""
Create a PETSc matrix of type "nest" with appropriate sparsity pattern
given the provided multi points constraints
Parameters
----------
a
The compiled bilinear variational form provided in a rank 2 list
constraints
An ordered list of multi point constraints
"""
assert len(constraints) == len(a)
A_ = [[None for _ in range(len(a[0]))] for _ in range(len(a))]
for i, a_row in enumerate(a):
for j, a_block in enumerate(a_row):
if a[i][j] is None:
continue
A_[i][j] = cpp.mpc.create_matrix(
a[i][j], constraints[i]._cpp_object, constraints[j]._cpp_object)
A = _PETSc.Mat().createNest(
A_, comm=constraints[0].function_space.mesh.comm)
return A
def assemble_matrix_nest(
A: _PETSc.Mat,
a: Sequence[Sequence[_fem.FormMetaClass]],
constraints: Sequence[MultiPointConstraint],
bcs: Sequence[_fem.DirichletBCMetaClass] = [],
diagval: _PETSc.ScalarType = 1):
"""
Assemble a compiled DOLFINx bilinear form into a PETSc matrix of type
"nest" with corresponding multi point constraints and Dirichlet boundary
conditions.
Parameters
----------
a
The compiled bilinear variational form provided in a rank 2 list
constraints
An ordered list of multi point constraints
bcs
Sequence of Dirichlet boundary conditions
diagval
Value to set on the diagonal of the matrix (Default 1)
A
PETSc matrix to assemble into
"""
for i, a_row in enumerate(a):
for j, a_block in enumerate(a_row):
if a_block is not None:
Asub = A.getNestSubMatrix(i, j)
assemble_matrix(
a_block, (constraints[i], constraints[j]),
bcs=bcs, diagval=diagval, A=Asub)
| en | 0.749927 | # Copyright (C) 2020-2021 <NAME> # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT Assemble a compiled DOLFINx bilinear form into a PETSc matrix with corresponding multi point constraints and Dirichlet boundary conditions. Parameters ---------- form The compiled bilinear variational form constraint The multi point constraint bcs Sequence of Dirichlet boundary conditions diagval Value to set on the diagonal of the matrix (Default 1) A PETSc matrix to assemble into (optional) Returns ------- _PETSc.Mat The assembled bi-linear form # Generate matrix with MPC sparsity pattern # Assemble matrix in C++ # Add one on diagonal for Dirichlet boundary conditions Create sparsity-pattern for MPC given a compiled DOLFINx form Parameters ---------- form The form mpc For square forms, the MPC. For rectangular forms a list of 2 MPCs on axis 0 & 1, respectively Create a PETSc matrix of type "nest" with appropriate sparsity pattern given the provided multi points constraints Parameters ---------- a The compiled bilinear variational form provided in a rank 2 list constraints An ordered list of multi point constraints Assemble a compiled DOLFINx bilinear form into a PETSc matrix of type "nest" with corresponding multi point constraints and Dirichlet boundary conditions. Parameters ---------- a The compiled bilinear variational form provided in a rank 2 list constraints An ordered list of multi point constraints bcs Sequence of Dirichlet boundary conditions diagval Value to set on the diagonal of the matrix (Default 1) A PETSc matrix to assemble into | 2.023815 | 2 |
sample/scripts/test_failure.py | Kairiw/pysilhouette | 3 | 6613584 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
fpath = '/tmp/pysilhouette_job_failure.txt'
if __name__ == '__main__':
fp= open(fpath, 'w')
fp.write('Failure!!\n')
fp.close()
try:
# os.unlink(fpath)
raise Exception('Failure!!')
except Exception, e:
print >>sys.stderr, 'stderr : %s!!' % e.args
sys.exit(1)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
fpath = '/tmp/pysilhouette_job_failure.txt'
if __name__ == '__main__':
fp= open(fpath, 'w')
fp.write('Failure!!\n')
fp.close()
try:
# os.unlink(fpath)
raise Exception('Failure!!')
except Exception, e:
print >>sys.stderr, 'stderr : %s!!' % e.args
sys.exit(1)
| en | 0.208379 | #!/usr/bin/env python # -*- coding: utf-8 -*- # os.unlink(fpath) | 2.538563 | 3 |
bertrpc/client.py | mjrusso/python-bertrpc | 15 | 6613585 | <filename>bertrpc/client.py<gh_stars>10-100
import bert
import error
import socket
import struct
class Service(object):
def __init__(self, host, port, timeout = None):
self.host = host
self.port = port
self.timeout = timeout
def request(self, kind, options=None):
if kind in ['call', 'cast']:
self._verify_options(options)
return Request(self, bert.Atom(kind), options)
else:
raise error.InvalidRequest('unsupported request of kind: "%s"' % kind)
def _verify_options(self, options):
if options is not None:
cache = options.get('cache', None)
if cache is not None:
if len(cache) >= 2 and cache[0] == 'validation' and type(cache[1]) == type(str()):
pass
else:
raise error.InvalidOption('Valid cache args are [validation, String]')
else:
raise error.InvalidOption('Valid options are: cache')
class Request(object):
def __init__(self, service, kind, options):
self.service = service
self.kind = kind
self.options = options
def __getattr__(self, attr):
return Module(self.service, self, bert.Atom(attr))
class Module(object):
def __init__(self, service, request, module):
self.service = service
self.request = request
self.module = module
def __getattr__(self, attr):
def callable(*args, **kwargs):
return self.method_missing(attr, *args, **kwargs)
return callable
def method_missing(self, *args, **kwargs):
return Action(self.service,
self.request,
self.module,
bert.Atom(args[0]),
list(args[1:])).execute()
class Action(object):
def __init__(self, service, request, module, function, arguments):
self.service = service
self.request = request
self.module = module
self.function = function
self.arguments = arguments
def execute(self):
python_request = (self.request.kind,
self.module,
self.function,
self.arguments)
bert_request = Encoder().encode(python_request)
bert_response = self._transaction(bert_request)
python_response = Decoder().decode(bert_response)
return python_response
def _transaction(self, bert_request):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.service.timeout is not None: sock.settimeout(self.service.timeout)
sock.connect((self.service.host, self.service.port))
if self.request.options is not None:
if self.request.options.get('cache', None) is not None:
if self.request.options['cache'][0] == 'validation':
token = self.request.options['cache'][1]
info_bert = Encoder().encode(
(bert.Atom('info'), bert.Atom('cache'), [bert.Atom('validation'), bert.Atom(token)]))
info_header = struct.pack(">l", len(info_bert))
sock.sendall(info_header)
sock.sendall(info_bert)
header = struct.pack(">l", len(bert_request))
sock.sendall(header)
sock.sendall(bert_request)
lenheader = sock.recv(4)
if lenheader is None: raise error.ProtocolError(error.ProtocolError.NO_HEADER)
length = struct.unpack(">l",lenheader)[0]
bert_response = ''
while len(bert_response) < length:
response_part = sock.recv(length - len(bert_response))
if response_part is None or len(response_part) == 0:
raise error.ProtocolError(error.ProtocolError.NO_DATA)
bert_response += response_part
sock.close()
return bert_response
except socket.timeout, e:
raise error.ReadTimeoutError('No response from %s:%s in %ss' %
(self.service.host, self.service.port, self.service.timeout))
except socket.error, e:
raise error.ConnectionError('Unable to connect to %s:%s' % (self.service.host, self.service.port))
class Encoder(object):
def encode(self, python_request):
return bert.encode(python_request)
class Decoder(object):
def decode(self, bert_response):
python_response = bert.decode(bert_response)
if python_response[0] == bert.Atom('reply'):
return python_response[1]
elif python_response[0] == bert.Atom('noreply'):
return None
elif python_response[0] == bert.Atom('error'):
return self._error(python_response[1])
else:
raise error.BERTRPCError('invalid response received from server')
def _error(self, err):
level, code, klass, message, backtrace = err
exception_map = {
bert.Atom('protocol'): error.ProtocolError,
bert.Atom('server'): error.ServerError,
bert.Atom('user'): error.UserError,
bert.Atom('proxy'): error.ProxyError
}
exception = exception_map.get(level, None)
if level is not None:
raise exception([code, message], klass, backtrace)
else:
raise error.BERTRPCError('invalid error code received from server')
if __name__ == '__main__':
print 'initializing service now'
service = Service('localhost', 9999)
print 'RPC call now'
response = service.request('call').calc.add(1, 2)
print 'response is: %s' % repr(response)
print 'RPC call now, with options'
options = {'cache': ['validation','myToken']}
response = service.request('call', options).calc.add(5, 6)
print 'response is: %s' % repr(response)
print 'RPC cast now'
response = service.request('cast').stats.incr()
print 'response is: %s' % repr(response)
| <filename>bertrpc/client.py<gh_stars>10-100
import bert
import error
import socket
import struct
class Service(object):
def __init__(self, host, port, timeout = None):
self.host = host
self.port = port
self.timeout = timeout
def request(self, kind, options=None):
if kind in ['call', 'cast']:
self._verify_options(options)
return Request(self, bert.Atom(kind), options)
else:
raise error.InvalidRequest('unsupported request of kind: "%s"' % kind)
def _verify_options(self, options):
if options is not None:
cache = options.get('cache', None)
if cache is not None:
if len(cache) >= 2 and cache[0] == 'validation' and type(cache[1]) == type(str()):
pass
else:
raise error.InvalidOption('Valid cache args are [validation, String]')
else:
raise error.InvalidOption('Valid options are: cache')
class Request(object):
def __init__(self, service, kind, options):
self.service = service
self.kind = kind
self.options = options
def __getattr__(self, attr):
return Module(self.service, self, bert.Atom(attr))
class Module(object):
def __init__(self, service, request, module):
self.service = service
self.request = request
self.module = module
def __getattr__(self, attr):
def callable(*args, **kwargs):
return self.method_missing(attr, *args, **kwargs)
return callable
def method_missing(self, *args, **kwargs):
return Action(self.service,
self.request,
self.module,
bert.Atom(args[0]),
list(args[1:])).execute()
class Action(object):
def __init__(self, service, request, module, function, arguments):
self.service = service
self.request = request
self.module = module
self.function = function
self.arguments = arguments
def execute(self):
python_request = (self.request.kind,
self.module,
self.function,
self.arguments)
bert_request = Encoder().encode(python_request)
bert_response = self._transaction(bert_request)
python_response = Decoder().decode(bert_response)
return python_response
def _transaction(self, bert_request):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.service.timeout is not None: sock.settimeout(self.service.timeout)
sock.connect((self.service.host, self.service.port))
if self.request.options is not None:
if self.request.options.get('cache', None) is not None:
if self.request.options['cache'][0] == 'validation':
token = self.request.options['cache'][1]
info_bert = Encoder().encode(
(bert.Atom('info'), bert.Atom('cache'), [bert.Atom('validation'), bert.Atom(token)]))
info_header = struct.pack(">l", len(info_bert))
sock.sendall(info_header)
sock.sendall(info_bert)
header = struct.pack(">l", len(bert_request))
sock.sendall(header)
sock.sendall(bert_request)
lenheader = sock.recv(4)
if lenheader is None: raise error.ProtocolError(error.ProtocolError.NO_HEADER)
length = struct.unpack(">l",lenheader)[0]
bert_response = ''
while len(bert_response) < length:
response_part = sock.recv(length - len(bert_response))
if response_part is None or len(response_part) == 0:
raise error.ProtocolError(error.ProtocolError.NO_DATA)
bert_response += response_part
sock.close()
return bert_response
except socket.timeout, e:
raise error.ReadTimeoutError('No response from %s:%s in %ss' %
(self.service.host, self.service.port, self.service.timeout))
except socket.error, e:
raise error.ConnectionError('Unable to connect to %s:%s' % (self.service.host, self.service.port))
class Encoder(object):
def encode(self, python_request):
return bert.encode(python_request)
class Decoder(object):
    """Decodes BERT-RPC response packets into Python values."""

    def decode(self, bert_response):
        """Decode *bert_response* and unwrap the BERT-RPC envelope.

        Returns the reply payload for a ``reply`` packet, None for
        ``noreply``, and raises the mapped exception for ``error`` packets.
        Raises error.BERTRPCError for any unrecognized packet type.
        """
        python_response = bert.decode(bert_response)
        if python_response[0] == bert.Atom('reply'):
            return python_response[1]
        elif python_response[0] == bert.Atom('noreply'):
            return None
        elif python_response[0] == bert.Atom('error'):
            return self._error(python_response[1])
        else:
            raise error.BERTRPCError('invalid response received from server')

    def _error(self, err):
        """Raise the client exception matching a server error tuple.

        *err* is (level, code, class, message, backtrace); *level* selects
        the exception type.
        """
        level, code, klass, message, backtrace = err
        exception_map = {
            bert.Atom('protocol'): error.ProtocolError,
            bert.Atom('server'): error.ServerError,
            bert.Atom('user'): error.UserError,
            bert.Atom('proxy'): error.ProxyError
        }
        exception = exception_map.get(level, None)
        # BUG FIX: the original tested ``level is not None`` here, so an
        # unknown level fell through to calling ``None(...)`` and raised a
        # TypeError instead of the intended BERTRPCError.
        if exception is not None:
            raise exception([code, message], klass, backtrace)
        else:
            raise error.BERTRPCError('invalid error code received from server')
# Ad-hoc smoke test: exercises call/cast against a local BERT-RPC server.
# (Python 2 script -- requires a server listening on localhost:9999.)
if __name__ == '__main__':
    print 'initializing service now'
    service = Service('localhost', 9999)
    # Synchronous RPC: returns the remote result.
    print 'RPC call now'
    response = service.request('call').calc.add(1, 2)
    print 'response is: %s' % repr(response)
    # Same call, but announcing cache-validation support first.
    print 'RPC call now, with options'
    options = {'cache': ['validation','myToken']}
    response = service.request('call', options).calc.add(5, 6)
    print 'response is: %s' % repr(response)
    # Asynchronous cast: the server answers noreply (decoded to None).
    print 'RPC cast now'
    response = service.request('cast').stats.incr()
    print 'response is: %s' % repr(response)
| none | 1 | 2.591453 | 3 | |
devtools/conda-recipe-dev/manage_local_dev_version.py | uibcdf/NetLabTools | 1 | 6613586 | import os
import sys
from numpy.distutils.exec_command import exec_command
def installing():
    """Build the conda recipe in this directory and install the result.

    Runs a local (no-upload) build, asks conda-build for the path of the
    package it produced, installs that package from the local channel, and
    finally purges the build artifacts.
    """
    _, output = exec_command('conda build . --no-anaconda-upload')
    _, output = exec_command('conda build . --output')
    _, output = exec_command('conda install --use-local '+output)
    _, output = exec_command('conda build purge')
def remove():
    """Uninstall the locally installed kinnetmt dev package."""
    exec_command('conda remove kinnetmt --yes')
def update():
    """Reinstall the dev package: remove the old build, then install fresh."""
    remove()
    installing()
# Simple CLI: exactly one of --install / --remove / --update is acted on.
args = sys.argv[1:]
if '--install' in args:
    print('Building and installing local dev version via conda')
    installing()
elif '--remove' in args:
    print('Removing local dev package')
    remove()
elif '--update' in args:
    print('Updating local dev package')
    update()
| import os
import sys
from numpy.distutils.exec_command import exec_command
def installing():
    """Build the conda recipe in this directory and install the result.

    Runs a local (no-upload) build, asks conda-build for the path of the
    package it produced, installs that package from the local channel, and
    finally purges the build artifacts.
    """
    _, output = exec_command('conda build . --no-anaconda-upload')
    _, output = exec_command('conda build . --output')
    _, output = exec_command('conda install --use-local '+output)
    _, output = exec_command('conda build purge')
def remove():
    """Uninstall the locally installed kinnetmt dev package."""
    exec_command('conda remove kinnetmt --yes')
def update():
    """Reinstall the dev package: remove the old build, then install fresh."""
    remove()
    installing()
# Simple CLI: exactly one of --install / --remove / --update is acted on.
args = sys.argv[1:]
if '--install' in args:
    print('Building and installing local dev version via conda')
    installing()
elif '--remove' in args:
    print('Removing local dev package')
    remove()
elif '--update' in args:
    print('Updating local dev package')
    update()
| none | 1 | 2.577459 | 3 | |
programmes/migrations/0004_programme_departement.py | MTES-MCT/appel | 0 | 6613587 | <filename>programmes/migrations/0004_programme_departement.py
# Generated by Django 3.2.5 on 2021-07-26 11:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the nullable ``departement`` integer column to ``programme``.

    Auto-generated by Django; keep edits limited to comments.
    """

    # Must be applied after the previous programmes migration.
    dependencies = [
        ("programmes", "0003_auto_20210726_1100"),
    ]

    operations = [
        # Nullable, so existing rows need no default backfill.
        migrations.AddField(
            model_name="programme",
            name="departement",
            field=models.IntegerField(null=True),
        ),
    ]
| <filename>programmes/migrations/0004_programme_departement.py
# Generated by Django 3.2.5 on 2021-07-26 11:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the nullable ``departement`` integer column to ``programme``.

    Auto-generated by Django; keep edits limited to comments.
    """

    # Must be applied after the previous programmes migration.
    dependencies = [
        ("programmes", "0003_auto_20210726_1100"),
    ]

    operations = [
        # Nullable, so existing rows need no default backfill.
        migrations.AddField(
            model_name="programme",
            name="departement",
            field=models.IntegerField(null=True),
        ),
    ]
| en | 0.826579 | # Generated by Django 3.2.5 on 2021-07-26 11:24 | 1.360661 | 1 |
observers/models.py | zsiciarz/variablestars.net | 0 | 6613588 | <filename>observers/models.py<gh_stars>0
from datetime import timedelta
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Count
from django.db.models.query import QuerySet
from django.db.models.signals import post_save
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import ephem
from geoposition.fields import GeopositionField
from model_utils.models import TimeStampedModel
from observations.models import Observation
class ObserverQuerySet(QuerySet):
    """QuerySet helpers for aggregate observer statistics."""

    def with_observations_count(self):
        """Annotate each observer with its total number of observations."""
        return self.annotate(observations_count=Count("observations"))

    def get_total_stats(self):
        """Return counts of all observers and of recently active ones."""
        now = timezone.now()
        month_ago = now - timedelta(days=30)
        week_ago = now - timedelta(days=7)

        def active_since(cutoff):
            # Activity is approximated by the user's last login time.
            return self.filter(user__last_login__gt=cutoff).count()

        return {
            "total_observer_count": self.count(),
            "last_month_active_count": active_since(month_ago),
            "last_week_active_count": active_since(week_ago),
        }
class Observer(TimeStampedModel):
    """Profile attached one-to-one to each auth user with observing metadata."""

    # One profile per user; created automatically by the post_save signal below.
    user = models.OneToOneField(
        "auth.User", editable=False, related_name="observer", on_delete=models.CASCADE
    )
    aavso_code = models.CharField(
        max_length=10,
        blank=True,
        default="",
        verbose_name=_("AAVSO observer code"),
        help_text=_("This is the code that is officially assigned to you by AAVSO."),
    )
    limiting_magnitude = models.FloatField(
        blank=True,
        null=True,
        default=6.0,
        verbose_name=_("Limiting magnitude of your equipment"),
        help_text=_(
            "The magnitude of the faintest stars you can see with your eyes/binoculars/telescope. Setting this value will affect which stars will have their brightness value(s) grayed out."
        ),
    )
    # Optional geographic position; used to build the pyephem observer below.
    location = GeopositionField(blank=True)
    city = models.CharField(max_length=255, blank=True, default="")

    objects = ObserverQuerySet.as_manager()

    class Meta:
        verbose_name = _("Observer")
        verbose_name_plural = _("Observers")
        # Newest observers first.
        ordering = ("-created",)

    def __str__(self):
        """Username, with the real name appended when available."""
        full_name = self.user.get_full_name()
        if full_name:
            return "%s (%s)" % (self.user, full_name)
        else:
            return str(self.user)

    def get_absolute_url(self):
        return reverse("observers:observer_detail", kwargs={"pk": self.pk})

    def top_stars(self):
        """Most frequently observed stars, restricted to this observer."""
        return Observation.objects.top_stars().filter(observer=self)

    def recent_observations(self):
        """This observer's observations, newest (highest Julian date) first."""
        return self.observations.select_related("star").order_by("-jd")

    def observed_stars_count(self):
        """Number of distinct stars this observer has reported."""
        return self.observations.aggregate(c=Count("star", distinct=True))["c"]

    def get_pyephem_city(self):
        """Build an ephem.Observer positioned at this observer's location."""
        city = ephem.Observer()
        # convert coordinates from degrees to radians
        city.lon = float(self.location.longitude) * ephem.pi / 180.0
        city.lat = float(self.location.latitude) * ephem.pi / 180.0
        return city
def create_observer(sender, instance, created, **kwargs):
    """post_save handler: attach a fresh Observer profile to every new User."""
    if not created:
        return
    Observer.objects.create(user=instance)
# Register the handler once; dispatch_uid guards against duplicate
# connections when this module is imported more than once.
post_save.connect(
    create_observer, sender=User, dispatch_uid="observers.models.create_observer"
)
| <filename>observers/models.py<gh_stars>0
from datetime import timedelta
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Count
from django.db.models.query import QuerySet
from django.db.models.signals import post_save
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import ephem
from geoposition.fields import GeopositionField
from model_utils.models import TimeStampedModel
from observations.models import Observation
class ObserverQuerySet(QuerySet):
    """QuerySet helpers for aggregate observer statistics."""

    def with_observations_count(self):
        """Annotate each observer with its total number of observations."""
        return self.annotate(observations_count=Count("observations"))

    def get_total_stats(self):
        """Return counts of all observers and of recently active ones."""
        now = timezone.now()
        month_ago = now - timedelta(days=30)
        week_ago = now - timedelta(days=7)

        def active_since(cutoff):
            # Activity is approximated by the user's last login time.
            return self.filter(user__last_login__gt=cutoff).count()

        return {
            "total_observer_count": self.count(),
            "last_month_active_count": active_since(month_ago),
            "last_week_active_count": active_since(week_ago),
        }
class Observer(TimeStampedModel):
    """Profile attached one-to-one to each auth user with observing metadata."""

    # One profile per user; created automatically by the post_save signal below.
    user = models.OneToOneField(
        "auth.User", editable=False, related_name="observer", on_delete=models.CASCADE
    )
    aavso_code = models.CharField(
        max_length=10,
        blank=True,
        default="",
        verbose_name=_("AAVSO observer code"),
        help_text=_("This is the code that is officially assigned to you by AAVSO."),
    )
    limiting_magnitude = models.FloatField(
        blank=True,
        null=True,
        default=6.0,
        verbose_name=_("Limiting magnitude of your equipment"),
        help_text=_(
            "The magnitude of the faintest stars you can see with your eyes/binoculars/telescope. Setting this value will affect which stars will have their brightness value(s) grayed out."
        ),
    )
    # Optional geographic position; used to build the pyephem observer below.
    location = GeopositionField(blank=True)
    city = models.CharField(max_length=255, blank=True, default="")

    objects = ObserverQuerySet.as_manager()

    class Meta:
        verbose_name = _("Observer")
        verbose_name_plural = _("Observers")
        # Newest observers first.
        ordering = ("-created",)

    def __str__(self):
        """Username, with the real name appended when available."""
        full_name = self.user.get_full_name()
        if full_name:
            return "%s (%s)" % (self.user, full_name)
        else:
            return str(self.user)

    def get_absolute_url(self):
        return reverse("observers:observer_detail", kwargs={"pk": self.pk})

    def top_stars(self):
        """Most frequently observed stars, restricted to this observer."""
        return Observation.objects.top_stars().filter(observer=self)

    def recent_observations(self):
        """This observer's observations, newest (highest Julian date) first."""
        return self.observations.select_related("star").order_by("-jd")

    def observed_stars_count(self):
        """Number of distinct stars this observer has reported."""
        return self.observations.aggregate(c=Count("star", distinct=True))["c"]

    def get_pyephem_city(self):
        """Build an ephem.Observer positioned at this observer's location."""
        city = ephem.Observer()
        # convert coordinates from degrees to radians
        city.lon = float(self.location.longitude) * ephem.pi / 180.0
        city.lat = float(self.location.latitude) * ephem.pi / 180.0
        return city
def create_observer(sender, instance, created, **kwargs):
    """post_save handler: attach a fresh Observer profile to every new User."""
    if not created:
        return
    Observer.objects.create(user=instance)
# Register the handler once; dispatch_uid guards against duplicate
# connections when this module is imported more than once.
post_save.connect(
    create_observer, sender=User, dispatch_uid="observers.models.create_observer"
)
| en | 0.558461 | # convert coordinates from degrees to radians | 2.409919 | 2 |
maestro/regex/regex_tester.py | fabiommendes/maestro | 0 | 6613589 | import re
class RegexTesterMeta(type):
    """
    Metaclass for regex-tester classes.
    """

# NOTE(review): ``type.__new__`` normally takes (mcs, name, bases, dict);
# this call passes only three arguments, so it looks broken -- presumably
# ``RegexTesterMeta('Re', (), {'regex': r''})`` was intended. TODO confirm.
Re = type.__new__(RegexTesterMeta, (), {'regex': r''})
def make_examples(re_class, accept=5, reject=5):
    """
    Takes a regex class and make a list of accept/reject examples.
    """
    # TODO: not implemented yet -- currently returns None.

# The teacher
# Reference solution: regex filled in, with known-good/known-bad examples.
class Integer:
    """
    Match any valid positive integer.
    """
    regex = r'[0-9]+'
    ok = ['42', '0', '1']
    bad = ['foo', '41.0']

# For the students
# Student template: the regex is left blank; redefining ``Integer`` here
# intentionally shadows the teacher version above.
class Integer:
    """
    Match any valid positive integer.
    Good:
        42
        0
        1
    Bad:
        foo
        41.0
    """
    regex = r''
def test_class(cls, data):
"""
Test a regex class definition.
Return None if class was not tested and a tuple (n_ok, n_error) with the
number of correct/wrong test cases.
"""
cls_examples = data[cls.__name__]
title = cls.__name__.replace('_', ' ')
# Compile regex
try:
regex = re.compile(cls.regex)
except AttributeError:
print('%s: class does not define a regex attribute.')
return None
except Exception:
print('%s: invalid regular expression.')
return None
# Test each suite of examples
accept = cls_examples['accept']
reject = cls_examples['reject']
n_ok = 0
msgs = []
for case in accept:
if not regex.fullmatch(case):
msgs.append('did not match %r.' % case)
for case in reject:
if regex.fullmatch(case):
msgs.append('match %r, but should have rejected it.' % case)
# Render message
if msgs:
print('%s:' % title)
print(' correct: %s' % n_ok)
print(' wrong:')
for msg in msgs:
print(' -', msg)
else:
print('%s: ok!' % title)
return n_ok, len(msgs)
| import re
class RegexTesterMeta(type):
    """
    Metaclass for regex-tester classes.
    """

# NOTE(review): ``type.__new__`` normally takes (mcs, name, bases, dict);
# this call passes only three arguments, so it looks broken -- presumably
# ``RegexTesterMeta('Re', (), {'regex': r''})`` was intended. TODO confirm.
Re = type.__new__(RegexTesterMeta, (), {'regex': r''})
def make_examples(re_class, accept=5, reject=5):
    """
    Takes a regex class and make a list of accept/reject examples.
    """
    # TODO: not implemented yet -- currently returns None.

# The teacher
# Reference solution: regex filled in, with known-good/known-bad examples.
class Integer:
    """
    Match any valid positive integer.
    """
    regex = r'[0-9]+'
    ok = ['42', '0', '1']
    bad = ['foo', '41.0']

# For the students
# Student template: the regex is left blank; redefining ``Integer`` here
# intentionally shadows the teacher version above.
class Integer:
    """
    Match any valid positive integer.
    Good:
        42
        0
        1
    Bad:
        foo
        41.0
    """
    regex = r''
def test_class(cls, data):
"""
Test a regex class definition.
Return None if class was not tested and a tuple (n_ok, n_error) with the
number of correct/wrong test cases.
"""
cls_examples = data[cls.__name__]
title = cls.__name__.replace('_', ' ')
# Compile regex
try:
regex = re.compile(cls.regex)
except AttributeError:
print('%s: class does not define a regex attribute.')
return None
except Exception:
print('%s: invalid regular expression.')
return None
# Test each suite of examples
accept = cls_examples['accept']
reject = cls_examples['reject']
n_ok = 0
msgs = []
for case in accept:
if not regex.fullmatch(case):
msgs.append('did not match %r.' % case)
for case in reject:
if regex.fullmatch(case):
msgs.append('match %r, but should have rejected it.' % case)
# Render message
if msgs:
print('%s:' % title)
print(' correct: %s' % n_ok)
print(' wrong:')
for msg in msgs:
print(' -', msg)
else:
print('%s: ok!' % title)
return n_ok, len(msgs)
| en | 0.672923 | Metaclass for regex-tester classes. Takes a regex class and make a list of accept/reject examples. # The teacher Match any valid positive integer. # For the students Match any valid positive integer. Good: 42 0 1 Bad: foo 41.0 Test a regex class definition. Return None if class was not tested and a tuple (n_ok, n_error) with the number of correct/wrong test cases. # Compile regex # Test each suite of examples # Render message | 3.246768 | 3 |
basars_addons/losses/dice.py | Basars/basars-addons | 0 | 6613590 | <filename>basars_addons/losses/dice.py
import tensorflow as tf
from tensorflow.keras.losses import Loss, Reduction
class Dice(Loss):
def __init__(self,
num_classes=1,
epsilon=1e-5,
reduction: str = Reduction.AUTO,
from_logits=False,
name=None):
super(Dice, self).__init__(reduction, name)
self.num_classes = num_classes
self.epsilon = epsilon
self.from_logits = from_logits
def dice_coefficient(self, y_true, y_pred):
intersection = tf.reduce_sum(y_true * y_pred)
y_sum = tf.reduce_sum(y_true * y_true)
z_sum = tf.reduce_sum(y_pred * y_pred)
return 1 - (2 * intersection + self.epsilon) / (z_sum + y_sum + self.epsilon)
def call(self, y_true, y_pred):
if self.from_logits:
y_pred = tf.nn.softmax(y_pred, axis=-1)
loss_value = 0.0
for c in range(self.num_classes):
loss_value += self.dice_coefficient(y_true[:, :, :, c], y_pred[:, :, :, c])
return loss_value / self.num_classes
| <filename>basars_addons/losses/dice.py
import tensorflow as tf
from tensorflow.keras.losses import Loss, Reduction
class Dice(Loss):
def __init__(self,
num_classes=1,
epsilon=1e-5,
reduction: str = Reduction.AUTO,
from_logits=False,
name=None):
super(Dice, self).__init__(reduction, name)
self.num_classes = num_classes
self.epsilon = epsilon
self.from_logits = from_logits
def dice_coefficient(self, y_true, y_pred):
intersection = tf.reduce_sum(y_true * y_pred)
y_sum = tf.reduce_sum(y_true * y_true)
z_sum = tf.reduce_sum(y_pred * y_pred)
return 1 - (2 * intersection + self.epsilon) / (z_sum + y_sum + self.epsilon)
def call(self, y_true, y_pred):
if self.from_logits:
y_pred = tf.nn.softmax(y_pred, axis=-1)
loss_value = 0.0
for c in range(self.num_classes):
loss_value += self.dice_coefficient(y_true[:, :, :, c], y_pred[:, :, :, c])
return loss_value / self.num_classes
| none | 1 | 2.446226 | 2 | |
pytest_pylint_xdist_vcs.py | rebkwok/pytest-pylint-xdist-vcs | 1 | 6613591 | """Pylint plugin for py.test"""
from os import sep
from os.path import dirname
from os.path import exists
from os.path import join
import re
from six.moves.configparser import ( # pylint: disable=import-error
ConfigParser,
NoSectionError,
NoOptionError
)
from pylint import lint
from pylint.config import PYLINTRC
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
import pytest
import svn
SCM_LIST = [svn]
class PyLintException(Exception):
    """Raised when a linted file produced one or more pylint messages."""
class ProgrammaticReporter(BaseReporter):
    """pylint reporter that collects messages in memory instead of printing."""

    __implements__ = IReporter
    name = 'pylint-vcs-reporter'
    extension = 'prog'

    def __init__(self, output=None):
        BaseReporter.__init__(self, output)
        # Accumulates every pylint message object emitted during a run.
        self.data = []

    def handle_message(self, msg):
        """Store an incoming pylint message for later inspection."""
        self.data.append(msg)

    def _display(self, layout):
        """No-op: nothing is rendered to a terminal."""
def get_rel_path(path, parent_path):
    """
    Give the path to object relative to ``parent_path``.

    Only the first occurrence of ``parent_path`` inside ``path`` is removed;
    a single leading separator left behind by the removal is stripped. If
    ``parent_path`` does not occur, ``path`` is returned unchanged.
    """
    replaced_path = path.replace(parent_path, '', 1)
    # BUG FIX: the original indexed replaced_path[0] unconditionally and
    # raised IndexError when path == parent_path (empty remainder).
    if replaced_path.startswith(sep):
        return replaced_path[1:]
    return replaced_path
def pytest_addoption(parser):
    """Add plugin command line options to pytest command line options"""
    group = parser.getgroup("general")
    # Lint every collected python file.
    group.addoption(
        "--pylint",
        action='store_true', default=False,
        help="run pylint on all python files"
    )
    # Lint only files the SCM reports as changed in the working copy.
    group.addoption(
        "--pylint-vcs",
        action='store_true', default=False,
        help="run pylint only on python files changed in current rev. \
        If not SCM working copy detected it fallbacks to --pylint option"
    )
    # Hard off-switch for the whole plugin.
    group.addoption(
        '--no-pylint',
        action="store_true", default=False,
        help='disable running pylint'
    )
    # Keep linting, but ignore SCM change detection.
    group.addoption(
        '--pylint-no-vcs',
        action="store_true", default=False,
        help='Disable vcs files linting mode. Note: this option does not turn off pylint'
    )
    # Alternative pylintrc location.
    group.addoption(
        '--pylint-rcfile',
        default=None,
        help='Location of RC file if not pylintrc'
    )
def pytest_sessionstart(session):
    """Store pylint settings (enablement, rcfile, ignore lists) on the session."""
    config = session.config
    terminal_reporter = config.pluginmanager.get_plugin('terminalreporter')
    capture_manager = config.pluginmanager.get_plugin('capturemanager')
    # BUG FIX: the original relied on ``or`` binding looser than ``and``
    # (parsed as ``a or (b and not c)``), so --no-pylint could not switch
    # off a plain --pylint run. --no-pylint now always wins.
    session.pylint_enabled = (
        (config.option.pylint or config.option.pylint_vcs)
        and not config.option.no_pylint
    )
    if session.pylint_enabled:
        session.pylint_config = None
        session.pylintrc_file = None
        session.pylint_ignore = []
        session.pylint_ignore_patterns = []
        session.pylint_msg_template = None
        if config.option.pylint_vcs:
            if not config.option.pylint_no_vcs:
                scm, scm_root = _get_vcs_root(str(config.rootdir))
                if scm:
                    session.pylint_vcs_enabled = True
                    session.pylint_vcs_changed_filepaths = scm.get_mod_files(scm_root)
                    with capture_manager.global_and_fixture_disabled():
                        terminal_reporter.write('VCS working copy detected. VCS linting mode enabled\n')
                else:
                    with capture_manager.global_and_fixture_disabled():
                        terminal_reporter.write(
                            'No VCS working copy detected. VCS linting mode disabled: linting all the files\n')
        # Find pylintrc to check ignore list
        pylintrc_file = config.option.pylint_rcfile or PYLINTRC
        if pylintrc_file and not exists(pylintrc_file):
            # The directory of pytest.ini got a chance
            pylintrc_file = join(dirname(str(config.inifile)), pylintrc_file)
        if pylintrc_file and exists(pylintrc_file):
            session.pylintrc_file = pylintrc_file
            session.pylint_config = ConfigParser()
            session.pylint_config.read(pylintrc_file)
            try:
                ignore_string = session.pylint_config.get('MASTER', 'ignore')
                if ignore_string:
                    session.pylint_ignore = ignore_string.split(',')
            except (NoSectionError, NoOptionError):
                pass
            try:
                # BUG FIX: ConfigParser.get returns one comma-separated
                # string; include_file iterates the patterns, so a raw
                # string would be walked character by character. Split it
                # into a list, mirroring the 'ignore' handling above.
                patterns_string = session.pylint_config.get(
                    'MASTER', 'ignore-patterns')
                if patterns_string:
                    session.pylint_ignore_patterns = patterns_string.split(',')
            except (NoSectionError, NoOptionError):
                pass
            try:
                session.pylint_msg_template = session.pylint_config.get(
                    'REPORTS', 'msg-template'
                )
            except (NoSectionError, NoOptionError):
                pass
def pytest_report_header(config, startdir):
    """Add a note to the pytest report header when VCS linting mode is off."""
    # BUG FIX: the original used ``'pylint_no_vcs' in config.option``, which
    # tests for *presence* of the attribute on the options namespace. Since
    # the option is always registered by pytest_addoption, the check was
    # always true and the banner appeared unconditionally. Test the value.
    if getattr(config.option, 'pylint_no_vcs', False):
        return 'VCS linting mode set to disabled'
    return None
def include_file(path, ignore_list, ignore_patterns=None):
    """Checks if a file should be included in the collection."""
    # Regex patterns are matched against the whole (relative) path.
    for pattern in (ignore_patterns or ()):
        if re.match(pattern, path):
            return False
    # Exclude the file when any of its path components is in the ignore list.
    return set(path.split(sep)).isdisjoint(ignore_list)
def pytest_collect_file(path, parent):
    """Collect files on which pylint should run"""
    session = parent.session
    if not session.pylint_enabled:
        return None
    if path.ext != '.py':
        return None
    # VCS mode: only lint files the SCM reports as modified.
    if getattr(session, 'pylint_vcs_enabled', False):
        if str(path) in session.pylint_vcs_changed_filepaths:
            return PyLintItem(path, parent)
        return None
    # Full mode: honour the pylintrc ignore settings when one was parsed.
    if session.pylint_config is None:
        return PyLintItem(path, parent)
    rel_path = get_rel_path(str(path), str(session.fspath))
    if include_file(rel_path, session.pylint_ignore, session.pylint_ignore_patterns):
        return PyLintItem(path, parent, session.pylint_msg_template, session.pylintrc_file)
    return None
class PyLintItem(pytest.Item, pytest.File):
    """pylint test running class."""
    # pylint doesn't deal well with dynamic modules and there isn't an
    # astng plugin for pylint in pypi yet, so we'll have to disable
    # the checks.
    # pylint: disable=no-member,abstract-method

    def __init__(self, fspath, parent, msg_format=None, pylintrc_file=None):
        super(PyLintItem, self).__init__(fspath, parent)
        # Tag the node so ``-m pylint`` can (de)select these checks.
        self.add_marker('pylint')
        self._nodeid = self.nodeid + '[pylint]'
        # Path relative to the pytest root, used only for reporting.
        self.rel_path = get_rel_path(
            fspath.strpath,
            parent.session.fspath.strpath
        )
        # Message layout; falls back to a pylint parseable-style default
        # when no msg-template came from the pylintrc.
        if msg_format is None:
            self._msg_format = '{C}:{line:3d},{column:2d}: {msg} ({symbol})'
        else:
            self._msg_format = msg_format
        self.pylintrc_file = pylintrc_file

    def runtest(self):
        """Check the pylint messages to see if any errors were reported."""
        reported_errors = []
        reporter = ProgrammaticReporter()
        args_list = [self.fspath.strpath]
        if self.pylintrc_file:
            args_list.append('--rcfile={0}'.format(self.pylintrc_file))
        # do_exit=False keeps pylint from calling sys.exit() inside pytest.
        result = lint.Run(args_list, reporter=reporter, do_exit=False)
        errors = result.linter.reporter.data
        for error in errors:
            reported_errors.append(
                error.format(self._msg_format)
            )
        # Any collected message fails the test with the formatted list.
        if reported_errors:
            raise PyLintException('\n'.join(reported_errors))

    def repr_failure(self, excinfo):  # pylint: disable=arguments-differ
        """Handle any test failures by checking that they were ours."""
        # Show only our formatted pylint output for PyLintException;
        # delegate anything else to pytest's normal failure rendering.
        if excinfo.errisinstance(PyLintException):
            return excinfo.value.args[0]
        return super(PyLintItem, self).repr_failure(excinfo)

    def reportinfo(self):
        """Generate our test report"""
        return self.fspath, None, '[pylint] {0}'.format(self.rel_path)
def _get_vcs_root(path):
    """Returns the vcs module and the root of the repo.

    Returns:
        A tuple containing the vcs module to use (svn, git) and the root of the
        repository. If repository is unidentified, then (None, None) is returned.
    """
    for backend in SCM_LIST:
        root = backend.repository_root(path)
        if root:
            return backend, root
    return (None, None)
| """Pylint plugin for py.test"""
from os import sep
from os.path import dirname
from os.path import exists
from os.path import join
import re
from six.moves.configparser import ( # pylint: disable=import-error
ConfigParser,
NoSectionError,
NoOptionError
)
from pylint import lint
from pylint.config import PYLINTRC
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
import pytest
import svn
SCM_LIST = [svn]
class PyLintException(Exception):
    """Raised when a linted file produced one or more pylint messages."""
class ProgrammaticReporter(BaseReporter):
    """pylint reporter that collects messages in memory instead of printing."""

    __implements__ = IReporter
    name = 'pylint-vcs-reporter'
    extension = 'prog'

    def __init__(self, output=None):
        BaseReporter.__init__(self, output)
        # Accumulates every pylint message object emitted during a run.
        self.data = []

    def handle_message(self, msg):
        """Store an incoming pylint message for later inspection."""
        self.data.append(msg)

    def _display(self, layout):
        """No-op: nothing is rendered to a terminal."""
def get_rel_path(path, parent_path):
    """
    Give the path to object relative to ``parent_path``.

    Only the first occurrence of ``parent_path`` inside ``path`` is removed;
    a single leading separator left behind by the removal is stripped. If
    ``parent_path`` does not occur, ``path`` is returned unchanged.
    """
    replaced_path = path.replace(parent_path, '', 1)
    # BUG FIX: the original indexed replaced_path[0] unconditionally and
    # raised IndexError when path == parent_path (empty remainder).
    if replaced_path.startswith(sep):
        return replaced_path[1:]
    return replaced_path
def pytest_addoption(parser):
    """Add plugin command line options to pytest command line options"""
    group = parser.getgroup("general")
    # Lint every collected python file.
    group.addoption(
        "--pylint",
        action='store_true', default=False,
        help="run pylint on all python files"
    )
    # Lint only files the SCM reports as changed in the working copy.
    group.addoption(
        "--pylint-vcs",
        action='store_true', default=False,
        help="run pylint only on python files changed in current rev. \
        If not SCM working copy detected it fallbacks to --pylint option"
    )
    # Hard off-switch for the whole plugin.
    group.addoption(
        '--no-pylint',
        action="store_true", default=False,
        help='disable running pylint'
    )
    # Keep linting, but ignore SCM change detection.
    group.addoption(
        '--pylint-no-vcs',
        action="store_true", default=False,
        help='Disable vcs files linting mode. Note: this option does not turn off pylint'
    )
    # Alternative pylintrc location.
    group.addoption(
        '--pylint-rcfile',
        default=None,
        help='Location of RC file if not pylintrc'
    )
def pytest_sessionstart(session):
    """Store pylint settings (enablement, rcfile, ignore lists) on the session."""
    config = session.config
    terminal_reporter = config.pluginmanager.get_plugin('terminalreporter')
    capture_manager = config.pluginmanager.get_plugin('capturemanager')
    # BUG FIX: the original relied on ``or`` binding looser than ``and``
    # (parsed as ``a or (b and not c)``), so --no-pylint could not switch
    # off a plain --pylint run. --no-pylint now always wins.
    session.pylint_enabled = (
        (config.option.pylint or config.option.pylint_vcs)
        and not config.option.no_pylint
    )
    if session.pylint_enabled:
        session.pylint_config = None
        session.pylintrc_file = None
        session.pylint_ignore = []
        session.pylint_ignore_patterns = []
        session.pylint_msg_template = None
        if config.option.pylint_vcs:
            if not config.option.pylint_no_vcs:
                scm, scm_root = _get_vcs_root(str(config.rootdir))
                if scm:
                    session.pylint_vcs_enabled = True
                    session.pylint_vcs_changed_filepaths = scm.get_mod_files(scm_root)
                    with capture_manager.global_and_fixture_disabled():
                        terminal_reporter.write('VCS working copy detected. VCS linting mode enabled\n')
                else:
                    with capture_manager.global_and_fixture_disabled():
                        terminal_reporter.write(
                            'No VCS working copy detected. VCS linting mode disabled: linting all the files\n')
        # Find pylintrc to check ignore list
        pylintrc_file = config.option.pylint_rcfile or PYLINTRC
        if pylintrc_file and not exists(pylintrc_file):
            # The directory of pytest.ini got a chance
            pylintrc_file = join(dirname(str(config.inifile)), pylintrc_file)
        if pylintrc_file and exists(pylintrc_file):
            session.pylintrc_file = pylintrc_file
            session.pylint_config = ConfigParser()
            session.pylint_config.read(pylintrc_file)
            try:
                ignore_string = session.pylint_config.get('MASTER', 'ignore')
                if ignore_string:
                    session.pylint_ignore = ignore_string.split(',')
            except (NoSectionError, NoOptionError):
                pass
            try:
                # BUG FIX: ConfigParser.get returns one comma-separated
                # string; include_file iterates the patterns, so a raw
                # string would be walked character by character. Split it
                # into a list, mirroring the 'ignore' handling above.
                patterns_string = session.pylint_config.get(
                    'MASTER', 'ignore-patterns')
                if patterns_string:
                    session.pylint_ignore_patterns = patterns_string.split(',')
            except (NoSectionError, NoOptionError):
                pass
            try:
                session.pylint_msg_template = session.pylint_config.get(
                    'REPORTS', 'msg-template'
                )
            except (NoSectionError, NoOptionError):
                pass
def pytest_report_header(config, startdir):
    """Add a note to the pytest report header when VCS linting mode is off."""
    # BUG FIX: the original used ``'pylint_no_vcs' in config.option``, which
    # tests for *presence* of the attribute on the options namespace. Since
    # the option is always registered by pytest_addoption, the check was
    # always true and the banner appeared unconditionally. Test the value.
    if getattr(config.option, 'pylint_no_vcs', False):
        return 'VCS linting mode set to disabled'
    return None
def include_file(path, ignore_list, ignore_patterns=None):
    """Checks if a file should be included in the collection."""
    # Regex patterns are matched against the whole (relative) path.
    for pattern in (ignore_patterns or ()):
        if re.match(pattern, path):
            return False
    # Exclude the file when any of its path components is in the ignore list.
    return set(path.split(sep)).isdisjoint(ignore_list)
def pytest_collect_file(path, parent):
    """Collect files on which pylint should run"""
    session = parent.session
    if not session.pylint_enabled:
        return None
    if path.ext != '.py':
        return None
    # VCS mode: only lint files the SCM reports as modified.
    if getattr(session, 'pylint_vcs_enabled', False):
        if str(path) in session.pylint_vcs_changed_filepaths:
            return PyLintItem(path, parent)
        return None
    # Full mode: honour the pylintrc ignore settings when one was parsed.
    if session.pylint_config is None:
        return PyLintItem(path, parent)
    rel_path = get_rel_path(str(path), str(session.fspath))
    if include_file(rel_path, session.pylint_ignore, session.pylint_ignore_patterns):
        return PyLintItem(path, parent, session.pylint_msg_template, session.pylintrc_file)
    return None
class PyLintItem(pytest.Item, pytest.File):
    """pylint test running class."""
    # pylint doesn't deal well with dynamic modules and there isn't an
    # astng plugin for pylint in pypi yet, so we'll have to disable
    # the checks.
    # pylint: disable=no-member,abstract-method

    def __init__(self, fspath, parent, msg_format=None, pylintrc_file=None):
        super(PyLintItem, self).__init__(fspath, parent)
        # Tag the node so ``-m pylint`` can (de)select these checks.
        self.add_marker('pylint')
        self._nodeid = self.nodeid + '[pylint]'
        # Path relative to the pytest root, used only for reporting.
        self.rel_path = get_rel_path(
            fspath.strpath,
            parent.session.fspath.strpath
        )
        # Message layout; falls back to a pylint parseable-style default
        # when no msg-template came from the pylintrc.
        if msg_format is None:
            self._msg_format = '{C}:{line:3d},{column:2d}: {msg} ({symbol})'
        else:
            self._msg_format = msg_format
        self.pylintrc_file = pylintrc_file

    def runtest(self):
        """Check the pylint messages to see if any errors were reported."""
        reported_errors = []
        reporter = ProgrammaticReporter()
        args_list = [self.fspath.strpath]
        if self.pylintrc_file:
            args_list.append('--rcfile={0}'.format(self.pylintrc_file))
        # do_exit=False keeps pylint from calling sys.exit() inside pytest.
        result = lint.Run(args_list, reporter=reporter, do_exit=False)
        errors = result.linter.reporter.data
        for error in errors:
            reported_errors.append(
                error.format(self._msg_format)
            )
        # Any collected message fails the test with the formatted list.
        if reported_errors:
            raise PyLintException('\n'.join(reported_errors))

    def repr_failure(self, excinfo):  # pylint: disable=arguments-differ
        """Handle any test failures by checking that they were ours."""
        # Show only our formatted pylint output for PyLintException;
        # delegate anything else to pytest's normal failure rendering.
        if excinfo.errisinstance(PyLintException):
            return excinfo.value.args[0]
        return super(PyLintItem, self).repr_failure(excinfo)

    def reportinfo(self):
        """Generate our test report"""
        return self.fspath, None, '[pylint] {0}'.format(self.rel_path)
def _get_vcs_root(path):
    """Returns the vcs module and the root of the repo.

    Returns:
        A tuple containing the vcs module to use (svn, git) and the root of the
        repository. If repository is unidentified, then (None, None) is returned.
    """
    for backend in SCM_LIST:
        root = backend.repository_root(path)
        if root:
            return backend, root
    return (None, None)
| en | 0.813901 | Pylint plugin for py.test # pylint: disable=import-error Exception to raise if a file has a specified pylint error Reporter that replaces output with storage in list of dictionaries Get message and append to our data structure launch layouts display Give the path to object relative to ``parent_path``. Add plugin command line options to pytest command line options Storing pylint settings on the session # Find pylintrc to check ignore list # The directory of pytest.ini got a chance Add the message_ix import path to the pytest report header. Checks if a file should be included in the collection. Collect files on which pylint should run pylint test running class. # pylint doesn't deal well with dynamic modules and there isn't an # astng plugin for pylint in pypi yet, so we'll have to disable # the checks. # pylint: disable=no-member,abstract-method Check the pylint messages to see if any errors were reported. # pylint: disable=arguments-differ Handle any test failures by checkint that they were ours. Generate our test report Returns the vcs module and the root of the repo. Returns: A tuple containing the vcs module to use (svn, git) and the root of the repository. If repository is unidentified, then (None, None) is returned. | 2.31243 | 2 |
src/sentry/plugins2/__init__.py | withrocks/commonlims | 4 | 6613592 | from __future__ import absolute_import
# TODO: Refactor
# This module should actually be merged with the plugin
# module. However, it imports a ton of django stuff which leads to
# an error with apps not being registered yet. So for the POC we'll keep the decorators here
class Container(object):
    """Placeholder; fields and behavior not yet implemented."""
    pass
# TODO: Use the Django model directly? If so, figure out how to automatically setup django
# for testing purposes from a plugin
# class Sample(object):
# def __init__(self, sample_name, sample_type, concentration, volume, custom_fields):
# self.sample_name = sample_name
# self.sample_type = sample_type
# self.concentration = concentration
# self.volume = volume
# self.custom_fields = custom_fields
# def __repr__(self):
# return self.sample_name
class SampleService():
    """Namespaced access point for sample registration (methods are stubs)."""

    def __init__(self, namespace):
        # Keys of plugin-specific fields are scoped by this namespace.
        self.namespace = namespace
        self.containers = []
        self.samples = []

    def add(self, sample):
        """Register an existing sample object. Not implemented yet."""
        raise NotImplementedError()

    def new_sample(self, sample_name, sample_type, concentration, volume, **kwargs):
        """Creates a Sample object with the specified default parameters and any domain specific
        parameters in kwargs. The domain specific arguments will be registered per the calling plugin,
        which will automatically add a namespace to the keys
        """
        raise NotImplementedError()
class App(object):
    """Facade handed to plugins so they can communicate back to the app."""

    def __init__(self, namespace):
        # Sample operations are scoped to the calling plugin's namespace.
        self.samples = SampleService(namespace)
class FileHandlersRegistry(object):
    """Registry of plugin callables/classes that react to uploaded files."""

    def __init__(self):
        # Registered handlers: plain callables or classes exposing .handle().
        self.handlers = set()

    def register(self, fn):
        """Register a handler function or handler class."""
        self.handlers.add(fn)

    def handle_file_uploaded(self, file_like):
        """Dispatch an uploaded file to every registered handler.

        NOTE(review): the unconditional ``raise`` on the first line makes
        everything below unreachable -- the dispatch loop appears to be
        intentionally disabled work-in-progress. Confirm before removing
        the raise to re-enable it.
        """
        raise NotImplementedError()
        for handler in self.handlers:
            # A registered class is instantiated and must expose .handle().
            if type(handler) == type:
                obj = handler()
                if not hasattr(obj, "handle"):
                    raise HandlerNotDefinedException(
                        "A handler must contain a method called `handle`")
                handler = obj.handle
            # Handlers receive the file plus an App scoped to their module.
            handler(file_like, App(handler.__module__))
# TODO: Look into if we can reuse something from sentry instead
# Process-wide singleton registry used by the upload pipeline.
file_handlers_registry = FileHandlersRegistry()


class HandlerNotDefinedException(Exception):
    pass
| from __future__ import absolute_import
# TODO: Refactor
# This module should actually be merged with the plugin
# module. However, it imports a ton of django stuff which leads to
# an error with apps not being registered yet. So for the POC we'll keep the decorators here
class Container(object):
pass
# TODO: Use the Django model directly? If so, figure out how to automatically setup django
# for testing purposes from a plugin
# class Sample(object):
# def __init__(self, sample_name, sample_type, concentration, volume, custom_fields):
# self.sample_name = sample_name
# self.sample_type = sample_type
# self.concentration = concentration
# self.volume = volume
# self.custom_fields = custom_fields
# def __repr__(self):
# return self.sample_name
class SampleService():
def __init__(self, namespace):
self.containers = list()
self.samples = list()
self.namespace = namespace
def add(self, sample):
raise NotImplementedError()
def new_sample(self, sample_name, sample_type, concentration, volume, **kwargs):
"""Creates a Sample object with the specified default parameters and any domain specific
parameters in kwargs. The domain specific arguments will be registered per the calling plugin,
which will automatically add a namespace to the keys
"""
raise NotImplementedError()
class App(object):
"""An interface for plugins that need to communicate back to the app"""
def __init__(self, namespace):
self.samples = SampleService(namespace)
class FileHandlersRegistry(object):
def __init__(self):
self.handlers = set()
def register(self, fn):
self.handlers.add(fn)
def handle_file_uploaded(self, file_like):
raise NotImplementedError()
for handler in self.handlers:
if type(handler) == type:
obj = handler()
if not hasattr(obj, "handle"):
raise HandlerNotDefinedException(
"A handler must contain a method called `handle`")
handler = obj.handle
handler(file_like, App(handler.__module__))
# TODO: Look into if we can reuse something from sentry instead
file_handlers_registry = FileHandlersRegistry()
class HandlerNotDefinedException(Exception):
pass
| en | 0.667687 | # TODO: Refactor # This module should actually be merged with the plugin # module. However, it imports a ton of django stuff which leads to # an error with apps not being registered yet. So for the POC we'll keep the decorators here # TODO: Use the Django model directly? If so, figure out how to automatically setup django # for testing purposes from a plugin # class Sample(object): # def __init__(self, sample_name, sample_type, concentration, volume, custom_fields): # self.sample_name = sample_name # self.sample_type = sample_type # self.concentration = concentration # self.volume = volume # self.custom_fields = custom_fields # def __repr__(self): # return self.sample_name Creates a Sample object with the specified default parameters and any domain specific parameters in kwargs. The domain specific arguments will be registered per the calling plugin, which will automatically add a namespace to the keys An interface for plugins that need to communicate back to the app # TODO: Look into if we can reuse something from sentry instead | 2.315353 | 2 |
official/cv/brdnet/modelarts/start_train.py | leelige/mindspore | 0 | 6613593 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''training script for modelarts'''
import os
import glob
import datetime
import argparse
import moxing as mox
import numpy as np
import PIL.Image as Image
import mindspore
import mindspore.nn as nn
from mindspore import context, export
from mindspore.train import Model
from mindspore.common import set_seed
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.context import ParallelMode
from mindspore.common.tensor import Tensor
from mindspore.train.callback import TimeMonitor, LossMonitor
from mindspore import load_checkpoint, load_param_into_net
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
from mindspore.communication.management import init, get_rank, get_group_size
from src.logger import get_logger
from src.dataset import create_BRDNetDataset
from src.models import BRDNet, BRDWithLossCell, TrainingWrapper
## Params
parser = argparse.ArgumentParser()
# --- data / training hyper-parameters -------------------------------------
parser.add_argument('--batch_size', default=32, type=int, help='batch size')
parser.add_argument('--train_data', default='../dataset/waterloo5050step40colorimage/'
                    , type=str, help='path of train data')
parser.add_argument('--test_dir', default='./Test/Kodak24/'
                    , type=str, help='directory of test dataset')
parser.add_argument('--sigma', default=15, type=int, help='noise level')
parser.add_argument('--channel', default=3, type=int
                    , help='image channel, 3 for color, 1 for gray')
parser.add_argument('--epoch', default=50, type=int, help='number of train epoches')
parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate for Adam')
parser.add_argument('--save_every', default=1, type=int, help='save model at every x epoches')
# --- resuming from an existing checkpoint ----------------------------------
parser.add_argument('--resume_path', type=str, default=None,
                    help='put the path to resuming file if needed')
parser.add_argument('--resume_name', type=str, default=None,
                    help='resuming file name')
# --- input shape used when exporting the trained model ---------------------
parser.add_argument("--image_height", type=int, default=500, help="Image height for exporting model.")
parser.add_argument("--image_width", type=int, default=500, help="Image width for exporting model.")
# --- ModelArts / OBS bookkeeping paths --------------------------------------
parser.add_argument('--train_url', type=str, default='train_url/'
                    , help='needed by modelarts, but we donot use it because the name is ambiguous')
parser.add_argument('--data_url', type=str, default='data_url/'
                    , help='needed by modelarts, but we donot use it because the name is ambiguous')
parser.add_argument('--output_path', type=str, default='./output/'
                    , help='output_path,when use_modelarts is set True, it will be cache/output/')
parser.add_argument('--outer_path', type=str, default='s3://output/'
                    , help='obs path,to store e.g ckpt files ')
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR"\
                    , help="file format")
# --- device / distributed-training options ----------------------------------
parser.add_argument('--device_target', type=str, default='Ascend'
                    , help='device where the code will be implemented. (Default: Ascend)')
parser.add_argument('--is_distributed', type=int, default=0, help='if multi device')
parser.add_argument('--rank', type=int, default=0, help='local rank of distributed')
parser.add_argument('--group_size', type=int, default=1, help='world size of distributed')
parser.add_argument('--is_save_on_master', type=int, default=1, help='save ckpt on master or all rank')
parser.add_argument('--ckpt_save_max', type=int, default=20
                    , help='Maximum number of checkpoint files can be saved. Default: 20.')
# fixed global seed for reproducible runs
set_seed(1)
args = parser.parse_args()
# per-run output directory, e.g. ./output/sigma_15_2021-01-01_time_12_00_00
save_dir = os.path.join(args.output_path, 'sigma_' + str(args.sigma) \
                        + '_' + datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
def get_lr(steps_per_epoch, max_epoch, init_lr):
    """Build a per-step staircase learning-rate schedule.

    The rate stays at ``init_lr`` for (up to) 30 epochs, then drops by a
    factor of 10 for each subsequent 30-epoch span, until ``max_epoch``
    epochs are covered.

    Args:
        steps_per_epoch: number of optimizer steps in one epoch.
        max_epoch: total number of epochs to schedule.
        init_lr: learning rate for the first span.

    Returns:
        list of one learning-rate value per training step.
    """
    schedule = []
    remaining = max_epoch
    rate = init_lr
    while remaining > 0:
        span = min(30, remaining)
        schedule.extend([rate] * (steps_per_epoch * span))
        remaining -= span
        rate /= 10
    return schedule
# DEVICE_ID is injected by the Ascend/ModelArts launcher; fall back to card 0.
device_id = int(os.getenv('DEVICE_ID', '0'))
# Run in graph mode with automatic mixed precision on the requested backend.
context.set_context(mode=context.GRAPH_MODE, enable_auto_mixed_precision=True,
                    device_target=args.device_target, save_graphs=False)
def copy_data_from_obs():
    """Stage remote OBS data into the local cache before training.

    Copies the training set, the optional resume checkpoint and the test
    set with moxing, then repoints the corresponding ``args`` paths at the
    local cache copies.
    """
    def _stage(label, src, dst):
        # one download with the same before/after log lines as each copy
        args.logger.info("copying " + label + " from obs to cache....")
        mox.file.copy_parallel(src, dst)
        args.logger.info("copying " + label + " finished....")

    _stage("train data", args.train_data, 'cache/dataset')
    args.train_data = 'cache/dataset/'
    # resume checkpoint if needed
    if args.resume_path:
        _stage("resume checkpoint", args.resume_path, 'cache/resume_path')
        args.resume_path = 'cache/resume_path/'
    _stage("test data", args.test_dir, 'cache/test')
    args.test_dir = 'cache/test/'
def copy_data_to_obs():
    """Upload everything written under save_dir (logs, checkpoints,
    exported models) back to the OBS location given by --outer_path."""
    args.logger.info("copying files from cache to obs....")
    mox.file.copy_parallel(save_dir, args.outer_path)
    args.logger.info("copying finished....")
def check_best_model():
    """Evaluate every checkpoint saved by this rank and record the best one.

    For each checkpoint under ``save_dir/ckpt_<rank>/``: load it into a
    BRDNet, add reproducible Gaussian noise (std = sigma/255) to every test
    image, denoise, and average PSNR/SSIM over the test set.  The path of
    the checkpoint with the highest average PSNR is stored in
    ``args.best_ckpt`` for ``export_models`` to pick up.
    """
    ckpt_list = glob.glob(os.path.join(save_dir, 'ckpt_' + str(args.rank) + '/*.ckpt'))
    model = BRDNet(args.channel)
    transpose = P.Transpose()
    expand_dims = P.ExpandDims()
    compare_psnr = nn.PSNR()
    compare_ssim = nn.SSIM()
    best_psnr = 0.
    args.best_ckpt = ""
    for ckpt in sorted(ckpt_list):
        args.logger.info("testing ckpt: " + str(ckpt))
        # reuse the same network object; only the weights change per checkpoint
        load_param_into_net(model, load_checkpoint(ckpt))
        psnr = []  #after denoise
        ssim = []  #after denoise
        file_list = glob.glob(os.path.join(args.test_dir, "*"))
        model.set_train(False)
        for file in file_list:
            # read image, normalized to [0, 1] floats
            if args.channel == 3:
                img_clean = np.array(Image.open(file), dtype='float32') / 255.0
            else:
                # grayscale: convert and keep an explicit channel axis (H, W, 1)
                img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), \
                            dtype='float32') / 255.0, axis=2)
            # re-seed per image so every checkpoint sees identical noise
            np.random.seed(0)  #obtain the same random data when it is in the test phase
            img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape)
            img_clean = Tensor(img_clean, mindspore.float32)  #HWC
            img_test = Tensor(img_test, mindspore.float32)  #HWC
            # predict: HWC -> NCHW for the network
            img_clean = expand_dims(transpose(img_clean, (2, 0, 1)), 0)  #NCHW
            img_test = expand_dims(transpose(img_test, (2, 0, 1)), 0)  #NCHW
            y_predict = model(img_test)  #NCHW
            # calculate numeric metrics on the clipped output
            img_out = C.clip_by_value(y_predict, 0, 1)
            psnr_denoised = compare_psnr(img_clean, img_out)
            ssim_denoised = compare_ssim(img_clean, img_out)
            psnr.append(psnr_denoised.asnumpy()[0])
            ssim.append(ssim_denoised.asnumpy()[0])
        psnr_avg = sum(psnr)/len(psnr)
        ssim_avg = sum(ssim)/len(ssim)
        # keep the checkpoint with the best average PSNR seen so far
        if psnr_avg > best_psnr:
            best_psnr = psnr_avg
            args.best_ckpt = ckpt
            args.logger.info("new best ckpt: " + str(ckpt) + ", psnr: " +\
                             str(psnr_avg) + ", ssim: " + str(ssim_avg))
def export_models():
    """Export the checkpoint selected by check_best_model (args.best_ckpt)
    to the format given by --file_format, using a zero input of shape
    (1, channel, image_height, image_width)."""
    args.logger.info("exporting best model....")
    best_net = BRDNet(args.channel)
    param_dict = load_checkpoint(args.best_ckpt)
    load_param_into_net(best_net, param_dict)
    dummy = np.zeros([1, args.channel, args.image_height, args.image_width])
    input_arr = Tensor(dummy, mindspore.float32)
    export(best_net, input_arr,
           file_name=os.path.join(save_dir, "best_ckpt"),
           file_format=args.file_format)
    args.logger.info("export best model finished....")
def train():
    """Build the dataset, optionally resume from a checkpoint, and run the
    training loop.

    The learning rate follows get_lr's staircase schedule (10x drop every
    30 epochs).  Checkpoints are written every ``save_every`` epochs by the
    ranks for which ``args.rank_save_ckpt_flag`` is set in ``__main__``.
    """
    dataset, args.steps_per_epoch = create_BRDNetDataset(args.train_data, args.sigma, \
        args.channel, args.batch_size, args.group_size, args.rank, shuffle=True)
    model = BRDNet(args.channel)
    # resume checkpoint if needed
    if args.resume_path:
        args.resume_path = os.path.join(args.resume_path, args.resume_name)
        args.logger.info('loading resume checkpoint {} into network'.format(args.resume_path))
        load_param_into_net(model, load_checkpoint(args.resume_path))
        args.logger.info('loaded resume checkpoint {} into network'.format(args.resume_path))
    # wrap the bare network with its loss, then with the optimizer step
    model = BRDWithLossCell(model)
    model.set_train()
    lr_list = get_lr(args.steps_per_epoch, args.epoch, args.lr)
    optimizer = nn.Adam(params=model.trainable_params(), learning_rate=Tensor(lr_list, mindspore.float32))
    model = TrainingWrapper(model, optimizer)
    model = Model(model)
    # define callbacks (progress reporting only on rank 0)
    if args.rank == 0:
        time_cb = TimeMonitor(data_size=args.steps_per_epoch)
        loss_cb = LossMonitor(per_print_times=10)
        callbacks = [time_cb, loss_cb]
    else:
        callbacks = []
    if args.rank_save_ckpt_flag:
        # save a checkpoint every `save_every` epochs, keeping at most ckpt_save_max
        ckpt_config = CheckpointConfig(save_checkpoint_steps=args.steps_per_epoch*args.save_every,
                                       keep_checkpoint_max=args.ckpt_save_max)
        save_ckpt_path = os.path.join(save_dir, 'ckpt_' + str(args.rank) + '/')
        ckpt_cb = ModelCheckpoint(config=ckpt_config,
                                  directory=save_ckpt_path,
                                  prefix='channel_'+str(args.channel)+'_sigma_'+str(args.sigma)+'_rank_'+str(args.rank))
        callbacks.append(ckpt_cb)
    model.train(args.epoch, dataset, callbacks=callbacks, dataset_sink_mode=True)
    args.logger.info("training finished....")
if __name__ == '__main__':
    # distributed setup: only supported on Ascend; each process binds to its card
    if args.is_distributed:
        assert args.device_target == "Ascend"
        init()
        context.set_context(device_id=device_id)
        args.rank = get_rank()
        args.group_size = get_group_size()
        device_num = args.group_size
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL)
    else:
        if args.device_target == "Ascend":
            context.set_context(device_id=device_id)
    # select for master rank save ckpt or all rank save, compatible for model parallel
    args.rank_save_ckpt_flag = 0
    if args.is_save_on_master:
        if args.rank == 0:
            args.rank_save_ckpt_flag = 1
    else:
        args.rank_save_ckpt_flag = 1
    args.logger = get_logger(save_dir, "BRDNet", args.rank)
    args.logger.save_args(args)
    print('Starting training, Total Epochs: %d' % (args.epoch))
    # full pipeline: stage data, train, pick/export the best checkpoint, upload results
    copy_data_from_obs()
    train()
    if args.rank_save_ckpt_flag:
        check_best_model()
        export_models()
    copy_data_to_obs()
    args.logger.info('All task finished!')
| # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''training script for modelarts'''
import os
import glob
import datetime
import argparse
import moxing as mox
import numpy as np
import PIL.Image as Image
import mindspore
import mindspore.nn as nn
from mindspore import context, export
from mindspore.train import Model
from mindspore.common import set_seed
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.context import ParallelMode
from mindspore.common.tensor import Tensor
from mindspore.train.callback import TimeMonitor, LossMonitor
from mindspore import load_checkpoint, load_param_into_net
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
from mindspore.communication.management import init, get_rank, get_group_size
from src.logger import get_logger
from src.dataset import create_BRDNetDataset
from src.models import BRDNet, BRDWithLossCell, TrainingWrapper
## Params
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=32, type=int, help='batch size')
parser.add_argument('--train_data', default='../dataset/waterloo5050step40colorimage/'
, type=str, help='path of train data')
parser.add_argument('--test_dir', default='./Test/Kodak24/'
, type=str, help='directory of test dataset')
parser.add_argument('--sigma', default=15, type=int, help='noise level')
parser.add_argument('--channel', default=3, type=int
, help='image channel, 3 for color, 1 for gray')
parser.add_argument('--epoch', default=50, type=int, help='number of train epoches')
parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate for Adam')
parser.add_argument('--save_every', default=1, type=int, help='save model at every x epoches')
parser.add_argument('--resume_path', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--resume_name', type=str, default=None,
help='resuming file name')
parser.add_argument("--image_height", type=int, default=500, help="Image height for exporting model.")
parser.add_argument("--image_width", type=int, default=500, help="Image width for exporting model.")
parser.add_argument('--train_url', type=str, default='train_url/'
, help='needed by modelarts, but we donot use it because the name is ambiguous')
parser.add_argument('--data_url', type=str, default='data_url/'
, help='needed by modelarts, but we donot use it because the name is ambiguous')
parser.add_argument('--output_path', type=str, default='./output/'
, help='output_path,when use_modelarts is set True, it will be cache/output/')
parser.add_argument('--outer_path', type=str, default='s3://output/'
, help='obs path,to store e.g ckpt files ')
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR"\
, help="file format")
parser.add_argument('--device_target', type=str, default='Ascend'
, help='device where the code will be implemented. (Default: Ascend)')
parser.add_argument('--is_distributed', type=int, default=0, help='if multi device')
parser.add_argument('--rank', type=int, default=0, help='local rank of distributed')
parser.add_argument('--group_size', type=int, default=1, help='world size of distributed')
parser.add_argument('--is_save_on_master', type=int, default=1, help='save ckpt on master or all rank')
parser.add_argument('--ckpt_save_max', type=int, default=20
, help='Maximum number of checkpoint files can be saved. Default: 20.')
set_seed(1)
args = parser.parse_args()
save_dir = os.path.join(args.output_path, 'sigma_' + str(args.sigma) \
+ '_' + datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
def get_lr(steps_per_epoch, max_epoch, init_lr):
lr_each_step = []
while max_epoch > 0:
tem = min(30, max_epoch)
for _ in range(steps_per_epoch*tem):
lr_each_step.append(init_lr)
max_epoch -= tem
init_lr /= 10
return lr_each_step
device_id = int(os.getenv('DEVICE_ID', '0'))
context.set_context(mode=context.GRAPH_MODE, enable_auto_mixed_precision=True,
device_target=args.device_target, save_graphs=False)
def copy_data_from_obs():
args.logger.info("copying train data from obs to cache....")
mox.file.copy_parallel(args.train_data, 'cache/dataset')
args.logger.info("copying train data finished....")
args.train_data = 'cache/dataset/'
# resume checkpoint if needed
if args.resume_path:
args.logger.info("copying resume checkpoint from obs to cache....")
mox.file.copy_parallel(args.resume_path, 'cache/resume_path')
args.logger.info("copying resume checkpoint finished....")
args.resume_path = 'cache/resume_path/'
args.logger.info("copying test data from obs to cache....")
mox.file.copy_parallel(args.test_dir, 'cache/test')
args.logger.info("copying test data finished....")
args.test_dir = 'cache/test/'
def copy_data_to_obs():
args.logger.info("copying files from cache to obs....")
mox.file.copy_parallel(save_dir, args.outer_path)
args.logger.info("copying finished....")
def check_best_model():
ckpt_list = glob.glob(os.path.join(save_dir, 'ckpt_' + str(args.rank) + '/*.ckpt'))
model = BRDNet(args.channel)
transpose = P.Transpose()
expand_dims = P.ExpandDims()
compare_psnr = nn.PSNR()
compare_ssim = nn.SSIM()
best_psnr = 0.
args.best_ckpt = ""
for ckpt in sorted(ckpt_list):
args.logger.info("testing ckpt: " + str(ckpt))
load_param_into_net(model, load_checkpoint(ckpt))
psnr = [] #after denoise
ssim = [] #after denoise
file_list = glob.glob(os.path.join(args.test_dir, "*"))
model.set_train(False)
for file in file_list:
# read image
if args.channel == 3:
img_clean = np.array(Image.open(file), dtype='float32') / 255.0
else:
img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), \
dtype='float32') / 255.0, axis=2)
np.random.seed(0) #obtain the same random data when it is in the test phase
img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape)
img_clean = Tensor(img_clean, mindspore.float32) #HWC
img_test = Tensor(img_test, mindspore.float32) #HWC
# predict
img_clean = expand_dims(transpose(img_clean, (2, 0, 1)), 0)#NCHW
img_test = expand_dims(transpose(img_test, (2, 0, 1)), 0)#NCHW
y_predict = model(img_test) #NCHW
# calculate numeric metrics
img_out = C.clip_by_value(y_predict, 0, 1)
psnr_denoised = compare_psnr(img_clean, img_out)
ssim_denoised = compare_ssim(img_clean, img_out)
psnr.append(psnr_denoised.asnumpy()[0])
ssim.append(ssim_denoised.asnumpy()[0])
psnr_avg = sum(psnr)/len(psnr)
ssim_avg = sum(ssim)/len(ssim)
if psnr_avg > best_psnr:
best_psnr = psnr_avg
args.best_ckpt = ckpt
args.logger.info("new best ckpt: " + str(ckpt) + ", psnr: " +\
str(psnr_avg) + ", ssim: " + str(ssim_avg))
def export_models():
args.logger.info("exporting best model....")
net = BRDNet(args.channel)
load_param_into_net(net, load_checkpoint(args.best_ckpt))
input_arr = Tensor(np.zeros([1, args.channel, \
args.image_height, args.image_width]), mindspore.float32)
export(net, input_arr, file_name=os.path.join(save_dir, "best_ckpt"), \
file_format=args.file_format)
args.logger.info("export best model finished....")
def train():
dataset, args.steps_per_epoch = create_BRDNetDataset(args.train_data, args.sigma, \
args.channel, args.batch_size, args.group_size, args.rank, shuffle=True)
model = BRDNet(args.channel)
# resume checkpoint if needed
if args.resume_path:
args.resume_path = os.path.join(args.resume_path, args.resume_name)
args.logger.info('loading resume checkpoint {} into network'.format(args.resume_path))
load_param_into_net(model, load_checkpoint(args.resume_path))
args.logger.info('loaded resume checkpoint {} into network'.format(args.resume_path))
model = BRDWithLossCell(model)
model.set_train()
lr_list = get_lr(args.steps_per_epoch, args.epoch, args.lr)
optimizer = nn.Adam(params=model.trainable_params(), learning_rate=Tensor(lr_list, mindspore.float32))
model = TrainingWrapper(model, optimizer)
model = Model(model)
# define callbacks
if args.rank == 0:
time_cb = TimeMonitor(data_size=args.steps_per_epoch)
loss_cb = LossMonitor(per_print_times=10)
callbacks = [time_cb, loss_cb]
else:
callbacks = []
if args.rank_save_ckpt_flag:
ckpt_config = CheckpointConfig(save_checkpoint_steps=args.steps_per_epoch*args.save_every,
keep_checkpoint_max=args.ckpt_save_max)
save_ckpt_path = os.path.join(save_dir, 'ckpt_' + str(args.rank) + '/')
ckpt_cb = ModelCheckpoint(config=ckpt_config,
directory=save_ckpt_path,
prefix='channel_'+str(args.channel)+'_sigma_'+str(args.sigma)+'_rank_'+str(args.rank))
callbacks.append(ckpt_cb)
model.train(args.epoch, dataset, callbacks=callbacks, dataset_sink_mode=True)
args.logger.info("training finished....")
if __name__ == '__main__':
if args.is_distributed:
assert args.device_target == "Ascend"
init()
context.set_context(device_id=device_id)
args.rank = get_rank()
args.group_size = get_group_size()
device_num = args.group_size
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL)
else:
if args.device_target == "Ascend":
context.set_context(device_id=device_id)
# select for master rank save ckpt or all rank save, compatible for model parallel
args.rank_save_ckpt_flag = 0
if args.is_save_on_master:
if args.rank == 0:
args.rank_save_ckpt_flag = 1
else:
args.rank_save_ckpt_flag = 1
args.logger = get_logger(save_dir, "BRDNet", args.rank)
args.logger.save_args(args)
print('Starting training, Total Epochs: %d' % (args.epoch))
copy_data_from_obs()
train()
if args.rank_save_ckpt_flag:
check_best_model()
export_models()
copy_data_to_obs()
args.logger.info('All task finished!')
| fr | 0.166036 | # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ training script for modelarts ## Params # resume checkpoint if needed #after denoise #after denoise # read image #obtain the same random data when it is in the test phase #HWC #HWC # predict #NCHW #NCHW #NCHW # calculate numeric metrics # resume checkpoint if needed # define callbacks # select for master rank save ckpt or all rank save, compatible for model parallel | 1.628152 | 2 |
multiped/kinematics4.py | MultipedRobotics/quadruped | 14 | 6613594 | ##############################################
# The MIT License (MIT)
# Copyright (c) 2016 <NAME>
# see LICENSE for full details
##############################################
from __future__ import print_function
from __future__ import division
from math import sin, cos, acos, atan2, sqrt, pi
from math import radians as d2r
# from math import degrees as r2d
# import logging
# from quadruped.Servo import Servo
# logging.basicConfig(level=logging.DEBUG)
# logging.basicConfig(level=logging.ERROR)
# from collections import namedtuple
#
# Link = namedtuple('Link', 'length offset')
# Leg4Info = namedtuple('Leg4Info', 'coxa femur tibia fibia tarsus')
# def ramp(val, length):
# """
# Simple triangle for scaling speed. it always returns 0.5 at the lowest
# and is 1.0 at max in the middle
#
# in: val - step
# length - number of steps
# out: 0.5 - 1.0
# """
# val = val % length
# # print("ramp: {} {} {}".format(val, length/2, length))
# slope = 0.5/(length/2)
# if val > length/2:
# # since it is symetric, just mirror the answer
# val = (length - val)
# return slope*val + 0.5
class KinematicsException(Exception):
    """Module-specific error type for kinematics failures (e.g. an
    unreachable foot position).  NOTE(review): not currently raised
    anywhere in this module."""
    pass
class Kinematics4(object):
    """
    Kinematics for a 4-joint leg: coxa, femur, tibia, tarsus.

    Given the four link lengths (from a params dict), this class converts
    between joint angles in the DH frame and cartesian foot positions:

      - forward(t1..t4) -> (x, y, z) foot position
      - inverse(x, y, z, o) -> (t1, t2, t3, t4) joint angles

    and bulk-converts a gait's foot trajectories into per-step joint-angle
    commands (generateDHAngles).
    """
    # Link lengths are fixed by the 3D-printed hardware; filled in by __init__.
    coxaLength = None
    tibiaLength = None
    femurLength = None
    tarsusLength = None

    def __init__(self, params):
        """
        params: dict with keys 'coxa', 'femur', 'tibia', 'tarsus'; each value
                is a sequence whose first element is that link's length.
        """
        self.coxaLength = params['coxa'][0]
        self.femurLength = params['femur'][0]
        self.tibiaLength = params['tibia'][0]
        self.tarsusLength = params['tarsus'][0]

    def forward(self, t1, t2, t3, t4, degrees=True):
        """
        Forward kinematics of the leg; returns the foot position (x, y, z).

        The input angles are referenced to the DH frame arrangement:
        t1 rotates the whole leg about z (azimuth), t2..t4 are the relative
        femur/tibia/tarsus joint angles in the vertical plane of the leg.
        Angles are degrees by default (degrees=False for radians).
        """
        l1 = self.coxaLength
        l2 = self.femurLength
        l3 = self.tibiaLength
        l4 = self.tarsusLength
        if degrees:
            t1 = d2r(t1)
            t2 = d2r(t2)
            t3 = d2r(t3)
            t4 = d2r(t4)
        # radial reach in the leg plane, then projected onto x/y by the azimuth t1
        x = (l1 + l2*cos(t2) + l3*cos(t2 + t3) + l4*cos(t2 + t3 + t4))*cos(t1)
        y = (l1 + l2*cos(t2) + l3*cos(t2 + t3) + l4*cos(t2 + t3 + t4))*sin(t1)
        z = l2*sin(t2) + l3*sin(t2 + t3) + l4*sin(t2 + t3 + t4)
        return (x, y, z,)

    def inverse(self, x, y, z, o=90, degrees=True):
        """
        Inverse kinematics: joint angles (t1, t2, t3, t4) that place the
        foot at (x, y, z) with the tarsus oriented by `o`.

        Azimuth angle t1 lies in the x-y plane; the remaining joints move in
        the vertical w-z plane, where w = sqrt(x^2 + y^2) - coxa:

               ^ z        l3
               |      o-----o
               |     /       \\  l4
               |    / l2      E
               |   /
               +--o-------------> w
                 l1

          l1: coxa, l2: femur, l3: tibia, l4: tarsus

        `o` fixes the direction from the foot E back to the tibia-tarsus
        joint: that joint sits at E + l4*(cos(o), sin(o)) in the w-z plane
        (o=90 deg means the tarsus points straight down).

        Raises ValueError (from acos) when the target is unreachable.
        All angles are degrees by default (degrees=False for radians).
        """
        def cosinelaw(a, b, c):
            # law of cosines: interior angle opposite side c, given sides a, b.
            # cos(g) = (a^2 + b^2 - c^2) / (2ab); acos raises ValueError
            # when the triangle inequality fails (target out of reach).
            try:
                ans = acos((a**2 + b**2 - c**2)/(2*a*b))
            except ValueError:
                print("num: {}".format(a**2+b**2-c**2))
                print("den: {}".format(2*a*b))
                print("acos({})".format((a**2+b**2-c**2)/(2*a*b)))
                raise
            return ans

        l1 = self.coxaLength
        l2 = self.femurLength
        l3 = self.tibiaLength
        l4 = self.tarsusLength

        t1 = atan2(y, x)
        if degrees:
            o = o*pi/180
        try:
            # radial distance in the leg plane, past the coxa
            w = sqrt(x**2 + y**2) - l1
            # tibia-tarsus joint location, offset from the foot by the tarsus
            j4w = w + l4*cos(o)
            j4z = z + l4*sin(o)
            # solve the 2-link (femur/tibia) problem for that joint
            r = sqrt(j4w**2 + j4z**2)
            g1 = atan2(j4z, j4w)
            g2 = cosinelaw(l2, r, l3)
            t2 = g1 + g2                      # elbow-up branch
            t3 = pi + cosinelaw(l2, l3, r)    # wrapped to a negative bend below
            # femur-tibia joint, then the tarsus angle from the foot distance
            j2w = l2*cos(t2)
            j2z = l2*sin(t2)
            c = sqrt((w - j2w)**2 + (z - j2z)**2)
            t4 = pi + cosinelaw(l3, l4, c)
        except Exception as e:
            print('inverse({:.2f},{:.2f},{:.2f},{:.2f})'.format(x, y, z, o))
            print('Error:', e)
            raise

        def check(t):
            # wrap an angle into (-150, 150] degrees (in radians);
            # presumably the servo travel limit -- TODO confirm
            if t > 150*pi/180:
                t -= 2*pi
            elif t < -150*pi/180:
                t += 2*pi
            return t

        t1 = check(t1)
        t2 = check(t2)
        t3 = check(t3)
        t4 = check(t4)

        if degrees:
            t1 *= 180/pi
            t2 *= 180/pi
            t3 *= 180/pi
            t4 *= 180/pi

        return (t1, t2, t3, t4)

    def generateDHAngles(self, footLoc, speed):
        """
        Bulk inverse kinematics for a whole gait cycle, all legs at once.

        speed: movement speed attached to every step (currently constant).

        footLoc: foot positions per leg:
            { step0     step1
              0: [(x,y,z), (x,y,z), ...]   # leg0
              2: [(x,y,z), (x,y,z), ...]   # leg2
              ... }

        Returns the same mapping with each position replaced by
        (t1, t2, t3, t4, speed), where t = theta in DH space.
        """
        angles = {}
        print("=[generateServoAngles2 speed servo[0-3]]===================")
        for legNum in list(footLoc.keys()):
            print("Leg[{}]-------------".format(legNum))
            angles[legNum] = []
            # inverse-kinematics every foot position along this leg's path
            for step, p in enumerate(footLoc[legNum]):
                s = self.inverse(*p)
                scaled_speed = speed
                angles[legNum].append(s + (scaled_speed,))
                print(" {:2}: {} {:7.2f} {:7.2f} {:7.2f} {:7.2f}".format(step, scaled_speed, *s))
        return angles

    def pprint(self, step):
        """Pretty-print one gait step: one line of DH angles per leg."""
        print('*'*25)
        for leg in step:
            print(' DH: [{:.0f} {:.0f} {:.0f} {:.0f}]'.format(*leg))
| ##############################################
# The MIT License (MIT)
# Copyright (c) 2016 <NAME>
# see LICENSE for full details
##############################################
from __future__ import print_function
from __future__ import division
from math import sin, cos, acos, atan2, sqrt, pi
from math import radians as d2r
# from math import degrees as r2d
# import logging
# from quadruped.Servo import Servo
# logging.basicConfig(level=logging.DEBUG)
# logging.basicConfig(level=logging.ERROR)
# from collections import namedtuple
#
# Link = namedtuple('Link', 'length offset')
# Leg4Info = namedtuple('Leg4Info', 'coxa femur tibia fibia tarsus')
# def ramp(val, length):
# """
# Simple triangle for scaling speed. it always returns 0.5 at the lowest
# and is 1.0 at max in the middle
#
# in: val - step
# length - number of steps
# out: 0.5 - 1.0
# """
# val = val % length
# # print("ramp: {} {} {}".format(val, length/2, length))
# slope = 0.5/(length/2)
# if val > length/2:
# # since it is symetric, just mirror the answer
# val = (length - val)
# return slope*val + 0.5
class KinematicsException(Exception):
pass
class Kinematics4(object):
"""
Leg class outputs the servo angles for some requested foot location (x,y,z)
Leg knows:
- leg dimensions
- number of servos and their parameters/limits
- fk/ik equations
- sit/stand sequence
"""
# these are fixed by the 3D printing, not changing
coxaLength = None
tibiaLength = None
femurLength = None
tarsusLength = None
# positions = {
# 'stand': None,
# # 'sit': None,
# # 'neutral': None
# }
def __init__(self, params):
"""
Each leg has 4 servos/channels
"""
# # setup kinematics and servos
# self.servos = []
# for ID, seg in enumerate(['coxa', 'femur', 'tibia', 'tarsus']):
# self.servos.append(Servo(ID, params[seg][1], params[seg][2]))
self.coxaLength = params['coxa'][0]
self.femurLength = params['femur'][0]
self.tibiaLength = params['tibia'][0]
self.tarsusLength = params['tarsus'][0]
# if 'stand' in params:
# self.positions['neutral'] = self.forward(*params['stand'])
# else:
# raise Exception('Need to have "stand" angles in params file')
def __del__(self):
pass
def forward(self, t1, t2, t3, t4, degrees=True):
"""
Forward kinematics of the leg, note, default angles are all degrees.
The input angles are referenced to the DH frame arrangement.
"""
l1 = self.coxaLength
l2 = self.femurLength
l3 = self.tibiaLength
l4 = self.tarsusLength
if degrees:
t1 = d2r(t1)
t2 = d2r(t2)
t3 = d2r(t3)
t4 = d2r(t4)
x = (l1 + l2*cos(t2) + l3*cos(t2 + t3) + l4*cos(t2 + t3 + t4))*cos(t1)
y = (l1 + l2*cos(t2) + l3*cos(t2 + t3) + l4*cos(t2 + t3 + t4))*sin(t1)
z = l2*sin(t2) + l3*sin(t2 + t3) + l4*sin(t2 + t3 + t4)
return (x, y, z,)
def inverse(self, x, y, z, o=90, degrees=True):
"""
Azimuth angle is between x and w and lies in the x-y plane
^ x
w |
\ |
l1 \ |
\ |
\|
<----------+ (z is out of the page - right hand rule)
y
Most of the robot arm move in the plane defined by w-z
^ z l3
| o-----o
| / \ l4
| / l2 E
| /
+--o-------------> w
l1
l1: coxa
l2: femur
l3: tibia
l4: tarsus
All joint angles returned are in degrees: (t1, t2, t3, t4)
"""
def cosinelaw(a, b, c):
# cosine law only used by this function
# cos(g) = (a^2+b^2-c^2)/2ab
try:
ans = acos((a**2+b**2-c**2)/(2*a*b))
except ValueError:
print("num: {}".format(a**2+b**2-c**2))
print("den: {}".format(2*a*b))
print("acos({})".format((a**2+b**2-c**2)/(2*a*b)))
raise
return ans
l1 = self.coxaLength
l2 = self.femurLength
l3 = self.tibiaLength
l4 = self.tarsusLength
t1 = atan2(y, x)
if degrees:
o = o*pi/180
try:
w = sqrt(x**2 + y**2) - l1
j4w = w + l4*cos(o)
j4z = z + l4*sin(o)
r = sqrt(j4w**2 + j4z**2)
g1 = atan2(j4z, j4w)
g2 = cosinelaw(l2, r, l3)
t2 = g1+g2
t3 = pi+cosinelaw(l2, l3, r)
j2w = l2*cos(t2)
j2z = l2*sin(t2)
c = sqrt((w-j2w)**2 + (z-j2z)**2)
t4 = pi+cosinelaw(l3, l4, c)
except Exception as e:
print('inverse({:.2f},{:.2f},{:.2f},{:.2f})'.format(x, y, z, o))
print('Error:', e)
raise
def check(t):
if t > 150*pi/180:
t -= 2*pi
elif t < -150*pi/180:
t += 2*pi
return t
# maxa = 150*pi/180
# t1 = t1 if t1 <= maxa else t1-2*pi
t1 = check(t1) # value?? check elsewhere?
t2 = check(t2)
t3 = check(t3)
t4 = check(t4)
if degrees:
t1 *= 180/pi
t2 *= 180/pi
t3 *= 180/pi
t4 *= 180/pi
return (t1, t2, t3, t4)
def generateDHAngles(self, footLoc, speed):
"""
This is a bulk process and takes all of the foot locations for an entire
sequence of a gait cycle. It handles all legs at once.
speed: this is the max movement speed
footLoc: locations of feet from gait
{ step0 step1 ...
0: [(x,y,z), (x,y,z), ...] # leg0
2: [(x,y,z), (x,y,z), ...] # leg2
...
}
return
{ step 0 step 1 ...
0: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg0
2: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg2
...
} where t=theta in DH space
"""
# get the keys and figure out some stuff
keys = list(footLoc.keys())
angles = {}
print("=[generateServoAngles2 speed servo[0-3]]===================")
for legNum in keys:
print("Leg[{}]-------------".format(legNum))
pos = footLoc[legNum] # grab foot positions for leg k
angles[legNum] = []
# calculate the inverse DH angles
for step, p in enumerate(pos):
s = self.inverse(*p) # s0,s1,s2,s3
scaled_speed = speed
angles[legNum].append(s + (scaled_speed,))
print(" {:2}: {} {:7.2f} {:7.2f} {:7.2f} {:7.2f}".format(step, scaled_speed, *s))
return angles
# def generateServoAngles(self, footLoc, speed):
# """
# This is a bulk process and takes all of the foot locations for an entire
# sequence of a gait cycle. It handles all legs at once.
#
# speed: this is the max movement speed
#
# footLoc: locations of feet from gait
# { step0 step1 ...
# 0: [(x,y,z), (x,y,z), ...] # leg0
# 2: [(x,y,z), (x,y,z), ...] # leg2
# ...
# }
#
# return
# { step 0 step 1 ...
# 0: [[(t1,s1),(t2,s2),(t3,s3),(t4,s4)], [(t1,s1),(t2,s2),(t3,s3),(t4,s4)], ...] # leg0
# 2: [[(t1,s1),(t2,s2),(t3,s3),(t4,s4)], [(t1,s1),(t2,s2),(t3,s3),(t4,s4)], ...] # leg2
# ...
# } where t=theta s=speed
# """
# # FIXME: fix this to handle N legs, right now it only does 4
#
# # get the keys and figure out some stuff
# keys = list(footLoc.keys())
# angles = {}
#
# for k in keys:
# pos = footLoc[k] # grab foot positions for leg k
# angles[k] = []
#
# # calculate the inverse DH angles
# numStep = len(pos)
# for step, p in enumerate(pos):
# s = self.inverse(*p) # s0,s1,s2,s3
# tmp = self.DH2Servo(s)
# # scaled_speed = int(speed*ramp(step, numStep))
# # if p[2] > -70: scaled_speed = speed
# # else: scaled_speed = int(0.6*speed)
# scaled_speed = speed
# tmp2 = [(x, scaled_speed) for x in tmp]
# angles[k].append(tmp2)
# # print("speed", speed)
# # print("tmp", tmp)
# # exit(0)
#
# return angles
# def generateServoAngles2(self, footLoc, speed):
# """
# This is a bulk process and takes all of the foot locations for an entire
# sequence of a gait cycle. It handles all legs at once.
#
# speed: this is the max movement speed
#
# footLoc: locations of feet from gait
# { step0 step1 ...
# 0: [(x,y,z), (x,y,z), ...] # leg0
# 2: [(x,y,z), (x,y,z), ...] # leg2
# ...
# }
#
# return
# { step 0 step 1 ...
# 0: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg0
# 2: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg2
# ...
# } where t=theta
# """
#
# # get the keys and figure out some stuff
# keys = list(footLoc.keys())
# angles = {}
#
# print("=[generateServoAngles2 speed servo[0-3]]===================")
# for legNum in keys:
# print("Leg[{}]-------------".format(legNum))
# pos = footLoc[legNum] # grab foot positions for leg k
# angles[legNum] = []
# # print('pos', pos)
#
# # calculate the inverse DH angles
# # numStep = len(pos)
# for step, p in enumerate(pos):
# # print(' {:2}: {:7.2f} {:7.2f} {:7.2f}'.format(i, *pt))
# # print('step: {} p: {}'.format(step, p))
# s = self.inverse(*p) # s0,s1,s2,s3
# tmp = self.DH2Servo(s)
# # scaled_speed = int(speed*ramp(step, numStep))
# # if p[2] > -70: scaled_speed = speed
# # else: scaled_speed = int(0.6*speed)
# # tmp2 = [(x, scaled_speed) for x in tmp]
# scaled_speed = speed
# angles[legNum].append(tmp + (scaled_speed,))
# # print("speed", speed)
# # print("tmp", tmp)
# # # exit(0)
# # print(" {:2}: {:7.2f} {:7.2f} {:7.2f} {:7.2f}".format(step, *tmp))
# print(" {:2}: {} {:7.2f} {:7.2f} {:7.2f} {:7.2f}".format(step, scaled_speed, *tmp))
#
# return angles
# def DH2Servo(self, angles):
# tmp = []
# for s, a in list(zip(self.servos, angles)):
# tmp.append(s.DH2Servo(a))
# return tuple(tmp)
def pprint(self, step):
print('*'*25)
for leg in step:
print(' DH: [{:.0f} {:.0f} {:.0f} {:.0f}]'.format(*leg))
# def getNeutralPos(self):
# return self.positions['neutral']
# def generateServoAngles_DH(self, angles):
# """
# This is a bulk process and takes all of the foot locations for an entire
# sequence of a gait cycle. It handles all legs at once.
#
# footLoc: locations of feet from gait
# { step0 step1 ...
# 0: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg0 DH space
# 2: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg2 DH space
# ...
# }
#
# return
# { step 0 step 1 ...
# 0: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg0 servo space
# 2: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg2 servo space
# ...
# }
# """
# # get the keys and figure out some stuff
# keys = list(footLoc.keys())
# angles = {}
#
# for k in keys:
# angles[k] = []
# # calculate the inverse DH angles
# for s in angles:
# # tmp = [0]*4
# # tmp[0] = self.servos[0].DH2Servo(s[0])
# # tmp[1] = self.servos[1].DH2Servo(s[1])
# # tmp[2] = self.servos[2].DH2Servo(s[2])
# # tmp[3] = self.servos[3].DH2Servo(s[3])
# tmp = self.DH2Servo(s)
# angles[k].append(tmp)
#
# return angles
# def generateServoAngles(self, footLoc):
# """
# This is a bulk process and takes all of the foot locations for an entire
# sequence of a gait cycle. It handles all legs at once.
#
# footLoc: locations of feet from gait
# { step0 step1 ...
# 0: [(x,y,z), (x,y,z), ...] # leg0
# 2: [(x,y,z), (x,y,z), ...] # leg2
# ...
# }
#
# return
# { step 0 step 1 ...
# 0: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg0
# 2: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg2
# ...
# }
# """
# # get the keys and figure out some stuff
# keys = list(footLoc.keys())
# angles = {}
#
# for k in keys:
# pos = footLoc[k] # grab foot positions for leg k
# angles[k] = []
#
# # calculate the inverse DH angles
# for p in pos:
# s = self.inverse(*p) # s0,s1,s2,s3
# tmp = self.DH2Servo(s)
# angles[k].append(tmp)
#
# return angles
| en | 0.601533 | ############################################## # The MIT License (MIT) # Copyright (c) 2016 <NAME> # see LICENSE for full details ############################################## # from math import degrees as r2d # import logging # from quadruped.Servo import Servo # logging.basicConfig(level=logging.DEBUG) # logging.basicConfig(level=logging.ERROR) # from collections import namedtuple # # Link = namedtuple('Link', 'length offset') # Leg4Info = namedtuple('Leg4Info', 'coxa femur tibia fibia tarsus') # def ramp(val, length): # """ # Simple triangle for scaling speed. it always returns 0.5 at the lowest # and is 1.0 at max in the middle # # in: val - step # length - number of steps # out: 0.5 - 1.0 # """ # val = val % length # # print("ramp: {} {} {}".format(val, length/2, length)) # slope = 0.5/(length/2) # if val > length/2: # # since it is symetric, just mirror the answer # val = (length - val) # return slope*val + 0.5 Leg class outputs the servo angles for some requested foot location (x,y,z) Leg knows: - leg dimensions - number of servos and their parameters/limits - fk/ik equations - sit/stand sequence # these are fixed by the 3D printing, not changing # positions = { # 'stand': None, # # 'sit': None, # # 'neutral': None # } Each leg has 4 servos/channels # # setup kinematics and servos # self.servos = [] # for ID, seg in enumerate(['coxa', 'femur', 'tibia', 'tarsus']): # self.servos.append(Servo(ID, params[seg][1], params[seg][2])) # if 'stand' in params: # self.positions['neutral'] = self.forward(*params['stand']) # else: # raise Exception('Need to have "stand" angles in params file') Forward kinematics of the leg, note, default angles are all degrees. The input angles are referenced to the DH frame arrangement. 
Azimuth angle is between x and w and lies in the x-y plane ^ x w | \ | l1 \ | \ | \| <----------+ (z is out of the page - right hand rule) y Most of the robot arm move in the plane defined by w-z ^ z l3 | o-----o | / \ l4 | / l2 E | / +--o-------------> w l1 l1: coxa l2: femur l3: tibia l4: tarsus All joint angles returned are in degrees: (t1, t2, t3, t4) # cosine law only used by this function # cos(g) = (a^2+b^2-c^2)/2ab # maxa = 150*pi/180 # t1 = t1 if t1 <= maxa else t1-2*pi # value?? check elsewhere? This is a bulk process and takes all of the foot locations for an entire sequence of a gait cycle. It handles all legs at once. speed: this is the max movement speed footLoc: locations of feet from gait { step0 step1 ... 0: [(x,y,z), (x,y,z), ...] # leg0 2: [(x,y,z), (x,y,z), ...] # leg2 ... } return { step 0 step 1 ... 0: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg0 2: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg2 ... } where t=theta in DH space # get the keys and figure out some stuff # grab foot positions for leg k # calculate the inverse DH angles # s0,s1,s2,s3 # def generateServoAngles(self, footLoc, speed): # """ # This is a bulk process and takes all of the foot locations for an entire # sequence of a gait cycle. It handles all legs at once. # # speed: this is the max movement speed # # footLoc: locations of feet from gait # { step0 step1 ... # 0: [(x,y,z), (x,y,z), ...] # leg0 # 2: [(x,y,z), (x,y,z), ...] # leg2 # ... # } # # return # { step 0 step 1 ... # 0: [[(t1,s1),(t2,s2),(t3,s3),(t4,s4)], [(t1,s1),(t2,s2),(t3,s3),(t4,s4)], ...] # leg0 # 2: [[(t1,s1),(t2,s2),(t3,s3),(t4,s4)], [(t1,s1),(t2,s2),(t3,s3),(t4,s4)], ...] # leg2 # ... 
# } where t=theta s=speed # """ # # FIXME: fix this to handle N legs, right now it only does 4 # # # get the keys and figure out some stuff # keys = list(footLoc.keys()) # angles = {} # # for k in keys: # pos = footLoc[k] # grab foot positions for leg k # angles[k] = [] # # # calculate the inverse DH angles # numStep = len(pos) # for step, p in enumerate(pos): # s = self.inverse(*p) # s0,s1,s2,s3 # tmp = self.DH2Servo(s) # # scaled_speed = int(speed*ramp(step, numStep)) # # if p[2] > -70: scaled_speed = speed # # else: scaled_speed = int(0.6*speed) # scaled_speed = speed # tmp2 = [(x, scaled_speed) for x in tmp] # angles[k].append(tmp2) # # print("speed", speed) # # print("tmp", tmp) # # exit(0) # # return angles # def generateServoAngles2(self, footLoc, speed): # """ # This is a bulk process and takes all of the foot locations for an entire # sequence of a gait cycle. It handles all legs at once. # # speed: this is the max movement speed # # footLoc: locations of feet from gait # { step0 step1 ... # 0: [(x,y,z), (x,y,z), ...] # leg0 # 2: [(x,y,z), (x,y,z), ...] # leg2 # ... # } # # return # { step 0 step 1 ... # 0: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg0 # 2: [(t1,t2,t3,t4,speed), (t1,t2,t3,t4,speed), ...] # leg2 # ... 
# } where t=theta # """ # # # get the keys and figure out some stuff # keys = list(footLoc.keys()) # angles = {} # # print("=[generateServoAngles2 speed servo[0-3]]===================") # for legNum in keys: # print("Leg[{}]-------------".format(legNum)) # pos = footLoc[legNum] # grab foot positions for leg k # angles[legNum] = [] # # print('pos', pos) # # # calculate the inverse DH angles # # numStep = len(pos) # for step, p in enumerate(pos): # # print(' {:2}: {:7.2f} {:7.2f} {:7.2f}'.format(i, *pt)) # # print('step: {} p: {}'.format(step, p)) # s = self.inverse(*p) # s0,s1,s2,s3 # tmp = self.DH2Servo(s) # # scaled_speed = int(speed*ramp(step, numStep)) # # if p[2] > -70: scaled_speed = speed # # else: scaled_speed = int(0.6*speed) # # tmp2 = [(x, scaled_speed) for x in tmp] # scaled_speed = speed # angles[legNum].append(tmp + (scaled_speed,)) # # print("speed", speed) # # print("tmp", tmp) # # # exit(0) # # print(" {:2}: {:7.2f} {:7.2f} {:7.2f} {:7.2f}".format(step, *tmp)) # print(" {:2}: {} {:7.2f} {:7.2f} {:7.2f} {:7.2f}".format(step, scaled_speed, *tmp)) # # return angles # def DH2Servo(self, angles): # tmp = [] # for s, a in list(zip(self.servos, angles)): # tmp.append(s.DH2Servo(a)) # return tuple(tmp) # def getNeutralPos(self): # return self.positions['neutral'] # def generateServoAngles_DH(self, angles): # """ # This is a bulk process and takes all of the foot locations for an entire # sequence of a gait cycle. It handles all legs at once. # # footLoc: locations of feet from gait # { step0 step1 ... # 0: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg0 DH space # 2: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg2 DH space # ... # } # # return # { step 0 step 1 ... # 0: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg0 servo space # 2: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg2 servo space # ... 
# } # """ # # get the keys and figure out some stuff # keys = list(footLoc.keys()) # angles = {} # # for k in keys: # angles[k] = [] # # calculate the inverse DH angles # for s in angles: # # tmp = [0]*4 # # tmp[0] = self.servos[0].DH2Servo(s[0]) # # tmp[1] = self.servos[1].DH2Servo(s[1]) # # tmp[2] = self.servos[2].DH2Servo(s[2]) # # tmp[3] = self.servos[3].DH2Servo(s[3]) # tmp = self.DH2Servo(s) # angles[k].append(tmp) # # return angles # def generateServoAngles(self, footLoc): # """ # This is a bulk process and takes all of the foot locations for an entire # sequence of a gait cycle. It handles all legs at once. # # footLoc: locations of feet from gait # { step0 step1 ... # 0: [(x,y,z), (x,y,z), ...] # leg0 # 2: [(x,y,z), (x,y,z), ...] # leg2 # ... # } # # return # { step 0 step 1 ... # 0: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg0 # 2: [[t1,t2,t3,t4], [t1,t2,t3,t4], ...] # leg2 # ... # } # """ # # get the keys and figure out some stuff # keys = list(footLoc.keys()) # angles = {} # # for k in keys: # pos = footLoc[k] # grab foot positions for leg k # angles[k] = [] # # # calculate the inverse DH angles # for p in pos: # s = self.inverse(*p) # s0,s1,s2,s3 # tmp = self.DH2Servo(s) # angles[k].append(tmp) # # return angles | 3.138136 | 3 |
setup.py | isislovecruft/farfetchd | 7 | 6613595 | <reponame>isislovecruft/farfetchd<gh_stars>1-10
#!/usr/bin/env python2
#_____________________________________________________________________________
#
# This file is part of farfetchd, a CAPTCHA service.
#
# :authors: <NAME> <<EMAIL>>
# :copyright: (c) 2007-2017, The Tor Project, Inc.
# (c) 2007-2017, <NAME>
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
from __future__ import print_function
import os
import setuptools
import sys
import versioneer
def get_cmdclass():
"""Get our cmdclass dictionary for use in setuptool.setup().
This must be done outside the call to setuptools.setup() because we need
to add our own classes to the cmdclass dictionary, and then update that
dictionary with the one returned from versioneer.get_cmdclass().
"""
cmdclass = {'test': Trial,}
cmdclass.update(versioneer.get_cmdclass())
return cmdclass
def get_requirements():
"""Extract the list of requirements from our requirements.txt.
:rtype: 2-tuple
:returns: Two lists, the first is a list of requirements in the form of
pkgname==version. The second is a list of URIs or VCS checkout strings
which specify the dependency links for obtaining a copy of the
requirement.
"""
requirements_file = os.path.join(os.getcwd(), 'requirements.txt')
requirements = []
links=[]
try:
with open(requirements_file) as reqfile:
for line in reqfile.readlines():
line = line.strip()
if line.startswith('#'):
continue
if line.startswith(('git+', 'hg+', 'svn+')):
line = line[line.index('+') + 1:]
if line.startswith(
('https://', 'git://', 'hg://', 'svn://')):
links.append(line)
else:
requirements.append(line)
except (IOError, OSError) as error:
print(error)
return requirements, links
class Trial(setuptools.Command):
"""Twisted Trial setuptools command.
Based on the setuptools Trial command in Zooko's Tahoe-LAFS, as well as
https://github.com/simplegeo/setuptools-trial/ (which is also based on the
Tahoe-LAFS code).
Pieces of the original implementation of this 'test' command (that is, for
the original pyunit-based BridgeDB tests which, a long time ago, in a
galaxy far far away, lived in bridgedb.Tests) were based on setup.py from
<NAME>'s mixminion, which was based on the setup.py from Zooko's
pyutil package, which was in turn based on
http://mail.python.org/pipermail/distutils-sig/2002-January/002714.html.
Crusty, old-ass Python, like hella wut.
"""
description = "Run Twisted Trial-based tests."
user_options = [
('debug', 'b', ("Run tests in a debugger. If that debugger is pdb, will "
"load '.pdbrc' from current directory if it exists.")),
('debug-stacktraces', 'B', "Report Deferred creation and callback stack traces"),
('debugger=', None, ("The fully qualified name of a debugger to use if "
"--debug is passed (default: pdb)")),
('disablegc', None, "Disable the garbage collector"),
('force-gc', None, "Have Trial run gc.collect() before and after each test case"),
('jobs=', 'j', "Number of local workers to run, a strictly positive integer"),
('profile', None, "Run tests under the Python profiler"),
('random=', 'Z', "Run tests in random order using the specified seed"),
('reactor=', 'r', "Which reactor to use"),
('reporter=', None, "Customize Trial's output with a reporter plugin"),
('rterrors', 'e', "Realtime errors: print out tracebacks as soon as they occur"),
('spew', None, "Print an insanely verbose log of everything that happens"),
('testmodule=', None, "Filename to grep for test cases (-*- test-case-name)"),
('tbformat=', None, ("Specify the format to display tracebacks with. Valid "
"formats are 'plain', 'emacs', and 'cgitb' which uses "
"the nicely verbose stdlib cgitb.text function")),
('unclean-warnings', None, "Turn dirty reactor errors into warnings"),
('until-failure', 'u', "Repeat a test (specified by -s) until it fails."),
('without-module=', None, ("Fake the lack of the specified modules, separated "
"with commas")),
]
boolean_options = ['debug', 'debug-stacktraces', 'disablegc', 'force-gc',
'profile', 'rterrors', 'spew', 'unclean-warnings',
'until-failure']
def initialize_options(self):
self.debug = None
self.debug_stacktraces = None
self.debugger = None
self.disablegc = None
self.force_gc = None
self.jobs = None
self.profile = None
self.random = None
self.reactor = None
self.reporter = None
self.rterrors = None
self.spew = None
self.testmodule = None
self.tbformat = None
self.unclean_warnings = None
self.until_failure = None
self.without_module = None
def finalize_options(self):
build = self.get_finalized_command('build')
self.build_purelib = build.build_purelib
self.build_platlib = build.build_platlib
def run(self):
self.run_command('build')
old_path = sys.path[:]
sys.path[0:0] = [self.build_purelib, self.build_platlib]
result = 1
try:
result = self.run_tests()
finally:
sys.path = old_path
raise SystemExit(result)
def run_tests(self):
# We do the import from Twisted inside the function instead of the top
# of the file because since Twisted is a setup_requires, we can't
# assume that Twisted will be installed on the user's system prior, so
# if we don't do the import here, then importing from this plugin will
# fail.
from twisted.scripts import trial
if not self.testmodule:
self.testmodule = "farfetchd.test"
# Handle parsing the trial options passed through the setuptools
# trial command.
cmd_options = []
for opt in self.boolean_options:
if getattr(self, opt.replace('-', '_'), None):
cmd_options.append('--%s' % opt)
for opt in ('debugger', 'jobs', 'random', 'reactor', 'reporter',
'testmodule', 'tbformat', 'without-module'):
value = getattr(self, opt.replace('-', '_'), None)
if value is not None:
cmd_options.extend(['--%s' % opt, value])
config = trial.Options()
config.parseOptions(cmd_options)
config['tests'] = [self.testmodule,]
trial._initialDebugSetup(config)
trialRunner = trial._makeRunner(config)
suite = trial._getSuite(config)
# run the tests
if self.until_failure:
test_result = trialRunner.runUntilFailure(suite)
else:
test_result = trialRunner.run(suite)
if test_result.wasSuccessful():
return 0 # success
return 1 # failure
# If there is an environment variable FARFETCHD_INSTALL_DEPENDENCIES=0, it will
# disable checking for, fetching, and installing farfetchd's dependencies with
# easy_install.
#
# Setting FARFETCHD_INSTALL_DEPENDENCIES=0 is *highly* recommended, because
# easy_install is a security nightmare. Automatically installing dependencies
# is enabled by default, however, because this is how all Python packages are
# supposed to work.
if bool(int(os.environ.get("FARFETCHD_INSTALL_DEPENDENCIES", 1))):
requires, deplinks = get_requirements()
else:
requires, deplinks = [], []
setuptools.setup(
name='farfetchd',
version=versioneer.get_version(),
description='Twisted Python CAPTCHA server',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://www.torproject.org',
download_url='https://gitweb.torproject.org/farfetchd.git',
package_dir={'farfetchd': 'farfetchd'},
packages=[
'farfetchd',
'farfetchd.test',
],
package_data={
'farfetchd': [
'API.html',
]
},
scripts=['scripts/farfetchd'],
cmdclass=get_cmdclass(),
include_package_data=True,
install_requires=requires,
dependency_links=deplinks,
zip_safe=False,
)
| #!/usr/bin/env python2
#_____________________________________________________________________________
#
# This file is part of farfetchd, a CAPTCHA service.
#
# :authors: <NAME> <<EMAIL>>
# :copyright: (c) 2007-2017, The Tor Project, Inc.
# (c) 2007-2017, <NAME>
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
from __future__ import print_function
import os
import setuptools
import sys
import versioneer
def get_cmdclass():
"""Get our cmdclass dictionary for use in setuptool.setup().
This must be done outside the call to setuptools.setup() because we need
to add our own classes to the cmdclass dictionary, and then update that
dictionary with the one returned from versioneer.get_cmdclass().
"""
cmdclass = {'test': Trial,}
cmdclass.update(versioneer.get_cmdclass())
return cmdclass
def get_requirements():
"""Extract the list of requirements from our requirements.txt.
:rtype: 2-tuple
:returns: Two lists, the first is a list of requirements in the form of
pkgname==version. The second is a list of URIs or VCS checkout strings
which specify the dependency links for obtaining a copy of the
requirement.
"""
requirements_file = os.path.join(os.getcwd(), 'requirements.txt')
requirements = []
links=[]
try:
with open(requirements_file) as reqfile:
for line in reqfile.readlines():
line = line.strip()
if line.startswith('#'):
continue
if line.startswith(('git+', 'hg+', 'svn+')):
line = line[line.index('+') + 1:]
if line.startswith(
('https://', 'git://', 'hg://', 'svn://')):
links.append(line)
else:
requirements.append(line)
except (IOError, OSError) as error:
print(error)
return requirements, links
class Trial(setuptools.Command):
"""Twisted Trial setuptools command.
Based on the setuptools Trial command in Zooko's Tahoe-LAFS, as well as
https://github.com/simplegeo/setuptools-trial/ (which is also based on the
Tahoe-LAFS code).
Pieces of the original implementation of this 'test' command (that is, for
the original pyunit-based BridgeDB tests which, a long time ago, in a
galaxy far far away, lived in bridgedb.Tests) were based on setup.py from
<NAME>'s mixminion, which was based on the setup.py from Zooko's
pyutil package, which was in turn based on
http://mail.python.org/pipermail/distutils-sig/2002-January/002714.html.
Crusty, old-ass Python, like hella wut.
"""
description = "Run Twisted Trial-based tests."
user_options = [
('debug', 'b', ("Run tests in a debugger. If that debugger is pdb, will "
"load '.pdbrc' from current directory if it exists.")),
('debug-stacktraces', 'B', "Report Deferred creation and callback stack traces"),
('debugger=', None, ("The fully qualified name of a debugger to use if "
"--debug is passed (default: pdb)")),
('disablegc', None, "Disable the garbage collector"),
('force-gc', None, "Have Trial run gc.collect() before and after each test case"),
('jobs=', 'j', "Number of local workers to run, a strictly positive integer"),
('profile', None, "Run tests under the Python profiler"),
('random=', 'Z', "Run tests in random order using the specified seed"),
('reactor=', 'r', "Which reactor to use"),
('reporter=', None, "Customize Trial's output with a reporter plugin"),
('rterrors', 'e', "Realtime errors: print out tracebacks as soon as they occur"),
('spew', None, "Print an insanely verbose log of everything that happens"),
('testmodule=', None, "Filename to grep for test cases (-*- test-case-name)"),
('tbformat=', None, ("Specify the format to display tracebacks with. Valid "
"formats are 'plain', 'emacs', and 'cgitb' which uses "
"the nicely verbose stdlib cgitb.text function")),
('unclean-warnings', None, "Turn dirty reactor errors into warnings"),
('until-failure', 'u', "Repeat a test (specified by -s) until it fails."),
('without-module=', None, ("Fake the lack of the specified modules, separated "
"with commas")),
]
boolean_options = ['debug', 'debug-stacktraces', 'disablegc', 'force-gc',
'profile', 'rterrors', 'spew', 'unclean-warnings',
'until-failure']
def initialize_options(self):
self.debug = None
self.debug_stacktraces = None
self.debugger = None
self.disablegc = None
self.force_gc = None
self.jobs = None
self.profile = None
self.random = None
self.reactor = None
self.reporter = None
self.rterrors = None
self.spew = None
self.testmodule = None
self.tbformat = None
self.unclean_warnings = None
self.until_failure = None
self.without_module = None
def finalize_options(self):
build = self.get_finalized_command('build')
self.build_purelib = build.build_purelib
self.build_platlib = build.build_platlib
def run(self):
self.run_command('build')
old_path = sys.path[:]
sys.path[0:0] = [self.build_purelib, self.build_platlib]
result = 1
try:
result = self.run_tests()
finally:
sys.path = old_path
raise SystemExit(result)
    def run_tests(self):
        """Translate this command's attributes into trial options and run.

        Returns 0 when the suite passes and 1 when it fails; run() turns
        that into the process exit status.
        """
        # We do the import from Twisted inside the function instead of the top
        # of the file because since Twisted is a setup_requires, we can't
        # assume that Twisted will be installed on the user's system prior, so
        # if we don't do the import here, then importing from this plugin will
        # fail.
        from twisted.scripts import trial

        if not self.testmodule:
            self.testmodule = "farfetchd.test"

        # Handle parsing the trial options passed through the setuptools
        # trial command.
        cmd_options = []
        for opt in self.boolean_options:
            # Flag options become bare "--<name>" switches when truthy.
            if getattr(self, opt.replace('-', '_'), None):
                cmd_options.append('--%s' % opt)

        for opt in ('debugger', 'jobs', 'random', 'reactor', 'reporter',
                    'testmodule', 'tbformat', 'without-module'):
            # Valued options become "--<name> <value>" pairs.
            value = getattr(self, opt.replace('-', '_'), None)
            if value is not None:
                cmd_options.extend(['--%s' % opt, value])

        config = trial.Options()
        config.parseOptions(cmd_options)
        config['tests'] = [self.testmodule,]

        # NOTE(review): _initialDebugSetup/_makeRunner/_getSuite are private
        # trial helpers (leading underscore) and may change between Twisted
        # releases; keep this call order intact.
        trial._initialDebugSetup(config)
        trialRunner = trial._makeRunner(config)
        suite = trial._getSuite(config)

        # run the tests
        if self.until_failure:
            test_result = trialRunner.runUntilFailure(suite)
        else:
            test_result = trialRunner.run(suite)

        if test_result.wasSuccessful():
            return 0 # success
        return 1 # failure
# The environment variable FARFETCHD_INSTALL_DEPENDENCIES=0 disables
# checking for, fetching, and installing farfetchd's dependencies with
# easy_install.
#
# Setting FARFETCHD_INSTALL_DEPENDENCIES=0 is *highly* recommended, because
# easy_install is a security nightmare.  The default is nevertheless to
# install dependencies automatically, since that is how Python packages
# are expected to behave.
install_deps = bool(int(os.environ.get("FARFETCHD_INSTALL_DEPENDENCIES", 1)))
if install_deps:
    requires, deplinks = get_requirements()
else:
    requires, deplinks = [], []
setuptools.setup(
name='farfetchd',
version=versioneer.get_version(),
description='Twisted Python CAPTCHA server',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://www.torproject.org',
download_url='https://gitweb.torproject.org/farfetchd.git',
package_dir={'farfetchd': 'farfetchd'},
packages=[
'farfetchd',
'farfetchd.test',
],
package_data={
'farfetchd': [
'API.html',
]
},
scripts=['scripts/farfetchd'],
cmdclass=get_cmdclass(),
include_package_data=True,
install_requires=requires,
dependency_links=deplinks,
zip_safe=False,
) | en | 0.853661 | #!/usr/bin/env python2 #_____________________________________________________________________________ # # This file is part of farfetchd, a CAPTCHA service. # # :authors: <NAME> <<EMAIL>> # :copyright: (c) 2007-2017, The Tor Project, Inc. # (c) 2007-2017, <NAME> # :license: see LICENSE for licensing information #_____________________________________________________________________________ Get our cmdclass dictionary for use in setuptool.setup(). This must be done outside the call to setuptools.setup() because we need to add our own classes to the cmdclass dictionary, and then update that dictionary with the one returned from versioneer.get_cmdclass(). Extract the list of requirements from our requirements.txt. :rtype: 2-tuple :returns: Two lists, the first is a list of requirements in the form of pkgname==version. The second is a list of URIs or VCS checkout strings which specify the dependency links for obtaining a copy of the requirement. Twisted Trial setuptools command. Based on the setuptools Trial command in Zooko's Tahoe-LAFS, as well as https://github.com/simplegeo/setuptools-trial/ (which is also based on the Tahoe-LAFS code). Pieces of the original implementation of this 'test' command (that is, for the original pyunit-based BridgeDB tests which, a long time ago, in a galaxy far far away, lived in bridgedb.Tests) were based on setup.py from <NAME>'s mixminion, which was based on the setup.py from Zooko's pyutil package, which was in turn based on http://mail.python.org/pipermail/distutils-sig/2002-January/002714.html. Crusty, old-ass Python, like hella wut. # We do the import from Twisted inside the function instead of the top # of the file because since Twisted is a setup_requires, we can't # assume that Twisted will be installed on the user's system prior, so # if we don't do the import here, then importing from this plugin will # fail. # Handle parsing the trial options passed through the setuptools # trial command. 
# run the tests # success # failure # If there is an environment variable FARFETCHD_INSTALL_DEPENDENCIES=0, it will # disable checking for, fetching, and installing farfetchd's dependencies with # easy_install. # # Setting FARFETCHD_INSTALL_DEPENDENCIES=0 is *highly* recommended, because # easy_install is a security nightmare. Automatically installing dependencies # is enabled by default, however, because this is how all Python packages are # supposed to work. | 2.036608 | 2 |
src/data_processing/pairwise_generation/generate_data.py | rz4/DeepProteinScoring | 2 | 6613596 | '''
generate_data.py
Updated: 3/29/18
This script is used to generate pairwise distance matricies used for
convolutional neural network training. The script will store representations
in npz files within a /pairwise_data/ subdirectory. This script is used specifically to
generate data used for CASP experiments.
'''
import os
import numpy as np
from mpi4py import MPI
from scipy.ndimage.filters import gaussian_filter
from scipy.spatial.distance import pdist
from itertools import combinations
# Data generation parameters
data_folder = '../../../data/T0/' # Path to data folder
pairwise_distance_bins = [i*5 for i in range(10)]
################################################################################
# Static Parameters
chain = 'A' # Chain Id might need to be changed for PDBs missing identifier
seed = 458762 # For random distribution of tasks using MPI
residues = ['ALA', 'ARG', 'ASN', 'ASP', 'ASX', 'CYS', 'GLN',
'GLU', 'GLX', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS',
'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR',
'UNK', 'VAL']
def parse_pdb(path, chain):
    '''
    Method parses atomic coordinate data from PDB.

    Params:
        path - str; PDB file path
        chain - str; chain identifier

    Returns:
        data - np.array; atoms grouped per residue, each atom as
               [residue name, atom name, x, y, z] string fields
    '''
    # Parse residue, atom type and atomic coordinates
    data = []
    with open(path, 'r') as f:
        lines = f.readlines()

    residue_id = None   # resSeq+iCode (cols 23-27) of the residue being collected
    residue_data = []
    flag = False        # True once an ATOM record of the requested chain was seen
    for row in lines:
        if row[:4] == 'ATOM' and row[21] == chain:
            flag = True
            # BUG FIX: group by residue sequence number, not residue *name*.
            # Keying on the name merged consecutive identical residues
            # (e.g. ALA-ALA) into a single group.
            current_id = row[22:27]
            if residue_id is not None and current_id != residue_id:
                data.append(residue_data)
                residue_data = []
            residue_id = current_id
            # BUG FIX: z occupies columns 47-54 -> slice [46:54]; slicing
            # from 47 dropped the first character (the sign of wide
            # negative coordinates).
            atom_data = [row[17:20], row[12:16].strip(), row[30:38], row[38:46], row[46:54]]
            residue_data.append(atom_data)
        if row[:3] == 'TER' and flag: break
    # BUG FIX: flush the final residue, which was previously lost because
    # groups were only emitted when the *next* residue started.
    if residue_data:
        data.append(residue_data)
    data = np.array(data)
    return data
def bin_pairwise_distances(protein_data, pairwise_distance_bins):
    '''
    Method bins pairwise distances of residue alpha carbons into 2D data grids.

    Params:
        protein_data - np.array; residue-grouped atom records from parse_pdb()
        pairwise_distance_bins - list; list of bins used to bin pairwise distances

    Returns:
        binned_pairwise - np.array; shape (residues, residues, bins-1) grid of
                          smoothed distance histograms per residue-type pair
    '''
    # Get alpha carbons
    # One CA atom per residue group; assumes every group contains a CA
    # (IndexError otherwise) -- TODO confirm for non-standard residues.
    alpha_carbons = []
    for i in range(len(protein_data)):
        residue = np.array(protein_data[i])
        ac_i = np.where(residue[:,1] == 'CA')
        alpha_carbons.append(residue[ac_i][0])
    alpha_carbons = np.array(alpha_carbons)

    # Pairwise distances
    # NOTE(review): alpha_carbons is a string array, so pdist() receives the
    # coordinate columns as text and relies on implicit numeric conversion --
    # confirm this works with the installed scipy version.
    dist = np.array(pdist(alpha_carbons[:,2:]))
    labels = list(combinations(alpha_carbons[:,0],2))
    labels = np.array([i[0] + i[1] for i in labels])

    # Bin pairwise distances
    # Distances are keyed by the concatenated residue-name pair ('ALAGLY').
    # combinations() preserves sequence order, so 'ALAGLY' and 'GLYALA' are
    # distinct keys and the resulting grid is not symmetric by construction.
    bin_x = []
    for r1 in residues:
        bin_y = []
        for r2 in residues:
            i = np.where(labels == r1+r2)
            H, bins = np.histogram(dist[i], bins=pairwise_distance_bins)
            H = gaussian_filter(H, 0.5)  # light Gaussian smoothing per histogram
            bin_y.append(H)
        bin_x.append(bin_y)
    binned_pairwise = np.array(bin_x)

    return binned_pairwise
if __name__ == '__main__':

    # Set paths relative to this file
    os.chdir(os.path.dirname(os.path.realpath(__file__)))

    # MPI init
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    cores = comm.Get_size()

    # MPI task distribution: rank 0 enumerates the PDB files and every
    # rank receives the same shuffled list via broadcast.
    if rank == 0:
        tasks = []
        if not os.path.exists(data_folder+'pairwise_data'): os.mkdir(data_folder+'pairwise_data')

        # Search for data directories
        for data_path in sorted(os.listdir(data_folder+'pdbs')):
            if data_path.endswith('.pdb'):
                tasks.append(data_folder+'pdbs/'+data_path)

        # Shuffle for random distribution (fixed seed keeps it reproducible)
        np.random.seed(seed)
        np.random.shuffle(tasks)

    else: tasks = None

    # Broadcast tasks to all nodes and select tasks according to rank
    tasks = comm.bcast(tasks, root=0)
    tasks = np.array_split(tasks, cores)[rank]

    for t in tasks:
        path = t
        # BUG FIX: was "chain == 'A'" (a no-op comparison) guarded by
        # "if chain == None"; actually assign the default chain id.
        if chain is None: chain = 'A'
        save_path = '/'.join(t.split('/')[:-2]) + '/pairwise_data/'+ t.split('/')[-1][:-3]+'npz'

        # Parse PDB
        protein_data = parse_pdb(path, chain)

        try:
            # Bin pairwise distances
            binned_pairwise_distances = bin_pairwise_distances(protein_data, pairwise_distance_bins)

            # Save data
            np.savez(save_path, binned_pairwise_distances)
            print("Generated:", '/'.join(save_path.split('/')[-3:]))
        except Exception as err:
            # Best-effort per file: report the failure and continue with
            # the next task (the bare "except:" also swallowed Ctrl-C).
            print("Error generating data...", err)
    print("Data Generation Complete.")
| '''
generate_data.py
Updated: 3/29/18
This script is used to generate pairwise distance matricies used for
convolutional neural network training. The script will store representations
in npz files within a /pairwise_data/ subdirectory. This script is used specifically to
generate data used for CASP experiments.
'''
import os
import numpy as np
from mpi4py import MPI
from scipy.ndimage.filters import gaussian_filter
from scipy.spatial.distance import pdist
from itertools import combinations
# Data generation parameters
data_folder = '../../../data/T0/' # Path to data folder
pairwise_distance_bins = [i*5 for i in range(10)]
################################################################################
# Static Parameters
chain = 'A' # Chain Id might need to be changed for PDBs missing identifier
seed = 458762 # For random distribution of tasks using MPI
residues = ['ALA', 'ARG', 'ASN', 'ASP', 'ASX', 'CYS', 'GLN',
'GLU', 'GLX', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS',
'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR',
'UNK', 'VAL']
def parse_pdb(path, chain):
    '''
    Method parses atomic coordinate data from PDB.

    Params:
        path - str; PDB file path
        chain - str; chain identifier

    Returns:
        data - np.array; atoms grouped per residue, each atom as
               [residue name, atom name, x, y, z] string fields
    '''
    # Parse residue, atom type and atomic coordinates
    data = []
    with open(path, 'r') as f:
        lines = f.readlines()

    residue_id = None   # resSeq+iCode (cols 23-27) of the residue being collected
    residue_data = []
    flag = False        # True once an ATOM record of the requested chain was seen
    for row in lines:
        if row[:4] == 'ATOM' and row[21] == chain:
            flag = True
            # BUG FIX: group by residue sequence number, not residue *name*.
            # Keying on the name merged consecutive identical residues
            # (e.g. ALA-ALA) into a single group.
            current_id = row[22:27]
            if residue_id is not None and current_id != residue_id:
                data.append(residue_data)
                residue_data = []
            residue_id = current_id
            # BUG FIX: z occupies columns 47-54 -> slice [46:54]; slicing
            # from 47 dropped the first character (the sign of wide
            # negative coordinates).
            atom_data = [row[17:20], row[12:16].strip(), row[30:38], row[38:46], row[46:54]]
            residue_data.append(atom_data)
        if row[:3] == 'TER' and flag: break
    # BUG FIX: flush the final residue, which was previously lost because
    # groups were only emitted when the *next* residue started.
    if residue_data:
        data.append(residue_data)
    data = np.array(data)
    return data
def bin_pairwise_distances(protein_data, pairwise_distance_bins):
    '''
    Method bins pairwise distances of residue alpha carbons into 2D data grids.

    Params:
        protein_data - np.array; residue-grouped atom records from parse_pdb()
        pairwise_distance_bins - list; list of bins used to bin pairwise distances

    Returns:
        binned_pairwise - np.array; shape (residues, residues, bins-1) grid of
                          smoothed distance histograms per residue-type pair
    '''
    # Get alpha carbons
    # One CA atom per residue group; assumes every group contains a CA
    # (IndexError otherwise) -- TODO confirm for non-standard residues.
    alpha_carbons = []
    for i in range(len(protein_data)):
        residue = np.array(protein_data[i])
        ac_i = np.where(residue[:,1] == 'CA')
        alpha_carbons.append(residue[ac_i][0])
    alpha_carbons = np.array(alpha_carbons)

    # Pairwise distances
    # NOTE(review): alpha_carbons is a string array, so pdist() receives the
    # coordinate columns as text and relies on implicit numeric conversion --
    # confirm this works with the installed scipy version.
    dist = np.array(pdist(alpha_carbons[:,2:]))
    labels = list(combinations(alpha_carbons[:,0],2))
    labels = np.array([i[0] + i[1] for i in labels])

    # Bin pairwise distances
    # Distances are keyed by the concatenated residue-name pair ('ALAGLY').
    # combinations() preserves sequence order, so 'ALAGLY' and 'GLYALA' are
    # distinct keys and the resulting grid is not symmetric by construction.
    bin_x = []
    for r1 in residues:
        bin_y = []
        for r2 in residues:
            i = np.where(labels == r1+r2)
            H, bins = np.histogram(dist[i], bins=pairwise_distance_bins)
            H = gaussian_filter(H, 0.5)  # light Gaussian smoothing per histogram
            bin_y.append(H)
        bin_x.append(bin_y)
    binned_pairwise = np.array(bin_x)

    return binned_pairwise
if __name__ == '__main__':

    # Set paths relative to this file
    os.chdir(os.path.dirname(os.path.realpath(__file__)))

    # MPI init
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    cores = comm.Get_size()

    # MPI task distribution: rank 0 enumerates the PDB files and every
    # rank receives the same shuffled list via broadcast.
    if rank == 0:
        tasks = []
        if not os.path.exists(data_folder+'pairwise_data'): os.mkdir(data_folder+'pairwise_data')

        # Search for data directories
        for data_path in sorted(os.listdir(data_folder+'pdbs')):
            if data_path.endswith('.pdb'):
                tasks.append(data_folder+'pdbs/'+data_path)

        # Shuffle for random distribution (fixed seed keeps it reproducible)
        np.random.seed(seed)
        np.random.shuffle(tasks)

    else: tasks = None

    # Broadcast tasks to all nodes and select tasks according to rank
    tasks = comm.bcast(tasks, root=0)
    tasks = np.array_split(tasks, cores)[rank]

    for t in tasks:
        path = t
        # BUG FIX: was "chain == 'A'" (a no-op comparison) guarded by
        # "if chain == None"; actually assign the default chain id.
        if chain is None: chain = 'A'
        save_path = '/'.join(t.split('/')[:-2]) + '/pairwise_data/'+ t.split('/')[-1][:-3]+'npz'

        # Parse PDB
        protein_data = parse_pdb(path, chain)

        try:
            # Bin pairwise distances
            binned_pairwise_distances = bin_pairwise_distances(protein_data, pairwise_distance_bins)

            # Save data
            np.savez(save_path, binned_pairwise_distances)
            print("Generated:", '/'.join(save_path.split('/')[-3:]))
        except Exception as err:
            # Best-effort per file: report the failure and continue with
            # the next task (the bare "except:" also swallowed Ctrl-C).
            print("Error generating data...", err)
    print("Data Generation Complete.")
| en | 0.539219 | generate_data.py Updated: 3/29/18 This script is used to generate pairwise distance matricies used for convolutional neural network training. The script will store representations in npz files within a /pairwise_data/ subdirectory. This script is used specifically to generate data used for CASP experiments. # Data generation parameters # Path to data folder ################################################################################ # Static Parameters # Chain Id might need to be changed for PDBs missing identifier # For random distribution of tasks using MPI Method parses atomic coordinate data from PDB. Params: path - str; PDB file path chain - str; chain identifier Returns: data - np.array; PDB data # Parse residue, atom type and atomic coordinates Method bins pairwise distances of residue alpha carbons into 2D data grids. Params: protein_data - np.array; pairwise_distance_bins - list; list of bins used to bin pairwise distances Returns: binned_pairwise - np.array; # Get alpha carbons # Pairwise distances # Bin pairwise distances # Set paths relative to this file # MPI init # MPI task distribution # Search for data directories # Shuffle for random distribution # Broadcast tasks to all nodes and select tasks according to rank # Parse PDB # Bin pairwise distances # Save data | 2.457714 | 2 |
apps/wedding/models.py | andyzsf/django-blog | 0 | 6613597 | <gh_stars>0
# -*- coding: utf-8 -*-
from django.db import models
# Create your models here.
class Comment(models.Model):
    """A visitor comment: author name, phone number, and message body."""

    name = models.CharField(max_length=20)
    phone = models.CharField(max_length=11)
    body = models.CharField(max_length=2000, blank=True)

    def __str__(self):
        # Author name plus the first 100 characters of the message.
        return "{}:{}".format(self.name, self.body[:100])
| # -*- coding: utf-8 -*-
from django.db import models
# Create your models here.
class Comment(models.Model):
name = models.CharField(max_length=20)
phone = models.CharField(max_length=11)
body = models.CharField(max_length=2000, blank=True)
def __str__(self):
return ":".join([self.name, self.body[:100]]) | en | 0.937712 | # -*- coding: utf-8 -*- # Create your models here. | 2.508555 | 3 |
generator/base/templates/src/settings.py | codotype/codotype-python-falcon-mongodb-generator | 0 | 6613598 | DEBUG = True
MONGO = {
'DATABASE': 'database-1',
'HOST': 'localhost',
'PORT': 27017,
'USERNAME': '',
'PASSWORD': ''
}
# Debug flag for the generated service (verbose errors; disable in production).
DEBUG = True

# MongoDB connection settings consumed by the generated application.
MONGO = {
    'DATABASE': 'database-1',   # database name to use
    'HOST': 'localhost',
    'PORT': 27017,              # MongoDB default port
    'USERNAME': '',             # blank: connect without authentication
    'PASSWORD': ''
}
| none | 1 | 1.406922 | 1 | |
vee/pipeline/rpm.py | immersionroom/vee | 6 | 6613599 | <filename>vee/pipeline/rpm.py
import os
import re
from vee import log
from vee.cli import style, style_note
from vee.pipeline.base import PipelineStep
from vee.subproc import call
from vee.utils import cached_property
from vee.exceptions import AlreadyInstalled, PipelineError
_installed_packages = set()
class RPMChecker(PipelineStep):
    """Pipeline step for "rpm:" URLs.

    Instead of fetching/building anything, it checks that the named rpm
    package is already installed on the host and raises AlreadyInstalled.
    """

    factory_priority = 1000

    @cached_property
    def installed_packages(self):
        """Lower-cased names of installed rpms, plus every '-' and '.'
        separated prefix of each name, memoized module-wide."""
        if _installed_packages:
            return _installed_packages
        packages = _installed_packages
        for raw in call(['rpm', '-qa'], stdout=True).splitlines():
            entry = raw.strip().lower()
            if not entry:
                continue
            packages.add(entry)
            for sep in ('-', '.'):
                parts = entry.split(sep)
                packages.update(sep.join(parts[:count]) for count in range(1, len(parts)))
        return packages

    @classmethod
    def factory(cls, step, pkg):
        # Claim only packages whose URL uses the rpm: scheme, at init time.
        if step == 'init' and pkg.url.startswith('rpm:'):
            return cls()

    def get_next(self, step, pkg):
        # This step handles every subsequent pipeline stage itself.
        return self

    def init(self, pkg):
        # Signal that we should not be persisted to the database.
        pkg.virtual = True

    def fetch(self, pkg):
        if pkg.name.lower() not in self.installed_packages:
            raise PipelineError('rpm package "%s" is not installed.' % pkg.name)
        raise AlreadyInstalled()

    def inspect(self, pkg):
        pass

    def extract(self, pkg):
        pass

    def build(self, pkg):
        pass

    def install(self, pkg):
        pass

    def optlink(self, pkg):
        pass

    def relocate(self, pkg):
        pass
| <filename>vee/pipeline/rpm.py
import os
import re
from vee import log
from vee.cli import style, style_note
from vee.pipeline.base import PipelineStep
from vee.subproc import call
from vee.utils import cached_property
from vee.exceptions import AlreadyInstalled, PipelineError
_installed_packages = set()
class RPMChecker(PipelineStep):
    """Pipeline step for "rpm:" URLs.

    Instead of fetching/building anything, it checks that the named rpm
    package is already installed on the host and raises AlreadyInstalled.
    """

    factory_priority = 1000

    @cached_property
    def installed_packages(self):
        """Lower-cased names of installed rpms, plus every '-' and '.'
        separated prefix of each name, memoized module-wide."""
        if _installed_packages:
            return _installed_packages
        packages = _installed_packages
        for raw in call(['rpm', '-qa'], stdout=True).splitlines():
            entry = raw.strip().lower()
            if not entry:
                continue
            packages.add(entry)
            for sep in ('-', '.'):
                parts = entry.split(sep)
                packages.update(sep.join(parts[:count]) for count in range(1, len(parts)))
        return packages

    @classmethod
    def factory(cls, step, pkg):
        # Claim only packages whose URL uses the rpm: scheme, at init time.
        if step == 'init' and pkg.url.startswith('rpm:'):
            return cls()

    def get_next(self, step, pkg):
        # This step handles every subsequent pipeline stage itself.
        return self

    def init(self, pkg):
        # Signal that we should not be persisted to the database.
        pkg.virtual = True

    def fetch(self, pkg):
        if pkg.name.lower() not in self.installed_packages:
            raise PipelineError('rpm package "%s" is not installed.' % pkg.name)
        raise AlreadyInstalled()

    def inspect(self, pkg):
        pass

    def extract(self, pkg):
        pass

    def build(self, pkg):
        pass

    def install(self, pkg):
        pass

    def optlink(self, pkg):
        pass

    def relocate(self, pkg):
        pass
| en | 0.89499 | # Signal that we should not be persisted to the database. | 2.074116 | 2 |
draw_natural_training.py | ziqizh/cifar10_challenge | 0 | 6613600 | <filename>draw_natural_training.py
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import argparse

parser = argparse.ArgumentParser(description='CIFAR ACCURACY')
parser.add_argument('--path', default='natural-training-log.txt',
                    help='model name.')
args = parser.parse_args()

# log_file = open(args.log_path, 'w')

if __name__ == '__main__':
    # Render off-screen so the script works without a display.
    plt.switch_backend('agg')

    label1 = "Natural"  # legend label (fixed typo: was "Natral")

    # Each log line is "<iteration> <accuracy>".  BUG FIX: parse with
    # float() instead of eval() so log content cannot execute as code,
    # and close the file via a context manager.
    data1 = []
    with open(args.path) as log1:
        for line in log1:
            fields = line.split()
            if fields:
                data1.append([float(v) for v in fields])
    print(len(data1))

    x = np.array([row[0] for row in data1]) + 1
    nat_acc1 = np.array([row[1] for row in data1])

    current_palette = sns.color_palette()
    plt.plot(x, nat_acc1, color=current_palette[0], lw=2, label=label1)
    plt.xlabel("Training iterations", fontsize=15)
    plt.ylabel("Accuracy", fontsize=15)
    plt.tick_params(labelsize=10)
    plt.legend(fontsize='x-large')
    plt.savefig('data-pic/natural-training.png')
| <filename>draw_natural_training.py
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import argparse

parser = argparse.ArgumentParser(description='CIFAR ACCURACY')
parser.add_argument('--path', default='natural-training-log.txt',
                    help='model name.')
args = parser.parse_args()

# log_file = open(args.log_path, 'w')

if __name__ == '__main__':
    # Render off-screen so the script works without a display.
    plt.switch_backend('agg')

    label1 = "Natural"  # legend label (fixed typo: was "Natral")

    # Each log line is "<iteration> <accuracy>".  BUG FIX: parse with
    # float() instead of eval() so log content cannot execute as code,
    # and close the file via a context manager.
    data1 = []
    with open(args.path) as log1:
        for line in log1:
            fields = line.split()
            if fields:
                data1.append([float(v) for v in fields])
    print(len(data1))

    x = np.array([row[0] for row in data1]) + 1
    nat_acc1 = np.array([row[1] for row in data1])

    current_palette = sns.color_palette()
    plt.plot(x, nat_acc1, color=current_palette[0], lw=2, label=label1)
    plt.xlabel("Training iterations", fontsize=15)
    plt.ylabel("Accuracy", fontsize=15)
    plt.tick_params(labelsize=10)
    plt.legend(fontsize='x-large')
    plt.savefig('data-pic/natural-training.png')
| en | 0.689498 | # log_file = open(args.log_path, 'w') | 2.793746 | 3 |
plot_drugs_month_temp.py | rionbr/ddi-blumenau | 1 | 6613601 | # coding=utf-8
# Author: <NAME>
# Date: Nov 16, 2014
#
# Description: Plot DDI timelines
#
#
# coding=utf-8
import matplotlib as mpl
import matplotlib.style
mpl.style.use('classic')
mpl.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import util
from datetime import datetime
# Plot Styles
styles = ['r-o','g-o','b-o','c-o','m-o', 'r-s','g-s','b-s','c-s','m-s', 'r^','g^','b^','c^','m^']
months = ['Jan','April','Jul','Oct','Jan','April','Jul']
#
# Load CSVs
#
df_file = 'data/dumpsql_final.csv'
df = pd.read_csv(df_file, encoding='utf-8', parse_dates=['date_disp'], nrows=None, dtype={'id_usuario':np.int64})
#dfu, dfc, dfi, dfs = util.dfUsersInteractionsSummary()
df['qt_drugs'] = 1
print df.head()
# Load Clima
dfClima = util.dfBnuClima()
dfClima = pd.concat([dfClima, dfClima , dfClima ])
print dfClima
dfClima['date'] = pd.date_range(start='2013-01-01', end='2015-12-31', freq='MS')
dfClima = dfClima.set_index('date')
print '>> dfClima'
print dfClima
#
# Plot Timelines of DDI
#
print '--- Grouping Month-Dispensed (Month) ---'
dfg = df.groupby(pd.Grouper(key='date_disp', freq='MS')).agg(
{
'qt_drugs':'sum'
})
print dfg.head()
# Transform in Thousands
dfg['qt_drugs'] = dfg['qt_drugs'] / 1000.
# Remove
#dfsg = dfsg.loc[ ~dfsg.index.isin(['2015-07','2015-08']), : ]
#
# Plot
#
print '- Plotting -'
#fig = plt.figure(figsize=(10,4))
fig = plt.figure(figsize=(5.5,3))
ax = plt.subplot(1, 1 ,1)
plt.rc('font', size=12)
plt.rc('legend', fontsize=10)
plt.rc('legend', numpoints=1)
ax.set_title('Drug intervals dispensed')
ax.plot(dfg.loc[:,:].index , dfg.loc[:,'qt_drugs'].values, label='Dispensed', c='green', ls='-', marker='o', markersize=8, zorder=99)
ax.tick_params(axis='both', which='major')
ax.tick_params(axis='both', which='minor', labelsize=0)
ax.grid(which='major')
ax.set_ylabel(r'$\alpha$ (in thousands)')
months_maj = MonthLocator(range(1, 13), bymonthday=1, interval=4)
months_min = MonthLocator(range(1, 13), bymonthday=1, interval=1)
months_maj_fmt = DateFormatter("%b %y")
ax.xaxis.set_major_locator(months_maj)
ax.xaxis.set_major_formatter(months_maj_fmt)
ax.xaxis.set_minor_locator(months_min)
ax.set_xlim(datetime(2013,12,15),datetime(2015,07,01))
#ax.set_ylim(50,115)
#
axb = ax.twinx()
axb.plot(dfClima.index.values, dfClima['temp_c_mean'].values, c='orange',ls='-', marker='', lw=4, alpha=0.6, zorder=5)
axb.fill_between(dfClima.index.values, dfClima['temp_c_min'].values, dfClima['temp_c_max'].values, facecolor='orange', linewidth=2, edgecolor='orange', alpha=.3, zorder=4)
axb.axvspan(datetime(2014,01,01), datetime(2014,06,30), facecolor='grey', alpha=0.3, zorder=1)
axb.set_ylabel('Temp. $^{\circ}$C')
axb.set_ylim(0,30)
axb.xaxis.set_major_locator(months_maj)
axb.xaxis.set_major_formatter(months_maj_fmt)
ax.set_zorder(axb.get_zorder()+1) #put ax in front of axb
ax.patch.set_visible(False) # hide the 'canvas'
def lagged_corr(datax, datay, lag=0):
    """ Lag-N cross correlation.

    Parameters
    ----------
    lag : int, default 0
    datax, datay : pandas.Series objects of equal length

    Returns
    ----------
    crosscorr : float
    """
    shifted = datay.shift(lag)
    return datax.corr(shifted)
print dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean']
print dfg.loc['2014-06': , 'qt_drugs']
print 'AutoCorrelation:'
print 'Clima:' , dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean'].autocorr(lag=1)
print 'QtDrugs:' , dfg.loc['2014-06': , 'qt_drugs'].autocorr(lag=1)
print 'Correlation:'
print dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean'].corr(dfg.loc['2014-06':,'qt_drugs'])
print 'Lagged Correlation:'
print lagged_corr( dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean'] , dfg.loc['2014-06':,'qt_drugs'] , lag=1)
print 'Export Plot File'
#plt.subplots_adjust(left=0.08, bottom=0.22, right=0.98, top=0.92, wspace=0.35, hspace=0.0)
plt.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.92, wspace=0.35, hspace=0.0)
plt.tight_layout()
plt.savefig('images/img-drugs-month-temp.pdf', dpi=300)
plt.close()
| # coding=utf-8
# Author: <NAME>
# Date: Nov 16, 2014
#
# Description: Plot DDI timelines
#
#
# coding=utf-8
import matplotlib as mpl
import matplotlib.style
mpl.style.use('classic')
mpl.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import util
from datetime import datetime
# Plot Styles
styles = ['r-o','g-o','b-o','c-o','m-o', 'r-s','g-s','b-s','c-s','m-s', 'r^','g^','b^','c^','m^']
months = ['Jan','April','Jul','Oct','Jan','April','Jul']
#
# Load CSVs
#
df_file = 'data/dumpsql_final.csv'
df = pd.read_csv(df_file, encoding='utf-8', parse_dates=['date_disp'], nrows=None, dtype={'id_usuario':np.int64})
#dfu, dfc, dfi, dfs = util.dfUsersInteractionsSummary()
df['qt_drugs'] = 1
print df.head()
# Load Clima
dfClima = util.dfBnuClima()
dfClima = pd.concat([dfClima, dfClima , dfClima ])
print dfClima
dfClima['date'] = pd.date_range(start='2013-01-01', end='2015-12-31', freq='MS')
dfClima = dfClima.set_index('date')
print '>> dfClima'
print dfClima
#
# Plot Timelines of DDI
#
print '--- Grouping Month-Dispensed (Month) ---'
dfg = df.groupby(pd.Grouper(key='date_disp', freq='MS')).agg(
{
'qt_drugs':'sum'
})
print dfg.head()
# Transform in Thousands
dfg['qt_drugs'] = dfg['qt_drugs'] / 1000.
# Remove
#dfsg = dfsg.loc[ ~dfsg.index.isin(['2015-07','2015-08']), : ]
#
# Plot
#
print '- Plotting -'
#fig = plt.figure(figsize=(10,4))
fig = plt.figure(figsize=(5.5,3))
ax = plt.subplot(1, 1 ,1)
plt.rc('font', size=12)
plt.rc('legend', fontsize=10)
plt.rc('legend', numpoints=1)
ax.set_title('Drug intervals dispensed')
ax.plot(dfg.loc[:,:].index , dfg.loc[:,'qt_drugs'].values, label='Dispensed', c='green', ls='-', marker='o', markersize=8, zorder=99)
ax.tick_params(axis='both', which='major')
ax.tick_params(axis='both', which='minor', labelsize=0)
ax.grid(which='major')
ax.set_ylabel(r'$\alpha$ (in thousands)')
months_maj = MonthLocator(range(1, 13), bymonthday=1, interval=4)
months_min = MonthLocator(range(1, 13), bymonthday=1, interval=1)
months_maj_fmt = DateFormatter("%b %y")
ax.xaxis.set_major_locator(months_maj)
ax.xaxis.set_major_formatter(months_maj_fmt)
ax.xaxis.set_minor_locator(months_min)
ax.set_xlim(datetime(2013,12,15),datetime(2015,07,01))
#ax.set_ylim(50,115)
#
axb = ax.twinx()
axb.plot(dfClima.index.values, dfClima['temp_c_mean'].values, c='orange',ls='-', marker='', lw=4, alpha=0.6, zorder=5)
axb.fill_between(dfClima.index.values, dfClima['temp_c_min'].values, dfClima['temp_c_max'].values, facecolor='orange', linewidth=2, edgecolor='orange', alpha=.3, zorder=4)
axb.axvspan(datetime(2014,01,01), datetime(2014,06,30), facecolor='grey', alpha=0.3, zorder=1)
axb.set_ylabel('Temp. $^{\circ}$C')
axb.set_ylim(0,30)
axb.xaxis.set_major_locator(months_maj)
axb.xaxis.set_major_formatter(months_maj_fmt)
ax.set_zorder(axb.get_zorder()+1) #put ax in front of axb
ax.patch.set_visible(False) # hide the 'canvas'
def lagged_corr(datax, datay, lag=0):
    """ Lag-N cross correlation.

    Parameters
    ----------
    lag : int, default 0
    datax, datay : pandas.Series objects of equal length

    Returns
    ----------
    crosscorr : float
    """
    shifted = datay.shift(lag)
    return datax.corr(shifted)
print dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean']
print dfg.loc['2014-06': , 'qt_drugs']
print 'AutoCorrelation:'
print 'Clima:' , dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean'].autocorr(lag=1)
print 'QtDrugs:' , dfg.loc['2014-06': , 'qt_drugs'].autocorr(lag=1)
print 'Correlation:'
print dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean'].corr(dfg.loc['2014-06':,'qt_drugs'])
print 'Lagged Correlation:'
print lagged_corr( dfClima.loc[ '2014-06-01':'2015-06-30','temp_c_mean'] , dfg.loc['2014-06':,'qt_drugs'] , lag=1)
print 'Export Plot File'
#plt.subplots_adjust(left=0.08, bottom=0.22, right=0.98, top=0.92, wspace=0.35, hspace=0.0)
plt.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.92, wspace=0.35, hspace=0.0)
plt.tight_layout()
plt.savefig('images/img-drugs-month-temp.pdf', dpi=300)
plt.close()
| en | 0.395148 | # coding=utf-8 # Author: <NAME> # Date: Nov 16, 2014 # # Description: Plot DDI timelines # # # coding=utf-8 # Plot Styles # # Load CSVs # #dfu, dfc, dfi, dfs = util.dfUsersInteractionsSummary() # Load Clima # # Plot Timelines of DDI # # Transform in Thousands # Remove #dfsg = dfsg.loc[ ~dfsg.index.isin(['2015-07','2015-08']), : ] # # Plot # #fig = plt.figure(figsize=(10,4)) #ax.set_ylim(50,115) # #put ax in front of axb # hide the 'canvas' Lag-N cross correlation. Parameters ---------- lag : int, default 0 datax, datay : pandas.Series objects of equal length Returns ---------- crosscorr : float #plt.subplots_adjust(left=0.08, bottom=0.22, right=0.98, top=0.92, wspace=0.35, hspace=0.0) | 2.310635 | 2 |
Privateers.py | lwwiley17/gamescraper | 0 | 6613602 | <gh_stars>0
from bs4 import BeautifulSoup
import requests
import csv
#Use this line for individual games
link = input("Provide a link to the game you want to break down: ")
source = requests.get(link).text
#Use this line for entire seasons
#TO BE COMPLETED
soup = BeautifulSoup(source, 'lxml')
name = soup.find("title").text.strip()
name = name.replace('/','.')
game = soup.find(id="play-by-play")
with open(f"{name}.csv", "w", newline="") as f:
thewriter = csv.writer(f)
count = 0
for table in game.find_all('table'):
for row in table.find_all('tr'):
temp = []
for data in row.find_all('td'):
temp.append(data.text.strip())
if len(temp) > 0:
thewriter.writerow(temp) | from bs4 import BeautifulSoup
import requests
import csv
#Use this line for individual games
link = input("Provide a link to the game you want to break down: ")
source = requests.get(link).text
#Use this line for entire seasons
#TO BE COMPLETED
soup = BeautifulSoup(source, 'lxml')
name = soup.find("title").text.strip()
name = name.replace('/','.')
game = soup.find(id="play-by-play")
with open(f"{name}.csv", "w", newline="") as f:
thewriter = csv.writer(f)
count = 0
for table in game.find_all('table'):
for row in table.find_all('tr'):
temp = []
for data in row.find_all('td'):
temp.append(data.text.strip())
if len(temp) > 0:
thewriter.writerow(temp) | en | 0.852491 | #Use this line for individual games #Use this line for entire seasons #TO BE COMPLETED | 3.135441 | 3 |
src/test_code.py | skarifahmed/FGrade | 2 | 6613603 | # -*- coding: utf-8 -*-
"""
Created on Fri May 15 06:39:17 2020
@author: Sikha
"""
import cv2
from keras.models import model_from_json
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import numpy as np
from keras.utils import to_categorical
import os
import glob
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
array=[]
path_image='Test_10/'
def Read_data(path_image):
    """Load every .jpg under the class subfolders of *path_image*.

    Returns (images, one_hot_labels, integer_labels) where images are
    224x224 float32 arrays scaled to [0, 1] and the label index is the
    enumeration order of the class folders.
    """
    images = []
    labels = []
    for class_idx, class_dir in enumerate(os.listdir(path_image)):
        pattern = os.path.join(path_image, class_dir) + '/*.jpg'
        for img_path in glob.glob(pattern):
            img = cv2.imread(img_path)
            images.append(cv2.resize(img, (224, 224)))
            labels.append(class_idx)
    x_test = np.asarray(images, dtype='float32') / 255.0
    y_raw = np.asarray(labels, dtype='int')
    # x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.10,random_state=4)
    yy_test = y_raw.copy()
    y_test = to_categorical(y_raw)
    return x_test, y_test, yy_test
##Read the single image
#def single_image():
# img = cv2.imread(r'C:\Users\Sikha\Desktop\16.jpg')
# resized_img=cv2.resize(img,(128,128))
# array.append(resized_img)
# x=np.asarray(array,dtype='float32')/255.0
# return x
# load json and create model
def reload():
    """Rebuild the Keras model from 'model.json' and load weights from 'ckpt.h5'.

    Returns:
        The reconstructed Keras model; its summary is printed as a side effect.
    """
    # Context manager guarantees the file is closed even if read() raises
    # (the original open()/close() pair leaked the handle on error).
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("ckpt.h5")
    loaded_model.summary()
    return loaded_model
#check the accuracy
def Score():
    """Evaluate the reloaded model on the images under `path_image`.

    Prints the classification report, the accuracy, and the confusion matrix.
    """
    X_test, Y_test, YY_test = Read_data(path_image)
    loaded_model = reload()
    Y_pred = loaded_model.predict(X_test).argmax(axis=1)
    score = accuracy_score(YY_test, Y_pred)
    # BUG FIX: compare integer labels with integer labels.  The original
    # passed the one-hot `Y_test` together with class-index `Y_pred`, which
    # makes sklearn raise ("mix of multilabel-indicator and multiclass
    # targets") instead of printing the report.
    print("Classification Reports:\n", classification_report(YY_test, Y_pred))
    print('Accuracy=', score)
    con_matrix = confusion_matrix(YY_test, Y_pred)
    print('Confusion Matrix:\n', con_matrix)
Score()
| # -*- coding: utf-8 -*-
"""
Created on Fri May 15 06:39:17 2020
@author: Sikha
"""
import cv2
from keras.models import model_from_json
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import numpy as np
from keras.utils import to_categorical
import os
import glob
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
array=[]
path_image='Test_10/'
def Read_data(path_image):
array=[]
label=[]
folder=os.listdir(path_image)
for j,out_folder in enumerate(folder):
image_path=os.path.join(path_image,out_folder)
image_list=glob.glob(image_path+'/*.jpg')
for i,image in enumerate(image_list):
img=cv2.imread(image)
resize_image=cv2.resize(img,(224,224))
array.append(resize_image)
label.append(int(j))
x_test=np.asarray(array,dtype='float32')/255.0
y_test=np.asarray(label,dtype='int')
# x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.10,random_state=4)
yy_test=y_test.copy()
y_test=to_categorical(y_test)
return x_test,y_test,yy_test
##Read the single image
#def single_image():
# img = cv2.imread(r'C:\Users\Sikha\Desktop\16.jpg')
# resized_img=cv2.resize(img,(128,128))
# array.append(resized_img)
# x=np.asarray(array,dtype='float32')/255.0
# return x
# load json and create model
def reload():
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("ckpt.h5")
loaded_model.summary()
return loaded_model
#check the accuracy
def Score():
X_test,Y_test,YY_test=Read_data(path_image)
loaded_model=reload()
Y_pred=loaded_model.predict(X_test).argmax(axis=1)
score=accuracy_score(YY_test,Y_pred)
print("Classification Reports:\n",classification_report(Y_test,Y_pred))
print('Accuracy=',score)
con_matrix=confusion_matrix(YY_test,Y_pred)
print('Confusion Matrix:\n',con_matrix)
Score()
| en | 0.523557 | # -*- coding: utf-8 -*- Created on Fri May 15 06:39:17 2020 @author: Sikha # x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.10,random_state=4) ##Read the single image #def single_image(): # img = cv2.imread(r'C:\Users\Sikha\Desktop\16.jpg') # resized_img=cv2.resize(img,(128,128)) # array.append(resized_img) # x=np.asarray(array,dtype='float32')/255.0 # return x # load json and create model #check the accuracy | 2.753757 | 3 |
webapp.py | Knudah/Keggviewer | 1 | 6613604 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import urllib
from flask import Flask, request, url_for, redirect, render_template, flash
app = Flask(__name__)
app.config.update(dict(
DEBUG=True,
SECRET_KEY=os.urandom(24),
USERNAME='admin',
PASSWORD='<PASSWORD>'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
socket = urllib.urlopen("http://rest.kegg.jp/list/pathway/hsa")
htmlSource = socket.read()
socket.close()
pathways = re.findall('path:((?:.)*?) ', htmlSource)
# numberofpathways = len(pathways)
pathwayname = re.findall('(?: ).*', htmlSource)
for line, i in enumerate(pathwayname):
pathwayname[line] = pathwayname[line].strip("\t")
@app.route("/", methods=['GET', 'POST'])
def home():
if request.method == 'GET':
return render_template('index.html', pathways=pathways, pathwayname=pathwayname)
else:
path = request.form['path']
return redirect(url_for('preview', path=path))
@app.route("/<string:path>", methods=['GET', 'POST'])
def preview(path):
if request.method == 'GET':
return render_template('view.html', pathways=pathways, path=path, pathname=pathwayname[pathways.index(path)])
else:
path = request.form['path']
return redirect(url_for('preview', path=path))
@app.errorhandler(404)
def not_found(error):
    """Handle unknown URLs: flash a message and bounce back to the home page."""
    flash('404 - Page not found!')
    return redirect(url_for('home'))
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import urllib
from flask import Flask, request, url_for, redirect, render_template, flash
app = Flask(__name__)
app.config.update(dict(
DEBUG=True,
SECRET_KEY=os.urandom(24),
USERNAME='admin',
PASSWORD='<PASSWORD>'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
socket = urllib.urlopen("http://rest.kegg.jp/list/pathway/hsa")
htmlSource = socket.read()
socket.close()
pathways = re.findall('path:((?:.)*?) ', htmlSource)
# numberofpathways = len(pathways)
pathwayname = re.findall('(?: ).*', htmlSource)
for line, i in enumerate(pathwayname):
pathwayname[line] = pathwayname[line].strip("\t")
@app.route("/", methods=['GET', 'POST'])
def home():
if request.method == 'GET':
return render_template('index.html', pathways=pathways, pathwayname=pathwayname)
else:
path = request.form['path']
return redirect(url_for('preview', path=path))
@app.route("/<string:path>", methods=['GET', 'POST'])
def preview(path):
if request.method == 'GET':
return render_template('view.html', pathways=pathways, path=path, pathname=pathwayname[pathways.index(path)])
else:
path = request.form['path']
return redirect(url_for('preview', path=path))
@app.errorhandler(404)
def not_found(error):
flash('404 - Page not found!')
return redirect(url_for('home'))
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port) | en | 0.465366 | #!/usr/bin/env python # -*- coding: utf-8 -*- # numberofpathways = len(pathways) | 2.471611 | 2 |
unittest_reinvent/scaffoldfilter_tests/__init__.py | fujirock/Reinvent | 4 | 6613605 | <gh_stars>1-10
from unittest_reinvent.scaffoldfilter_tests.test_no_filter import *
from unittest_reinvent.scaffoldfilter_tests.test_murcko_scaffold_filter import * | from unittest_reinvent.scaffoldfilter_tests.test_no_filter import *
from unittest_reinvent.scaffoldfilter_tests.test_murcko_scaffold_filter import * | none | 1 | 1.11213 | 1 | |
Atto_test.py | ScopeFoundry/HW_attocube_ecc100 | 0 | 6613606 | '''
Created on Jul 30, 2014
@author: Frank
'''
from __future__ import absolute_import
import time
print 'atto test here'
try:
from .attocube_ecc100 import AttoCubeECC100
except Exception as err:
print "could not load modules needed for AttoCubeECC100:", err
import winsound
X_AXIS = 0
Y_AXIS = 1
def beep( msec = 100 ):
    """Sound an audible beep for `msec` milliseconds (Windows only)."""
    print chr(7),  # also emit the terminal bell character
    Freq = 2000 # frequency in Hz (original comment wrongly said 2500)
    winsound.Beep(Freq,msec)
def setup():
    """Open the AttoCube ECC100 controller and enable the X and Y axes."""
    ecc = AttoCubeECC100()
    ecc.enable_axis(X_AXIS, enable=True)
    ecc.enable_axis(Y_AXIS, enable=True)
    return ecc
def set_x( x, loop = 50, delay = 0.05, reset = -5):
    """Move the X axis (module-level `ecc`) to target `x` and return the mean
    of `loop` position readings taken `delay` seconds apart.

    NOTE(review): `reset` is currently unused -- the pre-move reset step
    below is commented out.
    """
    #ecc.write_target_position_axis(X_AXIS,reset)
    #time.sleep(delay)
    ecc.write_target_position_axis(X_AXIS,x)
    print 'set position ', x
    pos = 0
    beep()  # audible cue that the move command was issued
    for i in range(loop):
        pos += ecc.read_position_axis(X_AXIS)
        time.sleep(delay)
        #print i, pos
    pos /= float(loop)  # average of all samples
    print 'mean position ', pos
    return pos
delta = 2
count = 8
wait = 20
ecc = setup()
for i in range(count ):
set_x( i*delta, wait )
set_x(0)
for i in range(count/2):
set_x(delta,wait)
set_x(0,wait)
ecc.close()
# clean up hardware object
if __name__ == '__main__':
pass | '''
Created on Jul 30, 2014
@author: Frank
'''
from __future__ import absolute_import
import time
print 'atto test here'
try:
from .attocube_ecc100 import AttoCubeECC100
except Exception as err:
print "could not load modules needed for AttoCubeECC100:", err
import winsound
X_AXIS = 0
Y_AXIS = 1
def beep( msec = 100 ):
print chr(7),
Freq = 2000 # Set Frequency To 2500 Hertz
winsound.Beep(Freq,msec)
def setup():
ecc = AttoCubeECC100()
ecc.enable_axis(X_AXIS, enable=True)
ecc.enable_axis(Y_AXIS, enable=True)
return ecc
def set_x( x, loop = 50, delay = 0.05, reset = -5):
#ecc.write_target_position_axis(X_AXIS,reset)
#time.sleep(delay)
ecc.write_target_position_axis(X_AXIS,x)
print 'set position ', x
pos = 0
beep()
for i in range(loop):
pos += ecc.read_position_axis(X_AXIS)
time.sleep(delay)
#print i, pos
pos /= float(loop)
print 'mean position ', pos
return pos
delta = 2
count = 8
wait = 20
ecc = setup()
for i in range(count ):
set_x( i*delta, wait )
set_x(0)
for i in range(count/2):
set_x(delta,wait)
set_x(0,wait)
ecc.close()
# clean up hardware object
if __name__ == '__main__':
pass | en | 0.594531 | Created on Jul 30, 2014 @author: Frank # Set Frequency To 2500 Hertz #ecc.write_target_position_axis(X_AXIS,reset) #time.sleep(delay) #print i, pos # clean up hardware object | 2.110116 | 2 |
drains/__init__.py | fmarani/drains | 0 | 6613607 | <filename>drains/__init__.py<gh_stars>0
"""
Drains is an ASGI middleware for Server sent events backed by Redis streams
"""
import asyncio
import logging
import aioredis
__version__ = "0.1.2"
logger = logging.getLogger(__name__)
async def ssend_async(stream, *, event, data=None):
    """Append a server-sent event to the Redis stream `stream`.

    The entry always carries an ``event`` field; a ``data`` field is added
    only when `data` is truthy.
    """
    logger.info("sending event to %s", stream)
    payload = {b"event": event}
    if data:
        payload[b"data"] = data
    conn = await aioredis.create_redis("redis://localhost")
    await conn.xadd(stream, payload)
    conn.close()
    await conn.wait_closed()
def ssend(*args, **kwargs):
    """Blocking wrapper: run `ssend_async` to completion on an event loop."""
    loop = _current_or_new_loop()
    loop.run_until_complete(ssend_async(*args, **kwargs))


def _current_or_new_loop():
    """Return the thread's event loop, creating and installing one if absent."""
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop
| <filename>drains/__init__.py<gh_stars>0
"""
Drains is an ASGI middleware for Server sent events backed by Redis streams
"""
import asyncio
import logging
import aioredis
__version__ = "0.1.2"
logger = logging.getLogger(__name__)
async def ssend_async(stream, *, event, data=None):
logger.info("sending event to %s", stream)
fields = {b"event": event}
if data:
fields[b"data"] = data
redis = await aioredis.create_redis("redis://localhost")
result = await redis.xadd(stream, fields)
redis.close()
await redis.wait_closed()
def ssend(*args, **kwargs):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(ssend_async(*args, **kwargs))
| en | 0.928808 | Drains is an ASGI middleware for Server sent events backed by Redis streams | 2.463617 | 2 |
moviecreator/create_movie.py | rhuygen/movie_creator | 1 | 6613608 | """Create a movie from a list of images."""
import imageio
import glob
import argparse
from skimage.transform import resize
from skimage.util import img_as_ubyte
from rich.console import Console
def create_movie(video_name, video_format, fn_glob, *, shape, loop, noresize, fps):
    """
    Create an MP4 movie from the PNG images

    All images need to be the same size. Therefore they will be resized. If the
    shape argument is not given, all images will be resized to the shape of the first image.
    Please note the image shape is a tuple with three values (x-size, y-size, depth=4).

    The image files in 'fn_glob' will be sorted by name.

    Args:
        video_name (str): The name of the output video. The format is MP4.
        video_format (str): FFMPEG or MP4
        fn_glob (str): a filename glob [default='*.png']
        shape (tuple): the required shape of the images
        loop (int): number of times to repeat the sequence of images
        noresize (bool): if True, skip resizing (all images must already share one size)
        fps (int): the number of frames per second.
            NOTE(review): fps is only passed to the writer on the FFMPEG
            branch below; the MP4 branch uses the writer's default -- confirm.

    Reads the module-level `verbose` flag (set by `main`) and prints progress
    through the module-level rich `console`.
    """
    images = []
    for img in sorted(glob.glob(fn_glob)):
        image = imageio.imread(img)
        if not shape:
            # First image (with no explicit shape) fixes the target shape.
            shape = image.shape
        else:
            if not noresize:
                image = img_as_ubyte(resize(image, shape, anti_aliasing=True))
        if verbose > 1:
            console.print(f"{img}, {type(image)}, {image.shape=}")
        images.append(image)
    if verbose:
        console.print(f"Number of original images: {len(images)}")
    # Repeat the whole frame sequence `loop` times.
    all_images = []
    for _ in range(loop):
        all_images.extend(images)
    if verbose:
        console.print(f"Number of concatenated images: {len(all_images)}")
    if video_format.lower() == "ffmpeg":
        # yuv420p keeps the output playable in common players.
        kwargs = {'fps': fps, 'pixelformat': 'yuv420p'}
        imageio.mimwrite(video_name, all_images, 'FFMPEG', **kwargs)
    else:
        kwargs = {}
        imageio.mimwrite(video_name, all_images, 'MP4', **kwargs)
def parse_arguments():
    """
    Prepare the arguments that are specific for this application.

    Returns:
        argparse.Namespace: the parsed arguments.  The ``shape`` attribute is
        converted from its string form "(w, h)" or "(w, h, d)" into a tuple
        of ints, or left as None when the option is not given.
    """
    parser = argparse.ArgumentParser(
        prog="create_movie",
        description=(
            "Create an MP4 movie from the given PNG image files.\n\n"
            "Note that color images can be easily converted to grayscale if you set the \n"
            "last element of shape to 1."
        ),
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        "--verbose", "-v",
        action="count",
        default=0,
        help=("Print verbose messages. "
              "If this option is specified multiple times, output will be more verbose.")
    )
    parser.add_argument(
        "--video-name",
        required=True,
        type=str, default="output.mp4",
        help="The name of the output video [default='output.mp4'].",
    )
    parser.add_argument(
        "--video-format",
        type=str, default="FFMPEG",
        help="The format of the output video.",
    )
    parser.add_argument(
        "--files",
        required=True,
        type=str, default="*.png",
        help="A file glob [default='*.png']. Should be put in single quotes.",
    )
    parser.add_argument(
        "--shape",
        type=str, default=None,
        help="The required shape to which the images will be resized, e.g. '(2186, 3496, 4)'.",
    )
    parser.add_argument(
        "--fps",
        type=int, default=20,
        help="The number of frames per second [default=20].",
    )
    parser.add_argument(
        "--loop",
        type=int, default=1,
        help="The number of times the video has to loop over all the frames [default=1].",
    )
    parser.add_argument(
        "--noresize", "--no-resize",
        action="store_true",
        help="Don't resize if all images already have the same size.",
    )
    arguments = parser.parse_args()
    # Post-process --shape: "(w, h[, d])" string -> tuple of ints.
    if arguments.shape:
        shape = arguments.shape
        if not (shape.startswith('(') and shape.endswith(')')):
            parser.error("--shape must be a tuple, i.e. (width, height) or (width, height, depth).")
        shape = shape[1:-1].split(',')
        # Both 2-tuples (grayscale) and 3-tuples (with depth) are accepted.
        if len(shape) not in (2, 3):
            parser.error("--shape must be a tuple, i.e. (width, height) or (width, height, depth).")
        try:
            # int() tolerates surrounding whitespace, so "(2186, 3496, 4)" parses cleanly.
            shape = tuple(int(x) for x in shape)
        except ValueError:
            # Report a clean usage error instead of an unhandled traceback.
            parser.error("--shape components must be integers, e.g. '(2186, 3496, 4)'.")
        arguments.shape = shape
    return arguments
def main():
    """Entry point: parse CLI arguments and drive the movie conversion."""
    global verbose
    options = parse_arguments()
    verbose = options.verbose
    create_movie(
        options.video_name,
        options.video_format,
        options.files,
        shape=options.shape,
        loop=options.loop,
        noresize=options.noresize,
        fps=options.fps,
    )
console = Console()
verbose = 0
if __name__ == "__main__":
main()
| """Create a movie from a list of images."""
import imageio
import glob
import argparse
from skimage.transform import resize
from skimage.util import img_as_ubyte
from rich.console import Console
def create_movie(video_name, video_format, fn_glob, *, shape, loop, noresize, fps):
"""
Create an MP4 movie from the PNG images
All images need to be the same size. Therefore they will be resized. If the
shape argument is not given, all images will be resized to the shape of the first image.
Please note the image shape is a tuple with three values (x-size, y-size, depth=4).
The image files in 'fn_glob' will be sorted by name.
Args:
video_name (str): The name of the output video. The format is MP4.
video_format (str): FFMPEG or MP4
fn_glob (str): a filename glob [default='*.png']
shape (tuple): the required shape of the images
loop (int): number of times to repeat the sequence of images
fps (int): the number of frames per second
"""
images = []
for img in sorted(glob.glob(fn_glob)):
image = imageio.imread(img)
if not shape:
shape = image.shape
else:
if not noresize:
image = img_as_ubyte(resize(image, shape, anti_aliasing=True))
if verbose > 1:
console.print(f"{img}, {type(image)}, {image.shape=}")
images.append(image)
if verbose:
console.print(f"Number of original images: {len(images)}")
all_images = []
for _ in range(loop):
all_images.extend(images)
if verbose:
console.print(f"Number of concatenated images: {len(all_images)}")
if video_format.lower() == "ffmpeg":
kwargs = {'fps': fps, 'pixelformat': 'yuv420p'}
imageio.mimwrite(video_name, all_images, 'FFMPEG', **kwargs)
else:
kwargs = {}
imageio.mimwrite(video_name, all_images, 'MP4', **kwargs)
def parse_arguments():
"""
Prepare the arguments that are specific for this application.
"""
parser = argparse.ArgumentParser(
prog="create_movie",
description=(
"Create an MP4 movie from the given PNG image files.\n\n"
"Note that color images can be easily converted to grayscale if you set the \n"
"last element of shape to 1."
),
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
"--verbose", "-v",
action="count",
default=0,
help=("Print verbose messages. "
"If this option is specified multiple times, output will be more verbose.")
)
parser.add_argument(
"--video-name",
required=True,
type=str, default="output.mp4",
help="The name of the output video [default='output.mp4'].",
)
parser.add_argument(
"--video-format",
type=str, default="FFMPEG",
help="The format of the output video.",
)
parser.add_argument(
"--files",
required=True,
type=str, default="*.png",
help="A file glob [default='*.png']. Should be put in single quotes.",
)
parser.add_argument(
"--shape",
type=str, default=None,
help="The required shape to which the images will be resized, e.g. '(2186, 3496, 4)'.",
)
parser.add_argument(
"--fps",
type=int, default=20,
help="The number of frames per second [default=20].",
)
parser.add_argument(
"--loop",
type=int, default=1,
help="The number of times the video has to loop over all the frames [default=1].",
)
parser.add_argument(
"--noresize", "--no-resize",
action="store_true",
help="Don't resize if all images already have the same size.",
)
arguments = parser.parse_args()
if arguments.shape:
shape = arguments.shape
if not (shape.startswith('(') and shape.endswith(')')):
parser.error("--shape must be a tuple, i.e. (width, height, depth).")
shape = shape[1:-1].split(',')
if not (len(shape) == 2 or len(shape) == 3):
parser.error("--shape must be a tuple, i.e. (width, height, depth).")
shape = tuple(int(x) for x in shape)
arguments.shape = shape
return arguments
def main():
global verbose
args = parse_arguments()
verbose = args.verbose
create_movie(args.video_name, args.video_format, args.files,
shape=args.shape, loop=args.loop, noresize=args.noresize, fps=args.fps)
console = Console()
verbose = 0
if __name__ == "__main__":
main()
| en | 0.737122 | Create a movie from a list of images. Create an MP4 movie from the PNG images All images need to be the same size. Therefore they will be resized. If the shape argument is not given, all images will be resized to the shape of the first image. Please note the image shape is a tuple with three values (x-size, y-size, depth=4). The image files in 'fn_glob' will be sorted by name. Args: video_name (str): The name of the output video. The format is MP4. video_format (str): FFMPEG or MP4 fn_glob (str): a filename glob [default='*.png'] shape (tuple): the required shape of the images loop (int): number of times to repeat the sequence of images fps (int): the number of frames per second Prepare the arguments that are specific for this application. | 3.411694 | 3 |
sxs/utilities/lvcnr/conversion.py | dongzesun/sxs | 8 | 6613609 | """Class and function to convert SXS data to LVC-NR format"""
class SimulationConverter(object):
    """Convert SXS BBH catalog simulations to the LVC-NR HDF5 format.

    Construct once (catalog metadata is downloaded in `__init__`), then call
    `convert` for each simulation to be converted.
    """

    class Log(object):
        """Object to replace `log` function that used global `history`

        Instead of using a global `history` variable, just create an instance of this
        class, and pass it around to any function that called the old `log` function.
        Just like that function, this instance can be called with a string and will
        print the string while storing all the strings passed to it.

        Functions expecting an instance of this class can also use `print` as a default
        argument, which will work the same, but not store the value.
        """
        def __init__(self, quiet):
            self.history = ""
            self.quiet = quiet

        def __call__(self, string):
            if not self.quiet:
                print(string)
            self.history += string + "\n"

        def __str__(self):
            return str(self.history)

        def __repr__(self):
            return repr(self.history)

    def __init__(self, modes=8, tolerance=1e-06, quiet=False):
        """Create an object to be used for converting many waveforms to LVC format

        Parameters
        ----------
        modes : {int, '22only'}, optional
            Modes to be placed in the output file.  Passing '22only' results in the
            (2,2) and (2,-2) modes being output.  Otherwise, each (l,m) mode up to and
            including the given integer value will be output.  Note that for backwards
            compatibility, 'all' is also supported, and is equivalent to the default
            value of `8`.
        tolerance : float, optional
            Target tolerance used in `sxs.utilities.greedy_spline.minimal_indices`.
        quiet : bool, optional
            If False (the default), echo each line of the log as it is created;
            otherwise just store the final log in the output file.
        """
        import os
        import time
        import json
        import platform
        import numpy
        import scipy
        import h5py
        import sxs
        self.modes = modes
        self.tolerance = tolerance
        self.quiet = quiet
        # Record of the library versions in use; written into each output file.
        self.code_versions = (
            f"python=={platform.python_version()}\n"
            f"numpy=={numpy.version.version}\n"
            f"scipy=={scipy.version.full_version}\n"
            f"h5py=={h5py.version.version}\n"
            f"# h5py_api=={h5py.version.api_version}\n"
            f"# h5py_hdf5=={h5py.version.hdf5_version}\n"
            f"sxs=={sxs.__version__}\n"
        )
        # Template of the equivalent `convert_simulation` call; formatted with the
        # per-conversion arguments and logged at the start of each `convert`.
        self.command = (
            f"sxs.utilities.lvcnr.convert_simulation(\n"
            f"    sxs_data_path={{sxs_data_path!r}},\n"
            f"    out_path={{out_path!r}},\n"
            f"    truncation_time={{truncation_time!r}},\n"
            f"    resolution={{resolution!r}},\n"
            f"    modes={modes!r},\n"
            f"    tolerance={tolerance!r},\n"
            f"    quiet={quiet!r}\n"
            f")"
        )
        # Make sense of the `modes` parameter
        if modes == 'all':
            self.modes = [[l, m] for l in range(2, 9) for m in range(-l, l+1)]
        elif modes == '22only':
            self.modes = [[2, 2], [2, -2]]
        else:
            l_max = int(modes)
            self.modes = [[l, m] for l in range(2, l_max+1) for m in range(-l, l+1)]
        self.ell_max = max(lm[0] for lm in self.modes)
        # Load catalog metadata
        catalog = sxs.load("catalog")
        self.sxs_catalog = {
            'simulations': catalog.simulations,
            'records': catalog.records,
        }
        self.sxs_catalog_resolutions = sxs.zenodo.catalog.resolutions_for_simulations(self.sxs_catalog)

    def convert(self, sxs_data_path, out_path, truncation_time=None, resolution=None, truncation_tol=None):
        """Convert a simulation from the SXS BBH catalog into the LVC format.

        This function outputs a file in LVC format named SXS_BBH_####_Res#.h5 in
        out_path.

        Parameters
        ----------
        sxs_data_path : string
            Path to directory containing rhOverM_Asymptotic_GeometricUnits_CoM.h5,
            Horizons.h5, and metadata.json files.
        out_path : string
            Path where LVC-format file is to be output
        truncation_time : {None, float}
            If specified, truncate time series at this time instead of at the reference
            time
        resolution : {None, int}
            Integer giving the resolution (Lev) of the data to convert.  If this is not
            given, the resolution is determined automatically from sxs_data_path.
        truncation_tol : {None, bool, callable, float, array_like}, optional
            If None (the default) or False, nothing happens.  If True, the waveform
            data (amplitude and phase) are "truncated" so that bits with significance
            lower than `5e-2 * self.tolerance` are set to zero, for improved
            compression.  Any other input is passed to `sxs.TimeSeries.truncate`.  Note
            that this is not typically a very effective setting — perhaps providing
            another 10% compression; the output file sizes are dominated by fairly
            redundant time data unaffected by this parameter.
        """
        import os
        import time
        import json
        import h5py
        import sxs
        from .metadata import sxs_id_from_alt_names, write_metadata_from_sxs
        from .horizons import horizon_splines_from_sxs, write_horizon_splines_from_sxs
        from .waveforms import convert_modes
        log = self.Log(self.quiet)
        log(self.command.format(sxs_data_path=sxs_data_path, out_path=out_path,
                                truncation_time=truncation_time, resolution=resolution))
        log("Starting at "+time.strftime('%H:%M%p %Z on %b %d, %Y'))
        # Load metadata.json from this simulation
        with open(os.path.join(sxs_data_path, "metadata.json"), 'r') as f:
            metadata = json.load(f)
        # Determine the resolution of the input simulation, if needed
        if resolution is None:
            resolution = sxs.lev_number(sxs_data_path)
        if resolution is None:
            raise ValueError('No `resolution` value found in input arguments or data path.')
        sxs_id = sxs_id_from_alt_names(metadata['alternative_names'])
        log("Converting " + sxs_id)
        extrapolation_order = "Extrapolated_N2"
        log("Extrapolation order: " + extrapolation_order)
        out_name = out_path + "/" + sxs_id.replace(':', '_') + "_Res" + str(resolution) + ".h5"
        log("Output filename is '{0}'".format(out_name))
        # Convert and write the waveform modes; returns timing info and the
        # version history (if any) from the input waveform file.
        start_time, peak_time, version_hist = convert_modes(
            sxs_data_path + "/rhOverM_Asymptotic_GeometricUnits_CoM.h5",
            metadata, out_name, self.modes, extrapolation_order, log,
            truncation_time, tolerance=self.tolerance/2.0, truncation_tol=truncation_tol
        )
        # Spline-fit the horizon data and append it to the output file.
        with h5py.File(sxs_data_path + "/Horizons.h5", 'r') as horizons:
            horizon_splines_to_write, t_A, t_B, t_C = horizon_splines_from_sxs(
                horizons, start_time, peak_time, log, truncation_tol=truncation_tol
            )
        write_horizon_splines_from_sxs(out_name, horizon_splines_to_write, t_A, t_B, t_C, log)
        write_metadata_from_sxs(out_name, resolution, metadata,
                                self.sxs_catalog, self.sxs_catalog_resolutions,
                                start_time, peak_time, self.ell_max, log)
        with h5py.File(out_name, 'a') as out_file:
            # Save information about versions of code used in this function
            out_file["auxiliary-info"].create_dataset('CodeVersions.txt', data=self.code_versions)
            # Copy VersionHist.ver into the new file, if available
            if version_hist is not None:
                log("Writing VersionHist.ver")
                out_file["auxiliary-info"].create_dataset('VersionHist.ver', data=version_hist)
            else:
                log("No VersionHist.ver found. Data being converted is version 0.")
            # Store the log output by this script as a dataset
            log("Finishing at "+time.strftime('%H:%M%p %Z on %b %d, %Y'))
            log("Writing log")
            out_file["auxiliary-info"].create_dataset('ConversionLog.txt', data=log.history)
def convert_simulation(sxs_data_path, out_path, truncation_time=None, resolution=None,
                       modes=8, tolerance=1e-06, quiet=False):
    """Convert one SXS BBH catalog simulation to the LVC format.

    Convenience wrapper around `SimulationConverter.convert`: builds a
    converter with the given `modes`/`tolerance`/`quiet` settings and runs a
    single conversion, writing SXS_BBH_####_Res#.h5 into `out_path`.

    When converting many simulations, construct one `SimulationConverter`
    yourself and call its `convert` method repeatedly instead — the catalog
    metadata is then loaded only once.

    Parameters
    ----------
    sxs_data_path : string
        Directory holding rhOverM_Asymptotic_GeometricUnits_CoM.h5,
        Horizons.h5, and metadata.json.
    out_path : string
        Directory where the LVC-format file is written.
    truncation_time : {None, float}, optional
        If given, truncate the time series here instead of at the reference time.
    resolution : {None, int}, optional
        Lev number of the input data; auto-detected from `sxs_data_path` when None.
    modes : {int, '22only'}, optional
        Which (l, m) modes to write; see `SimulationConverter` for details.
    tolerance : float, optional
        Target tolerance for the greedy-spline compression.
    quiet : bool, optional
        Suppress echoing of log lines; the log is still stored in the output file.
    """
    converter = SimulationConverter(modes, tolerance, quiet)
    return converter.convert(sxs_data_path, out_path, truncation_time, resolution)
| """Class and function to convert SXS data to LVC-NR format"""
class SimulationConverter(object):
class Log(object):
"""Object to replace `log` function that used global `history`
Instead of using a global `history` variable, just create an instance of this
class, and pass it around to any function that called the old `log` function.
Just like that function, this instance can be called with a string and will
print the string while storing all the strings passed to it.
Functions expecting an instance of this class can also use `print` as a default
argument, which will work the same, but not store the value.
"""
def __init__(self, quiet):
self.history = ""
self.quiet = quiet
def __call__(self, string):
if not self.quiet:
print(string)
self.history += string + "\n"
def __str__(self):
return str(self.history)
def __repr__(self):
return repr(self.history)
def __init__(self, modes=8, tolerance=1e-06, quiet=False):
"""Create an object to be used for converting many waveforms to LVC format
Parameters
----------
modes : {int, '22only'}, optional
Modes to be placed in the output file. Passing '22only' results in the
(2,2) and (2,-2) modes being output. Otherwise, each (l,m) mode up to and
including the given integer value will be output. Note that for backwards
compatibility, 'all' is also supported, and is equivalent to the default
value of `8`.
tolerance : float, optional
Target tolerance used in `sxs.utilities.greedy_spline.minimal_indices`.
quiet : bool, optional
If False (the default), echo each line of the log as it is created;
otherwise just store the final log in the output file.
"""
import os
import time
import json
import platform
import numpy
import scipy
import h5py
import sxs
self.modes = modes
self.tolerance = tolerance
self.quiet = quiet
self.code_versions = (
f"python=={platform.python_version()}\n"
f"numpy=={numpy.version.version}\n"
f"scipy=={scipy.version.full_version}\n"
f"h5py=={h5py.version.version}\n"
f"# h5py_api=={h5py.version.api_version}\n"
f"# h5py_hdf5=={h5py.version.hdf5_version}\n"
f"sxs=={sxs.__version__}\n"
)
self.command = (
f"sxs.utilities.lvcnr.convert_simulation(\n"
f" sxs_data_path={{sxs_data_path!r}},\n"
f" out_path={{out_path!r}},\n"
f" truncation_time={{truncation_time!r}},\n"
f" resolution={{resolution!r}},\n"
f" modes={modes!r},\n"
f" tolerance={tolerance!r},\n"
f" quiet={quiet!r}\n"
f")"
)
# Make sense of the `modes` parameter
if modes == 'all':
self.modes = [[l, m] for l in range(2, 9) for m in range(-l, l+1)]
elif modes == '22only':
self.modes = [[2, 2], [2, -2]]
else:
l_max = int(modes)
self.modes = [[l, m] for l in range(2, l_max+1) for m in range(-l, l+1)]
self.ell_max = max(lm[0] for lm in self.modes)
# Load catalog metadata
catalog = sxs.load("catalog")
self.sxs_catalog = {
'simulations': catalog.simulations,
'records': catalog.records,
}
self.sxs_catalog_resolutions = sxs.zenodo.catalog.resolutions_for_simulations(self.sxs_catalog)
def convert(self, sxs_data_path, out_path, truncation_time=None, resolution=None, truncation_tol=None):
"""Convert a simulation from the SXS BBH catalog into the LVC format.
This function outputs a file in LVC format named SXS_BBH_####_Res#.h5 in
out_path.
Parameters
----------
sxs_data_path : string
Path to directory containing rhOverM_Asymptotic_GeometricUnits_CoM.h5,
Horizons.h5, and metadata.json files.
out_path : string
Path where LVC-format file is to be output
truncation_time : {None, float}
If specified, truncate time series at this time instead of at the reference
time
resolution : {None, int}
Integer giving the resolution (Lev) of the data to convert. If this is not
given, the resolution is determined automatically from sxs_data_path.
truncation_tol : {None, bool, callable, float, array_like}, optional
If None (the default) or False, nothing happens. If True, the waveform
data (amplitude and phase) are "truncated" so that bits with significance
lower than `5e-2 * self.tolerance` are set to zero, for improved
compression. Any other input is passed to `sxs.TimeSeries.truncate`. Note
that this is not typically a very effective setting — perhaps providing
another 10% compression; the output file sizes are dominated by fairly
redundant time data unaffected by this parameter.
"""
import os
import time
import json
import h5py
import sxs
from .metadata import sxs_id_from_alt_names, write_metadata_from_sxs
from .horizons import horizon_splines_from_sxs, write_horizon_splines_from_sxs
from .waveforms import convert_modes
log = self.Log(self.quiet)
log(self.command.format(sxs_data_path=sxs_data_path, out_path=out_path,
truncation_time=truncation_time, resolution=resolution))
log("Starting at "+time.strftime('%H:%M%p %Z on %b %d, %Y'))
# Load metadata.json from this simulation
with open(os.path.join(sxs_data_path, "metadata.json"), 'r') as f:
metadata = json.load(f)
# Determine the resolution of the input simulation, if needed
if resolution is None:
resolution = sxs.lev_number(sxs_data_path)
if resolution is None:
raise ValueError('No `resolution` value found in input arguments or data path.')
sxs_id = sxs_id_from_alt_names(metadata['alternative_names'])
log("Converting " + sxs_id)
extrapolation_order = "Extrapolated_N2"
log("Extrapolation order: " + extrapolation_order)
out_name = out_path + "/" + sxs_id.replace(':', '_') + "_Res" + str(resolution) + ".h5"
log("Output filename is '{0}'".format(out_name))
start_time, peak_time, version_hist = convert_modes(
sxs_data_path + "/rhOverM_Asymptotic_GeometricUnits_CoM.h5",
metadata, out_name, self.modes, extrapolation_order, log,
truncation_time, tolerance=self.tolerance/2.0, truncation_tol=truncation_tol
)
with h5py.File(sxs_data_path + "/Horizons.h5", 'r') as horizons:
horizon_splines_to_write, t_A, t_B, t_C = horizon_splines_from_sxs(
horizons, start_time, peak_time, log, truncation_tol=truncation_tol
)
write_horizon_splines_from_sxs(out_name, horizon_splines_to_write, t_A, t_B, t_C, log)
write_metadata_from_sxs(out_name, resolution, metadata,
self.sxs_catalog, self.sxs_catalog_resolutions,
start_time, peak_time, self.ell_max, log)
with h5py.File(out_name, 'a') as out_file:
# Save information about versions of code used in this function
out_file["auxiliary-info"].create_dataset('CodeVersions.txt', data=self.code_versions)
# Copy VersionHist.ver into the new file, if available
if version_hist is not None:
log("Writing VersionHist.ver")
out_file["auxiliary-info"].create_dataset('VersionHist.ver', data=version_hist)
else:
log("No VersionHist.ver found. Data being converted is version 0.")
# Store the log output by this script as a dataset
log("Finishing at "+time.strftime('%H:%M%p %Z on %b %d, %Y'))
log("Writing log")
out_file["auxiliary-info"].create_dataset('ConversionLog.txt', data=log.history)
def convert_simulation(sxs_data_path, out_path, truncation_time=None, resolution=None,
modes=8, tolerance=1e-06, quiet=False):
"""Convert a simulation from the SXS BBH catalog into the LVC format.
This function outputs a file in LVC format named SXS_BBH_####_Res#.h5 in
out_path.
Note that this function is essentially a wrapper for
`SimulationConverter.convert`. If you have very many systems to convert, it is
significantly faster to create the SimulationConverter object once, and then
call the `convert` method for each system.
Parameters
----------
sxs_data_path : string
Path to directory containing rhOverM_Asymptotic_GeometricUnits_CoM.h5, Horizons.h5,
and metadata.json files.
out_path : string
Path where LVC format file is to be output
truncation_time : {None, float}, optional
If specified, truncate time series at this time instead of at the reference time
resolution : {None, int}, optional
Integer giving the resolution (Lev) of the data to convert. If this is not given,
the resolution is determined automatically from sxs_data_path.
modes : {int, '22only'}, optional
Modes to be placed in the output file. Passing '22only' results in the (2,2)
and (2,-2) modes being output. Otherwise, each (l,m) mode up to and including
the given l value will be output. Note that for backwards compatibility, 'all'
is also supported, and is equivalent to the default value of `8`.
tolerance : float, optional
Target tolerance used in `sxs.utilities.greedy_spline.minimal_indices`.
quiet : bool, optional
If False (the default), echo each line of the log as it is created; otherwise
just store the final log in the output file.
"""
lvc_converter = SimulationConverter(modes, tolerance, quiet)
return lvc_converter.convert(sxs_data_path, out_path, truncation_time, resolution)
| en | 0.757134 | Class and function to convert SXS data to LVC-NR format Object to replace `log` function that used global `history` Instead of using a global `history` variable, just create an instance of this class, and pass it around to any function that called the old `log` function. Just like that function, this instance can be called with a string and will print the string while storing all the strings passed to it. Functions expecting an instance of this class can also use `print` as a default argument, which will work the same, but not store the value. Create an object to be used for converting many waveforms to LVC format Parameters ---------- modes : {int, '22only'}, optional Modes to be placed in the output file. Passing '22only' results in the (2,2) and (2,-2) modes being output. Otherwise, each (l,m) mode up to and including the given integer value will be output. Note that for backwards compatibility, 'all' is also supported, and is equivalent to the default value of `8`. tolerance : float, optional Target tolerance used in `sxs.utilities.greedy_spline.minimal_indices`. quiet : bool, optional If False (the default), echo each line of the log as it is created; otherwise just store the final log in the output file. # Make sense of the `modes` parameter # Load catalog metadata Convert a simulation from the SXS BBH catalog into the LVC format. This function outputs a file in LVC format named SXS_BBH_####_Res#.h5 in out_path. Parameters ---------- sxs_data_path : string Path to directory containing rhOverM_Asymptotic_GeometricUnits_CoM.h5, Horizons.h5, and metadata.json files. out_path : string Path where LVC-format file is to be output truncation_time : {None, float} If specified, truncate time series at this time instead of at the reference time resolution : {None, int} Integer giving the resolution (Lev) of the data to convert. If this is not given, the resolution is determined automatically from sxs_data_path. 
truncation_tol : {None, bool, callable, float, array_like}, optional If None (the default) or False, nothing happens. If True, the waveform data (amplitude and phase) are "truncated" so that bits with significance lower than `5e-2 * self.tolerance` are set to zero, for improved compression. Any other input is passed to `sxs.TimeSeries.truncate`. Note that this is not typically a very effective setting — perhaps providing another 10% compression; the output file sizes are dominated by fairly redundant time data unaffected by this parameter. # Load metadata.json from this simulation # Determine the resolution of the input simulation, if needed # Save information about versions of code used in this function # Copy VersionHist.ver into the new file, if available # Store the log output by this script as a dataset Convert a simulation from the SXS BBH catalog into the LVC format. This function outputs a file in LVC format named SXS_BBH_####_Res#.h5 in out_path. Note that this function is essentially a wrapper for `SimulationConverter.convert`. If you have very many systems to convert, it is significantly faster to create the SimulationConverter object once, and then call the `convert` method for each system. Parameters ---------- sxs_data_path : string Path to directory containing rhOverM_Asymptotic_GeometricUnits_CoM.h5, Horizons.h5, and metadata.json files. out_path : string Path where LVC format file is to be output truncation_time : {None, float}, optional If specified, truncate time series at this time instead of at the reference time resolution : {None, int}, optional Integer giving the resolution (Lev) of the data to convert. If this is not given, the resolution is determined automatically from sxs_data_path. modes : {int, '22only'}, optional Modes to be placed in the output file. Passing '22only' results in the (2,2) and (2,-2) modes being output. Otherwise, each (l,m) mode up to and including the given l value will be output. 
Note that for backwards compatibility, 'all' is also supported, and is equivalent to the default value of `8`. tolerance : float, optional Target tolerance used in `sxs.utilities.greedy_spline.minimal_indices`. quiet : bool, optional If False (the default), echo each line of the log as it is created; otherwise just store the final log in the output file. | 3.25994 | 3 |
2_seh_overflow/2.5_register_to_offset.py | RainbowCache/my-osed-scripts | 1 | 6613610 | #!/usr/bin/env python3
import struct
import sys
import subprocess
try:
if len(sys.argv) < 2:
print("Usage: {} <REGISTER>".format(sys.argv[0]))
print("Example: {} 33654132".format(sys.argv[0]))
exit()
register = int(sys.argv[1], 16)
ascii_data = struct.pack("<I", register).decode("ASCII")
result = subprocess.check_output(["/usr/bin/msf-pattern_offset", "-q", ascii_data]).strip().decode("ASCII")
print(result)
except Exception as e:
print("HONK!")
print(str(e))
| #!/usr/bin/env python3
import struct
import sys
import subprocess
try:
if len(sys.argv) < 2:
print("Usage: {} <REGISTER>".format(sys.argv[0]))
print("Example: {} 33654132".format(sys.argv[0]))
exit()
register = int(sys.argv[1], 16)
ascii_data = struct.pack("<I", register).decode("ASCII")
result = subprocess.check_output(["/usr/bin/msf-pattern_offset", "-q", ascii_data]).strip().decode("ASCII")
print(result)
except Exception as e:
print("HONK!")
print(str(e))
| fr | 0.221828 | #!/usr/bin/env python3 | 2.785478 | 3 |
pb/__init__.py | andelf/fuck-ume-trip | 8 | 6613611 |
import sys
import os.path
sys.path.append(os.path.dirname(__file__))
from ume_pb2 import * |
import sys
import os.path
sys.path.append(os.path.dirname(__file__))
from ume_pb2 import * | none | 1 | 1.491561 | 1 | |
accounts/models/client.py | Boot-Loop/SEBE | 1 | 6613612 | from django.db import models
from django import forms
class Client(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
phone_number = models.CharField(max_length=20)
email = models.EmailField()
address = models.CharField(max_length=100)
## other details
def __str__(self):
return self.first_name
#from rest_framework import serializers
'''
class ClientSerializer(serializers.ModelSerializer):
class Meta:
model = Client
fields = [ 'id', 'first_name', 'last_name', 'phone_number', 'email', 'address' ]
##def create(self, validated_data):
## profile_data = validated_data.pop('profile')
## user = User.objects.create(**validated_data)
## Profile.objects.create(user=user, **profile_data)
## return user
##def update(self, instance, validated_data):
## instance.first_name = validated_data.get('first_name', instance.first_name )
## instance.last_name = validated_data.get('last_name', instance.last_name )
## instance.phone_number = validated_data.get('phone_number', instance.phone_number)
## instance.email = validated_data.get('email', instance.email )
## instance.address = validated_data.get('address', instance.address )
##
## instance.save()
## return instance
#''' | from django.db import models
from django import forms
class Client(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
phone_number = models.CharField(max_length=20)
email = models.EmailField()
address = models.CharField(max_length=100)
## other details
def __str__(self):
return self.first_name
#from rest_framework import serializers
'''
class ClientSerializer(serializers.ModelSerializer):
class Meta:
model = Client
fields = [ 'id', 'first_name', 'last_name', 'phone_number', 'email', 'address' ]
##def create(self, validated_data):
## profile_data = validated_data.pop('profile')
## user = User.objects.create(**validated_data)
## Profile.objects.create(user=user, **profile_data)
## return user
##def update(self, instance, validated_data):
## instance.first_name = validated_data.get('first_name', instance.first_name )
## instance.last_name = validated_data.get('last_name', instance.last_name )
## instance.phone_number = validated_data.get('phone_number', instance.phone_number)
## instance.email = validated_data.get('email', instance.email )
## instance.address = validated_data.get('address', instance.address )
##
## instance.save()
## return instance
#''' | en | 0.235317 | ## other details #from rest_framework import serializers class ClientSerializer(serializers.ModelSerializer): class Meta: model = Client fields = [ 'id', 'first_name', 'last_name', 'phone_number', 'email', 'address' ] ##def create(self, validated_data): ## profile_data = validated_data.pop('profile') ## user = User.objects.create(**validated_data) ## Profile.objects.create(user=user, **profile_data) ## return user ##def update(self, instance, validated_data): ## instance.first_name = validated_data.get('first_name', instance.first_name ) ## instance.last_name = validated_data.get('last_name', instance.last_name ) ## instance.phone_number = validated_data.get('phone_number', instance.phone_number) ## instance.email = validated_data.get('email', instance.email ) ## instance.address = validated_data.get('address', instance.address ) ## ## instance.save() ## return instance # | 2.190565 | 2 |
backend/app/constructor/applications/views/__init__.py | air-services/boilerplate | 0 | 6613613 | from app.core.crud import CrudView
from .crud import UpdateNested
from .generate_files import ApplicationGenerate
class ApplicationView(UpdateNested, CrudView, ApplicationGenerate):
pass
| from app.core.crud import CrudView
from .crud import UpdateNested
from .generate_files import ApplicationGenerate
class ApplicationView(UpdateNested, CrudView, ApplicationGenerate):
pass
| none | 1 | 1.279661 | 1 | |
code/backend/appointments/filters.py | rollethu/noe | 16 | 6613614 | <reponame>rollethu/noe
import datetime as dt
import pytz
from django.db.models import F
from django.utils import timezone
from django_filters import fields
from django_filters import rest_framework as filters
from . import models as m
class SpaceTolerantIsoDateTimeField(fields.IsoDateTimeField):
"""
Browsers by default replace `space` to `+` or `%20` in URIs.
UTC offset `+01:00` comes in as ` 01:00` which is invalid.
"""
def strptime(self, value, format):
value = value.replace(" ", "+")
return super().strptime(value, format)
class SpaceTolerantIsoDateTimeFilter(filters.IsoDateTimeFilter):
field_class = SpaceTolerantIsoDateTimeField
class TimeSlotFilter(filters.FilterSet):
start_date = SpaceTolerantIsoDateTimeFilter(method="filter_start_date")
min_availability = filters.NumberFilter(method="filter_min_availability")
class Meta:
model = m.TimeSlot
fields = ["location"]
def filter_start_date(self, queryset, name, value):
current_timezone = value.tzinfo
day_start_in_timezone = value.replace(hour=0, minute=0, second=0, microsecond=0)
day_start_in_utc = day_start_in_timezone.astimezone(pytz.UTC)
day_end_in_utc = day_start_in_utc + dt.timedelta(days=1)
return queryset.filter(start__range=[day_start_in_utc, day_end_in_utc])
def filter_min_availability(self, queryset, name, value):
return queryset.filter(capacity__gte=F("usage") + value)
| import datetime as dt
import pytz
from django.db.models import F
from django.utils import timezone
from django_filters import fields
from django_filters import rest_framework as filters
from . import models as m
class SpaceTolerantIsoDateTimeField(fields.IsoDateTimeField):
"""
Browsers by default replace `space` to `+` or `%20` in URIs.
UTC offset `+01:00` comes in as ` 01:00` which is invalid.
"""
def strptime(self, value, format):
value = value.replace(" ", "+")
return super().strptime(value, format)
class SpaceTolerantIsoDateTimeFilter(filters.IsoDateTimeFilter):
field_class = SpaceTolerantIsoDateTimeField
class TimeSlotFilter(filters.FilterSet):
start_date = SpaceTolerantIsoDateTimeFilter(method="filter_start_date")
min_availability = filters.NumberFilter(method="filter_min_availability")
class Meta:
model = m.TimeSlot
fields = ["location"]
def filter_start_date(self, queryset, name, value):
current_timezone = value.tzinfo
day_start_in_timezone = value.replace(hour=0, minute=0, second=0, microsecond=0)
day_start_in_utc = day_start_in_timezone.astimezone(pytz.UTC)
day_end_in_utc = day_start_in_utc + dt.timedelta(days=1)
return queryset.filter(start__range=[day_start_in_utc, day_end_in_utc])
def filter_min_availability(self, queryset, name, value):
return queryset.filter(capacity__gte=F("usage") + value) | en | 0.792032 | Browsers by default replace `space` to `+` or `%20` in URIs. UTC offset `+01:00` comes in as ` 01:00` which is invalid. | 2.261579 | 2 |
fraccalc/numeric/diffintegral.py | JerryALee/fraccalc | 0 | 6613615 | import numpy as np
from ..basic import gamma, gammaRatio
def coeff(v, N=7, method='2'):
'''
Return the fractional coefficients.
Parameters
----------
v : float
Order of the diffinetration.
N : int, optional
Length of the corresponding coefficients. Default is 7.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
coefficients : ndarray
Coefficients are from from C_{0} to C_{N-1}.
'''
if method == '2':
n = N - 2
coefficients = np.zeros(N)
temp = np.array([v/4 + v**2 / 8, 1 - v**2 / 4, -v/4 + v**2 / 8])
coefficients[0] = temp[0]
coefficients[1] = 1 - v**2 / 2 - v**3 / 8
for k in range(1, n - 1):
coefficients[k + 1] = gammaRatio(k - v + 1, -v) / gamma(k + 2) * temp[0] + gammaRatio(
k - v, -v) / gamma(k + 1) * temp[1] + gammaRatio(k - v - 1, -v) / gamma(k) * temp[2]
coefficients[n] = gammaRatio(n - v - 1, -v) / gamma(n) * \
temp[1] + gammaRatio(n - v - 2, -v) / gamma(n - 1) * temp[2]
coefficients[-1] = gammaRatio(n - v - 1, -v) / gamma(n) * temp[2]
return coefficients
elif method == '1':
n = N - 1
coefficients = np.zeros(N)
coefficients[0] = 1
coefficients[1] = -v
for k in range(2, N):
coefficients[k] = gammaRatio(k - v, -v) / gamma(k + 1)
return coefficients
def dotPos(xq, N=7, a=0, method='2'):
'''
Return the position array for the mask convolution.
Parameters
----------
xq : float
Point at which function is diffintegrated.
N : int, optional
Length of the corresponding coefficients. Default is 7.
a : float, optional
Lower limit of the diffintegration. Default is 0.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
h : float
Step size of the interval.
x_arr : ndarray
Positions for mask convolution.
'''
if method == '2':
h = (xq - a) / (N - 2)
x_arr = np.linspace(xq + h, a, N)
return h, x_arr
elif method == '1':
h = (xq - a) / N
x_arr = np.linspace(xq, a + h, N)
return h, x_arr
def deriv(fun, xq, v, N=7, a=0, method='2'):
'''
Calculate the fractional diffintegral.
Parameters
----------
fun : callable
Diffintegrand function.
xq : ndarray or float
Point at which fun is diffintegrated.
v : float
Diffintegration order.
N : int, optional
Length of the corresponding coefficients. Default is 7.
a : float, optional
Lower limit of the diffintegration. Default is 0.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
yq : ndarray or float
The diffintegral value at xq.
'''
C = coeff(v, N, method)
if hasattr(xq, "__len__"):
num = len(xq)
yq = np.zeros(num)
for i in range(num):
h, x_tmp = dotPos(xq[i], N, a, method)
yq[i] = np.dot(C, fun(x_tmp)) / h**(v)
return yq
else:
h, x_tmp = dotPos(xq, N, a, method)
return np.dot(C, fun(x_tmp)) / h**(v)
def mask(v, N=13, method='Tiansi'):
'''
Return fractional mask operator.
Parameters
----------
v : float
Diffintegration order.
N : int, optional
Mask size of the corresponding operator. Default is 13 x 13.
method : str
Diffintegration operator. {'Tiansi' (1, default) or 'lcr' (2)}.
Returns
----------
result_mask : 2darray
The fractional mask.
'''
center = int((N - 1) / 2)
result_mask = np.zeros((N, N))
if method == 'Tiansi' or method == '1':
C = coeff(v, center + 1, '1')
elif method == 'lcr' or method == '2':
C = coeff(v, center + 2, '2')
C[2] += C[0]
C = C[1:]
result_mask[center, center] = 8 * C[0]
for i in range(1, center + 1):
c = C[i]
result_mask[center - i, center] = c
result_mask[center + i, center] = c
result_mask[center, center - i] = c
result_mask[center, center + i] = c
result_mask[center + i, center - i] = c
result_mask[center - i, center + i] = c
result_mask[center - i, center - i] = c
result_mask[center + i, center + i] = c
return result_mask
def deriv8(A, v, method='2', N=7):
'''
Compute the fractional diffintegral in the eight direction of a matrix A
Parameters
----------
A : 2darray
Matrix (image) that need to be diffintegrated.
v : float
Diffintegration order.
method : str
Diffintegration operator. {'1' or '2' (default)}.
N : int, optional
Length of the corresponding coefficients. Default is 7.
Returns
----------
d8 : 3darray
fractional diffintegral result. First dimension represents direction in the following order: u, d, l, r, ld, ru, lu, rd.
'''
len_x, len_y = A.shape
C = coeff(v, N, method)
d8 = np.zeros((8, len_x, len_y))
if method == '1':
A_pad = np.pad(A, N - 1, mode='symmetric')
for k in range(N):
c = C[k]
d8[0] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1):(N - 1 + len_y)]
d8[1] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1):(N - 1 + len_y)]
d8[2] += c * A_pad[(N - 1):(N - 1 + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[3] += c * A_pad[(N - 1):(N - 1 + len_x), (N - 1 + k):(N - 1 + k + len_y)]
d8[4] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[5] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 + k):(N - 1 + k + len_y)]
d8[6] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[7] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1 + k):(N - 1 + k + len_y)]
elif method == '2':
A_pad = np.pad(A, N - 2, mode='symmetric')
for k in range(N):
c = C[k]
d8[0] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 2):(N - 2 + len_y)]
d8[1] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 2):(N - 2 + len_y)]
d8[2] += c * A_pad[(N - 2):(N - 2 + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[3] += c * A_pad[(N - 2):(N - 2 + len_x), (N - 3 + k):(N - 3 + k + len_y)]
d8[4] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[5] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 3 + k):(N - 3 + k + len_y)]
d8[6] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[7] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 3 + k):(N - 3 + k + len_y)]
return d8
def derivTotal(d8, mode='sum'):
if mode == 'sum':
d_total = np.sum(d8, axis=0)
elif mode == 'L1':
d_total = np.sum(np.abs(d8), axis=0)
elif mode == 'L2':
d_total = np.sum(np.square(d8), axis=0)
elif mode == 'max':
d_total = np.max(np.abs(d8), axis=0)
return d_total
| import numpy as np
from ..basic import gamma, gammaRatio
def coeff(v, N=7, method='2'):
'''
Return the fractional coefficients.
Parameters
----------
v : float
Order of the diffinetration.
N : int, optional
Length of the corresponding coefficients. Default is 7.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
coefficients : ndarray
Coefficients are from from C_{0} to C_{N-1}.
'''
if method == '2':
n = N - 2
coefficients = np.zeros(N)
temp = np.array([v/4 + v**2 / 8, 1 - v**2 / 4, -v/4 + v**2 / 8])
coefficients[0] = temp[0]
coefficients[1] = 1 - v**2 / 2 - v**3 / 8
for k in range(1, n - 1):
coefficients[k + 1] = gammaRatio(k - v + 1, -v) / gamma(k + 2) * temp[0] + gammaRatio(
k - v, -v) / gamma(k + 1) * temp[1] + gammaRatio(k - v - 1, -v) / gamma(k) * temp[2]
coefficients[n] = gammaRatio(n - v - 1, -v) / gamma(n) * \
temp[1] + gammaRatio(n - v - 2, -v) / gamma(n - 1) * temp[2]
coefficients[-1] = gammaRatio(n - v - 1, -v) / gamma(n) * temp[2]
return coefficients
elif method == '1':
n = N - 1
coefficients = np.zeros(N)
coefficients[0] = 1
coefficients[1] = -v
for k in range(2, N):
coefficients[k] = gammaRatio(k - v, -v) / gamma(k + 1)
return coefficients
def dotPos(xq, N=7, a=0, method='2'):
'''
Return the position array for the mask convolution.
Parameters
----------
xq : float
Point at which function is diffintegrated.
N : int, optional
Length of the corresponding coefficients. Default is 7.
a : float, optional
Lower limit of the diffintegration. Default is 0.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
h : float
Step size of the interval.
x_arr : ndarray
Positions for mask convolution.
'''
if method == '2':
h = (xq - a) / (N - 2)
x_arr = np.linspace(xq + h, a, N)
return h, x_arr
elif method == '1':
h = (xq - a) / N
x_arr = np.linspace(xq, a + h, N)
return h, x_arr
def deriv(fun, xq, v, N=7, a=0, method='2'):
'''
Calculate the fractional diffintegral.
Parameters
----------
fun : callable
Diffintegrand function.
xq : ndarray or float
Point at which fun is diffintegrated.
v : float
Diffintegration order.
N : int, optional
Length of the corresponding coefficients. Default is 7.
a : float, optional
Lower limit of the diffintegration. Default is 0.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
yq : ndarray or float
The diffintegral value at xq.
'''
C = coeff(v, N, method)
if hasattr(xq, "__len__"):
num = len(xq)
yq = np.zeros(num)
for i in range(num):
h, x_tmp = dotPos(xq[i], N, a, method)
yq[i] = np.dot(C, fun(x_tmp)) / h**(v)
return yq
else:
h, x_tmp = dotPos(xq, N, a, method)
return np.dot(C, fun(x_tmp)) / h**(v)
def mask(v, N=13, method='Tiansi'):
'''
Return fractional mask operator.
Parameters
----------
v : float
Diffintegration order.
N : int, optional
Mask size of the corresponding operator. Default is 13 x 13.
method : str
Diffintegration operator. {'Tiansi' (1, default) or 'lcr' (2)}.
Returns
----------
result_mask : 2darray
The fractional mask.
'''
center = int((N - 1) / 2)
result_mask = np.zeros((N, N))
if method == 'Tiansi' or method == '1':
C = coeff(v, center + 1, '1')
elif method == 'lcr' or method == '2':
C = coeff(v, center + 2, '2')
C[2] += C[0]
C = C[1:]
result_mask[center, center] = 8 * C[0]
for i in range(1, center + 1):
c = C[i]
result_mask[center - i, center] = c
result_mask[center + i, center] = c
result_mask[center, center - i] = c
result_mask[center, center + i] = c
result_mask[center + i, center - i] = c
result_mask[center - i, center + i] = c
result_mask[center - i, center - i] = c
result_mask[center + i, center + i] = c
return result_mask
def deriv8(A, v, method='2', N=7):
'''
Compute the fractional diffintegral in the eight direction of a matrix A
Parameters
----------
A : 2darray
Matrix (image) that need to be diffintegrated.
v : float
Diffintegration order.
method : str
Diffintegration operator. {'1' or '2' (default)}.
N : int, optional
Length of the corresponding coefficients. Default is 7.
Returns
----------
d8 : 3darray
fractional diffintegral result. First dimension represents direction in the following order: u, d, l, r, ld, ru, lu, rd.
'''
len_x, len_y = A.shape
C = coeff(v, N, method)
d8 = np.zeros((8, len_x, len_y))
if method == '1':
A_pad = np.pad(A, N - 1, mode='symmetric')
for k in range(N):
c = C[k]
d8[0] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1):(N - 1 + len_y)]
d8[1] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1):(N - 1 + len_y)]
d8[2] += c * A_pad[(N - 1):(N - 1 + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[3] += c * A_pad[(N - 1):(N - 1 + len_x), (N - 1 + k):(N - 1 + k + len_y)]
d8[4] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[5] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 + k):(N - 1 + k + len_y)]
d8[6] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[7] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1 + k):(N - 1 + k + len_y)]
elif method == '2':
A_pad = np.pad(A, N - 2, mode='symmetric')
for k in range(N):
c = C[k]
d8[0] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 2):(N - 2 + len_y)]
d8[1] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 2):(N - 2 + len_y)]
d8[2] += c * A_pad[(N - 2):(N - 2 + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[3] += c * A_pad[(N - 2):(N - 2 + len_x), (N - 3 + k):(N - 3 + k + len_y)]
d8[4] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[5] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 3 + k):(N - 3 + k + len_y)]
d8[6] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[7] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 3 + k):(N - 3 + k + len_y)]
return d8
def derivTotal(d8, mode='sum'):
if mode == 'sum':
d_total = np.sum(d8, axis=0)
elif mode == 'L1':
d_total = np.sum(np.abs(d8), axis=0)
elif mode == 'L2':
d_total = np.sum(np.square(d8), axis=0)
elif mode == 'max':
d_total = np.max(np.abs(d8), axis=0)
return d_total
| en | 0.572704 | Return the fractional coefficients. Parameters ---------- v : float Order of the diffinetration. N : int, optional Length of the corresponding coefficients. Default is 7. method : str Diffintegration operator. {'1' or '2' (default)}. Returns ---------- coefficients : ndarray Coefficients are from from C_{0} to C_{N-1}. Return the position array for the mask convolution. Parameters ---------- xq : float Point at which function is diffintegrated. N : int, optional Length of the corresponding coefficients. Default is 7. a : float, optional Lower limit of the diffintegration. Default is 0. method : str Diffintegration operator. {'1' or '2' (default)}. Returns ---------- h : float Step size of the interval. x_arr : ndarray Positions for mask convolution. Calculate the fractional diffintegral. Parameters ---------- fun : callable Diffintegrand function. xq : ndarray or float Point at which fun is diffintegrated. v : float Diffintegration order. N : int, optional Length of the corresponding coefficients. Default is 7. a : float, optional Lower limit of the diffintegration. Default is 0. method : str Diffintegration operator. {'1' or '2' (default)}. Returns ---------- yq : ndarray or float The diffintegral value at xq. Return fractional mask operator. Parameters ---------- v : float Diffintegration order. N : int, optional Mask size of the corresponding operator. Default is 13 x 13. method : str Diffintegration operator. {'Tiansi' (1, default) or 'lcr' (2)}. Returns ---------- result_mask : 2darray The fractional mask. Compute the fractional diffintegral in the eight direction of a matrix A Parameters ---------- A : 2darray Matrix (image) that need to be diffintegrated. v : float Diffintegration order. method : str Diffintegration operator. {'1' or '2' (default)}. N : int, optional Length of the corresponding coefficients. Default is 7. Returns ---------- d8 : 3darray fractional diffintegral result. 
First dimension represents direction in the following order: u, d, l, r, ld, ru, lu, rd. | 2.74537 | 3 |
src/firmware/potduino.py | malkam03/potduino | 2 | 6613616 | # -*- coding: utf-8 -*-
import logging
import time
import ahtx0 # noqa: E0401
import machine # noqa: E0401
from bh1750 import BH1750 # noqa: E0401
from ds18x20 import DS18X20 # noqa: E0401
from machine import I2C, Pin # noqa: E0401
from onewire import OneWire # noqa: E0401
from pot import Pot
class Potduino():
    """Encapsulate the Potduino hardware: ambient sensors, per-pot soil
    temperature probes, and deep-sleep power management.

    Args:
        scl_pin (int): I2C clock pin to which the AHT10 and BH1750 are
            connected. Default 5.
        sda_pin (int): I2C data pin to which the AHT10 and BH1750 are
            connected. Default 4.
        ds_pin (int): OneWire data pin to which the DS18X20 soil
            temperature sensors are connected. Default 14.
        sleep_minutes (int): minutes the device stays in deep sleep
            between operations. Default 60.
    """

    def __init__(self,
                 scl_pin: int = 5,
                 sda_pin: int = 4,
                 ds_pin: int = 14,
                 sleep_minutes: int = 60) -> None:
        self._i2c = I2C(scl=Pin(scl_pin), sda=Pin(sda_pin))
        self._ambient_sensor = ahtx0.AHT10(self._i2c)
        self._luminosity_sensor = BH1750(self._i2c)
        self._ds_sensors = DS18X20(OneWire(Pin(ds_pin)))
        # Discover every DS18X20 probe on the OneWire bus once, up front.
        self._roms = self._ds_sensors.scan()
        # Deep-sleep duration in milliseconds, as required by rtc.alarm().
        self._sleep_time = int(sleep_minutes * 1e3 * 60)
        # BUG FIX: this was annotated `self._pots: list(Pot) = []`.
        # `list(Pot)` is a call, not a type; CPython evaluates annotations
        # on attribute targets at runtime, so it raised
        # `TypeError: 'type' object is not iterable`. A comment-style
        # annotation carries the same type info and is never evaluated.
        self._pots = []  # type: list[Pot]

    def add_pot(self, pot: Pot) -> None:
        """Register a pot configuration with the device.

        Args:
            pot (Pot): pot object with the settings specific to that pot.
        """
        self._pots.append(pot)

    def log_sensor_data(self, log_file: str = "sensor_data.log") -> None:
        """Append the current sensor readings to ``log_file`` with a timestamp.

        Note:
            ``logging.basicConfig`` only takes effect on the first call per
            boot; the file chosen then is reused by any later call, even if
            a different ``log_file`` is passed.

        Args:
            log_file (str): path to the logging file.
        """
        logging.basicConfig(level=logging.INFO,
                            filename=log_file,
                            format='%(asctime)s;%(message)s')
        sensor_data = self.get_sensors_data()
        logging.info(str(sensor_data))

    def get_sensors_data(self) -> "dict[str, float]":
        """Return a dictionary mapping a sensor ID to its latest reading.

        Note:
            IDS:
                AL: Ambient Light Sensor (BH1750)
                AH: Ambient Humidity Sensor (AHT10)
                AT: Ambient Temperature Sensor (AHT10)
                TXX: Pot Soil temperature Sensor (XX is a two digit ID for
                    the pot)

        Returns:
            dict[str, float]: sensor ID mapped to the retrieved value.
        """
        light = self._luminosity_sensor.luminance(BH1750.ONCE_HIRES_2)
        temp = self._ambient_sensor.temperature
        hum = self._ambient_sensor.relative_humidity
        sensor_data = {"AL": light, "AH": hum, "AT": temp}
        pot_temperatures = self._pots_temperature()
        for pot in self._pots:
            sensor_data["T{:0>2d}".format(
                pot.id)] = pot_temperatures[pot.temperature_sensor_address]
        return sensor_data

    def _pots_temperature(self) -> "dict[str, float]":
        """Read every DS18X20 probe on the OneWire bus.

        Returns:
            dict[str, float]: OneWire ROM address (stringified) mapped to
                the measured temperature.
        """
        self._ds_sensors.convert_temp()
        # A DS18X20 needs up to 750 ms to complete a 12-bit conversion.
        time.sleep_ms(750)
        return {
            str(rom): self._ds_sensors.read_temp(rom)
            for rom in self._roms
        }

    def sleep(self) -> None:
        """Put the device into deep sleep for the configured interval.

        The RTC alarm wakes the board after ``sleep_minutes``; execution
        then restarts from boot, not from this point.
        """
        rtc = machine.RTC()
        rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
        rtc.alarm(rtc.ALARM0, self._sleep_time)
        machine.deepsleep()
| # -*- coding: utf-8 -*-
import logging
import time
import ahtx0 # noqa: E0401
import machine # noqa: E0401
from bh1750 import BH1750 # noqa: E0401
from ds18x20 import DS18X20 # noqa: E0401
from machine import I2C, Pin # noqa: E0401
from onewire import OneWire # noqa: E0401
from pot import Pot
class Potduino():
"""The potduino class encapsulates the hardware functionality
Args:
scl_pin (int): I2C Clock pin in which the ATH10 and BH1750 are
connected to, default 5.
sda_pin (int): I2C Data pin in which the ATH10 and BH1750 are
connected to, default 4.
ds_pin (int): OneWire Data pin in which the BH1750 sensors are
connected to, default 14.
sleep_minutes (int): minutes in which the device will be in deep
sleep between operations.
"""
def __init__(self,
scl_pin: int = 5,
sda_pin: int = 4,
ds_pin: int = 14,
sleep_minutes: int = 60) -> None:
self._i2c = I2C(scl=Pin(scl_pin), sda=Pin(sda_pin))
self._ambient_sensor = ahtx0.AHT10(self._i2c)
self._luminosity_sensor = BH1750(self._i2c)
self._ds_sensors = DS18X20(OneWire(Pin(ds_pin)))
self._roms = self._ds_sensors.scan()
self._sleep_time = int(sleep_minutes * 1e3 * 60)
self._pots: list(Pot) = []
def add_pot(self, pot: Pot) -> None:
"""Add a pot configuration to the device
Args:
pot (Pot): pot object with the settings specific to that pot
"""
self._pots.append(pot)
def log_sensor_data(self, log_file: str = "sensor_data.log") -> None:
"""Writes the sensor data into the `file_name` with timestamp
Args:
log_file (str): path to the logging file
"""
logging.basicConfig(level=logging.INFO,
filename=log_file,
format='%(asctime)s;%(message)s')
sensor_data = self.get_sensors_data()
logging.info(str(sensor_data))
def get_sensors_data(self) -> "dict[str, int]":
"""Return a dictionary with a sensor ID and the retrieved value
Note:
IDS:
AL: Ambient Light Sensor
AH: Ambient Humidity Sensor
AT: Ambient Temperature Sensor
TXX: Pot Soil temperature Sensor (XX is a two digit ID for the
pot)
Returns:
dict{str,int}: a dict with the sensor iD and the retrieved value
"""
light = self._luminosity_sensor.luminance(BH1750.ONCE_HIRES_2)
temp = self._ambient_sensor.temperature
hum = self._ambient_sensor.relative_humidity
sensor_data = {"AL": light, "AH": hum, "AT": temp}
pot_temperatures = self._pots_temperature()
for pot in self._pots:
sensor_data["T{:0>2d}".format(
pot.id)] = pot_temperatures[pot.temperature_sensor_address]
return sensor_data
def _pots_temperature(self) -> "dict[str, int]":
"""Get temperatures from the DS18X20 sensors
Returns:
dict{str: int}: a dict with the sensor one wire address and
the temperature
"""
self._ds_sensors.convert_temp()
time.sleep_ms(750)
return {
str(rom): self._ds_sensors.read_temp(rom)
for rom in self._roms
}
def sleep(self):
"""Puts the device in deep sleep mode for the predefined time
"""
rtc = machine.RTC()
rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
rtc.alarm(rtc.ALARM0, self._sleep_time)
machine.deepsleep()
| en | 0.710278 | # -*- coding: utf-8 -*- # noqa: E0401 # noqa: E0401 # noqa: E0401 # noqa: E0401 # noqa: E0401 # noqa: E0401 The potduino class encapsulates the hardware functionality Args: scl_pin (int): I2C Clock pin in which the ATH10 and BH1750 are connected to, default 5. sda_pin (int): I2C Data pin in which the ATH10 and BH1750 are connected to, default 4. ds_pin (int): OneWire Data pin in which the BH1750 sensors are connected to, default 14. sleep_minutes (int): minutes in which the device will be in deep sleep between operations. Add a pot configuration to the device Args: pot (Pot): pot object with the settings specific to that pot Writes the sensor data into the `file_name` with timestamp Args: log_file (str): path to the logging file Return a dictionary with a sensor ID and the retrieved value Note: IDS: AL: Ambient Light Sensor AH: Ambient Humidity Sensor AT: Ambient Temperature Sensor TXX: Pot Soil temperature Sensor (XX is a two digit ID for the pot) Returns: dict{str,int}: a dict with the sensor iD and the retrieved value Get temperatures from the DS18X20 sensors Returns: dict{str: int}: a dict with the sensor one wire address and the temperature Puts the device in deep sleep mode for the predefined time | 2.579253 | 3 |
palendromic_substring/palendromic_substring.py | eyvonne/LeetCodePractice | 0 | 6613617 | '''Given a string s, find the longest palindromic substring in s.
You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example 2:
Input: "cbbd"
Output: "bb"'''
# Plan:
# iterate through the two letter and three letter combos in the string
# if the two or three is a palendrome check one letter out for also being a palendrome
# I believe that this code runs in O(n log n) time.
def find_substring(word):
# helper function to check if a given palendrome is part of a larger palendrome
def extend_pal(word, index, factor):
# normalize ref because Aba is a palendrome
ref = word.lower()
# create the palendrome
pal = word[index:index + factor]
# set the start and end of the palendrome check
if index - 1 >= 0 and index + factor < len(word):
a = index - 1
b = index + factor
else:
return pal
# move out the edges of the palendrome until they don't match
while ref[a] == ref[b]:
pal = word[a:b+1]
# check that we haven't reached the end of the string
if a - 1 >= 0 and b + 1 < len(word):
a -= 1
b += 1
else:
# thats as good as it gets if either end has been reached
break
return pal
# if the word is a palendrome then it's always the longest palendrome
if word == word[::-1]:
return word
# otherwise, pal should be the first letter to start
pal = word[0] if len(word) > 0 else ''
for i, _ in enumerate(word):
# check that i isn't too large and is at a two letter palendrome
if i+2 <= len(word) and word[i] == word[i+1]:
# extend_pal just finds how big the pal is
extension = extend_pal(word, i, 2)
# if its longer than the longest save it
if len(extension) > len(pal):
pal = extension
# essentially the same as other block
if i+3 <= len(word) and word[i] == word[i+2]:
extension = extend_pal(word, i, 3)
if len(extension) > len(pal):
pal = extension
return pal
def find_substring_slow(word):
def is_pal(subWord: str) -> bool:
return subWord == subWord[::-1]
pal = ''
max = 0
for i, letter in enumerate(word):
if letter.lower() in set(word[i+1:].lower()):
for q, _ in enumerate(word[i:], i+1):
sub = word[i:q].lower()
if is_pal(sub):
if len(sub) > max:
pal = word[i:q]
max = len(sub)
return pal
| '''Given a string s, find the longest palindromic substring in s.
You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example 2:
Input: "cbbd"
Output: "bb"'''
# Plan:
# iterate through the two letter and three letter combos in the string
# if the two or three is a palendrome check one letter out for also being a palendrome
# I believe that this code runs in O(n log n) time.
def find_substring(word):
# helper function to check if a given palendrome is part of a larger palendrome
def extend_pal(word, index, factor):
# normalize ref because Aba is a palendrome
ref = word.lower()
# create the palendrome
pal = word[index:index + factor]
# set the start and end of the palendrome check
if index - 1 >= 0 and index + factor < len(word):
a = index - 1
b = index + factor
else:
return pal
# move out the edges of the palendrome until they don't match
while ref[a] == ref[b]:
pal = word[a:b+1]
# check that we haven't reached the end of the string
if a - 1 >= 0 and b + 1 < len(word):
a -= 1
b += 1
else:
# thats as good as it gets if either end has been reached
break
return pal
# if the word is a palendrome then it's always the longest palendrome
if word == word[::-1]:
return word
# otherwise, pal should be the first letter to start
pal = word[0] if len(word) > 0 else ''
for i, _ in enumerate(word):
# check that i isn't too large and is at a two letter palendrome
if i+2 <= len(word) and word[i] == word[i+1]:
# extend_pal just finds how big the pal is
extension = extend_pal(word, i, 2)
# if its longer than the longest save it
if len(extension) > len(pal):
pal = extension
# essentially the same as other block
if i+3 <= len(word) and word[i] == word[i+2]:
extension = extend_pal(word, i, 3)
if len(extension) > len(pal):
pal = extension
return pal
def find_substring_slow(word):
def is_pal(subWord: str) -> bool:
return subWord == subWord[::-1]
pal = ''
max = 0
for i, letter in enumerate(word):
if letter.lower() in set(word[i+1:].lower()):
for q, _ in enumerate(word[i:], i+1):
sub = word[i:q].lower()
if is_pal(sub):
if len(sub) > max:
pal = word[i:q]
max = len(sub)
return pal
| en | 0.903583 | Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000. Example 1: Input: "babad" Output: "bab" Note: "aba" is also a valid answer. Example 2: Input: "cbbd" Output: "bb" # Plan: # iterate through the two letter and three letter combos in the string # if the two or three is a palendrome check one letter out for also being a palendrome # I believe that this code runs in O(n log n) time. # helper function to check if a given palendrome is part of a larger palendrome # normalize ref because Aba is a palendrome # create the palendrome # set the start and end of the palendrome check # move out the edges of the palendrome until they don't match # check that we haven't reached the end of the string # thats as good as it gets if either end has been reached # if the word is a palendrome then it's always the longest palendrome # otherwise, pal should be the first letter to start # check that i isn't too large and is at a two letter palendrome # extend_pal just finds how big the pal is # if its longer than the longest save it # essentially the same as other block | 4.234073 | 4 |
src/tantalus/logic/transaction.py | thijsmie/tantalus | 3 | 6613618 | <gh_stars>1-10
from tantalus_db.base import db
from tantalus_db.models import Referencing, Transaction, TransactionLine, ServiceLine, Relation, Product, BtwType
from tantalus_db.utility import get_or_none, transactional
from tantalus.logic.rows import transform_collection
from collections import defaultdict
from datetime import datetime
@transactional
def new_transaction(data):
relation = get_or_none(data['relation'], Relation)
if relation is None:
raise Exception("Relation does not exist!")
if relation.numbered_reference:
reference = Referencing.get_reference()
else:
reference = 0
tr = Transaction.query.filter(Transaction.relation == relation).order_by(
Transaction.informal_reference.desc()).first()
if tr is None:
informal_reference = 1
else:
informal_reference = tr.informal_reference + 1
t = Transaction(
reference=reference,
informal_reference=informal_reference,
relation=relation,
deliverydate=datetime.strptime(data["deliverydate"], "%Y-%m-%d").date(),
processeddate=datetime.now().date(),
description=data.get("description", ""),
two_to_one_has_btw=data.get("two_to_one_has_btw", False),
two_to_one_btw_per_row=data.get("two_to_one_btw_per_row", False)
)
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = product.take(int(prd['amount']))
t.one_to_two.append(line)
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=product.value * int(prd['amount']),
btwtype=product.btwtype
)
product.give(line)
t.two_to_one.append(line)
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
rec = transaction_record(t)
t.total = rec["total"]
db.session.add(t)
relation.budget -= rec["total"]
return t
@transactional
def edit_transaction(t, data):
# Easy stuff first
old_total = t.total
t.revision += 1
t.two_to_one_has_btw = data.get("two_to_one_has_btw", t.two_to_one_has_btw)
t.two_to_one_btw_per_row = data.get("two_to_one_btw_per_row", t.two_to_one_btw_per_row)
if "deliverydate" in data:
t.deliverydate = datetime.strptime(data["deliverydate"], "%Y-%m-%d").date()
if "description" in data:
t.description = data["description"]
newsell = []
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
value=int(prd['amount'])*product.value,
prevalue=int(prd['amount'])*product.value,
amount=int(prd['amount']),
product=product,
btwtype=product.btwtype
)
newsell.append(line)
t.one_to_two = transform_collection(t.one_to_two, newsell, True)
newbuy = []
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=int(prd['amount'])*product.value,
btwtype=product.btwtype
)
newbuy.append(line)
t.two_to_one = transform_collection(t.two_to_one, newbuy, False)
t.services = []
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
record = transaction_record(t)
t.total = record["total"]
db.session.add(t)
t.relation.budget += old_total - t.total
return t
def make_row_record(row):
return {
"contenttype": row.product.contenttype,
"group": row.product.group.name,
"prevalue": row.prevalue,
"value": row.value,
"amount": row.amount,
"btw": row.btwtype.percentage
}
def make_service_record(row):
return {
"contenttype": row.service,
"amount": row.amount,
"prevalue": row.value,
"value": row.value,
"btw": row.btwtype.percentage
}
def transaction_process(transaction):
sellrows = [make_row_record(row) for row in transaction.one_to_two]
buyrows = [make_row_record(row) for row in transaction.two_to_one]
servicerows = [make_service_record(row) for row in transaction.services]
btwtotals = defaultdict(float)
btwvalues = defaultdict(int)
# Current total including btw, btw rounded per invoice
for row in sellrows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
# Current total including btw, btw rounded per invoice
for row in servicerows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
buybtwtotals = defaultdict(float)
for row in buyrows:
if transaction.two_to_one_has_btw:
if transaction.two_to_one_btw_per_row:
# Current total including btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0 / (row["btw"]/100. + 1))
else:
# Current total including btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
else:
if transaction.two_to_one_btw_per_row:
# Current total excluding btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0)
btwvalues[row["btw"]] += btw
else:
# Current total excluding btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100.0
btwvalues[row["btw"]] += btw
btwvalues[row["btw"]] += row["prevalue"]
btwtotals[row["btw"]] += btw
buybtwtotals[row["btw"]] += btw
row["btwvalue"] = btw
row["value_exl"] = row["value"] * (1 - row["btw"] / 100.0 / (row["btw"]/100. + 1))
for k, v in btwtotals.items():
btwtotals[k] = int(round(v))
return dict(btwtotals), dict(btwvalues), dict(buybtwtotals), sellrows, buyrows, servicerows
def transaction_record(transaction):
btwtotals, btwvalues, buybtwtotals, sellrows, buyrows, servicerows = transaction_process(transaction)
selltotal = sum(r['prevalue'] for r in sellrows)
buytotal = sum(r['prevalue'] for r in buyrows)
servicetotal = sum(r['prevalue'] for r in servicerows)
total = selltotal - buytotal + servicetotal
if not transaction.two_to_one_has_btw:
total -= sum(buybtwtotals.values())
return {
"reference": str(transaction.reference).zfill(4),
"name": transaction.relation.name + " " + str(transaction.informal_reference).zfill(3),
"sell": sellrows,
"buy": buyrows,
"service": servicerows,
"selltotal": selltotal,
"buytotal": buytotal,
"btwtotals": btwtotals,
"btwvalues": btwvalues,
"btwtotal": sum(btwtotals.values()),
"servicetotal": servicetotal,
"description": transaction.description,
"processeddate": transaction.processeddate,
"deliverydate": transaction.deliverydate,
"total": int(total),
"id": transaction.id,
"revision": transaction.revision,
"lastedit": transaction.time_updated,
"two_to_one_has_btw": transaction.two_to_one_has_btw,
"two_to_one_btw_per_row": transaction.two_to_one_btw_per_row
}
| from tantalus_db.base import db
from tantalus_db.models import Referencing, Transaction, TransactionLine, ServiceLine, Relation, Product, BtwType
from tantalus_db.utility import get_or_none, transactional
from tantalus.logic.rows import transform_collection
from collections import defaultdict
from datetime import datetime
@transactional
def new_transaction(data):
relation = get_or_none(data['relation'], Relation)
if relation is None:
raise Exception("Relation does not exist!")
if relation.numbered_reference:
reference = Referencing.get_reference()
else:
reference = 0
tr = Transaction.query.filter(Transaction.relation == relation).order_by(
Transaction.informal_reference.desc()).first()
if tr is None:
informal_reference = 1
else:
informal_reference = tr.informal_reference + 1
t = Transaction(
reference=reference,
informal_reference=informal_reference,
relation=relation,
deliverydate=datetime.strptime(data["deliverydate"], "%Y-%m-%d").date(),
processeddate=datetime.now().date(),
description=data.get("description", ""),
two_to_one_has_btw=data.get("two_to_one_has_btw", False),
two_to_one_btw_per_row=data.get("two_to_one_btw_per_row", False)
)
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = product.take(int(prd['amount']))
t.one_to_two.append(line)
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=product.value * int(prd['amount']),
btwtype=product.btwtype
)
product.give(line)
t.two_to_one.append(line)
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
rec = transaction_record(t)
t.total = rec["total"]
db.session.add(t)
relation.budget -= rec["total"]
return t
@transactional
def edit_transaction(t, data):
# Easy stuff first
old_total = t.total
t.revision += 1
t.two_to_one_has_btw = data.get("two_to_one_has_btw", t.two_to_one_has_btw)
t.two_to_one_btw_per_row = data.get("two_to_one_btw_per_row", t.two_to_one_btw_per_row)
if "deliverydate" in data:
t.deliverydate = datetime.strptime(data["deliverydate"], "%Y-%m-%d").date()
if "description" in data:
t.description = data["description"]
newsell = []
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
value=int(prd['amount'])*product.value,
prevalue=int(prd['amount'])*product.value,
amount=int(prd['amount']),
product=product,
btwtype=product.btwtype
)
newsell.append(line)
t.one_to_two = transform_collection(t.one_to_two, newsell, True)
newbuy = []
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=int(prd['amount'])*product.value,
btwtype=product.btwtype
)
newbuy.append(line)
t.two_to_one = transform_collection(t.two_to_one, newbuy, False)
t.services = []
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
record = transaction_record(t)
t.total = record["total"]
db.session.add(t)
t.relation.budget += old_total - t.total
return t
def make_row_record(row):
return {
"contenttype": row.product.contenttype,
"group": row.product.group.name,
"prevalue": row.prevalue,
"value": row.value,
"amount": row.amount,
"btw": row.btwtype.percentage
}
def make_service_record(row):
return {
"contenttype": row.service,
"amount": row.amount,
"prevalue": row.value,
"value": row.value,
"btw": row.btwtype.percentage
}
def transaction_process(transaction):
sellrows = [make_row_record(row) for row in transaction.one_to_two]
buyrows = [make_row_record(row) for row in transaction.two_to_one]
servicerows = [make_service_record(row) for row in transaction.services]
btwtotals = defaultdict(float)
btwvalues = defaultdict(int)
# Current total including btw, btw rounded per invoice
for row in sellrows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
# Current total including btw, btw rounded per invoice
for row in servicerows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
buybtwtotals = defaultdict(float)
for row in buyrows:
if transaction.two_to_one_has_btw:
if transaction.two_to_one_btw_per_row:
# Current total including btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0 / (row["btw"]/100. + 1))
else:
# Current total including btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
else:
if transaction.two_to_one_btw_per_row:
# Current total excluding btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0)
btwvalues[row["btw"]] += btw
else:
# Current total excluding btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100.0
btwvalues[row["btw"]] += btw
btwvalues[row["btw"]] += row["prevalue"]
btwtotals[row["btw"]] += btw
buybtwtotals[row["btw"]] += btw
row["btwvalue"] = btw
row["value_exl"] = row["value"] * (1 - row["btw"] / 100.0 / (row["btw"]/100. + 1))
for k, v in btwtotals.items():
btwtotals[k] = int(round(v))
return dict(btwtotals), dict(btwvalues), dict(buybtwtotals), sellrows, buyrows, servicerows
def transaction_record(transaction):
btwtotals, btwvalues, buybtwtotals, sellrows, buyrows, servicerows = transaction_process(transaction)
selltotal = sum(r['prevalue'] for r in sellrows)
buytotal = sum(r['prevalue'] for r in buyrows)
servicetotal = sum(r['prevalue'] for r in servicerows)
total = selltotal - buytotal + servicetotal
if not transaction.two_to_one_has_btw:
total -= sum(buybtwtotals.values())
return {
"reference": str(transaction.reference).zfill(4),
"name": transaction.relation.name + " " + str(transaction.informal_reference).zfill(3),
"sell": sellrows,
"buy": buyrows,
"service": servicerows,
"selltotal": selltotal,
"buytotal": buytotal,
"btwtotals": btwtotals,
"btwvalues": btwvalues,
"btwtotal": sum(btwtotals.values()),
"servicetotal": servicetotal,
"description": transaction.description,
"processeddate": transaction.processeddate,
"deliverydate": transaction.deliverydate,
"total": int(total),
"id": transaction.id,
"revision": transaction.revision,
"lastedit": transaction.time_updated,
"two_to_one_has_btw": transaction.two_to_one_has_btw,
"two_to_one_btw_per_row": transaction.two_to_one_btw_per_row
} | en | 0.916197 | # Easy stuff first # Current total including btw, btw rounded per invoice # Current total including btw, btw rounded per invoice # Current total including btw, btw rounded per row # Current total including btw, btw rounded for full invoice # We should use decimals here, but floats are good enough for now # Current total excluding btw, btw rounded per row # Current total excluding btw, btw rounded for full invoice # We should use decimals here, but floats are good enough for now | 2.363985 | 2 |
salesforce/backend/test_helpers.py | JohnJorgensen19/salesforce | 1 | 6613619 | <reponame>JohnJorgensen19/salesforce
"""
Common helpers for tests, like test decorators
"""
from django.conf import settings
from salesforce import router
import uuid
from unittest import skip, skipUnless, expectedFailure
# random string for tests that accidentally run concurrent
uid = '-' + str(uuid.uuid4())[:7]
sf_alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce')
default_is_sf = router.is_sf_database(sf_alias)
current_user = settings.DATABASES[sf_alias]['USER']
def expectedFailureIf(condition):
"""Conditional 'expectedFailure' decorator for TestCase"""
if condition:
return expectedFailure
else:
return lambda func: func
| """
Common helpers for tests, like test decorators
"""
from django.conf import settings
from salesforce import router
import uuid
from unittest import skip, skipUnless, expectedFailure
# random string for tests that accidentally run concurrent
uid = '-' + str(uuid.uuid4())[:7]
sf_alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce')
default_is_sf = router.is_sf_database(sf_alias)
current_user = settings.DATABASES[sf_alias]['USER']
def expectedFailureIf(condition):
"""Conditional 'expectedFailure' decorator for TestCase"""
if condition:
return expectedFailure
else:
return lambda func: func | en | 0.731131 | Common helpers for tests, like test decorators # random string for tests that accidentally run concurrent Conditional 'expectedFailure' decorator for TestCase | 2.589913 | 3 |
demo/user/urls.py | sonmon/demo1 | 0 | 6613620 | <filename>demo/user/urls.py
from django.urls import path
from . import views
urlpatterns = [
# 用户信息
path(r'add_user/',views.add_user),
path(r'read_user/',views.read_user),
path(r'edit_user/',views.edit_user),
path(r'del_user/',views.del_user),
path(r'list_user/',views.list_user),
# 系统登录,登出
path(r'login/',views.login),
path(r'logout/',views.logout),
# 权限
path(r'add_permission/',views.add_permission),
path(r'read_permission/',views.read_permission),
path(r'list_permission/',views.list_permission),
path(r'edit_permission/',views.edit_permission),
path(r'del_permission/',views.del_permission),
# 角色
path(r'add_role/', views.add_role),
path(r'read_role/', views.read_role),
path(r'edit_role/', views.edit_role),
path(r'del_role/', views.del_role),
path(r'list_role/', views.list_role),
# 用户拥有的角色
path(r'select_user_role/', views.select_user_role),
path(r'list_user_role/', views.list_user_role),
# 角色拥有的权限
path(r'select_role_permission/', views.select_role_permission),
path(r'list_role_permission/', views.list_role_permission),
] | <filename>demo/user/urls.py
from django.urls import path
from . import views
urlpatterns = [
# 用户信息
path(r'add_user/',views.add_user),
path(r'read_user/',views.read_user),
path(r'edit_user/',views.edit_user),
path(r'del_user/',views.del_user),
path(r'list_user/',views.list_user),
# 系统登录,登出
path(r'login/',views.login),
path(r'logout/',views.logout),
# 权限
path(r'add_permission/',views.add_permission),
path(r'read_permission/',views.read_permission),
path(r'list_permission/',views.list_permission),
path(r'edit_permission/',views.edit_permission),
path(r'del_permission/',views.del_permission),
# 角色
path(r'add_role/', views.add_role),
path(r'read_role/', views.read_role),
path(r'edit_role/', views.edit_role),
path(r'del_role/', views.del_role),
path(r'list_role/', views.list_role),
# 用户拥有的角色
path(r'select_user_role/', views.select_user_role),
path(r'list_user_role/', views.list_user_role),
# 角色拥有的权限
path(r'select_role_permission/', views.select_role_permission),
path(r'list_role_permission/', views.list_role_permission),
] | zh | 0.999762 | # 用户信息 # 系统登录,登出 # 权限 # 角色 # 用户拥有的角色 # 角色拥有的权限 | 1.956788 | 2 |
gaia-sdk-python/gaia_sdk/graphql/request/type/ConnectNodeKnowledge.py | leftshiftone/gaia-sdk | 0 | 6613621 |
from gaia_sdk.graphql.request.type.ConnectNodeRemovedImpulse import ConnectNodeRemovedImpulse
from gaia_sdk.graphql.request.type.ConnectNodeUnsetImpulse import ConnectNodeUnsetImpulse
from gaia_sdk.graphql.request.type.ConnectNodeAppendedImpulse import ConnectNodeAppendedImpulse
from gaia_sdk.graphql.request.type.ConnectNodeSetImpulse import ConnectNodeSetImpulse
from gaia_sdk.graphql.request.input.ConnectSetNodeImpulse import ConnectSetNodeImpulse
from gaia_sdk.graphql.request.input.ConnectAppendNodeImpulse import ConnectAppendNodeImpulse
from gaia_sdk.graphql.request.input.ConnectUnsetNodeImpulse import ConnectUnsetNodeImpulse
from gaia_sdk.graphql.request.input.ConnectRemoveNodeImpulse import ConnectRemoveNodeImpulse
from typing import Callable, List
from gaia_sdk.api.VariableRegistry import VariableRegistry
from gaia_sdk.graphql.request.enumeration.Order import Order
from gaia_sdk.graphql.request.enumeration.OrderByField import OrderByField
from gaia_sdk.graphql.request.enumeration.EdgeOrderByField import EdgeOrderByField
from gaia_sdk.graphql.request.enumeration.EdgeType import EdgeType
class ConnectNodeKnowledge(list):
    """Accumulates GraphQL node-knowledge impulses (append/remove/set/unset)
    as deferred render callbacks; ``render`` joins their rendered output.

    Note: ``append`` and ``remove`` intentionally shadow the ``list`` methods
    of the same name (they are the generated GraphQL API), so callbacks must
    be stored via ``list.append`` directly — calling ``self.append(callback)``
    would recursively invoke the two-argument override and raise TypeError.
    """

    def append(self, impulse: 'ConnectAppendNodeImpulse', config: Callable[['ConnectNodeAppendedImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeAppendedImpulse()
            config(entity)
            return f'append(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)``, which re-entered this override
        # with a missing ``config`` argument (TypeError)
        list.append(self, callback)

    def remove(self, impulse: 'ConnectRemoveNodeImpulse', config: Callable[['ConnectNodeRemovedImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeRemovedImpulse()
            config(entity)
            return f'remove(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)`` (see class docstring)
        list.append(self, callback)

    def set(self, impulse: 'ConnectSetNodeImpulse', config: Callable[['ConnectNodeSetImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeSetImpulse()
            config(entity)
            return f'set(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)`` (see class docstring)
        list.append(self, callback)

    def unset(self, impulse: 'ConnectUnsetNodeImpulse', config: Callable[['ConnectNodeUnsetImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeUnsetImpulse()
            config(entity)
            return f'unset(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)`` (see class docstring)
        list.append(self, callback)

    def render(self, registry: 'VariableRegistry'):
        """Render every recorded impulse, joined by single spaces."""
        return " ".join(map(lambda e: e(registry), self))
|
from gaia_sdk.graphql.request.type.ConnectNodeRemovedImpulse import ConnectNodeRemovedImpulse
from gaia_sdk.graphql.request.type.ConnectNodeUnsetImpulse import ConnectNodeUnsetImpulse
from gaia_sdk.graphql.request.type.ConnectNodeAppendedImpulse import ConnectNodeAppendedImpulse
from gaia_sdk.graphql.request.type.ConnectNodeSetImpulse import ConnectNodeSetImpulse
from gaia_sdk.graphql.request.input.ConnectSetNodeImpulse import ConnectSetNodeImpulse
from gaia_sdk.graphql.request.input.ConnectAppendNodeImpulse import ConnectAppendNodeImpulse
from gaia_sdk.graphql.request.input.ConnectUnsetNodeImpulse import ConnectUnsetNodeImpulse
from gaia_sdk.graphql.request.input.ConnectRemoveNodeImpulse import ConnectRemoveNodeImpulse
from typing import Callable, List
from gaia_sdk.api.VariableRegistry import VariableRegistry
from gaia_sdk.graphql.request.enumeration.Order import Order
from gaia_sdk.graphql.request.enumeration.OrderByField import OrderByField
from gaia_sdk.graphql.request.enumeration.EdgeOrderByField import EdgeOrderByField
from gaia_sdk.graphql.request.enumeration.EdgeType import EdgeType
class ConnectNodeKnowledge(list):
    """Accumulates GraphQL node-knowledge impulses (append/remove/set/unset)
    as deferred render callbacks; ``render`` joins their rendered output.

    Note: ``append`` and ``remove`` intentionally shadow the ``list`` methods
    of the same name (they are the generated GraphQL API), so callbacks must
    be stored via ``list.append`` directly — calling ``self.append(callback)``
    would recursively invoke the two-argument override and raise TypeError.
    """

    def append(self, impulse: 'ConnectAppendNodeImpulse', config: Callable[['ConnectNodeAppendedImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeAppendedImpulse()
            config(entity)
            return f'append(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)``, which re-entered this override
        # with a missing ``config`` argument (TypeError)
        list.append(self, callback)

    def remove(self, impulse: 'ConnectRemoveNodeImpulse', config: Callable[['ConnectNodeRemovedImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeRemovedImpulse()
            config(entity)
            return f'remove(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)`` (see class docstring)
        list.append(self, callback)

    def set(self, impulse: 'ConnectSetNodeImpulse', config: Callable[['ConnectNodeSetImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeSetImpulse()
            config(entity)
            return f'set(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)`` (see class docstring)
        list.append(self, callback)

    def unset(self, impulse: 'ConnectUnsetNodeImpulse', config: Callable[['ConnectNodeUnsetImpulse'], None]):
        def callback(registry: 'VariableRegistry'):
            name1 = registry.register("impulse", impulse)
            entity = ConnectNodeUnsetImpulse()
            config(entity)
            return f'unset(impulse:{name1})' + '{' + entity.render(registry) + '}'
        # fixed: was ``self.append(callback)`` (see class docstring)
        list.append(self, callback)

    def render(self, registry: 'VariableRegistry'):
        """Render every recorded impulse, joined by single spaces."""
        return " ".join(map(lambda e: e(registry), self))
| none | 1 | 1.884421 | 2 | |
cogbot/extensions/kick.py | Arcensoth/cogbot | 8 | 6613622 | <filename>cogbot/extensions/kick.py<gh_stars>1-10
import logging
import re
import discord
from discord.ext import commands
from discord.ext.commands import Bot, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class Kick:
    """Cog providing a staff-only command to kick a member, optionally DMing
    them the reason first and recording each step in the mod log."""

    # Matches a user mention such as <@123> or <@!123>; group 1 is the id.
    MENTION_PATTERN = re.compile(r"<@\!?(\w+)>")
    # Matches a raw numeric user id.
    ID_PATTERN = re.compile(r"\d+")

    def __init__(self, bot: CogBot, ext: str):
        self.bot = bot

    @checks.is_staff()
    @commands.has_permissions(kick_members=True)
    @commands.command(pass_context=True, hidden=True)
    async def kick(self, ctx: Context, user: str, *, reason: str = None):
        """Kick ``user`` (a mention, raw id, or name#discriminator) from the
        server, DMing them ``reason`` first if one was given. Status is
        reported by reacting to the invoking message."""
        cmd: discord.Message = ctx.message
        server: discord.Server = cmd.server
        # 1. check for a mention
        mention_match = self.MENTION_PATTERN.match(user)
        if mention_match:
            (user_id,) = mention_match.groups()
            member = server.get_member(user_id)
        # 2. check for a raw user id
        elif self.ID_PATTERN.match(user):
            member = server.get_member(user)
        # 3. check for a user string (doesn't work with spaces, etc)
        elif "#" in user:
            member = server.get_member_named(user)
        # otherwise, error: input matched none of the accepted formats
        else:
            await self.bot.add_reaction(cmd, "➖")
            return
        if not member:
            # lookup failed: no member matched the given input
            await self.bot.add_reaction(cmd, "❓")
            return
        elif member == self.bot.user:
            # refuse to kick the bot itself
            await self.bot.add_reaction(cmd, "🤖")
            return
        # Log the kick only after the member has been validated; previously
        # this ran before the checks above, raising AttributeError on
        # ``member.mention`` whenever the lookup returned None.
        await self.bot.mod_log(
            cmd.author,
            f"kicked {member.mention} with a warning!",
            message=ctx.message,
            icon=":boot:",
        )
        # attempt to DM if a reason was included
        if reason:
            direct_message = (
                f"You were kicked from **{server.name}** for:\n>>> {reason}"
            )
            log.info(f"Kicking <{member.name}> with message: {direct_message}")
            try:
                await self.bot.send_message(member, direct_message)
                await self.bot.mod_log(
                    self.bot.as_member_of(ctx.message.server),
                    f"messaged {member.mention} about being kicked for:\n>>> {reason}",
                    message=ctx.message,
                    icon=":envelope:",
                )
            except:
                # DM failures (e.g. DMs disabled) are logged but do not
                # abort the kick
                log.warning(f"Unable to warn <{member}> about being kicked")
                await self.bot.mod_log(
                    self.bot.as_member_of(ctx.message.server),
                    f"couldn't message {member.mention} about being kicked. They may have DMs disabled.",
                    message=ctx.message,
                    icon=":warning:",
                    color=discord.Color.gold(),
                )
        try:
            await self.bot.kick(member)
        except:
            # kick failed (e.g. missing permissions or role hierarchy)
            log.error(f"Failed to kick <{member}>")
            await self.bot.mod_log(
                self.bot.as_member_of(ctx.message.server),
                f"couldn't kick {member.mention}! You should look into this.",
                message=ctx.message,
                icon=":rotating_light:",
                color=discord.Color.red(),
            )
            await self.bot.add_reaction(cmd, "❗")
            return
        await self.bot.add_reaction(cmd, "👢")
def setup(bot):
    """discord.py extension entry point: register the Kick cog on the bot."""
    bot.add_cog(Kick(bot, __name__))
| <filename>cogbot/extensions/kick.py<gh_stars>1-10
import logging
import re
import discord
from discord.ext import commands
from discord.ext.commands import Bot, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class Kick:
    """Cog providing a staff-only command to kick a member, optionally DMing
    them the reason first and recording each step in the mod log."""

    # Matches a user mention such as <@123> or <@!123>; group 1 is the id.
    MENTION_PATTERN = re.compile(r"<@\!?(\w+)>")
    # Matches a raw numeric user id.
    ID_PATTERN = re.compile(r"\d+")

    def __init__(self, bot: CogBot, ext: str):
        self.bot = bot

    @checks.is_staff()
    @commands.has_permissions(kick_members=True)
    @commands.command(pass_context=True, hidden=True)
    async def kick(self, ctx: Context, user: str, *, reason: str = None):
        """Kick ``user`` (a mention, raw id, or name#discriminator) from the
        server, DMing them ``reason`` first if one was given. Status is
        reported by reacting to the invoking message."""
        cmd: discord.Message = ctx.message
        server: discord.Server = cmd.server
        # 1. check for a mention
        mention_match = self.MENTION_PATTERN.match(user)
        if mention_match:
            (user_id,) = mention_match.groups()
            member = server.get_member(user_id)
        # 2. check for a raw user id
        elif self.ID_PATTERN.match(user):
            member = server.get_member(user)
        # 3. check for a user string (doesn't work with spaces, etc)
        elif "#" in user:
            member = server.get_member_named(user)
        # otherwise, error: input matched none of the accepted formats
        else:
            await self.bot.add_reaction(cmd, "➖")
            return
        if not member:
            # lookup failed: no member matched the given input
            await self.bot.add_reaction(cmd, "❓")
            return
        elif member == self.bot.user:
            # refuse to kick the bot itself
            await self.bot.add_reaction(cmd, "🤖")
            return
        # Log the kick only after the member has been validated; previously
        # this ran before the checks above, raising AttributeError on
        # ``member.mention`` whenever the lookup returned None.
        await self.bot.mod_log(
            cmd.author,
            f"kicked {member.mention} with a warning!",
            message=ctx.message,
            icon=":boot:",
        )
        # attempt to DM if a reason was included
        if reason:
            direct_message = (
                f"You were kicked from **{server.name}** for:\n>>> {reason}"
            )
            log.info(f"Kicking <{member.name}> with message: {direct_message}")
            try:
                await self.bot.send_message(member, direct_message)
                await self.bot.mod_log(
                    self.bot.as_member_of(ctx.message.server),
                    f"messaged {member.mention} about being kicked for:\n>>> {reason}",
                    message=ctx.message,
                    icon=":envelope:",
                )
            except:
                # DM failures (e.g. DMs disabled) are logged but do not
                # abort the kick
                log.warning(f"Unable to warn <{member}> about being kicked")
                await self.bot.mod_log(
                    self.bot.as_member_of(ctx.message.server),
                    f"couldn't message {member.mention} about being kicked. They may have DMs disabled.",
                    message=ctx.message,
                    icon=":warning:",
                    color=discord.Color.gold(),
                )
        try:
            await self.bot.kick(member)
        except:
            # kick failed (e.g. missing permissions or role hierarchy)
            log.error(f"Failed to kick <{member}>")
            await self.bot.mod_log(
                self.bot.as_member_of(ctx.message.server),
                f"couldn't kick {member.mention}! You should look into this.",
                message=ctx.message,
                icon=":rotating_light:",
                color=discord.Color.red(),
            )
            await self.bot.add_reaction(cmd, "❗")
            return
        await self.bot.add_reaction(cmd, "👢")
def setup(bot):
    """discord.py extension entry point: register the Kick cog on the bot."""
    bot.add_cog(Kick(bot, __name__))
| en | 0.831872 | # 1. check for a mention # 2. check for a raw user id # 3. check for a user string (doesn't work with spaces, etc) # otherwise, error # response = "Please provide a mention, an id, or a username + discriminator (without spaces)" # await self.bot.send_message(cmd.channel, response) # response = f"Couldn't find anyone matching the input: {user}" # await self.bot.send_message(cmd.channel, response) # response = f"I don't think you want to do that." # await self.bot.send_message(cmd.channel, response) # attempt to DM if a reason was included # await self.bot.send_message( # cmd.channel, # f"Uh oh! Couldn't kick {member.mention}! You should look into this.", # ) # await self.bot.send_message( # cmd.channel, f"Kicked {member.mention} with a warning!" # ) | 2.638531 | 3 |
rsHRF/rsHRF_GUI/datatypes/misc/subject.py | BIDS-Apps/rsHRF | 16 | 6613623 | <filename>rsHRF/rsHRF_GUI/datatypes/misc/subject.py
import numpy as np
class Subject():
    """
    Stores the information corresponding to a particular subject.
    Attributes:
        1. index       : This is the index of the subject (as determined by BIDS convention)
        2. BOLD_raw    : An array which stores the corresponding Raw BOLD time-series for the subject
        3. BOLD_pre    : An array which stores the corresponding Preprocessed-BOLD time-series for the subject
        4. BOLD_deconv : An array which stores the corresponding Deconvolved-BOLD time-series for the subject
        5. HRF         : An array which stores the corresponding Hemodynamic Response Function time-series for the subject
        -> All the attributes from 2-5, are arrays of TimeSeries objects
    """
    def __init__(self, index):
        self.index = index
        self.BOLD_raw = []
        self.BOLD_pre = []
        self.BOLD_deconv = []
        self.HRF = []

    # getters
    def get_input_filename(self):
        # NOTE(review): self.input_filename is never assigned in this class;
        # calling this raises AttributeError unless a caller sets the
        # attribute externally -- confirm where it is supposed to be set.
        return self.input_filename

    def get_subject_index(self):
        return self.index

    def get_BOLD_raw(self):
        return tuple(self.BOLD_raw)

    def get_BOLD_pre(self):
        return tuple(self.BOLD_pre)

    def get_BOLD_deconv(self):
        return tuple(self.BOLD_deconv)

    def get_HRF(self):
        return tuple(self.HRF)

    # adders: each appends a time-series to the corresponding list and
    # returns the index of the newly added entry
    def add_BOLD_raw(self, ts):
        self.BOLD_raw.append(ts)
        return len(self.BOLD_raw) - 1

    def add_BOLD_deconv(self, ts):
        self.BOLD_deconv.append(ts)
        # fixed: previously returned len(self.BOLD_raw) - 1 (copy-paste bug),
        # which is wrong whenever the lists differ in length
        return len(self.BOLD_deconv) - 1

    def add_BOLD_pre(self, ts):
        self.BOLD_pre.append(ts)
        # fixed: previously returned len(self.BOLD_raw) - 1 (copy-paste bug)
        return len(self.BOLD_pre) - 1

    def add_HRF(self, ts):
        self.HRF.append(ts)
        # fixed: previously returned len(self.BOLD_raw) - 1 (copy-paste bug)
        return len(self.HRF) - 1

    # misc.
    def is_present(self, label, misc, getts=False):
        """ Checks whether a time-series is already present.
            misc takes in all the relevant information which determines the
            uniqueness of a time-series.
            If getts = True, the function returns the time-series object if
            it is present. """
        if label == "BOLD":
            # Looking for Raw BOLD Data; uniqueness is the input file
            for each in self.BOLD_raw:
                if misc == each.get_inputfile():
                    if getts:
                        return each
                    return True
        elif label == "Preprocessed-BOLD":
            # Uniqueness: parameters, mask-file and the associated raw BOLD
            para = misc[0]
            mask = misc[1]
            bold = misc[2]
            for each in self.BOLD_pre:
                if para.compareParameters(each.get_parameters()) \
                        and each.get_maskfile() == misc[1] \
                        and bold.compareTimeSeries(each.get_BOLD_Raw()):
                    if getts:
                        return each
                    return True
        elif label == "HRF":
            # Uniqueness: parameters and the associated preprocessed BOLD
            para = misc[0]
            BOLD_pre = misc[1]
            for each in self.HRF:
                if para.compareParameters(each.get_parameters()) \
                        and BOLD_pre.compareTimeSeries(each.get_associated_BOLD()):
                    if getts:
                        return each
                    return True
        elif label == "Deconvolved-BOLD":
            # Uniqueness: parameters and the associated HRF
            para = misc[0]
            HRF = misc[1]
            for each in self.BOLD_deconv:
                if para.compareParameters(each.get_parameters()) \
                        and HRF.compareTimeSeries(each.get_associated_HRF()):
                    if getts:
                        return each
                    return True
        return False

    def get_time_series_pos(self, ts):
        """
        Takes a time-series as input and returns its position (as a string)
        in the corresponding list, or None if it is not present.
        """
        label = ts.get_label()
        if label == "BOLD":
            arr = self.BOLD_raw
        elif label == "Preprocessed-BOLD":
            arr = self.BOLD_pre
        elif label == "Deconvolved-BOLD":
            arr = self.BOLD_deconv
        elif label == "HRF":
            arr = self.HRF
        else:
            arr = []
        for i in range(len(arr)):
            if ts.compareTimeSeries(arr[i]):
                return str(i)
        return None

    def get_time_series_by_index(self, ts_type, index):
        """ Takes the type and index of a time-series and returns it
            (None for an unknown type). """
        if ts_type == "BOLD":
            arr = self.BOLD_raw
        elif ts_type == "Preprocessed-BOLD":
            arr = self.BOLD_pre
        elif ts_type == "Deconvolved-BOLD":
            arr = self.BOLD_deconv
        elif ts_type == "HRF":
            arr = self.HRF
        else:
            return
        return arr[index]

    def get_plotables(self):
        """
        Returns an array of all the time-series objects that can be plotted
        for the subject. The array contains tuples of the format:
        (time-series label, time-series numpy array)
        """
        out = []
        for i in range(len(self.BOLD_raw)):
            out.append((self.index + "_BOLD_" + str(i), self.BOLD_raw[i].get_ts()))
        for i in range(len(self.BOLD_pre)):
            out.append((self.index + "_Preprocessed-BOLD_" + str(i), self.BOLD_pre[i].get_ts()))
        for i in range(len(self.BOLD_deconv)):
            out.append((self.index + "_Deconvolved-BOLD_" + str(i), self.BOLD_deconv[i].get_ts()))
        for i in range(len(self.HRF)):
            out.append((self.index + "_HRF_" + str(i), self.HRF[i].get_ts()))
        return out

    def get_data_labels(self):
        """
        Returns an array with labels for all the time-series objects for the
        subject.
        """
        out = []
        for i in range(len(self.BOLD_raw)):
            out.append(self.index + "_BOLD_" + str(i))
        for i in range(len(self.BOLD_pre)):
            out.append(self.index + "_Preprocessed-BOLD_" + str(i))
        for i in range(len(self.BOLD_deconv)):
            out.append(self.index + "_Deconvolved-BOLD_" + str(i))
        for i in range(len(self.HRF)):
            out.append(self.index + "_HRF_" + str(i))
        return out
| <filename>rsHRF/rsHRF_GUI/datatypes/misc/subject.py
import numpy as np
class Subject():
    """
    Stores the information corresponding to a particular subject.
    Attributes:
        1. index       : This is the index of the subject (as determined by BIDS convention)
        2. BOLD_raw    : An array which stores the corresponding Raw BOLD time-series for the subject
        3. BOLD_pre    : An array which stores the corresponding Preprocessed-BOLD time-series for the subject
        4. BOLD_deconv : An array which stores the corresponding Deconvolved-BOLD time-series for the subject
        5. HRF         : An array which stores the corresponding Hemodynamic Response Function time-series for the subject
        -> All the attributes from 2-5, are arrays of TimeSeries objects
    """
    def __init__(self, index):
        self.index = index
        self.BOLD_raw = []
        self.BOLD_pre = []
        self.BOLD_deconv = []
        self.HRF = []

    # getters
    def get_input_filename(self):
        # NOTE(review): self.input_filename is never assigned in this class;
        # calling this raises AttributeError unless a caller sets the
        # attribute externally -- confirm where it is supposed to be set.
        return self.input_filename

    def get_subject_index(self):
        return self.index

    def get_BOLD_raw(self):
        return tuple(self.BOLD_raw)

    def get_BOLD_pre(self):
        return tuple(self.BOLD_pre)

    def get_BOLD_deconv(self):
        return tuple(self.BOLD_deconv)

    def get_HRF(self):
        return tuple(self.HRF)

    # adders: each appends a time-series to the corresponding list and
    # returns the index of the newly added entry
    def add_BOLD_raw(self, ts):
        self.BOLD_raw.append(ts)
        return len(self.BOLD_raw) - 1

    def add_BOLD_deconv(self, ts):
        self.BOLD_deconv.append(ts)
        # fixed: previously returned len(self.BOLD_raw) - 1 (copy-paste bug),
        # which is wrong whenever the lists differ in length
        return len(self.BOLD_deconv) - 1

    def add_BOLD_pre(self, ts):
        self.BOLD_pre.append(ts)
        # fixed: previously returned len(self.BOLD_raw) - 1 (copy-paste bug)
        return len(self.BOLD_pre) - 1

    def add_HRF(self, ts):
        self.HRF.append(ts)
        # fixed: previously returned len(self.BOLD_raw) - 1 (copy-paste bug)
        return len(self.HRF) - 1

    # misc.
    def is_present(self, label, misc, getts=False):
        """ Checks whether a time-series is already present.
            misc takes in all the relevant information which determines the
            uniqueness of a time-series.
            If getts = True, the function returns the time-series object if
            it is present. """
        if label == "BOLD":
            # Looking for Raw BOLD Data; uniqueness is the input file
            for each in self.BOLD_raw:
                if misc == each.get_inputfile():
                    if getts:
                        return each
                    return True
        elif label == "Preprocessed-BOLD":
            # Uniqueness: parameters, mask-file and the associated raw BOLD
            para = misc[0]
            mask = misc[1]
            bold = misc[2]
            for each in self.BOLD_pre:
                if para.compareParameters(each.get_parameters()) \
                        and each.get_maskfile() == misc[1] \
                        and bold.compareTimeSeries(each.get_BOLD_Raw()):
                    if getts:
                        return each
                    return True
        elif label == "HRF":
            # Uniqueness: parameters and the associated preprocessed BOLD
            para = misc[0]
            BOLD_pre = misc[1]
            for each in self.HRF:
                if para.compareParameters(each.get_parameters()) \
                        and BOLD_pre.compareTimeSeries(each.get_associated_BOLD()):
                    if getts:
                        return each
                    return True
        elif label == "Deconvolved-BOLD":
            # Uniqueness: parameters and the associated HRF
            para = misc[0]
            HRF = misc[1]
            for each in self.BOLD_deconv:
                if para.compareParameters(each.get_parameters()) \
                        and HRF.compareTimeSeries(each.get_associated_HRF()):
                    if getts:
                        return each
                    return True
        return False

    def get_time_series_pos(self, ts):
        """
        Takes a time-series as input and returns its position (as a string)
        in the corresponding list, or None if it is not present.
        """
        label = ts.get_label()
        if label == "BOLD":
            arr = self.BOLD_raw
        elif label == "Preprocessed-BOLD":
            arr = self.BOLD_pre
        elif label == "Deconvolved-BOLD":
            arr = self.BOLD_deconv
        elif label == "HRF":
            arr = self.HRF
        else:
            arr = []
        for i in range(len(arr)):
            if ts.compareTimeSeries(arr[i]):
                return str(i)
        return None

    def get_time_series_by_index(self, ts_type, index):
        """ Takes the type and index of a time-series and returns it
            (None for an unknown type). """
        if ts_type == "BOLD":
            arr = self.BOLD_raw
        elif ts_type == "Preprocessed-BOLD":
            arr = self.BOLD_pre
        elif ts_type == "Deconvolved-BOLD":
            arr = self.BOLD_deconv
        elif ts_type == "HRF":
            arr = self.HRF
        else:
            return
        return arr[index]

    def get_plotables(self):
        """
        Returns an array of all the time-series objects that can be plotted
        for the subject. The array contains tuples of the format:
        (time-series label, time-series numpy array)
        """
        out = []
        for i in range(len(self.BOLD_raw)):
            out.append((self.index + "_BOLD_" + str(i), self.BOLD_raw[i].get_ts()))
        for i in range(len(self.BOLD_pre)):
            out.append((self.index + "_Preprocessed-BOLD_" + str(i), self.BOLD_pre[i].get_ts()))
        for i in range(len(self.BOLD_deconv)):
            out.append((self.index + "_Deconvolved-BOLD_" + str(i), self.BOLD_deconv[i].get_ts()))
        for i in range(len(self.HRF)):
            out.append((self.index + "_HRF_" + str(i), self.HRF[i].get_ts()))
        return out

    def get_data_labels(self):
        """
        Returns an array with labels for all the time-series objects for the
        subject.
        """
        out = []
        for i in range(len(self.BOLD_raw)):
            out.append(self.index + "_BOLD_" + str(i))
        for i in range(len(self.BOLD_pre)):
            out.append(self.index + "_Preprocessed-BOLD_" + str(i))
        for i in range(len(self.BOLD_deconv)):
            out.append(self.index + "_Deconvolved-BOLD_" + str(i))
        for i in range(len(self.HRF)):
            out.append(self.index + "_HRF_" + str(i))
        return out
| en | 0.785724 | Stores the information corresponding to a particular subject. Attrbutes: 1. index : This is the index of the subject (as determined by BIDS convention) 2. BOLD_raw : An array which stores the corresponding Raw BOLD time-series for the subject 3. BOLD_pre : An array which stores the corresponding Preprocessed-BOLD time-series for the subject 4. BOLD_deconv : An array which stores the corresponding Deconvolved-BOLD time-series for the subject 5. HRF : An array which stores the corresponding Hemodynamic Response Function time-series for the subject -> All the attributes from 2-5, are arrays of TimeSeries objects # getters # adding to time-series objects of the existing subject # misc. Checks whether a time-series is already present Misc takes in all the relevant information which determines the uniqueness of a time-series If getts = True, the function returns the time-series object if it is present # Looking for Raw BOLD Data # Determines whether the Raw BOLD data is already present # Checks for the input-file # Looking for Preprocessed BOLD Data # Determines whether Preprocessed BOLD data is already present # Checks the parameters, mask-file and RAW Bold # Looking for HRF Data # Determines whether the HRF is already present # Checks the parameters and Preprocessed BOLD # Looking for Deconvolved BOLD Data # Determines whether the Deconvolved BOLD is already present # Checks the associated HRF Takes the time-series as input and returns its position in the array Takes the index of a time-series and returns the time-series Returns an array of all the time-series objects that can be plotted for the subject The array contains of tuples of the format : (time-series labels, time-series numpy arrays) Returns an array with labels for all the time-series objects for the subject | 2.881718 | 3 |
src/ploomber/clients/storage/aws.py | idomic/ploomber | 0 | 6613624 | import json
from pathlib import PurePosixPath, Path
try:
import boto3
except ImportError:
boto3 = None
try:
import botocore
except ImportError:
botocore = None
from ploomber.util.default import find_root_recursively
from ploomber.util.util import requires
from ploomber.clients.storage.abc import AbstractStorageClient
from ploomber.exceptions import RemoteFileNotFound
class S3Client(AbstractStorageClient):
    """Client for Amazon S3

    Parameters
    ----------
    bucket_name : str
        Bucket to use

    parent : str
        Parent folder in the bucket to save files

    json_credentials_path : str, default=None
        Use the given JSON file to authenticate the client. Must contain
        aws_access_key_id and aws_secret_access_key.
        If None, client is initialized without arguments

    path_to_project_root : str, default=None
        Path to project root. Product locations are stored in a path relative
        to this folder. e.g. If project root is ``/my-project``, backup is
        ``/backup`` and you save a file in ``/my-project/reports/report.html``,
        it will be saved at ``/backup/reports/report.html``. If None, it
        looks up recursively for ``environment.yml``, ``requirements.txt`` and
        ``setup.py`` (in that order) file and assigns its parent as project
        root folder.

    credentials_relative_to_project_root : bool, default=True
        If True, relative paths in json_credentials_path are so to the
        path_to_project_root, instead of the current working directory

    **kwargs
        Keyword arguments for the client constructor
    """
    @requires(['boto3', 'botocore'], name='S3Client')
    def __init__(self,
                 bucket_name,
                 parent,
                 json_credentials_path=None,
                 path_to_project_root=None,
                 credentials_relative_to_project_root=True,
                 **kwargs):
        # Resolve the project root: the explicit argument wins, otherwise
        # search upwards from the current directory.
        project_root = (path_to_project_root
                        or find_root_recursively(raise_=True))
        self._path_to_project_root = Path(project_root).resolve()

        # A relative credentials path is interpreted relative to the project
        # root (unless credentials_relative_to_project_root is False).
        if (credentials_relative_to_project_root and json_credentials_path
                and not Path(json_credentials_path).is_absolute()):
            json_credentials_path = Path(self._path_to_project_root,
                                         json_credentials_path)

        self._client_kwargs = kwargs

        # Credentials loaded from the JSON file take precedence over (and are
        # merged with) any extra keyword arguments.
        if json_credentials_path:
            c = json.loads(Path(json_credentials_path).read_text())
            self._client_kwargs = {
                'aws_access_key_id': c['aws_access_key_id'],
                'aws_secret_access_key': c['aws_secret_access_key'],
                **kwargs
            }

        self._client = self._init_client()
        self._parent = parent
        self._bucket_name = bucket_name

    def _init_client(self):
        # Build the boto3 S3 client from the stored kwargs; also used to
        # re-create the client after unpickling (see __setstate__).
        return boto3.client('s3', **self._client_kwargs)

    def download(self, local, destination=None):
        """Download the remote counterpart of ``local`` into ``destination``
        (defaults to ``local``). A single object is fetched directly; if the
        remote path is a prefix, every object under it is fetched, recreating
        the directory layout. Raises RemoteFileNotFound if neither an object
        nor a non-empty prefix exists remotely.
        """
        remote = self._remote_path(local)
        destination = destination or local

        # FIXME: call _download directly and catch the exception to avoid
        # doing to api calls
        if self._is_file(remote):
            self._download(destination, remote)
        else:
            paginator = self._client.get_paginator('list_objects_v2')

            for page in paginator.paginate(Bucket=self._bucket_name,
                                           Prefix=remote):
                if 'Contents' not in page:
                    raise RemoteFileNotFound('Could not download '
                                             f'{local!r} using client {self}: '
                                             'No such file or directory')

                for remote_file in page['Contents']:
                    remote_path = remote_file['Key']
                    # Rebuild each object's path relative to the prefix so
                    # the local layout mirrors the remote one.
                    rel = PurePosixPath(remote_path).relative_to(remote)
                    destination_file = Path(destination, *rel.parts)
                    destination_file.parent.mkdir(exist_ok=True, parents=True)
                    self._download(str(destination_file), remote_path)

    def _upload(self, local):
        # Upload a single local file to its mapped remote key.
        remote = self._remote_path(local)
        self._client.upload_file(str(local), self._bucket_name, remote)

    def _download(self, local, remote):
        # Fetch a single object, creating local parent directories as needed.
        Path(local).parent.mkdir(exist_ok=True, parents=True)
        self._client.download_file(self._bucket_name, remote, str(local))

    def _is_file(self, remote):
        # True if `remote` names an existing object: a 404 from the HEAD
        # request means "not a file"; any other client error is re-raised.
        resource = boto3.resource('s3', **self._client_kwargs)

        try:
            resource.Object(self._bucket_name, remote).load()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == '404':
                return False
            else:
                raise
        else:
            return True

    def _is_dir(self, remote):
        # True if at least one object exists under the given prefix.
        bucket = boto3.resource('s3', **self._client_kwargs).Bucket(
            self._bucket_name)
        return any(bucket.objects.filter(Prefix=remote))

    def __getstate__(self):
        # Drop the boto3 client when pickling; it is re-created on unpickle
        # in __setstate__.
        state = self.__dict__.copy()
        del state['_client']
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._client = self._init_client()

    def __repr__(self):
        return (f'{type(self).__name__}(bucket_name={self._bucket_name!r}, '
                f'parent={self._parent!r}, '
                f'path_to_project_root={str(self._path_to_project_root)!r})')
| import json
from pathlib import PurePosixPath, Path
try:
import boto3
except ImportError:
boto3 = None
try:
import botocore
except ImportError:
botocore = None
from ploomber.util.default import find_root_recursively
from ploomber.util.util import requires
from ploomber.clients.storage.abc import AbstractStorageClient
from ploomber.exceptions import RemoteFileNotFound
class S3Client(AbstractStorageClient):
    """Client for Amazon S3

    Parameters
    ----------
    bucket_name : str
        Bucket to use

    parent : str
        Parent folder in the bucket to save files

    json_credentials_path : str, default=None
        Use the given JSON file to authenticate the client. Must contain
        aws_access_key_id and aws_secret_access_key.
        If None, client is initialized without arguments

    path_to_project_root : str, default=None
        Path to project root. Product locations are stored in a path relative
        to this folder. e.g. If project root is ``/my-project``, backup is
        ``/backup`` and you save a file in ``/my-project/reports/report.html``,
        it will be saved at ``/backup/reports/report.html``. If None, it
        looks up recursively for ``environment.yml``, ``requirements.txt`` and
        ``setup.py`` (in that order) file and assigns its parent as project
        root folder.

    credentials_relative_to_project_root : bool, default=True
        If True, relative paths in json_credentials_path are so to the
        path_to_project_root, instead of the current working directory

    **kwargs
        Keyword arguments for the client constructor
    """
    @requires(['boto3', 'botocore'], name='S3Client')
    def __init__(self,
                 bucket_name,
                 parent,
                 json_credentials_path=None,
                 path_to_project_root=None,
                 credentials_relative_to_project_root=True,
                 **kwargs):
        # Resolve the project root: the explicit argument wins, otherwise
        # search upwards from the current directory.
        project_root = (path_to_project_root
                        or find_root_recursively(raise_=True))
        self._path_to_project_root = Path(project_root).resolve()

        # A relative credentials path is interpreted relative to the project
        # root (unless credentials_relative_to_project_root is False).
        if (credentials_relative_to_project_root and json_credentials_path
                and not Path(json_credentials_path).is_absolute()):
            json_credentials_path = Path(self._path_to_project_root,
                                         json_credentials_path)

        self._client_kwargs = kwargs

        # Credentials loaded from the JSON file take precedence over (and are
        # merged with) any extra keyword arguments.
        if json_credentials_path:
            c = json.loads(Path(json_credentials_path).read_text())
            self._client_kwargs = {
                'aws_access_key_id': c['aws_access_key_id'],
                'aws_secret_access_key': c['aws_secret_access_key'],
                **kwargs
            }

        self._client = self._init_client()
        self._parent = parent
        self._bucket_name = bucket_name

    def _init_client(self):
        # Build the boto3 S3 client from the stored kwargs; also used to
        # re-create the client after unpickling (see __setstate__).
        return boto3.client('s3', **self._client_kwargs)

    def download(self, local, destination=None):
        """Download the remote counterpart of ``local`` into ``destination``
        (defaults to ``local``). A single object is fetched directly; if the
        remote path is a prefix, every object under it is fetched, recreating
        the directory layout. Raises RemoteFileNotFound if neither an object
        nor a non-empty prefix exists remotely.
        """
        remote = self._remote_path(local)
        destination = destination or local

        # FIXME: call _download directly and catch the exception to avoid
        # doing to api calls
        if self._is_file(remote):
            self._download(destination, remote)
        else:
            paginator = self._client.get_paginator('list_objects_v2')

            for page in paginator.paginate(Bucket=self._bucket_name,
                                           Prefix=remote):
                if 'Contents' not in page:
                    raise RemoteFileNotFound('Could not download '
                                             f'{local!r} using client {self}: '
                                             'No such file or directory')

                for remote_file in page['Contents']:
                    remote_path = remote_file['Key']
                    # Rebuild each object's path relative to the prefix so
                    # the local layout mirrors the remote one.
                    rel = PurePosixPath(remote_path).relative_to(remote)
                    destination_file = Path(destination, *rel.parts)
                    destination_file.parent.mkdir(exist_ok=True, parents=True)
                    self._download(str(destination_file), remote_path)

    def _upload(self, local):
        # Upload a single local file to its mapped remote key.
        remote = self._remote_path(local)
        self._client.upload_file(str(local), self._bucket_name, remote)

    def _download(self, local, remote):
        # Fetch a single object, creating local parent directories as needed.
        Path(local).parent.mkdir(exist_ok=True, parents=True)
        self._client.download_file(self._bucket_name, remote, str(local))

    def _is_file(self, remote):
        # True if `remote` names an existing object: a 404 from the HEAD
        # request means "not a file"; any other client error is re-raised.
        resource = boto3.resource('s3', **self._client_kwargs)

        try:
            resource.Object(self._bucket_name, remote).load()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == '404':
                return False
            else:
                raise
        else:
            return True

    def _is_dir(self, remote):
        # True if at least one object exists under the given prefix.
        bucket = boto3.resource('s3', **self._client_kwargs).Bucket(
            self._bucket_name)
        return any(bucket.objects.filter(Prefix=remote))

    def __getstate__(self):
        # Drop the boto3 client when pickling; it is re-created on unpickle
        # in __setstate__.
        state = self.__dict__.copy()
        del state['_client']
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._client = self._init_client()

    def __repr__(self):
        return (f'{type(self).__name__}(bucket_name={self._bucket_name!r}, '
                f'parent={self._parent!r}, '
                f'path_to_project_root={str(self._path_to_project_root)!r})')
| en | 0.762994 | Client for Amazon S3 Parameters ---------- bucket_name : str Bucket to use parent : str Parent folder in the bucket to save files json_credentials_path : str, default=None Use the given JSON file to authenticate the client. Must contain aws_access_key_id and aws_secret_access_key. If None, client is initialized without arguments path_to_project_root : str, default=None Path to project root. Product locations are stored in a path relative to this folder. e.g. If project root is ``/my-project``, backup is ``/backup`` and you save a file in ``/my-project/reports/report.html``, it will be saved at ``/backup/reports/report.html``. If None, it looks up recursively for ``environment.yml``, ``requirements.txt`` and ``setup.py`` (in that order) file and assigns its parent as project root folder. credentials_relative_to_project_root : bool, default=True If True, relative paths in json_credentials_path are so to the path_to_project_root, instead of the current working directory **kwargs Keyword arguments for the client constructor # FIXME: call _download directly and catch the exception to avoid # doing to api calls | 2.60501 | 3 |
scripts/update_translation_csv.py | vrk-kpa/opendata | 16 | 6613625 | <gh_stars>10-100
import csv
import sys

# Merge per-language translations from a combined CSV (one column per
# language, keyed by msgid) back into a single-language PO-style CSV,
# writing the merged rows to stdout.
if len(sys.argv) < 4:
    print("Usage: %s <combined_csv> <lang_csv> <lang>" % sys.argv[0])
    sys.exit()

csv_file_combined = sys.argv[1]
csv_file = sys.argv[2]
csv_lang = sys.argv[3]

# Index the combined sheet by msgid; use `with` so handles are closed
# promptly (the originals were opened inline and never closed).
with open(csv_file_combined, 'r') as combined_handle:
    combined_data = {row['msgid']: row
                     for row in csv.DictReader(combined_handle)}
with open(csv_file, 'r') as lang_handle:
    rows = list(csv.DictReader(lang_handle))

fields = ['msgid', 'msgid_plural', 'flags', 'references',
          'extractedComments', 'comments', 'msgstr[0]', 'msgstr[1]']
writer = csv.DictWriter(sys.stdout, fields, quoting=csv.QUOTE_ALL)
writer.writeheader()

for row in rows:
    combined_values = combined_data.get(row['msgid'], {})
    # Prefer the combined sheet's value; keep the existing translation
    # when the msgid is absent from the combined sheet.
    row['msgstr[0]'] = combined_values.get(csv_lang, row['msgstr[0]'])
    row['msgstr[1]'] = combined_values.get('%s_plural' % csv_lang, row['msgstr[1]'])
    writer.writerow(row)
| import csv
import sys
if len(sys.argv) < 4:
print("Usage: %s <combined_csv> <lang_csv> <lang>" % sys.argv[0])
sys.exit()
csv_file_combined = sys.argv[1]
csv_file = sys.argv[2]
csv_lang = sys.argv[3]
combined_data = {row['msgid']: row for row in csv.DictReader(open(csv_file_combined, 'r'))}
rows = list(csv.DictReader(open(csv_file, 'r')))
fields = ['msgid','msgid_plural','flags','references','extractedComments','comments','msgstr[0]','msgstr[1]']
writer = csv.DictWriter(sys.stdout, fields, quoting=csv.QUOTE_ALL)
writer.writeheader()
for row in rows:
combined_values = combined_data.get(row['msgid'], {})
row['msgstr[0]'] = combined_values.get('%s' % csv_lang, row['msgstr[0]'])
row['msgstr[1]'] = combined_values.get('%s_plural' % csv_lang, row['msgstr[1]'])
writer.writerow(row) | none | 1 | 2.887544 | 3 | |
2017/10_Oct/19/01-.precision.py | z727354123/pyCharmTest | 0 | 6613626 | # 默认是 .6
# Default precision for %f is 6 decimal places.
print("%f" % 18) # 18.000000
# Keep 2 decimal places.
print("%.2f" % 18) # 18.00
# Keep 0 decimal places.
print("%.0f" % 18) # 18
# Integer literals written in binary/octal/hex all print in decimal via %i.
print('%i' % 0b10) # 2
print('%i' % 0o10) # 8
print('%i' % 0x10) # 16
| # 默认是 .6
print("%f" % 18) # 18.000000
# 保留 2 位
print("%.2f" % 18) # 18.00
# 保留 0 位
print("%.0f" % 18) # 18
# 进制表示方法
print('%i' % 0b10) # 2
print('%i' % 0o10) # 8
print('%i' % 0x10) # 16
| zh | 0.672943 | # 默认是 .6 # 18.000000 # 保留 2 位 # 18.00 # 保留 0 位 # 18 # 进制表示方法 # 2 # 8 # 16 | 2.303301 | 2 |
tests/testapp/test_template_tags.py | AgDude/gargoyle | 138 | 6613627 | <reponame>AgDude/gargoyle<gh_stars>100-1000
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase
from gargoyle.builtins import UserConditionSet
from gargoyle.manager import SwitchManager
from gargoyle.models import DISABLED, GLOBAL, SELECTIVE, Switch
class BaseTemplateTagTests(TestCase):
    """Shared fixtures for the gargoyle template-tag tests."""

    def setUp(self):
        # A user for percentage-based switch conditions, plus a fresh
        # SwitchManager with the user condition set registered.
        self.user = User.objects.create(username='foo', email='<EMAIL>')
        self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True)
        self.gargoyle.register(UserConditionSet(User))
class IfSwitchTests(BaseTemplateTagTests):
    """Exercise the {% ifswitch %} tag: global, disabled, conditional,
    malformed, and explicit-instance usage."""

    def test_simple(self):
        """A GLOBAL switch renders the main branch."""
        Switch.objects.create(key='test', status=GLOBAL)
        template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% endifswitch %}
""")
        rendered = template.render(Context())
        assert 'hello world!' in rendered

    def test_else(self):
        """A DISABLED switch falls through to the {% else %} branch."""
        Switch.objects.create(key='test', status=DISABLED)
        template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
        rendered = template.render(Context())
        assert 'foo bar baz' in rendered
        assert 'hello world!' not in rendered

    def test_with_request(self):
        """A SELECTIVE switch picks up the request's user from the context.

        NOTE(review): assumes the fixture user hashes into the 0-50
        percent bucket -- confirm if this test ever flakes.
        """
        condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
        switch = Switch.objects.create(key='test', status=SELECTIVE)
        # Re-fetch through the manager so add_condition affects the
        # registered instance (the first binding is deliberately replaced).
        switch = self.gargoyle['test']
        switch.add_condition(
            condition_set=condition_set,
            field_name='percent',
            condition='0-50',
        )
        request = HttpRequest()
        request.user = self.user
        template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
        rendered = template.render(Context({'request': request}))
        assert 'foo bar baz' not in rendered
        assert 'hello world!' in rendered

    def test_missing_name(self):
        """Omitting the switch name raises TemplateSyntaxError at parse time."""
        with pytest.raises(TemplateSyntaxError):
            Template("""
{% load gargoyle_tags %}
{% ifswitch %}
hello world!
{% endifswitch %}
""")

    def test_with_custom_objects(self):
        """Condition objects (request.user) can be passed to the tag explicitly."""
        condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
        switch = Switch.objects.create(key='test', status=SELECTIVE)
        switch = self.gargoyle['test']
        switch.add_condition(
            condition_set=condition_set,
            field_name='percent',
            condition='0-50',
        )
        request = HttpRequest()
        request.user = self.user
        # Pass in request.user explicitly.
        template = Template("""
{% load gargoyle_tags %}
{% ifswitch test request.user %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
        rendered = template.render(Context({'request': request}))
        assert 'foo bar baz' not in rendered
        assert 'hello world!' in rendered
class IfNotSwitchTests(BaseTemplateTagTests):
    """Exercise the {% ifnotswitch %} tag (inverse of {% ifswitch %})."""

    def test_simple(self):
        """A GLOBAL switch suppresses the main branch."""
        Switch.objects.create(key='test', status=GLOBAL)
        template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test %}
hello world!
{% endifnotswitch %}
""")
        rendered = template.render(Context())
        assert 'hello world!' not in rendered

    def test_else(self):
        """A DISABLED switch renders the main (not-active) branch."""
        Switch.objects.create(key='test', status=DISABLED)
        template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test %}
hello world!
{% else %}
foo bar baz
{% endifnotswitch %}
""")
        rendered = template.render(Context())
        assert 'foo bar baz' not in rendered
        assert 'hello world!' in rendered

    def test_with_request(self):
        """An active SELECTIVE switch (user in 0-50%) renders the else branch.

        NOTE(review): assumes the fixture user hashes into the 0-50
        percent bucket -- confirm if this test ever flakes.
        """
        condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
        switch = Switch.objects.create(key='test', status=SELECTIVE)
        # Re-fetch through the manager so add_condition affects the
        # registered instance (the first binding is deliberately replaced).
        switch = self.gargoyle['test']
        switch.add_condition(
            condition_set=condition_set,
            field_name='percent',
            condition='0-50',
        )
        request = HttpRequest()
        request.user = self.user
        template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test %}
hello world!
{% else %}
foo bar baz
{% endifnotswitch %}
""")
        rendered = template.render(Context({'request': request}))
        assert 'foo bar baz' in rendered
        assert 'hello world!' not in rendered

    def test_missing_name(self):
        """Omitting the switch name raises TemplateSyntaxError at parse time."""
        with pytest.raises(TemplateSyntaxError):
            Template("""
{% load gargoyle_tags %}
{% ifnotswitch %}
hello world!
{% endifnotswitch %}
""")

    def test_with_custom_objects(self):
        """Condition objects (request.user) can be passed to the tag explicitly."""
        condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
        switch = Switch.objects.create(key='test', status=SELECTIVE)
        switch = self.gargoyle['test']
        switch.add_condition(
            condition_set=condition_set,
            field_name='percent',
            condition='0-50',
        )
        request = HttpRequest()
        request.user = self.user
        # Pass in request.user explicitly.
        template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test request.user %}
hello world!
{% else %}
foo bar baz
{% endifnotswitch %}
""")
        rendered = template.render(Context({'request': request}))
        assert 'foo bar baz' in rendered
        assert 'hello world!' not in rendered
| from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase
from gargoyle.builtins import UserConditionSet
from gargoyle.manager import SwitchManager
from gargoyle.models import DISABLED, GLOBAL, SELECTIVE, Switch
class BaseTemplateTagTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='foo', email='<EMAIL>')
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True)
self.gargoyle.register(UserConditionSet(User))
class IfSwitchTests(BaseTemplateTagTests):
def test_simple(self):
Switch.objects.create(key='test', status=GLOBAL)
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% endifswitch %}
""")
rendered = template.render(Context())
assert 'hello world!' in rendered
def test_else(self):
Switch.objects.create(key='test', status=DISABLED)
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
rendered = template.render(Context())
assert 'foo bar baz' in rendered
assert 'hello world!' not in rendered
def test_with_request(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(key='test', status=SELECTIVE)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
request = HttpRequest()
request.user = self.user
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
rendered = template.render(Context({'request': request}))
assert 'foo bar baz' not in rendered
assert 'hello world!' in rendered
def test_missing_name(self):
with pytest.raises(TemplateSyntaxError):
Template("""
{% load gargoyle_tags %}
{% ifswitch %}
hello world!
{% endifswitch %}
""")
def test_with_custom_objects(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(key='test', status=SELECTIVE)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
request = HttpRequest()
request.user = self.user
# Pass in request.user explicitly.
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test request.user %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
rendered = template.render(Context({'request': request}))
assert 'foo bar baz' not in rendered
assert 'hello world!' in rendered
class IfNotSwitchTests(BaseTemplateTagTests):
def test_simple(self):
Switch.objects.create(key='test', status=GLOBAL)
template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test %}
hello world!
{% endifnotswitch %}
""")
rendered = template.render(Context())
assert 'hello world!' not in rendered
def test_else(self):
Switch.objects.create(key='test', status=DISABLED)
template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test %}
hello world!
{% else %}
foo bar baz
{% endifnotswitch %}
""")
rendered = template.render(Context())
assert 'foo bar baz' not in rendered
assert 'hello world!' in rendered
def test_with_request(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(key='test', status=SELECTIVE)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
request = HttpRequest()
request.user = self.user
template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test %}
hello world!
{% else %}
foo bar baz
{% endifnotswitch %}
""")
rendered = template.render(Context({'request': request}))
assert 'foo bar baz' in rendered
assert 'hello world!' not in rendered
def test_missing_name(self):
with pytest.raises(TemplateSyntaxError):
Template("""
{% load gargoyle_tags %}
{% ifnotswitch %}
hello world!
{% endifnotswitch %}
""")
def test_with_custom_objects(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(key='test', status=SELECTIVE)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
request = HttpRequest()
request.user = self.user
# Pass in request.user explicitly.
template = Template("""
{% load gargoyle_tags %}
{% ifnotswitch test request.user %}
hello world!
{% else %}
foo bar baz
{% endifnotswitch %}
""")
rendered = template.render(Context({'request': request}))
assert 'foo bar baz' in rendered
assert 'hello world!' not in rendered | en | 0.124395 | {% load gargoyle_tags %} {% ifswitch test %} hello world! {% endifswitch %} {% load gargoyle_tags %} {% ifswitch test %} hello world! {% else %} foo bar baz {% endifswitch %} {% load gargoyle_tags %} {% ifswitch test %} hello world! {% else %} foo bar baz {% endifswitch %} {% load gargoyle_tags %} {% ifswitch %} hello world! {% endifswitch %} # Pass in request.user explicitly. {% load gargoyle_tags %} {% ifswitch test request.user %} hello world! {% else %} foo bar baz {% endifswitch %} {% load gargoyle_tags %} {% ifnotswitch test %} hello world! {% endifnotswitch %} {% load gargoyle_tags %} {% ifnotswitch test %} hello world! {% else %} foo bar baz {% endifnotswitch %} {% load gargoyle_tags %} {% ifnotswitch test %} hello world! {% else %} foo bar baz {% endifnotswitch %} {% load gargoyle_tags %} {% ifnotswitch %} hello world! {% endifnotswitch %} # Pass in request.user explicitly. {% load gargoyle_tags %} {% ifnotswitch test request.user %} hello world! {% else %} foo bar baz {% endifnotswitch %} | 1.981848 | 2 |
utils/parse-corpus-header.py | TomPlano/varbench | 7 | 6613628 | #!/usr/bin/env python
"""
Script to add syscall benchmarks to the existing libsyzcorpus
"""

# C preamble injected after the include guard: benchmark bookkeeping types
# used by the instrumented syscall prototypes.
header = """
#define MAX_SYSCALLS 4207
#define TO_NSECS(sec,nsec)\\
((sec) * 1000000000 + (nsec))
#include <time.h>
#include <stdint.h>
typedef struct {
int16_t syscall_number;
intptr_t ret_val;
unsigned long long time_in;
unsigned long long time_out;
} vb_syscall_info_t;
"""


def usage():
    """Print usage information (TODO: not yet implemented)."""
    return


def parse_file(program_src):
    """Return the corpus header text with benchmark instrumentation added.

    Two rewrites are applied line by line:
    * every ``int _NNN(void);`` prototype has ``void`` replaced by the
      benchmark parameter list, and
    * the ``#define __LIBSYZCORPUS_H__`` include-guard line gets the
      instrumentation `header` appended after it.
    """
    with open(program_src, "r") as f:
        lines = f.readlines()
    out = []
    for line in lines:
        if ("int _" in line) and ("(void);" in line):
            line = line.replace("void", "vb_syscall_info_t * scall_info, int * num_calls")
        if "#define __LIBSYZCORPUS_H__" in line:
            line += header
        out.append(line)
    return ''.join(out) + '\n'


if __name__ == "__main__":
    # print(...) with a single argument works on both Python 2 and 3;
    # the original used the Python-2-only `print s` statement.
    s = parse_file("../src/kernels/corpuses/sample-corpus/libsyzcorpus.h")
    print(s)
| #!/usr/bin/env python
"""
Script to add syscall benchmarks to the existing libsyzcorpus
"""
header ="""
#define MAX_SYSCALLS 4207
#define TO_NSECS(sec,nsec)\\
((sec) * 1000000000 + (nsec))
#include <time.h>
#include <stdint.h>
typedef struct {
int16_t syscall_number;
intptr_t ret_val;
unsigned long long time_in;
unsigned long long time_out;
} vb_syscall_info_t;
"""
#Do this later
def usage():
return
def parse_file(program_src):
with open(program_src, "r") as f:
lines = f.readlines()
s=[]
for line_number, line in enumerate(lines):
if ("int _" in line) and ("(void);" in line):
line = line.replace("void", "vb_syscall_info_t * scall_info, int * num_calls")
if "#define __LIBSYZCORPUS_H__" in line:
line += header
s.append(line)
return ''.join(s) + '\n'
if __name__ == "__main__":
s= parse_file("../src/kernels/corpuses/sample-corpus/libsyzcorpus.h");
print s
| en | 0.419951 | #!/usr/bin/env python Script to add syscall benchmarks to the existing libsyzcorpus #define MAX_SYSCALLS 4207 #define TO_NSECS(sec,nsec)\\ ((sec) * 1000000000 + (nsec)) #include <time.h> #include <stdint.h> typedef struct { int16_t syscall_number; intptr_t ret_val; unsigned long long time_in; unsigned long long time_out; } vb_syscall_info_t; #Do this later | 2.341753 | 2 |
catalogos/models.py | abcdatoz/noocs | 0 | 6613629 | <filename>catalogos/models.py
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Tipo(models.Model):
    """Generic catalogue type identified by a short key and display name."""
    clave = models.CharField(max_length=20)
    nombre = models.CharField(max_length=100)

    def __str__(self):
        return self.nombre
class UsuarioEscuela(models.Model):
    """Association between a user, a municipality and a school.

    NOTE(review): these are raw integer ids rather than ForeignKeys --
    presumably they reference User/Municipio/Escuela rows; the database
    enforces no referential integrity here. Confirm before relying on it.
    """
    usuario = models.IntegerField()
    municipio = models.IntegerField()
    escuela = models.IntegerField()
class Banner(models.Model):
    """Site banner image with audit (created/updated) metadata."""
    titulo = models.CharField(max_length=255)
    imagen = models.ImageField(upload_to='noocs_images/banner')
    status = models.BooleanField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    # NOTE(review): updated_by is free text while created_by is a FK --
    # confirm this asymmetry is intentional (repeated across models below).
    updated_by = models.CharField(max_length=256)
class Municipio(models.Model):
    """Municipality catalogue entry, ordered by its key."""
    clave = models.CharField(max_length=20)
    nombre = models.CharField(max_length=255)
    status = models.BooleanField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    updated_by = models.CharField(max_length=256)

    class Meta:
        # Default queryset ordering: by municipality key.
        ordering = ['clave']
class Escuela(models.Model):
    """School belonging to a municipality."""
    municipio = models.ForeignKey(Municipio, on_delete=models.CASCADE, null=True)
    clave = models.CharField(max_length=50)
    nombre = models.CharField(max_length=255)
    direccion = models.CharField(max_length=255)
    status = models.BooleanField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    updated_by = models.CharField(max_length=256)
class Curso(models.Model):
    """Course offered on the platform, with image and three description blurbs."""
    nombre = models.CharField(max_length=255)
    imagen = models.ImageField(upload_to='noocs_images/cursos')
    descripcionA = models.CharField(max_length=500)
    descripcionB = models.CharField(max_length=500)
    descripcionC = models.CharField(max_length=500)
    status = models.BooleanField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    updated_by = models.CharField(max_length=256)
class VideoActividades(models.Model):
    """Video/activity item within a course, shown in `orden` sequence."""
    curso = models.ForeignKey(Curso, on_delete=models.CASCADE, null=False)
    tipo = models.CharField(max_length=50)
    orden = models.IntegerField()
    nombre = models.CharField(max_length=250)
    direccionURL = models.CharField(max_length=1028)

    class Meta:
        # Sort by explicit position first, then alphabetically.
        ordering = ['orden', 'nombre']
class Question(models.Model):
    """Quiz question for a course; `inciso` is a one-character item label."""
    curso = models.ForeignKey(Curso, on_delete=models.CASCADE, null=False)
    inciso = models.CharField(max_length=1)
    pregunta = models.CharField(max_length=255)

    class Meta:
        ordering = ['inciso']
class Answer(models.Model):
    """Answer option for a quiz question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE, null=False)
    opcion = models.CharField(max_length=100)
    es_correcta = models.BooleanField()

    class Meta:
        # Descending on the flag: correct answer(s) sort first.
        ordering = ['-es_correcta']
class MisCursos(models.Model):
    """Enrollment record: a user's course with enrollment date and status.

    NOTE(review): `usuario` is a raw integer id (presumably a User pk),
    not a ForeignKey -- confirm against the callers.
    """
    curso = models.ForeignKey(Curso, on_delete=models.CASCADE, null=False)
    usuario = models.IntegerField()
    fecha = models.DateTimeField(auto_now_add=True)
    estatus = models.CharField(max_length=255)
| <filename>catalogos/models.py
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Tipo(models.Model):
clave = models.CharField(max_length=20)
nombre = models.CharField(max_length=100)
def __str__(self):
return self.nombre
class UsuarioEscuela(models.Model):
usuario = models.IntegerField()
municipio = models.IntegerField()
escuela = models.IntegerField()
class Banner(models.Model):
titulo = models.CharField(max_length=255)
imagen = models.ImageField(upload_to='noocs_images/banner')
status = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
updated_by = models.CharField(max_length=256)
class Municipio(models.Model):
clave = models.CharField(max_length=20)
nombre = models.CharField(max_length=255)
status = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
updated_by = models.CharField(max_length=256)
class Meta:
ordering = ['clave']
class Escuela(models.Model):
municipio = models.ForeignKey(Municipio, on_delete=models.CASCADE, null=True)
clave = models.CharField(max_length=50)
nombre = models.CharField(max_length=255)
direccion = models.CharField(max_length=255)
status = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
updated_by = models.CharField(max_length=256)
class Curso(models.Model):
nombre = models.CharField(max_length=255)
imagen = models.ImageField(upload_to='noocs_images/cursos')
descripcionA =models.CharField(max_length=500)
descripcionB =models.CharField(max_length=500)
descripcionC =models.CharField(max_length=500)
status = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
updated_by = models.CharField(max_length=256)
class VideoActividades(models.Model):
curso = models.ForeignKey(Curso, on_delete=models.CASCADE, null=False)
tipo = models.CharField(max_length=50)
orden = models.IntegerField()
nombre = models.CharField(max_length=250)
direccionURL = models.CharField(max_length=1028)
class Meta:
ordering = ['orden', 'nombre']
class Question(models.Model):
curso = models.ForeignKey(Curso, on_delete=models.CASCADE, null=False)
inciso = models.CharField(max_length=1)
pregunta = models.CharField(max_length=255)
class Meta:
ordering = ['inciso']
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, null=False)
opcion = models.CharField(max_length=100)
es_correcta = models.BooleanField()
class Meta:
ordering = ['-es_correcta']
class MisCursos(models.Model):
curso = models.ForeignKey(Curso, on_delete=models.CASCADE, null=False)
usuario = models.IntegerField()
fecha = models.DateTimeField(auto_now_add=True)
estatus = models.CharField(max_length=255)
| en | 0.963489 | # Create your models here. | 2.27737 | 2 |
other/custom_checks.py | bad-decisions/YolkBot-1 | 0 | 6613630 | <gh_stars>0
def is_on_team(ctx):
    """Return True when the invoking user's id is in the bot's team roster."""
    member_ids = ctx.bot.team["member_ids"]
    return ctx.author.id in member_ids
| def is_on_team(ctx):
return ctx.author.id in ctx.bot.team["member_ids"] | none | 1 | 1.948227 | 2 | |
tests/bootstrap_adv.py | amehta1/t1-python | 24 | 6613631 | #!/usr/bin/env python
from datetime import datetime, timedelta
import json
import logging
import sys
from terminalone import T1
from terminalone.utils import credentials
# Flag the running interpreter major version; dict iteration APIs differ
# between Python 2 (iteritems) and 3 (items) -- see iteritems() below.
if sys.version_info.major > 2:
    PY3 = True
else:
    PY3 = False
def iteritems(d):
    """Iterate over (key, value) pairs of *d*, portable across Python 2/3."""
    return d.items() if PY3 else d.iteritems()
def edit_name(name):
    """Return *name* with a numeric suffix appended or incremented.

    'Campaign'    -> 'Campaign 1'
    'Campaign '   -> 'Campaign 1'
    'Campaign 1'  -> 'Campaign 2'
    'Campaign 19' -> 'Campaign 20'

    Returns None for a falsy name. The original implementation only bumped
    the final digit, so 'Campaign 19' incorrectly became 'Campaign 110';
    the whole trailing digit run is now treated as the counter.
    """
    import re
    if not name:
        return
    match = re.search(r'\d+$', name)
    if match:
        # Increment the entire trailing number, not just its last digit.
        return name[:match.start()] + str(int(match.group()) + 1)
    if name.endswith(' '):
        return name + '1'
    return name + ' 1'
def setup(credentials):
    """Create a cookie-authenticated MediaMath T1 API session."""
    return T1(auth_method='cookie', **credentials)
now = datetime.now()

# Entity ids discovered while bootstrapping (or preloaded from
# defaults.json); None values are filled in by learn_props() before each
# create call.
learned_vars = {
    'advertiser_id': None,
    'agency_id': None,
    'campaign_id': None,
    'provider_id': None,
    'concept_id': None,
}

# Each seed below is a tuple of:
#   ([property dicts to create], T1 collection name,
#    learned_vars key to record the created id under, or None)
campaigns = (
    [
        {
            'name': 'Main Campaign',
            'status': False,
            'use_default_ad_server': True,
            'ad_server_id': 9,
            'advertiser_id': None,
            'currency_code': 'USD',
            'start_date': now + timedelta(days=30),
            'end_date': now + timedelta(days=60),
            'frequency_type': 'no-limit',
            'goal_category': 'audience',
            'goal_type': 'spend',
            'goal_value': 1.00,
            'margin_pct': 0.00,
            'service_type': 'SELF',
            'total_budget': 1.00,
        },
    ], 'campaigns', 'campaign_id',
)
strategies = (
    [
        {
            'name': 'RTB Test Strategy',
            'budget': 1.00,
            'campaign_id': None,
            'use_campaign_start': True,
            'use_campaign_end': True,
            'frequency_type': 'no-limit',
            'goal_type': 'spend',
            'max_bid': 1.00,
            'pacing_amount': 1.00,
            'pacing_interval': 'day',
            'pacing_type': 'even',
            'status': False,
            'type': 'REM',
        },
    ], 'strategies', None,
)
pixels = (
    [
        {
            'name': 'Test Event Pixel',
            'advertiser_id': None,
            'eligible': True,
            'pixel_type': 'event',
            'status': True,
            'tag_type': 'js',
        },
        {
            'name': 'Test Data Pixel',
            'agency_id': None,
            'provider_id': None,
            'cost_cpm': 0.00,
            'cost_cpts': 0.00,
            'cost_pct_cpm': 0.00,
            'eligible': True,
            'pixel_type': 'data',
            'pricing': 'CPM',
            'tag_type': 'image',
        }
    ], 'pixel_bundles', None,
)
concepts = (
    [
        {
            'name': 'AdAge',
            'advertiser_id': None,
            'status': True,
        }
    ], 'concepts', 'concept_id',
)
creatives = (
    [
        {
            'name': 'AdAge 300x250',
            'advertiser_id': None,
            'ad_server_type': 'OTHER',
            'concept_id': None,
            'external_identifier': '1',
            'height': 1,
            'width': 1,
            'status': True,
            'tag': '<script type="text/javascript"></script>',
            'tag_type': 'SCRIPT',
            'tpas_ad_tag_name': 'not-applicable',
        }
    ], 'atomic_creatives', None,
)

# Need to iterate in a certain order. campaign needs to be created before
# strategy is created, for instance, so that we can fill in campaign_id
order = [
    campaigns,
    strategies,
    concepts,
    creatives,
    pixels,
]
def learn_props(props):
    """Fill None-valued ids in *props* from the learned_vars registry."""
    for key in props:
        if props[key] is None and key in learned_vars:
            props[key] = learned_vars[key]
def bootstrap_advertiser(t1):
    """Ensure every seed entity in `order` exists in T1.

    For each (seed_props, collection, id_key) tuple: when the collection
    holds fewer entities than seeds, create each seed (resolving None ids
    via learn_props) and record the new id in learned_vars; otherwise
    record the first existing entity's id so later seeds can reference it.
    """
    for item in order:
        items, count = t1.get(item[1], count=True)
        if count < len(item[0]):
            for propset in item[0]:
                learn_props(propset)
                i = t1.new(item[1], properties=propset)
                i.save()
                if item[2] is not None:
                    learned_vars[item[2]] = i.id
        else:
            if item[2] is not None:
                # Collection already populated: reuse the first entity's id.
                learned_vars[item[2]] = next(items).id
def load_defaults(filename):
    """Merge default entity ids from a JSON file into learned_vars."""
    with open(filename) as f:
        data = json.load(f)
        learned_vars.update(data)
def main():
    """Authenticate against T1 and bootstrap the advertiser seed data."""
    t1 = setup(credentials())
    load_defaults('defaults.json')
    bootstrap_advertiser(t1)
if __name__ == '__main__':
    import argparse

    __parser = argparse.ArgumentParser(description='bootstrap helper')
    __parser.add_argument('-v', '--verbose', action='store_true', help='debug logging')
    args = __parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    main()
| #!/usr/bin/env python
from datetime import datetime, timedelta
import json
import logging
import sys
from terminalone import T1
from terminalone.utils import credentials
if sys.version_info.major > 2:
PY3 = True
else:
PY3 = False
def iteritems(d):
if PY3:
return d.items()
return d.iteritems()
def edit_name(name):
if not name:
return
last_char = name[-1]
if not last_char.isdigit():
if last_char != ' ':
return name + ' 1'
return name + '1'
return name[:-1] + str(int(last_char) + 1)
def setup(credentials):
return T1(auth_method='cookie', **credentials)
now = datetime.now()
learned_vars = {
'advertiser_id': None,
'agency_id': None,
'campaign_id': None,
'provider_id': None,
'concept_id': None,
}
campaigns = (
[
{
'name': 'Main Campaign',
'status': False,
'use_default_ad_server': True,
'ad_server_id': 9,
'advertiser_id': None,
'currency_code': 'USD',
'start_date': now + timedelta(days=30),
'end_date': now + timedelta(days=60),
'frequency_type': 'no-limit',
'goal_category': 'audience',
'goal_type': 'spend',
'goal_value': 1.00,
'margin_pct': 0.00,
'service_type': 'SELF',
'total_budget': 1.00,
},
], 'campaigns', 'campaign_id',
)
strategies = (
[
{
'name': 'RTB Test Strategy',
'budget': 1.00,
'campaign_id': None,
'use_campaign_start': True,
'use_campaign_end': True,
'frequency_type': 'no-limit',
'goal_type': 'spend',
'max_bid': 1.00,
'pacing_amount': 1.00,
'pacing_interval': 'day',
'pacing_type': 'even',
'status': False,
'type': 'REM',
},
], 'strategies', None,
)
pixels = (
[
{
'name': 'Test Event Pixel',
'advertiser_id': None,
'eligible': True,
'pixel_type': 'event',
'status': True,
'tag_type': 'js',
},
{
'name': 'Test Data Pixel',
'agency_id': None,
'provider_id': None,
'cost_cpm': 0.00,
'cost_cpts': 0.00,
'cost_pct_cpm': 0.00,
'eligible': True,
'pixel_type': 'data',
'pricing': 'CPM',
'tag_type': 'image',
}
], 'pixel_bundles', None,
)
concepts = (
[
{
'name': 'AdAge',
'advertiser_id': None,
'status': True,
}
], 'concepts', 'concept_id',
)
creatives = (
[
{
'name': 'AdAge 300x250',
'advertiser_id': None,
'ad_server_type': 'OTHER',
'concept_id': None,
'external_identifier': '1',
'height': 1,
'width': 1,
'status': True,
'tag': '<script type="text/javascript"></script>',
'tag_type': 'SCRIPT',
'tpas_ad_tag_name': 'not-applicable',
}
], 'atomic_creatives', None,
)
# Need to iterate in a certain order. campaign needs to be created before
# strategy is created, for instance, so that we can fill in campaign_id
order = [
campaigns,
strategies,
concepts,
creatives,
pixels,
]
def learn_props(props):
for key, value in iteritems(props):
if value is None and key in learned_vars:
props[key] = learned_vars[key]
def bootstrap_advertiser(t1):
for item in order:
items, count = t1.get(item[1], count=True)
if count < len(item[0]):
for propset in item[0]:
learn_props(propset)
i = t1.new(item[1], properties=propset)
i.save()
if item[2] is not None:
learned_vars[item[2]] = i.id
else:
if item[2] is not None:
learned_vars[item[2]] = next(items).id
def load_defaults(filename):
with open(filename) as f:
data = json.load(f)
learned_vars.update(data)
def main():
t1 = setup(credentials())
load_defaults('defaults.json')
bootstrap_advertiser(t1)
if __name__ == '__main__':
import argparse
__parser = argparse.ArgumentParser(description='bootstrap helper')
__parser.add_argument('-v', '--verbose', action='store_true', help='debug logging')
args = __parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
main()
| en | 0.912203 | #!/usr/bin/env python # Need to iterate in a certain order. campaign needs to be created before # strategy is created, for instance, so that we can fill in campaign_id | 2.160504 | 2 |
arbitrager_wrapper.py | orrelln/arbitrage-trader | 4 | 6613632 | <filename>arbitrager_wrapper.py
from arbitrage.initializer import Initializer
import sys
from arbitrage.arbitrager import Arbitrager
from scripts.wrappers import indef_call, timed_call
from scripts.decorators import exception_catch
@exception_catch('error')
def inner_loop(arbitrager_obj):
    """One arbitrage pass: refresh tickers, log them, scan order books
    for profit, and spawn the trader. Exceptions are caught/logged by
    the exception_catch decorator."""
    arbitrager_obj.load_tickers()
    arbitrager_obj.ticker_percentages()
    arbitrager_obj.log_tickers()
    arbitrager_obj.order_book_profit()
    arbitrager_obj.create_trader()
@exception_catch('error')
def arbitrager_loop(arbitrager_obj):
    """Run inner_loop every 30s for 1200s (40 iterations), then refresh
    exchange metadata so newly listed pairs are picked up next cycle."""
    timed_call(inner_loop, 30, int(1200 / 30), arbitrager_obj)
    arbitrager_obj.exchanges = arbitrager_obj.load_exchanges()
    arbitrager_obj.exchange_pairs = arbitrager_obj.load_exchange_pairs()
    arbitrager_obj.inter_pairs = arbitrager_obj.load_inter_pairs()
def main():
    """Construct the Arbitrager and run the trading loop indefinitely."""
    arbitrager_obj = Arbitrager()
    indef_call(arbitrager_loop, 0, arbitrager_obj)


if __name__ == '__main__':
    main()
from arbitrage.initializer import Initializer
import sys
from arbitrage.arbitrager import Arbitrager
from scripts.wrappers import indef_call, timed_call
from scripts.decorators import exception_catch
@exception_catch('error')
def inner_loop(arbitrager_obj):
arbitrager_obj.load_tickers()
arbitrager_obj.ticker_percentages()
arbitrager_obj.log_tickers()
arbitrager_obj.order_book_profit()
arbitrager_obj.create_trader()
@exception_catch('error')
def arbitrager_loop(arbitrager_obj):
timed_call(inner_loop, 30, int(1200 / 30), arbitrager_obj)
arbitrager_obj.exchanges = arbitrager_obj.load_exchanges()
arbitrager_obj.exchange_pairs = arbitrager_obj.load_exchange_pairs()
arbitrager_obj.inter_pairs = arbitrager_obj.load_inter_pairs()
def main():
arbitrager_obj = Arbitrager()
indef_call(arbitrager_loop, 0, arbitrager_obj)
if __name__ == '__main__':
main() | none | 1 | 2.545885 | 3 | |
align/inventory.py | jwestgard/aws-invalign | 0 | 6613633 | import csv
from io import StringIO
from zipfile import ZipFile
import os
import sys
class Inventory():
    '''Class representing an inventory of assets,
    with option to read from various file formats'''

    def __init__(self, path):
        """Load *path* into self.contents, trying several text encodings.

        Exits the process with status 1 when the path is not a regular
        file. Sets self.reachable accordingly.
        """
        encodings = ['ascii', 'utf-8', 'latin-1']
        self.path = path
        self.reachable = os.path.isfile(path)
        if self.reachable:
            for encoding in encodings:
                try:
                    with open(path, encoding=encoding) as handle:
                        self.contents = handle.read()
                    break
                except ValueError:
                    # UnicodeDecodeError is a ValueError subclass;
                    # try the next candidate encoding.
                    continue
            else:
                # for/else: only reached when every encoding failed.
                # (In the original the placement of this print relative
                # to the loop was ambiguous.)
                print('could not decode file')
        else:
            print(f'Could not access {self.path}')
            sys.exit(1)

    def from_zipfile(self, filename, ziparchive):
        """Read *filename* out of *ziparchive* and classify it.

        Sets self.bytes, self.text (utf-8 with latin-1 fallback) and
        self.type: 'dirlist' for Windows `dir` output, otherwise 'csv'.
        """
        with ZipFile(ziparchive) as source:
            with source.open(filename) as handle:
                self.bytes = handle.read()
        try:
            self.text = self.bytes.decode('utf-8')
        except UnicodeDecodeError:
            self.text = self.bytes.decode('latin-1')
        if self.text.startswith(' Volume in drive'):
            self.type = 'dirlist'
        else:
            self.type = 'csv'

    def from_csv(self):
        """Parse self.contents as CSV (TODO: not yet implemented)."""
        pass

    def from_dirlist(self):
        """Parse Windows `dir` listing text in self.contents into
        self.assets as (filename, size_in_bytes, timestamp) tuples."""
        self.assets = []
        for line in StringIO(self.contents).readlines():
            # Header/summary lines in `dir` output are space-indented.
            if line.startswith(' '):
                continue
            parts = line.strip('\n').split()
            length = len(parts)
            # Guard the length BEFORE indexing parts[3]: the original
            # raised IndexError on lines with 1-3 tokens.
            if length < 4 or parts[3] == '<DIR>':
                continue
            if length >= 5:
                timestamp = ' '.join(parts[:3])
                # Strip thousands separators (e.g. '1,024') before int().
                size = int(''.join(
                    char for char in parts[3] if char.isdigit()
                ))
                # File names may themselves contain spaces.
                filename = ' '.join(parts[4:])
                self.assets.append((filename, size, timestamp))
| import csv
from io import StringIO
from zipfile import ZipFile
import os
import sys
class Inventory():
'''Class representing an inventory of assets,
with option to read from various file formats'''
def __init__(self, path):
encodings = ['ascii', 'utf-8', 'latin-1']
self.path = path
self.reachable = os.path.isfile(path)
if self.reachable:
for encoding in encodings:
try:
with open(path, encoding=encoding) as handle:
self.contents = handle.read()
break
except ValueError:
continue
print('could not decode file')
else:
print(f'Could not access {self.path}')
sys.exit(1)
def from_zipfile(self, filename, ziparchive):
with ZipFile(ziparchive) as source:
with source.open(filename) as handle:
self.bytes = handle.read()
try:
self.text = self.bytes.decode('utf-8')
except UnicodeDecodeError:
self.text = self.bytes.decode('latin-1')
if self.text.startswith(' Volume in drive'):
self.type = 'dirlist'
else:
self.type = 'csv'
def from_csv(self):
pass
def from_dirlist(self):
self.assets = []
for line in StringIO(self.contents).readlines():
if line.startswith(' '):
continue
parts = line.strip('\n').split()
length = len(parts)
if length == 0 or parts[3] == '<DIR>':
continue
elif length >= 5:
timestamp = ' '.join(parts[:3])
bytes = int(''.join(
[char for char in parts[3] if char.isdigit()]
))
filename = ' '.join(parts[4:])
self.assets.append((filename, bytes, timestamp))
| en | 0.96389 | Class representing an inventory of assets, with option to read from various file formats | 3.299577 | 3 |
pyfire/stream/stanzas/errors.py | RavidLevi98/pyfire | 0 | 6613634 | <filename>pyfire/stream/stanzas/errors.py
# -*- coding: utf-8 -*-
"""
pyfire.stream.stanzas.errors
~~~~~~~~~~~~~~~~~~~~
Holds all Stanzas Errors/Exceptions defined in RFC6120 Section 8.3.3
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
from pyfire.errors import XMPPProtocolError
import pyfire.configuration as config
class StanzaError(XMPPProtocolError):
    """Base class for all stanza errors that are
    caused while stanza processing

    Builds the <error type="..."><condition xmlns="urn:ietf:params:xml:ns:
    xmpp-stanzas"/></error> child on the reply element per RFC 6120 sec. 8.3.
    """

    def __init__(self, request, error_type, error_name):
        # Mirror the offending stanza's tag (message/presence/iq) in the reply.
        XMPPProtocolError.__init__(self,
                                   request.tag,
                                   ""
                                   )
        try:
            if request.get("id") is not None:
                self.element.set("id", request.get("id"))
            # BUG FIX: ElementTree's Element.get() returns None for a missing
            # attribute instead of raising KeyError, so the old bare
            # set("to", request.get("from")) could store None and break
            # serialization later -- guard explicitly.
            if request.get("from") is not None:
                self.element.set("to", request.get("from"))
            # may still raise KeyError when the config option is missing
            self.element.set("from", config.getlist('listeners', 'domains')[0])
        except KeyError:
            pass
        self.error = ET.Element("error")
        self.error.set("type", error_type)
        self.message = ET.Element(error_name)
        self.message.set("xmlns", "urn:ietf:params:xml:ns:xmpp-stanzas")
        self.error.append(self.message)
        self.element.append(self.error)
# Concrete stanza error conditions (RFC 6120 section 8.3.3).  The error
# "type" passed to StanzaError follows RFC 6120 section 8.3.2:
#   auth   -- retry after providing credentials
#   cancel -- do not retry (the error cannot be remedied)
#   modify -- retry after changing the data sent
#   wait   -- the error is temporary; retry after waiting
class BadRequestError(StanzaError):
    """The sender has sent a stanza containing XML that does not conform to
    the appropriate schema or that it cannot be processed
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "modify", "bad-request")


class ConflictError(StanzaError):
    """Access cannot be granted because an existing resource exists with the
    same name or address
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "cancel", "conflict")


class FeatureNotImplementedError(StanzaError):
    """The feature represented in the XML stanza is not implemented by the
    intended recipient or an intermediate server and therefore the stanza
    cannot be processed
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "cancel",
                             "feature-not-implemented")


class ForbiddenError(StanzaError):
    """The requesting entity does not possess the necessary permissions to
    perform an action that only certain authorized roles or individuals
    are allowed to complete
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "auth", "forbidden")


class GoneError(StanzaError):
    """The recipient or server can no longer be contacted at this address"""
    # *uri* is the new contact address, placed as text of the <gone/> element.
    def __init__(self, request, uri):
        StanzaError.__init__(self, request, "cancel", "gone")
        self.message.text = uri
        # TODO: Add "by" attribute to self.error
        # if we can determine who we are


class InternalServerError(StanzaError):
    """The server has experienced a misconfiguration or other internal error
    that prevents it from processing the stanza
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "cancel", "internal-server-error")


class ItemNotFoundError(StanzaError):
    """The addressed JID or item requested cannot be found"""
    def __init__(self, request):
        StanzaError.__init__(self, request, "cancel", "item-not-found")


class JIDMalformedError(StanzaError):
    """Invalid JID has been set in Stanzas"""
    def __init__(self, request):
        StanzaError.__init__(self, request, "modify", "jid-malformed")


class NotAcceptableError(StanzaError):
    """The recipient or server understands the request but cannot process it
    because the request does not meet criteria defined by the recipient
    or server
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "modify", "not-acceptable")


class NotAllowedError(StanzaError):
    """The recipient or server does not allow any entity to perform the
    action
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "cancel", "not-allowed")


class NotAuthorizedError(StanzaError):
    """The sender needs to provide valid credentials before being allowed to
    perform the action
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "auth", "not-authorized")


class PolicyViolationError(StanzaError):
    """The entity has violated some local service policy"""
    # RFC 6120 allows type "modify" or "wait" here; an optional human-readable
    # *policy_text* is attached as a <text/> child of the condition.
    def __init__(self, request, policy_text=None):
        StanzaError.__init__(self, request, "modify", "policy-violation")
        # TODO: Add "by" attribute to self.error
        # if we can determine who we are
        if policy_text is not None:
            text = ET.Element("text")
            text.set("xmlns", "urn:ietf:params:xml:ns:xmpp-stanzas")
            text.text = policy_text
            self.message.append(text)


class RecipientUnavailableError(StanzaError):
    """The intended recipient is temporarily unavailable, undergoing
    maintenance, etc.
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "wait", "recipient-unavailable")


class RedirectError(StanzaError):
    """The recipient or server is redirecting requests for this information
    to another entity
    """
    # *redirect_to* is the replacement address, stored as the element text.
    def __init__(self, request, redirect_to):
        StanzaError.__init__(self, request, "modify", "redirect")
        self.message.text = redirect_to


class RegistrationRequiredError(StanzaError):
    """The requesting entity is not authorized to access the requested
    service because prior registration is necessary
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "auth", "registration-required")


class RemoteServerNotFoundError(StanzaError):
    """A remote server or service specified as part or all of the JID of the
    intended recipient does not exist or cannot be resolved
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "cancel",
                             "remote-server-not-found")


class RemoteServerTimeoutError(StanzaError):
    """A remote server or service specified as part or all of the JID of the
    intended recipient (or needed to fulfill a request) was resolved but
    communications could not be established within a reasonable amount of
    time
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "wait", "remote-server-timeout")


class ResourceConstraintError(StanzaError):
    """The server or recipient is busy or lacks the system resources
    necessary to service the request
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "wait", "resource-constraint")
class ServiceUnavailableError(StanzaError):
    """The server or recipient does not currently provide the requested
    service
    """
    def __init__(self, request):
        # BUG FIX: RFC 6120 section 8.3.3.19 defines this condition element
        # as <service-unavailable/>; "resource-unavailable" is not a defined
        # stanza error condition and would be rejected by compliant peers.
        StanzaError.__init__(self, request, "cancel", "service-unavailable")
# Remaining stanza error conditions from RFC 6120 section 8.3.3; see the
# legend of error types ("auth"/"cancel"/"modify"/"wait") in section 8.3.2.
class SubscriptionRequiredError(StanzaError):
    """The requesting entity is not authorized to access the requested
    service because a prior subscription is necessary
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "auth", "subscription-required")


class UndefinedConditionError(StanzaError):
    """The error condition is not one of those defined by the other
    conditions
    """
    # RFC 6120 permits any error type for <undefined-condition/>.
    def __init__(self, request):
        StanzaError.__init__(self, request, "modify", "undefined-condition")


class UnexpectedRequestError(StanzaError):
    """The recipient or server understood the request but was not expecting
    it at this time
    """
    def __init__(self, request):
        StanzaError.__init__(self, request, "modify", "unexpected-request")
| <filename>pyfire/stream/stanzas/errors.py
# -*- coding: utf-8 -*-
"""
pyfire.stream.stanzas.errors
~~~~~~~~~~~~~~~~~~~~
Holds all Stanzas Errors/Exceptions defined in RFC6120 Section 8.3.3
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
from pyfire.errors import XMPPProtocolError
import pyfire.configuration as config
class StanzaError(XMPPProtocolError):
"""Base class for all stanza errors that are
caused while stanza processing
"""
def __init__(self, request, error_type, error_name):
XMPPProtocolError.__init__(self,
request.tag,
""
)
try:
if request.get("id") is not None:
self.element.set("id", request.get("id"))
self.element.set("to", request.get("from"))
self.element.set("from", config.getlist('listeners', 'domains')[0])
except KeyError:
pass
self.error = ET.Element("error")
self.error.set("type", error_type)
self.message = ET.Element(error_name)
self.message.set("xmlns", "urn:ietf:params:xml:ns:xmpp-stanzas")
self.error.append(self.message)
self.element.append(self.error)
class BadRequestError(StanzaError):
"""The sender has sent a stanza containing XML that does not conform to
the appropriate schema or that it cannot be processed
"""
def __init__(self, request):
StanzaError.__init__(self, request, "modify", "bad-request")
class ConflictError(StanzaError):
"""Access cannot be granted because an existing resource exists with the
same name or address
"""
def __init__(self, request):
StanzaError.__init__(self, request, "cancel", "conflict")
class FeatureNotImplementedError(StanzaError):
"""The feature represented in the XML stanza is not implemented by the
intended recipient or an intermediate server and therefore the stanza
cannot be processed
"""
def __init__(self, request):
StanzaError.__init__(self, request, "cancel",
"feature-not-implemented")
class ForbiddenError(StanzaError):
"""The requesting entity does not possess the necessary permissions to
perform an action that only certain authorized roles or individuals
are allowed to complete
"""
def __init__(self, request):
StanzaError.__init__(self, request, "auth", "forbidden")
class GoneError(StanzaError):
"""The recipient or server can no longer be contacted at this address"""
def __init__(self, request, uri):
StanzaError.__init__(self, request, "cancel", "gone")
self.message.text = uri
# TODO: Add "by" attribute to self.error
# if we can determine who we are
class InternalServerError(StanzaError):
"""The server has experienced a misconfiguration or other internal error
that prevents it from processing the stanza
"""
def __init__(self, request):
StanzaError.__init__(self, request, "cancel", "internal-server-error")
class ItemNotFoundError(StanzaError):
"""The addressed JID or item requested cannot be found"""
def __init__(self, request):
StanzaError.__init__(self, request, "cancel", "item-not-found")
class JIDMalformedError(StanzaError):
"""Invalid JID has been set in Stanzas"""
def __init__(self, request):
StanzaError.__init__(self, request, "modify", "jid-malformed")
class NotAcceptableError(StanzaError):
"""The recipient or server understands the request but cannot process it
because the request does not meet criteria defined by the recipient
or server
"""
def __init__(self, request):
StanzaError.__init__(self, request, "modify", "not-acceptable")
class NotAllowedError(StanzaError):
"""The recipient or server does not allow any entity to perform the
action
"""
def __init__(self, request):
StanzaError.__init__(self, request, "cancel", "not-allowed")
class NotAuthorizedError(StanzaError):
"""The sender needs to provide valid credentials before being allowed to
perform the action
"""
def __init__(self, request):
StanzaError.__init__(self, request, "auth", "not-authorized")
class PolicyViolationError(StanzaError):
"""The entity has violated some local service policy"""
def __init__(self, request, policy_text=None):
StanzaError.__init__(self, request, "modify", "policy-violation")
# TODO: Add "by" attribute to self.error
# if we can determine who we are
if policy_text is not None:
text = ET.Element("text")
text.set("xmlns", "urn:ietf:params:xml:ns:xmpp-stanzas")
text.text = policy_text
self.message.append(text)
class RecipientUnavailableError(StanzaError):
"""The intended recipient is temporarily unavailable, undergoing
maintenance, etc.
"""
def __init__(self, request):
StanzaError.__init__(self, request, "wait", "recipient-unavailable")
class RedirectError(StanzaError):
"""The recipient or server is redirecting requests for this information
to another entity
"""
def __init__(self, request, redirect_to):
StanzaError.__init__(self, request, "modify", "redirect")
self.message.text = redirect_to
class RegistrationRequiredError(StanzaError):
"""The requesting entity is not authorized to access the requested
service because prior registration is necessary
"""
def __init__(self, request):
StanzaError.__init__(self, request, "auth", "registration-required")
class RemoteServerNotFoundError(StanzaError):
"""A remote server or service specified as part or all of the JID of the
intended recipient does not exist or cannot be resolved
"""
def __init__(self, request):
StanzaError.__init__(self, request, "cancel",
"remote-server-not-found")
class RemoteServerTimeoutError(StanzaError):
"""A remote server or service specified as part or all of the JID of the
intended recipient (or needed to fulfill a request) was resolved but
communications could not be established within a reasonable amount of
time
"""
def __init__(self, request):
StanzaError.__init__(self, request, "wait", "remote-server-timeout")
class ResourceConstraintError(StanzaError):
"""The server or recipient is busy or lacks the system resources
necessary to service the request
"""
def __init__(self, request):
StanzaError.__init__(self, request, "wait", "resource-constraint")
class ServiceUnavailableError(StanzaError):
"""The server or recipient does not currently provide the requested
service
"""
def __init__(self, request):
StanzaError.__init__(self, request, "cancel", "resource-unavailable")
class SubscriptionRequiredError(StanzaError):
"""The requesting entity is not authorized to access the requested
service because a prior subscription is necessary
"""
def __init__(self, request):
StanzaError.__init__(self, request, "auth", "subscription-required")
class UndefinedConditionError(StanzaError):
"""The error condition is not one of those defined by the other
conditions
"""
def __init__(self, request):
StanzaError.__init__(self, request, "modify", "undefined-condition")
class UnexpectedRequestError(StanzaError):
"""The recipient or server understood the request but was not expecting
it at this time
"""
def __init__(self, request):
StanzaError.__init__(self, request, "modify", "unexpected-request")
| en | 0.907562 | # -*- coding: utf-8 -*- pyfire.stream.stanzas.errors ~~~~~~~~~~~~~~~~~~~~ Holds all Stanzas Errors/Exceptions defined in RFC6120 Section 8.3.3 :copyright: 2011 by the pyfire Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. Base class for all stanza errors that are caused while stanza processing The sender has sent a stanza containing XML that does not conform to the appropriate schema or that it cannot be processed Access cannot be granted because an existing resource exists with the same name or address The feature represented in the XML stanza is not implemented by the intended recipient or an intermediate server and therefore the stanza cannot be processed The requesting entity does not possess the necessary permissions to perform an action that only certain authorized roles or individuals are allowed to complete The recipient or server can no longer be contacted at this address # TODO: Add "by" attribute to self.error # if we can determine who we are The server has experienced a misconfiguration or other internal error that prevents it from processing the stanza The addressed JID or item requested cannot be found Invalid JID has been set in Stanzas The recipient or server understands the request but cannot process it because the request does not meet criteria defined by the recipient or server The recipient or server does not allow any entity to perform the action The sender needs to provide valid credentials before being allowed to perform the action The entity has violated some local service policy # TODO: Add "by" attribute to self.error # if we can determine who we are The intended recipient is temporarily unavailable, undergoing maintenance, etc. 
The recipient or server is redirecting requests for this information to another entity The requesting entity is not authorized to access the requested service because prior registration is necessary A remote server or service specified as part or all of the JID of the intended recipient does not exist or cannot be resolved A remote server or service specified as part or all of the JID of the intended recipient (or needed to fulfill a request) was resolved but communications could not be established within a reasonable amount of time The server or recipient is busy or lacks the system resources necessary to service the request The server or recipient does not currently provide the requested service The requesting entity is not authorized to access the requested service because a prior subscription is necessary The error condition is not one of those defined by the other conditions The recipient or server understood the request but was not expecting it at this time | 1.967907 | 2 |
setup.py | SirJakesalot/xmasclock | 0 | 6613635 | <reponame>SirJakesalot/xmasclock<gh_stars>0
import setuptools

# The long description shown on PyPI comes straight from the README.
with open("README.md", "r") as f:
    long_description = f.read()

setuptools.setup(
    name='xmasclock',
    version='0.0.1',
    description='Countdown until Christmas executable',
    author='<NAME>',
    long_description=long_description,
    # BUG FIX: the keyword is "long_description_content_type";
    # "long_description_context_type" was silently ignored by setuptools,
    # so PyPI would not render the README as Markdown.
    long_description_content_type='text/markdown',
    scripts=['xmasclock/xmasclock.py'],
    url='https://github.com/SirJakesalot/xmasclock.git',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ]
)
| import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name='xmasclock',
version='0.0.1',
description='Countdown until Christmas executable',
author='<NAME>',
long_description=long_description,
long_description_context_type='text/markdown',
scripts=['xmasclock/xmasclock.py'],
url='https://github.com/SirJakesalot/xmasclock.git',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
) | none | 1 | 1.549274 | 2 | |
study/conf_scores.py | sealuzh/benchmark-instability-prediction-replication-package | 0 | 6613636 | <reponame>sealuzh/benchmark-instability-prediction-replication-package<filename>study/conf_scores.py
import warnings
import numpy as np
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef, precision_score, recall_score, roc_auc_score
def mcc_score(y_true, y_pred):
    """Matthews correlation coefficient with RuntimeWarnings suppressed.

    NOTE(review): presumably silences the zero-division RuntimeWarning that
    matthews_corrcoef emits for degenerate confusion matrices -- confirm.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=RuntimeWarning)
        return matthews_corrcoef(y_true, y_pred)
def auc_score(y_true, y_pred):
    """ROC AUC, treating the undefined case as the worst possible score.

    roc_auc_score raises ValueError when it cannot compute the score (e.g.
    y_true contains only one class); that case is mapped to 0.0.
    """
    try:
        return roc_auc_score(y_true, y_pred)
    except ValueError:
        return 0.0
# Scorer objects compatible with sklearn's model-selection utilities
# (cross_validate, GridSearchCV, ...).  zero_division=0 returns 0 instead of
# warning when a class is never predicted.
PRECISION_SCORER = make_scorer(precision_score, zero_division=0)
RECALL_SCORER = make_scorer(recall_score, zero_division=0)
ACCURACY_SCORER = make_scorer(accuracy_score)
FMEASURE_SCORER = make_scorer(f1_score, zero_division=0)
# AUC is computed from class probabilities, not hard predictions.
AUC_SCORER = make_scorer(auc_score, needs_proba=True)
MCC_SCORER = make_scorer(mcc_score)

# (name, scorer) pairs used by compute_multiple_scores by default.
SCORES = [
    ('precision', PRECISION_SCORER),
    ('recall', RECALL_SCORER),
    ('accuracy', ACCURACY_SCORER),
    ('fmeasure', FMEASURE_SCORER),
    ('auc', AUC_SCORER),
    ('mcc', MCC_SCORER),
]
def compute_multiple_scores(estimator, X, y_true, scores=SCORES):
    """Evaluate *estimator* on (X, y_true) with every scorer in *scores*.

    *scores* is an iterable of (name, scorer) pairs; returns a dict mapping
    each score name to its value.
    """
    return {name: scorer(estimator, X, y_true) for name, scorer in scores}
| import warnings
import numpy as np
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef, precision_score, recall_score, roc_auc_score
def mcc_score(y_true, y_pred):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
return matthews_corrcoef(y_true, y_pred)
def auc_score(y_true, y_pred):
try:
return roc_auc_score(y_true, y_pred)
except ValueError:
return 0.0
PRECISION_SCORER = make_scorer(precision_score, zero_division=0)
RECALL_SCORER = make_scorer(recall_score, zero_division=0)
ACCURACY_SCORER = make_scorer(accuracy_score)
FMEASURE_SCORER = make_scorer(f1_score, zero_division=0)
AUC_SCORER = make_scorer(auc_score, needs_proba=True)
MCC_SCORER = make_scorer(mcc_score)
SCORES = [
('precision', PRECISION_SCORER),
('recall', RECALL_SCORER),
('accuracy', ACCURACY_SCORER),
('fmeasure', FMEASURE_SCORER),
('auc', AUC_SCORER),
('mcc', MCC_SCORER),
]
def compute_multiple_scores(estimator, X, y_true, scores=SCORES):
result = {}
for score_name, score_function in scores:
score = score_function(estimator, X, y_true)
result[score_name] = score
return result | none | 1 | 2.488992 | 2 | |
genword/lib/element.py | di3g0bs0n/genword | 0 | 6613637 | <filename>genword/lib/element.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
from . import *
class wElement(wComponent):
    """
    Models an element. Every object that can be added to a page is an element.
    """
    def __init__(self):
        # Plain delegation to the wComponent initializer; wElement adds no
        # state of its own here (it serves as a semantic base for page
        # elements).
        wComponent.__init__(self)
| <filename>genword/lib/element.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
from . import *
class wElement(wComponent):
"""
Class which models an element. Every object what can be added to a page, is an element.
"""
def __init__(self):
wComponent.__init__(self)
| en | 0.826615 | #!/usr/bin/env python # -*- coding:utf-8 -*- Class which models an element. Every object what can be added to a page, is an element. | 2.092201 | 2 |
runs/PlotUtils/plot_yz.py | luiarthur/CytofResearch | 1 | 6613638 | <gh_stars>1-10
import matplotlib.pyplot as plt
from matplotlib import gridspec
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
import numpy as np
import blue2red
def relabel_lam(lami_est, wi_mean):
    """Relabel cluster assignments in ascending-abundance order.

    Label 0 (unassigned cells) stays 0; original 1-based cluster k is mapped
    to 1 + its position in np.argsort(wi_mean).  Returns a tuple of the
    relabeled assignment array and the per-label cell counts.
    """
    order = np.argsort(wi_mean)
    relabeled = lami_est + 0  # copy, preserving dtype
    counts = []
    # Build one boolean mask per new label: label 0 first, then each
    # original cluster in argsort order.
    masks = [lami_est == 0]
    masks.extend(lami_est - 1 == orig_k for orig_k in order)
    for new_label, mask in enumerate(masks):
        relabeled[mask] = new_label
        counts.append(mask.sum())
    return (relabeled, counts)
def add_gridlines_Z(Z):
    """Draw thin grey separators between the cells of an imshow'd matrix Z."""
    n_rows, n_cols = Z.shape
    for row_edge in range(n_rows):
        plt.axhline(y=row_edge + .5, color='grey', linewidth=.5)
    for col_edge in range(n_cols):
        plt.axvline(x=col_edge + .5, color='grey', linewidth=.5)
def plot_y(yi, wi_mean, lami_est, fs_lab=10, fs_cbar=10, lw=3,
           cm=blue2red.cm(6), vlim=(-3, 3), fs_xlab=10, fs_ylab=10,
           markernames=[]):
    """Heatmap of the expression matrix yi (cells x markers).

    Cells are reordered so members of the same relabeled cluster are
    contiguous; yellow horizontal lines mark cluster boundaries, and a
    horizontal colorbar is placed above the axes.
    NOTE(review): default args cm=blue2red.cm(6) and markernames=[] are
    evaluated once at import time -- fine as long as they are never mutated.
    """
    J = yi.shape[1]
    vmin, vmax = vlim
    # sort cells by their relabeled cluster assignment
    lami_new, counts = relabel_lam(lami_est, wi_mean)
    counts_cumsum = np.cumsum(counts)
    yi_sorted = yi[np.argsort(lami_new), :]
    im = plt.imshow(yi_sorted, aspect='auto', vmin=vmin, vmax=vmax, cmap=cm)
    # cluster separators; the last cumulative count is the total, so skip it
    for c in counts_cumsum[:-1]:
        plt.axhline(c, color='yellow', linewidth=lw)
    plt.xticks(rotation=90)
    if len(markernames) == 0:
        plt.xticks(np.arange(J), np.arange(J) + 1, fontsize=fs_xlab)
    else:
        plt.xticks(np.arange(J), markernames, fontsize=fs_xlab)
    plt.yticks(fontsize=fs_ylab)
    plt.xlabel("markers", fontsize=fs_lab)
    plt.ylabel("cells", fontsize=fs_lab)
    # put the colorbar in its own thin axes strip above the heatmap
    ax = plt.gca()
    ax_divider = make_axes_locatable(ax)
    cax = ax_divider.append_axes("top", size="7%", pad="2%")
    cax.xaxis.set_ticks_position("top")
    cbar = colorbar(im, cax=cax, orientation="horizontal")
    cbar.ax.tick_params(labelsize=fs_cbar)
def plot_Z_only(Z, fs=10, xlab=None, ylab=None, rotate_xticks=True,
                cm_greys=plt.cm.get_cmap('Greys', 5)):
    """Greyscale heatmap of the binary/probability matrix Z with gridlines
    and 1-based tick labels on both axes."""
    plt.imshow(Z, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
    plt.xlabel(xlab, fontsize=fs)
    plt.ylabel(ylab, fontsize=fs)
    J, K = Z.shape
    plt.yticks(np.arange(J), np.arange(J) + 1, fontsize=fs)
    add_gridlines_Z(Z)
    if rotate_xticks:
        plt.xticks(rotation=90, fontsize=fs)
    else:
        plt.xticks(fontsize=fs)
    # NOTE(review): this second xticks call replaces the tick labels set
    # above with 1-based numbers (without a fontsize) -- confirm intended.
    plt.xticks(np.arange(K), np.arange(K) + 1)
def plot_Z(Z_mean, wi_mean, lami_est, w_thresh=.01,
           cm_greys=plt.cm.get_cmap('Greys', 5), fs_lab=10,
           add_colorbar=True, fs_cbar=10, fs_w=10, fs_celltypes=10,
           xlab="markers", ylab="cell subpopulations (abundance)",
           markernames=[], fs_markers=10, w_digits=1):
    """Heatmap of the posterior feature matrix Z_mean (markers x clusters).

    Only clusters with abundance wi_mean[k] > w_thresh are shown, ordered by
    ascending abundance; each row label shows the 1-based cluster index and
    its abundance percentage (rounded to w_digits).
    """
    J = Z_mean.shape[0]
    # keep only sufficiently abundant clusters, in ascending-abundance order
    k_ord = wi_mean.argsort()
    z_cols = []
    for k in k_ord.tolist():
        if wi_mean[k] > w_thresh:
            z_cols.append(k)
    z_cols = np.array(z_cols)
    Z_hat = Z_mean[:, z_cols].T
    im = plt.imshow(Z_hat, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
    plt.xlabel(xlab, fontsize=fs_lab)
    plt.ylabel(ylab, fontsize=fs_lab)
    # W percentages
    w_perc = wi_mean[z_cols]
    w_perc = [str((wp * 100).round(w_digits)) + '%' for wp in w_perc]
    ax = plt.gca()
    # plt.xticks([])
    labels = ['{} ({})'.format(zc + 1, wp) for (zc, wp) in zip(z_cols, w_perc)]
    plt.yticks(np.arange(len(z_cols)), labels, fontsize=fs_celltypes)
    add_gridlines_Z(Z_hat)
    plt.xticks(rotation=90, fontsize=fs_markers)
    if len(markernames) == 0:
        plt.xticks(np.arange(J), np.arange(J) + 1)
    else:
        plt.xticks(np.arange(J), markernames)
    # add wi_mean on right side
    # K = z_cols.shape[0]
    # ax2 = ax.twinx()
    # ax2.set_yticks(range(K))
    # plt.yticks((K-1) / K * np.arange(K) + .5, w_perc[::-1], fontsize=fs_w)
    # ax2.tick_params(length=0)
    # colorbar
    if add_colorbar:
        # horizontal colorbar in a thin strip above the axes
        ax_divider = make_axes_locatable(ax)
        cax = ax_divider.append_axes("top", size="7%", pad="2%")
        cax.xaxis.set_ticks_position("top")
        cbar = colorbar(im, cax=cax, orientation="horizontal")
        cbar.ax.tick_params(labelsize=fs_cbar)
def plot_yz(yi, Z_mean, wi_mean, lami_est, w_thresh=.01,
            cm_greys = plt.cm.get_cmap('Greys', 5), markernames=[],
            cm_y=blue2red.cm(6), vlim_y=(-3, 3), fs_w=10, w_digits=1):
    """Combined figure: expression heatmap of yi (top, 5/7 of the height)
    over the thresholded Z_mean heatmap (bottom), with cluster abundances
    shown as percentages on the right of the Z panel."""
    J = yi.shape[1]
    vmin_y, vmax_y = vlim_y
    # cm_y.set_bad(color='black')
    # cm_y.set_under(color='blue')
    # cm_y.set_over(color='red')
    # gs = gridspec.GridSpec(1, 2, width_ratios=[2, 5])
    gs = gridspec.GridSpec(2, 1, height_ratios=[5, 2])
    # Plot y: cells sorted so same-cluster cells are contiguous
    lami_new, counts = relabel_lam(lami_est, wi_mean)
    counts_cumsum = np.cumsum(counts)
    yi_sorted = yi[np.argsort(lami_new), :]
    plt.subplot(gs[0])
    im = plt.imshow(yi_sorted, aspect='auto', vmin=vmin_y, vmax=vmax_y, cmap=cm_y)
    # yellow separators between clusters (skip the final total)
    for c in counts_cumsum[:-1]:
        plt.axhline(c, color='yellow')
    plt.xticks(rotation=90)
    if len(markernames) == 0:
        plt.xticks(np.arange(J), np.arange(J) + 1)
    else:
        plt.xticks(np.arange(J), markernames)
    ax = plt.gca()
    ax_divider = make_axes_locatable(ax)
    cax = ax_divider.append_axes("top", size="7%", pad="2%")
    cax.xaxis.set_ticks_position("top")
    colorbar(im, cax=cax, orientation="horizontal")
    # Plot Z: keep clusters above w_thresh, ascending abundance order
    k_ord = wi_mean.argsort()
    z_cols = []
    for k in k_ord.tolist():
        if wi_mean[k] > w_thresh:
            z_cols.append(k)
    z_cols = np.array(z_cols)
    Z_hat = Z_mean[:, z_cols].T
    plt.subplot(gs[1])
    im = plt.imshow(Z_hat, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
    ax = plt.gca()
    plt.xticks([])
    plt.yticks(np.arange(len(z_cols)), z_cols + 1, fontsize=fs_w)
    add_gridlines_Z(Z_hat)
    plt.colorbar(orientation='horizontal', pad=.05)
    # add wi_mean on right side via a twin y-axis
    K = z_cols.shape[0]
    ax2 = ax.twinx()
    ax2.set_yticks(range(K))
    w_perc = wi_mean[z_cols]
    w_perc = [str((wp * 100).round(w_digits)) + '%' for wp in w_perc]
    # reversed so percentages line up with the (top-down) image rows
    plt.yticks((K-1) / K * np.arange(K) + .5, w_perc[::-1], fontsize=fs_w)
    plt.yticks()
    ax2.tick_params(length=0)
    fig = plt.gcf()
    fig.subplots_adjust(hspace=0.2)
| import matplotlib.pyplot as plt
from matplotlib import gridspec
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
import numpy as np
import blue2red
def relabel_lam(lami_est, wi_mean):
K = wi_mean.shape[0]
k_ord = np.argsort(wi_mean)
lami_new = lami_est + 0
counts = []
for k in range(K + 1):
if k == 0:
idx_k = (lami_est == 0)
else:
idx_k = (lami_est - 1 == k_ord[k - 1])
lami_new[idx_k] = k
counts.append(idx_k.sum())
return (lami_new, counts)
def add_gridlines_Z(Z):
J, K = Z.shape
for j in range(J):
plt.axhline(y=j+.5, color='grey', linewidth=.5)
for k in range(K):
plt.axvline(x=k+.5, color='grey', linewidth=.5)
def plot_y(yi, wi_mean, lami_est, fs_lab=10, fs_cbar=10, lw=3,
cm=blue2red.cm(6), vlim=(-3, 3), fs_xlab=10, fs_ylab=10,
markernames=[]):
J = yi.shape[1]
vmin, vmax = vlim
lami_new, counts = relabel_lam(lami_est, wi_mean)
counts_cumsum = np.cumsum(counts)
yi_sorted = yi[np.argsort(lami_new), :]
im = plt.imshow(yi_sorted, aspect='auto', vmin=vmin, vmax=vmax, cmap=cm)
for c in counts_cumsum[:-1]:
plt.axhline(c, color='yellow', linewidth=lw)
plt.xticks(rotation=90)
if len(markernames) == 0:
plt.xticks(np.arange(J), np.arange(J) + 1, fontsize=fs_xlab)
else:
plt.xticks(np.arange(J), markernames, fontsize=fs_xlab)
plt.yticks(fontsize=fs_ylab)
plt.xlabel("markers", fontsize=fs_lab)
plt.ylabel("cells", fontsize=fs_lab)
ax = plt.gca()
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("top", size="7%", pad="2%")
cax.xaxis.set_ticks_position("top")
cbar = colorbar(im, cax=cax, orientation="horizontal")
cbar.ax.tick_params(labelsize=fs_cbar)
def plot_Z_only(Z, fs=10, xlab=None, ylab=None, rotate_xticks=True,
cm_greys=plt.cm.get_cmap('Greys', 5)):
plt.imshow(Z, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
plt.xlabel(xlab, fontsize=fs)
plt.ylabel(ylab, fontsize=fs)
J, K = Z.shape
plt.yticks(np.arange(J), np.arange(J) + 1, fontsize=fs)
add_gridlines_Z(Z)
if rotate_xticks:
plt.xticks(rotation=90, fontsize=fs)
else:
plt.xticks(fontsize=fs)
plt.xticks(np.arange(K), np.arange(K) + 1)
def plot_Z(Z_mean, wi_mean, lami_est, w_thresh=.01,
cm_greys=plt.cm.get_cmap('Greys', 5), fs_lab=10,
add_colorbar=True, fs_cbar=10, fs_w=10, fs_celltypes=10,
xlab="markers", ylab="cell subpopulations (abundance)",
markernames=[], fs_markers=10, w_digits=1):
J = Z_mean.shape[0]
k_ord = wi_mean.argsort()
z_cols = []
for k in k_ord.tolist():
if wi_mean[k] > w_thresh:
z_cols.append(k)
z_cols = np.array(z_cols)
Z_hat = Z_mean[:, z_cols].T
im = plt.imshow(Z_hat, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
plt.xlabel(xlab, fontsize=fs_lab)
plt.ylabel(ylab, fontsize=fs_lab)
# W percentages
w_perc = wi_mean[z_cols]
w_perc = [str((wp * 100).round(w_digits)) + '%' for wp in w_perc]
ax = plt.gca()
# plt.xticks([])
labels = ['{} ({})'.format(zc + 1, wp) for (zc, wp) in zip(z_cols, w_perc)]
plt.yticks(np.arange(len(z_cols)), labels, fontsize=fs_celltypes)
add_gridlines_Z(Z_hat)
plt.xticks(rotation=90, fontsize=fs_markers)
if len(markernames) == 0:
plt.xticks(np.arange(J), np.arange(J) + 1)
else:
plt.xticks(np.arange(J), markernames)
# add wi_mean on right side
# K = z_cols.shape[0]
# ax2 = ax.twinx()
# ax2.set_yticks(range(K))
# plt.yticks((K-1) / K * np.arange(K) + .5, w_perc[::-1], fontsize=fs_w)
# ax2.tick_params(length=0)
# colorbar
if add_colorbar:
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("top", size="7%", pad="2%")
cax.xaxis.set_ticks_position("top")
cbar = colorbar(im, cax=cax, orientation="horizontal")
cbar.ax.tick_params(labelsize=fs_cbar)
def plot_yz(yi, Z_mean, wi_mean, lami_est, w_thresh=.01,
cm_greys = plt.cm.get_cmap('Greys', 5), markernames=[],
cm_y=blue2red.cm(6), vlim_y=(-3, 3), fs_w=10, w_digits=1):
J = yi.shape[1]
vmin_y, vmax_y = vlim_y
# cm_y.set_bad(color='black')
# cm_y.set_under(color='blue')
# cm_y.set_over(color='red')
# gs = gridspec.GridSpec(1, 2, width_ratios=[2, 5])
gs = gridspec.GridSpec(2, 1, height_ratios=[5, 2])
# Plot y
lami_new, counts = relabel_lam(lami_est, wi_mean)
counts_cumsum = np.cumsum(counts)
yi_sorted = yi[np.argsort(lami_new), :]
plt.subplot(gs[0])
im = plt.imshow(yi_sorted, aspect='auto', vmin=vmin_y, vmax=vmax_y, cmap=cm_y)
for c in counts_cumsum[:-1]:
plt.axhline(c, color='yellow')
plt.xticks(rotation=90)
if len(markernames) == 0:
plt.xticks(np.arange(J), np.arange(J) + 1)
else:
plt.xticks(np.arange(J), markernames)
ax = plt.gca()
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes("top", size="7%", pad="2%")
cax.xaxis.set_ticks_position("top")
colorbar(im, cax=cax, orientation="horizontal")
# Plot Z
k_ord = wi_mean.argsort()
z_cols = []
for k in k_ord.tolist():
if wi_mean[k] > w_thresh:
z_cols.append(k)
z_cols = np.array(z_cols)
Z_hat = Z_mean[:, z_cols].T
plt.subplot(gs[1])
im = plt.imshow(Z_hat, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
ax = plt.gca()
plt.xticks([])
plt.yticks(np.arange(len(z_cols)), z_cols + 1, fontsize=fs_w)
add_gridlines_Z(Z_hat)
plt.colorbar(orientation='horizontal', pad=.05)
# add wi_mean on right side
K = z_cols.shape[0]
ax2 = ax.twinx()
ax2.set_yticks(range(K))
w_perc = wi_mean[z_cols]
w_perc = [str((wp * 100).round(w_digits)) + '%' for wp in w_perc]
plt.yticks((K-1) / K * np.arange(K) + .5, w_perc[::-1], fontsize=fs_w)
plt.yticks()
ax2.tick_params(length=0)
fig = plt.gcf()
fig.subplots_adjust(hspace=0.2) | en | 0.201478 | # W percentages # plt.xticks([]) # add wi_mean on right side # K = z_cols.shape[0] # ax2 = ax.twinx() # ax2.set_yticks(range(K)) # plt.yticks((K-1) / K * np.arange(K) + .5, w_perc[::-1], fontsize=fs_w) # ax2.tick_params(length=0) # colorbar # cm_y.set_bad(color='black') # cm_y.set_under(color='blue') # cm_y.set_over(color='red') # gs = gridspec.GridSpec(1, 2, width_ratios=[2, 5]) # Plot y # Plot Z # add wi_mean on right side | 2.419006 | 2 |
spectral/tests/spytest.py | wwlswj/spectral | 398 | 6613639 | '''
Base class for all tests.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import sys
class SpyTest(object):
'''Base class for test cases.
Test classes are created by sub-classing SpyTest and defining methods
whose names start with "test_".
'''
def setup(self):
'''Method to be run before derived class test methods are called.'''
pass
def finish(self):
'''Method run after all test methods have run.'''
pass
def run(self):
'''Runs all "test_*" methods in a derived class.
Before running subclass test_ methods, the `startup` method will be
called. After all test_ methods have been run, the `finish` method
is called.
'''
import spectral.tests as tests
from spectral.tests import abort_on_fail
self.setup()
class NullStdOut(object):
def write(*args, **kwargs):
pass
def flush(self):
pass
null = NullStdOut()
methods = [getattr(self, s) for s in sorted(dir(self)) if s.startswith('test_')]
methods = [m for m in methods if isinstance(m, collections.Callable)]
stdout = sys.stdout
for method in methods:
print(format('Testing ' + method.__name__.split('_', 1)[-1],
'.<60'), end=' ')
tests._num_tests_run += 1
try:
sys.stdout = null
method()
stdout.write('OK\n')
except AssertionError:
stdout.write('FAILED\n')
tests._num_tests_failed += 1
if tests.abort_on_fail:
raise
finally:
sys.stdout = stdout
self.finish()
# The following test method is now deprecated and should no longer be used.
def test_method(method):
'''Decorator function for unit tests.'''
import spectral.tests as tests
def meth(self):
print(format('Testing ' + method.__name__.split('_', 1)[-1],
'.<40'), end=' ')
try:
method(self)
print('OK')
tests._num_tests_run += 1
except AssertionError:
print('FAILED')
tests._num_tests_failed += 1
if tests.abort_on_fail:
raise
return meth
| '''
Base class for all tests.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import sys
class SpyTest(object):
'''Base class for test cases.
Test classes are created by sub-classing SpyTest and defining methods
whose names start with "test_".
'''
def setup(self):
'''Method to be run before derived class test methods are called.'''
pass
def finish(self):
'''Method run after all test methods have run.'''
pass
def run(self):
'''Runs all "test_*" methods in a derived class.
Before running subclass test_ methods, the `startup` method will be
called. After all test_ methods have been run, the `finish` method
is called.
'''
import spectral.tests as tests
from spectral.tests import abort_on_fail
self.setup()
class NullStdOut(object):
def write(*args, **kwargs):
pass
def flush(self):
pass
null = NullStdOut()
methods = [getattr(self, s) for s in sorted(dir(self)) if s.startswith('test_')]
methods = [m for m in methods if isinstance(m, collections.Callable)]
stdout = sys.stdout
for method in methods:
print(format('Testing ' + method.__name__.split('_', 1)[-1],
'.<60'), end=' ')
tests._num_tests_run += 1
try:
sys.stdout = null
method()
stdout.write('OK\n')
except AssertionError:
stdout.write('FAILED\n')
tests._num_tests_failed += 1
if tests.abort_on_fail:
raise
finally:
sys.stdout = stdout
self.finish()
# The following test method is now deprecated and should no longer be used.
def test_method(method):
'''Decorator function for unit tests.'''
import spectral.tests as tests
def meth(self):
print(format('Testing ' + method.__name__.split('_', 1)[-1],
'.<40'), end=' ')
try:
method(self)
print('OK')
tests._num_tests_run += 1
except AssertionError:
print('FAILED')
tests._num_tests_failed += 1
if tests.abort_on_fail:
raise
return meth
| en | 0.919417 | Base class for all tests. Base class for test cases. Test classes are created by sub-classing SpyTest and defining methods whose names start with "test_". Method to be run before derived class test methods are called. Method run after all test methods have run. Runs all "test_*" methods in a derived class. Before running subclass test_ methods, the `startup` method will be called. After all test_ methods have been run, the `finish` method is called. # The following test method is now deprecated and should no longer be used. Decorator function for unit tests. | 2.995982 | 3 |
main.py | Habdio/GROUP-AutoManageBot | 0 | 6613640 | <filename>main.py
from pyrogram import Client
PyrogramBot = Client(
"PyrogramBot",
api_hash="09ca4ef17b06c5030d4e8f7cbd92f1a9",
api_id="10342078",
bot_token="<PASSWORD>",
plugins=dict(root="PyrogramBot")
)
PyrogramBot.run()
| <filename>main.py
from pyrogram import Client
PyrogramBot = Client(
"PyrogramBot",
api_hash="09ca4ef17b06c5030d4e8f7cbd92f1a9",
api_id="10342078",
bot_token="<PASSWORD>",
plugins=dict(root="PyrogramBot")
)
PyrogramBot.run()
| none | 1 | 1.673634 | 2 | |
tests/test_script.py | KaoruNishikawa/nanten_tools | 0 | 6613641 | <reponame>KaoruNishikawa/nanten_tools
import script
def test_metadata():
assert script.__author__ == "<NAME>"
assert script.__version__ == "0.1.0"
| import script
def test_metadata():
assert script.__author__ == "<NAME>"
assert script.__version__ == "0.1.0" | none | 1 | 1.641772 | 2 | |
scripts/coverage/js-coverage.py | 353swe/Marvin-353 | 7 | 6613642 | import coverage_report
import subprocess
import shutil
run, error = subprocess.Popen(["npm", "run", "js-coverage"], stdout=subprocess.PIPE).communicate()
if error is None:
subprocess.Popen(["./node_modules/.bin/nyc", "report", "--reporter=lcov"]).communicate()
coverage_report.push("JS")
else:
print error
exit(1)
exit(0)
| import coverage_report
import subprocess
import shutil
run, error = subprocess.Popen(["npm", "run", "js-coverage"], stdout=subprocess.PIPE).communicate()
if error is None:
subprocess.Popen(["./node_modules/.bin/nyc", "report", "--reporter=lcov"]).communicate()
coverage_report.push("JS")
else:
print error
exit(1)
exit(0)
| none | 1 | 1.633084 | 2 | |
server/handler/GameHandler.py | xiaojieluo/dove-admin | 1 | 6613643 | <filename>server/handler/GameHandler.py
from handler.APIHandler import APIHandler
from sanic.response import json
from web import log
from model import Game
from settings import api_settings
class index(APIHandler):
async def get(self, request):
log.info(request.args)
# args = request.args
# if args.get('pages', None) is not None:
# try:
# args['pages'] = int(args.get('pages'))
# except ValueError:
# break
game = Game()
data = game.find({}).skip(0).limit(0)
games = list()
for k in data:
k['_id'] = str(k['_id'])
games.append(k)
log.info(games)
# log.info(game.find({}).skip(), limit(5))
return json(games)
async def post(self, request):
'''
新增游戏
'''
game = Game()
# print(type(request.json))
result = game.replace_one(request.json, request.json, True)
print(result.matched_count)
print(result.modified_count)
return json(request.json, 201)
| <filename>server/handler/GameHandler.py
from handler.APIHandler import APIHandler
from sanic.response import json
from web import log
from model import Game
from settings import api_settings
class index(APIHandler):
async def get(self, request):
log.info(request.args)
# args = request.args
# if args.get('pages', None) is not None:
# try:
# args['pages'] = int(args.get('pages'))
# except ValueError:
# break
game = Game()
data = game.find({}).skip(0).limit(0)
games = list()
for k in data:
k['_id'] = str(k['_id'])
games.append(k)
log.info(games)
# log.info(game.find({}).skip(), limit(5))
return json(games)
async def post(self, request):
'''
新增游戏
'''
game = Game()
# print(type(request.json))
result = game.replace_one(request.json, request.json, True)
print(result.matched_count)
print(result.modified_count)
return json(request.json, 201)
| en | 0.280148 | # args = request.args # if args.get('pages', None) is not None: # try: # args['pages'] = int(args.get('pages')) # except ValueError: # break # log.info(game.find({}).skip(), limit(5)) 新增游戏 # print(type(request.json)) | 2.371067 | 2 |
corpusLoader.py | poodarchu/SogouPersona | 1 | 6613644 | <filename>corpusLoader.py
# -*- coding=utf-8 -*-
import codecs
import jieba
from sklearn import preprocessing
userID = []
# userTags = [] # userTag[i][0:3] : user i's three tags gender, age and certification
userQueries = [] # userQueries[i][:] user i's many queries
ages = []
genders = []
educations = []
with codecs.open('./data/train.csv', 'r', 'utf-8') as fr:
for user in fr.readlines():
userInfo = user.split('\t')
# userTags.append([userInfo[1:4]])
userID.append(userInfo[0])
ages.append(userInfo[1])
genders.append(userInfo[2])
educations.append(userInfo[3])
userQueries.append(userInfo[4:])
fr.close()
with codecs.open('./data/test.csv', 'r', 'utf-8') as frt:
for testUser in frt.readlines():
userInfo = testUser.split('\t')
userID.append(user[0])
userQueries.append(userInfo[1:])
frt.close()
stop_tokens = []
fr = codecs.open('./data/stop_tokens.txt', 'r', 'utf-8')
for token in fr.readlines():
stop_tokens.append(token.strip())
fr.close()
queryLists = []
def cut2rtn():
fw = codecs.open('./data/output/queries_tokenized.csv', 'w', 'utf-8')
# fw_ages = codecs.open('./data/output/ages.csv', 'w', 'utf-8')
# fw_genders = codecs.open('./data/output/genders.csv', 'w', 'utf-8')
# fw_educations = codecs.open('./data/output/educations.csv', 'w', 'utf-8')
for queriesPerUser in userQueries:
queryList = [] # query list per user.
for query in queriesPerUser:
qry_tks = jieba.lcut(query, cut_all=False)
final = ''
for tk in qry_tks:
if tk not in stop_tokens:
if tk != ' ':
queryList.append(tk)
final += tk + ','
fw.write(final)
fw.write('\n')
queryLists.append(queryList)
# Split train set to train and validation set.
trainQueryLists = queryLists[:20000]
testQueryLists = queryLists[20000:]
return userID, ages, genders, educations, trainQueryLists, testQueryLists
def cutTest2Rtn():
fw = codecs.open('./data/output/test.csv', 'w', 'utf-8')
testUIDs = []
testQueryLists = []
for queryPerLine in fw.readlines():
queries = []
userInfo = queryPerLine.split('\t')
testUIDs.append(userInfo[0])
for query in userInfo[1:]:
qryTks = jieba.lcut(query)
final = ''
for i in qryTks:
if i not in stop_tokens:
final += i + ','
queries.append(i)
fw.write(final)
testQueryLists.append(queries)
return testUIDs, testQueryLists
if __name__ == '__main__':
cut2rtn()
# cutTest2Rtn()
| <filename>corpusLoader.py
# -*- coding=utf-8 -*-
import codecs
import jieba
from sklearn import preprocessing
userID = []
# userTags = [] # userTag[i][0:3] : user i's three tags gender, age and certification
userQueries = [] # userQueries[i][:] user i's many queries
ages = []
genders = []
educations = []
with codecs.open('./data/train.csv', 'r', 'utf-8') as fr:
for user in fr.readlines():
userInfo = user.split('\t')
# userTags.append([userInfo[1:4]])
userID.append(userInfo[0])
ages.append(userInfo[1])
genders.append(userInfo[2])
educations.append(userInfo[3])
userQueries.append(userInfo[4:])
fr.close()
with codecs.open('./data/test.csv', 'r', 'utf-8') as frt:
for testUser in frt.readlines():
userInfo = testUser.split('\t')
userID.append(user[0])
userQueries.append(userInfo[1:])
frt.close()
stop_tokens = []
fr = codecs.open('./data/stop_tokens.txt', 'r', 'utf-8')
for token in fr.readlines():
stop_tokens.append(token.strip())
fr.close()
queryLists = []
def cut2rtn():
fw = codecs.open('./data/output/queries_tokenized.csv', 'w', 'utf-8')
# fw_ages = codecs.open('./data/output/ages.csv', 'w', 'utf-8')
# fw_genders = codecs.open('./data/output/genders.csv', 'w', 'utf-8')
# fw_educations = codecs.open('./data/output/educations.csv', 'w', 'utf-8')
for queriesPerUser in userQueries:
queryList = [] # query list per user.
for query in queriesPerUser:
qry_tks = jieba.lcut(query, cut_all=False)
final = ''
for tk in qry_tks:
if tk not in stop_tokens:
if tk != ' ':
queryList.append(tk)
final += tk + ','
fw.write(final)
fw.write('\n')
queryLists.append(queryList)
# Split train set to train and validation set.
trainQueryLists = queryLists[:20000]
testQueryLists = queryLists[20000:]
return userID, ages, genders, educations, trainQueryLists, testQueryLists
def cutTest2Rtn():
fw = codecs.open('./data/output/test.csv', 'w', 'utf-8')
testUIDs = []
testQueryLists = []
for queryPerLine in fw.readlines():
queries = []
userInfo = queryPerLine.split('\t')
testUIDs.append(userInfo[0])
for query in userInfo[1:]:
qryTks = jieba.lcut(query)
final = ''
for i in qryTks:
if i not in stop_tokens:
final += i + ','
queries.append(i)
fw.write(final)
testQueryLists.append(queries)
return testUIDs, testQueryLists
if __name__ == '__main__':
cut2rtn()
# cutTest2Rtn()
| en | 0.601583 | # -*- coding=utf-8 -*- # userTags = [] # userTag[i][0:3] : user i's three tags gender, age and certification # userQueries[i][:] user i's many queries # userTags.append([userInfo[1:4]]) # fw_ages = codecs.open('./data/output/ages.csv', 'w', 'utf-8') # fw_genders = codecs.open('./data/output/genders.csv', 'w', 'utf-8') # fw_educations = codecs.open('./data/output/educations.csv', 'w', 'utf-8') # query list per user. # Split train set to train and validation set. # cutTest2Rtn() | 2.825724 | 3 |
15610 Abbey Courtyard.py | jangThang/Baekjoon-problem | 0 | 6613645 | <reponame>jangThang/Baekjoon-problem<filename>15610 Abbey Courtyard.py
# 입력
a = int(input())
# 출력
print(a**0.5 *4)
| Abbey Courtyard.py
# 입력
a = int(input())
# 출력
print(a**0.5 *4) | none | 1 | 1.969432 | 2 | |
thrift/compiler/py/generate/t_cpp_context.py | yuhonghong66/fbthrift | 0 | 6613646 | <reponame>yuhonghong66/fbthrift
#! /usr/bin/env python2 -tt
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import re
from t_output import CompositeOutput
from t_output_aggregator import create_scope_factory
from t_output_aggregator import OutputContext
from t_output_aggregator import Primitive
from t_output_aggregator import PrimitiveFactory
from t_output_aggregator import Scope
# ---------------------------------------------------------------
# Scope
# ---------------------------------------------------------------
class CppScope (Scope):
# Make sure the line is flagged only when an open brace was printed and
# while it wasn't closed
def acquire(self):
print >>self._out, ' {',
self._out.flag_this_line()
self._out.indent(2)
def release(self):
self._out.unindent(2)
if not self._out.on_flagged_line:
self._out.line_feed()
self._out.flag_this_line(False)
self._out.write('}')
# ---------------------------------------------------------------
# PrimitiveFactory and primitives
# ---------------------------------------------------------------
class Class(Primitive):
# String Format: type folly abspath::name
# Example: class FOLLY_DEPRECATE("msg") classname::function : extrastuff
_pattern_type = "(?P<type>class |struct )"
_pattern_folly = "(?P<folly>\w+\(.*?\) )*"
_pattern_name = "(?:\s*(?P<name>\w+))"
_pattern_scope = "(?:\s*::{pname})*".format(pname=_pattern_name)
_pattern_abspath = "(?P<abspath>\w+{pscope})".format(pscope=_pattern_scope)
_pattern = "{ptype}{pfolly}{pabspath}".format(
ptype=_pattern_type,
pfolly=_pattern_folly,
pabspath=_pattern_abspath)
_classRegex = re.compile(_pattern, re.S)
def _write(self, context):
# deduce name
m = self._classRegex.match(str(self))
if not m:
raise SyntaxError("C++ class/struct incorrectly defined")
self.name, self.abspath = m.group('name', 'abspath')
if 'abspath' in self.parent.opts and self.parent.opts.abspath:
self.abspath = '::'.join((self.parent.opts.abspath, self.abspath))
# this is magic! Basically what it does it it checks if we're
# already on an empty line. If we are not then we introduce a
# newline before the class defn
context.h.double_space()
print >>context.h, self,
# the scope of this will be written to output_h
self.output = context.h
# no custom epilogue? we'll just set our own haha
if 'epilogue' not in self:
self.epilogue = ';'
# basically force two newlines after a class definition if it's
# toplevel (not within another class)
if not issubclass(self.parent.opts.type, Class):
self.epilogue += '\n\n'
class Statement(Primitive):
def _write(self, context):
txt = str(self)
# statements always start on new lines
context.output.line_feed()
context.output.write(txt)
class Namespace(Primitive):
def __init__(self, parent, path):
super(Namespace, self).__init__(parent, text=None, path=path)
self.epilogue = None
def _write(self, context):
path = filter(None, self.path)
if path:
parts = [r'namespace {0} {{'.format(i) for i in path]
text = ' '.join(parts) + '\n'
self.epilogue = '}' * len(path) + ' // ' + '::'.join(path)
context.outputs.line_feed()
print >>context.outputs, text
def enter_scope_callback(self, context, scope):
return dict(physical_scope=False)
def exit_scope_callback(self, context, scope):
if scope.opts.epilogue:
# namespaces don't have physical_scope cause they have an ending
# text hardcoded into .epilogue by the write_primitive method
context.outputs.double_space()
# => write the epilogue statement for all outputs
print >>context.outputs, scope.opts.epilogue,
return dict(physical_scope=False)
class CppPrimitiveFactory(PrimitiveFactory):
# TODO enforce somehow that each PrimitiveFactory subclass defines a types
# staticvar (method_name => class to instantiate with default parameters)
types = {'cls': Class}
def namespace(self, ns):
path = ns.split('.')
return Namespace(self._scope(), path)
def stmt(self, text='\n'):
'non-special statement, default to newline'
return Statement(self._scope(), text)
__call__ = stmt
# ---------------------------------------------------------------
# OutputContext
# ---------------------------------------------------------------
class CppOutputContext(OutputContext):
def __init__(self, output_h, header_path):
self._output_h = output_h
self._header_path = header_path
outputs = [output_h]
for output in outputs:
output.make_scope = create_scope_factory(CppScope, output)
# shorthand to write to all outputs at the same time
self._all_outputs = CompositeOutput(*outputs)
# start writing in the header
self.output = output_h
@property
def h(self):
return self._output_h
@property
def output(self):
return self._output_crt
@output.setter
def output(self, output):
self._output_crt = output
@property
def outputs(self):
return self._all_outputs
def _enter_scope_handler(self, scope, physical_scope=True):
if scope.parent is None:
# save the default "current output" in the parent scope
scope.opts.output = self.output
# start guard in h
print >>self._output_h, '#pragma once\n'
return
# set the output of the real scope's content according to the
# logical scope's output
if not 'output' in scope.opts:
# if it doesn't then it's a namespace or something, just pass
# the output of its parent on
scope.opts.output = scope.parent.opts.output
self.output = scope.opts.output
if physical_scope:
pscope = self.output.make_scope()
scope.physical_scope = pscope
pscope.acquire()
def _exit_scope_handler(self, scope, physical_scope=True):
if scope.parent is None:
# Make sure file is newline terminated.
self.outputs.line_feed()
return
if physical_scope:
scope.physical_scope.release()
if 'epilogue' in scope.opts:
self.output.write(scope.opts.epilogue)
# reset the output to the parent scope's output
self.output = scope.parent.opts.output
| #! /usr/bin/env python2 -tt
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import re
from t_output import CompositeOutput
from t_output_aggregator import create_scope_factory
from t_output_aggregator import OutputContext
from t_output_aggregator import Primitive
from t_output_aggregator import PrimitiveFactory
from t_output_aggregator import Scope
# ---------------------------------------------------------------
# Scope
# ---------------------------------------------------------------
class CppScope (Scope):
# Make sure the line is flagged only when an open brace was printed and
# while it wasn't closed
def acquire(self):
print >>self._out, ' {',
self._out.flag_this_line()
self._out.indent(2)
def release(self):
self._out.unindent(2)
if not self._out.on_flagged_line:
self._out.line_feed()
self._out.flag_this_line(False)
self._out.write('}')
# ---------------------------------------------------------------
# PrimitiveFactory and primitives
# ---------------------------------------------------------------
class Class(Primitive):
# String Format: type folly abspath::name
# Example: class FOLLY_DEPRECATE("msg") classname::function : extrastuff
_pattern_type = "(?P<type>class |struct )"
_pattern_folly = "(?P<folly>\w+\(.*?\) )*"
_pattern_name = "(?:\s*(?P<name>\w+))"
_pattern_scope = "(?:\s*::{pname})*".format(pname=_pattern_name)
_pattern_abspath = "(?P<abspath>\w+{pscope})".format(pscope=_pattern_scope)
_pattern = "{ptype}{pfolly}{pabspath}".format(
ptype=_pattern_type,
pfolly=_pattern_folly,
pabspath=_pattern_abspath)
_classRegex = re.compile(_pattern, re.S)
def _write(self, context):
# deduce name
m = self._classRegex.match(str(self))
if not m:
raise SyntaxError("C++ class/struct incorrectly defined")
self.name, self.abspath = m.group('name', 'abspath')
if 'abspath' in self.parent.opts and self.parent.opts.abspath:
self.abspath = '::'.join((self.parent.opts.abspath, self.abspath))
# this is magic! Basically what it does it it checks if we're
# already on an empty line. If we are not then we introduce a
# newline before the class defn
context.h.double_space()
print >>context.h, self,
# the scope of this will be written to output_h
self.output = context.h
# no custom epilogue? we'll just set our own haha
if 'epilogue' not in self:
self.epilogue = ';'
# basically force two newlines after a class definition if it's
# toplevel (not within another class)
if not issubclass(self.parent.opts.type, Class):
self.epilogue += '\n\n'
class Statement(Primitive):
def _write(self, context):
txt = str(self)
# statements always start on new lines
context.output.line_feed()
context.output.write(txt)
class Namespace(Primitive):
def __init__(self, parent, path):
super(Namespace, self).__init__(parent, text=None, path=path)
self.epilogue = None
def _write(self, context):
path = filter(None, self.path)
if path:
parts = [r'namespace {0} {{'.format(i) for i in path]
text = ' '.join(parts) + '\n'
self.epilogue = '}' * len(path) + ' // ' + '::'.join(path)
context.outputs.line_feed()
print >>context.outputs, text
def enter_scope_callback(self, context, scope):
return dict(physical_scope=False)
def exit_scope_callback(self, context, scope):
if scope.opts.epilogue:
# namespaces don't have physical_scope cause they have an ending
# text hardcoded into .epilogue by the write_primitive method
context.outputs.double_space()
# => write the epilogue statement for all outputs
print >>context.outputs, scope.opts.epilogue,
return dict(physical_scope=False)
class CppPrimitiveFactory(PrimitiveFactory):
# TODO enforce somehow that each PrimitiveFactory subclass defines a types
# staticvar (method_name => class to instantiate with default parameters)
types = {'cls': Class}
def namespace(self, ns):
path = ns.split('.')
return Namespace(self._scope(), path)
def stmt(self, text='\n'):
'non-special statement, default to newline'
return Statement(self._scope(), text)
__call__ = stmt
# ---------------------------------------------------------------
# OutputContext
# ---------------------------------------------------------------
class CppOutputContext(OutputContext):
def __init__(self, output_h, header_path):
self._output_h = output_h
self._header_path = header_path
outputs = [output_h]
for output in outputs:
output.make_scope = create_scope_factory(CppScope, output)
# shorthand to write to all outputs at the same time
self._all_outputs = CompositeOutput(*outputs)
# start writing in the header
self.output = output_h
@property
def h(self):
return self._output_h
@property
def output(self):
return self._output_crt
@output.setter
def output(self, output):
self._output_crt = output
@property
def outputs(self):
return self._all_outputs
def _enter_scope_handler(self, scope, physical_scope=True):
if scope.parent is None:
# save the default "current output" in the parent scope
scope.opts.output = self.output
# start guard in h
print >>self._output_h, '#pragma once\n'
return
# set the output of the real scope's content according to the
# logical scope's output
if not 'output' in scope.opts:
# if it doesn't then it's a namespace or something, just pass
# the output of its parent on
scope.opts.output = scope.parent.opts.output
self.output = scope.opts.output
if physical_scope:
pscope = self.output.make_scope()
scope.physical_scope = pscope
pscope.acquire()
def _exit_scope_handler(self, scope, physical_scope=True):
if scope.parent is None:
# Make sure file is newline terminated.
self.outputs.line_feed()
return
if physical_scope:
scope.physical_scope.release()
if 'epilogue' in scope.opts:
self.output.write(scope.opts.epilogue)
# reset the output to the parent scope's output
self.output = scope.parent.opts.output | en | 0.783859 | #! /usr/bin/env python2 -tt # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # --------------------------------------------------------------- # Scope # --------------------------------------------------------------- # Make sure the line is flagged only when an open brace was printed and # while it wasn't closed # --------------------------------------------------------------- # PrimitiveFactory and primitives # --------------------------------------------------------------- # String Format: type folly abspath::name # Example: class FOLLY_DEPRECATE("msg") classname::function : extrastuff # deduce name # this is magic! Basically what it does it it checks if we're # already on an empty line. If we are not then we introduce a # newline before the class defn # the scope of this will be written to output_h # no custom epilogue? 
we'll just set our own haha # basically force two newlines after a class definition if it's # toplevel (not within another class) # statements always start on new lines # namespaces don't have physical_scope cause they have an ending # text hardcoded into .epilogue by the write_primitive method # => write the epilogue statement for all outputs # TODO enforce somehow that each PrimitiveFactory subclass defines a types # staticvar (method_name => class to instantiate with default parameters) # --------------------------------------------------------------- # OutputContext # --------------------------------------------------------------- # shorthand to write to all outputs at the same time # start writing in the header # save the default "current output" in the parent scope # start guard in h # set the output of the real scope's content according to the # logical scope's output # if it doesn't then it's a namespace or something, just pass # the output of its parent on # Make sure file is newline terminated. # reset the output to the parent scope's output | 1.978423 | 2 |
app/tests/v2/test_products.py | Deekerubo/Store-Manager-API | 0 | 6613647 | import unittest
import os
import json
from app import create_app
from .base_test import UserAuth
from app.api.database import create_tables, destroy_tables
ADD_ENTRY_URL = '/api/v2/products'
GET_SINGLE_ENTRY = '/api/v2/products/1'
GET_ALL_ENTRY = '/api/v2/products'
class Test_Entry_Case(UserAuth):
    """Integration tests for the /api/v2/products endpoints.

    Each test authenticates through :class:`UserAuth` to obtain a JWT and
    then exercises one product operation or one validation rule.
    """

    def setUp(self):
        """Reset the database schema and build the request payloads."""
        super().setUp()
        destroy_tables()
        create_tables()
        # A fully valid product payload; each variant below invalidates
        # exactly one field so a single validation rule is exercised.
        self.entry_item = {
            "product_name": "name",
            "product_description": "description",
            "quantity": 4675,
            "price": 23,
            "category": "category",
        }
        self.empty_product_name = dict(self.entry_item, product_name="")
        self.empty_product_description = dict(self.entry_item,
                                              product_description="")
        self.empty_product_category = dict(self.entry_item, category="")
        # NOTE: despite the "as_integer" names these carry *string* values;
        # they verify that the API rejects a non-integer quantity/price.
        self.quatinty_as_integer = dict(self.entry_item, quantity="4675")
        self.price_as_integer = dict(self.entry_item, price="23")

    # ------------------------------------------------------------------
    # helpers
    # ------------------------------------------------------------------
    def _token(self):
        """Authenticate via the base class and return the JWT access token."""
        login = self.Auth(self.signup_data)
        return json.loads(login.data.decode())['access_token']

    def _json_request(self, method, url, payload, token):
        """Send *payload* as JSON with a Bearer *token*; return (res, body)."""
        res = getattr(self.app, method)(
            url,
            headers=dict(Authorization="Bearer " + token),
            data=json.dumps(payload),
            content_type='application/json')
        return res, json.loads(res.get_data().decode("UTF-8"))

    def _post_without_auth(self, payload):
        """POST *payload* with no Authorization header.

        Kept from the original tests: such a request cannot create a
        product, so follow-up requests operate on an empty table.
        """
        self.app.post(ADD_ENTRY_URL,
                      data=json.dumps(payload),
                      content_type='application/json')

    def _assert_validation_error(self, payload, message):
        """POST *payload* with auth and expect HTTP 400 plus *message*."""
        res, data = self._json_request('post', ADD_ENTRY_URL,
                                       payload, self._token())
        self.assertEqual(message, data['message'])
        self.assertEqual(res.status_code, 400)

    # ------------------------------------------------------------------
    # CRUD
    # ------------------------------------------------------------------
    def test_add_entry(self):
        '''A valid, authenticated POST creates the product (201).'''
        res, data = self._json_request('post', ADD_ENTRY_URL,
                                       self.entry_item, self._token())
        self.assertIn('Product Created!', data['message'])
        self.assertEqual(res.status_code, 201)

    def test_get_single_entry(self):
        '''GET on product 1 when nothing was created reports "not Found".'''
        token = self._token()
        self._post_without_auth(self.entry_item)
        res, data = self._json_request('get', GET_SINGLE_ENTRY,
                                       self.entry_item, token)
        # NOTE(review): a not-found message with HTTP 200 looks inconsistent;
        # confirm the intended status code against the API implementation.
        self.assertIn('Product not Found', data['message'])
        self.assertEqual(res.status_code, 200)

    def test_get_sale_records(self):
        '''GET on the collection retrieves all products (200).'''
        token = self._token()
        self._post_without_auth(self.entry_item)
        res, data = self._json_request('get', GET_ALL_ENTRY,
                                       self.entry_item, token)
        self.assertIn('All Products Retrieved', data['message'])
        self.assertEqual(res.status_code, 200)

    def test_delete_product(self):
        '''DELETE on a single product succeeds (200).'''
        token = self._token()
        self._post_without_auth(self.entry_item)
        res, data = self._json_request('delete', GET_SINGLE_ENTRY,
                                       self.entry_item, token)
        self.assertIn('Product Deleted!', data['message'])
        self.assertEqual(res.status_code, 200)

    def test_modify_product(self):
        '''PUT on a single product succeeds (200).'''
        token = self._token()
        self._post_without_auth(self.entry_item)
        res, data = self._json_request('put', GET_SINGLE_ENTRY,
                                       self.entry_item, token)
        # "succesfully" matches the (misspelled) message the API returns.
        self.assertIn('Product updated succesfully!', data['message'])
        self.assertEqual(res.status_code, 200)

    # ------------------------------------------------------------------
    # validation
    # ------------------------------------------------------------------
    def test_empty_description(self):
        '''An empty product description is rejected (400).'''
        self._assert_validation_error(
            self.empty_product_description,
            'Product description can not be empty!')

    def test_empty_name(self):
        '''An empty product name is rejected (400).'''
        self._assert_validation_error(
            self.empty_product_name,
            'Product can not be empty!')

    def test_quantity_integer(self):
        '''A non-integer quantity is rejected (400).'''
        self._assert_validation_error(
            self.quatinty_as_integer,
            'Quantity must be integer!')

    def test_price_integer(self):
        '''A non-integer price is rejected (400).'''
        self._assert_validation_error(
            self.price_as_integer,
            'Price must be integer!')

    def product_addition_twice(self):
        '''Adding the same product twice is rejected (400).

        NOTE(review): the name is missing the ``test_`` prefix, so unittest
        never discovers or runs this test.  The setup POST also lacks an
        auth header, so the duplicate check may never trigger.  Fix both
        (and verify it passes) before enabling.
        '''
        token = self._token()
        self._post_without_auth(self.entry_item)
        res, data = self._json_request('post', ADD_ENTRY_URL,
                                       self.entry_item, token)
        self.assertEqual('Product item already exists!', data['message'])
        self.assertEqual(res.status_code, 400)

    def test_empty_category(self):
        '''An empty product category is rejected (400).'''
        self._assert_validation_error(
            self.empty_product_category,
            'Product category can not be empty!')
# def test_not_nameModify(self):
# login = super(Test_Entry_Case, self).Auth(self.signup_data)
# data = json.loads(login.data.decode())
# token = data['access_token']
# res = self.app.put(ADD_ENTRY_URL,
# headers=dict(Authorization="Bearer " + token),
# data=json.dumps(self.not_name),
# content_type='application/json')
# data = json.loads(res.data.decode())
# self.assertEqual('The method is not allowed for the requested URL.', data['message'])
| import unittest
import os
import json
from app import create_app
from .base_test import UserAuth
from app.api.database import create_tables, destroy_tables
# REST endpoints of the v2 products API exercised by these tests.
ADD_ENTRY_URL = '/api/v2/products'       # POST: create a product
GET_SINGLE_ENTRY = '/api/v2/products/1'  # GET/PUT/DELETE: product with id 1
GET_ALL_ENTRY = '/api/v2/products'       # GET: list all products
class Test_Entry_Case(UserAuth):
    """Test cases for the v2 products API."""

    def setUp(self):
        """Recreate the schema and prepare the request payloads."""
        super().setUp()
        destroy_tables()
        create_tables()
        base = {
            "product_name": "name",
            "product_description": "description",
            "quantity": 4675,
            "price": 23,
            "category": "category",
        }
        self.entry_item = dict(base)
        self.empty_product_name = {**base, "product_name": ""}
        self.empty_product_description = {**base, "product_description": ""}
        self.empty_product_category = {**base, "category": ""}
        # string-valued variants used to probe the integer validation
        self.quatinty_as_integer = {**base, "quantity": "4675"}
        self.price_as_integer = {**base, "price": "23"}

    def _access_token(self):
        """Log in through the base class and return the bearer token."""
        reply = self.Auth(self.signup_data)
        return json.loads(reply.data.decode())['access_token']

    def _send(self, verb, url, payload, token):
        """Fire an authenticated JSON request and return the raw response."""
        caller = getattr(self.app, verb)
        return caller(url,
                      headers={'Authorization': f"Bearer {token}"},
                      data=json.dumps(payload),
                      content_type='application/json')

    def _seed_without_token(self):
        """POST the sample product with no auth header (as the original
        tests did); the request is not expected to create anything."""
        self.app.post(ADD_ENTRY_URL,
                      data=json.dumps(self.entry_item),
                      content_type='application/json')

    def test_add_entry(self):
        """Creating a product with a valid payload returns 201."""
        reply = self._send('post', ADD_ENTRY_URL, self.entry_item,
                           self._access_token())
        body = json.loads(reply.data.decode())
        self.assertIn('Product Created!', body['message'])
        self.assertEqual(reply.status_code, 201)

    def test_get_single_entry(self):
        """Fetching product 1 reports it as not found."""
        token = self._access_token()
        self._seed_without_token()
        reply = self._send('get', GET_SINGLE_ENTRY, self.entry_item, token)
        body = json.loads(reply.data.decode())
        self.assertIn('Product not Found', body['message'])
        self.assertEqual(reply.status_code, 200)

    def test_get_sale_records(self):
        """Listing the collection retrieves all products."""
        token = self._access_token()
        self._seed_without_token()
        reply = self._send('get', GET_ALL_ENTRY, self.entry_item, token)
        body = json.loads(reply.data.decode())
        self.assertIn('All Products Retrieved', body['message'])
        self.assertEqual(reply.status_code, 200)

    def test_delete_product(self):
        """Deleting a single product succeeds."""
        token = self._access_token()
        self._seed_without_token()
        reply = self._send('delete', GET_SINGLE_ENTRY, self.entry_item, token)
        body = json.loads(reply.data.decode())
        self.assertIn('Product Deleted!', body['message'])
        self.assertEqual(reply.status_code, 200)

    def test_modify_product(self):
        """Updating a single product succeeds."""
        token = self._access_token()
        self._seed_without_token()
        reply = self._send('put', GET_SINGLE_ENTRY, self.entry_item, token)
        body = json.loads(reply.data.decode())
        self.assertIn('Product updated succesfully!', body['message'])
        self.assertEqual(reply.status_code, 200)

    def test_empty_description(self):
        """An empty product description is rejected with 400."""
        reply = self._send('post', ADD_ENTRY_URL,
                           self.empty_product_description,
                           self._access_token())
        body = json.loads(reply.data.decode())
        self.assertEqual('Product description can not be empty!',
                         body['message'])
        self.assertEqual(reply.status_code, 400)

    def test_empty_name(self):
        """An empty product name is rejected with 400."""
        reply = self._send('post', ADD_ENTRY_URL,
                           self.empty_product_name,
                           self._access_token())
        body = json.loads(reply.data.decode())
        self.assertEqual('Product can not be empty!', body['message'])
        self.assertEqual(reply.status_code, 400)

    def test_quantity_integer(self):
        """A string quantity is rejected with 400."""
        reply = self._send('post', ADD_ENTRY_URL,
                           self.quatinty_as_integer,
                           self._access_token())
        body = json.loads(reply.data.decode())
        self.assertEqual('Quantity must be integer!', body['message'])
        self.assertEqual(reply.status_code, 400)

    def test_price_integer(self):
        """A string price is rejected with 400."""
        reply = self._send('post', ADD_ENTRY_URL,
                           self.price_as_integer,
                           self._access_token())
        body = json.loads(reply.data.decode())
        self.assertEqual('Price must be integer!', body['message'])
        self.assertEqual(reply.status_code, 400)

    def product_addition_twice(self):
        """Adding a duplicate product is rejected with 400.

        (Not discovered by unittest: the name lacks the ``test_`` prefix.)
        """
        token = self._access_token()
        self._seed_without_token()
        reply = self._send('post', ADD_ENTRY_URL, self.entry_item, token)
        body = json.loads(reply.data.decode())
        self.assertEqual('Product item already exists!', body['message'])
        self.assertEqual(reply.status_code, 400)

    def test_empty_category(self):
        """An empty product category is rejected with 400."""
        reply = self._send('post', ADD_ENTRY_URL,
                           self.empty_product_category,
                           self._access_token())
        body = json.loads(reply.data.decode())
        self.assertEqual('Product category can not be empty!',
                         body['message'])
        self.assertEqual(reply.status_code, 400)
# def test_not_nameModify(self):
# login = super(Test_Entry_Case, self).Auth(self.signup_data)
# data = json.loads(login.data.decode())
# token = data['access_token']
# res = self.app.put(ADD_ENTRY_URL,
# headers=dict(Authorization="Bearer " + token),
# data=json.dumps(self.not_name),
# content_type='application/json')
# data = json.loads(res.data.decode())
# self.assertEqual('The method is not allowed for the requested URL.', data['message'])
| en | 0.545477 | Initialize app and define test variables Test to add a new product Test to get a single entry Return a single entry of the product created Test get a sale record Test gets all the sale entries Test gets all the sale entries Test gets all the sale entries Test signup with an empty email address Test signup with an empty email address Test signup with an empty email address Test signup with an empty email address Test add product twice Test add product with empty category # def test_not_nameModify(self): # login = super(Test_Entry_Case, self).Auth(self.signup_data) # data = json.loads(login.data.decode()) # token = data['access_token'] # res = self.app.put(ADD_ENTRY_URL, # headers=dict(Authorization="Bearer " + token), # data=json.dumps(self.not_name), # content_type='application/json') # data = json.loads(res.data.decode()) # self.assertEqual('The method is not allowed for the requested URL.', data['message']) | 2.990371 | 3 |
Chapter01/create_venv.py | PacktPublishing/Secret-Recipes-of-the-Python-Ninja | 13 | 6613648 | >>> python3 -m venv <dir_name>
| >>> python3 -m venv <dir_name>
| none | 1 | 1.175724 | 1 | |
tools/ingester_migrate/migrate.py | sguduguntla/xboswave | 9 | 6613649 | <reponame>sguduguntla/xboswave
from influxdb import InfluxDBClient

# One-off migration: fold every measurement named "xbos/..." into a single
# "timeseries" measurement, tagging each point with its source collection.
client = InfluxDBClient('localhost', 8086, '', '', 'xbos')

# BUG FIX: the original called write_points() once per point, i.e. one HTTP
# round-trip per data point.  Points are now accumulated and written in
# batches, which write_points() accepts natively.
BATCH_SIZE = 5000

measurements = client.get_list_measurements()
to_delete = []
for m in measurements:
    col = m['name']
    if not col.startswith('xbos/'):
        continue
    to_delete.append(m)
    q = client.query('select * from "{0}"'.format(col))
    batch = []
    count = 0
    for p in q.get_points():
        batch.append({
            'tags': {
                'collection': col,
                'unit': p['unit'],
                'name': p['name'],
                'uuid': p['uuid'],
                # may be absent/None for points without a prediction step
                'prediction_step': p.get('prediction_step', None),
            },
            'measurement': 'timeseries',
            'time': p['time'],
            'fields': {
                'prediction_time': p.get('prediction_time', None),
                'value': float(p['value']),
            },
        })
        count += 1
        if len(batch) >= BATCH_SIZE:
            client.write_points(batch)
            batch = []
    if batch:  # flush the remainder
        client.write_points(batch)
    print("Wrote {0} points from {1}".format(count, col))

print("\nCheck the 'timeseries' collection and then remove the following")
for measurement in to_delete:
    print(measurement['name'])
from influxdb import InfluxDBClient

# Copy every "xbos/..." measurement into the shared "timeseries"
# measurement, preserving the original measurement name in a
# "collection" tag, then list the sources for manual removal.
client = InfluxDBClient('localhost', 8086, '', '', 'xbos')

to_delete = []
for source_measurement in client.get_list_measurements():
    source = source_measurement['name']
    if not source.startswith('xbos/'):
        continue
    to_delete.append(source_measurement)
    written = 0
    result = client.query('select * from "{0}"'.format(source))
    for point in result.get_points():
        tags = {
            'collection': source,
            'unit': point['unit'],
            'name': point['name'],
            'uuid': point['uuid'],
            'prediction_step': point.get('prediction_step', None),
        }
        fields = {
            'prediction_time': point.get('prediction_time', None),
            'value': float(point['value']),
        }
        client.write_points([{
            'measurement': 'timeseries',
            'time': point['time'],
            'tags': tags,
            'fields': fields,
        }])
        written += 1
    print("Wrote {0} points from {1}".format(written, source))

print("\nCheck the 'timeseries' collection and then remove the following")
for measurement in to_delete:
    print(measurement['name'])
website/vpn/sts/models.py | lenz-li/FlexGW-1 | 212 | 6613650 | <filename>website/vpn/sts/models.py
# -*- coding: utf-8 -*-
"""
website.vpn.sts.models
~~~~~~~~~~~~~~~~~~~~~~
vpn sts system models.
"""
from datetime import datetime
from website import db
class Tunnels(db.Model):
    '''Site-to-site VPN tunnel record.'''
    __tablename__ = 'sts_tunnels'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True, index=True)  # tunnel name
    rules = db.Column(db.String(500))                         # tunnel rules
    psk = db.Column(db.String(80))                            # pre-shared key
    created_at = db.Column(db.DateTime)

    def __init__(self, name, rules, psk, created_at=None):
        '''Create a tunnel row.

        BUG FIX: the default used to be ``created_at=datetime.now()``,
        which Python evaluates once when the class is defined, so every
        row created without an explicit timestamp got the module-load
        time.  Defaulting to ``None`` and resolving it here records the
        actual creation time.
        '''
        self.name = name
        self.rules = rules
        self.psk = psk
        self.created_at = created_at if created_at is not None else datetime.now()

    def __repr__(self):
        return '<Tunnels %s:%s>' % (self.name, self.created_at)
| <filename>website/vpn/sts/models.py
# -*- coding: utf-8 -*-
"""
website.vpn.sts.models
~~~~~~~~~~~~~~~~~~~~~~
vpn sts system models.
"""
from datetime import datetime
from website import db
class Tunnels(db.Model):
    '''Site-to-site VPN tunnel record.'''
    __tablename__ = 'sts_tunnels'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True, index=True)  # tunnel name
    rules = db.Column(db.String(500))                         # tunnel rules
    psk = db.Column(db.String(80))                            # pre-shared key
    created_at = db.Column(db.DateTime)

    def __init__(self, name, rules, psk, created_at=None):
        '''Create a tunnel row.

        BUG FIX: the default used to be ``created_at=datetime.now()``,
        which Python evaluates once when the class is defined, so every
        row created without an explicit timestamp got the module-load
        time.  Defaulting to ``None`` and resolving it here records the
        actual creation time.
        '''
        self.name = name
        self.rules = rules
        self.psk = psk
        self.created_at = created_at if created_at is not None else datetime.now()

    def __repr__(self):
        return '<Tunnels %s:%s>' % (self.name, self.created_at)
| en | 0.412016 | # -*- coding: utf-8 -*- website.vpn.sts.models ~~~~~~~~~~~~~~~~~~~~~~ vpn sts system models. tunnels models. | 2.568376 | 3 |