hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
591a3cacdd285aff8631717d90ecd13067b4060d | 1,117 | py | Python | consulate/exceptions.py | python-microservices/consulate | 9d95a946b3a5c3095437801488cf84836d72192c | [
"BSD-3-Clause"
] | 309 | 2015-01-09T19:34:43.000Z | 2022-02-11T17:27:55.000Z | consulate/exceptions.py | python-microservices/consulate | 9d95a946b3a5c3095437801488cf84836d72192c | [
"BSD-3-Clause"
] | 101 | 2015-01-15T13:26:56.000Z | 2021-11-25T17:25:28.000Z | consulate/exceptions.py | python-microservices/consulate | 9d95a946b3a5c3095437801488cf84836d72192c | [
"BSD-3-Clause"
] | 114 | 2015-03-02T17:35:43.000Z | 2022-03-26T16:26:46.000Z | """
Consulate Exceptions
"""
class ConsulateException(Exception):
    """Root of the exception hierarchy raised by consulate."""


class RequestError(ConsulateException):
    """The HTTP request to the Consul server could not be carried out."""


class ClientError(ConsulateException):
    """Consul rejected the request that was made as invalid."""


class ServerError(ConsulateException):
    """Consul reported an internal server-side error."""


class ACLDisabled(ConsulateException):
    """An ACL-related call was made while ACL support is turned off."""


class ACLFormatError(ConsulateException):
    """A PolicyLink is missing both its 'ID' and 'Name' fields, or a
    ServiceIdentity is missing the required 'ServiceName' field.
    """


class Forbidden(ConsulateException):
    """ACLs are enabled and the supplied token failed validation."""


class NotFound(ConsulateException):
    """An operation was attempted with a value that could not be found."""


class LockFailure(ConsulateException):
    """Raised by :class:`~consulate.api.lock.Lock` when the lock cannot be
    acquired.
    """
| 22.34 | 79 | 0.722471 |
a04755d2f1373fbfbb891dff65fa1a2bb5e94ea2 | 767 | py | Python | alphadoc/main.py | MLH-Fellowship/alphadoc | e1e867e40d0affc9b67b6f5c02120680863ee81a | [
"MIT"
] | 1 | 2020-11-25T17:56:55.000Z | 2020-11-25T17:56:55.000Z | alphadoc/main.py | MLH-Fellowship/alphadoc | e1e867e40d0affc9b67b6f5c02120680863ee81a | [
"MIT"
] | 16 | 2020-11-22T18:29:25.000Z | 2021-02-01T07:58:35.000Z | alphadoc/main.py | V2dha/alphadoc | e1e867e40d0affc9b67b6f5c02120680863ee81a | [
"MIT"
] | 6 | 2021-01-20T06:57:52.000Z | 2021-01-25T04:06:15.000Z | import click
from alphadoc.docstring import get_docstring
# Text shown as the command's --help description (passed to click below).
info = '''Automatic docstring generator and style guide that
supports a number of specified conventions for formatting
as well as documentation in Python.'''

# Help text for the --doc_format/-d option, listing the supported
# docstring conventions.
doc_help = '''Specified format for docstrings from Options-
ReST : For ReStructured Text (default);
Epytext : For Epytext (Javadoc);
Google : For Google-Style ;
Numpydoc : For Numpydoc'''
@click.command(help=info)
@click.argument('filename')
@click.option('--doc_format', '-d', default='ReST', help=doc_help)
def main(filename, doc_format):
    # CLI entry point: generate docstrings for `filename` using the
    # convention selected by --doc_format (defaults to ReST).
    get_docstring(filename, doc_format)


if __name__ == "__main__" :
    main()
| 34.863636 | 71 | 0.628422 |
5bcdc3f245463e7c731f3b0d4f2aac621adc5ad6 | 9,340 | py | Python | astroNN/models/misc_models.py | igomezv/astroNN | 50af116f9cbfc684b63e7ddcf8829343a455722b | [
"MIT"
] | 156 | 2017-10-22T01:29:10.000Z | 2022-03-14T10:28:09.000Z | astroNN/models/misc_models.py | AbdulfattahBaalawi/astroNN | 0b970dd1a8d4d5e6d611ffa52cfd3c2ffdcb4643 | [
"MIT"
] | 16 | 2017-11-02T21:29:28.000Z | 2022-03-14T08:40:41.000Z | astroNN/models/misc_models.py | AbdulfattahBaalawi/astroNN | 0b970dd1a8d4d5e6d611ffa52cfd3c2ffdcb4643 | [
"MIT"
] | 46 | 2017-11-01T18:56:03.000Z | 2022-03-07T06:44:22.000Z | # ---------------------------------------------------------#
# astroNN.models.misc_models: Contain Misc. Models
# ---------------------------------------------------------#
import tensorflow.keras as tfk
from astroNN.models.base_bayesian_cnn import BayesianCNNBase
from astroNN.models.base_cnn import CNNBase
from astroNN.nn.layers import MCDropout, PolyFit
from astroNN.nn.losses import bayesian_binary_crossentropy_wrapper, bayesian_binary_crossentropy_var_wrapper
from astroNN.nn.losses import bayesian_categorical_crossentropy_wrapper, bayesian_categorical_crossentropy_var_wrapper
# Short module-level aliases for the tf.keras symbols used by the model
# classes below.
regularizers = tfk.regularizers
Dense = tfk.layers.Dense
Input = tfk.layers.Input
Conv2D = tfk.layers.Conv2D
Dropout = tfk.layers.Dropout
Flatten = tfk.layers.Flatten
Activation = tfk.layers.Activation
concatenate = tfk.layers.concatenate
MaxPooling2D = tfk.layers.MaxPooling2D
Model = tfk.models.Model
MaxNorm = tfk.constraints.MaxNorm
class Cifar10CNN(CNNBase):
    """
    NAME:
        Cifar10CNN
    PURPOSE:
        To create Convolutional Neural Network model for Cifar10 for the purpose of demo
    HISTORY:
        2018-Jan-11 - Written - Henry Leung (University of Toronto)
    """

    def __init__(self, lr=0.005):
        """
        NAME:
            __init__
        PURPOSE:
            Set the hyperparameters for the Cifar10 CNN.
        INPUT:
            lr (float): initial learning rate
        OUTPUT:
        HISTORY:
            2018-Jan-11 - Written - Henry Leung (University of Toronto)
        """
        super().__init__()
        self._implementation_version = '1.0'
        self.initializer = 'he_normal'
        self.activation = 'relu'
        self.num_filters = [8, 16]      # filters of the two conv layers
        self.filter_len = (3, 3)
        self.pool_length = (4, 4)
        self.num_hidden = [256, 128]    # units of the two dense layers
        self.max_epochs = 30
        self.lr = lr
        # Learning-rate reduction schedule -- consumed by the training loop
        # in CNNBase (TODO confirm exact semantics there).
        self.reduce_lr_epsilon = 0.00005
        self.reduce_lr_min = 1e-8
        self.reduce_lr_patience = 1
        self.l2 = 1e-4                  # L2 weight-decay strength
        self.dropout_rate = 0.1

        self.task = 'classification'
        self.targetname = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
        # presumably 255 selects divide-by-255 pixel normalization -- verify
        # against the normalizer used by the base class.
        self.input_norm_mode = 255
        self.labels_norm_mode = 0

    def model(self):
        """Build and return the Keras model: conv-conv-pool, then two dense
        layers, finishing with a task-dependent output activation."""
        input_tensor = Input(shape=self._input_shape['input'], name='input')
        cnn_layer_1 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[0],
                             kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(input_tensor)
        activation_1 = Activation(activation=self.activation)(cnn_layer_1)
        cnn_layer_2 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[1],
                             kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(activation_1)
        activation_2 = Activation(activation=self.activation)(cnn_layer_2)
        maxpool_1 = MaxPooling2D(pool_size=self.pool_length)(activation_2)
        flattener = Flatten()(maxpool_1)
        dropout_1 = Dropout(self.dropout_rate)(flattener)
        layer_3 = Dense(units=self.num_hidden[0], kernel_regularizer=regularizers.l2(self.l2),
                        kernel_initializer=self.initializer)(dropout_1)
        activation_3 = Activation(activation=self.activation)(layer_3)
        dropout_2 = Dropout(self.dropout_rate)(activation_3)
        # Second dense layer additionally caps the kernel max-norm at 2.
        layer_4 = Dense(units=self.num_hidden[1], kernel_regularizer=regularizers.l2(self.l2),
                        kernel_initializer=self.initializer, kernel_constraint=MaxNorm(2))(dropout_2)
        activation_4 = Activation(activation=self.activation)(layer_4)
        layer_5 = Dense(units=self._labels_shape['output'])(activation_4)
        output = Activation(activation=self._last_layer_activation, name='output')(layer_5)

        model = Model(inputs=input_tensor, outputs=output)

        return model
# noinspection PyCallingNonCallable
class MNIST_BCNN(BayesianCNNBase):
    """
    NAME:
        MNIST_BCNN
    PURPOSE:
        To create Bayesian Convolutional Neural Network model for MNIST for the purpose of demo
    HISTORY:
        2018-Jan-11 - Written - Henry Leung (University of Toronto)
    """

    def __init__(self, lr=0.005):
        """
        NAME:
            __init__
        PURPOSE:
            Set the hyperparameters for the Bayesian MNIST CNN.
        INPUT:
            lr (float): initial learning rate
        OUTPUT:
        HISTORY:
            2018-Jan-11 - Written - Henry Leung (University of Toronto)
        """
        super().__init__()
        self._implementation_version = '1.0'
        self.initializer = 'he_normal'
        self.activation = 'relu'
        self.num_filters = [8, 16]      # filters of the two conv layers
        self.filter_len = (3, 3)
        self.pool_length = (4, 4)
        self.num_hidden = [256, 128]    # units of the two dense layers
        self.max_epochs = 30
        self.lr = lr
        self.reduce_lr_epsilon = 0.00005
        self.reduce_lr_min = 1e-8
        self.reduce_lr_patience = 1
        self.l2 = 1e-4                  # L2 weight-decay strength
        self.dropout_rate = 0.1

        self.task = 'classification'
        self.targetname = ['Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
        # presumably 255 selects divide-by-255 pixel normalization -- verify
        # against the normalizer used by the base class.
        self.input_norm_mode = 255
        self.labels_norm_mode = 0

    def model(self):
        """
        Build the Bayesian CNN.

        Returns a 4-tuple (presumably consumed by BayesianCNNBase -- confirm):
        the training model with [output, variance_output] heads, a prediction
        model with the two heads concatenated, and the two loss functions for
        the mean and variance outputs respectively.
        """
        input_tensor = Input(shape=self._input_shape['input'], name='input')
        cnn_layer_1 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[0],
                             kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(input_tensor)
        activation_1 = Activation(activation=self.activation)(cnn_layer_1)
        # NOTE(review): MCDropout presumably stays stochastic at inference
        # time (Monte-Carlo dropout) unless `disable` is set -- confirm in
        # astroNN.nn.layers.
        dropout_1 = MCDropout(self.dropout_rate, disable=self.disable_dropout)(activation_1)
        cnn_layer_2 = Conv2D(kernel_initializer=self.initializer, padding="same", filters=self.num_filters[1],
                             kernel_size=self.filter_len, kernel_regularizer=regularizers.l2(self.l2))(dropout_1)
        activation_2 = Activation(activation=self.activation)(cnn_layer_2)
        dropout_2 = MCDropout(self.dropout_rate, disable=self.disable_dropout)(activation_2)
        maxpool_1 = MaxPooling2D(pool_size=self.pool_length)(dropout_2)
        flattener = Flatten()(maxpool_1)
        layer_3 = Dense(units=self.num_hidden[0], kernel_regularizer=regularizers.l2(self.l2),
                        kernel_initializer=self.initializer)(flattener)
        activation_3 = Activation(activation=self.activation)(layer_3)
        dropout_4 = MCDropout(self.dropout_rate, disable=self.disable_dropout)(activation_3)
        layer_4 = Dense(units=self.num_hidden[1], kernel_regularizer=regularizers.l2(self.l2),
                        kernel_initializer=self.initializer, kernel_constraint=MaxNorm(2))(dropout_4)
        activation_4 = Activation(activation=self.activation)(layer_4)
        # Linear (pre-activation) mean head; the activated copy is only used
        # by the prediction model below.
        output = Dense(units=self._labels_shape['output'], activation='linear', name='output')(activation_4)
        output_activated = Activation(self._last_layer_activation)(output)
        # Predictive-variance head; softplus keeps the output positive.
        variance_output = Dense(units=self._labels_shape['output'], activation='softplus', name='variance_output')(activation_4)

        model = Model(inputs=[input_tensor], outputs=[output, variance_output])
        # new astroNN high performance dropout variational inference on GPU expects single output
        model_prediction = Model(inputs=[input_tensor], outputs=concatenate([output_activated, variance_output]))

        if self.task == 'classification':
            output_loss = bayesian_categorical_crossentropy_wrapper(variance_output)
            variance_loss = bayesian_categorical_crossentropy_var_wrapper(output)
        elif self.task == 'binary_classification':
            output_loss = bayesian_binary_crossentropy_wrapper(variance_output)
            variance_loss = bayesian_binary_crossentropy_var_wrapper(output)
        else:
            raise RuntimeError('Only "regression", "classification" and "binary_classification" are supported')

        return model, model_prediction, output_loss, variance_loss
# noinspection PyCallingNonCallable
class SimplePolyNN(CNNBase):
    """
    Class for Neural Network for Gaia Polynomial fitting

    :History: 2018-Jul-23 - Written - Henry Leung (University of Toronto)
    """

    def __init__(self, lr=0.005, init_w=None, use_xbias=False):
        """
        :param lr: initial learning rate
        :param init_w: initial weights handed to the PolyFit layer
        :param use_xbias: whether PolyFit should include an x-bias term
        """
        super().__init__()
        self._implementation_version = '1.0'
        self.max_epochs = 40
        self.lr = lr
        self.reduce_lr_epsilon = 0.00005
        self.num_hidden = 3  # equals degree of polynomial to fit
        self.reduce_lr_min = 1e-8
        self.reduce_lr_patience = 2
        self.input_norm_mode = 0     # 0: no input/label normalization, presumably
        self.labels_norm_mode = 0
        self.init_w = init_w
        self.use_xbias = use_xbias
        self.task = 'regression'
        self.targetname = ['unbiased_parallax']

    def model(self):
        """Build the model: flatten the input and fit a degree-`num_hidden`
        polynomial via the PolyFit layer."""
        input_tensor = Input(shape=self._input_shape, name='input')
        flattener = Flatten()(input_tensor)
        # NOTE(review): self.l2 is never assigned in this __init__ (unlike the
        # other models above) -- presumably inherited from CNNBase; confirm,
        # otherwise this line raises AttributeError.
        output = PolyFit(deg=self.num_hidden,
                         output_units=self._labels_shape,
                         use_xbias=self.use_xbias,
                         name='output',
                         init_w=self.init_w,
                         kernel_regularizer=regularizers.l2(self.l2))(flattener)

        model = Model(inputs=input_tensor, outputs=output)
        return model
| 42.262443 | 128 | 0.659422 |
7dc517a03e8e7475ed98e672cb6fde9c3af2ae5c | 1,680 | py | Python | conans/test/command/copy_packages_test.py | datalogics-kam/conan | 7bf230cd5f8ef68eb804908777ebaad75e951b16 | [
"MIT"
] | null | null | null | conans/test/command/copy_packages_test.py | datalogics-kam/conan | 7bf230cd5f8ef68eb804908777ebaad75e951b16 | [
"MIT"
] | null | null | null | conans/test/command/copy_packages_test.py | datalogics-kam/conan | 7bf230cd5f8ef68eb804908777ebaad75e951b16 | [
"MIT"
] | null | null | null | import unittest
from conans.test.utils.tools import TestClient
import os
from conans.model.ref import ConanFileReference
class CopyPackagesTest(unittest.TestCase):
    """Integration tests for the ``conan copy`` command, which copies a
    recipe (and optionally its binary packages) to another user/channel."""

    def test_copy_command(self):
        client = TestClient()
        conanfile = """from conans import ConanFile
class Pkg(ConanFile):
    settings = "os"
"""
        client.save({"conanfile.py": conanfile})
        client.run("export . Hello0/0.1@lasote/stable")
        # Build one binary package per OS so there are three to copy.
        client.run("install Hello0/0.1@lasote/stable -s os=Windows --build missing")
        client.run("install Hello0/0.1@lasote/stable -s os=Linux --build missing")
        client.run("install Hello0/0.1@lasote/stable -s os=Macos --build missing")

        # Copy all packages: every binary package must arrive.
        # (assertEqual is used throughout; assertEquals is a deprecated alias.)
        client.run("copy Hello0/0.1@lasote/stable pepe/testing --all")
        pkgdir = client.paths.packages(ConanFileReference.loads("Hello0/0.1@pepe/testing"))
        packages = os.listdir(pkgdir)
        self.assertEqual(len(packages), 3)

        # Copy just one package, selected by its package id.
        client.run("copy Hello0/0.1@lasote/stable pepe/stable -p %s" % packages[0])
        pkgdir = client.paths.packages(ConanFileReference.loads("Hello0/0.1@pepe/stable"))
        packages = os.listdir(pkgdir)
        self.assertEqual(len(packages), 1)

        # Force: re-copying the same package must not duplicate it.
        client.run("copy Hello0/0.1@lasote/stable pepe/stable -p %s --force" % packages[0])
        packages = os.listdir(pkgdir)
        self.assertEqual(len(packages), 1)

        # Copy only the recipe: no package directory should be created.
        client.run("copy Hello0/0.1@lasote/stable pepe/alpha", ignore_error=True)
        pkgdir = client.paths.packages(ConanFileReference.loads("Hello0/0.1@pepe/alpha"))
        self.assertFalse(os.path.exists(pkgdir))
e1b119fe90b5b0d65f1b697c7f8ebe783a6afdbc | 887 | py | Python | ResizableMatrix.py | JFlaherty347/Pokemon-Red-AI | 9848c56d14c68bc5f215f6b81937ddf69ad0f297 | [
"Apache-2.0"
] | 2 | 2022-03-22T17:33:18.000Z | 2022-03-22T17:34:16.000Z | ResizableMatrix.py | JFlaherty347/Pokemon-Red-AI | 9848c56d14c68bc5f215f6b81937ddf69ad0f297 | [
"Apache-2.0"
] | null | null | null | ResizableMatrix.py | JFlaherty347/Pokemon-Red-AI | 9848c56d14c68bc5f215f6b81937ddf69ad0f297 | [
"Apache-2.0"
] | 1 | 2022-01-22T00:53:40.000Z | 2022-01-22T00:53:40.000Z | from typing import Collection, List
import numpy as np
import copy
class ResizeableMatrix:
    """A 2-D matrix that supports appending rows one at a time.

    Internally backed by ``numpy.matrix``; starts out empty unless an
    initial matrix is supplied.
    """

    def __init__(self, matrix=None, dtype=int) -> None:
        """Create a matrix, optionally seeded with an existing one.

        BUGFIX: the seed is tested with ``is not None`` instead of
        truthiness -- evaluating a multi-element numpy matrix in boolean
        context raises ``ValueError`` (ambiguous truth value), and a
        falsy seed (e.g. ``matrix([[0]])``) would have been discarded.
        ``dtype`` is currently unused; kept for interface compatibility.
        """
        if matrix is not None:
            self.matrix = matrix
        else:
            self.matrix = np.matrix([[]])

    def append(self, toAppend: List):
        """Append ``toAppend`` as a new row (its length must match the
        existing row length once the matrix is non-empty)."""
        if not np.array_equal(self.matrix, np.matrix([[]])):
            rows = self.matrix.tolist()
            rows.append(toAppend)
        else:
            # First row: replace the empty placeholder entirely.
            rows = [toAppend]
        self.matrix = np.matrix(rows)

    def __getitem__(self, key: Collection):
        """Return the element at ``(row, col)``; ``key`` must unpack to
        exactly two indices."""
        if self.matrix is None:
            raise IndexError("Matrix not initialized!")
        x, y = key
        return self.matrix[x, y]

    def __str__(self) -> str:
        return str(self.matrix)

    def retrieve_matrix(self):
        """Return the underlying ``numpy.matrix`` (not a copy)."""
        return self.matrix
b05908a157f99b11b21f4af60a5483ffe67125f3 | 464 | py | Python | tests/cpydiff/modules_random_randint.py | rxchen/micropython | 037b2c72a1d5b54a5508a58ab2044628a7a39fa4 | [
"MIT"
] | 13,648 | 2015-01-01T01:34:51.000Z | 2022-03-31T16:19:53.000Z | tests/cpydiff/modules_random_randint.py | rxchen/micropython | 037b2c72a1d5b54a5508a58ab2044628a7a39fa4 | [
"MIT"
] | 7,092 | 2015-01-01T07:59:11.000Z | 2022-03-31T23:52:18.000Z | tests/cpydiff/modules_random_randint.py | rxchen/micropython | 037b2c72a1d5b54a5508a58ab2044628a7a39fa4 | [
"MIT"
] | 4,942 | 2015-01-02T11:48:50.000Z | 2022-03-31T19:57:10.000Z | """
categories: Modules,random
description: ``randint`` method can only return an integer that is at most the native word size.
cause: PRNG is only able to generate 32 bits of state at a time. The result is then cast into a native sized int instead of a full int object.
workaround: If you need integers larger than native wordsize use the random module from micropython-lib.
"""
import random

# Deliberately request an integer wider than a native machine word: per the
# module docstring above, CPython succeeds here while MicroPython's randint
# cannot generate values beyond the native word size.
x = random.randint(2 ** 128 - 1, 2 ** 128)
print("x={}".format(x))
| 35.692308 | 142 | 0.739224 |
a4f83346ca7b1ff7b9c96ebb0b3a7f5e9e5450fa | 8,279 | py | Python | ThirdParty/Twisted/twisted/plugin.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 12 | 2015-01-21T00:24:06.000Z | 2021-07-01T03:06:39.000Z | ThirdParty/Twisted/twisted/plugin.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 4 | 2017-02-19T23:58:13.000Z | 2019-11-01T15:31:22.000Z | ThirdParty/Twisted/twisted/plugin.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 6 | 2017-02-13T09:11:02.000Z | 2021-06-29T11:22:18.000Z | # -*- test-case-name: twisted.test.test_plugin -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Plugin system for Twisted.
@author: Jp Calderone
@author: Glyph Lefkowitz
"""
import os
import sys
from zope.interface import Interface, providedBy
def _determinePickleModule():
    """
    Pick the pickle implementation to use.

    Prefers the C-accelerated cPickle (Python 2); falls back to the plain
    pickle module when cPickle is unavailable.
    """
    try:
        import cPickle as candidate
    except ImportError:
        import pickle as candidate
    return candidate

pickle = _determinePickleModule()
from twisted.python.components import getAdapterFactory
from twisted.python.reflect import namedAny
from twisted.python import log
from twisted.python.modules import getModule
class IPlugin(Interface):
    """
    Interface that must be implemented by all plugins.

    Only objects which implement this interface will be considered for return
    by C{getPlugins}. To be useful, plugins should also implement some other
    application-specific interface.
    """
    # Marker interface: it declares no methods or attributes of its own.
class CachedPlugin(object):
    """
    A cached record of a single plugin object found in a dropin module.

    Stores enough metadata (name, description, provided interfaces) to answer
    interface queries without importing the plugin's module; the real object
    is only imported on demand by L{load}.
    """

    def __init__(self, dropin, name, description, provided):
        self.dropin = dropin
        self.name = name
        self.description = description
        self.provided = provided
        # Register this plugin with its owning dropin.
        self.dropin.plugins.append(self)

    def __repr__(self):
        return '<CachedPlugin %r/%r (provides %r)>' % (
            self.name, self.dropin.moduleName,
            ', '.join([i.__name__ for i in self.provided]))

    def load(self):
        # Import and return the actual plugin object.
        return namedAny(self.dropin.moduleName + '.' + self.name)

    def __conform__(self, interface, registry=None, default=None):
        # zope.interface adaptation hook: if the cached plugin provides (or
        # can be adapted to) the requested interface, load and return it;
        # otherwise fall back to `default`.
        for providedInterface in self.provided:
            if providedInterface.isOrExtends(interface):
                return self.load()
            if getAdapterFactory(providedInterface, interface, None) is not None:
                return interface(self.load(), default)
        return default

    # backwards compat HOORJ
    getComponent = __conform__
class CachedDropin(object):
    """
    A collection of L{CachedPlugin} instances from a particular module in a
    plugin package.

    @type moduleName: C{str}
    @ivar moduleName: The fully qualified name of the plugin module this
        represents.

    @type description: C{str} or C{NoneType}
    @ivar description: A brief explanation of this collection of plugins
        (probably the plugin module's docstring).

    @type plugins: C{list}
    @ivar plugins: The L{CachedPlugin} instances which were loaded from this
        dropin.
    """
    def __init__(self, moduleName, description):
        """
        @param moduleName: fully qualified plugin module name.
        @param description: the module's docstring (may be C{None}).
        """
        self.moduleName = moduleName
        self.description = description
        # Starts empty; CachedPlugin.__init__ appends each plugin here.
        self.plugins = []
def _generateCacheEntry(provider):
    """
    Build a L{CachedDropin} for a plugin module, creating one L{CachedPlugin}
    for every module attribute that is adaptable to L{IPlugin}.
    """
    dropin = CachedDropin(provider.__name__,
                          provider.__doc__)
    for k, v in provider.__dict__.iteritems():
        plugin = IPlugin(v, None)
        if plugin is not None:
            # Instantiated for its side-effects.
            CachedPlugin(dropin, k, v.__doc__, list(providedBy(plugin)))
    return dropin
# Compatibility shim: use the built-in dict.fromkeys when the running
# Python provides it, otherwise supply an equivalent implementation.
if hasattr(dict, 'fromkeys'):
    fromkeys = dict.fromkeys
else:
    def fromkeys(keys, value=None):
        """Return a new dict mapping every element of ``keys`` to ``value``."""
        return dict([(key, value) for key in keys])
def getCache(module):
    """
    Compute all the possible loadable plugins, while loading as few as
    possible and hitting the filesystem as little as possible.

    @param module: a Python module object. This represents a package to search
    for plugins.

    @return: a dictionary mapping module names to L{CachedDropin} instances.
    """
    allCachesCombined = {}
    mod = getModule(module.__name__)
    # don't want to walk deep, only immediate children.
    buckets = {}
    # Fill buckets with modules by related entry on the given package's
    # __path__. There's an abstraction inversion going on here, because this
    # information is already represented internally in twisted.python.modules,
    # but it's simple enough that I'm willing to live with it. If anyone else
    # wants to fix up this iteration so that it's one path segment at a time,
    # be my guest. --glyph
    for plugmod in mod.iterModules():
        fpp = plugmod.filePath.parent()
        if fpp not in buckets:
            buckets[fpp] = []
        bucket = buckets[fpp]
        bucket.append(plugmod)
    for pseudoPackagePath, bucket in buckets.iteritems():
        dropinPath = pseudoPackagePath.child('dropin.cache')
        try:
            lastCached = dropinPath.getModificationTime()
            dropinDotCache = pickle.load(dropinPath.open('r'))
        except:
            # Bare except is deliberate: any failure reading or unpickling
            # the cache (missing file, corruption) just means a cold rebuild.
            dropinDotCache = {}
            lastCached = 0

        needsWrite = False
        existingKeys = {}
        for pluginModule in bucket:
            pluginKey = pluginModule.name.split('.')[-1]
            existingKeys[pluginKey] = True
            # Re-import a plugin module only if it is absent from the cache
            # or has been modified since the cache file was written.
            if ((pluginKey not in dropinDotCache) or
                (pluginModule.filePath.getModificationTime() >= lastCached)):
                needsWrite = True
                try:
                    provider = pluginModule.load()
                except:
                    # A broken plugin module is logged and skipped.
                    # dropinDotCache.pop(pluginKey, None)
                    log.err()
                else:
                    entry = _generateCacheEntry(provider)
                    dropinDotCache[pluginKey] = entry
        # Make sure that the cache doesn't contain any stale plugins.
        # (.keys() returns a list copy on Python 2, so deleting while
        # iterating is safe here.)
        for pluginKey in dropinDotCache.keys():
            if pluginKey not in existingKeys:
                del dropinDotCache[pluginKey]
                needsWrite = True
        if needsWrite:
            try:
                dropinPath.setContent(pickle.dumps(dropinDotCache))
            # Python 2 except syntax; a failure to persist the cache is
            # non-fatal -- the cache will simply be rebuilt next time.
            except OSError, e:
                log.msg(
                    format=(
                        "Unable to write to plugin cache %(path)s: error "
                        "number %(errno)d"),
                    path=dropinPath.path, errno=e.errno)
            except:
                log.err(None, "Unexpected error while writing cache file")
        allCachesCombined.update(dropinDotCache)
    return allCachesCombined
def getPlugins(interface, package=None):
    """
    Retrieve all plugins implementing the given interface beneath the given module.

    @param interface: An interface class. Only plugins which implement this
    interface will be returned.

    @param package: A package beneath which plugins are installed. For
    most uses, the default value is correct.

    @return: An iterator of plugins.
    """
    if package is None:
        # Imported lazily so that merely importing this module does not drag
        # in the default plugin package.
        import twisted.plugins as package
    allDropins = getCache(package)
    for dropin in allDropins.itervalues():
        for plugin in dropin.plugins:
            try:
                adapted = interface(plugin, None)
            except:
                # One plugin failing to adapt must not prevent the remaining
                # plugins from being yielded; log and continue.
                log.err()
            else:
                if adapted is not None:
                    yield adapted


# Old, backwards compatible name. Don't use this.
getPlugIns = getPlugins
def pluginPackagePaths(name):
    """
    Return a list of additional directories which should be searched for
    modules to be included as part of the named plugin package.

    @type name: C{str}
    @param name: The fully-qualified Python name of a plugin package, eg
        C{'twisted.plugins'}.

    @rtype: C{list} of C{str}
    @return: The absolute paths to other directories which may contain plugin
        modules for the named plugin package.
    """
    segments = name.split('.')
    # A sys.path entry contributes a supplementary plugin directory only when
    # it does not already contain the package proper, i.e. when no
    # <entry>/<pkg...>/__init__.py exists there.  Two caveats, preserved from
    # the original implementation: nonexistent directories may be returned,
    # and only '__init__.py' (not e.g. __init__.pyc) marks a directory as the
    # package itself.
    marker = ['__init__.py']
    paths = []
    for entry in sys.path:
        if not os.path.exists(os.path.join(entry, *segments + marker)):
            paths.append(os.path.abspath(os.path.join(entry, *segments)))
    return paths
| 32.339844 | 83 | 0.635342 |
ad2e07c53a1bd4ef254b702f01426912700b1662 | 180 | py | Python | examples/turtlebot.py | harryrobotics/pyrobot | 006c6c3cc9dc96ba199c4cfe0f2caac30a6acd26 | [
"MIT"
] | null | null | null | examples/turtlebot.py | harryrobotics/pyrobot | 006c6c3cc9dc96ba199c4cfe0f2caac30a6acd26 | [
"MIT"
] | 16 | 2020-01-28T22:49:47.000Z | 2022-03-11T23:51:24.000Z | examples/turtlebot.py | harryrobotics/pyrobot | 006c6c3cc9dc96ba199c4cfe0f2caac30a6acd26 | [
"MIT"
] | null | null | null | from pyrobot import Robot
robot = Robot('turtlebot',
use_base=True,
use_arm=False,
use_camera=False,
use_gripper=False)
while True:
print(robot.base.get_state('odom'))
| 15 | 36 | 0.733333 |
a94ac25aab2b6bf61393abc55301c064eb2ccad2 | 2,113 | py | Python | tests/test_alternative_tuple.py | BBVA/python-etl | f4c0e613792c93fe5833f9d5e670e8fa6cf675da | [
"Apache-2.0"
] | 20 | 2017-11-07T15:09:45.000Z | 2021-08-21T00:18:09.000Z | tests/test_alternative_tuple.py | BBVA/python-etl | f4c0e613792c93fe5833f9d5e670e8fa6cf675da | [
"Apache-2.0"
] | 4 | 2017-11-21T13:15:30.000Z | 2018-01-17T14:06:14.000Z | tests/test_alternative_tuple.py | BBVA/python-etl | f4c0e613792c93fe5833f9d5e670e8fa6cf675da | [
"Apache-2.0"
] | 9 | 2017-11-08T10:53:43.000Z | 2018-04-20T11:26:29.000Z | # Copyright 2017 BBVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datarefinery.TupleOperations import alternative, substitution, wrap
def test_empty():
def _fail_etl_func(i, e=None):
return None, "nop"
operation = alternative(
substitution(["a"], etl_func=_fail_etl_func),
substitution(["b"], etl_func=wrap(lambda x: x+1))
)
(res, err) = operation(None)
assert res is None
assert err == {'b': 'b not found'}
def test_some_working():
def _fail_etl_func(i, e=None):
return None, "nop"
inp = {"a": "jajaja", "b": 1}
operation = alternative(
substitution(["a"], etl_func=_fail_etl_func),
substitution(["b"], etl_func=wrap(lambda x: x + 1))
)
(res, err) = operation(inp)
assert inp == {"a": "jajaja", "b": 1}
assert res is not None
assert "a" not in res
assert "b" in res
assert res["b"] == 2
assert err is None
def test_multiple_alternatives():
def _fail_etl_func(i, e=None):
return None, "nop"
inp = {"a": "jajaja", "b": 1}
operation = alternative(
substitution(["a"], etl_func=_fail_etl_func),
substitution(["a"], etl_func=_fail_etl_func),
substitution(["a"], etl_func=_fail_etl_func),
substitution(["b"], etl_func=wrap(lambda x: x + 1))
)
(res, err) = operation(inp)
assert inp is not None
assert "a" in inp
assert inp["a"] == "jajaja"
assert "b" in inp
assert inp["b"] == 1
assert res is not None
assert "a" not in res
assert "b" in res
assert res["b"] == 2
assert err is None
| 28.554054 | 74 | 0.637009 |
f77b5153482bacdb3babb15dbf3bda3dc464f4d2 | 3,603 | py | Python | main.py | kant/open-solution-value-prediction | 708d0958cba1b8a551a10a9959678441f3eaa1dc | [
"MIT"
] | 27 | 2018-06-29T18:38:53.000Z | 2019-08-22T21:49:19.000Z | main.py | kant/open-solution-value-prediction | 708d0958cba1b8a551a10a9959678441f3eaa1dc | [
"MIT"
] | 16 | 2018-06-19T22:15:18.000Z | 2018-06-26T09:15:59.000Z | main.py | kant/open-solution-value-prediction | 708d0958cba1b8a551a10a9959678441f3eaa1dc | [
"MIT"
] | 15 | 2018-07-01T00:28:39.000Z | 2019-08-08T01:16:24.000Z | import click
from src.pipeline_manager import PipelineManager
# Single shared manager instance used by every CLI command below.
pipeline_manager = PipelineManager()


@click.group()
def main():
    pass


@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
def train(pipeline_name, dev_mode):
    # Fit the named pipeline; dev_mode restricts training to a small sample.
    pipeline_manager.train(pipeline_name, dev_mode)
@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
def evaluate(pipeline_name, dev_mode):
    # Score an already-trained pipeline.
    pipeline_manager.evaluate(pipeline_name, dev_mode)


@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
@click.option('-s', '--submit_predictions', help='submit predictions if true', is_flag=True, required=False)
def predict(pipeline_name, dev_mode, submit_predictions):
    # Generate test-set predictions and optionally submit them.
    pipeline_manager.predict(pipeline_name, dev_mode, submit_predictions)
@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-s', '--submit_predictions', help='submit predictions if true', is_flag=True, required=False)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
def train_evaluate_predict(pipeline_name, submit_predictions, dev_mode):
    # Full single-split workflow: fit, score, then predict (and maybe submit).
    pipeline_manager.train(pipeline_name, dev_mode)
    pipeline_manager.evaluate(pipeline_name, dev_mode)
    pipeline_manager.predict(pipeline_name, dev_mode, submit_predictions)


@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
def train_evaluate(pipeline_name, dev_mode):
    # Fit and score without producing test predictions.
    pipeline_manager.train(pipeline_name, dev_mode)
    pipeline_manager.evaluate(pipeline_name, dev_mode)
@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-s', '--submit_predictions', help='submit predictions if true', is_flag=True, required=False)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
def evaluate_predict(pipeline_name, submit_predictions, dev_mode):
    # Score an already-trained pipeline, then predict (and maybe submit).
    pipeline_manager.evaluate(pipeline_name, dev_mode)
    pipeline_manager.predict(pipeline_name, dev_mode, submit_predictions)


@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
def train_evaluate_cv(pipeline_name, dev_mode):
    # Cross-validated variant of train+evaluate.
    pipeline_manager.train_evaluate_cv(pipeline_name, dev_mode)


@main.command()
@click.option('-p', '--pipeline_name', help='pipeline to be trained', required=True)
@click.option('-s', '--submit_predictions', help='submit predictions if true', is_flag=True, required=False)
@click.option('-d', '--dev_mode', help='if true only a small sample of data will be used', is_flag=True, required=False)
def train_evaluate_predict_cv(pipeline_name, submit_predictions, dev_mode):
    # Cross-validated variant of the full train+evaluate+predict workflow.
    pipeline_manager.train_evaluate_predict_cv(pipeline_name, dev_mode, submit_predictions)


if __name__ == "__main__":
    main()
| 45.607595 | 120 | 0.752984 |
aa120034da96261de3fcb4f5dc9ae57703fe7003 | 6,114 | py | Python | slot_language/stronger_model/unet_train.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | slot_language/stronger_model/unet_train.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | slot_language/stronger_model/unet_train.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | import os
import sys
import importlib
import argparse
import numpy as np
from typing import Optional
import pytorch_lightning.loggers as pl_loggers
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
import clip
from unet_model import UNetSlotAttentionModel
from unet_params import SlotAttentionParams
sys.path.append('../')
from train import build_text2slot_model, build_data_module, process_ckp
from utils import VideoLogCallback, ImageLogCallback
from method import SlotAttentionVideoLanguageMethod as SlotAttentionMethod
def build_slot_attention_model(params: SlotAttentionParams):
    """Construct the UNet-based Slot Attention model from ``params``.

    Loads the CLIP backbone named by ``params.clip_arch``, optionally builds
    a Text2Slot module, and wires everything into ``UNetSlotAttentionModel``.
    """
    clip_model, _ = clip.load(params.clip_arch)
    text2slot_model = build_text2slot_model(params)
    model = UNetSlotAttentionModel(
        clip_model=clip_model,
        use_clip_vision=params.use_clip_vision,
        use_clip_text=params.use_text2slot,
        text2slot_model=text2slot_model,
        resolution=params.resolution,
        num_slots=params.num_slots,
        num_iterations=params.num_iterations,
        slot_size=params.slot_size,
        slot_mlp_size=params.slot_mlp_size,
        kernel_size=params.kernel_size,
        enc_channels=params.enc_channels,
        dec_channels=params.dec_channels,
        enc_pos_enc=params.enc_pos_enc,
        dec_resolution=params.dec_resolution,
        spec_decoder=params.spec_decoder,
        use_double_conv=params.use_double_conv,
        use_maxpool=params.use_maxpool,
        use_bilinear=params.use_bilinear,
        use_bn=params.use_bn,
        # Word-set inputs (and their padding masks) are only enabled for the
        # set-based text encoders ('Transformer' / 'DETR').
        use_word_set=params.use_text2slot
        and params.text2slot_arch in ['Transformer', 'DETR'],
        use_padding_mask=params.use_text2slot
        and params.text2slot_arch in ['Transformer', 'DETR']
        and params.text2slot_padding_mask,
        use_entropy_loss=params.use_entropy_loss,
        use_bg_sep_slot=params.use_bg_sep_slot,
    )
    return model
def main(params: Optional[SlotAttentionParams] = None):
    """Train the UNet Slot Attention model with the given hyper-parameters.

    Handles wandb logging, checkpointing, and automatic resume detection for
    SLURM-scheduled runs (keyed by the module-level ``SLURM_JOB_ID``).
    NOTE: also reads the module-level ``args`` parsed in the __main__ block.
    """
    if params is None:
        params = SlotAttentionParams()
    assert params.num_slots > 1, "Must have at least 2 slots."
    if params.is_verbose:
        print(f"INFO: model has {params.num_slots} slots")
        if params.num_train_images:
            print("INFO: restricting the train dataset size to "
                  f"`num_train_images`: {params.num_train_images}")
        if params.num_val_images:
            print("INFO: restricting the validation dataset size to "
                  f"`num_val_images`: {params.num_val_images}")
    if args.fp16:
        print('INFO: using FP16 training!')
    if args.weight:
        print(f'INFO: loading checkpoint {args.weight}')
    model = build_slot_attention_model(params)
    clevr_datamodule = build_data_module(params)
    print('Not using max_object_num constraint here!')
    method = SlotAttentionMethod(
        model=model, datamodule=clevr_datamodule, params=params)
    # we want to also resume wandb log if restoring from previous training
    logger_name = f'{args.params}-fp16' if args.fp16 else args.params
    if SLURM_JOB_ID:
        logger_name = f'{logger_name}-{SLURM_JOB_ID}'
    logger = pl_loggers.WandbLogger(
        project="slot-attention-clevr6-language-video",
        name=logger_name,
        id=logger_name)  # we assume only run one exp per one params setting
    # saves a file like: 'path/to/ckp/CLEVRVideo-001-100000-val=0.0032.ckpt'
    ckp_path = "./checkpoint/" \
        f"{args.params + '-fp16' if args.fp16 else args.params}/{SLURM_JOB_ID}"
    ckp_name = "CLEVRVideo-{epoch:03d}-{step:06d}-val_{val_recon_loss:.4f}"
    checkpoint_callback = ModelCheckpoint(
        monitor="val_recon_loss",
        dirpath=ckp_path,
        filename=ckp_name,
        save_top_k=2,
        mode="min",
    )
    # automatically detect previous checkpoint
    # because if SLURM_JOB_ID is equal, that should definitely be the case
    if os.path.exists(ckp_path):
        ckp_files = os.listdir(ckp_path)
        ckp_files = [ckp for ckp in ckp_files if ckp.startswith('CLEVRVideo')]
        # NOTE(review): the step number is sliced out of the fixed-width
        # filename (chars 26:32); update this slice if `ckp_name` changes.
        step_num = [int(ckp[26:32]) for ckp in ckp_files]
        last_ckp = ckp_files[np.argmax(step_num)]
        print(f'INFO: automatically detect checkpoint {last_ckp}')
        args.weight = os.path.join(ckp_path, last_ckp)
        process_ckp(args.weight)  # enable mid-epoch resuming
    trainer = Trainer(
        logger=logger if params.is_logger_enabled else False,
        # TODO: 'ddp' doesn't work on Vector cluster!
        accelerator="dp" if params.gpus > 1 else None,
        # Skip sanity validation when resuming from a checkpoint.
        num_sanity_val_steps=params.num_sanity_val_steps
        if not args.weight else 0,
        gpus=params.gpus,
        max_epochs=params.max_epochs,
        log_every_n_steps=50,
        val_check_interval=args.eval_interval,
        callbacks=[
            LearningRateMonitor("step"),
            ImageLogCallback(),
            VideoLogCallback(),
            checkpoint_callback,
        ] if params.is_logger_enabled else [checkpoint_callback],
        profiler='simple',
        precision=16 if args.fp16 else 32,
        weights_save_path=ckp_path,
    )
    trainer.fit(
        method,
        datamodule=clevr_datamodule,
        ckpt_path=args.weight if args.weight else None)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Train Slot Attention')
    parser.add_argument('--params', type=str, default='params')
    parser.add_argument('--sbatch', action='store_true')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--eval-interval', type=float, default=1.0)
    parser.add_argument('--weight', type=str, default='')
    # `args` and `SLURM_JOB_ID` are module-level and read inside main().
    args = parser.parse_args()
    if args.sbatch:
        assert os.environ.get('SLURM_JOB_ID') is not None, \
            'program not running in sbatch mode!'
        SLURM_JOB_ID = os.environ.get('SLURM_JOB_ID')
    else:
        SLURM_JOB_ID = ''
    if args.params.endswith('.py'):
        args.params = args.params[:-3]
    # The params module is imported dynamically so different experiment
    # configs can be selected from the command line.
    params = importlib.import_module(args.params)
    params = params.SlotAttentionParams()
    main(params)
| 37.740741 | 79 | 0.691855 |
f1b81b74a6c8a0438d7a56db53974fbb44fcf6a5 | 18,798 | py | Python | resources/models/utils.py | City-of-Turku/respa | 53484dd45dfddb17b9c8c9812b51004690aaee9a | [
"MIT"
] | 1 | 2019-12-17T10:02:17.000Z | 2019-12-17T10:02:17.000Z | resources/models/utils.py | digipointtku/respa | a529e0df4d3f072df7801adb5bf97a5f4abd1243 | [
"MIT"
] | 12 | 2019-11-06T07:53:27.000Z | 2019-12-18T06:14:47.000Z | resources/models/utils.py | digipointtku/respa | a529e0df4d3f072df7801adb5bf97a5f4abd1243 | [
"MIT"
] | null | null | null | import base64
import datetime
from decimal import Decimal
import struct
import time
import io
import logging
from munigeo.models import Municipality
import pytz
import arrow
from django.conf import settings
from django.utils import formats
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, ContentType
from django.utils.translation import ugettext
from django.utils.translation import ungettext
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.timezone import localtime
from rest_framework.reverse import reverse
from icalendar import Calendar, Event, vDatetime, vText, vGeo
from modeltranslation.translator import NotRegistered, translator
import xlsxwriter
# Fallback language code: the first entry in Django's LANGUAGES setting.
DEFAULT_LANG = settings.LANGUAGES[0][0]
def save_dt(obj, attr, dt, orig_tz="UTC"):
    """
    Sets given field in an object to a DateTime object with or without
    a time zone converted into UTC time zone from given time zone

    If there is no time zone on the given DateTime, orig_tz will be used
    """
    if dt.tzinfo:
        # Already aware: just convert to UTC.
        arr = arrow.get(dt).to("UTC")
    else:
        # Naive: interpret in orig_tz first, then convert to UTC.
        arr = arrow.get(dt, orig_tz).to("UTC")
    setattr(obj, attr, arr.datetime)
def get_dt(obj, attr, tz):
    # Return obj.<attr> converted into the given time zone (name or tzinfo).
    return arrow.get(getattr(obj, attr)).to(tz).datetime
def get_translated(obj, attr):
    """Return the DEFAULT_LANG translation of *attr*, falling back to the
    untranslated attribute when the translation is missing or empty."""
    translated_value = getattr(obj, '%s_%s' % (attr, DEFAULT_LANG), None)
    if translated_value:
        return translated_value
    return getattr(obj, attr)
# Needed for slug fields populating
def get_translated_name(obj):
    # Shorthand used as a slugify source: default-language name of obj.
    return get_translated(obj, 'name')
def generate_id():
    """Return a short unique id: the current microsecond timestamp packed
    big-endian and base32-encoded, lowercased with padding stripped."""
    micros = int(time.time() * 1000000)
    packed = struct.pack(">Q", micros).lstrip(b'\x00')
    encoded = base64.b32encode(packed).strip(b'=').lower()
    return encoded.decode('utf8')
def time_to_dtz(time, date=None, arr=None):
    """Localize a naive ``datetime.time`` into the current Django time zone.

    The date part comes either from ``date`` or from an Arrow-like object
    ``arr`` (its year/month/day are read). Returns None when ``time`` is
    falsy; note that seconds from ``time`` are dropped in the ``arr`` branch.
    """
    tz = timezone.get_current_timezone()
    if time:
        if date:
            return tz.localize(datetime.datetime.combine(date, time))
        elif arr:
            return tz.localize(datetime.datetime(arr.year, arr.month, arr.day, time.hour, time.minute))
    else:
        return None
def is_valid_time_slot(time, time_slot_duration, opening_time):
    """
    Check if given time is correctly aligned with time slots.

    :type time: datetime.datetime
    :type time_slot_duration: datetime.timedelta
    :type opening_time: datetime.datetime
    :rtype: bool
    """
    offset = time - opening_time
    return offset % time_slot_duration == datetime.timedelta(0)
def humanize_duration(duration):
    """
    Return the given duration in a localized humanized form.

    Examples: "2 hours 30 minutes", "1 hour", "30 minutes"

    :type duration: datetime.timedelta
    :rtype: str
    """
    hours = duration.days * 24 + duration.seconds // 3600
    mins = duration.seconds // 60 % 60
    # ungettext picks singular/plural; zero components become None and are
    # filtered out before joining.
    hours_string = ungettext('%(count)d hour', '%(count)d hours', hours) % {'count': hours} if hours else None
    mins_string = ungettext('%(count)d minute', '%(count)d minutes', mins) % {'count': mins} if mins else None
    return ' '.join(filter(None, (hours_string, mins_string)))
# Logger for outgoing reservation notification emails.
notification_logger = logging.getLogger('respa.notifications')
def send_respa_mail(email_address, subject, body, html_body=None, attachments=None):
    """Send a notification email; return True on success, False otherwise.

    No-op (returning False) unless settings.RESPA_MAILS_ENABLED is truthy.
    """
    if not getattr(settings, 'RESPA_MAILS_ENABLED', False):
        notification_logger.info('Respa mail is not enabled.')
        return False
    try:
        # Fall back to a noreply address on the current site's domain.
        from_address = (getattr(settings, 'RESPA_MAILS_FROM_ADDRESS', None) or
                        'noreply@%s' % Site.objects.get_current().domain)
        notification_logger.info('Sending notification email to %s: "%s"' % (email_address, subject))
        text_content = body
        msg = EmailMultiAlternatives(subject, text_content, from_address, [email_address], attachments=attachments)
        if html_body:
            msg.attach_alternative(html_body, 'text/html')
        msg.send()
        return True
    except Exception as exc:
        # Deliberately broad: mail failures are logged, never propagated.
        notification_logger.error('Respa mail error %s', exc)
        return False
def generate_reservation_xlsx(reservations, **kwargs):
    """
    Return reservations in Excel xlsx format

    The parameter is expected to be a list of dicts with fields:
      * unit: unit name str
      * resource: resource name str
      * begin: begin time datetime
      * end: end time datetime
      * staff_event: is staff event bool
      * user: user email str (optional)
      * comments: comments str (optional)
      * all of RESERVATION_EXTRA_FIELDS are optional as well

    When ``request`` is passed via kwargs, its 'start'/'end'/'resource'
    query params drive an extra per-resource utilization summary.

    :rtype: bytes
    """
    # Imported locally, presumably to avoid a circular import with
    # resources.models -- TODO confirm.
    from resources.models import Resource, Reservation, RESERVATION_EXTRA_FIELDS

    def clean(string):
        # Sanitize a cell value: unwrap single-entry translation dicts and
        # strip a leading character Excel would treat as a formula.
        if not string:
            return ''
        if isinstance(string, dict):
            string = next(iter(string.items()))[1]
        if not isinstance(string, str):
            return string
        unallowed_characters = ['=', '+', '-', '"', '@']
        if string[0] in unallowed_characters:
            string = string[1:]
        return string

    request = kwargs.get('request', None)
    output = io.BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet()
    # (header label, column width) pairs; extra fields are appended below.
    headers = [
        ('Unit', 30),
        ('Resource', 40),
        ('Begin time', 35),
        ('End time', 35),
        ('Created at', 30),
        ('User', 30),
        ('Comments', 30),
        ('Staff event', 10),
    ]
    for field in RESERVATION_EXTRA_FIELDS:
        headers.append((Reservation._meta.get_field(field).verbose_name, 20))
    header_format = workbook.add_format({'bold': True})
    for column, header in enumerate(headers):
        worksheet.write(0, column, str(_(header[0])), header_format)
        worksheet.set_column(column, column, header[1])
    opening_hours = {}
    resource_usage_info = {}
    if request:
        # Opening hours are only computed when the caller passes the request
        # (period and resource ids come from its query params).
        query_start = datetime\
            .datetime\
            .strptime(request.query_params.get('start', '1970-01-01'), '%Y-%m-%d')
        query_end = datetime\
            .datetime\
            .strptime(request.query_params.get('end', '1970-01-01'), '%Y-%m-%d')
        try:
            resources = request.query_params.get('resource').split(',')
        except:
            # NOTE(review): bare except; presumably guards against a missing
            # 'resource' param (AttributeError on None) -- consider narrowing.
            resources = []
        opening_hours = {
            resource:resource.get_opening_hours(query_start, query_end) \
            for resource in Resource.objects.filter(id__in=resources)
        }
        for resource in opening_hours:
            for date, time_range in opening_hours[resource].items():
                for time_slot in time_range:
                    # NOTE(review): unpacking dict.items() into (opens, closes)
                    # relies on key insertion order -- TODO confirm upstream.
                    opens, closes = time_slot.items()
                    if not opens[1] or not closes[1]:
                        continue
                    if resource not in resource_usage_info:
                        resource_usage_info[resource] = {'total_opening_hours': 0, 'total_reservation_hours': 0}
                    resource_usage_info[resource]['total_opening_hours'] += (closes[1] - opens[1]).total_seconds() / 3600
    date_format = workbook.add_format({'num_format': 'dd.mm.yyyy hh:mm', 'align': 'left'})
    total_reservation_hours = 0
    row = 0
    for row, reservation in enumerate(reservations, 1):
        for key in reservation:
            reservation[key] = clean(reservation[key])
        obj = Reservation.objects.get(pk=reservation['id'])
        usage_info = resource_usage_info.get(obj.resource, None)
        # Cell values are written as naive local times.
        begin = localtime(reservation['begin']).replace(tzinfo=None)
        end = localtime(reservation['end']).replace(tzinfo=None)
        worksheet.write(row, 0, reservation['unit'])
        worksheet.write(row, 1, reservation['resource'])
        worksheet.write(row, 2, begin, date_format)
        worksheet.write(row, 3, end, date_format)
        worksheet.write(row, 4, localtime(reservation['created_at']).replace(tzinfo=None), date_format)
        if 'user' in reservation:
            worksheet.write(row, 5, reservation['user'])
        if 'comments' in reservation:
            worksheet.write(row, 6, reservation['comments'])
        worksheet.write(row, 7, reservation['staff_event'])
        for i, field in enumerate(RESERVATION_EXTRA_FIELDS, 8):
            if field in reservation:
                if isinstance(reservation[field], dict):
                    try:
                        reservation[field] = next(iter(reservation[field].items()))[1]
                    except:
                        # NOTE(review): bare except silently skips the cell.
                        continue
                worksheet.write(row, i, reservation[field])
        total_reservation_hours += (end-begin).total_seconds() # Overall total
        if usage_info:
            usage_info['total_reservation_hours'] += (end-begin).total_seconds() / 3600 # Resource specific total
    if row > 0:
        # Append summary rows below the data: overall reservation hours plus
        # a per-resource utilization table (reserved hours / opening hours).
        row = row+2
        col_format = workbook.add_format({'color': 'red', 'font': 'bold'})
        worksheet.write(row, 0, ugettext('Reservation hours total'), col_format)
        worksheet.write(row, 1, ugettext('%(hours)s hours') % ({'hours': int((total_reservation_hours / 60) / 60)}), col_format)
        col_format = workbook.add_format()
        col_format.set_bg_color('black')
        col_format.set_font_color('white')
        col_format.set_bold()
        worksheet.write(row+2, 0, '', col_format)
        worksheet.write(row+2, 1, '', col_format)
        if request:
            worksheet.write(row+2, 2, ugettext('Resource utilization for period %(start)s - %(end)s') % ({
                'start': query_start.date(),
                'end': query_end.date()
            }), col_format)
        else:
            worksheet.write(row+2, 2, ugettext('Resource utilization'), col_format)
        worksheet.write(row+2, 3, '', col_format)
        worksheet.write(row+2, 4, '', col_format)
        col_format = workbook.add_format({'color': 'black'})
        col_format.set_bold()
        worksheet.write(row+3, 0, ugettext('Unit'), col_format)
        worksheet.write(row+3, 1, ugettext('Resource'), col_format)
        worksheet.write(row+3, 2, ugettext('Resource utilization'), col_format)
        worksheet.write(row+3, 3, ugettext('Opening hours total'), col_format)
        worksheet.write(row+3, 4, ugettext('Reservation hours total'), col_format)
        row = row+4
        for idx, resource_info in enumerate(resource_usage_info.items()):
            resource, info = resource_info
            worksheet.write(row+idx, 0, resource.unit.name)
            worksheet.write(row+idx, 1, resource.name)
            worksheet.write(row+idx, 2, "%.2f%%" % float(
                (info.get('total_reservation_hours') / info.get('total_opening_hours')) * 100))
            worksheet.write(row+idx, 3, "%sh" % info.get('total_opening_hours'))
            worksheet.write(row+idx, 4, "%sh" % info.get('total_reservation_hours'))
    workbook.close()
    return output.getvalue()
def get_object_or_none(cls, **kwargs):
    """Like ``cls.objects.get`` but returns None instead of raising
    ``cls.DoesNotExist`` when no matching object is found."""
    try:
        result = cls.objects.get(**kwargs)
    except cls.DoesNotExist:
        result = None
    return result
def create_datetime_days_from_now(days_from_now):
    # Local midnight `days_from_now` days ahead; None passes through.
    if days_from_now is None:
        return None
    dt = timezone.localtime(timezone.now()) + datetime.timedelta(days=days_from_now)
    dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
    return dt
def localize_datetime(dt):
    # Render dt in the active time zone using Django's DATETIME_FORMAT.
    return formats.date_format(timezone.localtime(dt), 'DATETIME_FORMAT')
def format_dt_range(language, begin, end):
    """Format a datetime range as a human-readable string for the language.

    Same-day ranges repeat only the end time; otherwise both endpoints are
    fully formatted. Only 'fi' has a dedicated format; any other language
    falls back to the English format.
    """
    if language == 'fi':
        # ma 1.1.2017 klo 12.00
        begin_format = r'D j.n.Y \k\l\o G.i'
        if begin.date() == end.date():
            end_format = 'G.i'
            sep = '–'
        else:
            end_format = begin_format
            sep = ' – '
        res = sep.join([formats.date_format(begin, begin_format), formats.date_format(end, end_format)])
    else:
        # default to English
        begin_format = r'D j/n/Y G:i'
        if begin.date() == end.date():
            end_format = 'G:i'
            sep = '–'
        else:
            end_format = begin_format
            sep = ' – '
        res = sep.join([formats.date_format(begin, begin_format), formats.date_format(end, end_format)])
    return res
def build_reservations_ical_file(reservations):
    """
    Return iCalendar file containing given reservations
    """
    cal = Calendar()
    for reservation in reservations:
        event = Event()
        # Times are exported in UTC; clients localize on display.
        begin_utc = timezone.localtime(reservation.begin, timezone.utc)
        end_utc = timezone.localtime(reservation.end, timezone.utc)
        event['uid'] = 'respa_reservation_{}'.format(reservation.id)
        event['dtstart'] = vDatetime(begin_utc)
        event['dtend'] = vDatetime(end_utc)
        unit = reservation.resource.unit
        event['location'] = vText('{} {} {}'.format(unit.name, unit.street_address, unit.address_zip))
        if unit.location:
            event['geo'] = vGeo(unit.location)
        event['summary'] = vText('{} {}'.format(unit.name, reservation.resource.name))
        cal.add_component(event)
    return cal.to_ical()
def build_ical_feed_url(ical_token, request):
    """
    Return iCal feed url for given token without query parameters
    """
    url = reverse('ical-feed', kwargs={'ical_token': ical_token}, request=request)
    # BUGFIX: str.find returns -1 when '?' is absent, so url[:url.find('?')]
    # would wrongly strip the last character. Splitting is safe either way.
    return url.split('?')[0]
def dateparser(first, iter) -> str:
    """
    Return parsed time format `%d-%m-%Y %H:%M:%S`, combining the date part
    of *iter* with the time part of *first* (both `%Y-%m-%d %H:%M:%S`, with
    an optional `+%z` suffix on *first* that is discarded).

    Returns an empty string when either input does not match the expected
    shape. (*iter* shadows the builtin but is kept for interface stability.)
    """
    try:
        combined = '%s %s' % (str(iter).split(' ')[0], str(first).split(' ')[1])
        combined = combined.split('+')[0]
        return datetime.datetime.strptime(
            combined, '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y %H:%M:%S')
    except (IndexError, ValueError):
        # IndexError: missing time part; ValueError: strptime mismatch.
        # Best-effort contract preserved: malformed input -> empty string.
        return ""
def get_municipality_help_options():
    # Best-effort list of municipality pks; falls back to an empty list on
    # any failure (presumably missing munigeo tables -- TODO confirm).
    try:
        return list(Municipality.objects.all().values_list('pk', flat=True))
    except:
        return []
def get_order_quantity(item):
    """
    Return the quantity of products based on ``item['product']['price_type']``.

    For 'per_period' products of type 'rent' the quantity is derived from
    prices (total unit price / single product price), clamped to a minimum
    of 1 — e.g. a 2 hour reservation at 10 euros per 30 min period gives
    40 / 10 = 4. Free products and every other price/product type use the
    order's own quantity.
    """
    product = item["product"]
    normalized_price = product["price"].replace(',', '.')
    # Free products: the order quantity stands as-is.
    if Decimal(normalized_price) == Decimal('0.00'):
        return float(item["quantity"])
    if product["price_type"] != 'per_period' or product["type"] != "rent":
        # Non-period pricing, or period pricing on a non-rental product:
        # trust the order's stated quantity.
        return float(item["quantity"])
    derived = float(item["unit_price"].replace(',', '.')) / float(normalized_price)
    # A reservation shorter than one price period would derive < 1
    # (e.g. 30 min of a 1h30min/9e period -> 3/9); report at least one unit.
    return float(derived) if derived >= 1 else float(1)
def get_order_tax_price(item):
    """
    Return the correct tax amount for this order line.

    Free products carry no tax. Multi-period rentals use the product's own
    tax price; everything else uses the precalculated reservation tax price.
    """
    product = item["product"]
    normalized_price = product["price"].replace(',', '.')
    if Decimal(normalized_price) == Decimal('0.00'):
        return float(normalized_price)
    if product["price_type"] == 'per_period' and product["type"] == "rent":
        derived = float(item["unit_price"].replace(',', '.')) / float(normalized_price)
        if derived > 1:
            # More than one period rented: the single-product tax applies.
            return float(product["tax_price"].replace(',', '.'))
    return float(item["reservation_tax_price"])
def get_order_pretax_price(item):
    """
    Return the correct tax-free price for this order line.

    Full-period rentals use the product's own pretax price; free products,
    sub-period reservations and non-rental products use the precalculated
    reservation pretax price.
    """
    product = item["product"]
    normalized_price = product["price"].replace(',', '.')
    if Decimal(normalized_price) == Decimal('0.00'):
        return float(normalized_price)
    if product["price_type"] == 'per_period':
        derived = float(item["unit_price"].replace(',', '.')) / float(normalized_price)
        if derived >= 1 and product["type"] == "rent":
            # At least one full period rented: product pretax price applies.
            return float(product["pretax_price"].replace(',', '.'))
    return float(item['reservation_pretax_price'])
def log_entry(instance, user, *, is_edit, message : str):
    """Record a Django admin LogEntry (CHANGE when is_edit, else ADDITION)
    for *instance*, attributed to *user*."""
    content_type = ContentType.objects.get_for_model(instance)
    LogEntry.objects.log_action(
        user.id, content_type.id,
        instance.id, repr(instance),
        CHANGE if is_edit else ADDITION,
        message
    )
def get_translated_fields(instance, use_field_name=False):
    """Collect modeltranslation values for every configured language.

    Returns ``{lang: value}`` by default, or ``{field: {lang: value}}`` when
    ``use_field_name`` is True; None if the model is not registered for
    translation. Empty/missing translations are skipped.
    """
    translated = {}
    try:
        translation_options = translator.get_options_for_model(instance.__class__)
        for field_name in translation_options.fields.keys():
            for lang in [x[0] for x in settings.LANGUAGES]:
                field = getattr(instance, '%s_%s' % (field_name, lang), None)
                if not field:
                    continue
                if not use_field_name:
                    # NOTE(review): without field names, later fields
                    # overwrite earlier ones for the same language.
                    translated[lang] = field
                    continue
                if field_name not in translated:
                    translated[field_name] = {}
                translated[field_name][lang] = field
        return translated
    except NotRegistered:
        return None
def get_payment_requested_waiting_time(reservation):
    '''
    Returns the date and time of when a order should be paid by.

    Time is calculated by adding order.confirmed_by_staff_at datetime + waiting_time,
    after this exact calculation the datetime is rounded down to the nearest hour.

    waiting_time is based on the payment_requested_waiting_time value found in
    the resource or the resources unit, if neither have this value set then the
    env variable RESPA_PAYMENTS_PAYMENT_REQUESTED_WAITING_TIME is used instead.
    '''
    # Precedence: resource setting > unit setting > global default.
    waiting_time = settings.RESPA_PAYMENTS_PAYMENT_REQUESTED_WAITING_TIME
    if getattr(reservation.resource,'payment_requested_waiting_time', None):
        waiting_time = reservation.resource.payment_requested_waiting_time
    elif getattr(reservation.resource.unit, 'payment_requested_waiting_time', None):
        waiting_time = reservation.resource.unit.payment_requested_waiting_time
    exact_value = reservation.order.confirmed_by_staff_at + datetime.timedelta(hours=waiting_time)
    # Round down to the whole hour, then render in the unit's local zone.
    rounded_value = exact_value.replace(microsecond=0, second=0, minute=0)
    return rounded_value.astimezone(reservation.resource.unit.get_tz()).strftime('%d.%m.%Y %H:%M')
| 35.070896 | 128 | 0.637089 |
78db1b5e786e2c54396cafa05922ae1b3d75840c | 1,424 | py | Python | docs/conf.py | nickcafferry/The-working-of-an-acid-buffer | 178ce110020ca64babb0aa4b2fc5689c8b75b352 | [
"MIT"
] | 1 | 2020-03-11T02:56:33.000Z | 2020-03-11T02:56:33.000Z | docs/conf.py | machine-learning-in-materials-sciences/PSSpred | b065dd394794200bbcc69cd1bde8732933bfe989 | [
"MIT"
] | null | null | null | docs/conf.py | machine-learning-in-materials-sciences/PSSpred | b065dd394794200bbcc69cd1bde8732933bfe989 | [
"MIT"
] | 2 | 2020-09-20T13:52:54.000Z | 2022-03-01T01:52:40.000Z | # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath("../../manim/"))
project = u'PSSpred-Protein Secondary Structure prediction'
copyright = u'- Wei MEI (Nick Cafferry)'
author = u'Wei MEI'
version = '1.0'
release = '1.0'
extensions = [
'sphinx.ext.todo',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx_rtd_theme',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram'
]
autoclass_content = 'both'
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'README'
language = 'english'
html_search_language = 'Chinese'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'remain']
pygments_style = 'default'
html_static_path = ['assets']
html_theme = 'haiku'
html_logo = 'GCC.svg'
html_favicon = 'GCC.svg'
html_theme_options = {
'linkcolor': "red",
"external_links": [
{"url": "https://psspred.readthedocs.io/en/latest/README.html?badge=latest", "name": "PSSpred"}
],
"github_url": "https://github.com/nickcafferry/PSSpred",
}
html_sidebars = {
'**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'],
'using/windows': ['windowssidebar.html', 'searchbox.html','playground.html'],
}
| 24.982456 | 103 | 0.668539 |
cc1428c9e258d11e7b1c2c2c18887fbc17a2d5cb | 1,918 | py | Python | pybossa_lc/__init__.py | LibCrowds/libcrowds-project-generator | d474ffc713e27bd870f1d3f8fca70cec684411e0 | [
"MIT"
] | 2 | 2018-09-17T13:58:06.000Z | 2020-03-31T15:10:06.000Z | pybossa_lc/__init__.py | LibCrowds/pybossa-lc | d474ffc713e27bd870f1d3f8fca70cec684411e0 | [
"MIT"
] | null | null | null | pybossa_lc/__init__.py | LibCrowds/pybossa-lc | d474ffc713e27bd870f1d3f8fca70cec684411e0 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
"""Main package for pybossa-lc."""
import os
import json
from flask import current_app as app
from flask.ext.plugins import Plugin
from pybossa.extensions import importer
from . import default_settings
from .extensions import *
from .importers.iiif_enhanced import BulkTaskIIIFEnhancedImporter
__plugin__ = "PyBossaLC"
# Plugin version, read from the adjacent info.json metadata file at import time.
__version__ = json.load(open(os.path.join(os.path.dirname(__file__),
                                          'info.json')))['version']
class PyBossaLC(Plugin):
    """A PYBOSSA plugin for managing LibCrowds projects."""

    def setup(self):
        """Setup plugin: load settings, register blueprints and the
        enhanced IIIF importer, then attach the WA client to the app."""
        self.configure()
        self.setup_blueprints()
        self.setup_enhanced_iiif_importer()
        wa_client.init_app(app)

    def configure(self):
        """Load configuration settings."""
        # Copy every UPPER_CASE name from default_settings into app.config,
        # unless the app already carries a truthy value for it.
        settings = [key for key in dir(default_settings) if key.isupper() and
                    not key.startswith('#')]
        for s in settings:
            if not app.config.get(s):
                app.config[s] = getattr(default_settings, s)

    def setup_blueprints(self):
        """Setup blueprints."""
        # Imported here so registration happens inside the app context.
        from .api.analysis import BLUEPRINT as analysis
        from .api.projects import BLUEPRINT as projects
        from .api.categories import BLUEPRINT as categories
        from .api.admin import BLUEPRINT as admin
        from .api.proxy import BLUEPRINT as proxy
        app.register_blueprint(analysis, url_prefix='/lc/analysis')
        app.register_blueprint(projects, url_prefix='/lc/projects')
        app.register_blueprint(categories, url_prefix='/lc/categories')
        app.register_blueprint(admin, url_prefix='/lc/admin')
        app.register_blueprint(proxy, url_prefix='/lc/proxy')

    def setup_enhanced_iiif_importer(self):
        """Setup the enhanced IIIF manifest importer."""
        importer._importers['iiif-enhanced'] = BulkTaskIIIFEnhancedImporter
| 35.518519 | 77 | 0.669447 |
f9c5b547e434f916db97ced95cf30e2a4e197f25 | 425 | py | Python | project/tests/base.py | kshinba/cs501_t1_diagnostic_test | dc762306be50dbdbd3c369c305e65949966fb54f | [
"MIT"
] | null | null | null | project/tests/base.py | kshinba/cs501_t1_diagnostic_test | dc762306be50dbdbd3c369c305e65949966fb54f | [
"MIT"
] | null | null | null | project/tests/base.py | kshinba/cs501_t1_diagnostic_test | dc762306be50dbdbd3c369c305e65949966fb54f | [
"MIT"
] | null | null | null | # project/server/tests/base.py
from flask_testing import TestCase
from project.server import app, db
class BaseTestCase(TestCase):
    """ Base Tests """
    # Common fixture: every test runs against a fresh TestingConfig database.

    def create_app(self):
        app.config.from_object('project.server.config.TestingConfig')
        return app

    def setUp(self):
        # Create all tables before each test.
        db.create_all()
        db.session.commit()

    def tearDown(self):
        # Drop everything so tests stay isolated from each other.
        db.session.remove()
        db.drop_all()
d003e61a0d49f132a8be8525c7feaced7b379f39 | 3,984 | py | Python | dgi/code2graph/utils/parse_config.py | konveyor/tackle-data-gravity-insights | 97a3eb6a04a2bca7f7e3422581a8fad055d90c04 | [
"Apache-2.0"
] | 3 | 2022-03-28T20:54:34.000Z | 2022-03-31T15:14:39.000Z | dgi/code2graph/utils/parse_config.py | rofrano/tackle-data-gravity-insights | f734f023dc46ca8e038b5ba8029e5c1177a1d34f | [
"Apache-2.0"
] | 9 | 2022-03-01T13:29:50.000Z | 2022-03-31T13:04:36.000Z | dgi/code2graph/utils/parse_config.py | rofrano/tackle-data-gravity-insights | f734f023dc46ca8e038b5ba8029e5c1177a1d34f | [
"Apache-2.0"
] | 3 | 2022-03-28T14:41:45.000Z | 2022-03-30T19:17:31.000Z | ################################################################################
# Copyright IBM Corporation 2021, 2022
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""A container to hold all configurations.
Config contains methods to read a yaml file and create an object to hold the configration. Once initialized,
we can use the dot notation to access the configurations.
E.g.,
Let's say we have a yaml file called conf.yml
foo:
bar: "bar"
baz:
baza: 1
bazb: False
After reading this, we can use it as follows:
>>> conf = Config("conf.yml")
>>> conf = conf.load_config()
>>> print(conf.bar)
bar
>>> print(conf.baz.baza)
1
>>> print(conf.baz.bazb)
False
"""
import os
import re
import yaml
from typing import Any, Generator
# Module authorship metadata.
__author__ = "Rahul Krishna"
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Rahul Krishna"
__email__ = "rkrsn@ibm.com"
__status__ = "Research Prototype"
class Config:
    """Container for configuration loaded from a YAML file.

    Nested mappings become nested Config instances, so values are reachable
    via dot notation (see the module docstring for an example). Values of the
    form "${ENV_VAR|default}" are substituted from the environment.
    """

    def __init__(self, config_file: str = None) -> None:
        # Path to the YAML file; None for nested Config instances that
        # set_config builds internally.
        self.config_file = config_file
        self._num_attributes = 0

    def __iter__(self):
        """ Iterates over all the attributes.

        Yields:
            Generator[str, Config or dict_like]: Key is the name of the attribute.
                Value is either an instance of Config or any value
        """
        for attr_name, attr_val in list(self.__dict__.items()):
            yield attr_name, attr_val

    def get_num_attributes(self) -> int:
        """ A getter method for number of attributes
        """
        return self._num_attributes

    def set_config(self, key: str, val: Any):
        """ Set config attribute

        Args:
            key (str): Name of the config
            val (Any): The value

        Returns:
            Config: A reference back to self
        """
        # If the config file is nested, then recurse.
        if isinstance(val, dict):
            cfg_cls = Config()
            for sub_key, sub_val in list(val.items()):
                cfg_cls = cfg_cls.set_config(sub_key, sub_val)
            val = cfg_cls
        self._num_attributes += 1
        # If the value has environment variables, replace them with the correct values or the defaults.
        # Matches strings shaped like "${ENV_VAR|default}".
        # NOTE(review): "\$" / "\}" in a plain string are invalid escape
        # sequences (DeprecationWarning); a raw string would be cleaner.
        reg = re.compile("\${[^\}]*}")
        if isinstance(val, str) and reg.match(val):
            raw_str = re.sub("[${\ }]", "", val)
            sub_str = raw_str.split("|")
            env_val = sub_str[0]
            default = None
            if len(sub_str) == 2:
                default = sub_str[1]
            val = os.getenv(env_val)
            if not val:
                # NOTE(review): assert is stripped under ``python -O``;
                # raising an exception would make this validation robust.
                assert (default), "Enviroment variable {val} not set, and default value is not set. Please set {val}".format(
                    val=env_val)
                val = default
        setattr(self, key, val)
        return self

    def load_config(self):
        """
        Read a yaml file with all the configurations and set them.

        Parameters
        ----------
        config_file: path_str
            Path to the config yaml file

        Returns
        -------
        self: self
            A reference to self
        """
        with open(self.config_file, 'r') as cfg:
            yaml_loader = yaml.load(cfg, Loader=yaml.FullLoader)
            for attr_name, attr_val in list(yaml_loader.items()):
                self.set_config(attr_name, attr_val)
        return self
| 29.080292 | 125 | 0.583333 |
0dfd171041149ee0e262c18bcd6509e1ef2c4d3b | 2,087 | py | Python | app/core/migrations/0001_initial.py | FarbodFarhangfar/recipe-app-api | 40c36ec0f0ccf90177616d76b77be971bde05866 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | FarbodFarhangfar/recipe-app-api | 40c36ec0f0ccf90177616d76b77be971bde05866 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | FarbodFarhangfar/recipe-app-api | 40c36ec0f0ccf90177616d76b77be971bde05866 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-02-02 23:32
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False,
help_text='Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set', related_query_name='user', to='auth.Group',
verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.',
related_name='user_set', related_query_name='user',
to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 52.175 | 160 | 0.524677 |
23b4be2198f22ce21760c3bedf8feb133c29e6e0 | 1,535 | py | Python | azure-mgmt-web/azure/mgmt/web/models/azure_blob_storage_http_logs_config.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-web/azure/mgmt/web/models/azure_blob_storage_http_logs_config.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-web/azure/mgmt/web/models/azure_blob_storage_http_logs_config.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureBlobStorageHttpLogsConfig(Model):
"""Http logs to azure blob storage configuration.
:param sas_url: SAS url to a azure blob container with
read/write/list/delete permissions.
:type sas_url: str
:param retention_in_days: Retention in days.
Remove blobs older than X days.
0 or lower means no retention.
:type retention_in_days: int
:param enabled: True if configuration is enabled, false if it is disabled
and null if configuration is not set.
:type enabled: bool
"""
_attribute_map = {
'sas_url': {'key': 'sasUrl', 'type': 'str'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(AzureBlobStorageHttpLogsConfig, self).__init__(**kwargs)
self.sas_url = kwargs.get('sas_url', None)
self.retention_in_days = kwargs.get('retention_in_days', None)
self.enabled = kwargs.get('enabled', None)
| 37.439024 | 77 | 0.619544 |
2ca81ae81458d99e807d69f391134e690cb68fda | 7,906 | py | Python | my_classes/.history/Tuples/name_tuples_20210722180527.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/Tuples/name_tuples_20210722180527.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/Tuples/name_tuples_20210722180527.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Tuple as Data Structure
We have see how we interpreted tuples as data structures
The position of the object contained in the tuple gives it meaning
For example, we can represent a 2D coordinate as: (10, 20)
x y
If pt is a position tuple, we can retrieve the x and x, y = pt or x = pt[0]
y coordinates using: y = py[1]
For example, to calculate the distance of pt from the origin we could write:
dist = math.sgrt(pt[0] ** 2 + pt[1] ** 2)
Now this is not very readable, and if someone sees this code they will have ti know thatpt[0] mans the x-coordinate and pt[1] means the y-coordinate.
This is not very transparent.
# Using a class instead.
At this point, in order to make things clearer for the reader (not the complier, the reader), we might want to approach this using a class method instead.
"""
from calendar import month
import symbol
class Point2D:
def __init__(self, x, y): # pt = Point2D(10, 20)
self.x = x
self.y = y
class Stock:
def --init__(self, symbol, year, month, day, open, high, low, close):
self.symbol = symbol
self.year = year
self.month = month
self.day = day # Class approach # Tuple Approach
self.open # djia.symbol # djia[0]
self.high # djia.open # djia[4]
self.low = low # djia.close # djia[7]
self. close = close
# djia.high - djia.low # djia[5] - djia[6]
""" Extra stuff
At the very least we shouldimpliment the __eq__ method too
-> Point(10, 20) == Point(10, 20) -> True
"""
class Point2D:
def __init__(self, x, y): # pt = Point2D(10, 20)
self.x = x
self.y = y
def __repr__(self):
return f'Point2D(x={self.x}, y={self.y}'
def __eq_(self, other):
if isinstance(other, Point2D):
return self.x == other.x and self.y == other.y
else:
return False
""" Named Tuples to the rescue
There are other reasons to seek another approach, We cover those in the coding video
Amonst other thing, Point2D objects are mutable - something we may not want!
There's a lot to like using tuples to represent simple data structures
The real drawback is that we have to know what the positions mean, and remember this in our code.
If we ever need to change the structure of our tuple in our code (like inserting a value that we forgot) and most likely our code will break!
Named Tuples to the rescue
Named tuples give meaningful name to positions:
They subclass tuple, and add a layer to assign property names to the positional elements
Located in the collections standard library module
from collections import nametuple
named tuple is a function (not a type) which generates a new class -> class factory
that new class inherits from tuple
but also provides named properties to access elements of the tuple
but an instance of that class is still a tuple
Generating Named Tuple Classes
We have to understand that namedtuple is a class factory
namedtuple needs a few things to generate this class:
the class name we want to use
a sequence of field names (strings) we want to assign, in the order of the elements in the tuple
field names can be any valid variable name
except they cannot start with an underscore
The return value of the call to namedtuple will be a class
We need to assign that class to a variable name in our code so we can use it to construct instances
n general, we use the same name as the name of the class that was generated
Point2D = namedtuple('Point2D', ['x', 'y'])
We can create instances of Point2D just as we would with anny class (since it is a class)
Pt2D = namedTuple('Point2D', ['x', 'y'])
pt = Point2D(19, 20)
Variable:MyClass ---> Class: MyClass 0xFF300 # they point to the same memory object
class MyClass: /
pass /
/
MyClassAlias = MyClass Variable: MyClassAlias
instance_1 = MyClass() instance_2 = MyClassAlias() # instances of the same class
Similarly
Pt2DAlias = namedtuple('Point2D', ['x', 'y'])
| # Points to the same class
Class: )xFF900
Variable: Pt2DAlias ------> Point2D
There are many ways we can provide the list of field names to the namedtuple function
a list of strings -> in fact any sequence, just remember that order matters!
a tuple of strings -> in fact any sequence, just remember that order matters!
a single string with the field names seperated by whitespace or commas
namedtuple( 'Point2D', ['x', 'y'])
namedtuple( 'Point2D', ('x', 'y'))
namedtuple( 'Point2D', 'x, y')
namedtuple( 'Point2D', 'x y')
Instantiating Named Tuples
After we have created a named tuple class, we can instanciate them just like an ordinary class
In fact, the __new__ method of the generated class uses the field names we provided as parm names
Point2D = namedtuple( 'Point2D', ' x y')
Once we have created a class, I can use positional arguments:
pt1 = Point2D(10, 20) 10 -> x 20 -> y
And even keyword arguments:
pt2 = Point2D(x=10, y=20) " "
Accessing Data in a named tuple
Since named tuples are also regular tuples. we can still handle them just like any other tuple
by index
slicing
iterate
Point2D = namedtuple( 'Point2D', 'x y')
pt1 = Point2D(10, 20) isinstance(pt1, tuple) -> True
x, y = pt1 # unpacking
x = pt1[0]
for e in pt1: # iterate
print(e)
But now, in addition, we can alos access the data using the field names:
Point2D = namedtuple( 'Point2D, 'x y')
pt1 = Point2D(10, 20)
pt1.x -> 10
pt1.y -> 20
Since nametuple generated classes inherit from tuple class Point2D(tuple):
...
pt1 is a tuple, and is therefore immutable
pt1.x = 100 will not work!
The rename keyword-only argument for namedtuple
Remember that field names for named tuples must be valid identifiers but cannot start with a underscore
This will NOT work!!! Person = namedtuple( 'Person), 'named age, _ssn')
nametuple has a keywordonly argument,rename (defaults to False) that will automatically rename any invalid field name
uses convention: _{position in list of field names}
This will not work:
Person = namedtuple('Person', 'name age _ssn', rename=True)
And the actual field names would be: name age _2
Introspection
We can easily find out the field names in a named tuple generated class
class property -> _fields
person = namedtuple( 'Person', 'name age _snn', rename=True)
Person._fields -> ('name', 'age', '_2')
Remember that namedtuple is a class factory, i.e. it generates a class
We can actually see what the code for that class is, using the class property _source
"""
# Demonstration: generate a Point2D named-tuple class.
# NOTE(review): `namedtuple` is never imported at module level in this
# file (only `calendar.month` and `symbol` are) — confirm before running.
Point2D = namedtuple('Point2D', 'x y')
# NOTE(review): the `_source` class attribute was removed in Python 3.9;
# this expression only works on Python <= 3.8.
Point2D._source # lots of code omitted
/
/
class Point2D(tuple): /
'Point2D(x, y')' /
/
/
def __new__(_cls, x, y):
'Create new instance of Point2D(x, y)'
return _tuple.__new__(_cls, (x, y))
def __repr__(self):
'Return a nicely formated represent'
| 32.138211 | 154 | 0.612446 |
443f46ad4b4e1e32d9a4a70177bbd0c0c23d42ba | 14,799 | py | Python | ndn_hydra/repo/main/main_loop.py | UCLA-IRL/ndn-hydra | e01829253876ff6dbfd6dfaa92142eca08cd6705 | [
"Apache-2.0"
] | 2 | 2021-04-14T04:12:14.000Z | 2021-04-23T02:28:49.000Z | ndn_hydra/repo/main/main_loop.py | ZixuanZhong/hydra | e01829253876ff6dbfd6dfaa92142eca08cd6705 | [
"Apache-2.0"
] | 10 | 2021-07-03T18:29:26.000Z | 2022-02-01T04:11:20.000Z | ndn_hydra/repo/main/main_loop.py | ZixuanZhong/ndn-distributed-repo | e01829253876ff6dbfd6dfaa92142eca08cd6705 | [
"Apache-2.0"
] | null | null | null | # ----------------------------------------------------------
# NDN Hydra MainLoop
# ----------------------------------------------------------
# @Project: NDN Hydra
# @Date: 2021-01-25
# @Authors: Please check AUTHORS.rst
# @Source-Code: https://github.com/UCLA-IRL/ndn-hydra
# @Documentation: https://ndn-hydra.readthedocs.io/
# @Pip-Library: https://pypi.org/project/ndn-hydra/
# ----------------------------------------------------------
import asyncio as aio
import logging
import secrets
import time
import random
from typing import Dict, List
from ndn.app import NDNApp
from ndn.encoding import Name, Component
from ndn.types import InterestNack, InterestTimeout
from ndn.svs import SVSync
from ndn_python_repo import Storage, SqliteStorage
from ndn_hydra.repo.global_view.global_view import GlobalView
from ndn_hydra.repo.repo_messages import *
from ndn_hydra.repo.utils.concurrent_fetcher import concurrent_fetcher
class MainLoop:
    """Periodic driver for one Hydra repo node.

    Publishes this node's state to the group over SVS (state-vector
    sync) — heartbeats, session-expiry notices, backup claims and store
    confirmations — and reacts to messages from other nodes by updating
    the shared ``GlobalView`` and fetching files this node should hold.
    """

    def __init__(self, app: NDNApp, config: Dict, global_view: GlobalView, data_storage: Storage):
        # config keys used throughout this class: 'svs_storage_path',
        # 'repo_prefix', 'session_id', 'node_name', 'period'.
        self.app = app
        self.config = config
        self.global_view = global_view
        self.data_storage = data_storage
        self.svs_storage = SqliteStorage(self.config['svs_storage_path'])
        self.svs = None  # created lazily in start()
        self.expire_at = 0
        self.logger = logging.getLogger()

    # the main coroutine: join the SVS group, then run the periodic
    # maintenance tasks once every config['period'] seconds, forever.
    async def start(self):
        self.svs = SVSync(self.app, Name.normalize(self.config['repo_prefix'] + "/group"), Name.normalize(self.config['session_id']), self.svs_missing_callback, storage=self.svs_storage)
        while True:
            await aio.sleep(self.config['period'])
            self.periodic()

    # def __init__(self, app:NDNApp, svs_storage:Storage, session_id:str, node_name:str, svs_cache_others:bool, global_view: GlobalView, config):
    #     self.app = app
    #     self.svs_storage = svs_storage
    #     self.svs_group_prefix = repo_prefix + "/group"
    #     self.svs_node_id = session_id
    #     self.repo_node_name = node_name
    #     self.svs_cache_others = svs_cache_others
    #     self.svs = None
    #     self.global_view = global_view
    #     self.config = config
    #     self.expire_at = 0

    def heartbeat(self):
        """Publish a HEARTBEAT message advertising this session as alive.

        The advertised expiry is two periods in the future; a session
        whose expiry passes without renewal is treated as dead by
        detect_expired_sessions() on the other nodes.  The message also
        carries this node's favor (currently a hard-coded constant).
        """
        # TODO: skip if expire_at value is big enough
        # hb tlv
        expire_at = int(time.time()+(self.config['period']*2))
        self.expire_at = expire_at
        favor = 1.85
        heartbeat_message_body = HeartbeatMessageBodyTlv()
        heartbeat_message_body.session_id = self.config['session_id'].encode()
        heartbeat_message_body.node_name = self.config['node_name'].encode()
        heartbeat_message_body.expire_at = expire_at
        heartbeat_message_body.favor = str(favor).encode()
        # hb msg
        heartbeat_message = MessageTlv()
        heartbeat_message.header = MessageTypes.HEARTBEAT
        heartbeat_message.body = heartbeat_message_body.encode()
        # heartbeat_message_body = HeartbeatMessageBody(self.svs_node_id, self.state_vector, heartbeat_message_body.encode())
        # print("state_vector: {0}".format(self.svs.getCore().getStateVector().to_str()))
        try:
            # TypeError is raised when our own session has no sequence
            # number in the state table yet (first run) — start from 0.
            next_state_vector = self.svs.getCore().getStateTable().getSeqNum(Name.to_str(Name.from_str(self.config['session_id']))) + 1
        except TypeError:
            next_state_vector = 0
        self.global_view.update_session(self.config['session_id'], self.config['node_name'], expire_at, favor, next_state_vector)
        self.svs.publishData(heartbeat_message.encode())

    def detect_expired_sessions(self):
        """Expire dead sessions and take over under-replicated files.

        For every session whose heartbeat expiry is more than one period
        in the past, publish an EXPIRE message and drop it from the
        global view.  Then, for every insertion that is now below its
        desired copy count, fetch the file if this node ranks within the
        deficit on that insertion's backup list.
        """
        deadline = int(time.time()) - (self.config['period'])
        expired_sessions = self.global_view.get_sessions_expired_by(deadline)
        for expired_session in expired_sessions:
            # generate expire msg and send
            # expire tlv
            expire_at = int(time.time()+(self.config['period']*2))
            favor = 1.85
            expire_message_body = ExpireMessageBodyTlv()
            expire_message_body.session_id = self.config['session_id'].encode()
            expire_message_body.node_name = self.config['node_name'].encode()
            expire_message_body.expire_at = expire_at
            expire_message_body.favor = str(favor).encode()
            expire_message_body.expired_session_id = expired_session['id'].encode()
            # expire msg
            expire_message = MessageTlv()
            expire_message.header = MessageTypes.EXPIRE
            expire_message.body = expire_message_body.encode()
            # apply globalview and send msg thru SVS
            self.global_view.expire_session(expired_session['id'])
            self.svs.publishData(expire_message.encode())
            val = "[MSG][EXPIRE]* sid={sid};exp_sid={esid}".format(
                sid=self.config['session_id'],
                esid=expired_session['id']
            )
            self.logger.info(val)
        # am I at the top of any insertion's backup list?
        underreplicated_insertions = self.global_view.get_underreplicated_insertions()
        for underreplicated_insertion in underreplicated_insertions:
            deficit = underreplicated_insertion['desired_copies'] - len(underreplicated_insertion['stored_bys'])
            for backuped_by in underreplicated_insertion['backuped_bys']:
                if (backuped_by['session_id'] == self.config['session_id']) and (backuped_by['rank'] < deficit):
                    self.fetch_file(underreplicated_insertion['id'], underreplicated_insertion['file_name'], underreplicated_insertion['packets'], underreplicated_insertion['digests'], underreplicated_insertion['fetch_path'])

    def claim(self):
        """Probabilistically volunteer as a backup for insertions.

        For each insertion that can take another backup, with roughly
        38% probability (the `random() < 0.618` skip — presumably a
        golden-ratio-inspired throttle, confirm intent), publish a CLAIM
        request naming the current tail of the backup chain (or the last
        storing node, rank -1, when the chain is empty) as authorizer.
        Skips insertions this node already stores or backs up.
        """
        # TODO: possibility based on # active sessions and period
        # if random.random() < 0.618:
        #     return
        backupable_insertions = self.global_view.get_backupable_insertions()
        for backupable_insertion in backupable_insertions:
            if random.random() < 0.618:
                continue
            # print(json.dumps(backupable_insertion['stored_bys']))
            # print(json.dumps(backupable_insertion['backuped_bys']))
            already_in = False
            for stored_by in backupable_insertion['stored_bys']:
                if stored_by == self.config['session_id']:
                    already_in = True
                    break
            for backuped_by in backupable_insertion['backuped_bys']:
                if backuped_by['session_id'] == self.config['session_id']:
                    already_in = True
                    break
            if already_in == True:
                continue
            if len(backupable_insertion['backuped_bys']) == 0 and len(backupable_insertion['stored_bys']) == 0:
                continue
            authorizer = None
            if len(backupable_insertion['backuped_bys']) == 0:
                authorizer = {
                    'session_id': backupable_insertion['stored_bys'][-1],
                    'rank': -1,
                    'nonce': backupable_insertion['id']
                }
            else:
                authorizer = backupable_insertion['backuped_bys'][-1]
            # generate claim (request) msg and send
            # claim tlv
            expire_at = int(time.time()+(self.config['period']*2))
            favor = 1.85
            claim_message_body = ClaimMessageBodyTlv()
            claim_message_body.session_id = self.config['session_id'].encode()
            claim_message_body.node_name = self.config['node_name'].encode()
            claim_message_body.expire_at = expire_at
            claim_message_body.favor = str(favor).encode()
            claim_message_body.insertion_id = backupable_insertion['id'].encode()
            claim_message_body.type = ClaimMessageTypes.REQUEST
            claim_message_body.claimer_session_id = self.config['session_id'].encode()
            claim_message_body.claimer_nonce = secrets.token_hex(4).encode()
            claim_message_body.authorizer_session_id = authorizer['session_id'].encode()
            claim_message_body.authorizer_nonce = authorizer['nonce'].encode()
            # claim msg
            claim_message = MessageTlv()
            claim_message.header = MessageTypes.CLAIM
            claim_message.body = claim_message_body.encode()
            self.svs.publishData(claim_message.encode())
            val = "[MSG][CLAIM.R]*sid={sid};iid={iid}".format(
                sid=self.config['session_id'],
                iid=backupable_insertion['id']
            )
            self.logger.info(val)

    def periodic(self):
        """Run one round of the periodic tasks (called from start())."""
        # print('periodic')
        # periodic tasks:
        self.heartbeat()
        self.detect_expired_sessions()
        self.claim()
        # self.store()
        # sessions = self.global_view.get_sessions()
        # insertions = self.global_view.get_insertions()
        # for insertion in insertions:
        #     on = ""
        #     for stored_by in insertion['stored_bys']:
        #         on = on + stored_by + ","
        #     bck = ""
        #     for backuped_by in insertion['backuped_bys']:
        #         bck = bck + backuped_by['session_id'] + ","
        #     val = '[GV] iid={iid}; name={name}; on={on}; bck={bck}'.format(
        #         iid=insertion['id'],
        #         name=insertion['file_name'],
        #         on=on,
        #         bck=bck
        #     )
        #     self.logger.info(val)
        # print("--")

    def store(self, insertion_id: str):
        """Announce that this node now stores `insertion_id`.

        Publishes a STORE message and records the fact in the global
        view, but only while the insertion is still below its desired
        number of copies.
        """
        insertion = self.global_view.get_insertion(insertion_id)
        if len(insertion['stored_bys']) < insertion['desired_copies']:
            # store msg
            expire_at = int(time.time()+(self.config['period']*2))
            favor = 1.85
            store_message_body = StoreMessageBodyTlv()
            store_message_body.session_id = self.config['session_id'].encode()
            store_message_body.node_name = self.config['node_name'].encode()
            store_message_body.expire_at = expire_at
            store_message_body.favor = str(favor).encode()
            store_message_body.insertion_id = insertion_id.encode()
            # store msg
            store_message = MessageTlv()
            store_message.header = MessageTypes.STORE
            store_message.body = store_message_body.encode()
            # apply globalview and send msg thru SVS
            # next_state_vector = svs.getCore().getStateVector().get(config['session_id']) + 1
            self.global_view.store_file(insertion_id, self.config['session_id'])
            self.svs.publishData(store_message.encode())
            val = "[MSG][STORE]* sid={sid};iid={iid}".format(
                sid=self.config['session_id'],
                iid=insertion_id
            )
            self.logger.info(val)

    def svs_missing_callback(self, missing_list):
        """SVS callback: schedule async processing of missing messages."""
        aio.ensure_future(self.on_missing_svs_messages(missing_list))

    async def on_missing_svs_messages(self, missing_list):
        """Fetch and apply every group message this node has not seen yet.

        Each fetched message body is applied to the global view as a
        fire-and-forget task.
        """
        for i in missing_list:
            while i.lowSeqNum <= i.highSeqNum:
                # print('{}:{}, {}'.format(i.nid, i.lowSeqNum, i.highSeqNum))
                message_bytes = await self.svs.fetchData(Name.from_str(i.nid), i.lowSeqNum)
                # NOTE(review): when fetchData returns None, lowSeqNum is
                # not advanced before `continue`, so this loop retries the
                # same sequence number forever — confirm this is intended.
                if message_bytes == None:
                    continue
                nid = i.nid
                seq = i.lowSeqNum
                message = Message(nid, seq, message_bytes)
                message_body = message.get_message_body()
                aio.ensure_future(message_body.apply(self.global_view, self.fetch_file, self.svs, self.config))
                # print('fetched GM {}:{}'.format(nid, seq))
                i.lowSeqNum = i.lowSeqNum + 1

    def svs_sending_callback(self, expire_at: int):
        """Record the expiry advertised in an outgoing SVS message."""
        self.expire_at = expire_at

    def fetch_file(self, insertion_id: str, file_name: str, packets: int, digests: List[bytes], fetch_path: str):
        """Log and schedule an asynchronous fetch of a whole file."""
        val = "[ACT][FETCH]* iid={iid};file_name={file_name};pcks={packets};fetch_path={fetch_path}".format(
            iid=insertion_id,
            file_name=file_name,
            packets=packets,
            fetch_path=fetch_path
        )
        self.logger.info(val)
        aio.ensure_future(self.async_fetch(insertion_id, file_name, packets, digests, fetch_path))

    async def async_fetch(self, insertion_id: str, file_name: str, packets: int, digests: List[bytes], fetch_path: str):
        """Fetch a file (segmented or single-packet) and announce storage.

        store() is only called when every expected packet was inserted.
        Note: packets <= 0 silently does nothing (no branch matches).
        """
        self.logger.debug(packets)
        if packets > 1:
            start = time.time()
            inserted_packets = await self.fetch_segmented_file(file_name, packets, fetch_path)
            if inserted_packets == packets:
                end = time.time()
                duration = end -start
                val = "[ACT][FETCHED]*pcks={packets};duration={duration}".format(
                    packets=packets,
                    duration=duration
                )
                self.logger.info(val)
                self.store(insertion_id)
        elif packets == 1:
            inserted_packets = await self.fetch_single_file(file_name, fetch_path)
            if inserted_packets == packets:
                self.store(insertion_id)

    async def fetch_segmented_file(self, file_name: str, packets: int, fetch_path: str):
        """Fetch all segments of a multi-packet file into local storage.

        At most 10 interests are in flight at once (the semaphore bound).

        Returns:
            int: The number of segments actually inserted.
        """
        semaphore = aio.Semaphore(10)
        fetched_segments = 0
        async for (_, _, content, data_bytes, key) in concurrent_fetcher(self.app, fetch_path, file_name, 0, packets-1, semaphore):
            #TODO: check digest
            # print("segment:")
            # print(Name.to_str(key))
            # print(type(content))
            # print(content)
            # print(type(data_bytes))
            # print(data_bytes)
            self.data_storage.put_data_packet(key, data_bytes)
            # self.data_storage.put_data_packet(key, content.tobytes())
            fetched_segments += 1
        return fetched_segments

    async def fetch_single_file(self, file_name: str, fetch_path: str):
        """Fetch a one-segment file; return 1 on success, 0 on NACK/timeout."""
        # NOTE(review): `int_name = int_name =` is a harmless duplicated
        # assignment target left in the original.
        int_name = int_name = Name.normalize(fetch_path) + [Component.from_segment(0)]
        key = Name.normalize(file_name) + [Component.from_segment(0)]
        try:
            data_name, _, _, data_bytes = await self.app.express_interest(
                int_name, need_raw_packet=True, can_be_prefix=False, lifetime=1000)
        except InterestNack as e:
            return 0
        except InterestTimeout:
            return 0
        self.data_storage.put_data_packet(key, data_bytes)
        return 1
7563471959abcbe24ea3e35d55d01da203058dec | 437 | gyp | Python | binding.gyp | phi16/node-sensel | 8cfb01cb8667398711658fe42bb10c69e433db5d | [
"MIT"
] | null | null | null | binding.gyp | phi16/node-sensel | 8cfb01cb8667398711658fe42bb10c69e433db5d | [
"MIT"
] | null | null | null | binding.gyp | phi16/node-sensel | 8cfb01cb8667398711658fe42bb10c69e433db5d | [
"MIT"
] | null | null | null | {
"targets": [
{
"target_name": "sensel",
"sources": [
"sensel-lib.cc"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"<(module_root_dir)/include"
],
"libraries": [
"<(module_root_dir)/lib/LibSensel.lib",
"<(module_root_dir)/lib/LibSenselDecompress.lib"
],
"defines": [ "NAPI_DISABLE_CPP_EXCEPTIONS" ]
}
]
}
| 21.85 | 61 | 0.503432 |
ad4c384791e27664797922c4b06e9473736d8d1a | 12,017 | py | Python | nse.py | shivaneeshinde/nsetools | ce70d42e568e2b90932235c4fb4b46a2a2c35dc9 | [
"MIT"
] | 8 | 2017-12-24T17:02:08.000Z | 2021-11-08T23:42:17.000Z | nse.py | shivaneeshinde/nsetools | ce70d42e568e2b90932235c4fb4b46a2a2c35dc9 | [
"MIT"
] | null | null | null | nse.py | shivaneeshinde/nsetools | ce70d42e568e2b90932235c4fb4b46a2a2c35dc9 | [
"MIT"
] | 11 | 2017-07-09T05:47:35.000Z | 2021-10-16T20:30:57.000Z | """
The MIT License (MIT)
Copyright (c) 2014 Vivek Jha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import six
# import paths differ in python 2 and python 3
if six.PY2:
from urllib2 import build_opener, HTTPCookieProcessor, Request
from urllib import urlencode
from cookielib import CookieJar
elif six.PY3:
from urllib.request import build_opener, HTTPCookieProcessor, Request
from urllib.parse import urlencode
from http.cookiejar import CookieJar
import ast
import re
import json
from nsetools.bases import AbstractBaseExchange
from nsetools.utils import byte_adaptor
from nsetools.utils import js_adaptor
class Nse(AbstractBaseExchange):
"""
class which implements all the functionality for
National Stock Exchange
"""
__CODECACHE__ = None
def __init__(self):
self.opener = self.nse_opener()
self.headers = self.nse_headers()
# URL list
self.get_quote_url = 'http://nseindia.com/live_market/dynaContent/live_watch/get_quote/GetQuote.jsp?'
self.stocks_csv_url = 'http://www.nseindia.com/content/equities/EQUITY_L.csv'
self.top_gainer_url = 'http://www.nseindia.com/live_market/dynaContent/live_analysis/gainers/niftyGainers1.json'
self.top_loser_url = 'http://www.nseindia.com/live_market/dynaContent/live_analysis/losers/niftyLosers1.json'
self.advances_declines_url = 'http://www.nseindia.com/common/json/indicesAdvanceDeclines.json'
self.index_url="http://www.nseindia.com/homepage/Indices1.json"
def get_stock_codes(self, cached=True, as_json=False):
"""
returns a dictionary with key as stock code and value as stock name.
It also implements cache functionality and hits the server only
if user insists or cache is empty
:return: dict
"""
url = self.stocks_csv_url
req = Request(url, None, self.headers)
res_dict = {}
if cached is not True or self.__CODECACHE__ is None:
# raises HTTPError and URLError
res = self.opener.open(req)
if res is not None:
# for py3 compat covert byte file like object to
# string file like object
res = byte_adaptor(res)
for line in res.read().split('\n'):
if line != '' and re.search(',', line):
(code, name) = line.split(',')[0:2]
res_dict[code] = name
# else just skip the evaluation, line may not be a valid csv
else:
raise Exception('no response received')
self.__CODECACHE__ = res_dict
return self.render_response(self.__CODECACHE__, as_json)
def is_valid_code(self, code):
"""
:param code: a string stock code
:return: Boolean
"""
if code:
stock_codes = self.get_stock_codes()
if code.upper() in stock_codes.keys():
return True
else:
return False
def get_quote(self, code, as_json=False):
"""
gets the quote for a given stock code
:param code:
:return: dict or None
:raises: HTTPError, URLError
"""
if self.is_valid_code(code):
url = self.build_url_for_quote(code)
req = Request(url, None, self.headers)
# this can raise HTTPError and URLError, but we are not handling it
# north bound APIs should use it for exception handling
res = self.opener.open(req)
# for py3 compat covert byte file like object to
# string file like object
res = byte_adaptor(res)
# Now parse the response to get the relevant data
match = re.search(\
r'\{<div\s+id="responseDiv"\s+style="display:none">\s+(\{.*?\{.*?\}.*?\})',
res.read(), re.S
)
# ast can raise SyntaxError, let's catch only this error
try:
buffer = match.group(1)
buffer = js_adaptor(buffer)
response = self.clean_server_response(ast.literal_eval(buffer)['data'][0])
except SyntaxError as err:
raise Exception('ill formatted response')
else:
return self.render_response(response, as_json)
else:
return None
def get_top_gainers(self, as_json=False):
"""
:return: a list of dictionaries containing top gainers of the day
"""
url = self.top_gainer_url
req = Request(url, None, self.headers)
# this can raise HTTPError and URLError
res = self.opener.open(req)
# for py3 compat covert byte file like object to
# string file like object
res = byte_adaptor(res)
res_dict = json.load(res)
# clean the output and make appropriate type conversions
res_list = [self.clean_server_response(item) for item in res_dict['data']]
return self.render_response(res_list, as_json)
def get_top_losers(self, as_json=False):
"""
:return: a list of dictionaries containing top losers of the day
"""
url = self.top_loser_url
req = Request(url, None, self.headers)
# this can raise HTTPError and URLError
res = self.opener.open(req)
# for py3 compat covert byte file like object to
# string file like object
res = byte_adaptor(res)
res_dict = json.load(res)
# clean the output and make appropriate type conversions
res_list = [self.clean_server_response(item)
for item in res_dict['data']]
return self.render_response(res_list, as_json)
def get_advances_declines(self, as_json=False):
"""
:return: a list of dictionaries with advance decline data
:raises: URLError, HTTPError
"""
url = self.advances_declines_url
req = Request(url, None, self.headers)
# raises URLError or HTTPError
resp = self.opener.open(req)
# for py3 compat covert byte file like object to
# string file like object
resp = byte_adaptor(resp)
resp_dict = json.load(resp)
resp_list = [self.clean_server_response(item)
for item in resp_dict['data']]
return self.render_response(resp_list, as_json)
def get_index_list(self, as_json=False):
""" get list of indices and codes
params:
as_json: True | False
returns: a list | json of index codes
"""
url = self.index_url
req = Request(url, None, self.headers)
# raises URLError or HTTPError
resp = self.opener.open(req)
resp = byte_adaptor(resp)
resp_list = json.load(resp)['data']
index_list = [str(item['name']) for item in resp_list]
return self.render_response(index_list, as_json)
def is_valid_index(self, code):
"""
returns: True | Flase , based on whether code is valid
"""
index_list = self.get_index_list()
return True if code.upper() in index_list else False
def get_index_quote(self, code, as_json=False):
"""
params:
code : string index code
as_json: True|False
returns:
a dict | json quote for the given index
"""
url = self.index_url
if self.is_valid_index(code):
req = Request(url, None, self.headers)
# raises HTTPError and URLError
resp = self.opener.open(req)
resp = byte_adaptor(resp)
resp_list = json.load(resp)['data']
# this is list of dictionaries
resp_list = [self.clean_server_response(item)
for item in resp_list]
# search the right list element to return
search_flag = False
for item in resp_list:
if item['name'] == code.upper():
search_flag = True
break
return self.render_response(item, as_json) if search_flag else None
def nse_headers(self):
"""
Builds right set of headers for requesting http://nseindia.com
:return: a dict with http headers
"""
return {'Accept' : '*/*',
'Accept-Language' : 'en-US,en;q=0.5',
'Host': 'nseindia.com',
'Referer': 'http://nseindia.com/live_market/dynaContent/live_watch/get_quote/GetQuote.jsp?symbol=INFY&illiquid=0',
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0',
'X-Requested-With': 'XMLHttpRequest'
}
def nse_opener(self):
"""
builds opener for urllib2
:return: opener object
"""
cj = CookieJar()
return build_opener(HTTPCookieProcessor(cj))
def build_url_for_quote(self, code):
"""
builds a url which can be requested for a given stock code
:param code: string containing stock code.
:return: a url object
"""
if code is not None and type(code) is str:
encoded_args = urlencode({'symbol':code, 'illiquid':'0'})
return self.get_quote_url + encoded_args
else:
raise Exception('code must be string')
def clean_server_response(self, resp_dict):
"""cleans the server reponse by replacing:
'-' -> None
'1,000' -> 1000
:param resp_dict:
:return: dict with all above substitution
"""
# change all the keys from unicode to string
d = {}
for key, value in resp_dict.items():
d[str(key)] = value
resp_dict = d
for key, value in resp_dict.items():
if type(value) is str or isinstance(value, six.string_types):
if re.match('-', value):
resp_dict[key] = None
elif re.search(r'^[0-9,.]+$', value):
# replace , to '', and type cast to int
resp_dict[key] = float(re.sub(',', '', value))
else:
resp_dict[key] = str(value)
return resp_dict
def render_response(self, data, as_json=False):
if as_json is True:
return json.dumps(data)
else:
return data
    def __str__(self):
        """
        Human-readable description of this driver object.
        :return: a constant descriptive string
        """
        return 'Driver Class for National Stock Exchange (NSE)'
# TODO: get_most_active()
# TODO: get_top_volume()
# TODO: get_peer_companies()
# TODO: is_market_open()
# TODO: concept of portfolio for fetching price in a batch and field which should be captured
# TODO: Concept of session, just like as in sqlalchemy
| 38.639871 | 130 | 0.603645 |
1779eda1be035d1de8757cfa2b9b91afd8dc9798 | 1,198 | py | Python | setup.py | dillonalaird/shared_numpy | 526f69009d75fdd914ded8a0e7017ca277a792f3 | [
"MIT"
] | 29 | 2020-03-08T06:18:03.000Z | 2022-02-28T12:41:15.000Z | setup.py | dillonalaird/shared_numpy | 526f69009d75fdd914ded8a0e7017ca277a792f3 | [
"MIT"
] | 4 | 2020-09-04T07:24:54.000Z | 2021-08-28T03:38:30.000Z | setup.py | dillonalaird/shared_numpy | 526f69009d75fdd914ded8a0e7017ca277a792f3 | [
"MIT"
] | 8 | 2020-03-10T02:39:23.000Z | 2021-12-02T09:00:39.000Z | from distutils.core import setup, Extension
import sys
import platform
import subprocess
# Run the Argument Clinic preprocessor matching the interpreter version.
# NOTE(review): this shells out to "python" (not sys.executable), so the
# preprocessor may run under a different interpreter than this setup script.
if sys.version_info[0] == 3 and sys.version_info[1] == 6:
    subprocess.run(["python", "shared_numpy/py36_clinic.py", "shared_numpy/posixshmem.c"])
elif sys.version_info[0] == 3 and sys.version_info[1] == 7:
    subprocess.run(["python", "shared_numpy/py37_clinic.py", "shared_numpy/posixshmem.c"])
else:
    raise ValueError("Must run on Python 3.6 or 3.7")
# Linux build of the POSIX shared-memory extension: shm_open/shm_unlink live
# in librt, hence the extra `libraries=["rt"]`.
linux_module = Extension(
    "shared_numpy/_posixshmem",
    define_macros=[
        ("HAVE_SHM_OPEN", "1"),
        ("HAVE_SHM_UNLINK", "1"),
        ("HAVE_SHM_MMAN_H", 1),
    ],
    libraries=["rt"],
    sources=["shared_numpy/posixshmem.c"],
)
# macOS build: identical except the shm_* symbols need no extra library.
darwin_module = Extension(
    "shared_numpy/_posixshmem",
    define_macros=[
        ("HAVE_SHM_OPEN", "1"),
        ("HAVE_SHM_UNLINK", "1"),
        ("HAVE_SHM_MMAN_H", 1),
    ],
    sources=["shared_numpy/posixshmem.c"],
)
# Pick the extension for the current platform; other platforms get none.
setup(
    name="shared-numpy",
    version="1.1.1",
    description="Shared Numpy",
    py_modules=["shared_numpy"],
    ext_modules=[linux_module]
    if platform.system() == "Linux"
    else [darwin_module]
    if platform.system() == "Darwin"
    else [],
)
| 24.44898 | 90 | 0.641903 |
de0eb18fb437338f25ecf9412ce1d387864a7062 | 3,621 | py | Python | webSocket/webSocketApp.py | colaboradorDiego/socketsForDummies | 38d2d9c68edc41ba2b289b299f7db62d07e57904 | [
"Apache-2.0"
] | null | null | null | webSocket/webSocketApp.py | colaboradorDiego/socketsForDummies | 38d2d9c68edc41ba2b289b299f7db62d07e57904 | [
"Apache-2.0"
] | null | null | null | webSocket/webSocketApp.py | colaboradorDiego/socketsForDummies | 38d2d9c68edc41ba2b289b299f7db62d07e57904 | [
"Apache-2.0"
] | null | null | null | import websocket
import threading
import time
import sys
import os
import json
# Generalmente leemos un json para configurar nuestro socket
class Configurador:
    """Loads socket-connection settings from a JSON file (or accepts a
    ready-made configuration dict) and can print them for inspection."""

    pathToConfFile = ''
    connDATA = ''

    def __init__(self, path, connDATA):
        # Keep the file path and any pre-supplied configuration, then load
        # from disk if nothing was supplied.
        self.pathToConfFile = path
        self.connDATA = connDATA
        self.leer()

    def getConf(self):
        """Return the configuration dict."""
        return self.connDATA

    def leer(self):
        """Read the JSON configuration file, but only if no configuration
        was passed to the constructor (connDATA still the empty string)."""
        if self.connDATA == '':
            # The config file lives two directories above this module.
            parametros = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', self.pathToConfFile))
            with open(parametros, 'r') as f:
                self.connDATA = json.load(f)

    def mostratConf(self):
        """Print every configuration section to stdout."""
        print('connDATA', self.connDATA)
        print()
        print('Setup from json conf file')
        print('Loading connection parameters')
        print()
        print("User credentials & Stuff")
        for field in ('host', 'user', 'pass'):
            print('... Login.' + field, self.connDATA['login'][field])
        print()
        print("API credentials & Stuff")
        for field in ('name', 'user', 'pass', 'tokenUrl'):
            print('... API.' + field, self.connDATA['api'][field])
        print()
        print("Stomper data")
        for field in ('service', 'port'):
            print('... Stomp.' + field, self.connDATA['stomp'][field])
        print()
        print("Endpoints")
        for key in self.connDATA['endpoint']:
            print('... Enpoints.' + key, self.connDATA['endpoint'][key])
        print()
        print()
# Module-level singleton configuration, available to the whole app.
# NOTE(review): passing '' as connDATA makes __init__ read appOMSApi.ini
# from disk at import time.
config = Configurador('appOMSApi.ini', '')
def on_message(ws, message):
    """Callback: print each message received from the server."""
    print('on_message:', message)
    print()
def on_error(ws, err):
    """Callback: print connection/protocol errors."""
    print('on_error:', err)
    print()
def on_close(ws):
    """Callback: report that the connection was closed."""
    print('on_close:', 'Connection Closed!')
def on_open(ws):
    """Callback: fired once the websocket handshake completes.

    Starts a background thread that sends three numbered greetings (one per
    second) so the main thread can stay blocked in run_forever() listening.
    """
    print('on_open:', 'Connection opened!')
    # enviarMsg ("send message") runs on a secondary thread; it closes over
    # the `ws` parameter of this callback.
    def enviarMsg(arg):
        for i in range(arg):
            time.sleep(1)
            ws.send("Hello %d" % i)
    time.sleep(1)
    x = threading.Thread(target=enviarMsg, args=(3,))
    x.start()
# Keeping the connection logic in a function lets us call it again after a
# disconnect.
def conectar():
    """Create and return a WebSocketApp wired to the module-level callbacks."""
    ws = websocket.WebSocketApp("ws://localhost:8765",
                                on_message=on_message,
                                on_error=on_error,
                                on_close=on_close)
    # on_open is attached after construction as a plain attribute
    ws.on_open = on_open
    return ws
def main(argv):
    """Connect and block in run_forever() until interrupted; always close.

    :param argv: command-line arguments (currently unused)
    """
    # websocket.enableTrace(True)
    ws = conectar()
    """
    Aqui es donde precisas entender q es un thread
    There's always going to be a thread dedicated to listening to the socket.
    In this case the main thread that enters a loop inside the run_forever waiting for messages.
    If you want to have some other thing going ON you'll need another thread, here, enviarMsg.
    """
    try:
        # https://www.kite.com/python/docs/websocket.WebSocketApp.run_forever
        # run_forever running on main thread
        ws.run_forever()
        # Only reached when the infinite loop exits normally.
        print('por aca solo pasa cuando salimos del loop infinito')
    except KeyboardInterrupt:
        pass
    finally:
        ws.close()
def init():
    """Script entry point: run main() only when executed directly.

    NOTE(review): the __name__ guard is inside the function, so importing
    this module still calls init() below but does not start the client.
    """
    if __name__ == '__main__':
        sys.exit(main(sys.argv))
init()
| 26.822222 | 114 | 0.597901 |
e2592c2a483bbf75b5f4fa247c1ce72c255a72e1 | 1,055 | py | Python | lists/okfn/bin/alpha3.py | openregister/countries | b335b268ea06dbe442fdf5cd6ee6c7c86cabf27f | [
"MIT"
] | 2 | 2016-05-26T10:06:07.000Z | 2016-06-02T05:17:06.000Z | lists/okfn/bin/alpha3.py | openregister/countries | b335b268ea06dbe442fdf5cd6ee6c7c86cabf27f | [
"MIT"
] | 5 | 2016-02-05T13:05:33.000Z | 2018-02-23T11:02:35.000Z | lists/okfn/bin/alpha3.py | openregister/countries | b335b268ea06dbe442fdf5cd6ee6c7c86cabf27f | [
"MIT"
] | 4 | 2016-02-08T09:26:28.000Z | 2021-04-11T08:26:20.000Z | #!/usr/bin/env python3
import sys
import csv
# Columns of the CSV read from stdin:
# name,official_name_en,official_name_fr,ISO3166-1-Alpha-2,ISO3166-1-Alpha-3,ISO3166-1-numeric,ITU,MARC,WMO,DS,Dial,FIFA,FIPS,GAUL,IOC,ISO4217-currency_alphabetic_code,ISO4217-currency_country_name,ISO4217-currency_minor_unit,ISO4217-currency_name,ISO4217-currency_numeric_code,is_independent,Capital,Continent,TLD,Languages,geonameid,EDGAR
# Build a lookup from ISO alpha-2 code to a prefixed register identifier.
alpha2 = {}
for row in csv.DictReader(open('../../data/country/countries.tsv'), delimiter='\t'):
    alpha2[row['country']] = "country:" + row['country']
for row in csv.DictReader(open('../../data/territory/territories.tsv'), delimiter='\t'):
    alpha2[row['territory']] = "territory:" + row['territory']
# Emit a TSV of alpha-3 codes with their world-location, header first.
fields = ['alpha3', 'world-location']
print("\t".join(fields))
for row in csv.DictReader(sys.stdin):
    row['alpha3'] = row['ISO3166-1-Alpha-3']
    # Rows without an alpha-3 code are skipped entirely.
    if row['alpha3']:
        if row['ISO3166-1-Alpha-2'] in alpha2:
            row['world-location'] = alpha2[row['ISO3166-1-Alpha-2']]
        # Missing world-location falls back to an empty field.
        print("\t".join([row.get(field, '') for field in fields]))
| 35.166667 | 340 | 0.70237 |
b2abc7815460d7fe783801cd6ce2e330f00c7af7 | 879 | py | Python | bot/plugins/stats/migrations/0011_download.py | sergei-maertens/discord-bot | 29b8daa8c75a8a9ee292fc4f95eaba5f8a16850e | [
"MIT"
] | 45 | 2015-12-29T23:14:43.000Z | 2021-05-29T14:49:54.000Z | bot/plugins/stats/migrations/0011_download.py | sergei-maertens/discord-bot | 29b8daa8c75a8a9ee292fc4f95eaba5f8a16850e | [
"MIT"
] | 28 | 2016-02-16T21:34:15.000Z | 2022-02-10T10:40:36.000Z | bot/plugins/stats/migrations/0011_download.py | sergei-maertens/discord-bot | 29b8daa8c75a8a9ee292fc4f95eaba5f8a16850e | [
"MIT"
] | 47 | 2016-01-06T20:42:02.000Z | 2021-03-10T02:19:35.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-06 20:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the Download model
    (title, uploaded file, creation timestamp). Do not edit by hand once
    applied."""

    dependencies = [
        ('stats', '0010_auto_20161006_1942'),
    ]
    operations = [
        migrations.CreateModel(
            name='Download',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('file', models.FileField(upload_to='downloads/%Y/%m/')),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'download',
                'verbose_name_plural': 'downloads',
            },
        ),
    ]
| 30.310345 | 114 | 0.567691 |
131d958ea37e1023c674ef329a97675708164f45 | 1,349 | py | Python | 07_gashlycrumb/gashlycrumb.py | Fleid/tiny_python_projects | 6d6c3bc397631758c42b2e93c41a0179120e8eab | [
"MIT"
] | null | null | null | 07_gashlycrumb/gashlycrumb.py | Fleid/tiny_python_projects | 6d6c3bc397631758c42b2e93c41a0179120e8eab | [
"MIT"
] | null | null | null | 07_gashlycrumb/gashlycrumb.py | Fleid/tiny_python_projects | 6d6c3bc397631758c42b2e93c41a0179120e8eab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : fleide <fleide@localhost>
Date : 2021-01-27
Purpose: Working with directories
"""
import argparse
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Gashly Crumbing',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # One or more letters to look up.
    arg_parser.add_argument('letter',
                            metavar='letter',
                            nargs='+',
                            help='Letter(s)')
    # The dictionary file is opened by argparse itself (FileType).
    arg_parser.add_argument('-f',
                            '--file',
                            help='A readable dictionary file',
                            metavar='FILE',
                            type=argparse.FileType('rt'),
                            default="gashlycrumb.txt")
    return arg_parser.parse_args()
# --------------------------------------------------
def main():
    """Look up each requested letter in the dictionary file and print its line."""
    args = get_args()
    letters = args.letter
    dictionary_source = args.file
    # Map the upper-cased first character of each line to the whole
    # (rstripped) line. Blank lines are skipped so line[0] cannot raise
    # IndexError, which the previous version did on files with empty lines.
    dictionary = {}
    for line in dictionary_source:
        line = line.rstrip()
        if line:
            dictionary[line[0].upper()] = line
    for letter in letters:
        print(dictionary.get(letter.upper(), 'I do not know "' + letter + '".'))
# --------------------------------------------------
if __name__ == '__main__':
    main()
| 24.981481 | 77 | 0.487769 |
dc8c731978959fffa6b2c34764789a1d9461bfb3 | 4,238 | py | Python | tests/test_endpoints_init_on_bp_single.py | jekel/sanic-jwt | 4f6ab07376ad90011d40e205feacae8219359d6f | [
"MIT"
] | 226 | 2017-09-05T08:23:58.000Z | 2022-03-28T09:23:47.000Z | tests/test_endpoints_init_on_bp_single.py | jekel/sanic-jwt | 4f6ab07376ad90011d40e205feacae8219359d6f | [
"MIT"
] | 179 | 2017-09-27T08:33:16.000Z | 2022-01-28T20:35:23.000Z | tests/test_endpoints_init_on_bp_single.py | jekel/sanic-jwt | 4f6ab07376ad90011d40e205feacae8219359d6f | [
"MIT"
] | 45 | 2017-10-14T10:26:46.000Z | 2022-02-04T15:01:20.000Z | from sanic import Sanic
from sanic.blueprints import Blueprint
from sanic.response import json
from sanic_jwt import Authentication, Initialize, protected, scoped
blueprint = Blueprint("Test")
cache = {}
# JWT-protected routes. protected()/scoped() receive the blueprint because
# sanic-jwt is initialised on the blueprint rather than on the app.
@blueprint.get("/", strict_slashes=True)
@protected(blueprint)
def protected_hello_world(request):
    return json({"message": "hello world"})
@blueprint.get("/user/<id>", strict_slashes=True)
@protected(blueprint)
def protected_user(request, id):
    return json({"user": id})
@blueprint.route("/scoped_empty")
@scoped("something", initialized_on=blueprint)
# NOTE(review): this coroutine rebinds the module name `scoped`, shadowing the
# imported decorator after definition. Harmless here (the decorator is applied
# before the rebinding) but fragile if another decorated route were added below.
async def scoped(request):
    return json({"scoped": True})
class MyAuthentication(Authentication):
    """Test authentication backend: every login resolves to a single
    hard-coded user (user_id 1) and refresh tokens live in the module-level
    ``cache`` dict."""

    @staticmethod
    def _cache_key(user_id):
        # One cache slot per user.
        return "refresh_token_{user_id}".format(user_id=user_id)

    async def authenticate(self, request, *args, **kwargs):
        # Accept any credentials.
        return {"user_id": 1}

    async def store_refresh_token(self, user_id, refresh_token, *args, **kwargs):
        cache[self._cache_key(user_id)] = refresh_token

    async def retrieve_refresh_token(self, user_id, *args, **kwargs):
        return cache.get(self._cache_key(user_id), None)

    async def retrieve_user(self, request, payload, *args, **kwargs):
        return {"user_id": 1}
# Application wiring: sanic-jwt is initialised on the blueprint (its app
# passed separately), then the blueprint is mounted under /test so the auth
# endpoints appear below /test/auth.
app = Sanic("sanic-jwt-test")
sanicjwt = Initialize(
    blueprint,
    app=app,
    authentication_class=MyAuthentication,
    refresh_token_enabled=True,
)
app.blueprint(blueprint, url_prefix="/test")
def test_protected_blueprint():
    """Unauthenticated requests are rejected with 401; a token obtained from
    /test/auth grants access to the protected route."""
    _, response = app.test_client.get("/test/")
    assert response.status == 401
    assert response.json.get("exception") == "Unauthorized"
    assert "Authorization header not present." in response.json.get("reasons")
    # Log in and extract the access token.
    _, response = app.test_client.post(
        "/test/auth", json={"username": "user1", "password": "abcxyz"}
    )
    assert response.status == 200
    access_token = response.json.get(sanicjwt.config.access_token_name(), None)
    assert access_token is not None
    # The same route now succeeds with the bearer token.
    _, response = app.test_client.get(
        "/test/", headers={"Authorization": "Bearer {}".format(access_token)}
    )
    assert response.status == 200
    assert response.json.get("message") == "hello world"
def test_scoped_empty():
    """A scoped route also rejects requests with no Authorization header."""
    _, response = app.test_client.get("/test/scoped_empty")
    assert response.status == 401
    assert response.json.get("exception") == "Unauthorized"
    assert "Authorization header not present." in response.json.get("reasons")
def test_authentication_all_methods():
    """End-to-end flow: login, protected route, /verify, /me, then refresh."""
    _, response = app.test_client.post(
        "/test/auth", json={"username": "user1", "password": "abcxyz"}
    )
    assert response.status == 200
    assert sanicjwt.config.access_token_name() in response.json
    assert sanicjwt.config.refresh_token_name() in response.json
    access_token = response.json.get(sanicjwt.config.access_token_name(), None)
    refresh_token = response.json.get(
        sanicjwt.config.refresh_token_name(), None
    )
    assert access_token is not None
    assert refresh_token is not None
    # Protected route accepts the bearer token.
    _, response = app.test_client.get(
        "/test/",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    assert response.json.get("message") == "hello world"
    # Token verification endpoint.
    _, response = app.test_client.get(
        "/test/auth/verify",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    # Current-user endpoint.
    _, response = app.test_client.get(
        "/test/auth/me",
        headers={"Authorization": "Bearer {}".format(access_token)},
    )
    assert response.status == 200
    assert "me" in response.json
    # Exchanging the refresh token yields a new access token only.
    _, response = app.test_client.post(
        "/test/auth/refresh",
        headers={"Authorization": "Bearer {}".format(access_token)},
        json={sanicjwt.config.refresh_token_name(): refresh_token},
    )
    new_access_token = response.json.get(
        sanicjwt.config.access_token_name(), None
    )
    assert response.status == 200
    assert new_access_token is not None
    assert (
        response.json.get(sanicjwt.config.refresh_token_name(), None) is None
    )  # there is no new refresh token
    assert sanicjwt.config.refresh_token_name() not in response.json
99520035dde54211c25dcb95715dd2684f317825 | 24,022 | py | Python | src/rsqsim_api/rsqsim_api/fault/segment.py | uc-eqgeo/rsqsim-python-tools | 35d65629809b7edc10053a464c212ea03616c8df | [
"MIT"
] | 1 | 2022-03-20T12:02:02.000Z | 2022-03-20T12:02:02.000Z | src/rsqsim_api/rsqsim_api/fault/segment.py | uc-eqgeo/rsqsim-python-tools | 35d65629809b7edc10053a464c212ea03616c8df | [
"MIT"
] | null | null | null | src/rsqsim_api/rsqsim_api/fault/segment.py | uc-eqgeo/rsqsim-python-tools | 35d65629809b7edc10053a464c212ea03616c8df | [
"MIT"
] | 1 | 2021-12-12T19:15:49.000Z | 2021-12-12T19:15:49.000Z | import os
from collections.abc import Iterable
from typing import Union, List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from pyproj import Transformer
import meshio
from shapely.ops import linemerge, unary_union
from shapely.geometry import LineString, MultiPolygon
from rsqsim_api.io.read_utils import read_dxf, read_stl
from rsqsim_api.io.tsurf import tsurf
from rsqsim_api.fault.patch import RsqSimTriangularPatch, RsqSimGenericPatch, cross_3d, norm_3d
import rsqsim_api.io.rsqsim_constants as csts
transformer_utm2nztm = Transformer.from_crs(32759, 2193, always_xy=True)
class DisplacementArray:
    """1D arrays of observation-point coordinates plus whichever displacement
    components (east/north/vertical) were supplied. Components that are not
    supplied are never set as attributes."""

    def __init__(self, x_array: np.ndarray, y_array: np.ndarray, z_array: np.ndarray = None,
                 e_array: np.ndarray = None, n_array: np.ndarray = None, v_array: np.ndarray = None):
        assert x_array.shape == y_array.shape, "X and Y arrays should be the same size"
        assert x_array.ndim == 1, "Expecting 1D arrays"
        assert not all([a is None for a in [e_array, n_array, v_array]]), "Read in at least one set of displacements"
        self.x, self.y = x_array, y_array
        # A missing height array defaults to zeros of the same shape.
        if z_array is None:
            self.z = np.zeros(self.x.shape)
        else:
            assert isinstance(z_array, np.ndarray)
            assert z_array.shape == self.x.shape
            self.z = z_array
        # Attach only the displacement components that were provided,
        # validating each one against the coordinate shape.
        for attr_name, component in (("e", e_array), ("n", n_array), ("v", v_array)):
            if component is not None:
                assert isinstance(component, np.ndarray)
                assert component.shape == self.x.shape
                setattr(self, attr_name, component)
class RsqSimSegment:
    def __init__(self, segment_number: int, patch_type: str = "triangle", fault_name: str = None):
        """Create an empty fault segment; patches are attached afterwards.

        :param segment_number: integer identifier for this segment
        :param patch_type: "triangle"/"tri" or "rectangle"/"rect"
        :param fault_name: optional name (no spaces; stored lower-cased)
        """
        # Lazily-computed caches, populated on first property access.
        self._name = None
        self._patch_numbers = None
        self._patch_outlines = None
        self._patch_vertices = None
        self._vertices = None
        self._triangles = None
        self._edge_lines = None
        self._segment_number = segment_number
        self._patch_type = None
        self._adjacency_map = None
        self._laplacian = None
        self._boundary = None
        self._mean_slip_rate =None
        # Setters validate these two.
        self.patch_type = patch_type
        self.name = fault_name
        # Strike-slip / dip-slip Green's functions; set externally when needed.
        self.ss_gf, self.ds_gf = (None,) * 2
    @property
    def name(self):
        """Fault name (lower-cased) or None."""
        return self._name
    @name.setter
    def name(self, fault_name: str):
        """Validate and store the name: must be a string without spaces."""
        if fault_name is None:
            self._name = None
        else:
            assert isinstance(fault_name, str)
            assert " " not in fault_name, "No spaces in fault name, please..."
            self._name = fault_name.lower()
    @property
    def patch_numbers(self):
        """Integer array of patch identifiers, in patch_outlines order."""
        return self._patch_numbers
    @patch_numbers.setter
    def patch_numbers(self, numbers: Union[list, tuple, np.ndarray]):
        """Store patch numbers as an int array; length must match the patches
        if those are already loaded."""
        number_array = np.array(numbers)
        assert number_array.dtype == "int"
        if self.patch_outlines is not None:
            assert len(number_array) == len(self.patch_outlines)
        self._patch_numbers = number_array
    @property
    def segment_number(self):
        """Integer identifier assigned at construction."""
        return self._segment_number
    @property
    def patch_type(self):
        """Normalised patch type: "triangle" or "rectangle"."""
        return self._patch_type
    @patch_type.setter
    def patch_type(self, patch_type: str):
        """Accept tri/triangle/rect/rectangle (any case) and store the
        canonical long form."""
        assert isinstance(patch_type, str)
        patch_lower = patch_type.lower()
        assert patch_lower in ("triangle", "rectangle", "tri", "rect"), "Expecting 'triangle' or 'rectangle'"
        if patch_lower in ("triangle", "tri"):
            self._patch_type = "triangle"
        else:
            self._patch_type = "rectangle"
    @property
    def patch_outlines(self):
        """List of patch objects (RsqSimTriangularPatch / RsqSimGenericPatch)."""
        return self._patch_outlines
    @property
    def patch_vertices(self):
        """List of per-patch vertex arrays (kept in sync by the setter below)."""
        return self._patch_vertices
    @patch_outlines.setter
    def patch_outlines(self, patches: List):
        # Patches must match the declared patch_type for this segment.
        if self.patch_type == "triangle":
            assert all([isinstance(patch, RsqSimTriangularPatch) for patch in patches])
        elif self.patch_type == "rectangle":
            assert all([isinstance(patch, RsqSimGenericPatch) for patch in patches])
        else:
            raise ValueError("Set patch type (triangle or rectangle) for fault!")
        self._patch_outlines = patches
        self._patch_vertices = [patch.vertices for patch in patches]
    @property
    def patch_triangle_rows(self):
        """(n_patches, 9) array: one flattened triangle (3 vertices x 3 coords) per row."""
        return np.array([triangle.flatten() for triangle in self.patch_vertices])
    @property
    def vertices(self):
        """Unique vertex coordinates across all patches (computed lazily)."""
        if self._vertices is None:
            self.get_unique_vertices()
        return self._vertices
@property
def bounds(self):
"""
Square box in XY plane containing all vertices
"""
x0 = min(self.vertices[:, 0])
y0 = min(self.vertices[:, 1])
x1 = max(self.vertices[:, 0])
y1 = max(self.vertices[:, 1])
bounds = np.array([x0, y0, x1, y1])
return bounds
    @property
    def boundary(self):
        """Optional (n, 3) array of boundary coordinates (e.g. from a DXF)."""
        return self._boundary
    @boundary.setter
    def boundary(self, boundary_array: np.ndarray):
        if boundary_array is not None:
            assert isinstance(boundary_array, np.ndarray)
            assert boundary_array.ndim == 2  # 2D array
            assert boundary_array.shape[1] == 3  # Three columns
        self._boundary = boundary_array
    @property
    def quaternion(self):
        # Always None. NOTE(review): looks like an unimplemented placeholder
        # -- confirm intent before relying on it.
        return None
    @property
    def mean_slip_rate(self):
        """Mean total_slip over the segment's patches (computed lazily)."""
        if self._mean_slip_rate is None:
            self.get_mean_slip_rate()
        return self._mean_slip_rate
def get_mean_slip_rate(self):
all_patches = []
for patch_id in self.patch_numbers:
patch = self.patch_dic[patch_id]
slip_rate = patch.total_slip
all_patches.append(slip_rate)
fault_slip_rate = np.mean(all_patches)
self._mean_slip_rate = fault_slip_rate
    def get_unique_vertices(self):
        """Stack all patch vertices, deduplicate them, and cache the result
        on self._vertices.

        :raises ValueError: if no patches have been loaded yet
        """
        if self.patch_vertices is None:
            raise ValueError("Read in triangles first!")
        # (n_patches, 3, 3) -> (3 * n_patches, 3)
        all_vertices = np.reshape(self.patch_vertices, (3 * len(self.patch_vertices), 3))
        unique_vertices = np.unique(all_vertices, axis=0)
        self._vertices = unique_vertices
    @property
    def triangles(self):
        """(n_patches, 3) vertex-index array (built lazily by generate_triangles)."""
        if self._triangles is None:
            self.generate_triangles()
        return self._triangles
    @property
    def edge_lines(self):
        """(3 * n_patches, 2) vertex-index pairs: the three edges of each triangle."""
        if self._edge_lines is None:
            self.generate_triangles()
        return self._edge_lines
def generate_triangles(self):
assert self.patch_outlines is not None, "Load patches first!"
all_vertices = [patch.vertices for patch in self.patch_outlines]
unique_vertices = np.unique(np.vstack(all_vertices), axis=0)
self._vertices = unique_vertices
triangle_ls = []
line_ls = []
for triangle in all_vertices:
vertex_numbers = []
for vertex in triangle:
index = np.where((unique_vertices == vertex).all(axis=1))[0][0]
vertex_numbers.append(index)
triangle_ls.append(vertex_numbers)
line_ls += [[vertex_numbers[0], vertex_numbers[1]],
[vertex_numbers[0], vertex_numbers[2]],
[vertex_numbers[1], vertex_numbers[2]]]
self._triangles = np.array(triangle_ls)
self._edge_lines = np.array(line_ls)
def find_triangles_from_vertex_index(self, vertex_index: int):
assert isinstance(vertex_index, int)
assert 0 <= vertex_index < len(self.vertices)
triangle_index_list = []
for i, triangle in enumerate(self.triangles):
if vertex_index in triangle:
triangle_index_list.append(i)
print(triangle_index_list)
return triangle_index_list
    @classmethod
    def from_triangles(cls, triangles: Union[np.ndarray, list, tuple], segment_number: int = 0,
                       patch_numbers: Union[list, tuple, set, np.ndarray] = None, fault_name: str = None,
                       strike_slip: Union[int, float] = None, dip_slip: Union[int, float] = None,
                       rake: Union[int, float] = None, total_slip: np.ndarray = None, min_patch_area: float = 1.):
        """
        Create a segment from triangle vertices and (if appropriate) populate it with strike-slip/dip-slip values
        :param triangles: (n, 9) array-like; each row is three 3D vertices
        :param segment_number: identifier for the new segment
        :param patch_numbers: optional per-triangle ids (defaults to 0..n-1)
        :param fault_name: optional fault name
        :param strike_slip: uniform strike slip applied to every patch
        :param dip_slip: uniform dip slip applied to every patch
        :param rake: uniform rake applied to every patch
        :param total_slip: optional per-patch total slip array
        :param min_patch_area: only used by the disabled area check below
        :return: a populated RsqSimSegment
        """
        # Test shape of input array is appropriate
        triangle_array = np.array(triangles)
        assert triangle_array.shape[1] == 9, "Expecting 3d coordinates of 3 vertices each"
        # Disabled zero-area patch filter, kept for reference.
        # # check no patches have 0 area
        # triangle_verts=np.reshape(triangle_array,[len(triangle_array),3,3])
        # for i,triangle in enumerate(triangle_verts):
        #     side1=triangle[1]-triangle[0]
        #     side2=triangle[1]-triangle[2]
        #     cross_prod=cross_3d(side1,side2)
        #     norm_cross=norm_3d(cross_prod)
        #     area=0.5*norm_cross
        #     if area < min_patch_area:
        #         np.delete(triangle_array,i,axis=0)
        #         if patch_numbers is not None:
        #             np.delete(patch_numbers,i,axis=0)
        if patch_numbers is None:
            patch_numbers = np.arange(len(triangle_array))
        else:
            assert len(patch_numbers) == triangle_array.shape[0], "Need one patch for each triangle"
        # Create empty segment object
        fault = cls(patch_type="triangle", segment_number=segment_number, fault_name=fault_name)
        triangle_ls = []
        # Populate segment object
        for i, (patch_num, triangle) in enumerate(zip(patch_numbers, triangle_array)):
            triangle3 = triangle.reshape(3, 3)
            if total_slip is not None:
                patch = RsqSimTriangularPatch(fault, vertices=triangle3, patch_number=patch_num, strike_slip=strike_slip,
                                              dip_slip=dip_slip, rake=rake, total_slip=total_slip[i])
            else:
                patch = RsqSimTriangularPatch(fault, vertices=triangle3, patch_number=patch_num,
                                              strike_slip=strike_slip,
                                              dip_slip=dip_slip, rake=rake)
            triangle_ls.append(patch)
        fault.patch_outlines = triangle_ls
        fault.patch_numbers = np.array([patch.patch_number for patch in triangle_ls])
        fault.patch_dic = {p_num: patch for p_num, patch in zip(fault.patch_numbers, fault.patch_outlines)}
        return fault
    @classmethod
    def from_tsurface(cls, tsurface_file: str, segment_number: int = 0,
                      patch_numbers: Union[list, tuple, set, np.ndarray] = None, fault_name: str = None,
                      strike_slip: Union[int, float] = None, dip_slip: Union[int, float] = None):
        """Build a segment from a GOCAD tsurf file (delegates to from_triangles)."""
        assert os.path.exists(tsurface_file)
        tsurface_mesh = tsurf(tsurface_file)
        fault = cls.from_triangles(tsurface_mesh.triangles, segment_number=segment_number, patch_numbers=patch_numbers,
                                   fault_name=fault_name, strike_slip=strike_slip, dip_slip=dip_slip)
        return fault
    @classmethod
    def from_dxf(cls, dxf_file: str, segment_number: int = 0,
                 patch_numbers: Union[list, tuple, set, np.ndarray] = None, fault_name: str = None,
                 strike_slip: Union[int, float] = None, dip_slip: Union[int, float] = None):
        """Build a segment from a DXF file; also attaches the DXF boundary."""
        triangles, boundary = read_dxf(dxf_file)
        segment = cls.from_triangles(triangles, segment_number=segment_number, patch_numbers=patch_numbers,
                                     fault_name=fault_name, strike_slip=strike_slip, dip_slip=dip_slip)
        segment.boundary = boundary
        return segment
    @classmethod
    def from_pandas(cls, dataframe: pd.DataFrame, segment_number: int,
                    patch_numbers: Union[list, tuple, set, np.ndarray], fault_name: str = None,
                    strike_slip: Union[int, float] = None, dip_slip: Union[int, float] = None, read_rake: bool = True,
                    read_slip_rate: bool = True, transform_from_utm: bool = False):
        """Build a segment from a DataFrame whose first 9 columns are triangle
        vertex coordinates, with optional "slip_rate" and "rake" columns.

        :param transform_from_utm: reproject vertices from UTM 59S to NZTM
        :param read_rake: take per-patch rake from the "rake" column and
            derive strike/dip slip from it (mutually exclusive with passing
            strike_slip/dip_slip explicitly)
        :param read_slip_rate: take per-patch slip rate from "slip_rate"
        NOTE(review): when read_slip_rate is False, slip_rate is the scalar 1
        and the slip_rate[i] subscription in the loop below will raise
        TypeError -- confirm whether that path is ever used.
        """
        triangles = dataframe.iloc[:, :9].to_numpy()
        if transform_from_utm:
            # Reproject vertex-by-vertex, then restore the (n, 9) layout.
            reshaped_array = triangles.reshape((len(triangles) * 3), 3)
            transformed_array = transformer_utm2nztm.transform(reshaped_array[:, 0], reshaped_array[:, 1],
                                                               reshaped_array[:, 2])
            reordered_array = np.vstack(transformed_array).T
            triangles_nztm = reordered_array.reshape((len(triangles), 9))
        else:
            triangles_nztm = triangles
        # Create empty segment object
        fault = cls(patch_type="triangle", segment_number=segment_number, fault_name=fault_name)
        triangle_ls = []
        if read_slip_rate:
            assert "slip_rate" in dataframe.columns, "Cannot read slip rate"
            slip_rate=dataframe.slip_rate.to_numpy()
        else:
            #set slip rate to 1 for calculating tsunami green functions
            slip_rate = 1
        if read_rake:
            assert "rake" in dataframe.columns, "Cannot read rake"
            assert all([a is None for a in (dip_slip, strike_slip)]), "Either read_rake or specify ds and ss, not both!"
            rake = dataframe.rake.to_numpy()
            # Cache (cos, sin) per distinct rake to avoid recomputation.
            rake_dic = {r: (np.cos(np.radians(r)), np.sin(np.radians(r))) for r in np.unique(rake)}
            assert len(rake) == len(triangles_nztm)
        else:
            rake = np.zeros((len(triangles_nztm),))
        # Populate segment object
        for i, (patch_num, triangle) in enumerate(zip(patch_numbers, triangles_nztm)):
            triangle3 = triangle.reshape(3, 3)
            if read_rake:
                # Decompose the (scaled) slip vector into strike/dip components.
                if read_slip_rate:
                    strike_slip = rake_dic[rake[i]][0]*slip_rate[i]
                    dip_slip = rake_dic[rake[i]][1]*slip_rate[i]
                else:
                    strike_slip = rake_dic[rake[i]][0]
                    dip_slip = rake_dic[rake[i]][1]
            patch = RsqSimTriangularPatch(fault, vertices=triangle3, patch_number=patch_num,
                                          strike_slip=strike_slip,
                                          dip_slip=dip_slip, total_slip=slip_rate[i], rake=rake[i])
            triangle_ls.append(patch)
        fault.patch_outlines = triangle_ls
        fault.patch_numbers = patch_numbers
        fault.patch_dic = {p_num: patch for p_num, patch in zip(fault.patch_numbers, fault.patch_outlines)}
        return fault
    @classmethod
    def from_pickle(cls, dataframe: pd.DataFrame, segment_number: int,
                    patch_numbers: Union[list, tuple, set, np.ndarray], fault_name: str = None):
        """Rebuild a segment from a pickled patch DataFrame.

        Row layout inferred from the indexing below: [vertices, 6 patch_data
        values, dip_slip, strike_slip]. NOTE(review): confirm against the
        code that wrote the pickle.
        """
        patches = dataframe.to_numpy()
        # Create empty segment object
        fault = cls(patch_type="triangle", segment_number=segment_number, fault_name=fault_name)
        triangle_ls = []
        # Populate segment object
        for i, patch_num in enumerate(patch_numbers):
            patch_data = patches[i]
            patch = RsqSimTriangularPatch(fault, vertices=patch_data[0], patch_number=patch_num,
                                          strike_slip=patch_data[8],
                                          dip_slip=patch_data[7],
                                          patch_data=patch_data[1:7])
            triangle_ls.append(patch)
        fault.patch_outlines = triangle_ls
        fault.patch_numbers = patch_numbers
        fault.patch_dic = {p_num: patch for p_num, patch in zip(fault.patch_numbers, fault.patch_outlines)}
        return fault
    @classmethod
    def from_stl(cls, stl_file: str, segment_number: int = 0,
                 patch_numbers: Union[list, tuple, set, np.ndarray] = None, fault_name: str = None,
                 strike_slip: Union[int, float] = None, dip_slip: Union[int, float] = None):
        """Build a segment from an STL mesh file (delegates to from_triangles)."""
        triangles = read_stl(stl_file)
        return cls.from_triangles(triangles, segment_number=segment_number, patch_numbers=patch_numbers,
                                  fault_name=fault_name, strike_slip=strike_slip, dip_slip=dip_slip)
    @property
    def adjacency_map(self):
        """Per-triangle list of edge-adjacent triangle indices (built lazily)."""
        if self._adjacency_map is None:
            self.build_adjacency_map()
        return self._adjacency_map
    def build_adjacency_map(self):
        """
        For each triangle, collect the indices of triangles that share an
        edge (exactly two common vertex indices) and store the lists on
        self._adjacency_map.

        NOTE(review): pairwise O(n^2) comparison over all triangles.

        :Returns:
            * None
        """
        self._adjacency_map = []
        # Cache the vertices and faces arrays
        # First find adjacent triangles for all triangles
        # Currently any triangle with a edge, could be a common vertex instead.
        for vertex_numbers in self.triangles:
            adjacent_triangles = []
            for j, triangle in enumerate(self.triangles):
                # A triangle shares all 3 vertices with itself, so it is
                # never counted as its own neighbour here.
                common_vertices = [a for a in vertex_numbers if a in triangle]
                if len(common_vertices) == 2:
                    adjacent_triangles.append(j)
            self._adjacency_map.append(adjacent_triangles)
    def build_laplacian_matrix(self):
        """
        Build a discrete, scale-dependent Laplacian smoothing operator over
        the patch centres (distance-weighted, after Desbrun et al. 1999,
        "Implicit Fairing of Irregular Meshes using Diffusion and Curvature
        Flow", SIGGRAPH) and cache it on self._laplacian.

        Diagonal: 2 * n_neighbours / sum(d) * sum(1/d); off-diagonals:
        -2 / sum(d) / d for each neighbour, where d are centre-to-centre
        distances normalised by the largest such distance on the segment.

        :Returns:
            * None (result stored as np.hstack of two copies on self._laplacian)
        """
        # Build the tent adjacency map
        # NOTE(review): the adjacency_map property never returns None (it
        # builds on demand), so this guard's call is effectively always done
        # by the property itself.
        if self.adjacency_map is None:
            self.build_adjacency_map()
        # Allocate an (n_patches, n_patches) array
        laplacian_matrix = np.zeros((len(self.patch_numbers), len(self.patch_numbers)))
        # Gather centre-to-centre distances for every patch's neighbours
        all_distances = []
        for i, (patch, adjacents) in enumerate(zip(self.patch_outlines, self.adjacency_map)):
            patch_centre = patch.centre
            distances = np.array([np.linalg.norm(self.patch_outlines[a].centre - patch_centre) for a in adjacents])
            all_distances.append(distances)
        normalizer = np.max([np.max(d) for d in all_distances])
        # Fill one row of the operator per patch
        for i, (adjacents, distances) in enumerate(zip(self.adjacency_map, all_distances)):
            # Distance-based weights
            distances_normalized = distances / normalizer
            e = np.sum(distances_normalized)
            laplacian_matrix[i, i] = float(len(adjacents)) * 2. / e * np.sum(1. / distances_normalized)
            laplacian_matrix[i, adjacents] = -2. / e * 1. / distances_normalized
        self._laplacian = np.hstack((laplacian_matrix, laplacian_matrix))
    @property
    def laplacian(self):
        """The smoothing operator (built lazily by build_laplacian_matrix)."""
        if self._laplacian is None:
            self.build_laplacian_matrix()
        return self._laplacian
    def find_top_vertex_indices(self, depth_tolerance: Union[float, int] = 100):
        """Indices of vertices within depth_tolerance of the shallowest vertex.

        The shallowest vertex is taken as the one with maximum z -- i.e. z
        apparently increases upwards (depths negative); TODO confirm.
        """
        top_vertex_depth = max(self.vertices[:, -1])
        shallow_indices = np.where(self.vertices[:, -1] >= top_vertex_depth - depth_tolerance)[0]
        return shallow_indices
    def find_top_vertices(self, depth_tolerance: Union[float, int] = 100):
        """Coordinates of the vertices selected by find_top_vertex_indices."""
        shallow_indices = self.find_top_vertex_indices(depth_tolerance)
        return self.vertices[shallow_indices]
    def find_top_edges(self, depth_tolerance: Union[float, int] = 100):
        """Edges whose endpoints are both among the top vertices."""
        shallow_indices = self.find_top_vertex_indices(depth_tolerance)
        top_edges = self.edge_lines[np.all(np.isin(self.edge_lines, shallow_indices), axis=1)]
        return top_edges
@property
def trace(self):
top_edges = self.find_top_edges()
line_list = []
for edge in top_edges:
v1 = self.vertices[edge[0]]
v2 = self.vertices[edge[1]]
line = LineString([v1[:-1], v2[:-1]])
line_list.append(line)
return linemerge(line_list)
    @property
    def fault_outline(self):
        """Union of all patch polygons (dissolves shared internal edges).

        Returns a shapely geometry; ``unary_union`` merges the per-patch
        polygons into the outline of the whole fault surface.
        """
        multip = MultiPolygon(patch.as_polygon() for patch in self.patch_outlines)
        return unary_union(list(multip.geoms))
    def plot_2d(self, ax: plt.Axes):
        """Draw the triangulated mesh in map view (x, y) on the supplied axes."""
        ax.triplot(self.vertices[:, 0], self.vertices[:, 1], self.triangles)
def to_mesh(self, write_slip: bool = False):
mesh = meshio.Mesh(points=self.vertices, cells=[("triangle", self.triangles)])
if write_slip:
mesh.cell_data["slip"] = np.array([patch.total_slip for patch in self.patch_outlines])
return mesh
    def to_stl(self, stl_name: str):
        """Write the mesh geometry (no slip data) to an STL file at *stl_name*."""
        mesh = self.to_mesh()
        mesh.write(stl_name, file_format="stl")
    def to_vtk(self, vtk_name: str, write_slip: bool = False):
        """Write the mesh (optionally with per-patch slip cell data) to a VTK file."""
        mesh = self.to_mesh(write_slip=write_slip)
        mesh.write(vtk_name, file_format="vtk")
    @property
    def dip_slip(self):
        """Per-patch dip-slip values collected into a 1-D numpy array."""
        return np.array([patch.dip_slip for patch in self.patch_outlines])
def to_rsqsim_fault_file(self, flt_name):
tris = pd.DataFrame(self.patch_triangle_rows)
rakes = pd.Series(np.ones(self.dip_slip.shape) * 90.)
tris.loc[:, 9] = rakes
slip_rates = pd.Series(self.dip_slip * 1.e-3 / csts.seconds_per_year)
tris.loc[:, 10] = slip_rates
segment_num = pd.Series(np.ones(self.dip_slip.shape) * self.segment_number, dtype=np.int)
tris.loc[:, 11] = segment_num
seg_names = pd.Series([self.name for i in range(len(self.patch_numbers))])
tris.loc[:, 12] = seg_names
tris.to_csv(flt_name, index=False, header=False, sep="\t", encoding='ascii')
def to_rsqsim_fault_array(self, flt_name):
tris = pd.DataFrame(self.patch_triangle_rows)
rakes = pd.Series(np.ones(self.dip_slip.shape) * 90.)
tris.loc[:, 9] = rakes
slip_rates = pd.Series(self.dip_slip * 1.e-3 / csts.seconds_per_year)
tris.loc[:, 10] = slip_rates
segment_num = pd.Series(np.ones(self.dip_slip.shape) * self.segment_number, dtype=np.int)
tris.loc[:, 11] = segment_num
seg_names = pd.Series([self.name for i in range(len(self.patch_numbers))])
tris.loc[:, 12] = seg_names
return tris
class RsqSimFault:
    """
    The idea is to allow a fault to have one or more segments
    """

    def __init__(self, segments: Union[RsqSimSegment, List[RsqSimSegment]]):
        # Backing fields; populated via the segments setter below.
        self._segments = None
        self._vertices = None
        if segments is not None:
            self.segments = segments

    @property
    def segments(self):
        """List of RsqSimSegment objects making up this fault."""
        return self._segments

    @segments.setter
    def segments(self, segments: Union[RsqSimSegment, List[RsqSimSegment]]):
        # Accept either a single segment or any iterable of segments;
        # always store an explicit list internally.
        if isinstance(segments, RsqSimSegment):
            self._segments = [segments]
        else:
            assert isinstance(segments, Iterable), "Expected either one segment or a list of segments"
            assert all([isinstance(segment, RsqSimSegment) for segment in segments]), "Expected a list of segments"
            self._segments = list(segments)
| 39.123779 | 264 | 0.624178 |
0e11a75f336a7dc43b0cf06a6de0d716b057ffd3 | 344 | py | Python | scripts/run_make_decoy_dist_mat.py | lonelu/Metalprot_learning | 8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b | [
"MIT"
] | null | null | null | scripts/run_make_decoy_dist_mat.py | lonelu/Metalprot_learning | 8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b | [
"MIT"
] | null | null | null | scripts/run_make_decoy_dist_mat.py | lonelu/Metalprot_learning | 8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b | [
"MIT"
] | null | null | null | import os
from src.extractor import make_neg_dist_mats as neg
from src import config
workdir = os.getcwd()
pdb_path = '/data/metal_decoys/'
out_path = '/data/metal_decoys/out/'
opts = config.ExtractorConfig()
opts.config['dmap'] = True
opts.config['--greyscale'] = True
neg.run_get_neg_dist_mats(workdir, pdb_path, out_path, opts.config)
| 21.5 | 67 | 0.758721 |
8f8eeac1435b10d431ea3a5ac24c1427a4e7c844 | 1,376 | py | Python | setup.py | Boondockers-Welcome/django-autosave | 42792db2163ed410c3adfbed4137e018397aaed7 | [
"BSD-2-Clause"
] | null | null | null | setup.py | Boondockers-Welcome/django-autosave | 42792db2163ed410c3adfbed4137e018397aaed7 | [
"BSD-2-Clause"
] | null | null | null | setup.py | Boondockers-Welcome/django-autosave | 42792db2163ed410c3adfbed4137e018397aaed7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import
from setuptools import setup, find_packages
setup(
name="Django Autosave",
version="1.0.0",
author='Jason Goldstein',
author_email='jason@betheshoe.com',
url='https://github.com/theatlantic/django-autosave',
packages=['autosave'],
description='Generic autosave for the Django Admin.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
install_requires=['Django>=1.11'],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
classifiers=[
'Development Status :: 5 - Production',
'License :: OSI Approved :: BSD License',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
include_package_data=True,
zip_safe=False,
)
| 35.282051 | 77 | 0.604651 |
34d3a99f1c8550b9688b021ce203b1bddcd2662a | 1,486 | py | Python | cloudify_types/cloudify_types/shared_resource/operations.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | cloudify_types/cloudify_types/shared_resource/operations.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | cloudify_types/cloudify_types/shared_resource/operations.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify.decorators import operation
from cloudify_types.utils import proxy_operation
from .shared_resource import SharedResource
from .constants import WORKFLOW_EXECUTION_TIMEOUT
from .execute_shared_resource_workflow import execute_shared_resource_workflow
@operation(resumable=True)
@proxy_operation('validate_deployment')
def connect_deployment(operation, **_):
    """Relationship operation that validates the shared-resource deployment.

    NOTE(review): the ``operation`` argument appears to be injected by the
    ``proxy_operation('validate_deployment')`` decorator, and the body then
    dispatches to that method on a ``SharedResource`` built from the ctx
    kwargs — confirm against ``cloudify_types.utils.proxy_operation``.
    """
    return getattr(SharedResource(_), operation)()
@operation(resumable=True)
def execute_workflow(workflow_id,
                     parameters,
                     timeout=WORKFLOW_EXECUTION_TIMEOUT,
                     redirect_logs=True,
                     **_):
    """Run *workflow_id* with *parameters* against the shared resource.

    Thin forwarding wrapper around ``execute_shared_resource_workflow``;
    extra ctx kwargs are accepted and ignored.
    """
    result = execute_shared_resource_workflow(
        workflow_id, parameters, timeout, redirect_logs)
    return result
| 37.15 | 78 | 0.687079 |
e331a4807842c3e6e053dd0b96a1ef59e380b823 | 2,523 | py | Python | play.py | jeeiantam/cs221-project | 71b5177b93adedc5eb0ba4fdb97b6e031101aeb8 | [
"MIT"
] | null | null | null | play.py | jeeiantam/cs221-project | 71b5177b93adedc5eb0ba4fdb97b6e031101aeb8 | [
"MIT"
] | null | null | null | play.py | jeeiantam/cs221-project | 71b5177b93adedc5eb0ba4fdb97b6e031101aeb8 | [
"MIT"
] | null | null | null | import gym
import time
import sys
SKIP_CONTROL = 0 # Use previous control decision SKIP_CONTROL times, that's how you
# can test what skip is still usable.
incremental_action_games = []
def controller_mapping(game_name):
    """Return the keyboard-to-action mapping for *game_name*.

    Discrete-action games map ``{keycode: action}``; CarRacing maps
    ``{(keycode, action_index): value}`` because its action is a continuous
    vector. Raises ValueError for games with no mapping — the original
    silently returned None, which later crashed with a cryptic TypeError
    when the mapping was iterated.
    """
    if 'LunarLander' in game_name:
        return {ord('s'): 2, ord('a'): 3, ord('d'): 1}
    if 'Breakout' in game_name:
        return {ord('a'): 3, ord('d'): 2}
    if 'Asteroids' in game_name:
        # 32 is the spacebar keycode (fire).
        return {ord('a'): 4, ord('w'): 2, ord('d'): 3, ord('s'): 5, 32: 1}
    if 'CarRacing' in game_name:
        return {(ord('a'), 0): -1.0, (ord('w'), 1): +1.0,
                (ord('d'), 0): +1.0, (ord('s'), 2): +0.8}
    raise ValueError("no keyboard mapping defined for game %r" % game_name)
game_name = str(sys.argv[1])  # environment id from the command line, e.g. "LunarLander-v2"
env = gym.make(game_name)
keybinds = controller_mapping(game_name)
# Get type of action for env
# Sample once to learn the action-space shape: list-like (e.g. Box, as in
# CarRacing) vs a plain scalar (Discrete).
human_agent_action = env.action_space.sample()
action_islist = hasattr(human_agent_action, '__contains__');
# True when the game name matches incremental_action_games (currently empty).
action_isIncremental = len([s for s in incremental_action_games if s in game_name]) > 0
# Start from the neutral (all-zero) action.
human_agent_action = [0] * len(human_agent_action) if action_islist else 0
def zero_actions(key):
    """Reset the action bound to *key* on key release.

    No-op for incremental-action games, where releasing a key does not cancel
    the accumulated action.
    """
    global keybinds, human_agent_action, action_islist, action_isIncremental
    if action_isIncremental:
        return
    if action_islist:
        # NOTE(review): this branch assumes keybinds keys are
        # (keycode, action_index) tuples (the CarRacing-style mapping) —
        # confirm when adding new list-action games.
        for k, i in keybinds.keys():
            if key == k:
                human_agent_action[i] = 0
                return
    else:
        human_agent_action = 0
def do_action(key):
    """Apply the action bound to *key* on key press.

    Incremental games accumulate the bound value into the current action;
    otherwise the bound value overwrites it. List-shaped actions are updated
    per-component via (keycode, action_index) tuple keys.
    """
    global keybinds, action_islist, action_isIncremental, human_agent_action
    if action_isIncremental:
        if action_islist:
            for k, i in keybinds.keys():
                if k == key:
                    human_agent_action[i] += keybinds[(k, i)]
                    return
        else:
            human_agent_action += keybinds[key]
    else:
        if action_islist:
            for k, i in keybinds.keys():
                if k == key:
                    human_agent_action[i] = keybinds[(k, i)]
                    return
        else:
            # Unmapped keys are ignored.
            if key in keybinds.keys():
                human_agent_action = keybinds[key]
def key_press(key, mod):
    """Pyglet keyboard callback: apply the action bound to *key*."""
    do_action(key)
def key_release(key, mod):
    """Pyglet keyboard callback: clear the action bound to *key*."""
    zero_actions(key)
env.reset()
env.render()  # first render creates env.viewer so keyboard callbacks can attach
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
done = False
skip = 0  # countdown that re-uses the previous action for SKIP_CONTROL frames
while not done:
    if not skip:
        a = human_agent_action  # latest action chosen by the keyboard handlers
        skip = SKIP_CONTROL
    else:
        skip -= 1
    obser, r, done, info = env.step(a)
    env.render()
    time.sleep(0.033)  # ~30 FPS pacing
| 29 | 92 | 0.597305 |
6b0df4212aa97a7aaaea1235a603a55b426a98d1 | 17,570 | py | Python | Blender/appdata_common/Blender Foundation/Blender/BLENDER_VERSION/scripts/addons/DTB/DtbMaterial.py | samjay3d/DazToBlender | cf9239bcc5e3c82a8a2373dd821f705edf481cc3 | [
"Apache-2.0"
] | 86 | 2020-07-23T03:00:37.000Z | 2022-03-26T06:35:20.000Z | Blender/appdata_common/Blender Foundation/Blender/BLENDER_VERSION/scripts/addons/DTB/DtbMaterial.py | samjay3d/DazToBlender | cf9239bcc5e3c82a8a2373dd821f705edf481cc3 | [
"Apache-2.0"
] | 91 | 2020-07-25T14:58:55.000Z | 2022-03-26T13:02:18.000Z | Blender/appdata_common/Blender Foundation/Blender/BLENDER_VERSION/scripts/addons/DTB/DtbMaterial.py | samjay3d/DazToBlender | cf9239bcc5e3c82a8a2373dd821f705edf481cc3 | [
"Apache-2.0"
] | 25 | 2020-07-27T17:06:05.000Z | 2022-02-12T14:56:58.000Z | import os
import json
import pathlib
import bpy
from . import Global
from . import NodeArrange
from . import Versions
from . import MatDct
from . import Util
# region top-level methods
def srgb_to_linear_rgb(srgb):
    """Convert a single sRGB channel value to linear RGB (gamma expansion).

    Negative inputs clamp to 0; values below the 0.04045 knee use the linear
    segment, the rest the power curve of the sRGB transfer function.
    """
    if srgb < 0:
        return 0
    if srgb < 0.04045:
        return srgb / 12.92
    return ((srgb + 0.055) / 1.055) ** 2.4
def hex_to_col(hex, normalize=True, precision=6):
    """Parse a hex colour string (e.g. "ff8800") into linear-RGB channels.

    Pairs of hex digits become integer channels; when *normalize* is true the
    channels are divided by 255, then rounded to *precision* digits, and each
    is passed through srgb_to_linear_rgb. Returns a list of floats.
    """
    channels = []
    digit_pairs = iter(hex)
    for high_digit in digit_pairs:
        channels.append(int(high_digit + next(digit_pairs), 16))
    if normalize:
        channels = [value / 255 for value in channels]
    channels = [round(value, precision) for value in channels]
    return [srgb_to_linear_rgb(value) for value in channels]
def getGroupNode(key):
    """Find a shader group node on the body mesh's materials.

    Scans every material slot of the main body object for a node named
    "Group*" whose node-tree name starts with *key*; returns the first match,
    or None implicitly when nothing matches.
    """
    for slot in Global.getBody().material_slots:
        ROOT = bpy.data.materials[slot.name].node_tree.nodes
        for n in ROOT:
            if n.name.startswith("Group"):
                if n.node_tree.name.startswith(key):
                    return n
def getGroupNodeTree(key):
    """Return the node_tree of the first group node matching *key*, or None."""
    group_node = getGroupNode(key)
    if group_node is None:
        return None
    return group_node.node_tree
def default_material():
    """Look up the standard shader group trees for eyes and skin.

    NOTE(review): the return values are discarded and getGroupNodeTree has no
    visible side effects here — presumably this exists to warm up / verify the
    groups, or is vestigial. TODO confirm intent.
    """
    getGroupNodeTree("EyeDry")
    getGroupNodeTree("EyeWet")
    getGroupNodeTree("IrayUberSkin")
def forbitMinus():
    """Clamp negative unlinked Principled BSDF inputs to zero on all
    non-body mesh materials, and sanitize two known-bad defaults
    (Metallic == 1.0 -> 0.0, Specular == 2.0 -> 0.2).
    """
    pbsdf = "Principled BSDF"
    for dobj in Util.myccobjs():
        # The body object has its own dedicated shader setup; skip it.
        if dobj.type != "MESH" or dobj == Global.getBody():
            continue
        for slot in dobj.material_slots:
            mat = bpy.data.materials.get(slot.name)
            if mat is None or mat.node_tree is None:
                continue
            mat_nodes = mat.node_tree.nodes
            for mat_node in mat_nodes:
                if pbsdf not in mat_node.name:
                    continue
                for node_input in mat_nodes[pbsdf].inputs:
                    # Only sanitize sockets with no incoming link.
                    if len(node_input.links) != 0:
                        continue
                    if type(node_input.default_value) is float:
                        if node_input.default_value < 0:
                            node_input.default_value = 0.0
                        if (
                            node_input.name == "Metallic"
                            and node_input.default_value == 1.0
                        ):
                            node_input.default_value = 0.0
                        if (
                            node_input.name == "Specular"
                            and node_input.default_value == 2.0
                        ):
                            node_input.default_value = 0.2
                    elif type(node_input.default_value) is list:
                        # BUG FIX: the original compared the whole list to 0
                        # (TypeError in Python 3) and would have replaced the
                        # list with a scalar; clamp each component in place.
                        for idx, component in enumerate(node_input.default_value):
                            if type(component) is float and component < 0:
                                node_input.default_value[idx] = 0.0
def adjust_material(kind, inc_value, isEye):
    """Nudge one parameter of the shared skin or eye shader group by *inc_value*.

    *kind* is a dotted name such as "Base Color.Hue" that is looked up in the
    tables below; each row is [kind, node "-N" name suffix, input socket index].
    Eye adjustments go to the "EyeDry" group, skin adjustments to
    "IrayUberSkin". Skin rows 9-15 are additionally rescaled by the scene size
    (and row-specific factors) before being added to the socket's current
    default_value. Row 15 updates a 3-component socket with weighted deltas.
    """
    # [kind, node suffix, socket index] for the IrayUberSkin group.
    skincombi = [
        ["Base Color.Hue", 11, 0],
        ["Base Color.Saturation", 11, 1],
        ["Base Color.Value", 11, 2],
        ["Base Color.Bright", 8, 1],
        ["Base Color.Contrast", 8, 2],
        ["Specular", 9, 1],
        ["Roughness", 10, 1],
        ["Roughness.Contrast", 9, 2],
        ["Specular.Contrast", 10, 2],
        ["Subsurface.Scale", 14, 1],
        ["Subsurface.Scale", 13, 1],
        ["Normal.Strength", 5, 0],
        ["Bump.Strength", 6, 0],
        ["Bump.Distance", 6, 1],
        ["Displacement.Height", 4, 2],  # 14
        ["Subsurface.Scale", 2, 2],
        ["Subsurface.Scale", 2, 1],
    ]
    # [kind, node suffix, socket index] for the EyeDry group.
    eyecombi = [
        ["Base Color.Bright", 1, 1],
        ["Base Color.Contrast", 1, 2],
        ["Normal.Strength", 3, 0],
        ["Bump.Strength", 4, 0],
        ["Bump.Distance", 4, 1],
        ["Base Color.Hue", 6, 0],
        ["Base Color.Saturation", 6, 1],
        ["Base Color.Value", 6, 2],
    ]
    flg_skin = False
    if isEye:
        tree = getGroupNodeTree("EyeDry")
        tbls = eyecombi
    else:
        tree = getGroupNodeTree("IrayUberSkin")
        tbls = skincombi
        flg_skin = True
    if tree is None:
        return
    nds = tree.nodes
    for tidx, tbl in enumerate(tbls):
        if tbl[0] == kind:
            # Resolve the node whose name ends with "-<suffix>".
            t1 = getNidx(int(tbl[1]), nds)
            dv = nds[t1].inputs[tbl[2]].default_value
            cg = 1.0
            if flg_skin:
                # Scale distance-like parameters by the scene size (cm).
                if tidx > 8 and tidx < 16:
                    cg = cg * Global.getSize() * 0.01
                if tidx == 9:
                    cg = cg * 3
                elif tidx == 10:
                    cg = cg * 0.5
                elif tidx == 16:
                    cg = cg * 0.2
                # elif tidx==14:
                #     cg = cg * 12
                # elif tidx>=11 or tidx<=13:
                #     cg = cg * 8
            cg = cg * inc_value
            if tidx == 15:
                # 3-component subsurface radius: weighted per-channel deltas.
                dv[0] += cg * 10
                dv[1] += cg * 2
                dv[2] += cg
            else:
                dv += cg
            nds[t1].inputs[tbl[2]].default_value = dv
def getNidx(idx, nodes):
    """Return the position of the node whose name ends with "-<idx>".

    Falls back to *idx* itself when no node name carries that suffix.
    """
    suffix = "-" + str(idx)
    for position, node in enumerate(nodes):
        if node.name.endswith(suffix):
            return position
    return idx
# endregion top-level methods
class DtbShaders:
    """Builds Blender node-based materials for a DAZ import.

    Consumes the material list from the DTU scene description, indexes it by
    asset/material name, loads the bundled shader node groups from
    link_library.blend, and rewires each object's materials to use them for
    both Cycles and EEVEE.
    """

    def __init__(self, dtu):
        # dtu: DTU scene-description reader providing get_materials_list().
        self.material_list = dtu.get_materials_list()
        # {asset name/label: {material name: material info dict}}
        self.mat_data_dict = {}
        # Property dict of the material currently being processed.
        self.mat_property_dict = {}
        self.node_groups = []
        # Flags describing the texture/material currently being processed.
        self.is_Diffuse = False
        self.is_Refract = False
        self.is_Alpha = False

    # TODO: Find a better way to create the dict
    def make_dct(self):
        """Index material_list into mat_data_dict, keyed by both the asset's
        Name and Label so later lookups succeed whichever one the object uses."""
        mat_info_list = self.material_list
        for mat_info in mat_info_list:
            if mat_info["Asset Name"] == mat_info["Asset Label"]:
                if mat_info["Asset Name"] in self.mat_data_dict.keys():
                    self.mat_data_dict[mat_info["Asset Name"]][
                        mat_info["Material Name"]
                    ] = mat_info
                else:
                    self.mat_data_dict[mat_info["Asset Name"]] = {}
                    self.mat_data_dict[mat_info["Asset Name"]][
                        mat_info["Material Name"]
                    ] = mat_info
            elif mat_info["Asset Name"] != mat_info["Asset Label"]:
                if mat_info["Asset Name"] not in self.mat_data_dict.keys():
                    self.mat_data_dict[mat_info["Asset Name"]] = {}
                self.mat_data_dict[mat_info["Asset Name"]][
                    mat_info["Material Name"]
                ] = mat_info
                if mat_info["Asset Name"] in self.mat_data_dict.keys():
                    if (
                        mat_info["Material Name"]
                        not in self.mat_data_dict[mat_info["Asset Name"]]
                    ):
                        self.mat_data_dict[mat_info["Asset Name"]][
                            mat_info["Material Name"]
                        ] = mat_info
                if mat_info["Asset Label"] in self.mat_data_dict.keys():
                    self.mat_data_dict[mat_info["Asset Label"]][
                        mat_info["Material Name"]
                    ] = mat_info
                if mat_info["Asset Label"] not in self.mat_data_dict.keys():
                    self.mat_data_dict[mat_info["Asset Label"]] = {}
                    self.mat_data_dict[mat_info["Asset Label"]][
                        mat_info["Material Name"]
                    ] = mat_info

    def load_shader_nodes(self):
        """Append the shader node groups shipped next to this addon."""
        file_path = os.path.join("dependencies", "link_library.blend")
        file_dir = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(file_dir, file_path)
        # load node_groups from link_library.blend file
        with bpy.data.libraries.load(file_path) as (data_from, data_to):
            # Only load when the groups are not already present in the file.
            if len(bpy.data.node_groups) != len(data_from.node_groups):
                self.node_groups = data_from.node_groups
                data_to.node_groups = data_from.node_groups

    def get_mat_properties(self, mat_data):
        """Index a material's property list by both Name and Label."""
        self.mat_property_dict = {}
        # To deal with material names sometimes being undescriptive.
        for mat_property in mat_data["Properties"]:
            self.mat_property_dict[mat_property["Name"]] = mat_property
            self.mat_property_dict[mat_property["Label"]] = mat_property
        return self.mat_property_dict

    def get_mat_type(self, material):
        """Map a DAZ material record to the name of one of the imported
        shader node groups (eye wet/dry, skin, hair, generic uber, ...)."""
        material_name = material["Material Name"]
        material_type = material["Material Type"]
        object_type = material["Value"]
        if material_name in [
            "Cornea",
            "EyeMoisture",
            "EyeMoisture.00",
            "EylsMoisture",
            "Tear",
        ]:
            return "EyeWet"
        elif material_name in ["Pupils", "Trises", "Sclera"]:
            return "EyeDry"
        elif "Eyelashes" in object_type:
            return "Eyelashes"
        elif material_type == "Iray Uber":
            # Characters get the skin variant of the Iray Uber group.
            if object_type == "Actor/Character":
                return "IrayUberSkin"
            else:
                return "IrayUber"
        elif material_type == "AoA_Subsurface":
            return "AoA_Subsurface"
        elif material_type == "omUberSurface":
            return "omUberSurface"
        elif material_type == "PBRSkin":
            return "IrayUberSkin"
        elif ("Hair" in material_type) or ("Hair" in object_type):
            return "IrayUber"
        elif material_type == "DAZ Studio Default":
            return "DAZ Studio Default"
        else:
            return "DefaultMaterial"

    def optimize_materials(self, mat_slot):
        """Merge this slot's material into an existing identically-named one.

        Returns True when the slot was re-pointed at an existing material and
        the duplicate removed; otherwise returns None (falsy).
        """
        mat = mat_slot.material
        if "Genesis" in mat["Asset Name"]:
            mat_name = mat["Asset Label"] + "_" + mat["Material Name"]
        else:
            mat_name = mat["Asset Name"] + "_" + mat["Material Name"]
        if mat_name not in bpy.data.materials:
            if mat["Asset Name"] != mat["Asset Label"]:
                mat.name = mat["Asset Name"] + "_" + mat["Material Name"]
                return
            else:
                return
        material = bpy.data.materials[mat_name]
        if mat_name != mat.name:
            if mat["Asset Name"] == material["Asset Name"]:
                mat_slot.material = material
                bpy.data.materials.remove(mat)
                return True

    # TODO: Check for all Color Maps
    def check_map_type(self, property_key):
        """Set is_Diffuse / is_Alpha flags from the property being attached."""
        if "Diffuse" in property_key:
            self.is_Diffuse = True
        else:
            self.is_Diffuse = False
        if "Opacity" in property_key:
            self.is_Alpha = True
        else:
            self.is_Alpha = False

    def check_refract(self):
        """Flag the current material as refractive when Refraction Weight > 0."""
        if "Refraction Weight" in self.mat_property_dict.keys():
            if self.mat_property_dict["Refraction Weight"]["Value"] > 0:
                self.is_Refract = True

    def set_eevee_alpha(self, mat):
        """Enable hashed alpha blending in EEVEE for transparent materials."""
        if self.is_Alpha:
            Versions.eevee_alpha(mat, "HASHED", 0)
        else:
            # Known always-transparent materials get alpha regardless.
            mat_name = mat["Material Name"]
            if mat_name in [
                "Cornea",
                "EyeMoisture",
                "EylsMoisture",
                "Tear",
                "Eyelashes",
                "Glass",
            ]:
                Versions.eevee_alpha(mat, "HASHED", 0)

    def set_eevee_refract(self, mat):
        """Enable EEVEE screen-space refraction for refractive materials.

        NOTE(review): uses Global.get_size() here while other code calls
        Global.getSize() — confirm both exist on the Global module.
        """
        if self.is_Refract:
            mat.use_screen_refraction = True
            mat.refraction_depth = 0.8 * Global.get_size()

    def find_node_property(self, input_key, mat_property_dict):
        """Split "Name: Type" socket keys and look up the matching value.

        Returns (property_key, property_type, property_info) where
        property_type is "Value" or "Texture".
        """
        property_key, property_type = input_key.split(": ")
        property_info = mat_property_dict[property_key][property_type]
        return property_key, property_type, property_info

    def create_texture_input(self, tex_path, tex_image_node):
        """Load the image at *tex_path* into a texture node; non-diffuse maps
        are switched to non-color space."""
        tex_path = os.path.abspath(tex_path)
        tex_image = bpy.data.images.load(filepath=tex_path)
        tex_image_node.image = tex_image
        if not self.is_Diffuse:
            Versions.to_color_space_non(tex_image_node)

    def convert_color(self, color, shader_node):
        """Convert a "#rrggbb" hex string to a linear RGBA list.

        NOTE(review): *shader_node* is accepted but unused.
        """
        color_hex = color.lstrip("#")
        color_rgb = hex_to_col(color_hex)
        color_rgb.append(1)  # alpha
        return color_rgb

    def setup_materials(self, obj):
        """Rebuild every material on *obj* around the imported shader groups.

        For each slot: look up the DAZ material record, copy it onto the
        material as custom properties, rename/merge duplicates, replace the
        node tree with Cycles+EEVEE outputs driving a shader group, attach all
        matching value/texture inputs, and set alpha/refraction/displacement.
        """
        for mat_slot in obj.material_slots:
            mat = mat_slot.material
            mat_name = mat.name
            obj_name = obj.name.replace(".Shape", "")
            obj_name = obj_name.split(".")[0]
            if mat is None:
                # Get or create a new material when slot is missing material
                mat = bpy.data.materials.get(mat_slot.name) or bpy.data.materials.new(
                    name=mat_slot.name
                )
                mat_slot.material = mat
            if obj_name not in self.mat_data_dict.keys():
                continue
            if mat_name not in self.mat_data_dict[obj_name].keys():
                # Retry without Blender's ".001"-style suffix.
                mat_name = mat.name.split(".")[0]
                if mat_name not in self.mat_data_dict[obj_name].keys():
                    continue
            mat_data = self.mat_data_dict[obj_name][mat_name]
            self.mat_property_dict = self.get_mat_properties(mat_data)

            # Set Custom Properties
            for key in mat_data:
                if not key == "Properties":
                    mat[key] = mat_data[key]

            # Update Name
            new_name = mat["Asset Label"] + "_" + mat["Material Name"]
            if bpy.context.window_manager.combine_materials:
                # To Deal with a duplicate being converted first.
                if new_name in bpy.data.materials:
                    mat_slot.material = bpy.data.materials[new_name]
                    bpy.data.materials.remove(mat)
                    continue
                mat.name = new_name
                mat_name = mat.name
                # To Deal with duplications
                if self.optimize_materials(mat_slot):
                    continue

            mat.use_nodes = True
            mat_nodes = mat.node_tree.nodes
            mat_links = mat.node_tree.links

            # Remove all the nodes from the material
            for mat_node in mat_nodes:
                mat_nodes.remove(mat_node)

            # Create material output nodes and set corresponding targets
            out_node_cy = mat_nodes.new(type="ShaderNodeOutputMaterial")
            out_node_cy.target = "CYCLES"
            out_node_ev = mat_nodes.new(type="ShaderNodeOutputMaterial")
            out_node_ev.target = "EEVEE"

            # Create shader node and set links
            shader_node = mat_nodes.new(type="ShaderNodeGroup")
            node_group = self.get_mat_type(mat)
            shader_node.node_tree = bpy.data.node_groups[node_group]

            # Link corresponding nodes in the material
            render_output = None
            surface_input = out_node_cy.inputs["Surface"]
            render_output = shader_node.outputs["Cycles"]
            mat_links.new(render_output, surface_input)
            mat_links.new(shader_node.outputs["EEVEE"], out_node_ev.inputs["Surface"])

            # Find and Attach Node Input
            for input_key in shader_node.inputs.keys():
                if ("Texture" in input_key) or ("Value" in input_key):
                    # To deal with Gen 8.1 Not Share the Same info as Gen 8 "temp"
                    if input_key.split(": ")[0] in self.mat_property_dict.keys():
                        (
                            property_key,
                            property_type,
                            property_info,
                        ) = self.find_node_property(input_key, self.mat_property_dict)
                        if property_type == "Value":
                            # Check if Info is a Hex Color
                            if isinstance(property_info, str):
                                property_info = self.convert_color(
                                    property_info, shader_node
                                )
                            # Normal-map strength sockets expect a scalar.
                            if input_key == "Normal Map: Value":
                                if isinstance(property_info, list):
                                    property_info = 1
                            shader_node.inputs[input_key].default_value = property_info
                        if property_type == "Texture":
                            if os.path.exists(property_info):
                                self.check_map_type(property_key)
                                tex_image_node = mat_nodes.new(
                                    type="ShaderNodeTexImage"
                                )
                                self.create_texture_input(property_info, tex_image_node)
                                tex_node_output = tex_image_node.outputs["Color"]
                                mat_links.new(
                                    tex_node_output, shader_node.inputs[input_key]
                                )

            # Set Alpha Modes
            self.check_refract()
            self.set_eevee_refract(mat)
            self.set_eevee_alpha(mat)

            # Set the cycles displacement method
            if node_group == "IrayUberSkin":
                mat_links.new(
                    shader_node.outputs["Displacement"],
                    out_node_cy.inputs["Displacement"],
                )
                mat.cycles.displacement_method = "BOTH"
            else:
                mat.cycles.displacement_method = "BUMP"

            if mat_nodes is not None:
                NodeArrange.toNodeArrange(mat_nodes)
| 37.067511 | 88 | 0.520319 |
389234dc0c55ab79d07e2f8e2c6f53c892845571 | 4,606 | py | Python | great_expectations/datasource/sqlalchemy_source.py | orenovadia/great_expectations | 76ef0c4e066227f8b589a1ee6ac885618f65906e | [
"Apache-2.0"
] | null | null | null | great_expectations/datasource/sqlalchemy_source.py | orenovadia/great_expectations | 76ef0c4e066227f8b589a1ee6ac885618f65906e | [
"Apache-2.0"
] | null | null | null | great_expectations/datasource/sqlalchemy_source.py | orenovadia/great_expectations | 76ef0c4e066227f8b589a1ee6ac885618f65906e | [
"Apache-2.0"
] | 1 | 2022-02-10T04:20:37.000Z | 2022-02-10T04:20:37.000Z | import time
import logging
from string import Template
from .datasource import Datasource
from great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyDataset
from .generator.query_generator import QueryGenerator
logger = logging.getLogger(__name__)
try:
import sqlalchemy
from sqlalchemy import create_engine, MetaData
except ImportError:
sqlalchemy = None
create_engine = None
MetaData = None
logger.debug("Unable to import sqlalchemy.")
class SqlAlchemyDatasource(Datasource):
    """
    A SqlAlchemyDatasource will provide data_assets converting batch_kwargs using the following rules:
      - if the batch_kwargs include a table key, the datasource will provide a dataset object connected
        to that table
      - if the batch_kwargs include a query key, the datasource will create a temporary table using that
        that query. The query can be parameterized according to the standard python Template engine, which
        uses $parameter, with additional kwargs passed to the get_batch method.
    """

    def __init__(self, name="default", data_context=None, profile=None, generators=None, **kwargs):
        # Default to a single query-based generator when none are configured.
        if generators is None:
            generators = {
                "default": {"type": "queries"}
            }
        super(SqlAlchemyDatasource, self).__init__(name,
                                                   type_="sqlalchemy",
                                                   data_context=data_context,
                                                   generators=generators)
        if profile is not None:
            self._datasource_config.update({
                "profile": profile
            })

        # Connection resolution order:
        # explicit engine > connection_string > url > credentials kwargs.
        # if an engine was provided, use that
        if "engine" in kwargs:
            self.engine = kwargs.pop("engine")
        # if a connection string or url was provided, use that
        elif "connection_string" in kwargs:
            connection_string = kwargs.pop("connection_string")
            self.engine = create_engine(connection_string, **kwargs)
        elif "url" in kwargs:
            url = kwargs.pop("url")
            self.engine = create_engine(url, **kwargs)
        # Otherwise, connect using remaining kwargs
        else:
            self._connect(self._get_sqlalchemy_connection_options(**kwargs))

        self._build_generators()

    def _get_sqlalchemy_connection_options(self, **kwargs):
        """Build a sqlalchemy URL from stored profile credentials merged with
        connection-time kwargs (kwargs win). Expects a 'drivername' key."""
        if "profile" in self._datasource_config:
            profile = self._datasource_config["profile"]
            credentials = self.data_context.get_profile_credentials(profile)
        else:
            credentials = {}

        # Update credentials with anything passed during connection time
        credentials.update(dict(**kwargs))
        drivername = credentials.pop("drivername")
        options = sqlalchemy.engine.url.URL(drivername, **credentials)
        return options

    def _connect(self, options):
        """Create the engine (and a MetaData holder) from a sqlalchemy URL."""
        self.engine = create_engine(options)
        self.meta = MetaData()

    def _get_generator_class(self, type_):
        """Resolve a generator type name; only "queries" is supported."""
        if type_ == "queries":
            return QueryGenerator
        else:
            raise ValueError("Unrecognized DataAssetGenerator type %s" % type_)

    def _get_data_asset(self, batch_kwargs, expectation_suite, schema=None, **kwargs):
        """Build a SqlAlchemyDataset from batch_kwargs.

        'table' connects directly to the named table; 'query' renders the SQL
        through string.Template with **kwargs before use. Exactly one of the
        two keys must be present.
        """
        if "table" in batch_kwargs:
            return SqlAlchemyDataset(table_name=batch_kwargs["table"],
                                     engine=self.engine,
                                     schema=schema,
                                     data_context=self._data_context,
                                     expectation_suite=expectation_suite,
                                     batch_kwargs=batch_kwargs)

        elif "query" in batch_kwargs:
            # safe_substitute leaves unknown $placeholders intact.
            query = Template(batch_kwargs["query"]).safe_substitute(**kwargs)
            return SqlAlchemyDataset(custom_sql=query,
                                     engine=self.engine,
                                     data_context=self._data_context,
                                     expectation_suite=expectation_suite,
                                     batch_kwargs=batch_kwargs)

        else:
            raise ValueError("Invalid batch_kwargs: exactly one of 'table' or 'query' must be specified")

    def build_batch_kwargs(self, *args, **kwargs):
        """Magically build batch_kwargs by guessing that the first non-keyword argument is a table name"""
        if len(args) > 0:
            kwargs.update({
                "table": args[0],
                "timestamp": time.time()
            })
        return kwargs
ae99e20593556be3d5926a809b2bcf6e04a57894 | 2,148 | py | Python | zulip_bots/zulip_bots/bots/dialogflow/dialogflow.py | benjaoming/python-zulip-api | d46935218022d82fed262fb485e112caa1aefd11 | [
"Apache-2.0"
] | 1 | 2020-06-17T06:47:15.000Z | 2020-06-17T06:47:15.000Z | zulip_bots/zulip_bots/bots/dialogflow/dialogflow.py | benjaoming/python-zulip-api | d46935218022d82fed262fb485e112caa1aefd11 | [
"Apache-2.0"
] | 7 | 2017-10-05T07:43:32.000Z | 2017-10-14T06:56:47.000Z | zulip_bots/zulip_bots/bots/dialogflow/dialogflow.py | benjaoming/python-zulip-api | d46935218022d82fed262fb485e112caa1aefd11 | [
"Apache-2.0"
] | 1 | 2020-08-25T19:25:25.000Z | 2020-08-25T19:25:25.000Z | # See readme.md for instructions on running this code.
import logging
import json
import apiai
from typing import Any, Dict
help_message = '''DialogFlow bot
This bot will interact with dialogflow bots.
Simply send this bot a message, and it will respond depending on the configured bot's behaviour.
'''
def get_bot_result(message_content: str, config: Dict[str, str], sender_id: str) -> str:
    """Send *message_content* to the configured DialogFlow agent and return
    the reply text.

    Empty messages and "help" return the configured bot_info blurb. Any
    failure is reported as a human-readable "Error ..." string; this function
    never raises.
    """
    if message_content.strip() == '' or message_content.strip() == 'help':
        return config['bot_info']
    ai = apiai.ApiAI(config['key'])
    try:
        request = ai.text_request()
        request.session_id = sender_id  # per-sender conversation context
        request.query = message_content
        response = request.getresponse()
        res_str = response.read().decode('utf8', 'ignore')
        res_json = json.loads(res_str)
        if res_json['status']['errorType'] != 'success' and 'result' not in res_json.keys():
            return 'Error {}: {}.'.format(res_json['status']['code'], res_json['status']['errorDetails'])
        if res_json['result']['fulfillment']['speech'] == '':
            # Fall back to the alternate result when the primary has no speech.
            if 'alternateResult' in res_json.keys():
                if res_json['alternateResult']['fulfillment']['speech'] != '':
                    return res_json['alternateResult']['fulfillment']['speech']
            return 'Error. No result.'
        return res_json['result']['fulfillment']['speech']
    except Exception as e:
        # Boundary handler: report any failure to the user instead of
        # crashing the bot process.
        logging.exception(str(e))
        return 'Error. {}.'.format(str(e))
class DialogFlowHandler:
    '''
    This plugin allows users to easily add their own
    DialogFlow bots to zulip
    '''

    def initialize(self, bot_handler: Any) -> None:
        # Reads the [dialogflow] section (API key, bot_info) from the bot config.
        self.config_info = bot_handler.get_config_info('dialogflow')

    def usage(self) -> str:
        return '''
        This plugin will allow users to easily add their own
        DialogFlow bots to zulip
        '''

    def handle_message(self, message: Dict[str, str], bot_handler: Any) -> None:
        # Delegate to DialogFlow and echo its answer back to the sender.
        result = get_bot_result(message['content'], self.config_info, message['sender_id'])
        bot_handler.send_reply(message, result)
handler_class = DialogFlowHandler
| 37.684211 | 105 | 0.643389 |
2a4167042c51400c37ba748360240d7cc4342a5b | 13,477 | py | Python | electrumsys/logging.py | syscoin/electrum | ac576539c758640ff87ed6be1311eb57b57d205f | [
"MIT"
] | 1 | 2019-06-26T16:51:43.000Z | 2019-06-26T16:51:43.000Z | electrumsys/logging.py | syscoin/electrumsys | ac576539c758640ff87ed6be1311eb57b57d205f | [
"MIT"
] | null | null | null | electrumsys/logging.py | syscoin/electrumsys | ac576539c758640ff87ed6be1311eb57b57d205f | [
"MIT"
] | 1 | 2018-09-10T21:43:02.000Z | 2018-09-10T21:43:02.000Z | # Copyright (C) 2019 The ElectrumSys developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import logging
import logging.handlers
import datetime
import sys
import pathlib
import os
import platform
from typing import Optional
import copy
import subprocess
class LogFormatterForFiles(logging.Formatter):
    """Formatter for on-disk logs: ISO 8601 UTC timestamps plus shortened
    logger names."""

    def formatTime(self, record, datefmt=None):
        """Render record.created as an ISO 8601 UTC timestamp
        (default format "%Y%m%dT%H%M%S.%fZ")."""
        moment = datetime.datetime.fromtimestamp(record.created)
        moment_utc = moment.astimezone(datetime.timezone.utc)
        return moment_utc.strftime(datefmt or "%Y%m%dT%H%M%S.%fZ")

    def format(self, record):
        """Shorten the logger name before delegating to the stock formatter."""
        return super().format(_shorten_name_of_logrecord(record))
file_formatter = LogFormatterForFiles(fmt="%(asctime)22s | %(levelname)8s | %(name)s | %(message)s")
class LogFormatterForConsole(logging.Formatter):
    """Console formatter: shortened logger names plus an optional
    "/shortcut" tag taken from record.custom_shortcut."""

    def format(self, record):
        rendered = super().format(_shorten_name_of_logrecord(record))
        shortcut = getattr(record, 'custom_shortcut', None)
        if shortcut:
            # Insert "/<shortcut>" right after the one-letter level initial.
            rendered = rendered[:1] + f"/{shortcut}" + rendered[1:]
        return rendered
# try to make console log lines short... no timestamp, short levelname, no "electrumsys."
console_formatter = LogFormatterForConsole(fmt="%(levelname).1s | %(name)s | %(message)s")
def _shorten_name_of_logrecord(record: logging.LogRecord) -> logging.LogRecord:
record = copy.copy(record) # avoid mutating arg
# strip the main module name from the logger name
if record.name.startswith("electrumsys."):
record.name = record.name[9:]
# manual map to shorten common module names
record.name = record.name.replace("interface.Interface", "interface", 1)
record.name = record.name.replace("network.Network", "network", 1)
record.name = record.name.replace("synchronizer.Synchronizer", "synchronizer", 1)
record.name = record.name.replace("verifier.SPV", "verifier", 1)
record.name = record.name.replace("gui.qt.main_window.ElectrumSysWindow", "gui.qt.main_window", 1)
return record
class TruncatingMemoryHandler(logging.handlers.MemoryHandler):
    """An in-memory log handler that only keeps the first N log messages
    and discards the rest.

    Used to buffer records emitted before the real handlers (stderr/file)
    exist; dump_to_target() replays the buffer into such a handler later.
    """
    target: Optional['logging.Handler']

    def __init__(self):
        logging.handlers.MemoryHandler.__init__(
            self,
            capacity=1,  # note: this is the flushing frequency, ~unused by us
            flushLevel=logging.DEBUG,
        )
        self.max_size = 100  # max num of messages we keep
        self.num_messages_seen = 0  # counts every record, including dropped ones
        self.__never_dumped = True

    # note: this flush implementation *keeps* the buffer as-is, instead of clearing it
    def flush(self):
        self.acquire()
        try:
            if self.target:
                for record in self.buffer:
                    # honour the target handler's own level threshold
                    if record.levelno >= self.target.level:
                        self.target.handle(record)
        finally:
            self.release()

    def dump_to_target(self, target: 'logging.Handler'):
        """Replay the whole buffer into *target*, then detach it again."""
        self.acquire()
        try:
            self.setTarget(target)
            self.flush()
            self.setTarget(None)
        finally:
            self.__never_dumped = False
            self.release()

    def emit(self, record):
        # Count every record but store at most max_size of them.
        self.num_messages_seen += 1
        if len(self.buffer) < self.max_size:
            super().emit(record)

    def close(self) -> None:
        # Check if captured log lines were never to dumped to e.g. stderr,
        # and if so, try to do it now. This is useful e.g. in case of sys.exit().
        if self.__never_dumped:
            _configure_stderr_logging()
        super().close()
def _delete_old_logs(path, keep=10):
files = sorted(list(pathlib.Path(path).glob("electrumsys_log_*.log")), reverse=True)
for f in files[keep:]:
try:
os.remove(str(f))
except OSError as e:
_logger.warning(f"cannot delete old logfile: {e}")
_logfile_path = None  # pathlib.Path of the current log file; set once below


def _configure_file_logging(log_directory: pathlib.Path):
    """Start logging to a new timestamped file inside *log_directory*.

    Creates the directory if needed, prunes old log files, attaches a DEBUG
    file handler to the root logger, and replays any buffered startup logs
    into it.  Must only be called once per process.
    """
    global _logfile_path
    assert _logfile_path is None, 'file logging already initialized'
    log_directory.mkdir(exist_ok=True)
    _delete_old_logs(log_directory)
    # NOTE(review): utcnow() is naive (and deprecated in recent Pythons); it is
    # only used here to build a unique filename, so that is harmless.
    timestamp = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
    PID = os.getpid()
    _logfile_path = log_directory / f"electrumsys_log_{timestamp}_{PID}.log"
    file_handler = logging.FileHandler(_logfile_path, encoding='utf-8')
    file_handler.setFormatter(file_formatter)
    file_handler.setLevel(logging.DEBUG)
    root_logger.addHandler(file_handler)
    if _inmemory_startup_logs:
        # replay the records captured before any real handler existed
        _inmemory_startup_logs.dump_to_target(file_handler)
console_stderr_handler = None  # singleton StreamHandler; set once below


def _configure_stderr_logging(*, verbosity=None, verbosity_shortcuts=None):
    """Attach the stderr handler to the root logger (idempotent).

    With no verbosity arguments only WARNING and above are shown; otherwise
    the handler runs at DEBUG and the verbosity spec / shortcut filters
    decide what gets through.  Buffered startup logs are replayed at the end.
    """
    # log to stderr; by default only WARNING and higher
    global console_stderr_handler
    if console_stderr_handler is not None:
        _logger.warning("stderr handler already exists")
        return
    console_stderr_handler = logging.StreamHandler(sys.stderr)
    console_stderr_handler.setFormatter(console_formatter)
    if not verbosity and not verbosity_shortcuts:
        console_stderr_handler.setLevel(logging.WARNING)
        root_logger.addHandler(console_stderr_handler)
    else:
        console_stderr_handler.setLevel(logging.DEBUG)
        root_logger.addHandler(console_stderr_handler)
        _process_verbosity_log_levels(verbosity)
        _process_verbosity_filter_shortcuts(verbosity_shortcuts, handler=console_stderr_handler)
    if _inmemory_startup_logs:
        _inmemory_startup_logs.dump_to_target(console_stderr_handler)
def _process_verbosity_log_levels(verbosity):
    """Apply per-logger level overrides from a comma-separated spec string."""
    if verbosity == '*' or not isinstance(verbosity, str):
        return
    # example verbosity:
    #   debug,network=error,interface=error   // effectively blacklists network and interface
    #   warning,network=debug,interface=debug // effectively whitelists network and interface
    for filt in verbosity.split(','):
        if not filt:
            continue
        items = filt.split('=')
        if len(items) == 1:
            # bare level applies to the whole library logger
            electrumsys_logger.setLevel(items[0].upper())
        elif len(items) == 2:
            logger_name, level = items
            get_logger(logger_name).setLevel(level.upper())
        else:
            raise Exception(f"invalid log filter: {filt}")
def _process_verbosity_filter_shortcuts(verbosity_shortcuts, *, handler: 'logging.Handler'):
    """Install a shortcut-based white/blacklist filter on *handler*."""
    if not isinstance(verbosity_shortcuts, str) or not verbosity_shortcuts:
        return
    # depending on first character being '^', either blacklist or whitelist
    is_blacklist = verbosity_shortcuts.startswith('^')
    filters = verbosity_shortcuts[1:] if is_blacklist else verbosity_shortcuts
    filt = ShortcutFilteringFilter(is_blacklist=is_blacklist, filters=filters)
    # apply filter directly (and only!) on stderr handler
    # note that applying on one of the root loggers directly would not work,
    # see https://docs.python.org/3/howto/logging.html#logging-flow
    handler.addFilter(filt)
class ShortcutInjectingFilter(logging.Filter):
    """Stamps every record with a fixed ``custom_shortcut`` attribute."""

    def __init__(self, *, shortcut: Optional[str]):
        super().__init__()
        self._shortcut_value = shortcut

    def filter(self, record):
        record.custom_shortcut = self._shortcut_value
        return True  # never drops a record; this filter only annotates
class ShortcutFilteringFilter(logging.Filter):
    """Filters records by their injected shortcut, as a black- or whitelist."""

    def __init__(self, *, is_blacklist: bool, filters: str):
        super().__init__()
        self.__is_blacklist = is_blacklist
        self.__filters = filters

    def filter(self, record):
        # errors always pass, and so does the logging module itself
        if record.levelno >= logging.ERROR:
            return True
        if record.name == __name__:
            return True
        shortcut = getattr(record, 'custom_shortcut', None)
        if shortcut is None:
            # unmarked records pass a blacklist but fail a whitelist
            return self.__is_blacklist
        matched = shortcut in self.__filters
        return (not matched) if self.__is_blacklist else matched
# enable logs universally (including for other libraries)
root_logger = logging.getLogger()
root_logger.setLevel(logging.WARNING)

# Start collecting log messages now, into an in-memory buffer. This buffer is only
# used until the proper log handlers are fully configured, including their verbosity,
# at which point we will dump its contents into those, and remove this log handler.
# Note: this is set up at import-time instead of e.g. as part of a function that is
# called from run_electrumsys (the main script). This is to have this run as early
# as possible.
# Note: some users might use ElectrumSys as a python library and not use run_electrumsys,
# in which case these logs might never get redirected or cleaned up.
# Also, the python docs recommend libraries not to set a handler, to
# avoid interfering with the user's logging.
_inmemory_startup_logs = None
# the main script is expected to set this sys attribute before importing us — TODO confirm
if getattr(sys, "_ELECTRUMSYS_RUNNING_VIA_RUNELECTRUMSYS", False):
    _inmemory_startup_logs = TruncatingMemoryHandler()
    root_logger.addHandler(_inmemory_startup_logs)

# creates a logger specifically for electrumsys library
electrumsys_logger = logging.getLogger("electrumsys")
electrumsys_logger.setLevel(logging.DEBUG)
# --- External API
def get_logger(name: str) -> logging.Logger:
    """Return a logger namespaced under the library's "electrumsys" logger.

    *name* may be passed with or without the "electrumsys." prefix.
    """
    prefix = "electrumsys."
    if name.startswith(prefix):
        # fixed: name[9:] removed only 9 of the prefix's 12 characters,
        # producing loggers named e.g. "ys.network" instead of "network"
        name = name[len(prefix):]
    return electrumsys_logger.getChild(name)


_logger = get_logger(__name__)
_logger.setLevel(logging.INFO)
class Logger:
    """Mixin that gives each instance a ``self.logger``.

    The logger name combines the class's module path with the optional
    diagnostic_name() suffix, and records can carry a one-character
    shortcut (LOGGING_SHORTCUT) used for console filtering.
    """

    # Single character short "name" for this class.
    # Can be used for filtering log lines. Does not need to be unique.
    LOGGING_SHORTCUT = None  # type: Optional[str]

    def __init__(self):
        self.logger = self.__get_logger_for_obj()

    def __get_logger_for_obj(self) -> logging.Logger:
        cls = self.__class__
        if cls.__module__:
            name = f"{cls.__module__}.{cls.__name__}"
        else:
            name = cls.__name__
        try:
            diag_name = self.diagnostic_name()
        except Exception as e:
            # diagnostic_name() must not rely on state set after Logger.__init__
            raise Exception("diagnostic name not yet available?") from e
        if diag_name:
            name += f".[{diag_name}]"
        logger = get_logger(name)
        if self.LOGGING_SHORTCUT:
            logger.addFilter(ShortcutInjectingFilter(shortcut=self.LOGGING_SHORTCUT))
        return logger

    def diagnostic_name(self):
        """Optional per-instance suffix for the logger name; subclasses override."""
        return ''
def configure_logging(config):
    """Set up stderr (and optionally file) logging from a config object.

    Reads 'verbosity', 'verbosity_shortcuts' and 'log_to_file' from *config*,
    then drains and removes the in-memory startup buffer and logs some
    basic version/platform information.
    """
    verbosity = config.get('verbosity')
    verbosity_shortcuts = config.get('verbosity_shortcuts')
    _configure_stderr_logging(verbosity=verbosity, verbosity_shortcuts=verbosity_shortcuts)

    log_to_file = config.get('log_to_file', False)
    is_android = 'ANDROID_DATA' in os.environ
    if is_android:
        # on Android, debug builds always log to file
        from jnius import autoclass
        build_config = autoclass("org.electrumsys.electrumsys.BuildConfig")
        log_to_file |= bool(build_config.DEBUG)

    if log_to_file:
        log_directory = pathlib.Path(config.path) / "logs"
        _configure_file_logging(log_directory)

    # clean up and delete in-memory logs
    global _inmemory_startup_logs
    if _inmemory_startup_logs:
        num_discarded = _inmemory_startup_logs.num_messages_seen - _inmemory_startup_logs.max_size
        if num_discarded > 0:
            _logger.warning(f"Too many log messages! Some have been discarded. "
                            f"(discarded {num_discarded} messages)")
        _inmemory_startup_logs.close()
        root_logger.removeHandler(_inmemory_startup_logs)
        _inmemory_startup_logs = None

    # if using kivy, avoid kivy's own logs to get printed twice
    logging.getLogger('kivy').propagate = False

    # imported here to avoid circular imports at module load time
    from . import ELECTRUMSYS_VERSION
    from .constants import GIT_REPO_URL
    _logger.info(f"ElectrumSys version: {ELECTRUMSYS_VERSION} - https://electrum.syscoin.org - {GIT_REPO_URL}")
    _logger.info(f"Python version: {sys.version}. On platform: {describe_os_version()}")
    _logger.info(f"Logging to file: {str(_logfile_path)}")
    _logger.info(f"Log filters: verbosity {repr(verbosity)}, verbosity_shortcuts {repr(verbosity_shortcuts)}")
def get_logfile_path() -> Optional[pathlib.Path]:
    """Return the path of the current log file, or None if file logging is disabled."""
    return _logfile_path
def describe_os_version() -> str:
    """Return a human-readable description of the host OS/platform."""
    if 'ANDROID_DATA' not in os.environ:
        return platform.platform()
    from kivy import utils
    if utils.platform != "android":
        return utils.platform
    import jnius
    build_version = jnius.autoclass('android.os.Build$VERSION')
    build = jnius.autoclass('android.os.Build')
    return "Android {} on {} {} ({})".format(
        build_version.RELEASE, build.BRAND, build.DEVICE, build.DISPLAY)
def get_git_version() -> Optional[str]:
    """Return ``git describe`` output for this source checkout, or None."""
    repo_dir = os.path.dirname(os.path.realpath(__file__))
    try:
        out = subprocess.check_output(
            ['git', 'describe', '--always', '--dirty'], cwd=repo_dir)
        return str(out, "utf8").strip()
    except Exception:
        # not a git checkout, or git not installed
        return None
| 36.228495 | 111 | 0.67886 |
81cb076d10538c0bf28f0d2018d6cddf43d7774b | 3,719 | py | Python | src/dbxdeploy/package/PackageInstaller.py | daipe-ai/dbx-deploy | 6fee3578de0189afa45345c930823558f240bb4e | [
"MIT"
] | null | null | null | src/dbxdeploy/package/PackageInstaller.py | daipe-ai/dbx-deploy | 6fee3578de0189afa45345c930823558f240bb4e | [
"MIT"
] | 7 | 2021-04-30T07:20:15.000Z | 2022-01-03T10:21:52.000Z | src/dbxdeploy/package/PackageInstaller.py | daipe-ai/dbx-deploy | 6fee3578de0189afa45345c930823558f240bb4e | [
"MIT"
] | 1 | 2021-12-16T09:06:45.000Z | 2021-12-16T09:06:45.000Z | from typing import List
from urllib.parse import urlparse
from dbxdeploy.package.PackageIndexResolver import PackageIndexResolver
class PackageInstaller:
def __init__(
self,
offline_install: bool,
package_index_resolver: PackageIndexResolver,
):
self.__offline_install = offline_install
self.__package_index_resolver = package_index_resolver
def get_package_install_command(self, package_file_path: str, dependencies_dir_path: str):
if self.__offline_install:
return self.__get_offline_install_command(package_file_path, dependencies_dir_path)
return self.__get_online_install_command(package_file_path)
def is_package_install_command(self, command_code: str):
return command_code.startswith("# %install_master_package_whl")
def __modify_dbfs(self, path: str):
return "/dbfs/" + path.lstrip("dbfs:/")
def __get_install_command(self, package_file_path: str, options: List[str]):
pip_options = " ".join(options)
return (
"# %install_master_package_whl\n"
"import os\n"
"import IPython\n\n"
'if "DAIPE_BOOTSTRAPPED" not in os.environ:\n'
" IPython.get_ipython().run_line_magic"
f'("pip", f"install {self.__modify_dbfs(package_file_path)} {pip_options}") # noqa'
)
def __get_online_install_command(self, package_file_path: str):
options = []
if self.__package_index_resolver.has_default_index():
options.append(f"{self.__get_index_url_part()}")
if self.__package_index_resolver.has_secondary_indexes():
options.append(f"{self.__get_extra_index_url_part()}")
install_command = self.__get_install_command(package_file_path, options)
return install_command
def __get_offline_install_command(self, package_file_path: str, dependencies_dir_path: str):
options = ["--no-index", f"--find-links {self.__modify_dbfs(dependencies_dir_path)}"]
install_command = self.__get_install_command(package_file_path, options)
return install_command
def __get_index_url_part(self):
default_index = self.__package_index_resolver.get_default_index()
default_index_url = default_index["url"]
username_env_var_name = f'DATABRICKS_HTTP_BASIC_{default_index["name"]}_USERNAME'.upper()
password_env_var_name = f'DATABRICKS_HTTP_BASIC_{default_index["name"]}_PASSWORD'.upper()
parsed_url = urlparse(default_index_url)
if "@" not in parsed_url.netloc:
parsed_url = parsed_url._replace(
netloc=f"{{os.getenv('{username_env_var_name}')}}:{{os.getenv('{password_env_var_name}')}}@{parsed_url.netloc}"
)
return f"--index-url {parsed_url.geturl()}"
def __get_extra_index_url_part(self):
extra_indexes = self.__package_index_resolver.get_secondary_indexes()
extra_indexes_to_return = []
for extra_index in extra_indexes:
extra_index_url = extra_index["url"]
username_env_var_name = f'DATABRICKS_HTTP_BASIC_{extra_index["name"]}_USERNAME'.upper()
password_env_var_name = f'DATABRICKS_HTTP_BASIC_{extra_index["name"]}_PASSWORD'.upper()
parsed_url = urlparse(extra_index_url)
if "@" not in parsed_url.netloc:
parsed_url = parsed_url._replace(
netloc=f"{{os.getenv('{username_env_var_name}')}}:{{os.getenv('{password_env_var_name}')}}@{parsed_url.netloc}"
)
extra_indexes_to_return.append(f"--extra-index-url {parsed_url.geturl()}")
return " ".join(extra_indexes_to_return)
| 40.423913 | 131 | 0.686206 |
c7c389be9e7b219ee3e9e2d88c4773257b39419e | 2,248 | py | Python | setup.py | wingify/vwo-sdk-log-messages | 6f403babf0bb9e21d076d70df9973c9a9712de24 | [
"Apache-2.0"
] | null | null | null | setup.py | wingify/vwo-sdk-log-messages | 6f403babf0bb9e21d076d70df9973c9a9712de24 | [
"Apache-2.0"
] | null | null | null | setup.py | wingify/vwo-sdk-log-messages | 6f403babf0bb9e21d076d70df9973c9a9712de24 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
import os
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools import Command
# import subprocess
# current_directory = os.path.join(os.path.dirname(__file__))
# with open(os.path.join(current_directory, "requirements.txt")) as f:
# REQUIREMENTS = f.read().splitlines()
# The README doubles as the long_description shown on PyPI.
with open("README.md", "r") as f:
    long_description = f.read()
class PostDevelopCommand(develop):
    """Post-installation for development mode."""

    def run(self):
        # marker so `pip install -e .` runs are visible in build logs
        print("\nRUNNING POST INSTALL DEVELOP SCRIPT \n")
        develop.run(self)
setup(
    name="vwo-sdk-log-messages",
    version="0.5.0",
    description="Log messages for VWO server-side SDKs",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="VWO",
    author_email="dev@wingify.com",
    url="https://github.com/wingify/vwo-sdk-log-messages",
    license="Apache License 2.0",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    cmdclass={
        "develop": PostDevelopCommand
    },
    # fixed: a missing comma after find_packages(...) made this call a SyntaxError
    packages=find_packages(exclude=["tests"]),
    install_requires=[],
)
| 30.794521 | 74 | 0.684164 |
07e906affd40c12b6e5d468a99e69b10ccde046c | 4,091 | py | Python | packages/python/chart-studio/chart_studio/api/v2/plots.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | packages/python/chart-studio/chart_studio/api/v2/plots.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | packages/python/chart-studio/chart_studio/api/v2/plots.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | """Interface to Plotly's /v2/plots endpoints."""
from __future__ import absolute_import
from chart_studio.api.v2.utils import build_url, make_params, request
RESOURCE = "plots"


def create(body):
    """
    Create a new plot.

    :param (dict) body: A mapping of body param names to values.
    :returns: (requests.Response) Returns response directly from requests.
    """
    return request("post", build_url(RESOURCE), json=body)
def retrieve(fid, share_key=None):
    """
    Retrieve a plot from Plotly.

    :param (str) fid: The `{username}:{idlocal}` identifier. E.g. `foo:88`.
    :param (str) share_key: The secret key granting 'read' access if private.
    :returns: (requests.Response) Returns response directly from requests.
    """
    params = make_params(share_key=share_key)
    return request("get", build_url(RESOURCE, id=fid), params=params)
def content(fid, share_key=None, inline_data=None, map_data=None):
    """
    Retrieve the *figure* for a Plotly plot file.

    :param (str) fid: The `{username}:{idlocal}` identifier. E.g. `foo:88`.
    :param (str) share_key: The secret key granting 'read' access if private.
    :param (bool) inline_data: If True, include the data arrays with the plot.
    :param (str) map_data: Currently only accepts 'unreadable' to return a
                           mapping of grid-fid: grid. This is useful if you
                           want to maintain structure between the plot and
                           referenced grids when you have READ access to the
                           plot, but you don't have READ access to the
                           underlying grids.
    :returns: (requests.Response) Returns response directly from requests.
    """
    params = make_params(share_key=share_key, inline_data=inline_data,
                         map_data=map_data)
    return request("get", build_url(RESOURCE, id=fid, route="content"), params=params)
def update(fid, body):
    """
    Update a plot from Plotly.

    :param (str) fid: The `{username}:{idlocal}` identifier. E.g. `foo:88`.
    :param (dict) body: A mapping of body param names to values.
    :returns: (requests.Response) Returns response directly from requests.
    """
    return request("put", build_url(RESOURCE, id=fid), json=body)
def trash(fid):
    """
    Soft-delete a plot from Plotly. (Can be undone with 'restore').

    :param (str) fid: The `{username}:{idlocal}` identifier. E.g. `foo:88`.
    :returns: (requests.Response) Returns response directly from requests.
    """
    return request("post", build_url(RESOURCE, id=fid, route="trash"))


def restore(fid):
    """
    Restore a trashed plot from Plotly. See 'trash'.

    :param (str) fid: The `{username}:{idlocal}` identifier. E.g. `foo:88`.
    :returns: (requests.Response) Returns response directly from requests.
    """
    return request("post", build_url(RESOURCE, id=fid, route="restore"))
def permanent_delete(fid, params=None):
    """
    Permanently delete a trashed plot file from Plotly. See 'trash'.

    :param (str) fid: The `{username}:{idlocal}` identifier. E.g. `foo:88`.
    :returns: (requests.Response) Returns response directly from requests.
    """
    url = build_url(RESOURCE, id=fid, route="permanent_delete")
    return request("delete", url, params=params)
def lookup(path, parent=None, user=None, exists=None):
    """
    Retrieve a plot file from Plotly without needing a fid.

    :param (str) path: The '/'-delimited path specifying the file location.
    :param (int) parent: Parent id, an integer, which the path is relative to.
    :param (str) user: The username to target files for. Defaults to requestor.
    :param (bool) exists: If True, don't return the full file, just a flag.
    :returns: (requests.Response) Returns response directly from requests.
    """
    query = make_params(path=path, parent=parent, user=user, exists=exists)
    return request("get", build_url(RESOURCE, route="lookup"), params=query)
| 33.809917 | 79 | 0.659985 |
65371165565f5e068b9549cb5d18ebd16a8ac325 | 1,293 | py | Python | config.py | dysfunctionals/sdi-display | cd7ce012de73034752a4efb70298200dd6d42a39 | [
"MIT"
] | null | null | null | config.py | dysfunctionals/sdi-display | cd7ce012de73034752a4efb70298200dd6d42a39 | [
"MIT"
] | 1 | 2020-02-23T11:57:04.000Z | 2020-03-09T11:11:42.000Z | config.py | dysfunctionals/sdi-display | cd7ce012de73034752a4efb70298200dd6d42a39 | [
"MIT"
] | 1 | 2020-02-22T23:58:22.000Z | 2020-02-22T23:58:22.000Z | config = {
"ships": [
{
"img": "ship1_1.png",
"detail_img": "ship1_1.png",
"init_pos": (570, 360),
"init_bearing": 45,
"detail_pos": (1920 - 288 + 18 + 14, 18 + 20),
"controls": [115, 97, 119, 100],
"colour" : "Blue",
},
{
"img": "ship2_1.png",
"detail_img": "ship2_1.png",
"init_pos": (1150, 360),
"init_bearing": 135,
"detail_pos": (1920 - 288 + 18 + 14, 288 + 20),
"controls": [116, 102, 103, 104],
"colour" : "Pink",
},
{
"img": "ship3_1.png",
"detail_img": "ship3_1.png",
"init_pos": (570, 720),
"init_bearing": 315,
"detail_pos": (1920 - 288 + 18 + 14, 558 + 20),
"controls": [105, 106, 107, 108],
"colour" : "Yellow",
},
{
"img": "ship4_1.png",
"detail_img": "ship4_1.png",
"init_pos": (1150, 720),
"init_bearing": 45,
"detail_pos": (1920 - 288 + 18 + 14, 828 + 7),
"controls": [118, 98, 110, 109],
"colour" : "Green",
},
],
"server": "http://35.234.154.91:8080/metrics",
}
| 30.785714 | 59 | 0.396752 |
80520868e3b6dbdc4500a19ef0e7ca4c2c886c8d | 6,168 | py | Python | models/segnet.py | DLWK/CGRNet | a9a65fa192cc9888e7861755313b8b3ac80fa512 | [
"MIT"
] | 1 | 2022-03-29T06:32:34.000Z | 2022-03-29T06:32:34.000Z | models/segnet.py | DLWK/CGRNet | a9a65fa192cc9888e7861755313b8b3ac80fa512 | [
"MIT"
] | null | null | null | models/segnet.py | DLWK/CGRNet | a9a65fa192cc9888e7861755313b8b3ac80fa512 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class SegNet(nn.Module):
    """SegNet-style encoder/decoder for dense (per-pixel) prediction.

    Encoder: five VGG-like conv/BN/ReLU stages, each ending in a max-pool
    that records pooling indices.  Decoder: mirror-image stages that
    max-unpool with the recorded indices, then convolve.

    NOTE(review): all pools/unpools here use kernel_size=2 with stride=1;
    canonical SegNet downsamples with stride=2 — confirm this is intended.

    Args:
        input_nbr: number of input channels.
        label_nbr: number of output channels (classes).
    """
    def __init__(self,input_nbr,label_nbr):
        super(SegNet, self).__init__()

        batchNorm_momentum = 0.1

        # ---- encoder ----
        self.conv11 = nn.Conv2d(input_nbr, 64, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(64, momentum= batchNorm_momentum)
        self.conv12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(64, momentum= batchNorm_momentum)

        self.conv21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(128, momentum= batchNorm_momentum)
        self.conv22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(128, momentum= batchNorm_momentum)

        self.conv31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn31 = nn.BatchNorm2d(256, momentum= batchNorm_momentum)
        self.conv32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32 = nn.BatchNorm2d(256, momentum= batchNorm_momentum)
        self.conv33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33 = nn.BatchNorm2d(256, momentum= batchNorm_momentum)

        self.conv41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.bn41 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)

        self.conv51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53 = nn.BatchNorm2d(512, momentum= batchNorm_momentum)

        # ---- decoder (mirror of the encoder, channel counts reversed) ----
        self.conv53d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv52d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv51d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)

        self.conv43d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv42d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42d = nn.BatchNorm2d(512, momentum= batchNorm_momentum)
        self.conv41d = nn.Conv2d(512, 256, kernel_size=3, padding=1)
        self.bn41d = nn.BatchNorm2d(256, momentum= batchNorm_momentum)

        self.conv33d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33d = nn.BatchNorm2d(256, momentum= batchNorm_momentum)
        self.conv32d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32d = nn.BatchNorm2d(256, momentum= batchNorm_momentum)
        self.conv31d = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.bn31d = nn.BatchNorm2d(128, momentum= batchNorm_momentum)

        self.conv22d = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(128, momentum= batchNorm_momentum)
        self.conv21d = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(64, momentum= batchNorm_momentum)

        self.conv12d = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(64, momentum= batchNorm_momentum)
        self.conv11d = nn.Conv2d(64, label_nbr, kernel_size=3, padding=1)

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Encode *x* through five pooled stages, then decode with the
        recorded pooling indices; returns raw logits (sigmoid is unused)."""

        # Stage 1
        x11 = F.relu(self.bn11(self.conv11(x)))
        x12 = F.relu(self.bn12(self.conv12(x11)))
        x1p, id1 = F.max_pool2d(x12,kernel_size=2, stride=1,return_indices=True)

        # Stage 2
        x21 = F.relu(self.bn21(self.conv21(x1p)))
        x22 = F.relu(self.bn22(self.conv22(x21)))
        x2p, id2 = F.max_pool2d(x22,kernel_size=2, stride=1,return_indices=True)

        # Stage 3
        x31 = F.relu(self.bn31(self.conv31(x2p)))
        x32 = F.relu(self.bn32(self.conv32(x31)))
        x33 = F.relu(self.bn33(self.conv33(x32)))
        x3p, id3 = F.max_pool2d(x33,kernel_size=2, stride=1,return_indices=True)

        # Stage 4
        x41 = F.relu(self.bn41(self.conv41(x3p)))
        x42 = F.relu(self.bn42(self.conv42(x41)))
        x43 = F.relu(self.bn43(self.conv43(x42)))
        x4p, id4 = F.max_pool2d(x43,kernel_size=2, stride=1,return_indices=True)

        # Stage 5
        x51 = F.relu(self.bn51(self.conv51(x4p)))
        x52 = F.relu(self.bn52(self.conv52(x51)))
        x53 = F.relu(self.bn53(self.conv53(x52)))
        x5p, id5 = F.max_pool2d(x53,kernel_size=2, stride=1,return_indices=True)

        # Stage 5d — unpool with the matching encoder indices, then convolve
        x5d = F.max_unpool2d(x5p, id5, kernel_size=2, stride=1)
        x53d = F.relu(self.bn53d(self.conv53d(x5d)))
        x52d = F.relu(self.bn52d(self.conv52d(x53d)))
        x51d = F.relu(self.bn51d(self.conv51d(x52d)))

        # Stage 4d
        x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=1)
        x43d = F.relu(self.bn43d(self.conv43d(x4d)))
        x42d = F.relu(self.bn42d(self.conv42d(x43d)))
        x41d = F.relu(self.bn41d(self.conv41d(x42d)))

        # Stage 3d
        x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=1)
        x33d = F.relu(self.bn33d(self.conv33d(x3d)))
        x32d = F.relu(self.bn32d(self.conv32d(x33d)))
        x31d = F.relu(self.bn31d(self.conv31d(x32d)))

        # Stage 2d
        x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=1)
        x22d = F.relu(self.bn22d(self.conv22d(x2d)))
        x21d = F.relu(self.bn21d(self.conv21d(x22d)))

        # Stage 1d
        x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=1)
        x12d = F.relu(self.bn12d(self.conv12d(x1d)))
        x11d = self.conv11d(x12d)

        # x11d = self.sigmoid(x11d)
        return x11d
303e5546417460f56171f79300b57a4c2d2f611e | 2,724 | py | Python | alg_print_tasks.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | 8 | 2019-03-18T06:37:24.000Z | 2022-01-30T07:50:58.000Z | alg_print_tasks.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | alg_print_tasks.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
from __future__ import division
import numpy as np
from ds_queue import Queue
class Printer(object):
    """Simulated printer that works through one task at a time."""

    def __init__(self, pages_per_minute):
        self.page_rate = pages_per_minute
        self.current_task = None
        self.time_remaining = 0

    def tick(self):
        """Advance the simulation clock by one second."""
        if self.current_task is None:
            return
        self.time_remaining = self.time_remaining - 1
        if self.time_remaining == 0:
            self.current_task = None

    def busy(self):
        """Return True while a task is being printed."""
        return self.current_task is not None

    def start_next(self, new_task):
        """Begin printing *new_task*; duration derives from its page count."""
        self.current_task = new_task
        # pages divided by pages-per-second
        self.time_remaining = (
            new_task.get_pages() / (self.page_rate / 60))
class Task(object):
    """A print task with a creation timestamp and a random page count."""

    def __init__(self, time):
        self.timestamp = time
        # np.random.random_integers() was deprecated (NumPy 1.11) and later
        # removed; randint's upper bound is exclusive, so use 21 for 1..20.
        self.pages = np.random.randint(1, 21)

    def get_stamp(self):
        """Return the second at which the task was created."""
        return self.timestamp

    def get_pages(self):
        """Return the task's page count (1..20 inclusive)."""
        return self.pages

    def wait_time(self, current_time):
        """Return how many seconds the task has been waiting."""
        return current_time - self.timestamp
def create_print_task():
    """Create printing task.

    A printing task is generated per 180 seconds on average
    (probability 1/180 per call).
    """
    # np.random.random_integers() was removed from NumPy; randint's upper
    # bound is exclusive, so 181 draws uniformly from 1..180.
    num = np.random.randint(1, 181)
    if num == 180:
        return True
    else:
        return False
def simulate_print_tasks(num_seconds, pages_per_minute):
    """Simulate *num_seconds* of lab time with one printer; print wait stats."""
    printer = Printer(pages_per_minute)
    print_queue = Queue()
    waiting_times = []

    for current_second in range(num_seconds):  # fixed: xrange is Python 2 only
        if create_print_task():
            task = Task(current_second)
            print_queue.enqueue(task)

        if (not printer.busy()) and (not print_queue.is_empty()):
            next_task = print_queue.dequeue()
            waiting_times.append(
                next_task.wait_time(current_second))
            printer.start_next(next_task)

        printer.tick()

    # fixed: guard against ZeroDivisionError when no task was generated
    if waiting_times:
        avg_wait_seconds = sum(waiting_times) / len(waiting_times)
    else:
        avg_wait_seconds = 0.0
    print('Average wait {0:.2f} secs, {1} tasks remaining.'
          .format(avg_wait_seconds, print_queue.size()))
def main():
    """Run the simulation ten times each for a slow and a fast printer."""
    num_seconds = 3600
    pages_per_minute = 5
    print('num_seconds: {0}, pages_per_minute: {1}'
          .format(num_seconds, pages_per_minute))
    for i in range(10):  # fixed: xrange is Python 2 only
        simulate_print_tasks(num_seconds, pages_per_minute)

    print('===')

    num_seconds = 3600
    pages_per_minute = 10
    print('num_seconds: {0}, pages_per_minute: {1}'
          .format(num_seconds, pages_per_minute))
    for i in range(10):
        simulate_print_tasks(num_seconds, pages_per_minute)


if __name__ == '__main__':
    main()
| 25.942857 | 65 | 0.634728 |
2383b27ddcd02dbb29faf84023cae0a73d883d8c | 807 | py | Python | manage.py | jonmsawyer/jsonbench | 0b51fa7568d7c638ab94fbd38955f3d9b899e63a | [
"MIT"
] | null | null | null | manage.py | jonmsawyer/jsonbench | 0b51fa7568d7c638ab94fbd38955f3d9b899e63a | [
"MIT"
] | null | null | null | manage.py | jonmsawyer/jsonbench | 0b51fa7568d7c638ab94fbd38955f3d9b899e63a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jsonbench.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.086957 | 77 | 0.643123 |
2ce635935e11178bd4ab2cc980bd81f28747a5b6 | 2,819 | py | Python | python/avi/sdk/samples/get_config_logs.py | thisisshi/sdk | 99c52caffeebbfd41f43931fea2b5b1323841892 | [
"Apache-2.0"
] | 37 | 2016-03-14T22:27:17.000Z | 2022-03-03T05:18:39.000Z | python/avi/sdk/samples/get_config_logs.py | thisisshi/sdk | 99c52caffeebbfd41f43931fea2b5b1323841892 | [
"Apache-2.0"
] | 195 | 2016-03-14T23:47:55.000Z | 2021-05-12T11:28:56.000Z | python/avi/sdk/samples/get_config_logs.py | thisisshi/sdk | 99c52caffeebbfd41f43931fea2b5b1323841892 | [
"Apache-2.0"
] | 50 | 2016-03-14T05:52:14.000Z | 2022-01-06T06:12:00.000Z | import requests
import argparse
import time
from avi.sdk.avi_api import ApiSession
def get_config_logs_for_vs(api, vs, only_config=True):
    """Fetch all (config) logs for one VirtualService and dump them to a file.

    Follows the analytics API's "more" pagination link until exhausted,
    retrying transient failures up to ``total_retries`` times, then writes
    the last response dict to a local file named ``config_logs_<vs>``.

    :param api: authenticated ApiSession used for the REST calls
    :param vs: VirtualService uuid to filter the logs on
    :param only_config: when True, restrict results to config events
    """
    url = ('analytics/logs?type=2&filter=co(all,%s)&'
           'page_size=10000' % vs)
    if only_config:
        url += ('&filter=eq(event_id,[config_create,config_update,'
                'config_delete,config_action])')
    start = time.time()
    resp = {"count": -1}
    total_retries = 50
    retry = 0
    while url:
        try:
            resp = api.get(url).json()
            if "more" in resp and resp["more"]:
                print ("Need to get more: Curr Count %s Remaining: %s%%" % (
                    resp["count"], resp["percent_remaining"]))
                # Strip the scheme/host prefix; api.get() takes a
                # controller-relative path.
                url = resp["more"].split("api/")[1]
            else:
                break
        except Exception as e:
            if "Search system is down" in str(e):
                # Back off longer while the search subsystem restarts.
                # (Bug fix: message used to say "2 seconds" while sleeping 5.)
                print ("Search system is down; wait 5 seconds and retry")
                time.sleep(5.0)
            else:
                # Bug fix: the exception was never interpolated -- the
                # literal "%s" was printed instead of the error text.
                print("Failed in getting: %s;"
                      " will continue to retry" % e)
                time.sleep(1.0)
            print("url: %s" % url)
            if retry == total_retries:
                break
            else:
                retry += 1
    time_taken = time.time() - start
    print ("Time taken to fetch config logs for VS %s: %ss" % (vs, time_taken))
    # Dump the final (last-page) response as its repr-style string.
    with open("config_logs_%s" % vs, 'w') as f:
        f.write("%s" % resp)
    return
def get_config_logs_for_all_vses(api):
    """Fetch and dump config logs for every VirtualService on the controller."""
    response = api.get("virtualservice?page_size=1000").json()
    for vs_entry in response["results"]:
        uuid = vs_entry["uuid"]
        print ("Working on VS %s" % uuid)
        get_config_logs_for_vs(api, uuid)
def main(args):
    """Open an API session and fetch config logs for one VS or all of them."""
    session = ApiSession.get_session(
        args.controller, args.username, args.password, tenant="*")
    if not args.vs:
        get_config_logs_for_all_vses(session)
    else:
        get_config_logs_for_vs(session, args.vs)
# Command-line entry point: parse controller credentials and an optional
# VS uuid, then hand off to main().
if __name__ == "__main__":
    # Controllers commonly use self-signed certs; silence urllib3 TLS warnings.
    requests.packages.urllib3.disable_warnings()
    parser = argparse.ArgumentParser(
        description="Script to get the config logs from Avi controller")
    parser.add_argument("-u", "--username", required=True,
                        help="Login username")
    parser.add_argument("-p", "--password", required=True,
                        help="Login password")
    parser.add_argument("-c", "--controller", required=True,
                        help="Controller IP address")
    parser.add_argument("-v", "--vs", required=False,
                        help="VirtualService uuid for which config logs are"
                             " needed. If not specified, config logs for all "
                             "VSes are fetched.")
    args = parser.parse_args()
    main(args)
| 35.2375 | 79 | 0.55055 |
b8fe1990c4e7e3f7df041cae3512702dbeb544ff | 395 | py | Python | python-icmperror/icmperror/setup.py | krihal/pscheduler | e69e0357797d88d290c78b92b1d99048e73a63e8 | [
"Apache-2.0"
] | 47 | 2016-09-28T14:19:10.000Z | 2022-03-21T13:26:47.000Z | python-icmperror/icmperror/setup.py | krihal/pscheduler | e69e0357797d88d290c78b92b1d99048e73a63e8 | [
"Apache-2.0"
] | 993 | 2016-07-07T19:30:32.000Z | 2022-03-21T10:25:52.000Z | python-icmperror/icmperror/setup.py | mfeit-internet2/pscheduler-dev | d2cd4065a6fce88628b0ca63edc7a69f2672dad2 | [
"Apache-2.0"
] | 36 | 2016-09-15T09:39:45.000Z | 2021-06-23T15:05:13.000Z | #!/usr/bin/env python3
# Packaging metadata for the icmperror helper library.
# NOTE(review): distutils is deprecated (removed in Python 3.12) -- consider
# migrating to setuptools when touching this file.
from distutils.core import setup
setup(name='icmperror',
      version='0.1',
      description='Functions for translating ICMP error codes to enumerated values',
      url='http://www.perfsonar.net',
      author='The perfSONAR Development Team',
      author_email='perfsonar-developer@perfsonar.net',
      license='Apache 2.0',
      packages=['icmperror'],
      )
| 28.214286 | 84 | 0.673418 |
18dbf6ec3babfe896687d15cfe88a5da28a3c3c2 | 26,441 | py | Python | lib/modeling/semseg_headsnvidia-smi.py | zhanwj/multi-task-pytorch | 7d57645ec8be0ca0c258cfa99fb788e3cd37f106 | [
"MIT"
] | 2 | 2019-06-11T16:16:11.000Z | 2020-07-21T10:34:40.000Z | lib/modeling/semseg_headsnvidia-smi.py | zhanwj/multi-task-pytorch | 7d57645ec8be0ca0c258cfa99fb788e3cd37f106 | [
"MIT"
] | null | null | null | lib/modeling/semseg_headsnvidia-smi.py | zhanwj/multi-task-pytorch | 7d57645ec8be0ca0c258cfa99fb788e3cd37f106 | [
"MIT"
] | 2 | 2019-05-21T11:07:29.000Z | 2019-06-11T16:17:02.000Z | import torch
import torch.nn as nn
import torchvision
from . import resnet as resnet
from core.config import cfg
from lib.nn import SynchronizedBatchNorm2d
import modeling.fcn8s as fcn
import modeling.spn as spn
class SegmentationModuleBase(nn.Module):
    """Common base for segmentation modules; provides pixel-accuracy metric."""

    def __init__(self):
        super(SegmentationModuleBase, self).__init__()

    def pixel_acc(self, pred, label):
        """Mean pixel accuracy over positions with label >= 0.

        ``pred`` holds per-class scores with classes on dim 1; positions
        whose label is negative are ignored. A tiny epsilon guards against
        division by zero when no position is valid.
        """
        predicted_classes = torch.max(pred, dim=1)[1]
        valid_mask = (label >= 0).long()
        correct = torch.sum(valid_mask * (predicted_classes == label).long())
        total = torch.sum(valid_mask)
        return correct.float() / (total.float() + 1e-10)
class SegmentationModule(SegmentationModuleBase):
    """Encoder/decoder pair with loss computation for semantic segmentation."""
    def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None):
        # net_enc: backbone producing feature maps; net_dec: segmentation head;
        # crit: loss criterion; deep_sup_scale: per-level weights that also
        # enable deep supervision when not None.
        super(SegmentationModule, self).__init__()
        self.encoder = net_enc
        self.decoder = net_dec
        self.crit = crit
        self.deep_sup_scale = deep_sup_scale
    def forward(self, feed_dict, *, segSize=None):
        # segSize doubles as the train/inference switch: None means training.
        if segSize is None: # training
            if self.deep_sup_scale is not None: # use deep supervision technique
                (pred, pred_deepsup) = self.decoder(self.encoder(feed_dict['data'], return_feature_maps=True))
            else:
                pred = self.decoder(self.encoder(feed_dict['data'], return_feature_maps=True))
            # Main loss against the full-resolution label ("<prefix>_0").
            loss = self.crit(pred, feed_dict[cfg.SEM.OUTPUT_PRIFEX+'_0'])
            if self.deep_sup_scale is not None:
                # Auxiliary losses against the downsampled labels; note the
                # same pred_deepsup is compared to every level starting at 2.
                for i in range(2, len(cfg.SEM.DOWNSAMPLE)):
                    loss_deepsup = self.crit(pred_deepsup,
                            feed_dict['{}_{}'.format(cfg.SEM.OUTPUT_PRIFEX, i)])
                    loss = loss + loss_deepsup * self.deep_sup_scale[i]
            acc = self.pixel_acc(pred, feed_dict[cfg.SEM.OUTPUT_PRIFEX+'_0'])
            return loss, acc
        else: # inference
            # segSize is forwarded so the decoder can upsample to output size.
            pred = self.decoder(self.encoder(feed_dict['data'], return_feature_maps=True), segSize=segSize)
            return pred
def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
    """Build a 3x3 convolution with padding 1 (spatial size kept at stride 1)."""
    conv_kwargs = {
        "kernel_size": 3,
        "stride": stride,
        "padding": 1,
        "bias": has_bias,
    }
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
    """3x3 conv -> synchronized batch norm -> ReLU, as a single Sequential."""
    layers = [
        conv3x3(in_planes, out_planes, stride),
        SynchronizedBatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class ModelBuilder():
    """Factory for segmentation encoders/decoders, with optional checkpoint loading."""
    # custom weights initialization
    def weights_init(self, m):
        # Kaiming init for conv layers; near-identity init for batch norm.
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.kaiming_normal_(m.weight.data)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.fill_(1.)
            m.bias.data.fill_(1e-4)
        #elif classname.find('Linear') != -1:
        #    m.weight.data.normal_(0.0, 0.0001)
    def build_encoder(self, arch='resnet50_dilated8', fc_dim=512, weights=''):
        """Build a backbone by name; load `weights` checkpoint if given.

        When a checkpoint path is supplied, ImageNet-pretrained weights are
        skipped (the checkpoint is loaded instead, with strict=False).
        """
        pretrained = True if len(weights) == 0 else False
        if arch == 'resnet18':
            orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif arch == 'resnet18_dilated8':
            orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnet18_dilated16':
            orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=16)
        elif arch == 'resnet34':
            # resnet34 variants are deliberately disabled; the code after
            # `raise` is unreachable and kept only as a template.
            raise NotImplementedError
            orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif arch == 'resnet34_dilated8':
            raise NotImplementedError
            orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnet34_dilated16':
            raise NotImplementedError
            orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=16)
        elif arch == 'resnet50':
            orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif arch == 'resnet50_dilated8':
            orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnet50_dilated16':
            orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=16)
        elif arch == 'resnet101':
            orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif arch == 'resnet101_dilated8':
            orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnet101_dilated16':
            orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=16)
        elif arch == 'resnext101':
            # BUG(review): `resnext` is never imported in this module (only
            # `resnet` is); selecting this arch raises NameError at runtime.
            orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnext) # we can still use class Resnet
        elif arch == 'fcn8s':
            net_encoder=fcn.FCN8s()
        else:
            raise Exception('Architecture undefined!')
        # Encoders keep their pretrained init; weights_init is intentionally
        # not applied here (see commented line below).
        # net_encoder.apply(self.weights_init)
        if len(weights) > 0:
            print('Loading weights for net_encoder')
            net_encoder.load_state_dict(
                torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
        return net_encoder
    def build_decoder(self, arch='ppm_bilinear_deepsup',
                      fc_dim=512, num_class=150,
                      weights='', use_softmax=False):
        """Build a decoder head by name; load `weights` checkpoint if given.

        use_softmax=True selects inference behavior in the heads
        (softmax + upsample) instead of training-time log-softmax.
        """
        if arch == 'c1_bilinear_deepsup':
            net_decoder = C1BilinearDeepSup(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'c1_bilinear':
            net_decoder = C1Bilinear(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'ppm_bilinear':
            net_decoder = PPMBilinear(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'ppm_bilinear_deepsup':
            net_decoder = PPMBilinearDeepsup(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'ppm_bilinear_3Ddeepsup':
            net_decoder = PPMBilinear3DDeepsup(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'upernet_lite':
            net_decoder = UPerNet(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax,
                fpn_dim=256)
        elif arch == 'upernet':
            net_decoder = UPerNet(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax,
                fpn_dim=512)
        elif arch == 'upernet_tmp':
            # NOTE(review): UPerNetTmp is not defined in this file -- verify
            # it is provided elsewhere before selecting this arch.
            net_decoder = UPerNetTmp(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax,
                fpn_dim=512)
        elif arch == 'spn':
            net_decoder = spn.SPN()
        else:
            raise Exception('Architecture undefined!')
        # Decoders are trained from scratch, so custom init is applied.
        net_decoder.apply(self.weights_init)
        if len(weights) > 0:
            print('Loading weights for net_decoder')
            net_decoder.load_state_dict(
                torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
        return net_decoder
class Resnet(nn.Module):
    """Wrap a pretrained ResNet, dropping its classifier head and exposing
    the outputs of the four residual stages."""

    # Stem and stage attributes copied from the wrapped network, in order.
    _PARTS = ('conv1', 'bn1', 'relu1',
              'conv2', 'bn2', 'relu2',
              'conv3', 'bn3', 'relu3',
              'maxpool', 'layer1', 'layer2', 'layer3', 'layer4')

    def __init__(self, orig_resnet):
        super(Resnet, self).__init__()
        # Re-register the pretrained stem and stages on this module;
        # avgpool/fc are intentionally not copied.
        for part in self._PARTS:
            setattr(self, part, getattr(orig_resnet, part))

    def forward(self, x, return_feature_maps=False):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        stage_outputs = []
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            stage_outputs.append(x)
        if return_feature_maps:
            return stage_outputs
        return [x]
class ResnetDilated(nn.Module):
    """ResNet wrapper that converts strided stages to dilated convolutions,
    raising the output resolution (dilate_scale 8 -> 1/8, 16 -> 1/16)."""
    def __init__(self, orig_resnet, dilate_scale=8):
        super(ResnetDilated, self).__init__()
        from functools import partial
        # Rewrite strides/dilations IN PLACE on the wrapped network's
        # later stages before copying the sub-modules over.
        if dilate_scale == 8:
            orig_resnet.layer3.apply(
                partial(self._nostride_dilate, dilate=2))
            orig_resnet.layer4.apply(
                partial(self._nostride_dilate, dilate=4))
        elif dilate_scale == 16:
            orig_resnet.layer4.apply(
                partial(self._nostride_dilate, dilate=2))
        # take pretrained resnet, except AvgPool and FC
        self.conv1 = orig_resnet.conv1
        self.bn1 = orig_resnet.bn1
        self.relu1 = orig_resnet.relu1
        self.conv2 = orig_resnet.conv2
        self.bn2 = orig_resnet.bn2
        self.relu2 = orig_resnet.relu2
        self.conv3 = orig_resnet.conv3
        self.bn3 = orig_resnet.bn3
        self.relu3 = orig_resnet.relu3
        self.maxpool = orig_resnet.maxpool
        self.layer1 = orig_resnet.layer1
        self.layer2 = orig_resnet.layer2
        self.layer3 = orig_resnet.layer3
        self.layer4 = orig_resnet.layer4
    def _nostride_dilate(self, m, dilate):
        # Visitor for Module.apply(): turns stride-2 convs into stride-1 and
        # compensates the lost receptive field with dilation.
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            # the convolution with stride
            if m.stride == (2, 2):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate//2, dilate//2)
                    m.padding = (dilate//2, dilate//2)
            # other convolutions
            else:
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate, dilate)
                    m.padding = (dilate, dilate)
    def forward(self, x, return_feature_maps=False):
        # Same stem/stage flow as Resnet; collects per-stage feature maps.
        conv_out = []
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x = self.layer1(x); conv_out.append(x);
        x = self.layer2(x); conv_out.append(x);
        x = self.layer3(x); conv_out.append(x);
        x = self.layer4(x); conv_out.append(x);
        if return_feature_maps:
            return conv_out
        return [x]
# last conv, bilinear upsample
class C1BilinearDeepSup(nn.Module):
    """Single-conv segmentation head with deep supervision; at training time
    the main and auxiliary log-softmax maps are fused by a final conv."""
    def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
        super(C1BilinearDeepSup, self).__init__()
        self.use_softmax = use_softmax
        self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
        # Auxiliary branch reads the second-to-last feature map (fc_dim // 2 channels).
        self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
        # last conv
        self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        # Fuses main + deep-sup log-probabilities into cfg.SEM.SD_DIM channels.
        self.conv_last_last = nn.Conv2d(num_class*2,cfg.SEM.SD_DIM,3,1,1)
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        x = self.cbr(conv5)
        x = self.conv_last(x)
        if self.use_softmax:  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
            return x
        # deep sup
        conv4 = conv_out[-2]
        _ = self.cbr_deepsup(conv4)
        _ = self.conv_last_deepsup(_)
        # Both branches become log-probabilities before fusion.
        x = nn.functional.log_softmax(x, dim=1)
        _ = nn.functional.log_softmax(_, dim=1)
        return self.conv_last_last(torch.cat((x,_),dim=1))
# last conv, bilinear upsample
class C1Bilinear(nn.Module):
    """Minimal segmentation head: one 3x3 conv-bn-relu followed by a 1x1
    classifier. Softmax + bilinear upsampling at inference, log-softmax
    during training."""

    def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
        super(C1Bilinear, self).__init__()
        self.use_softmax = use_softmax
        self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
        # 1x1 classifier producing per-class scores.
        self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)

    def forward(self, conv_out, segSize=None):
        scores = self.conv_last(self.cbr(conv_out[-1]))
        if not self.use_softmax:
            # Training: per-pixel log-probabilities at feature resolution.
            return nn.functional.log_softmax(scores, dim=1)
        # Inference: upsample to the requested size, then normalize.
        scores = nn.functional.interpolate(
            scores, size=segSize, mode='bilinear', align_corners=False)
        return nn.functional.softmax(scores, dim=1)
# pyramid pooling, bilinear upsample
class PPMBilinear(nn.Module):
    """PSPNet-style pyramid pooling head: pooled context at several scales is
    upsampled, concatenated with the input map, and classified."""
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPMBilinear, self).__init__()
        self.use_softmax = use_softmax
        self.ppm = []
        # One pool -> 1x1 conv -> BN -> ReLU branch per pyramid scale.
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        ppm_out = [conv5]
        # Upsample each pooled branch back to the feature-map size.
        for pool_scale in self.ppm:
            ppm_out.append(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)
        x=self.conv_last(ppm_out)
        if self.use_softmax:  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
        # Training path returns RAW scores: the log_softmax below was
        # deliberately disabled (the criterion presumably applies it -- verify).
        #else:
        #    x = nn.functional.log_softmax(x, dim=1)
        return x
# pyramid pooling, bilinear upsample
class PPMBilinearDeepsup(nn.Module):
    """Pyramid pooling head with a deep-supervision branch; training returns
    (main raw scores, auxiliary log-probabilities)."""
    def __init__(self, num_class=150, fc_dim=1024,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPMBilinearDeepsup, self).__init__()
        self.use_softmax = use_softmax
        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        # Auxiliary branch consumes the second-to-last stage (fc_dim // 2 channels).
        self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.dropout_deepsup = nn.Dropout2d(0.1)
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale in self.ppm:
            ppm_out.append(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)
        x = self.conv_last(ppm_out)
        if self.use_softmax :  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
            return x
        # deep sup
        conv4 = conv_out[-2]
        _ = self.cbr_deepsup(conv4)
        _ = self.dropout_deepsup(_)
        _ = self.conv_last_deepsup(_)
        # Main output is left as raw scores (log_softmax disabled below);
        # only the auxiliary map is converted to log-probabilities.
        #x = nn.functional.log_softmax(x, dim=1)
        _ = nn.functional.log_softmax(_, dim=1)
        return (x, _)
class PPMBilinear3DDeepsup(nn.Module):
    """Pyramid pooling variant returning concatenated main+aux features plus a
    batch-sliced semseg prediction. Note: use_softmax and segSize are stored
    or accepted but never used in forward."""
    def __init__(self, num_class=150, fc_dim=1024,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPMBilinear3DDeepsup, self).__init__()
        self.use_softmax = use_softmax
        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.dropout_deepsup = nn.Dropout2d(0.1)
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale in self.ppm:
            ppm_out.append(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)
        x = self.conv_last(ppm_out)
        # deep sup
        conv4 = conv_out[-2]
        _ = self.cbr_deepsup(conv4)
        _ = self.dropout_deepsup(_)
        _ = self.conv_last_deepsup(_)
        pred_features = torch.cat((x,_),dim=1)
        # NOTE(review): splits x (not pred_features) along the batch dim;
        # unpacking into two requires the batch to be exactly
        # 2 * cfg.TRAIN.IMS_PER_BATCH -- confirm this is the intent.
        pred_semseg, _ = torch.split(x, cfg.TRAIN.IMS_PER_BATCH, dim=0)
        return pred_features, pred_semseg
# upernet
class UPerNet(nn.Module):
    """UPerNet head: pyramid pooling on the deepest map feeds a top-down FPN;
    all pyramid levels are fused at the finest resolution and classified."""
    def __init__(self, num_class=19, fc_dim=2048,
                 use_softmax=False, pool_scales=(1, 2, 3, 6),
                 fpn_inplanes=(256,512,1024,2048), fpn_dim=256):
        super(UPerNet, self).__init__()
        self.use_softmax = use_softmax
        # PPM Module: one pooled context branch per scale on the last map.
        self.ppm_pooling = []
        self.ppm_conv = []
        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(nn.Sequential(
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
        # FPN Module: 1x1 lateral convs projecting each stage to fpn_dim.
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:-1]:   # skip the top layer
            self.fpn_in.append(nn.Sequential(
                nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(fpn_dim),
                nn.ReLU(inplace=True)
            ))
        self.fpn_in = nn.ModuleList(self.fpn_in)
        # Smoothing convs applied after each top-down merge.
        self.fpn_out = []
        for i in range(len(fpn_inplanes) - 1):  # skip the top layer
            self.fpn_out.append(nn.Sequential(
                conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
            ))
        self.fpn_out = nn.ModuleList(self.fpn_out)
        # Fusion conv over all concatenated pyramid levels + classifier.
        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, num_class, kernel_size=1)
        )
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
            ppm_out.append(pool_conv(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False)))
        ppm_out = torch.cat(ppm_out, 1)
        f = self.ppm_last_conv(ppm_out)
        # Top-down pass: upsample and add each lateral projection.
        fpn_feature_list = [f]
        for i in reversed(range(len(conv_out) - 1)):
            conv_x = conv_out[i]
            conv_x = self.fpn_in[i](conv_x) # lateral branch
            f = nn.functional.interpolate(
                f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
            f = conv_x + f
            fpn_feature_list.append(self.fpn_out[i](f))
        fpn_feature_list.reverse() # [P2 - P5]
        # Fuse every level at the finest (P2) resolution.
        output_size = fpn_feature_list[0].size()[2:]
        fusion_list = [fpn_feature_list[0]]
        for i in range(1, len(fpn_feature_list)):
            fusion_list.append(nn.functional.interpolate(
                fpn_feature_list[i],
                output_size,
                mode='bilinear', align_corners=False))
        fusion_out = torch.cat(fusion_list, 1)
        x = self.conv_last(fusion_out)
        if self.use_softmax:  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
            return x
        x = nn.functional.log_softmax(x, dim=1)
        return x
# cspn upernet
class UPerNetCspn(nn.Module):
    """CSPN variant of the UPerNet head (currently structurally identical to
    UPerNet apart from its defaults).

    Bug fix: __init__ previously called super(UPerNet, self).__init__();
    since UPerNetCspn does not inherit from UPerNet, the two-argument super()
    raised TypeError on instantiation. It now names this class.
    """
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6),
                 fpn_inplanes=(256,512,1024,2048), fpn_dim=256):
        super(UPerNetCspn, self).__init__()
        self.use_softmax = use_softmax
        # PPM Module: pooled context branches over the deepest feature map.
        self.ppm_pooling = []
        self.ppm_conv = []
        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(nn.Sequential(
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
        # FPN Module: lateral 1x1 projections for all but the top stage.
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:-1]:   # skip the top layer
            self.fpn_in.append(nn.Sequential(
                nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(fpn_dim),
                nn.ReLU(inplace=True)
            ))
        self.fpn_in = nn.ModuleList(self.fpn_in)
        # Post-merge smoothing convs.
        self.fpn_out = []
        for i in range(len(fpn_inplanes) - 1):  # skip the top layer
            self.fpn_out.append(nn.Sequential(
                conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
            ))
        self.fpn_out = nn.ModuleList(self.fpn_out)
        # Fusion of all pyramid levels + final classifier.
        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, num_class, kernel_size=1)
        )
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
            ppm_out.append(pool_conv(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False)))
        ppm_out = torch.cat(ppm_out, 1)
        f = self.ppm_last_conv(ppm_out)
        # Top-down FPN pass.
        fpn_feature_list = [f]
        for i in reversed(range(len(conv_out) - 1)):
            conv_x = conv_out[i]
            conv_x = self.fpn_in[i](conv_x) # lateral branch
            f = nn.functional.interpolate(
                f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
            f = conv_x + f
            fpn_feature_list.append(self.fpn_out[i](f))
        fpn_feature_list.reverse() # [P2 - P5]
        # Fuse every level at the finest resolution before classifying.
        output_size = fpn_feature_list[0].size()[2:]
        fusion_list = [fpn_feature_list[0]]
        for i in range(1, len(fpn_feature_list)):
            fusion_list.append(nn.functional.interpolate(
                fpn_feature_list[i],
                output_size,
                mode='bilinear', align_corners=False))
        fusion_out = torch.cat(fusion_list, 1)
        x = self.conv_last(fusion_out)
        if self.use_softmax:  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
            return x
        x = nn.functional.log_softmax(x, dim=1)
        return x
| 37.558239 | 110 | 0.580689 |
7ae3b728d5669f6a97c63654aa5e59804440dbe2 | 1,168 | py | Python | version.py | kappazeta/cm_predict | f3093eb58b825ebe45c9830bf7fae985de1e9b2b | [
"Apache-2.0"
] | 3 | 2022-01-06T20:54:11.000Z | 2022-03-23T15:03:26.000Z | version.py | kappazeta/cm_predict | f3093eb58b825ebe45c9830bf7fae985de1e9b2b | [
"Apache-2.0"
] | 4 | 2021-09-07T09:05:20.000Z | 2022-03-04T09:07:28.000Z | version.py | kappazeta/cm_predict | f3093eb58b825ebe45c9830bf7fae985de1e9b2b | [
"Apache-2.0"
] | 1 | 2021-11-03T14:13:59.000Z | 2021-11-03T14:13:59.000Z | # vim: set tabstop=8 softtabstop=0 expandtab shiftwidth=4 smarttab
# KappaMask predictor version and changelog.
#
# Copyright 2021 KappaZeta Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Version of this KappaMask cm_predict release (see changelog comments below).
__version__ = '1.0.5'
# Minimum cm_vsm tool version this release works with -- presumably checked
# by the caller; verify where it is enforced.
min_cm_vsm_version = '0.2.6'
# 1.0.5 - Calls to cm_vsm now less dependent on platform. Switch from miniconda to micromamba.
# 1.0.4 - Support processing withing polygon-limited area of interest. Sub-tiles no longer flipped.
# 1.0.3 - Mosaic performance optimization.
# 1.0.2 - Mosaic function is unified.
# 1.0.1 - L1C support, new weights files.
# 1.0.0 - cm_predict version implementation, logger implementation, image rotating on re-creation fix.
| 41.714286 | 103 | 0.758562 |
d05f52d61dace9a8da82457c1df7187272bac490 | 13,203 | py | Python | tests/shared/core/training_data/story_reader/test_yaml_story_reader.py | SunYanCN/rasa | d522fdaab6414a0aff15a27bf60ac4f4bdbb9e2c | [
"Apache-2.0"
] | null | null | null | tests/shared/core/training_data/story_reader/test_yaml_story_reader.py | SunYanCN/rasa | d522fdaab6414a0aff15a27bf60ac4f4bdbb9e2c | [
"Apache-2.0"
] | 209 | 2020-03-18T18:28:12.000Z | 2022-03-01T13:42:29.000Z | tests/shared/core/training_data/story_reader/test_yaml_story_reader.py | SunYanCN/rasa | d522fdaab6414a0aff15a27bf60ac4f4bdbb9e2c | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import Text, List
import pytest
from rasa.shared.exceptions import FileNotFoundException, YamlSyntaxException
import rasa.shared.utils.io
from rasa.shared.constants import LATEST_TRAINING_DATA_FORMAT_VERSION
from rasa.core import training
from rasa.shared.core.constants import RULE_SNIPPET_ACTION_NAME
from rasa.shared.core.domain import Domain
from rasa.shared.core.training_data import loading
from rasa.shared.core.events import ActionExecuted, UserUttered, SlotSet, ActiveLoop
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
from rasa.shared.core.training_data.structures import StoryStep, RuleStep
@pytest.fixture()
async def rule_steps_without_stories(default_domain: Domain) -> List[StoryStep]:
    """Load the rule-only training data file used by the rule tests below."""
    yaml_file = "data/test_yaml_stories/rules_without_stories.yml"
    return await loading.load_data_from_files([yaml_file], default_domain)
async def test_can_read_test_story_with_slots(default_domain: Domain):
    """A YAML story ending in a slot event yields SlotSet then action_listen."""
    trackers = await training.load_data(
        "data/test_yaml_stories/simple_story_with_only_end.yml",
        default_domain,
        use_story_concatenation=False,
        tracker_limit=1000,
        remove_duplicates=False,
    )
    assert len(trackers) == 1
    assert trackers[0].events[-2] == SlotSet(key="name", value="peter")
    assert trackers[0].events[-1] == ActionExecuted("action_listen")
async def test_can_read_test_story_with_entities_slot_autofill(default_domain: Domain):
    """An `or` story with entities produces two trackers; only the tracker
    whose turn carried the entity gets the auto-filled SlotSet."""
    trackers = await training.load_data(
        "data/test_yaml_stories/story_with_or_and_entities.yml",
        default_domain,
        use_story_concatenation=False,
        tracker_limit=1000,
        remove_duplicates=False,
    )
    assert len(trackers) == 2
    # First branch: no entity, hence no SlotSet event.
    assert trackers[0].events[-3] == UserUttered(
        intent={"name": "greet", "confidence": 1.0},
        parse_data={
            "text": "/greet",
            "intent_ranking": [{"confidence": 1.0, "name": "greet"}],
            "intent": {"confidence": 1.0, "name": "greet"},
            "entities": [],
        },
    )
    assert trackers[0].events[-2] == ActionExecuted("utter_greet")
    assert trackers[0].events[-1] == ActionExecuted("action_listen")
    # Second branch: entity present, so the "name" slot is auto-filled.
    assert trackers[1].events[-4] == UserUttered(
        intent={"name": "greet", "confidence": 1.0},
        entities=[{"entity": "name", "value": "peter"}],
        parse_data={
            "text": "/greet",
            "intent_ranking": [{"confidence": 1.0, "name": "greet"}],
            "intent": {"confidence": 1.0, "name": "greet"},
            "entities": [{"entity": "name", "value": "peter"}],
        },
    )
    assert trackers[1].events[-3] == SlotSet(key="name", value="peter")
    assert trackers[1].events[-2] == ActionExecuted("utter_greet")
    assert trackers[1].events[-1] == ActionExecuted("action_listen")
async def test_can_read_test_story_with_entities_without_value(default_domain: Domain):
    """An entity declared without a value is parsed with an empty string value."""
    trackers = await training.load_data(
        "data/test_yaml_stories/story_with_or_and_entities_with_no_value.yml",
        default_domain,
        use_story_concatenation=False,
        tracker_limit=1000,
        remove_duplicates=False,
    )
    assert len(trackers) == 1
    assert trackers[0].events[-4] == UserUttered(
        intent={"name": "greet", "confidence": 1.0},
        entities=[{"entity": "name", "value": ""}],
        parse_data={
            "text": "/greet",
            "intent_ranking": [{"confidence": 1.0, "name": "greet"}],
            "intent": {"confidence": 1.0, "name": "greet"},
            "entities": [{"entity": "name", "value": ""}],
        },
    )
    # NOTE(review): events[-3] is intentionally not asserted here --
    # presumably a SlotSet for the empty-valued entity; confirm if relevant.
    assert trackers[0].events[-2] == ActionExecuted("utter_greet")
    assert trackers[0].events[-1] == ActionExecuted("action_listen")
@pytest.mark.parametrize(
    "file,is_yaml_file",
    [
        ("data/test_yaml_stories/stories.yml", True),
        ("data/test_stories/stories.md", False),
        ("data/test_yaml_stories/rules_without_stories.yml", True),
    ],
)
async def test_is_yaml_file(file: Text, is_yaml_file: bool):
    """is_stories_file() recognizes YAML story/rule files but not Markdown."""
    assert YAMLStoryReader.is_stories_file(file) == is_yaml_file
async def test_yaml_intent_with_leading_slash_warning(default_domain: Domain):
    """An intent written as "/intent" warns once and is read without the slash."""
    yaml_file = "data/test_wrong_yaml_stories/intent_with_leading_slash.yml"
    with pytest.warns(UserWarning) as record:
        tracker = await training.load_data(
            yaml_file,
            default_domain,
            use_story_concatenation=False,
            tracker_limit=1000,
            remove_duplicates=False,
        )
    # one warning, for the leading slash
    assert len(record) == 1
    assert tracker[0].latest_message == UserUttered(intent={"name": "simple"})
async def test_yaml_slot_without_value_is_parsed(default_domain: Domain):
    """A slot_was_set entry without a value becomes SlotSet(value=None)."""
    yaml_file = "data/test_yaml_stories/story_with_slot_was_set.yml"
    tracker = await training.load_data(
        yaml_file,
        default_domain,
        use_story_concatenation=False,
        tracker_limit=1000,
        remove_duplicates=False,
    )
    assert tracker[0].events[-2] == SlotSet(key="name", value=None)
async def test_yaml_wrong_yaml_format_warning(default_domain: Domain):
    """Malformed YAML raises YamlSyntaxException instead of being skipped."""
    yaml_file = "data/test_wrong_yaml_stories/wrong_yaml.yml"
    with pytest.raises(YamlSyntaxException):
        _ = await training.load_data(
            yaml_file,
            default_domain,
            use_story_concatenation=False,
            tracker_limit=1000,
            remove_duplicates=False,
        )
async def test_read_rules_with_stories(default_domain: Domain):
    """Rules and ML stories in one file are read as RuleStep vs StoryStep."""
    yaml_file = "data/test_yaml_stories/stories_and_rules.yml"
    steps = await loading.load_data_from_files([yaml_file], default_domain)
    ml_steps = [s for s in steps if not isinstance(s, RuleStep)]
    rule_steps = [s for s in steps if isinstance(s, RuleStep)]
    # this file contains three rules and three ML stories
    assert len(ml_steps) == 3
    assert len(rule_steps) == 3
    assert rule_steps[0].block_name == "rule 1"
    assert rule_steps[1].block_name == "rule 2"
    assert rule_steps[2].block_name == "rule 3"
    assert ml_steps[0].block_name == "simple_story_without_checkpoint"
    assert ml_steps[1].block_name == "simple_story_with_only_start"
    assert ml_steps[2].block_name == "simple_story_with_only_end"
def test_read_rules_without_stories(rule_steps_without_stories: List[StoryStep]):
    """The rule-only file yields RuleSteps exclusively."""
    ml_steps = [s for s in rule_steps_without_stories if not isinstance(s, RuleStep)]
    rule_steps = [s for s in rule_steps_without_stories if isinstance(s, RuleStep)]
    # this file contains eight rules and no ML stories
    # (previous comment said "five", contradicting the assertion below)
    assert len(ml_steps) == 0
    assert len(rule_steps) == 8
def test_rule_with_condition(rule_steps_without_stories: List[StoryStep]):
    """The first rule's condition (active loop + slot) should precede its events."""
    rule = rule_steps_without_stories[0]
    assert rule.block_name == "Rule with condition"
    assert rule.events == [
        ActiveLoop("loop_q_form"),
        SlotSet("requested_slot", "some_slot"),
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        UserUttered(
            intent={"name": "inform", "confidence": 1.0},
            entities=[{"entity": "some_slot", "value": "bla"}],
        ),
        ActionExecuted("loop_q_form"),
    ]
def test_rule_without_condition(rule_steps_without_stories: List[StoryStep]):
    """A rule without a condition starts directly with the rule-snippet action."""
    rule = rule_steps_without_stories[1]
    assert rule.block_name == "Rule without condition"
    assert rule.events == [
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        UserUttered(intent={"name": "explain", "confidence": 1.0}),
        ActionExecuted("utter_explain_some_slot"),
        ActionExecuted("loop_q_form"),
        ActiveLoop("loop_q_form"),
    ]
def test_rule_with_explicit_wait_for_user_message(
    rule_steps_without_stories: List[StoryStep],
):
    """A rule ending with `wait_for_user_input` adds no trailing snippet action."""
    rule = rule_steps_without_stories[2]
    assert rule.block_name == "Rule which explicitly waits for user input when finished"
    assert rule.events == [
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        UserUttered(intent={"name": "explain", "confidence": 1.0}),
        ActionExecuted("utter_explain_some_slot"),
    ]
def test_rule_which_hands_over_at_end(rule_steps_without_stories: List[StoryStep]):
    """A rule handing over at the end gains a trailing rule-snippet action."""
    rule = rule_steps_without_stories[3]
    assert rule.block_name == "Rule after which another action should be predicted"
    assert rule.events == [
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        UserUttered(intent={"name": "explain", "confidence": 1.0}),
        ActionExecuted("utter_explain_some_slot"),
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
    ]
def test_conversation_start_rule(rule_steps_without_stories: List[StoryStep]):
    """A conversation-start rule has no leading rule-snippet action."""
    rule = rule_steps_without_stories[4]
    assert rule.block_name == "Rule which only applies to conversation start"
    assert rule.events == [
        UserUttered(intent={"name": "explain", "confidence": 1.0}),
        ActionExecuted("utter_explain_some_slot"),
    ]
async def test_warning_if_intent_not_in_domain(default_domain: Domain):
    """Exactly one UserWarning is raised for an intent missing from the domain."""
    stories = """
    stories:
    - story: I am gonna make you explode 💥
      steps:
      # Intent defined in user key.
      - intent: definitely not in domain
    """
    reader = YAMLStoryReader(default_domain)
    yaml_content = rasa.shared.utils.io.read_yaml(stories)
    with pytest.warns(UserWarning) as record:
        reader.read_from_parsed_yaml(yaml_content)
    # one for missing intent
    assert len(record) == 1
async def test_no_warning_if_intent_in_domain(default_domain: Domain):
    """No warning is raised when the story's intent exists in the domain."""
    story_lines = [
        f'version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"',
        "stories:",
        "- story: I am fine 💥",
        "  steps:",
        "  - intent: greet",
    ]
    reader = YAMLStoryReader(default_domain)
    yaml_content = rasa.shared.utils.io.read_yaml("\n".join(story_lines))
    with pytest.warns(None) as record:
        reader.read_from_parsed_yaml(yaml_content)
    assert not len(record)
async def test_active_loop_is_parsed(default_domain: Domain):
    """An `active_loop: null` step should parse without raising any warnings."""
    stories = (
        f'version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"\n'
        f"stories:\n"
        f"- story: name\n"
        f"  steps:\n"
        f"  - intent: greet\n"
        f"  - active_loop: null"
    )
    reader = YAMLStoryReader(default_domain)
    yaml_content = rasa.shared.utils.io.read_yaml(stories)
    with pytest.warns(None) as record:
        reader.read_from_parsed_yaml(yaml_content)
    assert not len(record)
def test_is_test_story_file(tmp_path: Path):
    """A file named test_* that contains a `stories` key counts as a test-stories file."""
    stories_file = str(tmp_path / "test_stories.yml")
    rasa.shared.utils.io.write_yaml({"stories": []}, stories_file)
    assert YAMLStoryReader.is_test_stories_file(stories_file)
def test_is_not_test_story_file_if_it_doesnt_contain_stories(tmp_path: Path):
    """A test_* file with only NLU content is not a test-stories file."""
    path = str(tmp_path / "test_stories.yml")
    rasa.shared.utils.io.write_yaml({"nlu": []}, path)
    assert not YAMLStoryReader.is_test_stories_file(path)
def test_is_not_test_story_file_raises_if_file_does_not_exist(tmp_path: Path):
    """Checking a nonexistent path raises FileNotFoundException (file never created)."""
    path = str(tmp_path / "test_stories.yml")
    with pytest.raises(FileNotFoundException):
        YAMLStoryReader.is_test_stories_file(path)
def test_is_not_test_story_file_without_test_prefix(tmp_path: Path):
    """A stories file without the test_ filename prefix is not a test-stories file."""
    path = str(tmp_path / "stories.yml")
    rasa.shared.utils.io.write_yaml({"stories": []}, path)
    assert not YAMLStoryReader.is_test_stories_file(path)
def test_end_to_end_story_with_shortcut_intent():
    """A `/intent{...}` shortcut user message should yield intent plus parsed entities."""
    intent = "greet"
    plain_text = f'/{intent}{{"name": "test"}}'
    story = f"""
    stories:
    - story: my story
      steps:
      - user: |
          {plain_text}
        intent: {intent}
    """
    story_as_yaml = rasa.shared.utils.io.read_yaml(story)
    steps = YAMLStoryReader().read_from_parsed_yaml(story_as_yaml)
    user_uttered = steps[0].events[0]
    # entity span covers the JSON payload embedded in the shortcut text
    assert user_uttered == UserUttered(
        plain_text,
        intent={"name": intent},
        entities=[{"entity": "name", "start": 6, "end": 22, "value": "test"}],
    )
def test_read_mixed_training_data_file(default_domain: Domain):
    """Reading a file with mixed training data (NLU + stories) raises no warnings."""
    training_data_file = "data/test_mixed_yaml_training_data/training_data.yml"
    reader = YAMLStoryReader(default_domain)
    yaml_content = rasa.shared.utils.io.read_yaml_file(training_data_file)
    with pytest.warns(None) as record:
        reader.read_from_parsed_yaml(yaml_content)
    assert not len(record)
def test_or_statement_if_not_training_mode():
    """Outside training mode, an `or` step stays a single list event, not expanded stories."""
    stories = """
    stories:
    - story: hello world
      steps:
      - or:
        - intent: intent1
        - intent: intent2
      - action: some_action
      - intent: intent3
      - action: other_action
    """
    reader = YAMLStoryReader(is_used_for_training=False)
    yaml_content = rasa.shared.utils.io.read_yaml(stories)
    steps = reader.read_from_parsed_yaml(yaml_content)
    assert len(steps) == 1
    assert len(steps[0].events) == 4  # 4 events in total
    assert len(steps[0].start_checkpoints) == 1
    assert steps[0].start_checkpoints[0].name == "STORY_START"
    assert steps[0].end_checkpoints == []
    or_statement = steps[0].events[0]
    assert isinstance(or_statement, list)  # But first one is a list (OR)
    assert or_statement[0].intent["name"] == "intent1"
    assert or_statement[1].intent["name"] == "intent2"
| 33.510152 | 88 | 0.685677 |
734fb4f06745b21aa63ae2833e22107320d0a6cc | 15,215 | py | Python | resources/usr/local/lib/python2.7/dist-packages/sklearn/linear_model/bayes.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/local/lib/python2.7/dist-packages/sklearn/linear_model/bayes.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/local/lib/python2.7/dist-packages/sklearn/linear_model/bayes.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 1 | 2020-07-23T19:26:19.000Z | 2020-07-23T19:26:19.000Z | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_arrays
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        # Store hyper-parameters verbatim (scikit-learn convention: no
        # validation or transformation in __init__; see the class docstring
        # for each parameter's meaning).
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the model
        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        # NOTE(review): np.float is deprecated/removed in NumPy >= 1.24;
        # this should be the builtin float (or np.float64) in modern code.
        X, y = check_arrays(X, y, sparse_format='dense',
                            dtype=np.float)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        n_samples, n_features = X.shape
        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = 1.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        # The SVD is computed once and reused in every iteration below.
        XT_y = np.dot(X.T, y)
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2
        ### Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            # Two equivalent formulations are used depending on whether the
            # problem is tall (n_samples > n_features) or wide.
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
            ### Update alpha and lambda
            # gamma_ is the effective number of parameters; the updates below
            # are the evidence-maximization (EM-style) hyper-parameter updates.
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = (np.sum((alpha_ * eigen_vals_)
                      / (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1)
                       / (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
                      / (rmse_ + 2 * alpha_2))
            ### Compute the objective function
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_)
                            + n_samples * log(alpha_)
                            - alpha_ * rmse_
                            - (lambda_ * np.sum(coef_ ** 2))
                            - logdet_sigma_
                            - n_samples * log(2 * np.pi))
                self.scores_.append(s)
            ### Check for convergence
            # Convergence criterion: L1 change of coefficients below tol.
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedures (Evidence Maximization)
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`sigma_` : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Store hyper-parameters verbatim (scikit-learn convention: no
        # validation in __init__; see the class docstring for meanings).
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.
        Iterative procedure to maximize the evidence
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        Returns
        -------
        self : returns an instance of self.
        """
        # NOTE(review): np.float is deprecated/removed in NumPy >= 1.24;
        # this should be the builtin float (or np.float64) in modern code.
        X, y = check_arrays(X, y, sparse_format='dense',
                            dtype=np.float)
        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        ### Launch the convergence loop
        # keep_lambda masks the features still active (not yet pruned).
        keep_lambda = np.ones(n_features, dtype=bool)
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose
        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)
        self.scores_ = list()
        coef_old_ = None
        ### Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma (using Woodbury matrix identity)
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                           np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                           X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda]
                            * np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
                              * X[:, keep_lambda].T, sigma_)
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))
            ### Update alpha and lambda
            # gamma_ is the per-feature effective number of parameters.
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
                                    / ((coef_[keep_lambda]) ** 2
                                       + 2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
                      / (rmse_ + 2. * alpha_2))
            ### Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0
            ### Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
                            + np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)
            ### Check for convergence
            # Convergence criterion: L1 change of coefficients below tol.
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)
        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
| 36.140143 | 79 | 0.574827 |
fde8bb7081184db91f230d6a27f02808f9e73fab | 5,049 | py | Python | lifemonitor/commands/oauth.py | ilveroluca/life_monitor | 61752952cff6be8daea1d87b8f395ccb4dbe424c | [
"MIT"
] | null | null | null | lifemonitor/commands/oauth.py | ilveroluca/life_monitor | 61752952cff6be8daea1d87b8f395ccb4dbe424c | [
"MIT"
] | 1 | 2021-04-16T09:08:26.000Z | 2021-04-16T09:08:26.000Z | lifemonitor/commands/oauth.py | ilveroluca/life_monitor | 61752952cff6be8daea1d87b8f395ccb4dbe424c | [
"MIT"
] | null | null | null | # Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sys
import click
from flask import Blueprint
from flask.cli import with_appcontext
from lifemonitor.auth.models import User
from lifemonitor.auth.oauth2.server import server
# set module level logger
logger = logging.getLogger(__name__)
# define the blueprint for DB commands
blueprint = Blueprint('oauth', __name__)
def invalidate_token(token, ttl=10):
    """
    Return a copy of ``token`` whose lifetime is reduced to ``ttl`` seconds.

    The original mapping is not modified: ``expires_in`` is set to ``ttl``
    and ``expires_at`` is recomputed as ``created_at + ttl``, so the token
    will be considered expired (or nearly so) by downstream expiry checks.

    :param token: OAuth token mapping with at least a ``created_at`` key.
    :param ttl: new lifetime in seconds (default 10, the previous hard-coded value).
    :return: shallow-copied token dict with shortened expiry.
    """
    invalid_token = token.copy()
    invalid_token["expires_in"] = ttl
    invalid_token["expires_at"] = token["created_at"] + ttl
    return invalid_token
@blueprint.cli.command('invalidate-tokens')
@click.argument("username")
@with_appcontext
def token_invalidate(username):
    """
    Invalidate all tokens related with a given user
    """
    logger.debug("Finding User '%s'...", username)
    user = User.find_by_username(username)
    if not user:
        print("User not found", file=sys.stderr)
        sys.exit(99)
    logger.debug("User found: %r", user)
    # Replace every linked OAuth identity's token with a short-lived copy.
    identities = list(user.oauth_identity.values())
    for identity in identities:
        identity.set_token(invalidate_token(identity.token))
        identity.save()
        print("Token invalidated: %r !" % identity.token)
    print("%d Token invalidated!" % len(identities), file=sys.stderr)
    logger.debug("Token of User '%s' invalidated!", user.username)
@blueprint.cli.command('create-client-oauth-code')
@click.argument("client_name")
@click.argument("client_uri")
@click.argument("client_redirect_uri")
@click.argument("scope")
@click.argument("client_auth_method",
                type=click.Choice(['client_secret_basic', 'client_secret_post']),
                default='client_secret_post')
@click.option("--username", default="1")  # should be the "admin" username
@with_appcontext
def create_client_oauth_code(client_name, client_uri, client_redirect_uri,
                             client_auth_method, scope, username):
    """
    Create a OAuth2 client with 'authorization_code' grant
    """
    user = User.find_by_username(username)
    logger.debug("USERNAME: %r", username)
    if not user:
        print("User not found", file=sys.stderr)
        sys.exit(99)
    logger.debug("User found: %r", user)
    # Authorization-code clients support the full code/token/id_token set.
    grant_types = ['authorization_code', 'token', 'id_token']
    response_types = ["code", "token"]
    client = server.create_client(user, client_name, client_uri, grant_types,
                                  response_types, scope, client_redirect_uri,
                                  client_auth_method)
    print("CLIENT ID: %s" % client.client_id)
    print("CLIENT SECRET: %s" % client.client_secret)
    print("AUTHORIZATION URL: <LIFE_MONITOR_BASE_URL>/oauth/authorize")
    print("ACCESS TOKEN URL: <LIFE_MONITOR_BASE_URL>/oauth/token")
    logger.debug("Client created")
@blueprint.cli.command('create-client-credentials')
@click.argument("client_name")
@click.argument("client_uri")
@click.argument("scope")
@click.argument("client_auth_method",
                type=click.Choice(['client_secret_basic', 'client_secret_post']),
                default='client_secret_post')
@click.option("--username", default="1")  # should be the "admin" username
@with_appcontext
def create_client_credentials(client_name, client_uri, client_auth_method, scope, username):
    """
    Create a OAuth2 client with 'client_credentials' grant
    """
    user = User.find_by_username(username)
    logger.debug("USERNAME: %r", username)
    if not user:
        print("User not found", file=sys.stderr)
        sys.exit(99)
    logger.debug("User found: %r", user)
    # client_credentials clients get no redirect URI and token-only responses.
    client = server.create_client(user,
                                  client_name, client_uri,
                                  'client_credentials', ["token"], scope,
                                  "", client_auth_method)
    print("CLIENT ID: %s" % client.client_id)
    print("CLIENT SECRET: %s" % client.client_secret)
    print("ACCESS TOKEN URL: <LIFE_MONITOR_BASE_URL>/oauth/token")
    logger.debug("Client created")
| 39.755906 | 92 | 0.6837 |
0f58bf9feebc3d76a635f4a79624193c729d282e | 240 | py | Python | GalaX/login/admin.py | Doublexe/GalaX | 1908346a6b60032bdbf494a442f863fd231db8b9 | [
"MIT"
] | null | null | null | GalaX/login/admin.py | Doublexe/GalaX | 1908346a6b60032bdbf494a442f863fd231db8b9 | [
"MIT"
] | null | null | null | GalaX/login/admin.py | Doublexe/GalaX | 1908346a6b60032bdbf494a442f863fd231db8b9 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from . import models
admin.site.site_header = 'Galax后台'
admin.site.index_title = '作者:胡成伟 唐应天 柳蕴珂'
#admin.site.register(models.User)
#admin.site.register(models.ConfirmString) | 21.818182 | 42 | 0.779167 |
a486d4596033d2d1469d511f010171e72e3f834e | 15,572 | py | Python | reservoir_loop.py | brendano257/boulder_reservoir | e924c76c153e8ad88aa41fa69b75914d2b8242ef | [
"Apache-2.0"
] | null | null | null | reservoir_loop.py | brendano257/boulder_reservoir | e924c76c153e8ad88aa41fa69b75914d2b8242ef | [
"Apache-2.0"
] | null | null | null | reservoir_loop.py | brendano257/boulder_reservoir | e924c76c153e8ad88aa41fa69b75914d2b8242ef | [
"Apache-2.0"
] | null | null | null | import os
import asyncio
homedir = os.getcwd()
locallogdir = 'log'
plotdir = 'plots'
def print_now(string):
    """Print *string* followed by " - " and the current ISO-format datetime."""
    from datetime import datetime
    timestamp = datetime.now().isoformat(' ')
    print(f"{string} - {timestamp}")
async def check_load_logs(logpath, homedir, sleeptime):
    """
    Periodically check ``logpath`` against the database for new log files,
    loading and committing any that are not yet recorded.

    Each iteration: connect to the db, list '*l.txt' files in ``logpath``,
    load those whose filenames are not already in the LogFile table, then
    close the session and sleep for ``sleeptime`` seconds.

    Fixes over the previous version: ``len(x) is 0`` identity comparisons
    (a SyntaxWarning on Python 3.8+) replaced with truthiness tests; the
    session/engine are now closed on the empty-directory path; the
    "no new logs" path no longer sleeps twice per iteration.
    """
    while True:
        from reservoir_nmhc import connect_to_reservoir_db, TempDir, LogFile, fix_off_dates, read_log_file
        engine, session, Base = connect_to_reservoir_db('sqlite:///reservoir.sqlite', homedir)
        Base.metadata.create_all(engine)

        LogFiles = session.query(LogFile).order_by(LogFile.id).all()  # all log objects in the db

        with os.scandir(logpath) as files:
            logfns = [file.name for file in files if 'l.txt' in file.name]

        if not logfns:
            # No logs in the directory: clean up and look again after sleeping.
            print('There were no log files in the directory!')
            session.close()
            engine.dispose()
            await asyncio.sleep(sleeptime)
            continue

        logs_in_db = {log.filename for log in LogFiles}  # set for O(1) membership checks
        logs_to_load = [log for log in logfns if log not in logs_in_db]

        if not logs_to_load:
            print('No new logs were found.')
        else:
            new_logs = []
            with TempDir(logpath):
                for log in logs_to_load:
                    new_logs.append(read_log_file(log))
            if new_logs:
                fix_off_dates(new_logs, [])
            for item in new_logs:
                session.merge(item)
            print('New logs were added!')

        session.commit()
        session.close()
        engine.dispose()
        await asyncio.sleep(sleeptime)
async def check_load_pas(filename, directory, sleeptime):
    """
    Watch the peak-area (PA) log file and commit any new lines to the db.

    Tracks the file size and the number of lines already read across
    iterations so only lines appended since the previous pass are parsed.
    ``start_line`` / ``pa_file_size`` are only advanced after a successful
    read with at least one parsed line, matching the original behavior.
    Sleeps ``sleeptime`` seconds at the end of every iteration.

    Fixes over the previous version: bare ``except:`` narrowed to
    ``except Exception`` (no longer swallows KeyboardInterrupt/SystemExit);
    ``len(x) is 0`` identity comparisons removed; the session/engine are
    closed on every path (previously leaked on ``continue``); the duplicated
    "PA file did not exist!" print and double sleeps removed; the file is
    opened via the ``filename`` parameter (previously hard-coded to
    'NMHC_PA.LOG' while its size was taken from ``filename``).
    """
    from pathlib import Path
    pa_file_size = 0  # always assume all lines could be new when initialized
    start_line = 0

    while True:
        from reservoir_nmhc import connect_to_reservoir_db, TempDir, NmhcLine, fix_off_dates, read_pa_line
        engine, session, Base = connect_to_reservoir_db('sqlite:///reservoir.sqlite', directory)
        Base.metadata.create_all(engine)

        NmhcLines = session.query(NmhcLine).order_by(NmhcLine.id).all()
        line_dates = [line.date for line in NmhcLines]  # used to skip duplicates

        pa_path = Path(directory) / filename

        if not os.path.isfile(pa_path):
            print('PA file did not exist!')
        else:
            with TempDir(directory):
                new_file_size = os.path.getsize(filename)
            if new_file_size <= pa_file_size:
                print('PA file was the same size, so it was not touched.')
            else:
                with TempDir(directory):
                    with open(filename) as pa_file:
                        contents = pa_file.readlines()
                new_lines = []
                for line in contents[start_line:]:
                    try:
                        with TempDir(directory):
                            new_lines.append(read_pa_line(line))
                    except Exception:
                        print('A line in NMHC_PA.LOG was not processed by read_pa_line() due to an exception.')
                        print(f'The line was: {line}')
                if not new_lines:
                    print('No new pa lines added.')
                else:
                    fix_off_dates([], new_lines)  # correct dates for lines if necessary
                    for item in new_lines:
                        if item.date not in line_dates:  # prevents duplicates in db
                            line_dates.append(item.date)  # prevents duplicates in one load
                            session.merge(item)
                    session.commit()
                    # only advance the read position once lines were committed
                    start_line = len(contents)
                    pa_file_size = new_file_size
                    print('Some PA lines found and added.')

        session.close()
        engine.dispose()
        await asyncio.sleep(sleeptime)
async def create_gc_runs(directory, sleeptime):
    """Pair unmatched log files with unmatched peak-area lines into GcRun rows."""
    while True:
        print('Running create_gc_runs()')
        from reservoir_nmhc import (LogFile, NmhcLine, GcRun, connect_to_reservoir_db,
                                    match_log_to_pa, check_c4_rts)
        engine, session, Base = connect_to_reservoir_db('sqlite:///reservoir.sqlite', directory)
        Base.metadata.create_all(engine)

        unmatched_lines = (session.query(NmhcLine)
                           .filter(NmhcLine.status == 'single')
                           .order_by(NmhcLine.id).all())
        unmatched_logs = (session.query(LogFile)
                          .filter(LogFile.status == 'single')
                          .order_by(LogFile.id).all())
        existing_runs = session.query(GcRun).order_by(GcRun.id).all()
        run_dates = [run.date_end for run in existing_runs]

        for run in match_log_to_pa(unmatched_logs, unmatched_lines):
            if run.date_end not in run_dates:
                run_dates.append(run.date_end)
                # apply any possible acetylene/n-butane retention-time corrections
                run = check_c4_rts(run)
                session.merge(run)

        session.commit()
        session.close()
        engine.dispose()
        await asyncio.sleep(sleeptime)
async def load_crfs(directory, sleeptime):
    """Read CRFs from reservoir_CRFs.txt and merge any with new start dates into the db."""
    while True:
        print('Running load_crfs()')
        from reservoir_nmhc import read_crf_data, Crf, connect_to_reservoir_db, TempDir
        engine, session, Base = connect_to_reservoir_db('sqlite:///reservoir.sqlite', directory)
        Base.metadata.create_all(engine)

        with TempDir(homedir):
            file_crfs = read_crf_data('reservoir_CRFs.txt')

        known_dates = [rf.date_start for rf in
                       session.query(Crf).order_by(Crf.id).all()]
        for rf in file_crfs:
            if rf.date_start not in known_dates:  # skip CRFs already in the db
                known_dates.append(rf.date_start)  # also dedupe within this load
                session.merge(rf)

        session.commit()
        session.close()
        engine.dispose()
        await asyncio.sleep(sleeptime)
async def integrate_runs(directory, sleeptime):
    """
    Integrate every GcRun that does not yet have a Datum attached.

    Each iteration: query runs with no data, match each to its CRF, call
    ``run.integrate()``, and merge any resulting Datum whose end date is not
    already in the db.  Sleeps ``sleeptime`` seconds between iterations.

    Fixes over the previous version: ``len(data) is 0`` identity comparison
    (a SyntaxWarning on Python 3.8+) replaced with a truthiness test, and the
    duplicated commit/close/dispose/sleep teardown in both branches unified.
    """
    from datetime import datetime
    while True:
        print('Running integrate_runs()')
        from reservoir_nmhc import find_crf, GcRun, Datum, Crf, connect_to_reservoir_db
        engine, session, Base = connect_to_reservoir_db('sqlite:///reservoir.sqlite', directory)
        Base.metadata.create_all(engine)

        GcRuns = (session.query(GcRun)
                  .filter(GcRun.data_id == None)
                  .order_by(GcRun.id).all())  # all un-integrated runs
        Crfs = session.query(Crf).order_by(Crf.id).all()

        data = []  # match all runs with available CRFs, then integrate
        for run in GcRuns:
            run.crfs = find_crf(Crfs, run.date_end)
            session.commit()  # persist the crf assignment before integrating
            data.append(run.integrate())

        data_in_db = session.query(Datum).order_by(Datum.id).all()
        data_dates = [d.date_end for d in data_in_db]

        if not data:
            print(f'No data to integrate found at {datetime.now()}')
        else:
            for datum in data:
                if datum is not None and datum.date_end not in data_dates:  # prevent duplicates in db
                    data_dates.append(datum.date_end)  # prevent duplicates on this load
                    session.merge(datum)
                    print(f'Data {datum} was added!')

        session.commit()
        session.close()
        engine.dispose()
        await asyncio.sleep(sleeptime)
async def plot_new_data(directory, plotdir, sleeptime):
    """Periodically plot recently integrated mixing ratios into ``plotdir``.

    Date limits have been tinkered with to correctly plot provided data.

    :param directory: working directory passed to connect_to_reservoir_db
    :param plotdir: directory the plots are written into (via TempDir)
    :param sleeptime: seconds to sleep between passes
    """
    days_to_plot = 3
    while True:
        print('Running plot_new_data()')
        # NOTE(review): data_len is reset to 0 on every pass, so the
        # `len(dates) != data_len` check below re-plots whenever any data
        # exists; confirm whether it was meant to persist across iterations.
        data_len = 0
        from reservoir_nmhc import connect_to_reservoir_db, TempDir, get_dates_mrs, res_nmhc_plot
        from datetime import datetime
        import datetime as dt
        engine, session, Base = connect_to_reservoir_db('sqlite:///reservoir.sqlite', directory)
        Base.metadata.create_all(engine)
        # now = datetime.now() # save 'now' as the start of making plots
        # date_ago = now - dt.timedelta(days=days_to_plot+1) # set a static limit for retrieving data at beginning of plot cycle
        date_ago = datetime(2019, 1, 15)  # fake value for provided data
        date_limits = dict()
        date_limits['right'] = datetime(2019, 1, 28).replace(hour=0, minute=0, second=0, microsecond=0) + dt.timedelta(days=1)  # end of last day
        date_limits['left'] = date_limits['right'] - dt.timedelta(days=days_to_plot)
        ## For use at runtime:
        # date_limits['right'] = now.replace(hour=0, minute=0, second=0, microsecond=0) + dt.timedelta(days=1) # end of last day
        # date_limits['left'] = date_limits['right'] - dt.timedelta(days=days_to_plot)
        major_ticks = [date_limits['right'] - dt.timedelta(days=x) for x in range(0, days_to_plot+1)]  # make dynamic ticks
        minor_ticks = [date_limits['right'] - dt.timedelta(hours=x*6) for x in range(0, days_to_plot*4+1)]
        try:
            _, dates = get_dates_mrs(session, 'ethane', date_start=date_ago)  # get dates for data length
        except ValueError:
            print('No new data was found. Plots were not created.')
            session.close()
            engine.dispose()
            await asyncio.sleep(sleeptime)
            continue
        if len(dates) != data_len:
            with TempDir(plotdir):  ## PLOT ethane and propane
                ethane_mrs, ethane_dates = get_dates_mrs(session, 'ethane', date_start=date_ago)
                propane_mrs, propane_dates = get_dates_mrs(session, 'propane', date_start=date_ago)
                res_nmhc_plot(None, ({'Ethane': [ethane_dates, ethane_mrs],
                                      'Propane': [propane_dates, propane_mrs]}),
                              limits={'right': date_limits.get('right', None),
                                      'left': date_limits.get('left', None),
                                      'bottom': 0},
                              major_ticks=major_ticks,
                              minor_ticks=minor_ticks)
            with TempDir(plotdir):  ## PLOT i-butane, n-butane, acetylene
                ibut_mrs, ibut_dates = get_dates_mrs(session, 'i-butane', date_start=date_ago)
                nbut_mrs, nbut_dates = get_dates_mrs(session, 'n-butane', date_start=date_ago)
                acet_mrs, acet_dates = get_dates_mrs(session, 'acetylene', date_start=date_ago)
                res_nmhc_plot(None, ({'i-Butane': [ibut_dates, ibut_mrs],
                                      'n-Butane': [nbut_dates, nbut_mrs],
                                      'Acetylene': [acet_dates, acet_mrs]}),
                              limits={'right': date_limits.get('right', None),
                                      'left': date_limits.get('left', None),
                                      'bottom': 0},
                              major_ticks=major_ticks,
                              minor_ticks=minor_ticks)
            with TempDir(plotdir):  ## PLOT i-pentane and n-pentane, & ratio
                ipent_mrs, ipent_dates = get_dates_mrs(session, 'i-pentane', date_start=date_ago)
                npent_mrs, npent_dates = get_dates_mrs(session, 'n-pentane', date_start=date_ago)
                inpent_ratio = []
                for i, n in zip(ipent_mrs, npent_mrs):
                    # BUG FIX: comparisons used `== None`; singletons must be
                    # compared with `is` (PEP 8). Same branches are taken.
                    if n == 0 or n is None:
                        inpent_ratio.append(None)
                    elif i is None:
                        inpent_ratio.append(None)
                    else:
                        inpent_ratio.append(i/n)
                # NOTE(review): every other plot call passes None as the first
                # argument; confirm that passing `dates` here is intentional.
                res_nmhc_plot(dates, ({'i-Pentane': [ipent_dates, ipent_mrs],
                                       'n-Pentane': [npent_dates, npent_mrs]}),
                              limits={'right': date_limits.get('right', None),
                                      'left': date_limits.get('left', None),
                                      'bottom': 0},
                              major_ticks=major_ticks,
                              minor_ticks=minor_ticks)
                res_nmhc_plot(None, ({'i/n Pentane ratio': [ipent_dates, inpent_ratio]}),
                              limits={'right': date_limits.get('right', None),
                                      'left': date_limits.get('left', None),
                                      'bottom': 0,
                                      'top': 3},
                              major_ticks=major_ticks,
                              minor_ticks=minor_ticks)
            with TempDir(plotdir):  ## PLOT benzene and toluene
                benz_mrs, benz_dates = get_dates_mrs(session, 'benzene', date_start=date_ago)
                tol_mrs, tol_dates = get_dates_mrs(session, 'toluene', date_start=date_ago)
                res_nmhc_plot(None, ({'Benzene': [benz_dates, benz_mrs],
                                      'Toluene': [tol_dates, tol_mrs]}),
                              limits={'right': date_limits.get('right', None),
                                      'left': date_limits.get('left', None),
                                      'bottom': 0},
                              major_ticks=major_ticks,
                              minor_ticks=minor_ticks)
            print('New data plots created!')
            session.close()
            engine.dispose()
            await asyncio.sleep(sleeptime)
        else:
            print('New data plots were not created, there was no new data.')
            session.close()
            engine.dispose()
            await asyncio.sleep(sleeptime)
# Return to the home directory, then hand all periodic jobs to the event loop.
os.chdir(homedir)
loop = asyncio.get_event_loop()
# Schedule every background coroutine; creation order matches the original.
startup_coroutines = (
    check_load_logs(locallogdir, homedir, 5),
    check_load_pas('NMHC_PA.LOG', homedir, 5),
    create_gc_runs(homedir, 5),
    load_crfs(homedir, 5),
    integrate_runs(homedir, 5),
    plot_new_data(homedir, plotdir, 5),
)
for coroutine in startup_coroutines:
    loop.create_task(coroutine)
loop.run_forever()
| 39.224181 | 145 | 0.573208 |
eb9c65b74f493eb3ccd4d8c4616662c542319ff2 | 537 | py | Python | setup.py | Enchan1207/CANClientLib | 790001ac6052f6c655b11cdf758607d24c27cea9 | [
"MIT"
] | null | null | null | setup.py | Enchan1207/CANClientLib | 790001ac6052f6c655b11cdf758607d24c27cea9 | [
"MIT"
] | 3 | 2021-08-11T22:06:12.000Z | 2021-08-19T09:16:10.000Z | setup.py | Enchan1207/CANClientLib | 790001ac6052f6c655b11cdf758607d24c27cea9 | [
"MIT"
] | null | null | null | #
#
# Packaging configuration read by pip to set up this library.
# (Comment translated from Japanese.)
#
# Style fix: one import per line (PEP 8); previously `import setuptools, os`.
import glob
import os

import setuptools

setuptools.setup(
    name="CANClientLib",
    version="0.2.0",
    license="MIT Licence",
    description="CAN Client library",
    author="Enchan1207",
    url="https://github.com/Enchan1207/CANClientLib",
    packages=setuptools.find_packages("src"),
    install_requires=["python-can"],
    package_dir={"": "src"},
    # Expose every top-level module found under src/ as a py_module.
    py_modules=[os.path.splitext(os.path.basename(path))[0] for path in glob.glob('src/*.py')],
    include_package_data=True,
    zip_safe=False
)
| 25.571429 | 95 | 0.689013 |
5ff55118bf185e69061c23958a95010a5643ce1e | 59 | py | Python | optimizer/__init__.py | hslee1539/NN | 8b60a858c1137785ef684dd548b008bcc46b8d6d | [
"MIT"
] | null | null | null | optimizer/__init__.py | hslee1539/NN | 8b60a858c1137785ef684dd548b008bcc46b8d6d | [
"MIT"
] | null | null | null | optimizer/__init__.py | hslee1539/NN | 8b60a858c1137785ef684dd548b008bcc46b8d6d | [
"MIT"
] | null | null | null | from . import interface_module
from .SGD_module import SGD
| 19.666667 | 30 | 0.830508 |
491acd05a4ac111eed223e6c0397c4c3693b88bc | 1,501 | py | Python | calamari_ocr/ocr/savedmodel/migrations/version0to1.py | zhangjiulong/calamari | ecd29d46f807a3ad406f0a65bdc3283e358c3585 | [
"Apache-2.0"
] | 1 | 2021-03-24T09:32:56.000Z | 2021-03-24T09:32:56.000Z | calamari_ocr/ocr/savedmodel/migrations/version0to1.py | zhangjiulong/calamari | ecd29d46f807a3ad406f0a65bdc3283e358c3585 | [
"Apache-2.0"
] | null | null | null | calamari_ocr/ocr/savedmodel/migrations/version0to1.py | zhangjiulong/calamari | ecd29d46f807a3ad406f0a65bdc3283e358c3585 | [
"Apache-2.0"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
def rename(checkpoint, replace_from, replace_to, add_prefix, dry_run, force_prefix=False):
    """Rename variables inside a TF1 checkpoint, in place.

    Each variable name has ``replace_from`` substituted with ``replace_to``
    (only when both are non-None) and is optionally prefixed with
    ``add_prefix``. With ``dry_run`` the planned renames are only logged;
    otherwise the rebuilt variables are saved back over ``checkpoint``.

    :param checkpoint: path/prefix of the TF checkpoint to rewrite
    :param replace_from: substring to replace (skipped if it or replace_to is None)
    :param replace_to: replacement substring
    :param add_prefix: prefix to prepend to each (renamed) variable name
    :param dry_run: if True, log what would happen without writing anything
    :param force_prefix: if True, prepend add_prefix even when the name
        already starts with it
    """
    import tensorflow as tf
    tf.compat.v1.reset_default_graph()
    with tf.compat.v1.Session() as sess:
        for var_name, _ in tf.compat.v1.train.list_variables(checkpoint):
            # Load the variable
            var = tf.compat.v1.train.load_variable(checkpoint, var_name)
            # Set the new name
            new_name = var_name
            if None not in [replace_from, replace_to]:
                new_name = new_name.replace(replace_from, replace_to)
            if add_prefix:
                if force_prefix or not new_name.startswith(add_prefix):
                    # force prefix or add prefix if it does not exist yet
                    new_name = add_prefix + new_name
            if dry_run:
                logger.info(f'{var_name} would be renamed to {new_name}.')
            else:
                if var_name == new_name:
                    logger.info(f'No change for {var_name}')
                else:
                    logger.info(f'Renaming {var_name} to {new_name}.')
                # Rename the variable by recreating it in the default graph
                # under the new name; the Saver below writes it out.
                tf.Variable(var, name=new_name)
        if not dry_run:
            # Save the variables
            saver = tf.compat.v1.train.Saver()
            sess.run(tf.compat.v1.global_variables_initializer())
            saver.save(sess, checkpoint)
    tf.compat.v1.reset_default_graph()
660625c9bf75613f59799b778d95dda246e34a79 | 18,902 | py | Python | python/paddle/fluid/tests/unittests/test_deform_conv2d.py | Wangzheee/NX_Paddle | 98498d650ca0945d2d045e7e8353079600d4bfb2 | [
"Apache-2.0"
] | 2 | 2021-09-07T05:00:00.000Z | 2021-12-09T06:56:19.000Z | python/paddle/fluid/tests/unittests/test_deform_conv2d.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_deform_conv2d.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 1 | 2021-12-09T06:56:27.000Z | 2021-12-09T06:56:27.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
import paddle.nn.initializer as I
import numpy as np
import unittest
from unittest import TestCase
class TestDeformConv2D(TestCase):
    """Checks that the static-graph ``fluid.layers.deformable_conv`` (v1 and
    v2/modulated) and the dygraph ``paddle.vision.ops.DeformConv2D`` layer
    produce identical outputs for the same random weights and inputs.
    """
    # Shared fixture constants for all configuration subclasses.
    batch_size = 4
    spatial_shape = (16, 16)
    dtype = "float32"

    def setUp(self):
        """Default configuration; subclasses override these attributes."""
        self.in_channels = 3
        self.out_channels = 5
        self.kernel_size = [3, 3]
        self.padding = [0, 0]
        self.stride = [1, 1]
        self.dilation = [1, 1]
        self.groups = 1
        self.no_bias = True

    def prepare(self):
        """Build random weight/bias/input/offset/mask arrays matching the
        configured convolution geometry."""
        if isinstance(self.kernel_size, int):
            filter_shape = (self.kernel_size, ) * 2
        else:
            filter_shape = tuple(self.kernel_size)
        self.filter_shape = filter_shape

        self.weight = np.random.uniform(
            -1, 1, (self.out_channels, self.in_channels // self.groups
                    ) + filter_shape).astype(self.dtype)
        if not self.no_bias:
            self.bias = np.random.uniform(-1, 1, (
                self.out_channels, )).astype(self.dtype)

        # Standard conv output-size formula.
        def out_size(in_size, pad_size, dilation_size, kernel_size,
                     stride_size):
            return (in_size + 2 * pad_size -
                    (dilation_size * (kernel_size - 1) + 1)) / stride_size + 1

        out_h = int(
            out_size(self.spatial_shape[0], self.padding[0], self.dilation[0],
                     self.kernel_size[0], self.stride[0]))
        out_w = int(
            out_size(self.spatial_shape[1], self.padding[1], self.dilation[1],
                     self.kernel_size[1], self.stride[1]))
        out_shape = (out_h, out_w)

        self.input_shape = (self.batch_size, self.in_channels
                            ) + self.spatial_shape

        # Offset carries (dy, dx) per kernel position, hence the factor 2.
        self.offset_shape = (self.batch_size, 2 * filter_shape[0] *
                             filter_shape[1]) + out_shape

        self.mask_shape = (self.batch_size, filter_shape[0] * filter_shape[1]
                           ) + out_shape

        self.input = np.random.uniform(-1, 1,
                                       self.input_shape).astype(self.dtype)

        self.offset = np.random.uniform(-1, 1,
                                        self.offset_shape).astype(self.dtype)

        self.mask = np.random.uniform(-1, 1, self.mask_shape).astype(self.dtype)

    def static_graph_case_dcn(self):
        """Run both deformable-conv variants through the static graph API and
        return their numpy outputs (v1 without mask, v2 with mask)."""
        main = paddle.static.Program()
        start = paddle.static.Program()
        paddle.enable_static()
        with paddle.static.program_guard(main, start):
            x = paddle.static.data(
                "input", (-1, self.in_channels, -1, -1), dtype=self.dtype)
            offset = paddle.static.data(
                "offset",
                (-1, 2 * self.filter_shape[0] * self.filter_shape[1], -1, -1),
                dtype=self.dtype)
            mask = paddle.static.data(
                "mask",
                (-1, self.filter_shape[0] * self.filter_shape[1], -1, -1),
                dtype=self.dtype)

            # v1: unmodulated (modulated=False, no mask).
            y_v1 = paddle.fluid.layers.deformable_conv(
                input=x,
                offset=offset,
                mask=None,
                num_filters=self.out_channels,
                filter_size=self.filter_shape,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
                deformable_groups=1,
                im2col_step=1,
                param_attr=I.Assign(self.weight),
                bias_attr=False if self.no_bias else I.Assign(self.bias),
                modulated=False)

            # v2: modulated variant that also consumes the mask.
            y_v2 = paddle.fluid.layers.deformable_conv(
                input=x,
                offset=offset,
                mask=mask,
                num_filters=self.out_channels,
                filter_size=self.filter_shape,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
                deformable_groups=1,
                im2col_step=1,
                param_attr=I.Assign(self.weight),
                bias_attr=False if self.no_bias else I.Assign(self.bias))

        exe = paddle.static.Executor(self.place)
        exe.run(start)
        out_v1, out_v2 = exe.run(main,
                                 feed={
                                     "input": self.input,
                                     "offset": self.offset,
                                     "mask": self.mask
                                 },
                                 fetch_list=[y_v1, y_v2])
        return out_v1, out_v2

    def dygraph_case_dcn(self):
        """Run the same configuration through the dygraph DeformConv2D layer."""
        paddle.disable_static()
        x = paddle.to_tensor(self.input)
        offset = paddle.to_tensor(self.offset)
        mask = paddle.to_tensor(self.mask)
        # NOTE(review): `bias` is built but never used below -- the layer
        # receives its bias via bias_attr instead.
        bias = None if self.no_bias else paddle.to_tensor(self.bias)

        deform_conv2d = paddle.vision.ops.DeformConv2D(
            in_channels=self.in_channels,
            out_channels=self.out_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
            weight_attr=I.Assign(self.weight),
            bias_attr=False if self.no_bias else I.Assign(self.bias))

        y_v1 = deform_conv2d(x, offset)
        y_v2 = deform_conv2d(x, offset, mask)

        out_v1 = y_v1.numpy()
        out_v2 = y_v2.numpy()

        return out_v1, out_v2

    def _test_identity(self):
        """Assert static-graph and dygraph outputs match for both variants."""
        self.prepare()
        static_dcn_v1, static_dcn_v2 = self.static_graph_case_dcn()
        dy_dcn_v1, dy_dcn_v2 = self.dygraph_case_dcn()
        np.testing.assert_array_almost_equal(static_dcn_v1, dy_dcn_v1)
        np.testing.assert_array_almost_equal(static_dcn_v2, dy_dcn_v2)

    def test_identity(self):
        """Run the identity check on CPU, and on GPU when available."""
        self.place = paddle.CPUPlace()
        self._test_identity()

        if paddle.is_compiled_with_cuda():
            self.place = paddle.CUDAPlace(0)
            self._test_identity()
class TestDeformConv2DFunctional(TestCase):
    """Checks that three code paths agree: the legacy static-graph
    ``fluid.layers.deformable_conv``, the dygraph functional
    ``paddle.vision.ops.deform_conv2d``, and the same functional op run in
    a static graph (``new_api_static_graph_case_dcn``).
    """
    # Shared fixture constants for all configuration subclasses.
    batch_size = 4
    spatial_shape = (16, 16)
    dtype = "float32"

    def setUp(self):
        """Default configuration; subclasses override these attributes."""
        self.in_channels = 3
        self.out_channels = 5
        self.kernel_size = [3, 3]
        self.padding = [0, 0]
        self.stride = [1, 1]
        self.dilation = [1, 1]
        self.groups = 1
        self.no_bias = True

    def prepare(self):
        """Build random weight/bias/input/offset/mask arrays matching the
        configured convolution geometry."""
        if isinstance(self.kernel_size, int):
            filter_shape = (self.kernel_size, ) * 2
        else:
            filter_shape = tuple(self.kernel_size)
        self.filter_shape = filter_shape

        self.weight = np.random.uniform(
            -1, 1, (self.out_channels, self.in_channels // self.groups
                    ) + filter_shape).astype(self.dtype)
        if not self.no_bias:
            self.bias = np.random.uniform(-1, 1, (
                self.out_channels, )).astype(self.dtype)

        # Standard conv output-size formula.
        def out_size(in_size, pad_size, dilation_size, kernel_size,
                     stride_size):
            return (in_size + 2 * pad_size -
                    (dilation_size * (kernel_size - 1) + 1)) / stride_size + 1

        out_h = int(
            out_size(self.spatial_shape[0], self.padding[0], self.dilation[0],
                     self.kernel_size[0], self.stride[0]))
        out_w = int(
            out_size(self.spatial_shape[1], self.padding[1], self.dilation[1],
                     self.kernel_size[1], self.stride[1]))
        out_shape = (out_h, out_w)

        self.input_shape = (self.batch_size, self.in_channels
                            ) + self.spatial_shape

        # Offset carries (dy, dx) per kernel position, hence the factor 2.
        self.offset_shape = (self.batch_size, 2 * filter_shape[0] *
                             filter_shape[1]) + out_shape

        self.mask_shape = (self.batch_size, filter_shape[0] * filter_shape[1]
                           ) + out_shape

        self.input = np.random.uniform(-1, 1,
                                       self.input_shape).astype(self.dtype)

        self.offset = np.random.uniform(-1, 1,
                                        self.offset_shape).astype(self.dtype)

        self.mask = np.random.uniform(-1, 1, self.mask_shape).astype(self.dtype)

    def static_graph_case_dcn(self):
        """Legacy static-graph reference: run both deformable-conv variants
        and return their numpy outputs (v1 without mask, v2 with mask)."""
        main = paddle.static.Program()
        start = paddle.static.Program()
        paddle.enable_static()
        with paddle.static.program_guard(main, start):
            x = paddle.static.data(
                "input", (-1, self.in_channels, -1, -1), dtype=self.dtype)
            offset = paddle.static.data(
                "offset",
                (-1, 2 * self.filter_shape[0] * self.filter_shape[1], -1, -1),
                dtype=self.dtype)
            mask = paddle.static.data(
                "mask",
                (-1, self.filter_shape[0] * self.filter_shape[1], -1, -1),
                dtype=self.dtype)

            # v1: unmodulated (modulated=False, no mask).
            y_v1 = paddle.fluid.layers.deformable_conv(
                input=x,
                offset=offset,
                mask=None,
                num_filters=self.out_channels,
                filter_size=self.filter_shape,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
                deformable_groups=1,
                im2col_step=1,
                param_attr=I.Assign(self.weight),
                bias_attr=False if self.no_bias else I.Assign(self.bias),
                modulated=False)

            # v2: modulated variant that also consumes the mask.
            y_v2 = paddle.fluid.layers.deformable_conv(
                input=x,
                offset=offset,
                mask=mask,
                num_filters=self.out_channels,
                filter_size=self.filter_shape,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
                deformable_groups=1,
                im2col_step=1,
                param_attr=I.Assign(self.weight),
                bias_attr=False if self.no_bias else I.Assign(self.bias))

        exe = paddle.static.Executor(self.place)
        exe.run(start)
        out_v1, out_v2 = exe.run(main,
                                 feed={
                                     "input": self.input,
                                     "offset": self.offset,
                                     "mask": self.mask
                                 },
                                 fetch_list=[y_v1, y_v2])
        return out_v1, out_v2

    def dygraph_case_dcn(self):
        """Run the functional paddle.vision.ops.deform_conv2d in dygraph mode."""
        paddle.disable_static()
        x = paddle.to_tensor(self.input)
        offset = paddle.to_tensor(self.offset)
        mask = paddle.to_tensor(self.mask)
        weight = paddle.to_tensor(self.weight)
        bias = None if self.no_bias else paddle.to_tensor(self.bias)

        y_v1 = paddle.vision.ops.deform_conv2d(
            x=x,
            offset=offset,
            weight=weight,
            bias=bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups, )

        y_v2 = paddle.vision.ops.deform_conv2d(
            x=x,
            offset=offset,
            mask=mask,
            weight=weight,
            bias=bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups, )

        out_v1 = y_v1.numpy()
        out_v2 = y_v2.numpy()

        return out_v1, out_v2

    def new_api_static_graph_case_dcn(self):
        """Run the functional op inside a static graph, feeding weight (and
        bias, when used) as graph inputs."""
        main = paddle.static.Program()
        start = paddle.static.Program()
        paddle.enable_static()
        with paddle.static.program_guard(main, start):
            x = paddle.static.data(
                "input", (-1, self.in_channels, -1, -1), dtype=self.dtype)
            offset = paddle.static.data(
                "offset",
                (-1, 2 * self.filter_shape[0] * self.filter_shape[1], -1, -1),
                dtype=self.dtype)
            mask = paddle.static.data(
                "mask",
                (-1, self.filter_shape[0] * self.filter_shape[1], -1, -1),
                dtype=self.dtype)

            weight = paddle.static.data(
                "weight", list(self.weight.shape), dtype=self.dtype)

            if not self.no_bias:
                bias = paddle.static.data("bias", [-1], dtype=self.dtype)

            y_v1 = paddle.vision.ops.deform_conv2d(
                x=x,
                offset=offset,
                weight=weight,
                bias=None if self.no_bias else bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups, )

            y_v2 = paddle.vision.ops.deform_conv2d(
                x=x,
                offset=offset,
                mask=mask,
                weight=weight,
                bias=None if self.no_bias else bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups, )

        exe = paddle.static.Executor(self.place)
        exe.run(start)
        feed_dict = {
            "input": self.input,
            "offset": self.offset,
            "mask": self.mask,
            "weight": self.weight
        }
        if not self.no_bias:
            feed_dict["bias"] = self.bias

        out_v1, out_v2 = exe.run(main, feed=feed_dict, fetch_list=[y_v1, y_v2])
        return out_v1, out_v2

    def _test_identity(self):
        """Assert all three execution paths agree for both variants."""
        self.prepare()
        static_dcn_v1, static_dcn_v2 = self.static_graph_case_dcn()
        dy_dcn_v1, dy_dcn_v2 = self.dygraph_case_dcn()
        new_static_dcn_v1, new_static_dcn_v2 = self.new_api_static_graph_case_dcn(
        )
        np.testing.assert_array_almost_equal(static_dcn_v1, dy_dcn_v1)
        np.testing.assert_array_almost_equal(static_dcn_v2, dy_dcn_v2)
        np.testing.assert_array_almost_equal(static_dcn_v1, new_static_dcn_v1)
        np.testing.assert_array_almost_equal(static_dcn_v2, new_static_dcn_v2)

    def test_identity(self):
        """Run the identity check on CPU, and on GPU when available."""
        self.place = paddle.CPUPlace()
        self._test_identity()

        if paddle.is_compiled_with_cuda():
            self.place = paddle.CUDAPlace(0)
            self._test_identity()
# testcases for DeformConv2D
class TestDeformConv2DWithPadding(TestDeformConv2D):
    """Layer identity check with symmetric padding of 2, no bias."""

    def setUp(self):
        self.no_bias = True
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [2, 2]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DWithBias(TestDeformConv2D):
    """Layer identity check with padding 2 and a bias term enabled."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [2, 2]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DWithAsynPadding(TestDeformConv2D):
    """Layer identity check with asymmetric padding [1, 2]."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [1, 2]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DWithDilation(TestDeformConv2D):
    """Layer identity check with dilation [3, 3]."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [3, 3]
        self.stride = [1, 1]
        self.padding = [1, 1]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DWithStride(TestDeformConv2D):
    """Layer identity check with stride [2, 2]."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [2, 2]
        self.padding = [1, 1]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DWithGroups(TestDeformConv2D):
    """Layer identity check with grouped convolution (groups == channels)."""

    def setUp(self):
        self.no_bias = False
        self.groups = 5
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [1, 1]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 5
# testcases for deform_conv2d
class TestDeformConv2DFunctionalWithPadding(TestDeformConv2DFunctional):
    """Functional identity check with symmetric padding of 2, no bias."""

    def setUp(self):
        self.no_bias = True
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [2, 2]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DFunctionalWithBias(TestDeformConv2DFunctional):
    """Functional identity check with padding 2 and a bias term enabled."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [2, 2]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DFunctionalWithAsynPadding(TestDeformConv2DFunctional):
    """Functional identity check with asymmetric padding [1, 2]."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [1, 2]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DFunctionalWithDilation(TestDeformConv2DFunctional):
    """Functional identity check with dilation [3, 3]."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [3, 3]
        self.stride = [1, 1]
        self.padding = [1, 1]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DFunctionalWithStride(TestDeformConv2DFunctional):
    """Functional identity check with stride [2, 2]."""

    def setUp(self):
        self.no_bias = False
        self.groups = 1
        self.dilation = [1, 1]
        self.stride = [2, 2]
        self.padding = [1, 1]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 3
class TestDeformConv2DFunctionalWithGroups(TestDeformConv2DFunctional):
    """Functional identity check with grouped convolution (groups == channels)."""

    def setUp(self):
        self.no_bias = False
        self.groups = 5
        self.dilation = [1, 1]
        self.stride = [1, 1]
        self.padding = [1, 1]
        self.kernel_size = [3, 3]
        self.out_channels = 5
        self.in_channels = 5
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 33.813953 | 82 | 0.55063 |
2acfe184bde1591671d2944819a27e45a1203e53 | 13,601 | py | Python | release/scripts/addons/mesh_extra_tools/mesh_check.py | noorbeast/BlenderSource | 65ebecc5108388965678b04b43463b85f6c69c1d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2019-03-20T13:10:46.000Z | 2019-05-15T20:00:31.000Z | engine/2.80/scripts/addons/mesh_extra_tools/mesh_check.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | engine/2.80/scripts/addons/mesh_extra_tools/mesh_check.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | # gpl author: Pistiwique
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "Mesh Check BGL edition",
    "description": "Display the triangles and ngons of the mesh",
    "author": "Pistiwique",
    "version": (1, 0, 1),
    "blender": (2, 75, 0),
    "location": "3D View(s) > Properties > Shading",
    "category": "3D View"
    }
import bpy
import bmesh
from bgl import (
glBegin,
glLineWidth,
glColor4f,
glVertex3f,
glEnd,
GL_LINES,
glEnable,
glDisable,
GL_DEPTH_TEST,
GL_BLEND,
GL_POLYGON
)
from mathutils.geometry import tessellate_polygon as tessellate
from bpy.types import (
Operator,
PropertyGroup,
)
from bpy.props import (
BoolProperty,
EnumProperty,
FloatProperty,
FloatVectorProperty,
PointerProperty,
)
# -- Globals -- #
# One-element lists act as mutable containers so the draw callback and the
# property update callback can share state through module-level names.
mesh_check_handle = []                          # active SpaceView3D draw handler, if any
draw_enabled = [False]                          # master switch read by the callback
edge_width = [1.0]                              # edge line width in pixels
face_opacity = [0.2]                            # alpha used for face fills
edges_tri_color = [(1.0, 1.0, 0.0, 1)]          # RGBA for triangle edges
faces_tri_color = [(1.0, 1.0, 0.0, face_opacity[0])]   # RGBA for triangle fills
edges_ngons_color = [(1.0, 0.0, 0.0, 1.0)]      # RGBA for ngon edges
faces_ngons_color = [(1.0, 0.0, 0.0, face_opacity[0])]  # RGBA for ngon fills
bm_old = [None]                                 # cached BMesh between redraws
finer_lines = [False]                           # thinner lines in non-occlude mode
def draw_poly(points):
    """Emit one glVertex3f call for every 3D point in *points*."""
    for point in points:
        glVertex3f(point[0], point[1], point[2])
def mesh_check_draw_callback():
    """SpaceView3D POST_VIEW callback: overlay triangles and ngons.

    Triangles are drawn in the tri colors, ngons (faces with >4 verts) in
    the ngon colors. In non-occlude mode the geometry is drawn once with
    depth testing disabled (optionally with finer lines), then again with
    depth testing and a small normal offset to avoid z-fighting.
    """
    obj = bpy.context.object
    if obj and obj.type == 'MESH':
        if draw_enabled[0]:
            mesh = obj.data
            matrix_world = obj.matrix_world

            glLineWidth(edge_width[0])

            if bpy.context.mode == 'EDIT_MESH':
                use_occlude = True

                # Reuse the cached BMesh between redraws when still valid.
                if bm_old[0] is None or not bm_old[0].is_valid:
                    bm = bm_old[0] = bmesh.from_edit_mesh(mesh)
                else:
                    bm = bm_old[0]

                no_depth = not bpy.context.space_data.use_occlude_geometry

                if no_depth:
                    # First pass: draw through geometry (depth test off).
                    glDisable(GL_DEPTH_TEST)
                    use_occlude = False

                    if finer_lines[0]:
                        glLineWidth(edge_width[0] / 4.0)
                        use_occlude = True

                    for face in bm.faces:
                        if len([verts for verts in face.verts]) == 3:
                            faces = [matrix_world * vert.co for vert in face.verts]
                            glColor4f(*faces_tri_color[0])
                            glEnable(GL_BLEND)
                            glBegin(GL_POLYGON)
                            draw_poly(faces)
                            glEnd()
                            for edge in face.edges:
                                if edge.is_valid:
                                    edges = [matrix_world * vert.co for vert in edge.verts]
                                    glColor4f(*edges_tri_color[0])
                                    glBegin(GL_LINES)
                                    draw_poly(edges)
                                    glEnd()

                        elif len([verts for verts in face.verts]) > 4:
                            # Ngons cannot be drawn as a single GL_POLYGON
                            # reliably, so tessellate them into triangles.
                            new_faces = []
                            faces = []
                            coords = [v.co for v in face.verts]
                            indices = [v.index for v in face.verts]
                            for pol in tessellate([coords]):
                                new_faces.append([indices[i] for i in pol])

                            for f in new_faces:
                                faces.append(
                                    [((matrix_world * bm.verts[i].co)[0] + face.normal.x * 0.001,
                                      (matrix_world * bm.verts[i].co)[1] + face.normal.y * 0.001,
                                      (matrix_world * bm.verts[i].co)[2] + face.normal.z * 0.001)
                                     for i in f]
                                )

                            for f in faces:
                                glColor4f(*faces_ngons_color[0])
                                glEnable(GL_BLEND)
                                glBegin(GL_POLYGON)
                                draw_poly(f)
                                glEnd()

                            for edge in face.edges:
                                if edge.is_valid:
                                    edges = [matrix_world * vert.co for vert in edge.verts]
                                    glColor4f(*edges_ngons_color[0])
                                    glBegin(GL_LINES)
                                    draw_poly(edges)
                                    glEnd()

                    # Restore GL state after the no-depth pass.
                    glDisable(GL_BLEND)
                    glColor4f(0.0, 0.0, 0.0, 1.0)
                    glLineWidth(edge_width[0])
                    glEnable(GL_DEPTH_TEST)

                if use_occlude:
                    # Occluded pass: vertices are pushed slightly along the
                    # face normal (0.001) to avoid z-fighting with the mesh.
                    for face in bm.faces:
                        if len([verts for verts in face.verts]) == 3:
                            faces = []
                            for vert in face.verts:
                                vert_face = matrix_world * vert.co
                                faces.append(
                                    (vert_face[0] + face.normal.x * 0.001,
                                     vert_face[1] + face.normal.y * 0.001,
                                     vert_face[2] + face.normal.z * 0.001)
                                )
                            glColor4f(*faces_tri_color[0])
                            glEnable(GL_BLEND)
                            glBegin(GL_POLYGON)
                            draw_poly(faces)
                            glEnd()
                            for edge in face.edges:
                                if edge.is_valid:
                                    edges = []
                                    for vert in edge.verts:
                                        vert_edge = matrix_world * vert.co
                                        edges.append(
                                            (vert_edge[0] + face.normal.x * 0.001,
                                             vert_edge[1] + face.normal.y * 0.001,
                                             vert_edge[2] + face.normal.z * 0.001)
                                        )
                                    glColor4f(*edges_tri_color[0])
                                    glBegin(GL_LINES)
                                    draw_poly(edges)
                                    glEnd()

                        elif len([verts for verts in face.verts]) > 4:
                            new_faces = []
                            faces = []
                            coords = [v.co for v in face.verts]
                            indices = [v.index for v in face.verts]
                            for pol in tessellate([coords]):
                                new_faces.append([indices[i] for i in pol])

                            for f in new_faces:
                                faces.append([
                                    ((matrix_world * bm.verts[i].co)[0] + face.normal.x * 0.001,
                                     (matrix_world * bm.verts[i].co)[1] + face.normal.y * 0.001,
                                     (matrix_world * bm.verts[i].co)[2] + face.normal.z * 0.001)
                                    for i in f]
                                )

                            for f in faces:
                                glColor4f(*faces_ngons_color[0])
                                glEnable(GL_BLEND)
                                glBegin(GL_POLYGON)
                                draw_poly(f)
                                glEnd()

                            for edge in face.edges:
                                if edge.is_valid:
                                    edges = []
                                    for vert in edge.verts:
                                        vert_edge = matrix_world * vert.co
                                        edges.append(
                                            (vert_edge[0] + face.normal.x * 0.001,
                                             vert_edge[1] + face.normal.y * 0.001,
                                             vert_edge[2] + face.normal.z * 0.001)
                                        )
                                    glColor4f(*edges_ngons_color[0])
                                    glBegin(GL_LINES)
                                    draw_poly(edges)
                                    glEnd()

                    # Restore GL state after the occluded pass.
                    glDisable(GL_BLEND)
                    glColor4f(0.0, 0.0, 0.0, 1.0)
def updateBGLData(self, context):
    """Property update callback: sync the module-level BGL draw state
    with the add-on's PropertyGroup settings."""
    if not (self.mesh_check_use and self.display_faces):
        # Overlay switched off: stop the draw callback from rendering.
        draw_enabled[0] = False
        return

    # Overlay active: the check only makes sense in edit mode.
    bpy.ops.object.mode_set(mode='EDIT')
    draw_enabled[0] = True
    edge_width[0] = self.edge_width
    finer_lines[0] = self.finer_lines_behind_use
    face_opacity[0] = self.face_opacity

    tri = self.custom_tri_color
    ngon = self.custom_ngons_color
    # Edges are fully opaque; face fills use the configured opacity.
    edges_tri_color[0] = (tri[0], tri[1], tri[2], 1)
    faces_tri_color[0] = (tri[0], tri[1], tri[2], self.face_opacity)
    edges_ngons_color[0] = (ngon[0], ngon[1], ngon[2], 1)
    faces_ngons_color[0] = (ngon[0], ngon[1], ngon[2], self.face_opacity)
class FaceTypeSelect(Operator):
    bl_idname = "object.face_type_select"
    bl_label = "Face type select"
    bl_description = "Select Triangles and / or Ngons on the Active Object"
    bl_options = {'REGISTER', 'UNDO'}

    face_type: EnumProperty(
        name="Face Type",
        items=(('tris', "Tris", "Colorize Triangles in the Mesh"),
               ('ngons', "Ngons", "Colorize Ngons in the Mesh")),
        default='ngons'
    )

    @classmethod
    def poll(cls, context):
        active = context.active_object
        return active is not None and active.type == 'MESH'

    def execute(self, context):
        """Enter edit mode, switch to face select, and select the chosen type."""
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        context.tool_settings.mesh_select_mode = (False, False, True)

        # Tris: exactly 3 sides. Ngons: strictly more than 4 sides.
        sides, comparison = (3, 'EQUAL') if self.face_type == "tris" else (4, 'GREATER')
        bpy.ops.mesh.select_face_by_sides(number=sides, type=comparison)

        return {'FINISHED'}
class MeshCheckCollectionGroup(PropertyGroup):
    """Add-on settings; every property pushes its value into the module
    globals via updateBGLData so the draw callback picks it up."""
    mesh_check_use: BoolProperty(
        name="Mesh Check",
        description="Display Mesh Check options",
        default=False,
        update=updateBGLData
    )
    display_faces: BoolProperty(
        name="Display Faces",
        description="Use BGL to display Ngons and Tris of the mesh",
        default=False,
        update=updateBGLData
    )
    edge_width: FloatProperty(
        name="Width",
        description="Drawn Edges width in pixels",
        min=1.0,
        max=10.0,
        default=3.0,
        subtype='PIXEL',
        update=updateBGLData
    )
    finer_lines_behind_use: BoolProperty(
        name="Finer Lines behind",
        description="Display partially hidden edges finer in non-occlude mode",
        default=True,
        update=updateBGLData
    )
    custom_tri_color: FloatVectorProperty(
        name="Tri Color",
        description="Custom color for the Triangles",
        min=0.0,
        max=1.0,
        default=(1.0, 1.0, 0.0),
        size=3,
        subtype='COLOR',
        update=updateBGLData
    )
    custom_ngons_color: FloatVectorProperty(
        name="Ngons Color",
        description="Custom color for the Ngons",
        min=0.0,
        max=1.0,
        default=(1.0, 0.0, 0.0),
        size=3,
        subtype='COLOR',
        update=updateBGLData
    )
    face_opacity: FloatProperty(
        name="Face Opacity",
        description="Opacity of the color for the face",
        min=0.0,
        max=1.0,
        default=0.2,
        subtype='FACTOR',
        update=updateBGLData
    )
# Register
# Classes registered/unregistered together by register()/unregister().
classes = (
    FaceTypeSelect,
    MeshCheckCollectionGroup,
    )
def register():
    """Register classes, attach the settings group, install the draw handler."""
    for cls in classes:
        bpy.utils.register_class(cls)

    bpy.types.WindowManager.mesh_check = PointerProperty(
                                            type=MeshCheckCollectionGroup
                                            )
    # Replace any stale handler from a previous registration before adding.
    if mesh_check_handle:
        bpy.types.SpaceView3D.draw_handler_remove(mesh_check_handle[0], 'WINDOW')
    mesh_check_handle[:] = [bpy.types.SpaceView3D.draw_handler_add(mesh_check_draw_callback,
                            (), 'WINDOW', 'POST_VIEW')]
def unregister():
    """Remove the settings group, the draw handler, and the classes."""
    del bpy.types.WindowManager.mesh_check
    if mesh_check_handle:
        bpy.types.SpaceView3D.draw_handler_remove(mesh_check_handle[0], 'WINDOW')
        mesh_check_handle[:] = []
    for cls in classes:
        bpy.utils.unregister_class(cls)
# Support running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
| 36.660377 | 101 | 0.438277 |
93b39a9e1914785384afd0471fe693872ada7582 | 1,506 | py | Python | checkov/terraform/checks/resource/kubernetes/DefaultNamespace.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | checkov/terraform/checks/resource/kubernetes/DefaultNamespace.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | checkov/terraform/checks/resource/kubernetes/DefaultNamespace.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | from typing import Any
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class DefaultNamespace(BaseResourceCheck):
    """Flags Kubernetes resources that end up in the ``default`` namespace."""

    def __init__(self):
        # CIS-1.5 5.7.4
        super().__init__(
            name="The default namespace should not be used",
            id="CKV_K8S_21",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=[
                "kubernetes_pod", "kubernetes_deployment", "kubernetes_daemonset",
                "kubernetes_stateful_set", "kubernetes_replication_controller",
                "kubernetes_job", "kubernetes_cron_job", "kubernetes_service",
                "kubernetes_secret", "kubernetes_service_account",
                "kubernetes_role_binding", "kubernetes_config_map",
                "kubernetes_ingress",
            ],
        )

    def scan_resource_conf(self, conf) -> CheckResult:
        """FAILED when metadata is absent, the namespace is unset, or the
        namespace is 'default'; PASSED for any other explicit namespace."""
        if "metadata" not in conf:
            self.evaluated_keys = [""]
            return CheckResult.FAILED

        namespace = conf["metadata"][0].get("namespace")
        if not namespace:
            # No namespace given -> resource lands in 'default'.
            return CheckResult.FAILED
        if namespace == ["default"]:
            self.evaluated_keys = ['metadata/[0]/namespace']
            return CheckResult.FAILED
        return CheckResult.PASSED


check = DefaultNamespace()
| 43.028571 | 112 | 0.648738 |
a78e5ed126bb71732b7fa10dbe264b1287857fd9 | 70,054 | gyp | Python | net/net.gyp | Scopetta197/chromium | b7bf8e39baadfd9089de2ebdc0c5d982de4a9820 | [
"BSD-3-Clause"
] | 212 | 2015-01-31T11:55:58.000Z | 2022-02-22T06:35:11.000Z | net/net.gyp | Scopetta197/chromium | b7bf8e39baadfd9089de2ebdc0c5d982de4a9820 | [
"BSD-3-Clause"
] | 5 | 2015-03-27T14:29:23.000Z | 2019-09-25T13:23:12.000Z | net/net.gyp | Scopetta197/chromium | b7bf8e39baadfd9089de2ebdc0c5d982de4a9820 | [
"BSD-3-Clause"
] | 221 | 2015-01-07T06:21:24.000Z | 2022-02-11T02:51:12.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'linux_link_kerberos%': 0,
'conditions': [
['chromeos==1 or OS=="android"', {
# Disable Kerberos on ChromeOS and Android, at least for now.
# It needs configuration (krb5.conf and so on).
'use_kerberos%': 0,
}, { # chromeos == 0
'use_kerberos%': 1,
}],
],
},
'includes': [
'../build/win_precompile.gypi',
],
'targets': [
{
'target_name': 'net',
'type': '<(component)',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../crypto/crypto.gyp:crypto',
'../sdch/sdch.gyp:sdch',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/zlib/zlib.gyp:zlib',
'../v8/tools/gyp/v8.gyp:v8',
'net_resources',
],
'sources': [
'android/net_jni_registrar.cc',
'android/net_jni_registrar.h',
'android/network_change_notifier.cc',
'android/network_change_notifier.h',
'android/network_change_notifier_factory.cc',
'android/network_change_notifier_factory.h',
'android/network_library.cc',
'android/network_library.h',
'base/address_family.h',
'base/address_list.cc',
'base/address_list.h',
'base/address_list_net_log_param.cc',
'base/address_list_net_log_param.h',
'base/asn1_util.cc',
'base/asn1_util.h',
'base/auth.cc',
'base/auth.h',
'base/backoff_entry.cc',
'base/backoff_entry.h',
'base/bandwidth_metrics.cc',
'base/bandwidth_metrics.h',
'base/big_endian.cc',
'base/big_endian.h',
'base/cache_type.h',
'base/capturing_net_log.cc',
'base/capturing_net_log.h',
'base/cert_database.cc',
'base/cert_database.h',
'base/cert_database_mac.cc',
'base/cert_database_nss.cc',
'base/cert_database_openssl.cc',
'base/cert_database_win.cc',
'base/cert_status_flags.cc',
'base/cert_status_flags.h',
'base/cert_verifier.cc',
'base/cert_verifier.h',
'base/cert_verify_proc.cc',
'base/cert_verify_proc.h',
'base/cert_verify_proc_mac.cc',
'base/cert_verify_proc_mac.h',
'base/cert_verify_proc_nss.cc',
'base/cert_verify_proc_nss.h',
'base/cert_verify_proc_openssl.cc',
'base/cert_verify_proc_openssl.h',
'base/cert_verify_proc_win.cc',
'base/cert_verify_proc_win.h',
'base/cert_verify_result.cc',
'base/cert_verify_result.h',
'base/completion_callback.h',
'base/connection_type_histograms.cc',
'base/connection_type_histograms.h',
'base/crl_set.cc',
'base/crl_set.h',
'base/crypto_module.h',
'base/crypto_module_nss.cc',
'base/crypto_module_openssl.cc',
'base/data_url.cc',
'base/data_url.h',
'base/default_server_bound_cert_store.cc',
'base/default_server_bound_cert_store.h',
'base/directory_lister.cc',
'base/directory_lister.h',
'base/dns_reloader.cc',
'base/dns_reloader.h',
'base/dns_util.cc',
'base/dns_util.h',
'base/dnsrr_resolver.cc',
'base/dnsrr_resolver.h',
'base/dnssec_chain_verifier.cc',
'base/dnssec_chain_verifier.h',
'base/dnssec_keyset.cc',
'base/dnssec_keyset.h',
'base/escape.cc',
'base/escape.h',
'base/escape_icu.cc',
'base/ev_root_ca_metadata.cc',
'base/ev_root_ca_metadata.h',
'base/expiring_cache.h',
'base/file_stream.cc',
'base/file_stream.h',
'base/file_stream_metrics.cc',
'base/file_stream_metrics.h',
'base/file_stream_metrics_posix.cc',
'base/file_stream_metrics_win.cc',
'base/file_stream_net_log_parameters.cc',
'base/file_stream_net_log_parameters.h',
'base/file_stream_posix.cc',
'base/file_stream_posix.h',
'base/file_stream_whence.h',
'base/file_stream_win.cc',
'base/file_stream_win.h',
'base/filter.cc',
'base/filter.h',
'base/gzip_filter.cc',
'base/gzip_filter.h',
'base/gzip_header.cc',
'base/gzip_header.h',
'base/host_cache.cc',
'base/host_cache.h',
'base/host_mapping_rules.cc',
'base/host_mapping_rules.h',
'base/host_port_pair.cc',
'base/host_port_pair.h',
'base/host_resolver.cc',
'base/host_resolver.h',
'base/host_resolver_impl.cc',
'base/host_resolver_impl.h',
'base/host_resolver_proc.cc',
'base/host_resolver_proc.h',
'base/io_buffer.cc',
'base/io_buffer.h',
'base/ip_endpoint.cc',
'base/ip_endpoint.h',
'base/keygen_handler.cc',
'base/keygen_handler.h',
'base/keygen_handler_mac.cc',
'base/keygen_handler_nss.cc',
'base/keygen_handler_openssl.cc',
'base/keygen_handler_win.cc',
'base/listen_socket.cc',
'base/listen_socket.h',
'base/load_flags.h',
'base/load_flags_list.h',
'base/load_states.h',
'base/mapped_host_resolver.cc',
'base/mapped_host_resolver.h',
'base/mime_sniffer.cc',
'base/mime_sniffer.h',
'base/mime_util.cc',
'base/mime_util.h',
'base/multi_threaded_cert_verifier.cc',
'base/multi_threaded_cert_verifier.h',
'base/net_error_list.h',
'base/net_errors.cc',
'base/net_errors.h',
'base/net_errors_posix.cc',
'base/net_errors_win.cc',
'base/net_export.h',
'base/net_log.cc',
'base/net_log.h',
'base/net_log_event_type_list.h',
'base/net_log_source_type_list.h',
'base/net_module.cc',
'base/net_module.h',
'base/net_util.cc',
'base/net_util.h',
'base/net_util_posix.cc',
'base/net_util_win.cc',
'base/network_change_notifier.cc',
'base/network_change_notifier.h',
'base/network_change_notifier_factory.h',
'base/network_change_notifier_linux.cc',
'base/network_change_notifier_linux.h',
'base/network_change_notifier_mac.cc',
'base/network_change_notifier_mac.h',
'base/network_change_notifier_netlink_linux.cc',
'base/network_change_notifier_netlink_linux.h',
'base/network_change_notifier_win.cc',
'base/network_change_notifier_win.h',
'base/network_config_watcher_mac.cc',
'base/network_config_watcher_mac.h',
'base/network_delegate.cc',
'base/network_delegate.h',
'base/nss_memio.c',
'base/nss_memio.h',
'base/openssl_memory_private_key_store.cc',
'base/openssl_private_key_store.h',
'base/openssl_private_key_store_android.cc',
'base/pem_tokenizer.cc',
'base/pem_tokenizer.h',
'base/platform_mime_util.h',
# TODO(tc): gnome-vfs? xdgmime? /etc/mime.types?
'base/platform_mime_util_linux.cc',
'base/platform_mime_util_mac.cc',
'base/platform_mime_util_win.cc',
'base/prioritized_dispatcher.cc',
'base/prioritized_dispatcher.h',
'base/priority_queue.h',
'base/rand_callback.h',
'base/registry_controlled_domain.cc',
'base/registry_controlled_domain.h',
'base/request_priority.h',
'base/sdch_filter.cc',
'base/sdch_filter.h',
'base/sdch_manager.cc',
'base/sdch_manager.h',
'base/server_bound_cert_service.cc',
'base/server_bound_cert_service.h',
'base/server_bound_cert_store.cc',
'base/server_bound_cert_store.h',
'base/single_request_cert_verifier.cc',
'base/single_request_cert_verifier.h',
'base/single_request_host_resolver.cc',
'base/single_request_host_resolver.h',
'base/ssl_cert_request_info.cc',
'base/ssl_cert_request_info.h',
'base/ssl_cipher_suite_names.cc',
'base/ssl_cipher_suite_names.h',
'base/ssl_client_auth_cache.cc',
'base/ssl_client_auth_cache.h',
'base/ssl_client_cert_type.h',
'base/ssl_config_service.cc',
'base/ssl_config_service.h',
'base/ssl_config_service_defaults.cc',
'base/ssl_config_service_defaults.h',
'base/ssl_info.cc',
'base/ssl_info.h',
'base/static_cookie_policy.cc',
'base/static_cookie_policy.h',
'base/sys_addrinfo.h',
'base/tcp_listen_socket.cc',
'base/tcp_listen_socket.h',
'base/test_data_stream.cc',
'base/test_data_stream.h',
'base/test_root_certs.cc',
'base/test_root_certs.h',
'base/test_root_certs_mac.cc',
'base/test_root_certs_nss.cc',
'base/test_root_certs_openssl.cc',
'base/test_root_certs_win.cc',
'base/transport_security_state.cc',
'base/transport_security_state.h',
'base/transport_security_state_static.h',
'base/upload_data.cc',
'base/upload_data.h',
'base/upload_data_stream.cc',
'base/upload_data_stream.h',
'base/winsock_init.cc',
'base/winsock_init.h',
'base/winsock_util.cc',
'base/winsock_util.h',
'base/x509_cert_types.cc',
'base/x509_cert_types.h',
'base/x509_cert_types_mac.cc',
'base/x509_cert_types_win.cc',
'base/x509_certificate.cc',
'base/x509_certificate.h',
'base/x509_certificate_mac.cc',
'base/x509_certificate_net_log_param.cc',
'base/x509_certificate_net_log_param.h',
'base/x509_certificate_nss.cc',
'base/x509_certificate_openssl.cc',
'base/x509_certificate_win.cc',
'base/x509_util.h',
'base/x509_util_mac.cc',
'base/x509_util_mac.h',
'base/x509_util_nss.cc',
'base/x509_util_nss.h',
'base/x509_util_openssl.cc',
'base/x509_util_openssl.h',
'base/zap.cc',
'base/zap.h',
'cookies/cookie_monster.cc',
'cookies/cookie_monster.h',
'cookies/cookie_options.h',
'cookies/cookie_store.cc',
'cookies/cookie_store.h',
'cookies/cookie_util.cc',
'cookies/cookie_util.h',
'disk_cache/addr.cc',
'disk_cache/addr.h',
'disk_cache/backend_impl.cc',
'disk_cache/backend_impl.h',
'disk_cache/bitmap.cc',
'disk_cache/bitmap.h',
'disk_cache/block_files.cc',
'disk_cache/block_files.h',
'disk_cache/cache_util.h',
'disk_cache/cache_util_posix.cc',
'disk_cache/cache_util_win.cc',
'disk_cache/disk_cache.h',
'disk_cache/disk_format.cc',
'disk_cache/disk_format.h',
'disk_cache/entry_impl.cc',
'disk_cache/entry_impl.h',
'disk_cache/errors.h',
'disk_cache/eviction.cc',
'disk_cache/eviction.h',
'disk_cache/experiments.h',
'disk_cache/file.cc',
'disk_cache/file.h',
'disk_cache/file_block.h',
'disk_cache/file_lock.cc',
'disk_cache/file_lock.h',
'disk_cache/file_posix.cc',
'disk_cache/file_win.cc',
'disk_cache/hash.cc',
'disk_cache/hash.h',
'disk_cache/histogram_macros.h',
'disk_cache/in_flight_backend_io.cc',
'disk_cache/in_flight_backend_io.h',
'disk_cache/in_flight_io.cc',
'disk_cache/in_flight_io.h',
'disk_cache/mapped_file.h',
'disk_cache/mapped_file_posix.cc',
'disk_cache/mapped_file_win.cc',
'disk_cache/mem_backend_impl.cc',
'disk_cache/mem_backend_impl.h',
'disk_cache/mem_entry_impl.cc',
'disk_cache/mem_entry_impl.h',
'disk_cache/mem_rankings.cc',
'disk_cache/mem_rankings.h',
'disk_cache/net_log_parameters.cc',
'disk_cache/net_log_parameters.h',
'disk_cache/rankings.cc',
'disk_cache/rankings.h',
'disk_cache/sparse_control.cc',
'disk_cache/sparse_control.h',
'disk_cache/stats.cc',
'disk_cache/stats.h',
'disk_cache/stats_histogram.cc',
'disk_cache/stats_histogram.h',
'disk_cache/storage_block-inl.h',
'disk_cache/storage_block.h',
'disk_cache/stress_support.h',
'disk_cache/trace.cc',
'disk_cache/trace.h',
'dns/dns_client.cc',
'dns/dns_client.h',
'dns/dns_config_service.cc',
'dns/dns_config_service.h',
'dns/dns_config_service_posix.cc',
'dns/dns_config_service_posix.h',
'dns/dns_config_service_win.cc',
'dns/dns_config_service_win.h',
'dns/dns_hosts.cc',
'dns/dns_hosts.h',
'dns/dns_protocol.h',
'dns/dns_query.cc',
'dns/dns_query.h',
'dns/dns_response.cc',
'dns/dns_response.h',
'dns/dns_session.cc',
'dns/dns_session.h',
'dns/dns_transaction.cc',
'dns/dns_transaction.h',
'dns/file_path_watcher_wrapper.cc',
'dns/file_path_watcher_wrapper.h',
'dns/notify_watcher_mac.cc',
'dns/notify_watcher_mac.h',
'dns/serial_worker.cc',
'dns/serial_worker.h',
'ftp/ftp_auth_cache.cc',
'ftp/ftp_auth_cache.h',
'ftp/ftp_ctrl_response_buffer.cc',
'ftp/ftp_ctrl_response_buffer.h',
'ftp/ftp_directory_listing_parser.cc',
'ftp/ftp_directory_listing_parser.h',
'ftp/ftp_directory_listing_parser_ls.cc',
'ftp/ftp_directory_listing_parser_ls.h',
'ftp/ftp_directory_listing_parser_netware.cc',
'ftp/ftp_directory_listing_parser_netware.h',
'ftp/ftp_directory_listing_parser_os2.cc',
'ftp/ftp_directory_listing_parser_os2.h',
'ftp/ftp_directory_listing_parser_vms.cc',
'ftp/ftp_directory_listing_parser_vms.h',
'ftp/ftp_directory_listing_parser_windows.cc',
'ftp/ftp_directory_listing_parser_windows.h',
'ftp/ftp_network_layer.cc',
'ftp/ftp_network_layer.h',
'ftp/ftp_network_session.cc',
'ftp/ftp_network_session.h',
'ftp/ftp_network_transaction.cc',
'ftp/ftp_network_transaction.h',
'ftp/ftp_request_info.h',
'ftp/ftp_response_info.cc',
'ftp/ftp_response_info.h',
'ftp/ftp_server_type_histograms.cc',
'ftp/ftp_server_type_histograms.h',
'ftp/ftp_transaction.h',
'ftp/ftp_transaction_factory.h',
'ftp/ftp_util.cc',
'ftp/ftp_util.h',
'http/des.cc',
'http/des.h',
'http/disk_cache_based_ssl_host_info.cc',
'http/disk_cache_based_ssl_host_info.h',
'http/http_atom_list.h',
'http/http_auth.cc',
'http/http_auth.h',
'http/http_auth_cache.cc',
'http/http_auth_cache.h',
'http/http_auth_controller.cc',
'http/http_auth_controller.h',
'http/http_auth_filter.cc',
'http/http_auth_filter.h',
'http/http_auth_filter_win.h',
'http/http_auth_gssapi_posix.cc',
'http/http_auth_gssapi_posix.h',
'http/http_auth_handler.cc',
'http/http_auth_handler.h',
'http/http_auth_handler_basic.cc',
'http/http_auth_handler_basic.h',
'http/http_auth_handler_digest.cc',
'http/http_auth_handler_digest.h',
'http/http_auth_handler_factory.cc',
'http/http_auth_handler_factory.h',
'http/http_auth_handler_negotiate.cc',
'http/http_auth_handler_negotiate.h',
'http/http_auth_handler_ntlm.cc',
'http/http_auth_handler_ntlm.h',
'http/http_auth_handler_ntlm_portable.cc',
'http/http_auth_handler_ntlm_win.cc',
'http/http_auth_sspi_win.cc',
'http/http_auth_sspi_win.h',
'http/http_basic_stream.cc',
'http/http_basic_stream.h',
'http/http_byte_range.cc',
'http/http_byte_range.h',
'http/http_cache.cc',
'http/http_cache.h',
'http/http_cache_transaction.cc',
'http/http_cache_transaction.h',
'http/http_content_disposition.cc',
'http/http_content_disposition.h',
'http/http_chunked_decoder.cc',
'http/http_chunked_decoder.h',
'http/http_net_log_params.cc',
'http/http_net_log_params.h',
'http/http_network_layer.cc',
'http/http_network_layer.h',
'http/http_network_session.cc',
'http/http_network_session.h',
'http/http_network_session_peer.cc',
'http/http_network_session_peer.h',
'http/http_network_transaction.cc',
'http/http_network_transaction.h',
'http/http_pipelined_connection.h',
'http/http_pipelined_connection_impl.cc',
'http/http_pipelined_connection_impl.h',
'http/http_pipelined_host.cc',
'http/http_pipelined_host.h',
'http/http_pipelined_host_capability.h',
'http/http_pipelined_host_forced.cc',
'http/http_pipelined_host_forced.h',
'http/http_pipelined_host_impl.cc',
'http/http_pipelined_host_impl.h',
'http/http_pipelined_host_pool.cc',
'http/http_pipelined_host_pool.h',
'http/http_pipelined_stream.cc',
'http/http_pipelined_stream.h',
'http/http_proxy_client_socket.cc',
'http/http_proxy_client_socket.h',
'http/http_proxy_client_socket_pool.cc',
'http/http_proxy_client_socket_pool.h',
'http/http_request_headers.cc',
'http/http_request_headers.h',
'http/http_request_info.cc',
'http/http_request_info.h',
'http/http_response_body_drainer.cc',
'http/http_response_body_drainer.h',
'http/http_response_headers.cc',
'http/http_response_headers.h',
'http/http_response_info.cc',
'http/http_response_info.h',
'http/http_server_properties.cc',
'http/http_server_properties.h',
'http/http_server_properties_impl.cc',
'http/http_server_properties_impl.h',
'http/http_status_code.h',
'http/http_stream.h',
'http/http_stream_factory.cc',
'http/http_stream_factory.h',
'http/http_stream_factory_impl.cc',
'http/http_stream_factory_impl.h',
'http/http_stream_factory_impl_job.cc',
'http/http_stream_factory_impl_job.h',
'http/http_stream_factory_impl_request.cc',
'http/http_stream_factory_impl_request.h',
'http/http_stream_parser.cc',
'http/http_stream_parser.h',
'http/http_transaction.h',
'http/http_transaction_factory.h',
'http/http_util.cc',
'http/http_util.h',
'http/http_util_icu.cc',
'http/http_vary_data.cc',
'http/http_vary_data.h',
'http/http_version.h',
'http/md4.cc',
'http/md4.h',
'http/partial_data.cc',
'http/partial_data.h',
'http/proxy_client_socket.h',
'http/proxy_client_socket.cc',
'http/url_security_manager.cc',
'http/url_security_manager.h',
'http/url_security_manager_posix.cc',
'http/url_security_manager_win.cc',
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'proxy/dhcp_proxy_script_adapter_fetcher_win.cc',
'proxy/dhcp_proxy_script_adapter_fetcher_win.h',
'proxy/dhcp_proxy_script_fetcher.cc',
'proxy/dhcp_proxy_script_fetcher.h',
'proxy/dhcp_proxy_script_fetcher_factory.cc',
'proxy/dhcp_proxy_script_fetcher_factory.h',
'proxy/dhcp_proxy_script_fetcher_win.cc',
'proxy/dhcp_proxy_script_fetcher_win.h',
'proxy/dhcpcsvc_init_win.cc',
'proxy/dhcpcsvc_init_win.h',
'proxy/multi_threaded_proxy_resolver.cc',
'proxy/multi_threaded_proxy_resolver.h',
'proxy/network_delegate_error_observer.cc',
'proxy/network_delegate_error_observer.h',
'proxy/polling_proxy_config_service.cc',
'proxy/polling_proxy_config_service.h',
'proxy/proxy_bypass_rules.cc',
'proxy/proxy_bypass_rules.h',
'proxy/proxy_config.cc',
'proxy/proxy_config.h',
'proxy/proxy_config_service.h',
'proxy/proxy_config_service_fixed.cc',
'proxy/proxy_config_service_fixed.h',
'proxy/proxy_config_service_linux.cc',
'proxy/proxy_config_service_linux.h',
'proxy/proxy_config_service_mac.cc',
'proxy/proxy_config_service_mac.h',
'proxy/proxy_config_service_win.cc',
'proxy/proxy_config_service_win.h',
'proxy/proxy_info.cc',
'proxy/proxy_info.h',
'proxy/proxy_list.cc',
'proxy/proxy_list.h',
'proxy/proxy_resolver.h',
'proxy/proxy_resolver_error_observer.h',
'proxy/proxy_resolver_js_bindings.cc',
'proxy/proxy_resolver_js_bindings.h',
'proxy/proxy_resolver_mac.cc',
'proxy/proxy_resolver_mac.h',
'proxy/proxy_resolver_request_context.h',
'proxy/proxy_resolver_script.h',
'proxy/proxy_resolver_script_data.cc',
'proxy/proxy_resolver_script_data.h',
'proxy/proxy_resolver_v8.cc',
'proxy/proxy_resolver_v8.h',
'proxy/proxy_resolver_winhttp.cc',
'proxy/proxy_resolver_winhttp.h',
'proxy/proxy_retry_info.h',
'proxy/proxy_script_decider.cc',
'proxy/proxy_script_decider.h',
'proxy/proxy_script_fetcher.h',
'proxy/proxy_script_fetcher_impl.cc',
'proxy/proxy_script_fetcher_impl.h',
'proxy/proxy_server.cc',
'proxy/proxy_server.h',
'proxy/proxy_server_mac.cc',
'proxy/proxy_service.cc',
'proxy/proxy_service.h',
'proxy/sync_host_resolver.h',
'proxy/sync_host_resolver_bridge.cc',
'proxy/sync_host_resolver_bridge.h',
'socket/buffered_write_stream_socket.cc',
'socket/buffered_write_stream_socket.h',
'socket/client_socket_factory.cc',
'socket/client_socket_factory.h',
'socket/client_socket_handle.cc',
'socket/client_socket_handle.h',
'socket/client_socket_pool.cc',
'socket/client_socket_pool.h',
'socket/client_socket_pool_base.cc',
'socket/client_socket_pool_base.h',
'socket/client_socket_pool_histograms.cc',
'socket/client_socket_pool_histograms.h',
'socket/client_socket_pool_manager.cc',
'socket/client_socket_pool_manager.h',
'socket/client_socket_pool_manager_impl.cc',
'socket/client_socket_pool_manager_impl.h',
'socket/next_proto.h',
'socket/nss_ssl_util.cc',
'socket/nss_ssl_util.h',
'socket/server_socket.h',
'socket/socket.h',
'socket/socks5_client_socket.cc',
'socket/socks5_client_socket.h',
'socket/socks_client_socket.cc',
'socket/socks_client_socket.h',
'socket/socks_client_socket_pool.cc',
'socket/socks_client_socket_pool.h',
'socket/ssl_client_socket.cc',
'socket/ssl_client_socket.h',
'socket/ssl_client_socket_mac.cc',
'socket/ssl_client_socket_mac.h',
'socket/ssl_client_socket_nss.cc',
'socket/ssl_client_socket_nss.h',
'socket/ssl_client_socket_openssl.cc',
'socket/ssl_client_socket_openssl.h',
'socket/ssl_client_socket_pool.cc',
'socket/ssl_client_socket_pool.h',
'socket/ssl_client_socket_win.cc',
'socket/ssl_client_socket_win.h',
'socket/ssl_error_params.cc',
'socket/ssl_error_params.h',
'socket/ssl_host_info.cc',
'socket/ssl_host_info.h',
'socket/ssl_server_socket.h',
'socket/ssl_server_socket_nss.cc',
'socket/ssl_server_socket_nss.h',
'socket/ssl_server_socket_openssl.cc',
'socket/ssl_socket.h',
'socket/stream_socket.cc',
'socket/stream_socket.h',
'socket/tcp_client_socket.cc',
'socket/tcp_client_socket.h',
'socket/tcp_client_socket_libevent.cc',
'socket/tcp_client_socket_libevent.h',
'socket/tcp_client_socket_win.cc',
'socket/tcp_client_socket_win.h',
'socket/tcp_server_socket.h',
'socket/tcp_server_socket_libevent.cc',
'socket/tcp_server_socket_libevent.h',
'socket/tcp_server_socket_win.cc',
'socket/tcp_server_socket_win.h',
'socket/transport_client_socket_pool.cc',
'socket/transport_client_socket_pool.h',
'socket/web_socket_server_socket.cc',
'socket/web_socket_server_socket.h',
'socket_stream/socket_stream.cc',
'socket_stream/socket_stream.h',
'socket_stream/socket_stream_job.cc',
'socket_stream/socket_stream_job.h',
'socket_stream/socket_stream_job_manager.cc',
'socket_stream/socket_stream_job_manager.h',
'socket_stream/socket_stream_metrics.cc',
'socket_stream/socket_stream_metrics.h',
'spdy/buffered_spdy_framer.cc',
'spdy/buffered_spdy_framer.h',
'spdy/spdy_bitmasks.h',
'spdy/spdy_credential_state.cc',
'spdy/spdy_credential_state.h',
'spdy/spdy_frame_builder.cc',
'spdy/spdy_frame_builder.h',
'spdy/spdy_frame_reader.cc',
'spdy/spdy_frame_reader.h',
'spdy/spdy_framer.cc',
'spdy/spdy_framer.h',
'spdy/spdy_http_stream.cc',
'spdy/spdy_http_stream.h',
'spdy/spdy_http_utils.cc',
'spdy/spdy_http_utils.h',
'spdy/spdy_io_buffer.cc',
'spdy/spdy_io_buffer.h',
'spdy/spdy_protocol.h',
'spdy/spdy_proxy_client_socket.cc',
'spdy/spdy_proxy_client_socket.h',
'spdy/spdy_session.cc',
'spdy/spdy_session.h',
'spdy/spdy_session_pool.cc',
'spdy/spdy_session_pool.h',
'spdy/spdy_stream.cc',
'spdy/spdy_stream.h',
'spdy/spdy_websocket_stream.cc',
'spdy/spdy_websocket_stream.h',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertTrust.cpp',
'third_party/mozilla_security_manager/nsNSSCertTrust.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
'udp/datagram_client_socket.h',
'udp/datagram_server_socket.h',
'udp/datagram_socket.h',
'udp/udp_client_socket.cc',
'udp/udp_client_socket.h',
'udp/udp_data_transfer_param.cc',
'udp/udp_data_transfer_param.h',
'udp/udp_server_socket.cc',
'udp/udp_server_socket.h',
'udp/udp_socket.h',
'udp/udp_socket_libevent.cc',
'udp/udp_socket_libevent.h',
'udp/udp_socket_win.cc',
'udp/udp_socket_win.h',
'url_request/fraudulent_certificate_reporter.h',
'url_request/url_request.cc',
'url_request/url_request.h',
'url_request/url_request_about_job.cc',
'url_request/url_request_about_job.h',
'url_request/url_request_context.cc',
'url_request/url_request_context.h',
'url_request/url_request_context_builder.cc',
'url_request/url_request_context_builder.h',
'url_request/url_request_context_getter.cc',
'url_request/url_request_context_getter.h',
'url_request/url_request_context_storage.cc',
'url_request/url_request_context_storage.h',
'url_request/url_request_data_job.cc',
'url_request/url_request_data_job.h',
'url_request/url_request_error_job.cc',
'url_request/url_request_error_job.h',
'url_request/url_request_file_dir_job.cc',
'url_request/url_request_file_dir_job.h',
'url_request/url_request_file_job.cc',
'url_request/url_request_file_job.h',
'url_request/url_request_filter.cc',
'url_request/url_request_filter.h',
'url_request/url_request_ftp_job.cc',
'url_request/url_request_ftp_job.h',
'url_request/url_request_http_job.cc',
'url_request/url_request_http_job.h',
'url_request/url_request_job.cc',
'url_request/url_request_job.h',
'url_request/url_request_job_factory.cc',
'url_request/url_request_job_factory.h',
'url_request/url_request_job_manager.cc',
'url_request/url_request_job_manager.h',
'url_request/url_request_netlog_params.cc',
'url_request/url_request_netlog_params.h',
'url_request/url_request_redirect_job.cc',
'url_request/url_request_redirect_job.h',
'url_request/url_request_simple_job.cc',
'url_request/url_request_simple_job.h',
'url_request/url_request_status.h',
'url_request/url_request_test_job.cc',
'url_request/url_request_test_job.h',
'url_request/url_request_throttler_entry.cc',
'url_request/url_request_throttler_entry.h',
'url_request/url_request_throttler_entry_interface.h',
'url_request/url_request_throttler_header_adapter.cc',
'url_request/url_request_throttler_header_adapter.h',
'url_request/url_request_throttler_header_interface.h',
'url_request/url_request_throttler_manager.cc',
'url_request/url_request_throttler_manager.h',
'url_request/view_cache_helper.cc',
'url_request/view_cache_helper.h',
'websockets/websocket_handshake_handler.cc',
'websockets/websocket_handshake_handler.h',
'websockets/websocket_job.cc',
'websockets/websocket_job.h',
'websockets/websocket_net_log_params.cc',
'websockets/websocket_net_log_params.h',
'websockets/websocket_throttle.cc',
'websockets/websocket_throttle.h',
],
'defines': [
'NET_IMPLEMENTATION',
],
'export_dependent_settings': [
'../base/base.gyp:base',
],
'conditions': [
['chromeos==1', {
'sources!': [
'base/network_change_notifier_linux.cc',
'base/network_change_notifier_linux.h',
'base/network_change_notifier_netlink_linux.cc',
'base/network_change_notifier_netlink_linux.h',
'proxy/proxy_config_service_linux.cc',
'proxy/proxy_config_service_linux.h',
],
}],
['use_kerberos==1', {
'defines': [
'USE_KERBEROS',
],
'conditions': [
['OS=="openbsd"', {
'include_dirs': [
'/usr/include/kerberosV'
],
}],
['linux_link_kerberos==1', {
'link_settings': {
'ldflags': [
'<!@(krb5-config --libs gssapi)',
],
},
}, { # linux_link_kerberos==0
'defines': [
'DLOPEN_KERBEROS',
],
}],
],
}, { # use_kerberos == 0
'sources!': [
'http/http_auth_gssapi_posix.cc',
'http/http_auth_gssapi_posix.h',
'http/http_auth_handler_negotiate.h',
'http/http_auth_handler_negotiate.cc',
],
}],
['use_openssl==1', {
'sources!': [
'base/cert_database_nss.cc',
'base/cert_verify_proc_nss.cc',
'base/cert_verify_proc_nss.h',
'base/crypto_module_nss.cc',
'base/dnssec_keyset.cc',
'base/dnssec_keyset.h',
'base/keygen_handler_nss.cc',
'base/nss_memio.c',
'base/nss_memio.h',
'base/test_root_certs_nss.cc',
'base/x509_certificate_nss.cc',
'base/x509_util_nss.cc',
'base/x509_util_nss.h',
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'socket/nss_ssl_util.cc',
'socket/nss_ssl_util.h',
'socket/ssl_client_socket_nss.cc',
'socket/ssl_client_socket_nss.h',
'socket/ssl_server_socket_nss.cc',
'socket/ssl_server_socket_nss.h',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsNSSCertTrust.cpp',
'third_party/mozilla_security_manager/nsNSSCertTrust.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
],
},
{ # else !use_openssl: remove the unneeded files
'sources!': [
'base/cert_database_openssl.cc',
'base/cert_verify_proc_openssl.cc',
'base/cert_verify_proc_openssl.h',
'base/crypto_module_openssl.cc',
'base/keygen_handler_openssl.cc',
'base/openssl_memory_private_key_store.cc',
'base/openssl_private_key_store.h',
'base/test_root_certs_openssl.cc',
'base/x509_certificate_openssl.cc',
'base/x509_util_openssl.cc',
'base/x509_util_openssl.h',
'socket/ssl_client_socket_openssl.cc',
'socket/ssl_client_socket_openssl.h',
'socket/ssl_server_socket_openssl.cc',
],
},
],
[ 'use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:gconf',
'../build/linux/system.gyp:gio',
],
'conditions': [
['use_openssl==1', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
},
{ # else use_openssl==0, use NSS
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}],
['os_bsd==1', {
'sources!': [
'base/network_change_notifier_linux.cc',
'base/network_change_notifier_netlink_linux.cc',
'proxy/proxy_config_service_linux.cc',
],
},{
'dependencies': [
'../build/linux/system.gyp:libresolv',
],
}],
['OS=="solaris"', {
'link_settings': {
'ldflags': [
'-R/usr/lib/mps',
],
},
}],
],
},
{ # else: OS is not in the above list
'sources!': [
'base/cert_database_nss.cc',
'base/crypto_module_nss.cc',
'base/keygen_handler_nss.cc',
'base/test_root_certs_nss.cc',
'base/x509_certificate_nss.cc',
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsNSSCertTrust.cpp',
'third_party/mozilla_security_manager/nsNSSCertTrust.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
],
},
],
[ 'toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gdk',
],
}],
[ 'use_nss != 1', {
'sources!': [
'base/cert_verify_proc_nss.cc',
'base/cert_verify_proc_nss.h',
],
}],
[ 'OS == "win"', {
'sources!': [
'http/http_auth_handler_ntlm_portable.cc',
'socket/tcp_client_socket_libevent.cc',
'socket/tcp_client_socket_libevent.h',
'socket/tcp_server_socket_libevent.cc',
'socket/tcp_server_socket_libevent.h',
'udp/udp_socket_libevent.cc',
'udp/udp_socket_libevent.h',
],
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
'tld_cleanup',
],
}, { # else: OS != "win"
'sources!': [
'base/winsock_init.cc',
'base/winsock_init.h',
'base/winsock_util.cc',
'base/winsock_util.h',
'proxy/proxy_resolver_winhttp.cc',
'proxy/proxy_resolver_winhttp.h',
],
},
],
[ 'OS == "mac"', {
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SystemConfiguration.framework',
'$(SDKROOT)/usr/lib/libresolv.dylib',
]
},
},
],
[ 'OS == "android"', {
'defines': [
# Android can shut down our app at any time, so we persist session cookies.
'ENABLE_PERSISTENT_SESSION_COOKIES'
],
'dependencies': [
'../build/android/system.gyp:ssl',
'net_java',
'net_jni_headers',
],
}, { # else OS! = "android"
'defines': [
# These are the features Android doesn't support.
'ENABLE_MEDIA_CODEC_THEORA',
],
},
],
[ 'OS == "linux"', {
'dependencies': [
'../build/linux/system.gyp:dbus',
'../dbus/dbus.gyp:dbus',
],
},
],
],
'target_conditions': [
['OS == "android"', {
'sources/': [
['include', '^base/platform_mime_util_linux\\.cc$'],
],
}],
],
},
{
'target_name': 'net_unittests',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../crypto/crypto.gyp:crypto',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/zlib/zlib.gyp:zlib',
],
'sources': [
'base/address_list_unittest.cc',
'base/backoff_entry_unittest.cc',
'base/big_endian_unittest.cc',
'base/cert_database_nss_unittest.cc',
'base/cert_verify_proc_unittest.cc',
'base/crl_set_unittest.cc',
'base/data_url_unittest.cc',
'base/default_server_bound_cert_store_unittest.cc',
'base/directory_lister_unittest.cc',
'base/dnssec_unittest.cc',
'base/dns_util_unittest.cc',
'base/dnsrr_resolver_unittest.cc',
'base/escape_unittest.cc',
'base/ev_root_ca_metadata_unittest.cc',
'base/expiring_cache_unittest.cc',
'base/file_stream_unittest.cc',
'base/filter_unittest.cc',
'base/gzip_filter_unittest.cc',
'base/host_cache_unittest.cc',
'base/host_mapping_rules_unittest.cc',
'base/host_port_pair_unittest.cc',
'base/host_resolver_impl_unittest.cc',
'base/ip_endpoint_unittest.cc',
'base/keygen_handler_unittest.cc',
'base/mapped_host_resolver_unittest.cc',
'base/mime_sniffer_unittest.cc',
'base/mime_util_unittest.cc',
'base/mock_filter_context.cc',
'base/mock_filter_context.h',
'base/multi_threaded_cert_verifier_unittest.cc',
'base/net_log_unittest.cc',
'base/net_log_unittest.h',
'base/net_util_unittest.cc',
'base/network_change_notifier_linux_unittest.cc',
'base/network_change_notifier_win_unittest.cc',
'base/pem_tokenizer_unittest.cc',
'base/prioritized_dispatcher_unittest.cc',
'base/priority_queue_unittest.cc',
'base/registry_controlled_domain_unittest.cc',
'base/run_all_unittests.cc',
'base/sdch_filter_unittest.cc',
'base/server_bound_cert_service_unittest.cc',
'base/single_request_host_resolver_unittest.cc',
'base/ssl_cipher_suite_names_unittest.cc',
'base/ssl_client_auth_cache_unittest.cc',
'base/ssl_config_service_unittest.cc',
'base/static_cookie_policy_unittest.cc',
'base/tcp_listen_socket_unittest.cc',
'base/tcp_listen_socket_unittest.h',
'base/test_certificate_data.h',
'base/test_completion_callback_unittest.cc',
'base/transport_security_state_unittest.cc',
'base/upload_data_unittest.cc',
'base/upload_data_stream_unittest.cc',
'base/x509_certificate_unittest.cc',
'base/x509_cert_types_unittest.cc',
'base/x509_util_nss_unittest.cc',
'base/x509_util_openssl_unittest.cc',
'cookies/cookie_monster_unittest.cc',
'cookies/cookie_store_unittest.h',
'cookies/cookie_util_unittest.cc',
'disk_cache/addr_unittest.cc',
'disk_cache/backend_unittest.cc',
'disk_cache/bitmap_unittest.cc',
'disk_cache/block_files_unittest.cc',
'disk_cache/cache_util_unittest.cc',
'disk_cache/entry_unittest.cc',
'disk_cache/mapped_file_unittest.cc',
'disk_cache/storage_block_unittest.cc',
'dns/dns_config_service_posix_unittest.cc',
'dns/dns_config_service_unittest.cc',
'dns/dns_config_service_win_unittest.cc',
'dns/dns_hosts_unittest.cc',
'dns/dns_query_unittest.cc',
'dns/dns_response_unittest.cc',
'dns/dns_transaction_unittest.cc',
'dns/file_path_watcher_wrapper_unittest.cc',
'dns/serial_worker_unittest.cc',
'ftp/ftp_auth_cache_unittest.cc',
'ftp/ftp_ctrl_response_buffer_unittest.cc',
'ftp/ftp_directory_listing_parser_ls_unittest.cc',
'ftp/ftp_directory_listing_parser_netware_unittest.cc',
'ftp/ftp_directory_listing_parser_os2_unittest.cc',
'ftp/ftp_directory_listing_parser_unittest.cc',
'ftp/ftp_directory_listing_parser_unittest.h',
'ftp/ftp_directory_listing_parser_vms_unittest.cc',
'ftp/ftp_directory_listing_parser_windows_unittest.cc',
'ftp/ftp_network_transaction_unittest.cc',
'ftp/ftp_util_unittest.cc',
'http/des_unittest.cc',
'http/disk_cache_based_ssl_host_info_unittest.cc',
'http/http_auth_cache_unittest.cc',
'http/http_auth_controller_unittest.cc',
'http/http_auth_filter_unittest.cc',
'http/http_auth_gssapi_posix_unittest.cc',
'http/http_auth_handler_basic_unittest.cc',
'http/http_auth_handler_digest_unittest.cc',
'http/http_auth_handler_factory_unittest.cc',
'http/http_auth_handler_mock.cc',
'http/http_auth_handler_mock.h',
'http/http_auth_handler_negotiate_unittest.cc',
'http/http_auth_handler_unittest.cc',
'http/http_auth_sspi_win_unittest.cc',
'http/http_auth_unittest.cc',
'http/http_byte_range_unittest.cc',
'http/http_cache_unittest.cc',
'http/http_chunked_decoder_unittest.cc',
'http/http_content_disposition_unittest.cc',
'http/http_network_layer_unittest.cc',
'http/http_network_transaction_spdy3_unittest.cc',
'http/http_network_transaction_spdy2_unittest.cc',
'http/http_pipelined_connection_impl_unittest.cc',
'http/http_pipelined_host_forced_unittest.cc',
'http/http_pipelined_host_impl_unittest.cc',
'http/http_pipelined_host_pool_unittest.cc',
'http/http_pipelined_host_test_util.cc',
'http/http_pipelined_host_test_util.h',
'http/http_pipelined_network_transaction_unittest.cc',
'http/http_proxy_client_socket_pool_spdy2_unittest.cc',
'http/http_proxy_client_socket_pool_spdy3_unittest.cc',
'http/http_request_headers_unittest.cc',
'http/http_response_body_drainer_unittest.cc',
'http/http_response_headers_unittest.cc',
'http/http_server_properties_impl_unittest.cc',
'http/http_stream_factory_impl_unittest.cc',
'http/http_stream_parser_unittest.cc',
'http/http_transaction_unittest.cc',
'http/http_transaction_unittest.h',
'http/http_util_unittest.cc',
'http/http_vary_data_unittest.cc',
'http/mock_allow_url_security_manager.cc',
'http/mock_allow_url_security_manager.h',
'http/mock_gssapi_library_posix.cc',
'http/mock_gssapi_library_posix.h',
'http/mock_http_cache.cc',
'http/mock_http_cache.h',
'http/mock_sspi_library_win.h',
'http/mock_sspi_library_win.cc',
'http/url_security_manager_unittest.cc',
'proxy/dhcp_proxy_script_adapter_fetcher_win_unittest.cc',
'proxy/dhcp_proxy_script_fetcher_factory_unittest.cc',
'proxy/dhcp_proxy_script_fetcher_win_unittest.cc',
'proxy/multi_threaded_proxy_resolver_unittest.cc',
'proxy/network_delegate_error_observer_unittest.cc',
'proxy/proxy_bypass_rules_unittest.cc',
'proxy/proxy_config_service_linux_unittest.cc',
'proxy/proxy_config_service_win_unittest.cc',
'proxy/proxy_config_unittest.cc',
'proxy/proxy_list_unittest.cc',
'proxy/proxy_resolver_js_bindings_unittest.cc',
'proxy/proxy_resolver_v8_unittest.cc',
'proxy/proxy_script_decider_unittest.cc',
'proxy/proxy_script_fetcher_impl_unittest.cc',
'proxy/proxy_server_unittest.cc',
'proxy/proxy_service_unittest.cc',
'proxy/sync_host_resolver_bridge_unittest.cc',
'socket/buffered_write_stream_socket_unittest.cc',
'socket/client_socket_pool_base_unittest.cc',
'socket/deterministic_socket_data_unittest.cc',
'socket/mock_client_socket_pool_manager.cc',
'socket/mock_client_socket_pool_manager.h',
'socket/socks5_client_socket_unittest.cc',
'socket/socks_client_socket_pool_unittest.cc',
'socket/socks_client_socket_unittest.cc',
'socket/ssl_client_socket_pool_unittest.cc',
'socket/ssl_client_socket_unittest.cc',
'socket/ssl_server_socket_unittest.cc',
'socket/tcp_client_socket_unittest.cc',
'socket/tcp_server_socket_unittest.cc',
'socket/transport_client_socket_pool_unittest.cc',
'socket/transport_client_socket_unittest.cc',
'socket/web_socket_server_socket_unittest.cc',
'socket_stream/socket_stream_metrics_unittest.cc',
'socket_stream/socket_stream_unittest.cc',
'spdy/buffered_spdy_framer_spdy3_unittest.cc',
'spdy/buffered_spdy_framer_spdy2_unittest.cc',
'spdy/spdy_credential_state_unittest.cc',
'spdy/spdy_frame_reader_test.cc',
'spdy/spdy_framer_test.cc',
'spdy/spdy_http_stream_spdy3_unittest.cc',
'spdy/spdy_http_stream_spdy2_unittest.cc',
'spdy/spdy_http_utils_unittest.cc',
'spdy/spdy_network_transaction_spdy3_unittest.cc',
'spdy/spdy_network_transaction_spdy2_unittest.cc',
'spdy/spdy_protocol_test.cc',
'spdy/spdy_proxy_client_socket_spdy3_unittest.cc',
'spdy/spdy_proxy_client_socket_spdy2_unittest.cc',
'spdy/spdy_session_spdy3_unittest.cc',
'spdy/spdy_session_spdy2_unittest.cc',
'spdy/spdy_stream_spdy3_unittest.cc',
'spdy/spdy_stream_spdy2_unittest.cc',
'spdy/spdy_test_util_spdy3.cc',
'spdy/spdy_test_util_spdy3.h',
'spdy/spdy_test_util_spdy2.cc',
'spdy/spdy_test_util_spdy2.h',
'spdy/spdy_websocket_stream_spdy2_unittest.cc',
'spdy/spdy_websocket_stream_spdy3_unittest.cc',
'spdy/spdy_websocket_test_util_spdy2.cc',
'spdy/spdy_websocket_test_util_spdy2.h',
'spdy/spdy_websocket_test_util_spdy3.cc',
'spdy/spdy_websocket_test_util_spdy3.h',
'test/python_utils_unittest.cc',
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_to_filename_encoder_unittest.cc',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
'tools/dump_cache/url_utilities_unittest.cc',
'udp/udp_socket_unittest.cc',
'url_request/url_request_context_builder_unittest.cc',
'url_request/url_request_job_factory_unittest.cc',
'url_request/url_request_job_unittest.cc',
'url_request/url_request_throttler_simulation_unittest.cc',
'url_request/url_request_throttler_test_support.cc',
'url_request/url_request_throttler_test_support.h',
'url_request/url_request_throttler_unittest.cc',
'url_request/url_request_unittest.cc',
'url_request/view_cache_helper_unittest.cc',
'websockets/websocket_handshake_handler_unittest.cc',
'websockets/websocket_job_spdy2_unittest.cc',
'websockets/websocket_job_spdy3_unittest.cc',
'websockets/websocket_net_log_params_unittest.cc',
'websockets/websocket_throttle_unittest.cc',
],
'conditions': [
['chromeos==1', {
'sources!': [
'base/network_change_notifier_linux_unittest.cc',
'proxy/proxy_config_service_linux_unittest.cc',
],
}],
[ 'use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}, { # else: OS is not in the above list
'sources!': [
'base/cert_database_nss_unittest.cc',
],
},
],
[ 'toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
},
],
[ 'os_posix == 1 and OS != "mac" and OS != "android"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
[ 'use_kerberos==1', {
'defines': [
'USE_KERBEROS',
],
}, { # use_kerberos == 0
'sources!': [
'http/http_auth_gssapi_posix_unittest.cc',
'http/http_auth_handler_negotiate_unittest.cc',
'http/mock_gssapi_library_posix.cc',
'http/mock_gssapi_library_posix.h',
],
}],
[ 'use_openssl==1', {
# When building for OpenSSL, we need to exclude NSS specific tests.
# TODO(bulach): Add equivalent tests when the underlying
# functionality is ported to OpenSSL.
'sources!': [
'base/x509_util_nss_unittest.cc',
'base/cert_database_nss_unittest.cc',
'base/dnssec_unittest.cc',
],
}, { # else !use_openssl: remove the unneeded files
'sources!': [
'base/x509_util_openssl_unittest.cc',
],
},
],
[ 'OS == "win"', {
'sources!': [
'dns/dns_config_service_posix_unittest.cc',
'http/http_auth_gssapi_posix_unittest.cc',
],
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
},
],
[ 'OS == "mac"', {
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
},
],
[ 'OS == "linux"', {
'dependencies': [
'../build/linux/system.gyp:dbus',
'../dbus/dbus.gyp:dbus_test_support',
],
},
],
[ 'OS == "android"', {
'dependencies': [
'../build/android/system.gyp:ssl',
],
'sources!': [
'dns/dns_config_service_posix_unittest.cc',
],
},
],
[ 'OS != "win" and OS != "mac"', {
'sources!': [
'base/x509_cert_types_unittest.cc',
],
}]
],
},
{
'target_name': 'net_unittests_run',
'type': 'none',
'dependencies': [
'net_unittests',
],
'includes': [
'net_unittests.isolate',
],
'actions': [
{
'action_name': 'isolate',
'inputs': [
'net_unittests.isolate',
'<@(isolate_dependency_tracked)',
],
'outputs': [
'<(PRODUCT_DIR)/net_unittests.results',
],
'action': [
'python',
'../tools/isolate/isolate.py',
'--mode', '<(tests_run)',
'--variable', 'PRODUCT_DIR', '<(PRODUCT_DIR)',
'--variable', 'OS', '<(OS)',
'--result', '<@(_outputs)',
'net_unittests.isolate',
],
},
],
},
{
'target_name': 'net_perftests',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_perf',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../testing/gtest.gyp:gtest',
],
'sources': [
'cookies/cookie_monster_perftest.cc',
'disk_cache/disk_cache_perftest.cc',
'proxy/proxy_resolver_perftest.cc',
],
'conditions': [
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
[ 'OS == "win"', {
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
},
],
],
},
{
'target_name': 'stress_cache',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
],
'sources': [
'disk_cache/stress_cache.cc',
],
},
{
'target_name': 'tld_cleanup',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../build/temp_gyp/googleurl.gyp:googleurl',
],
'sources': [
'tools/tld_cleanup/tld_cleanup.cc',
],
},
{
'target_name': 'crash_cache',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
],
'sources': [
'tools/crash_cache/crash_cache.cc',
],
},
{
'target_name': 'run_testserver',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../testing/gtest.gyp:gtest',
],
'sources': [
'tools/testserver/run_testserver.cc',
],
},
{
'target_name': 'net_test_support',
'type': 'static_library',
'dependencies': [
'net',
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
],
'export_dependent_settings': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'base/cert_test_util.cc',
'base/cert_test_util.h',
'base/mock_cert_verifier.cc',
'base/mock_cert_verifier.h',
'base/mock_file_stream.cc',
'base/mock_file_stream.h',
'base/mock_host_resolver.cc',
'base/mock_host_resolver.h',
'base/net_test_suite.cc',
'base/net_test_suite.h',
'base/test_completion_callback.cc',
'base/test_completion_callback.h',
'cookies/cookie_monster_store_test.cc',
'cookies/cookie_monster_store_test.h',
'cookies/cookie_store_test_callbacks.cc',
'cookies/cookie_store_test_callbacks.h',
'cookies/cookie_store_test_helpers.cc',
'cookies/cookie_store_test_helpers.h',
'disk_cache/disk_cache_test_base.cc',
'disk_cache/disk_cache_test_base.h',
'disk_cache/disk_cache_test_util.cc',
'disk_cache/disk_cache_test_util.h',
'dns/dns_test_util.cc',
'dns/dns_test_util.h',
'proxy/mock_proxy_resolver.cc',
'proxy/mock_proxy_resolver.h',
'proxy/mock_proxy_script_fetcher.cc',
'proxy/mock_proxy_script_fetcher.h',
'proxy/proxy_config_service_common_unittest.cc',
'proxy/proxy_config_service_common_unittest.h',
'socket/socket_test_util.cc',
'socket/socket_test_util.h',
'test/base_test_server.cc',
'test/base_test_server.h',
'test/local_test_server_posix.cc',
'test/local_test_server_win.cc',
'test/local_test_server.cc',
'test/local_test_server.h',
'test/python_utils.cc',
'test/python_utils.h',
'test/remote_test_server.cc',
'test/remote_test_server.h',
'test/spawner_communicator.cc',
'test/spawner_communicator.h',
'test/test_server.h',
'url_request/url_request_test_util.cc',
'url_request/url_request_test_util.h',
],
'conditions': [
['inside_chromium_build==1', {
'dependencies': [
'../chrome/app/policy/cloud_policy_codegen.gyp:cloud_policy_proto_compile',
# The test server uses Python modules generated by the sync protos.
'../sync/protocol/sync_proto.gyp:sync_proto',
'../third_party/protobuf/protobuf.gyp:py_proto',
],
}],
['os_posix == 1 and OS != "mac" and OS != "android"', {
'conditions': [
['use_openssl==1', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
}, {
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}],
],
}],
['os_posix == 1 and OS != "mac" and OS != "android"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
['OS != "android"', {
'sources!': [
'test/remote_test_server.cc',
'test/remote_test_server.h',
'test/spawner_communicator.cc',
'test/spawner_communicator.h',
],
}],
],
},
{
'target_name': 'net_resources',
'type': 'none',
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/net',
},
'actions': [
{
'action_name': 'net_resources',
'variables': {
'grit_grd_file': 'base/net_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
'target_name': 'fetch_client',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'net',
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../testing/gtest.gyp:gtest',
],
'sources': [
'tools/fetch/fetch_client.cc',
],
},
{
'target_name': 'fetch_server',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'net',
'../base/base.gyp:base',
'../build/temp_gyp/googleurl.gyp:googleurl',
],
'sources': [
'tools/fetch/fetch_server.cc',
'tools/fetch/http_listen_socket.cc',
'tools/fetch/http_listen_socket.h',
'tools/fetch/http_server.cc',
'tools/fetch/http_server.h',
'tools/fetch/http_server_request_info.cc',
'tools/fetch/http_server_request_info.h',
'tools/fetch/http_server_response_info.cc',
'tools/fetch/http_server_response_info.h',
'tools/fetch/http_session.cc',
'tools/fetch/http_session.h',
],
},
{
'target_name': 'http_server',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'net',
'../base/base.gyp:base',
],
'sources': [
'server/http_connection.cc',
'server/http_connection.h',
'server/http_server.cc',
'server/http_server.h',
'server/http_server_request_info.cc',
'server/http_server_request_info.h',
'server/web_socket.cc',
'server/web_socket.h',
],
},
{
'target_name': 'dnssec_chain_verify',
'type': 'executable',
'dependencies': [
'net',
'../base/base.gyp:base',
],
'sources': [
'tools/dnssec_chain_verify/dnssec_chain_verify.cc',
],
},
{
'target_name': 'crl_set_dump',
'type': 'executable',
'dependencies': [
'net',
'../base/base.gyp:base',
],
'sources': [
'tools/crl_set_dump/crl_set_dump.cc',
],
},
],
'conditions': [
['os_posix == 1 and OS != "mac" and OS != "android"', {
'targets': [
{
'target_name': 'flip_in_mem_edsm_server',
'type': 'executable',
'cflags': [
'-Wno-deprecated',
],
'dependencies': [
'../base/base.gyp:base',
'net',
'../third_party/openssl/openssl.gyp:openssl',
],
'sources': [
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
'tools/flip_server/acceptor_thread.h',
'tools/flip_server/acceptor_thread.cc',
'tools/flip_server/balsa_enums.h',
'tools/flip_server/balsa_frame.cc',
'tools/flip_server/balsa_frame.h',
'tools/flip_server/balsa_headers.cc',
'tools/flip_server/balsa_headers.h',
'tools/flip_server/balsa_headers_token_utils.cc',
'tools/flip_server/balsa_headers_token_utils.h',
'tools/flip_server/balsa_visitor_interface.h',
'tools/flip_server/buffer_interface.h',
'tools/flip_server/constants.h',
'tools/flip_server/create_listener.cc',
'tools/flip_server/create_listener.h',
'tools/flip_server/epoll_server.cc',
'tools/flip_server/epoll_server.h',
'tools/flip_server/flip_config.cc',
'tools/flip_server/flip_config.h',
'tools/flip_server/flip_in_mem_edsm_server.cc',
'tools/flip_server/http_interface.cc',
'tools/flip_server/http_interface.h',
'tools/flip_server/http_message_constants.cc',
'tools/flip_server/http_message_constants.h',
'tools/flip_server/loadtime_measurement.h',
'tools/flip_server/mem_cache.h',
'tools/flip_server/mem_cache.cc',
'tools/flip_server/output_ordering.cc',
'tools/flip_server/output_ordering.h',
'tools/flip_server/ring_buffer.cc',
'tools/flip_server/ring_buffer.h',
'tools/flip_server/simple_buffer.cc',
'tools/flip_server/simple_buffer.h',
'tools/flip_server/sm_connection.cc',
'tools/flip_server/sm_connection.h',
'tools/flip_server/sm_interface.h',
'tools/flip_server/split.h',
'tools/flip_server/split.cc',
'tools/flip_server/spdy_ssl.cc',
'tools/flip_server/spdy_ssl.h',
'tools/flip_server/spdy_interface.cc',
'tools/flip_server/spdy_interface.h',
'tools/flip_server/spdy_util.cc',
'tools/flip_server/spdy_util.h',
'tools/flip_server/streamer_interface.cc',
'tools/flip_server/streamer_interface.h',
'tools/flip_server/string_piece_utils.h',
],
},
{
'target_name': 'curvecp',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'curvecp/circular_buffer.cc',
'curvecp/circular_buffer.h',
'curvecp/client_packetizer.cc',
'curvecp/client_packetizer.h',
'curvecp/connection_key.cc',
'curvecp/connection_key.h',
'curvecp/curvecp_client_socket.cc',
'curvecp/curvecp_client_socket.h',
'curvecp/curvecp_server_socket.cc',
'curvecp/curvecp_server_socket.h',
'curvecp/messenger.h',
'curvecp/messenger.cc',
'curvecp/packetizer.h',
'curvecp/protocol.cc',
'curvecp/protocol.h',
'curvecp/received_block_list.cc',
'curvecp/received_block_list.h',
'curvecp/rtt_and_send_rate_calculator.cc',
'curvecp/rtt_and_send_rate_calculator.h',
'curvecp/sent_block_list.cc',
'curvecp/sent_block_list.h',
'curvecp/server_messenger.cc',
'curvecp/server_messenger.h',
'curvecp/server_packetizer.cc',
'curvecp/server_packetizer.h',
],
},
{
'target_name': 'curvecp_unittests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'curvecp',
'net',
'net_test_support',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/zlib/zlib.gyp:zlib',
],
'sources': [
'curvecp/curvecp_transfer_unittest.cc',
'curvecp/test_client.cc',
'curvecp/test_server.cc',
],
},
]
}],
['OS=="android"', {
'targets': [
{
'target_name': 'net_jni_headers',
'type': 'none',
'variables': {
'java_sources': [
'android/java/org/chromium/net/AndroidNetworkLibrary.java',
'android/java/org/chromium/net/NetworkChangeNotifier.java',
],
'jni_headers': [
'<(SHARED_INTERMEDIATE_DIR)/net/jni/android_network_library_jni.h',
'<(SHARED_INTERMEDIATE_DIR)/net/jni/network_change_notifier_jni.h',
],
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
'target_name': 'net_java',
'type': 'none',
'variables': {
'package_name': 'net',
'java_in_dir': '../net/android/java',
},
'dependencies': [
'../base/base.gyp:base_java',
],
'includes': [ '../build/java.gypi' ],
},
],
}],
['OS=="win"', {
'targets': [
{
# TODO(port): dump_cache is still Windows-specific.
'target_name': 'dump_cache',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
],
'sources': [
'tools/dump_cache/cache_dumper.cc',
'tools/dump_cache/cache_dumper.h',
'tools/dump_cache/dump_cache.cc',
'tools/dump_cache/dump_files.cc',
'tools/dump_cache/upgrade.cc',
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
],
},
],
}],
],
}
| 37.744612 | 94 | 0.594984 |
93477b630d32f08205dc636baf79485650df17fb | 596 | py | Python | botorch/utils/multi_objective/box_decompositions/__init__.py | dme65/botorch | 508f215bfe987373924e39444c8fb544d5132178 | [
"MIT"
] | null | null | null | botorch/utils/multi_objective/box_decompositions/__init__.py | dme65/botorch | 508f215bfe987373924e39444c8fb544d5132178 | [
"MIT"
] | null | null | null | botorch/utils/multi_objective/box_decompositions/__init__.py | dme65/botorch | 508f215bfe987373924e39444c8fb544d5132178 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.utils import (
compute_non_dominated_hypercell_bounds_2d,
)
__all__ = [
"compute_non_dominated_hypercell_bounds_2d",
"FastNondominatedPartitioning",
"NondominatedPartitioning",
]
| 27.090909 | 76 | 0.790268 |
b18d2e6002e0168d817adf39a70e676444ddc4b6 | 98 | py | Python | app/util/chunker.py | robert-huang/taylen | 123b09f6ef697f97e4c33eee263edb3cfb4e2555 | [
"MIT"
] | null | null | null | app/util/chunker.py | robert-huang/taylen | 123b09f6ef697f97e4c33eee263edb3cfb4e2555 | [
"MIT"
] | null | null | null | app/util/chunker.py | robert-huang/taylen | 123b09f6ef697f97e4c33eee263edb3cfb4e2555 | [
"MIT"
] | null | null | null | def chunker(seq, size: int):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
| 32.666667 | 68 | 0.642857 |
15cb5d665f39cb294aee431259529b7d511ef0d9 | 1,952 | py | Python | src/adage/dagstate.py | lukasheinrich/dagger | 353c15cd97ff5150eff128f34cf1666c78826524 | [
"MIT"
] | 31 | 2018-07-12T10:33:39.000Z | 2021-12-01T22:49:42.000Z | src/adage/dagstate.py | lukasheinrich/dagger | 353c15cd97ff5150eff128f34cf1666c78826524 | [
"MIT"
] | 10 | 2021-02-15T20:13:43.000Z | 2022-02-03T00:48:34.000Z | src/adage/dagstate.py | lukasheinrich/dagger | 353c15cd97ff5150eff128f34cf1666c78826524 | [
"MIT"
] | 3 | 2019-05-31T18:04:15.000Z | 2021-08-23T12:00:18.000Z | import logging
import adage.nodestate as nodestate
log = logging.getLogger(__name__)
def node_ran_and_failed(nodeobj):
    '''
    Check whether a node finished processing in the FAILED state.

    :param nodeobj: the node object
    :return:
        - ``True`` if node has been processed and failed
        - ``False`` in all other cases
    '''
    has_failed = nodeobj.state == nodestate.FAILED
    return has_failed
def upstream_ok(dag, nodeobj):
    '''
    Check whether every upstream (predecessor) node of *nodeobj* finished
    successfully. A node with no predecessors is trivially ok.

    :param dag: the DAG object holding the nodes
    :param nodeobj: the node whose predecessors are checked
    :return: ``True`` if all predecessors pass :func:`node_status`
        (or there are none), ``False`` otherwise
    '''
    upstream = dag.predecessors(nodeobj.identifier)
    # reuse the list we already retrieved instead of querying the DAG a
    # second time just for the debug message
    log.debug("upstream nodes are %s", upstream)
    if not upstream:
        return True
    return all(node_status(dag.getNode(x)) for x in upstream)
def upstream_ready(dag, nodeobj):
    '''
    Check whether every upstream (predecessor) node of *nodeobj* reports
    ready; a node without predecessors is trivially ready.

    :param dag: the DAG object holding the nodes
    :param nodeobj: the node whose predecessors are checked
    :return: ``True`` if all predecessors are ready (or there are none)
    '''
    # all() over an empty iterable is True, which covers the no-predecessor case
    predecessor_ids = dag.predecessors(nodeobj.identifier)
    return all(dag.getNode(node_id).ready() for node_id in predecessor_ids)
def upstream_failure(dag, nodeobj):
    '''
    Recursively check whether any upstream (direct or transitive predecessor)
    node of *nodeobj* ran and failed.

    :param dag: the DAG object holding the nodes
    :param nodeobj: the node whose upstream chain is inspected
    :return: ``True`` if any ancestor node failed, ``False`` otherwise
        (including when there are no predecessors)
    '''
    upstream = [dag.getNode(x) for x in dag.predecessors(nodeobj.identifier)]
    if not upstream:
        return False
    log.debug('checking upstream nodes %s', upstream)
    upstream_status = [node_ran_and_failed(obj) or upstream_failure(dag, obj) for obj in upstream]
    failed = any(upstream_status)
    # previously this logged the truthiness of the status *list* (always 'ok'
    # for a non-empty upstream); report the actual aggregated result instead
    log.debug('upstream %s', 'failed' if failed else 'ok')
    return failed
def node_status(nodeobj):
    '''
    Truthiness check on node status: the node must have been submitted,
    finished processing, and exited successfully.

    :param nodeobj: the node object
    :return:
        - truthy if successful (i.e. has been submitted, finished processing
          and exited successfully)
        - falsy in all other cases
    '''
    was_submitted = nodeobj.submit_time
    is_ready = nodeobj.ready()
    succeeded = nodeobj.successful()
    log.debug("node %s: submitted: %s, ready: %s, successful: %s",
              nodeobj.identifier, was_submitted, is_ready, succeeded)
    return was_submitted and is_ready and succeeded
def node_defined_or_running(nodeobj):
    '''
    Check whether the node is still pending (DEFINED) or currently RUNNING.

    :param nodeobj: the node object
    :return: ``True`` if the node state is DEFINED or RUNNING
    '''
    state = nodeobj.state
    is_running = state == nodestate.RUNNING
    is_defined = state == nodestate.DEFINED
    log.debug('defined: %s running %s', is_defined, is_running)
    return is_running or is_defined
| 32 | 112 | 0.693135 |
12d18498a43e4d1a626fcd3f70dd0abbf998f8d6 | 6,494 | py | Python | client/python/lib/tests/tfs_compat_grpc/test_grpc_responses.py | BrightTux/model_server | cdbeb464c78b161e5706490fc18b0a8cf16138fb | [
"Apache-2.0"
] | 305 | 2018-10-01T12:41:28.000Z | 2020-04-24T10:36:08.000Z | client/python/lib/tests/tfs_compat_grpc/test_grpc_responses.py | BrightTux/model_server | cdbeb464c78b161e5706490fc18b0a8cf16138fb | [
"Apache-2.0"
] | 61 | 2018-11-15T09:23:01.000Z | 2020-04-23T09:29:56.000Z | client/python/lib/tests/tfs_compat_grpc/test_grpc_responses.py | BrightTux/model_server | cdbeb464c78b161e5706490fc18b0a8cf16138fb | [
"Apache-2.0"
] | 67 | 2018-10-13T14:33:48.000Z | 2020-04-22T19:01:32.000Z | #
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from numpy import ndarray
from numpy.core.numeric import array_equal
import pytest
from tensorflow.core.framework.types_pb2 import DataType
from tensorflow_serving.apis.get_model_status_pb2 import ModelVersionStatus
from tensorflow_serving.apis.predict_pb2 import PredictResponse
from ovmsclient.tfs_compat.grpc.responses import (GrpcModelMetadataResponse,
GrpcModelStatusResponse,
GrpcPredictResponse)
from tfs_compat_grpc.config import (MODEL_METADATA_RESPONSE_VALID, MODEL_STATUS_RESPONSE_VALID,
PREDICT_RESPONSE_VALID, PREDICT_RESPONSE_TENSOR_TYPE_INVALID)
from tfs_compat_grpc.utils import (create_model_metadata_response,
create_model_status_response,
merge_model_status_responses)
@pytest.mark.parametrize("outputs_dict, model_name, model_version,"
                         "expected_outputs", PREDICT_RESPONSE_VALID)
def test_PredictResponse_to_dict_valid(outputs_dict, model_name, model_version,
                                       expected_outputs):
    """to_dict() on a valid PredictResponse yields a single "outputs" entry of ndarrays."""
    raw = PredictResponse()
    raw.model_spec.name = model_name
    raw.model_spec.version.value = model_version
    for tensor_name, tensor_proto in outputs_dict.items():
        raw.outputs[tensor_name].CopyFrom(tensor_proto)

    response = GrpcPredictResponse(raw)
    result = response.to_dict()

    assert isinstance(result, dict)
    assert "outputs" in result
    assert len(result) == 1

    outputs = result["outputs"]
    if isinstance(outputs, dict):
        # multi-output model: each named output maps to its expected array
        for name, values in outputs.items():
            assert name in response.raw_response.outputs.keys()
            assert type(values) is ndarray
            assert array_equal(values, expected_outputs[name])
    else:
        # single-output model: the array is returned directly
        assert type(outputs) is ndarray
        assert array_equal(outputs, expected_outputs)
@pytest.mark.parametrize("outputs_dict, model_name, model_version, expected_exception,"
                         "expected_message", PREDICT_RESPONSE_TENSOR_TYPE_INVALID)
def test_PredictResponse_to_dict_invalid(outputs_dict, model_name, model_version,
                                         expected_exception, expected_message):
    """to_dict() raises the expected exception for unsupported tensor types."""
    raw = PredictResponse()
    raw.model_spec.name = model_name
    raw.model_spec.version.value = model_version
    for tensor_name, tensor_proto in outputs_dict.items():
        raw.outputs[tensor_name].CopyFrom(tensor_proto)

    with pytest.raises(expected_exception) as error:
        GrpcPredictResponse(raw).to_dict()

    assert str(error.value) == expected_message
@pytest.mark.parametrize("model_raw_status_response_dict", MODEL_STATUS_RESPONSE_VALID)
def test_ModelStatusResponse_to_dict_valid(model_raw_status_response_dict):
    """to_dict() exposes one status entry per model version with readable state names."""
    per_version_responses = [
        create_model_status_response(version, status['error_code'],
                                     status['error_message'], status['state'])
        for version, status in model_raw_status_response_dict.items()
    ]
    merged = merge_model_status_responses(per_version_responses)

    result = GrpcModelStatusResponse(merged).to_dict()

    assert isinstance(result, dict)
    assert len(result) == len(model_raw_status_response_dict)
    for version, expected in model_raw_status_response_dict.items():
        assert version in result
        entry = result[version]
        assert isinstance(entry, dict)
        assert entry['error_code'] == expected['error_code']
        assert entry['error_message'] == expected['error_message']
        # the numeric proto state is translated into its symbolic name
        assert entry['state'] == ModelVersionStatus.State.Name(expected['state'])
@pytest.mark.parametrize("model_raw_metadata_response_dict", MODEL_METADATA_RESPONSE_VALID)
def test_ModelMetadataResponse_to_dict_valid(model_raw_metadata_response_dict):
    """to_dict() on valid metadata exposes model_version plus input/output tensor specs."""
    raw_response = create_model_metadata_response(model_raw_metadata_response_dict)
    result = GrpcModelMetadataResponse(raw_response).to_dict()

    assert isinstance(result, dict)
    assert len(result) == 3
    assert all(key in result for key in ["model_version", "inputs", "outputs"])
    assert result['model_version'] == model_raw_metadata_response_dict['version']

    def check_tensor_specs(actual, expected):
        # inputs and outputs are validated identically: name -> {shape, dtype}
        assert len(actual) == len(expected)
        for tensor_name in actual:
            assert tensor_name in expected
            spec = actual[tensor_name]
            assert isinstance(spec, dict)
            assert 'shape' in spec and 'dtype' in spec
            assert spec['shape'] == expected[tensor_name]['shape']
            # numeric proto dtype is translated into its symbolic name
            assert spec['dtype'] == DataType.Name(expected[tensor_name]['dtype'])

    check_tensor_specs(result['inputs'], model_raw_metadata_response_dict['inputs'])
    check_tensor_specs(result['outputs'], model_raw_metadata_response_dict['outputs'])
| 45.732394 | 99 | 0.721435 |
d4cfc3ddc014993a9b70e145a39d22433abd1e1a | 28,593 | py | Python | src/sentry/web/forms/accounts.py | dpoirier/sentry | 19bdf8aff1cd230fb6f6b32f13340d49ae16f67c | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/forms/accounts.py | dpoirier/sentry | 19bdf8aff1cd230fb6f6b32f13340d49ae16f67c | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/forms/accounts.py | dpoirier/sentry | 19bdf8aff1cd230fb6f6b32f13340d49ae16f67c | [
"BSD-3-Clause"
] | null | null | null | """
sentry.web.forms.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import pytz
import six
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model
from django.db.models import Q
from django.utils.text import capfirst, mark_safe
from django.utils.translation import ugettext_lazy as _
from sentry import newsletter, options
from sentry.auth import password_validation
from sentry.app import ratelimiter
from sentry.constants import LANGUAGES
from sentry.models import (Organization, OrganizationStatus, User, UserOption, UserOptionValue)
from sentry.security import capture_security_activity
from sentry.utils.auth import find_users, logger
from sentry.web.forms.fields import CustomTypedChoiceField, ReadOnlyTextField
from six.moves import range
def _get_timezone_choices():
    """Return (tz_name, label) choices for all pytz common timezones,
    ordered by their current UTC offset.

    The numeric offset is used only as the primary sort key and is
    stripped from the returned tuples.
    """
    results = []
    for tz in pytz.common_timezones:
        # Current UTC offset, e.g. '+0100'; reflects DST at call time.
        offset = datetime.now(pytz.timezone(tz)).strftime('%z')
        results.append((int(offset), tz, '(UTC%s) %s' % (offset, tz)))
    # Sort by (offset, name, label), then drop the numeric sort key.
    return [entry[1:] for entry in sorted(results)]
# Computed once at import time; UTC offsets therefore reflect DST at process start.
TIMEZONE_CHOICES = _get_timezone_choices()
class AuthenticationForm(forms.Form):
    """Username/password login form with per-IP and per-username rate limiting."""

    username = forms.CharField(
        label=_('Account'),
        max_length=128,
        widget=forms.TextInput(attrs={
            'placeholder': _('username or email'),
            'tabindex': 1,
        }),
    )
    password = forms.CharField(
        label=_('Password'),
        widget=forms.PasswordInput(attrs={
            'placeholder': _('password'),
            'tabindex': 2,
        }),
    )

    error_messages = {
        'invalid_login':
        _(
            "Please enter a correct %(username)s and password. "
            "Note that both fields may be case-sensitive."
        ),
        'rate_limited':
        _("You have made too many failed authentication "
          "attempts. Please try again later."),
        'no_cookies':
        _(
            "Your Web browser doesn't appear to have cookies "
            "enabled. Cookies are required for logging in."
        ),
        'inactive':
        _("This account is inactive."),
    }

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)

        # Label the "username" field after whatever USERNAME_FIELD the
        # configured user model declares.
        user_model = get_user_model()
        self.username_field = user_model._meta.get_field(user_model.USERNAME_FIELD)
        if not self.fields['username'].label:
            self.fields['username'].label = capfirst(self.username_field.verbose_name)

    def clean_username(self):
        # Normalize to lowercase; blank input falls through as None.
        entered = (self.cleaned_data.get('username') or '').strip()
        if not entered:
            return
        return entered.lower()

    def is_rate_limited(self):
        if self._is_ip_rate_limited() or self._is_user_rate_limited():
            return True
        return False

    def _is_ip_rate_limited(self):
        limit = options.get('auth.ip-rate-limit')
        if not limit:
            return False

        return ratelimiter.is_limited(
            'auth:ip:{}'.format(self.request.META['REMOTE_ADDR']),
            limit,
        )

    def _is_user_rate_limited(self):
        limit = options.get('auth.user-rate-limit')
        if not limit:
            return False

        username = self.cleaned_data.get('username')
        if not username:
            return False

        return ratelimiter.is_limited(
            u'auth:username:{}'.format(username),
            limit,
        )

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        # Both credentials are required before attempting authentication.
        if not (username and password):
            raise forms.ValidationError(
                self.error_messages['invalid_login'] %
                {'username': self.username_field.verbose_name}
            )

        if self.is_rate_limited():
            logger.info(
                'user.auth.rate-limited',
                extra={
                    'ip_address': self.request.META['REMOTE_ADDR'],
                    'username': username,
                }
            )
            raise forms.ValidationError(self.error_messages['rate_limited'])

        self.user_cache = authenticate(username=username, password=password)
        if self.user_cache is None:
            raise forms.ValidationError(
                self.error_messages['invalid_login'] %
                {'username': self.username_field.verbose_name}
            )

        self.check_for_test_cookie()
        return self.cleaned_data

    def check_for_test_cookie(self):
        # Only meaningful when a request was supplied to __init__.
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(self.error_messages['no_cookies'])

    def get_user_id(self):
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        return self.user_cache
class RegistrationForm(forms.ModelForm):
    """Self-service signup form: name, email-as-username, password, and an
    optional newsletter opt-in (only shown when the newsletter is enabled)."""

    name = forms.CharField(
        label=_('Name'),
        max_length=30,
        widget=forms.TextInput(attrs={'placeholder': 'Jane Doe'}),
        required=True
    )
    username = forms.EmailField(
        label=_('Email'),
        max_length=128,
        widget=forms.TextInput(attrs={'placeholder': 'you@example.com'}),
        required=True
    )
    password = forms.CharField(
        required=True, widget=forms.PasswordInput(attrs={'placeholder': 'something super secret'})
    )
    subscribe = CustomTypedChoiceField(
        coerce=lambda x: six.text_type(x) == u'1',
        label=_("Email updates"),
        choices=(
            (1, 'Yes, I would like to receive updates via email'),
            (0, "No, I'd prefer not to receive these updates"),
        ),
        widget=forms.RadioSelect,
        required=True,
        initial=False,
    )

    def __init__(self, *args, **kwargs):
        super(RegistrationForm, self).__init__(*args, **kwargs)
        if not newsletter.is_enabled():
            del self.fields['subscribe']
        else:
            # NOTE: the text here is duplicated within the ``NewsletterConsent`` component
            # in the UI
            # BUG FIX: the original called ``.format(privacy_link=...)`` on a string
            # containing a %-style placeholder, so the Privacy Policy URL was never
            # substituted; use %-interpolation to match the placeholder.
            self.fields['subscribe'].help_text = mark_safe(
                "We'd love to keep you updated via email with product and feature announcements, promotions, educational materials, and events. Our updates focus on relevant information and never sell your data to marketing companies. See our <a href=\"%(privacy_link)s\">Privacy Policy</a> for more details." % {
                    'privacy_link': settings.PRIVACY_URL,
                }
            )

    class Meta:
        fields = ('username', 'name')
        model = User

    def clean_username(self):
        """Lower-case the email address and reject ones already registered."""
        value = (self.cleaned_data.get('username') or '').strip()
        if not value:
            return
        if User.objects.filter(username__iexact=value).exists():
            raise forms.ValidationError(
                _('An account is already registered with that email address.')
            )
        return value.lower()

    def clean_password(self):
        """Run the configured password validators before accepting the password."""
        password = self.cleaned_data['password']
        password_validation.validate_password(password)
        return password

    def save(self, commit=True):
        user = super(RegistrationForm, self).save(commit=False)
        # The email address doubles as the username.
        user.email = user.username
        user.set_password(self.cleaned_data['password'])
        if commit:
            user.save()
            if self.cleaned_data.get('subscribe'):
                newsletter.create_or_update_subscriptions(
                    user, list_ids=newsletter.get_default_list_ids())
        return user
class RecoverPasswordForm(forms.Form):
    """Password-recovery lookup: resolve a username/email to exactly one
    non-managed user account."""

    user = forms.CharField(
        label=_('Account'),
        max_length=128,
        widget=forms.TextInput(attrs={'placeholder': _('username or email')}),
    )

    def clean_user(self):
        query = (self.cleaned_data.get('user') or '').strip()
        if not query:
            return

        candidates = find_users(query, with_valid_password=False)
        if not candidates:
            raise forms.ValidationError(_("We were unable to find a matching user."))

        # Managed accounts authenticate externally, so password recovery
        # does not apply to them.
        candidates = [u for u in candidates if not u.is_managed]
        if not candidates:
            raise forms.ValidationError(
                _(
                    "The account you are trying to recover is managed and does not support password recovery."
                )
            )
        if len(candidates) > 1:
            raise forms.ValidationError(
                _("Multiple accounts were found matching this email address.")
            )
        return candidates[0]
class ChangePasswordRecoverForm(forms.Form):
    """Second step of password recovery: collect and validate the new password."""

    password = forms.CharField(widget=forms.PasswordInput())

    def clean_password(self):
        candidate = self.cleaned_data['password']
        # Delegate strength checks to the configured validators.
        password_validation.validate_password(candidate)
        return candidate
class EmailForm(forms.Form):
    """Add an alternative email address, confirmed with the current password.

    The password field is dropped for accounts without a usable password
    (e.g. SSO-managed accounts).
    """

    alt_email = forms.EmailField(
        label=_('New Email'),
        required=False,
        help_text='Designate an alternative email for this account',
    )
    password = forms.CharField(
        label=_('Current password'),
        widget=forms.PasswordInput(),
        help_text=_('You will need to enter your current account password to make changes.'),
        required=True,
    )

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(EmailForm, self).__init__(*args, **kwargs)
        if not user.has_usable_password():
            del self.fields['password']

    def clean_password(self):
        entered = self.cleaned_data.get('password')
        if entered and not self.user.check_password(entered):
            raise forms.ValidationError(_('The password you entered is not correct.'))
        elif not entered:
            raise forms.ValidationError(
                _('You must confirm your current password to make changes.')
            )
        return entered
class AccountSettingsForm(forms.Form):
    """Edit core account details (name, username, email, password).

    Fields controlled by an external identity provider (``user.is_managed``
    plus ``SENTRY_MANAGED_USER_FIELDS``) are rendered read-only, and the
    current password is required to confirm sensitive changes.
    """

    name = forms.CharField(required=True, label=_('Name'), max_length=30)
    username = forms.CharField(label=_('Username'), max_length=128)
    email = forms.EmailField(label=_('Email'))
    new_password = forms.CharField(
        label=_('New password'),
        widget=forms.PasswordInput(),
        required=False,
        # help_text=password_validation.password_validators_help_text_html(),
    )
    verify_new_password = forms.CharField(
        label=_('Verify new password'),
        widget=forms.PasswordInput(),
        required=False,
    )
    password = forms.CharField(
        label=_('Current password'),
        widget=forms.PasswordInput(),
        help_text='You will need to enter your current account password to make changes.',
        required=False,
    )

    def __init__(self, user, request, *args, **kwargs):
        self.user = user
        self.request = request
        super(AccountSettingsForm, self).__init__(*args, **kwargs)

        needs_password = user.has_usable_password()

        if self.user.is_managed:
            # username and password always managed, email and
            # name optionally managed
            for field in ('email', 'name', 'username'):
                if field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS:
                    self.fields[field] = ReadOnlyTextField(label=self.fields[field].label)
                    if field == 'email':
                        needs_password = False
            del self.fields['new_password']
            del self.fields['verify_new_password']

        # don't show username field if its the same as their email address
        if self.user.email == self.user.username:
            del self.fields['username']

        if not needs_password:
            del self.fields['password']

    def is_readonly(self):
        # Fully read-only only when every editable field is provider-managed.
        if self.user.is_managed:
            return set(('email', 'name')) == set(settings.SENTRY_MANAGED_USER_FIELDS)
        return False

    def _clean_managed_field(self, field):
        # Managed fields ignore submitted data and keep the stored value.
        if self.user.is_managed and (
            field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS
        ):
            return getattr(self.user, field)
        return self.cleaned_data[field]

    def clean_email(self):
        value = self._clean_managed_field('email').lower()
        if self.user.email.lower() == value:
            return value
        # Reject addresses colliding with any other account's email/username.
        if User.objects.filter(Q(email__iexact=value) | Q(username__iexact=value)).exclude(
            id=self.user.id
        ).exists():
            raise forms.ValidationError(
                _("There was an error adding %s: that email is already in use") %
                self.cleaned_data['email']
            )
        return value

    def clean_name(self):
        return self._clean_managed_field('name')

    def clean_username(self):
        value = self._clean_managed_field('username')
        if User.objects.filter(username__iexact=value).exclude(id=self.user.id).exists():
            raise forms.ValidationError(_("That username is already in use."))
        return value

    def clean_password(self):
        value = self.cleaned_data.get('password')
        if value and not self.user.check_password(value):
            raise forms.ValidationError('The password you entered is not correct.')
        elif not value and (
            self.cleaned_data.get('email', self.user.email) != self.user.email or
            self.cleaned_data.get('new_password')
        ):
            raise forms.ValidationError('You must confirm your current password to make changes.')
        return value

    def clean_verify_new_password(self):
        new_password = self.cleaned_data.get('new_password')
        if new_password:
            verify_new_password = self.cleaned_data.get('verify_new_password')
            if verify_new_password is None:
                raise forms.ValidationError('You must verify your new password.')
            if new_password != verify_new_password:
                raise forms.ValidationError('Your new password and verify new password must match.')
            return verify_new_password

    def clean_new_password(self):
        new_password = self.cleaned_data.get('new_password')
        if new_password:
            password_validation.validate_password(new_password)
        return new_password

    def save(self, commit=True):
        if self.cleaned_data.get('new_password'):
            self.user.set_password(self.cleaned_data['new_password'])
            # Rotate the session nonce so other sessions are invalidated.
            self.user.refresh_session_nonce(self.request)

            capture_security_activity(
                account=self.user,
                type='password-changed',
                actor=self.request.user,
                ip_address=self.request.META['REMOTE_ADDR'],
                send_email=True,
            )

        self.user.name = self.cleaned_data['name']

        # When username mirrored the old email, keep them in sync if possible.
        if self.cleaned_data['email'] != self.user.email:
            new_username = self.user.email == self.user.username
        else:
            new_username = False

        self.user.email = self.cleaned_data['email']

        if self.cleaned_data.get('username'):
            self.user.username = self.cleaned_data['username']
        elif new_username and not User.objects.filter(username__iexact=self.user.email).exists():
            self.user.username = self.user.email

        if commit:
            self.user.save()

        return self.user
class AppearanceSettingsForm(forms.Form):
    """Per-user display preferences, each persisted as a UserOption of the
    same name as the form field."""

    language = forms.ChoiceField(
        label=_('Language'),
        choices=LANGUAGES,
        required=False,
        widget=forms.Select(attrs={'class': 'input-xlarge'})
    )
    stacktrace_order = forms.ChoiceField(
        label=_('Stacktrace order'),
        choices=(
            ('-1', _('Default (let Sentry decide)')), ('1', _('Most recent call last')),
            ('2', _('Most recent call first')),
        ),
        help_text=_('Choose the default ordering of frames in stacktraces.'),
        required=False,
        widget=forms.Select(attrs={'class': 'input-xlarge'})
    )
    timezone = forms.ChoiceField(
        label=_('Time zone'),
        choices=TIMEZONE_CHOICES,
        required=False,
        widget=forms.Select(attrs={'class': 'input-xxlarge'})
    )
    clock_24_hours = forms.BooleanField(
        label=_('Use a 24-hour clock'),
        required=False,
    )

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(AppearanceSettingsForm, self).__init__(*args, **kwargs)

    def save(self):
        # Each cleaned field maps 1:1 onto a UserOption key of the same name.
        for key in ('language', 'stacktrace_order', 'timezone', 'clock_24_hours'):
            UserOption.objects.set_value(
                user=self.user,
                key=key,
                value=self.cleaned_data[key],
            )
        return self.user
class NotificationReportSettingsForm(forms.Form):
    """Choose which organizations send weekly report emails.

    The preference is stored inverted: the UserOption records the ids of
    *disabled* organizations.
    """

    organizations = forms.ModelMultipleChoiceField(
        queryset=Organization.objects.none(),
        required=False,
        widget=forms.CheckboxSelectMultiple(),
    )

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(NotificationReportSettingsForm, self).__init__(*args, **kwargs)

        org_queryset = Organization.objects.filter(
            status=OrganizationStatus.VISIBLE,
            member_set__user=user,
        )

        disabled_orgs = set(
            UserOption.objects.get_value(
                user=user,
                key='reports:disabled-organizations',
                default=[],
            )
        )

        self.fields['organizations'].queryset = org_queryset
        # Pre-check everything that is not explicitly disabled.
        self.fields['organizations'].initial = [
            org.id for org in org_queryset if org.id not in disabled_orgs
        ]

    def save(self):
        enabled = set(org.id for org in self.cleaned_data.get('organizations'))
        every_org = set(self.fields['organizations'].queryset.values_list('id', flat=True))
        # Persist the complement: anything the user left unchecked.
        UserOption.objects.set_value(
            user=self.user,
            key='reports:disabled-organizations',
            value=list(every_org.difference(enabled)),
        )
class NotificationDeploySettingsForm(forms.Form):
    """Per-organization deploy-email preference (all / committed-only / never)."""

    CHOICES = [
        (UserOptionValue.all_deploys, _('All deploys')),
        (UserOptionValue.committed_deploys_only,
         _('Deploys with your commits')), (UserOptionValue.no_deploys, _('Never'))
    ]

    notifications = forms.ChoiceField(
        choices=CHOICES,
        required=False,
        widget=forms.RadioSelect(),
    )

    def __init__(self, user, organization, *args, **kwargs):
        self.user = user
        self.organization = organization
        super(NotificationDeploySettingsForm, self).__init__(*args, **kwargs)

        self.fields['notifications'].label = ""  # hide the label
        self.fields['notifications'].initial = UserOption.objects.get_value(
            user=user,
            organization=self.organization,
            key='deploy-emails',
            default=UserOptionValue.committed_deploys_only,
        )

    def save(self):
        # Read the raw (prefixed) POST key directly so a form that was never
        # rendered/submitted does not clobber the stored preference.
        chosen = self.data.get('{}-notifications'.format(self.prefix), None)
        if chosen is not None:
            UserOption.objects.set_value(
                user=self.user,
                organization=self.organization,
                key='deploy-emails',
                value=chosen,
            )
class NotificationSettingsForm(forms.Form):
    """Account-wide notification defaults.

    Boolean preferences are persisted as '1'/'0' strings for historical
    reasons; the workflow level is a UserOptionValue constant stored with
    ``project=None`` as the user-level default.
    """

    alert_email = forms.EmailField(
        label=_('Email'),
        help_text=_('Designate an alternative email address to send email notifications to.'),
        required=False
    )
    subscribe_by_default = forms.BooleanField(
        label=_('Automatically subscribe to alerts for new projects'),
        help_text=_(
            "When enabled, you'll automatically subscribe to alerts when you create or join a project."
        ),
        required=False,
    )
    workflow_notifications = forms.ChoiceField(
        label=_('Preferred workflow subscription level for new projects'),
        choices=[
            (UserOptionValue.all_conversations, "Receive workflow updates for all issues."),
            (UserOptionValue.participating_only,
             "Receive workflow updates only for issues that I am participating in or have subscribed to."),
            (UserOptionValue.no_conversations, "Never receive workflow updates."),
        ],
        help_text=_("This will be automatically set as your subscription preference when you create or join a project. It has no effect on existing projects."),
        required=False,
    )
    self_notifications = forms.BooleanField(
        label=_('Receive notifications about my own activity'),
        help_text=_(
            'Enable this if you wish to receive emails for your own actions, as well as others.'
        ),
        required=False,
    )
    self_assign_issue = forms.BooleanField(
        label=_('Claim unassigned issues when resolving them'),
        help_text=_(
            "When enabled, you'll automatically be assigned to unassigned issues when marking them as resolved."
        ),
        required=False,
    )

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(NotificationSettingsForm, self).__init__(*args, **kwargs)

        # Seed every field from its stored UserOption.
        self.fields['alert_email'].initial = UserOption.objects.get_value(
            user=self.user,
            key='alert_email',
            default=user.email,
        )
        self.fields['subscribe_by_default'].initial = (
            UserOption.objects.get_value(
                user=self.user,
                key='subscribe_by_default',
                default='1',
            ) == '1'
        )
        self.fields['workflow_notifications'].initial = UserOption.objects.get_value(
            user=self.user,
            key='workflow:notifications',
            default=UserOptionValue.all_conversations,
            project=None,
        )
        self.fields['self_notifications'].initial = UserOption.objects.get_value(
            user=self.user, key='self_notifications', default='0'
        ) == '1'
        self.fields['self_assign_issue'].initial = UserOption.objects.get_value(
            user=self.user, key='self_assign_issue', default='0'
        ) == '1'

    def get_title(self):
        return "General"

    def save(self):
        UserOption.objects.set_value(
            user=self.user,
            key='alert_email',
            value=self.cleaned_data['alert_email'],
        )
        # Booleans are stored as '1'/'0' strings.
        for key in ('subscribe_by_default', 'self_notifications', 'self_assign_issue'):
            UserOption.objects.set_value(
                user=self.user,
                key=key,
                value='1' if self.cleaned_data[key] else '0',
            )

        workflow_value = self.cleaned_data.get('workflow_notifications')
        if not workflow_value:
            # An empty choice clears the user-level default entirely.
            UserOption.objects.unset_value(
                user=self.user,
                key='workflow:notifications',
                project=None,
            )
        else:
            UserOption.objects.set_value(
                user=self.user,
                key='workflow:notifications',
                value=workflow_value,
                project=None,
            )
class ProjectEmailOptionsForm(forms.Form):
    """Per-project mail settings: alert on/off, workflow level, and the
    address notifications are delivered to."""

    alert = forms.BooleanField(required=False)
    workflow = forms.ChoiceField(
        choices=[
            (UserOptionValue.no_conversations, 'Nothing'),
            (UserOptionValue.participating_only, 'Participating'),
            (UserOptionValue.all_conversations, 'Everything'),
        ],
    )
    email = forms.ChoiceField(label="", choices=(), required=False,
                              widget=forms.Select())

    def __init__(self, project, user, *args, **kwargs):
        self.project = project
        self.user = user
        super(ProjectEmailOptionsForm, self).__init__(*args, **kwargs)

        has_alerts = project.is_user_subscribed_to_mail_alerts(user)

        # This allows users who have entered an alert_email value or have specified an email
        # for notifications to keep their settings
        emails = [e.email for e in user.get_verified_emails()]
        alert_email = UserOption.objects.get_value(self.user, 'alert_email')
        specified_email = UserOption.objects.get_value(self.user, 'mail:email', project=project)
        emails.extend([user.email, alert_email, specified_email])

        self.fields['email'].choices = [(addr, addr) for addr in sorted(set(emails)) if addr]
        self.fields['alert'].initial = has_alerts
        # Project-level workflow preference falls back to the user-level default.
        self.fields['workflow'].initial = UserOption.objects.get_value(
            user=self.user,
            project=self.project,
            key='workflow:notifications',
            default=UserOption.objects.get_value(
                user=self.user,
                project=None,
                key='workflow:notifications',
                default=UserOptionValue.all_conversations,
            ),
        )
        self.fields['email'].initial = specified_email or alert_email or user.email

    def save(self):
        UserOption.objects.set_value(
            user=self.user,
            key='mail:alert',
            value=int(self.cleaned_data['alert']),
            project=self.project,
        )
        UserOption.objects.set_value(
            user=self.user,
            key='workflow:notifications',
            value=self.cleaned_data['workflow'],
            project=self.project,
        )
        if self.cleaned_data['email']:
            UserOption.objects.set_value(
                user=self.user,
                key='mail:email',
                value=self.cleaned_data['email'],
                project=self.project,
            )
        else:
            UserOption.objects.unset_value(self.user, self.project, 'mail:email')
class TwoFactorForm(forms.Form):
    """Prompt for the one-time code produced by the user's authenticator."""

    otp = forms.CharField(
        label=_('One-time password'),
        max_length=20,
        widget=forms.TextInput(
            attrs={
                'placeholder': _('Code from authenticator'),
                'autofocus': True,
            }
        ),
    )
class ConfirmPasswordForm(forms.Form):
    """Re-authentication form for sensitive actions.

    The field is removed entirely for accounts without a usable password.
    """

    password = forms.CharField(
        label=_('Sentry account password'),
        widget=forms.PasswordInput(),
        help_text='You will need to enter your current Sentry account password to make changes.',
        required=True,
    )

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
        if not user.has_usable_password():
            del self.fields['password']

    def clean_password(self):
        entered = self.cleaned_data.get('password')
        if entered and not self.user.check_password(entered):
            raise forms.ValidationError(_('The password you entered is not correct.'))
        elif not entered:
            raise forms.ValidationError(
                _('You must confirm your current password to make changes.')
            )
        return entered
| 34.039286 | 360 | 0.610954 |
3b0d8d39e933952bc542bd161b8d54bce3601224 | 708 | py | Python | tests/test_variant.py | jeremymcrae/vcfsyncer | 3233df0b4ea760d066a7ade7d315441bf4c01fac | [
"MIT"
] | 1 | 2019-01-25T19:07:05.000Z | 2019-01-25T19:07:05.000Z | tests/test_variant.py | jeremymcrae/vcfsyncer | 3233df0b4ea760d066a7ade7d315441bf4c01fac | [
"MIT"
] | null | null | null | tests/test_variant.py | jeremymcrae/vcfsyncer | 3233df0b4ea760d066a7ade7d315441bf4c01fac | [
"MIT"
] | null | null | null |
import unittest
from collections import namedtuple
from vcfsyncer.variant import MultiVariantRecord
# Minimal stand-in for a pysam VariantRecord: the eight fixed VCF columns
# plus FORMAT and per-sample data, in file order.
Var = namedtuple('Var', ['chrom', 'pos', 'id', 'ref', 'alts', 'qual', 'filter',
'info', 'format', 'samples'])
class TestVCFVariant(unittest.TestCase):

    def test_missing_alt(self):
        """Merged records union their alts in first-seen order; a record
        whose alts is None contributes nothing."""
        same_alt_a = Var('1', 100, '.', 'A', ['G'], 100, 'PASS', {}, 'AD', {'A': '10,10'})
        same_alt_b = Var('1', 100, '.', 'A', ['G'], 100, 'PASS', {}, 'AD', {'B': '10,10'})
        other_alt = Var('1', 100, '.', 'A', ['C'], 100, 'PASS', {}, 'AD', {'C': '10,10'})
        no_alt = Var('1', 100, '.', 'A', None, 100, 'PASS', {}, None, {})

        merged = MultiVariantRecord(same_alt_a, same_alt_b, other_alt, no_alt)
        self.assertEqual(merged.alts, ['G', 'C'])
| 39.333333 | 81 | 0.511299 |
71721ad0e941345f5d447e750599274cae8b4f97 | 15,625 | py | Python | SDUc for Sarbecoviruses MacLean_Lytras_etal_2020/sduc_CpGbdicts.py | spyros-lytras/dinuq | 7a3b637ba6f6cec6211ec9784cdbbfe3029d3544 | [
"MIT"
] | 11 | 2020-07-07T12:37:06.000Z | 2022-03-24T15:49:06.000Z | SDUc for Sarbecoviruses MacLean_Lytras_etal_2020/sduc_CpGbdicts.py | spyros-lytras/dinuq | 7a3b637ba6f6cec6211ec9784cdbbfe3029d3544 | [
"MIT"
] | 4 | 2020-03-18T19:34:51.000Z | 2020-10-30T11:32:07.000Z | SDUc for Sarbecoviruses MacLean_Lytras_etal_2020/sduc_CpGbdicts.py | spyros-lytras/dinuq | 7a3b637ba6f6cec6211ec9784cdbbfe3029d3544 | [
"MIT"
] | 2 | 2020-03-27T10:11:32.000Z | 2022-03-23T13:11:50.000Z | from Bio import SeqIO
from Bio.Seq import Seq
import random
import math
# Total number of nucleotides sampled by the estimator below.
ntsamples = 19200
#### NB ####
# Because of the computational intensity of the estimations below,
# the bridge ntsamples are only 19200 instead of the 19200000 used for
# pos1 and pos2. This is mitigated by taking 100 samples for each
# virus instead of 10 and averaging out in a later step.
#################
# Synonymous-codon table: amino acid (one-letter code) -> list of codons
# encoding it under the standard genetic code (stop codons excluded).
syco_orig = {
"C": ["TGT", "TGC"],
"D": ["GAT", "GAC"],
"S": ["TCT", "TCG", "TCA", "TCC", "AGC", "AGT"],
"Q": ["CAA", "CAG"],
"M": ["ATG"],
"N": ["AAC", "AAT"],
"P": ["CCT", "CCG", "CCA", "CCC"],
"K": ["AAG", "AAA"],
"T": ["ACC", "ACA", "ACG", "ACT"],
"F": ["TTT", "TTC"],
"A": ["GCA", "GCC", "GCG", "GCT"],
"G": ["GGT", "GGG", "GGA", "GGC"],
"I": ["ATC", "ATA", "ATT"],
"L": ["TTA", "TTG", "CTC", "CTT", "CTG", "CTA"],
"H": ["CAT", "CAC"],
"R": ["CGA", "CGC", "CGG", "CGT", "AGG", "AGA"],
"W": ["TGG"],
"V": ["GTA", "GTC", "GTG", "GTT"],
"E": ["GAG", "GAA"],
"Y": ["TAT", "TAC"]}
#### GC content ####
full_ntcont = {'RpShaanxi2011|Bat-R_pusillus|Shaanxi|JX993987|2011-09': {'G': 0.21082621082621084, 'C': 0.20509428842762176, 'A': 0.2843915343915344, 'T': 0.299687966354633}, 'HuB2013|Bat-Bat-R_sinicus|Hubei|KJ473814|2013-04': {'G': 0.21019623710297392, 'C': 0.2041270483512037, 'A': 0.28447636388158337, 'T': 0.301200350664239}, '279_2005|Bat-R_macrotis|Hubei|DQ648857|2004-11': {'G': 0.21014760767963417, 'C': 0.2010019837934165, 'A': 0.2851618977169564, 'T': 0.30368851080999293}, 'Rm1|Bat-R_macrotis|Hubei|DQ412043|2004-11': {'G': 0.20982217889676963, 'C': 0.20131769135096977, 'A': 0.28569027530337154, 'T': 0.30316985444888905}, 'JL2012|Bat-R_ferrumequinum|Jilin|KJ473811|2012-10': {'G': 0.20938802217859973, 'C': 0.20060612322209595, 'A': 0.284258015635224, 'T': 0.3057478389640803}, 'JTMC15|Bat-R_ferrumequinum|Jilin|KU182964|2013-10': {'G': 0.20896352699836584, 'C': 0.20089704808594971, 'A': 0.2831959945759883, 'T': 0.3069434303396961}, 'HeB2013|Bat-R_ferrumequinum|Hebei|KJ473812|2013-04': {'G': 0.2085724960092382, 'C': 0.20127025099344495, 'A': 0.284991339197772, 'T': 0.3051659137995449}, 'SX2013|Bat-R_ferrumequinum|Shanxi|KJ473813|2013-11': {'G': 0.20851294932283357, 'C': 0.20121516581242999, 'A': 0.28491904551780317, 'T': 0.30535283934693325}, 'Jiyuan-84|Bat-R_ferrumequinum|Henan-Jiyuan|KY770860|2012': {'G': 0.20845839937944757, 'C': 0.2020167953863276, 'A': 0.28461097433476107, 'T': 0.3049138308994638}, 'Rf1|Bat-R_ferrumequinum|Hubei-Yichang|DQ412042|2004-11': {'G': 0.20892658790265575, 'C': 0.20205998182368978, 'A': 0.2845602342724427, 'T': 0.30445319600121173}, 'GX2013|Bat-R_sinicus|Guangxi|KJ473815|2012-11': {'G': 0.2086005281026028, 'C': 0.2000274339014437, 'A': 0.28606700730427626, 'T': 0.30530503069167725}, 'Rp3|Bat-R_pearsoni|Guangxi-Nanning|DQ071615|2004-12': {'G': 0.2085687382297552, 'C': 0.19995964487489912, 'A': 0.2874293785310734, 'T': 0.30404223836427224}, 'Rf4092|Bat-R_ferrumequinum|Yunnan-Kunming|KY417145|2012-09-18': {'G': 0.21013126893301917, 'C': 
0.20265903736115787, 'A': 0.2838774823291821, 'T': 0.30333221137664085}, 'Rs4231|Bat-R_sinicus|Yunnan-Kunming|KY417146|2013-04-17': {'G': 0.2086495198442012, 'C': 0.20085957961184608, 'A': 0.284668591766839, 'T': 0.3058223087771137}, 'WIV16|Bat-R_sinicus|Yunnan-Kunming|KT444582|2013-07-21': {'G': 0.20835259161439418, 'C': 0.20066028392208649, 'A': 0.28451634202707166, 'T': 0.30647078243644765}, 'Rs4874|Bat-R_sinicus|Yunnan-Kunming|KY417150|2013-07-21': {'G': 0.20830721520240178, 'C': 0.2005542542311372, 'A': 0.2850450331562799, 'T': 0.3060934974101811}, 'YN2018B|Bat-R_affinis|Yunnan|MK211376|2016-09': {'G': 0.20759518773135907, 'C': 0.20081967213114754, 'A': 0.2848360655737705, 'T': 0.3067490745637229}, 'Rs7327|Bat-R_sinicus|Yunnan--Kunming|KY417151|2014-10-24': {'G': 0.20774078595703963, 'C': 0.20081169366812948, 'A': 0.28528062823770084, 'T': 0.30616689213713005}, 'Rs9401|Bat-R_sinicus|Yunnan-Kunming|KY417152|2015-10-16': {'G': 0.2080352044072693, 'C': 0.20017467835667976, 'A': 0.28506164130471295, 'T': 0.30672847593133795}, 'Rs4084|Bat-R_sinicus|Yunnan-Kunming|KY417144|2012-09-18': {'G': 0.20806180718844475, 'C': 0.20137722539469263, 'A': 0.2850856567013772, 'T': 0.3054753107154854}, 'RsSHC014|Bat-R_sinicus|Yunnan-Kunming|KC881005|2011-04-17': {'G': 0.20807734917917212, 'C': 0.20096015040118173, 'A': 0.2854601000436432, 'T': 0.3054016852989559}, 'Rs3367|Bat-R_sinicus|Yunnan-Kunming|KC881006|2012-03-19': {'G': 0.20784103114930183, 'C': 0.20012083780880774, 'A': 0.28594924812030076, 'T': 0.3060888829215897}, 'WIV1|Bat-R_sinicus|Yunnan-Kunming|KF367457|2012-09': {'G': 0.20766109076511927, 'C': 0.20000659870005608, 'A': 0.2855257514269689, 'T': 0.30680655910785576}, 'YN2018C|Bat-R_affinis|Yunnan-Kunming|MK211377|2016-09': {'G': 0.2088315537741251, 'C': 0.20206136953080264, 'A': 0.28512243591902725, 'T': 0.303984640776045}, 'As6526|Bat-Aselliscus_stoliczkanus|Yunnan-Kunming|KY417142|2014-05-12': {'G': 0.20904962153069806, 'C': 0.2016484440706476, 'A': 
0.28518082422203533, 'T': 0.304121110176619}, 'YN2018D|Bat-R_affinis|Yunnan|MK211378|2016-09': {'G': 0.20855260980372686, 'C': 0.20173435276205606, 'A': 0.28540694403071526, 'T': 0.3043060934035018}, 'Rs4081|Bat-R_sinicus|Yunnan-Kunming|KY417143|2012-09-18': {'G': 0.20920614639722943, 'C': 0.20143909081739014, 'A': 0.2854645102720151, 'T': 0.3038902525133654}, 'Rs4255|Bat-R_sinicus|Yunnan-Kunming|KY417149|2013-04-17': {'G': 0.20899035067074606, 'C': 0.20092122516222305, 'A': 0.2856806643580002, 'T': 0.3044077598090307}, 'Rs4237|Bat-R_sinicus|Yunnan-Kunming|KY417147|2013-04-17': {'G': 0.20920614639722943, 'C': 0.2003631350660704, 'A': 0.28583437006153123, 'T': 0.304596348475169}, 'Rs4247|Bat-R_sinicus|Yunnan-Kunming|KY417148|2013-04-17': {'G': 0.20902397202703157, 'C': 0.200786739737081, 'A': 0.2857142857142857, 'T': 0.3044750025216017}, 'Rs672|Bat-R_sinicus|Guizhou|FJ588686|2006-09': {'G': 0.20940156233869026, 'C': 0.20152104339447333, 'A': 0.2844557624143983, 'T': 0.3046216318524381}, 'YN2018A|Bat-R_affinis|Yunnan|MK211375|2016-09': {'G': 0.20883561182571217, 'C': 0.20166341167755406, 'A': 0.2850360293622466, 'T': 0.3044649471344872}, 'YN2013|Bat-R_sinicus|Yunnan|KJ473816|2010-12': {'G': 0.21045226820396679, 'C': 0.20056962459680186, 'A': 0.28241026696863636, 'T': 0.306567840230595}, 'Anlong-103|Bat-R_sinicus|Guizhou-Anlong|KY770858|2013': {'G': 0.20954594448935596, 'C': 0.20112503368364323, 'A': 0.283447857720291, 'T': 0.3058811641067098}, 'Anlong-112|Bat-R_sinicus|Guizhou-Anlong|KY770859|2013': {'G': 0.20969427309805508, 'C': 0.20109886405770722, 'A': 0.2830754710621229, 'T': 0.3061313917821148}, 'HSZ-Cc|SARS-CoV-1|Guangzhou|AY394995|2002': {'G': 0.20792877540735763, 'C': 0.1998656139761465, 'A': 0.28509994960524104, 'T': 0.3071056610112548}, 'YNLF_31C|Bat-R_Ferrumequinum|Yunnan-Lufeng|KP886808|2013-05-23': {'G': 0.20828987652659556, 'C': 0.19910507014769707, 'A': 0.2850654375399522, 'T': 0.3075396157857551}, 
'YNLF_34C|Bat-R_Ferrumequinum|Yunnan-Lufeng|KP886809|2013-05-23': {'G': 0.20832352050600544, 'C': 0.19910507014769707, 'A': 0.285132725498772, 'T': 0.3074386838475255}, 'F46|Bat-R_pusillus|Yunnan|KU973692|2012': {'G': 0.21031559114460668, 'C': 0.20136599152143195, 'A': 0.2837965143664625, 'T': 0.3045219029674988}, 'SC2018|Bat-R_spp|Sichuan|MK211374|2016-10': {'G': 0.2096937398812736, 'C': 0.19913653534808418, 'A': 0.28332433890987585, 'T': 0.3078453858607663}, 'LYRa11|Bat-R_affinis|Yunnan-Baoshan|KF569996|2011': {'G': 0.20848850863949, 'C': 0.19832242912263043, 'A': 0.2843482637141419, 'T': 0.30884079852373764}, 'Yunnan2011|Bat-Chaerephon_plicata|Yunnan|JX993988|2011-11': {'G': 0.2092557381502105, 'C': 0.19947711530626103, 'A': 0.2844628548146136, 'T': 0.3068042917289148}, 'Longquan_140|Bat-R_monoceros|China|KF294457|2012': {'G': 0.20996765062676911, 'C': 0.1983420946219167, 'A': 0.28531473244372557, 'T': 0.30637552230758863}, 'HKU3-1|Bat-R_sinicus|Hong_Kong|DQ022305|2005-02-17': {'G': 0.21131593110871905, 'C': 0.19987890204520992, 'A': 0.2841428955866523, 'T': 0.30466227125941875}, 'HKU3-3|Bat-R_sinicus|Hong_Kong|DQ084200|2005-03-17': {'G': 0.21133586887011543, 'C': 0.19989229578270673, 'A': 0.28420450338258557, 'T': 0.30456733196459224}, 'HKU3-2|Bat-R_sinicus|Hong_Kong|DQ084199|2005-02-24': {'G': 0.2116751440024253, 'C': 0.19995284131101154, 'A': 0.28342372082056116, 'T': 0.304948293866002}, 'HKU3-4|Bat-R_sinicus|Hong_Kong|GQ153539|2005-07-20': {'G': 0.21172232695933207, 'C': 0.19997306760032318, 'A': 0.2832951791004579, 'T': 0.30500942633988687}, 'HKU3-5|Bat-R_sinicus|Hong_Kong|GQ153540|2005-09-20': {'G': 0.211553999461352, 'C': 0.20010772959870723, 'A': 0.2833961755992459, 'T': 0.30494209534069483}, 'HKU3-6|Bat-R_sinicus|Hong_Kong|GQ153541|2005-12-16': {'G': 0.21168866145973605, 'C': 0.20007406409911124, 'A': 0.28332884460005386, 'T': 0.30490842984109884}, 'HKU3-10|Bat-R_sinicus|Hong_Kong|GQ153545|2006-10-28': {'G': 0.2115844418252231, 'C': 0.1999663242970197, 
'A': 0.2832800134702812, 'T': 0.305169220407476}, 'HKU3-9|Bat-R_sinicus|Hong_Kong|GQ153544|2006-10-28': {'G': 0.2115844418252231, 'C': 0.1999326485940394, 'A': 0.2832800134702812, 'T': 0.3052028961104563}, 'HKU3-11|Bat-R_sinicus|Hong_King|GQ153546|2007-03-07': {'G': 0.2115844418252231, 'C': 0.2, 'A': 0.2832800134702812, 'T': 0.3051355447044957}, 'HKU3-13|Bat-R_sinicus|Hong_Kong|GQ153548|2007-11-15': {'G': 0.21147690130404018, 'C': 0.2001886983185632, 'A': 0.28328335074299965, 'T': 0.305051049634397}, 'HKU3-12|Bat-R_sinicus|Hong_Kong|GQ153547|2007-05-15': {'G': 0.21098168596821976, 'C': 0.19980474010234311, 'A': 0.28373283059520604, 'T': 0.3054807433342311}, 'HKU3-7|Bat-R_sinicus|Guangdong|GQ153542|2006-02-15': {'G': 0.21187239197738592, 'C': 0.1991519720016153, 'A': 0.2832144299367344, 'T': 0.3057612060842644}, 'HKU3-8|Bat-R_sinicus|Guangdong|GQ153543|2006-02-15': {'G': 0.21192008355513628, 'C': 0.1989151308918163, 'A': 0.2831104073312894, 'T': 0.306054378221758}, 'CoVZC45|Bat-R_sinicus|Zhoushan-Dinghai|MG772933|2017-02': {'G': 0.20199986578082008, 'C': 0.1870344272196497, 'A': 0.2932689081269713, 'T': 0.31769679887255886}, 'CoVZXC21|Bat-R_sinicus|Zhoushan-Dinghai|MG772934|2015-07': {'G': 0.20099556033902866, 'C': 0.18723933808690973, 'A': 0.2937575676039284, 'T': 0.3180075339701332}, 'Wuhan-Hu-1|SARS-CoV-2|Wuhan|MN908947|2019-12': {'G': 0.19606728421897468, 'C': 0.18366050229074005, 'A': 0.29943483931378123, 'T': 0.32083737417650404}, 'BtKY72|Bat-R_spp|Kenya|KY352407|2007-10': {'G': 0.2067363530778165, 'C': 0.18542050966728155, 'A': 0.2853726856596297, 'T': 0.32247045159527227}, 'BM48-31|Bat-R_blasii|Bulgaria|NC_014470|2008-04': {'G': 0.2101038393223118, 'C': 0.19439131028829076, 'A': 0.27770187184041534, 'T': 0.3178029785489821}, 'RaTG13|Bat-R_affinis|Yunnan|EPI_ISL_402131|2013-07-24': {'G': 0.1958130966337297, 'C': 0.18455870038519512, 'A': 0.29904538603249037, 'T': 0.3205828169485848}, 'P4L|pangolin|Guangxi|EPI_ISL_410538|2017': {'G': 0.19714151513118164, 'C': 
0.18804938602965846, 'A': 0.29849694692343826, 'T': 0.31627860162383414}, 'P5L|pangolin|Guangxi||EPI_ISL_410540|2017': {'G': 0.19717506542306917, 'C': 0.18788163457022075, 'A': 0.29859759779910083, 'T': 0.3163457022076092}, 'P5E|pangolin|Guangxi|EPI_ISL_410541|2017': {'G': 0.1972757162987318, 'C': 0.18801583573777092, 'A': 0.2985640475072133, 'T': 0.3160101992887338}, 'P1E|pangolin|Guangxi|EPI_ISL_410539|2017': {'G': 0.19717459145666252, 'C': 0.1878796013556592, 'A': 0.2987148082279118, 'T': 0.3162309989597665}, 'P2V|pangolin|Guangxi|EPI_ISL_410542|2017': {'G': 0.19714717234435308, 'C': 0.1878503104547743, 'A': 0.2985064608155731, 'T': 0.31616042960228224}, 'Pangolin-CoV|pangolin|Guandong|EPI_ISL_410721|2020-02-16': {'G': 0.1965898369052433, 'C': 0.18550343712090578, 'A': 0.3002763175630139, 'T': 0.3172934357730152}, 'RmYN02|EPI_ISL_412977|China|Yunnan|Xishuangbanna|2019-06-25': {'G': 0.19760035051059957, 'C': 0.18479323244919282, 'A': 0.29746216844730544, 'T': 0.32014424859290214}}
#### CUSTOM DICTIONARIES ####
# For every virus in full_ntcont, draw 100 random nucleotide pools that match
# the virus' whole-genome base composition, group each pool into codons, keep
# the codons that are synonymous codons of some amino acid (per syco_orig),
# and estimate, for every amino-acid pair, the probability that the "bridge"
# dinucleotide formed by consecutive codons (3rd nt of one codon + 1st nt of
# the next) is a CpG.  One tab-delimited row is printed per sample.
# prints out tab-delimited header
print('sample\tacc\tdict')
# for each virus
for theid in list(full_ntcont):
    # take 100 samples per virus
    for sample in range(100):
        # the dictionary with single nucleotide frequency estimates
        ntcont = full_ntcont[theid]
        all_nt = []
        # for each nucleotide
        for n in list(ntcont):
            # number of occurrences of this nucleotide given its whole-genome
            # frequency and the total number of nucleotides used here
            count = int(ntcont[n] * ntsamples)
            # add that number of this nucleotide to the list
            all_nt.extend([n] * count)
        # shuffle the nucleotides randomly in the list
        all_nt_sh = random.sample(all_nt, len(all_nt))
        all_cods = []
        # make sure number of nucleotides in the list is a multiple of 3
        remainder = len(all_nt_sh) % 3
        if remainder != 0:
            all_nt_sh = all_nt_sh[:-remainder]
        # put the randomised nucleotides into codons (groups of 3)
        for pos in range(0, len(all_nt_sh), 3):
            all_cods.append(str(all_nt_sh[pos] + all_nt_sh[pos + 1] + all_nt_sh[pos + 2]))
        syco_new = {}
        # populate an amino acid dictionary : synonymous codons dictionary
        # with the generated codons
        for a in list(syco_orig):
            syco_new.update({a: []})
            for c in syco_orig[a]:
                for ac in all_cods:
                    if ac == c:
                        syco_new[a].append(ac)
        CpGbridge = {}
        aacod3 = {}
        aacod1 = {}
        # for each amino acid
        for a in syco_new:
            start = []
            end = []
            # store first and third nucleotide of each successive codon in order
            for cod in syco_new[a]:
                start.append(cod[0])
                end.append(cod[2])
            aacod3.update({a: end})
            aacod1.update({a: start})
        bridge_matrix = {}
        # connect the successive first and last nucleotides of the codons
        # to make a list of bridge dinucleotide combinations stored in a
        # dictionary under the key of the respective amino acid combination
        for ea in aacod3:
            for sa in aacod1:
                diaa = str(ea + sa)
                alldint = []
                for ent in aacod3[ea]:
                    for snt in aacod1[sa]:
                        alldint.append(str(ent + snt))
                bridge_matrix.update({diaa: alldint})
        # for each amino acid combination
        for dia in bridge_matrix:
            dibridgelist = bridge_matrix[dia]
            # calculate the proportion of CpG dinucleotides
            if 'CG' in dibridgelist:
                numCpGb = dibridgelist.count('CG')
                prob3 = numCpGb / len(dibridgelist)
                CpGbridge.update({dia: prob3})
        # print this sample's tab-delimited row.
        # BUGFIX: the original printed str(i+1), but `i` had been clobbered by
        # the inner nucleotide-count and codon-grouping loops, so the first
        # column was not the sample number; `sample` is the intended counter.
        print(str(sample + 1) + '\t' + theid + '\t' + str(CpGbridge))
| 88.778409 | 10,925 | 0.662848 |
b6092826ca672abb53a4e77adba71a6300a18143 | 1,010 | py | Python | EindhovenDS/hack9/util/util_func.py | dvalp/coding-practice | bc22de6dfc7590616fd38d029648ebf1ff102feb | [
"MIT"
] | null | null | null | EindhovenDS/hack9/util/util_func.py | dvalp/coding-practice | bc22de6dfc7590616fd38d029648ebf1ff102feb | [
"MIT"
] | null | null | null | EindhovenDS/hack9/util/util_func.py | dvalp/coding-practice | bc22de6dfc7590616fd38d029648ebf1ff102feb | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
def minimize(loss_batch, learningrate=0.0005):
    """Build an Adam training op for *loss_batch* and, for every trainable
    variable, register histogram summaries of:
      - the entries of the variable,
      - the gradient over the variable,
      - the gradient-norm over the variable.

    Returns the op that applies the gradients.
    """
    trainable = tf.trainable_variables()
    gradient_list = tf.gradients(loss_batch, trainable)
    optimizer = tf.train.AdamOptimizer(learningrate)
    step = optimizer.apply_gradients(zip(gradient_list, trainable))
    for grad, var in zip(gradient_list, trainable):
        # IndexedSlices (sparse gradients) carry their data in `.values`
        dense_grad = grad.values if isinstance(grad, ops.IndexedSlices) else grad
        # summaries register themselves in the default graph; the returned
        # handles are not needed
        tf.summary.histogram(var.name, var)
        tf.summary.histogram(var.name + "/gradients", dense_grad)
        tf.summary.histogram(var.name + "/gradient_norm",
                             clip_ops.global_norm([dense_grad]))
    return step
5fba3d3bb2edbee3d96baf6e38c1c84633cd140f | 5,207 | py | Python | src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/main/python/scripts/red_variables.py | puraner/RED | d41b7c244ff2e449c541fd0691213fcf5a3b30c1 | [
"Apache-2.0"
] | null | null | null | src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/main/python/scripts/red_variables.py | puraner/RED | d41b7c244ff2e449c541fd0691213fcf5a3b30c1 | [
"Apache-2.0"
] | null | null | null | src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/main/python/scripts/red_variables.py | puraner/RED | d41b7c244ff2e449c541fd0691213fcf5a3b30c1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 Nokia Solutions and Networks
# Licensed under the Apache License, Version 2.0,
# see license.txt file for details.
#
def get_global_variables():
    """Return a dict of Robot Framework's built-in global variables.

    Works across Robot Framework versions: for RF <= 2.9 the values come from
    ``robot.variables.GLOBAL_VARIABLES``; for newer versions they are rebuilt
    from ``GlobalVariables(RobotSettings())``.  A hard-coded fallback dict
    (copied from robot.variables.__init__.py) fills in any key the installed
    version does not provide.
    """
    import robot
    import tempfile
    import os
    # Global variables copied from robot.variables.__init__.py
    # (fallback values; run-time specific entries are blanked out)
    global_variables = {
        '${TEMPDIR}': os.path.normpath(tempfile.gettempdir()),
        '${EXECDIR}': '',
        '${/}': os.sep,
        '${:}': os.pathsep,
        '${SPACE}': ' ',
        '${EMPTY}': '',
        '@{EMPTY}': [],
        '${True}': True,
        '${False}': False,
        '${None}': None,
        '${null}': None,
        '${OUTPUT_DIR}': '',
        '${OUTPUT_FILE}': '',
        '${SUMMARY_FILE}': '',
        '${REPORT_FILE}': '',
        '${LOG_FILE}': '',
        '${DEBUG_FILE}': '',
        '${PREV_TEST_NAME}': '',
        '${PREV_TEST_STATUS}': '',
        '${PREV_TEST_MESSAGE}': '',
        '${CURDIR}': '.',
        '${TEST_NAME}': '',
        '${TEST_DOCUMENTATION}': '',
        '@{TEST_TAGS}': [],
        '${TEST_STATUS}': '',
        '${TEST_MESSAGE}': '',
        '${SUITE_NAME}': '',
        '${SUITE_SOURCE}': '',
        '${SUITE_STATUS}': '',
        '${SUITE_MESSAGE}': '',
        '${SUITE_DOCUMENTATION}': '',
        '${KEYWORD_MESSAGE}': '',
        '${KEYWORD_STATUS}': ''
    }
    glob_variables = {}
    try:
        # Robot Framework <= 2.9 exposes the variables directly
        from robot.variables import GLOBAL_VARIABLES
        glob_variables = GLOBAL_VARIABLES
    except ImportError:  # for robot >2.9
        # dict-typed globals only exist in the newer syntax
        global_variables['&{EMPTY}'] = {}
        global_variables['&{SUITE_METADATA}'] = {}
        from robot.conf.settings import RobotSettings
        from robot.variables.scopes import GlobalVariables
        glob_variables = GlobalVariables(RobotSettings()).as_dict()
        # blank out run-specific paths so the result is environment-neutral
        glob_variables['${OUTPUT_DIR}'] = ''
        glob_variables['${EXECDIR}'] = ''
        glob_variables['${OUTPUT_FILE}'] = ''
        glob_variables['${REPORT_FILE}'] = ''
        glob_variables['${LOG_FILE}'] = ''
    # normalize key decoration, then backfill any missing fallback entries
    data = dict((_wrap_variable_if_needed(key), value) for key, value in glob_variables.items())
    for k in global_variables:
        if not k in data:
            data[k] = global_variables[k]
    return data
def _wrap_variable_if_needed(varname):
if varname.startswith('${') or varname.startswith('@{') or varname.startswith('&{'):
return varname
else:
return '${' + varname + '}'
def get_variables(path, arguments):
    """Load a Robot Framework variable file and return a sanitised dict.

    DotDict values are flattened, modules and functions are dropped (they
    would not be useful in the assistant nor visible to the validator), and
    anything that fails conversion is reported as the string 'None'.
    """
    import inspect
    from robot.utils.dotdict import DotDict
    raw_variables = _get_variables_from_file(path, arguments)
    result = {}
    for name, value in raw_variables.items():
        try:
            if isinstance(value, DotDict):
                result[name] = _extract_dot_dict(value)
            elif inspect.ismodule(value) or inspect.isfunction(value):
                # filtered out entirely, on purpose
                continue
            else:
                result[name] = _escape_unicode(value)
        except Exception:
            result[name] = 'None'
    return result
def _get_variables_from_file(path, arguments):
    """Parse a variable file with Robot Framework's own machinery and return
    the raw name -> value mapping."""
    import robot
    variable_store = robot.variables.Variables()
    variable_store.set_from_file(path, arguments)
    return variable_store.store.data
def __get_robot_version():
    """Return the installed Robot Framework version as a tuple of ints."""
    import robot.version as ver
    return tuple(int(part) for part in ver.get_version(True).split('.'))
def _extract_dot_dict(dotdict):
    """Flatten a Robot Framework DotDict into a plain dict with escaped keys
    and values.

    BUGFIX: the parameter was originally named ``dict``, shadowing the
    builtin, so the ``dict(...)`` constructor call actually tried to call the
    DotDict instance itself and raised TypeError; the caller swallowed that
    and reported 'None' for every DotDict variable.
    """
    return dict((_escape_unicode(k), _escape_unicode(v)) for k, v in dotdict.items())
def _escape_unicode(data):
from copy import copy
# basestring and long is not defined in python3
import sys
py_version = sys.version_info
if py_version < (3,0,0) and isinstance(data, unicode):
import unicodedata
return unicodedata.normalize('NFKD', data).encode('ascii','ignore') # for XML-RPC problems with unicode characters
elif py_version >= (3,0,0) and isinstance(data, str):
escaped_data = data.encode('unicode_escape')
if isinstance(escaped_data, bytes):
escaped_data = escaped_data.decode()
return escaped_data
elif py_version < (3,0,0) and isinstance(data, basestring):
return data.encode('unicode_escape')
elif py_version < (3,0,0) and isinstance(data, long): # for OverflowError in XML-RPC
return str(data)
elif isinstance(data, int) and (data < -(2**31) or data > (2 ** 31) -1):
return str(data)
elif isinstance(data, dict):
data_result = {}
for key, val in data.items():
if isinstance(key, tuple):
return 'None'
data_result[_escape_unicode(str(key))] = _escape_unicode(val)
return data_result
elif isinstance(data, list):
data_result = copy(data)
for index, item in enumerate(data_result):
data_result[index] = _escape_unicode(item)
return data_result
elif isinstance(data, tuple):
tuple_data = ()
for item in data:
tuple_data = tuple_data + tuple(_escape_unicode(item))
return tuple_data
elif data is None:
return _escape_unicode('None')
else:
return _escape_unicode(str(data))
| 33.165605 | 122 | 0.595737 |
3267925e6a4864d89a1c91885ffa8c297dc93c2c | 1,045 | py | Python | lxserv/passify_UltralightDestroy.py | 9bstudios/mecco_passify | 1d7231a215e7c651a3d6242890694cf9456a4f70 | [
"MIT"
] | 2 | 2018-06-15T14:23:23.000Z | 2021-03-08T18:00:01.000Z | lxserv/passify_UltralightDestroy.py | 9bstudios/mecco_passify | 1d7231a215e7c651a3d6242890694cf9456a4f70 | [
"MIT"
] | null | null | null | lxserv/passify_UltralightDestroy.py | 9bstudios/mecco_passify | 1d7231a215e7c651a3d6242890694cf9456a4f70 | [
"MIT"
] | null | null | null | # python
import lx, lxu.command, traceback, passify, lxifc
class cmd_destroy(lxu.command.BasicCommand):
    """Modo command that tears down the passify 'Ultralight' setup."""

    # class-level flag; not referenced anywhere in this class body
    _first_run = True

    def __init__(self):
        lxu.command.BasicCommand.__init__(self)

    def cmd_Flags (self):
        # undoable command that modifies the scene model
        return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO

    def CMD_EXE(self, msg, flags):
        # Destroy the ultralight setup, then fire a datatype notification so
        # dependent UI refreshes.
        passify.ultralight.destroy()
        notifier = passify.Notifier()
        notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)

    def basic_Execute(self, msg, flags):
        # Delegate to CMD_EXE; log the traceback to Modo's console instead of
        # letting the exception escape the command system.
        try:
            self.CMD_EXE(msg, flags)
        except Exception:
            lx.out(traceback.format_exc())

    def basic_Enable(self,msg):
        # Enabled only while the ultralight render-pass group exists.
        try:
            if passify.fetch_by_tag(passify.ULTRALIGHT_PGRP,type_='renderPassGroups'):
                return True
        except:
            # NOTE(review): bare except silently disables the command on any
            # error; consider narrowing the exception type.
            return False
        return False
class Notifiers(lxu.command.BasicHints):
    """Declares the notifiers that trigger re-evaluation of command state."""

    def __init__(self):
        # re-evaluate when layers are auto-added or an edit action occurs
        self._notifiers = [('notifier.layerAutoAdd',''),('notifier.editAction','')]
# Register the command with Modo under the name defined in passify.
lx.bless(cmd_destroy, passify.CMD_ULTRALIGHT_DESTROY)
| 24.880952 | 86 | 0.647847 |
90d6988a021fc1c61a8d079165ff33bd8d976bee | 2,097 | py | Python | tests/test_versioning.py | Svenito/versup | b918a1a0d731f6ed90a0141efeff8fb5fdca8d15 | [
"MIT"
] | 3 | 2020-04-15T15:31:40.000Z | 2020-09-19T08:09:51.000Z | tests/test_versioning.py | Svenito/versup | b918a1a0d731f6ed90a0141efeff8fb5fdca8d15 | [
"MIT"
] | 11 | 2020-04-15T16:11:09.000Z | 2020-12-11T11:09:47.000Z | tests/test_versioning.py | Svenito/versup | b918a1a0d731f6ed90a0141efeff8fb5fdca8d15 | [
"MIT"
] | 1 | 2020-09-02T02:58:03.000Z | 2020-09-02T02:58:03.000Z | from versup import __version__
import pytest
from versup import versioning
def test_get_new_version_explicit():
    # An explicit, well-formed semver string is returned verbatim.
    assert "1.6.88" == versioning.get_new_version("1.2.3", "1.6.88", [], False)


def test_get_new_version_explicit_raise():
    # A malformed explicit version ("1.pies.88") must raise.
    with pytest.raises(Exception) as e_info:
        versioning.get_new_version("1.4.2", "1.pies.88", [], False)


def test_get_new_version_increment_raise():
    # The increment keyword must be one of the allowed names passed in.
    with pytest.raises(Exception) as e_info:
        versioning.get_new_version("1.4.2", "minor", ["major"], False)
    assert "1.5.0" == versioning.get_new_version("1.4.2", "minor", ["minor"], False)
def test_patch_increment():
    # patch bumps the last component
    assert "1.0.1" == versioning.bump_version("1.0.0", "patch")


def test_minor_increment():
    # minor bumps the middle component and resets patch
    assert "1.1.0" == versioning.bump_version("1.0.0", "minor")


def test_major_increment():
    # major bumps the first component and resets the rest
    assert "2.0.0" == versioning.bump_version("1.0.0", "major")


def test_prepatch_increment():
    # pre* increments additionally open an 'rc.1' prerelease tag
    assert "1.0.1-rc.1" == versioning.bump_version("1.0.0", "prepatch")


def test_preminor_increment():
    assert "1.1.0-rc.1" == versioning.bump_version("1.0.0", "preminor")
    assert "1.3.0-rc.1" == versioning.bump_version("1.2.4", "preminor")


def test_premajor_increment():
    assert "2.0.0-rc.1" == versioning.bump_version("1.0.0", "premajor")
    assert "2.0.0-rc.1" == versioning.bump_version("1.3.4", "premajor")


def test_prerelease():
    # prerelease starts at rc.1, then only increments the rc counter
    assert "1.0.1-rc.1" == versioning.bump_version("1.0.0", "prerelease")
    assert "1.0.1-rc.2" == versioning.bump_version("1.0.1-rc.1", "prerelease")
    assert "1.0.1-rc.3" == versioning.bump_version("1.0.1-rc.2", "prerelease")


def test_drop_prerelease():
    # a normal increment from a prerelease drops the rc tag
    assert "1.0.1" == versioning.bump_version("1.0.1-rc.1", "patch")
    assert "1.1.0" == versioning.bump_version("1.0.1-rc.1", "minor")
    assert "2.0.0" == versioning.bump_version("1.0.1-rc.1", "major")
def test_release():
    # release finalizes a prerelease; releasing a non-prerelease raises
    assert "1.0.1" == versioning.bump_version("1.0.1-rc.1", "release")
    assert "2.0.1" == versioning.bump_version("2.0.1-rc.5", "release")
    with pytest.raises(Exception) as e_info:
        versioning.bump_version("2.0.1", "release")
| 31.772727 | 84 | 0.664282 |
cbdfc54dc619ffb1beddda508572f1de9b67140e | 7,971 | py | Python | montepython/importance_sampling.py | ivandebono/montepython_public_3.2dev_Python3 | 16771c3d37faaa3f80b171c01d78da56a75aa3d9 | [
"MIT"
] | null | null | null | montepython/importance_sampling.py | ivandebono/montepython_public_3.2dev_Python3 | 16771c3d37faaa3f80b171c01d78da56a75aa3d9 | [
"MIT"
] | null | null | null | montepython/importance_sampling.py | ivandebono/montepython_public_3.2dev_Python3 | 16771c3d37faaa3f80b171c01d78da56a75aa3d9 | [
"MIT"
] | null | null | null | """
.. module:: importance_sampling
:synopsis: Perform an Importance Sampling from an existing folder
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
"""
try:
from collections import OrderedDict as od
except ImportError:
from ordereddict import OrderedDict as od
from copy import copy
from multiprocessing import Pool
import os
import warnings
import math
from . import io_mp
from . import sampler
from .data import Data
def run(cosmo, data, command_line):
    """
    Performing the Importance Sampling

    The idea is to start from an existing run, constraining a certain model I,
    given a set of experiments. The new run will constrain the same model I,
    but adding one or several new experiments. In the case where it is expected
    that the final posterior distribution should not differ too greatly between
    the two parameter extractions, then using Importance Sampling can speed up
    significantly the second one.

    Instead of properly sampling randomly the parameter space, it instead reads
    the chains from the previous run, recompute the cosmology at this point,
    then adds the log-likelihood contributed by the new experiments to the
    previous ones. As an input of the method, with the flag
    `--IS-starting-folder`, you can thus specify either a folder containing a
    Monte Python run, or a set of chains that you want to be converted.

    The code will automatically compute the minimum amount of things. For
    instance, if the first run had all the Planck likelihoods, and the second,
    all the Planck likelihoods plus a prior on :math:`H_0`, it would be absurd
    to recompute also the cosmological perturbations: the only needed quantity
    is a background quantity.

    The new chains will hence store the same points in parameter space, but
    with a different value of the likelihood, and also of the multiplicity -
    that will become non-integer. Indeed, the multiplicity is also a probe of
    the posterior, and this new, higher likelihood should have had a higher
    multiplicity.
    """
    # Check that the command_line "--IS-starting-folder" points to an existing
    # Monte Python folder run, or a subset of files, and store in any case all
    # the chains to analyze in the chains.
    starting_folder = command_line.IS_starting_folder
    if not starting_folder:
        raise io_mp.ConfigurationError(
            "When running importance sampling, you should specify a folder or"
            " a set of chains with the option '--IS-starting-folder'")
    chains = []
    # If starting_folder is of length 1, it means it is either a whole folder,
    # or just one chain. If it is a folder, we recover all chains within.
    if len(starting_folder) == 1:
        starting_folder = starting_folder[0]
        if os.path.isdir(starting_folder):
            # chain files are recognised by the '__' in their name
            for elem in os.listdir(starting_folder):
                if elem.find("__") != -1:
                    chains.append(elem)
    # Else, it is a list of chains, of which we recover folder name, and store
    # all of them in chains.
    else:
        chains = starting_folder
        starting_folder = os.path.sep.join(chains[0].split(os.path.sep)[:-1])
        chains = [elem.split(os.path.sep)[-1] for elem in chains]
    # Recovering only the extra likelihoods
    new_experiments = recover_new_experiments(
        data, command_line, starting_folder)
    if not new_experiments:
        raise io_mp.ConfigurationError(
            "You are using Importance Sampling without adding a new "
            "experiment. This is not what this method is coded for.")
    # resetting the needed cosmo arguments, and deleting the dictionary of
    # likelihoods, only if new_experiments is smaller than the old ones.
    ignore_likelihood = False

    # Wipe out the problematic information from previous likelihoods,
    # namely their desired output
    data.cosmo_arguments['output'] = ''
    try:
        del data.cosmo_arguments['l_max_scalars']
        del data.cosmo_arguments['lensing']
    except KeyError:
        pass

    # Initialise the requirements of the new likelihood
    data.initialise_likelihoods(new_experiments)

    # Multiprocessing part, to analyze all the chains in parallel. When not
    # specifying any 'processes' keyword argument to the Pool call, the system
    # uses as many as possible.
    pool = Pool()
    args = [(data, cosmo, command_line, starting_folder,
             elem, ignore_likelihood) for elem in chains]
    # Note the use of translate_chain_star, and not translate_chain, because of
    # the limitations of the `map` function (it only takes one argument). The
    # `_star` function simply unwraps the argument.
    print('\nStart extracting the chains:\n')
    pool.map(translate_chain_star, args)
    # Close the pool, and join everything (the join might not be needed)
    pool.close()
    pool.join()
def recover_new_experiments(data, command_line, starting_folder):
    """
    Given the input, extract the additional likelihoods

    Reads the ``log.param`` of the starting folder to find the experiments of
    the original run and returns the currently requested experiments that were
    not part of it.
    """
    # Initialize the companion data structure, on a modified command line
    modified_command_line = copy(command_line)
    modified_command_line.folder = starting_folder
    modified_command_line.param = os.path.join(starting_folder, 'log.param')
    # Go through the file, and stop when you find the good line. The previous
    # way of doing, to simply initialise another data instance fails when using
    # Planck. Indeed, clik likelihoods can not be initialised twice.
    print('Reading the starting folder')
    print('---------------------------')
    with open(modified_command_line.param, 'r') as init:
        for line in init:
            if line.find('data.experiments') != -1:
                # keep the raw right-hand side of the assignment as a string
                _, experiments = line.split('=')
                experiments = experiments.strip()
    # NOTE(review): if no 'data.experiments' line exists, `experiments` is
    # never bound and the comprehension below raises NameError — confirm the
    # log.param format guarantees the line is present.
    print('The likelihood will be computed only for:')
    # NOTE(review): `experiments` is the raw string of the old list, so the
    # membership test is a substring check; an old experiment name containing
    # a new one as a substring would wrongly exclude it.
    new_experiments = [
        elem for elem in data.experiments if elem not in experiments]
    print(' ->', end=' ')
    print(', '.join(new_experiments))

    return new_experiments
def translate_chain(data, cosmo, command_line,
                    starting_folder, chain_name, ignore_likelihood=False):
    """Re-evaluate one chain from the starting folder and write the
    importance-sampled version into the new folder.

    Every accepted point is re-read, the new likelihood is computed at that
    point, and the multiplicity is re-weighted by exp(new loglike).

    .. note::

        If the keyword argument `ignore_likelihood` is set to true, the
        previous value of the likelihood is discarded.
    """
    source = os.path.join(starting_folder, chain_name)
    destination = os.path.join(command_line.folder, chain_name)
    print(' -> reading ', source)
    varying_names = data.get_mcmc_parameters(['varying'])
    with open(source, 'r') as old_chain, open(destination, 'w') as new_chain:
        for line in old_chain:
            # copy comment lines verbatim (compatibility with --update)
            if line[0] == '#':
                new_chain.write(line)
                continue
            fields = line.split()
            # recover the multiplicity and the likelihood of this point
            old_loglike = 0 if ignore_likelihood else -float(fields[1])
            multiplicity = float(fields[0])
            # Assign all the recovered values to the data structure
            for position, name in enumerate(varying_names):
                data.mcmc_parameters[name]['current'] = float(fields[2 + position])
            data.update_cosmo_arguments()
            extra_loglike = sampler.compute_lkl(cosmo, data)
            # the new likelihood re-weights the multiplicity of the point
            weight = math.exp(extra_loglike)
            total_loglike = extra_loglike + old_loglike
            # Accept the point
            sampler.accept_step(data)
            io_mp.print_vector([new_chain], multiplicity * weight, total_loglike, data)
    print(destination, 'written')
def translate_chain_star(args):
    """Trick function for multiprocessing: Pool.map passes a single argument,
    so this unwraps the tuple before calling translate_chain."""
    return translate_chain(*args)
| 41.087629 | 83 | 0.672939 |
d275af3eb0a054d0368763cea75dd2e50e3d2c91 | 4,490 | py | Python | eval_calibration/get_acquisition_stat.py | lyn1874/region_based_active_learning | c4813086b1eb6f8ebb2df26e5c697652b450b775 | [
"MIT"
] | 7 | 2020-10-26T06:33:17.000Z | 2022-03-26T12:32:31.000Z | eval_calibration/get_acquisition_stat.py | lyn1874/region_based_active_learning | c4813086b1eb6f8ebb2df26e5c697652b450b775 | [
"MIT"
] | null | null | null | eval_calibration/get_acquisition_stat.py | lyn1874/region_based_active_learning | c4813086b1eb6f8ebb2df26e5c697652b450b775 | [
"MIT"
] | 3 | 2020-10-26T06:33:19.000Z | 2021-09-27T07:58:29.000Z | import numpy as np
import os
from select_regions import selection as SPR_Region_Im
from data_utils.update_data import give_init_train_and_val_data, update_training_data, prepare_the_new_uncertain_input
import pickle
def collect_number_acquired_pixels_region(path_input, total_select_folder_init, stage, start_step):
    """This function is used to calculate the number of pixels and number of images that have been
    selected in each acquisition step
    total_select_folder: the list of paths that denote the best experiment in each acquisition step
    path_input: str, the path that saves the experiment
    stage: int, 0,1,2,3 --> decide the acquisition method: random, uncertainty, entropy, BALD
    start_step: the number of acquisition steps that are going to be considered
    """
    path_mom = os.path.join('/scratch/Act_Learn_Desperate_V6', path_input)
    collect_data_path = os.path.join(path_mom, 'collect_data')
    if not os.path.exists(collect_data_path):
        os.makedirs(collect_data_path)
    # experiment version is encoded as the last '_'-separated token of the path
    exp_version = int(path_input.strip().split('_')[-1])
    # order the per-step best-experiment folders by their acquisition-step
    # number (4th token from the end of the folder name)
    total_select_folder = sorted(total_select_folder_init, key=lambda s: int(s.strip().split('_')[-4]))
    total_active_step = start_step
    # map the stage index to the acquisition-method code used by SPR_Region_Im
    acq_method_total = ["A", "B", "C", "D"]
    acq_selec_method = acq_method_total[stage]
    # region-selection hyper-parameters: 150x150 window, stride 30, 20 patches
    kernel_window = np.ones([150, 150])
    stride_size = 30
    num_most_uncert_patch = 20
    most_init_train_data, all_the_time_val_data = give_init_train_and_val_data()
    num_of_pixels_need_to_be_annotate = np.zeros([total_active_step])
    num_of_images_per_step = np.zeros([total_active_step])
    for single_acq_step in range(total_active_step):
        if single_acq_step == 0:
            # first step: select regions with the initial (multistart) checkpoint
            ckpt_dir_init = "/home/s161488/Exp_Stat/Multistart/Multistart_stage0_version1"  # Need initial ckpt_dir
            tds_save = os.path.join(collect_data_path, 'FE_step_00_version_%d' % exp_version)
            if not os.path.exists(tds_save):
                os.makedirs(tds_save)
            most_uncert = SPR_Region_Im(tds_save, ckpt_dir_init, acq_selec_method, None, None, kernel_window,
                                        stride_size, num_most_uncert_patch=20, check_overconfident=True)
            updated_training_data = update_training_data(most_init_train_data[:4], [], most_uncert[:4])
            # most_uncert[-1]/[-2] hold the selected image indices / binary masks
            already_selected_im_index = most_uncert[-1]
            already_selected_binary_mask = most_uncert[-2]
            most_uncert_old = most_uncert
            save_path_name = os.path.join(tds_save, 'updated_uncertain.txt')
            with open(save_path_name, 'wb') as f:
                pickle.dump(most_uncert, f)
        # accumulate this step's statistics from the running selection state
        num_of_pixels_need_to_be_annotate[single_acq_step] = np.sum(np.reshape(most_uncert_old[-2], [-1]))
        # NOTE(review): denominator looks like 528*784 pixels per image for a
        # 5-image pool — confirm against the dataset dimensions.
        percent_pixel_to_be_annotate = num_of_pixels_need_to_be_annotate[single_acq_step] / (528 * 784 * 5)
        num_im = np.shape(updated_training_data[0])[0]
        num_of_images_per_step[single_acq_step] = num_im
        # select new regions using this step's best checkpoint
        model_dir_goes_into_act_stage = total_select_folder[single_acq_step]
        tds_select = os.path.join(model_dir_goes_into_act_stage, 'pool_data')
        if not os.path.exists(tds_select):
            os.makedirs(tds_select)
        most_uncert = SPR_Region_Im(tds_select, model_dir_goes_into_act_stage, acq_selec_method,
                                    already_selected_im_index, already_selected_binary_mask,
                                    kernel_window, stride_size, num_most_uncert_patch, check_overconfident=True)
        # merge the new selection into the accumulated one and refresh state
        updated_most_uncertain = prepare_the_new_uncertain_input(most_uncert_old, most_uncert)
        updated_training_data = update_training_data(most_init_train_data[:4], [], updated_most_uncertain[:4])
        already_selected_im_index = updated_most_uncertain[-1]
        already_selected_binary_mask = updated_most_uncertain[-2]
        most_uncert_old = updated_most_uncertain
        tds_save = os.path.join(collect_data_path, "FE_step_%d_version_%d" % (single_acq_step, exp_version))
        save_path_name = os.path.join(tds_save, 'updated_uncertain.txt')
        # NOTE(review): for steps > 0 this directory is never created before
        # open(); if it does not already exist the dump will fail — confirm.
        with open(save_path_name, 'wb') as f:
            pickle.dump(most_uncert, f)
        print("--At step %d, there are %d training images with %.2f pixels that needs to be annotated" % (
            single_acq_step, num_im, percent_pixel_to_be_annotate))
    np.save(os.path.join(path_mom, 'num_of_image'), num_of_images_per_step)
    np.save(os.path.join(path_mom, 'num_of_pixel'), num_of_pixels_need_to_be_annotate)
| 60.675676 | 118 | 0.721381 |
5c650367acaca386bb7883ced28d525569ce46bd | 300 | py | Python | start_client.py | TrixiS/lolder | 05c642551b1f42d40d22bcbc6a7a6cfe1d89efe7 | [
"MIT"
] | null | null | null | start_client.py | TrixiS/lolder | 05c642551b1f42d40d22bcbc6a7a6cfe1d89efe7 | [
"MIT"
] | null | null | null | start_client.py | TrixiS/lolder | 05c642551b1f42d40d22bcbc6a7a6cfe1d89efe7 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import QApplication
from src.client import MainWindow, LoginDialog
# Qt application entry point: create the app, show the main window, then run
# the modal login dialog and hand its result to the window before entering
# the event loop.
app = QApplication(sys.argv)
login_dialog = LoginDialog()
window = MainWindow()
window.show()
# NOTE(review): QDialog.exec() normally returns the dialog's result code, not
# a client object — presumably LoginDialog overrides exec() to return the API
# client; verify in src/client.
api_client = login_dialog.exec()
window.api_client = api_client
window.load_files()
# run the Qt event loop and propagate its exit code to the shell
sys.exit(app.exec())
| 21.428571 | 46 | 0.793333 |
6348abfbd17f69a4b4a4881012f9b371623b7b19 | 2,262 | py | Python | tacotron2/text/__init__.py | Moon-sung-woo/Tacotron2_ParallelWaveGAN_korean | 437c9748673f2e5cb84e99884e8d0d916f269c9e | [
"BSD-3-Clause"
] | 3 | 2020-12-22T01:42:27.000Z | 2021-06-17T13:08:58.000Z | tacotron2/text/__init__.py | Moon-sung-woo/Tacotron2_ParallelWaveGAN_korean | 437c9748673f2e5cb84e99884e8d0d916f269c9e | [
"BSD-3-Clause"
] | null | null | null | tacotron2/text/__init__.py | Moon-sung-woo/Tacotron2_ParallelWaveGAN_korean | 437c9748673f2e5cb84e99884e8d0d916f269c9e | [
"BSD-3-Clause"
] | null | null | null | """ from https://github.com/keithito/tacotron """
import re
from tacotron2.text import cleaners
from tacotron2.text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}

# Regular expression matching text enclosed in curly braces
# (curly-brace content is treated as ARPAbet, bypassing the cleaners):
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

      The text can optionally have ARPAbet sequences enclosed in curly braces embedded
      in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."

      Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

      Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    remaining = text
    while remaining:
        match = _curly_re.match(remaining)
        if match is None:
            # No more ARPAbet segments: clean and convert the rest, then stop.
            sequence.extend(_symbols_to_sequence(_clean_text(remaining, cleaner_names)))
            break
        before, arpabet, after = match.groups()
        # clean only the plain text; the braced ARPAbet passes through as-is
        sequence.extend(_symbols_to_sequence(_clean_text(before, cleaner_names)))
        sequence.extend(_arpabet_to_sequence(arpabet))
        remaining = after

    return sequence
def sequence_to_text(sequence):
    '''Converts a sequence of IDs back to a string'''
    pieces = []
    for symbol_id in sequence:
        if symbol_id not in _id_to_symbol:
            # Unknown IDs are silently dropped.
            continue
        symbol = _id_to_symbol[symbol_id]
        # Re-wrap ARPAbet symbols (stored with a leading '@') in curly braces.
        if len(symbol) > 1 and symbol[0] == '@':
            symbol = '{%s}' % symbol[1:]
        pieces.append(symbol)
    # Merge adjacent ARPAbet groups: '}{' becomes a single space.
    return ''.join(pieces).replace('}{', ' ')
def _clean_text(text, cleaner_names):
    """Run `text` through each named cleaner function from the cleaners module, in order.

    :param text: the raw input string.
    :param cleaner_names: iterable of attribute names to look up on `cleaners`.
    :raises Exception: if a name does not resolve to a cleaner function.
    :returns: the cleaned text.
    """
    for name in cleaner_names:
        # Use a default so a missing cleaner raises the intended 'Unknown cleaner'
        # error below instead of an opaque AttributeError from getattr.
        cleaner = getattr(cleaners, name, None)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
def _symbols_to_sequence(symbols):
    """Map symbol strings to their integer IDs, dropping symbols that should not be kept."""
    ids = []
    for symbol in symbols:
        if _should_keep_symbol(symbol):
            ids.append(_symbol_to_id[symbol])
    return ids
def _arpabet_to_sequence(text):
    """Encode whitespace-separated ARPAbet tokens ('@'-prefixed in the symbol table)."""
    prefixed = ['@' + token for token in text.split()]
    return _symbols_to_sequence(prefixed)
def _should_keep_symbol(s):
    """Return True for known symbols, excluding the '_' and '~' markers.

    Uses value comparison (`not in`) rather than the original identity checks
    (`s is not '_'`), which depended on CPython string interning and raise a
    SyntaxWarning on modern Python.
    """
    return s in _symbol_to_id and s not in ('_', '~')
d27888bcc574ccf0249e4b9ba511b9d5b399743c | 154 | py | Python | api_crawler/__init__.py | pawnhearts/aio_api_crawler | 543c4edb8be77bbe9e2ac017fca9a9d6020b51b8 | [
"BSD-3-Clause"
] | 2 | 2020-10-31T07:53:20.000Z | 2020-11-11T22:32:44.000Z | api_crawler/__init__.py | pawnhearts/aio_api_crawler | 543c4edb8be77bbe9e2ac017fca9a9d6020b51b8 | [
"BSD-3-Clause"
] | null | null | null | api_crawler/__init__.py | pawnhearts/aio_api_crawler | 543c4edb8be77bbe9e2ac017fca9a9d6020b51b8 | [
"BSD-3-Clause"
] | null | null | null | from api_crawler.endpoint.json_endpoint import JsonEndpoint
__author__ = """robotnaoborot"""
__email__ = "robotnaoborot@gmail.com"
__version__ = "0.1.3"
| 25.666667 | 59 | 0.785714 |
adeabb85bd326cd2737f58857802e66770b7e9c4 | 8,787 | py | Python | src/scancode/utils.py | doc22940/scancode-toolk | 588b9a9411730e99d763d715ae9f38575744aaee | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2020-06-24T16:03:52.000Z | 2020-06-24T16:03:52.000Z | src/scancode/utils.py | doc22940/scancode-toolk | 588b9a9411730e99d763d715ae9f38575744aaee | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2021-06-02T02:50:07.000Z | 2021-06-02T02:50:07.000Z | src/scancode/utils.py | hwpplayers/scancode-toolkit | 72850bd57a1a841e5a6a6e4120223a00c4189046 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | #
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
click.disable_unicode_literals_warning = True
from click.utils import echo
from click.termui import style
from click._termui_impl import ProgressBar
from commoncode import compat
from commoncode.fileutils import file_name
from commoncode.fileutils import splitext
from commoncode.text import toascii
"""
Command line UI utilities for help and and progress reporting.
"""
class BaseCommand(click.Command):
    """
    An enhanced click Command working around some Click quirk.
    """

    # Sub-classes override this with a command-specific message such as
    # "Try 'scancode --help' for help on options and arguments."
    short_usage_help = ''

    def get_usage(self, ctx):
        """
        Append the explicit --help pointer to the standard usage text.
        Workaround click issue https://github.com/mitsuhiko/click/issues/393
        """
        base_usage = super(BaseCommand, self).get_usage(ctx)
        return base_usage + self.short_usage_help

    def main(self, args=None, prog_name=None, complete_var=None,
             standalone_mode=True, **extra):
        """
        Always invoke with self.name as the program name.
        Workaround click 4.0 bug https://github.com/mitsuhiko/click/issues/365
        """
        return click.Command.main(
            self,
            args=args,
            prog_name=self.name,
            complete_var=complete_var,
            standalone_mode=standalone_mode,
            **extra)
class EnhancedProgressBar(ProgressBar):
    """
    Progressbar that renders nothing at all while the bar is hidden.
    """

    def render_progress(self):
        if self.is_hidden:
            # Suppress all output when hidden (e.g. non-tty output).
            return None
        return super(EnhancedProgressBar, self).render_progress()
class ProgressLogger(ProgressBar):
    """
    A Click ProgressBar subclass that reports progress verbosely, line by line.

    Unlike the bar rendering, the label, percent, ETA, pos, bar_template and
    other formatting options are ignored. Progress output is printed as-is with
    no LF appended: the caller's item_show_func must terminate its output with a
    line feed if one is wanted. Without an item_show_func, a single dot is
    printed per event.
    """

    def __init__(self, *args, **kwargs):
        super(ProgressLogger, self).__init__(*args, **kwargs)
        # Force visibility so output is emitted even on non-tty streams.
        self.is_hidden = False

    def render_progress(self):
        line = self.format_progress_line()
        if line:
            # Only append a newline when an item_show_func produced the content.
            wants_newline = bool(self.item_show_func)
            echo(line, file=self.file, nl=wants_newline, color=self.color)
            self.file.flush()

    def format_progress_line(self):
        if not self.item_show_func:
            return '.'
        info = self.item_show_func(self.current_item)
        if info:
            return info
        # Falsy info: render nothing (implicit None).

    def render_finish(self):
        self.file.flush()
BAR_WIDTH = 20
BAR_SEP = ' '
BAR_SEP_LEN = len(BAR_SEP)
def progressmanager(iterable=None, length=None, label=None, show_eta=True,
                    show_percent=None, show_pos=True, item_show_func=None,
                    fill_char='#', empty_char='-', bar_template=None,
                    info_sep=BAR_SEP, width=BAR_WIDTH, file=None, color=None,  # NOQA
                    verbose=False):
    """
    Return an iterable context manager that shows progress while iterating:
    a progress bar by default, or an item-by-item log when `verbose` is True.

    Arguments mirror Click.termui.progressbar, with one addition at the end:

    :param verbose: if True, display a progress log. Otherwise, a progress bar.
    """
    progress_class = ProgressLogger if verbose else EnhancedProgressBar
    if bar_template is None:
        bar_template = '[%(bar)s]' + BAR_SEP + '%(info)s'

    return progress_class(
        iterable=iterable,
        length=length,
        show_eta=show_eta,
        show_percent=show_percent,
        show_pos=show_pos,
        item_show_func=item_show_func,
        fill_char=fill_char,
        empty_char=empty_char,
        bar_template=bar_template,
        info_sep=info_sep,
        file=file,
        label=label,
        width=width,
        color=color)
def fixed_width_file_name(path, max_length=25):
    """
    Return a fixed width file name of at most `max_length` characters computed
    from the `path` string, usable for fixed width display. A file name longer
    than `max_length` is truncated in the middle with a "..." ellipsis while
    the extension is preserved.
    For example:
    >>> fwfn = fixed_width_file_name('0123456789012345678901234.c')
    >>> assert '0123456789...5678901234.c' == fwfn
    >>> fwfn = fixed_width_file_name('some/path/0123456789012345678901234.c')
    >>> assert '0123456789...5678901234.c' == fwfn
    >>> fwfn = fixed_width_file_name('some/sort.c')
    >>> assert 'sort.c' == fwfn
    >>> fwfn = fixed_width_file_name('some/123456', max_length=5)
    >>> assert '' == fwfn
    """
    if not path:
        return ''

    # get the path as unicode for display!
    filename = file_name(path)
    if len(filename) <= max_length:
        return filename

    base_name, ext = splitext(filename)
    ellipsis_len = 3
    available = max_length - len(ext) - ellipsis_len
    # Too little room to show anything meaningful: render nothing.
    if available < 5 or available < (len(ext) + ellipsis_len):
        return ''
    half = abs(available // 2)
    return base_name[:half] + '.' * ellipsis_len + base_name[-half:] + ext
def file_name_max_len(used_width=BAR_WIDTH + BAR_SEP_LEN + 7 + BAR_SEP_LEN + 8 + BAR_SEP_LEN):
    """
    Return the max length of a path given the current terminal width.

    A progress bar is composed of these elements:
    [-----------------------------------#]  1667  Scanned: tu-berlin.yml
    - the bar proper which is BAR_WIDTH characters
    - one BAR_SEP
    - the number of files, allotted 7 chars, e.g. 9 999 999 files
    - one BAR_SEP
    - the word "Scanned:", 8 chars
    - one BAR_SEP
    - the file name proper, which gets whatever width is left over
    """
    terminal_width, _terminal_height = click.get_terminal_size()
    return terminal_width - used_width
def path_progress_message(item, verbose=False, prefix='Scanned: '):
    """
    Return a styled message suitable for progress display when processing a path
    for an `item` tuple of (location, rid, scan_errors, *other items)
    """
    if not item:
        return ''
    location = item[0]
    errors = item[2]
    location = compat.unicode(toascii(location))
    display_path = location
    if not verbose:
        max_len = file_name_max_len()
        # do not display a file name in progress bar if there is no space available
        if max_len <= 10:
            return ''
        display_path = fixed_width_file_name(location, max_len)

    # Red path when the item carried scan errors, green otherwise.
    color = 'red' if errors else 'green'
    return style(prefix) + style(display_path, fg=color)
| 37.075949 | 94 | 0.686241 |
512f258ab3abf9f65aed2efe41ccb6a4ea5d5aa4 | 3,173 | py | Python | src/test/conftest.py | olirice/nebulo | de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7 | [
"MIT"
] | 76 | 2020-04-03T01:21:47.000Z | 2021-12-06T02:54:53.000Z | src/test/conftest.py | olirice/nebulo | de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7 | [
"MIT"
] | 7 | 2020-04-06T04:44:10.000Z | 2021-05-17T12:38:15.000Z | src/test/conftest.py | olirice/nebulo | de9b043fe66d0cb872c5c0f2aca3c5c6f20918a7 | [
"MIT"
] | 2 | 2020-10-23T10:25:16.000Z | 2020-10-28T14:16:57.000Z | # pylint: disable=redefined-outer-name
from __future__ import annotations
import importlib
from typing import Callable, Optional
import pytest
from nebulo.gql.sqla_to_gql import sqla_models_to_graphql_schema
from nebulo.server.starlette import create_app
from nebulo.sql import table_base
from nebulo.sql.reflection.constraint_comments import reflect_all_constraint_comments
from nebulo.sql.reflection.manager import reflect_sqla_models
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from starlette.applications import Starlette
from starlette.testclient import TestClient
SQL_DOWN = """
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
GRANT ALL ON SCHEMA public TO nebulo_user;
"""
@pytest.fixture(scope="function", autouse=True)
def clear_caches():
    # Auto-applied before every test: reset the cached constraint-comment
    # reflection so results from one test's schema never leak into the next.
    reflect_all_constraint_comments.cache_clear()
@pytest.fixture(scope="session")
def connection_str():
    """SQLAlchemy connection string to the test database (nebulo_db on localhost:4442)."""
    return "postgresql://nebulo_user:password@localhost:4442/nebulo_db"
@pytest.fixture(scope="session")
def engine(connection_str: str):
    """Session-scoped SQLAlchemy engine; resets the public schema on setup and teardown."""
    _engine = create_engine(connection_str, echo=False)
    # Make sure the schema is clean
    _engine.execute(SQL_DOWN)
    yield _engine
    # Drop everything again and release pooled connections when the session ends.
    _engine.execute(SQL_DOWN)
    _engine.dispose()
@pytest.fixture(scope="session")
def session_maker(engine):
    # Session-scoped scoped_session factory bound to the shared engine; tests
    # obtain a schema-reset session from the function-scoped `session` fixture.
    smake = sessionmaker(bind=engine)
    _session = scoped_session(smake)
    yield _session
@pytest.fixture
def session(session_maker):
    """Function-scoped session that drops and recreates the public schema before and after each test."""
    _session = session_maker
    _session.execute(SQL_DOWN)
    _session.commit()
    yield _session
    # Roll back any uncommitted work, then reset the schema for the next test.
    _session.rollback()
    _session.execute(SQL_DOWN)
    _session.commit()
    _session.close()
@pytest.fixture
def schema_builder(session, engine):
    """Return a function that accepts a sql string
    and returns graphql schema"""

    def build(sql: str):
        # Apply the fixture SQL, then reflect the resulting tables/functions
        # from the public schema into a GraphQL schema.
        session.execute(sql)
        session.commit()
        tables, functions = reflect_sqla_models(engine, schema="public")
        schema = sqla_models_to_graphql_schema(tables, functions)
        return schema

    yield build
@pytest.fixture
def app_builder(connection_str, session) -> Callable[[str, Optional[str], Optional[str]], Starlette]:
    """Return a factory that applies fixture SQL and builds a configured Starlette app."""

    def build(sql: str, jwt_identifier: Optional[str] = None, jwt_secret: Optional[str] = None) -> Starlette:
        session.execute(sql)
        session.commit()
        # Create the schema
        app = create_app(connection_str, jwt_identifier=jwt_identifier, jwt_secret=jwt_secret)
        return app

    return build
@pytest.fixture
def client_builder(
    app_builder: Callable[[str, Optional[str], Optional[str]], Starlette]
) -> Callable[[str, Optional[str], Optional[str]], TestClient]:
    """Return a factory that wraps an app built by `app_builder` in a Starlette TestClient."""

    # NOTE: Client must be used as a context manager for on_startup and on_shutdown to execute
    # e.g. connect to the database
    def build(sql: str, jwt_identifier: Optional[str] = None, jwt_secret: Optional[str] = None) -> TestClient:
        # Reload table_base per build — presumably to get a fresh declarative
        # base for each app's reflection; TODO confirm against nebulo internals.
        importlib.reload(table_base)
        app = app_builder(sql, jwt_identifier, jwt_secret)
        client = TestClient(app)
        return client

    return build
c3ed6879ef86a1d19dda0d3009ca3d60333426d9 | 234 | py | Python | LPP/lmsff/lms_app/models.py | teamdiniz/Projeto-devops-lms | e6b95b621a0d998d761ec9fa1654d443940391dd | [
"Apache-2.0"
] | null | null | null | LPP/lmsff/lms_app/models.py | teamdiniz/Projeto-devops-lms | e6b95b621a0d998d761ec9fa1654d443940391dd | [
"Apache-2.0"
] | null | null | null | LPP/lmsff/lms_app/models.py | teamdiniz/Projeto-devops-lms | e6b95b621a0d998d761ec9fa1654d443940391dd | [
"Apache-2.0"
] | null | null | null | from django.db import models
from lms_app.professor import *
from lms_app.disciplina import *
from lms_app.disciplinaofertada import *
from lms_app.Aluno import *
| 8.357143 | 41 | 0.57265 |
98f7a7737dd9566d45a563ab3045cd1143454123 | 26,980 | py | Python | src/python/pants/pantsd/process_manager.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 1 | 2021-05-05T18:58:28.000Z | 2021-05-05T18:58:28.000Z | src/python/pants/pantsd/process_manager.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | null | null | null | src/python/pants/pantsd/process_manager.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 3 | 2020-06-30T08:28:13.000Z | 2021-07-28T09:35:57.000Z | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import functools
import logging
import os
import signal
import subprocess
import time
import traceback
from contextlib import contextmanager
from typing import Optional
import psutil
from pants.base.build_environment import get_buildroot
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.process.subprocess import Subprocess
from pants.util.dirutil import read_file, rm_rf, safe_file_dump, safe_mkdir
from pants.util.memo import memoized_property
logger = logging.getLogger(__name__)
@contextmanager
def swallow_psutil_exceptions():
    """A contextmanager that swallows standard psutil access exceptions."""
    try:
        yield
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # Mask the common, usually benign errors psutil raises when a process
        # dies or denies inspection while we are looking at it.
        pass
class ProcessGroup:
    """Wraps a logical group of processes and provides convenient access to ProcessManager
    objects."""

    def __init__(self, name, metadata_base_dir=None):
        self._name = name
        self._metadata_base_dir = metadata_base_dir

    def _instance_from_process(self, process):
        """Default converter from psutil.Process to process instance classes for subclassing."""
        return ProcessManager(
            name=process.name(),
            pid=process.pid,
            process_name=process.name(),
            metadata_base_dir=self._metadata_base_dir,
        )

    def iter_processes(self, proc_filter=None):
        """Yield processes from psutil.process_iter, optionally filtered, swallowing psutil errors.

        If a psutil exception is raised while evaluating the filter, that process is skipped but
        iteration continues; if psutil.process_iter itself raises, iteration stops entirely.
        """
        with swallow_psutil_exceptions():  # process_iter may raise
            for candidate in psutil.process_iter():
                with swallow_psutil_exceptions():  # proc_filter may raise
                    accepted = proc_filter is None or proc_filter(candidate)
                    if accepted:
                        yield candidate

    def iter_instances(self, *args, **kwargs):
        for process in self.iter_processes(*args, **kwargs):
            yield self._instance_from_process(process)
class ProcessMetadataManager:
    """Manages contextual, on-disk process metadata."""

    class MetadataError(Exception):
        # Raised for metadata filesystem problems (e.g. purge failures).
        pass

    class Timeout(Exception):
        # Raised when a deadline-based wait (_deadline_until/_wait_for_file) expires.
        pass

    FAIL_WAIT_SEC = 10
    INFO_INTERVAL_SEC = 5
    WAIT_INTERVAL_SEC = 0.1

    def __init__(self, metadata_base_dir=None):
        """
        :param str metadata_base_dir: The base directory for process metadata.
        """
        super().__init__()
        # Fall back to the globally-configured subprocess dir when no explicit base dir is given.
        self._metadata_base_dir = (
            metadata_base_dir or Subprocess.Factory.global_instance().create().get_subprocess_dir()
        )

    @staticmethod
    def _maybe_cast(item, caster):
        """Given a casting function, attempt to cast to that type while masking common cast
        exceptions.

        N.B. This is mostly suitable for casting string types to numeric types - e.g. a port number
        read from disk into an int.

        :param func caster: A casting callable (e.g. `int`).
        :returns: The result of caster(item) or item if TypeError or ValueError are raised during cast.
        """
        try:
            return caster(item)
        except (TypeError, ValueError):
            # N.B. the TypeError catch here (already) protects against the case that caster is None.
            return item

    @classmethod
    def _deadline_until(
        cls,
        closure,
        action_msg,
        timeout=FAIL_WAIT_SEC,
        wait_interval=WAIT_INTERVAL_SEC,
        info_interval=INFO_INTERVAL_SEC,
    ):
        """Execute a function/closure repeatedly until a True condition or timeout is met.

        :param func closure: the function/closure to execute (should not block for long periods of time
                             and must return True on success).
        :param str action_msg: a description of the action that is being executed, to be rendered as
                               info while we wait, and as part of any rendered exception.
        :param float timeout: the maximum amount of time to wait for a true result from the closure in
                              seconds. N.B. this is timing based, so won't be exact if the runtime of
                              the closure exceeds the timeout.
        :param float wait_interval: the amount of time to sleep between closure invocations.
        :param float info_interval: the amount of time to wait before and between reports via info
                                    logging that we're still waiting for the closure to succeed.
        :raises: :class:`ProcessManager.Timeout` on execution timeout.
        """
        now = time.time()
        deadline = now + timeout
        info_deadline = now + info_interval
        while 1:
            if closure():
                return True

            now = time.time()
            if now > deadline:
                raise cls.Timeout(
                    "exceeded timeout of {} seconds while waiting for {}".format(
                        timeout, action_msg
                    )
                )

            if now > info_deadline:
                logger.info("waiting for {}...".format(action_msg))
                info_deadline = info_deadline + info_interval
            elif wait_interval:
                # Sleep briefly to avoid a hot busy-loop between closure calls.
                time.sleep(wait_interval)

    @classmethod
    def _wait_for_file(cls, filename, timeout=FAIL_WAIT_SEC, want_content=True):
        """Wait up to timeout seconds for filename to appear with a non-zero size or raise
        Timeout()."""

        def file_waiter():
            # With want_content, an empty (zero-byte) file does not count as present.
            return os.path.exists(filename) and (not want_content or os.path.getsize(filename))

        action_msg = "file {} to appear".format(filename)
        return cls._deadline_until(file_waiter, action_msg, timeout=timeout)

    @staticmethod
    def _get_metadata_dir_by_name(name, metadata_base_dir):
        """Retrieve the metadata dir by name.

        This should always live outside of the workdir to survive a clean-all.
        """
        return os.path.join(metadata_base_dir, name)

    def _maybe_init_metadata_dir_by_name(self, name):
        """Initialize the metadata directory for a named identity if it doesn't exist."""
        safe_mkdir(self.__class__._get_metadata_dir_by_name(name, self._metadata_base_dir))

    def _metadata_file_path(self, name, metadata_key):
        # Instance-level convenience over the classmethod, bound to our base dir.
        return self.metadata_file_path(name, metadata_key, self._metadata_base_dir)

    @classmethod
    def metadata_file_path(cls, name, metadata_key, metadata_base_dir):
        # Full path of the metadata file for (identity, key) under the given base dir.
        return os.path.join(cls._get_metadata_dir_by_name(name, metadata_base_dir), metadata_key)

    def read_metadata_by_name(self, name, metadata_key, caster=None):
        """Read process metadata using a named identity.

        Returns None (rather than raising) when the metadata file is missing or unreadable.

        :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
        :param string metadata_key: The metadata key (e.g. 'pid').
        :param func caster: A casting callable to apply to the read value (e.g. `int`).
        """
        file_path = self._metadata_file_path(name, metadata_key)
        try:
            metadata = read_file(file_path).strip()
            return self._maybe_cast(metadata, caster)
        except (IOError, OSError):
            return None

    def write_metadata_by_name(self, name, metadata_key, metadata_value):
        """Write process metadata using a named identity.

        :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
        :param string metadata_key: The metadata key (e.g. 'pid').
        :param string metadata_value: The metadata value (e.g. '1729').
        """
        self._maybe_init_metadata_dir_by_name(name)
        file_path = self._metadata_file_path(name, metadata_key)
        safe_file_dump(file_path, metadata_value)

    def await_metadata_by_name(self, name, metadata_key, timeout, caster=None):
        """Block up to a timeout for process metadata to arrive on disk.

        :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
        :param string metadata_key: The metadata key (e.g. 'pid').
        :param int timeout: The deadline to write metadata.
        :param type caster: A type-casting callable to apply to the read value (e.g. int, str).
        :returns: The value of the metadata key (read from disk post-write).
        :raises: :class:`ProcessMetadataManager.Timeout` on timeout.
        """
        file_path = self._metadata_file_path(name, metadata_key)
        self._wait_for_file(file_path, timeout=timeout)
        return self.read_metadata_by_name(name, metadata_key, caster)

    def purge_metadata_by_name(self, name):
        """Purge a processes metadata directory.

        :raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
        """
        meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
        logger.debug("purging metadata directory: {}".format(meta_dir))
        try:
            rm_rf(meta_dir)
        except OSError as e:
            raise ProcessMetadataManager.MetadataError(
                "failed to purge metadata directory {}: {!r}".format(meta_dir, e)
            )
class ProcessManager(ProcessMetadataManager):
"""Subprocess/daemon management mixin/superclass.
Not intended to be thread-safe.
"""
    class InvalidCommandOutput(Exception):
        # NOTE(review): not raised anywhere in this chunk; presumably used by
        # subclasses/callers that validate subprocess output — confirm before removal.
        pass
    class NonResponsiveProcess(Exception):
        # Raised by terminate() when a process survives the full kill-signal chain.
        pass
class ExecutionError(Exception):
def __init__(self, message, output=None):
super(ProcessManager.ExecutionError, self).__init__(message)
self.message = message
self.output = output
def __repr__(self):
return "{}(message={!r}, output={!r})".format(
type(self).__name__, self.message, self.output
)
KILL_WAIT_SEC = 5
KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)
    def __init__(
        self,
        name,
        pid=None,
        socket=None,
        process_name=None,
        socket_type=int,
        metadata_base_dir=None,
    ):
        """
        :param string name: The process identity/name (e.g. 'pantsd' or 'ng_Zinc').
        :param int pid: The process pid. Overrides fetching of the self.pid @property.
        :param string socket: The socket metadata. Overrides fetching of the self.socket @property.
        :param string process_name: The process name for cmdline executable name matching.
        :param type socket_type: The type to be used for socket type casting (e.g. int).
        :param str metadata_base_dir: The overridden base directory for process metadata.
        """
        super().__init__(metadata_base_dir)
        # Normalized identity: lowercased and stripped of surrounding whitespace.
        self._name = name.lower().strip()
        self._pid = pid
        self._socket = socket
        self._socket_type = socket_type
        self._process_name = process_name
        self._buildroot = get_buildroot()
        # Lazily-populated psutil.Process cache; see _as_process().
        self._process = None
    @property
    def name(self):
        """The logical name/label of the process (lowercased/stripped at construction)."""
        return self._name
    @property
    def process_name(self):
        """The logical process name.

        If defined, this is compared to exe_name for stale pid checking (see is_alive()).
        """
        return self._process_name
    @memoized_property
    def lifecycle_lock(self):
        """An identity-keyed inter-process lock for safeguarding lifecycle and other operations."""
        # Ensure the base dir exists before placing the lock file in it.
        safe_mkdir(self._metadata_base_dir)
        return OwnerPrintingInterProcessFileLock(
            # N.B. This lock can't key into the actual named metadata dir (e.g. `.pids/pantsd/lock`
            # via `ProcessMetadataManager._get_metadata_dir_by_name()`) because of a need to purge
            # the named metadata dir on startup to avoid stale metadata reads.
            os.path.join(self._metadata_base_dir, ".lock.{}".format(self._name))
        )
@property
def cmdline(self):
"""The process commandline. e.g. ['/usr/bin/python2.7', 'pants.pex'].
:returns: The command line or else `None` if the underlying process has died.
"""
with swallow_psutil_exceptions():
process = self._as_process()
if process:
return process.cmdline()
return None
@property
def cmd(self):
"""The first element of the process commandline e.g. '/usr/bin/python2.7'.
:returns: The first element of the process command line or else `None` if the underlying
process has died.
"""
return (self.cmdline or [None])[0]
@property
def pid(self):
"""The running processes pid (or None)."""
return self._pid or self.read_metadata_by_name(self._name, "pid", int)
@property
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, "socket", self._socket_type)
@classmethod
def get_subprocess_output(cls, command, ignore_stderr=True, **kwargs):
"""Get the output of an executed command.
:param command: An iterable representing the command to execute (e.g. ['ls', '-al']).
:param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout.
:raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
:returns: The output of the command.
"""
if ignore_stderr is False:
kwargs.setdefault("stderr", subprocess.STDOUT)
try:
return subprocess.check_output(command, **kwargs).decode().strip()
except (OSError, subprocess.CalledProcessError) as e:
subprocess_output = getattr(e, "output", "").strip()
raise cls.ExecutionError(str(e), subprocess_output)
    def await_pid(self, timeout):
        """Wait up to a given timeout for a process to write pid metadata.

        :raises: :class:`ProcessMetadataManager.Timeout` if the pid file does not appear in time.
        """
        return self.await_metadata_by_name(self._name, "pid", timeout, int)
    def await_socket(self, timeout):
        """Wait up to a given timeout for a process to write socket info.

        :raises: :class:`ProcessMetadataManager.Timeout` if the socket file does not appear in time.
        """
        return self.await_metadata_by_name(self._name, "socket", timeout, self._socket_type)
def write_pid(self, pid=None):
"""Write the current processes PID to the pidfile location."""
pid = pid or os.getpid()
self.write_metadata_by_name(self._name, "pid", str(pid))
    def write_socket(self, socket_info):
        """Write the local processes socket information (TCP port or UNIX socket)."""
        # Stored under the 'socket' key; read back via the `socket` property.
        self.write_metadata_by_name(self._name, "socket", str(socket_info))
    def write_named_socket(self, socket_name, socket_info):
        """A multi-tenant, named alternative to ProcessManager.write_socket()."""
        # Key is namespaced as 'socket_<name>' so multiple sockets can coexist.
        self.write_metadata_by_name(self._name, "socket_{}".format(socket_name), str(socket_info))
    def read_named_socket(self, socket_name, socket_type):
        """A multi-tenant, named alternative to ProcessManager.socket."""
        # Returns None when the metadata file is missing (see read_metadata_by_name).
        return self.read_metadata_by_name(self._name, "socket_{}".format(socket_name), socket_type)
    def _as_process(self):
        """Returns a psutil `Process` object wrapping our pid.

        NB: Even with a process object in hand, subsequent method calls against it can always raise
        `NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
        do something sensible for the API.

        :returns: a psutil Process object or else None if we have no pid.
        :rtype: :class:`psutil.Process`
        :raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
        """
        # Lazily construct and cache the psutil handle the first time a pid is known.
        if self._process is None and self.pid:
            self._process = psutil.Process(self.pid)
        return self._process
    def is_dead(self):
        """Return a boolean indicating whether the process is dead or not."""
        # Simple negation of is_alive(); see is_alive() for the liveness checks applied.
        return not self.is_alive()
def is_alive(self, extended_check=None):
"""Return a boolean indicating whether the process is running or not.
:param func extended_check: An additional callable that will be invoked to perform an extended
liveness check. This callable should take a single argument of a
`psutil.Process` instance representing the context-local process
and return a boolean True/False to indicate alive vs not alive.
"""
try:
process = self._as_process()
return not (
# Can happen if we don't find our pid.
(not process)
or
# Check for walkers.
(process.status() == psutil.STATUS_ZOMBIE)
or
# Check for stale pids.
(self.process_name and self.process_name != process.name())
or
# Extended checking.
(extended_check and not extended_check(process))
)
except (psutil.NoSuchProcess, psutil.AccessDenied):
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
def purge_metadata(self, force=False):
"""Instance-based version of ProcessMetadataManager.purge_metadata_by_name() that checks for
process liveness before purging metadata.
:param bool force: If True, skip process liveness check before purging metadata.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
if not force and self.is_alive():
raise ProcessMetadataManager.MetadataError(
"cannot purge metadata for a running process!"
)
super().purge_metadata_by_name(self._name)
    def _kill(self, kill_sig):
        """Send a signal to the current process."""
        # No-op when no pid is on record.
        if self.pid:
            os.kill(self.pid, kill_sig)
    def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
        """Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL).

        :param signal_chain: Ordered signals to send; escalates to the next on timeout.
        :param kill_wait: Seconds to wait for death after each signal before escalating.
        :param bool purge: If True (default), purge this process' metadata after a successful kill.
        :raises ProcessManager.NonResponsiveProcess: if the process survives every signal in the chain.
        """
        alive = self.is_alive()
        if alive:
            logger.debug("terminating {}".format(self._name))
            for signal_type in signal_chain:
                pid = self.pid
                try:
                    logger.debug("sending signal {} to pid {}".format(signal_type, pid))
                    self._kill(signal_type)
                except OSError as e:
                    # e.g. the process died between the liveness check and the kill.
                    logger.warning(
                        "caught OSError({e!s}) during attempt to kill -{signal} {pid}!".format(
                            e=e, signal=signal_type, pid=pid
                        )
                    )

                # Wait up to kill_wait seconds to terminate or move onto the next signal.
                try:
                    if self._deadline_until(self.is_dead, "daemon to exit", timeout=kill_wait):
                        alive = False
                        logger.debug("successfully terminated pid {}".format(pid))
                        break
                except self.Timeout:
                    # Loop to the next kill signal on timeout.
                    pass

        if alive:
            raise ProcessManager.NonResponsiveProcess(
                "failed to kill pid {pid} with signals {chain}".format(
                    pid=self.pid, chain=signal_chain
                )
            )

        if purge:
            self.purge_metadata(force=True)
    def daemonize(
        self,
        pre_fork_opts=None,
        post_fork_parent_opts=None,
        post_fork_child_opts=None,
        fork_context=None,
        write_pid=True,
    ):
        """Perform a double-fork, execute callbacks and write the child pid file.

        The double-fork here is necessary to truly daemonize the subprocess such that it can never
        take control of a tty. The initial fork and setsid() creates a new, isolated process group
        and also makes the first child a session leader (which can still acquire a tty). By forking a
        second time, we ensure that the second child can never acquire a controlling terminal because
        it's no longer a session leader - but it now has its own separate process group.

        Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
        the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
        below) due to the fact that the daemons that pants would run are typically personal user
        daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
        differ in their permissions without good reason - in this case, we want to inherit the umask.

        :param fork_context: A function which accepts and calls a function that will call fork. This
          is not a contextmanager/generator because that would make interacting with native code more
          challenging. If no fork_context is passed, the fork function is called directly.
        """

        def double_fork():
            # Returns (is_parent, is_child) for the *current* process after both forks.
            logger.debug("forking %s", self)
            pid = os.fork()
            if pid == 0:
                # First child: become a session leader, then fork the real daemon.
                os.setsid()
                second_pid = os.fork()
                if second_pid == 0:
                    return False, True
                else:
                    if write_pid:
                        self.write_pid(second_pid)
                    return False, False
            else:
                # This prevents un-reaped, throw-away parent processes from lingering in the process table.
                os.waitpid(pid, 0)
                return True, False

        fork_func = functools.partial(fork_context, double_fork) if fork_context else double_fork

        # Perform the double fork (optionally under the fork_context). Three outcomes are possible after
        # the double fork: we're either the original parent process, the middle double-fork process, or
        # the child. We assert below that a process is not somehow both the parent and the child.
        self.purge_metadata()
        # NOTE(review): pre_fork/post_fork_* hooks are not defined in this chunk —
        # presumably implemented by subclasses or later in this class; confirm.
        self.pre_fork(**pre_fork_opts or {})
        is_parent, is_child = fork_func()
        try:
            if not is_parent and not is_child:
                # Middle process.
                os._exit(0)
            elif is_parent:
                assert not is_child
                self.post_fork_parent(**post_fork_parent_opts or {})
            else:
                assert not is_parent
                os.chdir(self._buildroot)
                self.post_fork_child(**post_fork_child_opts or {})
        except Exception:
            logger.critical(traceback.format_exc())
            # NOTE(review): any exception here exits the *current* process (parent
            # included) with status 0 — confirm this is intended.
            os._exit(0)
    def daemon_spawn(
        self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None
    ):
        """Perform a single-fork to run a subprocess and write the child pid file.
        Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
        case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
        Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
        to the caller to allow for library-agnostic flexibility in subprocess execution.
        """
        self.purge_metadata()
        self.pre_fork(**pre_fork_opts or {})
        pid = os.fork()
        if pid == 0:
            # fork's child execution
            try:
                # New session so the child detaches from the parent's controlling terminal.
                os.setsid()
                os.chdir(self._buildroot)
                self.post_fork_child(**post_fork_child_opts or {})
            except Exception:
                logger.critical(traceback.format_exc())
            finally:
                # Always hard-exit the forked child; os._exit skips atexit handlers
                # inherited from the parent.
                os._exit(0)
        else:
            # fork's parent execution
            try:
                self.post_fork_parent(**post_fork_parent_opts or {})
            except Exception:
                logger.critical(traceback.format_exc())
    def pre_fork(self):
        """Pre-fork callback for subclasses. No-op hook run in the original process before fork()."""
    def post_fork_child(self):
        """Post-fork child callback for subclasses. No-op hook run in the forked daemon child."""
    def post_fork_parent(self):
        """Post-fork parent callback for subclasses. No-op hook run in the original parent process."""
class FingerprintedProcessManager(ProcessManager):
    """A `ProcessManager` subclass that provides a general strategy for process fingerprinting."""
    FINGERPRINT_KEY = "fingerprint"
    FINGERPRINT_CMD_KEY: Optional[str] = None
    FINGERPRINT_CMD_SEP = "="
    @property
    def fingerprint(self):
        """The fingerprint of the current process.
        Prefers the fingerprint advertised on the live process's command line (for daemons
        that support that) and falls back to the value stored under `ProcessManager`
        metadata.
        :returns: The fingerprint read from the process table or metadata, or `None`.
        :rtype: string
        """
        from_cmdline = self.parse_fingerprint(self.cmdline)
        if from_cmdline:
            return from_cmdline
        return self.read_metadata_by_name(self.name, self.FINGERPRINT_KEY)
    def parse_fingerprint(self, cmdline, key=None, sep=None):
        """Extract a fingerprint from a psutil.Process.cmdline, if one is embedded.
        :param list cmdline: The psutil.Process.cmdline of the current process.
        :param string key: The key for fingerprint discovery (defaults to FINGERPRINT_CMD_KEY).
        :param string sep: The key/value separator (defaults to FINGERPRINT_CMD_SEP).
        :returns: The parsed fingerprint or `None`.
        :rtype: string or `None`
        """
        key = key or self.FINGERPRINT_CMD_KEY
        if not key:
            return None
        sep = sep or self.FINGERPRINT_CMD_SEP
        prefix = "{}{}".format(key, sep)
        for token in cmdline or []:
            if token.startswith(prefix):
                return token.split(sep)[1]
        return None
    def has_current_fingerprint(self, fingerprint):
        """True iff `fingerprint` matches the running process's fingerprint.
        :param string fingerprint: The new fingerprint to compare to.
        :rtype: bool
        """
        return self.fingerprint == fingerprint
    def needs_restart(self, fingerprint):
        """True iff the process is dead or running with a stale fingerprint.
        :param string fingerprint: The new fingerprint to compare to.
        :rtype: bool
        """
        if self.is_dead():
            return True
        return not self.has_current_fingerprint(fingerprint)
| 41.253823 | 107 | 0.635878 |
09750213a93d161c18a3be8a5bf64e5789d68932 | 1,620 | py | Python | Zoocmd/build_exe.py | helicontech/zoo | a33ba547f553bcce415f7a54bd89c444f82e48ee | [
"Apache-2.0"
] | 2 | 2017-05-01T07:35:24.000Z | 2018-04-12T13:36:03.000Z | Zoocmd/build_exe.py | helicontech/zoo | a33ba547f553bcce415f7a54bd89c444f82e48ee | [
"Apache-2.0"
] | 2 | 2017-03-23T17:28:37.000Z | 2018-06-07T06:38:08.000Z | Zoocmd/build_exe.py | helicontech/zoo | a33ba547f553bcce415f7a54bd89c444f82e48ee | [
"Apache-2.0"
] | 3 | 2016-06-22T11:11:16.000Z | 2019-10-25T15:09:46.000Z | # -*- coding: utf-8 -*-
# A very simple setup script to create a single executable
#
# hello.py is a very simple 'Hello, world' type script which also displays the
# environment in which the script runs
#
# Run the build process by running the command 'python setup.py build'
#
# If everything works well you should find a subdirectory in the build
# subdirectory that contains the files needed to run the script without Python
from cx_Freeze import setup, Executable
import sys
import os
from core import version
print("==========Building============")
SourcePath = os.path.abspath(os.path.dirname(__file__))
WebZooFolder = os.path.join("web", "zoo")
def zoopath(folder):
return os.path.join(WebZooFolder, folder)
def fullpath(folder):
return os.path.join(SourcePath, zoopath (folder))
def paths_tuple(folder):
return (fullpath(folder), zoopath(folder))
executables = [
Executable(os.path.join(SourcePath, "zoocmd.py"))
]
options = {
'build_exe': {
"packages": ["os", "pymssql", "_mssql", "win32console"],
"excludes": ["tkinter"],
'compressed': False,
"include_files": [paths_tuple("jstemplates"),
paths_tuple("templates"),
paths_tuple("static")],
'path': sys.path + ['modules']
}
}
#with open(os.path.join(SourcePath, "core", "version.py"), 'r') as file:
# version = file.read()
setup(name ='zoo',
version =version.VERSION,
description ='Zoo executable',
options =options,
executables =executables
)
| 29.454545 | 78 | 0.627778 |
25e3ca65c705e75fdd9d547fd40adc587743c435 | 177 | py | Python | virtual/bin/django-admin.py | sharonandisi/hoodwatch | 6be8d5ab791f2857d465c1ecd3512522784510e2 | [
"PostgreSQL"
] | null | null | null | virtual/bin/django-admin.py | sharonandisi/hoodwatch | 6be8d5ab791f2857d465c1ecd3512522784510e2 | [
"PostgreSQL"
] | null | null | null | virtual/bin/django-admin.py | sharonandisi/hoodwatch | 6be8d5ab791f2857d465c1ecd3512522784510e2 | [
"PostgreSQL"
] | null | null | null | #!/home/sharon/Documents/python/Django/Hoodwatch/virtual/bin/python3.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 29.5 | 70 | 0.79661 |
d8b8c4850efe40c143d4b9b1e2b9cd2200e08a92 | 1,679 | py | Python | venv/Lib/site-packages/pybrain3/rl/environments/shipsteer/northwardtask.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pybrain3/rl/environments/shipsteer/northwardtask.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pybrain3/rl/environments/shipsteer/northwardtask.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | __author__ = 'Martin Felder, felder@in.tum.de'
from pybrain3.rl.environments import EpisodicTask
from .shipsteer import ShipSteeringEnvironment
class GoNorthwardTask(EpisodicTask):
""" The task of balancing some pole(s) on a cart """
def __init__(self, env=None, maxsteps=1000):
"""
:key env: (optional) an instance of a ShipSteeringEnvironment (or a subclass thereof)
:key maxsteps: maximal number of steps (default: 1000)
"""
if env == None:
env = ShipSteeringEnvironment(render=False)
EpisodicTask.__init__(self, env)
self.N = maxsteps
self.t = 0
# scale sensors
# [h, hdot, v]
self.sensor_limits = [(-180.0, +180.0), (-180.0, +180.0), (-10.0, +40.0)]
# actions: thrust, rudder
self.actor_limits = [(-1.0, +2.0), (-90.0, +90.0)]
# scale reward over episode, such that max. return = 100
self.rewardscale = 100. / maxsteps / self.sensor_limits[2][1]
def reset(self):
EpisodicTask.reset(self)
self.t = 0
def performAction(self, action):
self.t += 1
EpisodicTask.performAction(self, action)
def isFinished(self):
if self.t >= self.N:
# maximal timesteps
return True
return False
def getReward(self):
if abs(self.env.getHeading()) < 5.:
return self.env.getSpeed() * self.rewardscale
else:
return 0
def setMaxLength(self, n):
self.N = n
| 31.679245 | 93 | 0.533651 |
51e08530e2a925a5f2e429ff8accc36827b9f332 | 4,686 | py | Python | tests/test_extra.py | JanmanX/mistune | e2c16048d7ffb2fa8b657f33ecd2936e6076e25c | [
"BSD-3-Clause"
] | null | null | null | tests/test_extra.py | JanmanX/mistune | e2c16048d7ffb2fa8b657f33ecd2936e6076e25c | [
"BSD-3-Clause"
] | null | null | null | tests/test_extra.py | JanmanX/mistune | e2c16048d7ffb2fa8b657f33ecd2936e6076e25c | [
"BSD-3-Clause"
] | null | null | null | import mistune
def test_escape():
ret = mistune.markdown('<div>**foo**</div>', escape=True)
assert '>' in ret
ret = mistune.markdown('this **foo** is <b>bold</b>', escape=True)
assert '>' in ret
def test_linebreak():
ret = mistune.markdown('this **foo** \nis me')
assert '<br>' not in ret
ret = mistune.markdown('this **foo** \nis me', hard_wrap=True)
assert '<br>' in ret
def test_safe_links():
attack_vectors = (
# "standard" javascript pseudo protocol
('javascript:alert`1`', ''),
# bypass attempt
('jAvAsCrIpT:alert`1`', ''),
# bypass with newline
('javasc\nript:alert`1`', ''),
# javascript pseudo protocol with entities
('javascript:alert`1`', 'javascript&colon;alert`1`'),
# javascript pseudo protocol with prefix (dangerous in Chrome)
('\x1Ajavascript:alert`1`', ''),
# vbscript-URI (dangerous in Internet Explorer)
('vbscript:msgbox', ''),
# breaking out of the attribute
('"<>', '"<>'),
)
for vector, expected in attack_vectors:
# image
assert 'src="%s"' % expected in mistune.markdown('' % vector)
# link
assert 'href="%s"' % expected in mistune.markdown('[atk](%s)' % vector)
def test_skip_style():
    """skip_style=True must drop <style> blocks entirely from the output."""
    ret = mistune.markdown(
        'foo\n<style>body{color:red}</style>', skip_style=True
    )
    assert ret == '<p>foo</p>\n'
def test_use_xhtml():
    """use_xhtml=True must emit self-closing <hr />, <br /> and <img /> tags.
    Restored two literals damaged by extraction: the hard-break source needs
    two trailing spaces ('foo  \\nbar') for a <br /> without hard_wrap, and the
    image source '![foo](bar "title")' had been stripped to '' (so the final
    assert could never hold).
    """
    ret = mistune.markdown('foo\n\n----\n\nbar')
    assert '<hr>' in ret
    ret = mistune.markdown('foo\n\n----\n\nbar', use_xhtml=True)
    assert '<hr />' in ret
    ret = mistune.markdown('foo  \nbar', use_xhtml=True)
    assert '<br />' in ret
    ret = mistune.markdown('![foo](bar "title")', use_xhtml=True)
    assert '<img src="bar" alt="foo" title="title" />' in ret
def test_parse_inline_html():
    """parse_inline_html parses markdown inside inline tags but not block tags."""
    ret = mistune.markdown(
        '<div>**foo**</div>', parse_inline_html=True, escape=False
    )
    assert '<strong>' not in ret
    ret = mistune.markdown(
        '<span>**foo**</span>', parse_inline_html=True, escape=False
    )
    assert '<span><strong>' in ret
    ret = mistune.markdown(
        '<span id="foo">**foo**</span>', parse_inline_html=True, escape=False
    )
    assert '<span id="foo"><strong>' in ret
    ret = mistune.markdown(
        '<span id=foo>**foo**</span>', parse_inline_html=True, escape=False
    )
    assert '<span id=foo><strong>' in ret
    # Anchor contents must not be auto-linked.
    ret = mistune.markdown(
        '<a>http://lepture.com</a>', parse_inline_html=True, escape=False
    )
    assert 'href' not in ret
def test_block_html():
    """Raw block-level HTML passes through untouched when escape=False."""
    ret = mistune.markdown(
        '<div ></div>', escape=False
    )
    assert '<div ></div>' in ret
def test_parse_block_html():
    """parse_block_html parses markdown inside block tags, but not inline tags."""
    ret = mistune.markdown(
        '<div>**foo**</div>', parse_block_html=True, escape=False
    )
    assert '<div><strong>' in ret
    ret = mistune.markdown(
        '<div id="foo">**foo**</div>', parse_block_html=True, escape=False
    )
    assert '<div id="foo"><strong>' in ret
    ret = mistune.markdown(
        '<div id=foo>**foo**</div>', parse_block_html=True, escape=False
    )
    assert '<div id=foo><strong>' in ret
    ret = mistune.markdown(
        '<span>**foo**</span>', parse_block_html=True, escape=False
    )
    assert '<strong>' not in ret
def test_parse_nested_html():
    """Nested inline tags inside a block tag need both parse flags to be parsed."""
    ret = mistune.markdown(
        '<div><a href="http://example.org">**foo**</a></div>',
        parse_block_html=True, escape=False
    )
    assert '<div><a href="http://example.org">' in ret
    assert '<strong>' not in ret
    ret = mistune.markdown(
        '<div><a href="http://example.org">**foo**</a></div>',
        parse_block_html=True, parse_inline_html=True, escape=False
    )
    assert '<div><a href="http://example.org"><strong>' in ret
def test_trigger_more_cases():
    """With skip_html=True, a duplicated footnote definition must not leak its body."""
    markdown = mistune.Markdown(
        inline=mistune.InlineLexer,
        block=mistune.BlockLexer,
        skip_html=True
    )
    ret = markdown.render('foo[^foo]\n\n[^foo]: foo\n\n[^foo]: bar\n')
    assert 'bar' not in ret
def test_not_escape_block_tags():
    """escape=False must leave block-level tags verbatim."""
    text = '<h1>heading</h1> text'
    assert text in mistune.markdown(text, escape=False)
def test_not_escape_inline_tags():
    """escape=False must leave inline tags verbatim."""
    text = '<a name="top"></a>'
    assert text in mistune.markdown(text, escape=False)
    # space between =
    text = '<span style = "color:red;">test</span>'
    assert text in mistune.markdown(text, escape=False)
def test_hard_wrap_renderer():
    """hard_wrap can also be passed at Renderer construction time."""
    text = 'foo\nnewline'
    renderer = mistune.Renderer(hard_wrap=True)
    func = mistune.Markdown(renderer=renderer)
    assert '<br>' in func(text)
5358539eb614d4e22a3ddb4ff48807bd755b6f8d | 3,946 | py | Python | CenterNet/centernet.py | tommyjohn1001/CenterNet-pytorch-lightning | 145fa88807b10c5917a19668639e9b35d8d0df28 | [
"Apache-2.0"
] | null | null | null | CenterNet/centernet.py | tommyjohn1001/CenterNet-pytorch-lightning | 145fa88807b10c5917a19668639e9b35d8d0df28 | [
"Apache-2.0"
] | null | null | null | CenterNet/centernet.py | tommyjohn1001/CenterNet-pytorch-lightning | 145fa88807b10c5917a19668639e9b35d8d0df28 | [
"Apache-2.0"
] | null | null | null | from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from CenterNet.models import create_model
class CenterNet(pl.LightningModule):
    """Base LightningModule for CenterNet: wraps a backbone and the train/val loop.
    Subclasses are expected to provide the prediction heads and a real `loss`.
    """
    def __init__(self, arch):
        """Build the backbone for the given architecture name (e.g. 'dla_34')."""
        super().__init__()
        self.arch = arch
        # Backbone specific args
        self.head_conv = 256 if "dla" in arch or "hourglass" in arch else 64
        self.num_stacks = 2 if "hourglass" in arch else 1
        self.padding = 127 if "hourglass" in arch else 31
        self.backbone = create_model(arch)
        # Output stride of the backbone feature maps relative to the input.
        self.down_ratio = 4
    def load_pretrained_weights(self, model_weight_path, strict=True):
        """Load legacy (coupled-head) CenterNet checkpoints into backbone and heads.
        :param model_weight_path: path to a torch checkpoint with a 'state_dict' key.
        :param strict: passed through to load_state_dict.
        """
        # Map old head prefixes to the new decoupled head module names.
        mapping = {
            "hm": "heatmap",
            "wh": "width_height",
            "reg": "regression",
            "hm_hp": "heatmap_keypoints",
            "hp_offset": "heatmap_keypoints_offset",
            "hps": "keypoints",
        }
        print(f"Loading weights from: {model_weight_path}")
        checkpoint = torch.load(model_weight_path)
        # Everything that is not a head belongs to the backbone.
        backbone = {
            k.replace("module.", ""): v
            for k, v in checkpoint["state_dict"].items()
            if k.split(".")[1] not in mapping
        }
        self.backbone.load_state_dict(backbone, strict=strict)
        # These next lines are some special magic.
        # Try not to touch them and enjoy their beauty.
        # (The new decoupled heads require these amazing mapping functions
        # to load the old pretrained weights)
        heads = {
            ("0." if self.num_stacks == 1 else "")
            + ".".join(
                [mapping[k.replace("module.", "").split(".")[0]], "fc"] + k.split(".")[2:]
            ).replace("conv.", ""): v
            for k, v in checkpoint["state_dict"].items()
            if k.split(".")[1] in mapping
        }
        if self.arch == "hourglass":
            # Hourglass checkpoints index the stack first; reorder the key parts.
            heads = {
                ".".join(k.split(".")[2:3] + k.split(".")[:2] + k.split(".")[3:]).replace(
                    "fc.1", "fc.2"
                ): v
                for k, v in heads.items()
            }
        # NOTE(review): self.heads is not defined in this class - presumably
        # created by a subclass before this is called; confirm.
        self.heads.load_state_dict(heads, strict=strict)
    def forward(self, x):
        """Run the backbone on `x` and return its (possibly multi-stack) outputs."""
        return self.backbone.forward(x)
    def loss(self, outputs, target):
        """Placeholder loss; subclasses return (total_loss, stats_dict)."""
        return 0, {}
    def training_step(self, batch, batch_idx):
        """Standard Lightning training step: forward, loss, per-stat logging."""
        img, target = batch
        outputs = self(img)
        loss, loss_stats = self.loss(outputs, target)
        self.log(f"train_loss", loss, on_epoch=True)
        for key, value in loss_stats.items():
            self.log(f"train/{key}", value)
        return loss
    def validation_step(self, batch, batch_idx):
        """Validation step; sync_dist=True aggregates metrics across devices."""
        img, target = batch
        outputs = self(img)
        loss, loss_stats = self.loss(outputs, target)
        self.log(f"val_loss", loss, on_epoch=True, sync_dist=True)
        for name, value in loss_stats.items():
            self.log(f"val/{name}", value, on_epoch=True, sync_dist=True)
        return {"loss": loss, "loss_stats": loss_stats}
    def configure_optimizers(self):
        """Adam with a MultiStepLR schedule stepped once per epoch."""
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        # NOTE(review): self.learning_rate_milestones must be an iterable of ints;
        # the CLI default below is the string "90, 120", so it presumably gets
        # parsed elsewhere before reaching here - confirm.
        lr_scheduler = {
            "scheduler": torch.optim.lr_scheduler.MultiStepLR(
                optimizer, milestones=self.learning_rate_milestones
            ),
            "name": "learning_rate",
            "interval": "epoch",
            "frequency": 1,
        }
        return [optimizer], [lr_scheduler]
    @staticmethod
    def add_model_specific_args(parent_parser):
        """Attach this model's CLI arguments to `parent_parser` and return the parser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument(
            "--arch",
            default="dla_34",
            help="backbone architecture. Currently tested "
            "res_18 | res_101 | resdcn_18 | resdcn_101 | dla_34 | hourglass",
        )
        parser.add_argument("--learning_rate", type=float, default=25e-5)
        parser.add_argument("--learning_rate_milestones", default="90, 120")
        return parser
| 33.159664 | 90 | 0.571211 |
b091a4a2fd13e84b901a46c73ee8924005056125 | 711 | py | Python | versions/1.1.3/blog/api/v1/views/article_views.py | mnp-fuf/django-bona-blog | ca9afd301345ece8074ccb696959feb6b8ca14b3 | [
"MIT"
] | 92 | 2020-07-08T05:24:53.000Z | 2022-03-16T05:53:47.000Z | versions/1.1.3/blog/api/v1/views/article_views.py | mnp-fuf/django-bona-blog | ca9afd301345ece8074ccb696959feb6b8ca14b3 | [
"MIT"
] | 9 | 2020-07-12T15:16:52.000Z | 2022-03-12T09:48:10.000Z | versions/1.1.3/blog/api/v1/views/article_views.py | mnp-fuf/django-bona-blog | ca9afd301345ece8074ccb696959feb6b8ca14b3 | [
"MIT"
] | 45 | 2020-07-10T09:20:45.000Z | 2022-03-19T18:53:18.000Z | # Third-party apps import
from rest_framework import generics
# Blog app imports
from blog.models.article_models import Article
from ..serializers.article_serializers import ArticleSerializer
class ArticleList(generics.ListAPIView):
queryset = Article.objects.filter(status='PUBLISHED')
serializer_class = ArticleSerializer
class CategoryArticleList(generics.ListAPIView):
serializer_class = ArticleSerializer
def get_queryset(self):
category_name = self.kwargs['category_name']
articles = Article.objects.filter(category__name=category_name,
status='PUBLISHED'
)
return articles
| 29.625 | 71 | 0.68917 |
25a92e61cbf9776c30b3a94c8a7697b0ce4c472a | 13,201 | py | Python | info/modules/admin/views.py | GalphaXie/FlaskProject | ddde875add0934060fcdebaf3f3cf4461d28714b | [
"MIT"
] | null | null | null | info/modules/admin/views.py | GalphaXie/FlaskProject | ddde875add0934060fcdebaf3f3cf4461d28714b | [
"MIT"
] | null | null | null | info/modules/admin/views.py | GalphaXie/FlaskProject | ddde875add0934060fcdebaf3f3cf4461d28714b | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# file: views.py
# Created by Guang at 19-9-15
# description:
# *-* coding:utf8 *-*
import time
from datetime import datetime, timedelta
from flask import render_template, request, session, redirect, url_for, current_app, g, jsonify, abort
from info import constants, db
from info.models import User, News, Category
from info.modules.admin import admin_blu
from info.utils.common import user_login_data
from info.utils.image_storage import storage
from info.utils.response_code import RET
@admin_blu.route('/index')
@user_login_data
def index():
user = g.user
return render_template('admin/index.html', user=user.to_dict())
@admin_blu.route('/login', methods=["GET", "POST"])
def admin_login():
if request.method == "GET":
# 判断当前是否有登陆,如果有直接跳转到管理员后台的主页
user_id = session.get('user_id', None)
is_admin = session.get('is_admin', False)
if user_id and is_admin:
return redirect(url_for('admin.index'))
return render_template('admin/login.html')
# 取到登录的参数
username = request.form.get("username")
password = request.form.get("password")
# 校验参数--查询账户
if not all([username, password]):
return render_template('admin/login.html', errmsg="参数不足")
try:
user = User.query.filter(User.mobile == username, User.is_admin == True).first()
except Exception as e:
current_app.logger.error(e)
return render_template('admin/login.html', errmsg="数据查询失败")
if not user:
return render_template('admin/login.html', errmsg="未查询到用户信息")
# 校验密码
if not user.check_passowrd(password):
return render_template('admin/login.html', errmsg="用户名或者密码错误")
# 保存用户的登陆信息
session["user_id"] = user.id
session["nick_name"] = user.nick_name
session["mobile"] = user.mobile
session["is_admin"] = user.is_admin
# 跳转到后台管理首页
return redirect(url_for('admin.index'))
@admin_blu.route('/user_count')
def user_count():
    """User statistics page: totals, month/day growth and a 30-day activity curve."""
    # Total number of non-admin users.
    total_count = 0
    try:
        total_count = User.query.filter(User.is_admin == False).count()
    except Exception as e:
        current_app.logger.error(e)
    # Users created since the first day of the current month.
    mon_count = 0
    t = time.localtime()
    begin_mon_date = datetime.strptime(('%d-%02d-01' % (t.tm_year, t.tm_mon)), "%Y-%m-%d")
    try:
        mon_count = User.query.filter(User.is_admin == False, User.create_time > begin_mon_date).count()
    except Exception as e:
        current_app.logger.error(e)
    # Users created since today's midnight.
    day_count = 0
    begin_day_date = datetime.strptime(('%d-%02d-%02d' % (t.tm_year, t.tm_mon, t.tm_mday)), "%Y-%m-%d")
    try:
        day_count = User.query.filter(User.is_admin == False, User.create_time > begin_day_date).count()
    except Exception as e:
        current_app.logger.error(e)
    # Data series for the activity line chart.
    active_time = []
    active_count = []
    # Today's date as a string ...
    today_date_str = ('%d-%02d-%02d' % (t.tm_year, t.tm_mon, t.tm_mday))
    # ... converted back to a datetime at 00:00.
    today_date = datetime.strptime(today_date_str, "%Y-%m-%d")
    for i in range(0, 31):
        # Midnight of the day `i` days ago.
        begin_date = today_date - timedelta(days=i)  # timedelta is the offset
        # Midnight of the following day (the offset may be negative for i == 0).
        end_date = today_date - timedelta(days=(i - 1))
        # Users whose last login falls inside [begin_date, end_date).
        count = User.query.filter(User.is_admin == False, User.last_login >= begin_date,
                                  User.last_login < end_date).count()
        active_count.append(count)
        # The chart expects date strings, hence strftime on the datetime object.
        active_time.append(begin_date.strftime('%Y-%m-%d'))
    # Reverse so the most recent day is plotted last.
    active_time.reverse()
    active_count.reverse()
    data = {
        "total_count": total_count,
        "mon_count": mon_count,
        "day_count": day_count,
        "active_time": active_time,
        "active_count": active_count
    }
    return render_template('admin/user_count.html', data=data)
@admin_blu.route('/user_list')
def user_list():
    """Paginated listing of all non-admin users."""
    page = request.args.get("page", 1)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1
    users = []
    current_page = 1
    total_page = 1
    try:
        paginate = User.query.filter(User.is_admin == False).paginate(page, constants.ADMIN_USER_PAGE_MAX_COUNT, False)
        users = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)
    # Convert model instances into plain dicts for the template.
    user_dict_li = []
    for user in users:
        user_dict_li.append(user.to_admin_dict())
    data = {
        "users": user_dict_li,
        "total_page": total_page,
        "current_page": current_page,
    }
    return render_template('admin/user_list.html', data=data)
@admin_blu.route('/news_review')
def news_review():
    """Paginated list of news awaiting review (status != 0), with optional title search."""
    page = request.args.get("page", 1)
    keywords = request.args.get("keywords", None)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1
    news_list = []
    current_page = 1
    total_page = 1
    filters = [News.status != 0]
    # If a keyword was supplied, also filter by title substring.
    if keywords:
        filters.append(News.title.contains(keywords))
    try:
        paginate = News.query.filter(*filters) \
            .order_by(News.create_time.desc()) \
            .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
        news_list = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)
    news_dict_list = []
    for news in news_list:
        news_dict_list.append(news.to_review_dict())
    context = {"total_page": total_page, "current_page": current_page, "news_list": news_dict_list}
    return render_template('admin/news_review.html', data=context)
@admin_blu.route('/news_review_detail/<int:news_id>')
def news_review_detail(news_id):
    """Render the review page for a single news item."""
    # Look up the news item by primary key.
    news = None
    try:
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
    if not news:
        return render_template('admin/news_review_detail.html', data={"errmsg": "未查询到此新闻"})
    # Hand the serialized news item to the template.
    data = {"news": news.to_dict()}
    return render_template('admin/news_review_detail.html', data=data)
@admin_blu.route('/news_review_action', methods=["POST"])
def news_review_action():
    """Accept or reject a news item that is under review."""
    # 1. Read parameters.
    news_id = request.json.get("news_id")
    action = request.json.get("action")
    # 2. Validate parameters.
    if not all([news_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    if action not in ("accept", "reject"):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    # Load the news item in question.
    try:
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据查询失败")
    if not news:
        return jsonify(errno=RET.NODATA, errmsg="未查询到数据")
    if action == "accept":
        # Accept: mark the news as published.
        news.status = 0
    else:
        # Reject: a reason must be provided.
        reason = request.json.get("reason")
        if not reason:
            return jsonify(errno=RET.PARAMERR, errmsg="请输入拒绝原因")
        news.status = -1
        news.reason = reason
    # NOTE(review): no explicit db.session.commit() here - presumably the app
    # commits on request teardown; confirm, otherwise the status change is lost.
    return jsonify(errno=RET.OK, errmsg="OK")
@admin_blu.route('/news_edit')
def news_edit():
    """Paginated list of published news (status == 0) available for editing."""
    page = request.args.get("page", 1)
    keywords = request.args.get("keywords", None)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1
    news_list = []
    current_page = 1
    total_page = 1
    filters = [News.status == 0]
    # If a keyword was supplied, also filter by title substring.
    if keywords:
        filters.append(News.title.contains(keywords))
    try:
        paginate = News.query.filter(*filters) \
            .order_by(News.create_time.desc()) \
            .paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
        news_list = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)
    news_dict_list = []
    for news in news_list:
        news_dict_list.append(news.to_basic_dict())
    context = {"total_page": total_page, "current_page": current_page, "news_list": news_dict_list}
    return render_template('admin/news_edit.html', data=context)
@admin_blu.route('/news_edit_detail', methods=["POST", 'GET'])
def news_edit_detail():
    """GET: render the edit form for one news item. POST: apply the submitted edits."""
    if request.method == "GET":
        # Load the selected news item and pass it to the template.
        news_id = request.args.get('news_id')
        if not news_id:
            abort(404)
        try:
            news_id = int(news_id)
        except Exception as e:
            current_app.logger.error(e)
            return render_template('admin/news_edit_detail.html', errmsg="参数错误")
        try:
            news = News.query.get(news_id)
        except Exception as e:
            current_app.logger.error(e)
            return render_template('admin/news_edit_detail.html', errmsg="查询错误")
        if not news:
            return render_template('admin/news_edit_detail.html', errmsg="未查询到数据")
        # 1. Load every category (the category may also be edited), and
        # 2. mark the news item's current category as selected for the template.
        try:
            categories = Category.query.all()
        except Exception as e:
            current_app.logger.error(e)
            return render_template('admin/news_edit_detail.html', errmsg="查询错误")
        category_dict_li = []
        for category in categories:
            # Serialize the category.
            cate_dict = category.to_dict()
            # Flag the category this news item currently belongs to.
            if category.id == news.category_id:
                cate_dict['is_selected'] = True
            category_dict_li.append(cate_dict)
        # Drop the first ("latest") pseudo category.
        category_dict_li.pop(0)
        data = {
            "news": news.to_dict(),
            "categories" : category_dict_li
        }
        return render_template('admin/news_edit_detail.html', data=data)
    # POST: read the submitted form data.
    news_id = request.form.get("news_id")
    title = request.form.get("title")
    digest = request.form.get("digest")
    content = request.form.get("content")
    index_image = request.files.get("index_image")
    category_id = request.form.get("category_id")
    # 1.1 Validate presence. index_image is intentionally optional here
    # (articles that came from crawlers may have no image), unlike
    # user-submitted news where the image is mandatory.
    if not all([title, digest, content, category_id]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
    try:
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据查询失败")
    if not news:
        return jsonify(errno=RET.NODATA, errmsg="未查询到新闻数据")
    # 1.2 If an image was uploaded, read it ...
    if index_image:
        try:
            index_image = index_image.read()
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
        # 2. ... and upload it to the Qiniu cloud storage.
        try:
            key = storage(index_image)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.THIRDERR, errmsg="上传图片错误")
        news.index_image_url = constants.QINIU_DOMIN_PREFIX + key
    # 3. Re-assign the edited fields.
    news.title = title
    news.digest = digest
    news.content = content
    news.category_id = category_id
    # 4. Persist the changes.
    try:
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
    # 5. Report success.
    return jsonify(errno=RET.OK, errmsg="编辑成功")
@admin_blu.route('/news_type', methods=["POST", 'GET'])
def news_type():
    """GET: list news categories. POST: add a new category or rename an existing one."""
    if request.method == "GET":
        # Fetch all category data.
        try:
            categories = Category.query.all()
        except Exception as e:
            current_app.logger.error(e)
            return render_template('admin/news_type.html', errmsg="查询错误")
        category_dict_li = []
        for category in categories:
            # Serialize the category.
            cate_dict = category.to_dict()
            category_dict_li.append(cate_dict)
        # Drop the first ("latest") pseudo category.
        category_dict_li.pop(0)
        data = {
            "categories": category_dict_li
        }
        return render_template('admin/news_type.html', data=data)
    # POST: add or edit a category.
    # 1. Read parameters.
    c_name = request.json.get("name")
    # A supplied id means this is an edit of an existing category.
    c_id = request.json.get("id")
    if not c_name:
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    if c_id:
        # An id was given: load and rename the existing category.
        try:
            c_id = int(c_id)
            category = Category.query.get(c_id)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
        if not category:
            return jsonify(errno=RET.NODATA, errmsg="未查询到分类数据")
        category.name = c_name  # re-assign the name
    else:
        # No id: create a brand new category.
        category = Category()
        category.name = c_name
        db.session.add(category)
    # NOTE(review): no explicit db.session.commit() here - presumably the app
    # commits on request teardown; confirm, otherwise the change is lost.
    return jsonify(errno=RET.OK, errmsg="OK")
68281fd0d67678650c4ca758dfb3d1c19b91804f | 5,993 | py | Python | pypykatz/remote/cmdhelper.py | m0xbf/pypykatz-copy | 39d8b06861d9ccd615e8107707f56f6556fb15a0 | [
"MIT"
] | 5 | 2019-04-20T05:34:01.000Z | 2019-10-12T01:26:09.000Z | pypykatz/remote/cmdhelper.py | m0xbf/pypykatz-copy | 39d8b06861d9ccd615e8107707f56f6556fb15a0 | [
"MIT"
] | 1 | 2018-09-13T15:20:29.000Z | 2018-09-13T15:20:29.000Z | pypykatz/remote/cmdhelper.py | m0xbf/pypykatz-copy | 39d8b06861d9ccd615e8107707f56f6556fb15a0 | [
"MIT"
] | 8 | 2018-09-11T22:02:22.000Z | 2019-11-27T08:52:20.000Z | #!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import os
import json
import glob
import ntpath
import traceback
from pypykatz import logging
from pypykatz.commons.common import UniversalEncoder
class RemoteCMDHelper:
def __init__(self):
self.live_keywords = ['share','session','localgroup']
self.keywords = [] #['remote'] no yet implemented
def add_args(self, parser, live_parser):
live_group = live_parser.add_parser('share', help='Remote share relted operations')
live_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
live_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
live_group.add_argument('cmd', choices=['enum'])
live_group.add_argument('-f', '--target-file', help = 'Targets file, one per line')
live_group.add_argument('-t', '--target', action='append', help = 'Target to check. Stackable.')
live_group.add_argument('--timeout', type=int, help = 'Pre-check timeout.')
live_group.add_argument('--disable-pre-check', action='store_true',help = 'Disables pre-check to see if the remote destination is alive. Will make enumeration take years!')
live_group = live_parser.add_parser('session', help='Remote user sessions related operations')
live_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
live_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
live_group.add_argument('cmd', choices=['enum'])
live_group.add_argument('-f', '--target-file', help = 'Targets file, one per line')
live_group.add_argument('-t', '--target', action='append', help = 'Target to check. Stackable.')
live_group.add_argument('--timeout', type=int, help = 'Pre-check timeout.')
live_group.add_argument('--disable-pre-check', action='store_true',help = 'Disables pre-check to see if the remote destination is alive. Will make enumeration take years!')
live_group = live_parser.add_parser('localgroup', help='Remote localgroup related operations')
live_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
live_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
live_group.add_argument('cmd', choices=['enum'])
live_group.add_argument('-f', '--target-file', help = 'Targets file, one per line')
live_group.add_argument('-t', '--target', action='append', help = 'Target to check. Stackable.')
live_group.add_argument('--timeout', type=int, help = 'Pre-check timeout.')
live_group.add_argument('--disable-pre-check', action='store_true',help = 'Disables pre-check to see if the remote destination is alive. Will make enumeration take years!')
live_group.add_argument('-g', '--group', action='append', help = 'Localgroup name to look for. Stackable.')
#group = parser.add_parser('registry', help='Get secrets from registry files')
#group.add_argument('system', help='path to the SYSTEM registry hive')
#group.add_argument('--sam', help='path to the SAM registry hive')
#group.add_argument('--security', help='path to the SECURITY registry hive')
#group.add_argument('--software', help='path to the SOFTWARE registry hive')
#group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
#group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
def execute(self, args):
    """Route a parsed command line to this command group's handlers.

    `run` services the group's own top-level commands; `run_live` services
    the matching module of the `live` command. Both checks are evaluated
    independently, exactly as before.
    """
    if self.keywords and args.command in self.keywords:
        self.run(args)
    if self.live_keywords and args.command == 'live' and args.module in self.live_keywords:
        self.run_live(args)
def process_results(self, results, args):
    """Post-processing hook for command results; this group does not use it (no-op)."""
    pass
def run_live(self, args):
    """Dispatch the `live <module> <cmd>` remote-enumeration commands.

    Supported modules: share, session and localgroup; each currently
    implements a single `enum` command. Imports are kept local so the
    remote-enumeration dependencies are only loaded when actually used.
    The per-enumerator option handling, previously copy-pasted three
    times, is factored into _apply_live_enum_options().
    """
    if args.module == 'share':
        if args.cmd == 'enum':
            from pypykatz.remote.live.share.enumerator import ShareEnumerator
            se = ShareEnumerator()
            self._apply_live_enum_options(se, args)
            se.run()
    elif args.module == 'session':
        if args.cmd == 'enum':
            from pypykatz.remote.live.session.enumerator import SessionMonitor
            se = SessionMonitor()
            self._apply_live_enum_options(se, args)
            se.run()
    elif args.module == 'localgroup':
        if args.cmd == 'enum':
            from pypykatz.remote.live.localgroup.enumerator import LocalGroupEnumerator
            se = LocalGroupEnumerator()
            # Default groups of interest; overridden by -g/--group.
            se.groups = ['Remote Desktop Users','Administrators','Distributed COM Users']
            if args.group:
                se.groups = args.group
            self._apply_live_enum_options(se, args)
            se.run()

def _apply_live_enum_options(self, se, args):
    """Apply the target/timeout/output options shared by every live enumerator.

    Raises Exception when no target could be loaded from either the
    targets file or the stacked -t options.
    """
    if args.target_file:
        se.load_targets_file(args.target_file)
    if args.target:
        # NOTE(review): 'load_tagets' (sic) matches the original call sites --
        # presumably the enumerator API spells it this way; confirm upstream.
        se.load_tagets(args.target)
    if len(se.hosts) == 0:
        raise Exception('No targets loaded!')
    if args.timeout:
        se.timeout = args.timeout
    # Pre-check is on by default; --disable-pre-check turns it off.
    se.pre_check = True
    if args.disable_pre_check:
        se.pre_check = False
    se.to_json = args.json
    if args.outfile:
        se.out_file = args.outfile
def run(self, args):
    """Handler for this group's non-live commands; nothing to do here (no-op)."""
    pass
| 37.223602 | 174 | 0.686468 |
f1a1e8481a998da7176e4f971368fbf868dc2c57 | 28,617 | py | Python | uestc_eams/cli.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | 1 | 2020-07-25T13:53:35.000Z | 2020-07-25T13:53:35.000Z | uestc_eams/cli.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | null | null | null | uestc_eams/cli.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | null | null | null | '''
Commandline interface to accessing UESTC EAMS system.
'''
#import re
import os
import os.path
import getopt
import sqlite3
import pickle
from .session import Login, EAMSSession
from .base import ELECT, CANCEL, RecordMethodException
'''
Some constants
'''
# Exit / return codes shared by every CLI sub-command.
ERR_OK = 0        # success
ERR_PARAMS = 1    # bad command line arguments
ERR_NETWORK = 2   # login / network failure
ERR_DB = 3        # local account database failure

# Usage text printed by EAMSCommandShell.PrintHelp(); the single '%s' is
# substituted with argv[0].
HELP_TEXT = """
Access to UESTC EAMS system.
Usage: %s <command> [options] < <username> -p <password> | -i account_id>
Command:
login <account> [options] Login with account.
logout <account_id> Logout.
query Query information
elect Elect course.
Common options:
-p --password Specified account password.
-i --id <account_id> Specified account ID to logout.
login options:
--no-persist Do not keep login state.
query options:
--account List logined accounts.
--elect-platform List available course electing platform.
--electable <platform_id> List available courses in specified electing platform.
--elected <platform_id> List elected platform.
elect options:
-I --course-id <course_id> Course ID to elect.
-d --cancel <course_id> Cancel election of specified course.
-P --platform <platform_id> Specified platform.
-f --force Force to Elect. (experimental)
"""
# Options below are not implemented yet; kept for reference.
# -c --cash <cash> Cash to the course. If the specified course is elected, \n\
# cashes will be alter.\n\
# --semester List semesters.\n\
# --course <semester_id> List courses of specified semester.\n\

# Abbreviated weekday names indexed by (day - 1); used when printing course
# arrangement rows.
WEEK_STR = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat' ,'Sun')
def GetAccountDatabaseDirectory():
    """Return the directory that holds the account database (the user's home).

    os.path.expanduser is used instead of os.environ['HOME'] so the lookup
    does not raise KeyError when HOME is unset and also works on Windows.
    """
    return os.path.expanduser('~')
def GetAccountDatabaseName():
    """Return the file name of the hidden per-user account database."""
    return '.uestc_eams'
def GetAccountDatabasePath():
    """Return the full path of the account database file.

    os.path.join replaces the manual trailing-slash handling of the
    original implementation; it produces the same result whether or not
    the directory already ends with a separator.
    """
    return os.path.join(GetAccountDatabaseDirectory(), GetAccountDatabaseName())
class AccountDatabase:
    """SQLite-backed store of logged-in EAMS sessions.

    Each row of the `Accounts` table maps a username to a pickled
    EAMSSession. When `NoThrow` is True, sqlite operational errors are
    recorded on `self.Exception` (by the RecordMethodException decorator)
    instead of being raised.
    """

    def __init__(self, _db_path, _no_throw = False):
        # _db_path: path of the sqlite database file.
        # _no_throw: when True, operational errors are stored, not raised.
        self.__db_path = _db_path
        self.Exception = None   # last recorded database error (NoThrow mode)
        self.NoThrow = _no_throw
        self.__conn = self.__connect_to(_db_path)

    def __record_exception(self, _exception):
        """Store or re-raise _exception according to the NoThrow flag."""
        if self.NoThrow:
            self.Exception = _exception
        else:
            raise _exception
        return None

    def __connect_to(self, _db_path):
        """Open the database file and make sure the schema exists."""
        conn = sqlite3.connect(_db_path)
        self.__ensure_integrailty(conn)
        return conn

    @RecordMethodException(__record_exception, None, sqlite3.OperationalError)
    def __ensure_integrailty(self, conn):
        """Create the Accounts table when this is a fresh database."""
        cur = conn.execute("SELECT name FROM sqlite_master where type=='table' and name=='Accounts'")
        datas = cur.fetchall()
        cur.close()
        # Accounts table missing: create it in one exclusive transaction.
        if len(datas) < 1:
            cur = conn.executescript("""
                BEGIN EXCLUSIVE TRANSACTION;
                CREATE TABLE Accounts(
                    ID INTEGER PRIMARY KEY
                    , Username TEXT UNIQUE NOT NULL COLLATE NOCASE
                    , SessionObject BLOB NOT NULL);
                COMMIT;
                """)
            cur.close()

    @RecordMethodException(__record_exception, (None, None), sqlite3.OperationalError)
    def GetSessionFromUsername(self, _username):
        """Return (row_id, session) for _username, or (None, None) if absent."""
        if not isinstance(_username, str):
            raise TypeError('_username should be str.')
        cur = self.__conn.cursor()
        cur.execute('SELECT * FROM Accounts where (Username == ?)', (_username,))
        res = cur.fetchall()
        cur.close()
        if len(res) < 1:
            return (None, None)
        return (res[0][0], pickle.loads(res[0][2]))

    @RecordMethodException(__record_exception, (None, None), sqlite3.OperationalError)
    def GetSessionFromID(self, _id):
        """Return (username, session) for row _id, or (None, None) if absent."""
        if not isinstance(_id, int):
            raise TypeError('_id should be an integar.')
        cur = self.__conn.cursor()
        cur.execute('SELECT Username, SessionObject FROM Accounts WHERE (ID == ?)', (_id,))
        res = cur.fetchall()
        cur.close()
        if len(res) < 1:
            return (None, None)
        return (res[0][0], pickle.loads(res[0][1]))

    @RecordMethodException(__record_exception, None, sqlite3.OperationalError)
    def ListAll(self):
        """Return every (ID, Username, SessionObject) row of the table."""
        cur = self.__conn.cursor()
        cur.execute('SELECT * FROM Accounts')
        data = cur.fetchall()
        cur.close()
        return data

    @RecordMethodException(__record_exception, False, sqlite3.OperationalError)
    def UpdateSessionByUsername(self, _username, _object):
        """Re-pickle _object over the stored session of _username.

        A no-op (still True) when no row matches _username.
        """
        if not isinstance(_username, str):
            raise TypeError('_username should be str.')
        if not isinstance(_object, EAMSSession):
            raise TypeError('_object shoule be EAMSSession')
        raw = pickle.dumps(_object)
        cur = self.__conn.cursor()
        cur.execute('UPDATE Accounts SET SessionObject=? where (Username == ?)', (raw, _username))
        self.__conn.commit()
        cur.close()
        return True

    @RecordMethodException(__record_exception, False, sqlite3.OperationalError)
    def SaveSession(self, _username, _object):
        """Insert a new (username, pickled session) row; True on success."""
        if not isinstance(_username, str):
            raise TypeError('_username should be str.')
        if not isinstance(_object, EAMSSession):
            raise TypeError('_object shoule be EAMSSession')
        raw = pickle.dumps(_object)
        cur = self.__conn.cursor()
        cur.execute('INSERT INTO Accounts(Username, SessionObject) VALUES (?, ?)' , (_username, raw))
        self.__conn.commit()
        return True

    @RecordMethodException(__record_exception, None, sqlite3.OperationalError)
    def QueryIDByUsername(self, _username):
        """Return the row ID of _username, or None when not stored."""
        if not isinstance(_username, str):
            raise TypeError('_username should be str.')
        cur = self.__conn.cursor()
        cur.execute('SELECT ID FROM Accounts where Username == ?' , (_username,))
        res = cur.fetchall()
        cur.close()
        if len(res) < 1:
            return None
        return res[0][0]

    @RecordMethodException(__record_exception, False, sqlite3.OperationalError)
    def QueryUsernameByID(self, _id):
        """Return the username stored under row _id, or None when absent."""
        if not isinstance(_id, int):
            raise TypeError('_id should be an integer.')
        cur = self.__conn.cursor()
        cur.execute('SELECT Username FROM Accounts where ID == ?', (_id,))
        res = cur.fetchall()
        cur.close()
        if len(res) < 1:
            return None
        return res[0][0]

    @RecordMethodException(__record_exception, False, sqlite3.OperationalError)
    def DropSession(self, _username = None, _id = None):
        """Delete a stored session selected by _id and/or _username.

        Returns True on success, False when the id/username pair does not
        refer to the same row. Raises TypeError on bad arguments.
        Fixes vs. the original: bare `raise('...')` calls (which raise a
        TypeError about non-exception objects at runtime) are proper
        TypeErrors now, the nonexistent QueryUsernameFromID call is
        corrected to QueryUsernameByID, and the cross-check compares the
        given username (not the id) against the stored one.
        """
        if _id and not isinstance(_id, int):
            raise TypeError('_id should be an integer.')
        if _username and not isinstance(_username, str):
            raise TypeError('_username should be str.')
        if _id:
            if _username:
                # When both are given, make sure they refer to the same row.
                username = self.QueryUsernameByID(_id)
                if not username or _username != username:
                    return False
            cur = self.__conn.cursor()
            cur.execute('DELETE FROM Accounts WHERE ID == ?', (_id,))
            self.__conn.commit()
        elif _username:
            cur = self.__conn.cursor()
            cur.execute('DELETE FROM Accounts WHERE Username == ?' , (_username,))
            self.__conn.commit()
        else:
            raise TypeError('_id or _username should be given.')
        return True
def TablePrint(_data, _limits, _margin):
    '''
    Print one table row, wrapping every cell inside a fixed-width box.

    _data    row of cell values (str or int) to print.
    _limits  display width (box size) of each cell.
    _margin  number of blank columns appended after each box.

    Cells longer than their box are wrapped onto additional physical
    lines; CJK characters are counted as two display columns wide.
    Returns True when the whole row has been printed.
    '''
    if not isinstance(_data, list):
        if isinstance(_data, tuple):
            _data = list(_data)
        else:
            raise TypeError('_dict should be list or tuple')
    if not isinstance(_limits, list) and not isinstance(_limits, tuple):
        raise TypeError('_limits should be list or tuple')
    if len(_data) != len(_limits):
        raise ValueError('_limits should have same length with _dict.')
    # idx_vec[i]: index of the first not-yet-printed character of cell i.
    idx_vec = [0 for i in range(0, len(_data))]
    len_vec = []
    # Convert all elements to string.
    for idx in range(0, len(_data)):
        if str == type(_data[idx]):
            _data[idx] = '%s' % _data[idx]
        elif int == type(_data[idx]):
            _data[idx] = '%d' % _data[idx]
        else:
            raise TypeError('Unsupport element type: %s' % type(_data[idx]).__name__)
            # NOTE(review): unreachable after the raise above.
            return False
        len_vec.append(len(_data[idx]))
    end = False
    while not end:
        # Each pass prints one physical line; loop until every cell is done.
        end = True
        for idx in range(0, len(_data)):
            # element is fully printed. so just add padding
            if len_vec[idx] <= idx_vec[idx]:
                print(' '*(_limits[idx] + _margin[idx]), end = '')
                continue
            # calculate used spaces (occup) for the slice that fits the box
            occup = 0 ; ldx = idx_vec[idx]; nidx = -1
            while occup < _limits[idx] and ldx < len_vec[idx]:
                # drawback : chinese character only (CJK ideographs count as
                # two display columns; other wide glyphs are not handled)
                cord = ord(_data[idx][ldx])
                if 0x4e00 < cord and cord < 0x9fa5:
                    if occup + 2 > _limits[idx]:
                        break
                    occup += 2
                elif _data[idx][ldx] == '\n':
                    nidx = ldx
                    break
                else:
                    occup += 1
                ldx += 1
            # If newline is found (\n), cut the string.
            if nidx != -1:
                print('%s' % _data[idx][idx_vec[idx]:nidx] + ' '*(_limits[idx] - occup), end = '')
                ldx = nidx + 1
            # If box cannot contain the remain part, cut and squeeze it to next line.
            else:
                print('%s' % _data[idx][idx_vec[idx]: ldx] + ' '*(_limits[idx] - occup), end = '')
            # loop until all elements are fully printed
            if ldx < len_vec[idx]:
                end = False
            idx_vec[idx] = ldx
            # pad with margin blanks
            print(' ' * _margin[idx], end = '')
        # next line
        print('')
    return True
class EAMSCommandShell:
    """Command line front-end of the UESTC EAMS client.

    Parses argv, keeps login sessions persisted in AccountDatabase and
    dispatches the login / logout / query / elect sub-commands.
    """

    def __init__(self):
        self.__account_db_loaded = False
        self.__password = None
        self.__username = None
        self.__session = None
        self.__session_id = None

    def __load_account_database(self):
        """Open the per-user account database once; True on success."""
        if self.__account_db_loaded:
            return True
        try:
            self.__account_db = AccountDatabase(GetAccountDatabasePath())
        except Exception:
            # Database unusable -- let the caller print a friendly message.
            return False
        # Record sqlite errors on .Exception instead of raising.
        self.__account_db.NoThrow = True
        # Fixed: the loaded flag was never set, re-opening the DB each call.
        self.__account_db_loaded = True
        return True

    def __login(self, _user, _password, _id = None, _db_first = True):
        """Resolve an EAMS session for an account.

        Returns (error_code, cached_id_or_username, session):
          * with _id: load the persisted session of that database row;
          * otherwise: optionally look up a persisted session of _user
            (when _db_first) and fall back to a fresh network login.
        """
        # Firstly, check whether logined.
        if _id:
            username, session = self.__account_db.GetSessionFromID(_id)
            if not session:
                if self.__account_db.Exception:
                    return (ERR_DB, None, None)
                else:
                    return (ERR_PARAMS, None, None)
            return (ERR_OK, username, session)
        if _db_first:
            account_id, session = self.__account_db.GetSessionFromUsername(_user)
            if session:
                return (ERR_OK, account_id, session)
        # No cached session: authenticate against EAMS (fixed to use the
        # parameters instead of reaching back into self).
        try:
            session = Login(_user, _password)
            if not session:
                return (ERR_NETWORK, None, None)
        except Exception:
            return (ERR_NETWORK, None, None)
        return ERR_OK, None, session

    def __try_login(self):
        """Login with the already-parsed credentials / session ID.

        Always returns a (error_code, session) pair; session is None on
        failure. (The original returned a bare code on errors, which broke
        the `retval, session = ...` call sites.)
        """
        retval, id_user, session = self.__login(self.__username, self.__password, self.__id, _db_first = False)
        if retval == ERR_DB:
            print('Cannot account to database: ', end = '')
            print(self.__account_db.Exception)
            return ERR_DB, None
        elif retval == ERR_PARAMS:
            print('No session %d.' % self.__id)
            return ERR_PARAMS, None
        elif retval == ERR_NETWORK:
            print('Cannot login.')
            return ERR_NETWORK, None
        if id_user:
            # Session restored from the database by ID (fixed: previously
            # tested the builtin `id`, which is always truthy).
            print('Username : %s (ID : %d)' % (id_user, self.__id))
            self.__username = id_user
        else:
            print('Login successfully.')
        return retval, session

    def DoLogin(self, _arguments):
        """`login` sub-command: authenticate and persist the session."""
        long_opt = ['no-persist', 'password']
        self.__no_persist = False
        try:
            opts, extra = getopt.gnu_getopt(_arguments, 'p:', long_opt)
        except getopt.GetoptError as e:
            print(e)
            return ERR_PARAMS
        # Check account specified
        if len(extra) > 1:
            print('Too many accounts.')
            return ERR_PARAMS
        elif len(extra) == 0:
            print('Must specified an account.')
            return ERR_PARAMS
        self.__username = extra[0]
        # Other options
        for opt, val in opts:
            if opt == '-p' or opt == '--password':
                if len(val) == 0:
                    print('Password is too short.')
                    return ERR_PARAMS
                self.__password = val
            elif opt == '--no-persist':
                self.__no_persist = True
            else:
                print('Unknown option : %s' % opt)
        if not self.__password:
            print('Logging in with no password is not supported.')
            return ERR_PARAMS
        if not self.__load_account_database():
            print('Cannot access to account database.')
            return ERR_DB
        # Login with the specified account; reject duplicated logins.
        retval, account_id, session = self.__login(self.__username, self.__password)
        if account_id:
            print('User %s has logined. (ID : %d)' % (self.__username, account_id))
            return ERR_PARAMS
        if retval != ERR_OK:
            print('Cannot login.')
            # Fixed: `return ret` referenced an undefined name.
            return retval
        if self.__no_persist:
            print('Login successfully. State not saved.')
            return ERR_OK
        if not self.__account_db.SaveSession(self.__username, session):
            print('Failed to save session : ', end = '')
            print(self.__account_db.Exception)
            return ERR_DB
        # Check whether session is saved
        account_id = self.__account_db.QueryIDByUsername(self.__username)
        if account_id == None:
            print('Cannot save session : ', end = '')
            print(self.__account_db.Exception)
            return ERR_DB
        print('Login successfully. (ID : %d)' % account_id)
        return ERR_OK

    def DoLogout(self, _arguments):
        """`logout` sub-command: drop persisted sessions by name or ID."""
        long_opt = ['id']
        try:
            opts, extra = getopt.gnu_getopt(_arguments, 'i:', long_opt)
        except getopt.GetoptError as e:
            # Consistent with DoLogin: report bad options instead of crashing.
            print(e)
            return ERR_PARAMS
        self.__id = None
        for opt, val in opts:
            if opt == '--id' or opt == '-i':
                if not val:
                    print('option \'-i\' : Missing ID.')
                    return ERR_PARAMS
                try:
                    self.__id = int(val)
                except ValueError:
                    print('Illegal ID : %s.' % val)
                    return ERR_PARAMS
        if not self.__id and len(extra) < 1:
            print('Accounts missing.')
            return ERR_PARAMS
        if not self.__load_account_database():
            print('Cannot access to account database.')
            return ERR_DB
        if self.__id:
            username, session = self.__account_db.GetSessionFromID(self.__id)
            if not username:
                print('No account with ID %d' % self.__id)
            else:
                print('Logout %s' % username)
                if not self.__account_db.DropSession(_id = self.__id):
                    print('Failed : ', end = '')
                    print(self.__account_db.Exception)
                else:
                    session.Logout()
        for username in extra:
            account_id, session = self.__account_db.GetSessionFromUsername(username)
            if not account_id:
                print('No account %s' % username)
                continue
            if not self.__account_db.DropSession(_username = username):
                print('Failed at %s : ' % username, end = '')
                print(self.__account_db.Exception)
            else:
                print('%s Logouted.' % username)
                session.Logout()
        return ERR_OK

    def ListElectPlatform(self, _session):
        """Print the names of all available electing platforms."""
        print('Available platform ID:')
        for name in _session.ElectCourse.Platform:
            print(name)
        return ERR_OK

    def __print_elect_course_table(self, _list, _counts):
        """Render the courses in _list as a fixed-width table via TablePrint.

        _counts maps course id (int) to {'current', 'limit'} counters; a
        course without counters is shown as '-/-/-'.
        """
        limit = (6, 10, 7, 10, 8, 12, 15, 8, 8, 10, 20)
        margin = (2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)
        print('ID Name Credits Count Teachers Type Arrange Exam WeekHour Campus Remark')
        for course in _list:
            teacher = ''
            arrange = ''
            for t in course['teachers']:
                teacher += t + '\n'
            for t in course['arrange']:
                arrange += '%s week %d-%d %s\n' % (WEEK_STR[t['day'] - 1], t['start'], t['end'], t['room'])
            this_count = _counts.get(int(course['id']))
            line = (
                course['id']
                , course['name'].strip()
                , '%.1f' % course['credits']
                # current / remaining / limit
                , '-/-/-' if not this_count else '%d/%d/%d' % (this_count['current'], this_count['limit'] - this_count['current'], this_count['limit'])
                , teacher
                , course['type'].strip()
                , arrange
                , course['exam'].strip()
                , course['week_hour']
                , course['campus'].strip()
                , course['remark'].strip()
            )
            TablePrint(line, limit, margin)
        return ERR_OK

    def ListElectable(self, _plat):
        """Print every electable course of platform _plat."""
        return self.__print_elect_course_table(_plat.Courses, _plat.Counts)

    def ListElected(self, _plat):
        """Print the courses already elected on platform _plat."""
        elected = [course for course in [_plat.GetCourseByID(cid) for cid in _plat.Elected] if course]
        return self.__print_elect_course_table(elected, _plat.Counts)

    def ListAccounts(self):
        """Print every persisted session (ID and username)."""
        accounts = self.__account_db.ListAll()
        if accounts == None:
            print('Cannot access to database: ', end = '')
            # Fixed: the error detail was never printed.
            print(self.__account_db.Exception)
            return ERR_DB
        print('Session ID Username')
        for account in accounts:
            print('%-14d %-15s' % (account[0], account[1]))
        return ERR_OK

    def DoList(self, _arguments):
        """`query` sub-command: list accounts / platforms / courses."""
        long_opt = ['account', 'semester', 'course', 'elect-platform', 'electable=', 'elected=', 'id=']
        try:
            opts, extra = getopt.gnu_getopt(_arguments, 'i:p:', long_opt)
        except getopt.GetoptError as e:
            print(e)
            return ERR_PARAMS
        self.__list_semester = False
        self.__list_account = False
        self.__list_elect_platform = False
        self.__list_electable = False
        # Fixed: these two were never initialized, producing AttributeError
        # when the corresponding options were absent.
        self.__list_elected = False
        self.__platform_id = None
        self.__list_course = False
        self.__password = None
        self.__username = None
        self.__id = None
        op_count = 0
        for opt, val in opts:
            if opt == '--account':
                self.__list_account = True
                op_count += 1
            elif opt == '--semester':
                self.__list_semester = True
                op_count += 1
            elif opt == '--elect-platform':
                self.__list_elect_platform = True
                op_count += 1
            elif opt == '--electable':
                self.__list_electable = True
                if not val:
                    print('Platform ID missing...')
                    return ERR_PARAMS
                else:
                    self.__platform_id = val
                op_count += 1
            elif opt == '--elected':
                self.__list_elected = True
                if not val:
                    print('Platform ID missing.')
                    return ERR_PARAMS
                else:
                    self.__platform_id = val
                op_count += 1
            elif opt == '--id' or opt == '-i':
                if not val:
                    print('option \'--id\' : ID value missing.')
                    return ERR_PARAMS
                else:
                    try:
                        self.__id = int(val)
                    except ValueError:
                        print('Illegal ID : %s.' % val)
                        return ERR_PARAMS
            elif opt == '-p' or opt == '--password':
                if not val:
                    print('option \'%s\' : password missing.' % opt)
                else:
                    self.__password = val
            else:
                print('Unknown options : %s' % opt)
                return ERR_PARAMS
        if op_count > 1:
            print('Too many query options.')
            return ERR_PARAMS
        # Fixed: without any query option the code below fell through to an
        # undefined `retval`.
        if op_count == 0:
            print('Query option missing.')
            return ERR_PARAMS
        if not self.__load_account_database():
            print('Cannot access to account database')
            return ERR_DB
        if self.__list_account:
            return self.ListAccounts()
        if len(extra) > 1:
            print('Too many accounts.')
            return ERR_PARAMS
        elif len(extra) > 0:
            self.__username = extra[0]
        if self.__id and self.__username:
            print('confilct : options for session %d and account %s' % (self.__id, self.__username))
            return ERR_PARAMS
        if not self.__id and not self.__username:
            print('Account missing.')
            return ERR_PARAMS
        if self.__username and not self.__password:
            print('password missing. (see \'--password\' or \'-p\')')
            return ERR_PARAMS
        # load session
        retval, session = self.__try_login()
        if retval != ERR_OK:
            # Fixed: `return retvel` referenced an undefined name.
            return retval
        if self.__list_semester:
            print('Support later.')
        else:
            if not session.ElectCourse.Opened:
                print('Course election is not opened.')
                return ERR_OK
            if self.__list_elect_platform:
                retval = self.ListElectPlatform(session)
            else:
                plat = session.ElectCourse.Platform.get(self.__platform_id)
                if not plat:
                    print('Platform not found: %s' % self.__platform_id)
                    # Fixed: continuing here crashed on a None platform.
                    return ERR_PARAMS
                print('Current platform: %s' % self.__platform_id)
                if self.__list_electable:
                    retval = self.ListElectable(plat)
                elif self.__list_elected:
                    retval = self.ListElected(plat)
            if retval != ERR_OK:
                return retval
        # Persist possibly-refreshed cookies back to the database; a no-op
        # for accounts that were never persisted (consistent with DoElect).
        if not self.__account_db.UpdateSessionByUsername(self.__username, session):
            print('Cannot update session: ', end = '')
            print(self.__account_db.Exception)
        return ERR_OK

    def DoElect(self, _arguments):
        """`elect` sub-command: elect or cancel a course on a platform."""
        long_opt = ['course-id=', 'cancel', 'password=', 'id=', 'platform=', 'force']
        try:
            opts, extra = getopt.gnu_getopt(_arguments, 'i:p:I:P:f', long_opt)
        except getopt.GetoptError as e:
            print(e)
            return ERR_PARAMS
        course_id = None
        force = False
        self.__password = None
        self.__username = None
        self.__id = None
        self.__cancel = False
        self.__platform_id = None
        for opt, val in opts:
            if opt == '--course-id' or opt == '-I':
                if not val:
                    print('missing Course ID for option \'%s\'' % opt)
                    return ERR_PARAMS
                try:
                    course_id = int(val)
                except ValueError:
                    print('Illegal ID : %s.' % val)
                    return ERR_PARAMS
            elif opt == '--password' or opt == '-p':
                if not val:
                    print('option \'%s\' : password missing.' % opt)
                else:
                    self.__password = val
            elif opt == '--cancel':
                self.__cancel = True
            elif opt == '--id' or opt == '-i':
                if not val:
                    print('option \'--id\' : ID value missing.')
                    return ERR_PARAMS
                else:
                    try:
                        self.__id = int(val)
                    except ValueError:
                        print('Illegal ID : %s.' % val)
                        return ERR_PARAMS
            elif opt == '--platform' or opt == '-P':
                if not val:
                    print('option \'--id\' : Platform missing.')
                    return ERR_PARAMS
                else:
                    self.__platform_id = val
            elif opt == '--force' or opt == '-f':
                force = True
            else:
                print('Unknown options : %s' % opt)
        if not self.__load_account_database():
            print('Cannot access to account database')
            return ERR_DB
        if len(extra) > 1:
            print('Too many accounts.')
            return ERR_PARAMS
        elif len(extra) > 0:
            self.__username = extra[0]
        if self.__id and self.__username:
            print('confilct : options for session %d and account %s' % (self.__id, self.__username))
            return ERR_PARAMS
        if not self.__id and not self.__username:
            print('Account missing.')
            return ERR_PARAMS
        if self.__username and not self.__password:
            print('password missing. (see \'--password\' or \'-p\')')
            return ERR_PARAMS
        if not self.__platform_id:
            print('Platform missing. (see \'--platform\' or \'-P\')')
            return ERR_PARAMS
        # Fixed: a missing -I previously crashed the '%d' formatting below.
        if course_id is None:
            print('Course ID missing. (see \'--course-id\' or \'-I\')')
            return ERR_PARAMS
        retval, session = self.__try_login()
        if retval != ERR_OK:
            # Fixed: `return retvel` referenced an undefined name.
            return retval
        plat = session.ElectCourse.Platform.get(self.__platform_id)
        if not plat:
            print('Platform not found : %s' % self.__platform_id)
            # Fixed: continuing here crashed on a None platform.
            return ERR_PARAMS
        course = plat.GetCourseByID(course_id)
        if not course:
            print('Cannot found the course with ID %d.' % course_id)
            if not force:
                return ERR_PARAMS
        if self.__cancel:
            if not course:
                print('Cancel %d by force.' % course_id)
            else:
                print('Cancel %s (%d).' % (course['name'], course_id))
            result, message = plat.Elect(course_id, CANCEL)
        else:
            if not course:
                print('Elect %d by force.' % course_id)
            else:
                print('Elect %s (%d).' % (course['name'], course_id))
            result, message = plat.Elect(course_id, ELECT)
        if result:
            print('Succeed.')
        else:
            print('Failed : %s' % message)
        if not self.__account_db.UpdateSessionByUsername(self.__username, session):
            print('Cannot update state : ', end = '')
            print(self.__account_db.Exception)
        return ERR_OK

    def PrintHelp(self, _arguments):
        """Print usage; _arguments[0] is used as the program name."""
        print(HELP_TEXT % _arguments[0])

    def Run(self, _arguments):
        """Entry point: dispatch argv to the matching Do* handler."""
        len_arg = len(_arguments)
        if len_arg < 2:
            self.PrintHelp(_arguments)
            return ERR_OK
        if _arguments[1].lower() == 'login':
            return self.DoLogin(_arguments[2:])
        elif _arguments[1].lower() == 'logout':
            return self.DoLogout(_arguments[2:])
        elif _arguments[1].lower() == 'query':
            return self.DoList(_arguments[2:])
        elif _arguments[1].lower() == 'elect':
            return self.DoElect(_arguments[2:])
        else:
            self.PrintHelp(_arguments)
            print('Unknown command : %s' % _arguments[1])
            return 1
| 35.549068 | 151 | 0.525317 |
f8eb5adbeecb3347527d932f785083d5d6ee80f6 | 3,331 | py | Python | heron/instance/src/python/utils/misc/communicator.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 2 | 2016-07-04T07:10:31.000Z | 2018-03-28T16:59:02.000Z | heron/instance/src/python/utils/misc/communicator.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 1 | 2019-05-08T22:30:16.000Z | 2019-05-08T22:30:16.000Z | heron/instance/src/python/utils/misc/communicator.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 1 | 2017-06-05T17:55:45.000Z | 2017-06-05T17:55:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''communicator.py: module responsible for communication between Python heron modules'''
import sys
import Queue
from heron.common.src.python.utils.log import Log
class HeronCommunicator(object):
  """HeronCommunicator: a non-blocking FIFO used between Heron threads.

  It wraps ``Queue.Queue`` and optionally notifies a producer/consumer
  callback when the opposite side polls/offers. Dynamic tuning of the
  expected available capacity is not implemented, as it is unnecessary
  for the single-threaded instance.
  """
  def __init__(self, producer_cb=None, consumer_cb=None):
    """Initialize HeronCommunicator

    :param producer_cb: called (usually on the producer thread) each time the
                        consumer ``poll()``-s successfully. Default ``None``.
    :param consumer_cb: called (usually on the consumer thread) each time the
                        producer ``offer()``-s successfully. Default ``None``.
    """
    self._producer_callback = producer_cb
    self._consumer_callback = consumer_cb
    self._buffer = Queue.Queue()
    self.capacity = sys.maxsize

  def register_capacity(self, capacity):
    """Set the nominal capacity (defaults to ``sys.maxsize``)."""
    self.capacity = capacity

  def get_available_capacity(self):
    """Return how many more items fit before reaching the capacity (>= 0)."""
    remaining = self.capacity - self.get_size()
    return remaining if remaining > 0 else 0

  def get_size(self):
    """Return the number of buffered items."""
    return self._buffer.qsize()

  def is_empty(self):
    """Return True when nothing is buffered."""
    return self._buffer.empty()

  def poll(self):
    """Remove and return the head item without blocking.

    Raises ``Queue.Empty`` when there is nothing to take.
    """
    try:
      head = self._buffer.get(block=False)
      if self._producer_callback is not None:
        self._producer_callback()
      return head
    except Queue.Empty:
      Log.debug("%s: Empty in poll()" % str(self))
      raise Queue.Empty

  def offer(self, item):
    """Append ``item`` without blocking; returns True on success.

    Raises ``Queue.Full`` when the underlying queue rejects the item.
    """
    try:
      self._buffer.put(item, block=False)
      if self._consumer_callback is not None:
        self._consumer_callback()
      return True
    except Queue.Full:
      Log.debug("%s: Full in offer()" % str(self))
      raise Queue.Full

  def clear(self):
    """Drain every buffered item."""
    while not self.is_empty():
      self.poll()

  def __str__(self):
    return "HeronCommunicator"
| 32.980198 | 97 | 0.698889 |
5b04bbd2bf9e835c93fb1c113cd2b0eeb617a49d | 219 | py | Python | 04/abc176_b.py | koshin117/python-learning | 68dd99e2f72fff7507a874c11511415fef3c9354 | [
"MIT"
] | 1 | 2021-03-29T08:30:19.000Z | 2021-03-29T08:30:19.000Z | 04/abc176_b.py | koshin117/python-learning | 68dd99e2f72fff7507a874c11511415fef3c9354 | [
"MIT"
] | null | null | null | 04/abc176_b.py | koshin117/python-learning | 68dd99e2f72fff7507a874c11511415fef3c9354 | [
"MIT"
] | null | null | null | def main():
# input
N = input()
# compute
As = list(map(int, N))
x = sum(As)
# output
if x % 9 == 0:
print("Yes")
else:
print("No")
if __name__ == '__main__':
main()
| 14.6 | 26 | 0.438356 |
1deb7829b3603678194bda9709250905a193dd78 | 6,401 | bzl | Python | mediapipe/framework/port/build_config.bzl | hangqiu/MLEXray | 11762df1b1113e86f44d81f201d54fb6ac3a2242 | [
"MIT"
] | 2 | 2022-02-11T23:49:58.000Z | 2022-03-15T20:24:46.000Z | mediapipe/framework/port/build_config.bzl | hangqiu/ML-EXray | 23d0ff7cd39854bbd8fcc8e050bb3c1787edb056 | [
"MIT"
] | null | null | null | mediapipe/framework/port/build_config.bzl | hangqiu/ML-EXray | 23d0ff7cd39854bbd8fcc8e050bb3c1787edb056 | [
"MIT"
] | null | null | null | # TODO: Split build rules into several individual files to make them
# more manageable.
""".bzl file for mediapipe open source build configs."""
load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library")
load("@rules_proto//proto:defs.bzl", "proto_library")
def provided_args(**kwargs):
    """Returns the keyword arguments omitting None arguments."""
    filtered = {}
    for key, value in kwargs.items():
        if value != None:
            filtered[key] = value
    return filtered
def replace_suffix(string, old, new):
    """Returns a string with an old suffix replaced by a new suffix.

    Fixed vs. the historical `cond and x or y` form, which returned the
    original string whenever the replaced result was falsy (e.g. new == "")
    and truncated the whole string when old == "".
    """
    if old and string.endswith(old):
        return string[:-len(old)] + new
    return string
def replace_deps(deps, old, new, drop_google_protobuf = True):
    """Returns deps with an old suffix replaced by a new suffix.

    Args:
      deps: the specified dep targets.
      old: the suffix to remove.
      new: the suffix to insert.
      drop_google_protobuf: if true, omit google/protobuf deps.

    Returns:
      the modified dep targets.
    """
    remaining = deps
    if drop_google_protobuf:
        remaining = [d for d in remaining if not d.startswith("@com_google_protobuf//")]
    # Well-known types always map to the cc_wkt_protos target first.
    renamed = [replace_suffix(d, "any_proto", "cc_wkt_protos") for d in remaining]
    return [replace_suffix(d, old, new) for d in renamed]
# TODO: load this macro from a common helper file.
def mediapipe_proto_library(
        name,
        srcs,
        deps = [],
        exports = None,
        visibility = None,
        testonly = None,
        compatible_with = None,
        def_proto = True,
        def_cc_proto = True,
        def_py_proto = True,
        def_java_lite_proto = True,
        def_portable_proto = True,
        def_objc_proto = True,
        def_java_proto = True,
        def_jspb_proto = True,
        portable_deps = None):
    """Defines the proto_library targets needed for all mediapipe platforms.

    Args:
      name: the new proto_library target name.
      srcs: the ".proto" source files to compile.
      deps: the proto_library targets for all referenced protobufs.
      exports: deps that are published with "import public".
      portable_deps: the portable_proto_library targets for all referenced protobufs.
      visibility: visibility of this target.
      testonly: true means the proto can be used for testing only.
      compatible_with: a list of environments the rule is compatible with.
      def_proto: define the proto_library target
      def_cc_proto: define the cc_proto_library target
      def_py_proto: define the py_proto_library target
      def_java_lite_proto: define the java_lite_proto_library target
      def_portable_proto: define the portable_proto_library target
      def_objc_proto: define the objc_proto_library target
      def_java_proto: define the java_proto_library target
      def_jspb_proto: define the jspb_proto_library target
    """
    # These flavors are not generated by the open source build; the
    # parameters are accepted (and ignored) for call-site compatibility.
    _ignore = [def_portable_proto, def_objc_proto, def_java_proto, def_jspb_proto, portable_deps]

    # The proto_library targets for the compiled ".proto" source files.
    proto_deps = [":" + name]

    if def_proto:
        # native.proto_library(**provided_args(
        proto_library(**provided_args(
            name = name,
            srcs = srcs,
            deps = deps,
            exports = exports,
            visibility = visibility,
            testonly = testonly,
            compatible_with = compatible_with,
        ))

    if def_cc_proto:
        # C++ bindings: every "<x>_proto" dep becomes "<x>_cc_proto".
        cc_deps = replace_deps(deps, "_proto", "_cc_proto", False)
        mediapipe_cc_proto_library(**provided_args(
            name = replace_suffix(name, "_proto", "_cc_proto"),
            srcs = srcs,
            deps = proto_deps,
            cc_deps = cc_deps,
            visibility = visibility,
            testonly = testonly,
        ))

    if def_py_proto:
        # Python bindings: every "<x>_proto" dep becomes "<x>_py_pb2".
        py_deps = replace_deps(deps, "_proto", "_py_pb2")
        mediapipe_py_proto_library(**provided_args(
            name = replace_suffix(name, "_proto", "_py_pb2"),
            srcs = srcs,
            proto_deps = proto_deps,
            py_proto_deps = py_deps,
            api_version = 2,
            visibility = visibility,
            testonly = testonly,
        ))

    if def_java_lite_proto:
        native.java_lite_proto_library(**provided_args(
            name = replace_suffix(name, "_proto", "_java_proto_lite"),
            deps = proto_deps,
            visibility = visibility,
            testonly = testonly,
            compatible_with = compatible_with,
        ))
def mediapipe_py_proto_library(
        name,
        srcs,
        visibility = None,
        py_proto_deps = [],
        proto_deps = None,
        api_version = None,
        testonly = 0):
    """Generate py_proto_library for mediapipe open source version.
    Args:
      name: the name of the py_proto_library.
      api_version: api version for bazel use only.
      srcs: the .proto files of the py_proto_library for Bazel use.
      visibility: visibility of this target.
      py_proto_deps: a list of dependency labels for Bazel use; must be py_proto_library.
      proto_deps: a list of dependency labels for bazel use.
      testonly: test only proto or not.
    """
    # api_version / proto_deps are accepted only for call-site compatibility
    # with other build flavors; they are deliberately unused here.
    _ignore = [api_version, proto_deps]
    # Pin the runtime/compiler to the com_google_protobuf workspace so all
    # generated Python protos share one protobuf runtime.
    py_proto_library(**provided_args(
        name = name,
        srcs = srcs,
        visibility = visibility,
        default_runtime = "@com_google_protobuf//:protobuf_python",
        protoc = "@com_google_protobuf//:protoc",
        deps = py_proto_deps + ["@com_google_protobuf//:protobuf_python"],
        testonly = testonly,
    ))
def mediapipe_cc_proto_library(name, srcs, visibility = None, deps = [], cc_deps = [], testonly = 0):
    """Generate cc_proto_library for mediapipe open source version.
    Args:
      name: the name of the cc_proto_library.
      srcs: the .proto files of the cc_proto_library for Bazel use.
      visibility: visibility of this target.
      deps: a list of dependency labels for Bazel use; must be cc_proto_library.
      cc_deps: C++ proto dependencies actually forwarded to the rule.
      testonly: test only proto or not.
    """
    # `deps` is kept for call-site compatibility; only `cc_deps` is forwarded.
    _ignore = [deps]
    cc_proto_library(**provided_args(
        name = name,
        srcs = srcs,
        visibility = visibility,
        deps = cc_deps,
        testonly = testonly,
        cc_libs = ["@com_google_protobuf//:protobuf"],
        protoc = "@com_google_protobuf//:protoc",
        default_runtime = "@com_google_protobuf//:protobuf",
        # alwayslink keeps the generated registration code from being dropped
        # by the linker even when no symbol is referenced directly.
        alwayslink = 1,
    ))
| 37 | 101 | 0.645993 |
a6a12f4a1bc8a7dbb5f12ad75b074abbde80b3f9 | 4,074 | py | Python | scrapy/tests/test_squeue.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
] | 1 | 2016-01-01T14:58:12.000Z | 2016-01-01T14:58:12.000Z | scrapy/tests/test_squeue.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
] | null | null | null | scrapy/tests/test_squeue.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
] | null | null | null | from scrapy.tests import test_utils_queue as t
from scrapy.squeue import MarshalFifoDiskQueue, MarshalLifoDiskQueue, PickleFifoDiskQueue, PickleLifoDiskQueue
from scrapy.item import Item, Field
from scrapy.http import Request
from scrapy.contrib.loader import ItemLoader
class TestItem(Item):
    """Minimal scrapy Item with one field; serialization fixture for the queue tests."""
    name = Field()
def test_processor(x):
    # Doubles its input ('x' -> 'xx'); installed as TestLoader.name_out so a
    # round-tripped loader can be verified by calling its output processor.
    # NOTE(review): the test_ prefix means pytest may also try to collect this
    # as a test function — intentional legacy naming, left unchanged.
    return x + x
class TestLoader(ItemLoader):
    """ItemLoader fixture; its static output processor doubles the value (see test_processor)."""
    default_item_class = TestItem
    name_out = staticmethod(test_processor)
class MarshalFifoDiskQueueTest(t.FifoDiskQueueTest):
    """FIFO disk-queue suite using marshal serialization."""
    # Subclasses override chunksize to exercise chunk-boundary handling.
    chunksize = 100000
    def queue(self):
        """Build the queue under test in the per-test directory self.qdir."""
        return MarshalFifoDiskQueue(self.qdir, chunksize=self.chunksize)
    def test_serialize(self):
        """Mixed marshal-able values round-trip in FIFO order."""
        q = self.queue()
        q.push('a')
        q.push(123)
        q.push({'a': 'dict'})
        self.assertEqual(q.pop(), 'a')
        self.assertEqual(q.pop(), 123)
        self.assertEqual(q.pop(), {'a': 'dict'})
    def test_nonserializable_object(self):
        """marshal cannot serialize a lambda; push must raise ValueError."""
        q = self.queue()
        self.assertRaises(ValueError, q.push, lambda x: x)
# Re-run the marshal FIFO suite with tiny chunk sizes so queue items span
# chunk boundaries in the on-disk format.
class ChunkSize1MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 1
class ChunkSize2MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 2
class ChunkSize3MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 3
class ChunkSize4MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 4
class PickleFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    """FIFO disk-queue suite using pickle, which (unlike marshal) can
    serialize scrapy Items, loaders and recursive Requests."""
    chunksize = 100000
    def queue(self):
        return PickleFifoDiskQueue(self.qdir, chunksize=self.chunksize)
    def test_serialize_item(self):
        """A scrapy Item survives a push/pop round-trip with type and data intact."""
        q = self.queue()
        i = TestItem(name='foo')
        q.push(i)
        i2 = q.pop()
        assert isinstance(i2, TestItem)
        self.assertEqual(i, i2)
    def test_serialize_loader(self):
        """An ItemLoader round-trips keeping its class config and processors."""
        q = self.queue()
        l = TestLoader()
        q.push(l)
        l2 = q.pop()
        assert isinstance(l2, TestLoader)
        assert l2.default_item_class is TestItem
        self.assertEqual(l2.name_out('x'), 'xx')
    def test_serialize_request_recursive(self):
        """A Request whose meta references itself round-trips; the self-reference
        must point at the deserialized object, not a copy."""
        q = self.queue()
        r = Request('http://www.example.com')
        r.meta['request'] = r
        q.push(r)
        r2 = q.pop()
        assert isinstance(r2, Request)
        self.assertEqual(r.url, r2.url)
        assert r2.meta['request'] is r2
# Same chunk-boundary coverage as the marshal variants, but with pickle.
class ChunkSize1PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 1
class ChunkSize2PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 2
class ChunkSize3PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 3
class ChunkSize4PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 4
class MarshalLifoDiskQueueTest(t.LifoDiskQueueTest):
    """LIFO disk-queue suite using marshal serialization."""
    def queue(self):
        """Build the queue under test at the per-test path self.path."""
        return MarshalLifoDiskQueue(self.path)
    def test_serialize(self):
        """Values come back in reverse push order (LIFO)."""
        q = self.queue()
        q.push('a')
        q.push(123)
        q.push({'a': 'dict'})
        self.assertEqual(q.pop(), {'a': 'dict'})
        self.assertEqual(q.pop(), 123)
        self.assertEqual(q.pop(), 'a')
    def test_nonserializable_object(self):
        """marshal cannot serialize a lambda; push must raise ValueError."""
        q = self.queue()
        self.assertRaises(ValueError, q.push, lambda x: x)
class PickleLifoDiskQueueTest(MarshalLifoDiskQueueTest):
    """LIFO disk-queue suite using pickle; adds Item/loader/Request round-trips
    that marshal cannot handle."""
    def queue(self):
        return PickleLifoDiskQueue(self.path)
    def test_serialize_item(self):
        """A scrapy Item survives a push/pop round-trip with type and data intact."""
        q = self.queue()
        i = TestItem(name='foo')
        q.push(i)
        i2 = q.pop()
        assert isinstance(i2, TestItem)
        self.assertEqual(i, i2)
    def test_serialize_loader(self):
        """An ItemLoader round-trips keeping its class config and processors."""
        q = self.queue()
        l = TestLoader()
        q.push(l)
        l2 = q.pop()
        assert isinstance(l2, TestLoader)
        assert l2.default_item_class is TestItem
        self.assertEqual(l2.name_out('x'), 'xx')
    def test_serialize_request_recursive(self):
        """A self-referencing Request round-trips with identity preserved."""
        q = self.queue()
        r = Request('http://www.example.com')
        r.meta['request'] = r
        q.push(r)
        r2 = q.pop()
        assert isinstance(r2, Request)
        self.assertEqual(r.url, r2.url)
        assert r2.meta['request'] is r2
| 27.714286 | 110 | 0.656357 |
e27b8950074e95e5b0b9e477cd1eaaa72d88d067 | 2,235 | py | Python | talos/parameters/DistributeParamSpace.py | abhijithneilabraham/talos | 4f60dbbbedede240a086a7a6cd1e7a2b17db87dd | [
"MIT"
] | null | null | null | talos/parameters/DistributeParamSpace.py | abhijithneilabraham/talos | 4f60dbbbedede240a086a7a6cd1e7a2b17db87dd | [
"MIT"
] | null | null | null | talos/parameters/DistributeParamSpace.py | abhijithneilabraham/talos | 4f60dbbbedede240a086a7a6cd1e7a2b17db87dd | [
"MIT"
] | null | null | null | class DistributeParamSpace:
def __init__(self,
params,
param_keys,
random_method='uniform_mersenne',
fraction_limit=None,
round_limit=None,
time_limit=None,
boolean_limit=None,
machines=2):
'''Splits ParamSpace object based on number
of machines.
params | object | ParamSpace class object
machines | int | number of machines to split for
NOTE: `Scan()` limits will not be applied if ParamSpace object
is passed directly into `Scan()` as `params` argument so they
should be passed directly into `DistributeParamSpace` instead.
'''
from talos.parameters.ParamSpace import ParamSpace
self._params = ParamSpace(params=params,
param_keys=param_keys,
random_method='uniform_mersenne',
fraction_limit=None,
round_limit=None,
time_limit=None,
boolean_limit=None)
self.machines = machines
self.param_spaces = self._split_param_space()
def _split_param_space(self):
'''Takes in a ParamSpace object and splits it so that
it can be used in DistributeScan experiments.'''
import numpy as np
import copy
out = {}
# randomly shuffle the param_space
rand = np.random.default_rng()
rand.shuffle(self._params.param_space, axis=0)
# split into n arras
param_spaces = np.array_split(self._params.param_space, self.machines)
# remove keys to allow copy
param_keys = self._params.param_keys
self._params.param_keys = []
# create the individual ParamSpace objects
for i in range(self.machines):
out[i] = copy.deepcopy(self._params)
out[i].param_space = param_spaces[i]
out[i].dimensions = len(out[i].param_space)
out[i].param_index = list(range(out[i].dimensions))
out[i].param_keys = param_keys
return out
| 31.928571 | 78 | 0.561521 |
a5677cc3e26a0afe7994228cdcf04d4fdd78a039 | 1,503 | py | Python | packages/python/plotly/plotly/validators/parcats/line/__init__.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/parcats/line/__init__.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/parcats/line/__init__.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._showscale import ShowscaleValidator
from ._shape import ShapeValidator
from ._reversescale import ReversescaleValidator
from ._hovertemplate import HovertemplateValidator
from ._colorsrc import ColorsrcValidator
from ._colorscale import ColorscaleValidator
from ._colorbar import ColorbarValidator
from ._coloraxis import ColoraxisValidator
from ._color import ColorValidator
from ._cmin import CminValidator
from ._cmid import CmidValidator
from ._cmax import CmaxValidator
from ._cauto import CautoValidator
from ._autocolorscale import AutocolorscaleValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._showscale.ShowscaleValidator",
"._shape.ShapeValidator",
"._reversescale.ReversescaleValidator",
"._hovertemplate.HovertemplateValidator",
"._colorsrc.ColorsrcValidator",
"._colorscale.ColorscaleValidator",
"._colorbar.ColorbarValidator",
"._coloraxis.ColoraxisValidator",
"._color.ColorValidator",
"._cmin.CminValidator",
"._cmid.CmidValidator",
"._cmax.CmaxValidator",
"._cauto.CautoValidator",
"._autocolorscale.AutocolorscaleValidator",
],
)
| 35.785714 | 56 | 0.685961 |
f6bb122b26bab596fe3bdfc57d9dfdc92dcf59a0 | 1,611 | py | Python | dset_loaders/domainnet.py | Luodian/Learning-Invariant-Representations-and-Risks | f3058fe50e86660ca0c17ba0df41ece9af64c557 | [
"MIT"
] | 17 | 2021-04-22T03:24:38.000Z | 2022-03-30T03:12:09.000Z | dset_loaders/domainnet.py | Luodian/Learning-Invariant-Representations-and-Risks | f3058fe50e86660ca0c17ba0df41ece9af64c557 | [
"MIT"
] | 5 | 2021-12-10T10:12:26.000Z | 2022-03-31T00:01:58.000Z | dset_loaders/domainnet.py | Luodian/Learning-Invariant-Representations-and-Risks | f3058fe50e86660ca0c17ba0df41ece9af64c557 | [
"MIT"
] | 3 | 2021-05-19T06:12:14.000Z | 2021-12-17T09:27:49.000Z | import os.path
import torch.utils.data as data
import torchvision
from PIL import Image
import numpy as np
import random
from utils.utils_module import TwoCropsTransform
# what about re-calculate these values for each dataset?
class DomainnetParams(object):
    """Static dataset constants for DomainNet loaders."""
    num_channels = 3
    image_size = 256 # 384, 216
    # presumably the standard ImageNet normalization stats — TODO confirm
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    num_cls = 345
    target_transform = None
class DOMAINNET(data.Dataset):
    """DomainNet dataset returning two augmented views of two images per index.

    `data` must be a mapping with 'ids' (image file paths) and 'labels'
    (class labels coercible to int). The given transform is wrapped in
    TwoCropsTransform, which (per its use below) returns two augmented
    crops per image.
    """
    def __init__(self, root, num_cls, transform, data=None):
        self.root = root
        self.transform = transform
        self.classes = []
        for i in range(num_cls):
            self.classes.append(i)
        self.num_cls = num_cls
        self.images, self.labels = data['ids'], data['labels']
        # Replace the plain transform with the two-crop wrapper so each call
        # yields a (query, key) pair of views.
        self.transform = TwoCropsTransform(self.transform)
    def __getitem__(self, index, debug=False):
        # `debug` is unused; kept for call-site compatibility.
        image = self.images[index % len(self.images)]
        label = int(self.labels[index % len(self.images)])
        # Draw a second, randomly chosen image/label pair (unseeded RNG, so
        # pairing is non-deterministic across epochs).
        rand_index = random.randint(0, len(self.images) - 1)
        image1 = self.images[rand_index % len(self.images)]
        label1 = int(self.labels[rand_index % len(self.images)])
        img = Image.open(image).convert('RGB')
        img1 = Image.open(image1).convert('RGB')
        img_q, img_k = self.transform(img)
        img1_q, img1_k = self.transform(img1)
        # Two views of each of the two images, each paired with its label.
        return {
            'sample_1_q':(img_q, label),
            'sample_1_k':(img_k, label),
            'sample_2_q':(img1_q, label1),
            'sample_2_k':(img1_k, label1),
        }
    def __len__(self):
        return len(self.images)
| 32.877551 | 64 | 0.62694 |
ace66bb5e5137d33b80ab90b3e952c496403ab32 | 42,563 | py | Python | src/azure-cli/azure/cli/command_modules/rdbms/custom.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/rdbms/custom.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/rdbms/custom.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument, line-too-long
from datetime import datetime, timedelta
from importlib import import_module
import re
from dateutil.tz import tzutc # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, is_valid_resource_id, parse_resource_id # pylint: disable=import-error
from knack.log import get_logger
from knack.util import todict
from urllib.request import urlretrieve
from azure.core.exceptions import ResourceNotFoundError
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import CLIError, sdk_no_wait
from azure.cli.core.local_context import ALL
from azure.mgmt.rdbms import postgresql, mysql, mariadb
from azure.mgmt.rdbms.mysql.operations._servers_operations import ServersOperations as MySqlServersOperations
from azure.mgmt.rdbms.postgresql.operations._location_based_performance_tier_operations import LocationBasedPerformanceTierOperations as PostgreSQLLocationOperations
from azure.mgmt.rdbms.mariadb.operations._servers_operations import ServersOperations as MariaDBServersOperations
from azure.mgmt.rdbms.mariadb.operations._location_based_performance_tier_operations import LocationBasedPerformanceTierOperations as MariaDBLocationOperations
from ._client_factory import get_mariadb_management_client, get_mysql_management_client, cf_mysql_db, cf_mariadb_db, \
get_postgresql_management_client, cf_postgres_check_resource_availability_sterling, \
cf_mysql_check_resource_availability_sterling, cf_mariadb_check_resource_availability_sterling
from ._flexible_server_util import generate_missing_parameters, generate_password, resolve_poller
from ._util import parse_public_network_access_input, create_firewall_rule
logger = get_logger(__name__)
# Maps sku tier names to the short prefixes used in composed sku names
# (see _get_sku_name: '<tier>_<family>_<capacity>').
SKU_TIER_MAP = {'Basic': 'b', 'GeneralPurpose': 'gp', 'MemoryOptimized': 'mo'}
# Database created automatically for MySQL/MariaDB servers in _server_create.
DEFAULT_DB_NAME = 'defaultdb'
# pylint: disable=too-many-locals, too-many-statements, raise-missing-from
def _server_create(cmd, client, resource_group_name=None, server_name=None, sku_name=None, no_wait=False,
                   location=None, administrator_login=None, administrator_login_password=None, backup_retention=None,
                   geo_redundant_backup=None, ssl_enforcement=None, storage_mb=None, tags=None, version=None, auto_grow='Enabled',
                   assign_identity=False, public_network_access=None, infrastructure_encryption=None, minimal_tls_version=None):
    """Create a single server (PostgreSQL, MySQL or MariaDB, chosen from the
    client type), optionally with a firewall rule and a default database.

    Missing name/location/group are auto-generated; a missing password is
    auto-generated too. Returns a dict built by form_response containing the
    server, password, host, connection string and (for MySQL/MariaDB) the
    default database name.
    NOTE(review): `no_wait` is accepted but the creation always blocks on
    resolve_poller below — confirm whether that is intentional.
    """
    # The concrete operations-client type decides which resource provider we hit.
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'
    server_result = firewall_id = None
    administrator_login_password = generate_password(administrator_login_password)
    engine_name = 'postgres'
    pricing_link = 'https://aka.ms/postgres-pricing'
    start_ip = end_ip = ''
    # public_network_access may be 'Enabled'/'Disabled', 'all', or an explicit
    # IP range; anything besides Enabled/Disabled also yields a firewall rule.
    if public_network_access is not None and str(public_network_access).lower() != 'enabled' and str(public_network_access).lower() != 'disabled':
        if str(public_network_access).lower() == 'all':
            start_ip, end_ip = '0.0.0.0', '255.255.255.255'
        else:
            start_ip, end_ip = parse_public_network_access_input(public_network_access)
        # if anything but 'disabled' is passed on to the args,
        # then the public network access value passed on to the API is Enabled.
        public_network_access = 'Enabled'
    # Check availability for server name if it is supplied by the user
    if provider == 'Microsoft.DBforPostgreSQL':
        # Populate desired parameters
        location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                                 server_name, engine_name)
        check_name_client = cf_postgres_check_resource_availability_sterling(cmd.cli_ctx, None)
        name_availability_resquest = postgresql.models.NameAvailabilityRequest(name=server_name, type="Microsoft.DBforPostgreSQL/servers")
        check_server_name_availability(check_name_client, name_availability_resquest)
        logger.warning('Creating %s Server \'%s\' in group \'%s\'...', engine_name, server_name, resource_group_name)
        logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                       'Please refer to %s for pricing details', server_name, sku_name, pricing_link)
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForDefaultCreate(
                administrator_login=administrator_login,
                administrator_login_password=administrator_login_password,
                version=version,
                ssl_enforcement=ssl_enforcement,
                minimal_tls_version=minimal_tls_version,
                public_network_access=public_network_access,
                infrastructure_encryption=infrastructure_encryption,
                storage_profile=postgresql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup,
                    storage_mb=storage_mb,
                    storage_autogrow=auto_grow)),
            location=location,
            tags=tags)
        if assign_identity:
            parameters.identity = postgresql.models.ResourceIdentity(
                type=postgresql.models.IdentityType.system_assigned.value)
    elif provider == 'Microsoft.DBforMySQL':
        engine_name = 'mysql'
        pricing_link = 'https://aka.ms/mysql-pricing'
        location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                                 server_name, engine_name)
        check_name_client = cf_mysql_check_resource_availability_sterling(cmd.cli_ctx, None)
        name_availability_resquest = mysql.models.NameAvailabilityRequest(name=server_name, type="Microsoft.DBforMySQL/servers")
        check_server_name_availability(check_name_client, name_availability_resquest)
        logger.warning('Creating %s Server \'%s\' in group \'%s\'...', engine_name, server_name, resource_group_name)
        logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                       'Please refer to %s for pricing details', server_name, sku_name, pricing_link)
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForDefaultCreate(
                administrator_login=administrator_login,
                administrator_login_password=administrator_login_password,
                version=version,
                ssl_enforcement=ssl_enforcement,
                minimal_tls_version=minimal_tls_version,
                public_network_access=public_network_access,
                infrastructure_encryption=infrastructure_encryption,
                storage_profile=mysql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup,
                    storage_mb=storage_mb,
                    storage_autogrow=auto_grow)),
            location=location,
            tags=tags)
        if assign_identity:
            parameters.identity = mysql.models.ResourceIdentity(type=mysql.models.IdentityType.system_assigned.value)
    elif provider == 'Microsoft.DBforMariaDB':
        engine_name = 'mariadb'
        pricing_link = 'https://aka.ms/mariadb-pricing'
        location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                                 server_name, engine_name)
        check_name_client = cf_mariadb_check_resource_availability_sterling(cmd.cli_ctx, None)
        name_availability_resquest = mariadb.models.NameAvailabilityRequest(name=server_name, type="Microsoft.DBforMariaDB")
        check_server_name_availability(check_name_client, name_availability_resquest)
        logger.warning('Creating %s Server \'%s\' in group \'%s\'...', engine_name, server_name, resource_group_name)
        logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                       'Please refer to %s for pricing details', server_name, sku_name, pricing_link)
        # MariaDB has no identity / infrastructure-encryption options here.
        parameters = mariadb.models.ServerForCreate(
            sku=mariadb.models.Sku(name=sku_name),
            properties=mariadb.models.ServerPropertiesForDefaultCreate(
                administrator_login=administrator_login,
                administrator_login_password=administrator_login_password,
                version=version,
                ssl_enforcement=ssl_enforcement,
                minimal_tls_version=minimal_tls_version,
                public_network_access=public_network_access,
                storage_profile=mariadb.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup,
                    storage_mb=storage_mb,
                    storage_autogrow=auto_grow)),
            location=location,
            tags=tags)
    # Block until the server exists; we need its FQDN below.
    server_result = resolve_poller(
        client.begin_create(resource_group_name, server_name, parameters), cmd.cli_ctx,
        '{} Server Create'.format(engine_name))
    user = server_result.administrator_login
    version = server_result.version
    host = server_result.fully_qualified_domain_name
    # Adding firewall rule
    if public_network_access is not None and start_ip != '':
        firewall_id = create_firewall_rule(cmd, resource_group_name, server_name, start_ip, end_ip, engine_name)
    logger.warning('Make a note of your password. If you forget, you would have to '
                   'reset your password with \'az %s server update -n %s -g %s -p <new-password>\'.',
                   engine_name, server_name, resource_group_name)
    # Remember the created server in the CLI local context for later commands.
    update_local_contexts(cmd, provider, server_name, resource_group_name, location, user)
    if engine_name == 'postgres':
        return form_response(server_result, administrator_login_password if administrator_login_password is not None else '*****',
                             host=host,
                             connection_string=create_postgresql_connection_string(server_name, host, user, administrator_login_password),
                             database_name=None, firewall_id=firewall_id)
    # Serves both - MySQL and MariaDB
    # Create mysql database if it does not exist
    database_name = DEFAULT_DB_NAME
    create_database(cmd, resource_group_name, server_name, database_name, engine_name)
    return form_response(server_result, administrator_login_password if administrator_login_password is not None else '*****',
                         host=host,
                         connection_string=create_mysql_connection_string(server_name, host, database_name, user, administrator_login_password),
                         database_name=database_name, firewall_id=firewall_id)
# Need to replace source server name with source server id, so customer server restore function
# The parameter list should be the same as that in factory to use the ParametersContext
# arguments and validators
def _server_restore(cmd, client, resource_group_name, server_name, source_server, restore_point_in_time, no_wait=False):
    """Restore a new single server from a point-in-time backup of *source_server*.

    The resource provider (PostgreSQL/MySQL/MariaDB) is inferred from the
    concrete operations-client type. *source_server* may be a bare server
    name (expanded to an ARM id in the current group) or a full resource id.
    Raises ValueError when the source id is malformed or the source server
    cannot be fetched; its location is reused because cross-region restore
    is not supported.
    """
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'
    # Expand a bare server name into a full ARM resource id.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=provider,
                type='servers',
                name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))
    # All three SDK namespaces expose identically-shaped restore models, so
    # pick the module once instead of duplicating the construction per branch.
    if provider == 'Microsoft.DBforMySQL':
        sdk_models = mysql.models
    elif provider == 'Microsoft.DBforMariaDB':
        sdk_models = mariadb.models
    else:
        sdk_models = postgresql.models
    parameters = sdk_models.ServerForCreate(
        properties=sdk_models.ServerPropertiesForRestore(
            source_server_id=source_server,
            restore_point_in_time=restore_point_in_time),
        location=None)
    # Here is a workaround that we don't support cross-region restore currently,
    # so the location must be set as the same as source server (not the resource group)
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e))) from e
    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
# need to replace source server name with source server id, so customer server georestore function
# The parameter list should be the same as that in factory to use the ParametersContext
# auguments and validators
def _server_georestore(cmd, client, resource_group_name, server_name, sku_name, location, source_server,
                       backup_retention=None, geo_redundant_backup=None, no_wait=False, **kwargs):
    """Geo-restore a single server from a geo-redundant backup of *source_server*
    into a (possibly different) *location*.

    The provider is inferred from the client type; *source_server* may be a
    bare name or a full ARM id. If no sku is given, the source server's sku
    is reused. Raises ValueError for a malformed source id or when the
    source server cannot be fetched.
    """
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'
    parameters = None
    # Expand a bare server name into a full ARM resource id.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))
    if provider == 'Microsoft.DBforMySQL':
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForGeoRestore(
                storage_profile=mysql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforPostgreSQL':
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForGeoRestore(
                storage_profile=postgresql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforMariaDB':
        parameters = mariadb.models.ServerForCreate(
            sku=mariadb.models.Sku(name=sku_name),
            properties=mariadb.models.ServerPropertiesForGeoRestore(
                storage_profile=mariadb.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    # NOTE(review): redundant — every branch above already sets this field.
    parameters.properties.source_server_id = source_server
    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
        # Default the sku to the source server's sku when none was requested.
        if parameters.sku.name is None:
            parameters.sku.name = source_server_object.sku.name
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))
    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
# Custom functions for server replica, will add PostgreSQL part after backend ready in future
def _replica_create(cmd, client, resource_group_name, server_name, source_server, no_wait=False, location=None, sku_name=None, **kwargs):
    """Create a read replica of *source_server*.

    Location and sku default to the source server's values when not given.
    Raises CLIError for a malformed source id or when the source server
    cannot be fetched.
    """
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'
    # set source server id
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise CLIError('The provided source-server {} is invalid.'.format(source_server))
    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))
    # Inherit location/sku from the source server unless overridden.
    if location is None:
        location = source_server_object.location
    if sku_name is None:
        sku_name = source_server_object.sku.name
    parameters = None
    if provider == 'Microsoft.DBforMySQL':
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForReplica(source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforPostgreSQL':
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForReplica(source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforMariaDB':
        parameters = mariadb.models.ServerForCreate(
            sku=mariadb.models.Sku(name=sku_name),
            properties=mariadb.models.ServerPropertiesForReplica(source_server_id=source_server),
            location=location)
    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
def _replica_stop(client, resource_group_name, server_name):
    """Stop replication to a read replica, promoting it to a standalone server.

    Raises CLIError when the server cannot be fetched or is not a replica.
    """
    try:
        server_object = client.get(resource_group_name, server_name)
    except Exception as e:
        raise CLIError('Unable to get server: {}.'.format(str(e)))
    if server_object.replication_role.lower() != "replica":
        raise CLIError('Server {} is not a replica server.'.format(server_name))
    # Resolve the engine-specific ServerUpdateParameters model from the module
    # that defines the fetched server's class (works for all three engines).
    server_module_path = server_object.__module__
    module = import_module(server_module_path.replace('server', 'server_update_parameters'))
    ServerUpdateParameters = getattr(module, 'ServerUpdateParameters')
    # The service interprets the string 'None' as "detach from master".
    params = ServerUpdateParameters(replication_role='None')
    return client.begin_update(resource_group_name, server_name, params)
def _server_update_custom_func(instance,
                               sku_name=None,
                               storage_mb=None,
                               backup_retention=None,
                               administrator_login_password=None,
                               ssl_enforcement=None,
                               tags=None,
                               auto_grow=None,
                               assign_identity=False,
                               public_network_access=None,
                               minimal_tls_version=None):
    """Build a ServerUpdateParameters payload from the fetched server *instance*.

    Only supplied values are changed; everything else is carried over. The
    engine-specific ServerUpdateParameters model is resolved dynamically from
    the module that defines *instance*'s class, so one helper serves MySQL,
    MariaDB and PostgreSQL.
    """
    server_module_path = instance.__module__
    module = import_module(server_module_path.replace('server', 'server_update_parameters'))
    ServerUpdateParameters = getattr(module, 'ServerUpdateParameters')
    if sku_name:
        instance.sku.name = sku_name
        # Clear derived sku fields so the service recomputes them from the name.
        instance.sku.capacity = None
        instance.sku.family = None
        instance.sku.tier = None
    else:
        instance.sku = None
    if storage_mb:
        instance.storage_profile.storage_mb = storage_mb
    if backup_retention:
        instance.storage_profile.backup_retention_days = backup_retention
    if auto_grow:
        instance.storage_profile.storage_autogrow = auto_grow
    params = ServerUpdateParameters(sku=instance.sku,
                                    storage_profile=instance.storage_profile,
                                    administrator_login_password=administrator_login_password,
                                    version=None,
                                    ssl_enforcement=ssl_enforcement,
                                    tags=tags,
                                    public_network_access=public_network_access,
                                    minimal_tls_version=minimal_tls_version)
    if assign_identity:
        # BUG FIX: str.find() returns -1 (truthy) when the substring is absent,
        # so the previous `if server_module_path.find('postgres')` branch also
        # ran for MySQL servers and attached a PostgreSQL identity model.
        # Use substring membership instead.
        if 'postgres' in server_module_path:
            if instance.identity is None:
                instance.identity = postgresql.models.ResourceIdentity(type=postgresql.models.IdentityType.system_assigned.value)
            params.identity = instance.identity
        elif 'mysql' in server_module_path:
            if instance.identity is None:
                instance.identity = mysql.models.ResourceIdentity(type=mysql.models.IdentityType.system_assigned.value)
            params.identity = instance.identity
    return params
def _server_mysql_upgrade(cmd, client, resource_group_name, server_name, target_server_version):
    """Kick off an in-place engine upgrade of a MySQL single server to *target_server_version*."""
    upgrade_payload = mysql.models.ServerUpgradeParameters(target_server_version=target_server_version)
    client.begin_upgrade(resource_group_name, server_name, upgrade_payload)
def _server_mariadb_get(cmd, resource_group_name, server_name):
client = get_mariadb_management_client(cmd.cli_ctx)
return client.servers.get(resource_group_name, server_name)
def _server_mysql_get(cmd, resource_group_name, server_name):
client = get_mysql_management_client(cmd.cli_ctx)
return client.servers.get(resource_group_name, server_name)
def _server_stop(cmd, client, resource_group_name, server_name):
logger.warning("Server will be automatically started after 7 days "
"if you do not perform a manual start operation")
return client.begin_stop(resource_group_name, server_name)
def _server_postgresql_get(cmd, resource_group_name, server_name):
client = get_postgresql_management_client(cmd.cli_ctx)
return client.servers.get(resource_group_name, server_name)
def _server_update_get(client, resource_group_name, server_name):
    """Getter half of the generic-update pattern: read the current server state."""
    return client.get(resource_group_name=resource_group_name, server_name=server_name)
def _server_update_set(client, resource_group_name, server_name, parameters):
    """Setter half of the generic-update pattern: submit the patched server state."""
    return client.begin_update(resource_group_name=resource_group_name,
                               server_name=server_name,
                               parameters=parameters)
def _server_delete(cmd, client, resource_group_name, server_name):
    """Delete a server and scrub its name from the local context, if that is enabled."""
    engine = 'mysql' if isinstance(client, MySqlServersOperations) else 'postgres'
    poller = client.begin_delete(resource_group_name, server_name)
    if cmd.cli_ctx.local_context.is_on:
        context_file = cmd.cli_ctx.local_context._get_local_context_file()  # pylint: disable=protected-access
        context_file.remove_option('{}'.format(engine), 'server_name')
    return poller.result()
def _get_sku_name(tier, family, capacity):
    """Build the SKU name string '<tier-prefix>_<family>_<capacity>'."""
    return '_'.join((SKU_TIER_MAP[tier], family, str(capacity)))
def _firewall_rule_create(client, resource_group_name, server_name, firewall_rule_name, start_ip_address, end_ip_address):
    """Create (or overwrite) a firewall rule allowing the given IPv4 range."""
    rule = {
        'name': firewall_rule_name,
        'start_ip_address': start_ip_address,
        'end_ip_address': end_ip_address,
    }
    return client.begin_create_or_update(resource_group_name, server_name, firewall_rule_name, rule)
def _firewall_rule_custom_getter(client, resource_group_name, server_name, firewall_rule_name):
    """Getter used by the generic-update machinery for firewall rules."""
    return client.get(resource_group_name, server_name, firewall_rule_name)
def _firewall_rule_custom_setter(client, resource_group_name, server_name, firewall_rule_name, parameters):
    """Setter used by the generic-update machinery for firewall rules."""
    return client.begin_create_or_update(resource_group_name, server_name,
                                         firewall_rule_name, parameters)
def _firewall_rule_update_custom_func(instance, start_ip_address=None, end_ip_address=None):
if start_ip_address is not None:
instance.start_ip_address = start_ip_address
if end_ip_address is not None:
instance.end_ip_address = end_ip_address
return instance
def _vnet_rule_create(client, resource_group_name, server_name, virtual_network_rule_name, virtual_network_subnet_id, ignore_missing_vnet_service_endpoint=None):
    """Create (or overwrite) a virtual-network rule for the given subnet."""
    rule = {'name': virtual_network_rule_name,
            'virtual_network_subnet_id': virtual_network_subnet_id,
            'ignore_missing_vnet_service_endpoint': ignore_missing_vnet_service_endpoint}
    return client.begin_create_or_update(resource_group_name, server_name, virtual_network_rule_name, rule)
def _custom_vnet_update_getter(client, resource_group_name, server_name, virtual_network_rule_name):
    """Getter used by the generic-update machinery for vnet rules."""
    return client.get(resource_group_name, server_name, virtual_network_rule_name)
def _custom_vnet_update_setter(client, resource_group_name, server_name, virtual_network_rule_name, parameters):
    """Setter used by the generic-update machinery for vnet rules."""
    return client.begin_create_or_update(resource_group_name, server_name,
                                         virtual_network_rule_name, parameters)
def _vnet_rule_update_custom_func(instance, virtual_network_subnet_id, ignore_missing_vnet_service_endpoint=None):
instance.virtual_network_subnet_id = virtual_network_subnet_id
if ignore_missing_vnet_service_endpoint is not None:
instance.ignore_missing_vnet_service_endpoint = ignore_missing_vnet_service_endpoint
return instance
def _configuration_update(client, resource_group_name, server_name, configuration_name, value=None, source=None):
    """Set (or, with value=None, reset) a server configuration parameter."""
    payload = {'name': configuration_name, 'value': value, 'source': source}
    return client.begin_create_or_update(resource_group_name, server_name, configuration_name, payload)
def _db_create(client, resource_group_name, server_name, database_name, charset=None, collation=None):
    """Create a database with optional charset/collation on an existing server."""
    payload = {'name': database_name, 'charset': charset, 'collation': collation}
    return client.begin_create_or_update(resource_group_name, server_name, database_name, payload)
# Custom functions for server logs
def _download_log_files(client, resource_group_name, server_name, file_name):
    """Download every server log file whose name appears in *file_name*.

    NOTE(review): `in` does membership testing when file_name is a list and
    substring matching when it is a plain string — callers appear to pass a list.
    """
    for log_file in client.list_by_server(resource_group_name, server_name):
        if log_file.name in file_name:
            urlretrieve(log_file.url, log_file.name)
def _list_log_files_with_filter(client, resource_group_name, server_name, filename_contains=None,
                                file_last_written=None, max_file_size=None):
    """Return server log files filtered by age (hours), name pattern and size (KB)."""
    hours_back = 72 if file_last_written is None else file_last_written
    cutoff = datetime.utcnow().replace(tzinfo=tzutc()) - timedelta(hours=hours_back)
    matches = []
    for log_file in client.list_by_server(resource_group_name, server_name):
        too_old = log_file.last_modified_time < cutoff
        name_miss = (filename_contains is not None and
                     re.search(filename_contains, log_file.name) is None)
        too_big = max_file_size is not None and log_file.size_in_kb > max_file_size
        if too_old or name_miss or too_big:
            continue
        # Drop the noisy created_time field from the CLI output.
        del log_file.created_time
        matches.append(log_file)
    return matches
# Custom functions for list servers
def _server_list_custom_func(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# region private_endpoint
def _update_private_endpoint_connection_status(cmd, client, resource_group_name, server_name,
private_endpoint_connection_name, is_approved=True, description=None): # pylint: disable=unused-argument
private_endpoint_connection = client.get(resource_group_name=resource_group_name, server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name)
new_status = 'Approved' if is_approved else 'Rejected'
private_link_service_connection_state = {
'status': new_status,
'description': description
}
private_endpoint_connection.private_link_service_connection_state = private_link_service_connection_state
return client.begin_create_or_update(resource_group_name=resource_group_name,
server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name,
parameters=private_endpoint_connection)
def approve_private_endpoint_connection(cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
                                        description=None):
    """Approve a private endpoint connection request for a server."""
    # Thin wrapper: delegates to the shared status-update helper.
    return _update_private_endpoint_connection_status(
        cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
        is_approved=True, description=description)
def reject_private_endpoint_connection(cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
                                       description=None):
    """Reject a private endpoint connection request for a server."""
    # Thin wrapper: delegates to the shared status-update helper.
    return _update_private_endpoint_connection_status(
        cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
        is_approved=False, description=description)
def server_key_create(client, resource_group_name, server_name, kid):
    """Create Server Key."""
    key_name = _get_server_key_name_from_uri(kid)
    payload = {'uri': kid, 'server_key_type': "AzureKeyVault"}
    # NOTE: this SDK operation takes (server_name, key_name, resource_group_name, ...)
    # — the argument order is deliberate, not a mistake.
    return client.begin_create_or_update(server_name, key_name, resource_group_name, payload)
def server_key_get(client, resource_group_name, server_name, kid):
    """Get Server Key."""
    return client.get(resource_group_name=resource_group_name,
                      server_name=server_name,
                      key_name=_get_server_key_name_from_uri(kid))
def server_key_delete(cmd, client, resource_group_name, server_name, kid):
    """Drop Server Key."""
    return client.begin_delete(resource_group_name=resource_group_name,
                               server_name=server_name,
                               key_name=_get_server_key_name_from_uri(kid))
def _get_server_key_name_from_uri(uri):
'''
Gets the key's name to use as a server key.
The SQL server key API requires that the server key has a specific name
based on the vault, key and key version.
'''
match = re.match(r'https://(.)+\.(managedhsm.azure.net|managedhsm-preview.azure.net|vault.azure.net|vault-int.azure-int.net|vault.azure.cn|managedhsm.azure.cn|vault.usgovcloudapi.net|managedhsm.usgovcloudapi.net|vault.microsoftazure.de|managedhsm.microsoftazure.de|vault.cloudapi.eaglex.ic.gov|vault.cloudapi.microsoft.scloud)(:443)?\/keys/[^\/]+\/[0-9a-zA-Z]+$', uri)
if match is None:
raise CLIError('The provided uri is invalid. Please provide a valid Azure Key Vault key id. For example: '
'"https://YourVaultName.vault.azure.net/keys/YourKeyName/01234567890123456789012345678901" or "https://YourManagedHsmRegion.YourManagedHsmName.managedhsm.azure.net/keys/YourKeyName/01234567890123456789012345678901"')
vault = uri.split('.')[0].split('/')[-1]
key = uri.split('/')[-2]
version = uri.split('/')[-1]
return '{}_{}_{}'.format(vault, key, version)
def server_ad_admin_set(client, resource_group_name, server_name, login=None, sid=None):
    '''
    Sets a server's AD admin.
    '''
    return client.begin_create_or_update(
        server_name=server_name,
        resource_group_name=resource_group_name,
        properties={
            'administratorType': 'ActiveDirectory',
            'login': login,
            'sid': sid,
            'tenant_id': _get_tenant_id(),
        })
def _get_tenant_id():
    '''
    Gets tenantId from current subscription.
    '''
    return Profile().get_subscription()['tenantId']
# endregion
# region new create experience
def create_mysql_connection_string(server_name, host, database_name, user_name, password):
    """Build a mysql CLI command line; leaves a '{password}' placeholder when none given."""
    shown_password = '{password}' if password is None else password
    return 'mysql {dbname} --host {host} --user {username}@{servername} --password={password}'.format(
        dbname=database_name, host=host, username=user_name,
        servername=server_name, password=shown_password)
def create_database(cmd, resource_group_name, server_name, database_name, engine_name):
    """Ensure *database_name* exists on the server; create it (utf8) when missing."""
    # Only 'mysql' and 'mariadb' are expected here; any other engine leaves
    # database_client unbound, matching the original control flow.
    if engine_name == 'mysql':
        database_client = cf_mysql_db(cmd.cli_ctx, None)
    elif engine_name == 'mariadb':
        database_client = cf_mariadb_db(cmd.cli_ctx, None)
    try:
        database_client.get(resource_group_name, server_name, database_name)
    except ResourceNotFoundError:
        logger.warning('Creating %s database \'%s\'...', engine_name, database_name)
        database_client.begin_create_or_update(resource_group_name, server_name, database_name,
                                               {'name': database_name, 'charset': 'utf8'})
def form_response(server_result, password, host, connection_string, database_name=None, firewall_id=None):
    """Shape the create-server output dictionary shown to the user."""
    shaped = todict(server_result)
    shaped.update({'connectionString': connection_string, 'password': password})
    if firewall_id is not None:
        shaped['firewallName'] = firewall_id
    if database_name is not None:
        shaped['databaseName'] = database_name
    return shaped
def create_postgresql_connection_string(server_name, host, user, password):
    """Build a psql connection URI; leaves a '{password}' placeholder when none given."""
    shown_password = '{password}' if password is None else password
    return 'postgres://{user}%40{servername}:{password}@{host}/postgres?sslmode=require'.format(
        user=user, servername=server_name, password=shown_password, host=host)
def check_server_name_availability(check_name_client, parameters):
    """Return True when the requested server name is free; raise CLIError otherwise."""
    availability = check_name_client.execute(parameters)
    if availability.name_available:
        return True
    raise CLIError(availability.message)
def update_local_contexts(cmd, provider, server_name, resource_group_name, location, user):
    """Persist server name, login, location and resource group in the local context."""
    engine = {'Microsoft.DBforMySQL': 'mysql',
              'Microsoft.DBforMariaDB': 'mariadb'}.get(provider, 'postgres')
    if not cmd.cli_ctx.local_context.is_on:
        return
    local = cmd.cli_ctx.local_context
    local.set([engine], 'server_name', server_name)
    local.set([engine], 'administrator_login', user)
    # Location and resource group are shared across all command groups.
    local.set([ALL], 'location', location)
    local.set([ALL], 'resource_group_name', resource_group_name)
def get_connection_string(cmd, client, server_name='{server}', database_name='{database}', administrator_login='{username}', administrator_login_password='{password}'):
    """Return ready-to-paste client connection strings for a server.

    Arguments left at their defaults keep a '{placeholder}' in the output so
    the user can fill them in later.
    """
    # The concrete operations class we were handed identifies the engine.
    provider = 'MySQL'
    if isinstance(client, PostgreSQLLocationOperations):
        provider = 'PostgreSQL'
    elif isinstance(client, MariaDBLocationOperations):
        provider = 'MariaDB'
    if provider == 'MySQL':
        server_endpoint = cmd.cli_ctx.cloud.suffixes.mysql_server_endpoint
        host = '{}{}'.format(server_name, server_endpoint)
        # Per-client templates; '{...}' fields are substituted below.
        result = {
            'mysql_cmd': "mysql {database} --host {host} --user {user}@{server} --password={password}",
            'ado.net': "Server={host}; Port=3306; Database={database}; Uid={user}@{server}; Pwd={password}",
            'jdbc': "jdbc:mysql://{host}:3306/{database}?user={user}@{server}&password={password}",
            'node.js': "var conn = mysql.createConnection({{host: '{host}', user: '{user}@{server}',"
                       "password: {password}, database: {database}, port: 3306}});",
            'php': "$con=mysqli_init(); [mysqli_ssl_set($con, NULL, NULL, {{ca-cert filename}}, NULL, NULL);] mysqli_real_connect($con, '{host}', '{user}@{server}', '{password}', '{database}', 3306);",
            'python': "cnx = mysql.connector.connect(user='{user}@{server}', password='{password}', host='{host}', "
                      "port=3306, database='{database}')",
            'ruby': "client = Mysql2::Client.new(username: '{user}@{server}', password: '{password}', "
                    "database: '{database}', host: '{host}', port: 3306)"
        }
        connection_kwargs = {
            'host': host,
            'user': administrator_login,
            'password': administrator_login_password if administrator_login_password is not None else '{password}',
            'database': database_name,
            'server': server_name
        }
        for k, v in result.items():
            result[k] = v.format(**connection_kwargs)
    if provider == 'PostgreSQL':
        server_endpoint = cmd.cli_ctx.cloud.suffixes.postgresql_server_endpoint
        host = '{}{}'.format(server_name, server_endpoint)
        result = {
            'psql_cmd': "postgresql://{user}@{server}:{password}@{host}/{database}?sslmode=require",
            'C++ (libpq)': "host={host} port=5432 dbname={database} user={user}@{server} password={password} sslmode=require",
            'ado.net': "Server={host};Database={database};Port=5432;User Id={user}@{server};Password={password};",
            'jdbc': "jdbc:postgresql://{host}:5432/{database}?user={user}@{server}&password={password}",
            'node.js': "var client = new pg.Client('postgres://{user}@{server}:{password}@{host}:5432/{database}');",
            'php': "host={host} port=5432 dbname={database} user={user}@{server} password={password}",
            'python': "cnx = psycopg2.connect(database='{database}', user='{user}@{server}', host='{host}', password='{password}', "
                      "port='5432')",
            'ruby': "cnx = PG::Connection.new(:host => '{host}', :user => '{user}@{server}', :dbname => '{database}', "
                    ":port => '5432', :password => '{password}')"
        }
        connection_kwargs = {
            'host': host,
            'user': administrator_login,
            'password': administrator_login_password if administrator_login_password is not None else '{password}',
            'database': database_name,
            'server': server_name
        }
        for k, v in result.items():
            result[k] = v.format(**connection_kwargs)
    if provider == 'MariaDB':
        server_endpoint = cmd.cli_ctx.cloud.suffixes.mariadb_server_endpoint
        host = '{}{}'.format(server_name, server_endpoint)
        result = {
            'ado.net': "Server={host}; Port=3306; Database={database}; Uid={user}@{server}; Pwd={password}",
            'jdbc': "jdbc:mariadb://{host}:3306/{database}?user={user}@{server}&password={password}",
            'node.js': "var conn = mysql.createConnection({{host: '{host}', user: '{user}@{server}',"
                       "password: {password}, database: {database}, port: 3306}});",
            'php': "$con=mysqli_init(); [mysqli_ssl_set($con, NULL, NULL, {{ca-cert filename}}, NULL, NULL);] mysqli_real_connect($con, '{host}', '{user}@{server}', '{password}', '{database}', 3306);",
            'python': "cnx = mysql.connector.connect(user='{user}@{server}', password='{password}', host='{host}', "
                      "port=3306, database='{database}')",
            'ruby': "client = Mysql2::Client.new(username: '{user}@{server}', password: '{password}', "
                    "database: '{database}', host: '{host}', port: 3306)"
        }
        connection_kwargs = {
            'host': host,
            'user': administrator_login,
            'password': administrator_login_password if administrator_login_password is not None else '{password}',
            'database': database_name,
            'server': server_name
        }
        for k, v in result.items():
            result[k] = v.format(**connection_kwargs)
    return {
        'connectionStrings': result
    }
| 47.45039 | 372 | 0.682941 |
0cfd5617cea570c341260657e682a798f8fe660d | 39,456 | py | Python | sympy/printing/tests/test_mathml.py | Bavish2201/sympy | f472bcc07e8af4080d0e78057dd8beb948d8766f | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/tests/test_mathml.py | Bavish2201/sympy | f472bcc07e8af4080d0e78057dd8beb948d8766f | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/tests/test_mathml.py | Bavish2201/sympy | f472bcc07e8af4080d0e78057dd8beb948d8766f | [
"BSD-3-Clause"
] | null | null | null | from sympy import diff, Integral, Limit, sin, Symbol, Integer, Rational, cos, \
tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, E, I, oo, \
pi, GoldenRatio, EulerGamma, Sum, Eq, Ne, Ge, Lt, Float, Matrix, Basic
from sympy.printing.mathml import mathml, MathMLContentPrinter, MathMLPresentationPrinter, \
MathMLPrinter
from sympy.utilities.pytest import raises
# Shared fixtures: the symbols and printer instances exercised by every test below.
x = Symbol('x')
y = Symbol('y')
mp = MathMLContentPrinter()
mpp = MathMLPresentationPrinter()
def test_mathml_printer():
    """The base MathMLPrinter must behave exactly like the content printer."""
    default_printer = MathMLPrinter()
    assert default_printer.doprint(1 + x) == mp.doprint(1 + x)
def test_content_printmethod():
    # Content MathML renders addition as an <apply> with a <plus/> operator.
    assert mp.doprint(1 + x) == '<apply><plus/><ci>x</ci><cn>1</cn></apply>'
def test_content_mathml_core():
    """Spot-check the DOM produced for +, **, * and Float coefficients."""
    mml_1 = mp._print(1 + x)
    assert mml_1.nodeName == 'apply'
    nodes = mml_1.childNodes
    assert len(nodes) == 3
    assert nodes[0].nodeName == 'plus'
    assert nodes[0].hasChildNodes() is False
    assert nodes[0].nodeValue is None
    # Term order of Add is not guaranteed, so accept either operand order.
    assert nodes[1].nodeName in ['cn', 'ci']
    if nodes[1].nodeName == 'cn':
        assert nodes[1].childNodes[0].nodeValue == '1'
        assert nodes[2].childNodes[0].nodeValue == 'x'
    else:
        assert nodes[1].childNodes[0].nodeValue == 'x'
        assert nodes[2].childNodes[0].nodeValue == '1'
    mml_2 = mp._print(x**2)
    assert mml_2.nodeName == 'apply'
    nodes = mml_2.childNodes
    assert nodes[1].childNodes[0].nodeValue == 'x'
    assert nodes[2].childNodes[0].nodeValue == '2'
    mml_3 = mp._print(2*x)
    assert mml_3.nodeName == 'apply'
    nodes = mml_3.childNodes
    assert nodes[0].nodeName == 'times'
    assert nodes[1].childNodes[0].nodeValue == '2'
    assert nodes[2].childNodes[0].nodeValue == 'x'
    # A Float coefficient keeps its decimal representation.
    mml = mp._print(Float(1.0, 2)*x)
    assert mml.nodeName == 'apply'
    nodes = mml.childNodes
    assert nodes[0].nodeName == 'times'
    assert nodes[1].childNodes[0].nodeValue == '1.0'
    assert nodes[2].childNodes[0].nodeValue == 'x'
def test_content_mathml_functions():
    """sin prints as <sin/>; derivatives use <diff>/<partialdiff> with a <bvar>."""
    mml_1 = mp._print(sin(x))
    assert mml_1.nodeName == 'apply'
    assert mml_1.childNodes[0].nodeName == 'sin'
    assert mml_1.childNodes[1].nodeName == 'ci'
    mml_2 = mp._print(diff(sin(x), x, evaluate=False))
    assert mml_2.nodeName == 'apply'
    assert mml_2.childNodes[0].nodeName == 'diff'
    assert mml_2.childNodes[1].nodeName == 'bvar'
    assert mml_2.childNodes[1].childNodes[
        0].nodeName == 'ci'  # below bvar there's <ci>x/ci>
    # A multivariate expression turns the derivative into a partial derivative.
    mml_3 = mp._print(diff(cos(x*y), x, evaluate=False))
    assert mml_3.nodeName == 'apply'
    assert mml_3.childNodes[0].nodeName == 'partialdiff'
    assert mml_3.childNodes[1].nodeName == 'bvar'
    assert mml_3.childNodes[1].childNodes[
        0].nodeName == 'ci'  # below bvar there's <ci>x/ci>
def test_content_mathml_limits():
    """Limit prints as <limit> with <bvar>, <lowlimit> and the limand last."""
    # XXX No unevaluated limits
    lim_fun = sin(x)/x
    mml_1 = mp._print(Limit(lim_fun, x, 0))
    assert mml_1.childNodes[0].nodeName == 'limit'
    assert mml_1.childNodes[1].nodeName == 'bvar'
    assert mml_1.childNodes[2].nodeName == 'lowlimit'
    # The limand itself is printed verbatim as the final child.
    assert mml_1.childNodes[3].toxml() == mp._print(lim_fun).toxml()
def test_content_mathml_integrals():
    """Definite integrals use <int> with <bvar>, <lowlimit>, <uplimit>, integrand."""
    integrand = x
    mml_1 = mp._print(Integral(integrand, (x, 0, 1)))
    assert mml_1.childNodes[0].nodeName == 'int'
    assert mml_1.childNodes[1].nodeName == 'bvar'
    assert mml_1.childNodes[2].nodeName == 'lowlimit'
    assert mml_1.childNodes[3].nodeName == 'uplimit'
    # The integrand is printed verbatim as the final child.
    assert mml_1.childNodes[4].toxml() == mp._print(integrand).toxml()
def test_content_mathml_matrices():
    """Matrices print as a sequence of <matrixrow> elements holding <cn> entries."""
    cases = (
        (Matrix([1, 2, 3]), [['1'], ['2'], ['3']]),
        (Matrix([[0, 5, 4], [2, 3, 1], [9, 7, 9]]),
         [['0', '5', '4'], ['2', '3', '1'], ['9', '7', '9']]),
    )
    for matrix, expected_rows in cases:
        mml = mp._print(matrix)
        for row_node, expected in zip(mml.childNodes, expected_rows):
            assert row_node.nodeName == 'matrixrow'
            for cell, value in zip(row_node.childNodes, expected):
                assert cell.nodeName == 'cn'
                assert cell.childNodes[0].nodeValue == value
def test_content_mathml_sums():
    """Sum prints as <sum> with <bvar>, <lowlimit>, <uplimit> and the summand last."""
    summand = x
    mml_1 = mp._print(Sum(summand, (x, 1, 10)))
    assert mml_1.childNodes[0].nodeName == 'sum'
    assert mml_1.childNodes[1].nodeName == 'bvar'
    assert mml_1.childNodes[2].nodeName == 'lowlimit'
    assert mml_1.childNodes[3].nodeName == 'uplimit'
    # The summand is printed verbatim as the final child.
    assert mml_1.childNodes[4].toxml() == mp._print(summand).toxml()
def test_content_mathml_tuples():
    """Python lists print as content MathML <list> elements of <cn> children."""
    for value, expected_len in (([2], 1), ([2, Integer(1)], 2)):
        mml = mp._print(value)
        assert mml.nodeName == 'list'
        assert len(mml.childNodes) == expected_len
        for child in mml.childNodes:
            assert child.nodeName == 'cn'
def test_content_mathml_add():
    """Subtraction renders as <plus> over a <minus>-wrapped term."""
    mml = mp._print(x**5 - x**4 + x)
    assert mml.childNodes[0].nodeName == 'plus'
    assert mml.childNodes[1].childNodes[0].nodeName == 'minus'
    assert mml.childNodes[1].childNodes[1].nodeName == 'apply'
def test_content_mathml_Rational():
    """Integral rationals print as a bare number; proper ones use <divide/>."""
    # Rational(1, 1) simplifies to an Integer, so it should just return a number.
    whole = mp._print(Rational(1, 1))
    assert whole.nodeName == 'cn'
    proper = mp._print(Rational(2, 5))
    assert proper.childNodes[0].nodeName == 'divide'
def test_content_mathml_constants():
    """Known constants map to dedicated content MathML elements."""
    mml = mp._print(I)
    assert mml.nodeName == 'imaginaryi'
    mml = mp._print(E)
    assert mml.nodeName == 'exponentiale'
    mml = mp._print(oo)
    assert mml.nodeName == 'infinity'
    mml = mp._print(pi)
    assert mml.nodeName == 'pi'
    # GoldenRatio has no dedicated element and falls back to the phi character.
    assert mathml(GoldenRatio) == '<cn>φ</cn>'
    mml = mathml(EulerGamma)
    assert mml == '<eulergamma/>'
def test_content_mathml_trig():
    """Each (inverse/hyperbolic) trig function maps to its content element name."""
    expected = (
        (sin, 'sin'), (cos, 'cos'), (tan, 'tan'),
        (asin, 'arcsin'), (acos, 'arccos'), (atan, 'arctan'),
        (sinh, 'sinh'), (cosh, 'cosh'), (tanh, 'tanh'),
        (asinh, 'arcsinh'), (atanh, 'arctanh'), (acosh, 'arccosh'),
    )
    for func, tag in expected:
        assert mp._print(func(x)).childNodes[0].nodeName == tag
def test_content_mathml_relational():
    """Relational operators map to eq/neq/geq/lt applied to the printed operands."""
    cases = (
        (Eq(x, 1), 'eq', ('ci', 'x'), ('cn', '1')),
        (Ne(1, x), 'neq', ('cn', '1'), ('ci', 'x')),
        (Ge(1, x), 'geq', ('cn', '1'), ('ci', 'x')),
        (Lt(1, x), 'lt', ('cn', '1'), ('ci', 'x')),
    )
    for expr, op_tag, (left_tag, left_val), (right_tag, right_val) in cases:
        mml = mp._print(expr)
        assert mml.nodeName == 'apply'
        assert mml.childNodes[0].nodeName == op_tag
        assert mml.childNodes[1].nodeName == left_tag
        assert mml.childNodes[1].childNodes[0].nodeValue == left_val
        assert mml.childNodes[2].nodeName == right_tag
        assert mml.childNodes[2].childNodes[0].nodeValue == right_val
def test_content_symbol():
    """Symbols print as <ci>; '_'/'^'/'__' name markers become nested
    presentation msub/msup/msubsup children inside the <ci>."""

    def assert_scripted(name, wrapper, scripts):
        # Every script child is a plain <mml:mi>: base first, then the
        # subscript (if any), then the superscript (if any).
        mml = mp._print(Symbol(name))
        assert mml.nodeName == 'ci'
        node = mml.childNodes[0]
        assert node.nodeName == wrapper
        for child, value in zip(node.childNodes, ('x',) + scripts):
            assert child.nodeName == 'mml:mi'
            assert child.childNodes[0].nodeValue == value

    def assert_joined(name, wrapper):
        # Several script tokens are joined into an <mml:mrow>: '2', ' ', 'a'.
        mml = mp._print(Symbol(name))
        assert mml.nodeName == 'ci'
        node = mml.childNodes[0]
        assert node.nodeName == wrapper
        base = node.childNodes[0]
        assert base.nodeName == 'mml:mi'
        assert base.childNodes[0].nodeValue == 'x'
        row = node.childNodes[1]
        assert row.nodeName == 'mml:mrow'
        expected = (('mml:mi', '2'), ('mml:mo', ' '), ('mml:mi', 'a'))
        for child, (tag, value) in zip(row.childNodes, expected):
            assert child.nodeName == tag
            assert child.childNodes[0].nodeValue == value

    # A bare symbol is just <ci>x</ci>.
    mml = mp._print(Symbol("x"))
    assert mml.nodeName == 'ci'
    assert mml.childNodes[0].nodeValue == 'x'

    assert_scripted("x^2", 'mml:msup', ('2',))
    assert_scripted("x__2", 'mml:msup', ('2',))
    assert_scripted("x_2", 'mml:msub', ('2',))
    # msubsup carries the subscript before the superscript.
    assert_scripted("x^3_2", 'mml:msubsup', ('2', '3'))
    assert_scripted("x__3_2", 'mml:msubsup', ('2', '3'))
    assert_joined("x_2_a", 'mml:msub')
    assert_joined("x^2^a", 'mml:msup')
    assert_joined("x__2__a", 'mml:msup')
def test_content_mathml_greek():
    """Greek symbol names are rendered as the corresponding Greek letters."""
    mml = mp._print(Symbol('alpha'))
    assert mml.nodeName == 'ci'
    assert mml.childNodes[0].nodeValue == u'\N{GREEK SMALL LETTER ALPHA}'

    lower_names = ('alpha beta gamma delta epsilon zeta eta theta iota kappa '
                   'lambda mu nu xi omicron pi rho varsigma sigma tau upsilon '
                   'phi chi psi omega').split()
    upper_names = ('Alpha Beta Gamma Delta Epsilon Zeta Eta Theta Iota Kappa '
                   'Lambda Mu Nu Xi Omicron Pi Rho Sigma Tau Upsilon Phi Chi '
                   'Psi Omega').split()
    lower_letters = 'αβγδεζηθικλμνξοπρςστυφχψω'
    upper_letters = 'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ'
    pairs = list(zip(lower_names, lower_letters)) + list(zip(upper_names, upper_letters))
    # Guard against a name/letter table length mismatch silently skipping checks.
    assert len(pairs) == len(lower_names) + len(upper_names)
    for name, letter in pairs:
        assert mp.doprint(Symbol(name)) == '<ci>%s</ci>' % letter
def test_content_mathml_order():
    """The 'order' setting must control term ordering in content MathML."""
    expr = x**3 + x**2*y + 3*x*y**3 + y**4

    def assert_power_term(node, base, exponent):
        # A power term renders as <apply><power/><ci>base</ci><cn>exp</cn></apply>.
        assert node.childNodes[0].nodeName == 'power'
        assert node.childNodes[1].childNodes[0].data == base
        assert node.childNodes[2].childNodes[0].data == exponent

    # Lexicographic order: x**3 leads, y**4 trails.
    tree = MathMLContentPrinter({'order': 'lex'})._print(expr)
    assert_power_term(tree.childNodes[1], 'x', '3')
    assert_power_term(tree.childNodes[4], 'y', '4')

    # Reverse lexicographic order flips the extremes.
    tree = MathMLContentPrinter({'order': 'rev-lex'})._print(expr)
    assert_power_term(tree.childNodes[1], 'y', '4')
    assert_power_term(tree.childNodes[4], 'x', '3')
def test_content_settings():
    """An unsupported ``method`` argument to mathml() raises TypeError."""
    def print_with_bad_method():
        return mathml(Symbol("x"), method="garbage")
    raises(TypeError, print_with_bad_method)
def test_presentation_printmethod():
    """Spot-check raw doprint() output of the presentation MathML printer."""
    # U+2062 is the InvisibleTimes operator the printer emits between factors;
    # spelled as an escape so the literal is visible in source.
    invisible_times = '\u2062'
    assert mpp.doprint(1 + x) == '<mrow><mi>x</mi><mo>+</mo><mn>1</mn></mrow>'
    assert mpp.doprint(x**2) == '<msup><mi>x</mi><mn>2</mn></msup>'
    assert mpp.doprint(2*x) == (
        '<mrow><mn>2</mn><mo>' + invisible_times + '</mo><mi>x</mi></mrow>')
def test_presentation_mathml_core():
    """DOM-level checks of presentation MathML for +, ** and * expressions."""
    mml_1 = mpp._print(1 + x)
    assert mml_1.nodeName == 'mrow'
    nodes = mml_1.childNodes
    assert len(nodes) == 3
    assert nodes[0].nodeName in ['mi', 'mn']
    assert nodes[1].nodeName == 'mo'
    # The order of Add terms is not pinned here, so accept '1 + x' or 'x + 1'.
    if nodes[0].nodeName == 'mn':
        assert nodes[0].childNodes[0].nodeValue == '1'
        assert nodes[2].childNodes[0].nodeValue == 'x'
    else:
        assert nodes[0].childNodes[0].nodeValue == 'x'
        assert nodes[2].childNodes[0].nodeValue == '1'
    # x**2 renders as <msup> with base and exponent children.
    mml_2 = mpp._print(x**2)
    assert mml_2.nodeName == 'msup'
    nodes = mml_2.childNodes
    assert nodes[0].childNodes[0].nodeValue == 'x'
    assert nodes[1].childNodes[0].nodeValue == '2'
    # 2*x joins the factors with an InvisibleTimes (U+2062) operator.
    mml_3 = mpp._print(2*x)
    assert mml_3.nodeName == 'mrow'
    nodes = mml_3.childNodes
    assert nodes[0].childNodes[0].nodeValue == '2'
    assert nodes[1].childNodes[0].nodeValue == '⁢'
    assert nodes[2].childNodes[0].nodeValue == 'x'
    # A Float coefficient keeps its decimal rendering ('1.0', not '1').
    mml = mpp._print(Float(1.0, 2)*x)
    assert mml.nodeName == 'mrow'
    nodes = mml.childNodes
    assert nodes[0].childNodes[0].nodeValue == '1.0'
    assert nodes[1].childNodes[0].nodeValue == '⁢'
    assert nodes[2].childNodes[0].nodeValue == 'x'
def test_presentation_mathml_functions():
    """Presentation MathML for function application and unevaluated derivatives."""
    # sin(x): function name followed by fenced argument.
    mml_1 = mpp._print(sin(x))
    assert mml_1.childNodes[0].childNodes[0
        ].nodeValue == 'sin'
    assert mml_1.childNodes[1].childNodes[0
        ].childNodes[0].nodeValue == 'x'

    # d/dx sin(x): single-variable derivative uses the DifferentialD
    # character (U+2146) in both numerator and denominator of the <mfrac>.
    mml_2 = mpp._print(diff(sin(x), x, evaluate=False))
    assert mml_2.nodeName == 'mfrac'
    assert mml_2.childNodes[0].childNodes[0
        ].childNodes[0].nodeValue == 'ⅆ'
    assert mml_2.childNodes[0].childNodes[1
        ].nodeName == 'mfenced'
    assert mml_2.childNodes[1].childNodes[
        0].childNodes[0].nodeValue == 'ⅆ'

    # d/dx cos(x*y): more than one free symbol, so the partial-derivative
    # character (U+2202) is used instead.
    mml_3 = mpp._print(diff(cos(x*y), x, evaluate=False))
    assert mml_3.nodeName == 'mfrac'
    assert mml_3.childNodes[0].childNodes[0
        ].childNodes[0].nodeValue == '∂'
    # Bug fix: this assertion previously re-checked mml_2 (copy-paste error);
    # it must inspect the partial-derivative tree mml_3.
    assert mml_3.childNodes[0].childNodes[1
        ].nodeName == 'mfenced'
    assert mml_3.childNodes[1].childNodes[
        0].childNodes[0].nodeValue == '∂'
def test_presentation_mathml_limits():
    """Limit(sin(x)/x, x, 0) prints 'lim' with an 'x → 0' row underneath."""
    lim_fun = sin(x)/x
    mml_1 = mpp._print(Limit(lim_fun, x, 0))
    # <munder> stacks the 'lim' word over the "x -> 0" subscript row.
    assert mml_1.childNodes[0].nodeName == 'munder'
    assert mml_1.childNodes[0].childNodes[0
        ].childNodes[0].nodeValue == 'lim'
    # The under-row holds: variable, rightwards arrow (U+2192), limit point.
    assert mml_1.childNodes[0].childNodes[1
        ].childNodes[0].childNodes[0
            ].nodeValue == 'x'
    assert mml_1.childNodes[0].childNodes[1
        ].childNodes[1].childNodes[0
            ].nodeValue == '→'
    assert mml_1.childNodes[0].childNodes[1
        ].childNodes[2].childNodes[0
            ].nodeValue == '0'
def test_presentation_mathml_integrals():
    """A definite integral prints an integral sign with sub/superscript bounds."""
    integrand = x
    mml_1 = mpp._print(Integral(integrand, (x, 0, 1)))
    # <msubsup> carries the integral sign (U+222B) plus lower/upper bounds.
    assert mml_1.childNodes[0].nodeName == 'msubsup'
    assert len(mml_1.childNodes[0].childNodes) == 3
    assert mml_1.childNodes[0].childNodes[0
        ].childNodes[0].nodeValue == '∫'
    assert mml_1.childNodes[0].childNodes[1
        ].childNodes[0].nodeValue == '0'
    assert mml_1.childNodes[0].childNodes[2
        ].childNodes[0].nodeValue == '1'
def test_presentation_mathml_matrices():
    """Column vectors and square matrices render as <mtable>/<mtr>/<mtd> grids."""

    def entry_text(table, row, col):
        # mtable -> mtr -> mtd -> printed entry -> text node
        return table.childNodes[row].childNodes[col
            ].childNodes[0].childNodes[0].nodeValue

    # 3x1 column vector: three rows of one cell each.
    vec_tree = mpp._print(Matrix([1, 2, 3]))
    table = vec_tree.childNodes[0]
    assert table.nodeName == 'mtable'
    assert table.childNodes[0].nodeName == 'mtr'
    assert len(table.childNodes) == 3
    assert table.childNodes[0].childNodes[0].nodeName == 'mtd'
    assert len(table.childNodes[0].childNodes) == 1
    for row, expected in enumerate(('1', '2', '3')):
        assert entry_text(table, row, 0) == expected

    # 3x3 matrix: three rows of three cells each.
    mat_tree = mpp._print(Matrix([[0, 5, 4], [2, 3, 1], [9, 7, 9]]))
    table = mat_tree.childNodes[0]
    assert table.nodeName == 'mtable'
    assert table.childNodes[0].nodeName == 'mtr'
    assert len(table.childNodes) == 3
    assert table.childNodes[0].childNodes[0].nodeName == 'mtd'
    assert len(table.childNodes[0].childNodes) == 3
    expected_rows = (('0', '5', '4'), ('2', '3', '1'), ('9', '7', '9'))
    for row, expected_row in enumerate(expected_rows):
        for col, expected in enumerate(expected_row):
            assert entry_text(table, row, col) == expected
def test_presentation_mathml_sums():
    """Sum(x, (x, 1, 10)) prints a sigma with under/over bounds plus the summand."""
    summand = x
    mml_1 = mpp._print(Sum(summand, (x, 1, 10)))
    # <munderover> stacks the lower bound under and upper bound over the sigma.
    assert mml_1.childNodes[0].nodeName == 'munderover'
    assert len(mml_1.childNodes[0].childNodes) == 3
    assert mml_1.childNodes[0].childNodes[0].childNodes[0
        ].nodeValue == '∑'
    # The lower bound is itself a 3-node row ("x = 1").
    assert len(mml_1.childNodes[0].childNodes[1].childNodes) == 3
    assert mml_1.childNodes[0].childNodes[2].childNodes[0
        ].nodeValue == '10'
    # The summand follows the sum operator.
    assert mml_1.childNodes[1].childNodes[0].nodeValue == 'x'
def test_presentation_mathml_add():
    """x**5 - x**4 + x prints as term / sign / term / sign / term (5 nodes)."""
    mml = mpp._print(x**5 - x**4 + x)
    assert len(mml.childNodes) == 5
    # First term: x**5 (msup with base 'x', exponent '5').
    assert mml.childNodes[0].childNodes[0].childNodes[0
        ].nodeValue == 'x'
    assert mml.childNodes[0].childNodes[1].childNodes[0
        ].nodeValue == '5'
    # The subtraction sign precedes the x**4 term.
    assert mml.childNodes[1].childNodes[0].nodeValue == '-'
    assert mml.childNodes[2].childNodes[0].childNodes[0
        ].nodeValue == 'x'
    assert mml.childNodes[2].childNodes[1].childNodes[0
        ].nodeValue == '4'
    # The addition sign precedes the trailing bare 'x'.
    assert mml.childNodes[3].childNodes[0].nodeValue == '+'
    assert mml.childNodes[4].childNodes[0].nodeValue == 'x'
def test_presentation_mathml_Rational():
    """Whole rationals print as plain <mn>; proper fractions as <mfrac>."""
    whole = mpp._print(Rational(1, 1))
    assert whole.nodeName == 'mn'

    frac = mpp._print(Rational(2, 5))
    assert frac.nodeName == 'mfrac'
    # Numerator comes first, then denominator.
    assert frac.childNodes[0].childNodes[0].nodeValue == '2'
    assert frac.childNodes[1].childNodes[0].nodeValue == '5'
def test_presentation_mathml_constants():
    """Known constants map to their presentation MathML characters."""
    # Constant -> expected text content of the printed node.
    expected_text = [
        (I, 'ⅈ'),
        (E, 'ⅇ'),
        (oo, '∞'),
        (pi, 'π'),
    ]
    for constant, text in expected_text:
        assert mpp._print(constant).childNodes[0].nodeValue == text
    assert mathml(GoldenRatio, printer='presentation') == '<mi>φ</mi>'
def test_presentation_mathml_trig():
    """Trig and hyperbolic functions print under their conventional names."""
    # (constructor, expected printed name), in the original assertion order.
    expected_names = [
        (sin, 'sin'), (cos, 'cos'), (tan, 'tan'),
        (asin, 'arcsin'), (acos, 'arccos'), (atan, 'arctan'),
        (sinh, 'sinh'), (cosh, 'cosh'), (tanh, 'tanh'),
        (asinh, 'arcsinh'), (atanh, 'arctanh'), (acosh, 'arccosh'),
    ]
    for func, name in expected_names:
        tree = mpp._print(func(x))
        assert tree.childNodes[0].childNodes[0].nodeValue == name
def test_presentation_mathml_relational():
    """Relational expressions print as lhs / operator / rhs triples."""

    def assert_relation(expr, parts):
        # parts: three (nodeName, text) pairs for lhs, operator and rhs.
        tree = mpp._print(expr)
        assert len(tree.childNodes) == 3
        for node, (name, text) in zip(tree.childNodes, parts):
            assert node.nodeName == name
            assert node.childNodes[0].nodeValue == text

    assert_relation(Eq(x, 1), [('mi', 'x'), ('mo', '='), ('mn', '1')])
    assert_relation(Ne(1, x), [('mn', '1'), ('mo', '≠'), ('mi', 'x')])
    assert_relation(Ge(1, x), [('mn', '1'), ('mo', '≥'), ('mi', 'x')])
    assert_relation(Lt(1, x), [('mn', '1'), ('mo', '<'), ('mi', 'x')])
def test_presentation_symbol():
    """Symbol names with ^/_ decorations become msub/msup/msubsup inside <mi>."""

    def assert_mi(node, text):
        assert node.nodeName == 'mi'
        assert node.childNodes[0].nodeValue == text

    def script_node(name, expected_tag):
        # Every decorated symbol is printed as an outer <mi> wrapping one
        # script element (msub / msup / msubsup).
        tree = mpp._print(Symbol(name))
        assert tree.nodeName == 'mi'
        script = tree.childNodes[0]
        assert script.nodeName == expected_tag
        return script

    def assert_joined(node, first, second):
        # Multiple scripts on the same side are grouped in an <mrow> with a
        # single-space <mo> separator between them.
        assert node.nodeName == 'mrow'
        assert_mi(node.childNodes[0], first)
        assert node.childNodes[1].nodeName == 'mo'
        assert node.childNodes[1].childNodes[0].nodeValue == ' '
        assert_mi(node.childNodes[2], second)

    # Undecorated symbol: a bare <mi>.
    assert_mi(mpp._print(Symbol("x")), 'x')

    # One superscript: '^' and '__' both produce <msup>.
    for name in ("x^2", "x__2"):
        script = script_node(name, 'msup')
        assert_mi(script.childNodes[0], 'x')
        assert_mi(script.childNodes[1], '2')

    # One subscript: '_' produces <msub>.
    script = script_node("x_2", 'msub')
    assert_mi(script.childNodes[0], 'x')
    assert_mi(script.childNodes[1], '2')

    # Sub + superscript: <msubsup> with base, subscript, superscript.
    for name in ("x^3_2", "x__3_2"):
        script = script_node(name, 'msubsup')
        assert_mi(script.childNodes[0], 'x')
        assert_mi(script.childNodes[1], '2')
        assert_mi(script.childNodes[2], '3')

    # Two subscripts join in an <mrow> inside <msub>.
    script = script_node("x_2_a", 'msub')
    assert_mi(script.childNodes[0], 'x')
    assert_joined(script.childNodes[1], '2', 'a')

    # Two superscripts join in an <mrow> inside <msup>.
    for name in ("x^2^a", "x__2__a"):
        script = script_node(name, 'msup')
        assert_mi(script.childNodes[0], 'x')
        assert_joined(script.childNodes[1], '2', 'a')
def test_presentation_mathml_greek():
    """Greek-letter symbol names print as the matching Greek characters."""
    mml = mpp._print(Symbol('alpha'))
    assert mml.nodeName == 'mi'
    assert mml.childNodes[0].nodeValue == u'\N{GREEK SMALL LETTER ALPHA}'

    # Symbol name -> expected character, lowercase alphabet (including the
    # final-sigma 'varsigma') followed by the uppercase alphabet.
    letters = [
        ('alpha', 'α'), ('beta', 'β'), ('gamma', 'γ'), ('delta', 'δ'),
        ('epsilon', 'ε'), ('zeta', 'ζ'), ('eta', 'η'), ('theta', 'θ'),
        ('iota', 'ι'), ('kappa', 'κ'), ('lambda', 'λ'), ('mu', 'μ'),
        ('nu', 'ν'), ('xi', 'ξ'), ('omicron', 'ο'), ('pi', 'π'),
        ('rho', 'ρ'), ('varsigma', 'ς'), ('sigma', 'σ'), ('tau', 'τ'),
        ('upsilon', 'υ'), ('phi', 'φ'), ('chi', 'χ'), ('psi', 'ψ'),
        ('omega', 'ω'),
        ('Alpha', 'Α'), ('Beta', 'Β'), ('Gamma', 'Γ'), ('Delta', 'Δ'),
        ('Epsilon', 'Ε'), ('Zeta', 'Ζ'), ('Eta', 'Η'), ('Theta', 'Θ'),
        ('Iota', 'Ι'), ('Kappa', 'Κ'), ('Lambda', 'Λ'), ('Mu', 'Μ'),
        ('Nu', 'Ν'), ('Xi', 'Ξ'), ('Omicron', 'Ο'), ('Pi', 'Π'),
        ('Rho', 'Ρ'), ('Sigma', 'Σ'), ('Tau', 'Τ'), ('Upsilon', 'Υ'),
        ('Phi', 'Φ'), ('Chi', 'Χ'), ('Psi', 'Ψ'), ('Omega', 'Ω'),
    ]
    for name, char in letters:
        got = mpp.doprint(Symbol(name))
        # Bug fix: the varsigma failure message previously called the content
        # printer (mp) instead of the presentation printer (mpp) under test.
        assert got == '<mi>{}</mi>'.format(char), got
def test_presentation_mathml_order():
    """The 'order' setting must control term ordering in presentation MathML."""
    expr = x**3 + x**2*y + 3*x*y**3 + y**4

    def assert_power_term(node, base, exponent):
        # Each pure power term is an <msup> with base and exponent children.
        assert node.nodeName == 'msup'
        assert node.childNodes[0].childNodes[0].nodeValue == base
        assert node.childNodes[1].childNodes[0].nodeValue == exponent

    # Lexicographic order: x**3 leads (index 0), y**4 trails (index 6).
    tree = MathMLPresentationPrinter({'order': 'lex'})._print(expr)
    assert_power_term(tree.childNodes[0], 'x', '3')
    assert_power_term(tree.childNodes[6], 'y', '4')

    # Reverse lexicographic order flips the extremes.
    tree = MathMLPresentationPrinter({'order': 'rev-lex'})._print(expr)
    assert_power_term(tree.childNodes[0], 'y', '4')
    assert_power_term(tree.childNodes[6], 'x', '3')
def test_presentation_settings():
    """The presentation printer also rejects unsupported ``method`` values."""
    def print_with_bad_method():
        return mathml(Symbol("x"), printer='presentation', method="garbage")
    raises(TypeError, print_with_bad_method)
def test_toprettyxml_hooking():
    """apply_patch()/restore_patch() must leave minidom's toprettyxml untouched."""
    import xml.dom.minidom

    documents = [
        xml.dom.minidom.parseString(
            "<apply><plus/><ci>x</ci><cn>1</cn></apply>"),
        xml.dom.minidom.parseString(
            "<mrow><mi>x</mi><mo>+</mo><mn>1</mn></mrow>"),
    ]
    # Capture stdlib output before the printer patches minidom.
    before = [doc.toprettyxml() for doc in documents]

    # Patch and immediately restore; stdlib behaviour must round-trip.
    mp.apply_patch()
    mp.restore_patch()

    for doc, expected in zip(documents, before):
        assert doc.toprettyxml() == expected
def test_print_basic():
    """Basic instances print via their (lowercased) class name in both dialects."""
    expr = Basic(1, 2)
    presentation = mpp.doprint(expr)
    content = mp.doprint(expr)
    assert presentation == ('<mrow><mi>basic</mi><mfenced>'
                            '<mn>1</mn><mn>2</mn></mfenced></mrow>')
    assert content == '<basic><cn>1</cn><cn>2</cn></basic>'
| 41.929862 | 100 | 0.644794 |
1cb725db1e03751f1731e54a351ab4cfb6e30e96 | 10,178 | py | Python | network-tests/test_deletes.py | victor-tucci/beldex-storage-server | b7726f775696bc964623b083a24947df0fb97674 | [
"MIT"
] | 2 | 2021-08-15T17:07:06.000Z | 2021-08-25T10:34:04.000Z | network-tests/test_deletes.py | victor-tucci/beldex-storage-server | b7726f775696bc964623b083a24947df0fb97674 | [
"MIT"
] | null | null | null | network-tests/test_deletes.py | victor-tucci/beldex-storage-server | b7726f775696bc964623b083a24947df0fb97674 | [
"MIT"
] | 9 | 2021-07-22T10:38:33.000Z | 2022-01-27T10:22:18.000Z | import pyoxenmq
import ss
import time
import base64
import json
from nacl.encoding import HexEncoder, Base64Encoder
from nacl.hash import blake2b
from nacl.signing import VerifyKey
import nacl.exceptions
def test_delete_all(omq, random_mn, sk, exclude):
    """storage.delete_all must remove every stored message for our pubkey.

    Stores 5 messages via one swarm member, sends a signed delete_all
    through another, then verifies each member's signed confirmation and
    that a subsequent retrieve returns nothing.
    """
    swarm = ss.get_swarm(omq, random_mn, sk)

    # Use two distinct members so store and delete hit different mnodes.
    mns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote("curve://{}:{}/{}".format(mn['ip'], mn['port_omq'], mn['pubkey_x25519']))
            for mn in mns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 5)

    # Session-style id: netid prefix 0x05 + hex-encoded Ed25519 verify key.
    my_ss_id = '05' + sk.verify_key.encode().hex()

    ts = int(time.time() * 1000)
    # The request signature covers "delete_all" || timestamp.
    to_sign = "delete_all{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = json.dumps({
        "pubkey": my_ss_id,
        "timestamp": ts,
        "signature": sig
    }).encode()

    resp = omq.request(conns[1], 'storage.delete_all', [params])

    assert len(resp) == 1
    r = json.loads(resp[0])

    # Every swarm member must be represented in the reply.
    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['mnodes']}

    msg_hashes = sorted(m['hash'] for m in msgs)

    # signature of ( PUBKEY_HEX || TIMESTAMP || DELETEDHASH[0] || ... || DELETEDHASH[N] )
    expected_signed = "".join((my_ss_id, str(ts), *msg_hashes)).encode()
    for k, v in r['swarm'].items():
        assert v['deleted'] == msg_hashes
        edpk = VerifyKey(k, encoder=HexEncoder)
        # Raises BadSignatureError if a member returned a bogus signature.
        edpk.verify(expected_signed, base64.b64decode(v['signature']))

    # All messages should now be gone.
    r = json.loads(omq.request(conns[0], 'storage.retrieve',
        [json.dumps({ "pubkey": my_ss_id }).encode()]
        )[0])
    assert not r['messages']
def test_stale_delete_all(omq, random_mn, sk, exclude):
    """delete_all timestamps too far in the past or future must be rejected."""
    swarm = ss.get_swarm(omq, random_mn, sk)
    mn = ss.random_swarm_members(swarm, 2, exclude)[0]
    conn = omq.connect_remote("curve://{}:{}/{}".format(mn['ip'], mn['port_omq'], mn['pubkey_x25519']))

    msgs = ss.store_n(omq, conn, sk, b"omg123", 5)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    # 2 minutes in the past: outside the allowed timestamp window.
    ts = int((time.time() - 120) * 1000)
    to_sign = "delete_all{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = {
        "pubkey": my_ss_id,
        "timestamp": ts,
        "signature": sig
    }

    resp = omq.request(conn, 'storage.delete_all', [json.dumps(params).encode()])
    assert resp == [b'406', b'delete_all timestamp too far from current time']

    # 2 minutes in the future must be rejected the same way.
    ts = int((time.time() + 120) * 1000)
    to_sign = "delete_all{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    # Bug fix: the timestamp must be updated along with the signature —
    # previously only the signature was replaced, so this request still
    # carried the stale *past* timestamp and the future path was never
    # actually exercised.
    params["timestamp"] = ts
    params["signature"] = sig
    resp = omq.request(conn, 'storage.delete_all', [json.dumps(params).encode()])
    assert resp == [b'406', b'delete_all timestamp too far from current time']
def test_delete(omq, random_mn, sk, exclude):
    """storage.delete must remove only the requested existing hashes.

    Uses netid 0x02; requests deletion of two stored hashes plus one
    nonexistent hash, and checks that only the real ones are reported
    deleted (in sorted order) with valid member signatures.
    """
    swarm = ss.get_swarm(omq, random_mn, sk, netid=2)
    mns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote("curve://{}:{}/{}".format(mn['ip'], mn['port_omq'], mn['pubkey_x25519']))
            for mn in mns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 5, netid=2)

    # Netid prefix 0x02 + hex Ed25519 verify key.
    my_ss_id = '02' + sk.verify_key.encode().hex()

    # NOTE(review): ts is assigned but never used in this test.
    ts = int(time.time() * 1000)
    actual_del_msgs = sorted(msgs[i]['hash'] for i in (1, 4))
    # Deliberately mis-sort the requested hashes to verify that the return is sorted as expected
    del_msgs = sorted(actual_del_msgs + ['bepQtTaYrzcuCXO3fZkmk/h3xkMQ3vCh94i5HzLmj3I'], reverse=True)

    # The request signature covers "delete" || each requested hash.
    to_sign = ("delete" + "".join(del_msgs)).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = json.dumps({
        "pubkey": my_ss_id,
        "messages": del_msgs,
        "signature": sig
    }).encode()

    resp = omq.request(conns[1], 'storage.delete', [params])

    assert len(resp) == 1
    r = json.loads(resp[0])

    # Every swarm member must be represented in the reply.
    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['mnodes']}

    # ( PUBKEY_HEX || RMSG[0] || ... || RMSG[N] || DMSG[0] || ... || DMSG[M] )
    expected_signed = "".join(
        (my_ss_id, *del_msgs, *actual_del_msgs)).encode()
    for k, v in r['swarm'].items():
        # Only the hashes that actually existed are reported as deleted.
        assert v['deleted'] == actual_del_msgs
        edpk = VerifyKey(k, encoder=HexEncoder)
        try:
            edpk.verify(expected_signed, base64.b64decode(v['signature']))
        except nacl.exceptions.BadSignatureError as e:
            print("Bad signature from swarm member {}".format(k))
            raise e

    # 5 stored, 2 deleted -> 3 remain.
    r = json.loads(omq.request(conns[0], 'storage.retrieve',
        [json.dumps({ "pubkey": my_ss_id }).encode()]
        )[0])
    assert len(r['messages']) == 3
def test_delete_before(omq, random_mn, sk, exclude):
    """Exercise storage.delete_before: remove messages older than a timestamp.

    Stores 10 messages with strictly decreasing timestamps, then issues a
    sequence of delete_before requests (some deleting several messages,
    one matching nothing) and, for each, verifies every swarm member's
    signed confirmation and the number of messages left afterwards.
    """
    swarm = ss.get_swarm(omq, random_mn, sk)
    mns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote("curve://{}:{}/{}".format(mn['ip'], mn['port_omq'], mn['pubkey_x25519']))
            for mn in mns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 10)

    # store_n submits msgs with decreasing timestamps:
    assert all(msgs[i]['req']['timestamp'] > msgs[i+1]['req']['timestamp'] for i in range(len(msgs)-1))

    my_ss_id = '05' + sk.verify_key.encode().hex()
    swarm_pubkeys = {x['pubkey_ed25519'] for x in swarm['mnodes']}

    def delete_before(conn, ts, expected_del):
        """Issue a signed delete_before via `conn` and verify the response.

        Every swarm member must report exactly `expected_del` (already
        sorted) as deleted, and sign
        ( PUBKEY_HEX || BEFORE || DELETEDHASH[0] || ... || DELETEDHASH[N] ).
        """
        to_sign = ("delete_before" + str(ts)).encode()
        sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
        params = json.dumps({
            "pubkey": my_ss_id,
            "before": ts,
            "signature": sig
        }).encode()
        resp = omq.request(conn, 'storage.delete_before', [params])
        assert len(resp) == 1
        r = json.loads(resp[0])
        assert set(r['swarm'].keys()) == swarm_pubkeys
        expected_signed = "".join((my_ss_id, str(ts), *expected_del)).encode()
        for k, v in r['swarm'].items():
            assert v['deleted'] == expected_del
            edpk = VerifyKey(k, encoder=HexEncoder)
            try:
                edpk.verify(expected_signed, base64.b64decode(v['signature']))
            except nacl.exceptions.BadSignatureError as e:
                print("Bad signature from swarm member {}".format(k))
                raise e

    def remaining(conn):
        """Return the list of messages still stored for our pubkey."""
        r = json.loads(omq.request(conn, 'storage.retrieve',
            [json.dumps({ "pubkey": my_ss_id }).encode()]
            )[0])
        return r['messages']

    # Delete the last couple messages:
    ts = msgs[8]['req']['timestamp']
    delete_before(conns[1], ts, sorted(msgs[i]['hash'] for i in range(8, len(msgs))))
    assert len(remaining(conns[0])) == 8

    # Delete with no matches (timestamp just before the oldest survivor):
    delete_before(conns[0], msgs[7]['req']['timestamp'] - 1, [])
    assert len(remaining(conns[0])) == 8

    # Delete most of the remaining:
    delete_before(conns[0], msgs[1]['req']['timestamp'],
                  sorted(msgs[i]['hash'] for i in range(1, 8)))
    assert len(remaining(conns[0])) == 1

    # Delete the last one:
    delete_before(conns[1], msgs[0]['req']['timestamp'] + 1, [msgs[0]['hash']])
    assert not remaining(conns[1])
| 36.092199 | 105 | 0.599921 |
b4a1da18a8b3134a3394469a538f20ce9c9dc610 | 341 | py | Python | Script_db/ler_porta_e_atualiza_db.py | Collab-Barco-Solar/poente-software | 8ec5a6539c3fe385ce434d3eac813c34ee6b8e76 | [
"Unlicense"
] | 1 | 2021-04-01T21:04:47.000Z | 2021-04-01T21:04:47.000Z | Script_db/ler_porta_e_atualiza_db.py | Collab-Barco-Solar/poente-software | 8ec5a6539c3fe385ce434d3eac813c34ee6b8e76 | [
"Unlicense"
] | 4 | 2020-10-20T00:07:24.000Z | 2022-03-25T19:22:01.000Z | Script_db/ler_porta_e_atualiza_db.py | Collab-Barco-Solar/poente-software | 8ec5a6539c3fe385ce434d3eac813c34ee6b8e76 | [
"Unlicense"
] | null | null | null | import serial
import sqlite3
db = sqlite3.connect('teste.db')
cursor = db.cursor()
porta = "/dev/ttyUSB0"
velocidade = 9600
conexao = serial.Serial(porta,velocidade)
# Main loop: block on the serial port and persist every reading received.
# Idiomatic `while True` replaces the C-style `while(1)`.
while True:
    leitura = conexao.read()  # blocks until one byte arrives from the Arduino
    # Parameterized query keeps the insert safe regardless of the payload.
    cursor.execute("""
    INSERT INTO dados (num)
    VALUES(?)
    """,(leitura,))
    db.commit()  # commit per reading so data survives an abrupt stop
    print("dado inserido")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.