text string | size int64 | token_count int64 |
|---|---|---|
from setuptools import setup
from sys import path

# Make the package in the current working directory importable so that
# setuptools can find it during the build.
path.insert(0, '.')

NAME = "zcli"

if __name__ == "__main__":
    setup(
        name=NAME,
        version="0.1.0",
        author="Tony Rogers",
        author_email="tony.rogers@rackspace.com",
        url="https://github.com/teriyakichild/python-zcli",
        license='internal use',
        packages=[NAME],
        package_dir={NAME: NAME},
        description="Zabbix CLI.",
        install_requires=[
            'requests',
            'argparse',
            'pyzabbix',
            'ConfigParser',
        ],
        entry_points={
            'console_scripts': ['zcli = zcli:cli'],
        },
    )
| 714 | 217 |
# Copyright (c) 2012-2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2013 Amin Farmahini-Farahani
# Copyright (c) 2015 University of Kaiserslautern
# Copyright (c) 2015 The University of Bologna
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Interfaces for LPDDR5 memory devices
These memory "interfaces" contain the timing,energy,etc parameters for each
memory type and are usually based on datasheets for the memory devices.
You can use these interfaces in the MemCtrl object as the `dram` timing
interface.
"""
from m5.objects import DRAMInterface
class HBM_1000_4H_1x128(DRAMInterface):
    """
    A single HBM x128 interface (one command and address bus), with
    default timings based on data publically released
    ("HBM: Memory Solution for High Performance Processors", MemCon, 2014),
    IDD measurement values, and by extrapolating data from other classes.
    Architecture values based on published HBM spec
    A 4H stack is defined, 2Gb per die for a total of 1GiB of memory.

    **IMPORTANT**
    HBM gen1 supports up to 8 128-bit physical channels
    Configuration defines a single channel, with the capacity
    set to (full_stack_capacity / 8) based on 2Gb dies
    To use all 8 channels, set 'channels' parameter to 8 in
    system configuration
    """

    # 128-bit interface legacy mode
    device_bus_width = 128

    # HBM supports BL4 and BL2 (legacy mode only)
    burst_length = 4

    # size of channel in bytes, 4H stack of 2Gb dies is 1GiB per stack;
    # with 8 channels, 128MiB per channel
    device_size = "128MiB"

    device_rowbuffer_size = "2KiB"

    # 1x128 configuration
    devices_per_rank = 1

    # HBM does not have a CS pin; set rank to 1
    ranks_per_channel = 1

    # HBM has 8 or 16 banks depending on capacity
    # 2Gb dies have 8 banks
    banks_per_rank = 8

    # depending on frequency, bank groups may be required
    # will always have 4 bank groups when enabled
    # current specifications do not define the minimum frequency for
    # bank group architecture
    # setting bank_groups_per_rank to 0 to disable until range is defined
    bank_groups_per_rank = 0

    # 500 MHz for 1Gbps DDR data rate
    tCK = "2ns"

    # use values from IDD measurement in JEDEC spec
    # use tRP value for tRCD and tCL similar to other classes
    tRP = "15ns"
    tRCD = "15ns"
    tCL = "15ns"
    tRAS = "33ns"

    # BL2 and BL4 supported, default to BL4
    # DDR @ 500 MHz means 4 * 2ns / 2 = 4ns
    tBURST = "4ns"

    # value for 2Gb device from JEDEC spec
    tRFC = "160ns"

    # value for 2Gb device from JEDEC spec
    tREFI = "3.9us"

    # extrapolate the following from LPDDR configs, using ns values
    # to minimize burst length, prefetch differences
    tWR = "18ns"
    tRTP = "7.5ns"
    tWTR = "10ns"

    # start with 2 cycles turnaround, similar to other memory classes
    # could be more with variations across the stack
    tRTW = "4ns"

    # single rank device, set to 0
    tCS = "0ns"

    # from MemCon example, tRRD is 4ns with 2ns tCK
    tRRD = "4ns"

    # from MemCon example, tFAW is 30ns with 2ns tCK
    tXAW = "30ns"
    activation_limit = 4

    # 4tCK
    tXP = "8ns"

    # start with tRFC + tXP -> 160ns + 8ns = 168ns
    tXS = "168ns"
class HBM_1000_4H_1x64(HBM_1000_4H_1x128):
    """
    A single HBM x64 interface (one command and address bus), with
    default timings based on HBM gen1 and data publically released
    A 4H stack is defined, 8Gb per die for a total of 4GiB of memory.
    Note: This defines a pseudo-channel with a unique controller
    instantiated per pseudo-channel
    Stay at same IO rate (1Gbps) to maintain timing relationship with
    HBM gen1 class (HBM_1000_4H_x128) where possible

    **IMPORTANT**
    For HBM gen2 with pseudo-channel mode, configure 2X channels.
    Configuration defines a single pseudo channel, with the capacity
    set to (full_stack_capacity / 16) based on 8Gb dies
    To use all 16 pseudo channels, set 'channels' parameter to 16 in
    system configuration
    """

    # 64-bit pseudo-channel interface
    device_bus_width = 64

    # HBM pseudo-channel only supports BL4
    burst_length = 4

    # size of channel in bytes, 4H stack of 8Gb dies is 4GiB per stack;
    # with 16 channels, 256MiB per channel
    device_size = "256MiB"

    # page size is halved with pseudo-channel; maintaining the same number
    # of rows per pseudo-channel with 2X banks across 2 channels
    device_rowbuffer_size = "1KiB"

    # HBM has 8 or 16 banks depending on capacity
    # Starting with 4Gb dies, 16 banks are defined
    banks_per_rank = 16

    # reset tRFC for larger, 8Gb device
    # use HBM1 4Gb value as a starting point
    tRFC = "260ns"

    # Default different rank bus delay to 2 CK, @1000 MHz = 2 ns
    tCS = "2ns"

    tREFI = "3.9us"

    # active powerdown and precharge powerdown exit time
    tXP = "10ns"

    # self refresh exit time
    # NOTE(review): the original assigned tXS twice ("268ns" and then
    # "65ns"); only the final "65ns" ever took effect, so the dead first
    # assignment was removed here. Confirm "65ns" is intended — the usual
    # tRFC + tXP rule of thumb would suggest 270ns for this device.
    tXS = "65ns"
| 7,022 | 2,439 |
__id__ = "$Id: Geometry.py 51 2007-04-25 20:43:07Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 51 $"
__date__ = "$Date: 2007-04-25 14:43:07 -0600 (Wed, 25 Apr 2007) $"
import numpy
import scipy

import Errors
class Geometry(object):
    """
    Geometry holds information about the spatial discretization of the
    problem: the number of bins, the physical extent, and the derived bin
    edges and bin centers.
    """

    def __init__(self, bins, range):
        """
        bins: how many spatial bins in each dimension (up to 3), either as a
            tuple or — for 1-D — a bare integer.
        range: A list of [min, max] pairs; the limits of the spatial geometry
            in each dimension.

        Raises Errors.GeometryError when more than one dimension is
        requested, or when bins and range disagree on the dimensionality.
        """
        try:
            self.dimension = len(bins)
        except TypeError:
            # A bare integer was supplied; treat it as a 1-D bin count.
            self.dimension = 1
        if self.dimension != 1:
            raise Errors.GeometryError(
                "Geometry currently only supports 1-D geometry")
        elif self.dimension != len(range):
            raise Errors.GeometryError(
                "Bins and Range must have same degree")
        else:
            self.bins = bins
            self.range = range
            # Accept both a bare int and a length-1 sequence for bins; the
            # original crashed on the sequence form when sizing the edge
            # array (tuple + 1).
            if isinstance(self.bins, (tuple, list)):
                count = self.bins[0]
            else:
                count = self.bins
            # Evenly spaced bin edges spanning [min, max], plus the midpoint
            # of each bin. numpy.linspace replaces the original manual
            # xrange loop: xrange does not exist on Python 3, and the
            # builtin range is shadowed by the `range` parameter here.
            self.edges = numpy.linspace(self.min, self.max, count + 1)
            self.centers = (self.edges[:-1] + self.edges[1:]) / 2.0

    def __repr__(self):
        """Return a short summary of the binning and extent."""
        return "bins: %s, range: %s" % (self.bins, self.range)

    def _getMinX(self):
        # Lower limit of the first (and only) dimension.
        return min(self.range[0])

    def _getMaxX(self):
        # Upper limit of the first (and only) dimension.
        return max(self.range[0])

    min = property(fget=_getMinX)
    max = property(fget=_getMaxX)
| 1,826 | 633 |
from datetime import datetime
from hashlib import sha3_224 as hash
from logging import getLogger
from typing import IO, Any, Callable, Dict, List, Optional, Tuple, cast
from b2sdk.account_info import InMemoryAccountInfo
from b2sdk.account_info.abstract import AbstractAccountInfo
from b2sdk.account_info.sqlite_account_info import SqliteAccountInfo
from b2sdk.api import B2Api, Bucket
from b2sdk.cache import AuthInfoCache
from b2sdk.exception import FileOrBucketNotFound, NonExistentBucket
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from typing_extensions import TypedDict
from django_backblaze_b2.b2_file import B2File
from django_backblaze_b2.cache_account_info import DjangoCacheAccountInfo
from django_backblaze_b2.options import (
BackblazeB2StorageOptions,
DjangoCacheAccountInfoConfig,
SqliteAccountInfoConfig,
getDefaultB2StorageOptions,
)
logger = getLogger("django-backblaze-b2")
class _BaseFileInfoDict(TypedDict):
    """Keys always present in a b2 file-info entry."""

    fileId: str
    fileName: str
    fileInfo: dict
class _FileInfoDict(_BaseFileInfoDict, total=False):
    """File-info entry with additional metadata; total=False makes these keys optional."""

    size: int
    uploadTimestamp: int
    contentType: str
class B2FileInformationNotAvailableException(Exception):
    """Raised when required file metadata (e.g. 'uploadTimestamp') is missing or invalid."""

    ...
@deconstructible
class BackblazeB2Storage(Storage):
    """Storage class which fulfills the Django Storage contract through b2 apis"""

    def __init__(self, **kwargs):
        """Resolve options from django settings, overridden by any "opts" kwarg."""
        opts = self._getDjangoSettingsOptions(kwargs.get("opts", {}))
        if "opts" in kwargs:
            self._validateOptions(kwargs.get("opts"))
        _merge(opts, kwargs.get("opts", {}))
        # Log the effective options, but never the credentials.
        logOpts = opts.copy()
        logOpts.update({"application_key_id": "<redacted>", "application_key": "<redacted>"})
        logger.debug(f"Initializing {self.__class__.__name__} with options {logOpts}")
        self._bucketName = opts["bucket"]
        self._defaultFileMetadata = opts["defaultFileInfo"]
        self._forbidFilePropertyCaching = opts["forbidFilePropertyCaching"]
        self._authInfo = dict(
            [(k, v) for k, v in opts.items() if k in ["realm", "application_key_id", "application_key"]]
        )
        self._allowFileOverwrites = opts["allowFileOverwrites"]
        self._getAccountInfo = self._createAccountInfoCallable(opts)
        logger.info(f"{self.__class__.__name__} instantiated to use bucket {self._bucketName}")
        if opts["authorizeOnInit"]:
            logger.debug(f"{self.__class__.__name__} authorizing")
            # Property access triggers account authorization (see b2Api).
            self.b2Api
            if opts["validateOnInit"]:
                self._getOrCreateBucket(opts["nonExistentBucketDetails"])

    def _getDjangoSettingsOptions(self, kwargOpts: Dict) -> BackblazeB2StorageOptions:
        """Setting terminology taken from:
        https://b2-sdk-python.readthedocs.io/en/master/glossary.html#term-application-key-ID

        kwargOpts available for subclasses
        """
        from django.conf import settings

        if not hasattr(settings, "BACKBLAZE_CONFIG"):
            raise ImproperlyConfigured("add BACKBLAZE_CONFIG dict to django settings")
        if "application_key_id" not in settings.BACKBLAZE_CONFIG or "application_key" not in settings.BACKBLAZE_CONFIG:
            raise ImproperlyConfigured(
                "At minimum BACKBLAZE_CONFIG must contain auth 'application_key' and 'application_key_id'"
                f"\nfound: {settings.BACKBLAZE_CONFIG}"
            )
        self._validateOptions(settings.BACKBLAZE_CONFIG)
        opts = getDefaultB2StorageOptions()
        opts.update(settings.BACKBLAZE_CONFIG)  # type: ignore
        return opts

    def _validateOptions(self, options: Dict) -> None:
        """Reject any option key that is not part of the default option set."""
        unrecognizedOptions = [k for k in options.keys() if k not in getDefaultB2StorageOptions().keys()]
        if unrecognizedOptions:
            raise ImproperlyConfigured(f"Unrecognized options: {unrecognizedOptions}")

    def _createAccountInfoCallable(self, opts: BackblazeB2StorageOptions) -> Callable[[], AbstractAccountInfo]:
        """Return a factory for the configured account-info backend
        ("memory", "sqlite" or "django-cache")."""
        if (
            not isinstance(opts["accountInfo"], dict)
            or "type" not in opts["accountInfo"]
            or opts["accountInfo"]["type"] not in ["memory", "sqlite", "django-cache"]
        ):
            raise ImproperlyConfigured(
                (f"accountInfo property must be a dict with type found in options.py, was {opts['accountInfo']}")
            )
        if opts["accountInfo"]["type"] == "django-cache":
            logger.debug(f"{self.__class__.__name__} will use {DjangoCacheAccountInfo.__name__}")
            return lambda: DjangoCacheAccountInfo(
                cacheName=cast(DjangoCacheAccountInfoConfig, opts["accountInfo"]).get("cache", "django-backblaze-b2")
            )
        elif opts["accountInfo"]["type"] == "memory":
            logger.debug(f"{self.__class__.__name__} will use {InMemoryAccountInfo.__name__}")
            return lambda: InMemoryAccountInfo()
        elif opts["accountInfo"]["type"] == "sqlite":
            logger.debug(f"{self.__class__.__name__} will use {SqliteAccountInfo.__name__}")
            return lambda: SqliteAccountInfo(
                file_name=cast(SqliteAccountInfoConfig, opts["accountInfo"])["databasePath"]
            )
        # Unreachable given the membership check above.
        raise ImproperlyConfigured()

    @property
    def b2Api(self) -> B2Api:
        # Lazily construct and authorize the api client on first access.
        if not hasattr(self, "_b2Api"):
            self._accountInfo = self._getAccountInfo()
            self._b2Api = B2Api(account_info=self._accountInfo, cache=AuthInfoCache(self._accountInfo))
            self._b2Api.authorize_account(**self._authInfo)
        return self._b2Api

    @property
    def bucket(self) -> Bucket:
        # Lazily resolve the bucket on first access.
        if not hasattr(self, "_bucket"):
            self._getOrCreateBucket()
        return self._bucket

    def _getOrCreateBucket(self, newBucketDetails=None) -> None:
        """Look up the configured bucket; when it is missing, create it if
        newBucketDetails is provided, otherwise re-raise."""
        try:
            self._bucket = self.b2Api.get_bucket_by_name(self._bucketName)
        except NonExistentBucket as e:
            if newBucketDetails is not None:
                logger.debug(f"Bucket {self._bucketName} not found. Creating with details: {newBucketDetails}")
                if "bucket_type" not in newBucketDetails:
                    # Default to a private bucket.
                    newBucketDetails["bucket_type"] = "allPrivate"
                self._bucket = self.b2Api.create_bucket(name=self._bucketName, **newBucketDetails)
            else:
                raise e
        logger.debug(f"Connected to bucket {self._bucket.as_dict()}")

    def _refreshBucket(self) -> None:
        """Clear the api session cache and re-resolve the bucket."""
        self.b2Api.session.cache.clear()
        self._getOrCreateBucket()

    def _open(self, name: str, mode: str) -> File:
        return B2File(
            name=name, bucket=self.bucket, fileMetadata=self._defaultFileMetadata, mode=mode, sizeProvider=self.size,
        )

    def _save(self, name: str, content: IO[Any]) -> str:
        """
        Save and retrieve the filename.
        If the file exists it will make another version of that file.
        """
        return B2File(
            name=name, bucket=self.bucket, fileMetadata=self._defaultFileMetadata, mode="w", sizeProvider=self.size,
        ).saveAndRetrieveFile(content)

    def path(self, name: str) -> str:
        # b2 has no filesystem path; the object name is the "path".
        return name

    def delete(self, name: str) -> None:
        """Delete the current version of the named file, if it exists."""
        fileInfo = self._fileInfo(name)
        if fileInfo:
            logger.debug(f"Deleting file {name} id=({fileInfo['fileId']})")
            self.b2Api.delete_file_version(file_id=fileInfo["fileId"], file_name=name)
            if self._cache:
                # Drop the now-stale cached file info.
                self._cache.delete(self._fileCacheKey(name))
        else:
            logger.debug("Not found")

    def _fileInfo(self, name: str) -> Optional[_FileInfoDict]:
        """Fetch the file info from b2 (via the django cache when enabled).
        Returns None when the file or bucket does not exist."""
        try:
            if self._cache:
                cacheKey = self._fileCacheKey(name)
                timeoutInSeconds = 60

                def loadInfo():
                    # Only called on a cache miss.
                    logger.debug(f"file info cache miss for {name}")
                    return self.bucket.get_file_info_by_name(name).as_dict()

                return self._cache.get_or_set(key=cacheKey, default=loadInfo, timeout=timeoutInSeconds)
            return self.bucket.get_file_info_by_name(name).as_dict()
        except FileOrBucketNotFound:
            return None

    def _fileCacheKey(self, name: str) -> str:
        # `hash` is sha3_224 (module import); digest of "<bucket>__<name>"
        # keeps the cache key short and free of unsafe characters.
        return hash(f"{self.bucket.name}__{name}".encode()).hexdigest()

    @property
    def _cache(self) -> Optional[BaseCache]:
        # Caching is only available when not forbidden by options and the
        # account-info backend is the django-cache implementation.
        if (
            not self._forbidFilePropertyCaching
            and self.b2Api  # force init
            and self._accountInfo
            and isinstance(self._accountInfo, DjangoCacheAccountInfo)
        ):
            return self._accountInfo.cache
        return None

    def exists(self, name: str) -> bool:
        return bool(self._fileInfo(name))

    def size(self, name: str) -> int:
        # 0 when the file is missing or reports no size.
        fileInfo = self._fileInfo(name)
        return fileInfo.get("size", 0) if fileInfo else 0

    def url(self, name: Optional[str]) -> str:
        if not name:
            raise Exception("Name must be defined")
        return self._getFileUrl(name)

    def _getFileUrl(self, name: str) -> str:
        return self.getBackblazeUrl(name)

    def getBackblazeUrl(self, filename: str) -> str:
        return self.b2Api.get_download_url_for_file_name(bucket_name=self._bucketName, file_name=filename)

    def get_available_name(self, name: str, max_length: Optional[int] = None) -> str:
        # When overwrites are allowed, keep the requested name as-is.
        if self._allowFileOverwrites:
            return name
        return super().get_available_name(name, max_length)

    def listdir(self, path: str) -> Tuple[List[str], List[str]]:
        """
        List the contents of the specified path. Return a 2-tuple of lists:
        the first item being directories, the second item being files.
        """
        raise NotImplementedError("subclasses of Storage must provide a listdir() method")

    def get_accessed_time(self, name: str) -> datetime:
        """
        Return the last accessed time (as a datetime) of the file specified by
        name. The datetime will be timezone-aware if USE_TZ=True.
        """
        raise NotImplementedError("subclasses of Storage must provide a get_accessed_time() method")

    def get_created_time(self, name: str) -> datetime:
        """
        Return the creation time (as a datetime) of the file specified by name.
        The datetime will be timezone-aware if USE_TZ=True.
        """
        from datetime import timezone

        from django.conf import settings

        fileInfo = self._fileInfo(name)
        try:
            if fileInfo and float(fileInfo.get("uploadTimestamp", 0)) > 0:
                # uploadTimestamp from the API is divided by 1000 here, i.e.
                # it is treated as milliseconds since the epoch.
                timestamp = float(fileInfo["uploadTimestamp"]) / 1000.0
                if settings.USE_TZ:
                    # Safe to use .replace() because UTC doesn't have DST
                    return datetime.utcfromtimestamp(timestamp).replace(tzinfo=timezone.utc)
                return datetime.fromtimestamp(timestamp)
        except ValueError as e:
            raise B2FileInformationNotAvailableException(f"'uploadTimestamp' from API not valid for {name}: {e}")
        raise B2FileInformationNotAvailableException(f"'uploadTimestamp' not available for {name}")

    def get_modified_time(self, name: str) -> datetime:
        """
        Return the last modified time (as a datetime) of the file specified by
        name. The datetime will be timezone-aware if USE_TZ=True.
        """
        # Delegates to get_created_time: the latest version's creation time
        # is used as the modification time.
        return self.get_created_time(name)
def _merge(target: Dict, source: Dict, path=None) -> Dict:
"""merges b into a
https://stackoverflow.com/a/7205107/11076240
"""
if path is None:
path = []
for key in source:
if key in target:
printablePath = ".".join(path + [str(key)])
if isinstance(target[key], dict) and isinstance(source[key], dict):
_merge(target[key], source[key], path + [str(key)])
elif target[key] != source[key]:
logger.debug(f"Overriding setting {printablePath} with value {source[key]}")
target[key] = source[key]
else:
target[key] = source[key]
return target
| 12,244 | 3,535 |
# # NNabla Python API Demonstration Tutorial
# # (https://nnabla.readthedocs.io/en/latest/python/tutorial/python_api.html)
import matplotlib.pyplot as plt
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import numpy as np
from ivory.utils.path import cache_file
# ## NdArray
# NOTE: the "# -" comment lines below are notebook cell separators kept
# from the original tutorial script.
a = nn.NdArray((2, 3, 4))
print(a.data)
# -
print("[Substituting random values]")
a.data = np.random.randn(*a.shape)
print(a.data)
print("[Slicing]")
a.data[0, :, ::2] = 0
print(a.data)
# -
a.fill(1)  # Filling all values with one.
print(a.data)
# -
b = nn.NdArray.from_numpy_array(np.ones(a.shape))
print(b.data)
# ## Variable
x = nn.Variable([2, 3, 4], need_grad=True)
print("x.data:", x.data)
print("x.grad:", x.grad)
# -
x.shape
# -
print("x.data")
print(x.d)
x.d = 1.2345  # To avoid NaN
assert np.all(x.d == x.data.data), "d: {} != {}".format(x.d, x.data.data)
print("x.grad")
print(x.g)
x.g = 1.2345  # To avoid NaN
assert np.all(x.g == x.grad.data), "g: {} != {}".format(x.g, x.grad.data)
# !Zeroing grad values
x.grad.zero()
print("x.grad (after `.zero()`)")
print(x.g)
# -
x2 = nn.Variable.from_numpy_array(np.ones((3,)), need_grad=True)
print(x2)
print(x2.d)
x3 = nn.Variable.from_numpy_array(np.ones((3,)), np.zeros((3,)), need_grad=True)
print(x3)
print(x3.d)
print(x3.g)
# -
print(x.parent)
# ## Function
sigmoid_output = F.sigmoid(x)
sum_output = F.reduce_sum(sigmoid_output)
print(sigmoid_output)
print(sum_output)
# -
print("sigmoid_output.parent.name:", sigmoid_output.parent.name)
print("x:", x)
print("sigmoid_output.parent.inputs refers to x:", sigmoid_output.parent.inputs)
# -
print("sum_output.parent.name:", sum_output.parent.name)
print("sigmoid_output:", sigmoid_output)
print("sum_output.parent.inputs refers to sigmoid_output:", sum_output.parent.inputs)
# -
sum_output.forward()
print("CG output:", sum_output.d)
print("Reference:", np.sum(1.0 / (1.0 + np.exp(-x.d))))
# -
x.grad.zero()
sum_output.backward()
print("d sum_o / d sigmoid_o:")
print(sigmoid_output.g)
print("d sum_o / d x:")
print(x.g)
x.d
# -
x = nn.Variable([5, 2])  # Input
w = nn.Variable([2, 3], need_grad=True)  # Weights
b = nn.Variable([3], need_grad=True)  # Biases
affine_out = F.affine(x, w, b)  # Create a graph including only affine
affine_out
# -
# !Set random input and parameters
x.d = np.random.randn(*x.shape)
w.d = np.random.randn(*w.shape)
b.d = np.random.randn(*b.shape)
# !Initialize grad
x.grad.zero()  # Just for showing gradients are not computed when need_grad=False.
w.grad.zero()
b.grad.zero()
# !Forward and backward
affine_out.forward()
affine_out.backward()
# -
print("F.affine")
print(affine_out.d)
print("Reference")
print(np.dot(x.d, w.d) + b.d)
print("dw")
print(w.g)
print("db")
print(b.g)
# -
print(x.g)
# ## Parametric Function
with nn.parameter_scope("affine1"):
    c1 = PF.affine(x, 3)
# -
nn.get_parameters()
# -
c1 = PF.affine(x, 3, name="affine1")
nn.get_parameters()
# -
c1.shape
# -
with nn.parameter_scope("foo"):
    h = PF.affine(x, 3)
    with nn.parameter_scope("bar"):
        h = PF.affine(h, 4)
with nn.parameter_scope("foo"):
    params = nn.get_parameters()
params
# -
with nn.parameter_scope("foo"):
    nn.clear_parameters()
nn.get_parameters()
# ## MLP Example For Explanation
nn.clear_parameters()
batchsize = 16
x = nn.Variable([batchsize, 2])
with nn.parameter_scope("fc1"):
    h = F.tanh(PF.affine(x, 512))
with nn.parameter_scope("fc2"):
    y = PF.affine(h, 1)
print("Shapes:", h.shape, y.shape)
# -
nn.get_parameters()
# -
x.d = np.random.randn(*x.shape)  # Set random input
y.forward()
y.d
# -
# !Variable for label
label = nn.Variable([batchsize, 1])
# !Set loss
loss = F.reduce_mean(F.squared_error(y, label))
# !Execute forward pass.
label.d = np.random.randn(*label.shape)  # Randomly generate labels
loss.forward()
print(loss.d)
# -
# !Collect all parameter variables and init grad.
for name, param in nn.get_parameters().items():
    param.grad.zero()
# Gradients are accumulated to grad of params.
loss.backward()
# ## Imperative Mode
for name, param in nn.get_parameters().items():
    param.data -= param.grad * 0.001  # 0.001 as learning rate
# -
# !A simple example of imperative mode.
xi = nn.NdArray.from_numpy_array(np.arange(4).reshape(2, 2))
yi = F.relu(xi - 1)
xi.data
# -
yi.data
# -
id(xi)
# -
xi = xi + 1
id(xi)
# -
xi -= 1
id(xi)
# -
# !The following doesn't perform substitution but assigns a new NdArray object to `xi`.
# !xi = xi + 1
# !The following copies the result of `xi + 1` to `xi`.
xi.copy_from(xi + 1)
assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 1))
# Inplace operations like `+=`, `*=` can also be used (more efficient).
xi += 1
assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 2))
# ## Solver
solver = S.Sgd(lr=0.00001)
solver.set_parameters(nn.get_parameters())
# -
# !Set random data
x.d = np.random.randn(*x.shape)
label.d = np.random.randn(*label.shape)
# !Forward
loss.forward()
# -
solver.zero_grad()
loss.backward()
solver.update()
# ## Toy Problem To Demonstrate Training
def vector2length(x):
    """Return the Euclidean length of each row of ``x``.

    x : [B, 2] where B is number of samples; the result has shape [B, 1].
    """
    squared = x ** 2
    return np.sqrt(squared.sum(axis=1, keepdims=True))
# Example
vector2length(np.array([[3, 4], [5, 12]]))
# -
# !Data for plotting contour on a grid data.
xs = np.linspace(-1, 1, 100)
ys = np.linspace(-1, 1, 100)
grid = np.meshgrid(xs, ys)
# Flattened grid coordinates used by the plotting helpers below.
X = grid[0].flatten()
Y = grid[1].flatten()
def plot_true():
    """Plotting contour of true mapping from a grid data created above."""
    plt.contourf(
        xs, ys, vector2length(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100)
    )
    plt.axis("equal")
    plt.colorbar()


plot_true()
# -
def length_mlp(x):
    """Four tanh hidden layers (widths 4, 8, 4, 2) followed by a scalar
    affine output layer."""
    hidden = x
    for layer_index, width in enumerate([4, 8, 4, 2]):
        hidden = F.tanh(PF.affine(hidden, width, name="fc{}".format(layer_index)))
    return PF.affine(hidden, 1, name="fc")
# -
nn.clear_parameters()
batchsize = 100
x = nn.Variable([batchsize, 2])
y = length_mlp(x)
label = nn.Variable([batchsize, 1])
# Mean squared error between predicted and true lengths.
loss = F.reduce_mean(F.squared_error(y, label))
# -
def predict(inp):
    """Run the MLP over ``inp`` in batches and stack the predictions.

    NOTE(review): relies on the module-level variable ``x`` for the batch
    size and rebuilds the network imperatively with ``length_mlp`` using
    the current parameters.
    """
    ret = []
    for i in range(0, inp.shape[0], x.shape[0]):
        xx = inp[i : i + x.shape[0]]
        # Imperative execution
        xi = nn.NdArray.from_numpy_array(xx)
        yi = length_mlp(xi)
        ret.append(yi.data.copy())
    return np.vstack(ret)
def plot_prediction():
    """Plot the MLP's predicted length over the grid data created above."""
    plt.contourf(xs, ys, predict(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100))
    plt.colorbar()
    plt.axis("equal")
# -
# Adam optimizer over all current parameters.
solver = S.Adam(alpha=0.01)
solver.set_parameters(nn.get_parameters())
# -
def random_data_provider(n):
    """Sample ``n`` points uniformly from [-1, 1]^2 together with their
    true Euclidean lengths as labels."""
    inputs = np.random.uniform(-1, 1, size=(n, 2))
    lengths = vector2length(inputs)
    return inputs, lengths
# -
num_iter = 2000
for i in range(num_iter):
    # Sample data and set them to input variables of training.
    xx, ll = random_data_provider(batchsize)
    x.d = xx
    label.d = ll
    # Forward propagation given inputs.
    loss.forward(clear_no_need_grad=True)
    # Parameter gradients initialization and gradients computation by backprop.
    solver.zero_grad()
    loss.backward(clear_buffer=True)
    # Apply weight decay and update by Adam rule.
    solver.weight_decay(1e-6)
    solver.update()
    # Just print progress.
    if i % 100 == 0 or i == num_iter - 1:
        print("Loss@{:4d}: {}".format(i, loss.d))
# -
loss.forward(clear_buffer=True)
print("The prediction `y` is cleared because it's an intermediate variable.")
print(y.d.flatten()[:4])  # to save space show only 4 values
y.persistent = True
loss.forward(clear_buffer=True)
print("The prediction `y` is kept by the persistent flag.")
print(y.d.flatten()[:4])  # to save space show only 4 values
# -
plt.subplot(121)
plt.title("Ground truth")
plot_true()
plt.subplot(122)
plt.title("Prediction")
plot_prediction()
# -
path_param = cache_file('nnabla/tutorial/python_api/param-vector2length.h5')
nn.save_parameters(path_param)
# !Remove all once
nn.clear_parameters()
nn.get_parameters()
# -
# !Load again
nn.load_parameters(path_param)
print('\n'.join(map(str, nn.get_parameters().items())))
# -
with nn.parameter_scope('foo'):
    nn.load_parameters(path_param)
print('\n'.join(map(str, nn.get_parameters().items())))
| 8,180 | 3,397 |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
from queue import Queue
from unittest import TestCase
from unittest.mock import patch
from unittest.mock import MagicMock
from amazon.ion.simpleion import dumps
from botocore.exceptions import ClientError
from pyqldb.cursor.read_ahead_cursor import ReadAheadCursor
from pyqldb.errors import ResultClosedError
from .helper_functions import assert_query_stats, generate_statement_result
# Shared mock/fixture values for the ReadAheadCursor tests below.
MOCK_ERROR_CODE = '500'
MOCK_MESSAGE = 'foo'
MOCK_CLIENT_ERROR_MESSAGE = {'Error': {'Code': MOCK_ERROR_CODE, 'Message': MOCK_MESSAGE}}
# Read-ahead buffer depth used when constructing cursors in the tests.
MOCK_READ_AHEAD = 0
MOCK_TRANSACTION_ID = 'ID'
MOCK_VALUES = [1, 2]
MOCK_ION_BINARY_VALUES = [{'IonBinary': MOCK_VALUES[0]}, {'IonBinary': MOCK_VALUES[1]}]
MOCK_TOKEN = 'mock_token'
MOCK_PAGE_WITH_TOKEN = {'Values': MOCK_ION_BINARY_VALUES, 'NextPageToken': MOCK_TOKEN}
MOCK_READ_IOS = 3
MOCK_WRITE_IOS = 2
MOCK_PROCESSING_TIME = 1
# Statement results with and without a next-page token, plus a fetch-page
# style result (first_page=False).
MOCK_STATEMENT_RESULT = generate_statement_result(MOCK_READ_IOS, MOCK_WRITE_IOS, MOCK_PROCESSING_TIME,
                                                  MOCK_TOKEN, True, MOCK_ION_BINARY_VALUES)
MOCK_STATEMENT_RESULT_WITHOUT_TOKEN = generate_statement_result(MOCK_READ_IOS, MOCK_WRITE_IOS, MOCK_PROCESSING_TIME,
                                                                None, True, MOCK_ION_BINARY_VALUES)
MOCK_FETCH_PAGE_RESULT_WITHOUT_TOKEN = generate_statement_result(MOCK_READ_IOS, MOCK_WRITE_IOS, MOCK_PROCESSING_TIME,
                                                                 None, False, MOCK_ION_BINARY_VALUES)
class TestReadAheadCursor(TestCase):
@patch('pyqldb.communication.session_client.SessionClient')
@patch('pyqldb.cursor.read_ahead_cursor.Queue')
@patch('pyqldb.cursor.read_ahead_cursor.Thread')
def test_ReadAheadCursor_without_executor(self, mock_thread, mock_queue, mock_session):
mock_session.return_value = None
mock_thread.return_value = mock_thread
mock_queue.return_value = mock_queue
read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
MOCK_READ_AHEAD, None)
self.assertEqual(read_ahead_cursor._page, MOCK_PAGE_WITH_TOKEN)
self.assertEqual(read_ahead_cursor._session, mock_session)
self.assertEqual(read_ahead_cursor._read_ios, MOCK_READ_IOS)
self.assertEqual(read_ahead_cursor._write_ios, MOCK_WRITE_IOS)
self.assertEqual(read_ahead_cursor._processing_time_milliseconds, MOCK_PROCESSING_TIME)
self.assertEqual(read_ahead_cursor._transaction_id, MOCK_TRANSACTION_ID)
self.assertEqual(read_ahead_cursor._index, 0)
self.assertEqual(read_ahead_cursor._queue, mock_queue)
self.assertEqual(read_ahead_cursor._is_open, True)
mock_queue.assert_called_once_with(MOCK_READ_AHEAD - 1)
mock_thread.assert_called_once_with(target=read_ahead_cursor._populate_queue)
mock_thread().setDaemon.assert_called_once_with(True)
mock_thread().start.assert_called_once_with()
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
@patch('pyqldb.cursor.read_ahead_cursor.Queue')
def test_ReadAheadCursor_with_executor(self, mock_queue, mock_session, mock_executor):
mock_session.return_value = None
mock_queue.return_value = mock_queue
read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
MOCK_READ_AHEAD, mock_executor)
self.assertEqual(read_ahead_cursor._page, MOCK_PAGE_WITH_TOKEN)
self.assertEqual(read_ahead_cursor._session, mock_session)
self.assertEqual(read_ahead_cursor._read_ios, MOCK_READ_IOS)
self.assertEqual(read_ahead_cursor._write_ios, MOCK_WRITE_IOS)
self.assertEqual(read_ahead_cursor._processing_time_milliseconds, MOCK_PROCESSING_TIME)
self.assertEqual(read_ahead_cursor._transaction_id, MOCK_TRANSACTION_ID)
self.assertEqual(read_ahead_cursor._index, 0)
self.assertEqual(read_ahead_cursor._queue, mock_queue)
self.assertEqual(read_ahead_cursor._is_open, True)
mock_queue.assert_called_once_with(MOCK_READ_AHEAD - 1)
mock_executor.submit.assert_called_once_with(read_ahead_cursor._populate_queue)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_iter(self, mock_session, mock_executor):
mock_session.return_value = None
read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
MOCK_READ_AHEAD, mock_executor)
self.assertEqual(iter(read_ahead_cursor), read_ahead_cursor)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.cursor.stream_cursor.StreamCursor._value_holder_to_ion_value')
@patch('pyqldb.communication.session_client.SessionClient')
def test_next(self, mock_session, mock_value_holder_to_ion_value, mock_executor):
mock_session.return_value = None
mock_value_holder_to_ion_value.side_effect = lambda val: val
read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT_WITHOUT_TOKEN, mock_session,
MOCK_TRANSACTION_ID, MOCK_READ_AHEAD, mock_executor)
count = 0
for value in MOCK_ION_BINARY_VALUES:
self.assertEqual(read_ahead_cursor._index, count)
self.assertEqual(next(read_ahead_cursor), value)
mock_value_holder_to_ion_value.assert_called_with(value)
count += 1
self.assertRaises(StopIteration, next, read_ahead_cursor)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_next_when_closed(self, mock_session, mock_executor):
    """Advancing a closed cursor should raise ResultClosedError."""
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    read_ahead_cursor.close()
    self.assertRaises(ResultClosedError, next, read_ahead_cursor)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.cursor.stream_cursor.StreamCursor._value_holder_to_ion_value')
@patch('pyqldb.communication.session_client.SessionClient')
@patch('pyqldb.cursor.read_ahead_cursor.ReadAheadCursor._next_page')
@patch('pyqldb.cursor.read_ahead_cursor.ReadAheadCursor._are_there_more_results')
def test_next_verify_are_there_more_results_and_next_page_called(self, mock_are_there_more_results,
                                                                 mock_next_page, mock_session,
                                                                 mock_value_holder_to_ion_value,
                                                                 mock_executor):
    """When the current page is exhausted, next() should consult _are_there_more_results
    and fetch the next page via _next_page before yielding the first new value."""
    updated_result = '1'

    def next_page():
        # Simulate _next_page swapping in a fresh (final) page and resetting the index.
        read_ahead_cursor._page = {'NextPageToken': None, 'Values': [updated_result]}
        read_ahead_cursor._index = 0

    mock_are_there_more_results.return_value = True
    mock_value_holder_to_ion_value.side_effect = lambda val: val
    mock_session.return_value = None
    mock_next_page.return_value = None
    mock_next_page.side_effect = next_page
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    # Force the cursor past the end of its first page so next() must page forward.
    read_ahead_cursor._index = len(MOCK_ION_BINARY_VALUES)
    self.assertEqual(next(read_ahead_cursor), updated_result)
    mock_are_there_more_results.assert_called_once_with()
    mock_next_page.assert_called_once_with()
    mock_value_holder_to_ion_value.assert_called_once_with(updated_result)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.cursor.read_ahead_cursor.ReadAheadCursor._next_page')
@patch('pyqldb.communication.session_client.SessionClient')
def test_next_when_next_page_returns_empty_values_and_none_token(self, mock_session, mock_next_page, mock_executor):
    """If paging forward yields an empty final page (no token), next() should raise StopIteration."""
    mock_session.return_value = None

    def next_page():
        # Final page: no values and no continuation token.
        read_ahead_cursor._page = {'NextPageToken': None, 'Values': []}
        read_ahead_cursor._index = 0

    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    # Exhaust the current page so next() is forced to page forward.
    read_ahead_cursor._index = len(MOCK_ION_BINARY_VALUES)
    mock_next_page.side_effect = next_page
    self.assertRaises(StopIteration, next, read_ahead_cursor)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_next_with_next_page_returns_empty_values_and_not_none_token(self, mock_session, mock_executor):
    """Empty pages that still carry a token should be skipped; iteration stops at the empty final page."""
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    # Queue two pre-fetched pages: an empty page with a token, then the empty final page.
    read_ahead_cursor._queue = Queue()
    read_ahead_cursor._queue.put({'Page': {'NextPageToken': 'token', 'Values': []}})
    read_ahead_cursor._queue.put({'Page': {'NextPageToken': None, 'Values': []}})
    read_ahead_cursor._index = len(MOCK_ION_BINARY_VALUES)
    self.assertRaises(StopIteration, next, read_ahead_cursor)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_close(self, mock_session, mock_executor):
    """close() should flip the cursor's _is_open flag to False."""
    mock_session.return_value = None
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    read_ahead_cursor.close()
    self.assertFalse(read_ahead_cursor._is_open)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_are_there_more_results(self, mock_session, mock_executor):
    """_are_there_more_results is True while a page token remains, False once token is gone
    and the read-ahead queue is empty."""
    mock_session.return_value = None
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    # A non-None token means more server-side results exist.
    read_ahead_cursor._page = {'NextPageToken': 'token', 'Values': []}
    self.assertTrue(read_ahead_cursor._are_there_more_results())
    # No token and an empty queue: nothing left anywhere.
    read_ahead_cursor._page = {'NextPageToken': None, 'Values': []}
    read_ahead_cursor._queue = Queue()
    self.assertFalse(read_ahead_cursor._are_there_more_results())
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
@patch('pyqldb.cursor.read_ahead_cursor.Queue')
def test_populate_queue(self, mock_queue, mock_session, mock_executor):
    """_populate_queue should fetch the next page from the session (using the first page's
    token) and enqueue the result with a short put-timeout."""
    mock_session.return_value = None
    mock_queue.return_value = mock_queue
    mock_session._fetch_page.return_value = MOCK_FETCH_PAGE_RESULT_WITHOUT_TOKEN
    # A non-full queue lets the background loop enqueue immediately.
    mock_queue.full.return_value = False
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    read_ahead_cursor._queue = mock_queue
    read_ahead_cursor._populate_queue()
    next_page_token = MOCK_STATEMENT_RESULT.get('FirstPage').get('NextPageToken')
    mock_session._fetch_page.assert_called_once_with(MOCK_TRANSACTION_ID, next_page_token)
    # The 0.05s timeout keeps the producer responsive to cursor shutdown.
    mock_queue.put.assert_called_once_with(MOCK_FETCH_PAGE_RESULT_WITHOUT_TOKEN, timeout=0.05)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.cursor.read_ahead_cursor.logger.debug')
@patch('pyqldb.communication.session_client.SessionClient')
def test_populate_queue_client_error(self, mock_session, mock_logger_debug, mock_executor):
    """If fetching a page raises ClientError, _populate_queue should log it, drain the
    queue, and enqueue the error itself so the consumer re-raises it."""
    mock_logger_debug.return_value = None
    mock_session.return_value = None
    mock_session._fetch_page.side_effect = ClientError(MOCK_CLIENT_ERROR_MESSAGE, MOCK_MESSAGE)
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    # Pre-fill a size-1 queue to prove stale entries are evicted for the error.
    read_ahead_cursor._queue = Queue(1)
    read_ahead_cursor._queue.put('value to be removed')
    read_ahead_cursor._populate_queue()
    mock_logger_debug.assert_called_once()
    self.assertIsInstance(read_ahead_cursor._queue.get(), ClientError)
    self.assertEqual(read_ahead_cursor._queue.qsize(), 0)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.cursor.read_ahead_cursor.logger.debug')
@patch('pyqldb.communication.session_client.SessionClient')
def test_populate_queue_result_closed_error(self, mock_session, mock_logger_debug, mock_executor):
    """If the cursor is closed mid-fetch, _populate_queue should enqueue a
    ResultClosedError (replacing any stale entry) for the consumer to raise."""
    def close_parent_txn(txn_id, token):
        # Simulate the parent transaction closing while a fetch is in flight.
        read_ahead_cursor._is_open = False
        return MOCK_STATEMENT_RESULT
    mock_logger_debug.return_value = None
    mock_session.return_value = None
    mock_session._fetch_page.side_effect = close_parent_txn
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    # Pre-fill a size-1 queue to prove stale entries are evicted for the error.
    read_ahead_cursor._queue = Queue(1)
    read_ahead_cursor._queue.put('value to be removed')
    read_ahead_cursor._populate_queue()
    self.assertEqual(mock_logger_debug.call_count, 2)
    self.assertIsInstance(read_ahead_cursor._queue.get(), ResultClosedError)
    self.assertEqual(read_ahead_cursor._queue.qsize(), 0)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_read_ahead_queue_with_query_stats(self, mock_session, mock_executor):
    """Query statistics (read IOs / write IOs / timing) should accumulate only as pages
    are *consumed*, even though read-ahead pre-fetches pages into the queue earlier."""
    # Three pages, each contributing stats (1, 2, 3); only the last has no token.
    mock_statement_result_1 = generate_statement_result(1, 2, 3, MOCK_TOKEN, True)
    mock_statement_result_2 = generate_statement_result(1, 2, 3, MOCK_TOKEN, False)
    mock_statement_result_3 = generate_statement_result(1, 2, 3, None, False)

    def fetch_page(txn_id, token):
        # Hand out page 2 then page 3 on successive fetches.
        statement_results = [mock_statement_result_2, mock_statement_result_3]
        statement_result = statement_results[fetch_page.page_num]
        fetch_page.page_num += 1
        return statement_result
    fetch_page.page_num = 0
    mock_session.return_value = None
    mock_session._fetch_page.side_effect = fetch_page
    mock_read_ahead = 3
    read_ahead_cursor = ReadAheadCursor(mock_statement_result_1, mock_session, MOCK_TRANSACTION_ID,
                                        mock_read_ahead, mock_executor)
    read_ahead_cursor._value_holder_to_ion_value = MagicMock(name='_value_holder_to_ion_value')
    read_ahead_cursor._populate_queue()
    # Queue should be populated with the next two pages
    self.assertEqual(read_ahead_cursor._queue.qsize(), mock_read_ahead - 1)
    # Even if queue is populated with the next two pages, query stats should only total the first page here
    assert_query_stats(self, read_ahead_cursor, 1, 2, 3)
    read_ahead_cursor._next_page()
    # Query stats should only total the first page and second page here
    assert_query_stats(self, read_ahead_cursor, 2, 4, 6)
    read_ahead_cursor._next_page()
    # Query stats should total all three pages
    assert_query_stats(self, read_ahead_cursor, 3, 6, 9)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_next_page(self, mock_session, mock_executor):
    """_next_page should take the next pre-fetched result off the queue, install its
    page as current, and reset the value index to 0."""
    mock_page1 = {'Values': [{'IonBinary': 1}, {'IonBinary': 2}], 'NextPageToken': 'token'}
    mock_page2 = {'Values': [{'IonBinary': 2}, {'IonBinary': 3}], 'NextPageToken': None}
    mock_statement_result1 = {'Page': mock_page1}
    mock_statement_result2 = {'Page': mock_page2}
    mock_session.return_value = None
    mock_session._fetch_page.return_value = mock_statement_result2
    read_ahead_cursor = ReadAheadCursor(mock_statement_result1, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    # Pre-load the queue so _next_page reads from it rather than the session.
    read_ahead_cursor._queue = Queue()
    read_ahead_cursor._queue.put(mock_statement_result2)
    read_ahead_cursor._next_page()
    self.assertEqual(read_ahead_cursor._page, mock_page2)
    self.assertEqual(read_ahead_cursor._index, 0)
@patch('concurrent.futures.thread.ThreadPoolExecutor')
@patch('pyqldb.communication.session_client.SessionClient')
def test_next_page_client_error(self, mock_session, mock_executor):
    """A ClientError enqueued by the read-ahead thread should be re-raised when the
    consumer calls _next_page."""
    mock_session.return_value = None
    mock_session._fetch_page.return_value = {'Page': MOCK_STATEMENT_RESULT}
    read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                        MOCK_READ_AHEAD, mock_executor)
    read_ahead_cursor._queue = Queue()
    # Errors are propagated through the queue from producer to consumer.
    read_ahead_cursor._queue.put(ClientError(MOCK_CLIENT_ERROR_MESSAGE, MOCK_MESSAGE))
    self.assertRaises(ClientError, read_ahead_cursor._next_page)
def test_value_holder_to_ion_value(self):
    """_value_holder_to_ion_value should decode the 'IonBinary' payload of a value
    holder back into the original Ion value."""
    ion_value = 'IonValue'
    value_holder = {'IonBinary': dumps(ion_value)}
    result = ReadAheadCursor._value_holder_to_ion_value(value_holder)
    self.assertEqual(result, ion_value)
| 18,232 | 5,876 |
from unittest import TestCase
from q2 import *
class TestQuestao2Busca(TestCase):
    """Tests for the plain displacement search (``buscar_deslocamentos``).

    Naming convention: test_quando_..._deve_retornar_...
    ("test_when_..._should_return_...").

    Methods inherited from TestCase:
        setUp          -- runs before each test
        tearDown       -- runs after each test
        setUpClass     -- runs once before all tests
        tearDownClass  -- runs once after all tests
    """

    def setUp(self):
        # Printed before every test so console output identifies this suite.
        print("\nTestando Questao2 Busca")

    def Teste(self, lista, deslocamento, saida_esperada):
        # Helper: run the search on (lista, deslocamento) and compare the
        # result against saida_esperada ("expected output").
        print(f"Busca d. - teste: lista: {lista}, deslocamento: {deslocamento}; saida esperada: {saida_esperada}; ", end="")
        saida = buscar_deslocamentos(lista, deslocamento)
        # saida, pares = buscar_deslocamentos(lista, deslocamento)
        print(f"saida:{saida}")
        # print(f"saida:{saida}, pares: {pares}")
        self.assertEqual(saida, saida_esperada)

    def teste_Busca_quando_parametros_sao_do_exemplo1_deve_retornar_3(self):
        # Example 1 from the assignment statement: expected answer is 3.
        n = [1, 5, 3, 4, 2]
        x = 2
        saida_esperada = 3
        self.Teste(n, x, saida_esperada)
class TestQuestao2BuscaMelhorada(TestCase):
    """Tests for the improved displacement search (``buscar_deslocamentos_melhorado``).

    Naming convention: test_quando_..._deve_retornar_...
    ("test_when_..._should_return_...").

    Methods inherited from TestCase:
        setUp          -- runs before each test
        tearDown       -- runs after each test
        setUpClass     -- runs once before all tests
        tearDownClass  -- runs once after all tests
    """

    def setUp(self):
        # Printed before every test so console output identifies this suite.
        print("\nTestando Questao2 Busca Melhorada")

    def Teste(self, lista, deslocamento, saida_esperada):
        # Helper: the improved search returns (count, pairs); only the count
        # is asserted against saida_esperada ("expected output").
        print(f"Busca d. Melhor - teste: lista: {lista}, deslocamento: {deslocamento}; saida esperada: {saida_esperada}; ", end="")
        saida, pares = buscar_deslocamentos_melhorado(lista, deslocamento)
        print(f"saida:{saida}, pares: {pares}")
        self.assertEqual(saida, saida_esperada)

    def teste_Busca_quando_parametros_sao_do_exemplo1_deve_retornar_3(self):
        # Example 1 from the assignment statement: expected answer is 3.
        n = [1, 5, 3, 4, 2]
        x = 2
        saida_esperada = 3
        self.Teste(n, x, saida_esperada)
| 2,838 | 1,070 |
from datetime import datetime
import logging
import requests
from hashlib import md5
from time import sleep
from apscheduler.schedulers.background import BlockingScheduler,BackgroundScheduler
import kzconfig
import json
# Append all INFO+ messages to log.log (UTF-8) with a timestamped format.
logging.basicConfig(
    handlers=[logging.FileHandler('log.log', 'a', 'utf-8')],
    level=logging.INFO, format='%(asctime)s %(levelname)s - %(message)s'
)
# Module-wide logger for the mask ("kouzhao") reservation script.
logger = logging.getLogger("kouzhao")
def token() -> str:
    """Return today's 8-character endpoint fragment.

    The reservation endpoint path changes daily: it is characters 7..14
    (inclusive) of the hex MD5 digest of the current date formatted as
    "%Y*%m-%d" with a fixed "_Qwe" suffix appended.
    """
    salted_date = datetime.now().strftime("%Y*%m-%d") + "_Qwe"
    digest = md5(salted_date.encode("utf-8")).hexdigest()
    return digest[7:15]
# Spoof a mobile Chrome user agent so the request looks like a phone browser.
headers = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/82.0.4078.0 Mobile Safari/537.36"
}
def miaosha_kz():
    """Attempt the mask flash-sale ("miaosha") reservation.

    Posts the configured form data to the date-derived endpoint, retrying
    up to kzconfig.MAX_TRY_TIME times with kzconfig.SLEEP_TIME seconds
    between attempts. Stops early on success, on a 404, or when the API
    reports the current time slot is sold out.
    """
    form_data = kzconfig.form_data
    # form_data = {}
    sleep(1)
    url = "https://kzapi.****.gov.cn/kouzhao/sq/miaosha/" + token()
    logger.info("today url is %s" % url)
    for i in range(1, kzconfig.MAX_TRY_TIME + 1):
        # "Starting attempt %d"
        logger.info("开始第 %d 此尝试 " % (i))
        res = requests.post(url=url, data=form_data, headers=headers, timeout=10)
        json_data = res.json()
        logger.info(json.dumps(json_data, ensure_ascii=False))
        # responseFlag == "1" signals a successful reservation.
        shop_res = json_data.get("responseFlag", "0") == "1"
        if shop_res:
            # "Attempt %d succeeded"
            logger.info("第 %d 抢购成功" % i)
            break
        elif json_data.get("responseMessage", "") == "您好,当前时间段口罩已经约完,建议关注后续的预约活动":
            # API says this time slot is already fully booked — stop retrying.
            logger.info("当前时间段口罩已经约完")
            break
        elif json_data.get("status", 200) == 404:
            # Endpoint not (yet) valid — no point retrying.
            break
        # "Attempt %d failed, retrying in %f seconds"
        logger.info("第 %d 抢购失败 %f 秒后再次尝试" % (i, kzconfig.SLEEP_TIME))
        sleep(kzconfig.SLEEP_TIME)
    # "Purchase run finished"
    logger.info("抢购结束")
def main():
    """Schedule miaosha_kz every 7 days at 19:00:01 (configured timezone) and block."""
    scheduler = BlockingScheduler()
    scheduler.add_job(miaosha_kz, 'cron',
                      day="*/7", hour='19', minute="0", second='1',
                      timezone=kzconfig.cst_tz)
    # BlockingScheduler.start() never returns; it runs jobs in the foreground.
    scheduler.start()
# Entry point: start the scheduler when run as a script.
if __name__ == '__main__':
    main()
"""
Copyright (c) 2020 Philippe Schmouker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
from threading import Event, Thread
from typing import Optional
from .decorators import abstract
#=============================================================================
class RepeatedTimer( Thread ):
    """The class of repeated timers.

    ===-------------------------------------------------===
    CAUTION:
    When running this code over a non RTOS (Real-Time
    Operating System), there is NO WAY to ensure that
    periods of time will be correctly respected. It MAY
    and it WILL be that counts of milliseconds will not be
    respected by the underlying operating system. There-
    fore, you SHOULD NOT USE THIS CODE FOR APPLICATIONS
    DEALING WITH PEOPLE SAFETY OR FOR ANY OTHER KIND OF
    APPLICATION FOR WHICH REAL TIME OPERATING IS MANDATORY
    IF THIS CODE IS NOT RUN OVER A TRUE RTOS.
    Notice: MS-Windows is NOT an RTOS. Most versions of
    Linux are not either, which includes MacOS versions too.
    ===-------------------------------------------------===

    A repeated timer is a specific timer which repeats its
    processing function after a fixed period of time has
    elapsed.

    Repeated timers must be explicitly started with a call
    to their method '.start()'. They cannot be started
    twice, since they inherit from threading.Thread.

    Repeated timers can be definitively stopped by calling
    their method '.stop()'.

    Inheriting classes must implement method '.process()'.
    This method contains the whole stuff that is to be
    processed every time the watchdog is "awaken".

    Users are encouraged to add attributes to this class.
    These will then be accessible into method '.process()'
    when they might be needed for this processing.
    """
    #-------------------------------------------------------------------------
    def __init__(self, period_s: float,
                       name: Optional[str] = None,
                       *args, **kwargs) -> None:
        '''Constructor.

        Args:
            period_s: float
                The interval of time, expressed as a fract-
                ional value of seconds, to wait before the
                timer will repeat.
            name: str
                The name of this timer. May be None, in
                which case the underlying OS will give a
                default, unique one to it. Defaults to None.
            *args, **kwargs:
                Arguments to be passed to the processing
                function.
        '''
        # Signalled by .stop(); .run() exits its loop once this event is set.
        self.stop_event = Event()
        self.set_period( period_s )
        # Stored so '.process()' implementations can access them.
        self.args = args
        self.kwargs = kwargs
        super().__init__( name=name )

    #-------------------------------------------------------------------------
    @abstract
    def process(self) -> None:
        '''The instructions to be run when timer is repeated.

        'self.args' and 'self.kwargs' are available in
        this method.

        Raises:
            NotImplementedError: This method has not been
                implemented in inheriting class.
        '''
        ...

    #-------------------------------------------------------------------------
    def run(self) -> None:
        '''This method is automatically called by method '.start()'.

        Notice: method '.start()' is inherited from class
        'threading.Thread'.
        '''
        self.stop_event.clear()  ## just to be sure that associate internal flag is set to False
        # Event.wait() returns False on timeout (period elapsed -> repeat)
        # and True once .stop() has set the event (-> exit the loop).
        while not self.stop_event.wait( self.period_s ):
            self.process()

    #-------------------------------------------------------------------------
    def set_period(self, period_s: float) -> None:
        '''Modifies/sets the period of time used for repeating this timer.

        Args:
            period_s: float
                The interval of time, expressed as a fract-
                ional value of seconds, to wait before the
                timer will repeat.
        '''
        assert period_s > 0.0
        self.period_s = period_s

    #-------------------------------------------------------------------------
    def stop(self) -> None:
        '''Definitively stops this repeated timer.
        '''
        self.stop_event.set()
| 5,649 | 1,530 |
from Defer import Defer
# Demonstrates Go-style deferred cleanup: each call to ``defer`` registers a
# callback that runs when the ``with`` block exits (presumably in reverse
# registration order, like Go's defer — confirm against the Defer class).
with Defer() as defer:
    print("enter the room")
    defer(lambda: print("leave the room"))
    print("prepare printer")
    defer(lambda: print("close printer"))
    print("start printing")
    defer(lambda: print("end printing"))
    print(3)
    print(4)
    print(5)
    print("LONG LONG TASKS")
| 329 | 110 |
import copy
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.utils.safestring import mark_safe
from django.views.decorators.http import require_POST
from corehq.apps.commtrack.views import BaseCommTrackManageView
from corehq.apps.domain.decorators import domain_admin_required, login_and_domain_required
from corehq.apps.hqwebapp.utils import get_bulk_upload_form
from corehq.apps.locations.models import Location
from corehq.apps.locations.forms import LocationForm
from corehq.apps.locations.util import load_locs_json, location_hierarchy_config, dump_locations
from corehq.apps.commtrack.models import LocationType, Product, SupplyPointCase
from corehq.apps.commtrack.util import unicode_slug
from corehq.apps.facilities.models import FacilityRegistry
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.contrib import messages
from couchdbkit import ResourceNotFound
import urllib
import json
from django.utils.translation import ugettext as _, ugettext_noop
from dimagi.utils.decorators.memoized import memoized
from custom.openlmis.tasks import bootstrap_domain_task
from soil.util import expose_download, get_download_context
from corehq.apps.commtrack.tasks import import_locations_async
from couchexport.models import Format
from corehq.apps.consumption.shortcuts import get_default_monthly_consumption
@domain_admin_required
def default(request, domain):
    """Redirect the bare locations URL to the locations list view."""
    return HttpResponseRedirect(reverse(LocationsListView.urlname, args=[domain]))
class BaseLocationView(BaseCommTrackManageView):
    """Base for location views: injects the location-type hierarchy and the
    v0.3 location API root into every template context."""

    @property
    def main_context(self):
        context = super(BaseLocationView, self).main_context
        context.update({
            'hierarchy': location_hierarchy_config(self.domain),
            # Root URL of the tastypie location resource used by the JS tree widget.
            'api_root': reverse('api_dispatch_list', kwargs={'domain': self.domain,
                                                             'resource_name': 'location',
                                                             'api_name': 'v0.3'}),
        })
        return context
class LocationsListView(BaseLocationView):
    """Browsable tree of the domain's locations; '?selected=<id>' pre-expands a node."""
    urlname = 'manage_locations'
    page_title = ugettext_noop("Locations")
    template_name = 'locations/manage/locations.html'

    @property
    def page_context(self):
        selected_id = self.request.GET.get('selected')
        return {
            'selected_id': selected_id,
            # JSON tree of locations, expanded down to the selected node.
            'locations': load_locs_json(self.domain, selected_id),
        }
class LocationSettingsView(BaseCommTrackManageView):
    """View and edit the domain's location types (name, code, parents, admin flag)."""
    urlname = 'location_settings'
    page_title = ugettext_noop("Location Types")
    template_name = 'locations/settings.html'

    @property
    def page_context(self):
        return {
            'settings': self.settings_context,
        }

    @property
    def settings_context(self):
        return {
            'loc_types': [self._get_loctype_info(l) for l in self.domain_object.commtrack_settings.location_types],
        }

    def _get_loctype_info(self, loctype):
        # Serialize one LocationType for the settings page JS.
        return {
            'name': loctype.name,
            'code': loctype.code,
            # Empty-string parent is normalized to None for display.
            'allowed_parents': [p or None for p in loctype.allowed_parents],
            'administrative': loctype.administrative,
        }

    def post(self, request, *args, **kwargs):
        """Replace the domain's location types with the posted JSON payload."""
        payload = json.loads(request.POST.get('json'))

        def mk_loctype(loctype):
            # None parent is normalized back to empty string for storage.
            loctype['allowed_parents'] = [p or '' for p in loctype['allowed_parents']]
            # Codes must be slugs; silently fix and warn the user if not.
            cleaned_code = unicode_slug(loctype['code'])
            if cleaned_code != loctype['code']:
                err = _(
                    'Location type code "{code}" is invalid. No spaces or special characters are allowed. '
                    'It has been replaced with "{new_code}".'
                )
                messages.warning(request, err.format(code=loctype['code'], new_code=cleaned_code))
                loctype['code'] = cleaned_code
            return LocationType(**loctype)

        #TODO add server-side input validation here (currently validated on client)
        self.domain_object.commtrack_settings.location_types = [mk_loctype(l) for l in payload['loc_types']]
        self.domain_object.commtrack_settings.save()
        return self.get(request, *args, **kwargs)
class NewLocationView(BaseLocationView):
    """Create a new location, optionally under a '?parent=<id>' parent."""
    urlname = 'create_location'
    page_title = ugettext_noop("New Location")
    template_name = 'locations/manage/location.html'

    @property
    def parent_pages(self):
        # Breadcrumb back to the locations list.
        return [{
            'title': LocationsListView.page_title,
            'url': reverse(LocationsListView.urlname, args=[self.domain]),
        }]

    @property
    def parent_id(self):
        return self.request.GET.get('parent')

    @property
    @memoized
    def location(self):
        # Unsaved Location; EditLocationView overrides this to load an existing one.
        return Location(domain=self.domain, parent=self.parent_id)

    @property
    def consumption(self):
        # New locations have no consumption data yet; EditLocationView overrides.
        return None

    @property
    @memoized
    def metadata(self):
        # Copy so template-side mutation cannot touch the document.
        return copy.copy(dict(self.location.metadata))

    @property
    @memoized
    def location_form(self):
        if self.request.method == 'POST':
            return LocationForm(self.location, self.request.POST)
        return LocationForm(self.location)

    @property
    def page_context(self):
        return {
            'form': self.location_form,
            'location': self.location,
            'consumption': self.consumption,
            'metadata': self.metadata
        }

    def post(self, request, *args, **kwargs):
        """Save the location and bounce back to the list with it selected."""
        if self.location_form.is_valid():
            self.location_form.save()
            messages.success(request, _('Location saved!'))
            return HttpResponseRedirect('%s?%s' % (
                reverse(LocationsListView.urlname, args=[self.domain]),
                urllib.urlencode({'selected': self.location_form.location._id})
            ))
        # Invalid form: re-render the page with errors.
        return self.get(request, *args, **kwargs)
class EditLocationView(NewLocationView):
    """Edit an existing location (loaded from the 'loc_id' URL kwarg)."""
    urlname = 'edit_location'
    page_title = ugettext_noop("Edit Location")

    @property
    def location_id(self):
        return self.kwargs['loc_id']

    @property
    @memoized
    def location(self):
        try:
            return Location.get(self.location_id)
        except ResourceNotFound:
            raise Http404()

    @property
    @memoized
    def supply_point(self):
        # Supply point case linked to this location; may be None.
        return SupplyPointCase.get_by_location(self.location)

    @property
    def consumption(self):
        """(product name, default monthly consumption) pairs for display."""
        consumptions = []
        for product in Product.by_domain(self.domain):
            consumption = get_default_monthly_consumption(
                self.domain,
                product._id,
                self.location.location_type,
                self.supply_point._id if self.supply_point else None,
            )
            if consumption:
                consumptions.append((product.name, consumption))
        return consumptions

    @property
    def page_name(self):
        # mark_safe: the <small> tag must render as HTML in the page header.
        return mark_safe(_("Edit {name} <small>{type}</small>").format(
            name=self.location.name, type=self.location.location_type
        ))

    @property
    def page_url(self):
        return reverse(self.urlname, args=[self.domain, self.location_id])
class BaseSyncView(BaseLocationView):
    """Base for external-system sync settings pages. Subclasses set 'source'
    (the commtrack settings attribute prefix) and 'sync_urlname'."""
    source = ""
    sync_urlname = None

    @property
    def page_context(self):
        return {
            'settings': self.settings_context,
            'source': self.source,
            'sync_url': self.sync_urlname
        }

    @property
    def settings_context(self):
        # Settings live on commtrack_settings under '<source>_config'.
        key = "%s_config" % self.source
        if hasattr(self.domain_object.commtrack_settings, key):
            return {
                "source_config": getattr(self.domain_object.commtrack_settings, key)._doc,
            }
        else:
            return {}

    def post(self, request, *args, **kwargs):
        """Apply posted 'source_config' values onto the '<source>_config' document."""
        payload = json.loads(request.POST.get('json'))
        #TODO add server-side input validation here (currently validated on client)
        key = "%s_config" % self.source
        if "source_config" in payload:
            for item in payload['source_config']:
                if hasattr(self.domain_object.commtrack_settings, key):
                    setattr(
                        getattr(self.domain_object.commtrack_settings, key),
                        item,
                        payload['source_config'][item]
                    )
        self.domain_object.commtrack_settings.save()
        return self.get(request, *args, **kwargs)
class FacilitySyncView(BaseSyncView):
    """Settings page for syncing facilities from OpenLMIS."""
    urlname = 'sync_facilities'
    sync_urlname = 'sync_openlmis'
    page_title = ugettext_noop("OpenLMIS")
    template_name = 'locations/facility_sync.html'
    source = 'openlmis'
class EditLocationHierarchy(BaseLocationView):
    """Static page for viewing/editing the location hierarchy."""
    urlname = 'location_hierarchy'
    page_title = ugettext_noop("Location Hierarchy")
    template_name = 'locations/location_hierarchy.html'
class LocationImportStatusView(BaseLocationView):
    """Progress page for an async location import, polled via soil."""
    urlname = 'location_import_status'
    page_title = ugettext_noop('Location Import Status')
    template_name = 'hqwebapp/soil_status_full.html'

    def get(self, request, *args, **kwargs):
        context = super(LocationImportStatusView, self).main_context
        context.update({
            'domain': self.domain,
            'download_id': kwargs['download_id'],
            # JS polls this endpoint for task progress.
            'poll_url': reverse('location_importer_job_poll', args=[self.domain, kwargs['download_id']]),
            'title': _("Location Import Status"),
            'progress_text': _("Importing your data. This may take some time..."),
            'error_text': _("Problem importing data! Please try again or report an issue."),
        })
        return render(request, self.template_name, context)

    # NOTE(review): sibling views define page_url as a @property; this one is a
    # plain method — confirm whether the base class calls it either way.
    def page_url(self):
        return reverse(self.urlname, args=self.args, kwargs=self.kwargs)
class LocationImportView(BaseLocationView):
    """Upload an Excel file of locations and kick off the async import task."""
    urlname = 'location_import'
    page_title = ugettext_noop('Upload Locations from Excel')
    template_name = 'locations/manage/import.html'

    @property
    def page_context(self):
        context = {
            'bulk_upload': {
                "download_url": reverse(
                    "location_export", args=(self.domain,)),
                "adjective": _("location"),
                "plural_noun": _("locations"),
            },
            "manage_consumption": self.domain_object.commtrack_settings.individual_consumption_defaults,
        }
        context.update({
            'bulk_upload_form': get_bulk_upload_form(context),
        })
        return context

    def post(self, request, *args, **kwargs):
        """Validate the upload, stash it in soil, start the celery task, redirect to status."""
        upload = request.FILES.get('bulk_upload_file')
        if not upload:
            messages.error(request, _('no file uploaded'))
            return self.get(request, *args, **kwargs)
        if not args:
            messages.error(request, _('no domain specified'))
            return self.get(request, *args, **kwargs)

        domain = args[0]

        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(),
                                   expiry=1*60*60)
        task = import_locations_async.delay(
            domain,
            file_ref.download_id,
        )
        # Link the task to the download so the status page can poll it.
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(
                LocationImportStatusView.urlname,
                args=[domain, file_ref.download_id]
            )
        )
@login_and_domain_required
def location_importer_job_poll(request, domain, download_id, template="hqwebapp/partials/download_status.html"):
    """AJAX endpoint: render the current status partial for an import task."""
    context = get_download_context(download_id, check_state=True)
    context.update({
        'on_complete_short': _('Import complete.'),
        'on_complete_long': _('Location importing has finished'),
    })
    return render(request, template, context)
@login_and_domain_required
def location_export(request, domain):
    """Stream all of the domain's locations as an xlsx attachment.

    '?include_consumption=true' adds consumption columns to the dump.
    """
    include_consumption = request.GET.get('include_consumption') == 'true'
    # NOTE: 'mimetype' is the pre-Django-1.7 keyword (renamed 'content_type' later).
    response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="locations.xlsx"'
    dump_locations(response, domain, include_consumption)
    return response
@domain_admin_required # TODO: will probably want less restrictive permission
def location_edit(request, domain, loc_id=None):
    """Function-based create/edit view for a location.

    With loc_id: edit that location (404 if missing); without: create a new
    one under the optional '?parent=<id>'. Appears to predate the class-based
    New/EditLocationView above, which duplicate this logic.
    """
    parent_id = request.GET.get('parent')
    if loc_id:
        try:
            location = Location.get(loc_id)
        except ResourceNotFound:
            raise Http404()
    else:
        location = Location(domain=domain, parent=parent_id)

    if request.method == "POST":
        form = LocationForm(location, request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Location saved!')
            # Redirect to the list with the saved location pre-selected.
            return HttpResponseRedirect('%s?%s' % (
                reverse('manage_locations', kwargs={'domain': domain}),
                urllib.urlencode({'selected': form.location._id})
            ))
    else:
        form = LocationForm(location)

    context = {
        'domain': domain,
        'api_root': reverse('api_dispatch_list', kwargs={'domain': domain,
                                                         'resource_name': 'location',
                                                         'api_name': 'v0.3'}),
        'location': location,
        'hierarchy': location_hierarchy_config(domain),
        'form': form,
    }
    return render(request, 'locations/manage/location.html', context)
@domain_admin_required
@require_POST
def sync_facilities(request, domain):
    """Mirror every FacilityRegistry (and its facilities) into Locations.

    Creates the 'Facility Registry'/'Facility' location types on first run,
    upserts a Location per registry and per facility, and deletes Locations
    whose remote counterpart has disappeared (mark-and-sweep via '_seen').
    """
    commtrack_settings = request.project.commtrack_settings

    # create Facility Registry and Facility LocationTypes if they don't exist
    if not any(lt.name == 'Facility Registry'
               for lt in commtrack_settings.location_types):
        commtrack_settings.location_types.extend([
            LocationType(name='Facility Registry', allowed_parents=['']),
            LocationType(name='Facility', allowed_parents=['Facility Registry'])
        ])
        commtrack_settings.save()

    # Existing registry locations, keyed by the registry URL (external_id).
    registry_locs = dict((l.external_id, l) for l in
                         Location.filter_by_type(domain, 'Facility Registry'))

    # sync each registry and add/update Locations for each Facility
    for registry in FacilityRegistry.by_domain(domain):
        registry.sync_with_remote()

        try:
            registry_loc = registry_locs[registry.url]
        except KeyError:
            registry_loc = Location(
                domain=domain, location_type='Facility Registry',
                external_id=registry.url)
        registry_loc.name = registry.name
        registry_loc.save()
        # Mark as present remotely so the sweep below keeps it.
        registry_loc._seen = True

        # Existing facility locations under this registry, keyed by remote uuid.
        facility_locs = dict((l.external_id, l) for l in
                             Location.filter_by_type(domain, 'Facility', registry_loc))

        for facility in registry.get_facilities():
            uuid = facility.data['uuid']
            try:
                facility_loc = facility_locs[uuid]
            except KeyError:
                facility_loc = Location(
                    domain=domain, location_type='Facility', external_id=uuid,
                    parent=registry_loc)
            facility_loc.name = facility.data.get('name', 'Unnamed Facility')
            facility_loc.save()
            facility_loc._seen = True

        # Sweep: delete facilities no longer reported by this registry.
        for id, f in facility_locs.iteritems():
            if not hasattr(f, '_seen'):
                f.delete()

    # Sweep: delete registry locations with no matching FacilityRegistry.
    for id, r in registry_locs.iteritems():
        if not hasattr(r, '_seen'):
            r.delete()

    return HttpResponse('OK')
@domain_admin_required
@require_POST
def sync_openlmis(request, domain):
    """Fire-and-forget: queue the OpenLMIS bootstrap task for this domain."""
    # todo: error handling, if we care.
    bootstrap_domain_task.delay(domain)
    return HttpResponse('OK')
| 15,807 | 4,461 |
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
# fetch values from package.xml (catkin fills in name/version/etc.)
setup_args = generate_distutils_setup(
    requires=['tensorflow', 'numpy', 'opencv'],
    packages=['cnn_bridge_main'],
    package_dir={'': 'src'},
)
setup(**setup_args)
| 363 | 125 |
# =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: Nhut-Nam Le (Tich Phan Suy Rong)
# © 2020
import unittest
"""
Given a non-empty string like "Code" return a string like "CCoCodCode".
string_splosion('Code') → 'CCoCodCode'
string_splosion('abc') → 'aababc'
string_splosion('ab') → 'aab'
"""
def string_splosion(str):
    """Return every prefix of *str* concatenated in increasing length order.

    string_splosion('Code') -> 'CCoCodCode'; the empty string yields ''.

    NOTE(review): the parameter shadows the builtin ``str``; the name is kept
    for interface compatibility with existing callers.
    """
    prefixes = (str[:end] for end in range(1, len(str) + 1))
    return ''.join(prefixes)
class TestStringSplosion(unittest.TestCase):
    """Unit tests for string_splosion: typical words, short inputs and the
    single-character edge case."""

    def test_case_00(self):
        self.assertEqual(string_splosion('Code'), 'CCoCodCode')

    def test_case_01(self):
        self.assertEqual(string_splosion('abc'), 'aababc')

    def test_case_02(self):
        self.assertEqual(string_splosion('ab'), 'aab')

    def test_case_03(self):
        # single character: result equals the input
        self.assertEqual(string_splosion('x'), 'x')

    def test_case_04(self):
        self.assertEqual(string_splosion('fade'), 'ffafadfade')

    def test_case_05(self):
        self.assertEqual(string_splosion('There'), 'TThTheTherThere')

    def test_case_06(self):
        self.assertEqual(string_splosion('Kitten'), 'KKiKitKittKitteKitten')

    def test_case_07(self):
        self.assertEqual(string_splosion('Bye'), 'BByBye')

    def test_case_08(self):
        self.assertEqual(string_splosion('Good'), 'GGoGooGood')

    def test_case_09(self):
        self.assertEqual(string_splosion('Bad'), 'BBaBad')

if __name__ == "__main__":
    unittest.main()
| 1,642 | 586 |
# centralized file where robot parameters can be customized
MIN_NUM_MODULES = 2   # smallest number of modules to consider
MAX_NUM_MODULES = 10  # largest number of modules to consider
# every module count to sweep, inclusive of both bounds
N_MODULES = list(range(MIN_NUM_MODULES, MAX_NUM_MODULES+1))
# candidate stiffness values (units not stated here -- TODO confirm)
STIFF_TABLE = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
#STIFF_TABLE = [0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4]
| 334 | 222 |
# Copyright 2016 Streampunk Media Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GYP build description for the 'cinecoder' native Node.js addon
# (consumed by node-gyp; Python-literal syntax, '#' comments allowed).
{
  "targets": [
    {
      "target_name": "cinecoder",
      "sources": [ "src/cinecoder.cc",
                   "src/Decoder.cc",
                   "src/Encoder.cc",
                   "src/DecoderCinegy.cc",
                   "src/EncoderCinegy.cc" ],
      # NAN header path is resolved at gyp time by invoking node
      "include_dirs": [ "<!(node -e \"require('nan')\")" ],
      'conditions': [
        ['OS=="linux"', {
          # re-enable RTTI and C++ exceptions (node-gyp disables both by default)
          "cflags_cc!": [
            "-fno-rtti",
            "-fno-exceptions"
          ],
          "cflags_cc": [
            "-std=c++11",
            "-fexceptions"
          ],
          "link_settings": {
            "libraries": [
            ],
            # make the addon find bundled shared libraries next to itself at runtime
            "ldflags": [
              "-L<@(module_root_dir)/build/Release",
              "-Wl,-rpath,<@(module_root_dir)/build/Release"
            ]
          },
          "copies": [
            {
              "destination": "build/Release/",
              "files": [
                # "cinegy/bin/cinecoder.so"
              ]
            }
          ]
        }],
        ['OS=="win"', {
          "variables": {
            "CinecoderRoot": "packages/Cinecoder.3.33.41.230"
          },
          "include_dirs": ["<(CinecoderRoot)/sources/"],
          "sources" : [ "<(CinecoderRoot)/sources/Cinecoder_i.c" ],
          "configurations": {
            "Release": {
              "msvs_settings": {
                # mirror the Linux flags: RTTI and exceptions on
                "VCCLCompilerTool": {
                  "RuntimeTypeInfo": "true",
                  "ExceptionHandling": 1
                }
              }
            }
          },
          "libraries": [
            "-l../<(CinecoderRoot)/runtimes/win-x64/native/release/cinecoder.lib",
            "-l../<(CinecoderRoot)/runtimes/win-x64/native/release/D2_CUDA_lib.lib"
          ],
          # runtime DLLs copied next to the built addon binary
          "copies": [
            {
              "destination": "build/Release/",
              "files": [
                "<(CinecoderRoot)/runtimes/win-x64/native/release/Cinecoder.dll",
                "<(CinecoderRoot)/runtimes/win-x64/native/release/cudart64_80.dll",
                "<(CinecoderRoot)/runtimes/win-x64/native/release/D2_CUDA_lib.dll"
              ]
            }
          ]
        }]
      ],
    }
  ]
}
| 2,751 | 894 |
# init for information_gathering
from .Evaluator import PathEvaluator, PathEvaluatorWithRadius, PathEvaluatorAlongPath, applyBudget
from .MaskedEvaluator import MaskedEvaluator
from .StochasticOptimizer import StochasticOptimizer
from .InfoField import random_field2d, random_multi_field2d
| 291 | 92 |
# createSubjectTrainingMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip)
# createSubjectTestMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, sentences_filename, sentence_counter)
# createFullMatrix(input_matrix_folder, data_name, label_name, output_matrix_path="")
import os
import shutil
import ntpath
import glob
import re
import json
import numpy as np
from numpy import genfromtxt
from datetime import datetime
from . import earray_wrapper
def moveFolderContent(indir, outdir):
    """Move every regular file (subdirectories are left in place) from
    *indir* into *outdir*.

    Both directories must already exist; existing files in *outdir* with the
    same name are overwritten by shutil.move's rename/copy semantics.
    """
    for entry in os.listdir(indir):
        # os.path.join instead of manual '/' concatenation; behaves the same
        # on POSIX and is correct on Windows too
        src = os.path.join(indir, entry)
        if not os.path.isdir(src):  # idiomatic replacement for `is False`
            shutil.move(src, os.path.join(outdir, entry))
def getFileName(path):
    """Return the final path component, tolerating a trailing separator
    (Windows-style paths handled via ntpath)."""
    directory, leaf = ntpath.split(path)
    # a trailing separator leaves `leaf` empty; fall back to the directory's base name
    return leaf if leaf else ntpath.basename(directory)
def remoteExtension(path):
    """Return *path* truncated at the FIRST '.' (so 'a.b.c' -> 'a').

    NOTE(review): the name is presumably a typo for 'removeExtension';
    kept for interface compatibility.
    """
    return path.partition('.')[0]
# works when input files are : commandlabelNREP_scores.dat
# vocfilepath contains lines as follows:
# 1 cmdlab1
# 2 cmdlab7
# 3 cmlab8
# etc...
def renameSubjectFiles(subject_name, inrootpath, outrootpath, vocfilepath):
    """Copy a subject's score files into <outrootpath>/<subject_name>, renaming
    'commandlabelNREP_scores.dat' to '<subject>_<cmdnum>_<rep...>.dat' using the
    label->number mapping read from *vocfilepath*.

    vocfilepath lines look like: '<number> <commandlabel>'.
    """
    inpath = inrootpath + '/' + subject_name
    outpath = outrootpath + '/' + subject_name
    if os.path.isdir(outpath) is False:
        os.mkdir(outpath)
    with open(vocfilepath, "r") as f:
        data = f.readlines()
    for line in data:
        words = line.split()  # words[0] = command number, words[1] = command label
        for infile in glob.glob(os.path.join(inpath, '*.*')):
            file_name = os.path.basename(infile)
            # split at digit runs: b[0] = label prefix, b[1] = first digit run
            b = re.split('(\d+)', file_name)
            if b[0] == words[1]:
                shutil.copy2(infile, outpath + '/' + subject_name + "_" + words[0] + "_" + b[1] + ".dat")
                # e.g. produces subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat
# works when input files are : SUBJ_commandlabelNREP_scores.dat
# vocfilepath contains lines as follows:
# 1 cmdlab1
# 2 cmdlab7
# 3 cmlab8
# etc...
def renameSubjectsFiles(subjects_name, inrootpath, outrootpath, vocfilepath):
    """Multi-subject variant of renameSubjectFiles: input files are named
    'SUBJ_commandlabelNREP_scores.dat'; the subject label is taken from the
    prefix before the first '_' and each file is copied into a per-subject
    output folder as '<subj>_<cmdnum>_<rep...>.dat'.

    vocfilepath lines look like: '<number> <commandlabel>'.
    """
    inpath = inrootpath + '/' + subjects_name
    with open(vocfilepath, "r") as f:
        data = f.readlines()
    for line in data:
        words = line.split()  # words[0] = command number, words[1] = command label
        for infile in glob.glob(os.path.join(inpath, '*.*')):
            file_name = os.path.basename(infile)
            id = file_name.index("_")
            subjlabel = file_name[:id]  # subject prefix before the first '_'
            if os.path.isdir(outrootpath + '/' + subjlabel) is False:
                os.mkdir(outrootpath + '/' + subjlabel)
            file_name = file_name[(id+1):]
            # split at digit runs: b[0] = label prefix, b[1] = first digit run
            b = re.split('(\d+)', file_name)
            if b[0] == words[1]:
                shutil.copy2(infile,
                             outrootpath + '/' + subjlabel + '/' + subjlabel + "_" + words[0] + "_" + b[1] + ".dat")
                # e.g. produces subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat
# works when input files are : commandlabelNREP_scores.dat
# vocfilepath contains lines as follows:
# 1 cmdlab1
# 2 cmdlab7
# 3 cmlab8
# etc...
def renameSubjectFilesJSON(subject_name, inrootpath, outrootpath, jsonvocfilepath, ext=".dat"):
    """JSON-vocabulary variant of renameSubjectFiles: the label->id mapping is
    taken from the 'voicebank_vocabulary' entries (id + readablefilename
    without extension) of *jsonvocfilepath* instead of a text file.
    """
    inpath = inrootpath + '/' + subject_name
    outpath = outrootpath + '/' + subject_name
    if os.path.isdir(outpath) is False:
        os.mkdir(outpath)
    vocabulary = getVocabularyFromJSON(jsonvocfilepath)
    for sentence in vocabulary:
        sentenceid = str(sentence["id"])
        # label = readablefilename with its extension stripped
        lab = remoteExtension(str(sentence["readablefilename"]))
        #with open(vocfilepath, "r") as f:
        #    data = f.readlines()
        #    for line in data:
        #        words = line.split()  # words[0] = command number, words[1] = command label
        for infile in glob.glob(os.path.join(inpath, '*.*')):
            file_name = os.path.basename(infile)
            # split at digit runs: b[0] = label prefix, b[1] = first digit run
            b = re.split('(\d+)', file_name)
            if b[0] == lab:
                shutil.copy2(infile, outpath + '/' + subject_name + "_" + sentenceid + "_" + b[1] + ext)
                # e.g. produces subject_name + "_" + id + "_" + b[1] + ext -> te_0_0.dat
# works when input files are : SUBJ_commandlabelNREP_scores.dat
# jsonvocfilepath contains lines as follows:
#{ "vocabulary_categories": [],
# "voicebank_vocabulary": [ { "title": "Sono felice", "id": 1101, "filename":"", "readablefilename" : "sono_felice.wav", "existwav": 0, "editable":false}, ...]
#}
def renameSubjectsFilesJSON(subjects_name, inrootpath, outrootpath, jsonvocfilepath, ext=".dat"):
    """Multi-subject, JSON-vocabulary rename: files 'SUBJ_commandlabelNREP_...'
    are matched against the voicebank_vocabulary entries and copied with the
    '<subj>_<sentenceid>_<rep...>' naming; unmatched input files are printed.
    """
    inpath = inrootpath + '/' + subjects_name
    vocabulary = getVocabularyFromJSON(jsonvocfilepath)
    for infile in glob.glob(os.path.join(inpath, '*.*')):
        copied = False
        file_name = os.path.basename(infile)
        id = file_name.index("_")
        subjlabel = file_name[:id]  # subject prefix before the first '_'
        if os.path.isdir(outrootpath + '/' + subjlabel) is False:
            os.mkdir(outrootpath + '/' + subjlabel)
        file_name = file_name[(id + 1):]
        # split at digit runs: b[0] = label prefix, b[1] = first digit run
        b = re.split('(\d+)', file_name)
        for sentence in vocabulary:
            sentenceid = str(sentence["id"])
            lab = remoteExtension(str(sentence["readablefilename"]))
            if b[0] == lab:
                if os.path.isdir(outrootpath + '/' + subjlabel) is False:
                    os.mkdir(outrootpath + '/' + subjlabel)
                # NOTE(review): the file is copied twice -- once into the
                # per-subject folder and once directly under outrootpath.
                # Looks like a leftover duplicate; confirm which is intended.
                shutil.copy2(infile, outrootpath + '/' + subjlabel + '/' + subjlabel + "_" + sentenceid + "_" + b[1] + ext)
                shutil.copy2(infile, outrootpath + '/' + subjlabel + "_" + sentenceid + "_" + b[1] + ext)
                copied = True
                break
        # e.g. produces subject_name + "_" + id + "_" + b[1] + ext -> te_0_0.dat
        if copied is False:
            print(infile)
# works when input files are : commandlabelNREP.dat.SUBJLABEL
# works when input files are : commandlabelNREP.dat.SUBJLABEL
def renameSubjectFilesOld(subject_name, inrootpath, outrootpath, vocfilepath):
    """Legacy naming scheme: input files 'commandlabelNREP.dat.SUBJLABEL' are
    copied to '<subj>_<cmdnum>_<rep...>.dat' using the text vocabulary file
    ('<number> <commandlabel>' per line).
    """
    inpath = inrootpath + '/' + subject_name
    outpath = outrootpath + '/' + subject_name
    if os.path.isdir(outpath) is False:
        os.mkdir(outpath)
    with open(vocfilepath, "r") as f:
        data = f.readlines()
    for line in data:
        words = line.split()  # words[0] = command number, words[1] = command label
        for infile in glob.glob(os.path.join(inpath, '*.*')):
            file_name = os.path.basename(infile)
            # strip the trailing '.SUBJLABEL' before matching
            a = os.path.splitext(file_name)[0]
            b = re.split('(\d+)', a)
            if b[0] == words[1]:
                shutil.copy2(inpath + '/' + a + '.' + subject_name,
                             outpath + '/' + subject_name + "_" + words[0] + "_" + b[1] + ".dat")
                # e.g. produces subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat
def getVocabularyFromJSON(json_inputfile):
    """Load *json_inputfile* (UTF-8 JSON) and return its
    'voicebank_vocabulary' list."""
    with open(json_inputfile, encoding='utf-8') as data_file:
        return json.load(data_file)["voicebank_vocabulary"]
def createVocabularySentence(list_ids, json_inputfile, txt_outputfile):
    """Write, one per line and in *list_ids* order, the titles of the
    vocabulary sentences whose ids appear in *list_ids*.

    json_inputfile: global vocabulary JSON (see getVocabularyFromJSON).
    txt_outputfile: destination text file (created/overwritten).
    """
    vocabulary = getVocabularyFromJSON(json_inputfile)
    # FIX: use a context manager -- the original left the output file open
    # if a lookup raised before file.close() was reached.
    with open(txt_outputfile, 'w+') as out_file:
        for wanted_id in list_ids:  # renamed from 'id' to avoid shadowing the builtin
            for sentence in vocabulary:
                if wanted_id == sentence["id"]:
                    # os.linesep kept for byte-compatibility with the original
                    # output (note: in text mode this yields '\r\r\n' on Windows)
                    out_file.write(sentence["title"] + os.linesep)
                    break
def createVocabularyJson(list_ids, model, sessiondata, training_sessionid, json_globalvocabulary, json_outputfile):
    """Assemble and write the per-session recognition vocabulary JSON.

    Selects from the global vocabulary the sentences whose ids are in
    *list_ids* (preserving list_ids order), then merges model parameters
    (*model* dict), session settings (*sessiondata* dict) and the session id
    into one JSON object written to *json_outputfile*.

    NOTE(review): the expected keys of *model* / *sessiondata* are assumed
    from their usage below -- verify against the callers.
    """
    # get commands list from json_globalvocabulary
    vocabulary = getVocabularyFromJSON(json_globalvocabulary)
    commands = []
    for id in list_ids:
        for sentence in vocabulary:
            sentenceid = sentence["id"]
            if id == sentenceid:
                commands.append({'title': sentence["title"], 'id': sentenceid})
                break
    lencmds = len(commands)
    nw = datetime.now()
    # sModelFilePath is written by the App
    res = {
        'sLabel': sessiondata['sLabel'],
        'nModelClass': sessiondata['nModelClass'],
        'nModelType': sessiondata['nModelType'],
        'nInputParams': model['nInputParams'],
        'nContextFrames': model['nContextFrames'],
        'nItems2Recognize': lencmds,
        'sModelFilePath': "",
        'sModelFileName': model['sModelFileName'],
        'saInputNodeName': model['saInputNodeName'],
        'sOutputNodeName': model['sOutputNodeName'],
        'nProcessingScheme': sessiondata['nProcessingScheme'],
        'fRecognitionThreshold': model['fRecognitionThreshold'],
        'sCreationTime': nw.strftime('%Y/%m/%d %H:%M:%S'),
        'sLocalFolder': sessiondata['sLocalFolder'],
        'sessionid': str(training_sessionid),
        'commands': commands
    }
    with open(json_outputfile, 'w', encoding='utf-8') as data_file:
        json.dump(res, data_file)
# ===========================================================================================================================
# aims : This script creates the training matrix for a single subject (ctx_*.dat ==> SUBJ_train_data.npy [earray h5])
#
# input : subj: subject folder name
# in_orig_subj_path: path to the subject's cepstra with context
# output_net_path: path to the output folder
# arr_commands: IDs of the selected commands
# arr_rip: range from 0 to Nripetitions
#
# return : output_matrices_path: path to the output folder (e.g. output/train/ANALYSISNAME/matrices)
# ===========================================================================================================================
def createSubjectTrainingMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, file_prefix='ctx'):
    """Build the training data/label matrices for one subject.

    Scans *in_orig_subj_path* for '<file_prefix>*' files named like
    ctx_SUBJ_CMD_REP, keeps those whose command id is in *arr_commands* and
    repetition id in *arr_rip*, stacks their cepstra rows into a data matrix
    and emits one one-hot label row (over arr_commands) per cepstra line.
    Matrices are appended incrementally to .npy files via earray_wrapper.

    Returns {'data_matrices_path': ..., 'labels_matrices_path': ...}.
    """
    mat_compl = []
    mat_lab = []
    totalsize = 0
    output_matrices_path = os.path.join(output_net_path, 'matrices')
    write_every_nfiles = 1  # every N (e.g. 10) files read, append them to disk and clear arrays
    if os.path.isdir(output_matrices_path) is False:
        os.mkdir(output_matrices_path)
    if subj != '':
        subj = subj + "_"
    output_data_matrix_path = output_matrices_path + '/' + subj + 'train_data.npy'
    output_labels_matrix_path = output_matrices_path + '/' + subj + 'train_labels.npy'
    # start from a clean slate: the writer below appends to existing files
    if os.path.exists(output_data_matrix_path) is True:
        os.remove(output_data_matrix_path)
    if os.path.exists(output_labels_matrix_path) is True:
        os.remove(output_labels_matrix_path)
    try:
        cnt = 0
        for ctxfile in glob.glob(in_orig_subj_path + '/' + file_prefix + '*'):
            spl = re.split('[_ .]', ctxfile)  # e.g. ctx_SUBJ_CMD_REP => spl[2] command id, spl[3] repetition id
            id_cmd = int(spl[2])
            id_rep = int(spl[3])
            if id_cmd in arr_commands and id_rep in arr_rip:
                # read only to count the lines; genfromtxt below re-reads the data
                f = open(ctxfile, 'r')
                lines = f.readlines()
                count_lines = len(lines)
                f.close()
                # for every line of contexted file, write N-arr_commands columns (one-hot)
                lb = [[1 if i == id_cmd else 0 for i in arr_commands] for j in range(count_lines)]
                ctx = genfromtxt(ctxfile)  # load the cepstra
                if len(mat_compl) == 0 and len(mat_lab) == 0:
                    mat_compl = ctx
                    mat_lab = lb
                else:
                    mat_compl = np.vstack((mat_compl, ctx))
                    mat_lab = np.vstack((mat_lab, lb))
                cnt = cnt + 1
                # check whether to flush the accumulated rows to disk
                if cnt == write_every_nfiles:
                    cnt = 0
                    earray_wrapper.appendArray2File(mat_compl, output_data_matrix_path)
                    earray_wrapper.appendArray2File(mat_lab, output_labels_matrix_path)
                    totalsize += mat_compl.size
                    mat_compl = []
                    mat_lab = []
    except Exception as e:
        # NOTE(review): errors are only printed; processing stops silently
        print(str(e))
    # save any remaining rows in output/train/ANALYSISNAME/matrices
    if len(mat_compl):
        earray_wrapper.appendArray2File(mat_compl, output_data_matrix_path)
        earray_wrapper.appendArray2File(mat_lab, output_labels_matrix_path)
    print("createSubjectTrainingMatrix ended: " + str(totalsize))
    return {'data_matrices_path': output_data_matrix_path, 'labels_matrices_path': output_labels_matrix_path}
# -----------------------------------------------------------------------------------------------------------------------
# DO NOT create matrices file, just read and returns the data & labels arrays
# DO NOT create matrices file, just read and returns the data & labels arrays
def getSubjectTrainingMatrix(in_orig_subj_path, arr_commands, arr_rip, file_prefix='ctx'):
    """In-memory variant of createSubjectTrainingMatrix: returns the stacked
    cepstra matrix and one-hot label matrix without writing anything to disk.
    """
    mat_compl = []
    mat_lab = []
    totalsize = 0
    try:
        cnt = 0
        for ctxfile in glob.glob(in_orig_subj_path + '/' + file_prefix + '*'):
            # NOTE(review): '/'-based split assumes POSIX paths
            filename = ctxfile.split('/')[-1]
            spl = filename.split('.')[0]
            spl = spl.split('_')
            # spl = re.split('[_ .]', filename)  # e.g. ctx_SUBJ_CMD_REP => spl[2] command id, spl[3] repetition id
            id_cmd = int(spl[2])
            id_rep = int(spl[3])
            if id_cmd in arr_commands and id_rep in arr_rip:
                # read only to count the lines; genfromtxt below re-reads the data
                f = open(ctxfile, 'r')
                lines = f.readlines()
                count_lines = len(lines)
                f.close()
                # for every line of contexted file, write N-arr_commands columns (one-hot)
                lb = [[1 if i == id_cmd else 0 for i in arr_commands] for j in range(count_lines)]
                ctx = genfromtxt(ctxfile)  # load the cepstra
                if len(mat_compl) == 0 and len(mat_lab) == 0:
                    mat_compl = ctx
                    mat_lab = lb
                else:
                    mat_compl = np.vstack((mat_compl, ctx))
                    mat_lab = np.vstack((mat_lab, lb))
                cnt = cnt + 1
    except Exception as e:
        print(str(e))
    rows = len(mat_compl)
    # NOTE(review): raises IndexError when no file matched (mat_compl empty)
    cols = len(mat_compl[0])
    print("getSubjectTrainingMatrix ended, row: " + str(rows)+ ", cols: " + str(cols))
    return mat_compl, mat_lab
    # return {'data_matrices': mat_compl, 'labels_matrices': mat_lab}
# ===========================================================================================================================
# aims : This script creates the testing matrix for a single subject
#
# input : subj: subject folder name
# in_orig_subj_path: path to the subject's cepstra with context
# output_net_path: path to the output folder
# arr_commands: range from 1 to Ncommands
# arr_rip: range from 0 to Nripetitions
# sentences_filename: name of the output file
# sentence_counter: it takes account of how many rows are occupied by each command and the command_id
#
# return : output_matrices_path: path to the output folder
# sentence_counter: text file which takes account of how many rows are occupied by each command and the command_id
# ===========================================================================================================================
def createSubjectTestMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, sentences_filename, sentence_counter):
    """Build the test data/label matrices for one subject, additionally
    recording, per cepstra line, a (sentence_counter, command_id) pair in the
    sentences file so predictions can later be grouped per utterance.

    Returns {'data_matrices_path', 'labels_matrices_path', 'sentence_counter'}
    where sentence_counter is the updated running utterance count.
    """
    mat_compl = []
    mat_lab = []
    totalsize = 0
    output_matrices_path = os.path.join(output_net_path, 'matrices')
    write_every_nfiles = 10  # every N (e.g. 10) files read, append them to disk and clear arrays
    if os.path.isdir(output_matrices_path) is False:
        os.mkdir(output_matrices_path)
    # NOTE(review): removal checks the bare filename but writes go to
    # output_net_path + "/" + sentences_filename below -- likely a path
    # mismatch; confirm the intended location.
    if os.path.isfile(sentences_filename) is True:
        os.remove(sentences_filename)
    output_data_matrix = output_matrices_path + '/' + subj + '_test_data.npy'
    output_labels_matrix = output_matrices_path + '/' + subj + '_test_labels.npy'
    # start from a clean slate: the writer below appends to existing files
    if os.path.exists(output_data_matrix) is True:
        os.remove(output_data_matrix)
    if os.path.exists(output_labels_matrix) is True:
        os.remove(output_labels_matrix)
    try:
        cnt = 0
        for ctxfile in glob.glob(in_orig_subj_path + '/ctx*'):
            spl = re.split('[_ .]', ctxfile)  # e.g. ctx_SUBJ_CMD_REP => spl[2] command id, spl[3] repetition id
            id_cmd = int(spl[2])
            id_rep = int(spl[3])
            if id_cmd in arr_commands and id_rep in arr_rip:
                # read only to count the lines; genfromtxt below re-reads the data
                f = open(ctxfile, 'r')
                lines = f.readlines()
                count_lines = len(lines)
                f.close()
                sentence_counter = sentence_counter + 1
                # one (utterance index, command id) row per cepstra line
                sc = [[sentence_counter, id_cmd] for j in range(count_lines)]
                with open(output_net_path + "/" + sentences_filename, 'ab') as f_handle:
                    np.savetxt(f_handle, sc, fmt='%.0f')
                lb = [[1 if i == id_cmd else 0 for i in arr_commands] for j in range(count_lines)]
                ctx = genfromtxt(ctxfile)  # load the cepstra
                if len(mat_compl) == 0 and len(mat_lab) == 0:
                    mat_compl = ctx
                    mat_lab = lb
                else:
                    mat_compl = np.vstack((mat_compl, ctx))
                    mat_lab = np.vstack((mat_lab, lb))
                cnt = cnt + 1
                # check whether to flush the accumulated rows to disk
                if cnt == write_every_nfiles:
                    cnt = 0
                    earray_wrapper.appendArray2File(mat_compl, output_data_matrix)
                    earray_wrapper.appendArray2File(mat_lab, output_labels_matrix)
                    totalsize += mat_compl.size
                    mat_compl = []
                    mat_lab = []
    except Exception as e:
        # NOTE(review): errors are only printed; processing stops silently
        print(str(e))
    # save any remaining rows in output/test/ANALYSISNAME/matrices
    if len(mat_compl):
        earray_wrapper.appendArray2File(mat_compl, output_data_matrix)
        earray_wrapper.appendArray2File(mat_lab, output_labels_matrix)
    return {'data_matrices_path': output_data_matrix, 'labels_matrices_path': output_labels_matrix, 'sentence_counter': sentence_counter}
# ===========================================================================================================================
# aims : This script creates the testing matrix with all the pre-established subjects
#
# input : input_matrix_folder: path to the subject's folder containing testing and training matrices with cepstra or labels
# data_name: name of the testing or training matrices with cepstra
# label_name: name of the testing or training matrices with labels
# output_matrix_path: path to the output folder. If is not specified, data will be stored in the current working folder
#
# return : data_matrix_path: path to the output folder
# label_matrix_path: path to the output folder
# ===========================================================================================================================
def createFullMatrix(subjects_list, input_net_folder, data_name, label_name, output_net_folder=""):
    """Concatenate the per-subject '<SUBJ>_<data_name>.npy' /
    '<SUBJ>_<label_name>.npy' matrices of the subjects in *subjects_list*
    into 'full_<data_name>.npy' / 'full_<label_name>.npy'.

    When *output_net_folder* is empty the full matrices are written next to
    the inputs; otherwise into <output_net_folder>/matrices.

    Returns {'data_matrix_path': ..., 'label_matrix_path': ...}.
    """
    input_matrix_folder = os.path.join(input_net_folder, 'matrices')
    if os.path.isdir(input_matrix_folder) is False:
        os.mkdir(input_matrix_folder)
    if len(output_net_folder):
        output_matrix_folder = os.path.join(output_net_folder, 'matrices')
        if os.path.isdir(output_matrix_folder) is False:
            os.mkdir(output_matrix_folder)
        data_matrix_path = output_matrix_folder + '/full_' + data_name + '.npy'
        label_matrix_path = output_matrix_folder + '/full_' + label_name + '.npy'
    else:
        data_matrix_path = input_matrix_folder + '/full_' + data_name + '.npy'
        label_matrix_path = input_matrix_folder + '/full_' + label_name + '.npy'
    for file in glob.glob(input_matrix_folder + '/*' + data_name + '.npy'):
        file_name = os.path.basename(file)
        spl = re.split('[_ .]', file_name)  # spl[0] subject label, spl[1] the word 'train'
        for subj in subjects_list:
            if subj == spl[0]:
                print("createFullMatrix: " + file)
                file_train = np.load(file)
                # load the matching label matrix for the same subject
                file_labels = np.load(input_matrix_folder + '/' + spl[0] + '_' + label_name + '.npy')
                earray_wrapper.appendArray2File(file_train, data_matrix_path)
                earray_wrapper.appendArray2File(file_labels, label_matrix_path)
    return {'data_matrix_path': data_matrix_path, 'label_matrix_path': label_matrix_path}
def getNodeBySubstring(graph, nomesubstring, allnodes=None):
    """Return the ':0' output tensor of the unique node of *graph* whose name
    contains *nomesubstring* (names containing 'read' are ignored), or None
    when zero or several nodes match.

    allnodes: optional pre-computed list of node names, to avoid re-walking
    the graph definition on repeated calls.
    """
    if allnodes is None:
        allnodes = [node.name for node in graph.as_graph_def().node]
    matches = [name for name in allnodes if nomesubstring in name and 'read' not in name]
    if len(matches) != 1:
        return None
    return graph.get_tensor_by_name(matches[0] + ":0")
| 21,117 | 6,638 |
from datetime import datetime
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
BaseModel = declarative_base()
class Item(BaseModel):
    """An inventory item; may be stored at several locations via the
    ItemLocation association rows."""
    __tablename__ = 'items'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(300), nullable=False)
    # association rows linking this item to locations; deleted with the item
    location_refs = relationship('ItemLocation', back_populates='item', cascade='all, delete-orphan')
class Location(BaseModel):
    """A storage location; items are attached through ItemLocation rows
    (reachable via the 'item_refs' backref)."""
    __tablename__ = 'location'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(200), nullable=False)
class ItemLocation(BaseModel):
    """Association object recording that an Item is stored at a Location."""
    __tablename__ = 'item_location'
    id = sa.Column(sa.Integer, primary_key=True)
    location_id = sa.Column(sa.Integer, sa.ForeignKey(Location.id), nullable=False)
    item_id = sa.Column(sa.Integer, sa.ForeignKey(Item.id), nullable=False)
    location = relationship(Location, backref='item_refs')
    item = relationship(Item, back_populates='location_refs')
class ItemNeeded(BaseModel):
    """A pending (or cleared, once cleared_time is set) request for an Item."""
    __tablename__ = 'item_needed'
    id = sa.Column(sa.Integer, primary_key=True)
    item_id = sa.Column(sa.Integer, sa.ForeignKey(Item.id), nullable=False)
    # BUG FIX: pass the callable, not its result. `default=datetime.utcnow()`
    # was evaluated once at import time, stamping every new row with the
    # module-load timestamp instead of the insert time.
    needed_time = sa.Column(sa.DateTime, nullable=False, default=datetime.utcnow)
    cleared_time = sa.Column(sa.DateTime)
    item = relationship(Item, backref='needed_refs')
| 1,373 | 460 |
# ---------- STATIC METHODS ----------
# Static methods allow access without the need to initialize
# a class. They should be used as utility methods, or when
# a method is needed, but it doesn't make sense for the real
# world object to be able to perform a task
class Sum:
    """Tutorial class demonstrating a static utility method."""

    # The static method decorator marks a method that needs neither an
    # instance (self) nor the class (cls) to run.
    @staticmethod
    def getSum(*args):
        """Return the sum of all positional arguments (0 for no arguments)."""
        total = 0  # renamed from 'sum' so the builtin sum() isn't shadowed
        for value in args:
            total += value
        return total
def main():
    # Call a static method by preceding it with its class
    # name
    print("Sum :", Sum.getSum(1,2,3,4,5))

main()
# ---------- STATIC VARIABLES ----------
# Fields declared in a class, but outside of any method
# are static variables. There value is shared by every
# object of that class
class Dog:
    """Tutorial class demonstrating a class-level (static) variable shared
    by every instance."""

    # Running count of Dog objects constructed so far -- one value shared
    # by the whole class, not per instance.
    num_of_dogs = 0

    def __init__(self, name="Unknown"):
        self.name = name
        # Bump the shared counter through the class name.
        Dog.num_of_dogs = Dog.num_of_dogs + 1

    @staticmethod
    def getNumOfDogs():
        """Print how many Dog objects have been created."""
        print("There are currently {} dogs".format(Dog.num_of_dogs))
def main():
    spot = Dog("Spot")
    doug = Dog("Doug")
    # the static method is also reachable through an instance
    spot.getNumOfDogs()

main()
# ---------- MODULES ----------
# Your Python programs will contain a main program that
# includes your main function. Then you will create many
# modules in separate files. Modules also end with .py
# just like any other Python file
# ————— sum.py —————
def getSum(*args):
    """Return the sum of all positional arguments (0 for no arguments)."""
    total = 0  # renamed from 'sum' so the builtin sum() isn't shadowed
    for value in args:
        total += value
    return total
# ————— End of sum.py —————
# You can import by listing the file name minus the py
# You can import by listing the file name minus the py
# (this assumes a sibling module file sum.py exists -- see the
# "End of sum.py" sketch above)
import sum

# Get access to functions by preceding with the file
# name and then the function you want
print("Sum :", sum.getSum(1,2,3,4,5))

# ---------- FROM ----------
# You can use from to copy specific functions from a module
# You can use from sum import * to import all functions
# You can import multiple functions by listing them after
# import separated by commas
from sum import getSum

# You don't have to reference the module name now
print("Sum :", getSum(1,2,3,4,5))
# ---------- EXCEPTION HANDLING ----------
# Exceptions are triggered either when an error occurs
# or when you want them to.
# We use exceptions are used to handle errors, execute
# specific code when code generates something out of
# the ordinary, to always execute code when something
# happens (close a file that was opened),
# When an error occurs you stop executing code and jump
# to execute other code that responds to that error
# Let's handle an IndexError exception that is
# triggered when you try to access an index in a list
# that doesn't exist
# Surround a potential exception with try
try:
    aList = [1,2,3]
    print(aList[3])  # valid indices are 0..2, so this raises IndexError
# Catch the exception with except followed by the
# exception you want to catch
# You can catch multiple exceptions by separating them
# with commas inside parentheses
# except (IndexError, NameError):
except IndexError:
    print("Sorry that index doesn't exist")
# If the exception wasn't caught above this will
# catch all others
# NOTE(review): a bare `except:` also swallows KeyboardInterrupt/SystemExit;
# shown here only for tutorial purposes
except:
    print("An unknown error occurred")
# ---------- CUSTOM EXCEPTIONS ----------
# Lets trigger an exception if the user enters a
# name that contains a number
# Although you won't commonly create your own exceptions
# this is how you do it
# Create a class that inherits from Exception
class DogNameError(Exception):
    """Raised when a dog name fails validation (e.g. contains a digit).

    The original explicit __init__ merely forwarded *args to
    Exception.__init__, which Exception already does -- so it was redundant
    and has been removed; behavior for positional arguments is unchanged.
    """
# Interactive demo: blocks on stdin.
try:
    dogName = input("What is your dogs name : ")
    if any(char.isdigit() for char in dogName):
        # Raise your own exception
        # You can raise the built in exceptions as well
        raise DogNameError
except DogNameError:
    print("Your dogs name can't contain a number")
# ---------- FINALLY & ELSE ----------
# finally is used when you always want certain code to
# execute whether an exception is raised or not
# Interactive demo: blocks on stdin.
# NOTE(review): non-numeric or single-token input raises
# ValueError here, which is not handled below.
num1, num2 = input("Enter to values to divide : ").split()
try:
    quotient = int(num1) / int(num2)
    print("{} / {} = {}".format(num1, num2, quotient))
except ZeroDivisionError:
    print("You can't divide by zero")
# else is only executed if no exception was raised
else:
    print("You didn't raise an exception")
finally:
    print("I execute no matter what")
# ---------- PROBLEM EXCEPTIONS & FILES ----------
# 1. Create a file named mydata2.txt and put data in it
# 2. Using what you learned in part 8 and Google to find
# out how to open a file without with try to open the
# file in a try block
# 3. Catch the FileNotFoundError exception
# 4. In else print the file contents
# 5. In finally close the file
# 6. Try to open the nonexistent file mydata3.txt and
# test to see if you caught the exception
try:
    myFile = open("mydata2.txt", encoding="utf-8")
# We can use as to access data and methods in the
# exception class
except FileNotFoundError as ex:
    print("That file was not found")
    # Print out further data on the exception
    print(ex.args)
# else: only runs when the open succeeded
else:
    print("File :", myFile.read())
    myFile.close()
finally:
    print("Finished Working with File")
# https://www.kaggle.com/yassineghouzam/titanic-top-4-with-ensemble-modeling
# Feature analysis
# Feature engineering
# Modeling
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from collections import Counter
from sklearn.ensemble import RandomForestClassifier, \
AdaBoostClassifier, \
GradientBoostingClassifier, \
ExtraTreesClassifier, \
VotingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
sns.set(style="white", context="notebook", palette="deep")

# Load and check data
# Load data (paths are relative to the working directory)
train = pd.read_csv("./titanic/train.csv")
test = pd.read_csv("./titanic/test.csv")
# NOTE(review): presumably kept aside for a later submission file --
# not used within this chunk
IDtest = test["PassengerId"]
def detect_outliers(df, n, features):
    """
    Return the indices of rows that are outliers in more than `n` of the
    given numeric columns, using the Tukey (1.5 * IQR) rule per column.

    :param df: dataframe to scan
    :param n: a row must be an outlier in strictly more than n columns
    :param features: column names to inspect
    :return: list of row indices flagged as multi-column outliers
    """
    candidate_rows = []
    for feature in features:
        # First and third quartiles, and the Tukey fence half-width
        q1, q3 = np.percentile(df[feature], [25, 75])
        step = 1.5 * (q3 - q1)
        # Rows falling outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for this column
        outside = df[(df[feature] < q1 - step) | (df[feature] > q3 + step)].index
        candidate_rows.extend(outside)
    # Keep only rows flagged in more than n columns
    counts = Counter(candidate_rows)
    return [idx for idx, hits in counts.items() if hits > n]
# Drop rows flagged as outliers in more than 2 of the listed numeric features
Outliers_to_drop = detect_outliers(train, 2, ["Age", "SibSp", "Parch", "Fare"])
# Inspect the rows about to be removed (output only visible in a notebook)
train.loc[Outliers_to_drop]
train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True)
# Remember where train ends so the combined frame can be split again later
train_len = len(train)
# Join train and test so every feature transformation is applied to both at once
dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)
dataset = dataset.fillna(np.nan)
# Quick exploratory summaries (expression results only meaningful in a notebook)
dataset.isnull().sum()
train.info()
train.isnull().sum()
train.head()
train.dtypes
train.describe()
# Feature Analysis
# Correlation of the numeric features with survival
g = sns.heatmap(train[["Survived", "SibSp", "Parch", "Age", "Fare"]].corr(), annot=True, fmt=".2f", cmap="coolwarm")
# SibSp: survival rate per number of siblings/spouses aboard
g = sns.catplot(x="SibSp", y="Survived", kind="bar", data=train, size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# Parch
g = sns.catplot(x="Parch", y="Survived", data=train, kind="bar", size=6, palette = "muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# Age
g = sns.FacetGrid(train, col = "Survived")
g = g.map(sns.distplot, "Age")
# Age density split by outcome, restricted to rows with a known Age
g = sns.kdeplot(train["Age"][(train["Survived"]==0) & (train["Age"].notnull())], color = "Red", shade = True )
g = sns.kdeplot(train["Age"][(train["Survived"]==1) & (train["Age"].notnull())], color = "Blue", shade = True )
g.set_xlabel("Age")
g.set_ylabel("Frequency")
g = g.legend(["Not Survived", "Survived"])
# Fare
dataset["Fare"].isnull().sum()
dataset["Fare"] = dataset["Fare"].fillna(dataset["Fare"].median())
g = sns.distplot(dataset["Fare"], color="m", label="Skewness : %.2f"%(dataset["Fare"].skew()))
g = g.legend(loc = "best")
# Log-transform Fare to reduce its strong right skew
dataset["Fare"] = dataset["Fare"].map(lambda i: np.log(i) if i > 0 else 0)
g = sns.distplot(dataset["Fare"], color="b", label="Skewness : %.2f"%(dataset["Fare"].skew()))
g = g.legend(loc="best")
# Categorical values
# Sex
g = sns.barplot(x="Sex", y="Survived", data=train)
g = g.set_ylabel("Survival Probability")
train[["Sex", "Survived"]].groupby("Sex").mean()
# Pclass
g = sns.catplot(x="Pclass", y="Survived", data = train, kind="bar", size = 6, palette = "muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
g = sns.catplot(x="Pclass", y="Survived", hue="Sex", kind="bar", data=dataset, size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# Embarked
dataset["Embarked"].isnull().sum()
# Impute missing embarkation port with "S" (presumably the modal port — verify)
dataset["Embarked"] = dataset["Embarked"].fillna("S")
g = sns.catplot(x="Embarked", y="Survived", kind="bar", data=train, size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
g = sns.catplot("Pclass", col="Embarked", kind="count", data=train, size=6, palette="muted")
# Filling missing values
# Age
# Explore which features correlate with Age before imputing it
g = sns.catplot(x = "Sex", y = "Age", kind="box", data=dataset)
g = sns.catplot(x = "Sex", y = "Age", hue="Pclass", kind="box", data=dataset)
g = sns.catplot(x = "Parch", y = "Age", kind="box", data=dataset)
g = sns.catplot(x = "SibSp", y = "Age", kind="box", data=dataset)
dataset["Sex"] = dataset["Sex"].map({"male":0, "female":1}) # male --> 0; female --> 1
g = sns.heatmap(dataset[["Age", "Sex", "SibSp", "Parch", "Pclass"]].corr(), cmap="BrBG", annot=True)
# Filling missing value of Age
# Impute each missing Age with the median Age of passengers sharing the same
# SibSp / Parch / Pclass profile; fall back to the global median when no
# matching group exists.
index_NaN_age = list( dataset["Age"][dataset["Age"].isnull()].index )
for i in index_NaN_age :
    age_med = dataset["Age"].median()
    age_pred = dataset["Age"][((dataset["SibSp"] == dataset.iloc[i]["SibSp"]) &
                               (dataset["Parch"] == dataset.iloc[i]["Parch"]) &
                               (dataset["Pclass"] == dataset.iloc[i]["Pclass"])
                               )].median()
    # Fix: assign through .loc instead of chained `dataset["Age"].iloc[i] = ...`,
    # which writes to an intermediate object and triggers SettingWithCopyWarning.
    # Labels equal positions here because the index was reset above.
    if not np.isnan(age_pred) :
        dataset.loc[i, "Age"] = age_pred
    else :
        dataset.loc[i, "Age"] = age_med
g = sns.catplot(x="Survived", y="Age", kind="box", data=train)
g = sns.catplot(x="Survived", y="Age", data=train, kind="violin")
# Feature Engineering
dataset["Name"].head()
# Title = token between "," and "." in the raw Name (e.g. "Braund, Mr. Owen" -> "Mr")
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in dataset["Name"]]
dataset["Title"] = pd.Series(dataset_title)
dataset["Title"].head()
g = sns.countplot(x="Title", data=dataset)
g = plt.setp(g.get_xticklabels(), rotation=45)
# Collapse rare / noble / professional titles into one "Rare" bucket
dataset["Title"] = dataset["Title"].replace(["Lady", "the Countess", "Countess", "Capt", "Col", "Don", "Dr", "Major", "Rev", "Sir", "Jonkheer", "Dona"], "Rare")
# Ordinal encoding: 0=Master, 1=all female titles, 2=Mr, 3=Rare
dataset["Title"] = dataset["Title"].map({"Master":0,
                                         "Miss":1,
                                         "Ms":1,
                                         "Mme":1,
                                         "Mlle":1,
                                         "Mrs":1,
                                         "Mr":2,
                                         "Rare":3 })
dataset["Title"] = dataset["Title"].astype(int)
dataset["Title"].value_counts()
g = sns.countplot(dataset["Title"])
g = g.set_xticklabels(["Master", "Miss/Ms/Mme/Mlle/Mrs", "Mr", "Rare"])
g = sns.catplot(x="Title", y="Survived", kind="bar", data=dataset)
g = g.set_xticklabels(["Master", "Miss-Mrs", "Mr", "Rare"])
g = g.set_ylabels("survival probability")
# Name is fully captured by Title now
dataset.drop(labels = ["Name"], axis=1, inplace=True)
# Family size = siblings/spouses + parents/children + the passenger themself
dataset["Fsize"] = dataset["SibSp"] + dataset["Parch"] + 1
g = sns.catplot(x="Fsize", y="Survived", kind="point", data=dataset)
g = g.set_ylabels("Survival probability")
# Bucketize family size into four binary indicator features
dataset["Single"] = dataset["Fsize"].map(lambda s: 1 if s == 1 else 0)
dataset["SmallF"] = dataset["Fsize"].map(lambda s: 1 if s == 2 else 0)
dataset["MedF"] = dataset["Fsize"].map(lambda s: 1 if 3 <= s <= 4 else 0)
dataset["LargeF"] = dataset["Fsize"].map(lambda s: 1 if s >= 5 else 0)
dataset[["Single", "SmallF", "MedF", "LargeF"]].apply(lambda x: x.value_counts(), axis=0)
fig, ax=plt.subplots(2,2,figsize=(10,10))
sns.barplot(x = "Single", y="Survived", data=dataset, ax=ax[0,0])
ax[0,0].set_ylabel("Survival probability")
g = sns.barplot(x = "SmallF", y="Survived", data=dataset, ax=ax[0,1])
ax[0,1].set_ylabel("Survival probability")
g = sns.barplot(x = "MedF", y="Survived", data=dataset, ax=ax[1,0])
ax[1,0].set_ylabel("Survival probability")
g = sns.barplot(x = "LargeF", y="Survived", data=dataset, ax=ax[1,1])
ax[1,1].set_ylabel("Survival probability")
# One-hot encode Title and Embarked
dataset = pd.get_dummies(dataset, columns = ["Title"])
dataset = pd.get_dummies(dataset, columns = ["Embarked"], prefix = "Em")
dataset.head(4)
# Cabin
dataset["Cabin"].head()
dataset["Cabin"].describe()
dataset["Cabin"].isnull().sum()
dataset["Cabin"][dataset["Cabin"].notnull()].head()
# Keep only the deck letter; missing cabins become the sentinel deck "X"
dataset["Cabin"] = pd.Series( [i[0] if not pd.isnull(i) else "X" for i in dataset["Cabin"] ])
# Renamed from `ord`, which shadowed the builtin ord()
cabin_order = ["A", "B", "C", "D", "E", "F", "G", "T", "X"]
g = sns.countplot( dataset["Cabin"], order = cabin_order )
g = sns.catplot(x="Cabin", y="Survived", kind="bar", data=dataset, order = cabin_order)
g = g.set_ylabels("Survival Probability")
dataset = pd.get_dummies(dataset, prefix = "Cabin", columns=["Cabin"])
# Ticket: keep the alphabetic prefix if present, otherwise the sentinel "X"
dataset["Ticket"].head()
Ticket = []
for i in list(dataset.Ticket):
    if not i.isdigit() :
        Ticket.append(i.replace(".", "").replace("/", "").strip().split(" ")[0])
    else :
        Ticket.append("X")
dataset["Ticket"] = Ticket
dataset["Ticket"].head()
dataset = pd.get_dummies(dataset, columns = ["Ticket"], prefix = "T")
# Treat Pclass as categorical and one-hot encode it
dataset["Pclass"] = dataset["Pclass"].astype("category")
dataset = pd.get_dummies(dataset, columns=["Pclass"], prefix="Pc")
# PassengerId carries no signal; the test ids were saved in IDtest earlier
dataset.drop(labels = ["PassengerId"], axis=1, inplace=True)
dataset.head()
# Modeling
# Split the combined frame back into train/test. .copy() makes each an
# independent frame so the in-place drop/astype below do not mutate a view
# of `dataset` (avoids SettingWithCopyWarning).
train = dataset[:train_len].copy()
test = dataset[train_len:].copy()
test.drop(labels=["Survived"], axis=1, inplace=True)
train["Survived"] = train["Survived"].astype(int)
Y_train = train["Survived"]
X_train = train.drop(labels = ["Survived"], axis=1)
# Simple modeling: 10-fold cross-validation of a pool of common classifiers
kfold = StratifiedKFold(n_splits=10)
random_state = 2
classifiers = []
classifiers.append( SVC(random_state = random_state) )
classifiers.append( DecisionTreeClassifier(random_state = random_state) )
classifiers.append( AdaBoostClassifier(DecisionTreeClassifier(random_state = random_state), random_state = random_state, learning_rate = 0.1))
classifiers.append( RandomForestClassifier(random_state=random_state) )
classifiers.append( ExtraTreesClassifier(random_state=random_state) )
classifiers.append( GradientBoostingClassifier(random_state=random_state) )
classifiers.append( MLPClassifier(random_state=random_state) )
classifiers.append( KNeighborsClassifier() )
classifiers.append( LogisticRegression(random_state=random_state) )
classifiers.append( LinearDiscriminantAnalysis() )
cv_results = []
for classifier in classifiers :
    cv_results.append(cross_val_score(classifier, X=X_train, y=Y_train, scoring = "accuracy", cv=kfold))
cv_means = []
cv_std = []
for cv_result in cv_results :
    cv_means.append( cv_result.mean() )
    cv_std.append( cv_result.std() )
# Use the class name directly rather than string-splitting repr(estimator),
# which is fragile across sklearn versions (multi-line reprs).
algorithms = [ i.__class__.__name__.replace("Classifier", "").replace("Regression", "").replace("Analysis", "") for i in classifiers ]
cv_res = pd.DataFrame({
    "CrossValMeans":cv_means,
    "CrossValerrors": cv_std,
    "Algorithm": algorithms
})
g = sns.barplot("CrossValMeans", "Algorithm", data=cv_res, palette = "Set3", orient = "h", **{"xerr":cv_std})
g.set_xlabel("Mean Accuracy")
g = g.set_title("Cross validation scores")
# AdaBoost on a decision-tree base estimator, tuned via grid search
DTC = DecisionTreeClassifier()
adaDTC = AdaBoostClassifier(DTC, random_state=7)
# NOTE(review): `base_estimator__*` keys were renamed `estimator__*` in
# scikit-learn 1.2+ — confirm the installed sklearn version.
ada_param_grid = {"base_estimator__criterion" : ["gini", "entropy"],
                  "base_estimator__splitter" : ["best", "random"],
                  "algorithm" : ["SAMME", "SAMME.R"],
                  "n_estimators" : [1,2],
                  "learning_rate" : [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 1.5]}
gsadaDTC = GridSearchCV(adaDTC, param_grid = ada_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsadaDTC.fit(X_train, Y_train)
ada_best = gsadaDTC.best_estimator_
gsadaDTC.best_score_
# ExtraTrees
ExtC = ExtraTreesClassifier()
ex_param_grid = {
    "max_depth": [None],
    "max_features": [1, 3, 10],
    "min_samples_split": [2, 3, 10],
    "min_samples_leaf": [1, 3, 10],
    "bootstrap": [False],
    "n_estimators": [100, 300],
    "criterion": ["gini"] }
gsExtC = GridSearchCV(ExtC, param_grid = ex_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsExtC.fit(X_train, Y_train)
ExtC_best = gsExtC.best_estimator_
gsExtC.best_score_
# Random Forest
RFC = RandomForestClassifier()
rf_param_grid = {"max_depth": [None],
                 "max_features": [1, 3, 10],
                 "min_samples_split": [2, 3, 10],
                 "min_samples_leaf": [1, 3, 10],
                 "bootstrap": [False],
                 "n_estimators": [100, 300],
                 "criterion": ["gini"]}
gsRFC = GridSearchCV( RFC, param_grid = rf_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsRFC.fit(X_train, Y_train)
RFC_best = gsRFC.best_estimator_
gsRFC.best_score_
# Gradient Boosting
GBC = GradientBoostingClassifier()
# NOTE(review): loss "deviance" was renamed "log_loss" in scikit-learn 1.1
# and removed later — confirm the installed sklearn version.
gb_param_grid = {
    "loss" : ["deviance"],
    "n_estimators" : [100,200,300],
    "learning_rate" : [0.1,0.05,0.01],
    "max_depth" : [4, 8],
    "min_samples_leaf" : [100, 150],
    "max_features" : [0.3, 0.1] }
gsGBC = GridSearchCV(GBC, param_grid = gb_param_grid, cv=kfold, scoring="accuracy", verbose = 1)
gsGBC.fit(X_train, Y_train)
GBC_best = gsGBC.best_estimator_
gsGBC.best_score_
# SVM (probability=True is required later for soft voting)
SVMC = SVC(probability=True)
svc_param_grid = {
    "kernel": ["rbf"],
    "gamma": [0.001, 0.01, 0.1, 1],
    "C": [1, 10, 50, 100, 200, 300, 1000] }
gsSVMC = GridSearchCV(SVMC, param_grid=svc_param_grid, cv=kfold, scoring="accuracy", verbose=1)
gsSVMC.fit(X_train, Y_train)
SVMC_best = gsSVMC.best_estimator_
gsSVMC.best_score_
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)) :
    """
    Generate a simple plot of the test and training learning curve.

    :param estimator: sklearn estimator to evaluate (cloned/refit internally by learning_curve)
    :param title: title of the produced figure
    :param X: training feature matrix
    :param y: training target vector
    :param ylim: optional (ymin, ymax) tuple for the y axis
    :param cv: cross-validation strategy forwarded to sklearn's learning_curve
    :param n_jobs: number of parallel jobs (-1 = all cores)
    :param train_sizes: fractions of the training set to evaluate
    :return: the matplotlib.pyplot module, so callers can further tweak or show the figure
    """
    plt.figure()
    plt.title(title)
    if ylim is not None :
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # +/- one standard-deviation band around the training score (red)
    plt.fill_between(train_sizes,
                     train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std,
                     alpha=0.1, color="r" )
    # Bug fix: the CV band was also drawn in red ("r"); it must be green to
    # match the cross-validation score line plotted below.
    plt.fill_between(train_sizes,
                     test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std,
                     alpha=0.1, color="g" )
    plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# Learning curves for each tuned model (over-/under-fitting diagnostics)
g1 = plot_learning_curve(gsRFC.best_estimator_, "RF learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g2 = plot_learning_curve(gsExtC.best_estimator_, "ExtraTrees learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g3 = plot_learning_curve(gsSVMC.best_estimator_, "SVC learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g4 = plot_learning_curve(gsadaDTC.best_estimator_, "AdaBoost learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
g5 = plot_learning_curve(gsGBC.best_estimator_, "GradientBoosting learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5))
# 2x2 grid of feature-importance plots for the tree-based models
nrows = ncols = 2
fig, axes = plt.subplots(nrows = nrows, ncols = ncols, sharex="all", figsize=(15,15))
names_classifiers = [("AdaBoosting", ada_best),
                     ("ExtraTrees", ExtC_best),
                     ("RandomForest", RFC_best),
                     ("GradientBoosting", GBC_best) ]
nclassifier = 0
for row in range(nrows) :
    for col in range(ncols) :
        name = names_classifiers[nclassifier][0]
        classifier = names_classifiers[nclassifier][1]
        # Indices of the 40 most important features, descending
        indices = np.argsort(classifier.feature_importances_)[::-1][:40]
        g = sns.barplot(y=X_train.columns[indices][:40], x= classifier.feature_importances_[indices][:40], orient = "h", ax=axes[row][col])
        g.set_xlabel("Relative Importance", fontsize=12)
        g.set_ylabel("Feature", fontsize=12)
        g.tick_params(labelsize=9)
        g.set_title(name + " feature importance")
        nclassifier += 1
# Per-model predictions on the test set, for inspecting model agreement
test_Survived_RFC = pd.Series(RFC_best.predict(test), name="RFC")
test_Survived_ExtC = pd.Series(ExtC_best.predict(test), name="ExtC")
test_Survived_SVMC = pd.Series(SVMC_best.predict(test), name="SVMC")
test_Survived_AdaC = pd.Series(ada_best.predict(test), name="AdaC")
test_Survived_GBC = pd.Series(GBC_best.predict(test), name="GBC")
ensemble_results = pd.concat( [test_Survived_RFC, test_Survived_ExtC, test_Survived_AdaC, test_Survived_GBC, test_Survived_SVMC], axis=1 )
# Correlation heatmap of the models' predictions
g = sns.heatmap(ensemble_results.corr(), annot=True)
# Soft-voting ensemble over the four tuned tree-based models
votingC = VotingClassifier(estimators = [ ("rfc", RFC_best),
                                          ("extc", ExtC_best),
                                          ("adac", ada_best),
                                          ("gbc", GBC_best)],
                           voting="soft")
votingC = votingC.fit(X_train, Y_train)
test_Survived = pd.Series(votingC.predict(test), name="Survived")
# Typo fix: was `resutls`, so the export below would have raised NameError
results = pd.concat([IDtest, test_Survived], axis=1)
# results.to_csv("ensemble_python_voting.csv", index=False)
| 17,632 | 6,745 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/danthedeckie/simpleeval/blob/master/README.rst#compound-types
# """
# Compound types (dict, tuple, list, set) in general just work if you pass them in as named objects.
# If you want to allow creation of these, the EvalWithCompoundTypes class works. Just replace any use of
# SimpleEval with that.
# """
# pip install simpleeval
from simpleeval import simple_eval, SimpleEval, EvalWithCompoundTypes
# SimpleEval and simple_eval do NOT work with compound types
try:
    print(simple_eval('[1, 2, 3, 4]'))
except Exception as e:
    print(e) # Sorry, List is not available in this evaluator
try:
    my_eval = SimpleEval()
    print(my_eval.eval('[1, 2, 3, 4]'))
except Exception as e:
    print(e) # Sorry, List is not available in this evaluator
print()
# Compound Types
my_compound_types_eval = EvalWithCompoundTypes()
# Whitelist len() so it is callable inside evaluated expressions
my_compound_types_eval.functions['len'] = len
# list
print(my_compound_types_eval.eval('[1, 2, 3, 4]')) # [1, 2, 3, 4]
print(my_compound_types_eval.eval('[1, 2] + [3, 4]')) # [1, 2, 3, 4]
print(my_compound_types_eval.eval('len([1, 2, 3, 4])')) # 4
print(my_compound_types_eval.eval('[1, 2, 1, 3, 4].count(1)')) # 2
print(my_compound_types_eval.eval('list("1234")')) # ['1', '2', '3', '4']
print()
# dict
print(my_compound_types_eval.eval('{"a": 1, "b": 999}')) # {'a': 1, 'b': 999}
print(my_compound_types_eval.eval('{"a": 1, "b": 999}["b"]')) # 999
print(my_compound_types_eval.eval('{"a": 1, "b": 999}.items()')) # dict_items([('a', 1), ('b', 999)])
print(my_compound_types_eval.eval('len({"a": 1, "b": 999})')) # 2
print(my_compound_types_eval.eval('dict([("a", 1), ("b", 999)])')) # {'a': 1, 'b': 999}
print()
# tuple
print(my_compound_types_eval.eval('(1, 2, 3, 4)')) # (1, 2, 3, 4)
print(my_compound_types_eval.eval('(1, 2) + (3, 4)')) # (1, 2, 3, 4)
print(my_compound_types_eval.eval('1, 2, 3, 4')) # (1, 2, 3, 4)
print(my_compound_types_eval.eval('len((1, 2, 3, 4))')) # 4
print(my_compound_types_eval.eval('(1, 2, 1, 3, 4).count(1)')) # 2
print()
# set
print(my_compound_types_eval.eval('{1, 2, 3, 4}')) # {1, 2, 3, 4}
print(my_compound_types_eval.eval('{1, 2, 1, 3, 1, 4, 3}')) # {1, 2, 3, 4}
print(my_compound_types_eval.eval('[1, 2, 1, 3, 1, 4, 3]')) # [1, 2, 1, 3, 1, 4, 3]
print(my_compound_types_eval.eval('set([1, 2, 1, 3, 1, 4, 3])')) # {1, 2, 3, 4}
print(my_compound_types_eval.eval('{1, 1, 2}.union({3, 2, 4})')) # {1, 2, 3, 4}
print(my_compound_types_eval.eval('{1, 1, 2}.intersection({3, 2, 4})')) # {2}
| 2,742 | 1,250 |
from datetime import datetime
from bson.objectid import ObjectId
import pytest
from project.domain.person.repository.physical_person import PhysicalPerson
def test_instance_physical_person():
    """A fully-populated payload builds a PhysicalPerson whose exported id matches the input _id."""
    object_id = ObjectId()
    input_data = {
        "_id": object_id,
        "status": "active",
        "name": "teste",
        "last_name": "teste",
        "age": 12,
        "birthdate": datetime.now(),
        "gender": "",
        "personal_document_id": "11122233344",
        "email": "teste@teste.com",
        "phone": "+5534988887777",
    }
    person = PhysicalPerson(**input_data)
    # The Mongo "_id" must surface as "id" in the serialized model
    assert person.dict()["id"] == object_id
def test_instance_physical_person_errors():
    """An all-empty / invalid payload must be rejected with a ValueError."""
    invalid_data = dict(
        status="",
        name="",
        last_name="",
        age=-1,
        birthdate=datetime.now(),
        gender="",
        personal_document_id="",
        email="",
        phone="",
    )
    with pytest.raises(ValueError):
        PhysicalPerson(**invalid_data)
| 1,049 | 332 |
# -*- coding: utf-8 -*-
"""
Test suite for the community-developed Python SDK for interacting with Lacework APIs.
"""
import random
import pytest
from laceworksdk.api.v2.queries import QueriesAPI
from tests.api.test_crud_endpoint import CrudEndpoint
# Tests
@pytest.fixture(scope="module")
def api_object(api):
    # The QueriesAPI instance under test, shared across the whole module
    return api.queries
@pytest.fixture(scope="module")
def api_object_create_body(random_text):
    # Creation payload; random_text keeps the queryId unique per test run
    return {
        "query_id": random_text,
        "query_text": f"""{random_text} {{
        source {{CloudTrailRawEvents e}}
        filter {{EVENT_SOURCE = 'iam.amazonaws.com' AND EVENT:userIdentity.name::String NOT LIKE '%{random_text}'}}
        return distinct {{EVENT_NAME, EVENT}}
        }}"""
    }
@pytest.fixture(scope="module")
def api_object_update_body(random_text):
    # Update payload: same query with a modified filter ("_updated" suffix)
    return {
        "query_text": f"""{random_text} {{
        source {{CloudTrailRawEvents e}}
        filter {{EVENT_SOURCE = 'iam.amazonaws.com' AND EVENT:userIdentity.name::String NOT LIKE '%{random_text}_updated'}}
        return distinct {{EVENT_NAME, EVENT}}
        }}"""
    }
@pytest.fixture(scope="module")
def query(api):
    """Pick a random Lacework-owned LW_Global_AWS_CTA query to exercise read-only endpoints."""
    all_queries = api.queries.get()["data"]
    lacework_cta_queries = [
        q for q in all_queries
        if q["owner"] == "Lacework" and "LW_Global_AWS_CTA" in q["queryId"]
    ]
    return random.choice(lacework_cta_queries)
class TestQueries(CrudEndpoint):
    """CRUD + execute/validate tests for the v2 Queries API, built on the shared CrudEndpoint suite."""
    OBJECT_ID_NAME = "queryId"
    OBJECT_TYPE = QueriesAPI
    def test_api_get_by_id(self, api_object):
        # Fetch by the "id" classifier, resolved through the queryId field
        self._get_object_classifier_test(api_object, "id", self.OBJECT_ID_NAME)
    def test_queries_api_execute_by_id(self, api_object, query):
        # Execute a canned Lacework query over a recent time window
        start_time, end_time = self._get_start_end_times()
        response = api_object.execute_by_id(
            query_id=query["queryId"],
            arguments={
                "StartTimeRange": start_time,
                "EndTimeRange": end_time,
            }
        )
        assert "data" in response.keys()
    def test_queries_api_validate(self, api_object, query):
        # Validation of a known-good query body must return a data payload
        response = api_object.validate(query_text=query["queryText"])
        assert "data" in response.keys()
    def test_api_search(self):
        # Deliberately overrides the inherited search test as a no-op
        # (presumably search is unsupported for queries — confirm)
        pass
| 2,202 | 726 |
#!/usr/bin/env python3
import os
import glob
import sys
from PIL import Image
import Imath
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torchvision import transforms
from imgaug import augmenters as iaa
import imgaug as ia
import imageio
import cv2
from utils.utils import exr_loader, depthTensor2rgbTensor, depth2rgb
class ClearGraspsDataset(Dataset):
    """
    Dataset of RGB images (and optionally rectified depth maps) for training.

    Pairs each '*-rgb.jpg' in input_dir with the '*-depth-rectified.exr' of
    the same sorted position in depth_dir. When depth_dir is empty, a
    zero tensor is returned in place of the depth.

    Args:
        input_dir (str): Path to folder containing the input images ('-rgb.jpg' suffix).
        depth_dir (str): Path to folder containing depth maps ('-depth-rectified.exr' suffix); '' disables depth.
        transform (imgaug transforms): imgaug Transforms to be applied to the imgs.
        input_only (list[str]): names of augmenters to apply to the RGB input only (not depth).
        outputImgWidth (int): target width (currently unused here; the resize call is commented out).
        outputImgHeight (int): target height (currently unused here; the resize call is commented out).
    """
    def __init__(
            self,
            input_dir,
            depth_dir='',
            transform=None,
            input_only=None,
            outputImgWidth = 256,
            outputImgHeight = 256,
    ):
        super().__init__()
        self.images_dir = input_dir
        self.depth_dir = depth_dir
        self.transform = transform
        self.input_only = input_only
        # Create list of filenames
        self._datalist_input = []  # Variable containing list of all input images filenames in dataset
        self._datalist_depth = []
        self._extension_input = ['-rgb.jpg']  # The file extension of input images
        self._extension_depth = ['-depth-rectified.exr']
        self._create_lists_filenames(self.images_dir, self.depth_dir)
        self.outputImgWidth = outputImgWidth
        self.outputImgHeight = outputImgHeight
    def __len__(self):
        return len(self._datalist_input)
    def __getitem__(self, index):
        '''Returns an item from the dataset at the given index. If no depths directory has been specified,
        then a tensor of zeroes will be returned as the depth.
        Args:
            index (int): index of the item required from dataset.
        Returns:
            torch.Tensor: Tensor of input image
            torch.Tensor: Tensor of depth (tensor of zeroes if depth_dir is "" or None)
        '''
        # Open input imgs
        image_path = self._datalist_input[index]
        _img = Image.open(image_path).convert('RGB')
        _img = np.array(_img)
        # Open depths
        if self.depth_dir:
            depth_path = self._datalist_depth[index]
            _depth = exr_loader(depth_path, ndim=1)
            #_depth = cv2.resize(_depth, (self.outputImgWidth, self.outputImgHeight), interpolation=cv2.INTER_NEAREST)
            # Sanitize NaN/inf pixels before augmentation
            _depth[np.isnan(_depth)] = 0
            _depth[np.isinf(_depth)] = 0
            _depth = np.expand_dims(_depth, axis=0)
        # Apply image augmentations and convert to Tensor
        if self.transform:
            # Deterministic snapshot so RGB and depth get identical geometric augs
            det_tf = self.transform.to_deterministic()
            _img = det_tf.augment_image(_img.copy())
            if self.depth_dir:
                # Making all values of invalid pixels marked as -1.0 to 0.
                # In raw data, invalid pixels are marked as (-1, -1, -1) so that on conversion to RGB they appear black.
                mask = np.all(_depth == -1.0, axis=0)
                _depth[:, mask] = 0.0
                _depth = _depth.transpose((1, 2, 0))  # To shape (H, W, 1) — channel-last for imgaug
                _depth = det_tf.augment_image(_depth, hooks=ia.HooksImages(activator=self._activator_masks))
                _depth = _depth.transpose((2, 0, 1))  # Back to shape (1, H, W) — channel-first for torch
        # Return Tensors
        _img_tensor = transforms.ToTensor()(_img.copy())
        if self.depth_dir:
            _depth_tensor = torch.from_numpy(_depth.copy())
            #_depth_tensor = nn.functional.normalize(_depth_tensor, p=2, dim=0)
        else:
            # NOTE(review): placeholder is 3-channel while real depth tensors
            # are 1-channel — confirm downstream consumers handle both.
            _depth_tensor = torch.zeros((3, _img_tensor.shape[1], _img_tensor.shape[2]), dtype=torch.float32)
        return _img_tensor, _depth_tensor
    def _create_lists_filenames(self, images_dir, depth_dir):
        '''Creates a list of filenames of images and depths each in dataset
        The depth at index N will match the image at index N.
        Args:
            images_dir (str): Path to the dir where images are stored
            depth_dir (str): Path to the dir where depths are stored
        Raises:
            ValueError: If the given directories are invalid
            ValueError: No images were found in given directory
            ValueError: Number of images and depths do not match
        '''
        assert os.path.isdir(images_dir), 'Dataloader given images directory that does not exist: "%s"' % (images_dir)
        for ext in self._extension_input:
            imageSearchStr = os.path.join(images_dir, '*' + ext)
            imagepaths = sorted(glob.glob(imageSearchStr))
            self._datalist_input = self._datalist_input + imagepaths
        numImages = len(self._datalist_input)
        if numImages == 0:
            raise ValueError('No images found in given directory. Searched in dir: {} '.format(images_dir))
        if depth_dir:
            assert os.path.isdir(depth_dir), ('Dataloader given depths directory that does not exist: "%s"' %
                                              (depth_dir))
            for ext in self._extension_depth:
                depthSearchStr = os.path.join(depth_dir, '*' + ext)
                depthpaths = sorted(glob.glob(depthSearchStr))
                self._datalist_depth = self._datalist_depth + depthpaths
            numdepths = len(self._datalist_depth)
            if numdepths == 0:
                raise ValueError('No depths found in given directory. Searched for {}'.format(imageSearchStr))
            if numImages != numdepths:
                raise ValueError('The number of images and depths do not match. Please check data,' +
                                 'found {} images and {} depths in dirs:\n'.format(numImages, numdepths) +
                                 'images: {}\ndepths: {}\n'.format(images_dir, depth_dir))
    def _activator_masks(self, images, augmenter, parents, default):
        '''Used with imgaug to help only apply some augmentations to images and not depths
        Eg: Blur is applied to input only, not depth. However, resize is applied to both.
        '''
        # Returning False disables the augmenter for this call (depth pass)
        if self.input_only and augmenter.name in self.input_only:
            return False
        else:
            return default
if __name__ == '__main__':
    # Cleanup: dropped unused re-imports (transforms, imageio) and the unused
    # `min`/`max` assignments that shadowed the builtins of the same name.
    import matplotlib.pyplot as plt
    from torch.utils.data import DataLoader
    import torchvision
    # Example Augmentations using imgaug
    imsize = 512
    augs_train = iaa.Sequential([
        # Geometric Augs
        iaa.Scale((imsize, imsize), 0),  # Resize image
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Rot90((0, 4)),
        # Blur and Noise
        iaa.Sometimes(0.2, iaa.GaussianBlur(sigma=(0, 1.5), name="gaus-blur")),
        iaa.Sometimes(0.1, iaa.Grayscale(alpha=(0.0, 1.0), from_colorspace="RGB", name="grayscale")),
        iaa.Sometimes(0.2, iaa.AdditiveLaplaceNoise(scale=(0, 0.1*255), per_channel=True, name="gaus-noise")),
        # Color, Contrast, etc.
        iaa.Sometimes(0.2, iaa.Multiply((0.75, 1.25), per_channel=0.1, name="brightness")),
        iaa.Sometimes(0.2, iaa.GammaContrast((0.7, 1.3), per_channel=0.1, name="contrast")),
        iaa.Sometimes(0.2, iaa.AddToHueAndSaturation((-20, 20), name="hue-sat")),
        iaa.Sometimes(0.3, iaa.Add((-20, 20), per_channel=0.5, name="color-jitter")),
    ])
    # augs_test = iaa.Sequential([
    #     # Geometric Augs
    #     iaa.Scale((imsize, imsize), 0),
    # ])
    augs = augs_train
    # Augmenter names applied to the RGB input only (never to the depth map)
    input_only = ["gaus-blur", "grayscale", "gaus-noise", "brightness", "contrast", "hue-sat", "color-jitter"]
    db_test = ClearGraspsDataset(input_dir='./data/train/rgb-imgs',
                                 depth_dir='./data/train/depth-imgs-rectified',
                                 transform=augs,
                                 input_only=input_only)
    batch_size = 4
    testloader = DataLoader(db_test, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True)
    # Show 1 Shuffled Batch of Images
    for ii, batch in enumerate(testloader):
        # Get Batch
        img, depth = batch
        print('image shape, type: ', img.shape, img.dtype)
        print('depth shape, type: ', depth.shape, depth.dtype)
        # Show Batch
        im_vis1 = torchvision.utils.make_grid(img, nrow=batch_size // 4, padding=2, normalize=True, scale_each=True)
        plt.imshow(im_vis1.numpy().transpose(1, 2, 0))
        plt.show()
        im_vis2 = torchvision.utils.make_grid(depthTensor2rgbTensor(depth), nrow=batch_size // 4, padding=2, normalize=True, scale_each=True)
        plt.imshow(im_vis2.numpy().transpose(1, 2, 0))
        plt.show()
        break
| 9,058 | 2,983 |
"""
WSGI config for django_vue project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from dotenv import load_dotenv
# Project root: two directory levels up from this file
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load environment variables from the project-level .env before Django
# settings are imported. (Dropped the unused `project_folder` alias of
# PROJECT_DIR that was never referenced.)
load_dotenv(os.path.join(PROJECT_DIR, '.env'))
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.settings')
application = get_wsgi_application()
| 596 | 210 |
"""Adam optimizer"""
import copy
import torch
def adam(lr=1e-4):
    """Factory producing a functional (placeholder) Adam optimizer.

    No in-place tensor ops and no data-dependent control flow are used.
    Calling the returned generator on a params dict yields a triple:

    - `optim_func(params, optim_state, params_grad) -> (params, optim_state)`:
      applies one update step (currently the TODO placeholder rule
      ``p <- p + lr * grad`` with a per-entry step counter as state).
    - `optim_state_init_func(optim_state) -> optim_state`: zero-initialises
      the tracked state.
    - `optim_state`: shape-only template of the optimizer state.
    """
    # TODO FIXME: properly implement Adam optimizer
    def optim_gen(params):
        # Shape-only template: same keys/shapes as the parameters
        tracked_state = copy.deepcopy(params)

        def optim_state_init_func(optim_state):
            # Fresh all-zeros state matching the template's shapes
            return {name: torch.zeros_like(tensor) for name, tensor in optim_state.items()}

        def optim_func(params, optim_state, params_grad):
            # Placeholder update rule; dict entries are rebound (tensors untouched)
            for name in params:
                params[name] = params[name] + params_grad[name] * lr
                optim_state[name] = optim_state[name] + 1
            return params, optim_state

        return optim_func, optim_state_init_func, tracked_state
    return optim_gen
| 1,409 | 402 |
'''
@author: dan
'''
from f_widget import FWidget
from kivy.uix.label import Label
from kivy.properties import ListProperty, NumericProperty, StringProperty, BooleanProperty, ObjectProperty
from kivy.uix.button import Button
from kivy.lang import Builder
from f_button import FButton
from utils import get_icon_char, get_rgba_color
from f_scalable import ScalableBehaviour
Builder.load_string('''
<FIconLabel>:
Label:
id: licon
font_name: './graph_plots/fwidgets/data/font/fontawesome-webfont.ttf'
pos: root.pos
size: root.size
font_size: root.font_size
text: root.get_icon(root.icon) if root.icon else ''
color: root.get_color(root.txt_color)
''')
class FIconLabel(Button, FWidget, ScalableBehaviour):
    """Transparent button showing a single FontAwesome icon; darkens while pressed."""
    # Icon name, resolved to a glyph through get_icon_char (see kv rule)
    icon = StringProperty('')
    get_icon = ObjectProperty(get_icon_char)
    # Current colour as a [name, shade] pair resolved by self.get_color
    # (presumably FWidget's wrapper around get_rgba_color — confirm)
    txt_color = ListProperty(['Orange', '100'])
    # Normal-state colour
    n_txt_color = ListProperty(['Orange', '100'])
    # Pressed ("down") colour
    d_txt_color = ListProperty(['Orange', '400'])
    def __init__(self, **kwargs):
        super(FIconLabel, self).__init__(**kwargs)
        self.get_icon = get_icon_char
        # Fully transparent background: only the glyph is visible
        self.background_color = (1, 1, 1, 0)
        self.markup = True
        self.halign = 'center'
        self.valign = 'middle'
        self.color = self.get_color(self.txt_color)
        self.size_hint = 1, 1
        # Glyph scales with widget height (80%)
        self.font_size = self.height * .8
        self.p_width = 0
        self.txt_color = self.n_txt_color
    def on_txt_color(self, widget, txt_color):
        # Propagate colour changes to both the button text and the kv child label
        widget.color = self.get_color(txt_color)
        widget.ids.licon.color = self.get_color(txt_color)
    def on_size(self, widget, size):
        # Keep the glyph size proportional to the widget after any resize
        self.size = size
        self.font_size = self.height * .8
    def on_touch_down(self, touch):
        # Switch to the "down" colour while the touch starts inside the widget
        if self.collide_point(touch.x, touch.y):
            self.txt_color = self.d_txt_color
        return super(FIconLabel, self).on_touch_down(touch)
    def on_touch_up(self, touch):
        # Restore the normal colour when the touch is released over the widget
        if self.collide_point(touch.x, touch.y):
            self.txt_color = self.n_txt_color
        return super(FIconLabel, self).on_touch_up(touch)
| 2,122 | 729 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
This is an example with a source model logic tree containing uncertainties
on the fault geometry. The GMPE is fixed as the Sadigh et al. 1997 model.
The source model contains three faults SFLT1 (simple fault), COMFLT1 (complex
fault) and CHAR1 (characteristic fault). Two geometries are defined for
SFLT1 (sg1, sg2), two for COMFLT1 (cog1, cog2) and three for CHAR1
(char_simple, char_complex, char_planar)
12 curves output:
*_sg1_cog1_char_simple-*
*_sg1_cog1_char_complex-*
*_sg1_cog1_char_planar-*
*_sg1_cog2_char_simple-*
*_sg1_cog2_char_complex-*
*_sg1_cog2_char_planar-*
*_sg2_cog1_char_simple-*
*_sg2_cog1_char_complex-*
*_sg2_cog1_char_planar-*
*_sg2_cog2_char_simple-*
*_sg2_cog2_char_complex-*
*_sg2_cog2_char_planar-*
"""
| 1,498 | 574 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ParseError(Exception):
    """Signals an unparseable config line.

    Records the failing line's text and 1-based number so callers can
    report the exact location.
    """
    def __init__(self, message, line_no, line):
        self.msg = message
        self.line_no = line_no
        self.line = line
    def __str__(self):
        location = 'at line %d' % self.line_no
        return '%s, %s: %r' % (location, self.msg, self.line)
class BaseParser(object):
    """Event-driven parser for ini-style text.

    Each input line is fed through a small state machine supporting
    ``#``/``;`` comments, ``[section]`` headers, ``key = value`` /
    ``key: value`` assignments, and indented continuation lines that
    extend the previous value.  Subclasses receive results through the
    ``assignment`` / ``new_section`` / ``comment`` hooks.
    """
    # Exception type raised by make_parser_error; subclasses may override.
    PARSE_EXC = ParseError
    def __init__(self):
        super(BaseParser, self).__init__()
        self.line_no = 0  # 1-based once parsing starts; used in error messages
    def _assignment(self, key, value):
        # Flush one completed key/value pair and reset the accumulator state.
        self.assignment(key, value)
        return None, []
    def _get_section(self, line):
        # `line` is the full header including brackets, e.g. '[name]'.
        if not line.endswith(']'):
            return self.error_no_section_end_bracket(line)
        if len(line) <= 2:
            # Nothing between the brackets.
            return self.error_no_section_name(line)
        return line[1:-1]
    def _split_key_value(self, line):
        """Split an assignment line on whichever of ':'/'=' comes first."""
        colon = line.find(':')
        equal = line.find('=')
        if colon < 0 and equal < 0:
            return self.error_invalid_assignment(line)
        if colon < 0 or (0 <= equal < colon):
            key, value = line[:equal], line[equal + 1:]
        else:
            key, value = line[:colon], line[colon + 1:]
        value = value.strip()
        # Strip one level of matching surrounding quotes, if present.
        if value and value[0] == value[-1] and value.startswith(("\"", "'")):
            value = value[1:-1]
        return key.strip(), [value]
    def _single_line_parse(self, line, key, value):
        """Advance by one line; returns the pending (key, value) pair."""
        self.line_no += 1
        if line.startswith(('#', ';')):
            # Whole-line comment.
            self.comment(line[1:].strip())
            return key, value
        # Text after an inline ';' is treated as a trailing comment.
        active, _, comment = line.partition(';')
        self.comment(comment.strip())
        if not active:
            # Blank line, ends multi-line values
            if key:
                key, value = self._assignment(key, value)
            return key, value
        if active.startswith((' ', '\t')):
            # Continuation of previous assignment
            if key is None:
                return self.error_unexpected_continuation(line)
            value.append(active.lstrip())
            return key, value
        if key:
            # Flush previous assignment, if any
            key, value = self._assignment(key, value)
        if active.startswith('['):
            # Section start
            section = self._get_section(active)
            if section:
                self.new_section(section)
        else:
            key, value = self._split_key_value(active)
            if not key:
                return self.error_empty_key(line)
        return key, value
    def parse(self, line_iter=None):
        """Parse lines from *line_iter*, invoking the subclass hooks."""
        if line_iter is None:
            return
        key = None
        value = []
        for line in line_iter:
            key, value = self._single_line_parse(line, key, value)
        if key:
            # Flush previous assignment, if any
            self._assignment(key, value)
    def assignment(self, key, value):
        """Called when a full assignment is parsed."""
        raise NotImplementedError()
    def new_section(self, section):
        """Called when a new section is started."""
        raise NotImplementedError()
    def comment(self, comment):
        """Called when a comment is parsed."""
        pass
    def make_parser_error(self, template, line):
        # Raise the configured error type with the current location attached.
        raise self.PARSE_EXC(template, self.line_no, line)
    def error_invalid_assignment(self, line):
        self.make_parser_error("No ':' or '=' found in assignment", line)
    def error_empty_key(self, line):
        self.make_parser_error('Key cannot be empty', line)
    def error_unexpected_continuation(self, line):
        self.make_parser_error('Unexpected continuation line', line)
    def error_no_section_end_bracket(self, line):
        self.make_parser_error('Invalid section (must end with ])', line)
    def error_no_section_name(self, line):
        self.make_parser_error('Empty section name', line)
class ConfigParser(BaseParser):
    """Parses a single config file, populating 'sections' to look like:
    {'DEFAULT': {'key': [value, ...], ...},
    ...}
    """
    def __init__(self, filename, sections):
        super(ConfigParser, self).__init__()
        self.filename = filename
        self.sections = sections
        self.section = None  # name of the section currently being filled
    def parse(self, line_iter=None):
        # The file itself is the line source; the argument is ignored.
        with open(self.filename) as source:
            return super(ConfigParser, self).parse(source)
    def new_section(self, section):
        self.section = section
        self.sections.setdefault(section, [])
    def assignment(self, key, value):
        if not self.section:
            raise self.error_no_section()
        # Multi-line values arrive as a list of fragments; join them.
        self.sections[self.section].append([key, '\n'.join(value)])
    def error_no_section(self):
        self.make_parser_error('Section must be started before assignment', '')
| 5,368 | 1,577 |
import pandas
# Convert each JSON dataset to a CSV file with the same base name.
for stem in ('data_with_income_rub', 'data_with_income_rub_from_csv'):
    frame = pandas.read_json(stem + '.json')
    frame.to_csv(stem + '.csv', index=False)
| 245 | 107 |
import mock
from datetime import datetime, timedelta
from django.test import override_settings
from freezegun import freeze_time
from django.urls import reverse
from rest_framework import status
from tests.tests.test_api import get_token_from_email
@mock.patch('django.core.mail.outbox', new_callable=list)
@override_settings(TFA_TOKEN_AGE='20')
def test_token_expire(outbox, user_logged_client):
    """An emailed TFA token is rejected once TFA_TOKEN_AGE (20s) has passed."""
    # Log in and request an email challenge; the token arrives in the outbox.
    user_logged_client.post(
        '/auth/login/',
        data={
            'username': 'user',
            'password': 'password',
        }
    )
    user_logged_client.post(reverse('tfa_create_challenge'), data={'type': 'email'})
    token = get_token_from_email(outbox.pop())
    # Jump 30 seconds ahead -- beyond the 20-second token lifetime.
    date = datetime.now() + timedelta(seconds=30)
    with freeze_time(date):
        url = reverse('tfa_accept_challenge')
        response = user_logged_client.post(url, data={'token': token})
        assert response.status_code == status.HTTP_400_BAD_REQUEST
@mock.patch('django.core.mail.outbox', new_callable=list)
@override_settings(TFA_CLIENT_AGE='20')
def test_client_expire(outbox, user_logged_client):
    """A client's TFA clearance lapses after TFA_CLIENT_AGE (20s) and the
    client is redirected (303) instead of served."""
    user_logged_client.post(
        '/auth/login/',
        data={
            'username': 'user',
            'password': 'password',
        }
    )
    # Complete the full challenge flow so the client is TFA-cleared.
    user_logged_client.post(reverse('tfa_create_challenge'), data={'type': 'email'})
    token = get_token_from_email(outbox.pop())
    user_logged_client.post(reverse('tfa_accept_challenge'), data={'token': token})
    response = user_logged_client.get('/dummy/')
    assert response.status_code == status.HTTP_200_OK
    # 30 seconds later the 20-second clearance has expired.
    date = datetime.now() + timedelta(seconds=30)
    with freeze_time(date):
        response = user_logged_client.get('/dummy/')
        assert response.status_code == status.HTTP_303_SEE_OTHER
| 1,767 | 585 |
import sys
import json
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import IntegrityError
from sqlalchemy import Column, Integer, String, Table, PrimaryKeyConstraint, create_engine
# engine = create_engine('sqlite:////tmp/zorin.db')
# In-memory database: contents live only for the duration of this run.
engine = create_engine('sqlite:///:memory:')
Base = declarative_base()
Session = sessionmaker(bind=engine)
# Composite (site_id, name) primary key de-duplicates visitors per site:
# inserting the same visitor twice raises IntegrityError (handled by caller).
visitors = Table('visitors', Base.metadata,
                 Column('site_id', Integer),
                 Column('name', String),
                 PrimaryKeyConstraint("site_id", "name")
                 )
class Visitor(Base):
    # Thin ORM mapping over the `visitors` table.
    __table__ = visitors
Base.metadata.create_all(engine)
class Site(object):
    """Aggregates chat/email/operator activity for one site.

    Python 2 module (note the print statement in report()).
    """
    def __init__(self):
        self.op_events = {}   # operator -> sorted list of (timestamp, state)
        self.chats = set()    # timestamps of messages handled while online
        self.emails = set()   # timestamps of messages routed to email (offline)
        self.operators = set()  # all operators ever seen for this site
    def add_operator_event(self, ts, op, state):
        # Keep each operator's event list de-duplicated and time-ordered.
        self.op_events[op] = sorted(set(self.op_events.get(op, []) + [(ts, state)]))
        self.operators.add(op)
    def get_state(self, time_stamp):
        """Return 'online' if any operator's latest event at time_stamp is online."""
        states = []
        for op, events in self.op_events.items():
            prev_state = 'offline'
            for ts, state in events:
                if ts > time_stamp:
                    break
                prev_state = state
            states.append(prev_state)
        return 'online' if 'online' in states else 'offline'
    def add_chat(self, time_stamp, visitor, site_id):
        # A timestamp already counted (as chat or email) is ignored.
        if time_stamp in self.chats or time_stamp in self.emails:
            return
        state = self.get_state(time_stamp)
        if state == 'online':
            self.chats.add(time_stamp)
        else:
            self.emails.add(time_stamp)
        visitor = Visitor(site_id=site_id, name=visitor)
        session = Session()
        session.add(visitor)
        try:
            session.commit()
        except IntegrityError:
            # Duplicate (site_id, name) key: visitor was already recorded.
            pass
    def report(self, site_id):
        """Print the per-site summary line (message/email/operator/visitor counts)."""
        session = Session()
        visitors = session.query(Visitor).filter(Visitor.site_id == site_id).all()
        print "{site_id},messages={messages},emails={emails},operators={operators},visitors={visitors}".format(
            site_id=site_id, messages=len(self.chats), emails=len(self.emails),
            operators=len(self.operators), visitors=len(visitors))
def main():
    """Read a JSON-lines event log (path in argv[1]) and print per-site reports.

    Two passes over the file: the first collects operator status events so
    that message routing can consult operator availability regardless of
    event order; the second routes each message to chat or email.
    """
    fname = sys.argv[1]
    sites = {}
    # First pass: operator status events only.
    with open(fname) as f:
        # Iterate the file lazily instead of f.readlines(): the original
        # materialized the whole log in memory for no benefit.
        for line in f:
            data = json.loads(line)
            site_id = data['site_id']
            site = sites.setdefault(site_id, Site())
            if data['type'] == 'status':
                site.add_operator_event(data['timestamp'], data['from'], data['data']['status'])
    # Second pass: route messages now that operator timelines are complete.
    with open(fname) as f:
        for line in f:
            data = json.loads(line.strip())
            site_id = data['site_id']
            site = sites[site_id]
            if data['type'] == 'message':
                site.add_chat(data['timestamp'], data['from'], site_id)
    # Report in deterministic (sorted) site order.
    for site_id, site in sorted(sites.items(), key=lambda _e: _e[0]):
        site.report(site_id)
if __name__ == '__main__':
    main()
| 3,131 | 957 |
# 이지호 작성 #
# 공동번역 성서의 저작권은 모두 저작권자에게 있습니다. #
import sys
import re
import random
end = "끝났습니다."  # "Finished." message (not used in the visible code)
error = "오류입니다."  # "It is an error." -- shown for an invalid menu choice
def run():
    """Interactive console browser for the Korean Common Translation Bible.

    Shows a book menu, then one of five modes: scrape a whole book to
    result.txt, read one chapter, read one verse, page through chapters,
    or print a random line.  Reads '공동번역.txt' from the working directory.

    Fixes over the original: mode 1 formatted a compiled regex object into a
    find() string (matching nothing) and reopened the file on every loop
    iteration (infinite loop); mode 3 compared readline() to False, which
    never holds (readline() returns '' at EOF), looping forever on a missing
    verse; mode 4 never reset the accumulated page between chapters.
    """
    short = ['Gen', 'Exo', 'Lev', 'Num', 'Deu', 'Jos', 'Jdg', 'Rth', '1Sa', '2Sa', '1Ki', '2Ki', '1Ch', '2Ch', 'Ezr',
             'Neh', 'Est', 'Job', 'Psa', 'Pro', 'Ecc', 'Sol', 'Isa', 'Jer', 'Eze', 'Dan', 'Amo', 'Oba', 'Jon', 'Mic',
             'Nah', 'Hab', 'Zep', 'Hag', 'Zec', 'Mar', 'Luk', 'Joh', 'Act', 'Rom', '1Co', '2Co', 'Gal', 'Eph', 'Phi',
             'Col', '1Th', '2Th', '1Ti', '2Ti', 'Tit', 'Phm', 'Heb', 'Jam', '1Pe', '2Pe', '1Jo', '2Jo', '3Jo', 'Jod',
             'Rev']
    bookname = ['창세기', '출애굽기', '레위기', '민수기', '신명기', '여호수아', '판관기', '룻기', '사무엘상', '사무엘하',
                '열왕기상', '열왕기하', '역대기상', '역대기하', '에즈라', '느헤미야', '에스델', '욥기', '시편', '잠언',
                '전도서', '아가', '이사야', '에레미야', '애가', '에제키엘', '다니엘', '호에샤', '요엘', '아모스',
                '오바디야', '오냐', '미가', '나훔', '하바꾹', '스바니야', '하깨', '즈가리야', '말라기',
                '마태오의 복음서', '마르코의 복음서', '루가의 복음서', '요한의 복음서', '사도행전', '로마인에게 보낸 편지',
                '고린토인에게 보낸 첫째 편지', '고랜토인에게 보낸 둘째 편지', '갈라디아인에게 보낸 편지', '에페소인에게 보낸 편지',
                '필립비인들에게 보낸 편지', '골로사이인들에게 보낸 편지', '델살로니카인들에게 보낸 첫째 편지',
                '데살로니카인들에게 보낸 둘째 편지', '디모테오에게 보낸 첫째 편지', '디도에게 보낸 편지',
                '필레몬에게 보낸 편지', '히브리인들에게 보낸 편지', '야고보의 편지', '베드로의 첫째 편지',
                '베드로의 둘째 편지', '요한의 첫째 편지', '요한의 둘째 편지', '요한의 세째 편지', '유다의 편지', '요한의 묵시록']
    # NOTE(review): `short` and `bookname` appear to have different lengths
    # (e.g. Matthew/Malachi seem to lack abbreviations), so later books may be
    # misaligned with their abbreviations -- verify against the source text.
    global selectbookname
    global k
    global line
    global number
    # Book menu, five titles per row.  (The original row-break logic printed
    # stray blank lines and duplicated the final row.)
    for i in range(len(short)):
        print("[%d] %s" % (i + 1, bookname[i]), end=" ")
        if (i + 1) % 5 == 0:
            print()
    print()
    print("선택하실 책 번호를 선택하세요.")
    number = int(input())
    selectbookname = short[int(number - 1)]
    print(selectbookname)
    print('''
[1] 성경 scrapper
[2] 장 선택해서 읽기
[3] 줄 선택해서 읽기
[4] 성경 리더
[5] 랜덤 줄 출력(모든 경전)
무엇을 선택하시겠습니까?''')
    choice = int(input())
    if choice == 1:  # scrape every verse of the selected book into result.txt
        # Match lines like "Gen 1:1"; open the file once and scan each line.
        verse_pattern = re.compile(r'%s \d+:\d+' % re.escape(selectbookname))
        collected = []
        with open('공동번역.txt', 'r') as a:
            for line in a:
                if verse_pattern.search(line):
                    collected.append(line)
        with open('result.txt', 'w') as b:
            b.write(''.join(collected))
    elif choice == 2:  # read one chapter
        page = ''
        print('''몇 장입니까?''')
        k = int(input())
        # NOTE(review): substring matching means e.g. chapter 1 also matches
        # chapters 10-19; preserved from the original behaviour.
        with open('공동번역.txt', 'r') as a:
            for line in a:
                if line.find('%s %d' % (selectbookname, k)) != -1:
                    page += '%s\n' % line
                if line.find('%s %d' % (selectbookname, k + 1)) != -1:
                    break
        print("\n" * 5)
        print(page)
    elif choice == 3:  # read one verse
        print('''몇 장 입니까?''')
        page = input()
        print('''몇 줄 입니까?''')
        line = input()
        target = "%s %s:%s" % (selectbookname, page, line)
        found = ''
        with open('공동번역.txt', 'r') as a:
            for candidate in a:
                if candidate.find(target) != -1:
                    found = candidate
                    break
        print(found)
    elif choice == 4:  # page through chapters starting at chapter k
        page = ''
        print('''몇 장부터 보시겠습니까?''')
        k = int(input())
        with open('공동번역.txt', 'r') as a:
            for line in a:
                if line.find('%s %d' % (selectbookname, k)) != -1:
                    page += '%s\n' % line
                if line.find('%s %d' % (selectbookname, k + 1)) != -1:
                    print(page)
                    print('''다음 장을 보려면 엔터를 눌러주세요.
다른 값을 입력하시면 종료됩니다.''')
                    k += 1
                    page = ''  # reset: the original accumulated prior chapters
                    select = input()
                    if select == '':
                        continue
                    else:
                        break
    elif choice == 5:  # print one random line from the whole corpus
        with open('공동번역.txt', 'r') as a:
            alllines = a.readlines()
        print(random.choice(alllines))
    else:
        print(error)
        sys.exit()
run()
| 4,766 | 2,519 |
"""Module containing the v1 API."""
from .root import API_V1 # noqa
from . import recipient_alias # noqa
from . import sender_alias # noqa
| 143 | 48 |
from cosmic_pairing_api.utils.enums.element import Element
from cosmic_pairing_api.utils.enums.modality import Modality
# Zodiac sign lookup table.  Each entry pairs a sign with its astrological
# modality and element, stored as enum member names; ids run 1..12 in
# traditional zodiac order.
_SIGN_SPECS = (
    ("aries", Modality.CARDINAL, Element.FIRE),
    ("taurus", Modality.FIXED, Element.EARTH),
    ("gemini", Modality.MUTABLE, Element.AIR),
    ("cancer", Modality.CARDINAL, Element.WATER),
    ("leo", Modality.FIXED, Element.FIRE),
    ("virgo", Modality.MUTABLE, Element.EARTH),
    ("libra", Modality.CARDINAL, Element.AIR),
    ("scorpio", Modality.FIXED, Element.WATER),
    ("sagittarius", Modality.MUTABLE, Element.FIRE),
    ("capricorn", Modality.CARDINAL, Element.EARTH),
    ("aquarius", Modality.FIXED, Element.AIR),
    ("pisces", Modality.MUTABLE, Element.WATER),
)
SIGNS = [
    {
        "id": index,
        "name": sign_name,
        "modality": modality.name,
        "element": element.name,
    }
    for index, (sign_name, modality, element) in enumerate(_SIGN_SPECS, start=1)
]
| 1,784 | 657 |
#!/usr/local/anaconda3/bin/python
import sys
sys.path.insert(0, "../libs/")
from spy_mean import compute_mean
if __name__ == "__main__":
    # Simple interactive driver: read N samples and report their mean.
    print("Program to compute mean")
    count = int(input("Enter total number of samples: "))
    data = []
    for idx in range(count):
        # Convert each sample to float: the original appended the raw input
        # strings, so compute_mean would have operated on text, not numbers.
        val = float(input("Enter data {0}: ".format(idx + 1)))
        data.append(val)
    mean = compute_mean(data)
    print("You entered: {0} vals, mean = {1}".format(count, mean))
| 513 | 181 |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------
# wxalarm.py
#
# Copyright (c) 2019 sanderiana https://github.com/sanderiana
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# ---------------------------------------------------------------
# Icon made by Freepik from www.flaticon.com
# ---------------------------------------------------------------
import datetime
def change_time(hour_min):
    """Combine today's date with a wall-clock time given as "HH:MM".

    :param hour_min: 24-hour time string, e.g. "07:30"
    :return: datetime for today at that hour/minute with seconds zeroed
    """
    parts = hour_min.split(":")
    now = datetime.datetime.now()
    return now.replace(hour=int(parts[0]), minute=int(parts[1]),
                       second=0, microsecond=0)
def change_delta(delta_time):
    """Format a timedelta as a zero-padded "HH:MM" string.

    :param delta_time: a datetime.timedelta
    :return: total hours and leftover minutes, e.g. "02:30"
    """
    total_seconds = delta_time.total_seconds()
    hours, remainder = divmod(total_seconds, 3600)
    return "%02d:%02d" % (hours, remainder // 60)
"""Выбор режима"""
print ("""
Выберите режим загрузки:
1 - Стандартная загрузка с поддержкой сетевых драйверов
2 - Безопасный режим с записью действий в log
9 - Остановить загрузку
Введите ТОЛЬКО цифру
""")
def wr(mode):
f = open('config.ini','w') # открытие в режиме записи
f.write(str(mode)) # запись режима в файл
f.close()
x = input('))')
if x == 1:
wr(1)
print('Стандартная загрузка с поддержкой сетевых драйверов')
elif x == 2:
wr(2)
print('Безопасный режим с записью действий в log')
elif x == 9:
exit()
else:
wr(1)
print('Стандартная загрузка с поддержкой сетевых драйверов')
| 652 | 301 |
# Print "Yes" if N is the product of two one-digit numbers (1..9),
# otherwise "No".
N = int(input())
found = any(N % d == 0 and N // d < 10 for d in range(1, 10))
print('Yes' if found else 'No')
| 150 | 67 |
"""Convert shinkai ship Json to KcWiki Lua """
__all__ = ['main']
import json
from collections import OrderedDict
from utils import python_data_to_lua_table
SHIPS_HR_JSON = 'json/ships_human_readable.json'  # input: human-readable ship data
SHIPS_LUA = 'lua/ships.lua'  # output: generated KcWiki Lua table
def shinkai_parse_ship(ships):
    """Convert raw shinkai ship records into KcWiki-keyed OrderedDicts.

    :param ships: mapping of ship id -> raw ship record
    :return: OrderedDict of ship id -> entry with Chinese/KcWiki field names
    """
    result = OrderedDict()
    for ship_id, ship in ships.items():
        stats = ship['stats']
        # Combat attributes; paired [base, max] values for firepower/torpedo.
        attributes = OrderedDict()
        attributes['耐久'] = stats['taik']
        attributes['火力'] = [stats['houg'], stats['houg2']]
        attributes['雷装'] = [stats['raig'], stats['raig2']]
        attributes['对空'] = stats['tyku']
        attributes['对潜'] = stats['tais']
        attributes['回避'] = stats['houk']
        attributes['索敌'] = stats['saku']
        attributes['速力'] = stats['soku']
        attributes['装甲'] = stats['souk']
        attributes['运'] = stats['luck']
        attributes['射程'] = stats['leng']
        # Equipment: slot count, per-slot capacity, fitted equipment ids.
        equipment = OrderedDict()
        equipment['格数'] = len(ship['slots'])
        equipment['搭载'] = ship['slots']
        equipment['装备'] = ship['equips']
        entry = OrderedDict()
        entry['日文名'] = ship['name']['fullname_ja_jp']
        entry['中文名'] = ship['name']['fullname_zh_cn']
        entry['kcwiki分类'] = ship['kcwiki_class']
        entry['属性'] = attributes
        entry['装备'] = equipment
        # Optional appearance records (event maps the ship shows up in).
        appearances = []
        for appear in ship.get('appears', []):
            info = OrderedDict()
            info['map'] = OrderedDict()
            info['map']['限定海域'] = appear['map']['is_event']
            info['map']['年'] = appear['map']['year']
            # Season index 1..4 -> winter/spring/summer/autumn.
            info['map']['季节'] = [None, '冬', '春', '夏', '秋'][appear['map']['season']]
            info['map']['海域'] = 'E-' + str(appear['map']['event_id'])
            info['map']['Boss'] = appear['map']['is_boss']
            if 'is_final_battle' in appear:
                info['最终战'] = appear['is_final_battle']
            if 'selected_rank' in appear:
                # Difficulty index 0..3 -> none/C/B/A.
                info['选择难度'] = ['无', '丙', '乙', '甲'][appear['selected_rank']]
            appearances.append(info)
        if appearances:
            entry['出现海域'] = appearances
        result[ship_id] = entry
    return result
def shinkai_generate_ship_lua(ships):
    """Generate KcWiki shinkai ship Lua table.

    Converts *ships* via shinkai_parse_ship and writes the resulting Lua
    table literal (wrapped in ``d.shipDataTable``) to SHIPS_LUA.
    """
    ships_dict = shinkai_parse_ship(ships)
    # python_data_to_lua_table returns (lua_source, ...); only the source is used.
    data, _ = python_data_to_lua_table(ships_dict, level=1)
    with open(SHIPS_LUA, 'w', encoding='utf8') as lua_fp:
        lua_fp.write('local d = {}\n\n'
                     + 'd.shipDataTable = {\n')
        lua_fp.write(data)
        lua_fp.write('\n}\n\nreturn d\n')
def load_ships_json(json_file):
    """Load a UTF-8 encoded JSON file and return the decoded object.

    :param json_file: path of the JSON file to read
    :return: decoded Python object
    """
    print('Load json file: {}'.format(json_file))
    with open(json_file, encoding='utf8') as fp:
        return json.load(fp)
def main():
    """Main process: load the human-readable ship JSON and emit the Lua table."""
    ships = load_ships_json(SHIPS_HR_JSON)
    shinkai_generate_ship_lua(ships)
if __name__ == '__main__':
    main()
| 3,380 | 1,184 |
# Challenge 01
# Read a person's name and show a welcome message using the value entered.
print()
print('=-='*15)
# The original prompted for the name and greeted twice (two duplicated
# solutions of the same exercise); keep a single prompt and greeting.
nome = input('Digite seu nome: ')
print('É um prazer te conhecer, {}!'.format(nome))
print('=-='*15)
print()
import os
import sys
from typing_extensions import final
sys.path.append("../")
# TODO : Remove this append line
import numpy as np
import torch
import torch.nn as nn
from models.FCOS import FCOS
from models.PostProcessor import PostProcessor
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from torchvision import transforms
from utils.transforms.to_tensor import ToTensorOwn
from utils.transforms.normalize import Normalize
from utils.transforms.center_crop import CenterCrop
cuda = torch.device('cuda:0')  # default CUDA device used when use_cuda=True
cpu = torch.device("cpu:0")  # CPU device (not referenced in the visible code)
class FCOSInference(nn.Module):
    """FCOS detector plus post-processing wrapped as one inference module.

    forward() returns a fixed-size, zero-padded prediction tensor together
    with the per-image count of valid boxes, keeping outputs batchable.

    NOTE(review): ``freeze_backend`` and ``strides`` use mutable (list)
    default arguments; they are only read here, but sharing one list across
    instances is fragile.
    """
    def __init__(self, backbone_model='resnet50', freeze_backend=[False, False, False, False], \
        fpn_features=256, num_classes=81, use_det_head_group_norm=True, \
        centerness_on_regression=True, use_gradient_checkpointing=False, \
        strides=[8, 16, 32, 64, 128], use_cuda=False, \
        add_centerness_in_cls_prob=True, max_detection_boxes_num=1000, \
        cls_score_threshold=0.05, nms_iou_threshold=0.60):
        super(FCOSInference, self).__init__()
        self.strides = strides  # feature-map strides for each FPN level
        self.max_detection_boxes_num = max_detection_boxes_num
        # Raw detector producing per-level classification/centerness/regression maps.
        self.model = FCOS(backbone_model, freeze_backend, fpn_features, num_classes, \
            use_det_head_group_norm, centerness_on_regression, use_gradient_checkpointing)
        # Converts raw maps to scored, NMS-filtered boxes.
        self.post_process = PostProcessor(use_cuda, add_centerness_in_cls_prob, \
            max_detection_boxes_num, cls_score_threshold, nms_iou_threshold, num_classes)
        if use_cuda:
            self.model = self.model.to(cuda, non_blocking=True)
            self.post_process = self.post_process.to(cuda, non_blocking=True)
    def forward(self, preprocesed_image):
        """Run detection on a preprocessed batch.

        Returns (final_prediction, num_bboxes): a zero-padded
        [B x max_detection_boxes_num x 6] tensor and the valid-box count
        per image.
        """
        # image : [B x 3 x img_h x img_w]
        cls_probs, cnt_logits, reg_values = self.model(preprocesed_image)
        # cls_probs, cnt_logit, reg_values each will have a list of features having shape as below.
        # cls_probs : [[B x 81 x H x W], [B x 81 x H x W], ....]
        # cnt_logits: [[B x 1 x H x W], [B x 1 x H x W], ....]
        # reg_values: [[B x 4 x H x W], [B x 4 x H x W], ....]
        predictions = self.post_process([cls_probs, cnt_logits, reg_values], self.strides)
        # predictions : List of [N x 6] tensor for each element in batch
        #             : [x1, y1, x2, y2, cls_prob, cls_id]
        B = preprocesed_image.shape[0]
        num_bboxes = torch.zeros(size=[B])
        for i, res_img in enumerate(preprocesed_image):
            # Clamp box corners to the image bounds.
            img_h, img_w = res_img.shape[1:]
            predictions[i][:, 0] = torch.clip(predictions[i][:, 0], 0, img_w)
            predictions[i][:, 1] = torch.clip(predictions[i][:, 1], 0, img_h)
            predictions[i][:, 2] = torch.clip(predictions[i][:, 2], 0, img_w)
            predictions[i][:, 3] = torch.clip(predictions[i][:, 3], 0, img_h)
            num_bboxes[i] = len(predictions[i])
        # Zero-pad the per-image predictions into one fixed-size batch tensor.
        final_prediction = torch.zeros(size=[B, self.max_detection_boxes_num, 6], dtype=torch.float32)
        for i, pred in enumerate(predictions):
            final_prediction[i, :len(pred)] = pred
        return final_prediction, num_bboxes
if __name__ == "__main__":
import cv2
import config_converter as config
complete_model = FCOSInference(backbone_model=config.converter_backbone, freeze_backend=[False, False, False, False], \
fpn_features=config.converter_fpn_features, num_classes=config.converter_num_classes, \
use_det_head_group_norm=config.converter_use_det_head_group_norm, \
centerness_on_regression=config.converter_centerness_on_regression, \
use_gradient_checkpointing=False, strides=config.converter_strides, use_cuda=False, \
add_centerness_in_cls_prob=config.add_centerness_in_cls_prob, \
max_detection_boxes_num=config.max_detection_boxes_num, \
cls_score_threshold=config.cls_score_threshold, \
nms_iou_threshold=config.nms_iou_threshold)
ckpt_path = "../summaries/2021_07_26_00_01_29/ckpt/fcos_resnet50_eps_26_test_loss_2.5426.pth"
ckpt = torch.load(ckpt_path)['model']
complete_model.model.load_state_dict(ckpt, strict=True) # Restore FCOS architecture part only
complete_model.model.eval() # TODO : Skipping this intentionally
complete_model.eval()
# Image loading and preprocessing
img_path = "../sample_imgs/000026.jpg"
# img_path = "../sample_imgs/000012.jpg"
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
transforms = transforms.Compose([
CenterCrop(),
ToTensorOwn(), # Custom ToTensor transform, converts to CHW from HWC only
Normalize(config.converter_normalization_type),
])
empty_bb = BoundingBoxesOnImage([BoundingBox(0, 0, 100, 100, label=0)], \
shape=(*config.input_size, 3))
sample = {'image' : img, 'bbox' : empty_bb}
preprocessed_tensor = transforms([sample, config.input_size])
resized_img = preprocessed_tensor['image']
resized_img = torch.unsqueeze(resized_img, dim=0)
# Model Inference
final_predictions, num_bboxes = complete_model(resized_img)
final_predictions = final_predictions.detach().numpy()
num_bboxes = num_bboxes.detach().numpy()
resized_img = resized_img.detach().numpy()
for pred, num_bb, img in zip(final_predictions, num_bboxes, resized_img):
pred = pred[:int(num_bb)]
# Rest are padded zeros and not useful as we padded the predictions to make a batch of output
img[0:1, :, :] = img[0:1, :, :] * 0.229 + 0.485
img[1:2, :, :] = img[1:2, :, :] * 0.224 + 0.456
img[2:3, :, :] = img[2:3, :, :] * 0.225 + 0.406
img = np.uint8(np.transpose(img, (1, 2, 0)) * 255)
for bb in pred:
x1, y1, x2, y2 = [int(c) for c in bb[:4]]
cls_prob, cls_id = bb[4:]
cls_name = config.converter_label_dict[int(cls_id)]
print(f"X1: {x1}, Y1: {y1}, X2: {x2}, Y2: {y2}, Cls_id: {int(cls_id)}, Cls_name: {cls_name}, Cls_prob: {cls_prob:.4f}")
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
op_path = os.path.splitext(img_path)[0] + "_res.jpg"
cv2.imwrite(op_path, img)
| 6,547 | 2,456 |
f = open("C:\Users\Harshit Agarwal\Desktop\stackoverflow.com-Posts\Postsnew.xml", "w")
f.write('<?xml version="1.0" encoding="utf-8"?>\n<posts>')
i = 0
with open("C:\Users\Harshit Agarwal\Desktop\stackoverflow.com-Posts\Posts.xml") as fileobject:
for line in fileobject:
i +=1
if 'PostTypeId="1"' in line:
if i%100000 == 0:
print i
f.write(line)
f.write('</posts>')
f.close() | 435 | 165 |
# -*- coding: utf-8 -*-
"""Predicates for edge data from BEL graphs."""
from functools import wraps
from .utils import part_has_modifier
from ..graph import BELGraph
from ...constants import (
ACTIVITY, ANNOTATIONS, ASSOCIATION, CAUSAL_RELATIONS, CITATION, CITATION_AUTHORS, CITATION_TYPE,
CITATION_TYPE_PUBMED, DEGRADATION, DIRECT_CAUSAL_RELATIONS, EVIDENCE, OBJECT, POLAR_RELATIONS, RELATION, SUBJECT,
TRANSLOCATION,
)
from ...dsl import BiologicalProcess, Pathology
__all__ = [
'edge_predicate',
'keep_edge_permissive',
'has_provenance',
'has_pubmed',
'has_authors',
'is_causal_relation',
'is_direct_causal_relation',
'is_associative_relation',
'has_polarity',
'edge_has_activity',
'edge_has_degradation',
'edge_has_translocation',
'edge_has_annotation',
'has_pathology_causal',
]
def edge_predicate(func):  # noqa: D202
    """Decorate an edge predicate function that only takes a dictionary as its singular argument.

    Apply this as a decorator to a function that takes a single argument, a PyBEL edge data
    dictionary, so it can also be called as ``predicate(graph, u, v, k)``.

    :type func: (dict) -> bool
    :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool
    """
    @wraps(func)
    def _wrapped(*args):
        first = args[0]
        if not isinstance(first, BELGraph):
            # Called directly with the edge data dictionary.
            return func(*args)
        # Called as (graph, u, v, k): look up the edge data first.
        u, v, k = args[1:4]
        return func(first[u][v][k])
    return _wrapped
def keep_edge_permissive(*args, **kwargs):
    """Accept every edge unconditionally.

    Ignores all positional and keyword arguments so it can stand in for
    any predicate signature.

    :return: Always returns :code:`True`
    :rtype: bool
    """
    return True
@edge_predicate
def has_provenance(data):
    """Check if the edge carries provenance (both citation and evidence).

    :param dict data: The edge data dictionary
    :rtype: bool
    """
    return all(entry in data for entry in (CITATION, EVIDENCE))
@edge_predicate
def has_pubmed(data):
    """Check if the edge cites PubMed.

    :param dict data: A PyBEL edge data dictionary from a :class:`pybel.BELGraph`
    :rtype: bool
    """
    if CITATION not in data:
        return False
    return data[CITATION][CITATION_TYPE] == CITATION_TYPE_PUBMED
@edge_predicate
def has_authors(data):
    """Check if the edge's citation lists authors.

    Mirrors the original short-circuit semantics: returns ``False`` when
    citation/author entries are missing, otherwise the author value itself.

    :param dict data: A PyBEL edge data dictionary from a :class:`pybel.BELGraph`
    """
    if CITATION not in data:
        return False
    citation = data[CITATION]
    if CITATION_AUTHORS not in citation:
        return False
    return citation[CITATION_AUTHORS]
@edge_predicate
def is_causal_relation(data):
    """Check whether this edge's relation is one of the causal relations.

    :param dict data: The PyBEL edge data dictionary
    :rtype: bool
    """
    relation = data[RELATION]
    return relation in CAUSAL_RELATIONS
@edge_predicate
def is_direct_causal_relation(data):
    """Check whether this edge's relation is a direct causal relation.

    :param dict data: The PyBEL edge data dictionary
    :rtype: bool
    """
    relation = data[RELATION]
    return relation in DIRECT_CAUSAL_RELATIONS
@edge_predicate
def is_associative_relation(data):
    """Check whether this edge's relation is an association.

    :param dict data: The PyBEL edge data dictionary
    :rtype: bool
    """
    return ASSOCIATION == data[RELATION]
@edge_predicate
def has_polarity(data):
    """Check whether this edge's relation is polar (increases/decreases-like).

    :param dict data: The edge data dictionary
    :rtype: bool
    """
    relation = data[RELATION]
    return relation in POLAR_RELATIONS
def _has_modifier(data, modifier):
    """Check whether the subject or the object carries the given modifier.

    :param dict data: The edge data dictionary
    :param str modifier: One of :data:`pybel.constants.ACTIVITY`,
     :data:`pybel.constants.DEGRADATION`, or :data:`pybel.constants.TRANSLOCATION`.
    :rtype: bool
    """
    # Preserve short-circuiting: the object side is only examined when the
    # subject side does not already have the modifier.
    subject_has = part_has_modifier(data, SUBJECT, modifier)
    return subject_has or part_has_modifier(data, OBJECT, modifier)
@edge_predicate
def edge_has_activity(data):
    """Check if the edge contains an activity in either the subject or object.

    Thin wrapper delegating to :func:`_has_modifier` with ``ACTIVITY``.

    :param dict data: The edge data dictionary
    :rtype: bool
    """
    return _has_modifier(data, ACTIVITY)
@edge_predicate
def edge_has_translocation(data):
    """Check if the edge has a translocation in either the subject or object.

    Thin wrapper delegating to :func:`_has_modifier` with ``TRANSLOCATION``.

    :param dict data: The edge data dictionary
    :rtype: bool
    """
    return _has_modifier(data, TRANSLOCATION)
@edge_predicate
def edge_has_degradation(data):
    """Check if the edge contains a degradation in either the subject or object.

    Thin wrapper delegating to :func:`_has_modifier` with ``DEGRADATION``.

    :param dict data: The edge data dictionary
    :rtype: bool
    """
    return _has_modifier(data, DEGRADATION)
def edge_has_annotation(data, key):
    """Look up an annotation value on an edge, if present.

    :param dict data: The data dictionary from a BELGraph's edge
    :param str key: An annotation key
    :return: The annotation value if the key is present, otherwise ``None``
    :rtype: Optional[Any]

    For example, it might be useful to print all edges that are annotated with 'Subgraph':

    >>> from pybel.examples import sialic_acid_graph
    >>> for u, v, data in sialic_acid_graph.edges(data=True):
    >>>     if edge_has_annotation(data, 'Species'):
    >>>         print(u, v, data)
    """
    annotations = data.get(ANNOTATIONS)
    if annotations is None:
        return None
    return annotations.get(key)
def has_pathology_causal(graph, u, v, k):
    """Check if the subject is a pathology and has a causal relationship with a non bioprocess/pathology.

    :param pybel.BELGraph graph: A BEL Graph
    :param BaseEntity u: A BEL node
    :param BaseEntity v: A BEL node
    :param str k: The edge key between the given nodes
    :return: If the subject of this edge is a pathology and it participates in a causal reaction.
    :rtype: bool
    """
    # Guard clauses preserve the original evaluation order.
    if not isinstance(u, Pathology):
        return False
    if not is_causal_relation(graph, u, v, k):
        return False
    return not isinstance(v, (Pathology, BiologicalProcess))
| 6,611 | 2,129 |
from kraken import plugins
from kraken.core.maths import Vec3
from kraken_examples.neck_component import NeckComponentGuide, NeckComponentRig
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy
Profiler.getInstance().push("neck_build")

# Build a neck guide and fill in its placement data.
neckGuide = NeckComponentGuide("neck")
neckGuide.loadData({
    "name": "Neck",
    "location": "L",
    "neckPosition": Vec3(0.0, 16.5572, -0.6915),
    "neckUpVOffset": Vec3(0.0, 0.0, -1.0),
    "neckEndPosition": Vec3(0.0, 17.4756, -0.421)
})

# Save the neck guide data for persistence.
saveData = neckGuide.saveData()

# Convert the guide into build data and construct the actual rig from it.
neckGuideData = neckGuide.getRigBuildData()

neck = NeckComponentRig()
neck.loadData(neckGuideData)

builder = plugins.getBuilder()
builder.build(neck)

Profiler.getInstance().pop()

# NOTE(review): Python 2 print statement below -- this script is not
# Python 3 compatible as written.
if __name__ == "__main__":
    print Profiler.getInstance().generateReport()
else:
    logHierarchy(neck)
| 1,001 | 359 |
from datetime import datetime
from unittest.mock import Mock
from unittest.mock import patch
from saml2.config import config_factory
from saml2.response import authn_response
from saml2.sigver import SignatureError
from dateutil import parser
from pytest import raises
from pathutils import dotname
from pathutils import full_path
# Response crafted with an XML Signature Wrapping (XSW) attack payload.
XML_RESPONSE_XSW = full_path("saml2_response_xsw.xml")


class TestAuthnResponse:
    """Regression test: a signature-wrapped (tampered) SAML response must
    fail verification and must not expose any assertion data."""

    def setup_class(self):
        # Build an SP-side AuthnResponse parser from the test server config.
        self.conf = config_factory("sp", dotname("server_conf"))
        self.ar = authn_response(self.conf, "http://lingon.catalogix.se:8087/")

    @patch('saml2.response.validate_on_or_after', return_value=True)
    def test_verify_signed_xsw(self, mock_validate_on_or_after):
        # Bypass the time-based checks so only signature validation can fail.
        self.ar.issue_instant_ok = Mock(return_value=True)
        with open(XML_RESPONSE_XSW) as fp:
            xml_response = fp.read()

        self.ar.outstanding_queries = {"id12": "http://localhost:8088/sso"}
        self.ar.timeslack = 10000
        self.ar.loads(xml_response, decode=False)

        # Loading (without verifying) still parses the outer message fields.
        assert self.ar.came_from == 'http://localhost:8088/sso'
        assert self.ar.session_id() == "id12"
        assert self.ar.issuer() == 'urn:mace:example.com:saml:roland:idp'

        # The wrapped signature must be rejected ...
        with raises(SignatureError):
            self.ar.verify()

        # ... and no identity data may survive the failed verification.
        assert self.ar.ava is None
        assert self.ar.name_id is None
| 1,354 | 465 |
def solution(s):
    """Split *s* into two-character chunks.

    If *s* has odd length, the final chunk is the last character padded
    with an underscore.

    >>> solution("abcdef")
    ['ab', 'cd', 'ef']
    >>> solution("abc")
    ['ab', 'c_']
    """
    chunks = []
    for start in range(0, len(s), 2):
        piece = s[start:start + 2]
        if len(piece) < 2:
            piece += "_"
        chunks.append(piece)
    return chunks
import os
import re
def writeLine(fs, line):
    """Write *line* to the open file object *fs*, echoing it for tracing.

    BUG FIX: the original used a Python 2 print statement; the
    parenthesized form below behaves identically on Python 2 and 3.
    """
    print("write: %s" % line)
    fs.write(line)
def fixFile(filePath):
    """Rewrite *filePath* in place, replacing every
    ``[NSThread sleepForTimeInterval:X]`` call with ``AppeckerWait(X);``.

    Commented-out lines (starting with ``//``) are left untouched.
    """
    with open(filePath, "r") as fs:
        lines = fs.readlines()

    with open(filePath, "w") as fs:
        for line in lines:
            # Capture the sleep interval expression between the colon and ']'.
            result = re.search(r"(?<=\[NSThread\ssleepForTimeInterval:).*(?=\])", line)
            if not result:
                writeLine(fs, line)
                continue
            # BUG FIX: the original tested startswith("\\\\") (two literal
            # backslashes); C-style comment lines start with "//".
            if line.startswith("//"):
                writeLine(fs, line)
                continue
            time = result.group(0)
            indent = re.search(r"^\s*", line).group(0)
            # BUG FIX: keep the line terminator -- the original dropped it,
            # merging the rewritten line with the next one in the output.
            newLine = indent + "AppeckerWait(" + time + ");\n"
            writeLine(fs, newLine)
def onFileDetected(arg, dirname, names):
    """os.path.walk visitor: run fixFile on every .mm file in *dirname*."""
    for fileName in names:
        # Equivalent to matching the regex r'\.mm$' against the lowered name.
        if fileName.lower().endswith('.mm'):
            fixFile(dirname + '/' + fileName)
os.path.walk(".", onFileDetected, ())
| 752 | 321 |
import math
import sys
import os
import copy
thisPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(thisPath)
from enum import Enum
from mlock import MLock
import utils
# Input.Value means a raw sample value is given directly (e.g. bytes)
# Input.Counter means a monotonically increasing counter is given; the
#   delta between successive samples is used (see _setCumuVal)
# Input.NBitCounter is like Input.Counter, but the counter wraps at N bits
#   and the overflow is corrected using the `bits` argument
Input = Enum('Input', 'Value Counter NBitCounter')
def noneMax(x, y):
    """Return the larger of *x* and *y*, treating None as 'no value'."""
    if x is None:
        return y
    if y is None:
        return x
    return max(x, y)
def noneMin(x, y):
    """Return the smaller of *x* and *y*, treating None as 'no value'."""
    if x is None:
        return y
    if y is None:
        return x
    return min(x, y)
class SlidingMetrics():
    """Sliding-window statistics (count, min, max, cumulative sums and time
    averages) over timestamped samples.

    The window [tsN - maxWindow, tsN] is approximated by a ring of
    sub-windows of width (maxWindow - minWindow); whole sub-windows are
    dropped as time advances.

    BUG FIX: avgNumL/avgNumR referenced an undefined bare name ``N``
    (NameError at runtime); they now use ``self.N``.
    """

    # if input is avg, it is something like bytes/sec, etc., otherwise unit is bytes or seconds of latency, etc.
    def __init__(self, minWindow, maxWindow, inputType, bits=32):
        self.minWindow = minWindow
        self.maxWindow = maxWindow
        self.subWindow = maxWindow - minWindow
        self.maxWindows = math.ceil(minWindow / self.subWindow) + 1
        self.inputType = inputType
        self.winIndex = 0
        self.lock = MLock()
        # window statistics (one entry per live sub-window)
        self.NWin = []
        self.ts1Win = []
        self.tsNWin = []
        self.minValWin = []
        self.maxValWin = []
        self.cumu1Win = []
        self.cumuNWin = []
        self.N = 0  # num samples (1 to N)
        self.ts0 = None  # ts below lowest value (needed for left )
        self.ts1 = None
        self.tsN = None
        self.minVal = None
        self.maxVal = None
        self.cumu0 = 0
        self.cumu1 = None
        self.cumuNMinus1 = None
        self.cumuN = 0
        self.startTs = None
        self.prevTs = None
        self.prevData = 0  # raw data
        self.bits = bits

    def __dump__(self):
        """Return a plain-dict snapshot (lock removed, enum stringified)."""
        o = copy.deepcopy(self.__dict__)
        o.pop("lock", None)
        o["inputType"] = str(o["inputType"])
        return o

    @staticmethod
    def __load__(o):
        """Rebuild an instance from a __dump__-style mapping."""
        x = SlidingMetrics(10, 20, Input.Value, bits=32)
        o = utils.smartLoad(o, True)
        for key, val in o.items():
            setattr(x, key, val)
        x.inputType = eval(x.inputType)  # convert back
        return x

    def __serialize__(self, seenVals):
        o = {}
        for k, v in sorted(self.__dict__.items()):
            if k not in ["lock", "inputType"]:
                o[k] = utils.serialize(v, seenVals)
        o["inputType"] = str(self.inputType)
        return o

    def __deserialize__(self, o, toDict, seenVals):
        for k, v in sorted(o.items()):
            if k not in ["lock", "inputType"]:
                setattr(self, k, utils.deserialize(v, toDict, seenVals))
        self.inputType = eval(o["inputType"])

    def _resetCumu(self):
        """Re-base all cumulative counters so cumu0 becomes zero
        (keeps the numbers from growing without bound)."""
        amtToSub = self.cumu0
        if amtToSub==0:
            return
        for i in range(len(self.cumu1Win)):
            self.cumu1Win[i] -= amtToSub
            self.cumuNWin[i] -= amtToSub
        self.cumu0 -= amtToSub
        self.cumu1 -= amtToSub
        self.cumuNMinus1 -= amtToSub
        self.cumuN -= amtToSub

    # returns cumulative and value
    def _setCumuVal(self, data):
        if self.inputType==Input.NBitCounter:
            if data < self.prevData:  # counter overflow
                data += (1 << self.bits)
            val = data - self.prevData
        elif self.inputType==Input.Counter:
            val = data - self.prevData
        elif self.inputType==Input.Value:
            val = data
        self.prevData = data
        cumu = self.cumuN + val
        return val, cumu

    def popWindow(self):
        """Drop the oldest sub-window and refresh the aggregate stats."""
        N0 = self.NWin.pop(0)
        self.N -= N0
        self.ts1Win.pop(0)
        self.ts1 = self.ts1Win[0]
        self.ts0 = self.tsNWin.pop(0)
        self.minValWin.pop(0)
        self.minVal = min(self.minValWin)
        self.maxValWin.pop(0)
        self.maxVal = max(self.maxValWin)
        self.cumu1Win.pop(0)
        self.cumu1 = self.cumu1Win[0]
        self.cumu0 = self.cumuNWin.pop(0)

    def _addHelper(self, ts, data):
        """Insert one sample; returns False if *ts* went backwards."""
        if self.startTs is None:
            self.startTs = ts
        ts = ts - self.startTs  # normalize to start at zero
        if self.prevTs is not None and ts < self.prevTs:
            return False
        self.prevTs = ts
        val, cumu = self._setCumuVal(data)
        if ts >= self.winIndex*self.subWindow:
            # new window
            self.winIndex += 1
            self.NWin.append(1)
            self.ts1Win.append(ts)
            self.tsNWin.append(ts)
            self.minValWin.append(val)
            self.maxValWin.append(val)
            self.cumu1Win.append(cumu)
            self.cumuNWin.append(cumu)
        else:
            # add to current window (last window)
            self.NWin[-1] += 1
            self.tsNWin[-1] = ts
            self.minValWin[-1] = min(self.minValWin[-1], val)
            self.maxValWin[-1] = max(self.maxValWin[-1], val)
            self.cumuNWin[-1] = cumu
        # pop oldest window as new window is added
        bWindowMoved = False
        if len(self.ts1Win) > self.maxWindows:
            self.popWindow()
            bWindowMoved = True
        if self.ts1 is None:
            self.ts1 = ts
        while ts-self.ts1 >= self.maxWindow:
            self.popWindow()
            bWindowMoved = True
        self.N += 1
        self.tsN = ts
        self.minVal = noneMin(self.minVal, val)
        self.maxVal = noneMax(self.maxVal, val)
        if self.cumu1 is None:
            self.cumu1 = cumu
        self.cumuNMinus1 = self.cumuN
        self.cumuN = cumu
        #print("Cumu: {0} {1} {2} {3}".format(self.cumu0, self.cumu1, self.cumuNMinus1, self.cumuN))
        if bWindowMoved:
            self._resetCumu()
        return True

    def add(self, ts, data):
        """Thread-safe sample insertion."""
        with self.lock:
            return self._addHelper(ts, data)

    def lockTryNan(self, fn):
        """Run *fn* under the lock; 0.0 on divide-by-zero, NaN on any other error."""
        with self.lock:
            try:
                return fn()
            except ZeroDivisionError:
                return 0.0
            except Exception:
                return float('nan')

    # given data points have timestamp which is left end of interval
    def avgL(self):
        with self.lock:
            if (self.N-1)==0 or (self.tsN == self.ts1):
                return 0.0
            else:
                # N-1 points, N-1 intervals
                return (self.cumuNMinus1 - self.cumu0) / (self.tsN - self.ts1)

    def avgR(self):
        with self.lock:
            if self.ts0 is None:
                if (self.N-1)<=0 or (self.tsN == self.ts1):
                    return 0.0
                else:
                    # N-1 points, N-1 intervals
                    return (self.cumuN - self.cumu1) / (self.tsN - self.ts1)
            else:
                if self.N==0 or (self.tsN == self.ts0):
                    return 0.0
                else:
                    # N points, N intervals
                    return (self.cumuN - self.cumu0) / (self.tsN - self.ts0)

    def avgN(self):
        with self.lock:
            if self.N==0:
                return 0.0
            else:
                return (self.cumuN - self.cumu0) / self.N

    # number of measurements
    def avgNumL(self):
        with self.lock:
            if (self.N-1)==0 or (self.tsN == self.ts1):
                return 0.0
            else:
                # BUG FIX: was bare `N` (undefined name)
                return (self.N-1) / (self.tsN - self.ts1)

    def avgNumR(self):
        with self.lock:
            if self.ts0 is None:
                if (self.N-1)<=0 or (self.tsN == self.ts1):
                    return 0.0
                else:
                    # BUG FIX: was bare `N` (undefined name)
                    return (self.N-1) / (self.tsN - self.ts1)
            else:
                if self.N==0 or (self.tsN == self.ts0):
                    return 0.0
                else:
                    # BUG FIX: was bare `N` (undefined name)
                    return self.N / (self.tsN - self.ts0)

    def windowL(self):
        return self.lockTryNan(lambda : self.tsN - self.ts1)

    def windowR(self):
        with self.lock:
            if self.ts0 is None:
                return self.tsN - self.ts1
            else:
                return self.tsN - self.ts0
# Register the class with the project's serialization helpers so instances
# can be reconstructed by name (see utils.serialize/deserialize above).
utils.registerEval('SlidingMetrics', SlidingMetrics)
utils.registerCreate("SlidingMetrics", lambda : SlidingMetrics(10, 20, Input.Value, bits=32))
# ============================
# Testing
from numpy import random
if __name__ == "__main__":
    # Brute-force reference: keep every (ts, val) pair in `window` and
    # recompute the window statistics directly, then compare against
    # SlidingMetrics' incremental results on every step.
    window = []
    N = 100000
    s = SlidingMetrics(9.0, 10.0, Input.Value)
    subWindow = 1.0
    numWindows = 10
    ts = 0
    r = random.RandomState(4532312)  # fixed seed for reproducibility
    lastPopped = None
    valMin = 0
    valMax = 20
    tsDelta = 0.2
    tsDeltaRand = 0.03
    for i in range(N):
        val = r.uniform(valMin, valMax)
        s.add(ts, val)
        window.append((ts, val))
        # remove from window
        curWindow = math.floor(ts/subWindow)
        firstWindow = max(0, curWindow - numWindows + 1)
        firstTs = firstWindow * subWindow
        while len(window) > 0:
            (t, v) = window[0]
            if t < firstTs:
                lastPopped = window.pop(0)
            else:
                break
        #print("{0} {1}".format(len(window), window))
        # compare: rebuild the left/right/count averages by brute force
        if lastPopped is not None:
            t0 = lastPopped[0]
            rStart = 0
        else:
            t0 = None
            rStart = 1
        sumL = 0
        for j in range(0, len(window)-1):
            sumL += window[j][1]
        if len(window)<=1:
            avgWinL = 0.0
        else:
            avgWinL = sumL / (window[-1][0] - window[0][0])
        sumR = 0
        for j in range(rStart, len(window)):
            sumR += window[j][1]
        if len(window)<=1:
            avgWinR = 0.0
        elif rStart==0:
            avgWinR = sumR / (window[-1][0] - t0)
        else:
            avgWinR = sumR / (window[-1][0] - window[0][0])
        avgWinN = (sumL + window[-1][1]) / len(window)
        maxWin = max(window, key=lambda x: x[1])[1]
        minWin = min(window, key=lambda x: x[1])[1]
        if False:  # flip on for per-step tracing
            print("T: {0} W: {1} V: {2}".format(ts, curWindow, val))
            print("L: {0} {1}".format(avgWinL, s.avgL()))
            print("R: {0} {1}".format(avgWinR, s.avgR()))
            print("N: {0} {1}".format(avgWinN, s.avgN()))
        error = abs(avgWinL-s.avgL()) + abs(avgWinR-s.avgR()) + abs(avgWinN-s.avgN())
        errorMinMax = abs(maxWin-s.maxVal) + abs(minWin-s.minVal)
        print("ERROR: {0:20.15f}\t ERRORMINMAX: {1:20.15f}".format(error, errorMinMax), end='\r')
        # Dump full diagnostics when any statistic drifts beyond tolerance.
        if (abs(avgWinL-s.avgL()) > abs(avgWinL)*0.0000001 or
            abs(avgWinR-s.avgR()) > abs(avgWinR)*0.0000001 or
            abs(avgWinN-s.avgN()) > abs(avgWinN)*0.0000001 or
            maxWin != s.maxVal or
            minWin != s.minVal):
            print("ERROR====")
            print("T: {0} W: {1} V: {2}".format(ts, curWindow, val))
            print("L: {0} {1}".format(avgWinL, s.avgL()))
            print("R: {0} {1}".format(avgWinR, s.avgR()))
            print("N: {0} {1}".format(avgWinN, s.avgN()))
            print("M: {0} {1}".format(maxWin, s.maxVal))
            print("m: {0} {1}".format(minWin, s.minVal))
        ts += r.uniform(tsDelta - tsDeltaRand, tsDelta + tsDeltaRand)
| 11,259 | 3,944 |
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.compat import text_type
from whoosh.compat import xrange
from whoosh.analysis.acore import Token
from whoosh.analysis.filters import Filter, LowercaseFilter
from whoosh.analysis.tokenizers import Tokenizer, RegexTokenizer
# Tokenizer
class NgramTokenizer(Tokenizer):
    """Splits input text into N-grams instead of words.

    >>> ngt = NgramTokenizer(4)
    >>> [token.text for token in ngt("hi there")]
    ["hi t", "i th", " the", "ther", "here"]

    Note that this tokenizer does NOT use a regular expression to extract
    words, so the grams emitted by it will contain whitespace, punctuation,
    etc. You may want to massage the input or add a custom filter to this
    tokenizer's output.

    Alternatively, if you only want sub-word grams without whitespace, you
    could combine a RegexTokenizer with NgramFilter instead.
    """

    __inittypes__ = dict(minsize=int, maxsize=int)

    def __init__(self, minsize, maxsize=None):
        """
        :param minsize: The minimum size of the N-grams.
        :param maxsize: The maximum size of the N-grams. If you omit
            this parameter, maxsize == minsize.
        """
        self.min = minsize
        self.max = maxsize or minsize

    def __eq__(self, other):
        # NOTE(review): no matching __hash__ is defined; under Python 3 this
        # makes instances unhashable -- confirm whether that is intended.
        if self.__class__ is other.__class__:
            if self.min == other.min and self.max == other.max:
                return True
        return False

    def __call__(self, value, positions=False, chars=False, keeporiginal=False,
                 removestops=True, start_pos=0, start_char=0, mode='',
                 **kwargs):
        """Yield a Token for every N-gram of *value*.

        A single Token instance is reused and mutated for every gram;
        consumers must copy it if they hold references across iterations.
        """
        assert isinstance(value, text_type), "%r is not unicode" % value
        inlen = len(value)
        t = Token(positions, chars, removestops=removestops, mode=mode)
        pos = start_pos

        if mode == "query":
            # Query mode: emit only grams of one size (the largest that
            # fits), sliding across the whole value.
            size = min(self.max, inlen)
            for start in xrange(0, inlen - size + 1):
                end = start + size
                if end > inlen:
                    continue
                t.text = value[start:end]
                if keeporiginal:
                    t.original = t.text
                t.stopped = False
                if positions:
                    t.pos = pos
                if chars:
                    t.startchar = start_char + start
                    t.endchar = start_char + end
                yield t
                pos += 1
        else:
            # Index mode: emit every gram of every size from min to max at
            # every start position.
            for start in xrange(0, inlen - self.min + 1):
                for size in xrange(self.min, self.max + 1):
                    end = start + size
                    if end > inlen:
                        continue
                    t.text = value[start:end]
                    if keeporiginal:
                        t.original = t.text
                    t.stopped = False
                    if positions:
                        t.pos = pos
                    if chars:
                        t.startchar = start_char + start
                        t.endchar = start_char + end
                    yield t
                    pos += 1
# Filter
class NgramFilter(Filter):
    """Splits token text into N-grams.

    >>> rext = RegexTokenizer()
    >>> stream = rext("hello there")
    >>> ngf = NgramFilter(4)
    >>> [token.text for token in ngf(stream)]
    ["hell", "ello", "ther", "here"]
    """

    __inittypes__ = dict(minsize=int, maxsize=int)

    def __init__(self, minsize, maxsize=None, at=None):
        """
        :param minsize: The minimum size of the N-grams.
        :param maxsize: The maximum size of the N-grams. If you omit this
            parameter, maxsize == minsize.
        :param at: If 'start', only take N-grams from the start of each word.
            if 'end', only take N-grams from the end of each word. Otherwise,
            take all N-grams from the word (the default).
        """
        self.min = minsize
        self.max = maxsize or minsize
        # Encode the 'at' option as -1 (start), 1 (end) or 0 (all grams).
        self.at = 0
        if at == "start":
            self.at = -1
        elif at == "end":
            self.at = 1

    def __eq__(self, other):
        return other and self.__class__ is other.__class__\
        and self.min == other.min and self.max == other.max

    def __call__(self, tokens):
        """Yield N-gram tokens derived from each incoming token.

        Each incoming Token is mutated in place and re-yielded once per
        gram, per whoosh convention.
        """
        assert hasattr(tokens, "__iter__")
        at = self.at
        for t in tokens:
            text = t.text
            if len(text) < self.min:
                # Too short to produce any gram; drop the token entirely.
                continue

            chars = t.chars
            if chars:
                startchar = t.startchar
            # Token positions don't mean much for N-grams,
            # so we'll leave the token's original position
            # untouched.

            if t.mode == "query":
                # Query mode: only grams of one size (largest that fits).
                size = min(self.max, len(t.text))
                if at == -1:
                    t.text = text[:size]
                    if chars:
                        t.endchar = startchar + size
                    yield t
                elif at == 1:
                    t.text = text[0 - size:]
                    if chars:
                        t.startchar = t.endchar - size
                    yield t
                else:
                    for start in xrange(0, len(text) - size + 1):
                        t.text = text[start:start + size]
                        if chars:
                            t.startchar = startchar + start
                            t.endchar = startchar + start + size
                        yield t
            else:
                if at == -1:
                    # Grams anchored at the start of the word.
                    limit = min(self.max, len(text))
                    for size in xrange(self.min, limit + 1):
                        t.text = text[:size]
                        if chars:
                            t.endchar = startchar + size
                        yield t
                elif at == 1:
                    # Grams anchored at the end of the word.
                    if chars:
                        original_startchar = t.startchar
                    start = max(0, len(text) - self.max)
                    for i in xrange(start, len(text) - self.min + 1):
                        t.text = text[i:]
                        if chars:
                            t.startchar = original_startchar + i
                        yield t
                else:
                    # All grams of all sizes from every start position.
                    for start in xrange(0, len(text) - self.min + 1):
                        for size in xrange(self.min, self.max + 1):
                            end = start + size
                            if end > len(text):
                                continue
                            t.text = text[start:end]
                            if chars:
                                t.startchar = startchar + start
                                t.endchar = startchar + end
                            yield t
# Analyzers
def NgramAnalyzer(minsize, maxsize=None):
    """Composes an NgramTokenizer and a LowercaseFilter.

    >>> ana = NgramAnalyzer(4)
    >>> [token.text for token in ana("hi there")]
    ["hi t", "i th", " the", "ther", "here"]
    """
    tokenizer = NgramTokenizer(minsize, maxsize=maxsize)
    return tokenizer | LowercaseFilter()
def NgramWordAnalyzer(minsize, maxsize=None, tokenizer=None, at=None):
    """Compose a word tokenizer (RegexTokenizer by default), a
    LowercaseFilter, and an NgramFilter."""
    if not tokenizer:
        tokenizer = RegexTokenizer()
    ngram_filter = NgramFilter(minsize, maxsize, at=at)
    return tokenizer | LowercaseFilter() | ngram_filter
| 9,025 | 2,709 |
import re
from nonebot.default_config import *
HOST = '0.0.0.0'  # listen on all interfaces
SECRET = 'abc'
SUPERUSERS = {1002647525}  # account ids granted superuser privileges
NICKNAME = {'奶茶', '小奶茶'}
# NOTE: this set deliberately contains both the ASCII and the full-width
# (CJK) forms of '/' and '!' -- they are different characters.
COMMAND_START = {'', '/', '!', '/', '!', re.compile(r'^>+\s*')}
COMMAND_SEP = {'/', '.', re.compile(r'#|::?')}
| 244 | 135 |
import warnings
from ..water_viscosity_korson_1969 import water_viscosity
def test_water_viscosity():
    # Promote warnings to errors so any numerical warning fails the test.
    warnings.filterwarnings("error")
    # Reference data from Table II (p. 38):
    # (temperature in deg C, viscosity, allowed absolute error)
    reference = [
        (0, 1.7916, 5e-4),
        (5, 1.5192, 5e-4),
        (10, 1.3069, 5e-4),
        (15, 1.1382, 5e-4),
        (20, 1.0020, 5e-4),
        (25, 0.8903, 5e-4),
        (30, 0.7975, 5e-4),
        (35, 0.7195, 5e-4),
        (40, 0.6532, 5e-4),
        (45, 0.5963, 5e-4),
        (50, 0.5471, 5e-4),
        (55, 0.5042, 5e-4),
        (60, 0.4666, 5e-4),
        (65, 0.4334, 5e-4),
        (70, 0.4039, 5e-4),
        (75, 0.3775, 5e-4),
        (80, 0.3538, 5e-4),
        (85, 0.3323, 5e-4),
        (90, 0.3128, 5e-4),
        (95, 0.2949, 6e-4),
        (100, 0.2783, 2e-3),
    ]
    for celsius, expected, tolerance in reference:
        assert abs(water_viscosity(273.15 + celsius) - expected) < tolerance
    warnings.resetwarnings()
| 1,471 | 844 |
# -*- coding: utf-8 -*-
from litNlp.predict import SA_Model_Predict
import matplotlib.pyplot as plt
from setting import *
import numpy as np
import os
# Configure matplotlib so CJK labels render with SimHei and the minus sign
# still displays correctly under a non-ASCII font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def topic_sa_analysis():
    """Run sentiment analysis for every configured topic keyword and save
    one histogram per topic (see sa_analysis_)."""
    model = SA_Model_Predict(tokenize_path, sa_model_path_m, max_len=100)
    if not os.path.exists(topic_emotion_pic):
        os.mkdir(topic_emotion_pic)
        print(topic_emotion_pic+'文件夹已经建立,请查看当前文件路径')
    for topic in topic_words_list.keys():
        sa_analysis_(topic, model)
def sa_analysis_(key_word, sa_model):
    """Predict sentiment scores for one topic's comments and save a
    histogram of the score distribution.

    :param key_word: topic keyword; selects <topic_path>/<key_word>.txt
    :param sa_model: a loaded SA_Model_Predict instance
    """
    print('{} 正在执行...'.format(key_word))
    # BUG FIX: the original leaked the file handle (open() without close).
    with open('{}/{}.txt'.format(topic_path, key_word), 'r', encoding='utf-8') as f:
        key_txt = f.readlines()
    sentiments_score_predict = sa_model.predict(key_txt)
    # 情感极性输出 -- keep only the positive-class probability (index 1)
    sentiments_score_list = [i[1] for i in sentiments_score_predict]
    plt.hist(sentiments_score_list, bins=np.arange(0, 1, 0.01))
    plt.xlabel("情感值")
    plt.ylabel("评论数目")
    plt.title(key_word+'-情感极性分布图')
    plt.savefig('{}/{}.png'.format(topic_emotion_pic, key_word))
    plt.show()
    plt.close()
    print('{} 情感极性图完成'.format(key_word))
# if __name__ == '__main__':
# # 添加多线程提升预测速度
# topic_sa_analysis()
| 1,235 | 540 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by: Shangeth Rajaa, Zhengying Liu, Isabelle Guyon
"""An example of code submission for the AutoDL challenge.
It implements 3 compulsory methods ('__init__', 'train' and 'test') and
an attribute 'done_training' for indicating if the model will not proceed more
training due to convergence or limited time budget.
To create a valid submission, zip model.py together with other necessary files
such as Python modules/packages, pre-trained weights, etc. The final zip file
should not exceed 300MB.
"""
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.autograd import Variable
import datetime
import logging
import numpy as np
import os
import sys
import time
import torch.utils.data as data_utils
import torch
import torch.nn as nn
import torchvision
import tensorflow as tf
# seeding randomness for reproducibility across runs (both NumPy and
# PyTorch RNGs are used by the models below)
np.random.seed(42)
torch.manual_seed(1)
# PyTorch Model class
class TorchModel(nn.Module):
    """3D CNN whose depth adapts to the input size: conv/pool layers are
    stacked until the flattened feature size or spatial extent gets small,
    then a single Linear layer maps to the output classes."""

    def __init__(self, input_shape, output_dim):
        ''' 3D CNN Model with no of CNN layers depending on the input size

        :param input_shape: (channels, frames, rows, cols) of one example
        :param output_dim: number of output classes
        '''
        super(TorchModel, self).__init__()
        self.conv = torch.nn.Sequential()
        cnn_ch = 16
        if input_shape[1] == 1:  # single frame: don't convolve over time
            self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, (1,3,3)))
        else:
            self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, 3))
        self.conv.add_module('pool1', nn.MaxPool3d(2,2))
        i = 2
        while True:
            self.conv.add_module('cnn{}'.format(i),
                                 nn.Conv3d(cnn_ch * (i-1), cnn_ch * i, (1,3,3)))
            self.conv.add_module('pool{}'.format(i), nn.MaxPool3d(2,2))
            i += 1
            n_size, out_len = self.get_fc_size(input_shape)
            # no more CNN layers if Linear layers get input size < 1000
            # BUG FIX: the original tested out_len[3] twice; check both the
            # height (index 3) and width (index 4) of the (N,C,D,H,W) output.
            if n_size < 1000 or out_len[3] < 3 or out_len[4] < 3:
                break
        fc_size, _ = self.get_fc_size(input_shape)
        self.fc = nn.Linear(fc_size, output_dim)

    def forward_cnn(self, x):
        """Run only the convolutional stack."""
        x = self.conv(x)
        return x

    def get_fc_size(self, input_shape):
        ''' function to get the size for Linear layers
        with given number of CNN layers
        '''
        sample_input = Variable(torch.rand(1, *input_shape))
        output_feat = self.forward_cnn(sample_input)
        out_shape = output_feat.shape
        n_size = output_feat.data.view(1, -1).size(1)
        return n_size, out_shape

    def forward(self, x):
        """Conv stack, flatten, then the final Linear classifier."""
        x = self.forward_cnn(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
# PyTorch Dataset to get data from tensorflow Dataset.
class TFDataset(torch.utils.data.Dataset):
    """PyTorch Dataset that pulls (example, label) pairs out of a
    TensorFlow Dataset via a one-shot iterator."""

    def __init__(self, dataset, session, num_samples):
        super(TFDataset, self).__init__()
        self.dataset = dataset
        self.session = session
        self.num_samples = num_samples
        self.next_element = None
        self.reset()

    def reset(self):
        """(Re)create the one-shot iterator over the underlying TF dataset."""
        iterator = self.dataset.make_one_shot_iterator()
        self.next_element = iterator.get_next()
        return self

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        sess = self.session if self.session is not None else tf.Session()
        try:
            example, label = sess.run(self.next_element)
        except tf.errors.OutOfRangeError:
            # Exhausted the TF iterator: rewind and fetch again.
            self.reset()
            example, label = sess.run(self.next_element)
        # (seq, row, col, ch) -> (ch, seq, row, col) for PyTorch.
        return example.transpose(3, 0, 1, 2), label
class Model():
    def __init__(self, metadata):
        """
        Args:
          metadata: an AutoDLMetadata object. Its definition can be found in
              AutoDL_ingestion_program/dataset.py
        """
        # Attribute necessary for ingestion program to stop evaluation process
        self.done_training = False
        self.metadata_ = metadata

        # Getting details of the data from meta data
        self.output_dim = self.metadata_.get_output_size()
        self.num_examples_train = self.metadata_.size()
        row_count, col_count = self.metadata_.get_matrix_size(0)
        channel = self.metadata_.get_num_channels(0)
        sequence_size = self.metadata_.get_sequence_size()
        self.num_train = self.metadata_.size()
        # The test-set size is not in this metadata object; read sample_count
        # out of the sibling test set's metadata.textproto file.
        test_metadata_filename = self.metadata_.get_dataset_name()\
            .replace('train', 'test') + '/metadata.textproto'
        self.num_test = [int(line.split(':')[1]) for line
                         in open(test_metadata_filename, 'r').readlines()
                         if 'sample_count' in line][0]

        # Getting the device available
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('Device Found = ', self.device,
              '\nMoving Model and Data into the device...')

        # Attributes for preprocessing: fallbacks used when a dimension of
        # the examples is variable (reported as -1 by the metadata).
        self.default_image_size = (112,112)
        self.default_num_frames = 15
        self.default_shuffle_buffer = 100
        if row_count == -1 or col_count == -1 :
            row_count = self.default_image_size[0]
            col_count = self.default_image_size[1]
        if sequence_size == -1: sequence_size = self.default_num_frames
        self.input_shape = (channel, sequence_size, row_count, col_count)
        print('\n\nINPUT SHAPE = ', self.input_shape)

        # getting an object for the PyTorch Model class for Model Class
        # use CUDA if available
        self.pytorchmodel = TorchModel(self.input_shape, self.output_dim)
        print('\nPyModel Defined\n')
        print(self.pytorchmodel)
        self.pytorchmodel.to(self.device)

        # PyTorch Optimizer and Criterion (multilabel => BCE-with-logits)
        self.criterion = nn.BCEWithLogitsLoss()
        self.optimizer = torch.optim.Adam(self.pytorchmodel.parameters(), lr=1e-2)

        # Attributes for managing time budget
        # Cumulated number of training steps
        self.birthday = time.time()
        self.total_train_time = 0
        self.cumulated_num_steps = 0
        self.estimated_time_per_step = None
        self.total_test_time = 0
        self.cumulated_num_tests = 0
        self.estimated_time_test = None
        self.trained = False

        # PYTORCH
        # Critical number for early stopping
        self.num_epochs_we_want_to_train = 100
        # no of examples at each step/batch
        self.train_batch_size = 30
        self.test_batch_size = 30

        # Tensorflow sessions to get the data from TFDataset
        self.train_session = tf.Session()
        self.test_session = tf.Session()
    def train(self, dataset, remaining_time_budget=None):
        """Train this algorithm on the tensorflow |dataset|.

        This method will be called REPEATEDLY during the whole training/predicting
        process. So your `train` method should be able to handle repeated calls and
        hopefully improve your model performance after each call.

        ****************************************************************************
        ****************************************************************************
        IMPORTANT: the loop of calling `train` and `test` will only run if
            self.done_training = False
          (the corresponding code can be found in ingestion.py, search
          'M.done_training')
          Otherwise, the loop will go on until the time budget is used up. Please
          pay attention to set self.done_training = True when you think the model is
          converged or when there is not enough time for next round of training.
        ****************************************************************************
        ****************************************************************************

        Args:
          dataset: a `tf.data.Dataset` object. Each of its examples is of the form
                (example, labels)
              where `example` is a dense 4-D Tensor of shape
                (sequence_size, row_count, col_count, num_channels)
              and `labels` is a 1-D Tensor of shape
                (output_dim,).
              Here `output_dim` represents number of classes of this
              multilabel classification task.

              IMPORTANT: some of the dimensions of `example` might be `None`,
              which means the shape on this dimension might be variable. In this
              case, some preprocessing technique should be applied in order to
              feed the training of a neural network. For example, if an image
              dataset has `example` of shape
                (1, None, None, 3)
              then the images in this datasets may have different sizes. On could
              apply resizing, cropping or padding in order to have a fixed size
              input tensor.

          remaining_time_budget: time remaining to execute train(). The method
              should keep track of its execution time to avoid exceeding its time
              budget. If remaining_time_budget is None, no time budget is imposed.
        """
        steps_to_train = self.get_steps_to_train(remaining_time_budget)
        if steps_to_train <= 0:
            # Not worth starting another round; signal the ingestion loop to stop.
            logger.info("Not enough time remaining for training. " +
                  "Estimated time for training per step: {:.2f}, "\
                  .format(self.estimated_time_per_step) +
                  "but remaining time budget is: {:.2f}. "\
                  .format(remaining_time_budget) +
                  "Skipping...")
            self.done_training = True
        else:
            msg_est = ""
            if self.estimated_time_per_step:
                msg_est = "estimated time for this: " +\
                          "{:.2f} sec.".format(steps_to_train * self.estimated_time_per_step)
            logger.info("Begin training for another {} steps...{}".format(steps_to_train, msg_est))

            # If PyTorch dataloader for training set doesn't already exist, get the train dataloader
            if not hasattr(self, 'trainloader'):
                self.trainloader = self.get_dataloader(dataset, self.num_train, batch_size=self.train_batch_size)

            train_start = time.time()

            # Training loop
            self.trainloop(self.criterion, self.optimizer, steps=steps_to_train)
            train_end = time.time()

            # Update for time budget managing (running average of step cost)
            train_duration = train_end - train_start
            self.total_train_time += train_duration
            self.cumulated_num_steps += steps_to_train
            self.estimated_time_per_step = self.total_train_time / self.cumulated_num_steps
            logger.info("{} steps trained. {:.2f} sec used. ".format(steps_to_train, train_duration) +\
                  "Now total steps trained: {}. ".format(self.cumulated_num_steps) +\
                  "Total time used for training: {:.2f} sec. ".format(self.total_train_time) +\
                  "Current estimated time per step: {:.2e} sec.".format(self.estimated_time_per_step))
    def test(self, dataset, remaining_time_budget=None):
        """Test this algorithm on the tensorflow |dataset|.

        Args:
          Same as that of `train` method, except that the `labels` will be empty.
        Returns:
          predictions: A `numpy.ndarray` matrix of shape (sample_count, output_dim).
              here `sample_count` is the number of examples in this dataset as test
              set and `output_dim` is the number of labels to be predicted. The
              values should be binary or in the interval [0,1].
        """
        if self.done_training:
            return None

        if self.choose_to_stop_early():
            # Flag the NEXT train/test iteration to stop; this call still predicts.
            logger.info("Oops! Choose to stop early for next call!")
            self.done_training = True
        test_begin = time.time()
        if remaining_time_budget and self.estimated_time_test and\
            self.estimated_time_test > remaining_time_budget:
            logger.info("Not enough time for test. " +\
                  "Estimated time for test: {:.2e}, ".format(self.estimated_time_test) +\
                  "But remaining time budget is: {:.2f}. ".format(remaining_time_budget) +\
                  "Stop train/predict process by returning None.")
            return None

        msg_est = ""
        if self.estimated_time_test:
            msg_est = "estimated time: {:.2e} sec.".format(self.estimated_time_test)
        logger.info("Begin testing..." + msg_est)

        # If PyTorch dataloader for test set doesn't already exist, get the test dataloader
        if not hasattr(self, 'testloader'):
            self.testloader = self.get_dataloader_test(dataset, self.num_test,
                                                       self.test_batch_size)

        # get predictions from the test loop
        predictions = self.testloop(self.testloader)

        test_end = time.time()
        # Update some variables for time management (running average of test cost)
        test_duration = test_end - test_begin
        self.total_test_time += test_duration
        self.cumulated_num_tests += 1
        self.estimated_time_test = self.total_test_time / self.cumulated_num_tests
        logger.info("[+] Successfully made one prediction. {:.2f} sec used. ".format(test_duration) +\
              "Total time used for testing: {:.2f} sec. ".format(self.total_test_time) +\
              "Current estimated time for test: {:.2e} sec.".format(self.estimated_time_test))
        return predictions
##############################################################################
#### Above 3 methods (__init__, train, test) should always be implemented ####
##############################################################################
def preprocess_tensor_4d(self, tensor_4d):
    """Force a 4-D example tensor into a fixed, known shape.

    Args:
        tensor_4d: Tensor of shape
            [sequence_size, row_count, col_count, num_channels], where some
            dimensions may be unknown (non-positive).
    Returns:
        A 4-D tensor whose time and space dimensions are all fixed.
    """
    shape = tensor_4d.shape
    logger.info("Tensor shape before preprocessing: {}".format(shape))
    # Pick target sizes: keep a known dimension, otherwise fall back to the
    # model defaults configured on this instance.
    num_frames = shape[0] if (shape[0] > 0 and shape[0] < 10) else self.default_num_frames
    new_row_count = shape[1] if shape[1] > 0 else self.default_image_size[0]
    new_col_count = shape[2] if shape[2] > 0 else self.default_image_size[1]
    if not shape[0] > 0:
        logger.info("Detected that examples have variable sequence_size, will " +
                    "randomly crop a sequence with num_frames = " +
                    "{}".format(num_frames))
        tensor_4d = crop_time_axis(tensor_4d, num_frames=num_frames)
    if not shape[1] > 0 or not shape[2] > 0:
        logger.info("Detected that examples have variable space size, will " +
                    "resize space axes to (new_row_count, new_col_count) = " +
                    "{}".format((new_row_count, new_col_count)))
        tensor_4d = resize_space_axes(tensor_4d,
                                      new_row_count=new_row_count,
                                      new_col_count=new_col_count)
    logger.info("Tensor shape after preprocessing: {}".format(tensor_4d.shape))
    return tensor_4d
def get_dataloader(self, tf_dataset, num_images, batch_size):
    '''Get the training PyTorch dataloader.

    Args:
        tf_dataset: TensorFlow Dataset which is given in the train function
        num_images: number of examples in the training data
        batch_size: batch size for the training set
    Return:
        dataloader: PyTorch training DataLoader (shuffled, no batch dropped)
    '''
    # Apply the same fixed-shape preprocessing to every example.
    tf_dataset = tf_dataset.map(lambda *x: (self.preprocess_tensor_4d(x[0]), x[1]))
    train_dataset = TFDataset(tf_dataset, self.train_session, num_images)
    # BUG FIX: the `batch_size` argument was ignored and self.train_batch_size
    # was always used; honor the parameter like get_dataloader_test does.
    dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        drop_last=False
    )
    return dataloader
def get_dataloader_test(self, tf_dataset, num_images, batch_size):
    '''Build the PyTorch dataloader used at prediction time.

    Args:
        tf_dataset: TensorFlow Dataset which is given in the test function
        num_images: number of examples in the test data
        batch_size: batch size for the test set
    Return:
        dataloader: PyTorch test DataLoader (order preserved, no shuffling)
    '''
    preprocessed = tf_dataset.map(
        lambda *sample: (self.preprocess_tensor_4d(sample[0]), sample[1]))
    wrapped = TFDataset(preprocessed, self.test_session, num_images)
    return torch.utils.data.DataLoader(wrapped, batch_size=batch_size)
def trainloop(self, criterion, optimizer, steps):
    ''' Training loop with no of given steps
    Args:
        criterion: PyTorch Loss function
        Optimizer: PyTorch optimizer for training
        steps: No of steps to train the model
    Return:
        None, updates the model parameters
    '''
    self.pytorchmodel.train()
    data_iterator = iter(self.trainloader)
    for i in range(steps):
        try:
            images, labels = next(data_iterator)
        except StopIteration:
            # Dataloader exhausted: restart it so we can keep drawing batches
            # until the requested number of steps is reached.
            data_iterator = iter(self.trainloader)
            images, labels = next(data_iterator)
        # Labels are cast to float: the criterion is applied to raw logits
        # (multi-label style; see the sigmoid thresholding in testloop).
        images = images.float().to(self.device)
        labels = labels.float().to(self.device)
        optimizer.zero_grad()
        log_ps = self.pytorchmodel(images)
        loss = criterion(log_ps, labels)
        if hasattr(self, 'scheduler'):
            # NOTE(review): the scheduler is stepped with the current loss
            # *before* backward()/optimizer.step(); LR schedulers are usually
            # stepped after the optimizer update -- confirm this ordering is
            # intentional.
            self.scheduler.step(loss)
        loss.backward()
        optimizer.step()
def get_steps_to_train(self, remaining_time_budget):
    """Decide how many training steps to run this call.

    Strategy:
      1. With no timing statistics yet, train for 10 steps.
      2. Otherwise estimate how many steps fit in the remaining budget after
         reserving time for the upcoming test phase (max_steps).
      3. Double the step count on each call (2 ** number of tests so far)
         while that doubling still fits inside max_steps; otherwise train 0.
    """
    # No budget given (never happens in the competition): assume 20 minutes.
    budget = remaining_time_budget if remaining_time_budget else 1200
    if not self.estimated_time_per_step:
        return 10
    # Conservative fallback of 50s when no test-time estimate exists yet.
    test_estimate = self.estimated_time_test if self.estimated_time_test else 50
    max_steps = max(int((budget - test_estimate) / self.estimated_time_per_step), 1)
    if self.cumulated_num_tests < np.log(max_steps) / np.log(2):
        # Double steps_to_train after each test.
        return int(2 ** self.cumulated_num_tests)
    return 0
def testloop(self, dataloader):
'''
Args:
dataloader: PyTorch test dataloader
Return:
preds: Predictions of the model as Numpy Array.
'''
preds = []
with torch.no_grad():
self.pytorchmodel.eval()
for images, _ in dataloader:
if torch.cuda.is_available():
images = images.float().cuda()
else:
images = images.float()
log_ps = self.pytorchmodel(images)
pred = torch.sigmoid(log_ps).data > 0.5
preds.append(pred.cpu().numpy())
preds = np.vstack(preds)
return preds
def choose_to_stop_early(self):
    """Decide whether to finish the whole train/predict process."""
    # Alternative criteria, kept for reference:
    # return self.cumulated_num_tests > 10 # Limit to make 10 predictions
    # return np.random.rand() < self.early_stop_proba
    examples_seen = self.cumulated_num_steps * self.train_batch_size
    num_epochs = examples_seen / self.metadata_.size()
    logger.info("Model already trained for {} epochs.".format(num_epochs))
    # Train for at least certain number of epochs then stop.
    return num_epochs > self.num_epochs_we_want_to_train
#### Other helper functions
def crop_time_axis(tensor_4d, num_frames, begin_index=None):
    """Given a 4-D tensor, take a slice of length `num_frames` on its time axis.

    Args:
        tensor_4d: A Tensor of shape
            [sequence_size, row_count, col_count, num_channels]
        num_frames: An integer representing the resulted chunk (sequence) length
        begin_index: The index of the beginning of the chunk. If `None`, chosen
            randomly.
    Returns:
        A Tensor of sequence length `num_frames`, which is a chunk of `tensor_4d`.
    """
    # Zero-pad the end of the sequence if it is shorter than num_frames.
    pad_size = tf.maximum(num_frames - tf.shape(tensor_4d)[0], 0)
    padded_tensor = tf.pad(tensor_4d, ((0, pad_size), (0, 0), (0, 0), (0, 0)))
    # BUG FIX: use an explicit None check -- `if not begin_index` also fired
    # for a legitimate begin_index of 0, replacing a requested start-of-
    # sequence crop with a random one.
    if begin_index is None:
        maxval = tf.shape(padded_tensor)[0] - num_frames + 1
        begin_index = tf.random.uniform([1],
                                        minval=0,
                                        maxval=maxval,
                                        dtype=tf.int32)
        begin_index = tf.stack([begin_index[0], 0, 0, 0], name='begin_index')
    sliced_tensor = tf.slice(padded_tensor,
                             begin=begin_index,
                             size=[num_frames, -1, -1, -1])
    return sliced_tensor
def resize_space_axes(tensor_4d, new_row_count, new_col_count):
    """Given a 4-D tensor, resize space axes to have target size.
    Args:
        tensor_4d: A Tensor of shape
            [sequence_size, row_count, col_count, num_channels].
        new_row_count: An integer indicating the target row count.
        new_col_count: An integer indicating the target column count.
    Returns:
        A Tensor of shape [sequence_size, target_row_count, target_col_count].
    """
    # NOTE(review): tf.image.resize_images is the TF 1.x API (removed in TF 2,
    # where it is tf.image.resize) -- confirm the pinned TensorFlow version.
    resized_images = tf.image.resize_images(tensor_4d,
                                            size=(new_row_count, new_col_count))
    return resized_images
def get_logger(verbosity_level):
    """Build the module logger.

    Messages are formatted like:
        2019-04-25 12:52:51,924 INFO model.py: <message>
    Records at `verbosity_level` and above go to stdout; WARNING and above
    also go to stderr.
    """
    log = logging.getLogger(__file__)
    level = getattr(logging, verbosity_level)
    log.setLevel(level)
    fmt = logging.Formatter(
        fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s')
    # One handler per stream, attached in stdout-then-stderr order.
    for stream, handler_level in ((sys.stdout, level),
                                  (sys.stderr, logging.WARNING)):
        handler = logging.StreamHandler(stream)
        handler.setLevel(handler_level)
        handler.setFormatter(fmt)
        log.addHandler(handler)
    # Do not forward records to the root logger.
    log.propagate = False
    return log
logger = get_logger('INFO')
| 22,570 | 7,082 |
def wakeup():
    """Print the introductory banner explaining what the program does."""
    # BUG FIX: this was a Python 2 `print` statement, a SyntaxError under the
    # Python 3 interpreter the rest of this file targets.
    print("This program will ask you for 5 integer or float values." + "\n" +
          "It will calculate the average of all values from 0 inclusive to 10 exclusive." + "\n" +
          "It will print out whether the resulting average is even or odd.")
def numbers():
    """Prompt for the five values n0..n4 and publish them as module globals.

    BUG FIX: the values were previously bound to locals and silently
    discarded, so main()/avgvalue() could never see them; they are now
    published as globals and also returned as a tuple. Python 2 raw_input()
    was replaced by input(). The unconditional module-level call (which
    blocked on stdin at import time) is now guarded.
    """
    global n1, n2, n3, n4, n5
    n1 = float(input("n0: "))
    n2 = float(input("n1: "))
    n3 = float(input("n2: "))
    n4 = float(input("n3: "))
    n5 = float(input("n4: "))
    return n1, n2, n3, n4, n5

if __name__ == "__main__":
    numbers()
def main():
    """Report which of the five global values lies outside [0, 10).

    BUG FIX: every branch previously re-tested n1 for the upper bound
    (`n2 < 0 or n1 >= 10`, etc.), so out-of-range values in n2..n5 could go
    unreported; each value is now checked against itself. Python 2 print
    statements were converted to calls, and the crashing module-level
    `main()` call (the globals do not exist yet at import) was removed.
    """
    for value in (n1, n2, n3, n4, n5):
        if value < 0 or value >= 10:
            print(str(value) + " is out of range")
def avgvalue():
    """Return the average of the integer parts of the five global values.

    BUG FIX: `a + b + c + d + e / 5` only divided the last term; the sum is
    now parenthesized. The result is returned instead of being bound to a
    local that shadowed the function and was discarded, and the stray
    module-level call (which crashed before any values existed) was removed.
    """
    return (int(n1) + int(n2) + int(n3) + int(n4) + int(n5)) / 5
def evenodd(value=0):
    """Return 'even' or 'odd' for the integer part of `value`.

    Rewritten: the original body (`greatestnumber == bool(str(greatestnumber))`)
    compared an undefined name and discarded the result, so it always crashed
    and computed nothing. The `value` parameter (default 0) is backward
    compatible with the previous zero-argument signature.
    """
    return "even" if int(value) % 2 == 0 else "odd"
def result(avgvalue, intpart, evenodd):
    """Format the summary report for the computed average.

    Args:
        avgvalue: the computed average.
        intpart: the integer part of the average.
        evenodd: 'even' or 'odd' classification of the integer part.
    Returns:
        The formatted multi-line report string.

    BUG FIX: the original evaluated `result + "..."` (adding the template to
    the function object itself, a TypeError) and never built or returned the
    message; the formatted string is now created and returned. Typos in the
    template ("intger", "THe") were fixed.
    """
    return """
    The average is {}
    The integer part of the average is {}.
    The integer part is {}.
    """.format(avgvalue, intpart, evenodd)
| 1,169 | 456 |
class ListNode:
    """A single node of a singly linked list."""

    def __init__(self, val, next=None):
        # `next` is None for the tail node.
        self.val = val
        self.next = next
class LinkedList:
    """Minimal singly linked list with two nth-from-end removal strategies."""

    def __init__(self):
        self.head = None

    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end using the two-pointer technique.

        BUG FIX: when n equals the list length the fast pointer is None after
        the advance loop, so `fast.next` raised AttributeError; that case now
        removes the head node, which is the correct result.
        """
        fast = slow = head
        # Advance `fast` n nodes ahead of `slow`.
        for _ in range(n):
            if not fast:
                # n is larger than the list length: nothing to remove.
                self.printNode(head)
                return head
            fast = fast.next
        if fast is None:
            # n == length: the node to delete is the head itself.
            return head.next
        # Walk both pointers until `fast` is the last node; `slow` then sits
        # just before the node to delete.
        while fast.next:
            fast = fast.next
            slow = slow.next
        slow.next = slow.next.next
        return head

    def remove_nth_from_end2(self, head, n):
        """Recursive variant: count nodes on the way back up, drop the n-th."""
        def remove(head, n):
            if head is None: return head, 0
            node, count = remove(head.next, n)
            if node is not None: print('node: ', node.val, ' - count: ', count)
            count += 1
            head.next = node
            if count == n:
                # BUG FIX: the debug print dereferenced node.val even when
                # `node` is None (removing the tail, n == 1), which crashed.
                if node is not None:
                    print('Somthing here: ', count, head.val, node.val)
                head = head.next
            return head, count
        return remove(head, n)[0]  # Get head

    def count(self, head):
        """Return the number of nodes reachable from `head`."""
        def count_size(head):
            if head is None: return 0
            return count_size(head.next) + 1
        return count_size(head)

    def shift(self, val):
        """Push a new node carrying `val` onto the front of the list."""
        new_node = ListNode(val)
        new_node.next = self.head
        self.head = new_node

    def printNode(self, head):
        """Print the list values from `head` as a Python list."""
        result = []
        if head is None:
            print("List node null")
        while (head):
            result.append(head.val)
            head = head.next
        print(result)
if __name__ == '__main__':
    # Build the list 0 -> 1 -> 2 -> 3 -> 4 -> 6 -> 8 -> 5 by pushing each
    # value onto the front, then drop the 2nd node from the end and show the
    # list before and after.
    linked = LinkedList()
    for value in (5, 8, 6, 4, 3, 2, 1, 0):
        linked.shift(value)
    linked.printNode(linked.head)
    answer = linked.remove_nth_from_end2(linked.head, 2)
    linked.printNode(answer)
    # while answer is not None:
    #     print(answer.val)
    #     answer = answer.next
import re
from django.conf import settings
from rest_framework.views import APIView
from rest_framework.generics import CreateAPIView, RetrieveAPIView, UpdateAPIView
from random import randint
from django_redis import get_redis_connection
from rest_framework.response import Response
from rest_framework_jwt.views import ObtainJSONWebToken
from celery_tasks.sms_code.tasks import send_sms_code
from goods.models import SKU
from goods.serializers import SKUListSerializers
from users.models import User
from users.serializers import UserSerializers, UserDetailSerializer, EmailSerializer, AddUserBrowsingHistorySerializer
from rest_framework.permissions import IsAuthenticated
from itsdangerous import TimedJSONWebSignatureSerializer as TJS
from users.utils import merge_cart_cookie_to_redis
from django.http import HttpResponse
from meiduo_mall.libs.captcha.captcha import captcha
from oauth import constants
# Send an SMS verification code
class SmsCodeView(APIView):
    """Issue a 6-digit SMS verification code for a mobile number."""

    def get(self, request, mobile):
        redis_conn = get_redis_connection('sms_code')
        # Enforce a 60 second cool-down between requests for the same number.
        if redis_conn.get('sms_code_flag_%s' % mobile):
            return Response({'error': '请求过于频繁'}, status=400)
        # Zero-padded 6-digit code.
        sms_code = '%06d' % randint(0, 999999)
        print(sms_code)
        # Store the code (5 min TTL) and the cool-down flag (60 s TTL) in one
        # pipeline so Redis is contacted only once.
        pipeline = redis_conn.pipeline()
        pipeline.setex('sms_code_%s' % mobile, 300, sms_code)
        pipeline.setex('sms_code_flag_%s' % mobile, 60, 'a')
        pipeline.execute()
        # Deliver asynchronously through Celery. Earlier synchronous and
        # thread-based variants, kept for reference:
        # ccp = CCP(); ccp.send_template_sms(mobile, [sms_code, '5'], 1)
        # t = Thread(target=work, kwargs={'mobile': mobile, 'sms_code': sms_code}); t.start()
        send_sms_code.delay(mobile, sms_code)
        return Response({'message': 'ok'})
# Check whether a username is already taken
class UserNameView(APIView):
    """Report how many existing accounts carry the given username."""

    def get(self, request, username):
        matches = User.objects.filter(username=username).count()
        return Response({'username': username, 'count': matches})
# Check whether a mobile number is already registered
class MobileView(APIView):
    """Report how many existing accounts carry the given mobile number."""

    def get(self, request, mobile):
        matches = User.objects.filter(mobile=mobile).count()
        return Response({'mobile': mobile, 'count': matches})
# Registration: create (bind) a new user account
class UsersView(CreateAPIView):
    """POST endpoint creating a user; validation lives in UserSerializers."""
    serializer_class = UserSerializers
# User-centre profile display
class UserDetailView(RetrieveAPIView):
    serializer_class = UserDetailSerializer
    permission_classes = [IsAuthenticated]
    def get_object(self):
        # Serve the authenticated user directly instead of resolving a pk
        # from the URL: `self` is the generic view instance, so
        # `self.request.user` is the user attached by the JWT auth layer.
        return self.request.user
# Send the account-verification email
class EmailView(UpdateAPIView):
    serializer_class = EmailSerializer
    permission_classes = [IsAuthenticated]
    # The stock implementation resolves the object from a pk in the URL; the
    # frontend does not send one, so return the authenticated user instead.
    def get_object(self, *args, **kwargs):
        # Return the target object directly.
        return self.request.user
# Confirm an email address from the token in the verification link
class VerifyEmailView(APIView):
    """Validate the signed token from the verification email and mark the
    user's email address as active."""

    def get(self, request):
        # Token passed back by the link in the verification email.
        token = request.query_params.get('token')
        if not token:
            return Response({'error': '缺少token'}, status=400)
        tjs = TJS(settings.SECRET_KEY, 300)
        try:
            data = tjs.loads(token)
        except Exception:
            return Response({'errors': '无效token'}, status=400)
        username = data['name']
        # BUG FIX: the username must be a keyword lookup --
        # `User.objects.get(username)` passed it positionally and raised a
        # TypeError at runtime. A missing user now also yields a 400 instead
        # of an unhandled DoesNotExist. The stray debug print was removed.
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return Response({'errors': '无效token'}, status=400)
        user.email_active = True
        user.save()
        return Response({'message': 'ok'})
# Save / list the user's product browsing history
class UserBrowsingHistoryView(CreateAPIView):
    # POST: the serializer records the browsed sku into Redis.
    serializer_class = AddUserBrowsingHistorySerializer
    # permission_classes = [IsAuthenticated]
    # GET: return the user's most recent browsing records
    def get(self, request):
        user = request.user
        conn = get_redis_connection('history')
        # NOTE(review): lrange(0, 6) returns up to 7 ids, although the
        # original comment said 5 records -- confirm which count is intended.
        sku_ids = conn.lrange('history_%s'% user.id, 0, 6)
        # Resolve the stored ids to SKU rows.
        skus = SKU.objects.filter(id__in=sku_ids)
        # Serialize and return.
        ser = SKUListSerializers(skus, many=True)
        return Response(ser.data)
# Login view overriding ObtainJSONWebToken to merge the cart on login
class UserAuthorizeView(ObtainJSONWebToken):
    def post(self, request, *args, **kwargs):
        # Let the JWT view authenticate and build the token response first.
        response = super().post(request, *args, **kwargs)
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            user = serializer.object.get('user') or request.user
            # Merge the cookie-based cart into Redis for the logged-in user
            # (plain positional arguments -- the order matters).
            response = merge_cart_cookie_to_redis(request, user,response)
        # Return the (possibly cookie-modified) response.
        return response
# Change password (while logged in)
class PasswordResetView(UpdateAPIView):
    """Verify the old password and store a new one for the given user id."""

    def put(self, request, user_id):
        # BUG FIX: the bare `except` swallowed every error (including
        # programming mistakes); it is narrowed to the lookup failure it was
        # meant to catch.
        try:
            user = User.objects.get(id=user_id)
        except User.DoesNotExist:
            return Response({'error': '数据库异常'})
        data = request.data
        # Robustness: missing fields previously raised KeyError (HTTP 500).
        old_password = data.get('old_password')
        password = data.get('password')
        password2 = data.get('password2')
        if not all([old_password, password, password2]):
            return Response({'error': '密码错误'})
        # The old password must match before anything changes.
        if not user.check_password(old_password):
            return Response({'error': '密码错误'})
        # Both new-password entries must agree.
        if password != password2:
            return Response({'error': '两次密码输入不一致'})
        # Hash and persist the new password.
        user.set_password(password)
        user.save()
        return Response({'message': 'ok'})
# Generate a graphical captcha
class ImageCodeView(APIView):
    """
    Graphical captcha endpoint: renders an image and stores its answer.
    """
    def get(self, request, image_code_id):
        # name: internal identifier, text: the expected answer,
        # image: the rendered image bytes.
        name, text, image = captcha.generate_captcha()
        redis_conn = get_redis_connection("img_codes")
        redis_conn.setex("img_%s" % image_code_id, constants.IMAGE_CODE_REDIS_EXPIRES, text)
        # Return the raw image bytes directly; DRF content negotiation is not
        # wanted here, so Django's plain HttpResponse is used instead of
        # rest_framework's Response.
        return HttpResponse(image)
# Forgotten password - step 1:
# verify the image captcha and check that the account exists
class CheckUsernameVIew(APIView):
    """Validate the image captcha, then look the account up by username or
    mobile number and return the data needed for the next step."""

    def get(self, request, username):
        # Captcha answer typed by the user, plus the captcha's id.
        image_code = request.GET.get('text')
        image_code_id = request.GET.get('image_code_id')
        if not image_code:
            return Response({'error': '图片验证码错误'})
        conn = get_redis_connection("img_codes")
        img_code = conn.get('img_%s' % image_code_id)
        # BUG FIX: the stored code is None once it expires (or was never
        # set); that case crashed on .decode() with an AttributeError.
        if img_code is None or img_code.decode().lower() != image_code.lower():
            return Response({'error': '图片验证码错误'})
        # Accept either a mobile number or a username; the bare `except` is
        # narrowed to the expected lookup failure.
        try:
            if re.match(r'^1[3-9]\d{9}', username):
                user = User.objects.get(mobile=username)
            else:
                user = User.objects.get(username=username)
        except User.DoesNotExist:
            return Response({'error': '用户不存在'})
        return Response({
            'mobile': user.mobile,
            'access_token': user.id,
        })
# Forgotten password - step 2: send an SMS verification code
class SendSmsCodeView(APIView):
    """Send a reset code to the mobile number of the account identified by
    the access_token issued in step 1."""

    def get(self, request):
        access_token = request.GET.get('access_token')
        redis_conn = get_redis_connection('sms_code')
        user = User.objects.get(id=access_token)
        mobile = user.mobile
        # Enforce the 60 second cool-down between sends.
        if redis_conn.get('sms_code_flag_%s' % mobile):
            return Response({'error': '请求过于频繁'}, status=400)
        # Zero-padded 6-digit code.
        sms_code = '%06d' % randint(0, 999999)
        print(sms_code)
        # One Redis round-trip: store the code (5 min) and the flag (60 s).
        pipeline = redis_conn.pipeline()
        pipeline.setex('sms_code_%s' % mobile, 300, sms_code)
        pipeline.setex('sms_code_flag_%s' % mobile, 60, 'a')
        pipeline.execute()
        # Deliver asynchronously via Celery.
        send_sms_code.delay(mobile, sms_code)
        return Response({'message': 'ok'})
# Forgotten password - step 3: check the SMS verification code
class CheckSmsCode(APIView):
    """Compare the submitted SMS code with the one stored in Redis."""

    def get(self, request, username):
        sms_code = request.GET.get('sms_code')
        user = User.objects.get(username=username)
        conn = get_redis_connection('sms_code')
        # Redis returns bytes, or None once the code has expired.
        real_sms_code = conn.get('sms_code_%s' % user.mobile)
        # BUG FIX: the original `raise Response(...)` raised a non-exception
        # object (itself a TypeError at runtime), and expiry was "handled"
        # with a try/except around conn.get, which never raises for a
        # missing key; error responses are now returned with status 400.
        if real_sms_code is None:
            return Response({'message': '短信验证码过期'}, status=400)
        if real_sms_code.decode().lower() != sms_code.lower():
            return Response({'message': '验证码错误'}, status=400)
        return Response({
            'user_id': user.id,
            'access_token': user.id
        })
# Forgotten password - step 4: store the new password
class NewPassword(APIView):
    """Persist the new password once both entries match."""

    def post(self, request, user_id):
        new_password = request.data['password']
        confirmation = request.data['password2']
        # Both entries must agree before anything is saved.
        if new_password != confirmation:
            return Response({'message': '两次密码不一致'})
        account = User.objects.get(id=user_id)
        account.set_password(new_password)
        account.save()
        return Response('ok')
| 8,800 | 3,491 |
from django.conf import settings
from django.conf.urls.defaults import *
from socialregistration.contrib.googleapps.views import GoogleAppsRedirect, \
GoogleAppsSetup, GoogleAppsCallback
urlpatterns = patterns('',
    # OAuth-style Google Apps flow: send the user to the provider, handle the
    # provider callback, then finish local account setup.
    # NOTE(review): django.conf.urls.defaults and patterns() are pre-1.6
    # Django APIs (removed in 1.10) -- confirm the pinned Django version.
    url('^redirect/$', GoogleAppsRedirect.as_view(), name='redirect'),
    url('^callback/$', GoogleAppsCallback.as_view(), name='callback'),
    url('^setup/$', GoogleAppsSetup.as_view(), name='setup'),
)
| 425 | 124 |
# Display output
for message in ("New python file!", "Add a new print line."):
    print(message)
import yaml
import os
import datetime
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
from IPython.display import display
import sys
class POCS_devices_database(object):
    """
    Manage serial numbers and related information for devices used with POCS.

    Displays ipywidgets dropdowns to select device information, then writes a
    .yaml config file that POCS can read (plus a timestamped archive copy).
    """
    def __init__(self,
                 device_info_master_directory='/var/huntsman-pocs/conf_files/',
                 device_info_master_file='device_info_master.yaml',
                 local_directory='/var/huntsman-pocs/conf_files/',
                 archive_directory='/var/huntsman-pocs/conf_files/archive/',
                 output_yaml_filename='huntsman.yaml'):
        """
        Record output locations, load the master device data and any previous
        config, and compute the timestamped archive filename.

        Args:
            device_info_master_directory: directory of the master device .yaml
            device_info_master_file: filename of the master device .yaml
            local_directory: directory of the config file used by POCS
            archive_directory: directory keeping archived config versions
            output_yaml_filename: filename of the local config file
        """
        self.local_directory = local_directory
        self.archive_directory = archive_directory
        self.device_info_master_directory = device_info_master_directory
        self.output_yaml_filename = output_yaml_filename
        self.device_info_master_file = device_info_master_file
        device_info_file = os.path.join(
            self.device_info_master_directory, self.device_info_master_file)
        try:
            with open(device_info_file, 'r') as file:
                # BUG FIX: yaml.load() without an explicit Loader is unsafe
                # and rejected by modern PyYAML; safe_load suffices for plain
                # config data.
                self.data = yaml.safe_load(file)
        except FileNotFoundError:
            sys.exit("Cannot find device information master file")
        # Timestamped archive name, e.g. huntsman_2019_04_25_12_52.yaml.
        date_info = datetime.datetime.today()
        datetime_str = date_info.strftime('%Y_%m_%d_%H_%M')
        self.archive_filename = '{}_{}.{}'.format('huntsman', datetime_str, 'yaml')
        previous_file = os.path.join(self.local_directory, self.output_yaml_filename)
        # Reload general (non-camera) settings from the previous config file.
        try:
            with open(previous_file, 'r') as file:
                self.data_dict = yaml.safe_load(file)
                if self.data_dict is not None and 'cameras' in self.data_dict:
                    del self.data_dict['cameras']
        except FileNotFoundError:
            self.data_dict = {}
        if self.data_dict is None:
            # BUG FIX: an empty previous file parses to None, which made the
            # update() below raise AttributeError.
            self.data_dict = {}
        self.data_dict.update(
            {'cameras': {'hdr_mode': True, 'auto_detect': False, 'devices': [None]}})

    def add_device_widget(self, dummy_variable_for_widget):
        """Append the currently selected device set to `data_dict`.

        Called by the "Add new device set" button in start_interface().

        Args:
            dummy_variable_for_widget: required by the widget callback API.
        Returns:
            The updated data_dict.
        """
        additional_device = {'model': self.camera_type_chosen,
                             'port': self.camera_sn_chosen,
                             'filter_type': self.filter_ID_chosen,
                             'focuser': {'model': 'birger',
                                         'port': self.birger_sn_chosen
                                         },
                             'lens': {'model': 'canon',
                                      'port': self.lens_sn_chosen,
                                      'name': self.lens_name_chosen,
                                      'image_stabalisataion': self.lens_image_stabalisation_chosen},
                             'USB_hub_serial_number': self.USB_hub_SN_chosen,
                             'camera_into_serial_adaptor_port': self.camera_to_serial_port_chosen,
                             'serial_adaptor_into_USBhub_port': self.serial_to_USBhub_port_chosen,
                             'camera_into_USBhub_port': self.camera_to_USBhub_port_chosen
                             }
        # Replace the [None] placeholder on first use, append afterwards.
        if self.data_dict['cameras']['devices'] == [None]:
            self.data_dict['cameras']['devices'] = [additional_device]
        else:
            self.data_dict['cameras']['devices'].append(additional_device)
        return self.data_dict

    def save_file(self, dummy_variable_for_widget):
        """Write `data_dict` to the local config file and the archive copy.

        Called by the "Save File" button in start_interface(). The local file
        (default '/var/huntsman-pocs/conf_files/huntsman.yaml') is read by
        POCS; the dated archive copy acts as version control.

        Args:
            dummy_variable_for_widget: required by the widget callback API.
        """
        local_path = os.path.join(self.local_directory, self.output_yaml_filename)
        archive_path = os.path.join(self.archive_directory, self.archive_filename)
        # Context managers guarantee the files are closed even if dump fails
        # (the original opened/closed them by hand).
        for out_path in (local_path, archive_path):
            with open(out_path, "w") as out_file:
                yaml.dump(self.data_dict, out_file, default_flow_style=False, indent=4)

    def start_interface(self):
        """This function runs all the code to generate the .yaml config files for the Huntsman-POCS system.
        It displays the Jupyter widgets which the user can interact with to write and save the config files.
        Files are saved in two locations, one for the local file that POCS will access,
        and the other is an archive of all previous config files which acts as a version control.
        By default, these locations are: (but can be changed using the arguments in the __init__ method)
        '/var/huntsman-pocs/conf_files/huntsman.yaml' for the local file.
        '/var/huntsman-pocs/conf_files/huntsman_archive/huntsman_YYYY_mm_dd_hh_MM.yaml' for the archive file.
        Steps for the user to follow:
            Select from the dropdown menus the information for one device set.
            Click 'Add new device set'.
            Select from the dropdown menus the information for the next device set.
            Click 'Add new device set'.
            Repeat until all device sets have been added.
            Click 'Save File' to write the .yaml file.
        Displays:
            Jupyter widgets of drop-down menus to select the device sets.
            These widgets are used to generate and save the .yaml config files.
        Output:
            A .yaml config file for Huntsman
        """
        # The docstring doubles as the user instructions shown on screen.
        print(self.start_interface.__doc__)
        birger_sn = self.data['birger_SN']
        self.birger_serial_number = interactive(
            birger_sn_widget, birger_serial_number_displayed=birger_sn)
        camera_sn = self.data['camera_SN']
        self.camera_serial_number = interactive(
            camera_sn_widget, camera_serial_number_displayed=camera_sn)
        lens_sn = self.data['lens_SN']
        self.lens_serial_number = interactive(lens_sn_widget, lens_serial_number_displayed=lens_sn)
        filter_ID = self.data['filter_ID']
        self.filter_ID_code = interactive(filter_ID_widget, filter_ID_code_displayed=filter_ID)
        serial_into_USBhub = self.data['serial_into_USBhub_port']
        self.serial_into_USBhub_port = interactive(
            serial_to_usb_widget, serial_into_USBhub_port_displayed=serial_into_USBhub)
        camera_into_serial = self.data['camera_into_serial_port']
        self.camera_into_serial_port = interactive(
            camera_to_serial_widget, camera_into_serial_port_displayed=camera_into_serial)
        USBhub = self.data['USBhub_SN']
        self.USBhub_SN = interactive(usbhub_sn_widget, USBhub_SN_displayed=USBhub)
        camera_into_USBhub = self.data['camera_into_USBhub_port']
        self.camera_into_USBhub_port = interactive(
            camera_to_usb_widget, camera_into_USBhub_port_displayed=camera_into_USBhub)
        display(self.birger_serial_number)
        display(self.camera_serial_number)
        display(self.lens_serial_number)
        display(self.filter_ID_code)
        display(self.serial_into_USBhub_port)
        display(self.camera_into_serial_port)
        display(self.USBhub_SN)
        display(self.camera_into_USBhub_port)
        # Snapshot the current widget results; the master data maps camera
        # and lens serial numbers to their model names and capabilities.
        self.birger_sn_chosen = self.birger_serial_number.result
        self.camera_sn_chosen = self.camera_serial_number.result
        self.lens_sn_chosen = self.lens_serial_number.result
        self.filter_ID_chosen = self.filter_ID_code.result
        self.serial_to_USBhub_port_chosen = self.serial_into_USBhub_port.result
        self.camera_to_serial_port_chosen = self.camera_into_serial_port.result
        self.USB_hub_SN_chosen = self.USBhub_SN.result
        self.camera_to_USBhub_port_chosen = self.camera_into_USBhub_port.result
        self.camera_type_chosen = self.data['camera_type'][self.camera_sn_chosen]
        self.lens_name_chosen = self.data['lens_name'][self.lens_sn_chosen]
        self.lens_image_stabalisation_chosen = self.data['lens_image_stabalisation'][self.lens_sn_chosen]
        button1 = widgets.Button(description="Add new device set")
        display(button1)
        button1.on_click(self.add_device_widget)
        button = widgets.Button(description="Save File")
        display(button)
        button.on_click(self.save_file)
def birger_sn_widget(birger_serial_number_displayed):
    """Identity callback backing the Birger serial-number dropdown.

    Returns:
        The Birger focuser serial number currently selected in the widget.
    """
    return birger_serial_number_displayed
def camera_sn_widget(camera_serial_number_displayed):
    """Identity callback backing the camera serial-number dropdown.

    Returns:
        The camera serial number currently selected in the widget.
    """
    return camera_serial_number_displayed
def lens_sn_widget(lens_serial_number_displayed):
    """Identity callback backing the lens serial-number dropdown.

    Returns:
        The lens serial number currently selected in the widget.
    """
    return lens_serial_number_displayed
def filter_ID_widget(filter_ID_code_displayed):
    """Identity callback backing the filter-ID dropdown.

    Returns:
        The filter ID currently selected in the widget.
    """
    return filter_ID_code_displayed
def serial_to_usb_widget(serial_into_USBhub_port_displayed):
    """Identity callback backing the serial-adaptor-to-USB-hub port dropdown.

    Returns:
        The USB hub port number (for the serial adaptor) currently selected.
    """
    return serial_into_USBhub_port_displayed
def camera_to_serial_widget(camera_into_serial_port_displayed):
    """Identity callback backing the camera-to-serial-adaptor port dropdown.

    Returns:
        The serial adaptor port number (for the camera) currently selected.
    """
    return camera_into_serial_port_displayed
def usbhub_sn_widget(USBhub_SN_displayed):
    """Identity callback backing the USB-hub serial-number dropdown.

    Returns:
        The USB hub serial number currently selected in the widget.
    """
    return USBhub_SN_displayed
def camera_to_usb_widget(camera_into_USBhub_port_displayed):
    """Identity callback backing the camera-to-USB-hub port dropdown.

    Returns:
        The USB hub port number (for the camera) currently selected.
    """
    return camera_into_USBhub_port_displayed
| 14,049 | 4,015 |
import getpass
from datetime import datetime
from synapse.resources.resources import ResourcesController
from synapse.logger import logger
from synapse.synapse_exceptions import ResourceException
@logger
class DirectoriesController(ResourcesController):
    """CRUD + compliance controller for directory resources.

    State dictionaries carry: present, owner, group, mode, mod_time, c_time.
    """
    __resource__ = "directories"

    def read(self, res_id=None, attributes=None):
        """Return the observed state of the directory at `res_id`."""
        self.check_mandatory(res_id)
        status = {}
        present = self.module.is_dir(res_id)
        status['present'] = present
        if present:
            status['owner'] = self.module.owner(res_id)
            status['group'] = self.module.group(res_id)
            status['mode'] = self.module.mode(res_id)
            status['mod_time'] = self.module.mod_time(res_id)
            status['c_time'] = self.module.c_time(res_id)
        return status

    def create(self, res_id=None, attributes=None):
        """Create the directory, persist its desired state, apply metadata."""
        # BUG FIX: the mutable default argument ({}) is shared across calls
        # and can leak state between requests; use None and normalize.
        attributes = attributes if attributes is not None else {}
        self.check_mandatory(res_id)
        monitor = attributes.get('monitor')
        owner = self._get_owner(res_id, attributes)
        group = self._get_group(res_id, attributes)
        mode = self._get_mode(res_id, attributes)
        state = {
            'owner': owner,
            'group': group,
            'mode': mode,
            'mod_time': str(datetime.now()),
            'c_time': str(datetime.now()),
            'present': True
        }
        self.save_state(res_id, state, monitor=monitor)
        self.module.create_folders(res_id)
        # Apply ownership/permissions to the path just created.
        self.module.update_meta(res_id, owner, group, mode)
        return self.read(res_id=res_id)

    def update(self, res_id=None, attributes=None):
        """Updating a directory is the same operation as creating it."""
        return self.create(res_id=res_id, attributes=attributes)

    def delete(self, res_id=None, attributes=None):
        """Remove the directory and persist the 'absent' desired state."""
        attributes = attributes if attributes is not None else {}
        self.check_mandatory(res_id)
        monitor = attributes.get('monitor')
        state = {'present': False}
        self.save_state(res_id, state, monitor=monitor)
        previous_state = self.read(res_id=res_id)
        self.module.delete_folder(res_id)
        if not self.module.exists(res_id):
            previous_state['present'] = False
        self.response = previous_state
        return self.read(res_id)

    def is_compliant(self, persisted_state, current_state):
        """Return True when the observed state matches the persisted one."""
        # Compare the presence flag first: if it differs there is a
        # compliance issue and no need to look further.
        if persisted_state.get("present") != current_state.get("present"):
            return False
        # Then compare the path attributes one by one.
        for attr in ("name", "owner", "group", "mode"):
            if persisted_state.get(attr) != current_state.get(attr):
                return False
        return True

    def _get_owner(self, path, attributes):
        """Resolve the target owner: attribute > existing path > current user."""
        # getpass is portable between Unix and Windows.
        owner = getpass.getuser()
        if self.module.exists(path):
            owner = self.module.owner(path)
        if attributes.get('owner'):
            owner = attributes['owner']
        return owner

    def _get_group(self, path, attributes):
        """Resolve the target group: attribute > existing path > current user."""
        group = getpass.getuser()
        if self.module.exists(path):
            group = self.module.group(path)
        if attributes.get('group'):
            group = attributes['group']
        return group

    def _get_mode(self, path, attributes):
        """Resolve the target mode: attribute > existing path > umask default."""
        mode = self.module.get_default_mode(path)
        if self.module.exists(path):
            mode = self.module.mode(path)
        # An explicit mode is normalized to its octal string form.
        if attributes.get('mode'):
            try:
                mode = oct(int(attributes['mode'], 8))
            except ValueError as err:
                raise ResourceException("Error with path mode (%s)" % err)
        return mode
| 4,231 | 1,204 |
from functools import wraps
from types import CoroutineType
import flask
from biothings.web import templates
from biothings.web.options import OptionError
from biothings.web.query.pipeline import (QueryPipelineException,
QueryPipelineInterrupt)
from tornado.template import Loader
routes = []
def route(pattern, methods=("GET", "POST")):
    """Register a view function for *pattern*.

    The decorated function is called with the app's ``biothings``
    namespace and the arguments parsed by its optionset (looked up by the
    function's name). The view may be a plain function or return a
    coroutine, which is awaited.
    """
    def decorator(f):
        # BUGFIX: preserve f's identity on the wrapper (like
        # handle_es_conn does); otherwise every view is named 'B', which
        # collides when views are registered by __name__.
        @wraps(f)
        async def wrapper(*args, **kwargs):
            biothings = flask.current_app.biothings
            optionsets = biothings.optionsets
            optionset = optionsets.get(f.__name__)
            if optionset:
                try:
                    _args = optionset.parse(flask.request.method, (
                        (tuple(kwargs.values()), {}),
                        flask.request.args,
                        flask.request.form,
                        flask.request.get_json()
                    ))
                except OptionError as err:
                    # malformed request parameters -> 400 with details
                    return err.info, 400
            else:
                _args = {}
            result = f(biothings, _args)
            # support both sync views and coroutine views
            if isinstance(result, CoroutineType):
                return await result
            return result
        # metadata consumed when the routes are registered with the app
        wrapper.pattern = pattern
        wrapper.methods = methods
        wrapper.name = f.__name__
        routes.append(wrapper)
        return wrapper
    return decorator
@route("/")
def homepage(biothings, args):
    """Render the default landing page from the bundled template."""
    context = {
        'alert': 'Front Page Not Configured.',
        'title': 'Biothings API',
        'contents': biothings.handlers.keys(),
        'support': biothings.metadata.types,
        'url': 'http://biothings.io/',
    }
    loader = Loader(templates.__path__[0])
    return loader.load("home.html").generate(**context)
def handle_es_conn(f):
    """Decorator for views that query Elasticsearch.

    Re-initializes the async transport before each call, translates
    query-pipeline errors into HTTP-style responses, and always closes
    the client afterwards.
    """
    @wraps(f)
    async def _(biothings, *args, **kwargs):
        client = biothings.elasticsearch.async_client
        # because of the flask execution model
        # each time the async function is executed
        # it is executed on a different event loop
        # reset the connections to use the active loop
        del client.transport.connection_pool
        await client.transport._async_init()
        try:
            response = await f(biothings, *args, **kwargs)
        except QueryPipelineInterrupt as itr:
            # NOTE(review): the interrupt appears to carry a ready
            # response payload — returned as-is; confirm upstream.
            return itr.details
        except QueryPipelineException as exc:
            # reuse `kwargs` as the error payload skeleton
            kwargs = exc.details if isinstance(exc.details, dict) else {}
            kwargs["success"] = False
            kwargs["status"] = exc.code
            kwargs["reason"] = exc.summary
            return kwargs, exc.code
        finally:
            # runs before any of the returns above, so the client is
            # closed on every path
            await client.close()
        return response
    return _
@route("/{ver}/query")
@handle_es_conn
async def query(biothings, args):
    """Run a search request through the query pipeline."""
    result = await biothings.pipeline.search(**args)
    return result
@route([
    "/{ver}/{typ}/",
    "/{ver}/{typ}/<id>"])
@handle_es_conn
async def annotation(biothings, args):
    """Fetch annotation document(s), optionally by id."""
    docs = await biothings.pipeline.fetch(**args)
    # the result may be a list, so serialize it explicitly
    return flask.jsonify(docs)
@route("/{ver}/metadata")
@handle_es_conn
async def metadata(biothings, args):
    """Return API metadata after refreshing it from the backend."""
    await biothings.metadata.refresh(None)
    meta = biothings.metadata.get_metadata(None)
    return meta
@route("/{ver}/metadata/fields")
@handle_es_conn
async def fields(biothings, args):
    """Return field mappings transformed into the user-facing shape."""
    await biothings.metadata.refresh(None)
    raw_mappings = biothings.metadata.get_mappings(None)
    return biothings.pipeline.formatter.transform_mapping(raw_mappings)
@route("/status")
@handle_es_conn
async def status(biothings, args):
    """Health-check endpoint backed by the async health checker."""
    health = await biothings.health.async_check()
    return health
| 3,532 | 995 |
# Generated by Django 3.2.4 on 2021-06-26 20:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Sentence`` table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Sentence',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # indexed (db_index=True) — presumably to speed up author lookups
                ('author', models.CharField(db_index=True, max_length=255)),
                ('book', models.CharField(max_length=255)),
                ('sentence', models.CharField(max_length=2048)),
            ],
        ),
    ]
| 633 | 198 |
import ast as _ast
from .compat import PY3, PY35
# Re-export the AST node classes from the stdlib `ast` module, so the rest
# of the package imports them from a single place.
If, Str, Num, For = _ast.If, _ast.Str, _ast.Num, _ast.For
Expr, Name, Load, List = _ast.Expr, _ast.Name, _ast.Load, _ast.List
Index, Store, IfExp, IsNot = _ast.Index, _ast.Store, _ast.IfExp, _ast.IsNot
Tuple, Module, Assign = _ast.Tuple, _ast.Module, _ast.Assign
Compare, ListComp, Subscript = _ast.Compare, _ast.ListComp, _ast.Subscript
GeneratorExp = _ast.GeneratorExp
comprehension = _ast.comprehension
if PY3:
    # Python 3 widened several AST constructors; wrap them so callers can
    # keep using the Python 2 positional signatures.
    def arg(arg):
        # py3 `arg` node takes (arg, annotation); no annotation here
        return _ast.arg(arg, None)
    def arguments(args, vararg, kwarg, defaults):
        # NOTE(review): this positional layout predates 3.8's
        # posonlyargs field — verify against the targeted Python version
        return _ast.arguments(args, vararg, [], [], kwarg, defaults)
    def FunctionDef(name, args, body, decorator_list):
        # trailing None fills the py3-only `returns` annotation slot
        return _ast.FunctionDef(name, args, body, decorator_list, None)
    Name = _ast.Name
    keyword = _ast.keyword
    Attribute = _ast.Attribute
    if PY35:
        # 3.5 dropped starargs/kwargs from Call; accept and ignore them
        # to keep a uniform call site across versions
        def Call(func, args, keywords, starargs, kwargs):
            return _ast.Call(func, args, keywords)
    else:
        Call = _ast.Call
else:
    # Python 2: constructors expect plain (byte) strings, so coerce
    def arg(arg):
        return _ast.Name(str(arg), _ast.Param())
    def arguments(args, vararg, kwarg, defaults):
        return _ast.arguments(args, vararg, kwarg, defaults)
    def FunctionDef(name, args, body, decorator_list):
        return _ast.FunctionDef(str(name), args, body, decorator_list)
    def Name(id, ctx):
        return _ast.Name(str(id), ctx)
    def keyword(arg, value):
        return _ast.keyword(str(arg), value)
    def Attribute(value, attr, ctx):
        return _ast.Attribute(value, str(attr), ctx)
    Call = _ast.Call
| 1,571 | 552 |
import pytest
from practice_atcoder.abs.product import question
class Test(object):
    """Parametrized tests for the ``question`` solver."""
    @pytest.mark.parametrize("ab,expect", [
        ("3 4", "Even"),
        ("1 21", "Odd"),
    ])
    def test(self, ab, expect):
        # `ab` is the raw input line ("a b"); the solver answers Even/Odd
        assert question(ab) == expect
| 258 | 91 |
#
# Copyright 2018 Alexander Fasching
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = ('SaturatingCounter', 'History')
class SaturatingCounter(object):
    """An integer counter clamped to the inclusive range [minval, maxval]."""
    # read-only view of the current counter value
    value = property(lambda self: self._value)
    def __init__(self, minval, maxval, init=None):
        """Create a counter over [minval, maxval].

        ``init`` is the starting value (defaults to ``minval``).
        """
        assert init is None or minval <= init <= maxval
        self._minval = minval
        self._maxval = maxval
        # BUGFIX: `init or minval` discarded a legitimate init of 0
        # (0 is falsy); test against None explicitly instead.
        self._value = minval if init is None else init
    def increment(self):
        """Increase the value by one, saturating at ``maxval``."""
        self._value = min(self._value + 1, self._maxval)
    def decrement(self):
        """Decrease the value by one, saturating at ``minval``."""
        self._value = max(self._value - 1, self._minval)
class History(object):
    """A fixed-width shift register of the most recent outcome bits."""
    # read-only view of the packed history bits
    value = property(lambda self: self._value)
    def __init__(self, length):
        self._length = length
        self._value = 0
    def update(self, taken):
        """Shift in the newest bit, dropping the oldest beyond `length`."""
        shifted = (self._value << 1) | taken
        self._value = shifted & ((1 << self._length) - 1)
| 1,440 | 444 |
import os
from pathlib import Path
import joblib
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
# Resolve paths relative to this file: two directory levels up is
# presumably the project root — verify against the repo layout.
root_folder = Path(os.path.dirname(os.path.dirname(__file__)))
training_data = root_folder / 'data' / 'processed' / 'train.csv'
model_folder = root_folder / 'models'
model_file = model_folder / 'classifier.bin'
# Load the processed training set; LABEL is the target column.
df = pd.read_csv(training_data)
x = df.drop(['LABEL'], axis=1)
y = df['LABEL']
model = RandomForestClassifier()
# 5-fold cross-validation; return_estimator=True keeps each fold's
# fitted model so the best one can be persisted below.
results = cross_validate(model, x, y, cv=5, return_estimator=True)
os.makedirs(model_folder, exist_ok=True)
# Persist the estimator from the best-scoring fold.
best_estimator = results['estimator'][np.argmax(results['test_score'])]
joblib.dump(best_estimator, model_file)
print(f"Done training. Stored model with score: {np.max(results['test_score'])}")
from .error import exception_type, check_method_lauched, check_is_int, \
check_is_in, check_key_is_in
from .normal_hist import compare_hist_to_norm | 152 | 56 |
"""Meta network using past gradients."""
import tensorflow as tf
class DualRNN(tf.keras.layers.Layer):
"""
Pretty similar to LayerCompetition, except:
1) Optionally aggregate features across batch before feeding into
the RNN. Doing this because if the RNN states were to
represent training state of the underlying network, the whole
batch is used for the underlying network and not just one
instance from the batch. Doing this also means that we'd be
training the meta-network RNN with batch_size = 1.
2) Extend the backward masking to be more similar to forward
masking - the past masked gradients are passed into the RNN,
while the current unmasked gradient is passed in through a
separate branch. (This needs the inner network to pass in
the masked gradient instead)
Because the RNN states could potentially not have the batch
dimension, we need to also pass in the current gradient at
the end to get the mask output as (B, N)
3) Added a few more options to experiment with different network
design.
Parameters
----------
"""
    def __init__(
        self,
        rnn_type,
        rnn_units,
        input_mlp,
        fwd_output_mlp,
        bwd_output_mlp,
        mask_thresh=0.1,
        dist_fn='none',
        use_bwd_mask=False,
        normalize_grads=False,
        normalize_acts=False,
        random_grads_stddev=None,
        use_nearest_grads=False,
        use_node_set=True,
        node_set_version='v3',
        use_batch_set=False,
        use_batch_summary=True,
        cur_reuse_branch=False,
        bwd_return_grads=False,
    ):
        """Build the dual-branch meta-network.

        rnn_type selects the history encoder ('simplernn'/'gru'/'lstm');
        the *_output_mlp heads produce the forward/backward masks.
        The many flags toggle input normalization and the set-feature
        variants used in forward()/backward().
        """
        super(DualRNN, self).__init__()
        assert rnn_type in ['simplernn', 'gru', 'lstm']
        if rnn_type == 'simplernn':
            self._rnn = tf.keras.layers.SimpleRNN(rnn_units)
        elif rnn_type == 'gru':
            self._rnn = tf.keras.layers.GRU(rnn_units)
        elif rnn_type == 'lstm':
            self._rnn = tf.keras.layers.LSTM(rnn_units)
        self._input_mlp = input_mlp
        self._fwd_output_mlp = fwd_output_mlp
        self._bwd_output_mlp = bwd_output_mlp
        self._mask_thresh = mask_thresh
        self._rnn_units = rnn_units
        self._dist_fn = dist_fn
        self._use_bwd_mask = use_bwd_mask
        self._normalize_grads = normalize_grads
        self._normalize_acts = normalize_acts
        self._use_node_set = use_node_set
        self._node_set_version = node_set_version
        self._use_batch_set = use_batch_set
        self._use_batch_summary = use_batch_summary
        self._random_grads_stddev = random_grads_stddev
        self._use_nearest_grads = use_nearest_grads
        self._cur_reuse_branch = cur_reuse_branch
        self._bwd_return_grads = bwd_return_grads
        self._last_input_mlp_input = None
        # If the forward head ends in a linear layer, a sigmoid must be
        # applied externally to get a (0, 1) mask; reject other act fns.
        if self._fwd_output_mlp._last_layer_act_fn_str == 'linear':
            self._fwd_apply_sigmoid = True
        elif self._fwd_output_mlp._last_layer_act_fn_str == 'sigmoid':
            self._fwd_apply_sigmoid = False
        else:
            raise ValueError()
        # The backward head is optional and follows the same convention.
        if self._use_bwd_mask is False:
            assert self._bwd_output_mlp is None
        else:
            if self._bwd_output_mlp._last_layer_act_fn_str == 'linear':
                self._bwd_apply_sigmoid = True
            elif self._bwd_output_mlp._last_layer_act_fn_str == 'sigmoid':
                self._bwd_apply_sigmoid = False
            else:
                raise ValueError()
def warm_start(self):
batch = {
'past_grads': tf.zeros((1, 1, 1)),
'past_acts': tf.zeros((1, 1, 1)),
'cur_acts': tf.zeros((1, 1)),
'cur_grads': tf.zeros((1, 1)),
}
self.forward(batch, training=False)
if self._use_bwd_mask:
self.backward(batch, training=False)
def first_forward(self, batch, training=None):
"""
batch : (B, N)
current activations.
"""
B = tf.shape(batch)[0]
N = tf.shape(batch)[1]
# currently initial state is zeros
h = tf.zeros((B * N, self._rnn_units), dtype=tf.float32)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
default_grads = self._get_default_grads(
past_grads=None,
past_acts=None,
cur_acts=cur_acts,
)
# (B, 1, N, cur_F)
cur_act_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=default_grads
)
# (B*N, cur_F)
cur_act_input = tf.reshape(cur_act_input, (B * N, cur_F))
# (B*N, F)
cur_act_feats = self._input_mlp.call(
cur_act_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_act_feats = tf.reshape(cur_act_feats, (B, 1, N, F))
# also run set features on cur_acts
# (B, 1, N, F')
cur_act_feats, F_p = self._get_set_feature(
cur_act_feats, F
)
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(batch, axis=-1)
norm = tf.tile(norm, [1, N])
# (B, N, 2)
cur_act_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_act_feats = cur_acts
F_p = 1
# concat with current activation to feed into output_mlp
# (B*N, U+F')
feat = tf.concat([
h, tf.reshape(cur_act_feats, (B * N, F_p))
], axis=-1)
out = self._fwd_output_mlp(feat, training=training)
# (B, N)
out = tf.reshape(out, (B, N))
if self._fwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def forward(self, batch, training=None):
"""Returns the mask for forward inner network
Parameters
----------
batch : dict
"past_grads" : (B, T, N)
"past_acts" : (B, T, N)
"cur_acts" : (B, N)
"""
past_grads = batch['past_grads']
past_acts = batch['past_acts']
cur_acts = batch['cur_acts']
B = tf.shape(cur_acts)[0]
N = tf.shape(cur_acts)[1]
T = tf.shape(past_grads)[1]
# (B, T, N, Fin)
feat, Fin = self._prepare_input_mlp_input(
past_grads=past_grads,
past_acts=past_acts,
)
feat = tf.reshape(feat, (-1, Fin))
#print("fwd Fin: {}".format(Fin))
# (B * T * N, F)
feat = self._input_mlp.call(feat, training=training)
F = self._input_mlp._filters[-1]
feat = tf.reshape(feat, (B, T, N, F))
# (B, T, N, F')
all_feats, F_p = self.get_set_feature(feat, F)
#print("fwd Fp: {}".format(F_p))
if self._use_batch_summary:
# (T, N, F')
all_feats, F_p = self._get_batch_summary(all_feats, F_p)
# (N, T, F')
seq = tf.transpose(all_feats, (1, 0, 2))
# (N, U)
last_h = self._rnn(seq, training=training)
# (B, N, U)
last_h = tf.tile(last_h[tf.newaxis], [B, 1, 1])
last_h = tf.reshape(last_h, (B * N, self._rnn_units))
else:
# (B, N, T, F')
seq = tf.transpose(all_feats, (0, 2, 1, 3))
seq = tf.reshape(seq, (B * N, T, F_p))
# (B*N, U)
last_h = self._rnn(seq, training=training)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
default_grads = self._get_default_grads(
past_grads=past_grads,
past_acts=past_acts,
cur_acts=cur_acts,
)
# (B, 1, N, cur_F)
cur_act_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=default_grads
)
# (B*N, cur_F)
cur_act_input = tf.reshape(cur_act_input, (-1, cur_F))
# (B*N, F)
cur_act_feats = self._input_mlp.call(
cur_act_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_act_feats = tf.reshape(cur_act_feats, (B, 1, N, F))
# also run set features on cur_acts
# (B, 1, N, F')
cur_act_feats, F_p = self._get_set_feature(
cur_act_feats, F
)
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(cur_acts, axis=-1)
norm = tf.tile(norm, [1, N])
cur_act_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_act_feats = cur_acts
F_p = 1
# prepare inputs for output_mlp
# (B*N, U + F')
feat = tf.concat([
last_h,
tf.reshape(cur_act_feats, (B * N, F_p))
], axis=-1)
out = self._fwd_output_mlp(feat, training=training)
# (B, N)
out = tf.reshape(out, (B, N))
if self._fwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
# TODO: not sure if this is needed
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def first_backward(self, batch, training=None):
"""Returns the mask for backward gradient masking
Parameters
----------
batch : dict
"cur_acts" : (B, N)
"cur_grads" : (B, N)
"""
cur_acts = batch['cur_acts']
cur_grads = batch['cur_grads']
B = tf.shape(cur_acts)[0]
N = tf.shape(cur_acts)[1]
# currently initial state is zeros
h = tf.zeros((B * N, self._rnn_units), dtype=tf.float32)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
# (B, 1, N, cur_F)
cur_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=cur_grads[:, tf.newaxis],
)
# (B*N, cur_F)
cur_input = tf.reshape(cur_input, (B * N, cur_F))
# (B*N, F)
cur_feats = self._input_mlp.call(
cur_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_feats = tf.reshape(cur_feats, (B, 1, N, F))
# also run set features on cur_feats
# (B, 1, N, F')
cur_feats, F_p = self._get_set_feature(
cur_feats, F
)
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(cur_acts, axis=-1)
norm = tf.tile(norm, [1, N])
# (B, N, 2)
cur_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_feats = cur_acts
F_p = 1
if self._normalize_grads:
ngrads, norm = _safe_normalize(cur_grads, axis=-1)
norm = tf.tile(norm, [1, N])
cur_feats = tf.concat([
cur_feats,
ngrads[..., tf.newaxis],
norm[..., tf.newaxis]
], axis=-1)
F_p += 2
else:
cur_feats = tf.concat([
cur_feats, cur_grads[..., tf.newaxis]
], axis=-1)
F_p += 1
# concat with current activation to feed into output_mlp
# (B*N, U+F')
feat = tf.concat([
h, tf.reshape(cur_feats, (B * N, F_p))
], axis=-1)
out = self._bwd_output_mlp(feat, training=training)
# (B, N)
out = tf.reshape(out, (B, N))
if self._bwd_return_grads:
weights = tf.nn.softmax(
tf.reshape(out, (B, N, 4)), axis=-1
)
grads = self._bwd_weighted_grads(
cur_grads=cur_grads,
weights=weights,
)
return grads
if self._bwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def backward(self, batch, training=None):
"""Returns the mask for backward gradient masking
Parameters
----------
batch : dict
"past_grads" : (B, T, N)
"past_acts" : (B, T, N)
"cur_acts" : (B, N)
"cur_grads" : (B, N)
"""
past_grads = batch['past_grads']
past_acts = batch['past_acts']
cur_acts = batch['cur_acts']
cur_grads = batch['cur_grads']
B = tf.shape(cur_acts)[0]
N = tf.shape(cur_acts)[1]
T = tf.shape(past_grads)[1]
# (B, T, N, Fin)
feat, Fin = self._prepare_input_mlp_input(
past_grads=past_grads,
past_acts=past_acts,
)
feat = tf.reshape(feat, (-1, Fin))
#print("bwd Fin: {}".format(Fin))
# (B * T * N, F)
feat = self._input_mlp.call(feat, training=training)
F = self._input_mlp._filters[-1]
feat = tf.reshape(feat, (B, T, N, F))
# (B, T, N, F')
all_feats, F_p = self.get_set_feature(feat, F)
#print("bwd Fp: {}".format(F_p))
if self._use_batch_summary:
# (T, N, F')
all_feats, F_p = self._get_batch_summary(all_feats, F_p)
# (N, T, F')
seq = tf.transpose(all_feats, (1, 0, 2))
# (N, U)
last_h = self._rnn(seq, training=training)
# (B, N, U)
last_h = tf.tile(last_h[tf.newaxis], [B, 1, 1])
last_h = tf.reshape(last_h, (B * N, self._rnn_units))
else:
# (B, N, T, F')
seq = tf.transpose(all_feats, (0, 2, 1, 3))
seq = tf.reshape(seq, (B * N, T, F_p))
# (B*N, U)
last_h = self._rnn(seq, training=training)
# prepare the branch from cur_acts
if self._cur_reuse_branch:
# (B, 1, N, cur_F)
cur_input, cur_F = self._prepare_input_mlp_input(
past_acts=cur_acts[:, tf.newaxis],
past_grads=cur_grads[:, tf.newaxis],
)
# (B*N, cur_F)
cur_input = tf.reshape(cur_input, (-1, cur_F))
# (B*N, F)
cur_feats = self._input_mlp.call(
cur_input,
training=training
)
F = self._input_mlp._filters[-1]
cur_feats = tf.reshape(cur_feats, (B, 1, N, F))
# also run set features on cur_acts
# (B, 1, N, F')
cur_feats, F_p = self._get_set_feature(
cur_feats, F
)
else:
if self._normalize_acts:
# (B, N), (B, 1)
nacts, norm = _safe_normalize(cur_acts, axis=-1)
norm = tf.tile(norm, [1, N])
cur_feats = tf.stack([nacts, norm], axis=-1)
F_p = 2
else:
cur_feats = cur_acts
F_p = 1
if self._normalize_grads:
ngrads, norm = _safe_normalize(cur_grads, axis=-1)
norm = tf.tile(norm, [1, N])
cur_feats = tf.concat([
cur_feats,
ngrads[..., tf.newaxis],
norm[..., tf.newaxis]
], axis=-1)
F_p += 2
else:
cur_feats = tf.concat([
cur_feats, cur_grads[..., tf.newaxis]
], axis=-1)
F_p += 1
# prepare inputs for output_mlp
# (B*N, U + F')
feat = tf.concat([
last_h,
tf.reshape(cur_feats, (B * N, F_p))
], axis=-1)
out = self._bwd_output_mlp(feat, training=training)
if self._bwd_return_grads:
weights = tf.nn.softmax(
tf.reshape(out, (B, N, 4)), axis=-1
)
grads = self._bwd_weighted_grads(
cur_grads=cur_grads,
weights=weights,
)
return grads
# (B, N)
out = tf.reshape(out, (B, N))
if self._bwd_apply_sigmoid:
mask = tf.nn.sigmoid(out)
else:
mask = out
# to avoid gradient underflow in the inner net, make mask
# smaller than `mask_thresh` 0s
# TODO: not sure if this is needed
mask = tf.where(
mask < self._mask_thresh,
tf.zeros_like(mask),
mask,
)
return mask
def _prepare_input_mlp_input(self, past_grads, past_acts):
if self._normalize_acts:
# (B, T, N), (B, T, 1)
nacts, norm = _safe_normalize(past_acts, axis=2)
N = tf.shape(nacts)[-1]
# (B, T, N)
norm = tf.tile(norm, [1, 1, N])
# (B, T, N, 2)
feat = tf.stack([nacts, norm], axis=-1)
F = 2
else:
# (B, T, N, 1)
feat = past_acts[..., tf.newaxis]
F = 1
if self._normalize_grads:
# (B, T, N), (B, T, 1)
ngrads, norm = _safe_normalize(past_grads, axis=2)
N = tf.shape(ngrads)[-1]
# (B, T, N)
norm = tf.tile(norm, [1, 1, N])
# (B, T, N, F+2)
feat = tf.concat([
feat, ngrads[..., tf.newaxis], norm[..., tf.newaxis]
], axis=-1)
F = F + 2
else:
feat = tf.concat([feat, past_grads[..., tf.newaxis]])
F = F + 1
return feat, F
def get_set_feature(self, feat, F):
"""Returns the features extracted based on sets.
Parameters
----------
feat : tf.Tensor, shape (B, T, N, F)
`N` is the dimension for the set
F : int
The number of channels for the input feature
Returns
-------
set_feat : tf.Tensor, shape (B, T, N, F')
F' : int
The number of channels of the output feature
"""
if not self._use_node_set and not self._use_batch_set:
# if coordinate-wise, use original features
return feat, F
if self._use_node_set:
if self._node_set_version == 'v1':
feat, F = self._get_node_set_feature(feat, F)
elif self._node_set_version == 'v2':
feat, F = self._get_node_set_feature_v2(feat, F)
elif self._node_set_version == 'v3':
# (B, T, N, Fn)
feat, F = self._get_node_set_feature_v3(feat, F)
else:
raise ValueError()
if self._use_batch_set:
feat_b, Fb = self._get_batch_set_feature(feat, F)
if self._use_node_set:
feat = tf.concat([feat, feat_b], axis=-1)
F = F + Fb
else:
feat = feat_b
F = Fb
return feat, F
    def _get_node_set_feature(self, feat, F):
        """Returns the features extracted based on sets.

        v1: builds a full NxN pairwise tensor between nodes (shaped by
        `self._dist_fn`), then concatenates each node's own features with
        mean/min/max aggregations over its relation to the *other* nodes.

        Parameters
        ----------
        feat : tf.Tensor, shape (B, T, N, F)
            `N` is the dimension for the set
        F : int
            The number of channels for the input feature

        Returns
        -------
        set_feat : tf.Tensor, shape (B, T, N, F')
        F' : int
            The number of channels of the output feature
        """
        B = tf.shape(feat)[0]
        T = tf.shape(feat)[1]
        # (BT, N, F)
        feat = tf.reshape(feat, (B * T, -1, F))
        # obtain pair-wise feats for nodes
        # (BT, N, 1, F)
        src_feat = feat[:, :, tf.newaxis, :]
        # (BT, 1, N, F)
        dst_feat = feat[:, tf.newaxis, :, :]
        N = tf.shape(feat)[1]
        BT = B * T
        if self._dist_fn == 'diff':
            # (BT, N, N, F)
            dist = dst_feat - src_feat
            self_feat = feat
        elif self._dist_fn == 'dot':
            # (BT, N, N, F)
            dist = dst_feat * src_feat
            self_feat = feat
        elif self._dist_fn == 'norm_dot':
            # cosine-style similarity collapses channels to a single one
            n_dst_feat, _ = _safe_normalize(dst_feat, axis=-1)
            n_src_feat, _ = _safe_normalize(src_feat, axis=-1)
            dist = tf.reduce_sum(
                n_dst_feat * n_src_feat, axis=-1, keepdims=True
            )
            self_feat = tf.ones([BT, N, 1])
            F = 1
        elif self._dist_fn == 'concat':
            # (BT, N, N, F*2)
            dist = tf.concat([
                tf.tile(src_feat, [1, 1, N, 1]),
                tf.tile(dst_feat, [1, N, 1, 1])
            ], axis=-1)
            # (BT, N, F*2)
            self_feat = tf.concat([feat, feat], axis=-1)
            F = F * 2
        elif self._dist_fn == 'none':
            # need to tile the first `N` dimension and not the 2nd
            # (BT, N, N, F)
            dist = tf.tile(dst_feat, [1, N, 1, 1])
            # (BT, N, F)
            self_feat = feat
        else:
            raise ValueError()
        # (N, N, B*T, F)
        dist = tf.transpose(dist, (1, 2, 0, 3))
        # Aggregate over node features
        # Create an "other" mask
        mask = tf.ones((N, N)) - tf.eye(N)
        # (N * (N-1), 2)
        to_take = tf.where(mask > 0.5)
        # (N * (N-1), BT, F)
        gathered = tf.gather_nd(dist, to_take)
        # (N, N-1, BT, F)
        other_feat = tf.reshape(gathered, (N, N - 1, BT, F))
        # So, what are some options after here?
        # I have NxN pairwise distance, and eventually I want to
        # reduce to N and the RNN will share weights among the N
        # nodes.
        # It'd be quite intuitive to apply attention of some form to
        # see what are the other nodes that a node should pay
        # attention to. So feature for RNN input would be
        # concat(self_feat, att(other_feat))
        # I don't want to directly aggregate from NxN -> N without
        # distinguish self-vs-other because, well, seems like a
        # useful distinction.
        # But perhaps I'll start with some hard coded aggregation
        # (BT, N, N-1, F)
        other_feat = tf.transpose(other_feat, (2, 0, 1, 3))
        # (BT, N, F)
        other_mean = tf.reduce_mean(other_feat, axis=2)
        other_min = tf.reduce_min(other_feat, axis=2)
        other_max = tf.reduce_max(other_feat, axis=2)
        # put them together
        agg_feats = [self_feat, other_mean, other_min, other_max]
        n_agg_feats = len(agg_feats)
        # (BT, N, F*n_agg_feats)
        all_feats = tf.concat(agg_feats, axis=-1)
        all_feats = tf.reshape(all_feats, (B, T, N, F * n_agg_feats))
        return all_feats, F * n_agg_feats
    def _get_node_set_feature_v2(self, feat, F):
        """Returns the features extracted based on sets.

        Skip the pairwise distance as in v1 as it takes too much
        memory. Start looking at aggregation stats directly.

        Parameters
        ----------
        feat : tf.Tensor, shape (B, T, N, F)
            `N` is the dimension for the set
        F : int
            The number of channels for the input feature

        Returns
        -------
        set_feat : tf.Tensor, shape (B, T, N, F')
        F' : int
            The number of channels of the output feature
        """
        B = tf.shape(feat)[0]
        T = tf.shape(feat)[1]
        N = tf.shape(feat)[2]
        def _other_stats(self_idx):
            # For node `self_idx`, compute min/max/mean over all the
            # *other* nodes' features (self excluded via the one-hot mask).
            # (N,)
            self_idx_one_hot = tf.one_hot(self_idx, depth=N)
            # (N-1, B, T, F)
            other_feat = tf.gather(
                tf.transpose(feat, (2, 0, 1, 3)),  # (N, B, T, F)
                tf.where(self_idx_one_hot < 0.5)[:, 0],  # (N-1, 1)
            )
            # (B, T, N-1, F)
            other_feat = tf.transpose(other_feat, (1, 2, 0, 3))
            # (B, T, F)
            other_min = tf.reduce_min(other_feat, axis=2)
            other_max = tf.reduce_max(other_feat, axis=2)
            other_mean = tf.reduce_mean(other_feat, axis=2)
            # (B, T, F * 3)
            return tf.concat(
                [other_min, other_max, other_mean], axis=-1
            )
        self_idxs = tf.range(N)
        # map over nodes; NOTE(review): likely slower than a vectorized
        # form but keeps peak memory low — confirm before optimizing
        # (N, B, T, F * 3)
        other_feats = tf.map_fn(
            fn=_other_stats,
            elems=self_idxs,
            fn_output_signature=tf.float32,
        )
        # (B, T, N, F*3)
        other_feats = tf.transpose(other_feats, (1, 2, 0, 3))
        # (B, T, N, F*4)
        all_feats = tf.concat([feat, other_feats], axis=-1)
        return all_feats, F * 4
def _get_node_set_feature_v3(self, feat, F):
"""Returns the features extracted based on sets.
Skip the pairwise distance as in v1 as it takes too much
memory. Start looking at aggregation stats directly.
Skip self vs other and just use self vs all.
Parameters
----------
feat : tf.Tensor, shape (B, T, N, F)
`N` is the dimension for the set
F : int
The number of channels for the input feature
Returns
-------
set_feat : tf.Tensor, shape (B, T, N, F')
F' : int
The number of channels of the output feature
"""
B = tf.shape(feat)[0]
T = tf.shape(feat)[1]
N = tf.shape(feat)[2]
# (B, T, 1, F)
all_min = tf.reduce_min(feat, axis=2, keepdims=True)
all_max = tf.reduce_max(feat, axis=2, keepdims=True)
all_mean = tf.reduce_mean(feat, axis=2, keepdims=True)
# (B, T, 1, F*3)
all_feats = tf.concat([all_min, all_max, all_mean], axis=-1)
# (B, T, N, F*3)
all_feats = tf.tile(all_feats, [1, 1, N, 1])
# (B, T, N, F*4)
all_feats = tf.concat([feat, all_feats], axis=-1)
return all_feats, F * 4
def _get_batch_summary(self, feat, F):
"""Returns some summary of the current batch.
Reduces over the batch dimension
Parameters
----------
feat : tf.Tensor, shape (B, ..., F)
F : int
Returns
-------
summary : tf.Tensor, shape (..., F')
F' : int
"""
bmean = tf.reduce_mean(feat, axis=0)
bmin = tf.reduce_min(feat, axis=0)
bmax = tf.reduce_max(feat, axis=0)
feat = tf.concat([bmean, bmin, bmax], axis=-1)
F = F * 3
return feat, F
def _bwd_weighted_grads(self, cur_grads, weights):
# cur_grads: (B, N)
# weights: (B, N, 4)
# (B, 1, N, 4)
set_grads, F_p = self._get_node_set_feature_v2(
cur_grads[:, tf.newaxis, :, tf.newaxis],
F=1,
)
# (B, N)
weighted_grads = tf.reduce_sum(
weights * set_grads[:, 0], axis=-1
)
return weighted_grads
    def _get_batch_set_feature(self, feat, F):
        """Returns the features extracted based on sets.

        Same idea as `_get_node_set_feature`, but the set is the *batch*
        dimension: pairwise relations between batch instances are built
        and aggregated (mean/min/max over the other instances).

        Parameters
        ----------
        feat : tf.Tensor, shape (B, T, N, F)
            `N` is the dimension for the set
        F : int
            The number of channels for the input feature

        Returns
        -------
        set_feat : tf.Tensor, shape (B, T, N, F')
        F' : int
            The number of channels of the output feature
        """
        B = tf.shape(feat)[0]
        T = tf.shape(feat)[1]
        N = tf.shape(feat)[2]
        # (B, TN, F)
        feat = tf.reshape(feat, (B, -1, F))
        # obtain pair-wise feats for nodes
        # (B, 1, TN, F)
        src_feat = feat[:, tf.newaxis, :, :]
        # (1, B, TN, F)
        dst_feat = feat[tf.newaxis, :, :, :]
        TN = T * N
        if self._dist_fn == 'diff':
            # (B, B, TN, F)
            dist = dst_feat - src_feat
            self_feat = feat
        elif self._dist_fn == 'dot':
            # (B, B, TN, F)
            dist = dst_feat * src_feat
            self_feat = feat
        elif self._dist_fn == 'norm_dot':
            # cosine-style similarity collapses channels to a single one
            n_dst_feat, _ = _safe_normalize(dst_feat, axis=-1)
            n_src_feat, _ = _safe_normalize(src_feat, axis=-1)
            dist = tf.reduce_sum(
                n_dst_feat * n_src_feat, axis=-1, keepdims=True
            )
            self_feat = tf.ones([B, TN, 1])
            F = 1
        elif self._dist_fn == 'concat':
            # (B, B, TN, F*2)
            dist = tf.concat([
                tf.tile(src_feat, [1, B, 1, 1]),
                tf.tile(dst_feat, [B, 1, 1, 1]),
            ], axis=-1)
            # (B, TN, F*2)
            self_feat = tf.concat([feat, feat], axis=-1)
            F = F * 2
        elif self._dist_fn == 'none':
            # (B, B, TN, F)
            dist = tf.tile(dst_feat, [B, 1, 1, 1])
            # (B, TN, F)
            self_feat = feat
        else:
            raise ValueError()
        # Aggregate over node features
        # Create an "other" mask: exclude each instance itself
        mask = tf.ones((B, B)) - tf.eye(B)
        # (B * (B-1), 2)
        to_take = tf.where(mask > 0.5)
        # (B * (B-1), TN, F)
        gathered = tf.gather_nd(dist, to_take)
        # (B, B-1, TN, F)
        other_feat = tf.reshape(gathered, (B, B - 1, TN, F))
        # (B, TN, F)
        other_mean = tf.reduce_mean(other_feat, axis=1)
        other_min = tf.reduce_min(other_feat, axis=1)
        other_max = tf.reduce_max(other_feat, axis=1)
        # put them together
        agg_feats = [self_feat, other_mean, other_min, other_max]
        n_agg_feats = len(agg_feats)
        # (B, TN, F*n_agg_feats)
        all_feats = tf.concat(agg_feats, axis=-1)
        all_feats = tf.reshape(all_feats, (B, T, N, F * n_agg_feats))
        return all_feats, F * n_agg_feats
def _get_default_grads(self, past_grads, past_acts, cur_acts):
if self._random_grads_stddev is not None:
default_grads = tf.random.normal(
shape=(B, 1, N),
stddev=self._random_grads_stddev
)
elif self._use_nearest_grads:
# TODO: can look at other batch instances too
# which would create a (B, B, T, N) diff
# TODO: can limit the time window that we look back
# (B, T, N)
diff = tf.math.abs(cur_acts[:, tf.newaxis] - past_acts)
# (B, N)
closest_idx = tf.math.argmin(diff, axis=1)
# (B * N, 1)
closest_idx = tf.reshape(closest_idx, (B * N, 1))
idx = tf.range(B * N)
# (B * N, 2)
closest_idx = tf.concat(
[closest_idx, idx[..., tf.newaxis]], axis=-1
)
# (T, B, N)
pg = tf.transpose(past_grads, (1, 0, 2))
# (T, B * N)
pg = tf.reshape(pg, (T, B * N))
# (B * N)
closest_grads = tf.gather_nd(pg, closest_idx)
default_grads = tf.reshape(closest_grads, (B, 1, N))
else:
default_grads = tf.zeros((B, 1, N), dtype=tf.float32)
return default_grads
def train_callback(self):
self._input_mlp.train_callback()
if self._fwd_output_mlp is not None:
self._fwd_output_mlp.train_callback()
if self._bwd_output_mlp is not None:
self._bwd_output_mlp.train_callback()
def _safe_normalize(tensor, axis, eps=1e-8):
    """Normalize `tensor` along `axis`; `eps` keeps all-zero slices from
    producing a division by zero. Returns (normalized, norm)."""
    shifted = tensor + eps
    normalized, norm = tf.linalg.normalize(shifted, axis=axis)
    return normalized, norm
| 32,457 | 11,427 |
import pkg_resources
def _resource(relative_path):
    """Return the absolute path of a data file bundled with `tempset`."""
    return pkg_resources.resource_filename('tempset', relative_path)
def get_example_eplus_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/json/eplus_params.json')
def get_example_batch_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/json/batch_params.json')
def get_example_htgsetp_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/json/htgsetp_params.json')
def get_example_htgsetp_params_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/electric/htgsetp_params_electric.csv')
def get_example_clgsetp_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/json/clgsetp_params.json')
def get_example_summary_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/electric/summary.zip')
def get_example_idd_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/eplus/Energy+.idd')
def get_example_electric_idf_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/idf/electric.idf')
def get_example_gas_idf_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/idf/gas.idf')
def get_example_main_idf_file():
    """Convenience wrapper to retrieve file path from package data."""
    return _resource('data/idf/main.idf')
| 1,920 | 595 |
import os
import pathlib
import re
import typing
def fmt_seconds(time_in_sec: int, units: str='auto', round_digits: int=4) -> dict:
    """
    Format time in seconds to a custom string. `units` parameter can be
    one of 'auto', 'seconds', 'minutes', 'hours' or 'days'; 'auto' picks
    the largest unit whose magnitude is at least 1.

    Returns a dict with keys 'units' (chosen unit name) and 'value'
    (magnitude rounded to `round_digits` decimal places).

    Raises ValueError for an unrecognised `units` value. (The original
    silently fell through and crashed later with UnboundLocalError.)
    """
    divisors = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400}
    if units == 'auto':
        if time_in_sec < 60:
            time_measure = 'seconds'
        elif time_in_sec < 3600:
            time_measure = 'minutes'
        elif time_in_sec < 86400:
            time_measure = 'hours'
        else:
            time_measure = 'days'
    elif units in divisors:
        time_measure = units
    else:
        raise ValueError(
            f"units must be 'auto' or one of {sorted(divisors)}, got {units!r}")
    divisor = divisors[time_measure]
    # Avoid dividing by 1 so an integer input stays an integer for seconds,
    # matching the original behaviour exactly.
    magnitude = time_in_sec if divisor == 1 else time_in_sec / divisor
    return {'units': time_measure, 'value': round(magnitude, round_digits)}
def human_filesize(nbytes: int) -> str:
    """
    Convert number of bytes to human-readable filesize string.

    Values below 9.95 in a non-byte unit keep one decimal place; anything
    that rounds below 1000 is shown as a whole number in that unit.
    Source: https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
    """
    scale = 1
    for unit in ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'):
        scaled = nbytes / scale
        if scaled < 9.95 and unit != 'B':
            # Small magnitudes get one decimal place for precision.
            return '{:.1f} {}'.format(scaled, unit)
        if round(scaled) < 1000:
            # Fits in at most three digits, so this unit is the right one.
            return '{} {}'.format(round(scaled), unit)
        scale *= 1024
    # Fell off the end of the unit table: report in the largest unit.
    return '{} {}'.format(round(scaled), unit)
def strip_ws(string: str):
    """
    Collapse every internal run of whitespace to a single space and trim
    leading/trailing whitespace from the result.
    """
    collapsed = re.sub(r'\s+', ' ', string)
    return collapsed.strip()
def ensurelist(val: typing.Any) -> list:
    """
    Guarantee a list: a value that is already a list is returned unchanged
    (same object); anything else is wrapped as a one-element list.
    """
    if isinstance(val, list):
        return val
    return [val]
def listfiles(path: typing.Union[str, pathlib.Path]='.',
              ext=None,
              pattern=None,
              ignore_case=True,
              full_names=False,
              recursive=False,
              include_hidden=True) -> list:
    """
    List files in a given directory.

    path (str): absolute path to search for files in
    ext (str): optional file extension or list of extensions to filter resulting files by
    pattern (str): optional filter resulting files by matching regex pattern
    ignore_case (bool): do not consider case in when filtering for `pattern` parameter
    full_names (bool): return absolute filepaths
    recursive (bool): search recursively down the directory tree
    include_hidden (bool): include hidden files in resulting file list

    Bug fixed: the working directory is now restored in a ``finally`` block,
    so an exception while scanning no longer leaves the process chdir'd into
    `path`.
    """
    owd = os.getcwd()
    os.chdir(path)
    try:
        if recursive:
            fpaths = []
            for root, dpaths, filenames in os.walk('.'):
                for f in filenames:
                    # Drop the leading './' from os.walk-relative paths.
                    fpaths.append(os.path.join(root, f).replace('./', ''))
        else:
            fpaths = [f for f in os.listdir() if os.path.isfile(f)]
        if not include_hidden:
            fpaths = [f for f in fpaths if not os.path.basename(f).startswith('.')]
        if pattern is not None:
            flags = re.IGNORECASE if ignore_case else 0
            fpaths = [f for f in fpaths if re.search(pattern, f, flags)]
        if ext:
            # Accept a single extension or a list, with or without a dot.
            ext_list = ext if isinstance(ext, list) else [ext]
            ext_list = [x.lower() for x in ext_list]
            ext_list = ['.' + x if not x.startswith('.') else x for x in ext_list]
            fpaths = [x for x in fpaths if os.path.splitext(x)[1].lower() in ext_list]
        if full_names:
            path_expand = os.getcwd() if path == '.' else path
            fpaths = [os.path.join(path_expand, f) for f in fpaths]
    finally:
        os.chdir(owd)
    return fpaths
def duplicated(lst: list) -> list:
    """
    Return list of boolean values indicating whether each item in a list is
    a duplicate of a PREVIOUS item in the list. The first occurrence of a
    value is never flagged. Order matters!

    Bug fixed: the original compared positions inside a copy of the list
    with the current element removed; the resulting index shift flagged the
    FIRST occurrence whenever a later duplicate existed (e.g.
    ['a', 'b', 'a'] returned [True, False, True] instead of
    [False, False, True]).

    O(n^2) worst case, but works for unhashable items (e.g. lists) too.
    """
    return [item in lst[:i] for i, item in enumerate(lst)]
# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
from .base_component import *
from .branch_models import *
from .branch_w_internals_models import *
from .branch_wo_internals_models import *
from .branch_wzerolength_models import *
from .node_element_models import *
from .node_models import *
from .const_flow_models import *
from .circulation_pump import *
| 575 | 177 |
from pathlib import Path
from ..base_classes.base_sprite_loader import BaseSpriteLoader
class Big1(BaseSpriteLoader):
    """Sprite regions for the first large-monster sheet."""

    _sprite_sheet_path = Path("Data/Textures/monstersbig01.png")
    # Each chunk is a 128x128-pixel tile on the sheet.
    _chunk_size = 128
    # name -> (left, top, right, bottom) in chunk units — presumably; confirm
    # against BaseSpriteLoader's interpretation of _chunk_map.
    _chunk_map = {
        "cavemanboss": (0, 0, 2, 2),
        "giantspider": (0, 10, 2, 12),
        "queenbee": (0, 14, 2, 16),
    }
class Big2(BaseSpriteLoader):
    """Sprite regions for the second large-monster sheet."""

    _sprite_sheet_path = Path("Data/Textures/monstersbig02.png")
    # Each chunk is a 128x128-pixel tile on the sheet.
    _chunk_size = 128
    # name -> (left, top, right, bottom) in chunk units — presumably; confirm
    # against BaseSpriteLoader's interpretation of _chunk_map.
    _chunk_map = {
        "mummy": (0, 0, 2, 2),
        "anubis": (4, 8, 6, 11),
        "anubis2": (2, 8, 4, 10),
    }
class Big3(BaseSpriteLoader):
    """Sprite regions for the third large-monster sheet."""

    _sprite_sheet_path = Path("Data/Textures/monstersbig03.png")
    # Each chunk is a 128x128-pixel tile on the sheet.
    _chunk_size = 128
    # name -> (left, top, right, bottom) in chunk units — presumably; confirm
    # against BaseSpriteLoader's interpretation of _chunk_map.
    _chunk_map = {
        "lamassu": (0, 0, 2, 2),
        "yeti_king": (0, 4, 2, 6),
        "yeti_queen": (0, 10, 2, 12),
    }
class Big4(BaseSpriteLoader):
    """Sprite regions for the fourth large-monster sheet."""

    _sprite_sheet_path = Path("Data/Textures/monstersbig04.png")
    # Each chunk is a 128x128-pixel tile on the sheet.
    _chunk_size = 128
    # name -> (left, top, right, bottom) in chunk units — presumably; confirm
    # against BaseSpriteLoader's interpretation of _chunk_map.
    _chunk_map = {
        "crabman": (0, 0, 2, 2),
        "lavamander": (10, 4, 12, 6),
        "giantfly": (0, 12, 2, 14),
    }
class Big5(BaseSpriteLoader):
    """Sprite regions for the fifth large-monster sheet."""

    _sprite_sheet_path = Path("Data/Textures/monstersbig05.png")
    # Each chunk is a 128x128-pixel tile on the sheet.
    _chunk_size = 128
    # name -> (left, top, right, bottom) in chunk units — presumably; confirm
    # against BaseSpriteLoader's interpretation of _chunk_map.
    _chunk_map = {
        "ammit": (0, 4, 2, 5),
        "apep": (8, 0, 10, 2),
        "madametusk": (0, 8, 2, 10),
        "giant_frog": (0, 13, 3, 16),
        "minister": (0, 10, 1, 13),
    }
class Big6(BaseSpriteLoader):
    """Sprite regions for the sixth large-monster sheet."""

    _sprite_sheet_path = Path("Data/Textures/monstersbig06.png")
    # Each chunk is a 128x128-pixel tile on the sheet.
    _chunk_size = 128
    # name -> (left, top, right, bottom) in chunk units — presumably; confirm
    # against BaseSpriteLoader's interpretation of _chunk_map.
    _chunk_map = {
        "kingu": (0, 0, 5, 6),
        "waddler": (0, 12, 2, 14),
        "humphead": (0, 14, 4, 16),
    }
| 1,651 | 794 |
# Source of values.txt: 'https://pastebin.com/api/'
# Build the list of Pastebin API value strings (one per line of values.txt,
# taking the left-hand side of each " = " pair with spaces removed).
values = []
with open('values.txt', 'r') as value_file:
    value_lines = value_file.read().split("\n")
for line in value_lines:
    left_side = line.split(" = ")[0]
    values.append(left_side.replace(" ", ""))
# rust_formats.txt is the list of the Enum present in src/paster/format.rs
with open('rust_formats.txt', 'r') as fmt_file:
    fmt_text = fmt_file.read()
fmt_names = fmt_text.replace("\n", "").replace(" ", "").split(",")
# Emit one Rust match arm per enum variant, paired positionally with values.
for i, name in enumerate(fmt_names):
    print("&Format::" + name + " => \"" + values[i] + "\",")
| 590 | 211 |
import regex as re
from math import ceil
from typing import List
from ByteReader import Reader, SeekOrigin as so
from DataTypes import int_32
class rpg_file:
    """Record describing one entry of an encrypted RPG Maker archive."""

    # Byte offset of the encrypted payload inside the archive file.
    offset: int_32 = int_32(0)
    # Payload length in bytes.
    size: int_32 = int_32(0)
    # Per-file XOR key consumed by decrypt().
    key: int_32 = int_32(0)
    # Archive-relative path; backslashes are normalised to '/' on read.
    name: str
def decrypt_name(data: bytes, key: int|int_32) -> str:
    """
    XOR-decrypt an archive entry name.

    Parameters:
        data: encrypted name bytes read from the archive header.
        key: XOR key; plain ints are coerced to int_32 first.

    Returns the decrypted name decoded as UTF-8.
    """
    # isinstance() is the idiomatic (and subclass-friendly) type test; the
    # original used `type(key) != int_32`.
    if not isinstance(key, int_32):
        key = int_32(key)
    key.to_unsigned()
    decrypted_name: bytes = b""
    key_bytes = key.to_bytes()
    j = 0
    for i in range(len(data)):
        # The key repeats every 4 bytes of the name.
        if j == 4:
            j = 0
        # Positions beyond the key's byte length XOR with 0 — presumably
        # int_32.to_bytes() may return fewer than 4 bytes; confirm.
        decrypted_name += int_32(data[i] ^ (key_bytes[j] if j < len(key_bytes) else 0)).to_bytes()
        j += 1
    return decrypted_name.decode("utf-8")
def read_archive(file_path: str, match_str: str = None) -> List[rpg_file]:
    """Parse the header table of an encrypted archive into rpg_file records.

    Parameters:
        file_path: path of the archive to read.
        match_str: optional case-insensitive regex; only entries whose
            decrypted name matches are returned.

    Returns the list of parsed entries. Parsing stops at the first entry
    with a negative offset, a length running past the end of the file, or a
    name that fails to decrypt.
    """
    reader: Reader = Reader(file_path)
    # Skip the 8-byte file signature, then derive the header key from the
    # stored seed (seed * 9 + 3 — format-specific; confirm against the
    # archive spec).
    reader.seek(8, so.Begin)
    key = reader.read_int32().to_unsigned()
    key *= 9
    key += 3
    files: List[rpg_file] = []
    while(1):
        file = rpg_file()
        # Each header field is XOR-obfuscated with the header key.
        file.offset = int_32(reader.read_int32() ^ key)
        file.size = int_32(reader.read_int32() ^ key)
        file.key = int_32(reader.read_int32() ^ key).to_unsigned()
        length = int_32(reader.read_int32() ^ key)
        # End-of-table sentinel / corrupt-length guard.
        if file.offset < 0 or reader._p + length >= len(reader._data): break
        try:
            file.name = decrypt_name(reader.read_bytes(length), key).replace("\\", "/")
            if match_str is not None and not re.match(match_str, file.name, flags=re.IGNORECASE): continue
            files.append(file)
        except Exception as e:
            # A name that fails to decode marks the end of usable entries.
            print('skipping: ' + str(e))
            break
    return files
def decrypt(files: List[rpg_file], file_location: str, save_location: str) -> None:
    """
    Decrypt each archive entry and write it into *save_location*.

    Parameters:
        files: entries previously parsed by read_archive().
        file_location: path of the encrypted archive.
        save_location: directory the decrypted files are written to (only
            the basename of each entry is kept).
    """
    reader = Reader(file_location)
    for file in files:
        file_name = file.name.split("/")[-1]  # basename only
        reader.seek(file.offset, so.Begin)
        data: bytes = reader.read_bytes(file.size)
        # bytearray avoids the quadratic cost of repeated bytes concatenation.
        decrypted_file = bytearray()
        key = file.key.to_unsigned()
        # Zero-pad in case to_bytes() yields fewer than 4 bytes.
        key_bytes = key.to_bytes() + b'\x00\x00\x00\x00'
        j = 0
        for i in range(len(data)):
            if j == 4:
                # The key evolves every 4 payload bytes: key = key * 7 + 3.
                j = 0
                key *= 7
                key += 3
                key_bytes = key.to_bytes() + b'\x00\x00\x00\x00'
            decrypted_file.append(data[i] ^ key_bytes[j])
            j += 1
        # Context manager closes the handle (the original leaked it via a
        # bare open(...).write(...)).
        with open(f"{save_location.rstrip('/')}/{file_name}", "wb") as out:
            out.write(bytes(decrypted_file))
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping, Optional, Union
import numpy as np
import torch
from transformers import PreTrainedTokenizer
from .assembler import Assembler
from .qa_dataset import QADataset
from .rank_dataset import RankGroupDataset
from .tsv_dataset import TsvCollection
from .utils import (
make_targets_mlm_inputs,
make_targets_ntp_inputs,
mask_difference,
mask_whole_word,
)
log = logging.getLogger(__name__)
class MtlSepDataset(RankGroupDataset):
    """Multi-task dataset with SEPARATE encoder inputs for the
    summarization (query-generation) task and the ranking task.

    Each item covers one positive passage plus ``num_neg`` negatives; the
    queries double as decoder inputs and as language-model labels.
    """

    def __init__(
        self,
        array: Mapping[str, np.ndarray],
        tokenizer: PreTrainedTokenizer,
        query_col: TsvCollection,
        doc_col: TsvCollection,
        num_dup: int,
        num_neg: int,
        decoder_start_token_id: int,
        src_max_length: int,
        tgt_max_length: int,
        sample: Optional[Union[float, int]] = None,
        sort: Optional[str] = None,
        max_length: Optional[int] = None,
        summarizer_prefix_token_ids: Optional[str] = None,
        rank_prefix_token_ids: Optional[str] = None,
        pad_to_max_length: bool = True,
        **kwargs: Any,
    ):
        """Build assemblers for both tasks on top of RankGroupDataset."""
        # Surface (rather than silently drop) unrecognized arguments.
        if kwargs:
            log.warning(f"Unused parameters: {kwargs}")
        super().__init__(
            array,
            tokenizer,
            query_col,
            doc_col,
            num_dup,
            num_neg,
            sample,
            sort,
            max_length,
            summarizer_prefix_token_ids,
            pad_to_max_length,
        )
        self._pas_pad = tokenizer.pad_token_id
        # Encoder-side assembler for the summarization task.
        self._sum_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=src_max_length,
            prefix_token_ids=summarizer_prefix_token_ids,
            pad_to_max_length=pad_to_max_length,
        )
        # Encoder-side assembler for the ranking task.
        self._rank_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=src_max_length,
            prefix_token_ids=rank_prefix_token_ids,
            pad_to_max_length=pad_to_max_length,
        )
        # Decoder inputs are the queries prefixed with the decoder start
        # token (passed to the Assembler as decoded text).
        decoder_start_token = tokenizer.decode(decoder_start_token_id)
        self._decoder_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=tgt_max_length,
            prefix_token_ids=decoder_start_token,
            pad_to_max_length=False,
            add_special_tokens=False,
            return_token_type_ids=None,
        )
        # Labels are the queries suffixed with EOS.
        self._label_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=tgt_max_length,
            suffix_token_ids=tokenizer.eos_token,
            pad_to_max_length=False,
            add_special_tokens=False,
            return_token_type_ids=None,
        )

    def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
        """Return one group of (1 + num_neg) examples as 2-D tensors."""
        qid = self._array["qid"][index]
        did = self._array["did"][index]
        label = self._array["label"][index]
        assert qid.shape == (self._num_neg + 1,)
        assert did.shape == (self._num_neg + 1,)
        assert label.shape == (self._num_neg + 1,)
        queries = [self._query_col[x] for x in qid]
        passages = [self._doc_col[x] for x in did]
        sum_inputs = self._sum_assembler.batch_assemble(passages)
        sum_decoder_inputs = self._decoder_assembler.batch_assemble(queries)
        lm_labels = self._label_assembler.batch_assemble(queries)
        # Mask padded label positions with -100, the conventional
        # ignore-index for cross-entropy losses.
        lm_labels["input_ids"].masked_fill_(~lm_labels["attention_mask"].bool(), -100)
        # Ranking inputs use only the passages (no query text appended here).
        rank_inputs = self._rank_assembler.batch_assemble(passages)
        rank_decoder_inputs = self._decoder_assembler.batch_assemble(queries)
        item: Dict[str, Any] = {
            "qids": torch.tensor([int(x) for x in qid]),
            "dnos": torch.tensor([int(x) for x in did]),
            "sum_input_ids": sum_inputs["input_ids"],
            "sum_attention_mask": sum_inputs["attention_mask"],
            "sum_decoder_input_ids": sum_decoder_inputs["input_ids"],
            "sum_decoder_attention_mask": sum_decoder_inputs["attention_mask"],
            "rank_input_ids": rank_inputs["input_ids"],
            "rank_attention_mask": rank_inputs["attention_mask"],
            "rank_decoder_input_ids": rank_decoder_inputs["input_ids"],
            "rank_decoder_attention_mask": rank_decoder_inputs["attention_mask"],
            "lm_labels": lm_labels["input_ids"],
        }
        # Sanity-check that every tensor is (group, sequence) shaped.
        assert item["sum_input_ids"].dim() == 2
        assert item["sum_attention_mask"].dim() == 2
        assert item["sum_decoder_input_ids"].dim() == 2
        assert item["sum_decoder_attention_mask"].dim() == 2
        assert item["rank_input_ids"].dim() == 2
        assert item["rank_attention_mask"].dim() == 2
        assert item["rank_decoder_input_ids"].dim() == 2
        assert item["rank_decoder_attention_mask"].dim() == 2
        assert item["lm_labels"].dim() == 2
        return item
class MtlMixedDataset(RankGroupDataset):
    """Multi-task dataset mixing query generation (optionally with masking)
    with ranking, plus an optional auxiliary QA task.

    The query-generation example is resampled from a sufficiently relevant
    group when the current group's positive is below
    ``min_rel_for_qgen``.
    """

    def __init__(
        self,
        array: Mapping[str, np.ndarray],
        tokenizer: PreTrainedTokenizer,
        query_col: TsvCollection,
        doc_col: TsvCollection,
        num_dup: int,
        num_neg: int,
        src_max_length: int,
        sample: Optional[Union[float, int]] = None,
        sort: Optional[str] = None,
        max_length: Optional[int] = None,
        summarizer_prefix_token_ids: Optional[str] = None,
        rank_prefix_token_ids: Optional[str] = None,
        pad_to_max_length: bool = True,
        qa_data: Optional[Union[QADataset, str]] = None,
        qa_prefix: str = "",
        mask_whole_word_prob: float = 0.0,
        mask_qgen_query: bool = False,
        mask_query_from_passage: float = 0.0,
        min_rel_for_qgen: int = 1,
        **kwargs: Any,
    ):
        """Configure masking knobs, assemblers and the optional QA source."""
        # Surface (rather than silently drop) unrecognized arguments.
        if kwargs:
            log.warning(f"Unused params {kwargs}")
        super(MtlMixedDataset, self).__init__(
            array,
            tokenizer,
            query_col,
            doc_col,
            num_dup,
            num_neg,
            sample,
            sort,
            max_length,
            summarizer_prefix_token_ids,
            pad_to_max_length,
        )
        self._pas_pad = tokenizer.pad_token_id
        self._tokenizer = tokenizer
        self._mask_whole_word_prob = mask_whole_word_prob
        self._mask_query_from_passage = mask_query_from_passage
        self._mask_qgen_query = mask_qgen_query
        self._min_rel_for_qgen = min_rel_for_qgen
        # Encoder-side assembler for the summarization task.
        self._sum_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=src_max_length,
            prefix_token_ids=summarizer_prefix_token_ids,
            pad_to_max_length=pad_to_max_length,
        )
        # Encoder-side assembler for the ranking task.
        self._rank_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=src_max_length,
            prefix_token_ids=rank_prefix_token_ids,
            pad_to_max_length=pad_to_max_length,
        )
        # Optional auxiliary QA examples; note _qa is only set when
        # qa_data is given — __getitem__ checks hasattr before using it.
        if qa_data is not None:
            if isinstance(qa_data, str):
                self._qa = QADataset(
                    path=qa_data,
                    tokenizer=tokenizer,
                    max_length=max_length,
                    prefix=qa_prefix,
                )
            else:
                self._qa = qa_data

    def __getitem__(self, index: int) -> Dict[str, Any]:
        """Return one group with query-generation, ranking and (optionally)
        QA inputs."""
        qid = self._array["qid"][index]
        did = self._array["did"][index]
        label = self._array["label"][index]
        assert qid.shape == (self._num_neg + 1,)
        assert did.shape == (self._num_neg + 1,)
        assert label.shape == (self._num_neg + 1,)
        if label[0] < self._min_rel_for_qgen:
            # Positive not relevant enough for query generation: resample a
            # group whose positive passes the threshold.
            idx = (self._array["label"][:, 0] >= self._min_rel_for_qgen).nonzero()[0]
            sample = np.random.choice(idx)
            qgen_queries = [self._query_col[x] for x in self._array["qid"][sample]]
            passages = [self._doc_col[x] for x in self._array["did"][sample]]
            qgen_passages = [self._doc_col[x] for x in self._array["did"][sample]]
            sum_input_weights = torch.tensor(
                self._array["label"][sample][:1], dtype=torch.float
            )
        else:
            qgen_queries = [self._query_col[x] for x in qid]
            passages = [self._doc_col[x] for x in did]
            qgen_passages = [self._doc_col[x] for x in did]
            sum_input_weights = torch.tensor(label[:1], dtype=torch.float)
        # Optionally mask passage tokens that also occur in the query.
        if self._mask_query_from_passage > 0.0:
            qgen_passages = [
                mask_difference(self._tokenizer, x, y, self._mask_query_from_passage)
                for x, y in zip(qgen_passages, qgen_queries)
            ]
        # Optionally apply whole-word masking to the passages.
        if self._mask_whole_word_prob > 0:
            qgen_passages = [
                mask_whole_word(self._tokenizer, x, self._mask_whole_word_prob)
                for x in qgen_passages
            ]
        # NOTE(review): self._assembler is not defined in this class —
        # presumably inherited from RankGroupDataset; if not, these calls
        # likely meant self._sum_assembler. Confirm.
        if self._mask_qgen_query:
            # MLM-style targets over the (masked) query.
            sum_inputs = make_targets_mlm_inputs(
                self._assembler,
                self._tokenizer,
                passages[:1],
                qgen_queries[:1],
                qgen_passages[:1],
            )
        else:
            # Next-token-prediction-style targets.
            sum_inputs = make_targets_ntp_inputs(
                self._assembler,
                self._tokenizer,
                passages[:1],
                qgen_queries[:1],
                qgen_passages[:1],
            )
        rank_queries = [self._query_col[x] for x in qid]
        rank_passages = [self._doc_col[x] for x in did]
        rank_inputs = self._rank_assembler.batch_assemble(rank_passages, rank_queries)
        item: Dict[str, Any] = {
            "qids": torch.tensor([int(x) for x in qid]),
            "dnos": torch.tensor([int(x) for x in did]),
            "sum_input_ids": sum_inputs["input_ids"],
            "sum_token_type_ids": sum_inputs["token_type_ids"],
            "sum_attention_mask": sum_inputs["attention_mask"],
            "sum_input_weights": sum_input_weights,
            "rank_input_ids": rank_inputs["input_ids"],
            "rank_token_type_ids": rank_inputs["token_type_ids"],
            "rank_attention_mask": rank_inputs["attention_mask"],
            "lm_labels": sum_inputs["lm_labels"],
        }
        assert item["sum_input_ids"].dim() == 2
        # NTP inputs carry a 3-D (causal-style) attention mask; MLM inputs
        # a plain 2-D one.
        if self._mask_qgen_query:
            assert item["sum_attention_mask"].dim() == 2
        else:
            assert item["sum_attention_mask"].dim() == 3
        assert item["sum_token_type_ids"].dim() == 2
        assert item["sum_input_weights"].dim() == 1
        assert item["rank_input_ids"].dim() == 2
        assert item["rank_token_type_ids"].dim() == 2
        assert item["rank_attention_mask"].dim() == 2
        assert item["lm_labels"].dim() == 2
        if hasattr(self, "_qa"):
            # Attach the QA example keyed by the positive query id.
            pos_qid = qid[0]
            qa_inputs = {f"qa_{k}": v for k, v in self._qa.by_qid(pos_qid).items()}
            item.update(qa_inputs)
        return item
class MtlCatDataset(RankGroupDataset):
    """Multi-task dataset whose ranking input CONCATENATES passage and
    query (both are passed to the rank assembler), while summarization uses
    an encoder-decoder layout like MtlSepDataset.
    """

    def __init__(
        self,
        array: Mapping[str, np.ndarray],
        tokenizer: PreTrainedTokenizer,
        query_col: TsvCollection,
        doc_col: TsvCollection,
        num_dup: int,
        num_neg: int,
        src_max_length: int,
        tgt_max_length: int,
        decoder_start_token_id: int,
        sample: Optional[Union[float, int]] = None,
        sort: Optional[str] = None,
        max_length: Optional[int] = None,
        summarizer_prefix_token_ids: Optional[str] = None,
        rank_prefix_token_ids: Optional[str] = None,
        pad_to_max_length: bool = True,
        **kwargs: Any,
    ):
        """Build the four assemblers on top of RankGroupDataset."""
        # Surface (rather than silently drop) unrecognized arguments.
        if kwargs:
            log.warning(f"Unused params {kwargs}")
        super().__init__(
            array,
            tokenizer,
            query_col,
            doc_col,
            num_dup,
            num_neg,
            sample,
            sort,
            max_length,
            summarizer_prefix_token_ids,
            pad_to_max_length,
        )
        self._pas_pad = tokenizer.pad_token_id
        self._tokenizer = tokenizer
        # Encoder-side assembler for the summarization task.
        self._sum_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=src_max_length,
            prefix_token_ids=summarizer_prefix_token_ids,
            pad_to_max_length=pad_to_max_length,
        )
        # Encoder-side assembler for the ranking task.
        self._rank_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=src_max_length,
            prefix_token_ids=rank_prefix_token_ids,
            pad_to_max_length=pad_to_max_length,
        )
        # Decoder inputs: queries prefixed with the decoder start token.
        decoder_start_token = tokenizer.decode(decoder_start_token_id)
        self._decoder_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=tgt_max_length,
            prefix_token_ids=decoder_start_token,
            pad_to_max_length=False,
            add_special_tokens=False,
            return_token_type_ids=None,
        )
        # Labels: queries suffixed with EOS.
        self._label_assembler = Assembler(
            tokenizer=tokenizer,
            max_length=tgt_max_length,
            suffix_token_ids=tokenizer.eos_token,
            pad_to_max_length=False,
            add_special_tokens=False,
            return_token_type_ids=None,
        )

    def __getitem__(self, index: int) -> Dict[str, Any]:
        """Return one group of (1 + num_neg) examples as 2-D tensors."""
        qid = self._array["qid"][index]
        did = self._array["did"][index]
        label = self._array["label"][index]
        assert qid.shape == (self._num_neg + 1,)
        assert did.shape == (self._num_neg + 1,)
        assert label.shape == (self._num_neg + 1,)
        queries = [self._query_col[x] for x in qid]
        passages = [self._doc_col[x] for x in did]
        sum_inputs = self._sum_assembler.batch_assemble(passages)
        sum_decoder_inputs = self._decoder_assembler.batch_assemble(queries)
        lm_labels = self._label_assembler.batch_assemble(queries)
        # Mask padded label positions with -100, the conventional
        # ignore-index for cross-entropy losses.
        lm_labels["input_ids"].masked_fill_(~lm_labels["attention_mask"].bool(), -100)
        # Ranking input pairs each passage with its query (concatenated by
        # the assembler).
        rank_passages = [self._doc_col[x] for x in did]
        rank_inputs = self._rank_assembler.batch_assemble(rank_passages, queries)
        item: Dict[str, Any] = {
            "qids": torch.tensor([int(x) for x in qid]),
            "dnos": torch.tensor([int(x) for x in did]),
            "sum_input_ids": sum_inputs["input_ids"],
            "sum_attention_mask": sum_inputs["attention_mask"],
            "sum_decoder_input_ids": sum_decoder_inputs["input_ids"],
            "sum_decoder_attention_mask": sum_decoder_inputs["attention_mask"],
            "rank_input_ids": rank_inputs["input_ids"],
            "rank_attention_mask": rank_inputs["attention_mask"],
            "lm_labels": lm_labels["input_ids"],
        }
        # Sanity-check that every tensor is (group, sequence) shaped.
        assert item["sum_input_ids"].dim() == 2
        assert item["sum_attention_mask"].dim() == 2
        assert item["sum_decoder_input_ids"].dim() == 2
        assert item["sum_decoder_attention_mask"].dim() == 2
        assert item["rank_input_ids"].dim() == 2
        assert item["rank_attention_mask"].dim() == 2
        assert item["lm_labels"].dim() == 2
        return item
| 14,919 | 4,856 |
import time
import datetime
from dateutil.parser import parse
class JobStatus:
    """Snapshot of a scheduler job's identity, state and timing.

    Timestamps arrive as scheduler strings; the sentinel "Unknown" is
    normalised to "-", which every accessor treats as "not available".
    The Java-style getters are kept for backward compatibility with
    existing callers.
    """

    def __init__(self, queue_id, status, walltime, number_nodes, submit_time, start_time, end_time):
        self.queue_id = queue_id
        self.status = status
        self.walltime = walltime
        self.number_nodes = number_nodes
        # The scheduler reports missing timestamps as "Unknown".
        self.submit_time = "-" if submit_time == "Unknown" else submit_time
        self.start_time = "-" if start_time == "Unknown" else start_time
        self.end_time = "-" if end_time == "Unknown" else end_time

    def getQueueId(self):
        return self.queue_id

    def getStatus(self):
        return self.status

    def getWalltime(self):
        return self.walltime

    def getNumberNodes(self):
        return self.number_nodes

    @staticmethod
    def _elapsed(earlier, later):
        """Seconds between two timestamp strings (as str), or "-" if
        either timestamp is unavailable. Shared by getQueueTime and
        getRunTime, which previously duplicated this logic."""
        if earlier == "-" or later == "-":
            return "-"
        t0 = time.mktime(parse(earlier).timetuple())
        t1 = time.mktime(parse(later).timetuple())
        return str(t1 - t0)

    def getQueueTime(self):
        """Seconds the job waited between submission and start, or "-"."""
        return self._elapsed(self.submit_time, self.start_time)

    def getRunTime(self):
        """Seconds the job ran between start and end, or "-"."""
        return self._elapsed(self.start_time, self.end_time)

    def toString(self):
        """One-line summary: id status walltime nodes queue-time run-time."""
        return self.queue_id + " " + self.status + " " + self.walltime + " " + self.number_nodes + " " + self.getQueueTime() + " " + self.getRunTime()
| 1,446 | 458 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__Author__='eamon'
'threading multithreading '
import time,threading
def loop():
    """Count to five at one-second intervals, reporting the thread name."""
    name = threading.current_thread().name
    print('thread %s is running ...' % name)
    n = 0
    while n < 5:
        n += 1
        print('thread %s >> %s ' % (name, n))
        time.sleep(1)
    print('thread %s ended.' % name)
def testThread():
    """Spawn a named worker thread running loop() and wait for it to end."""
    print('thread %s is running..' % threading.current_thread().name)
    worker = threading.Thread(target=loop, name='LoopThread')
    worker.start()
    worker.join()
    print('thread % s ended.' % threading.current_thread().name)
# testThread()
# Shared account balance mutated concurrently by the worker threads below.
balance =0
def change_it(n):
    """Deposit then withdraw *n*; the net effect is zero when serialized."""
    global balance
    balance += n
    balance -= n
# Serializes change_it() so the read-modify-write on balance stays atomic.
lock = threading.Lock()
def run_thread(n):
    """Call change_it() 100000 times, holding the module lock each time."""
    for _ in range(100000):
        # `with lock:` acquires and releases exactly like the original
        # acquire / try / finally / release sequence.
        with lock:
            change_it(n)
def testMultiThreadDanger():
    """Run two workers concurrently and print the (expected 0) balance."""
    workers = [
        threading.Thread(target=run_thread, args=(5,)),
        threading.Thread(target=run_thread, args=(8,)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print(balance)
# testMultiThreadDanger()
import threading,multiprocessing
# NOTE(review): this redefines the loop() declared earlier in the file; any
# call after this point gets the busy-spin version below.
def loop():
    # Busy-spin forever to saturate one CPU core.
    x=0
    while True:
        x=x^1
def testRunfullCPU():
    """Start one busy-spinning thread per CPU core (they never terminate)."""
    cpu_total = multiprocessing.cpu_count()
    print('cpu num:', cpu_total)
    for _ in range(cpu_total):
        threading.Thread(target=loop).start()
| 1,308 | 546 |
#!/usr/bin/env python
import argparse
import sys
import lcm_adapter as lcm
# Python 2 script: `print` is used as a statement throughout this file.
# argv[1] must point at the directory of generated LCM message modules.
if len(sys.argv) < 2:
    print """The first argument to this command must be the directory
of the generated python files for LCM messages."""
    sys.exit(1)
# Discover every LCM message class generated under the given directory.
MESSAGE_CLASSES = lcm.get_all_lcm_message_classes(sys.argv[1])
def set_up_subscriptions(lcm_connection, args):
    """Sets up all of the LCMSubscription objects for the channels indicated in
    the arguments.

    The output sink depends on args.format: a binary EventLog for 'lcm',
    otherwise a text file (or stdout) that receives 'csv' rows or
    'pretty' human-readable dumps.
    """
    if args.format == "lcm":
        lcm_logger = lcm.EventLog(args.logfile, mode='w', overwrite=True)
    else:
        # Deliberately leaked; we'll let the GC handle the close() for us.
        logfile = (sys.stdout if args.logfile is None
                   else open(args.logfile, 'w'))
        write_csv_headers(logfile)

    def handle_message(channel, message):
        # Per-message callback shared by every subscribed channel; the
        # sink objects above are captured in this closure.
        decoded = try_decode(message)
        if decoded is None:
            print "Received unreadable message on channel", channel
            return
        if args.format == "lcm":
            lcm_logger.write_event(decoded.timestamp, channel, message)
        elif args.format == "csv":
            # Columns: channel, type name, then slot values in sorted slot
            # order (matching write_csv_headers).
            fields = ([channel, type(decoded).__name__] +
                      [getattr(decoded, slot)
                       for slot in sorted(decoded.__slots__)])
            logfile.write(",".join(['"%s"' % f for f in fields]) + "\n")
        elif args.format == "pretty":
            lcm.debug_print_msg(decoded, logfile)
        else:
            # argparse `choices` should make this unreachable.
            assert False
    for channel in args.channel:
        print "subscribing to channel", channel
        lcm_connection.subscribe(channel, handle_message)
def try_decode(message):
    """Try to decode the message with each known message class; return
    the first successful decode, or None."""
    for cls in MESSAGE_CLASSES:
        try:
            return cls.decode(message)
        except ValueError:
            # The message was probably of a different type; keep trying.
            continue
    return None
def write_csv_headers(logfile):
    """Write header lines in the CSV file with the schema of the messages
    involved: blank channel column, class name, then sorted slot names."""
    for cls in MESSAGE_CLASSES:
        columns = ["", cls.__name__] + sorted(cls.__slots__)
        logfile.write(",".join('"%s"' % c for c in columns) + "\n")
def main(argv):
    """Parse CLI options, subscribe to the requested channels, then pump
    the LCM connection forever."""
    parser = argparse.ArgumentParser(description='Log some local LCM traffic.')
    parser.add_argument(
        '-l', '--logfile', metavar='LOGFILE', type=str, default=None,
        help="File name for lcm log; default is stdout.")
    parser.add_argument(
        '-f', '--format', default='lcm', choices=('lcm', 'csv', 'pretty'),
        help="Log format: 'lcm' (binary), 'csv', or 'pretty' (human-readable)")
    parser.add_argument('channel', nargs="+", metavar='CHANNEL')
    # argv[0] is the program name and argv[1] the LCM message directory
    # consumed at import time, so option parsing starts at argv[2].
    options = parser.parse_args(argv[2:])
    connection = lcm.LCM()
    set_up_subscriptions(connection, options)
    while True:
        connection.handle()
# Entry point: argv[1] must be the LCM message directory (validated above).
if __name__ == "__main__":
    main(sys.argv)
| 3,024 | 925 |
def merge_sort(lst):
    """Sort *lst* in place with recursive merge sort (stable)."""
    if len(lst) <= 1:
        return
    middle = len(lst) // 2
    left_half = lst[:middle]
    right_half = lst[middle:]
    merge_sort(left_half)
    merge_sort(right_half)
    # Merge the two sorted halves back into lst; ties favour the left half,
    # which keeps the sort stable.
    i = j = k = 0
    while i < len(left_half) and j < len(right_half):
        if left_half[i] <= right_half[j]:
            lst[k] = left_half[i]
            i += 1
        else:
            lst[k] = right_half[j]
            j += 1
        k += 1
    # Exactly one of the halves still has elements; copy the remainder.
    lst[k:] = left_half[i:] + right_half[j:]
def merge(left, right, lst):
    """Merge the sorted sublists *left* and *right* into *lst* in order
    (stable: equal elements keep the left sublist first)."""
    i = j = k = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            lst[k] = left[i]
            i += 1
        else:
            lst[k] = right[j]
            j += 1
        k += 1
    # One side is exhausted; copy whatever remains from the other.
    remainder = right[j:] if i == len(left) else left[i:]
    for value in remainder:
        lst[k] = value
        k += 1
| 941 | 317 |
from django.db import models
# Create your models here.
class Teacher(models.Model):
    """Teacher account with contact details and organisation."""

    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email = models.EmailField()
    org_name = models.CharField(max_length=255)
    # NOTE(review): storing a raw password in a CharField keeps it in
    # plaintext — consider Django's auth user model / password hashing.
    password = models.CharField(max_length=255)
class Student(models.Model):
    """Student account linked to a class via class_code."""

    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email = models.EmailField()
    # Code of the class the student joined.
    class_code = models.CharField(max_length=255)
    # NOTE(review): storing a raw password in a CharField keeps it in
    # plaintext — consider Django's auth user model / password hashing.
    password = models.CharField(max_length=255)
| 572 | 192 |
"""
Zero Matrix:
Write an algorithm such that
if an element in an MxN matrix is 0,
its entire row and column are set to 0.
Clarifying Questions and Assumptions:
- so we have a rectangular matrix? yes
- just integers? yes
- and what are the inputs to the function?
- are we given the indices of a single element ---> use a helper function
- or are we given the entire matrix, and expected to do this
over the whole matrix? yes
- is the input mutable? no ---> otherwise it'll be ambiguous about
- which rows and cols to "zeroify" as the function goes on
- are we guaranteed to have at least 1 row with at least 1 element? no
- are we allowed to use NumPy? no, you don't really need to
- what is the return value --> a matrix?
Intuition:
- traverse the 2D matrix
Approach Ideas:
test input =
[
[0, 5 ,6, 7, 3, 1, -5],
[8, 8, 0, 6, 0, 2, 4],
[5, 0, 3, 6, 7, 3, -3]
]
====>
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
]
---------------------------
[
[0, 5 ,6, 7, 3, 1, -5],
[8, 8, 0, 6, 0, 2, 4],
[5, 5, 3, 6, 7, 3, -3]
]
rows = 0, 1
cols = 0, 2, 3
====>
[
[0, 5 ,6, 7, 3, 1, -5],
[8, 8, 0, 6, 0, 2, 4],
[5, 5, 3, 6, 7, 3, -3]
]
zeroes = [
(0, 0), (1, 2), (1, 4)
]
1. Brute Force - Start with 0's, Try to Keep Elements
- make a MxN matrix of all zeroes
- check rows
- if the corresponding row in the input contains a 0,
leave the output as is
- otherwise, copy over the row
2. Brute Force idea 2 --> can be in-place or out of place
- record locations of all the 0s
- iterate back over the array
- if we hit one of those locations from before,
"zeroify" that row and column
- return the output
Edge Cases:
- empty array (check for that)
"""
from typing import List
def find_zeroes(matrix):
    """Return (row_indices, col_indices) — the sets of row and column
    indices that contain at least one zero in *matrix*.

    Sets de-duplicate on their own, so the original's `not in` membership
    checks before each add were redundant and have been removed.
    """
    rows, cols = set(), set()
    for row_ndx, row in enumerate(matrix):
        for col_ndx, element in enumerate(row):
            if element == 0:
                rows.add(row_ndx)
                cols.add(col_ndx)
    return rows, cols
def zeroify_row(matrix, zero_row_ndx):
    """Overwrite every element of row *zero_row_ndx* with 0, in place."""
    row = matrix[zero_row_ndx]
    # Slice assignment mutates the existing row object (no rebinding).
    row[:] = [0] * len(row)
def zeroify_col(matrix, zero_col_ndx):
    """Overwrite every element of column *zero_col_ndx* with 0, in place."""
    for row in matrix:
        row[zero_col_ndx] = 0
def zero_matrix(matrix: List[List[int]]) -> List[List[int]]:
    """
    Zero out, in place, every row and column of *matrix* that contains a 0,
    and return the matrix.

    Time: O(M*N)    Space: O(M + N) for the row/column index sets.

    Bug fixed: the original guarded column zeroing with
    ``if len(cols) < len(matrix)``, comparing the COUNT of zero-bearing
    columns against the number of ROWS. Whenever the zero columns were at
    least as numerous as the rows, columns were silently left untouched —
    e.g. [[0, 0, 0, 5], [1, 2, 3, 4]] kept its second row unchanged.
    This version is also self-contained (no helper dependencies).
    """
    # Pass 1: record which rows/columns contain at least one zero.
    zero_rows, zero_cols = set(), set()
    for r, row in enumerate(matrix):
        for c, element in enumerate(row):
            if element == 0:
                zero_rows.add(r)
                zero_cols.add(c)
    # Pass 2: zeroify the marked rows (slice assignment keeps row objects).
    for r in zero_rows:
        matrix[r][:] = [0] * len(matrix[r])
    # Pass 3: zeroify the marked columns unconditionally.
    for c in zero_cols:
        for row in matrix:
            row[c] = 0
    return matrix
| 4,401 | 1,586 |
"""
batch_generator.py
"""
import os, random
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.utils import to_categorical as tocat_fn
# NOTE(review): Pillow reads this flag from ImageFile
# (ImageFile.LOAD_TRUNCATED_IMAGES), not from Image — as written this line
# likely has no effect; confirm the intent.
Image.LOAD_TRUNCATED_IMAGES = True
class BatchGenerator(tf.keras.utils.Sequence):
    """Keras Sequence that lazily loads images from disk in batches.

    Labels are one-hot encoded with 100 classes (hard-coded below).
    """

    def __init__(self, data_list, label_list, batch_size, image_size=(150, 150), aug_flag=False):
        """
        data_list: image file paths; label_list: integer class per path.
        image_size: (width, height) every image is resized to.
        aug_flag: when True, apply a random rotation to each image.
        """
        self.data_list = data_list
        self.label_list = label_list
        self.batch_size = batch_size
        self.image_size = image_size
        self.aug_flag = aug_flag
        self.total_images = len(self.data_list)
        self.indices = np.arange(self.total_images)
        # Last batch may be smaller, hence ceil.
        self.num_batches = int(np.ceil(self.total_images/self.batch_size))
        # NOTE(review): shuffling on construction is disabled; with
        # on_epoch_end() active, batches would reshuffle every epoch.
        #self.on_epoch_end()

    def __len__(self):
        """ iterations per epoch """
        return self.num_batches

    def on_epoch_end(self):
        # Reshuffle sample order between epochs (called by Keras).
        random.shuffle(self.indices)

    def __getitem__(self, index):
        """ return batch of (data, label) pairs """
        batch_x, batch_y = [], []
        # Clamp the slice so the final partial batch is handled.
        batch_indices = self.indices[index*self.batch_size:min((index+1)*self.batch_size, self.total_images)]
        for loop in batch_indices:
            # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
            # Pillow (use Image.LANCZOS); confirm the pinned Pillow version.
            loaded_image = img_to_array((load_img(os.path.join(
                self.data_list[loop]))).resize(self.image_size, Image.ANTIALIAS))
            # One-hot over a fixed 100-class label space.
            loaded_label = tocat_fn(self.label_list[loop], 100)
            if self.aug_flag:
                # NOTE(review): only rotation is applied here;
                # _random_augment (rotation OR brightness) is never called.
                loaded_image = self._random_rotate(loaded_image)
            batch_x.append(loaded_image)
            batch_y.append(loaded_label)
        return (np.asarray(batch_x, dtype=np.float32),
                np.asarray(batch_y, dtype=np.uint8))

    def _random_augment(self, image):
        # 50/50 between a rotation and a brightness distortion.
        if np.random.uniform(-1, 1) > 0:
            return self._random_rotate(image)
        else:
            return self._random_brightness_distort(image)

    @staticmethod
    def _random_rotate(image):
        # randint(3) yields 0-2, i.e. rotations of 0/90/180 degrees —
        # 270 is never produced; confirm whether that is intended.
        angle_multiplier = np.random.randint(3)
        return np.rot90(image, angle_multiplier)

    @staticmethod
    def _random_brightness_distort(image):
        # Additive then multiplicative per-pixel Gaussian noise.
        noise_shift = np.random.normal(0., .05, image.shape)
        noise_scale = np.random.normal(1., .01, image.shape)
        return (image + noise_shift) * noise_scale
| 2,316 | 765 |
from setuptools import setup, find_packages

# Read the long description and license with an explicit encoding so the
# build does not depend on the platform's default encoding.
with open('README.rst', encoding='utf-8') as f:
    readme = f.read()

# Renamed from `license`, which shadowed the `license` builtin.
with open('LICENSE', encoding='utf-8') as f:
    license_text = f.read()

setup(
    name='python-image',
    version='0.1.0',
    description='project for OCT',
    long_description=readme,
    author='Alexander Sovetsky',
    license=license_text,
    packages=find_packages(exclude=('tests', 'docs'))
)
| 337 | 121 |
# Emit the three greeting lines in the original order.
for greeting in ("hi", "Hello World", "hellooo"):
    print(greeting)
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 15 19:25:58 2022
@author: User
"""
class person:
    """A person with basic contact information."""
    def __init__(self, name, phone_number, email_addres, addres):
        self.name = name
        self.phone_number = phone_number
        self.email_addres = email_addres
        self.addres = addres
    def purchaseParkingPass(self):
        # BUGFIX: `self` was missing, so inst.purchaseParkingPass() raised
        # TypeError ("takes 0 positional arguments but 1 was given").
        pass
class student(person):
    """A student; extends person with enrollment data."""
    def __init__(self, name, phone_number, email_addres, student_number, average_mark,addres):
        person.__init__(self, name, phone_number, email_addres, addres)
        self.student_number = student_number
        self.average_mark = average_mark
    def isElegibleToEnroll(self):
        # BUGFIX: `self` was missing -> TypeError on instance calls.
        pass
    def getSeminarsTaken(self):
        # BUGFIX: `self` was missing -> TypeError on instance calls.
        pass
class professor(person):
    """A professor; extends person with a salary."""
    def __init__(self, name, phone_number, email_addres, salary, addres):
        # Single inheritance: super() resolves to person, identical to the
        # explicit person.__init__(self, ...) call.
        super().__init__(name, phone_number, email_addres, addres)
        self.salary = salary
class addres:
    """A postal address (street, city, state, postal code, country)."""
    def __init__(self, street, city, state, postal_code, country):
        self.street = street
        self.city = city
        self.state = state
        self.postal_code = postal_code
        self.country = country
    def validate(self):
        # BUGFIX: `self` was missing -> TypeError on instance calls.
        print("validado")
    def outputAsLabel(self):
        # BUGFIX: `self` was missing -> TypeError on instance calls.
        pass
# BUGFIX: addres.__init__ takes five values (street, city, state, postal_code,
# country); the original call passed only four and raised TypeError.
# TODO(review): "La Paz" as the state is a guess -- confirm the intended value.
direc = addres("santa Fe", "La Paz", "La Paz", "021", "Bolivia")
profe = professor("Guido", "60584523", "zallesguido@gmail.com", "8000 bs", direc)
| 1,481 | 491 |
import random
# Syllable building blocks: each generated syllable is prefix + vowel + suffix.
vowels = ["a", "au", "o", "e", "i", "u"]
prefixes = [
    "b", "c", "d", "f", "g", "gh", "h", "k", "l", "m", "n",
    "p", "qu", "r", "s", "t", "v", "w", "x", "y", "z",
]
suffixes = [
    "b", "c", "cc", "ck", "d", "dd", "f", "g", "gh", "h", "i", "k", "l",
    "ll", "m", "n", "p", "r", "rr", "s", "t", "tt", "v", "w", "x", "y", "z",
]
def generate_name():
    """Build a random name from syllables until a random target length is reached.

    The result may overshoot the target, since whole syllables are appended.
    """
    target_length = random.randint(3, 15)
    pieces = []
    built = 0
    while built < target_length:
        syllable = random.choice(prefixes) + random.choice(vowels) + random.choice(suffixes)
        pieces.append(syllable)
        built += len(syllable)
    return "".join(pieces)
def proper_case(string):
    """Upper-case only the first character, leaving the rest untouched."""
    if not string:
        return string
    return string[0].upper() + string[1:]
| 663 | 398 |
from helper import reeting
if __name__ == "__main__":
    # NOTE(review): `reeting` comes from the local `helper` module; the name
    # looks like a typo for `greeting` -- confirm against helper.py before
    # renaming anything here.
    reeting("hello")
| 84 | 32 |
import turtle
# Draw an ever-growing four-color spiral on a black background.
colors = ['green','blue','orange', 'red']
# turtle clamps out-of-range speed arguments, so 900 means "fastest".
turtle.speed(900)
# Hoisted out of the loop: the background color never changes, so one call
# suffices instead of one per iteration.
turtle.bgcolor('black')
for i in range(99999999):
    turtle.pencolor(colors[i%4])  # cycle through the palette
    turtle.forward(i)             # step length grows each iteration
    # The original called turtle.degrees() every iteration; it only selects
    # degree angle mode, which is already the default and is never changed
    # here, so the per-iteration no-op was removed.
    turtle.right(70)
| 245 | 114 |
"""Utility functions."""
import numpy as np
def caffe_load_image(image_filename):
    """Load a single image through caffe.io.load_image.

    Using caffe's own loader keeps shape expectations consistent with the
    rest of the caffe library.

    Args:
        image_filename (str): String filename.

    Returns:
        numpy.ndarray: image with shape [Height, Width, Channels], RGB
        channel order, values scaled to [0, 1], dtype np.float32.
    """
    import caffe
    loaded = caffe.io.load_image(image_filename, color=True)
    return loaded
def caffe_load_image_batch(image_filenames, batch_size=None):
    """Load a batch of images through caffe.io.load_image.

    Args:
        image_filenames (list of str): List of string filenames.
        batch_size (int): When None, all filenames are read; otherwise only
            the first `batch_size` filenames are read.

    Returns:
        numpy.ndarray: images stacked into shape
        [batch_size, Height, Width, Channels], RGB order, [0, 1] scale,
        dtype np.float32.
    """
    limit = len(image_filenames) if batch_size is None else batch_size
    loaded = [caffe_load_image(name) for name in image_filenames[:limit]]
    # Stack the per-image arrays into one 4-D batch array.
    return np.array(loaded)
# TODO (nitred): LRU cache
def get_caffe_transformer(net_input_shape, mean_bgr_255=None):
    """Build a caffe.io.Transformer for images loaded by caffe.io.load_image.

    The returned transformer applies:
        - mean subtraction (only when a mean is provided)
        - transpose to [Channels x Height x Width]
        - RGB -> BGR channel swap
        - rescale from [0., 1.] to [0., 255.]

    Args:
        net_input_shape (numpy.ndarray): Expected 4-dimensional network input
            shape, usually [BATCH_SIZE, Height, Width, Channels]; the batch
            dimension value is irrelevant.
        mean_bgr_255 (numpy.ndarray): 1-dimensional per-channel means, BGR
            order, [0., 255.] scale.

    Returns:
        caffe.io.Transformer: configured with the standard transformations.
    """
    import caffe
    xform = caffe.io.Transformer({'data': net_input_shape})
    if mean_bgr_255 is not None:
        xform.set_mean('data', mean_bgr_255)
    xform.set_transpose('data', (2, 0, 1))
    xform.set_channel_swap('data', (2, 1, 0))
    xform.set_raw_scale('data', 255.0)
    return xform
def caffe_transform_batch(X, net_input_shape, mean_bgr_255=None):
    """Preprocess a batch of caffe.io-loaded images for network input.

    Applies mean subtraction (when a mean is given), HWC -> CHW transpose,
    RGB -> BGR swap, and [0., 255.] rescaling via a caffe Transformer.

    Args:
        X (numpy.ndarray): Batch of images, shape
            [BATCH_SIZE, Height, Width, RGB-Channels], e.g. from
            `caffe_utils.caffe_load_image`.
        net_input_shape (numpy.ndarray): Expected 4-D network input shape.
        mean_bgr_255 (numpy.ndarray): Per-channel BGR means in [0., 255.].

    Returns:
        numpy.ndarray: the preprocessed batch.
    """
    xform = get_caffe_transformer(net_input_shape, mean_bgr_255)
    preprocessed = [xform.preprocess('data', img) for img in X]
    return np.array(preprocessed)
def caffe_load_network_with_input_batch(net, X, mean_bgr_255=None, net_input_blob_name='data'):
    """Load the input batch into `net`'s input blob, in place.

    Args:
        net (caffe.Network): Network to receive the input batch.
        X (numpy.ndarray): Batch of images, shape
            [BATCH_SIZE, Height, Width, RGB-Channels], e.g. from
            `caffe_utils.caffe_load_image`.
        mean_bgr_255 (numpy.ndarray): Per-channel BGR means in [0., 255.].
        net_input_blob_name (str): Input blob name; defaults to "data".

    Returns:
        net: modified in place, returned for convenience.
    """
    input_blob = net.blobs[net_input_blob_name]
    transformed = caffe_transform_batch(X, input_blob.data.shape, mean_bgr_255)
    # Resize the blob to the actual batch shape before filling it.
    input_blob.reshape(*transformed.shape)
    input_blob.data[...] = transformed
    return net
| 4,444 | 1,405 |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from threading import Lock
from time import time
class MsgId:
    """Generates monotonically increasing MTProto message identifiers.

    The id encodes the current unix time in the high 32 bits; when two ids
    are requested within the same clock reading, an offset (in steps of 4)
    disambiguates them. Generation is serialized with a class-level lock.
    """
    last_time = 0
    offset = 0
    lock = Lock()

    def __new__(cls) -> int:
        with cls.lock:
            now = time()
            if now == cls.last_time:
                cls.offset += 4
            else:
                cls.offset = 0
            cls.last_time = now
            return int(now * 2 ** 32) + cls.offset
| 1,183 | 384 |
# Inherit everything from the shared base config and override only the
# optimizer learning rate. NOTE(review): presumably an OpenMMLab-style config
# (`_base_` inheritance) -- confirm which framework consumes it.
_base_ = './config_base/garbage_ddp.py'
optimizer = dict(lr=0.1)
| 66 | 32 |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Fashion-MNIST: clothing/accessory images (boots, dresses, bags, etc.),
# 28x28 grayscale, 10 classes.
fashion_mnist=tf.keras.datasets.fashion_mnist
(train_images,train_labels),(test_images,test_labels)=fashion_mnist.load_data()
print(train_images.shape)
print(train_labels.shape)
print(test_images.shape)
print(test_labels.shape)
# Preview one training sample. NOTE(review): plt.show() blocks until the
# window is closed -- confirm that is acceptable for this script.
plt.imshow(train_images[4])
plt.show()
# Simple MLP classifier: flatten 28x28 -> 120 relu units -> 10-way softmax.
model=tf.keras.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28,28)))
model.add(tf.keras.layers.Dense(units=120,activation="relu"))
model.add(tf.keras.layers.Dense(units=10,activation="softmax"))
# Alternative built-in training path, kept for reference; the custom loop
# below is used instead.
# model.compile(optimizer=tf.keras.optimizers.Adam(0.01),loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),metrics=["accuracy"])
# model.fit(train_images,train_labels,epochs=20,batch_size=500)
def cross_entropy(y_pred,y_true):
    """Mean sparse categorical cross-entropy over the batch.

    Args:
        y_pred: per-class probabilities, shape (batch, num_classes).
        y_true: integer class labels, shape (batch,).

    BUGFIX: the original passed (y_true, y_pred) to the
    SparseCategoricalCrossentropy *constructor*, which only configures a loss
    object and never computes a loss value; the functional form below
    actually evaluates it.
    """
    return tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred))
def accuracy(y_pred, y_true):
    """Fraction of predictions equal to the labels (both cast to int64)."""
    matches = tf.equal(tf.cast(y_pred, tf.int64), tf.cast(y_true, tf.int64))
    return tf.reduce_mean(tf.cast(matches, tf.float32))
optimizer=tf.optimizers.Adam()
def train_step(x,y):
    """Run one optimization step; returns (predicted classes, loss).

    BUGFIX: the original computed `tf.argmax(model.predict(x))` inside the
    GradientTape. `predict` runs outside the tape and `argmax` is
    non-differentiable, so `tape.gradient` returned None gradients and
    `apply_gradients` failed. Calling the model directly keeps the forward
    pass on the tape and differentiates through the probabilities.
    """
    with tf.GradientTape() as tape:
        probs = model(x, training=True)
        loss = cross_entropy(probs, y)
    trainable_variables = model.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    # Hard class predictions are only for reporting, outside the tape.
    pred = tf.argmax(probs, axis=1)
    return pred, loss
# Build an endlessly repeating, lightly shuffled, batched input pipeline.
train_data = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
train_data = train_data.repeat().shuffle(100).batch(32).prefetch(1)
# Manual training loop: one pass of len(train)/32 batches per epoch,
# printing per-batch accuracy and loss.
for epoch in range(20):
    steps_per_epoch = train_images.shape[0] // 32
    for step, (batch_x, batch_y) in enumerate(train_data.take(steps_per_epoch), 1):
        pred, loss = train_step(batch_x, batch_y)
        acc = accuracy(pred, batch_y)
        print(acc, loss)
| 1,967 | 801 |
##
# @file visualization.py
# @brief Python file for visualization of the testcase.
# Contains the driver code for reading the file and plotting it.
#
# @authors Kumar Pranjal 2018A7PS0163H
# @authors Ashna Swaika 2018A7PS0027H
# @authors Abhishek Bapna 2018A7PS0184H
# @authors Ashish Verma 2018A7PS0009H
# Importing required modules
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
# Program starts here
if __name__ == '__main__':
    # The testcase name comes from the first CLI argument; points are read
    # from autotestcase/<name> and fitted segments from <name>_line.txt.
    fname = f'autotestcase/{argv[1]}'
    fname2 = f'{fname}_line.txt'
    X = []
    Y = []
    # Plotting the points: each line of the file holds one "x y" pair.
    with open(fname, 'r', encoding='utf8') as f:
        lines = f.readlines()
        for line in lines:
            x, y = list(map(float, line.split()))
            X.append(x)
            Y.append(y)
    plt.plot(X, Y, '--.', color='red', linewidth=0.5)
    # Plotting the partitions: each line holds cost, error, slope, intercept
    # and the x-range of one fitted segment.
    with open(fname2, 'r', encoding='utf8') as f:
        lines = f.readlines()
        err_tot = 0
        for line in lines:
            cost, err, m, c, xmin, xmax = list(map(float, line.split()))
            if m == float('inf'):
                # Vertical segments cannot be drawn as y = m*x + c; skip them.
                continue
            x = np.linspace(xmin, xmax, 1000)
            plt.plot(x, m*x+c, c=np.random.rand(3,), linewidth=3,label='y = %.3f x + %.3f' % (m, c))
            err_tot += err
    plt.legend()
    # Displaying and saving the plot.
    # NOTE(review): `cost` here is whatever was parsed from the *last* line of
    # the file -- confirm that is the intended value for the title.
    plt.title('Cost : %.3f Error : %.3f'%(cost,err_tot))
    plt.savefig(f'{fname}.png')
    plt.show()
    exit(0)
| 1,489 | 559 |
#! /usr/bin/env python
# This is to allow operators to disable a site and separately
# kill the process without un-disabling the site
import unittest
from dynamo_consistency import signaling
from dynamo_consistency import summary
from dynamo_consistency import main
from dynamo_consistency import picker
import base
class TestSignaling(base.TestSimple):
    """Checks that the halt signal handler respects a DISABLED site.

    Operators must be able to disable a site and separately kill the process
    without the kill un-disabling the site.
    """
    def test_signaling(self):
        # Run a full pass so the picked site ends up READY.
        site = picker.pick_site()
        main.main(site)
        summary.unlock_site(site)
        self.assertEqual(summary.get_status(site), summary.READY)
        # A halt signal on a READY site marks it HALT.
        signaling.halt(2, 'dummy')
        self.assertEqual(summary.get_status(site), summary.HALT)
        # ...but a DISABLED site must stay DISABLED after the same signal.
        summary.set_status(site, summary.DISABLED)
        signaling.halt(2, 'dummy')
        self.assertEqual(summary.get_status(site), summary.DISABLED)
if __name__ == '__main__':
    unittest.main(argv=base.ARGS)
| 869 | 276 |
#!/usr/bin/env python
# This script is intended to be run from within the OSCar MIDI Sysex Grammar file
# OSCar Exclusive Data Format
#
# 2 MIDI bytes contain one byte of data
# first byte contains low 4 bit nibble
# second byte contains high 4 bit nibble
#
# 0 8 16
# +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
# |0|0|0|0|l|l|l|l| |0|0|0|0|h|h|h|h|
# +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
# \______/ \______/ \______/ \______/
# 4 zero 4 low 4 zero 4 high
def parseByteRange(element, byteView, bitPos, bitLength, results):
    """Decode one OSCar data byte from two MIDI bytes (low nibble first).

    Reports the combined value to `results` and returns the number of MIDI
    bytes consumed: 2 on success, 0 when either byte's marker bit is set.
    """
    consumed = 0
    lowMarker = byteView.readUnsignedIntBits(bitPos, 1, ENDIAN_BIG)
    if (lowMarker == 0):
        highMarker = byteView.readUnsignedIntBits(bitPos + 8, 1, ENDIAN_BIG)
        if (highMarker == 0):
            # low nibble lives in the first byte, high nibble in the second
            lowNibble = byteView.readUnsignedIntBits(bitPos + 4, 4, ENDIAN_BIG)
            highNibble = byteView.readUnsignedIntBits(bitPos + 12, 4, ENDIAN_BIG)
            combined = (highNibble << 4) | lowNibble
            # hand the decoded value back to the grammar engine
            value = Value()
            value.setString(str(combined))
            results.addElement(element, 2, 0, value)
            consumed = 2
    return consumed
def fillByteRange(value, byteArray, bitPos, bitLength):
    """Encode one data byte as two MIDI bytes (low nibble first).

    Writes the two bytes into byteArray at bitPos; values outside 0-255 are
    rejected with a message and the array is left unchanged.
    """
    if (bitLength < 16):
        # BUGFIX: this was a Python 2 `print` statement, a SyntaxError on
        # Python 3 and inconsistent with the print() calls used elsewhere
        # in this file.
        print("Not enough space for OSCar Exclusive Data Format, 16 bits needed")
    # get number edited by user
    number = value.getUnsigned()
    high, low = number >> 4, number & 0x0F
    # verbose flag
    verbose = False
    # verbose info
    if verbose:
        print("Input value: " + str(number))
        print("byteArray length: " + str(byteArray.getLength()))
        print("bitPos: " + str(bitPos))
        print("bitLength: " + str(bitLength))
        # number in hex.
        # BUGFIX: the original int(str(number), 16) re-parsed the decimal
        # digits as hexadecimal (10 -> 16) and raised ValueError for any
        # number containing an 8 or 9.
        numHex = '0x{:02X}'.format(number)
        print("Input value hex: " + str(numHex))
        # number in binary
        numBinary = '{0:08b}'.format(number)
        print("Input value binary: " + str(numBinary))
        # number high and low nibbles
        print("Input value binary (low nibble): " + str('{0:04b}'.format(low)))
        print("Input value binary (high nibble): " + str('{0:04b}'.format(high)))
    if (number < 256):
        byteArray.writeUnsignedIntBits(low, bitPos, 8, ENDIAN_BIG)
        byteArray.writeUnsignedIntBits(high, bitPos+8, 8, ENDIAN_BIG)
    else:
        print("Input value out of range (0-255). Value not updated.")
| 2,359 | 950 |
# flake8: noqa
# Single source of truth for the package version.
__version__ = "0.5.0"
| 38 | 22 |
"""Transformations to be used on tremor accelerometry data (e.g.: FFT)."""
from __future__ import annotations
from typing import Iterable
import numpy as np
import pandas as pd
from scipy.signal import periodogram
def fft_spectra(
    input_dataframe: pd.DataFrame,
    columns: Iterable[str] | None = None,
    sampling_rate: int | float = 128,
    norm=False,
):
    """Compute FFT amplitude spectra of accelerometry data.

    Parameters
    ----------
    input_dataframe : pd.DataFrame
        Dataframe containing accelerometry data.
    columns : Iterable[str], optional
        Columns to calculate the FFT for; all columns when None (default).
    sampling_rate : int or float, optional
        Number of samples per second, by default 128.
    norm : bool, optional
        Whether to normalize each spectrum to a maximum of 1, by default False.

    Returns
    -------
    pd.DataFrame
        FFT spectra indexed by non-negative frequency.
    """
    n_samples = input_dataframe.shape[0]
    freq = np.fft.fftfreq(n_samples, d=1 / sampling_rate)
    selected = input_dataframe.columns if columns is None else columns
    spectra = {}
    for name in selected:
        amplitude = 2 / n_samples * np.abs(np.fft.fft(input_dataframe[name]))
        spectra[name] = amplitude / amplitude.max() if norm else amplitude
    # Keep only the non-negative half of the (conjugate-symmetric) spectrum.
    return pd.DataFrame(spectra, index=freq).iloc[freq >= 0, :]
def power_density_spectra(
    input_dataframe: pd.DataFrame,
    columns: Iterable[str] | None = None,
    sampling_rate: int | float = 128,
    norm=False,
):
    """Compute power density spectra of accelerometry data.

    Compared to the FFT, the resulting values correspond to
    FFT[-freq] * FFT[freq] with freq >= 0.

    Parameters
    ----------
    input_dataframe : pd.DataFrame
        Dataframe containing accelerometry data.
    columns : Iterable[str], optional
        Columns to calculate the spectra for; all columns when None (default).
    sampling_rate : int or float, optional
        Number of samples per second, by default 128.
    norm : bool, optional
        Whether to normalize each spectrum to a maximum of 1, by default False.

    Returns
    -------
    pd.DataFrame
        Power density spectra indexed by frequency.
    """
    selected = input_dataframe.columns if columns is None else columns
    duration = input_dataframe.shape[0] / sampling_rate
    spectra = {}
    for name in selected:
        frequency, density = periodogram(input_dataframe[name], sampling_rate)
        if norm:
            density = density / density.max()
        else:
            density = density * (2 / duration)
        spectra[name] = density
    # `frequency` is identical for every column; the last one indexes the frame.
    return pd.DataFrame(spectra, index=frequency)
| 2,744 | 832 |
""" API to grab text content from images ID's and pdf's.
Endpoints
---------
* GET /: root: shows api info to new users on run
* POST /: convert_pdf_to_image: converts a pdf doc to an image for processing
* POST /: passport: extracts target text based information from pasport
* POST /: image: extracts target text based information from jpg or png image
USAGE
-----
Run local:
    run app.py in a virtual env after installing the requirements files
    You should then be able to navigate to localhost:5000; if you see the welcome message, the API is operational
"""
import os
import json
import logging
from flask import Flask, request, make_response, jsonify
from werkzeug.utils import secure_filename
from passporteye.mrz.image import MRZPipeline
from passporteye import read_mrz
from pdfUtil import pdf_to_png
try:
from PIL import Image
except ImportError:
import Image
import pytesseract
import cv2
import numpy as np
import re
from random import *
from flask_cors import CORS# CORS allows cross origin requests from web browsers
from extract_image_data import *
from nlp_ops import sentiment_analysis_score
from nlpbot import NLPBot
from scanner import scan_barcode_image
#new addtions
#%pip install easyocr
import easyocr
# Shared OCR reader, created once at import time; gpu=False so the container
# does not require CUDA. Supports Spanish and English.
reader = easyocr.Reader(['es', 'en'], gpu=False)
# for running locally
#UPLOAD_FOLDER = 'uploads'
#EDIT_FOLDER = 'edit'
# for docker build
UPLOAD_FOLDER = '/uploads'
EDIT_FOLDER = '/edit'
# NOTE(review): MAXIMUM_IMAGE_ROTATIONS is not referenced anywhere in this
# file -- confirm whether it is still needed.
MAXIMUM_IMAGE_ROTATIONS = 3
app = Flask(__name__)
# Timestamped log format applied process-wide.
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
#Endpoint Routes
@app.route('/')
def root():
    """Return the API landing text describing the available endpoints.

    Returns
    -------
    str: HTML welcome message listing endpoints and their file keys.
    """
    welcome = 'Welcome ! The endpoint for images is at <b>/passport</b>, <b>/image</b> or <b>/barcode</b> the key is imagefile , The EndPoint of pdfs is <b>pdf</b> and the key is pdf'
    return welcome
@app.route('/pdf', methods=['POST'])
def convert_pdf_to_image():
    """Convert an uploaded PDF to PNG pages and OCR each page to text.

    Returns
    -------
    flask.Response: JSON list with one text string per page, or a 400
    response when the 'pdf' file parameter is missing.
    """
    uploaded = request.files.get('pdf', None)
    if not uploaded:
        return make_response("Missing file parameter", 400)
    # Persist the upload before handing it to the converter.
    saved_path = os.path.join(UPLOAD_FOLDER, secure_filename(uploaded.filename))
    uploaded.save(saved_path)
    # One PNG per PDF page, each OCR'd independently.
    page_images = pdf_to_png(saved_path)
    page_texts = [image_to_string(png_path) for png_path in page_images]
    return jsonify(page_texts)
@app.route('/passport', methods=['POST'])
def passport():
    """Post a passport image file for text data to be extracted.

    Parameters
    ----------
    None

    Returns
    -------
    json format - text data fields extracted from the passport
    """
    imagefile = request.files.get('imagefile', None)
    if not imagefile:
        return make_response("Missing file parameter", 400)
    # MRZ = machine-readable zone; full_content is the whole-page OCR text.
    mrz, full_content = get_image_content(imagefile)
    if mrz is None:
        return make_response("Can not read image", 400)
    mrz_data = mrz.to_dict()
    all_infos = {}
    all_infos['last_name'] = mrz_data['surname'].upper()
    all_infos['first_name'] = mrz_data['names'].upper()
    all_infos['country_code'] = mrz_data['country']
    all_infos['country'] = get_country_name(all_infos['country_code'])
    all_infos['nationality'] = get_country_name(mrz_data['nationality'])
    all_infos['number'] = mrz_data['number']
    all_infos['sex'] = mrz_data['sex']
    # all_infos['full_text'] = full_content
    # NOTE(review): valid_score is extracted but never used -- confirm whether
    # it should be returned or removed.
    valid_score = mrz_data['valid_score']
    # Trying to extract full name
    # NOTE(review): the trailing `continue` in these loops is a no-op, so the
    # LAST matching line wins; if the first match was intended, this needs
    # `break` instead -- confirm.
    if all_infos['last_name'] in full_content:
        splitted_fulltext = full_content.split("\n")
        for w in splitted_fulltext:
            if all_infos['last_name'] in w:
                all_infos['last_name'] = w
                continue
    splitted_firstname = all_infos['first_name'].split(" ")
    if splitted_firstname[0] in full_content:
        splitted_fulltext = full_content.split("\n")
        for w in splitted_fulltext:
            if splitted_firstname[0] in w:
                all_infos['first_name'] = clean_name(w)
                continue
    #clean out text: strip MRZ filler and OCR artifacts from the surname
    all_infos['last_name'] = all_infos['last_name'].replace('>','')
    all_infos['last_name'] = all_infos['last_name'].replace('<','')
    all_infos['last_name'] = all_infos['last_name'].replace('$','')
    #fix sex if misidentified
    # NOTE(review): when OCR yields neither 'M' nor 'F' the value is chosen at
    # RANDOM -- responses are non-deterministic; confirm this is intended.
    s = all_infos['sex'].upper()
    s = s.strip()
    if(s != 'M' and s !='F'):
        i = randint(0, 1)
        if(i ==0):
            s ='M'
        else:
            s='F'
    all_infos['sex'] = s
    return jsonify(all_infos)
@app.route('/image', methods=['POST'])
def image():
    """Post an image file for text data to be extracted.

    Parameters
    ----------
    None

    Returns
    -------
    json format - text data extracted from the image png or jpg, a 400
    response when the 'imagefile' parameter is missing, or an error string
    when OCR fails.
    """
    imagefile = request.files.get('imagefile', None)
    if not imagefile:
        return make_response("Missing file parameter", 400)
    filename = secure_filename(imagefile.filename)
    full_path = os.path.join(UPLOAD_FOLDER, filename)
    imagefile.save(full_path)
    text = ''
    try:
        # Best-effort OCR: try the cleaned image first, fall back to the raw one.
        im = cv2.imread(full_path)
        imC = clean_image(im)
        text = pytesseract.image_to_string(imC, lang ='eng')
        if text == "":
            text = pytesseract.image_to_string(im, lang ='eng')
        # logging.info('full image content = %s' %(full_content))
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; the best-effort error string is preserved.
        text = 'Error : Can Not Read the current Image'
    return jsonify(text)
@app.route('/nlpbot', methods=['POST'])
def nlpbot():
    """Summarize an uploaded document (pdf, text, vtt or other file).

    Returns
    -------
    flask.Response: JSON with 'original_text' and 'summary_text', or a 400
    response when the 'pdf' file parameter is missing.
    """
    uploaded = request.files.get('pdf', None)
    if not uploaded:
        return make_response("Missing file parameter", 400)
    # Persist the upload so NLPBot can read it from disk.
    saved_path = os.path.join(UPLOAD_FOLDER, secure_filename(uploaded.filename))
    uploaded.save(saved_path)
    bot = NLPBot(infile_path=saved_path)
    bot.summarize()
    return jsonify({"original_text": bot.text, "summary_text": bot.final_text})
@app.route('/nlp_sa', methods=['POST'])
def nlp_sa():
    """Run sentiment analysis on a list of texts.

    Expects a JSON body of the form {"words": ["list of words"]}.

    Returns
    -------
    flask.Response: JSON sentiment analysis report scores.
    """
    payload = request.json
    scores = sentiment_analysis_score(payload["words"])
    return jsonify(scores)
@app.route('/barcode', methods=['POST'])
def barcode():
    """Post a barcode image file for text data to be extracted.

    Parameters
    ----------
    imagefile

    Returns
    -------
    json format - text data extracted from the image png or jpg, a 400
    response when 'imagefile' is missing, or a 500 on decode failure.
    """
    imagefile = request.files.get('imagefile', None)
    if not imagefile:
        return make_response("Missing file parameter", 400)
    filename = secure_filename(imagefile.filename)
    full_path = os.path.join(UPLOAD_FOLDER, filename)
    imagefile.save(full_path)
    text = ''
    try:
        # Convert image to text
        text = scan_barcode_image(full_path)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any decode failure still maps to HTTP 500.
        return make_response("Error processing image", 500)
    return jsonify(text)
@app.route('/drivers_license', methods=['POST'])
def drivers_license():
    """Post an image file for text data to be extracted

    Parameters
    ----------
    None

    Returns
    -------
    json format - text data extracted from the image png or jpg
    example -
    {"name":"JANICE ANN","address":"123 MAIN STREET, AARRISBURG, PA 17101-0000","state":"Pennsylvana",
    "class":"A","sex":"F","height":"5'-06\"","eyes":"BRO","dob":"08/04/1975","exp":"08/05/2023"}
    """
    imagefile = request.files.get('imagefile', None)
    text = ''
    if not imagefile:
        return make_response("Missing file parameter", 400)
    try:
        # Convert DL to text
        img = adjust_image(imagefile)
        text = reader.readtext(img, detail=0)
        parcetext={}
        other_info =[]
        #parce out data
        # i tracks the current token index so 'ISS' handling can look back
        # one token.
        i = -1
        for x in text:
            try:
                x = str(x).upper()
                # OCR often reads 'S' as '$'; normalize before matching.
                x = str(x).replace('$','S')
                i+=1
                # The field value is the text after the first ':' when one is
                # present, otherwise the whole token.
                s = x.split(":")
                if(len(s)>1):
                    s=s[1]
                else:
                    s=x
                # Keyword dispatch; first matching keyword wins per token.
                # NOTE(review): 'DL' is checked before 'CLASS'/'DUPS' etc., so
                # any token containing "DL" is treated as the license number --
                # confirm the match order is intended.
                if 'DL' in x:
                    parcetext['DLN']=s
                    continue
                if 'CLASS' in x:
                    parcetext['CLASS']=s
                    continue
                if 'SEX' in x:
                    parcetext['SEX']=s
                    continue
                if 'HGT' in x:
                    parcetext['HGT']=s
                    continue
                if 'WGT' in x:
                    parcetext['WGT']=s
                    continue
                if 'EXP' in x:
                    parcetext['EXP']=s
                    continue
                if 'EYE' in x:
                    parcetext['EYES']=s
                    continue
                if 'ISS' in x:
                    parcetext['ISS']=s
                    # Short token: assume the value spilled into the previous
                    # OCR token and prepend it.
                    if len(x)<7:
                        parcetext['ISS']=s+" "+ text[i-1]
                    continue
                # 'D0B' covers OCR misreading the letter O as the digit 0.
                if 'DOB' in x or 'D0B'in x:
                    parcetext['DOB']=s
                    continue
                if 'DD' in x or '00:'in x:
                    parcetext['DD']=s
                    continue
                if 'DUPS' in x:
                    parcetext['DUPS']=s
                    continue
                # Anything unmatched and non-empty is kept as free-form info.
                if(len(x)>0):
                    other_info.append(x)
            except:
                # NOTE(review): bare except silently drops a token on any
                # error -- consider narrowing to Exception and logging.
                continue
        parcetext['personal_info'] =other_info
    except:
        # NOTE(review): bare except, and the error is returned with HTTP 200
        # as a plain string -- confirm clients expect this shape.
        parcetext = 'Error : Can Not Read the current Image'
    return jsonify(parcetext)
@app.route('/drivers_license_raw', methods=['POST'])
def drivers_license_raw():
    """Post an image file for raw OCR text extraction (no field parsing).

    Parameters
    ----------
    None

    Returns
    -------
    json format - list of OCR'd text tokens from the image png or jpg, a 400
    response when 'imagefile' is missing, or an error string on failure.
    """
    imagefile = request.files.get('imagefile', None)
    text = ''
    if not imagefile:
        return make_response("Missing file parameter", 400)
    try:
        # Convert DL to text using the shared easyocr reader.
        img = adjust_image(imagefile)
        text = reader.readtext(img, detail=0)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; the best-effort error string is preserved.
        text = 'Error : Can Not Read the current Image'
    return jsonify(text)
@app.route('/simple_summary', methods=['POST'])
def simple_summary():
    """Summarize raw text posted as JSON.

    Expects a JSON body of the form {"text": "Text to be summarized"}.

    Returns
    -------
    flask.Response: JSON with 'original_text' and 'summary_text'.
    """
    payload = request.json
    bot = NLPBot(text=payload["text"])
    bot.summarize()
    return jsonify({"original_text": bot.text, "summary_text": bot.final_text})
@app.route('/ner', methods=['POST'])
def ner():
    """Run named-entity recognition on raw text posted as JSON.

    Expects a JSON body of the form {"text": "Text for Named Entity Recognition"}.

    Returns
    -------
    flask.Response: JSON with 'original_text' and 'ner_text' (the tags).
    """
    payload = request.json
    bot = NLPBot(text=payload["text"])
    bot.ner()
    return jsonify({"original_text": bot.text, "ner_text": bot.tags})
if __name__ == "__main__":
    # NOTE(review): CORS is only enabled on this direct-run path; under a WSGI
    # server (e.g. gunicorn in the Docker build) this branch never executes,
    # so cross-origin requests would not be allowed -- confirm intended.
    CORS(app)
    # debug=True enables the reloader and the interactive debugger; not safe
    # for production exposure.
    app.run(host="0.0.0.0", debug=True)
| 12,633 | 3,920 |
#!/usr/bin/env python3
#
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from unittest import mock
from cros.factory.umpire.server.service import multicast
from cros.factory.utils import json_utils
# Port handed to GenerateConfig in every test case.
DEFAULT_PORT = 8080
# All golden/input fixtures live next to this file under testdata/.
TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
def TestData(filename):
  """Return the path of `filename` inside the test data directory."""
  return os.path.join(TESTDATA_DIR, filename)
class GenerateConfigTest(unittest.TestCase):
  """Tests for MulticastService.GenerateConfig.

  Each case feeds a service config plus the shared example payload and
  compares the generated multicast config with a golden JSON file.
  """
  def setUp(self):
    # Shared example payload used by every test case.
    self.payload = json_utils.LoadFile(
        os.path.join(TESTDATA_DIR, 'example_payload.json'))
  def testEnableAll(self):
    """All components enabled, with explicit mgroup and server_ip."""
    _SERVICE_CONFIG_ENABLE_ALL = {
        'mgroup': '224.1.2.3',
        'server_ip': '192.168.1.1',
        'required_components': {
            "release_image": True,
            "test_image": True,
            "toolkit": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_ENABLE_ALL, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_enable_all.json'))
    self.assertEqual(generated_config, expected_config)
  def testEnableToolkit(self):
    """Only the toolkit component enabled."""
    _SERVICE_CONFIG_ENABLE_TOOLKIT = {
        'mgroup': '224.1.2.3',
        'server_ip': '192.168.1.1',
        'required_components': {
            "release_image": False,
            "test_image": False,
            "toolkit": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_ENABLE_TOOLKIT, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_enable_toolkit.json'))
    self.assertEqual(generated_config, expected_config)
  def testDefaultValues(self):
    """Config with neither mgroup nor server_ip falls back to defaults."""
    # Enable one component here to test default mgroup value.
    _SERVICE_CONFIG_DEFAULT_VALUES = {
        'required_components': {
            "test_image": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_DEFAULT_VALUES, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_default_values.json'))
    self.assertEqual(generated_config, expected_config)
  def testNoServerIp(self):
    """Test when `mgroup` is assigned, but `server_ip` is not given."""
    _SERVICE_CONFIG_NO_SERVER_IP = {
        'mgroup': '224.1.2.3',
        'required_components': {
            "test_image": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_NO_SERVER_IP, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_no_server_ip.json'))
    self.assertEqual(generated_config, expected_config)
  def testAutoAssignMgroup(self):
    """Test auto assigning `mgroup` from server_ip."""
    _SERVICE_CONFIG_AUTO_ASSIGN_MGROUP = {
        'server_ip': '192.168.12.34',
        'required_components': {
            "test_image": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_AUTO_ASSIGN_MGROUP, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_auto_assign_mgroup.json'))
    self.assertEqual(generated_config, expected_config)
  def testBadMgroup(self):
    """A malformed mgroup address is rejected with AssertionError."""
    _SERVICE_CONFIG_BAD_MGROUP = {
        'mgroup': '123456',
        'required_components': {
            "test_image": True
        }
    }
    with self.assertRaises(AssertionError):
      multicast.MulticastService.GenerateConfig(_SERVICE_CONFIG_BAD_MGROUP,
                                                self.payload, DEFAULT_PORT)
  def testAutoAssignMgroupWithBadServerIp(self):
    """A malformed server_ip fails while deriving the mgroup."""
    _SERVICE_CONFIG_BAD_SERVER_IP = {
        'server_ip': '123456',
        'required_components': {
            "test_image": True
        }
    }
    # Raised by the `.group()` call from a None object returned by `re.search`.
    with self.assertRaises(AttributeError):
      multicast.MulticastService.GenerateConfig(_SERVICE_CONFIG_BAD_SERVER_IP,
                                                self.payload, DEFAULT_PORT)
class MulticastServiceTest(unittest.TestCase):
  """Tests for MulticastService.CreateProcesses."""
  # Stand-in multicast config returned by the mocked GenerateConfig.
  _DUMMY_MCAST_CONFIG = {
      'dummy_key': 'dummy_value'
  }
  # Minimal umpire config that enables the multicast service.
  _FAKE_UMPIRE_CONFIG = {
      'services': {
          'multicast': {}
      }
  }
  _FAKE_UMPIRE_BASE_DIR = 'umpire_base_dir'
  _FAKE_MCAST_RESOURCE_NAME = 'multicast.32d4f1f4ba53b174acc8aa0a68fb53bd.json'
  @mock.patch('cros.factory.utils.file_utils.ForceSymlink')
  @mock.patch(multicast.__name__ + '.MulticastService.GenerateConfig')
  def testCreateProcesses(self, mock_generate_config, mock_force_sym_link):
    """CreateProcesses stores the config blob, symlinks it, spawns nothing."""
    mock_generate_config.return_value = self._DUMMY_MCAST_CONFIG
    mock_env = mock.MagicMock()
    mock_env.base_dir = self._FAKE_UMPIRE_BASE_DIR
    mock_env.AddConfigFromBlob.return_value = self._FAKE_MCAST_RESOURCE_NAME
    ret = multicast.MulticastService().CreateProcesses(self._FAKE_UMPIRE_CONFIG,
                                                       mock_env)
    # The service manages no subprocesses of its own.
    self.assertEqual(ret, [])
    # The generated config is stored pretty-printed under 'multicast_config'...
    mock_env.AddConfigFromBlob.assert_called_once_with(
        json_utils.DumpStr(self._DUMMY_MCAST_CONFIG, pretty=True),
        'multicast_config')
    # ...and exposed through a stable symlink in the umpire base directory.
    mock_force_sym_link.assert_called_once_with(
        os.path.join('resources', self._FAKE_MCAST_RESOURCE_NAME),
        os.path.join(self._FAKE_UMPIRE_BASE_DIR, 'multicast_config.json'))
if __name__ == '__main__':
  unittest.main()
| 5,540 | 1,878 |