hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fc9cc9174dbb7e4b0ff4a213151c6b43bd8c10aa | 2,190 | py | Python | src/IceRayPy/utility/light/sun.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | 2 | 2020-09-04T12:27:15.000Z | 2022-01-17T14:49:40.000Z | src/IceRayPy/utility/light/sun.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | null | null | null | src/IceRayPy/utility/light/sun.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | 1 | 2020-09-04T12:27:52.000Z | 2020-09-04T12:27:52.000Z |
import IceRayPy
Coord3D = IceRayPy.type.math.coord.Scalar3D
class Point:
    """Sun light source located at a single point.

    Wraps a core ``SunG`` light built around a point light whose default
    spot sits at (0, 0, 10). The optional ``P_center`` overrides the
    light's center.
    """
    def __init__(self, P_dll, P_center=None):
        default_spot = IceRayPy.core.light.Spot(Coord3D(0, 0, 10))
        inner_light = IceRayPy.core.light.Point(P_dll, default_spot)
        self.m_implementation = IceRayPy.core.light.SunG(P_dll, inner_light)
        self.m_cargo = self.m_implementation.m_cargo
        if P_center is not None:
            # NOTE(review): unlike the other wrappers in this module this
            # calls center() on m_cargo rather than m_implementation —
            # presumably intentional; verify against the core API.
            self.m_cargo.center(P_center)

    def __del__(self):
        pass  # nothing to release
class Area:
    """Sun light source spread over an area.

    The optional ``P_origin`` sets the origin of the underlying area light.
    """
    def __init__(self, P_dll, P_origin=None):
        inner_light = IceRayPy.core.light.Area(P_dll)
        self.m_implementation = IceRayPy.core.light.SunG(P_dll, inner_light)
        self.m_cargo = self.m_implementation.m_cargo
        if P_origin is not None:
            self.m_implementation.origin(P_origin)

    def __del__(self):
        pass  # nothing to release
class Line:
    """Sun light source distributed along a line segment.

    ``P_start`` and ``P_end`` optionally set the segment's endpoints.
    """
    def __init__(self, P_dll, P_start=None, P_end=None):
        inner_light = IceRayPy.core.light.Line(P_dll)
        self.m_implementation = IceRayPy.core.light.SunG(P_dll, inner_light)
        self.m_cargo = self.m_implementation.m_cargo
        if P_start is not None:
            self.m_implementation.start(P_start)
        if P_end is not None:
            self.m_implementation.end(P_end)

    def __del__(self):
        pass  # nothing to release
class Circle:
    """Sun light source distributed along a circle.

    The optional ``P_center`` sets the circle's center.
    """
    def __init__(self, P_dll, P_center=None):
        inner_light = IceRayPy.core.light.Circle(P_dll)
        self.m_implementation = IceRayPy.core.light.SunG(P_dll, inner_light)
        self.m_cargo = self.m_implementation.m_cargo
        if P_center is not None:
            self.m_implementation.center(P_center)

    def __del__(self):
        pass  # nothing to release
class Disc:
    """Sun light source shaped as a disc.

    The optional ``P_center`` sets the disc's center.

    Fix: the center was previously applied twice due to a duplicated
    conditional block; it is now applied exactly once.
    """
    def __init__(self, P_dll, P_center=None):
        inner_light = IceRayPy.core.light.Disc(P_dll)
        self.m_implementation = IceRayPy.core.light.SunG(P_dll, inner_light)
        self.m_cargo = self.m_implementation.m_cargo
        if P_center is not None:
            self.m_implementation.center(P_center)

    def __del__(self):
        pass  # nothing to release
| 35.322581 | 151 | 0.615068 |
6f044f58d9c75674b31f54cd8721b95f574a099d | 2,533 | py | Python | monitis_dump_monitor_list.py | shinhermit/ipsen-oxiane-automation | 3730a28a801f3b18855cd1c00729192abee64fd0 | [
"MIT"
] | null | null | null | monitis_dump_monitor_list.py | shinhermit/ipsen-oxiane-automation | 3730a28a801f3b18855cd1c00729192abee64fd0 | [
"MIT"
] | null | null | null | monitis_dump_monitor_list.py | shinhermit/ipsen-oxiane-automation | 3730a28a801f3b18855cd1c00729192abee64fd0 | [
"MIT"
] | 1 | 2017-12-06T09:13:50.000Z | 2017-12-06T09:13:50.000Z | from webapis.monitisapi import api_connector
from webapis.monitisapi.data_model import Monitor
from webapis.common.data_model import GenericWrappingIterator
from webapis import utils
from webapis.utils import Console
# Banner printed at startup. Fix: the previous text wrongly described the
# script as dumping "Google Analytics properties" (copy-paste from a
# sibling script); this tool dumps Monitis monitors.
welcome_msg = """
-------------------------------------------------------------------------------------------------
**                                                                                             **
**                                  MONITIS'S MONITORS DUMP                                    **
**                                                                                             **
**                    Dump the list of all Monitis monitors in a CSV file                      **
-------------------------------------------------------------------------------------------------
"""
def main():
    """Dump the list of all Monitis' monitors in a CSV file.

    This script expects:

    - a credentials file which contains the Monitis API credentials
    - the path to the output file, where the data will be written.
      The directories of this path must exist.

    The credentials file contains JSON in the form:

    ```
    {
      "api_key": "your api key",
      "secret_key": "your secret key",
      "agent_key": "your agent key",
      "user_key": "your user key"
    }
    ```

    Usage:

    ```
    <python 3 interpreter> monitis_dump_monitor_list.py \
    --credentials etc/credentials/monitisapi/secret_credentials.json \
    --output etc/dump/monitis_rum_monitors.csv
    ```
    """
    Console.print_header(welcome_msg)
    parser = utils.get_output_arg_parser(description='Dump the list of monitor in Monitis into a CSV file.')
    args = parser.parse_args()
    service = api_connector.Service(args.credentials)
    print("\nRequesting the list of monitors...\n")
    res = service.list_monitors()
    dumped_monitors_count = 0
    with open(args.dump_file, "w+") as file:
        file.write("Domain,URL,Monitor ID\n")
        for monitor in GenericWrappingIterator(res, Monitor):
            # Fix: rows previously ended with '\r\n' while the header used
            # '\n', producing a mixed-EOL CSV; use '\n' consistently.
            csv_line = '"{domain}","{url}",{id}\n'.format(
                domain=monitor.params.domain,
                url=monitor.params.url,
                id=monitor.id)
            file.write(csv_line)
            dumped_monitors_count += 1
            print("\t**** " + csv_line)
    Console.print_green(dumped_monitors_count, " monitors dumped")
    Console.print_good_bye_message()
# Entry point: run the dump only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 36.185714 | 108 | 0.523885 |
ec9413a1cae9e04e4fc8db3832cb74d8f54cb2a7 | 3,182 | py | Python | pyprom/tests/util/helpers.py | Marchowes/pyProm | 0fb67597ba1f0be2e55e30c5e029df2a60e9ac56 | [
"BSD-3-Clause"
] | 5 | 2016-09-12T15:18:46.000Z | 2019-01-22T01:51:01.000Z | pyprom/tests/util/helpers.py | Marchowes/pyProm | 0fb67597ba1f0be2e55e30c5e029df2a60e9ac56 | [
"BSD-3-Clause"
] | 134 | 2016-08-15T02:14:06.000Z | 2021-09-01T14:54:16.000Z | pyprom/tests/util/helpers.py | Marchowes/pyProm | 0fb67597ba1f0be2e55e30c5e029df2a60e9ac56 | [
"BSD-3-Clause"
] | 2 | 2016-09-12T15:41:42.000Z | 2017-08-17T17:30:55.000Z | """
pyProm: Copyright 2018.
This software is distributed under a license that is described in
the LICENSE file that accompanies it.
"""
from pyprom.lib.containers.multipoint import MultiPoint
from pyprom.lib.locations.saddle import Saddle
def generate_MultiPoint(x, y, xSpan, ySpan,
                        datamap, elevation,
                        excludeBGPC=None):
    """
    Generate a rectangular MultiPoint, with the ability to exclude
    points.

    :param x: upper x coordinate
    :param y: upper y coordinate
    :param xSpan: span on x axis
    :param ySpan: span on y axis
    :param datamap: :class:`Datamap`
    :param elevation: elevation
    :param excludeBGPC: [BaseGridPointContainer, BaseGridPointContainer...]
     points to remove from MultiPoint. Defaults to no exclusions.
    :return: :class:`MultiPoint`
    """
    # Fix: the default used to be a mutable `[]`, which is shared across
    # calls; use None as the sentinel instead.
    if excludeBGPC is None:
        excludeBGPC = []
    # Build the full rectangle of (x, y) tuples.
    mpBlock = [(xx, yy)
               for xx in range(x, x + xSpan)
               for yy in range(y, y + ySpan)]
    mp = MultiPoint(mpBlock, elevation, datamap)
    # Carve out the excluded points (the caller's "islands").
    for excluded in excludeBGPC:
        for point in excluded.points:
            mp.points.remove(point.to_tuple())
    return mp
def generate_multipoint_saddle(x, y, xSpan, ySpan,
                               datamap, elevation,
                               islands=None,
                               perimeterHighNeighborhoods=1):
    """
    Generate a rectangular MultiPoint Saddle, with the ability to exclude
    points (islands), and generate highPerimeterNeighborhoods on the
    Perimeter.

    :param x: upper x coordinate
    :param y: upper y coordinate
    :param xSpan: span on x axis
    :param ySpan: span on y axis
    :param datamap: :class:`Datamap`
    :param elevation: elevation
    :param islands: [BaseGridPointContainer, BaseGridPointContainer...]
     islands to remove from multipoint. Islands will be elevation of mp +1.
     DO NOT MAKE AN ISLAND MORE THAN 2 POINTS WIDE. This function is
     not designed to be smart in any way. Defaults to no islands.
    :param perimeterHighNeighborhoods: number of perimeter highPerimeterNeighborhoods to make up.
    :return: :class:`Saddle`
    """
    # Fix: the default used to be a mutable `[]`, which is shared across
    # calls; use None as the sentinel instead.
    if islands is None:
        islands = []
    mp = generate_MultiPoint(x, y, xSpan, ySpan,
                             datamap, elevation,
                             excludeBGPC=islands)
    saddle = Saddle(x, y, elevation)
    saddle.multipoint = mp
    # Each island becomes a group of grid points one unit above the saddle.
    islandGPCs = [
        [(islandPoint.x, islandPoint.y, elevation + 1)
         for islandPoint in island]
        for island in islands
    ]
    # Dumb highPerimeterNeighborhood generator. One point along the y axis.
    # Since this is for testing, make sure not to set
    # `perimeterHighNeighborhoods` to more than the ySpan + 2.
    highPerimeter = [
        [(x - 1, y - 1 + idx, elevation + 1)]
        for idx in range(perimeterHighNeighborhoods)
    ]
    saddle.highPerimeterNeighborhoods = islandGPCs + highPerimeter
    return saddle
| 37.435294 | 97 | 0.638906 |
7a1a923122febb983836d91f5d22acb278da2fa2 | 3,547 | py | Python | deepobs/pytorch/datasets/cifar100.py | H0merJayS1mpson/deepobscustom | e85816ce42466326dac18841c58b79f87a4a1a7c | [
"MIT"
] | 7 | 2019-09-06T04:51:14.000Z | 2020-05-12T09:05:47.000Z | deepobs/pytorch/datasets/cifar100.py | H0merJayS1mpson/deepobscustom | e85816ce42466326dac18841c58b79f87a4a1a7c | [
"MIT"
] | 16 | 2019-09-06T10:58:31.000Z | 2020-07-08T09:22:06.000Z | deepobs/pytorch/datasets/cifar100.py | H0merJayS1mpson/deepobscustom | e85816ce42466326dac18841c58b79f87a4a1a7c | [
"MIT"
] | 5 | 2019-07-24T14:20:15.000Z | 2020-10-14T13:14:08.000Z | # -*- coding: utf-8 -*-
"""CIFAR-100 DeepOBS dataset."""
from . import dataset
from deepobs import config
from torch.utils import data as dat
from torchvision import datasets
from torchvision import transforms
from .datasets_utils import train_eval_sampler
# Augmented training pipeline: pad by 2 px then random-crop back to 32x32,
# random horizontal flip, and color jitter, followed by normalization with
# per-channel mean/std constants (presumably computed on the CIFAR-100
# training set — TODO confirm).
training_transform_augmented = transforms.Compose([
    transforms.Pad(padding=2),
    transforms.RandomCrop(size=(32, 32)),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=63. / 255., saturation=[0.5, 1.5], contrast=[0.2, 1.8]),
    transforms.ToTensor(),
    transforms.Normalize((0.50707516, 0.48654887, 0.44091784), (0.26733429, 0.25643846, 0.27615047))
])
# Non-augmented pipeline (same normalization constants, no augmentation);
# used for training when augmentation is disabled and for validation/test.
training_transform_not_augmented = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.50707516, 0.48654887, 0.44091784), (0.26733429, 0.25643846, 0.27615047))
])
class cifar100(dataset.DataSet):
    """DeepOBS data set class for the `CIFAR-100\
    <https://www.cs.toronto.edu/~kriz/cifar.html>`_ data set.

    Args:
        batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
            is not a divider of the dataset size (``50 000`` for train, ``10 000``
            for test) the remainder is dropped in each epoch (after shuffling).
        data_augmentation (bool): If ``True`` some data augmentation operations
            (random crop window, horizontal flipping, lighting augmentation) are
            applied to the training data (but not the test data).
        train_eval_size (int): Size of the train eval data set.
            Defaults to ``10 000`` the size of the test set.
    """

    def __init__(self,
                 batch_size,
                 data_augmentation=True,
                 train_eval_size=10000):
        """Create a new CIFAR-100 instance; see the class docstring for the
        meaning of the arguments."""
        # Attributes must be set before the superclass constructor runs,
        # since it drives the dataloader construction.
        self._name = "cifar100"
        self._data_augmentation = data_augmentation
        self._train_eval_size = train_eval_size
        super(cifar100, self).__init__(batch_size)

    def _make_train_and_valid_dataloader(self):
        # Augmentation applies to the training split only; the validation
        # split always uses the plain (normalize-only) transform.
        train_transform = (training_transform_augmented
                           if self._data_augmentation
                           else training_transform_not_augmented)
        train_dataset = datasets.CIFAR100(
            root=config.get_data_dir(), train=True, download=True,
            transform=train_transform)
        valid_dataset = datasets.CIFAR100(
            root=config.get_data_dir(), train=True, download=True,
            transform=training_transform_not_augmented)
        return self._make_train_and_valid_dataloader_helper(
            train_dataset, valid_dataset)

    def _make_test_dataloader(self):
        # Test data is never augmented.
        test_dataset = datasets.CIFAR100(
            root=config.get_data_dir(), train=False, download=True,
            transform=training_transform_not_augmented)
        return self._make_dataloader(test_dataset, sampler=None)
| 44.898734 | 140 | 0.695799 |
e59ff302c9dc9d270bc1c1463d520c438ab768a5 | 2,792 | py | Python | test/bot_test.py | tienhien1102/neu-chatbot | f2a792b67a04ccfe5aa0b84c373a5918bb700ca6 | [
"MIT"
] | null | null | null | test/bot_test.py | tienhien1102/neu-chatbot | f2a792b67a04ccfe5aa0b84c373a5918bb700ca6 | [
"MIT"
] | null | null | null | test/bot_test.py | tienhien1102/neu-chatbot | f2a792b67a04ccfe5aa0b84c373a5918bb700ca6 | [
"MIT"
] | null | null | null | import os
from pymessenger.bot import Bot
from pymessenger import Element, Button
TOKEN = os.environ.get('EAAGQpPkuP5YBACDRuZBc1uS2DMn0aewRfuidgZBtuT78bf87YpFw3jwaAaBeI4JcoxcyKmgcshF80uJrAaI2BQLBAz9r4gZAT9k2PGbKaUbDQeYDsth1xOGc4IwlKt6rkFipKT6CwXJCPvH7gl80irRNQoUTn52K0NHKBBaEwZDZD')
APP_SECRET = os.environ.get('483d04288562e56de7501c4a9cda253e')
bot = Bot(TOKEN, app_secret=APP_SECRET)
recipient_id = os.environ.get('RECIPIENT_ID')
def test_wrong_format_message():
result = bot.send_text_message(recipient_id, {'text': "its a test"})
assert type(result) is dict
assert result.get('message_id') is None
def test_text_message():
result = bot.send_text_message(recipient_id, "test")
assert type(result) is dict
assert result.get('message_id') is not None
assert result.get('recipient_id') is not None
def test_elements():
image_url = 'https://lh4.googleusercontent.com/-dZ2LhrpNpxs/AAAAAAAAAAI/AAAAAAAA1os/qrf-VeTVJrg/s0-c-k-no-ns/photo.jpg'
elements = []
element = Element(title="Arsenal", image_url=image_url, subtitle="Click to go to Arsenal website.",
item_url="http://arsenal.com")
elements.append(element)
result = bot.send_generic_message(recipient_id, elements)
assert type(result) is dict
assert result.get('message_id') is not None
assert result.get('recipient_id') is not None
def test_image_url():
image_url = 'https://lh4.googleusercontent.com/-dZ2LhrpNpxs/AAAAAAAAAAI/AAAAAAAA1os/qrf-VeTVJrg/s0-c-k-no-ns/photo.jpg'
result = bot.send_image_url(recipient_id, image_url)
assert type(result) is dict
assert result.get('message_id') is not None
assert result.get('recipient_id') is not None
def test_image_gif_url():
image_url = 'https://media.giphy.com/media/rl0FOxdz7CcxO/giphy.gif'
result = bot.send_image_url(recipient_id, image_url)
assert type(result) is dict
assert result.get('message_id') is not None
assert result.get('recipient_id') is not None
def test_button_message():
buttons = []
button = Button(title='Arsenal', type='web_url', url='http://arsenal.com')
buttons.append(button)
button = Button(title='Other', type='postback', payload='other')
buttons.append(button)
text = 'Select'
result = bot.send_button_message(recipient_id, text, buttons)
assert type(result) is dict
assert result.get('message_id') is not None
assert result.get('recipient_id') is not None
def test_fields_blank():
user_profile = bot.get_user_info(recipient_id)
assert user_profile is not None
def test_fields():
fields = ['first_name', 'last_name']
user_profile = bot.get_user_info(recipient_id, fields=fields)
assert user_profile is not None
assert len(user_profile.keys()) == len(fields)
| 36.25974 | 200 | 0.738897 |
2382488e875478320b273a8ef859effae260c1f6 | 23,150 | py | Python | lib/modeling/detector.py | skokec/detectron-villard | 9e420bf3fb75a8f06f6e3fd970fc2600d8969d10 | [
"Apache-2.0"
] | 27 | 2019-05-03T14:24:51.000Z | 2022-03-25T13:13:56.000Z | lib/modeling/detector.py | skokec/detectron-villard | 9e420bf3fb75a8f06f6e3fd970fc2600d8969d10 | [
"Apache-2.0"
] | 5 | 2019-10-03T06:28:50.000Z | 2021-11-22T12:16:31.000Z | lib/modeling/detector.py | skokec/detectron-villard | 9e420bf3fb75a8f06f6e3fd970fc2600d8969d10 | [
"Apache-2.0"
] | 16 | 2019-05-10T09:46:40.000Z | 2022-03-18T02:12:41.000Z | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Defines DetectionModelHelper, the class that represents a Detectron model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import logging
from caffe2.python import cnn
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
from core.config import cfg
from ops.collect_and_distribute_fpn_rpn_proposals \
import CollectAndDistributeFpnRpnProposalsOp
from ops.generate_proposal_labels import GenerateProposalLabelsOp
from ops.generate_proposal_labels import GenerateHardProposalLabelsOp
from ops.generate_proposals import GenerateProposalsOp
from utils import lr_policy
import roi_data.fast_rcnn
import utils.c2 as c2_utils
logger = logging.getLogger(__name__)
class DetectionModelHelper(cnn.CNNModelHelper):
def __init__(self, **kwargs):
# Handle args specific to the DetectionModelHelper, others pass through
# to CNNModelHelper
self.train = kwargs.get('train', False)
self.num_classes = kwargs.get('num_classes', -1)
assert self.num_classes > 0, 'num_classes must be > 0'
for k in ('train', 'num_classes'):
if k in kwargs:
del kwargs[k]
kwargs['order'] = 'NCHW'
# Defensively set cudnn_exhaustive_search to False in case the default
# changes in CNNModelHelper. The detection code uses variable size
# inputs that might not play nicely with cudnn_exhaustive_search.
kwargs['cudnn_exhaustive_search'] = False
super(DetectionModelHelper, self).__init__(**kwargs)
self.roi_data_loader = None
self.losses = []
self.metrics = []
self.do_not_update_params = [] # Param on this list are not updated
self.net.Proto().type = cfg.MODEL.EXECUTION_TYPE
self.net.Proto().num_workers = cfg.NUM_GPUS * 4
self.prev_use_cudnn = self.use_cudnn
def TrainableParams(self, gpu_id=-1):
"""Get the blob names for all trainable parameters, possibly filtered by
GPU id.
"""
return [
p for p in self.params
if (
p in self.param_to_grad and # p has a gradient
p not in self.do_not_update_params and # not on the blacklist
(gpu_id == -1 or # filter for gpu assignment, if gpu_id set
str(p).find('gpu_{}'.format(gpu_id)) == 0)
)]
def AffineChannel(self, blob_in, blob_out, share_with=None, inplace=False):
"""Affine transformation to replace BN in networks where BN cannot be
used (e.g., because the minibatch size is too small).
The AffineChannel parameters may be shared with another AffineChannelOp
by specifying its blob name (excluding the '_{s,b}' suffix) in the
share_with argument. The operations can be done in place to save memory.
"""
blob_out = blob_out or self.net.NextName()
is_not_sharing = share_with is None
param_prefix = blob_out if is_not_sharing else share_with
scale = core.ScopedBlobReference(
param_prefix + '_s', self.param_init_net)
bias = core.ScopedBlobReference(
param_prefix + '_b', self.param_init_net)
if is_not_sharing:
self.net.Proto().external_input.extend([str(scale), str(bias)])
self.params.extend([scale, bias])
self.weights.append(scale)
self.biases.append(bias)
if inplace:
return self.net.AffineChannel([blob_in, scale, bias], blob_in)
else:
return self.net.AffineChannel([blob_in, scale, bias], blob_out)
def GenerateProposals(self, blobs_in, blobs_out, anchors, spatial_scale):
"""Op for generating RPN porposals.
blobs_in:
- 'rpn_cls_probs': 4D tensor of shape (N, A, H, W), where N is the
number of minibatch images, A is the number of anchors per
locations, and (H, W) is the spatial size of the prediction grid.
Each value represents a "probability of object" rating in [0, 1].
- 'rpn_bbox_pred': 4D tensor of shape (N, 4 * A, H, W) of predicted
deltas for transformation anchor boxes into RPN proposals.
- 'im_info': 2D tensor of shape (N, 3) where the three columns encode
the input image's [height, width, scale]. Height and width are
for the input to the network, not the original image; scale is the
scale factor used to scale the original image to the network input
size.
blobs_out:
- 'rpn_rois': 2D tensor of shape (R, 5), for R RPN proposals where the
five columns encode [batch ind, x1, y1, x2, y2]. The boxes are
w.r.t. the network input, which is a *scaled* version of the
original image; these proposals must be scaled by 1 / scale (where
scale comes from im_info; see above) to transform it back to the
original input image coordinate system.
- 'rpn_roi_probs': 1D tensor of objectness probability scores
(extracted from rpn_cls_probs; see above).
"""
name = 'GenerateProposalsOp:' + ','.join([str(b) for b in blobs_in])
self.net.Python(
GenerateProposalsOp(anchors, spatial_scale, self.train).forward
)(blobs_in, blobs_out, name=name)
return blobs_out
def GenerateProposalLabels(self, blobs_in, ohem = False):
"""Op for generating training labels for RPN proposals. This is used
when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
Faster R-CNN training).
blobs_in:
- 'rpn_rois': 2D tensor of RPN proposals output by GenerateProposals
- 'roidb': roidb entries that will be labeled
- 'im_info': See GenerateProposals doc.
blobs_out:
- (variable set of blobs): returns whatever blobs are required for
training the model. It does this by querying the data loader for
the list of blobs that are needed.
"""
name = 'GenerateProposalLabelsOp:' + ','.join(
[str(b) for b in blobs_in]
)
# The list of blobs is not known before run-time because it depends on
# the specific model being trained. Query the data loader to get the
# list of output blob names.
blobs_out = roi_data.fast_rcnn.get_fast_rcnn_blob_names(
is_training=self.train, ohem=ohem
)
blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]
self.net.Python(GenerateProposalLabelsOp(ohem=ohem).forward)(
blobs_in, blobs_out, name=name
)
return blobs_out
def GenerateHardProposalLabels(self, blobs_in):
"""Op for generating training labels for RPN proposals. This is used
when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
Faster R-CNN training).
blobs_in:
- 'ohem_rois'
- 'ohem_cls_score': 2D tensor for classification loss of all proposals
- (variable set of blobs): blobs from GenerateProposalLabels are
inputted here and filtered base on classi
blobs_out:
- (variable set of blobs): returns whatever blobs are required for
training the model. It does this by querying the data loader for
the list of blobs that are needed.
"""
# This list of input blobs depends on list of output blobs for
# GenerateProposalLabels + 'ohem_loss_cls' (from blobs_in)
blobs_in = blobs_in + roi_data.fast_rcnn.get_fast_rcnn_blob_names(
is_training=self.train, ohem=True
)
name = 'GenerateHardProposalLabelsOp:' + ','.join(
[str(b) for b in blobs_in]
)
# The list of blobs is not known before run-time because it depends on
# the specific model being trained. Query the data loader to get the
# list of output blob names.
blobs_out = roi_data.fast_rcnn.get_fast_rcnn_blob_names(
is_training=self.train
)
blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]
self.net.Python(GenerateHardProposalLabelsOp().forward)(
blobs_in, blobs_out, name=name
)
return blobs_out
def CollectAndDistributeFpnRpnProposals(self, ohem = False):
"""Merge RPN proposals generated at multiple FPN levels and then
distribute those proposals to their appropriate FPN levels. An anchor
at one FPN level may predict an RoI that will map to another level,
hence the need to redistribute the proposals.
This function assumes standard blob names for input and output blobs.
Input blobs: [rpn_rois_fpn<min>, ..., rpn_rois_fpn<max>,
rpn_roi_probs_fpn<min>, ..., rpn_roi_probs_fpn<max>]
- rpn_rois_fpn<i> are the RPN proposals for FPN level i; see rpn_rois
documentation from GenerateProposals.
- rpn_roi_probs_fpn<i> are the RPN objectness probabilities for FPN
level i; see rpn_roi_probs documentation from GenerateProposals.
If used during training, then the input blobs will also include:
[roidb, im_info] (see GenerateProposalLabels).
Output blobs: [rois_fpn<min>, ..., rois_rpn<max>, rois,
rois_idx_restore]
- rois_fpn<i> are the RPN proposals for FPN level i
- rois_idx_restore is a permutation on the concatenation of all
rois_fpn<i>, i=min...max, such that when applied the RPN RoIs are
restored to their original order in the input blobs.
If used during training, then the output blobs will also include:
[labels, bbox_targets, bbox_inside_weights, bbox_outside_weights].
"""
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
# Prepare input blobs
rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
score_names = [
'rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)
]
blobs_in = rois_names + score_names
if self.train:
blobs_in += ['roidb', 'im_info']
blobs_in = [core.ScopedBlobReference(b) for b in blobs_in]
name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(
[str(b) for b in blobs_in]
)
# Prepare output blobs
blobs_out = roi_data.fast_rcnn.get_fast_rcnn_blob_names(
is_training=self.train, ohem=ohem
)
blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]
outputs = self.net.Python(
CollectAndDistributeFpnRpnProposalsOp(self.train, ohem=ohem).forward
)(blobs_in, blobs_out, name=name)
return outputs
def DropoutIfTraining(self, blob_in, dropout_rate):
"""Add dropout to blob_in if the model is in training mode and
dropout_rate is > 0."""
blob_out = blob_in
if self.train and dropout_rate > 0:
blob_out = self.Dropout(
blob_in, blob_in, ratio=dropout_rate, is_test=False
)
return blob_out
    def RoIFeatureTransform(
        self,
        blobs_in,
        blob_out,
        blob_rois='rois',
        method='RoIPoolF',
        resolution=7,
        spatial_scale=1. / 16.,
        sampling_ratio=0
    ):
        """Add the specified RoI pooling method. The sampling_ratio argument
        is supported for some, but not all, RoI transform methods.

        RoIFeatureTransform abstracts away:
          - Use of FPN or not (a list of per-level blobs triggers the FPN
            path; a single blob triggers the single-level path)
          - Specifics of the transform method ('RoIPoolF' or 'RoIAlign')

        In the FPN case, blobs_in and spatial_scale are expected to be
        ordered from the coarsest to the finest level (i.e., reversed with
        respect to level number), and per-level outputs are concatenated
        and then permuted back to the data loader's RoI order.
        """
        assert method in {'RoIPoolF', 'RoIAlign'}, \
            'Unknown pooling method: {}'.format(method)
        # Only RoIPoolF produces an argmax side output (used for backprop).
        has_argmax = (method == 'RoIPoolF')
        if isinstance(blobs_in, list):
            # FPN case: add RoIFeatureTransform to each FPN level
            k_max = cfg.FPN.ROI_MAX_LEVEL  # coarsest level of pyramid
            k_min = cfg.FPN.ROI_MIN_LEVEL  # finest level of pyramid
            assert len(blobs_in) == k_max - k_min + 1
            bl_out_list = []
            for lvl in range(k_min, k_max + 1):
                bl_in = blobs_in[k_max - lvl]  # blobs_in is in reversed order
                sc = spatial_scale[k_max - lvl]  # in reversed order
                bl_rois = blob_rois + '_fpn' + str(lvl)
                bl_out = blob_out + '_fpn' + str(lvl)
                bl_out_list.append(bl_out)
                bl_argmax = ['_argmax_' + bl_out] if has_argmax else []
                # Dispatch to the pooling op by name ('RoIPoolF'/'RoIAlign').
                self.net.__getattr__(method)(
                    [bl_in, bl_rois], [bl_out] + bl_argmax,
                    pooled_w=resolution,
                    pooled_h=resolution,
                    spatial_scale=sc,
                    sampling_ratio=sampling_ratio
                )
            # The pooled features from all levels are concatenated along the
            # batch dimension into a single 4D tensor.
            xform_shuffled, _ = self.net.Concat(
                bl_out_list, [blob_out + '_shuffled', '_concat_' + blob_out],
                axis=0
            )
            # Unshuffle to match rois from dataloader
            restore_bl = blob_rois + '_idx_restore_int32'
            xform_out = self.net.BatchPermutation(
                [xform_shuffled, restore_bl], blob_out
            )
        else:
            # Single feature level
            bl_argmax = ['_argmax_' + blob_out] if has_argmax else []
            # sampling_ratio is ignored for RoIPoolF
            xform_out = self.net.__getattr__(method)(
                [blobs_in, blob_rois], [blob_out] + bl_argmax,
                pooled_w=resolution,
                pooled_h=resolution,
                spatial_scale=spatial_scale,
                sampling_ratio=sampling_ratio
            )
        # Only return the first blob (the transformed features)
        return xform_out
def ConvShared(
self,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight=None,
bias=None,
**kwargs
):
"""Add conv op that shares weights and/or biases with another conv op.
"""
use_bias = (
False if ('no_bias' in kwargs and kwargs['no_bias']) else True
)
if self.use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = self.cudnn_exhaustive_search
if self.ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = self.ws_nbytes_limit
if use_bias:
blobs_in = [blob_in, weight, bias]
else:
blobs_in = [blob_in, weight]
if 'no_bias' in kwargs:
del kwargs['no_bias']
return self.net.Conv(
blobs_in, blob_out, kernel=kernel, order=self.order, **kwargs
)
    def FCShared(self, blob_in, blob_out, dim_in, dim_out, shared_blob_name, weight_init=None,
                 bias_init=None, WeightInitializer=None, BiasInitializer=None,
                 enable_tensor_core=False, float16_compute=False, **kwargs):
        """Add an FC op whose weight/bias params are named after
        ``shared_blob_name`` (suffixes '_w'/'_b') so that several FC ops can
        share the same parameters.

        Mirrors the caffe2 FC helper: initializers are resolved from the
        (WeightInitializer/weight_init, BiasInitializer/bias_init) pairs
        with Xavier/Constant defaults, and replaced by ExternalInitializer
        when the model does not own param initialization.
        """
        WeightInitializer = initializers.update_initializer(
            WeightInitializer, weight_init, ("XavierFill", {})
        )
        BiasInitializer = initializers.update_initializer(
            BiasInitializer, bias_init, ("ConstantFill", {})
        )
        # When params come from a pre-trained snapshot, do not initialize
        # them here.
        if not self.init_params:
            WeightInitializer = initializers.ExternalInitializer()
            BiasInitializer = initializers.ExternalInitializer()
        blob_out = blob_out or self.net.NextName()
        bias_tags = [ParameterTags.BIAS]
        # 'freeze_bias' marks the bias as a computed (non-learned) param.
        if 'freeze_bias' in kwargs:
            bias_tags.append(ParameterTags.COMPUTED_PARAM)
        # create_param reuses an existing param of the same name, which is
        # what makes the sharing work.
        weight = self.create_param(
            param_name=shared_blob_name + '_w',
            shape=[dim_out, dim_in],
            initializer=WeightInitializer,
            tags=ParameterTags.WEIGHT
        )
        bias = self.create_param(
            param_name=shared_blob_name + '_b',
            shape=[dim_out, ],
            initializer=BiasInitializer,
            tags=bias_tags
        )
        # enable TensorCore by setting appropriate engine
        if enable_tensor_core:
            kwargs['engine'] = 'TENSORCORE'
        # Enable float 16 compute kernel (relevant for CUDA)
        if float16_compute:
            kwargs['float16_compute'] = True
        return self.net.FC([blob_in, weight, bias], blob_out, **kwargs)
    def BilinearInterpolation(
        self, blob_in, blob_out, dim_in, dim_out, up_scale
    ):
        """Bilinear interpolation in space of scale.

        Takes input of NxKxHxW and outputs NxKx(sH)x(sW), where s:= up_scale

        Implemented as a ConvTranspose with a fixed (non-learned) bilinear
        kernel; dim_in must equal dim_out and up_scale must be even.

        Adapted from the CVPR'15 FCN code.
        See: https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
        """
        assert dim_in == dim_out
        assert up_scale % 2 == 0, 'Scale should be even'

        def upsample_filt(size):
            # Build a size x size bilinear upsampling filter (peak at the
            # center, falling off linearly toward the edges).
            factor = (size + 1) // 2
            if size % 2 == 1:
                center = factor - 1
            else:
                center = factor - 0.5
            og = np.ogrid[:size, :size]
            return ((1 - abs(og[0] - center) / factor) *
                    (1 - abs(og[1] - center) / factor))

        kernel_size = up_scale * 2
        bil_filt = upsample_filt(kernel_size)

        # Place the bilinear filter on the diagonal (channel i upsamples to
        # channel i); cross-channel weights stay zero.
        kernel = np.zeros(
            (dim_in, dim_out, kernel_size, kernel_size), dtype=np.float32
        )
        kernel[range(dim_out), range(dim_in), :, :] = bil_filt

        blob = self.ConvTranspose(
            blob_in,
            blob_out,
            dim_in,
            dim_out,
            kernel_size,
            stride=int(up_scale),
            pad=int(up_scale / 2),
            weight_init=('GivenTensorFill', {'values': kernel}),
            bias_init=('ConstantFill', {'value': 0.})
        )
        # The interpolation kernel is fixed: exclude its weight and bias
        # from parameter updates.
        self.do_not_update_params.append(self.weights[-1])
        self.do_not_update_params.append(self.biases[-1])
        return blob
def ConvAffine( # args in the same order of Conv()
self, blob_in, prefix, dim_in, dim_out, kernel, stride, pad,
group=1, dilation=1,
weight_init=None,
bias_init=None,
suffix='_bn',
inplace=False
):
"""ConvAffine adds a Conv op followed by a AffineChannel op (which
replaces BN during fine tuning).
"""
conv_blob = self.Conv(
blob_in,
prefix,
dim_in,
dim_out,
kernel,
stride=stride,
pad=pad,
group=group,
dilation=dilation,
weight_init=weight_init,
bias_init=bias_init,
no_bias=1
)
blob_out = self.AffineChannel(
conv_blob, prefix + suffix, inplace=inplace
)
return blob_out
def DisableCudnn(self):
self.prev_use_cudnn = self.use_cudnn
self.use_cudnn = False
def RestorePreviousUseCudnn(self):
prev_use_cudnn = self.use_cudnn
self.use_cudnn = self.prev_use_cudnn
self.prev_use_cudnn = prev_use_cudnn
    def UpdateWorkspaceLr(self, cur_iter):
        """Updates the model's current learning rate and the workspace (learning
        rate and update history/momentum blobs).

        Returns the learning rate for iteration `cur_iter` as computed by
        lr_policy (written into the workspace only when it changed).
        """
        # The workspace is the one source of truth for the lr
        # The lr is always the same on all GPUs
        cur_lr = workspace.FetchBlob('gpu_0/lr')[0]
        new_lr = lr_policy.get_lr_at_iter(cur_iter)
        # There are no type conversions between the lr in Python and the lr in
        # the GPU (both are float32), so exact comparision is ok
        if cur_lr != new_lr:
            ratio = _get_lr_change_ratio(cur_lr, new_lr)
            # Only log lr changes that exceed the configured threshold ratio.
            if ratio > cfg.SOLVER.LOG_LR_CHANGE_THRESHOLD:
                logger.info(
                    'Changing learning rate {:.6f} -> {:.6f} at iter {:d}'.
                    format(cur_lr, new_lr, cur_iter))
            self._SetNewLr(cur_lr, new_lr)
        return new_lr
    def _SetNewLr(self, cur_lr, new_lr):
        """Do the actual work of updating the model and workspace blobs.
        """
        # Write the new lr into every GPU's lr blob (same value everywhere).
        for i in range(cfg.NUM_GPUS):
            with c2_utils.CudaScope(i):
                workspace.FeedBlob(
                    'gpu_{}/lr'.format(i), np.array([new_lr], dtype=np.float32))
        ratio = _get_lr_change_ratio(cur_lr, new_lr)
        # For sufficiently large lr changes (and a non-degenerate old lr),
        # rescale the momentum buffers so the update history stays in scale
        # with lr * grad; see _CorrectMomentum.
        if cfg.SOLVER.SCALE_MOMENTUM and cur_lr > 1e-7 and \
                ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD:
            self._CorrectMomentum(new_lr / cur_lr)
    def _CorrectMomentum(self, correction):
        """The MomentumSGDUpdate op implements the update V as
            V := mu * V + lr * grad,
        where mu is the momentum factor, lr is the learning rate, and grad is
        the stochastic gradient. Since V is not defined independently of the
        learning rate (as it should ideally be), when the learning rate is
        changed we should scale the update history V in order to make it
        compatible in scale with lr * grad.
        """
        logger.info(
            'Scaling update history by {:.6f} (new lr / old lr)'.
            format(correction))
        # Scale every trainable parameter's momentum blob in place, per GPU.
        for i in range(cfg.NUM_GPUS):
            with c2_utils.CudaScope(i):
                for param in self.TrainableParams(gpu_id=i):
                    op = core.CreateOperator(
                        'Scale', [param + '_momentum'], [param + '_momentum'],
                        scale=correction)
                    workspace.RunOperatorOnce(op)
def AddLosses(self, losses):
if not isinstance(losses, list):
losses = [losses]
# Conversion to str allows losses to include BlobReferences
losses = [c2_utils.UnscopeName(str(l)) for l in losses]
self.losses = list(set(self.losses + losses))
def AddMetrics(self, metrics):
if not isinstance(metrics, list):
metrics = [metrics]
self.metrics = list(set(self.metrics + metrics))
def _get_lr_change_ratio(cur_lr, new_lr):
eps = 1e-10
ratio = np.max(
(new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps)))
)
return ratio
| 39.982729 | 94 | 0.613002 |
7e990b880e414d1e950311cd9cf3fe7f5a65376a | 11,787 | py | Python | tests/integration/ec2/cloudwatch/test_connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 15 | 2015-03-25T05:24:11.000Z | 2021-12-18T04:24:06.000Z | tests/integration/ec2/cloudwatch/test_connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2021-04-30T21:27:50.000Z | 2021-04-30T21:27:50.000Z | tests/integration/ec2/cloudwatch/test_connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 10 | 2015-04-26T17:56:37.000Z | 2020-09-24T14:01:53.000Z | # Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Initial, and very limited, unit tests for CloudWatchConnection.
"""
import datetime
import time
import unittest
from boto.ec2.cloudwatch import CloudWatchConnection
from boto.ec2.cloudwatch.metric import Metric
# HTTP response body for CloudWatchConnection.describe_alarms
DESCRIBE_ALARMS_BODY = """<DescribeAlarmsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<DescribeAlarmsResult>
<NextToken>mynexttoken</NextToken>
<MetricAlarms>
<member>
<StateUpdatedTimestamp>2011-11-18T23:43:59.111Z</StateUpdatedTimestamp>
<InsufficientDataActions/>
<StateReasonData>{"version":"1.0","queryDate":"2011-11-18T23:43:59.089+0000","startDate":"2011-11-18T23:30:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0,null,null,null,null,null,null,null,null,null,1.0],"threshold":1.0}</StateReasonData>
<AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:FancyAlarm</AlarmArn>
<AlarmConfigurationUpdatedTimestamp>2011-11-18T23:43:58.489Z</AlarmConfigurationUpdatedTimestamp>
<AlarmName>FancyAlarm</AlarmName>
<StateValue>OK</StateValue>
<Period>60</Period>
<OKActions/>
<ActionsEnabled>true</ActionsEnabled>
<Namespace>AcmeCo/Cronjobs</Namespace>
<EvaluationPeriods>15</EvaluationPeriods>
<Threshold>1.0</Threshold>
<Statistic>Maximum</Statistic>
<AlarmActions>
<member>arn:aws:sns:us-east-1:1234:Alerts</member>
</AlarmActions>
<StateReason>Threshold Crossed: 2 datapoints were not less than the threshold (1.0). The most recent datapoints: [1.0, 1.0].</StateReason>
<Dimensions>
<member>
<Name>Job</Name>
<Value>ANiceCronJob</Value>
</member>
</Dimensions>
<ComparisonOperator>LessThanThreshold</ComparisonOperator>
<MetricName>Success</MetricName>
</member>
<member>
<StateUpdatedTimestamp>2011-11-19T08:09:20.655Z</StateUpdatedTimestamp>
<InsufficientDataActions/>
<StateReasonData>{"version":"1.0","queryDate":"2011-11-19T08:09:20.633+0000","startDate":"2011-11-19T08:07:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0],"threshold":1.0}</StateReasonData>
<AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:SuprtFancyAlarm</AlarmArn>
<AlarmConfigurationUpdatedTimestamp>2011-11-19T16:20:19.687Z</AlarmConfigurationUpdatedTimestamp>
<AlarmName>SuperFancyAlarm</AlarmName>
<StateValue>OK</StateValue>
<Period>60</Period>
<OKActions/>
<ActionsEnabled>true</ActionsEnabled>
<Namespace>AcmeCo/CronJobs</Namespace>
<EvaluationPeriods>60</EvaluationPeriods>
<Threshold>1.0</Threshold>
<Statistic>Maximum</Statistic>
<AlarmActions>
<member>arn:aws:sns:us-east-1:1234:alerts</member>
</AlarmActions>
<StateReason>Threshold Crossed: 1 datapoint (1.0) was not less than the threshold (1.0).</StateReason>
<Dimensions>
<member>
<Name>Job</Name>
<Value>ABadCronJob</Value>
</member>
</Dimensions>
<ComparisonOperator>GreaterThanThreshold</ComparisonOperator>
<MetricName>Success</MetricName>
</member>
</MetricAlarms>
</DescribeAlarmsResult>
<ResponseMetadata>
<RequestId>f621311-1463-11e1-95c3-312389123</RequestId>
</ResponseMetadata>
</DescribeAlarmsResponse>"""
class CloudWatchConnectionTest(unittest.TestCase):
    """Tests for boto's CloudWatchConnection.

    The ``test_build_*`` cases are offline parameter-marshalling tests;
    ``test_get_metric_statistics`` and ``test_put_metric_data`` talk to the
    real CloudWatch service and require valid AWS credentials.
    """
    # Marker consumed by the test runner to select EC2-dependent tests.
    ec2 = True
    def test_build_list_params(self):
        """A list expands into numbered ThingName<i> query parameters."""
        c = CloudWatchConnection()
        params = {}
        c.build_list_params(
            params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
        expected_params = {
            'ThingName1': 'thing1',
            'ThingName2': 'thing2',
            'ThingName3': 'thing3'
        }
        self.assertEqual(params, expected_params)
    def test_build_put_params_one(self):
        """A single metric with one dimension marshals into member.1 keys."""
        c = CloudWatchConnection()
        params = {}
        c.build_put_params(params, name="N", value=1, dimensions={"D": "V"})
        expected_params = {
            'MetricData.member.1.MetricName': 'N',
            'MetricData.member.1.Value': 1,
            'MetricData.member.1.Dimensions.member.1.Name': 'D',
            'MetricData.member.1.Dimensions.member.1.Value': 'V',
        }
        self.assertEqual(params, expected_params)
    def test_build_put_params_multiple_metrics(self):
        """Parallel name/value lists share the single dimensions dict."""
        c = CloudWatchConnection()
        params = {}
        c.build_put_params(params, name=["N", "M"], value=[1, 2], dimensions={"D": "V"})
        expected_params = {
            'MetricData.member.1.MetricName': 'N',
            'MetricData.member.1.Value': 1,
            'MetricData.member.1.Dimensions.member.1.Name': 'D',
            'MetricData.member.1.Dimensions.member.1.Value': 'V',
            'MetricData.member.2.MetricName': 'M',
            'MetricData.member.2.Value': 2,
            'MetricData.member.2.Dimensions.member.1.Name': 'D',
            'MetricData.member.2.Dimensions.member.1.Value': 'V',
        }
        self.assertEqual(params, expected_params)
    def test_build_put_params_multiple_dimensions(self):
        """A list of dimension dicts pairs up with the list of values."""
        c = CloudWatchConnection()
        params = {}
        c.build_put_params(params, name="N", value=[1, 2], dimensions=[{"D": "V"}, {"D": "W"}])
        expected_params = {
            'MetricData.member.1.MetricName': 'N',
            'MetricData.member.1.Value': 1,
            'MetricData.member.1.Dimensions.member.1.Name': 'D',
            'MetricData.member.1.Dimensions.member.1.Value': 'V',
            'MetricData.member.2.MetricName': 'N',
            'MetricData.member.2.Value': 2,
            'MetricData.member.2.Dimensions.member.1.Name': 'D',
            'MetricData.member.2.Dimensions.member.1.Value': 'W',
        }
        self.assertEqual(params, expected_params)
    def test_build_put_params_multiple_parameter_dimension(self):
        """One metric with two dimensions emits member.1 and member.2 pairs.

        OrderedDict keeps the marshalling order deterministic for comparison.
        """
        from collections import OrderedDict
        self.maxDiff = None
        c = CloudWatchConnection()
        params = {}
        dimensions = [OrderedDict((("D1", "V"), ("D2", "W")))]
        c.build_put_params(params,
                           name="N",
                           value=[1],
                           dimensions=dimensions)
        expected_params = {
            'MetricData.member.1.MetricName': 'N',
            'MetricData.member.1.Value': 1,
            'MetricData.member.1.Dimensions.member.1.Name': 'D1',
            'MetricData.member.1.Dimensions.member.1.Value': 'V',
            'MetricData.member.1.Dimensions.member.2.Name': 'D2',
            'MetricData.member.1.Dimensions.member.2.Value': 'W',
        }
        self.assertEqual(params, expected_params)
    def test_build_get_params_multiple_parameter_dimension1(self):
        """build_dimension_param flattens an ordered two-key dimension dict."""
        from collections import OrderedDict
        self.maxDiff = None
        c = CloudWatchConnection()
        params = {}
        dimensions = OrderedDict((("D1", "V"), ("D2", "W")))
        c.build_dimension_param(dimensions, params)
        expected_params = {
            'Dimensions.member.1.Name': 'D1',
            'Dimensions.member.1.Value': 'V',
            'Dimensions.member.2.Name': 'D2',
            'Dimensions.member.2.Value': 'W',
        }
        self.assertEqual(params, expected_params)
    def test_build_get_params_multiple_parameter_dimension2(self):
        """List values fan out to one member each; None values emit Name only."""
        from collections import OrderedDict
        self.maxDiff = None
        c = CloudWatchConnection()
        params = {}
        dimensions = OrderedDict((("D1", ["V1", "V2"]), ("D2", "W"), ("D3", None)))
        c.build_dimension_param(dimensions, params)
        expected_params = {
            'Dimensions.member.1.Name': 'D1',
            'Dimensions.member.1.Value': 'V1',
            'Dimensions.member.2.Name': 'D1',
            'Dimensions.member.2.Value': 'V2',
            'Dimensions.member.3.Name': 'D2',
            'Dimensions.member.3.Value': 'W',
            'Dimensions.member.4.Name': 'D3',
        }
        self.assertEqual(params, expected_params)
    def test_build_put_params_invalid(self):
        """Mismatched name/value list lengths must raise.

        The original try/except used a bare `except:`, which also swallowed
        SystemExit/KeyboardInterrupt; assertRaises is stricter and clearer.
        """
        c = CloudWatchConnection()
        params = {}
        with self.assertRaises(Exception):
            c.build_put_params(params, name=["N", "M"], value=[1, 2, 3])
    def test_get_metric_statistics(self):
        """Live call: fetch two weeks of daily stats for the first metric."""
        c = CloudWatchConnection()
        m = c.list_metrics()[0]
        end = datetime.datetime.now()
        start = end - datetime.timedelta(hours=24*14)
        c.get_metric_statistics(
            3600*24, start, end, m.name, m.namespace, ['Average', 'Sum'])
    def test_put_metric_data(self):
        """Live call: publish one datapoint (write-only smoke test)."""
        c = CloudWatchConnection()
        now = datetime.datetime.now()
        name, namespace = 'unit-test-metric', 'boto-unit-test'
        c.put_metric_data(namespace, name, 5, now, 'Bytes')
        # Uncomment the following lines for a slower but more thorough
        # test. (Hurrah for eventual consistency...)
        #
        # metric = Metric(connection=c)
        # metric.name = name
        # metric.namespace = namespace
        # time.sleep(60)
        # l = metric.query(
        #     now - datetime.timedelta(seconds=60),
        #     datetime.datetime.now(),
        #     'Average')
        # assert l
        # for row in l:
        #     self.assertEqual(row['Unit'], 'Bytes')
        #     self.assertEqual(row['Average'], 5.0)
    def test_describe_alarms(self):
        """describe_alarms parses the canned XML into MetricAlarm objects."""
        c = CloudWatchConnection()
        # Stub out the HTTP layer so the canned DESCRIBE_ALARMS_BODY is
        # returned instead of a real service response.
        def make_request(*args, **kwargs):
            class Body(object):
                def __init__(self):
                    self.status = 200
                def read(self):
                    return DESCRIBE_ALARMS_BODY
            return Body()
        c.make_request = make_request
        alarms = c.describe_alarms()
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(alarms.next_token, 'mynexttoken')
        self.assertEqual(alarms[0].name, 'FancyAlarm')
        self.assertEqual(alarms[0].comparison, '<')
        self.assertEqual(alarms[0].dimensions, {u'Job': [u'ANiceCronJob']})
        self.assertEqual(alarms[1].name, 'SuperFancyAlarm')
        self.assertEqual(alarms[1].comparison, '>')
        self.assertEqual(alarms[1].dimensions, {u'Job': [u'ABadCronJob']})
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 42.399281 | 380 | 0.626792 |
bd5a9fa80be5e4d2ee85afebba27dc3f37b4f105 | 2,022 | py | Python | aliyun-python-sdk-ocs/aliyunsdkocs/request/v20150407/DescribeClusterNodesForInnerInstRequest.py | DataDog/aliyun-openapi-python-sdk | 5cbee29bce6416dd62f61f0c3786b1af6ea0d84f | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-ocs/aliyunsdkocs/request/v20150407/DescribeClusterNodesForInnerInstRequest.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ocs/aliyunsdkocs/request/v20150407/DescribeClusterNodesForInnerInstRequest.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeClusterNodesForInnerInstRequest(RpcRequest):
	"""RPC request object for OCS 'DescribeClusterNodesForInnerInst'
	(API version 2015-04-07). Generated-style boilerplate: each query
	parameter gets a get_/set_ accessor pair over the query-param dict.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'Ocs', '2015-04-07', 'DescribeClusterNodesForInnerInst')

	# -- OwnerId ---------------------------------------------------------
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)

	# -- ResourceOwnerAccount --------------------------------------------
	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	# -- ResourceOwnerId -------------------------------------------------
	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	# -- instanceId ------------------------------------------------------
	def get_instanceId(self):
		return self.get_query_params().get('instanceId')

	def set_instanceId(self,instanceId):
		self.add_query_param('instanceId',instanceId)

	# -- region ----------------------------------------------------------
	def get_region(self):
		return self.get_query_params().get('region')

	def set_region(self,region):
		self.add_query_param('region',region)

	# -- zoneId ----------------------------------------------------------
	def get_zoneId(self):
		return self.get_query_params().get('zoneId')

	def set_zoneId(self,zoneId):
		self.add_query_param('zoneId',zoneId)
c0577069bc367af1d143680bd10d4ba7bd9b126c | 4,794 | py | Python | tests/test_webapp.py | thiagolcmelo/dspreview | b20e7e3788f7d7cb7c44b17fc1c8e8a87c45f41e | [
"MIT"
] | null | null | null | tests/test_webapp.py | thiagolcmelo/dspreview | b20e7e3788f7d7cb7c44b17fc1c8e8a87c45f41e | [
"MIT"
] | null | null | null | tests/test_webapp.py | thiagolcmelo/dspreview | b20e7e3788f7d7cb7c44b17fc1c8e8a87c45f41e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# python standard
import sys
from datetime import datetime
sys.path.insert(0, "./src")
sys.path.insert(0, "./src/webapp")
# third-party imports
from flask_testing import TestCase
from flask import abort, url_for
# local imports
from utils.sql_helper import get_connection_strs
from webapp.app import create_app, db
from webapp.app.models import User, Classification
from webapp.app.models import DCMRaw, DCM, DSPRaw, DSP, Report
class TestBase(TestCase):
def create_app(self):
# pass in test configurations
config_name = 'testing'
app = create_app(config_name)
app.config.update(
SQLALCHEMY_DATABASE_URI=get_connection_strs().test_str
)
return app
def setUp(self):
# it needs to be here
"""
Will be called before every test
"""
db.create_all()
# create test admin user
admin = User(username="admin", password="admin2018", is_admin=True)
# create test non-admin user
normal_user = User(username="normal_user", password="test2018")
# create test for classification
classification = Classification(pattern=".*", brand="some brand",
sub_brand="some sub brand",
dsp="some dsp",
use_campaign_id=True,
use_campaign=False,
use_placement_id=False,
use_placement=True)
# create a test for dcm
dcm = DCM(date=datetime.now(), campaign_id=85989,
campaign="some campaign", placement_id=54786,
placement="some placement", impressions=87884.8,
clicks=874, reach=7581.5, brand="some brand",
sub_brand="some sub brand", dsp="some dsp")
# create a test for dcm raw
dcm_raw = DCMRaw(date=datetime.now(), campaign_id=85989,
campaign="some campaign", placement_id=54786,
placement="some placement", impressions=87884.8,
clicks=874, reach=7581.5)
# create a test for dsp
dsp = DSP(date=datetime.now(), campaign_id=85989,
campaign="some campaign", impressions=87884.8,
clicks=874, cost=7581.5, brand="some brand",
sub_brand="some sub brand", dsp="some dsp")
# create a test for dsp raw
dsp_raw = DSPRaw(date=datetime.now(), campaign_id=85989,
campaign="some campaign", impressions=87884.8,
clicks=874, cost=7581.5)
report = Report(date=datetime.now(), brand="some brand",
sub_brand="some sub brand", ad_campaign_id=89865,
ad_campaign="some campaign", dsp="some dsp",
dsp_campaign_id=87897, dsp_campaign="some campaign",
ad_impressions=783546.8, ad_clicks=578698,
ad_reach=879.9, dsp_impressions=7987.9,
dsp_clicks=7897, dsp_cost=578979.8)
# save users to database
db.session.add(admin)
db.session.add(normal_user)
db.session.add(classification)
db.session.add(dcm)
db.session.add(dcm_raw)
db.session.add(dsp)
db.session.add(dsp_raw)
db.session.add(report)
db.session.commit()
def tearDown(self):
"""
Will be called after every test
"""
db.session.remove()
db.drop_all()
class TestModels(TestBase):
def test_user_model(self):
"""
Test number of records in User table
"""
self.assertEqual(User.query.count(), 2)
def test_classification_model(self):
"""
Test number of records in Classification table
"""
self.assertEqual(Classification.query.count(), 1)
def test_dsp_model(self):
"""
Test number of records in DSP table
"""
self.assertEqual(DSP.query.count(), 1)
def test_dsp_raw_model(self):
"""
Test number of records in DSPRaw table
"""
self.assertEqual(DSPRaw.query.count(), 1)
def test_dcm_model(self):
"""
Test number of records in DCM table
"""
self.assertEqual(DCM.query.count(), 1)
def test_dcm_raw_model(self):
"""
Test number of records in DCMRaw table
"""
self.assertEqual(DCMRaw.query.count(), 1)
def test_report_model(self):
"""
Test number of records in DCMRaw table
"""
self.assertEqual(Report.query.count(), 1)
| 31.96 | 76 | 0.561952 |
70fc72a9a25ed7ad1070317dfaeaf838bc3f765b | 2,578 | py | Python | tests/unit_test.py | HBPMedical/i2b2-import | 444d492d6161e1792d3e283c0d85255308dbcd5f | [
"Apache-2.0"
] | 1 | 2021-07-02T16:38:16.000Z | 2021-07-02T16:38:16.000Z | tests/unit_test.py | HBPMedical/i2b2-import | 444d492d6161e1792d3e283c0d85255308dbcd5f | [
"Apache-2.0"
] | null | null | null | tests/unit_test.py | HBPMedical/i2b2-import | 444d492d6161e1792d3e283c0d85255308dbcd5f | [
"Apache-2.0"
] | 2 | 2017-02-02T09:51:30.000Z | 2019-11-13T06:48:44.000Z | import logging
import os
from nose.tools import assert_greater_equal
from sqlalchemy.exc import IntegrityError
from i2b2_import import i2b2_connection
from i2b2_import import data_catalog_connection
from i2b2_import import features_csv_import
from i2b2_import import meta_files_import
from i2b2_import import data_catalog_import
# Connection strings for the integration databases. Environment variables
# override the local-docker defaults; os.environ.get replaces the original
# if/else lookup pairs with the idiomatic single call.
DATA_CATALOG_DB_URL = os.environ.get(
    'DATA_CATALOG_DB_URL',
    'postgresql://postgres:postgres@localhost:5433/postgres')
I2B2_DB_URL = os.environ.get(
    'I2B2_DB_URL',
    'postgresql://postgres:postgres@localhost:5434/postgres')
class TestPublicFunctions:
    """Integration tests of the i2b2_import entry points against live DBs.

    The tests are numbered (test_01..test_04) because later ones read state
    written by earlier ones; they are not independent. setup/teardown follow
    the nose fixture naming convention (nose.tools is imported above).
    """
    def __init__(self):
        # Per-test DB connections, opened in setup() and closed in teardown().
        self.i2b2_db_conn = None
        self.dcdb_conn = None
    @classmethod
    def setup_class(cls):
        pass
    @classmethod
    def teardown_class(cls):
        pass
    def setup(self):
        # Fresh connections before every test.
        self.i2b2_db_conn = i2b2_connection.Connection(I2B2_DB_URL)
        self.dcdb_conn = data_catalog_connection.Connection(DATA_CATALOG_DB_URL)
    def teardown(self):
        self.i2b2_db_conn.close()
        self.dcdb_conn.close()
    def test_01_ppmi_xml_import(self):
        """Importing two PPMI meta XML files yields >= 36 observation facts."""
        meta_files_import.meta2i2b2('./data/ppmi_meta/ppmi.xml', I2B2_DB_URL, 'PPMI')
        meta_files_import.meta2i2b2('./data/ppmi_meta/ppmi2.xml', I2B2_DB_URL, 'PPMI')
        assert_greater_equal(self.i2b2_db_conn.db_session.query(self.i2b2_db_conn.ObservationFact).count(), 36)
    def test_02_clm_xlsx_import(self):
        """CLM xlsx import runs without raising (no count assertion yet)."""
        meta_files_import.meta2i2b2('./data/clm_meta/clm.xlsx', I2B2_DB_URL, 'CLM')
        # TODO: add assertion here
    def test_03_data_catalog_import(self):
        """Seed the data catalog from SQL, then import it into i2b2."""
        try:
            with open('./data/sql/test.sql', 'r') as sql_file:
                self.dcdb_conn.engine.execute(sql_file.read())
        except IntegrityError:
            # Seed rows already present from a previous run; proceed anyway.
            logging.warning("Cannot populate DB")
        data_catalog_import.catalog2i2b2(DATA_CATALOG_DB_URL, I2B2_DB_URL)
        assert_greater_equal(self.i2b2_db_conn.db_session.query(self.i2b2_db_conn.ObservationFact).count(), 36)
    def test_04_brain_features_import(self):
        """CSV feature import creates >= 324 facts and concept rows."""
        features_csv_import.csv2db(
            './data/features/PR00003/01/mt_al_mtflash3d_v2l_1mm/05/'
            'PR00003_Neuromorphics_Vols_MPMs_global_std_values.csv', I2B2_DB_URL, 'PPMI')
        assert_greater_equal(self.i2b2_db_conn.db_session.query(self.i2b2_db_conn.ObservationFact).count(), 324)
        assert_greater_equal(self.i2b2_db_conn.db_session.query(self.i2b2_db_conn.ConceptDimension).count(), 324)
| 36.309859 | 113 | 0.737393 |
909695ddf3284eb34843b03beae6dd21990a3c7c | 114 | py | Python | build_tools/frontend_test_nonstop_config.py | yandexdataschool/everware | 78c3a02acfcf3f3170981f2319d061eaf9c407aa | [
"BSD-3-Clause"
] | 130 | 2015-08-13T15:51:17.000Z | 2021-10-04T05:12:51.000Z | build_tools/frontend_test_nonstop_config.py | yandexdataschool/everware | 78c3a02acfcf3f3170981f2319d061eaf9c407aa | [
"BSD-3-Clause"
] | 184 | 2015-08-01T11:40:10.000Z | 2018-03-23T12:00:57.000Z | build_tools/frontend_test_nonstop_config.py | yandexdataschool/everware | 78c3a02acfcf3f3170981f2319d061eaf9c407aa | [
"BSD-3-Clause"
] | 38 | 2015-07-31T18:27:05.000Z | 2021-01-05T02:15:18.000Z | c = get_config()
# Start from the baseline frontend-test settings...
load_subconfig('build_tools/frontend_test_normal_config.py')
# ...but keep spawned containers alive after the run ("nonstop") —
# presumably so they can be inspected afterwards; TODO confirm intent.
c.Spawner.remove_containers = False
| 28.5 | 60 | 0.833333 |
8a5b6752610340409a8be9f4c8e87835597a413f | 840 | py | Python | SortingAlgorithm/QuickSort.py | weaponsX/SortingAlgorithmForPython3 | 8b5e1dd3aaf30b74c53ff643a225024ef8fbac13 | [
"Apache-2.0"
] | null | null | null | SortingAlgorithm/QuickSort.py | weaponsX/SortingAlgorithmForPython3 | 8b5e1dd3aaf30b74c53ff643a225024ef8fbac13 | [
"Apache-2.0"
] | null | null | null | SortingAlgorithm/QuickSort.py | weaponsX/SortingAlgorithmForPython3 | 8b5e1dd3aaf30b74c53ff643a225024ef8fbac13 | [
"Apache-2.0"
] | null | null | null | #交换排序-快速排序
"""
快速排序是通常被认为在同数量级(O(nlog2n))的排序方法中平均性能最好的。但若初始序列按关键码有序或基本有序时,快排序反而蜕化为冒泡排序。
为改进之,通常以“三者取中法”来选取基准记录,即将排序区间的两个端点与中点三个记录关键码居中的调整为支点记录。快速排序是一个不稳定的排序方法。
"""
def partition(numbers, low, high):
i = low-1
for j in range(low, high):
if numbers[j] <= numbers[high]:
i = i + 1
numbers[i], numbers[j] = numbers[j], numbers[i]
numbers[i+1], numbers[high] = numbers[high], numbers[i+1]
return i
# Recursive implementation.
def quickSort(numbers, low, high):
    """Sort numbers[low..high] in place with recursive quicksort."""
    # Guard clause: ranges of length 0 or 1 are already sorted.
    if low >= high:
        return
    # Split the range around a pivot, then sort each side recursively.
    pivot_index = partition(numbers, low, high)
    quickSort(numbers, low, pivot_index - 1)
    quickSort(numbers, pivot_index + 1, high)
# Demo: sort a sample list in place and print the result.
MyList = [1, 4, 7, 1, 5, 5, 3, 85, 34, 75, 23, 75, 2, 0]
quickSort(MyList, 0, len(MyList)-1)
print(MyList)
| 24.705882 | 72 | 0.616667 |
8c54a4ea82d34045202556569d56f116bff08463 | 88 | py | Python | API-Server/wsgi.py | lenayq/COVIDContactTracerApp | efbe3edb8feb4048dadb9e161810dfdd9fdb83b2 | [
"Apache-2.0"
] | 5 | 2020-07-31T18:34:29.000Z | 2020-08-21T07:08:59.000Z | API-Server/wsgi.py | lenayq/COVIDContactTracerApp | efbe3edb8feb4048dadb9e161810dfdd9fdb83b2 | [
"Apache-2.0"
] | null | null | null | API-Server/wsgi.py | lenayq/COVIDContactTracerApp | efbe3edb8feb4048dadb9e161810dfdd9fdb83b2 | [
"Apache-2.0"
] | 2 | 2020-07-31T18:34:31.000Z | 2020-07-31T19:23:26.000Z | from server import app as application
if __name__ == "__main__":
application.run()
| 17.6 | 37 | 0.727273 |
068d8c1352eab1b4c16f7fb8a9d8d1986ab28c38 | 179 | py | Python | edx_rest_api_client/exceptions.py | regisb/edx-rest-api-client | 130b5aa1285cd45118becc5021285fdc03e2d56a | [
"Apache-2.0"
] | 14 | 2016-02-15T03:32:26.000Z | 2021-10-14T19:14:25.000Z | edx_rest_api_client/exceptions.py | regisb/edx-rest-api-client | 130b5aa1285cd45118becc5021285fdc03e2d56a | [
"Apache-2.0"
] | 40 | 2015-10-20T16:51:13.000Z | 2021-08-16T13:27:46.000Z | edx_rest_api_client/exceptions.py | regisb/edx-rest-api-client | 130b5aa1285cd45118becc5021285fdc03e2d56a | [
"Apache-2.0"
] | 10 | 2016-01-04T18:51:10.000Z | 2021-06-22T12:41:14.000Z | # noinspection PyUnresolvedReferences
from requests.exceptions import Timeout # pylint: disable=unused-import
from slumber.exceptions import * # pylint: disable=wildcard-import
| 44.75 | 72 | 0.826816 |
4b7c18e96992393b68a8f7b519ddd4024eaba977 | 627 | py | Python | implementation/manage.py | dwws-ufes/2019-SocialMeetScheduler | b5c94afb674ddcca5e53a0dff062edcb22343408 | [
"MIT"
] | 1 | 2020-01-10T19:18:14.000Z | 2020-01-10T19:18:14.000Z | implementation/manage.py | dwws-ufes/2019-SocialMeetScheduler | b5c94afb674ddcca5e53a0dff062edcb22343408 | [
"MIT"
] | 2 | 2020-06-05T19:25:25.000Z | 2021-06-04T21:59:09.000Z | implementation/manage.py | dwws-ufes/2019-SocialMeetScheduler | b5c94afb674ddcca5e53a0dff062edcb22343408 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks (standard generated manage.py)."""
    # Respect an already-exported DJANGO_SETTINGS_MODULE; default otherwise.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webproj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 28.5 | 73 | 0.682616 |
97c13fd496c65a8ef7909962ce6419f061de03f1 | 1,947 | py | Python | tests/functional/utils/test_misc.py | johnson2427/ape | e0dbce86d7ddb4574df109f443718f19dc183608 | [
"Apache-2.0"
] | null | null | null | tests/functional/utils/test_misc.py | johnson2427/ape | e0dbce86d7ddb4574df109f443718f19dc183608 | [
"Apache-2.0"
] | null | null | null | tests/functional/utils/test_misc.py | johnson2427/ape | e0dbce86d7ddb4574df109f443718f19dc183608 | [
"Apache-2.0"
] | null | null | null | from typing import Iterator
import pytest
from ape.exceptions import APINotImplementedError
from ape.utils.misc import (
add_padding_to_strings,
cached_iterator,
extract_nested_value,
raises_not_implemented,
)
def test_extract_nested_value():
    """A full key path through nested dicts yields the stored leaf value."""
    nested = {"foo": {"bar": {"test": "expected_value"}}}
    value = extract_nested_value(nested, "foo", "bar", "test")
    assert value == "expected_value"
def test_extract_nested_value_non_dict_in_middle_returns_none():
    """Hitting a non-dict before the path is exhausted yields a falsy result."""
    nested = {"foo": {"non_dict": 3, "bar": {"test": "expected_value"}}}
    assert not extract_nested_value(nested, "foo", "non_dict", "test")
def test_add_spacing_to_strings():
    """Every string is right-padded to the longest length plus extra spaces."""
    padded = add_padding_to_strings(
        ["foo", "address", "ethereum"], extra_spaces=4)
    assert padded == ["foo         ", "address     ", "ethereum    "]
def test_cached_iterator():
    """cached_iterator memoizes: the underlying generator runs only once."""

    class _Demo:
        call_count = 0
        raw_list = [1, 2, 3]

        @cached_iterator
        def iterator(self) -> Iterator:
            return self.get_list()

        def get_list(self) -> Iterator:
            self.call_count += 1
            yield from self.raw_list

    demo = _Demo()
    for _ in range(3):
        assert list(demo.iterator) == demo.raw_list
    # Despite three consumptions, the generator body executed a single time.
    assert demo.call_count == 1
def test_raises_not_implemented():
    """Decorated functions raise APINotImplementedError naming the method."""
    @raises_not_implemented
    def unimplemented_api_method():
        pass
    with pytest.raises(APINotImplementedError) as err:
        unimplemented_api_method()
    # The expected message embeds this function's qualname
    # ('test_raises_not_implemented.<locals>.unimplemented_api_method'),
    # so neither the test's nor the inner function's name may change.
    assert str(err.value) == (
        "Attempted to call method 'test_raises_not_implemented.<locals>.unimplemented_api_method', "
        "method not supported."
    )
    assert isinstance(err.value, NotImplementedError)
| 29.5 | 100 | 0.682589 |
fcf7f4261ded22614e9b64060049dbd377f573a9 | 5,730 | py | Python | data_managers/data_manager_dada2/data_manager/data_manager.py | meugr/tools-iuc | 14002b1c1601aa7afaa7194385bf45285877baab | [
"MIT"
] | null | null | null | data_managers/data_manager_dada2/data_manager/data_manager.py | meugr/tools-iuc | 14002b1c1601aa7afaa7194385bf45285877baab | [
"MIT"
] | 2 | 2018-04-16T17:38:53.000Z | 2019-09-19T06:55:26.000Z | data_managers/data_manager_dada2/data_manager/data_manager.py | meugr/tools-iuc | 14002b1c1601aa7afaa7194385bf45285877baab | [
"MIT"
] | 1 | 2018-04-19T11:08:07.000Z | 2018-04-19T11:08:07.000Z | import argparse
import json
import os
try:
# For Python 3.0 and later
from urllib.request import Request, urlopen
except ImportError:
# Fall back to Python 2 imports
from urllib2 import Request, urlopen
# Taxonomic rank names used for any dataset without an override in
# FILE2TAXLEVELS below.
DEFAULT_TAXLEVELS = "Kingdom,Phylum,Class,Order,Family,Genus,Species"
# Dataset key -> human-readable name shown in the Galaxy data table.
FILE2NAME = {
    "silva_132": "Silva version 132",
    "silva_128": "Silva version 128",
    "rdp_16": "RDP trainset 16",
    "rdp_14": "RDP trainset 14",
    "greengenes_13.84": "GreenGenes version 13.84",
    "unite_8.0_fungi": "UNITE: General Fasta release 8.0 for Fungi",
    "unite_8.0_fungi_singletons": "UNITE: General Fasta release 8.0 for Fungi including global and 97% singletons",
    "RefSeq_RDP_2018_05": "NCBI RefSeq 16S rRNA database supplemented by RDP (05/2018)",
    "gtdb_2018_11": "GTDB: Genome Taxonomy Database (Bacteria & Archaea) (11/2018)",
    "hitdb_1": "HitDB version 1 (Human InTestinal 16S rRNA)",
    "silva_euk_18S_132": "Silva version 132 Eukaryotic 18S",
    "PR2_4.11.1": "Protist Ribosomal Reference database (PR2) 4.11.1"
}
# Dataset key -> download URL of the taxonomy training FASTA (gzip, except
# the UNITE entries which are zip archives; see url_download).
FILE2TAXURL = {
    "silva_132": "https://zenodo.org/record/1172783/files/silva_nr_v132_train_set.fa.gz?download=1",
    "silva_128": "https://zenodo.org/record/824551/files/silva_nr_v128_train_set.fa.gz?download=1",
    "rdp_16": "https://zenodo.org/record/801828/files/rdp_train_set_16.fa.gz?download=1",
    "rdp_14": "https://zenodo.org/record/158955/files/rdp_train_set_14.fa.gz?download=1",
    "unite_8.0_fungi": "https://files.plutof.ut.ee/public/orig/EB/0C/EB0CCB3A871B77EA75E472D13926271076904A588D2E1C1EA5AFCF7397D48378.zip",
    "unite_8.0_fungi_singletons": "https://files.plutof.ut.ee/doi/06/A2/06A2C86256EED64085670EB0C54B7115F6DAC8F311C656A9CB33E386CFABA0D0.zip",
    "greengenes_13.84": "https://zenodo.org/record/158955/files/gg_13_8_train_set_97.fa.gz?download=1",
    "RefSeq_RDP_2018_05": "https://zenodo.org/record/2541239/files/RefSeq-RDP16S_v2_May2018.fa.gz?download=1",
    "gtdb_2018_11": "https://zenodo.org/record/2541239/files/GTDB_bac-arc_ssu_r86.fa.gz?download=1",
    "hitdb_1": "https://zenodo.org/record/159205/files/hitdb_v1.00.fa.gz?download=1",
    "silva_euk_18S_132": "https://zenodo.org/record/1447330/files/silva_132.18s.99_rep_set.dada2.fa.gz?download=1",
    "PR2_4.11.1": "https://github.com/pr2database/pr2database/releases/download/4.11.1/pr2_version_4.11.1_dada2.fasta.gz"
}
# Species-level assignment FASTAs exist only for these datasets.
FILE2SPECIESURL = {
    "silva_132": "https://zenodo.org/record/1172783/files/silva_species_assignment_v132.fa.gz?download=1",
    "silva_128": "https://zenodo.org/record/824551/files/silva_species_assignment_v128.fa.gz?download=1",
    "rdp_16": "https://zenodo.org/record/801828/files/rdp_species_assignment_16.fa.gz?download=1",
    "rdp_14": "https://zenodo.org/record/158955/files/rdp_species_assignment_14.fa.gz?download=1"
}
# Dataset-specific taxonomic rank overrides (falls back to DEFAULT_TAXLEVELS).
FILE2TAXLEVELS = {
    "PR2_4.11.1": "Kingdom,Supergroup,Division,Class,Order,Family,Genus,Species"
}
def url_download(url, fname, workdir):
    """Download ``url`` to ``workdir/fname``.

    Creates ``workdir`` if it does not exist. For UNITE datasets (``fname``
    starting with "unite") the downloaded zip archive is unpacked and the
    single top-level fasta it contains is re-compressed with gzip, overwriting
    ``workdir/fname`` so callers always end up with a gzipped fasta.

    Raises
    ------
    Exception
        If a UNITE archive does not contain exactly one top-level fasta.
    """
    file_path = os.path.join(workdir, fname)
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    # Stream the download in 64 KiB chunks (the old 1 KiB chunks were
    # needlessly slow). Context managers guarantee both the HTTP response
    # and the destination file are closed on any error, replacing the old
    # manual try/finally with dead `src = None` / `dst = None` bookkeeping.
    with urlopen(Request(url)) as src, open(file_path, 'wb') as dst:
        while True:
            chunk = src.read(2 ** 16)
            if not chunk:
                break
            dst.write(chunk)
    # special treatment of UNITE DBs: they are zip files containing two fasta (xyz.fasta and developer/xyz.fasta)
    if fname.startswith("unite"):
        import glob
        import gzip
        import shutil
        import zipfile
        # unzip download; the context manager closes the archive even if
        # extraction fails (the old code leaked the handle on error)
        with zipfile.ZipFile(file_path, 'r') as zip_ref:
            zip_ref.extractall(workdir)
        # gzip top level fasta file
        fastas = glob.glob("%s/*fasta" % workdir)
        if len(fastas) != 1:
            msg = "UNITE download %s contained %d fasta file(s): %s" % (url, len(fastas), " ".join(fastas))
            raise Exception(msg)
        with open(fastas[0], 'rb') as f_in:
            with gzip.open(file_path, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
def remote_dataset(dataset, outjson):
    """Download *dataset* and rewrite *outjson* as Galaxy data-manager JSON.

    The Galaxy working directory is read from the ``extra_files_path`` field
    of the incoming job JSON in *outjson*; the same file is then overwritten
    with the resulting data-table entries.
    """
    with open(outjson) as jf:
        params = json.loads(jf.read())
    workdir = params['output_data'][0]['extra_files_path']
    os.mkdir(workdir)

    # Taxonomy reference is always present.
    url_download(FILE2TAXURL[dataset], dataset + ".taxonomy", workdir)
    tables = {
        "dada2_taxonomy": {
            'value': dataset,
            'name': FILE2NAME[dataset],
            'path': dataset + ".taxonomy",
            'taxlevels': FILE2TAXLEVELS.get(dataset, DEFAULT_TAXLEVELS),
        }
    }

    # Species-level assignment data exists only for some datasets.
    if FILE2SPECIESURL.get(dataset, False):
        url_download(FILE2SPECIESURL[dataset], dataset + ".species", workdir)
        tables["dada2_species"] = {
            'value': dataset,
            'name': FILE2NAME[dataset],
            'path': dataset + ".species",
        }

    with open(outjson, 'w') as jf:
        jf.write(json.dumps({"data_tables": tables}, sort_keys=True))
if __name__ == '__main__':
    # CLI entry point: download the requested reference dataset and write the
    # Galaxy data-manager JSON describing it.
    parser = argparse.ArgumentParser(description='Create data manager json.')
    parser.add_argument('--out', action='store', help='JSON filename')
    parser.add_argument('--dataset', action='store', help='Download data set name')
    args = parser.parse_args()
    remote_dataset(args.dataset, args.out)
| 42.761194 | 142 | 0.683421 |
6821fb7c4a264741d94bff25b8943766548eac91 | 528 | py | Python | experiments/scripts/visualize_gamma.py | wecacuee/POMDPy | c23ebf90f5815db4326564110487779961736b60 | [
"MIT"
] | 210 | 2015-04-23T17:05:50.000Z | 2022-03-14T08:00:00.000Z | experiments/scripts/visualize_gamma.py | wecacuee/POMDPy | c23ebf90f5815db4326564110487779961736b60 | [
"MIT"
] | 15 | 2015-04-13T05:36:14.000Z | 2019-05-06T19:14:50.000Z | experiments/scripts/visualize_gamma.py | wecacuee/POMDPy | c23ebf90f5815db4326564110487779961736b60 | [
"MIT"
] | 76 | 2016-08-18T03:54:08.000Z | 2022-01-26T09:08:23.000Z | from __future__ import absolute_import
from experiments.scripts import plot_alpha_vectors
from experiments.scripts import pickle_wrapper
import os
if __name__ == '__main__':
    # Number of actions; forwarded to the plotting helper so it can group the
    # alpha vectors per action.
    n_actions = 3
    my_dir = os.path.dirname(__file__)
    # Pickled alpha vectors live in experiments/pickle_jar, two levels up
    # from this script.
    weight_dir = os.path.join(my_dir, '..', '..', 'experiments', 'pickle_jar')
    gamma = pickle_wrapper.load_pkl(os.path.join(weight_dir, 'linear_alpha_net_vectors.pkl'))
    plot_alpha_vectors.plot_alpha_vectors('Alpha vectors computed with linear function approximation', gamma, n_actions)
87c4626938e2406df59b4d4126476ae008ed0810 | 19,542 | py | Python | arviz/stats/stats_utils.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | 1 | 2021-08-18T15:53:39.000Z | 2021-08-18T15:53:39.000Z | arviz/stats/stats_utils.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | null | null | null | arviz/stats/stats_utils.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | null | null | null | """Stats-utility functions for ArviZ."""
import warnings
from collections.abc import Sequence
from copy import copy as _copy
from copy import deepcopy as _deepcopy
import numpy as np
import pandas as pd
from scipy.fftpack import next_fast_len
from scipy.interpolate import CubicSpline
from scipy.stats.mstats import mquantiles
from xarray import apply_ufunc
from .. import _log
from ..utils import conditional_jit, conditional_vect, conditional_dask
from .density_utils import histogram as _histogram
__all__ = ["autocorr", "autocov", "ELPDData", "make_ufunc", "wrap_xarray_ufunc"]
def autocov(ary, axis=-1):
    """Compute autocovariance estimates for every lag for the input array.

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples
    axis : int, optional
        Axis along which the autocovariance is computed (default: last axis).

    Returns
    -------
    acov: Numpy array same size as the input array
    """
    # Normalize negative axis values. This must be `axis >= 0` (not `> 0`):
    # the old comparison mapped axis=0 to len(shape) + 0, i.e. out of bounds.
    axis = axis if axis >= 0 else len(ary.shape) + axis
    n = ary.shape[axis]
    # Zero-pad to at least 2*n (rounded to an FFT-friendly length) so the
    # circular FFT correlation equals the linear autocovariance.
    m = next_fast_len(2 * n)
    ary = ary - ary.mean(axis, keepdims=True)
    # added to silence tuple warning for a submodule
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Wiener-Khinchin: autocovariance is the inverse FFT of the power
        # spectrum of the demeaned samples.
        ifft_ary = np.fft.rfft(ary, n=m, axis=axis)
        ifft_ary *= np.conjugate(ifft_ary)
        # Keep only the first n lags along `axis`.
        shape = tuple(
            slice(None) if dim_len != axis else slice(0, n) for dim_len, _ in enumerate(ary.shape)
        )
        cov = np.fft.irfft(ifft_ary, n=m, axis=axis)[shape]
        cov /= n
    return cov
def autocorr(ary, axis=-1):
    """Compute autocorrelation using FFT for every lag for the input array.

    See https://en.wikipedia.org/wiki/autocorrelation#Efficient_computation

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples

    Returns
    -------
    acorr: Numpy array same size as the input array
    """
    corr = autocov(ary, axis=axis)
    # Normalize negative axis values. Fixes the former duplicated
    # `axis = axis = ...` assignment and, matching autocov, uses `>= 0` so
    # axis=0 is preserved instead of being mapped out of bounds.
    axis = axis if axis >= 0 else len(corr.shape) + axis
    # Divide every lag by the lag-0 autocovariance (the variance).
    norm = tuple(
        slice(None, None) if dim != axis else slice(None, 1) for dim, _ in enumerate(corr.shape)
    )
    with np.errstate(invalid="ignore"):
        corr /= corr[norm]
    return corr
def make_ufunc(
    func, n_dims=2, n_output=1, n_input=1, index=Ellipsis, ravel=True, check_shape=None
): # noqa: D202
    """Make ufunc from a function taking 1D array input.
    Parameters
    ----------
    func : callable
    n_dims : int, optional
        Number of core dimensions not broadcasted. Dimensions are skipped from the end.
        At minimum n_dims > 0.
    n_output : int, optional
        Select number of results returned by `func`.
        If n_output > 1, ufunc returns a tuple of objects else returns an object.
    n_input : int, optional
        Number of **array** inputs to func, i.e. ``n_input=2`` means that func is called
        with ``func(ary1, ary2, *args, **kwargs)``
    index : int, optional
        Slice ndarray with `index`. Defaults to `Ellipsis`.
    ravel : bool, optional
        If true, ravel the ndarray before calling `func`.
    check_shape: bool, optional
        If false, do not check if the shape of the output is compatible with n_dims and
        n_output. By default, True only for n_input=1. If n_input is larger than 1, the last
        input array is used to check the shape, however, shape checking with multiple inputs
        may not be correct.
    Returns
    -------
    callable
        ufunc wrapper for `func`.
    """
    if n_dims < 1:
        raise TypeError("n_dims must be one or higher.")
    # Shape checking only inspects the last input, so it is reliable (and
    # enabled by default) only for single-input functions.
    if n_input == 1 and check_shape is None:
        check_shape = True
    elif check_shape is None:
        check_shape = False
    def _ufunc(*args, out=None, out_shape=None, **kwargs):
        """General ufunc for single-output function."""
        arys = args[:n_input]
        n_dims_out = None
        if out is None:
            if out_shape is None:
                out = np.empty(arys[-1].shape[:-n_dims])
            else:
                out = np.empty((*arys[-1].shape[:-n_dims], *out_shape))
                n_dims_out = -len(out_shape)
        elif check_shape:
            if out.shape != arys[-1].shape[:-n_dims]:
                msg = f"Shape incorrect for `out`: {out.shape}."
                msg += f" Correct shape is {arys[-1].shape[:-n_dims]}"
                raise TypeError(msg)
        # Apply func once per broadcasted element, slicing out the core dims.
        for idx in np.ndindex(out.shape[:n_dims_out]):
            arys_idx = [ary[idx].ravel() if ravel else ary[idx] for ary in arys]
            out[idx] = np.asarray(func(*arys_idx, *args[n_input:], **kwargs))[index]
        return out
    def _multi_ufunc(*args, out=None, out_shape=None, **kwargs):
        """General ufunc for multi-output function."""
        arys = args[:n_input]
        element_shape = arys[-1].shape[:-n_dims]
        if out is None:
            if out_shape is None:
                out = tuple(np.empty(element_shape) for _ in range(n_output))
            else:
                out = tuple(np.empty((*element_shape, *out_shape[i])) for i in range(n_output))
        elif check_shape:
            raise_error = False
            correct_shape = tuple(element_shape for _ in range(n_output))
            if isinstance(out, tuple):
                out_shape = tuple(item.shape for item in out)
                if out_shape != correct_shape:
                    raise_error = True
            else:
                raise_error = True
                # BUGFIX: this literal was missing its f-prefix, so the error
                # message printed "{type(out)}" verbatim instead of the type.
                out_shape = f"not tuple, type={type(out)}"
            if raise_error:
                msg = f"Shapes incorrect for `out`: {out_shape}."
                msg += f" Correct shapes are {correct_shape}"
                raise TypeError(msg)
        for idx in np.ndindex(element_shape):
            arys_idx = [ary[idx].ravel() if ravel else ary[idx] for ary in arys]
            results = func(*arys_idx, *args[n_input:], **kwargs)
            for i, res in enumerate(results):
                out[i][idx] = np.asarray(res)[index]
        return out
    if n_output > 1:
        ufunc = _multi_ufunc
    else:
        ufunc = _ufunc
    update_docstring(ufunc, func, n_output)
    return ufunc
@conditional_dask
def wrap_xarray_ufunc(
    ufunc,
    *datasets,
    ufunc_kwargs=None,
    func_args=None,
    func_kwargs=None,
    dask_kwargs=None,
    **kwargs,
):
    """Wrap make_ufunc with xarray.apply_ufunc.
    Parameters
    ----------
    ufunc : callable
    datasets : xarray.dataset
    ufunc_kwargs : dict
        Keyword arguments passed to `make_ufunc`.
        - 'n_dims', int, by default 2
        - 'n_output', int, by default 1
        - 'n_input', int, by default len(datasets)
        - 'index', slice, by default Ellipsis
        - 'ravel', bool, by default True
    func_args : tuple
        Arguments passed to 'ufunc'.
    func_kwargs : dict
        Keyword arguments passed to 'ufunc'.
        - 'out_shape', int, by default None
    dask_kwargs : dict
        Dask related kwargs passed to :func:`xarray:xarray.apply_ufunc`.
        Use :meth:`~arviz.Dask.enable_dask` to set default kwargs.
    **kwargs
        Passed to xarray.apply_ufunc.
    Returns
    -------
    xarray.dataset
    """
    # Normalize the mutable-default kwargs.
    if ufunc_kwargs is None:
        ufunc_kwargs = {}
    ufunc_kwargs.setdefault("n_input", len(datasets))
    if func_args is None:
        func_args = tuple()
    if func_kwargs is None:
        func_kwargs = {}
    if dask_kwargs is None:
        dask_kwargs = {}
    # By default every input (extra positional args and datasets alike) is
    # reduced over the MCMC ("chain", "draw") core dimensions.
    kwargs.setdefault(
        "input_core_dims", tuple(("chain", "draw") for _ in range(len(func_args) + len(datasets)))
    )
    ufunc_kwargs.setdefault("n_dims", len(kwargs["input_core_dims"][-1]))
    # One empty output-core-dims entry per output of the wrapped function.
    kwargs.setdefault("output_core_dims", tuple([] for _ in range(ufunc_kwargs.get("n_output", 1))))
    callable_ufunc = make_ufunc(ufunc, **ufunc_kwargs)
    return apply_ufunc(
        callable_ufunc, *datasets, *func_args, kwargs=func_kwargs, **dask_kwargs, **kwargs
    )
def update_docstring(ufunc, func, n_output=1):
    """Update ArviZ generated ufunc docstring.

    Appends provenance (module/name of the wrapped function), a usage example
    for xarray.apply_ufunc, and the wrapped function's own docstring to
    ``ufunc.__doc__``. Mutates ``ufunc`` in place; returns None.
    """
    module = ""
    name = ""
    docstring = ""
    # Pull provenance info from the wrapped function when available.
    if hasattr(func, "__module__") and isinstance(func.__module__, str):
        module += func.__module__
    if hasattr(func, "__name__"):
        name += func.__name__
    if hasattr(func, "__doc__") and isinstance(func.__doc__, str):
        docstring += func.__doc__
    ufunc.__doc__ += "\n\n"
    if module or name:
        ufunc.__doc__ += "This function is a ufunc wrapper for "
        ufunc.__doc__ += module + "." + name
        ufunc.__doc__ += "\n"
    ufunc.__doc__ += 'Call ufunc with n_args from xarray against "chain" and "draw" dimensions:'
    ufunc.__doc__ += "\n\n"
    input_core_dims = 'tuple(("chain", "draw") for _ in range(n_args))'
    # For multi-output functions the example needs explicit output_core_dims.
    if n_output > 1:
        output_core_dims = f" tuple([] for _ in range({n_output}))"
        msg = f"xr.apply_ufunc(ufunc, dataset, input_core_dims={input_core_dims}, "
        msg += f"output_core_dims={ output_core_dims})"
        ufunc.__doc__ += msg
    else:
        output_core_dims = ""
        msg = f"xr.apply_ufunc(ufunc, dataset, input_core_dims={input_core_dims})"
        ufunc.__doc__ += msg
    ufunc.__doc__ += "\n\n"
    ufunc.__doc__ += "For example: np.std(data, ddof=1) --> n_args=2"
    # Finally append the wrapped function's own docstring, if any.
    if docstring:
        ufunc.__doc__ += "\n\n"
        ufunc.__doc__ += module
        ufunc.__doc__ += name
        ufunc.__doc__ += " docstring:"
        ufunc.__doc__ += "\n\n"
        ufunc.__doc__ += docstring
def logsumexp(ary, *, b=None, b_inv=None, axis=None, keepdims=False, out=None, copy=True):
    """Stable logsumexp when b >= 0 and b is scalar.
    b_inv overwrites b unless b_inv is None.

    Computes log(b * sum(exp(ary))) (or log(sum(exp(ary)) / b_inv)) along
    ``axis`` using the classic max-shift trick for numerical stability.
    When ``copy`` is False, ``ary`` is modified in place.
    """
    # check dimensions for result arrays
    ary = np.asarray(ary)
    if ary.dtype.kind == "i":
        # Integer input cannot hold exp() results; promote to float64.
        ary = ary.astype(np.float64)
    dtype = ary.dtype.type
    shape = ary.shape
    shape_len = len(shape)
    # Normalize axis (or tuple of axes) to non-negative indices.
    if isinstance(axis, Sequence):
        axis = tuple(axis_i if axis_i >= 0 else shape_len + axis_i for axis_i in axis)
        agroup = axis
    else:
        axis = axis if (axis is None) or (axis >= 0) else shape_len + axis
        agroup = (axis,)
    # Shape with reduced axes kept as length-1 (for the running max buffer).
    shape_max = (
        tuple(1 for _ in shape)
        if axis is None
        else tuple(1 if i in agroup else d for i, d in enumerate(shape))
    )
    # create result arrays
    if out is None:
        if not keepdims:
            out_shape = (
                tuple()
                if axis is None
                else tuple(d for i, d in enumerate(shape) if i not in agroup)
            )
        else:
            out_shape = shape_max
        out = np.empty(out_shape, dtype=dtype)
    # Degenerate weights: b_inv == 0 -> +inf, b == 0 -> -inf.
    if b_inv == 0:
        return np.full_like(out, np.inf, dtype=dtype) if out.shape else np.inf
    if b_inv is None and b == 0:
        return np.full_like(out, -np.inf) if out.shape else -np.inf
    ary_max = np.empty(shape_max, dtype=dtype)
    # calculations
    # Shift by the per-slice max so exp() never overflows, then undo the
    # shift (and fold in log(b) / -log(b_inv)) after the log.
    ary.max(axis=axis, keepdims=True, out=ary_max)
    if copy:
        ary = ary.copy()
    ary -= ary_max
    np.exp(ary, out=ary)
    ary.sum(axis=axis, keepdims=keepdims, out=out)
    np.log(out, out=out)
    if b_inv is not None:
        ary_max -= np.log(b_inv)
    elif b:
        ary_max += np.log(b)
    out += ary_max.squeeze() if not keepdims else ary_max
    # transform to scalar if possible
    return out if out.shape else dtype(out)
def quantile(ary, q, axis=None, limit=None):
    """Compute quantiles with R's default (type 7) plotting positions."""
    # alphap=betap=1 reproduces R's quantile(..., type=7) definition.
    return mquantiles(
        ary, q, alphap=1, betap=1, axis=axis, limit=tuple() if limit is None else limit
    )
def not_valid(ary, check_nan=True, check_shape=True, nan_kwargs=None, shape_kwargs=None):
    """Validate ndarray.
    Parameters
    ----------
    ary : numpy.ndarray
    check_nan : bool
        Check if any value contains NaN.
    check_shape : bool
        Check if array has correct shape. Assumes dimensions in order (chain, draw, *shape).
        For 1D arrays (shape = (n,)) assumes chain equals 1.
    nan_kwargs : dict
        Valid kwargs are:
        axis : int,
            Defaults to None.
        how : str, {"all", "any"}
            Default to "any".
    shape_kwargs : dict
        Valid kwargs are:
        min_chains : int
            Defaults to 2.
        min_draws : int
            Defaults to 4.
    Returns
    -------
    bool
        True when the array is NOT valid (contains NaN and/or fails the
        shape requirements); warnings are logged for each failure.
    """
    ary = np.asarray(ary)
    nan_error = False
    draw_error = False
    chain_error = False
    if check_nan:
        if nan_kwargs is None:
            nan_kwargs = {}
        isnan = np.isnan(ary)
        axis = nan_kwargs.get("axis", None)
        # "all": flag only slices that are entirely NaN; "any": any NaN flags.
        if nan_kwargs.get("how", "any").lower() == "all":
            nan_error = isnan.all(axis)
        else:
            nan_error = isnan.any(axis)
        if (isinstance(nan_error, bool) and nan_error) or nan_error.any():
            _log.warning("Array contains NaN-value.")
    if check_shape:
        shape = ary.shape
        if shape_kwargs is None:
            shape_kwargs = {}
        min_chains = shape_kwargs.get("min_chains", 2)
        min_draws = shape_kwargs.get("min_draws", 4)
        error_msg = f"Shape validation failed: input_shape: {shape}, "
        error_msg += f"minimum_shape: (chains={min_chains}, draws={min_draws})"
        # NOTE(review): for a 1D array shape[0] is compared against
        # min_chains here and against min_draws below -- a 1D input is
        # treated as a single chain of draws; confirm this is intended.
        chain_error = ((min_chains > 1) and (len(shape) < 2)) or (shape[0] < min_chains)
        draw_error = ((len(shape) < 2) and (shape[0] < min_draws)) or (
            (len(shape) > 1) and (shape[1] < min_draws)
        )
        if chain_error or draw_error:
            _log.warning(error_msg)
    return nan_error | chain_error | draw_error
def get_log_likelihood(idata, var_name=None):
    """Retrieve the log likelihood dataarray of a given variable.

    Falls back to the deprecated ``sample_stats.log_likelihood`` location
    (with a DeprecationWarning) when no ``log_likelihood`` group exists.
    Raises TypeError when the data cannot be found or *var_name* is ambiguous.
    """
    if not hasattr(idata, "log_likelihood"):
        # Legacy location, kept for backwards compatibility.
        if hasattr(idata, "sample_stats") and hasattr(idata.sample_stats, "log_likelihood"):
            warnings.warn(
                "Storing the log_likelihood in sample_stats groups has been deprecated",
                DeprecationWarning,
            )
            return idata.sample_stats.log_likelihood
        raise TypeError("log likelihood not found in inference data object")
    log_likelihood_group = idata.log_likelihood
    if var_name is not None:
        try:
            return log_likelihood_group[var_name]
        except KeyError as err:
            raise TypeError(f"No log likelihood data named {var_name} found") from err
    var_names = list(log_likelihood_group.data_vars)
    if len(var_names) > 1:
        raise TypeError(
            f"Found several log likelihood arrays {var_names}, var_name cannot be None"
        )
    return log_likelihood_group[var_names[0]]
# Two-stage format template for ELPDData.__str__: the first .format() call
# injects the column widths (the doubled braces survive it), the second fills
# in the actual values.
BASE_FMT = """Computed from {{n_samples}} by {{n_points}} log-likelihood matrix
{{0:{0}}} Estimate SE
{{scale}}_{{kind}} {{1:8.2f}} {{2:7.2f}}
p_{{kind:{1}}} {{3:8.2f}} -"""
# Appended to the "loo" output when pointwise Pareto-k diagnostics exist.
POINTWISE_LOO_FMT = """------
Pareto k diagnostic values:
{{0:>{0}}} {{1:>6}}
(-Inf, 0.5] (good) {{2:{0}d}} {{6:6.1f}}%
(0.5, 0.7] (ok) {{3:{0}d}} {{7:6.1f}}%
(0.7, 1] (bad) {{4:{0}d}} {{8:6.1f}}%
(1, Inf) (very bad) {{5:{0}d}} {{9:6.1f}}%
"""
# Maps the stored scale name to the label printed in the summary header.
SCALE_DICT = {"deviance": "deviance", "log": "elpd", "negative_log": "-elpd"}
class ELPDData(pd.Series): # pylint: disable=too-many-ancestors
    """Class to contain the data from elpd information criterion like waic or loo."""
    def __str__(self):
        """Print elpd data in a user friendly way."""
        # The first index entry encodes the criterion ("loo" or "waic").
        kind = self.index[0]
        if kind not in ("loo", "waic"):
            raise ValueError("Invalid ELPDData object")
        scale_str = SCALE_DICT[self[f"{kind}_scale"]]
        padding = len(scale_str) + len(kind) + 1
        # Two-stage formatting: first inject column widths, then values.
        base = BASE_FMT.format(padding, padding - 2)
        base = base.format(
            "",
            kind=kind,
            scale=scale_str,
            n_samples=self.n_samples,
            n_points=self.n_data_points,
            *self.values,
        )
        if self.warning:
            base += "\n\nThere has been a warning during the calculation. Please check the results."
        # For loo, append the Pareto-k diagnostic table when available.
        if kind == "loo" and "pareto_k" in self:
            bins = np.asarray([-np.Inf, 0.5, 0.7, 1, np.Inf])
            counts, *_ = _histogram(self.pareto_k.values, bins)
            extended = POINTWISE_LOO_FMT.format(max(4, len(str(np.max(counts)))))
            extended = extended.format(
                "Count", "Pct.", *[*counts, *(counts / np.sum(counts) * 100)]
            )
            base = "\n".join([base, extended])
        return base
    def __repr__(self):
        """Alias to ``__str__``."""
        return self.__str__()
    def copy(self, deep=True):
        """Perform a pandas deep copy of the ELPDData plus a copy of the stored data."""
        # pd.Series.copy does not deep-copy object entries, so copy each one.
        copied_obj = pd.Series.copy(self)
        for key in copied_obj.keys():
            if deep:
                copied_obj[key] = _deepcopy(copied_obj[key])
            else:
                copied_obj[key] = _copy(copied_obj[key])
        return ELPDData(copied_obj)
@conditional_jit
def stats_variance_1d(data, ddof=0):
    """Variance of a 1-D array in a single accumulation pass.

    Written as an explicit loop so it stays compatible with the optional
    jit compilation applied by ``conditional_jit`` (presumably numba --
    confirm in ..utils). Uses the E[x^2] - E[x]^2 identity, which can lose
    precision when the mean is large relative to the spread.
    """
    a_a, b_b = 0, 0
    for i in data:
        a_a = a_a + i
        b_b = b_b + i * i
    var = b_b / (len(data)) - ((a_a / (len(data))) ** 2)
    # Rescale from the population variance to the requested ddof.
    var = var * (len(data) / (len(data) - ddof))
    return var
def stats_variance_2d(data, ddof=0, axis=1):
    """Per-row (axis=1) or per-column (otherwise) variance of a 2-D array.

    1-D inputs are forwarded directly to ``stats_variance_1d``.
    """
    if data.ndim == 1:
        return stats_variance_1d(data, ddof=ddof)
    n_rows, n_cols = data.shape
    if axis == 1:
        return np.array([stats_variance_1d(data[row], ddof=ddof) for row in range(n_rows)])
    return np.array([stats_variance_1d(data[:, col], ddof=ddof) for col in range(n_cols)])
@conditional_vect
def _sqrt(a_a, b_b):
    # Element-wise sqrt(a + b); ``conditional_vect`` optionally vectorizes it
    # (presumably via numba -- confirm in ..utils).
    return (a_a + b_b) ** 0.5
def _circfunc(samples, high, low, skipna):
    """Map samples on [low, high] to angles, optionally dropping NaNs first."""
    arr = np.asarray(samples)
    if skipna:
        arr = arr[~np.isnan(arr)]
    # An empty sample set has no defined circular statistic.
    if arr.size == 0:
        return np.nan
    return _angle(arr, low, high, np.pi)
@conditional_vect
def _angle(samples, low, high, p_i=np.pi):
    # Linearly map samples from [low, high] onto angles in [0, 2*p_i) radians.
    ang = (samples - low) * 2.0 * p_i / (high - low)
    return ang
def _circular_standard_deviation(samples, high=2 * np.pi, low=0, skipna=False, axis=None):
    """Circular standard deviation of ``samples`` defined on [low, high]."""
    ang = _circfunc(samples, high, low, skipna)
    # Mean resultant vector of the unit vectors for each angle.
    s_s = np.sin(ang).mean(axis=axis)
    c_c = np.cos(ang).mean(axis=axis)
    r_r = np.hypot(s_s, c_c)
    # Circular std = sqrt(-2 ln R), rescaled from radians to [low, high] units.
    return ((high - low) / 2.0 / np.pi) * np.sqrt(-2 * np.log(r_r))
def smooth_data(obs_vals, pp_vals):
    """Smooth data, helper function for discrete data in plot_pbv, loo_pit and plot_loo_pit.

    Fits a cubic spline over the unit interval and resamples both the
    observed values and each posterior-predictive row on [0.01, 0.99].
    """
    grid = np.linspace(0, 1, len(obs_vals))
    obs_vals = CubicSpline(grid, obs_vals)(np.linspace(0.01, 0.99, len(obs_vals)))

    grid = np.linspace(0, 1, pp_vals.shape[1])
    pp_vals = CubicSpline(grid, pp_vals, axis=1)(np.linspace(0.01, 0.99, pp_vals.shape[1]))

    return obs_vals, pp_vals
| 33.986087 | 101 | 0.576962 |
8bdd693a8bea73e6ccec85e37ec5be7c0c6b4d35 | 371 | py | Python | python.py | Uzikan/phone-hack-prank | 430ebcadc011e50fd1953dae0710ecdc3de974de | [
"MIT"
] | null | null | null | python.py | Uzikan/phone-hack-prank | 430ebcadc011e50fd1953dae0710ecdc3de974de | [
"MIT"
] | null | null | null | python.py | Uzikan/phone-hack-prank | 430ebcadc011e50fd1953dae0710ecdc3de974de | [
"MIT"
] | null | null | null | from random import *
guess=""
password=input("password")
letters = ["0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0"]
while(guess != password):
Guess=""
for letter in password:
guessletter=letters[randint(0,25)]
guess=str(guessletter) + str(guess)
print(guess)
input("") | 24.733333 | 116 | 0.504043 |
f4e19898fee62c0b2bf1490e6f31106b5e364cc3 | 3,822 | py | Python | task1.py | Dima12101/2019_IT | 876f161bbc7286372a98dc1acc582fbd55735b21 | [
"MIT"
] | 3 | 2019-09-26T08:26:19.000Z | 2019-10-22T20:05:05.000Z | task1.py | Dima12101/2019_IT | 876f161bbc7286372a98dc1acc582fbd55735b21 | [
"MIT"
] | null | null | null | task1.py | Dima12101/2019_IT | 876f161bbc7286372a98dc1acc582fbd55735b21 | [
"MIT"
] | 51 | 2019-09-10T14:46:09.000Z | 2019-10-31T14:10:25.000Z | x = [1.49066127e-06, 1.00024454e-02, 2.00039718e-02, 3.00063867e-02,
4.00101677e-02, 5.00160261e-02, 6.00250086e-02, 7.00386374e-02,
8.00590993e-02, 9.00894983e-02, 1.00134185e-01, 1.10199182e-01,
1.20292721e-01, 1.30425906e-01, 1.40613524e-01, 1.50874996e-01,
1.61235486e-01, 1.71727136e-01, 1.82390409e-01, 1.93275480e-01,
2.04443599e-01, 2.15968314e-01, 2.27936438e-01, 2.40448564e-01,
2.53618982e-01, 2.67574773e-01, 2.82453905e-01, 2.98402141e-01,
3.15568647e-01, 3.34100210e-01, 3.54134113e-01, 3.75789783e-01,
3.99159480e-01, 4.24298431e-01, 4.51214920e-01, 4.79860987e-01,
5.10124440e-01, 5.41822943e-01, 5.74700902e-01, 6.08429771e-01,
6.42612264e-01, 6.76790724e-01, 7.10459615e-01, 7.43081815e-01,
7.74108085e-01, 8.02998761e-01, 8.29246539e-01, 8.52398993e-01,
8.72079469e-01, 8.88004992e-01, 9.00000000e-01, 9.08004992e-01,
9.12079469e-01, 9.12398993e-01, 9.09246539e-01, 9.02998761e-01,
8.94108085e-01, 8.83081815e-01, 8.70459615e-01, 8.56790724e-01,
8.42612264e-01, 8.28429771e-01, 8.14700902e-01, 8.01822943e-01,
7.90124440e-01, 7.79860987e-01, 7.71214920e-01, 7.64298431e-01,
7.59159480e-01, 7.55789783e-01, 7.54134113e-01, 7.54100210e-01,
7.55568647e-01, 7.58402141e-01, 7.62453905e-01, 7.67574773e-01,
7.73618982e-01, 7.80448564e-01, 7.87936438e-01, 7.95968314e-01,
8.04443599e-01, 8.13275480e-01, 8.22390409e-01, 8.31727136e-01,
8.41235486e-01, 8.50874996e-01, 8.60613524e-01, 8.70425906e-01,
8.80292721e-01, 8.90199182e-01, 9.00134185e-01, 9.10089498e-01,
9.20059099e-01, 9.30038637e-01, 9.40025009e-01, 9.50016026e-01,
9.60010168e-01, 9.70006387e-01, 9.80003972e-01, 9.90002445e-01]
u = [3.72665317e-06, 6.11356797e-06, 9.92950431e-06, 1.59667839e-05,
2.54193465e-05, 4.00652974e-05, 6.25215038e-05, 9.65934137e-05,
1.47748360e-04, 2.23745794e-04, 3.35462628e-04, 4.97955422e-04,
7.31802419e-04, 1.06476624e-03, 1.53381068e-03, 2.18749112e-03,
3.08871541e-03, 4.31784001e-03, 5.97602290e-03, 8.18870101e-03,
1.11089965e-02, 1.49207861e-02, 1.98410947e-02, 2.61214099e-02,
3.40474547e-02, 4.39369336e-02, 5.61347628e-02, 7.10053537e-02,
8.89216175e-02, 1.10250525e-01, 1.35335283e-01, 1.64474457e-01,
1.97898699e-01, 2.35746077e-01, 2.78037300e-01, 3.24652467e-01,
3.75311099e-01, 4.29557358e-01, 4.86752256e-01, 5.46074427e-01,
6.06530660e-01, 6.66976811e-01, 7.26149037e-01, 7.82704538e-01,
8.35270211e-01, 8.82496903e-01, 9.23116346e-01, 9.55997482e-01,
9.80198673e-01, 9.95012479e-01, 1.00000000e+00, 9.95012479e-01,
9.80198673e-01, 9.55997482e-01, 9.23116346e-01, 8.82496903e-01,
8.35270211e-01, 7.82704538e-01, 7.26149037e-01, 6.66976811e-01,
6.06530660e-01, 5.46074427e-01, 4.86752256e-01, 4.29557358e-01,
3.75311099e-01, 3.24652467e-01, 2.78037300e-01, 2.35746077e-01,
1.97898699e-01, 1.64474457e-01, 1.35335283e-01, 1.10250525e-01,
8.89216175e-02, 7.10053537e-02, 5.61347628e-02, 4.39369336e-02,
3.40474547e-02, 2.61214099e-02, 1.98410947e-02, 1.49207861e-02,
1.11089965e-02, 8.18870101e-03, 5.97602290e-03, 4.31784001e-03,
3.08871541e-03, 2.18749112e-03, 1.53381068e-03, 1.06476624e-03,
7.31802419e-04, 4.97955422e-04, 3.35462628e-04, 2.23745794e-04,
1.47748360e-04, 9.65934137e-05, 6.25215038e-05, 4.00652974e-05,
2.54193465e-05, 1.59667839e-05, 9.92950431e-06, 6.11356797e-06]
# Locate the ambiguous (multivalued) region of the curve: the first index
# where x stops increasing (local maximum, i_end) and the first index after
# it where x starts increasing again (local minimum, i_start).
# BUGFIX: the original `for`/`break` loops left i_end / i_start undefined
# (NameError) when no such turning point existed; next() with a None default
# plus an explicit exit makes the failure mode clear.
i_end = next((i for i in range(len(x) - 1) if x[i + 1] < x[i]), None)
if i_end is None:
    raise SystemExit("x never decreases: no ambiguous region found")
i_start = next((i for i in range(i_end, len(x) - 1) if x[i + 1] > x[i]), None)
if i_start is None:
    raise SystemExit("x never increases again after the local maximum")
print(f"ambiguous region: [{x[i_start]}, {x[i_end]}]")
print(f"ambiguous indices are between {i_end} and {i_start}")
# todo: find x0 between x[i_start], x[i_end]
# e.g. brute-force search
664d3511799a8ffa8ec9f4afa4bd48581ee82883 | 1,417 | py | Python | aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/GetImageJobRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/GetImageJobRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-imm/aliyunsdkimm/request/v20170906/GetImageJobRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class GetImageJobRequest(RpcRequest):
	"""RPC request for the IMM ``GetImageJob`` operation (API version 2017-09-06).

	Auto-generated SDK boilerplate: each query parameter gets a getter/setter
	pair backed by the RpcRequest query-parameter dict.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'imm', '2017-09-06', 'GetImageJob','imm')

	def get_Project(self):
		"""Return the ``Project`` query parameter (or None if unset)."""
		return self.get_query_params().get('Project')

	def set_Project(self,Project):
		"""Set the ``Project`` query parameter."""
		self.add_query_param('Project',Project)

	def get_JobId(self):
		"""Return the ``JobId`` query parameter (or None if unset)."""
		return self.get_query_params().get('JobId')

	def set_JobId(self,JobId):
		"""Set the ``JobId`` query parameter."""
		self.add_query_param('JobId',JobId)

	def get_JobType(self):
		"""Return the ``JobType`` query parameter (or None if unset)."""
		return self.get_query_params().get('JobType')

	def set_JobType(self,JobType):
		"""Set the ``JobType`` query parameter."""
		self.add_query_param('JobType',JobType)
83fa1bbcedc11a1fdea83012040cc64f6f5a3b9e | 7,617 | py | Python | sdk/python/pulumi_gcp/organizations/iam_custom_role.py | 23doors/pulumi-gcp | ded01b199f95b164884266ea3e6f8206c8231270 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-12-20T22:08:20.000Z | 2019-12-20T22:08:20.000Z | sdk/python/pulumi_gcp/organizations/iam_custom_role.py | pellizzetti/pulumi-gcp | fad74dd55a0cf7723f73046bb0e6fcbfd948ba84 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/organizations/iam_custom_role.py | pellizzetti/pulumi-gcp | fad74dd55a0cf7723f73046bb0e6fcbfd948ba84 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class IAMCustomRole(pulumi.CustomResource):
deleted: pulumi.Output[bool]
"""
(Optional) The current deleted state of the role.
"""
description: pulumi.Output[str]
"""
A human-readable description for the role.
"""
org_id: pulumi.Output[str]
"""
The numeric ID of the organization in which you want to create a custom role.
"""
permissions: pulumi.Output[list]
"""
The names of the permissions this role grants when bound in an IAM policy. At least one permission must be specified.
"""
role_id: pulumi.Output[str]
"""
The role id to use for this role.
"""
stage: pulumi.Output[str]
"""
The current launch stage of the role.
Defaults to `GA`.
List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
"""
title: pulumi.Output[str]
"""
A human-readable title for the role.
"""
def __init__(__self__, resource_name, opts=None, description=None, org_id=None, permissions=None, role_id=None, stage=None, title=None, __props__=None, __name__=None, __opts__=None):
"""
Allows management of a customized Cloud IAM organization role. For more information see
[the official documentation](https://cloud.google.com/iam/docs/understanding-custom-roles)
and
[API](https://cloud.google.com/iam/reference/rest/v1/organizations.roles).
> **Warning:** Note that custom roles in GCP have the concept of a soft-delete. There are two issues that may arise
from this and how roles are propagated. 1) creating a role may involve undeleting and then updating a role with the
same name, possibly causing confusing behavior between undelete and update. 2) A deleted role is permanently deleted
after 7 days, but it can take up to 30 more days (i.e. between 7 and 37 days after deletion) before the role name is
made available again. This means a deleted role that has been deleted for more than 7 days cannot be changed at all
by this provider, and new roles cannot share that name.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A human-readable description for the role.
:param pulumi.Input[str] org_id: The numeric ID of the organization in which you want to create a custom role.
:param pulumi.Input[list] permissions: The names of the permissions this role grants when bound in an IAM policy. At least one permission must be specified.
:param pulumi.Input[str] role_id: The role id to use for this role.
:param pulumi.Input[str] stage: The current launch stage of the role.
Defaults to `GA`.
List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
:param pulumi.Input[str] title: A human-readable title for the role.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/organization_iam_custom_role.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
if org_id is None:
raise TypeError("Missing required property 'org_id'")
__props__['org_id'] = org_id
if permissions is None:
raise TypeError("Missing required property 'permissions'")
__props__['permissions'] = permissions
if role_id is None:
raise TypeError("Missing required property 'role_id'")
__props__['role_id'] = role_id
__props__['stage'] = stage
if title is None:
raise TypeError("Missing required property 'title'")
__props__['title'] = title
__props__['deleted'] = None
super(IAMCustomRole, __self__).__init__(
'gcp:organizations/iAMCustomRole:IAMCustomRole',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, deleted=None, description=None, org_id=None, permissions=None, role_id=None, stage=None, title=None):
"""
Get an existing IAMCustomRole resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] deleted: (Optional) The current deleted state of the role.
:param pulumi.Input[str] description: A human-readable description for the role.
:param pulumi.Input[str] org_id: The numeric ID of the organization in which you want to create a custom role.
:param pulumi.Input[list] permissions: The names of the permissions this role grants when bound in an IAM policy. At least one permission must be specified.
:param pulumi.Input[str] role_id: The role id to use for this role.
:param pulumi.Input[str] stage: The current launch stage of the role.
Defaults to `GA`.
List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
:param pulumi.Input[str] title: A human-readable title for the role.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/organization_iam_custom_role.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["deleted"] = deleted
__props__["description"] = description
__props__["org_id"] = org_id
__props__["permissions"] = permissions
__props__["role_id"] = role_id
__props__["stage"] = stage
__props__["title"] = title
return IAMCustomRole(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 52.171233 | 186 | 0.675857 |
22324e2f80e2ed2117be716abb860427eb8eff3e | 9,220 | py | Python | mmseg/models/decode_heads/decode_head.py | xushige/mmseg-combine-cls | 9b645cd1cc6d1c145d5234a3ba554404889cb869 | [
"Apache-2.0"
] | 1 | 2021-09-16T14:47:11.000Z | 2021-09-16T14:47:11.000Z | mmseg/models/decode_heads/decode_head.py | xushige/mmseg-combine-cls | 9b645cd1cc6d1c145d5234a3ba554404889cb869 | [
"Apache-2.0"
] | null | null | null | mmseg/models/decode_heads/decode_head.py | xushige/mmseg-combine-cls | 9b645cd1cc6d1c145d5234a3ba554404889cb869 | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmseg.core import build_pixel_sampler
from mmseg.ops import resize
from ..builder import build_loss
from ..losses import accuracy
class BaseDecodeHead(BaseModule, metaclass=ABCMeta):
    """Base class for BaseDecodeHead.

    Args:
        in_channels (int|Sequence[int]): Input channels.
        channels (int): Channels after modules, before conv_seg.
        num_classes (int): Number of classes.
        dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
        conv_cfg (dict|None): Config of conv layers. Default: None.
        norm_cfg (dict|None): Config of norm layers. Default: None.
        act_cfg (dict): Config of activation layers.
            Default: dict(type='ReLU')
        in_index (int|Sequence[int]): Input feature index. Default: -1
        input_transform (str|None): Transformation type of input features.
            Options: 'resize_concat', 'multiple_select', None.
            'resize_concat': Multiple feature maps will be resize to the
                same size as first one and than concat together.
                Usually used in FCN head of HRNet.
            'multiple_select': Multiple feature maps will be bundle into
                a list and passed into decode head.
            None: Only one select feature map is allowed.
            Default: None.
        loss_decode (dict): Config of decode loss.
            Default: dict(type='CrossEntropyLoss').
        ignore_index (int | None): The label index to be ignored. When using
            masked BCE loss, ignore_index should be set to None. Default: 255
        sampler (dict|None): The config of segmentation map sampler.
            Default: None.
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 channels,
                 *,
                 num_classes,
                 dropout_ratio=0.1,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 in_index=-1,
                 input_transform=None,
                 loss_decode=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=False,
                     loss_weight=1.0),
                 ignore_index=255,
                 sampler=None,
                 align_corners=False,
                 init_cfg=dict(
                     type='Normal', std=0.01, override=dict(name='conv_seg'))):
        super(BaseDecodeHead, self).__init__(init_cfg)
        # Validates in_channels/in_index/input_transform and sets
        # self.in_channels accordingly (summed for 'resize_concat').
        self._init_inputs(in_channels, in_index, input_transform)
        self.channels = channels
        self.num_classes = num_classes
        self.dropout_ratio = dropout_ratio
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.in_index = in_index

        self.loss_decode = build_loss(loss_decode)
        self.ignore_index = ignore_index
        self.align_corners = align_corners

        if sampler is not None:
            self.sampler = build_pixel_sampler(sampler, context=self)
        else:
            self.sampler = None

        # Final 1x1 conv mapping decoder features to per-class logits.
        self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
        if dropout_ratio > 0:
            self.dropout = nn.Dropout2d(dropout_ratio)
        else:
            self.dropout = None
        self.fp16_enabled = False

    def extra_repr(self):
        """Extra repr."""
        s = f'input_transform={self.input_transform}, ' \
            f'ignore_index={self.ignore_index}, ' \
            f'align_corners={self.align_corners}'
        return s

    def _init_inputs(self, in_channels, in_index, input_transform):
        """Check and initialize input transforms.

        The in_channels, in_index and input_transform must match.
        Specifically, when input_transform is None, only single feature map
        will be selected. So in_channels and in_index must be of type int.
        When input_transform is not None, in_channels and in_index must be
        sequences of the same length.

        Args:
            in_channels (int|Sequence[int]): Input channels.
            in_index (int|Sequence[int]): Input feature index.
            input_transform (str|None): Transformation type of input features.
                Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resize to the
                    same size as first one and than concat together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundle into
                    a list and passed into decode head.
                None: Only one select feature map is allowed.
        """
        if input_transform is not None:
            assert input_transform in ['resize_concat', 'multiple_select']
        self.input_transform = input_transform
        self.in_index = in_index
        if input_transform is not None:
            assert isinstance(in_channels, (list, tuple))
            assert isinstance(in_index, (list, tuple))
            assert len(in_channels) == len(in_index)
            if input_transform == 'resize_concat':
                # Concatenation along the channel dim sums the channel counts.
                self.in_channels = sum(in_channels)
            else:
                self.in_channels = in_channels
        else:
            assert isinstance(in_channels, int)
            assert isinstance(in_index, int)
            self.in_channels = in_channels

    def _transform_inputs(self, inputs):
        """Transform inputs for decoder.

        Args:
            inputs (list[Tensor]): List of multi-level img features.

        Returns:
            Tensor: The transformed inputs
        """
        if self.input_transform == 'resize_concat':
            inputs = [inputs[i] for i in self.in_index]
            # Upsample every selected map to the spatial size of the first
            # one before concatenating along channels.
            upsampled_inputs = [
                resize(
                    input=x,
                    size=inputs[0].shape[2:],
                    mode='bilinear',
                    align_corners=self.align_corners) for x in inputs
            ]
            inputs = torch.cat(upsampled_inputs, dim=1)
        elif self.input_transform == 'multiple_select':
            inputs = [inputs[i] for i in self.in_index]
        else:
            inputs = inputs[self.in_index]

        return inputs

    @auto_fp16()
    @abstractmethod
    def forward(self, inputs):
        """Placeholder of forward function."""
        pass

    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
        """Forward function for training.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
            train_cfg (dict): The training config.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        seg_logits = self.forward(inputs)
        losses = self.losses(seg_logits, gt_semantic_seg)
        return losses

    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """
        return self.forward(inputs)

    def cls_seg(self, feat):
        """Classify each pixel."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.conv_seg(feat)
        return output

    @force_fp32(apply_to=('seg_logit', ))
    def losses(self, seg_logit, seg_label):
        """Compute segmentation loss."""
        loss = dict()
        # Upsample logits to the label's spatial resolution before the loss.
        seg_logit = resize(
            input=seg_logit,
            size=seg_label.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        if self.sampler is not None:
            seg_weight = self.sampler.sample(seg_logit, seg_label)
        else:
            seg_weight = None
        # Drop the channel dim if it is 1 (translated from the original
        # comment, which notes it usually is not — typically 3 channels).
        seg_label = seg_label.squeeze(1)
        loss['loss_seg'] = self.loss_decode(
            seg_logit,
            seg_label,
            weight=seg_weight,
            ignore_index=self.ignore_index)
        loss['acc_seg'] = accuracy(seg_logit, seg_label)
        return loss
| 39.570815 | 79 | 0.595879 |
98f6fda87ccb4570723283635ff4e6ecc3625e5d | 12,540 | py | Python | tests/test_suite.py | Justin-Fisher/webots | 8a39e8e4390612919a8d82c7815aa914f4c079a4 | [
"Apache-2.0"
] | 1,561 | 2019-09-04T11:32:32.000Z | 2022-03-31T18:00:09.000Z | tests/test_suite.py | Justin-Fisher/webots | 8a39e8e4390612919a8d82c7815aa914f4c079a4 | [
"Apache-2.0"
] | 2,184 | 2019-09-03T11:35:02.000Z | 2022-03-31T10:01:44.000Z | tests/test_suite.py | Justin-Fisher/webots | 8a39e8e4390612919a8d82c7815aa914f4c079a4 | [
"Apache-2.0"
] | 1,013 | 2019-09-07T05:09:32.000Z | 2022-03-31T13:01:28.000Z | #!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite."""
import sys
import os
import shutil
import platform
import datetime
import getpass
import glob
import subprocess
import threading
import time
import multiprocessing
from command import Command
# monitor failures
failures = 0
systemFailures = []
whitelist = ['ContextResult::kTransientFailure: Failed to send GpuChannelMsg_CreateCommandBuffer']
# parse arguments
filesArguments = []
nomakeOption = False
ansiEscape = True
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg == '--nomake':
nomakeOption = True
elif arg == '--no-ansi-escape':
ansiEscape = False
elif os.path.exists(arg):
filesArguments.append(arg)
else:
raise RuntimeError('Unknown option "' + arg + '"')
testGroups = ['api', 'other_api', 'physics', 'protos', 'parser', 'rendering', 'with_rendering']
if sys.platform == 'win32':
testGroups.remove('parser') # this one doesn't work on Windows
# global files
testsFolderPath = os.path.dirname(os.path.abspath(__file__)) + os.sep
outputFilename = testsFolderPath + 'output.txt'
defaultProjectPath = testsFolderPath + 'default' + os.sep
supervisorControllerName = 'test_suite_supervisor'
protoFileNames = ['TestSuiteSupervisor.proto', 'TestSuiteEmitter.proto']
tempWorldCounterFilename = testsFolderPath + 'world_counter.txt'
webotsStdOutFilename = testsFolderPath + 'webots_stdout.txt'
webotsStdErrFilename = testsFolderPath + 'webots_stderr.txt'
# Webots setup (cf. setupWebots() below)
webotsFullPath = ''
webotsVersion = ''
def setupWebots():
    """Find webots binary thanks to WEBOTS_HOME.

    Side effects: exports the test-suite environment variables and fills the
    module globals webotsFullPath, webotsVersion and webotsSysInfo.
    Raises RuntimeError when '--version' or '--sysinfo' fails; exits the
    process when the binary cannot be found (non-Windows).
    """
    os.putenv('WEBOTS_TEST_SUITE', 'TRUE')
    os.putenv('WEBOTS_EMPTY_PROJECT_PATH', defaultProjectPath)

    global webotsFullPath
    global webotsVersion
    global webotsSysInfo
    if sys.platform == 'win32':
        webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + 'msys64' + \
            os.sep + 'mingw64' + os.sep + 'bin' + os.sep + 'webots.exe'
    else:
        webotsBinary = 'webots'
        if 'WEBOTS_HOME' in os.environ:
            webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + webotsBinary
        else:
            webotsFullPath = '..' + os.sep + '..' + os.sep + webotsBinary
        if not os.path.isfile(webotsFullPath):
            sys.exit('Error: ' + webotsBinary + ' binary not found')
        webotsFullPath = os.path.normpath(webotsFullPath)

    # e.g. "Webots R2021a" -> version token is the 3rd word, split on dots.
    command = Command(webotsFullPath + ' --version')
    command.run()
    if command.returncode != 0:
        raise RuntimeError('Error when getting the Webots version')
    webotsVersion = command.output.replace('\n', ' ').split(' ')[2].split('.')
    command = Command(webotsFullPath + ' --sysinfo')
    command.run()
    if command.returncode != 0:
        raise RuntimeError('Error when getting the Webots information of the system')
    webotsSysInfo = command.output.split('\n')
def findFirstWorldFilename(worldsFilename):
    """Return the first world file name listed in `worldsFilename`.

    Reads only the first line and strips surrounding whitespace (including
    the trailing newline).
    """
    # 'with' guarantees the handle is closed even if readline() raises,
    # unlike the previous open()/close() pair.
    with open(worldsFilename) as file:
        return file.readline().strip()
def resetIndexFile(indexFilename):
    """(Re)create the index file, resetting its counter to '0'."""
    # 'with' guarantees the handle is flushed and closed even on error.
    with open(indexFilename, 'w') as file:
        file.write('0\n')
def formatString(s):
    """Left-pad the text before the first ': ' to 20 columns.

    Strings without a ': ' separator are returned unchanged.
    """
    head, sep, tail = s.partition(': ')
    if not sep:
        # No ': ' present: nothing to align.
        return s
    # ljust(20) is the str-method form of '{:<20}'.format(...); the
    # separator and the remainder are re-attached untouched.
    return head.ljust(20) + sep + tail
def resetOutputFile():
    """Create the output file and write the test-run header.

    The header records the Webots binary/version, OS, date, current user and
    the '--sysinfo' lines, each aligned with formatString(). Requires
    setupWebots() to have populated the module globals first.
    """
    file = open(outputFilename, 'w')
    file.write(formatString('Webots binary: ' + webotsFullPath) + '\n')
    file.write(formatString('Webots version: ' + str(webotsVersion)) + '\n')
    file.write(formatString(
        'Operating System: ' + platform.platform() +
        ' [' + platform.machine() + '] ' + platform.processor() +
        ' (' + platform.node() + ')') + '\n'
    )
    file.write(formatString('Date: ' + datetime.datetime.now().ctime()) + '\n')
    file.write(formatString('Tester: ' + getpass.getuser()) + '\n')
    for line in webotsSysInfo:
        file.write(formatString(line) + '\n')
    file.close()
def appendToOutputFile(txt):
    """Append `txt` to the global output file (outputFilename)."""
    # 'with' closes the handle even if the write raises, replacing the
    # previous open()/write()/close() sequence.
    with open(outputFilename, 'a') as file:
        file.write(txt)
def executeMake():
    """Execute 'make release' to ensure every controller/plugin is compiled.

    Runs from the tests folder with one job per CPU, restores the previous
    working directory, and raises RuntimeError on a non-zero exit code.
    """
    curdir = os.getcwd()
    os.chdir(testsFolderPath)
    command = Command('make release -j%d' % multiprocessing.cpu_count())
    command.run(silent=False)
    os.chdir(curdir)
    if command.returncode != 0:
        raise RuntimeError('Error when executing the Make command')
def generateWorldsList(groupName, worldsFilename):
    """Write the list of worlds to run for `groupName` into `worldsFilename`.

    Worlds come either from the command-line file arguments (when given) or
    from 'ls worlds/*.wbt' of the group folder. Returns the number of worlds
    counted for the group.
    """
    worldsCount = 0
    with open(worldsFilename, 'w') as f:
        # generate the list from the arguments
        if filesArguments:
            for file in filesArguments:
                if file.startswith(groupName):
                    f.write(file + '\n')
            # NOTE(review): this counts every argument, not only those that
            # matched groupName — preserved from the original; confirm intent.
            worldsCount = len(filesArguments)
        # generate the list from 'ls worlds/*.wbt'
        else:
            filenames = glob.glob(testsFolderPath + groupName + os.sep + 'worlds' + os.sep + '*.wbt')
            # Remove the generic name. The original removed items from the
            # list while iterating it, which skips the element following each
            # removal; filtering into a new list avoids that bug.
            filenames = [filename for filename in filenames
                         if not filename.endswith('test_suite')]
            # alphabetical order
            filenames.sort()
            # to file
            for filename in filenames:
                # speaker test not working on travis/github action because of missing sound drivers
                if (not filename.endswith('_temp.wbt') and
                        not (('TRAVIS' in os.environ or 'GITHUB_ACTIONS' in os.environ) and filename.endswith('speaker.wbt'))):
                    f.write(filename + '\n')
                    worldsCount += 1
    return worldsCount
def monitorOutputFile(finalMessage):
    """Display the output file on the console.

    Tails the output file until `finalMessage` appears; the Command handle is
    stored in the module global monitorOutputCommand so the main flow can
    terminate it once the suite is done.
    """
    global monitorOutputCommand
    monitorOutputCommand = Command('tail -f ' + outputFilename, ansiEscape)
    monitorOutputCommand.run(expectedString=finalMessage, silent=False)
# ---- main flow: build, run every test group, then summarize failures ----

if not nomakeOption:
    executeMake()
setupWebots()
resetOutputFile()

# A background thread tails the output file to the console until the final
# message is written at the very end of the run.
finalMessage = 'Test suite complete'
thread = threading.Thread(target=monitorOutputFile, args=[finalMessage])
thread.start()

webotsArguments = '--mode=fast --stdout --stderr --batch'
if sys.platform != 'win32':
    webotsArguments += ' --no-sandbox'
webotsArgumentsNoRendering = webotsArguments + ' --no-rendering --minimize'

for groupName in testGroups:
    testFailed = False
    appendToOutputFile('\n### ' + groupName + ' test\n\n')

    # clear stdout and stderr files
    open(webotsStdErrFilename, 'w').close()
    open(webotsStdOutFilename, 'w').close()

    worldsFilename = testsFolderPath + groupName + os.sep + 'worlds.txt'
    indexFilename = testsFolderPath + groupName + os.sep + 'worlds_index.txt'

    # init temporary world counter file (incremented by the supervisor as
    # each world is executed)
    tempFile = open(tempWorldCounterFilename, 'w')
    tempFile.write('0')
    tempFile.close()

    # install the shared supervisor controller into the group's project
    supervisorTargetDirectory = testsFolderPath + groupName + os.sep + 'controllers' + os.sep + \
        supervisorControllerName
    if not os.path.exists(supervisorTargetDirectory):
        os.makedirs(supervisorTargetDirectory)
    shutil.copyfile(
        defaultProjectPath + 'controllers' + os.sep +
        supervisorControllerName + os.sep +
        supervisorControllerName + '.py',
        supervisorTargetDirectory + os.sep + supervisorControllerName + '.py'
    )

    # parser tests uses a slightly different Supervisor PROTO
    protosTargetDirectory = testsFolderPath + groupName + os.sep + 'protos'
    protosSourceDirectory = defaultProjectPath + 'protos' + os.sep
    if not os.path.exists(protosTargetDirectory):
        os.makedirs(protosTargetDirectory)
    for protoFileName in protoFileNames:
        shutil.copyfile(protosSourceDirectory + protoFileName,
                        protosTargetDirectory + os.sep + protoFileName)

    worldsCount = generateWorldsList(groupName, worldsFilename)
    firstSimulation = findFirstWorldFilename(worldsFilename)
    if not os.path.exists(firstSimulation):
        continue
    resetIndexFile(indexFilename)

    # Here is an example to run webots in gdb and display the stack
    # when it crashes.
    # This is particularly useful to debug on the jenkins server.
    # command = Command('gdb -ex run --args ' + webotsFullPath + '-bin ' +
    #                   firstSimulation + ' --mode=fast --no-rendering --minimize')
    # command.run(silent = False)
    if groupName == 'with_rendering':
        command = Command(webotsFullPath + ' ' + firstSimulation + ' ' + webotsArguments)
    else:
        command = Command(webotsFullPath + ' ' + firstSimulation + ' ' + webotsArgumentsNoRendering)

    # redirect stdout and stderr to files
    command.runTest(timeout=10 * 60)  # 10 minutes

    if command.isTimeout or command.returncode != 0:
        if command.isTimeout:
            failures += 1
            appendToOutputFile(
                'FAILURE: Webots has been terminated ' +
                'by the test suite script\n')
        else:
            failures += 1
            appendToOutputFile(
                'FAILURE: Webots exits abnormally with this error code: ' +
                str(command.returncode) + '\n')
        testFailed = True
    else:
        # check count of executed worlds against the supervisor's counter
        tempFile = open(tempWorldCounterFilename)
        counterString = tempFile.read()
        tempFile.close()
        if int(counterString) < worldsCount:
            testFailed = True
            appendToOutputFile('FAILURE: Some tests have not been executed\n')
            appendToOutputFile('- expected number of worlds: %d\n' % (worldsCount))
            appendToOutputFile('- number of worlds actually tested: %s)\n' % (counterString))
        else:
            # scan stderr for 'Failure' lines not covered by the whitelist
            lines = open(webotsStdErrFilename, 'r').readlines()
            for line in lines:
                if 'Failure' in line:
                    # check if it should be ignored
                    if not any(item in line for item in whitelist):
                        failures += 1
                        systemFailures.append(line)

    if testFailed:
        # dump the full Webots logs into the report for post-mortem analysis
        appendToOutputFile('\nWebots complete STDOUT log:\n')
        with open(webotsStdOutFilename) as f:
            for line in f:
                appendToOutputFile(line)
        appendToOutputFile('\nWebots complete STDERR log:\n')
        with open(webotsStdErrFilename) as f:
            for line in f:
                appendToOutputFile(line)
                if '(core dumped)' in line:
                    # Parse the crashed PID out of the shell's
                    # "... <pid> Segmentation fault (core dumped) ..." line
                    # and pull a backtrace from the matching core file.
                    seg_fault_line = line[0:line.find(' Segmentation fault')]
                    pid = int(seg_fault_line[seg_fault_line.rfind(' ') + 1:])
                    core_dump_file = '/tmp/core_webots-bin.' + str(pid)
                    if os.path.exists(core_dump_file):
                        # NOTE(review): check_output returns bytes; writing it
                        # through appendToOutputFile (text mode) looks
                        # suspicious on Python 3 — confirm.
                        appendToOutputFile(subprocess.check_output([
                            'gdb', '--batch', '--quiet', '-ex', 'bt', '-ex',
                            'quit', '../bin/webots-bin', core_dump_file
                        ]))
                        os.remove(core_dump_file)
                    else:
                        appendToOutputFile(
                            'Cannot get the core dump file: "%s" does not exist.' % core_dump_file
                        )

# Writing finalMessage stops the tail thread (see monitorOutputFile).
appendToOutputFile('\n' + finalMessage + '\n')

if len(systemFailures) > 0:
    appendToOutputFile('\nSystem Failures:\n')
    for message in systemFailures:
        appendToOutputFile(message)

time.sleep(1)
if monitorOutputCommand.isRunning():
    monitorOutputCommand.terminate(force=True)

# The exit code is the total failure count (FAILURE lines + counted ones).
with open(outputFilename, 'r') as file:
    content = file.read()
    failures += content.count('FAILURE ')

sys.exit(failures)
| 35.423729 | 123 | 0.640351 |
54cf2f9e5823897d6235ee3a85d59f9df0e373aa | 158 | py | Python | lessons/ObjectOrientedProgramming/IdeFiles/4a_binomial_package/distributions/__init__.py | Meng-nanco/DSND_Term2 | 00489e641eafc8b3843bfaa0a49297aa9767984e | [
"MIT"
] | null | null | null | lessons/ObjectOrientedProgramming/IdeFiles/4a_binomial_package/distributions/__init__.py | Meng-nanco/DSND_Term2 | 00489e641eafc8b3843bfaa0a49297aa9767984e | [
"MIT"
] | null | null | null | lessons/ObjectOrientedProgramming/IdeFiles/4a_binomial_package/distributions/__init__.py | Meng-nanco/DSND_Term2 | 00489e641eafc8b3843bfaa0a49297aa9767984e | [
"MIT"
] | null | null | null | from .Gaussiandistribution import Gaussian
# TODO: import the Binomial class from the Binomialdistribution module
from .Binomialdistribution import Binomial
| 31.6 | 70 | 0.85443 |
9db5dcb4685acd73951f2d8ddd4dac02f3499eb7 | 1,245 | py | Python | Thresholds/SusyD/Approach 2 - Sequential/susycommon.py | Suvoo/Formal-Concept-Analysis | 53e895b7112a741af352fc60869e689324c75557 | [
"MIT"
] | 4 | 2021-07-14T16:53:15.000Z | 2021-07-15T04:03:27.000Z | Thresholds/SusyD/Approach 2 - Sequential/susycommon.py | Suvoo/Formal-Concept-Analysis | 53e895b7112a741af352fc60869e689324c75557 | [
"MIT"
] | null | null | null | Thresholds/SusyD/Approach 2 - Sequential/susycommon.py | Suvoo/Formal-Concept-Analysis | 53e895b7112a741af352fc60869e689324c75557 | [
"MIT"
] | null | null | null | threshold = 929794
theta = 2500000
a,brr=[],[]
ac=''
with open('Thresholds\SusyD\Approach 1 - FrontRear\susy_support_att') as file:
for line in file:
line = line.strip()
line = line + '\t'
if(line[0] != 'S'):
for c in line:
if(c != '\t'):
ac = ac + c
else:
a.append(int(ac))
# print(ac)
ac=''
brr.append(a)
a=[]
# print(brr)
brr.reverse()
# "Supp of the Att" "Att ID"
# tindex is if arr[i][0] == threshold - unique for all
# for i in range(len(brr)):
# if brr[i][0] == threshold:
# tindex = i
arr = []
tindex = 15
for i in range(len(brr)):
if i>= tindex:
arr.append(brr[i])
# print(arr)
sgr = 0
ans = []
front = 1
nxt = 0
for i in range(18): # do unitl elements start repeating
begin = arr[nxt][0]
to_append = arr[nxt][1]
ans.append(to_append)
for j in range(front,len(arr)): # minus lenght from rear
sgr = sgr + arr[j][0]
if(sgr + begin >= theta):
front += len(ans)
nxt = j
break
else:
ans.append(arr[j][1])
print(ans)
ans = []
sgr = 0
| 21.101695 | 78 | 0.472289 |
73029aed48847c8ad509c1269e89ad27e03667e7 | 142 | py | Python | app/review/blueprint.py | MhmdRyhn/flask_restful_with_blueprint | 8122797008e4b6325ca3c4460542f0c72cb1a19c | [
"MIT"
] | null | null | null | app/review/blueprint.py | MhmdRyhn/flask_restful_with_blueprint | 8122797008e4b6325ca3c4460542f0c72cb1a19c | [
"MIT"
] | null | null | null | app/review/blueprint.py | MhmdRyhn/flask_restful_with_blueprint | 8122797008e4b6325ca3c4460542f0c72cb1a19c | [
"MIT"
] | null | null | null | import flask
import flask_restful
blueprint = flask.Blueprint('review', __name__)
url_prefix = '/review'
API = flask_restful.Api(blueprint)
| 17.75 | 47 | 0.78169 |
a0e34aa9986b18b393789b634c0d49a68981ba90 | 15,994 | py | Python | azurelinuxagent/ga/monitor.py | vrdmr/WALinuxAgent | 6d680e071cdf31049c4c636633bf3bcfa5366ccc | [
"Apache-2.0"
] | null | null | null | azurelinuxagent/ga/monitor.py | vrdmr/WALinuxAgent | 6d680e071cdf31049c4c636633bf3bcfa5366ccc | [
"Apache-2.0"
] | null | null | null | azurelinuxagent/ga/monitor.py | vrdmr/WALinuxAgent | 6d680e071cdf31049c4c636633bf3bcfa5366ccc | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import threading
import time
import uuid
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.networkutil as networkutil
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.errorstate import ErrorState
from azurelinuxagent.common.event import add_event, WALAEventOperation, report_metric, collect_events
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.protocol.healthservice import HealthService
from azurelinuxagent.common.protocol.imds import get_imds_client
from azurelinuxagent.common.utils.restutil import IOErrorCounter
from azurelinuxagent.common.utils.textutil import hash_strings
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
def generate_extension_metrics_telemetry_dictionary(schema_version=1.0,
                                                    performance_metrics=None):
    """Build the extension-metrics telemetry payload.

    Returns a dict with a "SchemaVersion" entry — plus a "PerfMetrics" entry
    when performance_metrics is truthy — or None for any schema version
    other than 1.0.
    """
    if schema_version != 1.0:
        # Only schema 1.0 is understood.
        return None

    payload = {"SchemaVersion": 1.0}
    if performance_metrics:
        payload["PerfMetrics"] = performance_metrics
    return payload
def get_monitor_handler():
    """Factory returning a new MonitorHandler instance."""
    return MonitorHandler()
class MonitorHandler(object):
    """Background worker that periodically sends telemetry and events and
    reports host-plugin / IMDS health heartbeats from a daemon thread."""

    EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1)
    TELEMETRY_HEARTBEAT_PERIOD = datetime.timedelta(minutes=30)
    # extension metrics period
    CGROUP_TELEMETRY_POLLING_PERIOD = datetime.timedelta(minutes=5)
    CGROUP_TELEMETRY_REPORTING_PERIOD = datetime.timedelta(minutes=30)
    # host plugin: heartbeat cadence vs. the error window after which the
    # plugin is reported unhealthy
    HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
    HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5)
    # imds: same pattern as the host plugin above
    IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
    IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3)
    # Resetting loggers period
    RESET_LOGGERS_PERIOD = datetime.timedelta(hours=12)
    def __init__(self):
        self.osutil = get_osutil()
        self.imds_client = None

        # Worker thread handle and per-task "last run" timestamps; None means
        # the task has not run yet (each task method bootstraps its own value
        # so it fires on the first call).
        self.event_thread = None
        self.last_reset_loggers_time = None
        self.last_event_collection = None
        self.last_telemetry_heartbeat = None
        self.last_cgroup_polling_telemetry = None
        self.last_cgroup_report_telemetry = None
        self.last_host_plugin_heartbeat = None
        self.last_imds_heartbeat = None

        # Protocol objects are created lazily in init_protocols() on the
        # monitor thread itself (see the comment there).
        self.protocol = None
        self.protocol_util = None
        self.health_service = None

        # Snapshots used to detect network-configuration changes.
        self.last_route_table_hash = b''
        self.last_nic_state = {}

        self.should_run = True
        self.heartbeat_id = str(uuid.uuid4()).upper()
        # Error windows: unhealthy is only reported once failures have
        # persisted for the corresponding *_HEALTH_PERIOD.
        self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD)
        self.imds_errorstate = ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD)
    def run(self):
        """Start the monitor thread, initializing the protocol stack first."""
        self.start(init_data=True)
    def stop(self):
        """Ask the daemon loop to exit and wait for the thread to finish."""
        self.should_run = False
        if self.is_alive():
            self.event_thread.join()
    def init_protocols(self):
        """Create the protocol objects and the health service client."""
        # The initialization of ProtocolUtil for the Monitor thread should be done within the thread itself rather
        # than initializing it in the ExtHandler thread. This is done to avoid any concurrency issues as each
        # thread would now have its own ProtocolUtil object as per the SingletonPerThread model.
        self.protocol_util = get_protocol_util()
        self.protocol = self.protocol_util.get_protocol()
        self.health_service = HealthService(self.protocol.get_endpoint())
    def init_imds_client(self):
        """Create the IMDS client, bound to the wire-server endpoint."""
        wireserver_endpoint = self.protocol_util.get_wireserver_endpoint()
        self.imds_client = get_imds_client(wireserver_endpoint)
def is_alive(self):
return self.event_thread is not None and self.event_thread.is_alive()
def start(self, init_data=False):
self.event_thread = threading.Thread(target=self.daemon, args=(init_data,))
self.event_thread.setDaemon(True)
self.event_thread.setName("MonitorHandler")
self.event_thread.start()
    def collect_and_send_events(self):
        """
        Periodically send any events located in the events folder
        """
        try:
            if self.last_event_collection is None:
                # First call: backdate so the collection fires immediately.
                self.last_event_collection = datetime.datetime.utcnow() - MonitorHandler.EVENT_COLLECTION_PERIOD

            if datetime.datetime.utcnow() >= (self.last_event_collection + MonitorHandler.EVENT_COLLECTION_PERIOD):
                # Inner try: a failure while reporting is logged but must not
                # be mistaken for a failure of the collection machinery.
                try:
                    event_list = collect_events()

                    if len(event_list.events) > 0:
                        self.protocol.report_event(event_list)
                except Exception as e:
                    logger.warn("{0}", ustr(e))
        except Exception as e:
            logger.warn("Failed to send events: {0}", ustr(e))

        self.last_event_collection = datetime.datetime.utcnow()
    def daemon(self, init_data=False):
        """Monitor thread main loop: run every periodic task, sleep for the
        shortest configured period, and repeat until stop() clears
        should_run. Each task enforces its own period internally."""
        if init_data:
            self.init_protocols()
            self.init_imds_client()

        # Sleep for the smallest of all task periods so no task is late by
        # more than one loop iteration.
        min_delta = min(MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD,
                        MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD,
                        MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD,
                        MonitorHandler.EVENT_COLLECTION_PERIOD,
                        MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD,
                        MonitorHandler.IMDS_HEARTBEAT_PERIOD).seconds
        while self.should_run:
            try:
                self.protocol.update_host_plugin_from_goal_state()
                self.send_telemetry_heartbeat()
                self.poll_telemetry_metrics()
                # This will be removed in favor of poll_telemetry_metrics() and it'll directly send the perf data for
                # each cgroup.
                self.send_telemetry_metrics()
                self.collect_and_send_events()
                self.send_host_plugin_heartbeat()
                self.send_imds_heartbeat()
                self.log_altered_network_configuration()
                self.reset_loggers()
            except Exception as e:
                # A failing task skips the rest of this iteration only; the
                # loop itself keeps running.
                logger.warn("An error occurred in the monitor thread main loop; will skip the current iteration.\n{0}", ustr(e))
            time.sleep(min_delta)
def reset_loggers(self):
"""
The loggers maintain hash-tables in memory and they need to be cleaned up from time to time.
For reference, please check azurelinuxagent.common.logger.Logger and
azurelinuxagent.common.event.EventLogger classes
"""
try:
time_now = datetime.datetime.utcnow()
if not self.last_reset_loggers_time:
self.last_reset_loggers_time = time_now
if time_now >= (self.last_reset_loggers_time + MonitorHandler.RESET_LOGGERS_PERIOD):
logger.reset_periodic()
except Exception as e:
logger.warn("Failed to clear periodic loggers: {0}", ustr(e))
self.last_reset_loggers_time = time_now
    def send_imds_heartbeat(self):
        """
        Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have
        successfully called and validated a response in the last IMDS_HEALTH_PERIOD.
        """
        try:
            if self.last_imds_heartbeat is None:
                # First call: backdate so the heartbeat fires immediately.
                self.last_imds_heartbeat = datetime.datetime.utcnow() - MonitorHandler.IMDS_HEARTBEAT_PERIOD

            if datetime.datetime.utcnow() >= (self.last_imds_heartbeat + MonitorHandler.IMDS_HEARTBEAT_PERIOD):
                is_currently_healthy, response = self.imds_client.validate()

                if is_currently_healthy:
                    self.imds_errorstate.reset()
                else:
                    self.imds_errorstate.incr()

                # Unhealthy only once errors have persisted past the
                # IMDS_HEALTH_PERIOD window configured on the ErrorState.
                is_healthy = self.imds_errorstate.is_triggered() is False
                logger.verbose("IMDS health: {0} [{1}]", is_healthy, response)

                self.health_service.report_imds_status(is_healthy, response)

        except Exception as e:
            msg = "Exception sending imds heartbeat: {0}".format(ustr(e))
            add_event(
                name=AGENT_NAME,
                version=CURRENT_VERSION,
                op=WALAEventOperation.ImdsHeartbeat,
                is_success=False,
                message=msg,
                log_event=False)

        self.last_imds_heartbeat = datetime.datetime.utcnow()
def send_host_plugin_heartbeat(self):
"""
Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to
communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD.
"""
try:
if self.last_host_plugin_heartbeat is None:
self.last_host_plugin_heartbeat = datetime.datetime.utcnow() - MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD
if datetime.datetime.utcnow() >= (
self.last_host_plugin_heartbeat + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD):
host_plugin = self.protocol.client.get_host_plugin()
host_plugin.ensure_initialized()
is_currently_healthy = host_plugin.get_health()
if is_currently_healthy:
self.host_plugin_errorstate.reset()
else:
self.host_plugin_errorstate.incr()
is_healthy = self.host_plugin_errorstate.is_triggered() is False
logger.verbose("HostGAPlugin health: {0}", is_healthy)
self.health_service.report_host_plugin_heartbeat(is_healthy)
if not is_healthy:
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HostPluginHeartbeatExtended,
is_success=False,
message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time),
log_event=False)
except Exception as e:
msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e))
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HostPluginHeartbeat,
is_success=False,
message=msg,
log_event=False)
self.last_host_plugin_heartbeat = datetime.datetime.utcnow()
def send_telemetry_heartbeat(self):
try:
if self.last_telemetry_heartbeat is None:
self.last_telemetry_heartbeat = datetime.datetime.utcnow() - MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD
if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD):
io_errors = IOErrorCounter.get_and_reset()
hostplugin_errors = io_errors.get("hostplugin")
protocol_errors = io_errors.get("protocol")
other_errors = io_errors.get("other")
if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0:
msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors, protocol_errors,
other_errors)
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=True,
message=msg,
log_event=False)
except Exception as e:
logger.warn("Failed to send heartbeat: {0}", ustr(e))
self.last_telemetry_heartbeat = datetime.datetime.utcnow()
def poll_telemetry_metrics(self):
"""
This method polls the tracked cgroups to get data from the cgroups filesystem and send the data directly.
:return: List of Metrics (which would be sent to PerfCounterMetrics directly.
"""
try: # If there is an issue in reporting, it should not take down whole monitor thread.
time_now = datetime.datetime.utcnow()
if not self.last_cgroup_polling_telemetry:
self.last_cgroup_polling_telemetry = time_now
if time_now >= (self.last_cgroup_polling_telemetry +
MonitorHandler.CGROUP_TELEMETRY_POLLING_PERIOD):
metrics = CGroupsTelemetry.poll_all_tracked()
if metrics:
for metric in metrics:
report_metric(metric.category, metric.counter, metric.instance, metric.value)
except Exception as e:
logger.warn("Could not poll all the tracked telemetry due to {0}", ustr(e))
self.last_cgroup_polling_telemetry = datetime.datetime.utcnow()
def send_telemetry_metrics(self):
"""
The send_telemetry_metrics would soon be removed in favor of sending performance metrics directly.
:return:
"""
try: # If there is an issue in reporting, it should not take down whole monitor thread.
time_now = datetime.datetime.utcnow()
if not self.last_cgroup_report_telemetry:
self.last_cgroup_report_telemetry = time_now
if time_now >= (self.last_cgroup_report_telemetry + MonitorHandler.CGROUP_TELEMETRY_REPORTING_PERIOD):
performance_metrics = CGroupsTelemetry.report_all_tracked()
if performance_metrics:
message = generate_extension_metrics_telemetry_dictionary(schema_version=1.0,
performance_metrics=performance_metrics)
add_event(name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ExtensionMetricsData,
is_success=True,
message=ustr(message),
log_event=False)
except Exception as e:
logger.warn("Could not report all the tracked telemetry due to {0}", ustr(e))
self.last_cgroup_report_telemetry = datetime.datetime.utcnow()
def log_altered_network_configuration(self):
"""
Check various pieces of network configuration and, if altered since the last check, log the new state.
"""
raw_route_list = self.osutil.read_route_table()
digest = hash_strings(raw_route_list)
if digest != self.last_route_table_hash:
self.last_route_table_hash = digest
route_list = self.osutil.get_list_of_routes(raw_route_list)
logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list))))
nic_state = self.osutil.get_nic_state()
if nic_state != self.last_nic_state:
description = "Initial" if self.last_nic_state == {} else "Updated"
logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values()))))
self.last_nic_state = nic_state
| 43.819178 | 128 | 0.64543 |
339542e2b3a3e90c42d586da30354dee02e8397e | 33,150 | py | Python | trax/layers/combinators.py | YannickWehr/trax | 67dda3b236339a7f6de803a3f84a9e92d0f0442c | [
"Apache-2.0"
] | null | null | null | trax/layers/combinators.py | YannickWehr/trax | 67dda3b236339a7f6de803a3f84a9e92d0f0442c | [
"Apache-2.0"
] | null | null | null | trax/layers/combinators.py | YannickWehr/trax | 67dda3b236339a7f6de803a3f84a9e92d0f0442c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Combinators for composing layers."""
from trax import fastmath
from trax.fastmath import numpy as jnp
from trax.layers import base
from trax.layers.base import Fn
from trax.shapes import ShapeDtype
class Serial(base.Layer):
  """Combinator that applies layers serially (by function composition).

  This combinator is commonly used to construct deep networks, e.g., like this::

      mlp = tl.Serial(
          tl.Dense(128),
          tl.Relu(),
          tl.Dense(10),
          tl.LogSoftmax()
      )

  A Serial combinator uses stack semantics to manage data for its sublayers.
  Each sublayer sees only the inputs it needs and returns only the outputs it
  has generated. The sublayers interact via the data stack. For instance, a
  sublayer k, following sublayer j, gets called with the data stack in the
  state left after layer j has applied. The Serial combinator then:

    - takes n_in items off the top of the stack (n_in = k.n_in) and calls
      layer k, passing those items as arguments; and

    - takes layer k's n_out return values (n_out = k.n_out) and pushes
      them onto the data stack.

  A Serial instance with no sublayers acts as a special-case (but useful)
  1-input 1-output no-op.
  """
  def __init__(self, *sublayers, name=None, sublayers_to_print=None):
    """Creates the combinator; nested layer lists are flattened first."""
    super().__init__(
        name=name, sublayers_to_print=sublayers_to_print)
    sublayers = _ensure_flat(sublayers)
    self._sublayers = sublayers
    self._n_layers = len(sublayers)
    if sublayers:
      # Aggregate arity is derived by simulating stack depth over the series.
      self._n_in, self._n_out = self._n_inputs_n_outputs(sublayers)
      # Placeholders; real values are created by init_weights_and_state.
      self._weights = tuple(None for l in sublayers)
      self._state = tuple(None for l in sublayers)
  def forward(self, xs):
    """Runs each sublayer in order, threading the data stack through them."""
    self._validate_forward_inputs(xs)
    state, weights = self.state, self.weights
    rngs = _split_rngs(self.rng, self._n_layers)
    if not self.sublayers:  # No-op: leave args unchanged.
      return xs
    stack = xs
    new_state = []
    n_layers = self._n_layers
    if len(weights) != n_layers:
      raise ValueError(
          f'Number of weight elements ({len(weights)}) does not equal '
          f'number of sublayers ({n_layers}).')
    if len(state) != n_layers:
      raise ValueError(
          f'Number of state elements ({len(state)}) does not equal '
          f'number of sublayers ({n_layers}).')
    # Each sublayer pops its n_in items off the stack and pushes its n_out
    # outputs; per-sublayer state is collected and stored afterward.
    for layer, w, s, rng in zip(self.sublayers, weights, state, rngs):
      inputs = _inputs_from_stack(layer, stack)
      outputs, s = layer.pure_fn(inputs, w, s, rng, use_cache=True)
      stack = _outputs_onto_stack(layer, outputs, stack)
      new_state.append(s)
    self.state = new_state
    return stack
  # pylint: disable=protected-access
  def init_weights_and_state(self, input_signature):
    """Initializes all sublayers, threading abstract signatures through them."""
    weights = []
    states = []
    # In the code below, stack, inputs, and outputs are abstract (shapes and
    # dtypes), but weights and states are non-abstract actual values.
    stack = input_signature
    for sublayer in self.sublayers:
      inputs = _inputs_from_stack(sublayer, stack)
      weights_or_cache_marker, state_or_cache_marker = (
          sublayer.init(inputs, use_cache=True))
      outputs, _ = sublayer._forward_abstract(inputs)
      stack = _outputs_onto_stack(sublayer, outputs, stack)
      weights.append(weights_or_cache_marker)
      states.append(state_or_cache_marker)
    self.state = states
    self.weights = weights
  # pylint: enable=protected-access
  def _n_inputs_n_outputs(self, layers):
    """Returns (n_in, n_out) for the series by simulating stack depth."""
    del self
    running_max = 0
    running_total = 0
    for layer in layers:
      running_total += layer.n_in
      running_max = max(running_max, running_total)
      running_total -= layer.n_out
    return running_max, (running_max - running_total)
  def _validate_forward_inputs(self, xs):
    """Raises TypeError/ValueError if `xs` cannot supply n_in inputs."""
    if not isinstance(xs, (tuple, list)) and self._n_in != 1:
      raise TypeError(f'Serial.forward input must be a tuple or list; '
                      f'instead got {type(xs)}.')
    # TODO(jonni): Include full xs (or shape) in error message?
    len_xs = 1 if isinstance(xs, jnp.ndarray) else len(xs)
    if len_xs < self.n_in:
      raise ValueError(
          f'Number of inputs ({len(xs)}) to Serial.forward less than n_in '
          f'({self.n_in}).')
class Parallel(base.Layer):
  """Combinator that applies a list of layers in parallel to its inputs.

  Layers in the list apply to successive spans of inputs, where the spans are
  determined how many inputs each layer takes. The resulting output is the
  (flattened) concatenation of the respective layer outputs.

  For example, suppose one has three layers:

    - F: 1 input, 1 output
    - G: 3 inputs, 1 output
    - H: 2 inputs, 2 outputs (h1, h2)

  Then Parallel(F, G, H) will take 6 inputs and give 4 outputs:

    - inputs: a, b, c, d, e, f
    - outputs: F(a), G(b, c, d), h1, h2

  As an important special case, a None argument to Parallel acts as if it takes
  one argument, which it leaves unchanged. (It acts as a one-arg no-op.) For
  example:

      Parallel(None, F)

  creates a layer that passes its first input unchanged and applies F to the
  following input(s).
  """
  def __init__(self, *sublayers, name=None):
    """The constructor.

    Args:
      *sublayers: A list of sublayers.
      name: Descriptive name for this layer.

    Returns:
      A new layer in which each of the given sublayers applies to its
      corresponding span of elements in the dataflow stack.
    """
    super().__init__(name=name)
    sublayers = self._validate(sublayers)
    self._n_layers = len(sublayers)
    self._sublayers = sublayers
    # Aggregate arity: input/output spans of the sublayers are concatenated.
    self._n_in = sum(l.n_in for l in sublayers)
    self._n_out = sum(l.n_out for l in sublayers)
    # Placeholders; real values are created by init_weights_and_state.
    self._weights = tuple(None for l in sublayers)
    self._state = tuple(None for l in sublayers)
  def forward(self, inputs):
    """Runs each sublayer on its own span of inputs; concatenates outputs."""
    n_layers, layers = self._n_layers, self.sublayers
    sublayer_inputs = self._allot_to_sublayers(inputs)
    state, weights = self.state, self.weights
    rngs = _split_rngs(self.rng, n_layers)
    if len(sublayer_inputs) != n_layers:
      raise ValueError(
          f'Number of inputs for sublayers ({len(sublayer_inputs)}) does not equal '
          f'number of sublayers ({n_layers}).')
    if len(weights) != n_layers:
      raise ValueError(
          f'Number of weight elements ({len(weights)}) does not equal '
          f'number of sublayers ({n_layers}).')
    if len(state) != n_layers:
      raise ValueError(
          f'Number of state elements ({len(state)}) does not equal '
          f'number of sublayers ({n_layers}).')
    if len(rngs) != n_layers:
      raise ValueError(
          f'Number of rngs ({len(rngs)}) does not equal '
          f'number of sublayers ({n_layers}).')
    outputs = []
    new_state = []
    for layer, x, w, s, r in zip(layers, sublayer_inputs, weights, state, rngs):
      # Note that zip silently truncates its result if lengths don't match.
      sub_outputs, sub_state = layer.pure_fn(x, w, s, r, use_cache=True)
      if layer.n_out == 1:
        outputs.append(sub_outputs)
      else:
        outputs.extend(sub_outputs)
      new_state.append(sub_state)
    # Single-output layers return a bare tensor, not a 1-tuple.
    output = outputs[0] if self.n_out == 1 else tuple(outputs)
    self.state = tuple(new_state)
    return output
  def init_weights_and_state(self, input_signature):
    """Initializes each sublayer with its span of the input signature."""
    sublayer_signatures = self._allot_to_sublayers(input_signature)
    inits = [layer.init(signature, use_cache=True)
             for layer, signature
             in zip(self.sublayers, sublayer_signatures)]
    if inits:
      weights, state = tuple(zip(*inits))
      self.state = state
      self.weights = weights
  def _validate(self, layers):
    """Normalizes `layers`: None/[] become Serial(None), lists become Serial."""
    if not layers or len(layers) < 2:
      raise ValueError(
          f'layers ({layers}) must be a list with at least two elements')
    layers = list(layers)  # Ensure we can modify layers.
    for i, obj in enumerate(layers):
      if obj is None or obj == []:  # pylint: disable=g-explicit-bool-comparison
        layers[i] = Serial(None)
      elif isinstance(obj, (list, tuple)):
        layers[i] = Serial(obj)
      else:
        if not isinstance(obj, base.Layer):
          raise ValueError(
              f'Found nonlayer object ({obj}) in layers list: [{layers}]')
        if layers[i].n_in == 0:
          raise ValueError(
              f'Sublayer with n_in = 0 not allowed in Parallel: {layers[i]}')
    return layers
  def _allot_to_sublayers(self, inputs):
    """Divides Parallel's inputs for use by the sublayers.

    Args:
      inputs: Tuple of ndarrays or ShapeDtype instances.

    Returns:
      A tuple that partitions this layer's inputs among its sublayers.
      Sublayers that take one argument get that argument directly. All other
      sublayers get a tuple of items.
    """
    start, end = 0, 0
    sub_inputs = []
    for layer in self.sublayers:
      n_in = layer.n_in
      end = start + n_in
      if n_in == 1:
        sub_inputs.append(inputs[start])
      else:
        sub_inputs.append(inputs[start:end])
      start = end
    return tuple(sub_inputs)
class Concatenate(base.Layer):
  """Layer that joins its `n_items` input tensors along a single axis.

  All inputs must agree on every dimension other than the concatenation axis.
  """
  def __init__(self, n_items=2, axis=-1):
    """Creates a concatenation layer over `n_items` inputs on axis `axis`."""
    if axis == -1:
      name = 'Concatenate'
    else:
      name = f'Concatenate_axis{axis}'
    super().__init__(n_in=n_items, name=name)
    self._n_items = n_items
    self._axis = axis
  def forward(self, xs):
    """Returns the inputs joined along the configured axis."""
    return jnp.concatenate(xs, self._axis)
class Split(base.Layer):
  """Layer that divides one tensor into `n_items` pieces along an axis.

  The output is a tuple of `n_items` tensors.
  """
  def __init__(self, n_items=2, axis=-1):
    """Creates a layer emitting `n_items` slices taken along `axis`."""
    super().__init__(n_out=n_items)
    self._n_items = n_items
    self._axis = axis
  def forward(self, inputs):
    """Returns a tuple holding the `n_items` sections of `inputs`."""
    pieces = jnp.split(inputs, self._n_items, self._axis)
    return tuple(pieces)
def _scan(f, xs, init_value, axis=0, remat=False):
  """Scans the f over the given axis of xs.

  In pseudo-python, the scan function would look as follows:

  def scan(f, xs, init_value, axis):
    xs = [xs[..., i, ...] for i in range(xs.shape[axis])]
    cur_value = init_value
    ys = []
    for x in xs:
      y, cur_value = f(x, cur_value)
      ys.append(y)
    return np.stack(ys, axis), cur_value

  Args:
    f: function (x, carry) -> (y, new_carry)
    xs: tensor, x will be xs slices on axis
    init_value: tensor, initial value of the carry-over
    axis: int, the axis on which to slice xs
    remat: whether to re-materialize f

  Returns:
    A pair (ys, last_value) as described above.
  """
  def swapaxes(x):
    # Move `axis` to position 0 (and vice versa): the backend scan always
    # iterates over the leading axis.
    transposed_axes = list(range(len(x.shape)))
    transposed_axes[axis] = 0
    transposed_axes[0] = axis
    return jnp.transpose(x, axes=transposed_axes)
  if axis != 0:
    xs = fastmath.nested_map(swapaxes, xs)
  def transposed_f(c, x):
    # Adapt argument/return order: fastmath.scan wants (carry, x) -> (carry, y)
    # while `f` is (x, carry) -> (y, carry).
    y, d = f(x, c)
    return d, y
  if remat:
    transposed_f = fastmath.remat(transposed_f)
  last_value, ys = fastmath.scan(transposed_f, init_value, xs)
  if axis != 0:
    # Undo the transpose on the stacked outputs.
    ys = fastmath.nested_map(swapaxes, ys)
  return ys, last_value
class Scan(base.Layer):
  """Applies a layer progressively/cumulatively to an axis-derived sequence.

  Conceptually, this is a function from a list to a same-length list of partial
  (cumulative) results. For instance, a list of values (`[1, 2, 3, 4, 5]`) can
  transform to a list of cumulative sums (`[1, 3, 6, 10, 15]`). Functions for
  the same concept are called `scan` in Scala, `scanl` in Haskell, and
  `accumulate*` in Factor.

  In more detail, we assume the layer takes a tuple of inputs of the following
  form:

    (input1, ..., inputN, carry1, ..., carryM)

  and returns:

    (output1, ..., outputK, new_carry1, ..., new_carryM)

  The scanned version applies the layer iteratively to a tensor treating values
  at the given axis as if they were a list. For example, to calculate all
  sums of prefixes of a tensor, we can do this::

    def add(x, carry):
      def f(input, carry):
        res = input + carry
        return res, res  # output and carry are the same
      return tl.Fn('add', f, n_out=2)

    Scan(add)([1, 2, 3], 0) = [1, 3, 6], 6
  """
  def __init__(self, layer, axis=0, n_carry=1, remat=False):
    """Wraps `layer`, scanning it over `axis` with `n_carry` carry items."""
    super().__init__(n_in=layer.n_in, n_out=layer.n_out)
    self._sublayers = [layer]
    self._n_carry = n_carry
    self._axis = axis
    self._remat = remat
    # Placeholders; real values are created by init_weights_and_state.
    self._weights = (None,)
    self._state = (None,)
  @property
  def sublayer(self):
    """Returns the unique sublayer managed by this layer."""
    return self._sublayers[0]
  def forward(self, inputs):
    """Scans the sublayer over the configured axis of the inputs."""
    weights = self.weights[0]
    if isinstance(inputs, list):
      inputs = tuple(inputs)  # so that inputs structure matches outputs
    n_carry = self._n_carry
    def scannable_fn(x, carry_and_state):  # pylint: disable=invalid-name
      # One scan step: append the carry to the slice, run the sublayer, then
      # split its outputs back into (outputs, new carry).
      carry, state = carry_and_state
      x_and_carry = x + carry if n_carry > 0 else x
      res, new_state = self.sublayer.pure_fn(
          x_and_carry, weights, state, self.rng, use_cache=True)
      if n_carry > 0:
        return (res[:-n_carry], (res[-n_carry:], new_state))
      else:
        return (res, ([], new_state))
    if n_carry > 0:
      xs = inputs[:-n_carry]  # Split input stack into inputs and carry.
      init = (inputs[-n_carry:], self.state[0])
    else:
      xs, init = inputs, ([], self.state[0])
    ys, (carry, new_state) = _scan(scannable_fn, xs, init,
                                   axis=self._axis, remat=self._remat)
    res = ys + carry if n_carry > 0 else ys
    self.state = (new_state,)
    return res  # Put outputs and carry back on stack.
  def init_weights_and_state(self, input_signature):
    """Initializes the sublayer from per-slice (axis-removed) signatures."""
    n_carry = self._n_carry
    if n_carry == 0:
      if isinstance(input_signature, (list, tuple)):
        layer_sig = [ShapeDtype(_shape_without_axis(x, self._axis), x.dtype)
                     for x in input_signature]
        layer_sig = tuple(layer_sig)
      else:
        layer_sig = ShapeDtype(_shape_without_axis(input_signature, self._axis),
                               input_signature.dtype)
      weights, state = self.sublayer.init(layer_sig)
      self.state = (state,)
      self.weights = (weights,)
    else:
      # Carry signatures keep their full shape; only scanned inputs lose
      # the scan axis.
      xs = input_signature[:-n_carry]
      init = input_signature[-n_carry:]
      xs_slices = [ShapeDtype(_shape_without_axis(x, self._axis), x.dtype)
                   for x in xs]
      layer_signature = tuple(xs_slices + list(init))
      weights, state = self.sublayer.init(layer_signature, use_cache=True)
      self.state = (state,)
      self.weights = (weights,)
class Cond(base.Layer):
  """Applies layers conditionally.

  For parameters `cond`, `true`, and `false` runs the equivalent of `true(y)
  if cond(x) else false(y)`, where `x` is `cond.n_in` elements from front of the
  stack and `y` is the rest of the stack.

  Exactly one of `true` and `false` functions is executed, so it can be used to
  conditionally run long computations. The state of non-executed function is not
  updated. Note that different branches may be executed on different devices
  if `cond` returns different values on them.

  By default 'false' function is an identity.

  `cond` must return exactly one element: a Boolean value.
  `true` and `false` must have the same n_in, and the same n_out.
  """
  def __init__(self, cond, true, false=None, name=None):
    """Creates the conditional combinator; `false` defaults to identity."""
    super(Cond, self).__init__(name=name)
    if false is None:
      self._identity_false_fun = True
      # We don't need a real 'false' branch, but an identity Serial() lets the
      # n_in/n_out consistency checks below treat both branches uniformly.
      false = Serial()
    else:
      self._identity_false_fun = False
    self._false = false
    # Build the sublayer list only after 'false' has been resolved; a None
    # entry here would make the attribute accesses below raise AttributeError.
    sublayers = [cond, true, false]
    self._sublayers = sublayers
    self._n_layers = len(sublayers)
    self._cond = cond
    self._true = true
    if cond.n_out != 1:
      raise ValueError(
          'cond.n_out must be 1: cond:{}->{}'.format(cond.n_in, cond.n_out))
    if true.n_in != false.n_in:
      raise ValueError(
          'true.n_in and false.n_in must be equal: true:{}->{} ; false:{}->{}'
          .format(true.n_in, true.n_out, false.n_in, false.n_out))
    if true.n_out != false.n_out:
      raise ValueError(
          'true.n_out and false.n_out must be equal: true:{}->{} ; false:{}->{}'
          .format(true.n_in, true.n_out, false.n_in, false.n_out))
    # The combinator consumes cond's inputs plus the branch inputs.
    self._n_in = cond.n_in + true.n_in
    self._n_out = true.n_out
    # Placeholders; real values are created by init_weights_and_state.
    self._weights = tuple(None for l in sublayers)
    self._state = tuple(None for l in sublayers)
  # pylint: disable=protected-access
  def init_weights_and_state(self, input_signature):
    """Initializes cond and both branches from the abstract input signature."""
    weights = []
    states = []
    # In the code below, stack, inputs, and outputs are abstract (shapes and
    # dtypes), but weights and states are non-abstract actual values.
    stack = _make_tuple(input_signature)
    # Inputs/outputs of `cond`.
    inputs = _inputs_from_stack(self._cond, stack)
    weights_or_cache_marker, state_or_cache_marker = (
        self._cond.init(inputs, use_cache=True))
    weights.append(weights_or_cache_marker)
    states.append(state_or_cache_marker)
    self._cond._forward_abstract(inputs)
    stack = _make_tuple(_outputs_onto_stack(self._cond, [], stack))
    # Inputs/outputs of `true` and `false`: both see the same stack.
    for sublayer in [self._true, self._false]:
      inputs = _inputs_from_stack(sublayer, stack)
      weights_or_cache_marker, state_or_cache_marker = (
          sublayer.init(inputs, use_cache=True))
      weights.append(weights_or_cache_marker)
      states.append(state_or_cache_marker)
    self.state = states
    self.weights = weights
  # pylint: enable=protected-access
  def _validate_forward_inputs(self, xs):
    """Raises if `xs` cannot supply this layer's n_in inputs."""
    xs = _make_tuple(xs)
    if len(xs) < self.n_in:
      raise ValueError(
          f'Number of inputs ({len(xs)}) to Cond.forward less than n_in '
          f'({self.n_in}).')
  def forward(self, xs):
    """Runs cond, then exactly one of the branches, via the backend cond."""
    # TODO(jaszczur): modify; it's a copy from SkippingSerial
    self._validate_forward_inputs(xs)
    layers_state = self.state
    # Get 3 rngs, one for each layer.
    rngs = _split_rngs(self.rng, 3)
    # Prepare the stack and do some safety checks as in the parent class.
    stack = _make_tuple(xs)
    weights = self.weights
    if len(weights) != 3:
      raise ValueError('number of weights ({}) not equal to 3'
                       .format(len(weights)))
    if len(layers_state) != 3:
      raise ValueError('length of state ({}) not equal to 3'
                       .format(len(layers_state)))
    def true_func(t):
      outputs, new_true_state = self._true.pure_fn(
          t[0][0], t[1][0], t[2][0], t[3][0])
      # t[2][1] is old_false_state which is not changing if true is executed.
      return outputs, (new_true_state, t[2][1])
    def false_func(t):
      if self._identity_false_fun:
        # Memory optimization: we don't need pure_fn call.
        return t[0][1], t[2]
      outputs, new_false_state = self._false.pure_fn(
          t[0][1], t[1][1], t[2][1], t[3][1])
      # t[2][1] is old_true_state, which is not changing if false is executed.
      return outputs, (t[2][0], new_false_state)
    cond_inputs = _inputs_from_stack(self._cond, xs)
    cond_output, s = self._cond.pure_fn(cond_inputs, self.weights[0],
                                        self.state[0], rngs[0], use_cache=True)
    stack = _outputs_onto_stack(self._cond, [], stack)
    self._cond.state = s
    outputs, both_states = fastmath.cond(
        cond_output,
        true_func,
        false_func,
        [(stack, stack),
         (self.weights[1], self.weights[2]),
         (self.state[1], self.state[2]),
         (rngs[1], rngs[2])]
    )
    # NOTE(review): cond's inputs were already popped from `stack` above;
    # this second pop looks redundant and may drop extra stack items when
    # cond.n_in > 0 — verify against the intended stack discipline.
    stack = _outputs_onto_stack(self._cond, [], stack)
    # We don't know which (`true` or `false`) branch was run, but both of them
    # are adding (n_out) and removing (n_in) the same number of elements of the
    # stack (this was checked in __init__). _outputs_onto_stack just uses the
    # layer's n_in and n_out, so we can pass either `true` or `false` to it.
    # Note that `outputs` is the actual output of `true` or `false` branch,
    # whichever was run, and we add it to the stack in any case.
    stack = _outputs_onto_stack(self._true, outputs, stack)
    self._true.state = both_states[0]
    self._false.state = both_states[1]
    return _make_singleitem_or_original(stack)
# pylint: disable=invalid-name
def Branch(*layers, name='Branch'):
  """Combinator that applies a list of layers in parallel to copies of inputs.

  Each sublayer consumes as many items as its `n_in` requires, always counted
  from the top of the stack, so every sublayer sees (copies of) the same
  leading inputs; the sublayer outputs are pushed in order.

  For example, suppose one has three layers:

    - F: 1 input, 1 output
    - G: 3 inputs, 1 output
    - H: 2 inputs, 2 outputs (h1, h2)

  Then Branch(F, G, H) will take 3 inputs and give 4 outputs:

    - inputs: a, b, c
    - outputs: F(a), G(a, b, c), h1, h2  where h1, h2 = H(a, b)

  As an important special case, a None argument to Branch acts as if it takes
  one argument, which it leaves unchanged. (It acts as a one-arg no-op.)

  Args:
    *layers: List of layers.
    name: Descriptive name for this layer.

  Returns:
    A branch layer built from the given sublayers.
  """
  if len(layers) == 1:
    return layers[0]
  combined = Parallel(*layers)
  # For each sublayer, re-select a fresh copy of the top n_in stack items.
  per_layer_indices = [list(range(sublayer.n_in))
                       for sublayer in combined.sublayers]
  flat_indices = _deep_flatten(per_layer_indices)
  return Serial(Select(flat_indices), combined,
                name=name, sublayers_to_print=layers)
def Residual(*layers, shortcut=None):
  """Wraps a series of layers with a residual connection.

  Args:
    *layers: One or more layers, to be applied in series.
    shortcut: If None (the usual case), the Residual layer computes the
        element-wise sum of the stack-top input with the output of the layer
        series. If specified, the `shortcut` layer applies to a copy of the
        inputs and (elementwise) adds its output to the output from the main
        layer series.

  Returns:
    A layer representing a residual connection paired with a layer series.
  """
  layers = _ensure_flat(layers)
  if len(layers) == 1:
    body = layers[0]
  else:
    body = Serial(layers)
  # TODO(jonni): Should we require layer.n_out = 1 and shortcut.n_out = 1?
  # Branch duplicates the inputs for the shortcut path; Add merges the two.
  return Serial(
      Branch(shortcut, body),
      Add(),  # pylint: disable=no-value-for-parameter
  )
def Select(indices, n_in=None, name=None):
  """Copies, reorders, or deletes stack elements according to `indices`.

  Args:
    indices: A list or tuple of 0-based indices to select elements relative to
        the top of the stack.
    n_in: Number of input elements to pop from the stack, and replace with
        those specified by `indices`. If not specified, its value will be
        calculated as `max(indices) + 1`.
    name: Descriptive name for this layer.

  Returns:
    Tensors, matching the number selected (`n_out = len(indices)`).
    Specifically:

      - n_out = 0: an empty tuple
      - n_out = 1: one tensor (NOT wrapped in a tuple)
      - n_out > 1: a tuple of tensors, with n_out items
  """
  if n_in is None:
    n_in = max(indices) + 1
  if name is None:
    name = f'Select{indices}'.replace(' ', '')
  def select(xs):  # pylint: disable=invalid-name
    # Normalize a single tensor to a 1-tuple so indexing is uniform.
    if not isinstance(xs, (tuple, list)):
      xs = (xs,)
    picked = tuple(xs[i] for i in indices)
    if len(picked) == 1:
      return picked[0]
    return picked
  return base.PureLayer(select, n_in=n_in, n_out=len(indices), name=name)
def Drop():
  """Returns a layer that removes the top item from the data stack."""
  def drop(x):
    return ()
  return Fn('Drop', drop, n_out=0)
def Dup():
  """Returns a layer that pushes a second copy of the top stack item."""
  def dup(x):
    return (x, x)
  return Fn('Dup', dup, n_out=2)
def Swap():
  """Returns a layer that exchanges the top two stack items."""
  def swap(x0, x1):
    return (x1, x0)
  return Fn('Swap', swap, n_out=2)
def SerialWithSideOutputs(layers, n_side_outputs=1):
  """Serial layer with side outputs.

  This layer makes it easier to manage the stack when layers have side outputs.

  In the simplest case of layers with n_in=1, n_out=2 and with
  n_side_outputs=1, this layer runs the following computation on x::

      side_outputs = []
      for i in range(len(layers)):
        x, side_output = layers[i](x)
        side_outputs.append(side_output)
      return [x] + side_outputs

  In the general case of layers with variable n_in and n_out and
  n_side_outputs being a list of N integers, it does the following::

      side_outputs = []
      for i in range(N):
        res = layer[i](cur_stack)  # remove n_in from stack
        cur_stack.append(res[:n_side_outputs[i]])  # put back some on stack
        side_outputs.extend(res[n_side_outputs:])
      return cur_stack + side_outputs

  Args:
    layers: a list of layers to execute
    n_side_outputs: an int or a list of ints, how many outputs of each layer
        to put aside

  Returns:
    A layer that performs the above computation.
  """
  if isinstance(n_side_outputs, int):
    # A single int applies uniformly to every layer.
    n_side_outputs = [n_side_outputs] * len(layers)
  # Calculate the n_in for this layer: simulate stack depth over the series,
  # where each layer nets (n_out - n_side_output) items back onto the stack.
  running_max = 0
  running_total = 0
  for layer, n_side_output in zip(layers, n_side_outputs):
    running_total += layer.n_in
    running_max = max(running_max, running_total)
    running_total -= layer.n_out - n_side_output
  n_in = running_max
  # Create the list of layers to run serially: after each layer, a Select
  # rotates its side outputs to the bottom of the stack so they accumulate
  # behind the main dataflow.
  cur_stack_size = n_in
  serial_layers = []
  for layer, n_side_output in zip(layers, n_side_outputs):
    serial_layers.append(layer)
    cur_stack_size += layer.n_out - layer.n_in
    # Indices to move n_side_outputs to the back of the stack.
    # Don't touch first n_out - n_side_outputs.
    move_back_indices = list(range(layer.n_out - n_side_output))
    # Then comes the rest of the stack that we're not moving.
    move_back_indices += [i + layer.n_out
                          for i in range(cur_stack_size - layer.n_out)]
    # Finally the indices we move.
    move_back_indices += [i + layer.n_out - n_side_output
                         for i in range(n_side_output)]
    # Swap them on stack.
    serial_layers.append(Select(move_back_indices))
  return Serial(serial_layers)
def FlattenList():
  """Returns a layer that flattens nested lists/tuples into a flat tuple."""
  # TODO(jonni): Consider renaming layer to DeepFlatten.
  def flatten(x):
    return tuple(_deep_flatten(x))
  return Fn('FlattenList', flatten)
def Add():
  """Returns a layer that computes the elementwise sum of two tensors."""
  def add(x0, x1):
    return x0 + x1
  return Fn('Add', add)
def SubtractTop():
  """Returns a layer that subtracts the top stack item from the next one."""
  def subtract_top(x0, x1):
    return x1 - x0
  return Fn('SubtractTop', subtract_top)
def Multiply():
  """Returns a layer that computes the elementwise product of two tensors."""
  def multiply(x0, x1):
    return x0 * x1
  return Fn('Multiply', multiply)
def Gate():
  """Returns a gating layer on a (memory, gate, candidate) tuple.

  Final update is memory * gate + (1 - gate) * candidate
  This gating equation may also be referred to as Highway Network.
  Highway Networks: https://arxiv.org/abs/1505.00387
  """
  def gate(m, g, c):
    return g * m + (1.0 - g) * c
  return Fn('Gate', gate)
class Cache(base.Layer):
  """Applies a layer on the first run and returns the outputs on next calls."""
  def __init__(self, layer):
    """Creates a caching wrapper with the same arity as `layer`."""
    super().__init__(n_in=layer.n_in, n_out=layer.n_out)
    self._sublayers = [layer]
  @property
  def sublayer(self):
    """Returns the unique sublayer managed by this layer."""
    return self._sublayers[0]
  @property
  def state(self):
    """Returns a tuple containing this layer's state; may be empty."""
    return self._state
  @state.setter
  def state(self, state):
    """Recursively sets state on this layer and all sublayers."""
    if isinstance(state, dict) and state == base.GET_STATE_FROM_CACHE:
      return
    self._state = state
    self.sublayer.state = state[1]
  def init_weights_and_state(self, input_signature):
    """Initializes the sublayer; an empty tuple marks 'nothing cached yet'."""
    weights, layer_state = self.sublayer.init(input_signature, use_cache=True)
    self.state = ((), layer_state)
    self._weights = (weights,)
  def forward(self, inputs):
    """Runs the sublayer once, then replays its cached outputs afterward."""
    state, weights = self.state, self.weights[0]
    # An empty tuple in state[0] is the 'nothing cached yet' sentinel set by
    # init_weights_and_state. Test it by type/emptiness rather than the old
    # `state[0] is ()`: identity of the empty-tuple literal is a CPython
    # implementation detail and raises SyntaxWarning on Python 3.8+.
    if isinstance(state[0], tuple) and not state[0]:
      res, layer_state = self.sublayer.pure_fn(
          inputs, weights, state[1], self.rng)
      self.state = (res, layer_state)
      return res
    else:
      return state[0]
class BatchLeadingAxes(base.Layer):
  """Applies a layer after flattening all but n_last_axes_to_keep to batch.

  This can be used to make layers accept an arbitrary number of leading
  axes (dimensions) as batch. For example, a Convolution layer may normally
  only operate on tensors of shape [B, W, H, C]. In this case, the layer

      BatchLeadingAxes(Convolution(), n_last_axes_to_keep=3)

  will operate on any tensor [..., W, H, C] and treat the leading axes as batch.
  """
  def __init__(self, layer, n_last_axes_to_keep=1):
    """Wraps `layer`, keeping only the trailing `n_last_axes_to_keep` axes."""
    super().__init__(n_in=layer.n_in, n_out=layer.n_out)
    self._sublayers = [layer]
    self._n_last_axes_to_keep = n_last_axes_to_keep
    # Placeholders; real values are created by init_weights_and_state.
    self._weights = (None,)
    self._state = (None,)
  @property
  def sublayer(self):
    """Returns the unique sublayer managed by this layer."""
    return self._sublayers[0]
  def forward(self, inputs):
    """Flattens leading axes into one batch axis, runs sublayer, restores."""
    batched_axes_shape = list(inputs.shape[:-self._n_last_axes_to_keep])
    batched_shape = [-1] + list(inputs.shape[-self._n_last_axes_to_keep:])
    inputs = jnp.reshape(inputs, batched_shape)
    res, layer_state = self.sublayer.pure_fn(
        inputs, self.weights[0], self.state[0], self.rng)
    self.state = (layer_state,)
    # Restore the original leading axes on the sublayer's output.
    return jnp.reshape(res, batched_axes_shape + list(res.shape[1:]))
  def init_weights_and_state(self, input_signature):
    # NOTE(review): the signature passed to the sublayer keeps its leading
    # axes, while forward() flattens them into one batch axis — confirm the
    # sublayer initializes identically for both shapes.
    weights, layer_state = self.sublayer.init(input_signature, use_cache=True)
    self.state = (layer_state,)
    self.weights = (weights,)
# All module-private helper functions are below.
# pylint: disable=invalid-name
def _deep_flatten(items):
"""Returns a list of objects, flattening sublists/subtuples along the way.
Example: _deep_flatten([1, (2, 3, (4, 5), [6, 7]), [[[8]]]]) would return
the list [1, 2, 3, 4, 5, 6, 7, 8].
Args:
items: An iterable. If elements of this iterable are lists or tuples, they
will be (recursively) flattened until non-list non-tuple objects are
reached.
Returns:
A list of non-list, non-tuple objects.
"""
def _flat_gen(xs):
for x in xs:
if isinstance(x, (list, tuple)):
for y in _flat_gen(x):
yield y
else:
yield x
return list(_flat_gen(items))
def _ensure_sublayers(layers):
  """Ensures that elements in a layer list are layers.

  Args:
    layers: A tuple or list whose elements can each be a layer, tuple, or
        list, and so on recursively.

  Returns:
    An analogous collection of layers in which embedded layer lists are
    wrapped in Serial layer instances.
  """
  if not layers:  # None or an empty list can signal a no-op.
    return Serial(None)  # no-op, but still handles shapes and initialization
  if not isinstance(layers, (list, tuple)):
    raise TypeError(type(layers))
  # Wrap any nested list/tuple in a Serial; pass layers through unchanged.
  return [Serial(item) if isinstance(item, (list, tuple)) else item
          for item in layers]
def _split_rngs(rng, n_copies):
if rng is None:
return (None,) * n_copies
return fastmath.random.split(rng, n_copies)
def _inputs_from_stack(layer, stack, n_in=None):
  """Returns the correct number/format of inputs for the given layer."""
  n_needed = layer.n_in if n_in is None else n_in
  # Take the top n_needed items, then unwrap a length-1 result.
  return _make_singleitem_or_original(_make_tuple(stack)[:n_needed])
def _outputs_onto_stack(layer, outputs, stack, n_in=None, n_out=None):
  """Returns the new stack after outputs have been pushed onto it."""
  if n_in is None:
    n_in = layer.n_in
  if n_out is None:
    n_out = layer.n_out  # accepted for symmetry; only n_in is used below
  # Pop the layer's consumed inputs, then push its outputs on top.
  remainder = _make_tuple(stack)[n_in:]
  return _make_singleitem_or_original(_make_tuple(outputs) + remainder)
def _make_tuple(xs):
"""Returns a tuple from a list, a tuple, or a single element."""
if isinstance(xs, (list, tuple)):
return tuple(xs)
else:
return (xs,)
def _make_singleitem_or_original(xs):
"""Returns a single element if possible, or the original list/tuple if not."""
if isinstance(xs, (list, tuple)) and len(xs) == 1:
return xs[0]
else:
return xs
def _shape_without_axis(x, axis):
return x.shape[:axis] + x.shape[axis + 1:]
def _ensure_flat(layers):
  """Ensures that layers is a single flat list of Layer instances."""
  if len(layers) == 1 and layers[0] is None:
    flat = ()  # a lone None signals "no layers"
  else:
    flat = _deep_flatten(layers)
  for item in flat:
    if not isinstance(item, base.Layer):
      raise ValueError(
          f'Found nonlayer object ({item}) in layers: {flat}')
  return flat
| 34.104938 | 84 | 0.666667 |
e1a492bb68b278dd946f5acbb009808edb52f6e3 | 6,213 | py | Python | selfdrive/car/honda/carcontroller.py | openmechanics/opv0431arch | 994d9b3f74b41fbe7060be8ee2ecc87870d4a1e3 | [
"MIT"
] | 2 | 2021-04-07T02:39:00.000Z | 2021-08-30T19:54:27.000Z | selfdrive/car/honda/carcontroller.py | openmechanics/opv0431arch | 994d9b3f74b41fbe7060be8ee2ecc87870d4a1e3 | [
"MIT"
] | null | null | null | selfdrive/car/honda/carcontroller.py | openmechanics/opv0431arch | 994d9b3f74b41fbe7060be8ee2ecc87870d4a1e3 | [
"MIT"
] | null | null | null | from collections import namedtuple
import os
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.controls.lib.drive_helpers import rate_limit
from common.numpy_fast import clip
from . import hondacan
from .values import AH
from common.fingerprints import HONDA as CAR
from selfdrive.can.packer import CANPacker
def actuator_hystereses(brake, braking, brake_steady, v_ego, car_fingerprint):
  """Applies hysteresis to the brake command to avoid brake blinking.

  Returns the filtered brake value plus the updated (braking, brake_steady)
  state that must be fed back on the next cycle.
  """
  # hysteresis params... TODO: move these to VehicleParams
  hyst_on = 0.02    # brake must exceed this value to engage
  hyst_off = 0.005  # brake below this value disengages
  hyst_gap = 0.01   # command is held steady for oscillations inside this band

  # Engage above the on-threshold, release below the off-threshold.
  if (brake < hyst_on and not braking) or brake < hyst_off:
    brake = 0.
  braking = brake > 0.

  # For small brake oscillations within hyst_gap, don't change the command.
  if brake == 0.:
    brake_steady = 0.
  elif brake > brake_steady + hyst_gap:
    brake_steady = brake - hyst_gap
  elif brake < brake_steady - hyst_gap:
    brake_steady = brake + hyst_gap
  brake = brake_steady

  # These platforms need extra brake to achieve the commanded deceleration.
  if (car_fingerprint in (CAR.ACURA_ILX, CAR.CRV)) and brake > 0.0:
    brake += 0.15

  return brake, braking, brake_steady
def process_hud_alert(hud_alert):
  """Maps a HUD alert onto (fcw_display, steer_required, acc_alert)."""
  if hud_alert == AH.NONE:   # no alert
    return 0, 0, 0
  if hud_alert == AH.FCW:    # forward collision warning
    return hud_alert[1], 0, 0
  if hud_alert == AH.STEER:  # steering-required prompt
    return 0, hud_alert[1], 0
  # any other ACC alert
  return 0, 0, hud_alert[1]
# Container for every value shown on the dashboard HUD. All eleven fields
# must be ints in [0, 256) when sent to the car (CarController.update
# validates this before sending).
HUDData = namedtuple("HUDData",
                     ["pcm_accel", "v_cruise", "mini_car", "car", "X4",
                      "lanes", "beep", "chime", "fcw", "acc_alert", "steer_required"])
class CarController(object):
def __init__(self, dbc_name, enable_camera=True):
self.braking = False
self.brake_steady = 0.
self.brake_last = 0.
self.enable_camera = enable_camera
self.packer = CANPacker(dbc_name)
def update(self, sendcan, enabled, CS, frame, actuators, \
pcm_speed, pcm_override, pcm_cancel_cmd, pcm_accel, \
hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert, \
snd_beep, snd_chime):
""" Controls thread """
if not self.enable_camera:
return
# *** apply brake hysteresis ***
brake, self.braking, self.brake_steady = actuator_hystereses(actuators.brake, self.braking, self.brake_steady, CS.v_ego, CS.CP.carFingerprint)
# *** no output if not enabled ***
if not enabled and CS.pcm_acc_status:
# send pcm acc cancel cmd if drive is disabled but pcm is still on, or if the system can't be activated
pcm_cancel_cmd = True
# *** rate limit after the enable check ***
self.brake_last = rate_limit(brake, self.brake_last, -2., 1./100)
# vehicle hud display, wait for one update from 10Hz 0x304 msg
if hud_show_lanes:
hud_lanes = 1
else:
hud_lanes = 0
# TODO: factor this out better
if enabled:
if hud_show_car:
hud_car = 2
else:
hud_car = 1
else:
hud_car = 0
#print chime, alert_id, hud_alert
fcw_display, steer_required, acc_alert = process_hud_alert(hud_alert)
hud = HUDData(int(pcm_accel), int(round(hud_v_cruise)), 1, hud_car,
0xc1, hud_lanes, int(snd_beep), snd_chime, fcw_display, acc_alert, steer_required)
if not all(isinstance(x, int) and 0 <= x < 256 for x in hud):
print "INVALID HUD", hud
hud = HUDData(0xc6, 255, 64, 0xc0, 209, 0x40, 0, 0, 0, 0)
# **** process the car messages ****
# *** compute control surfaces ***
BRAKE_MAX = 1024/4
if CS.CP.carFingerprint in (CAR.CIVIC, CAR.ODYSSEY, CAR.PILOT):
is_fw_modified = os.getenv("DONGLE_ID") in ['99c94dc769b5d96e']
STEER_MAX = 0x1FFF if is_fw_modified else 0x1000
elif CS.CP.carFingerprint in (CAR.CRV, CAR.ACURA_RDX):
STEER_MAX = 0x3e8 # CR-V only uses 12-bits and requires a lower value (max value from energee)
else:
STEER_MAX = 0xF00
# steer torque is converted back to CAN reference (positive when steering right)
apply_gas = clip(actuators.gas, 0., 1.)
apply_brake = int(clip(self.brake_last * BRAKE_MAX, 0, BRAKE_MAX - 1))
apply_steer = int(clip(-actuators.steer * STEER_MAX, -STEER_MAX, STEER_MAX))
# any other cp.vl[0x18F]['STEER_STATUS'] is common and can happen during user override. sending 0 torque to avoid EPS sending error 5
if CS.steer_not_allowed:
apply_steer = 0
# Send CAN commands.
can_sends = []
# Send steering command.
idx = frame % 4
can_sends.append(hondacan.create_steering_control(self.packer, apply_steer, CS.CP.carFingerprint, idx))
# Send gas and brake commands.
if (frame % 2) == 0:
idx = (frame / 2) % 4
can_sends.append(
hondacan.create_brake_command(self.packer, apply_brake, pcm_override,
pcm_cancel_cmd, hud.chime, hud.fcw, idx))
if not CS.brake_only:
# send exactly zero if apply_gas is zero. Interceptor will send the max between read value and apply_gas.
# This prevents unexpected pedal range rescaling
can_sends.append(hondacan.create_gas_command(self.packer, apply_gas, idx))
# Send dashboard UI commands.
if (frame % 10) == 0:
idx = (frame/10) % 4
can_sends.extend(hondacan.create_ui_commands(self.packer, pcm_speed, hud, CS.CP.carFingerprint, idx))
# radar at 20Hz, but these msgs need to be sent at 50Hz on ilx (seems like an Acura bug)
if CS.CP.carFingerprint == CAR.ACURA_ILX:
radar_send_step = 2
else:
radar_send_step = 5
if (frame % radar_send_step) == 0:
idx = (frame/radar_send_step) % 4
can_sends.extend(hondacan.create_radar_commands(CS.v_ego, CS.CP.carFingerprint, idx))
sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan').to_bytes())
| 36.763314 | 146 | 0.6723 |
85ccc31d04bac6b8c03ddfc9307d2460cf4ba30c | 18,414 | py | Python | barentsz/_discover.py | robinklaassen/barentsz | 5c289d071ddf42d0a9a6c3dbd1db0bf809ae136a | [
"MIT"
] | null | null | null | barentsz/_discover.py | robinklaassen/barentsz | 5c289d071ddf42d0a9a6c3dbd1db0bf809ae136a | [
"MIT"
] | null | null | null | barentsz/_discover.py | robinklaassen/barentsz | 5c289d071ddf42d0a9a6c3dbd1db0bf809ae136a | [
"MIT"
] | null | null | null | import glob
import inspect
import re
import sys
from importlib import import_module
from pathlib import Path
from typing import (
Union,
Dict,
List,
Any,
Callable,
Type,
Iterable,
Optional,
Tuple,
Set,
TypeVar,
)
from typish import Module, subclass_of, instance_of
from barentsz._here import here
from barentsz._attribute import Attribute
def discover(
        source: Any = None,
        *,
        what: Any = List[type],
        **kwargs: dict,
) -> list:
    """
    Convenience function for discovering types in some source. If no source
    is given, the directory is used in which the calling module is located.

    Args:
        source: the source in which is searched or the directory of the
        caller if None.
        what: the type that is to be discovered.
        **kwargs: any keyword argument that is passed on.

    Returns: a list of discoveries.
    """
    source = source or here(1)  # default to the caller's directory
    delegates = (
        (List[type], _discover_list),
        (list, _discover_list),
        (List, _discover_list),
    )
    for accepted_type, handler in delegates:
        if subclass_of(what, accepted_type):
            return handler(what, source, **kwargs)
    supported = ', '.join('`{}`'.format(accepted)
                          for accepted, _ in delegates)
    raise ValueError('Type `{}` is not supported. This function accepts: '
                     '{}'.format(what, supported))
def discover_paths(directory: Union[Path, str], pattern: str) -> List[Path]:
    """
    Return a list of Paths within the given directory that match the given
    pattern.

    Args:
        directory: the directory in which is searched for paths.
        pattern: a pattern (example: '**/*.py').

    Returns: a list of Path objects.
    """
    dir_path = _path(directory)
    # Make the directory importable for subsequent module discovery.
    sys.path.insert(0, str(dir_path.absolute()))
    matches = [Path(hit) for hit in
               glob.iglob(str(dir_path.joinpath(pattern)), recursive=True)]
    matches.sort()
    return matches
def discover_packages(directory: Union[Path, str]) -> List[str]:
    """
    Return a list of packages within the given directory. The directory must
    be a package.

    Args:
        directory: the directory in which is searched for packages.

    Returns: a list of packages.
    """
    return sorted(_discover_packages_per_path(directory).values())
def discover_module_names(
        directory: Union[Path, str],
        include_privates: bool = False) -> List[str]:
    """
    Return a list of module names within the given directory. The directory
    must be a package and only names are returned of modules that are in
    packages.

    Args:
        directory: the directory in which is searched for modules.
        include_privates: if True, privates (unders and dunders) are also
        included.

    Returns: a list of module names (strings).
    """
    names = []
    for pkg_path, pkg_name in _discover_packages_per_path(directory).items():
        for module_path in discover_paths(pkg_path, '*.py'):
            stem = module_path.stem
            if stem == '__init__':
                continue  # the package itself, not a module of it
            if stem.startswith('_') and not include_privates:
                continue
            names.append('{}.{}'.format(pkg_name, stem))
    names.sort()
    return names
def discover_modules(
        directory: Union[Path, str],
        include_privates: bool = False,
        raise_on_fail: bool = False) -> List[Module]:
    """
    Return a list of modules within the given directory. The directory must
    be a package and only modules are returned that are in packages.

    Args:
        directory: the directory in which is searched for modules.
        include_privates: if True, privates (unders and dunders) are also
        included.
        raise_on_fail: if True, an ImportError is raised upon failing to
        import any module.

    Returns: a list of module objects.
    """
    result = []
    for module_name in discover_module_names(directory, include_privates):
        try:
            result.append(import_module(module_name))
        except Exception as err:  # Importing arbitrary code can fail any way.
            if raise_on_fail:
                # Chain the original error so its traceback is preserved
                # (the previous bare `raise ImportError(err)` discarded it).
                raise ImportError(err) from err
    result.sort(key=lambda module: module.__name__)
    return result
def discover_classes(
        source: Union[Path, str, Module, Iterable[Module]],
        signature: type = Any,  # type: ignore
        include_privates: bool = False,
        in_private_modules: bool = False,
        raise_on_fail: bool = False,
        exclude: Union[Iterable[type], type] = None
) -> List[type]:
    """
    Discover any classes within the given source and according to the given
    constraints.

    Args:
        source: the source in which is searched for any classes.
        signature: only classes that inherit from signature are returned.
        include_privates: if True, private classes are included as well.
        in_private_modules: if True, private modules are explored as well.
        raise_on_fail: if True, raises an ImportError upon the first import
        failure.
        exclude: a type or multiple types that are to be excluded from the
        result.

    Returns: a list of all discovered classes (types).
    """
    excluded = _ensure_set(exclude)
    candidates = _discover_elements(source, inspect.isclass, include_privates,
                                    in_private_modules, raise_on_fail)
    # Deduplicate while applying the signature and exclusion constraints.
    matching = {cls for cls in candidates
                if (signature is Any or subclass_of(cls, signature))
                and cls not in excluded}
    return sorted(matching, key=lambda cls: cls.__name__)
def discover_functions(
        source: Union[Path, str, Module, Iterable[Module], type],
        signature: Type[Callable] = Callable,  # type: ignore
        include_privates: bool = False,
        in_private_modules: bool = False,
        raise_on_fail: bool = False) -> List[type]:
    """
    Discover any functions within the given source and according to the given
    constraints.

    Args:
        source: the source in which is searched for any functions.
        signature: only functions that have this signature (parameters and
        return type) are included.
        include_privates: if True, private functions are included as well.
        in_private_modules: if True, private modules are explored as well.
        raise_on_fail: if True, raises an ImportError upon the first import
        failure.

    Returns: a list of all discovered functions.
    """
    if isinstance(source, type):
        # Inside a class, bound methods must be picked up as well.
        def predicate(*args_: Iterable[Any]) -> bool:
            return (inspect.isfunction(*args_)
                    or inspect.ismethod(*args_))
    else:
        predicate = inspect.isfunction  # type: ignore
    elements = _discover_elements(source, predicate, include_privates,
                                  in_private_modules, raise_on_fail)
    functions = [elem for elem in elements
                 if (signature is Callable or instance_of(elem, signature))]
    functions.sort(key=lambda func: func.__name__)
    return functions
def discover_attributes(
        source: Union[Path, str, Module, Iterable[Module]],
        signature: type = Any,  # type: ignore
        include_privates: bool = False,
        in_private_modules: bool = False,
        raise_on_fail: bool = False) -> List[Attribute]:
    """
    Discover any attributes within the given source and according to the
    given constraints.

    Args:
        source: the source in which is searched for any attributes.
        signature: only attributes that are subtypes of this signature are
        included.
        include_privates: if True, private attributes are included as well.
        in_private_modules: if True, private modules are explored as well.
        raise_on_fail: if True, raises an ImportError upon the first import
        failure.

    Returns: a list of all discovered attributes.
    """
    modules = _get_modules_from_source(source, in_private_modules,
                                       raise_on_fail)
    found: List[Attribute] = []
    for module in modules:
        # Attributes are found by scanning the module's source text.
        with open(module.__file__) as module_file:
            source_lines = list(module_file)
        found.extend(_discover_attributes_in_lines(
            source_lines, module, signature, include_privates))
    found.sort(key=lambda attr: attr.name)
    return found
def _discover_attributes_in_lines(
        lines: List[str],
        module: Module,
        signature: type,
        include_privates: bool) -> List[Attribute]:
    """
    Discover any attributes within the lines of code and according to the
    given constraints.

    Args:
        lines: the lines of code in which is searched for any attributes.
        module: the module from which these lines originate.
        signature: only attributes that are subtypes of this signature are
        included.
        include_privates: if True, private attributes are included as well.

    Returns: a list of all discovered attributes.
    """
    result = []
    for line_index, line in enumerate(lines):
        match = _match_attribute(line)
        if not match:
            continue
        name, hint, value, comment = match
        # Any docstring directly above the assignment documents it.
        docstring = _find_attribute_docstring(lines[0:line_index])
        attribute = _create_attribute(name, hint, value, docstring, comment,
                                      module, line, line_index + 1)
        if (instance_of(attribute.value, signature)
                and (attribute.is_public or include_privates)):
            result.append(attribute)
    return result
def _discover_elements(
        source: Union[Path, str, Module, Iterable[Module], type],
        filter_: Callable[[Any], bool],
        include_privates: bool = False,
        in_private_modules: bool = False,
        raise_on_fail: bool = False) -> List[Any]:
    """
    Discover elements (such as attributes or functions) in the given source.

    Args:
        source: the source that is explored.
        filter_: the filter that determines the type of element.
        include_privates: if True, private elements are returned as well.
        in_private_modules: if True, private modules are examined as well.
        raise_on_fail: if True, an ImportError will be raised upon import
        failure.

    Returns: a list of elements.
    """
    if isinstance(source, type):
        sources = [source]  # type: Iterable
    else:
        sources = _get_modules_from_source(source, in_private_modules,
                                           raise_on_fail)
    found = []
    for src in sources:
        if src.__name__.startswith('_') and not in_private_modules:
            continue
        for _, element in inspect.getmembers(src, filter_):
            if include_privates or not element.__name__.startswith('_'):
                found.append(element)
    return found
def _discover_packages_per_path(
        directory: Union[Path, str]) -> Dict[Path, str]:
    """
    Discover packages and their original Paths within the given directory.

    Args:
        directory: the directory in which is searched for modules.

    Returns: a dict with Paths as keys and strings (the package names) as
    values.
    """
    directory_path = _path(directory)
    if not directory_path.exists():
        raise ValueError('The given directory does not exist. '
                         'Given: {}'.format(directory))
    if not _is_package(directory_path):
        raise ValueError('The given directory must itself be a package. '
                         'Given: {}'.format(directory))
    base_package = _to_package_name(directory_path)
    packages = {}
    for init_file in discover_paths(directory_path, '**/__init__.py'):
        pkg_dir = init_file.parent
        package = _to_package_name(pkg_dir)
        # Keep only packages connected in a straight line to the base one.
        if package.startswith(base_package):
            packages[pkg_dir] = package
    return packages
def _path(directory: Union[Path, str]) -> Path:
"""
Return a path if directory is a string or return directory if it is a Path
already. Raise a ValueError if it is neither a Path nor a string.
Args:
directory: the directory that is a string or Path.
Returns: a Path instance.
"""
if isinstance(directory, Path):
result = directory
elif isinstance(directory, str):
result = Path(directory)
else:
raise ValueError('Invalid type ({}) for directory, provide a Path or '
'a string.'.format(type(directory)))
return result
def _get_modules_from_source(
        source: Union[Path, str, Module, Iterable[Module]],
        in_private_modules: bool = False,
        raise_on_fail: bool = False
) -> Iterable[Module]:
    """
    Get an iterable of Modules from the given source.

    Args:
        source: anything that can be turned into an iterable of Modules.
        in_private_modules: if True, private modules are explored as well.
        raise_on_fail: if True, raises an ImportError upon the first import
        failure.

    Returns: an iterable of Module instances.
    """
    if isinstance(source, Path):
        return discover_modules(source, in_private_modules, raise_on_fail)
    if isinstance(source, str):
        return discover_modules(Path(source), in_private_modules,
                                raise_on_fail)
    if isinstance(source, Module):
        return [source]
    if instance_of(source, Iterable[Module]):
        return source  # type: ignore
    raise ValueError('The given source must be a Path, string or module. '
                     'Given: {}'.format(source))
def _match_attribute(line: str) -> Optional[Tuple[str, str, str, str]]:
"""
Try to match the given line with an attribute and return the name,
type hint, value and inline comment (respectively) if a match was
found.
Args:
line: the line of code that (may) contain an attribute declaration.
Returns: a tuple with strings (name, hint, value, comment) or None.
"""
attr_pattern = re.compile(
r'^'
r'\s*'
r'([a-zA-Z_]+[a-zA-Z_0-9]*)' # 1: Name.
r'(\s*:\s*(\w+)\s*)?' # 3: Type hint.
r'\s*=\s*'
r'(.+?)' # 4: Value.
r'\s*'
r'(#\s*(.*?)\s*)?' # 6: Inline comment.
r'$'
)
match = attr_pattern.match(line)
result = None
if match:
attr_name = match.group(1)
hint = match.group(3)
attr_value = match.group(4)
inline_comments = match.group(6)
result = attr_name, hint, attr_value, inline_comments
return result
def _create_attribute(
        name: str,
        hint: Optional[str],
        assigned_value: str,
        docstring: Optional[str],
        comment: Optional[str],
        module: Module,
        line: str,
        line_nr: int) -> Attribute:
    """
    Create and return an Attribute instance from the given parameters.

    Args:
        name: the name of the attribute.
        hint: the type hint of the attribute (if any).
        assigned_value: the string that was literally assigned.
        docstring: the docstring above this attribute.
        comment: an inline comment (if any).
        module: the module that contains the attribute.
        line: the line that defines the attribute.
        line_nr: the line number of the attribute.

    Returns: an Attribute instance.
    """
    value = getattr(module, name)  # The live value, as actually imported.
    type_ = type(value)
    return Attribute(
        name=name,
        type_=type_,
        value=value,
        doc=docstring,
        comment=comment,
        hint=hint,
        module=module,
        assigned_value=assigned_value,
        line=line,
        line_nr=line_nr
    )
def _is_package(directory: Path) -> bool:
    """
    Return True if the given directory is a package and False otherwise.

    Args:
        directory: the directory to check.

    Returns: True if directory is a package.
    """
    # A directory is a package iff it contains an __init__.py.
    return bool(discover_paths(directory, '__init__.py'))
def _to_package_name(directory: Path) -> str:
    """
    Translate the given directory to a package (str). Check every parent
    directory in the tree to find the complete fully qualified package name.

    Args:
        directory: the directory that is to become a package name.

    Returns: a package name as string.
    """
    parts = []
    current = directory
    # Walk upward as long as each ancestor is still a package.
    while _is_package(current):
        parts.append(current.stem)
        current = current.parent
    return '.'.join(reversed(parts))
def _find_attribute_docstring(lines: List[str]) -> Optional[str]:
"""
Find any docstring that is right above an attribute.
Args:
lines: the lines of code that may contain a docstring.
Returns: a docstring (str) or None.
"""
result = None
if lines:
joined_lines = ''.join(lines).strip()
docstring_pattern = re.compile(
r'("{3}\s*([\s\S]+)\s*"{3}|' # 2: docstring content.
r'\'{3}\s*([\s\S]+)\s*\'{3})' # 3: docstring content.
r'$'
)
match = docstring_pattern.match(joined_lines)
if match:
result = (match.group(2) or match.group(3)).strip()
return result
def _ensure_set(arg: Union[object, Iterable[object]]) -> Set[object]:
# Make sure that arg is a set.
result = arg or set()
if not isinstance(result, Iterable):
result = {result}
else:
result = set(result)
return result
def _discover_list(
        what_: List[type],
        source: Union[Path, str, Module, Iterable[Module]],
        **kwargs: dict) -> List[type]:
    # Extract the element type from e.g. List[SomeType]; default to Any.
    type_args = getattr(what_, '__args__', None) or [Any]
    signature = type_args[0]
    # Bare `type`/`Type` or an unbound TypeVar both mean "any class".
    if signature in (type, Type) or isinstance(signature, TypeVar):  # type: ignore[arg-type] # noqa
        signature = Any
    kwargs['signature'] = signature
    return discover_classes(source, **kwargs)  # type: ignore[arg-type]
| 32.765125 | 100 | 0.632942 |
a0e4dae891748b8a01307ae7aac7bc7715d4cc4e | 9,199 | py | Python | examples/the-feeling-of-success/run_experiments.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | [
"Apache-2.0"
] | null | null | null | examples/the-feeling-of-success/run_experiments.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | [
"Apache-2.0"
] | null | null | null | examples/the-feeling-of-success/run_experiments.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | [
"Apache-2.0"
] | null | null | null | import logging
from absl import app
from sensor_msgs.msg import Image
from insert_table_op import InsertTableOperator
from insert_block_op import InsertBlockOperator
from init_robot_op import InitRobotOperator
from gel_sight_op import GelSightOperator
from mock_loc_obj_op import MockLocateObjectOperator
from goto_xyz_op import GoToXYZOperator
from move_above_object_op import MoveAboveObjectOperator
from mock_gripper_op import MockGripperOperator
from mock_grasp_object_op import MockGraspObjectOperator
from raise_object_op import RaiseObjectOperator
from mock_predict_grip_op import MockPredictGripOperator
from random_position_op import RandomPositionOperator
from mock_ungrasp_object_op import MockUngraspObjectOperator
import erdos.graph
from erdos.ros.ros_subscriber_op import ROSSubscriberOp
logger = logging.getLogger(__name__)
table_init_arguments = {"_x": 0.75, "_y": 0.0, "_z": 0.0, "ref_frame": "world"}
block_init_arguments = {
"_x": 0.4225,
"_y": 0.1265,
"_z": 0.7725,
"ref_frame": "world"
}
robot_init_arguments = {
"joint_angles": {
'right_j0': -0.041662954890248294,
'right_j1': -1.0258291091425074,
'right_j2': 0.0293680414401436,
'right_j3': 2.17518162913313,
'right_j4': -0.06703022873354225,
'right_j5': 0.3968371433926965,
'right_j6': 1.7659649178699421
},
"limb_name": "right"
}
def construct_graph(graph):
    """Wires up the full pick-and-place pipeline as an ERDOS dataflow graph.

    Stages, in order: spawn the table and block in the simulated world, move
    the robot to its rest pose, locate the block from the Kinect streams,
    move the gripper above it, grasp, raise, predict grip success from the
    GelSight tactile images, move to a random position, and ungrasp.

    Args:
        graph: the ERDOS graph to which all operators are added.
    """
    logger.info("Starting the construction of the graph.")

    # First, insert the table in the world.
    insert_table_op = graph.add(
        InsertTableOperator, init_args=table_init_arguments)

    # Now, insert the block in the world.
    insert_block_op = graph.add(
        InsertBlockOperator, init_args=block_init_arguments)
    graph.connect([insert_table_op], [insert_block_op])

    # Initialize the robot and move it to the rest position.
    init_robot_op = graph.add(
        InitRobotOperator, init_args=robot_init_arguments)
    graph.connect([insert_block_op], [init_robot_op])

    # Initialize the gelsight operators and connect them to the rostopics.
    gel_sight_topics = [("/gelsightA/image_raw", Image, "gelsightA"),
                        ("/gelsightB/image_raw", Image, "gelsightB")]
    ros_gel_sight_op = graph.add(
        ROSSubscriberOp,
        name='ros_gel_sight',
        init_args={'ros_topics_type': gel_sight_topics},
        setup_args={'ros_topics_type': gel_sight_topics})
    gel_sight_a = graph.add(
        GelSightOperator,
        name="gelsight-a-op",
        init_args={'output_name': "gelsight-stream-a"},
        setup_args={
            'input_name': "gelsightA",
            'output_name': "gelsight-stream-a"
        })
    gel_sight_b = graph.add(
        GelSightOperator,
        name="gelsight-b-op",
        init_args={'output_name': "gelsight-stream-b"},
        setup_args={
            'input_name': "gelsightB",
            'output_name': "gelsight-stream-b"
        })
    graph.connect([ros_gel_sight_op], [gel_sight_a])
    graph.connect([ros_gel_sight_op], [gel_sight_b])

    # Retrieve the kinect images from the rostopics and feed them to the
    # object locator.
    ros_kinect_topics = [("/kinectA/image_raw", Image, "image-stream"),
                         ("/kinectA/depth_raw", Image, "depth-stream")]
    ros_kinect_op = graph.add(
        ROSSubscriberOp,
        name='ros_kinect',
        init_args={'ros_topics_type': ros_kinect_topics},
        setup_args={'ros_topics_type': ros_kinect_topics})
    locate_object_op = graph.add(
        MockLocateObjectOperator,
        name='locate-object-op',
        init_args={
            'image_stream_name': 'image-stream',
            'depth_stream_name': 'depth-stream',
            'trigger_stream_name': InitRobotOperator.stream_name
        },
        setup_args={
            'image_stream_name': 'image-stream',
            'depth_stream_name': 'depth-stream',
            'trigger_stream_name': InitRobotOperator.stream_name
        })
    graph.connect([ros_kinect_op, init_robot_op], [locate_object_op])

    # Move the Sawyer arm above the detected object. Note: the controller
    # and the GoToXYZ operator form a feedback loop (both connects below).
    goto_xyz_move_above_op = graph.add(
        GoToXYZOperator,
        name='goto-xyz',
        init_args={
            'limb_name': 'right',
            'output_stream_name': 'goto-move-above'
        },
        setup_args={
            'input_stream_name': MoveAboveObjectOperator.goto_stream_name,
            'output_stream_name': 'goto-move-above'
        })
    move_above_object_op = graph.add(
        MoveAboveObjectOperator,
        name='controller',
        setup_args={
            'trigger_stream_name': MockLocateObjectOperator.stream_name,
            'goto_xyz_stream_name': 'goto-move-above'
        })
    graph.connect([locate_object_op, goto_xyz_move_above_op],
                  [move_above_object_op])
    graph.connect([move_above_object_op], [goto_xyz_move_above_op])

    # Closes the gripper (also a feedback loop with the grasp operator).
    gripper_close_op = graph.add(
        MockGripperOperator,
        name="gripper-close-op",
        init_args={
            'gripper_speed': 0.25,
            'output_stream_name': 'gripper_close_stream'
        },
        setup_args={
            'gripper_stream': MockGraspObjectOperator.gripper_stream,
            'output_stream_name': 'gripper_close_stream'
        })
    grasp_object_op = graph.add(
        MockGraspObjectOperator,
        name='mock-grasp-object',
        setup_args={
            'trigger_stream_name': MoveAboveObjectOperator.stream_name,
            'gripper_stream_name': 'gripper_close_stream'
        })
    graph.connect([move_above_object_op, gripper_close_op], [grasp_object_op])
    graph.connect([grasp_object_op], [gripper_close_op])

    # Raises the object.
    raise_object_op = graph.add(
        RaiseObjectOperator,
        name='raise-object',
        setup_args={
            'location_stream_name': MockLocateObjectOperator.stream_name,
            'trigger_stream_name': MockGraspObjectOperator.
            action_complete_stream_name
        })
    goto_xyz_raise_op = graph.add(
        GoToXYZOperator,
        name="goto-xyz-raise",
        init_args={
            'limb_name': 'right',
            'output_stream_name': 'goto_xyz_raise'
        },
        setup_args={
            'input_stream_name': RaiseObjectOperator.stream_name,
            'output_stream_name': 'goto_xyz_raise'
        })
    graph.connect([locate_object_op, grasp_object_op], [raise_object_op])
    graph.connect([raise_object_op], [goto_xyz_raise_op])

    # Predicts whether the grip was successful using the gelsight cameras.
    predict_grip_op = graph.add(
        MockPredictGripOperator,
        name='predict-grip-op',
        setup_args={
            'gel_sight_a_stream_name': 'gelsight-stream-a',
            'gel_sight_b_stream_name': 'gelsight-stream-b',
            'trigger_stream_name': 'goto_xyz_raise'
        })
    graph.connect([gel_sight_a, gel_sight_b, goto_xyz_raise_op],
                  [predict_grip_op])

    # If the grip is successful, we return it to a random location.
    random_position_op = graph.add(
        RandomPositionOperator,
        name="random-pos-op",
        setup_args={
            'locate_object_stream_name': MockLocateObjectOperator.stream_name,
            'trigger_stream_name': MockPredictGripOperator.success_stream_name,
            'goto_xyz_stream_name': 'goto_random_pos'
        })
    goto_xyz_random_op = graph.add(
        GoToXYZOperator,
        name="goto-xyz-random",
        init_args={
            'limb_name': 'right',
            'output_stream_name': 'goto_random_pos'
        },
        setup_args={
            'input_stream_name': RandomPositionOperator.position_stream_name,
            'output_stream_name': 'goto_random_pos'
        })
    graph.connect([locate_object_op, predict_grip_op, goto_xyz_random_op],
                  [random_position_op])
    graph.connect([random_position_op], [goto_xyz_random_op])

    # Now, ungrasp the object (open the gripper).
    gripper_open_op = graph.add(
        MockGripperOperator,
        name="gripper-open-op",
        init_args={
            'gripper_speed': 0.25,
            'output_stream_name': 'gripper_open_stream'
        },
        setup_args={
            'gripper_stream': MockUngraspObjectOperator.gripper_stream,
            'output_stream_name': 'gripper_open_stream'
        })
    ungrasp_object_op = graph.add(
        MockUngraspObjectOperator,
        name="ungrasp-object-op",
        setup_args={
            'trigger_stream_name': RandomPositionOperator.
            action_complete_stream_name,
            'gripper_stream_name': 'gripper_open_stream'
        })
    graph.connect([random_position_op, gripper_open_op], [ungrasp_object_op])
    graph.connect([ungrasp_object_op], [gripper_open_op])

    logger.info("Finished constructing the execution graph!")
def main(argv):
    """Builds the robot-control dataflow graph and runs it on the ROS backend.

    Blocks forever after starting execution; a Ctrl-C (KeyboardInterrupt)
    ends the process cleanly.
    """
    dataflow_graph = erdos.graph.get_current_graph()
    construct_graph(dataflow_graph)
    dataflow_graph.execute("ros")
    # Keep the process alive so the executing graph is not torn down.
    try:
        while True:
            pass
    except KeyboardInterrupt:
        pass
# Hand control to app.run, which parses command-line flags and then
# invokes main() with the remaining argv.
if __name__ == "__main__":
    app.run(main)
| 35.245211 | 79 | 0.655941 |
580e4527d60cfad9625c9ee7cfe7648df3a25260 | 24,697 | py | Python | tensorflow/python/keras/saving/saved_model/save_impl.py | leike666666/tensorflow | a3fd0ddfcb716be124e95b51e96e6c1e4507ef64 | [
"Apache-2.0"
] | 12 | 2020-12-28T18:42:10.000Z | 2022-03-24T17:34:21.000Z | tensorflow/python/keras/saving/saved_model/save_impl.py | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 2 | 2021-08-25T15:57:35.000Z | 2022-02-10T01:09:32.000Z | tensorflow/python/keras/saving/saved_model/save_impl.py | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 3 | 2020-03-09T19:17:02.000Z | 2020-06-26T23:14:31.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras SavedModel serialization.
TODO (kathywu): Move to layer_serialization.py. Some model-specific logic should
go to model_serialization.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import load as keras_load
from tensorflow.python.keras.saving.saved_model import serialized_attributes
from tensorflow.python.keras.saving.saved_model import utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.lazy_loader import LazyLoader
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# pylint:disable=g-inconsistent-quotes
base_layer = LazyLoader(
"base_layer", globals(),
"tensorflow.python.keras.engine.base_layer")
input_layer = LazyLoader(
"input_layer", globals(),
"tensorflow.python.keras.engine.input_layer")
training_lib = LazyLoader(
"training_lib", globals(),
"tensorflow.python.keras.engine.training")
sequential_lib = LazyLoader(
"sequential_lib", globals(),
"tensorflow.python.keras.engine.sequential")
# pylint:enable=g-inconsistent-quotes
def should_skip_serialization(layer):
  """Skip serializing extra objects and functions if layer inputs aren't set."""
  if not isinstance(layer, training_lib.Model):
    # Plain layers only need to be built before they can be traced.
    if layer.built:
      return False
    logging.warning('Skipping full serialization of Keras layer {}, because '
                    'it is not built.'.format(layer))
    return True
  # For models, probe the input attributes; they raise AttributeError when the
  # model was never called and no input shapes were recorded. In that case we
  # cannot trace a signature, but tf.saved_model.save should still succeed.
  try:
    layer.inputs  # pylint:disable=pointless-statement
    layer.input_names  # pylint:disable=pointless-statement
  except AttributeError:
    logging.warning('Skipping full serialization of Keras model {}, because '
                    'its inputs are not defined.'.format(layer))
    return True
  return False
def wrap_layer_objects(layer, serialization_cache):
  """Returns extra trackable objects to attach to the serialized layer.

  Args:
    layer: Keras Layer object.
    serialization_cache: Dictionary shared between all objects during
      serialization.

  Returns:
    A dictionary containing all checkpointable objects from a
    SerializedAttributes object. See LayerAttributes and ModelAttributes for
    entire list of objects
  """
  # Wrap all regularization losses as tf.functions.
  # First, generate list of all regularization losses in this layer and
  # sublayers.
  all_losses = layer._callable_losses[:]  # pylint: disable=protected-access
  for child_layer in utils.list_all_layers(layer):
    all_losses.extend(child_layer._callable_losses)  # pylint: disable=protected-access
  # Next, wrap all loss functions as tf.functions. Use the serialization cache
  # to store already-wrapped functions. The cache also deduplicates: a loss
  # shared by several layers is wrapped only once, and the cache length is
  # used to give each wrapped loss a unique index-based name.
  keras_loss_cache = serialization_cache.setdefault('keras_losses', {})
  wrapped_loss_functions = []
  for loss_fn in all_losses:
    if loss_fn in keras_loss_cache:
      wrapped_loss_functions.append(keras_loss_cache[loss_fn])
    else:
      wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))
      keras_loss_cache[loss_fn] = wrapped_loss
      wrapped_loss_functions.append(wrapped_loss)
  # Losses belonging to this layer only (children excluded), looked up from
  # the cache that was just populated above.
  wrapped_layer_losses = [keras_loss_cache[fn]
                          for fn in layer._callable_losses[:]]  # pylint: disable=protected-access
  return dict(
      variables=data_structures.ListWrapper(layer.variables),
      trainable_variables=data_structures.ListWrapper(
          layer.trainable_variables),
      non_trainable_variables=data_structures.ListWrapper(
          layer.non_trainable_variables),
      layers=data_structures.ListWrapper(utils.list_all_layers(layer)),
      metrics=data_structures.ListWrapper(layer.metrics),
      regularization_losses=data_structures.ListWrapper(
          wrapped_loss_functions),
      layer_regularization_losses=data_structures.ListWrapper(
          wrapped_layer_losses))
def wrap_layer_functions(layer, serialization_cache):
  """Returns dict of wrapped layer call function and losses in tf.functions.

  Args:
    layer: Keras Layer object.
    serialization_cache: Dictionary shared between all objects during
      serialization.

  Returns:
    A dictionary containing all keras tf.functions to serialize. See
    LayerAttributes and ModelAttributes for the list of all attributes.
  """
  # Since Sequential models may be modified in place using model.add() or
  # model.pop(), don't use saved functions.
  if (isinstance(layer, keras_load.RevivedLayer) and
      not isinstance(layer, sequential_lib.Sequential)):
    return {fn_name: getattr(layer.keras_api, fn_name, None)
            for fn_name in serialized_attributes.LayerAttributes.all_functions}
  # Reset the losses of the layer and its children. The call function in each
  # child layer is replaced with tf.functions.
  # NOTE: both originals are captured so they can be restored at the bottom of
  # this function -- the replacement/reset is temporary, for tracing only.
  original_fns = _replace_child_layer_functions(layer, serialization_cache)
  original_losses = _reset_layer_losses(layer)
  # Wrap all the layer call and activity regularizer functions.
  # Use LayerCallCollection to ensure that all layer call functions (__call__,
  # call with losses) are traced with the same inputs.
  call_collection = LayerCallCollection(layer)
  call_fn_with_losses = call_collection.add_function(
      _wrap_call_and_conditional_losses(layer),
      '{}_layer_call_and_return_conditional_losses'.format(layer.name))
  call_fn = call_collection.add_function(
      _extract_outputs_from_fn(layer, call_fn_with_losses),
      '{}_layer_call_fn'.format(layer.name))
  fns = {'call_and_return_conditional_losses': call_fn_with_losses,
         '__call__': call_fn}
  if layer._activity_regularizer is not None:  # pylint: disable=protected-access
    fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)
    fns['call_and_return_all_conditional_losses'] = (
        call_collection.add_function(
            _append_activity_regularizer_loss(layer,
                                              call_fn_with_losses,
                                              fns['activity_regularizer_fn']),
            '{}_layer_call_and_return_all_conditional_losses'.format(layer.name)
        ))
  else:
    # Without an activity regularizer there is nothing to append, so "all
    # conditional losses" is the same function as the conditional-loss one.
    fns['activity_regularizer_fn'] = None
    fns['call_and_return_all_conditional_losses'] = call_fn_with_losses
  # Manually trigger traces before restoring the overwritten functions. The
  # functions are traced within the layer call context to ensure that layer
  # functions (e.g. add_loss) behave as though running in graph mode.
  with base_layer_utils.call_context().enter(
      layer, inputs=None, build_graph=True, training=None, saving=True):
    for fn in fns.values():
      if fn is not None and fn.input_signature is not None:
        fn.get_concrete_function()
  # Restore overwritten functions and losses
  _restore_child_layer_functions(original_fns)
  _restore_layer_losses(original_losses)
  return fns
def default_save_signature(layer):
  """Traces the model call function once, with layer losses held aside.

  Losses are cleared before tracing and restored afterwards so the trace does
  not permanently mutate the layer's recorded losses.
  """
  saved_losses = _reset_layer_losses(layer)
  traced_fn = saving_utils.trace_model_call(layer)
  traced_fn.get_concrete_function()
  _restore_layer_losses(saved_losses)
  return traced_fn
def _replace_child_layer_functions(layer, serialization_cache):
  """Replaces functions in the children layers with wrapped tf.functions.

  This step allows functions from parent layers to reference the wrapped
  functions from their children layers instead of retracing the ops.

  This function also resets all losses stored in the layer. These are stored in
  the returned dictionary. Use `_restore_child_layer_functions` to restore
  the original attributes.

  Args:
    layer: Keras Layer object.
    serialization_cache: Dictionary shared between all objects during
      serialization.

  Returns:
    Dictionary mapping layer objects -> original functions and losses:
      { Child layer 1: {
          'losses': Original losses,
          'call': Original call function
          'activity_regularizer': Original activity regularizer},
        Child layer 2: ...
      }
  """
  # pylint: disable=protected-access
  original_fns = {}
  for child_layer in utils.list_all_layers(layer):
    # Input layers have no call function to replace.
    if isinstance(child_layer, input_layer.InputLayer):
      continue
    if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:
      # Not serialized yet -- serialize the child now (recursively populates
      # the cache) and take its functions from the result.
      layer_fns = (
          child_layer._trackable_saved_model_saver._get_serialized_attributes(
              serialization_cache).functions)
    else:
      layer_fns = (
          serialization_cache[constants.KERAS_CACHE_KEY][child_layer].functions)
    if not layer_fns:
      # This indicates either:
      #   - circular dependency, which means the current layer's functions
      #     should be wrapped first.
      #   - Child layer's inputs are not defined, so its functions have not been
      #     wrapped. In this case, no replacement is necessary so move on to the
      #     next child.
      continue
    # Remember the originals so _restore_child_layer_functions can undo this.
    original_fns[child_layer] = {
        'call': child_layer.call,
        'activity_regularizer': child_layer._activity_regularizer
    }
    with trackable.no_automatic_dependency_tracking_scope(child_layer):
      try:
        child_layer._activity_regularizer = layer_fns.get(
            'activity_regularizer_fn')
      except AttributeError:
        # Some layers have an unsettable activity regularizer.
        pass
      child_layer.call = utils.use_wrapped_call(
          child_layer, layer_fns['call_and_return_conditional_losses'],
          default_training_value=False)
  return original_fns
# pylint: enable=protected-access
def _restore_child_layer_functions(original_fns):
  """Restores attributes replaced with `_replace_child_layer_functions`."""
  for child_layer, saved_attrs in original_fns.items():
    with trackable.no_automatic_dependency_tracking_scope(child_layer):
      child_layer.call = saved_attrs['call']
      try:
        # Mirrors the replacement step: some layers expose an unsettable
        # activity regularizer, in which case nothing was replaced.
        child_layer._activity_regularizer = saved_attrs['activity_regularizer']  # pylint: disable=protected-access
      except AttributeError:
        pass
# pylint: disable=protected-access
def _reset_layer_losses(parent_layer):
  """Resets losses of layer and its sublayers, and returns original losses."""
  saved_losses = {}
  for sublayer in utils.list_all_layers_and_sublayers(parent_layer):
    # Copy the loss lists before clearing them so they can be restored later
    # with _restore_layer_losses.
    saved_losses[sublayer] = {
        'losses': list(sublayer._losses),
        'eager_losses': list(sublayer._eager_losses),
    }
    with trackable.no_automatic_dependency_tracking_scope(sublayer):
      sublayer._losses = []
      sublayer._eager_losses = []
  return saved_losses
def _restore_layer_losses(losses_dict):
  """Reinstates the losses captured by `_reset_layer_losses`."""
  for layer, saved in losses_dict.items():
    with trackable.no_automatic_dependency_tracking_scope(layer):
      layer._losses = saved['losses']
      layer._eager_losses = saved['eager_losses']
# pylint: enable=protected-access
class LayerCallCollection(object):
  """Groups wrapped layer call functions.

  This is used to ensure that all layer call functions are traced with the same
  inputs-
    - call
    - call_and_return_conditional_losses
    - call_and_return_all_conditional_losses
  """
  def __init__(self, layer):
    self.layer = layer
    self.layer_call_method = _get_layer_call_method(layer)
    self._expects_training_arg = utils.layer_uses_training_bool(layer)
    self._training_arg_index = utils.get_training_arg_index(
        self.layer_call_method)
    # If the layer call function has kwargs, then the traced function cannot
    # have an input signature.
    arg_spec = tf_inspect.getfullargspec(self.layer_call_method)
    self._has_kwargs = bool(self._expects_training_arg or
                            arg_spec.defaults or
                            arg_spec.kwonlyargs or
                            arg_spec.varkw)
    self._input_signature = self._generate_input_signature(layer)
    # Weak values: functions are owned by the caller of add_function; entries
    # disappear automatically when the function is garbage collected.
    self._functions = weakref.WeakValueDictionary()
    # Bool indicating whether this object is currently tracing the layer call
    # functions.
    self.tracing = False
    # Get the input argument name from the args.
    args = arg_spec.args
    if tf_inspect.ismethod(self.layer_call_method):
      # Drop the bound `self` argument.
      args = args[1:]
    self._input_arg_name = args[0] if args else 'inputs'
  def _generate_input_signature(self, layer):
    """Inspects layer object and returns the inferred input signature.

    Args:
      layer: Layer object.

    Returns:
      List of possibly nested TensorSpecs of the layer call function inputs.
      The list does not contain the `training` argument.
    """
    if (isinstance(layer.call, def_function.Function) and
        layer.call.input_signature is not None):
      # An explicit signature on the call function always wins.
      return layer.call.input_signature
    else:
      if isinstance(layer, training_lib.Model):
        return saving_utils.model_input_signature(layer)
      elif layer.input_spec is not None:
        def to_tensor_spec_or_none(x):
          spec = input_spec.to_tensor_spec(x, layer._compute_dtype)  # pylint: disable=protected-access
          # If the shape is too general (e.g. multiple dimensions are allowed),
          # return None so that separate functions can be generated for each
          # inferred input signature.
          # TODO(b/134962016): currently partial signatures are not supported.
          if spec.shape == tensor_shape.TensorShape(None):
            return None
          return spec
        input_signature = [nest.map_structure(
            to_tensor_spec_or_none, layer.input_spec)]
        return input_signature
      else:
        return None
  def add_trace(self, *args, **kwargs):
    """Traces all functions with the same args and kwargs.

    Args:
      *args: Positional args passed to the original function.
      **kwargs: Keyword args passed to the original function.
    """
    args = list(args)
    kwargs = kwargs.copy()
    # `tracing` guards against recursion: LayerCall.__call__ /
    # get_concrete_function call back into add_trace unless this flag is set.
    self.tracing = True
    for fn in self._functions.values():
      # TODO(kathywu): Replace arguments with broader shapes defined in the
      # input signature.
      if self._expects_training_arg:
        # Trace once with training=True and once with training=False so both
        # branches are captured.
        def trace_with_training(value, fn=fn):
          utils.set_training_arg(value, self._training_arg_index, args, kwargs)
          with K.learning_phase_scope(value):
            fn.get_concrete_function(*args, **kwargs)
        trace_with_training(True)
        trace_with_training(False)
      else:
        fn.get_concrete_function(*args, **kwargs)
    self.tracing = False
  @property
  def fn_input_signature(self):
    """Returns input signature for the wrapped layer call function."""
    if self._has_kwargs:
      # Input signatures may only describe tensor arguments and kwargs are not
      # supported.
      return None
    if None in nest.flatten(self._input_signature):
      # TODO(b/134962016): If input signature cannot be partially defined.
      return None
    return self._input_signature
  def training_arg_was_passed(self, args, kwargs):
    # When the wrapper (not the layer itself) added the training arg, check
    # the wrapper's arg position; otherwise delegate to the layer's own logic.
    if not self.layer._expects_training_arg and self._expects_training_arg:  # pylint: disable=protected-access
      return (utils.get_training_arg(self._training_arg_index, args, kwargs)
              is not None)
    else:
      return self.layer._call_arg_was_passed(  # pylint: disable=protected-access
          'training', args, kwargs, inputs_in_args=True)
  def get_training_arg_value(self, args, kwargs):
    if not self.layer._expects_training_arg and self._expects_training_arg:  # pylint: disable=protected-access
      return utils.get_training_arg(self._training_arg_index, args, kwargs)
    else:
      return self.layer._get_call_arg_value(  # pylint: disable=protected-access
          'training', args, kwargs, inputs_in_args=True)
  def get_input_arg_value(self, args, kwargs):
    # Extracts the value of the first (input) argument by name or position.
    return self.layer._get_call_arg_value(  # pylint: disable=protected-access
        self._input_arg_name, args, kwargs, inputs_in_args=True)
  def _maybe_wrap_with_training_arg(self, call_fn):
    """Wraps call function with added training argument if necessary."""
    if not self.layer._expects_training_arg and self._expects_training_arg:  # pylint: disable=protected-access
      # Add training arg to wrapper function.
      arg_spec = tf_inspect.getfullargspec(call_fn)
      args = arg_spec.args + ['training']
      defaults = list(arg_spec.defaults or [])
      defaults.append(False)
      new_arg_spec = tf_inspect.FullArgSpec(
          args=args,
          varargs=arg_spec.varargs,
          varkw=arg_spec.varkw,
          defaults=defaults,
          kwonlyargs=arg_spec.kwonlyargs,
          kwonlydefaults=arg_spec.kwonlydefaults,
          annotations=arg_spec.annotations)
      # Set new training arg index
      self._training_arg_index = len(args) - 1
      if tf_inspect.ismethod(call_fn):
        self._training_arg_index -= 1
      def wrap_with_training_arg(*args, **kwargs):
        # Remove the training value, since the original call_fn does not expect
        # a training arg. Instead, the training value will be propagated using
        # the call context created in LayerCall.
        args = list(args)
        kwargs = kwargs.copy()
        utils.remove_training_arg(self._training_arg_index, args, kwargs)
        return call_fn(*args, **kwargs)
      return tf_decorator.make_decorator(
          target=call_fn,
          decorator_func=wrap_with_training_arg,
          decorator_argspec=new_arg_spec)
    return call_fn
  def add_function(self, call_fn, name):
    """Adds a layer call function to the collection."""
    self._functions[name] = fn = LayerCall(
        self, self._maybe_wrap_with_training_arg(call_fn), name,
        input_signature=self.fn_input_signature)
    if (None not in nest.flatten(self._input_signature) and
        self._has_kwargs):
      # Manually add traces for layers that have keyword arguments and have
      # a fully defined input signature.
      self.add_trace(*self._input_signature)
    return fn
def layer_call_wrapper(call_collection, method):
  """Ensures layer losses are kept the same, and runs method in call context."""
  def wrapper(*args, **kwargs):
    """Calls method within call context."""
    layer = call_collection.layer
    training = None
    inputs = call_collection.get_input_arg_value(args, kwargs)
    # pylint: disable=protected-access
    if (args or kwargs) and call_collection.training_arg_was_passed(
        args, kwargs):
      training = call_collection.get_training_arg_value(args, kwargs)
    # pylint: enable=protected-access
    # Clear losses before the call and restore them afterwards, so losses
    # added during tracing do not leak into the layer's recorded losses.
    original_losses = _reset_layer_losses(layer)
    with base_layer_utils.call_context().enter(
        layer, inputs=inputs, build_graph=False, training=training,
        saving=True):
      with base_layer_utils.autocast_context_manager(layer._compute_dtype):  # pylint: disable=protected-access
        ret = method(*args, **kwargs)
    _restore_layer_losses(original_losses)
    return ret
  # make_decorator preserves the original method's argspec on the wrapper.
  return tf_decorator.make_decorator(target=method, decorator_func=wrapper)
class LayerCall(def_function.Function):
  """Function that triggers traces of other functions in the same collection."""
  def __init__(self, call_collection, python_function, *args, **kwargs):
    self.call_collection = call_collection
    self.original_call = call_collection.layer_call_method
    # Wrap so that the function body runs inside the layer call context with
    # losses isolated (see layer_call_wrapper).
    python_function = layer_call_wrapper(call_collection, python_function)
    super(LayerCall, self).__init__(python_function, *args, **kwargs)
  def __call__(self, *args, **kwargs):
    # The `tracing` guard prevents infinite recursion: add_trace itself calls
    # get_concrete_function on every function in the collection.
    if not self.call_collection.tracing:
      self.call_collection.add_trace(*args, **kwargs)
    return super(LayerCall, self).__call__(*args, **kwargs)
  def get_concrete_function(self, *args, **kwargs):
    if not self.call_collection.tracing:
      self.call_collection.add_trace(*args, **kwargs)
    return super(LayerCall, self).get_concrete_function(*args, **kwargs)
def _wrap_call_and_conditional_losses(layer):
  """Wraps call function that returns a tuple of (outputs, losses).

  The losses returned are conditional on the inputs passed to the call
  function. Unconditional losses (e.g. weight regularization) are wrapped
  separately.

  Args:
    layer: a Keras layer object

  Returns:
    python call function that returns outputs and conditional losses -- excludes
    activity regularizer
  """
  # Create function that generates both outputs and losses
  layer_call = _get_layer_call_method(layer)
  def call_and_return_conditional_losses(inputs, *args, **kwargs):
    return layer_call(inputs, *args, **kwargs), layer.get_losses_for(inputs)
  return _create_call_fn_decorator(layer, call_and_return_conditional_losses)
def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):
  """Returns a function that returns only call function outputs."""
  if isinstance(layer, keras_load.RevivedLayer):
    # Revived layers already carry a plain call function; reuse it.
    return layer.keras_api.__call__  # pylint: disable=protected-access
  def call(inputs, *args, **kwargs):
    # Index [0] drops the losses from the (outputs, losses) tuple.
    return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]
  return _create_call_fn_decorator(layer, call)
def _append_activity_regularizer_loss(
    layer, call_fn_with_losses, activity_regularizer_fn):
  """Appends activity regularizer loss to losses returned by the wrapped fn."""
  def fn(inputs, *args, **kwargs):
    outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)
    # The activity regularizer is computed on the outputs, unlike the
    # conditional losses which are computed on the inputs.
    losses.append(activity_regularizer_fn(outputs))
    return outputs, losses
  return _create_call_fn_decorator(layer, fn)
def _create_call_fn_decorator(layer, wrapped_call):
  """Decorates `wrapped_call` so it mirrors the layer call method's argspec.

  Adds a `training` argument (default False) when the layer uses one, via
  utils.maybe_add_training_arg, and copies the original call's signature onto
  the wrapper with tf_decorator.make_decorator.
  """
  call_fn = _get_layer_call_method(layer)
  fn, arg_spec = utils.maybe_add_training_arg(
      call_fn, wrapped_call, layer._expects_training_arg,  # pylint: disable=protected-access
      default_training_value=False)
  return tf_decorator.make_decorator(
      target=call_fn,
      decorator_func=fn,
      decorator_argspec=arg_spec)
def _wrap_unconditional_loss(loss_fn, index):
  """Wraps a callable/unconditional loss, returning a serializable function."""
  # Extract the original loss function when it was wrapped in a partial.
  if isinstance(loss_fn, functools.partial):
    fn = loss_fn.args[0]
  else:
    fn = loss_fn
  # Already a tf.function -- nothing to wrap.
  if isinstance(fn, def_function.Function):
    return fn
  return def_function.Function(
      fn, 'loss_fn_{}'.format(index), input_signature=[])
def _wrap_activity_regularizer(layer):
  """Wraps the activity regularizer."""
  # pylint: disable=protected-access
  if isinstance(layer._activity_regularizer, def_function.Function):
    # Already a tf.function -- reuse it as-is.
    return layer._activity_regularizer
  # The regularizer takes a single tensor of unknown shape with the layer's
  # dtype (falling back to the global Keras floatx).
  return def_function.Function(
      layer._activity_regularizer,
      '{}_activity_regularizer'.format(layer.name),
      input_signature=[tensor_spec.TensorSpec(None, layer.dtype or K.floatx())])
# pylint: enable=protected-access
def _get_layer_call_method(layer):
  """Returns the plain python function behind `layer.call`."""
  call = layer.call
  if isinstance(call, (def_function.Function, function.ConcreteFunction)):
    # Unwrap a tf.function / concrete function back to its python source fn.
    return call.python_function
  return call
| 40.420622 | 111 | 0.732356 |
9fd7553a0e07c8f735cf9112db4d2a7abd4af39b | 759 | py | Python | Algorithms/037_AFRQ.py | ChaoticMarauder/Project_Rosalind | 6c70cd32908f3b11285e8505c3b43f1ea222decb | [
"MIT"
] | null | null | null | Algorithms/037_AFRQ.py | ChaoticMarauder/Project_Rosalind | 6c70cd32908f3b11285e8505c3b43f1ea222decb | [
"MIT"
] | null | null | null | Algorithms/037_AFRQ.py | ChaoticMarauder/Project_Rosalind | 6c70cd32908f3b11285e8505c3b43f1ea222decb | [
"MIT"
] | null | null | null | import math
def hardy_weinberg(p_list):
    """Probability of carrying at least one recessive allele, per population.

    Each entry of ``p_list`` is the frequency of the homozygous recessive
    genotype (q^2) in a population at Hardy-Weinberg equilibrium.  With
    q = sqrt(q^2) the recessive allele frequency, the probability that a
    randomly selected individual carries at least one recessive allele is
    q^2 + 2*q*(1 - q) = q * (2 - q).

    Args:
        p_list: iterable of homozygous-recessive genotype frequencies in [0, 1].

    Returns:
        List of carrier probabilities, one per input frequency.
    """
    # map(math.sqrt, ...) computes each q exactly once; the original loop
    # appended q * (2 - q) term by term.
    return [q * (2 - q) for q in map(math.sqrt, p_list)]
def main():
    """Reads the AFRQ dataset, computes carrier probabilities, prints and
    writes the space-separated result."""
    with open('datasets/rosalind_afrq.txt') as input_file:
        frequencies = [float(token) for token in input_file.read().split()]
    carrier_probs = hardy_weinberg(frequencies)
    result = ' '.join(str(value) for value in carrier_probs)
    print(result)
    with open('solutions/rosalind_afrq.txt', 'w') as output_file:
        output_file.write(result)
if __name__ == "__main__":
    main()
589958f2609b4e4bf2b08d5b35bc038388e03d99 | 1,125 | py | Python | docs/setup-example.py | tomochikahara/django-reusableapps | bedc0d80ea58b98ffaece46f96f1d137005b39b2 | [
"BSD-3-Clause"
] | 2 | 2015-07-03T13:00:49.000Z | 2016-05-08T07:46:39.000Z | docs/setup-example.py | tomochikahara/django-reusableapps | bedc0d80ea58b98ffaece46f96f1d137005b39b2 | [
"BSD-3-Clause"
] | null | null | null | docs/setup-example.py | tomochikahara/django-reusableapps | bedc0d80ea58b98ffaece46f96f1d137005b39b2 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
import os
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
    os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('dummyapp'):
    # Prune hidden directories in place so os.walk does not descend into
    # them.  The slice assignment replaces the original
    # `for i, d in enumerate(dirnames): del dirnames[i]` pattern, which
    # deleted from the list while iterating it -- that skips the entry
    # following each deletion and uses stale indices, so some hidden
    # directories slipped through.
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
    if '__init__.py' in filenames:
        # A package: record its dotted name derived from the path.
        pkg = dirpath.replace(os.path.sep, '.')
        if os.path.altsep:
            pkg = pkg.replace(os.path.altsep, '.')
        packages.append(pkg)
    elif filenames:
        # A data directory: record each file relative to the app root.
        prefix = dirpath[len('dummyapp')+1:]  # Strip "dummyapp/" or "dummyapp\"
        for f in filenames:
            data_files.append(os.path.join(prefix, f))
# Register the dummy app with setuptools.  The 'django.apps' entry point is
# what django-reusableapps scans to discover this package as a reusable app;
# packages/data_files come from the os.walk pass above.
setup(
    name='dummyapp',
    version='0.1',
    description='Here be dragons.',
    author='Dummy van Dum',
    author_email='dummy@dummy.com',
    packages=packages,
    package_dir={'dummyapp': 'dummyapp'},
    package_data={'dummyapp': data_files},
    entry_points={'django.apps': 'dummyapp = dummyapp'},
)
3bbb55099b7830d95580ba2108812e6ca536062b | 58,775 | py | Python | src/pretix/base/services/cart.py | NorDULaN/pretix | e2b9fe8e71f3852721a42c594047d88f5181fd29 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-25T00:11:00.000Z | 2020-04-25T00:11:00.000Z | src/pretix/base/services/cart.py | NorDULaN/pretix | e2b9fe8e71f3852721a42c594047d88f5181fd29 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/base/services/cart.py | NorDULaN/pretix | e2b9fe8e71f3852721a42c594047d88f5181fd29 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from collections import Counter, defaultdict, namedtuple
from datetime import datetime, time, timedelta
from decimal import Decimal
from typing import List, Optional
from celery.exceptions import MaxRetriesExceededError
from django.core.exceptions import ValidationError
from django.db import DatabaseError, transaction
from django.db.models import Count, Exists, OuterRef, Q
from django.dispatch import receiver
from django.utils.timezone import make_aware, now
from django.utils.translation import gettext as _, pgettext_lazy
from django_scopes import scopes_disabled
from pretix.base.channels import get_all_sales_channels
from pretix.base.i18n import language
from pretix.base.models import (
CartPosition, Event, InvoiceAddress, Item, ItemVariation, Seat,
SeatCategoryMapping, Voucher,
)
from pretix.base.models.event import SubEvent
from pretix.base.models.orders import OrderFee
from pretix.base.models.tax import TAXED_ZERO, TaxedPrice, TaxRule
from pretix.base.reldate import RelativeDateWrapper
from pretix.base.services.checkin import _save_answers
from pretix.base.services.locking import LockTimeoutException, NoLockManager
from pretix.base.services.pricing import get_price
from pretix.base.services.tasks import ProfiledEventTask
from pretix.base.settings import PERSON_NAME_SCHEMES
from pretix.base.signals import validate_cart_addons
from pretix.base.templatetags.rich_text import rich_text
from pretix.celery_app import app
from pretix.presale.signals import (
checkout_confirm_messages, fee_calculation_for_cart,
)
class CartError(Exception):
    """Raised when a cart operation cannot be performed.

    Constructed either with a plain message, or with a message plus
    %-formatting arguments; the message is translated via gettext before
    the arguments are interpolated.
    """
    def __init__(self, *args):
        msg = args[0]
        # Optional second positional argument carries %-format parameters.
        msgargs = args[1] if len(args) > 1 else None
        self.args = args
        if msgargs:
            msg = _(msg) % msgargs
        else:
            msg = _(msg)
        super().__init__(msg)
# User-facing messages for CartError, keyed by error code.  All values are
# translated via gettext (or pgettext_lazy for context-dependent strings);
# several contain %-format placeholders filled by CartError.
error_messages = {
    'busy': _('We were not able to process your request completely as the '
              'server was too busy. Please try again.'),
    'empty': _('You did not select any products.'),
    'unknown_position': _('Unknown cart position.'),
    'subevent_required': pgettext_lazy('subevent', 'No date was specified.'),
    'not_for_sale': _('You selected a product which is not available for sale.'),
    'unavailable': _('Some of the products you selected are no longer available. '
                     'Please see below for details.'),
    'in_part': _('Some of the products you selected are no longer available in '
                 'the quantity you selected. Please see below for details.'),
    'max_items': _("You cannot select more than %s items per order."),
    'max_items_per_product': _("You cannot select more than %(max)s items of the product %(product)s."),
    'min_items_per_product': _("You need to select at least %(min)s items of the product %(product)s."),
    'min_items_per_product_removed': _("We removed %(product)s from your cart as you can not buy less than "
                                       "%(min)s items of it."),
    'not_started': _('The presale period for this event has not yet started.'),
    'ended': _('The presale period for this event has ended.'),
    'some_subevent_not_started': _('The presale period for this event has not yet started. The affected positions '
                                   'have been removed from your cart.'),
    'some_subevent_ended': _('The presale period for one of the events in your cart has ended. The affected '
                             'positions have been removed from your cart.'),
    # Fixed typo: "to high" -> "too high".
    'price_too_high': _('The entered price is too high.'),
    'voucher_invalid': _('This voucher code is not known in our database.'),
    'voucher_redeemed': _('This voucher code has already been used the maximum number of times allowed.'),
    'voucher_redeemed_cart': _('This voucher code is currently locked since it is already contained in a cart. This '
                               'might mean that someone else is redeeming this voucher right now, or that you tried '
                               'to redeem it before but did not complete the checkout process. You can try to use it '
                               'again in %d minutes.'),
    'voucher_redeemed_partial': _('This voucher code can only be redeemed %d more times.'),
    'voucher_double': _('You already used this voucher code. Remove the associated line from your '
                        'cart if you want to use it for a different product.'),
    'voucher_expired': _('This voucher is expired.'),
    'voucher_invalid_item': _('This voucher is not valid for this product.'),
    'voucher_invalid_seat': _('This voucher is not valid for this seat.'),
    'voucher_no_match': _('We did not find any position in your cart that we could use this voucher for. If you want '
                          'to add something new to your cart using that voucher, you can do so with the voucher '
                          'redemption option on the bottom of the page.'),
    'voucher_item_not_available': _(
        'Your voucher is valid for a product that is currently not for sale.'),
    'voucher_invalid_subevent': pgettext_lazy('subevent', 'This voucher is not valid for this event date.'),
    'voucher_required': _('You need a valid voucher code to order this product.'),
    'inactive_subevent': pgettext_lazy('subevent', 'The selected event date is not active.'),
    'addon_invalid_base': _('You can not select an add-on for the selected product.'),
    'addon_duplicate_item': _('You can not select two variations of the same add-on product.'),
    'addon_max_count': _('You can select at most %(max)s add-ons from the category %(cat)s for the product %(base)s.'),
    'addon_min_count': _('You need to select at least %(min)s add-ons from the category %(cat)s for the '
                         'product %(base)s.'),
    # Fixed typo: "another project" -> "another product".
    'addon_only': _('One of the products you selected can only be bought as an add-on to another product.'),
    # Fixed grammar: "bought part of" -> "bought as part of".
    'bundled_only': _('One of the products you selected can only be bought as part of a bundle.'),
    'seat_required': _('You need to select a specific seat.'),
    'seat_invalid': _('Please select a valid seat.'),
    'seat_forbidden': _('You can not select a seat for this position.'),
    'seat_unavailable': _('The seat you selected has already been taken. Please select a different seat.'),
    'seat_multiple': _('You can not select the same seat multiple times.'),
    'gift_card': _("You entered a gift card instead of a voucher. Gift cards can be entered later on when you're asked for your payment details."),
}
class CartManager:
    """
    Collects a set of pending cart modifications as operations and applies
    them all at once (atomically, optionally under the event lock) in
    :py:meth:`commit`.
    """
    # One record type per kind of pending modification, accumulated in
    # ``self._operations`` until commit().
    AddOperation = namedtuple('AddOperation', ('count', 'item', 'variation', 'price', 'voucher', 'quotas',
                                               'addon_to', 'subevent', 'includes_tax', 'bundled', 'seat',
                                               'price_before_voucher'))
    RemoveOperation = namedtuple('RemoveOperation', ('position',))
    VoucherOperation = namedtuple('VoucherOperation', ('position', 'voucher', 'price'))
    ExtendOperation = namedtuple('ExtendOperation', ('position', 'count', 'item', 'variation', 'price', 'voucher',
                                                     'quotas', 'subevent', 'seat', 'price_before_voucher'))
    # Execution order of the operation types in _perform_operations():
    # removals first (freeing quota), then vouchers, extensions, additions.
    order = {
        RemoveOperation: 10,
        VoucherOperation: 15,
        ExtendOperation: 20,
        AddOperation: 30
    }
def __init__(self, event: Event, cart_id: str, invoice_address: InvoiceAddress=None, widget_data=None,
sales_channel='web'):
self.event = event
self.cart_id = cart_id
self.now_dt = now()
self._operations = []
self._quota_diff = Counter()
self._voucher_use_diff = Counter()
self._items_cache = {}
self._subevents_cache = {}
self._variations_cache = {}
self._seated_cache = {}
self._expiry = None
self.invoice_address = invoice_address
self._widget_data = widget_data or {}
self._sales_channel = sales_channel
@property
def positions(self):
return CartPosition.objects.filter(
Q(cart_id=self.cart_id) & Q(event=self.event)
).select_related('item', 'subevent')
def _is_seated(self, item, subevent):
if (item, subevent) not in self._seated_cache:
self._seated_cache[item, subevent] = item.seat_category_mappings.filter(subevent=subevent).exists()
return self._seated_cache[item, subevent]
def _calculate_expiry(self):
self._expiry = self.now_dt + timedelta(minutes=self.event.settings.get('reservation_time', as_type=int))
def _check_presale_dates(self):
if self.event.presale_start and self.now_dt < self.event.presale_start:
raise CartError(error_messages['not_started'])
if self.event.presale_has_ended:
raise CartError(error_messages['ended'])
if not self.event.has_subevents:
tlv = self.event.settings.get('payment_term_last', as_type=RelativeDateWrapper)
if tlv:
term_last = make_aware(datetime.combine(
tlv.datetime(self.event).date(),
time(hour=23, minute=59, second=59)
), self.event.timezone)
if term_last < self.now_dt:
raise CartError(error_messages['ended'])
def _extend_expiry_of_valid_existing_positions(self):
# Extend this user's cart session to ensure all items in the cart expire at the same time
# We can extend the reservation of items which are not yet expired without risk
self.positions.filter(expires__gt=self.now_dt).update(expires=self._expiry)
    def _delete_out_of_timeframe(self):
        """Delete positions whose subevent presale window or last payment date
        has passed. Returns an error message describing the removal (or None).
        """
        err = None
        for cp in self.positions:
            # Subevent presale has not started yet
            if cp.subevent and cp.subevent.presale_start and self.now_dt < cp.subevent.presale_start:
                err = error_messages['some_subevent_not_started']
                cp.addons.all().delete()
                cp.delete()
            # Subevent presale is already over
            if cp.subevent and cp.subevent.presale_end and self.now_dt > cp.subevent.presale_end:
                err = error_messages['some_subevent_ended']
                cp.addons.all().delete()
                cp.delete()
            # The last payment date acts as an implicit end of presale as well
            if cp.subevent:
                tlv = self.event.settings.get('payment_term_last', as_type=RelativeDateWrapper)
                if tlv:
                    term_last = make_aware(datetime.combine(
                        tlv.datetime(cp.subevent).date(),
                        time(hour=23, minute=59, second=59)
                    ), self.event.timezone)
                    if term_last < self.now_dt:
                        err = error_messages['some_subevent_ended']
                        cp.addons.all().delete()
                        cp.delete()
        return err
def _update_subevents_cache(self, se_ids: List[int]):
self._subevents_cache.update({
i.pk: i
for i in self.event.subevents.filter(id__in=[i for i in se_ids if i and i not in self._items_cache])
})
    def _update_items_cache(self, item_ids: List[int], variation_ids: List[int]):
        """Fetch the given items and variations of this event into the
        ``_items_cache`` / ``_variations_cache`` lookup dictionaries.

        Falsy IDs and IDs that are already cached are skipped.
        """
        self._items_cache.update({
            i.pk: i
            for i in self.event.items.select_related('category').prefetch_related(
                'addons', 'bundles', 'addons__addon_category', 'quotas'
            ).annotate(
                has_variations=Count('variations'),
            ).filter(
                id__in=[i for i in item_ids if i and i not in self._items_cache]
            ).order_by()
        })
        self._variations_cache.update({
            v.pk: v
            for v in ItemVariation.objects.filter(item__event=self.event).prefetch_related(
                'quotas'
            ).select_related('item', 'item__event').filter(
                id__in=[i for i in variation_ids if i and i not in self._variations_cache]
            ).order_by()
        })
def _check_max_cart_size(self):
if not get_all_sales_channels()[self._sales_channel].unlimited_items_per_order:
cartsize = self.positions.filter(addon_to__isnull=True).count()
cartsize += sum([op.count for op in self._operations if isinstance(op, self.AddOperation) and not op.addon_to])
cartsize -= len([1 for op in self._operations if isinstance(op, self.RemoveOperation) if
not op.position.addon_to_id])
if cartsize > int(self.event.settings.max_items_per_order):
# TODO: i18n plurals
raise CartError(_(error_messages['max_items']) % (self.event.settings.max_items_per_order,))
def _check_item_constraints(self, op, current_ops=[]):
if isinstance(op, self.AddOperation) or isinstance(op, self.ExtendOperation):
if not (
(isinstance(op, self.AddOperation) and op.addon_to == 'FAKE') or
(isinstance(op, self.ExtendOperation) and op.position.is_bundled)
):
if op.item.require_voucher and op.voucher is None:
raise CartError(error_messages['voucher_required'])
if op.item.hide_without_voucher and (op.voucher is None or not op.voucher.show_hidden_items):
raise CartError(error_messages['voucher_required'])
if not op.item.is_available() or (op.variation and not op.variation.active):
raise CartError(error_messages['unavailable'])
if self._sales_channel not in op.item.sales_channels:
raise CartError(error_messages['unavailable'])
if op.item.has_variations and not op.variation:
raise CartError(error_messages['not_for_sale'])
if op.variation and op.variation.item_id != op.item.pk:
raise CartError(error_messages['not_for_sale'])
if op.voucher and not op.voucher.applies_to(op.item, op.variation):
raise CartError(error_messages['voucher_invalid_item'])
if op.voucher and op.voucher.seat and op.voucher.seat != op.seat:
raise CartError(error_messages['voucher_invalid_seat'])
if op.voucher and op.voucher.subevent_id and op.voucher.subevent_id != op.subevent.pk:
raise CartError(error_messages['voucher_invalid_subevent'])
if op.subevent and not op.subevent.active:
raise CartError(error_messages['inactive_subevent'])
if op.subevent and op.subevent.presale_start and self.now_dt < op.subevent.presale_start:
raise CartError(error_messages['not_started'])
if op.subevent and op.subevent.presale_has_ended:
raise CartError(error_messages['ended'])
seated = self._is_seated(op.item, op.subevent)
if seated and (not op.seat or (op.seat.blocked and self._sales_channel not in self.event.settings.seating_allow_blocked_seats_for_channel)):
raise CartError(error_messages['seat_invalid'])
elif op.seat and not seated:
raise CartError(error_messages['seat_forbidden'])
elif op.seat and op.seat.product != op.item:
raise CartError(error_messages['seat_invalid'])
elif op.seat and op.count > 1:
raise CartError('Invalid request: A seat can only be bought once.')
if op.subevent:
tlv = self.event.settings.get('payment_term_last', as_type=RelativeDateWrapper)
if tlv:
term_last = make_aware(datetime.combine(
tlv.datetime(op.subevent).date(),
time(hour=23, minute=59, second=59)
), self.event.timezone)
if term_last < self.now_dt:
raise CartError(error_messages['ended'])
if isinstance(op, self.AddOperation):
if op.item.category and op.item.category.is_addon and not (op.addon_to and op.addon_to != 'FAKE'):
raise CartError(error_messages['addon_only'])
if op.item.require_bundling and not op.addon_to == 'FAKE':
raise CartError(error_messages['bundled_only'])
def _get_price(self, item: Item, variation: Optional[ItemVariation],
voucher: Optional[Voucher], custom_price: Optional[Decimal],
subevent: Optional[SubEvent], cp_is_net: bool=None, force_custom_price=False,
bundled_sum=Decimal('0.00')):
try:
return get_price(
item, variation, voucher, custom_price, subevent,
custom_price_is_net=cp_is_net if cp_is_net is not None else self.event.settings.display_net_prices,
invoice_address=self.invoice_address, force_custom_price=force_custom_price, bundled_sum=bundled_sum
)
except ValueError as e:
if str(e) == 'price_too_high':
raise CartError(error_messages['price_too_high'])
else:
raise e
    def extend_expired_positions(self):
        """Queue ExtendOperations for all expired positions, re-computing
        their prices; positions without any quota are queued for removal.

        Returns an error message (or None).
        """
        expired = self.positions.filter(expires__lte=self.now_dt).select_related(
            'item', 'variation', 'voucher', 'addon_to', 'addon_to__item'
        ).annotate(
            requires_seat=Exists(
                SeatCategoryMapping.objects.filter(
                    Q(product=OuterRef('item'))
                    & (Q(subevent=OuterRef('subevent')) if self.event.has_subevents else Q(subevent__isnull=True))
                )
            )
        ).prefetch_related(
            'item__quotas',
            'variation__quotas',
            'addons'
        ).order_by('-is_bundled')
        err = None
        changed_prices = {}
        for cp in expired:
            # Skip positions that are (or whose base position is) already queued for removal
            removed_positions = {op.position.pk for op in self._operations if isinstance(op, self.RemoveOperation)}
            if cp.pk in removed_positions or (cp.addon_to_id and cp.addon_to_id in removed_positions):
                continue
            cp.item.requires_seat = cp.requires_seat
            if cp.is_bundled:
                # Bundled positions keep the bundle's designated price
                bundle = cp.addon_to.item.bundles.filter(bundled_item=cp.item, bundled_variation=cp.variation).first()
                if bundle:
                    price = bundle.designated_price or 0
                else:
                    price = cp.price
                changed_prices[cp.pk] = price
                if not cp.includes_tax:
                    # Tax-free position: keep only the net amount
                    price = self._get_price(cp.item, cp.variation, cp.voucher, price, cp.subevent,
                                            force_custom_price=True, cp_is_net=False)
                    price = TaxedPrice(net=price.net, gross=price.net, rate=0, tax=0, name='')
                else:
                    price = self._get_price(cp.item, cp.variation, cp.voucher, price, cp.subevent,
                                            force_custom_price=True)
                pbv = TAXED_ZERO
            else:
                # Designated prices of bundled add-ons are included in the base price
                bundled_sum = Decimal('0.00')
                if not cp.addon_to_id:
                    for bundledp in cp.addons.all():
                        if bundledp.is_bundled:
                            bundledprice = changed_prices.get(bundledp.pk, bundledp.price)
                            bundled_sum += bundledprice
                if not cp.includes_tax:
                    price = self._get_price(cp.item, cp.variation, cp.voucher, cp.price, cp.subevent,
                                            cp_is_net=True, bundled_sum=bundled_sum)
                    price = TaxedPrice(net=price.net, gross=price.net, rate=0, tax=0, name='')
                    pbv = self._get_price(cp.item, cp.variation, None, cp.price, cp.subevent,
                                          cp_is_net=True, bundled_sum=bundled_sum)
                    pbv = TaxedPrice(net=pbv.net, gross=pbv.net, rate=0, tax=0, name='')
                else:
                    price = self._get_price(cp.item, cp.variation, cp.voucher, cp.price, cp.subevent,
                                            bundled_sum=bundled_sum)
                    pbv = self._get_price(cp.item, cp.variation, None, cp.price, cp.subevent,
                                          bundled_sum=bundled_sum)
            quotas = list(cp.quotas)
            if not quotas:
                # The product is no longer on sale at all: drop the position
                self._operations.append(self.RemoveOperation(position=cp))
                err = error_messages['unavailable']
                continue
            if not cp.voucher or (not cp.voucher.allow_ignore_quota and not cp.voucher.block_quota):
                for quota in quotas:
                    self._quota_diff[quota] += 1
            else:
                # Voucher bypasses or already blocks quota: no quota demand
                quotas = []
            op = self.ExtendOperation(
                position=cp, item=cp.item, variation=cp.variation, voucher=cp.voucher, count=1,
                price=price, quotas=quotas, subevent=cp.subevent, seat=cp.seat, price_before_voucher=pbv
            )
            self._check_item_constraints(op)
            if cp.voucher:
                self._voucher_use_diff[cp.voucher] += 1
            self._operations.append(op)
        return err
def apply_voucher(self, voucher_code: str):
if self._operations:
raise CartError('Applying a voucher to the whole cart should not be combined with other operations.')
try:
voucher = self.event.vouchers.get(code__iexact=voucher_code.strip())
except Voucher.DoesNotExist:
raise CartError(error_messages['voucher_invalid'])
voucher_use_diff = Counter()
ops = []
if not voucher.is_active():
raise CartError(error_messages['voucher_expired'])
for p in self.positions:
if p.voucher_id:
continue
if not voucher.applies_to(p.item, p.variation):
continue
if voucher.seat and voucher.seat != p.seat:
continue
if voucher.subevent_id and voucher.subevent_id != p.subevent_id:
continue
if p.is_bundled:
continue
bundled_sum = Decimal('0.00')
if not p.addon_to_id:
for bundledp in p.addons.all():
if bundledp.is_bundled:
bundledprice = bundledp.price
bundled_sum += bundledprice
price = self._get_price(p.item, p.variation, voucher, None, p.subevent, bundled_sum=bundled_sum)
"""
if price.gross > p.price:
continue
"""
voucher_use_diff[voucher] += 1
ops.append((p.price - price.gross, self.VoucherOperation(p, voucher, price)))
# If there are not enough voucher usages left for the full cart, let's apply them in the order that benefits
# the user the most.
ops.sort(key=lambda k: k[0], reverse=True)
self._operations += [k[1] for k in ops]\
if not voucher_use_diff:
raise CartError(error_messages['voucher_no_match'])
self._voucher_use_diff += voucher_use_diff
    def add_new_items(self, items: List[dict]):
        """Queue AddOperations for the given item dicts (keys: ``item``,
        ``variation``, ``count``, ``price``, ``voucher``, ``seat``,
        ``subevent``). Raises CartError on invalid input; quota and voucher
        availability is only enforced later in commit().
        """
        # Fetch items from the database
        self._update_items_cache([i['item'] for i in items], [i['variation'] for i in items])
        self._update_subevents_cache([i['subevent'] for i in items if i.get('subevent')])
        quota_diff = Counter()
        voucher_use_diff = Counter()
        operations = []
        for i in items:
            if self.event.has_subevents:
                if not i.get('subevent') or int(i.get('subevent')) not in self._subevents_cache:
                    raise CartError(error_messages['subevent_required'])
                subevent = self._subevents_cache[int(i.get('subevent'))]
            else:
                subevent = None
            # When a seat is given, we ignore the item that was given, since we can infer it from the
            # seat. The variation is still relevant, though!
            seat = None
            if i.get('seat'):
                try:
                    seat = (subevent or self.event).seats.get(seat_guid=i.get('seat'))
                except Seat.DoesNotExist:
                    raise CartError(error_messages['seat_invalid'])
                except Seat.MultipleObjectsReturned:
                    raise CartError(error_messages['seat_invalid'])
                i['item'] = seat.product_id
                if i['item'] not in self._items_cache:
                    self._update_items_cache([i['item']], [i['variation']])
            # Check whether the specified items are part of what we just fetched from the database
            # If they are not, the user supplied item IDs which either do not exist or belong to
            # a different event
            if i['item'] not in self._items_cache or (i['variation'] and i['variation'] not in self._variations_cache):
                raise CartError(error_messages['not_for_sale'])
            item = self._items_cache[i['item']]
            variation = self._variations_cache[i['variation']] if i['variation'] is not None else None
            voucher = None
            if i.get('voucher'):
                try:
                    voucher = self.event.vouchers.get(code__iexact=i.get('voucher').strip())
                except Voucher.DoesNotExist:
                    raise CartError(error_messages['voucher_invalid'])
                else:
                    voucher_use_diff[voucher] += i['count']
            # Fetch all quotas. If there are no quotas, this item is not allowed to be sold.
            quotas = list(item.quotas.filter(subevent=subevent)
                          if variation is None else variation.quotas.filter(subevent=subevent))
            if not quotas:
                raise CartError(error_messages['unavailable'])
            if not voucher or (not voucher.allow_ignore_quota and not voucher.block_quota):
                for quota in quotas:
                    quota_diff[quota] += i['count']
            else:
                # Voucher bypasses or already blocks quota: no quota demand
                quotas = []
            # Fetch bundled items
            bundled = []
            bundled_sum = Decimal('0.00')
            db_bundles = list(item.bundles.all())
            self._update_items_cache([b.bundled_item_id for b in db_bundles], [b.bundled_variation_id for b in db_bundles])
            for bundle in db_bundles:
                if bundle.bundled_item_id not in self._items_cache or (
                        bundle.bundled_variation_id and bundle.bundled_variation_id not in self._variations_cache
                ):
                    raise CartError(error_messages['not_for_sale'])
                bitem = self._items_cache[bundle.bundled_item_id]
                bvar = self._variations_cache[bundle.bundled_variation_id] if bundle.bundled_variation_id else None
                bundle_quotas = list(bitem.quotas.filter(subevent=subevent)
                                     if bvar is None else bvar.quotas.filter(subevent=subevent))
                if not bundle_quotas:
                    raise CartError(error_messages['unavailable'])
                if not voucher or not voucher.allow_ignore_quota:
                    for quota in bundle_quotas:
                        quota_diff[quota] += bundle.count * i['count']
                else:
                    bundle_quotas = []
                if bundle.designated_price:
                    bprice = self._get_price(bitem, bvar, None, bundle.designated_price, subevent, force_custom_price=True,
                                             cp_is_net=False)
                else:
                    bprice = TAXED_ZERO
                bundled_sum += bundle.designated_price * bundle.count
                # addon_to='FAKE' marks bundled add-ons of a not-yet-created base position
                bop = self.AddOperation(
                    count=bundle.count, item=bitem, variation=bvar, price=bprice,
                    voucher=None, quotas=bundle_quotas, addon_to='FAKE', subevent=subevent,
                    includes_tax=bool(bprice.rate), bundled=[], seat=None, price_before_voucher=bprice,
                )
                self._check_item_constraints(bop, operations)
                bundled.append(bop)
            price = self._get_price(item, variation, voucher, i.get('price'), subevent, bundled_sum=bundled_sum)
            pbv = self._get_price(item, variation, None, i.get('price'), subevent, bundled_sum=bundled_sum)
            op = self.AddOperation(
                count=i['count'], item=item, variation=variation, price=price, voucher=voucher, quotas=quotas,
                addon_to=False, subevent=subevent, includes_tax=bool(price.rate), bundled=bundled, seat=seat,
                price_before_voucher=pbv
            )
            self._check_item_constraints(op, operations)
            operations.append(op)
        self._quota_diff.update(quota_diff)
        self._voucher_use_diff += voucher_use_diff
        self._operations += operations
def remove_item(self, pos_id: int):
# TODO: We could calculate quotadiffs and voucherdiffs here, which would lead to more
# flexible usages (e.g. a RemoveOperation and an AddOperation in the same transaction
# could cancel each other out quota-wise). However, we are not taking this performance
# penalty for now as there is currently no outside interface that would allow building
# such a transaction.
try:
cp = self.positions.get(pk=pos_id)
except CartPosition.DoesNotExist:
raise CartError(error_messages['unknown_position'])
self._operations.append(self.RemoveOperation(position=cp))
def clear(self):
# TODO: We could calculate quotadiffs and voucherdiffs here, which would lead to more
# flexible usages (e.g. a RemoveOperation and an AddOperation in the same transaction
# could cancel each other out quota-wise). However, we are not taking this performance
# penalty for now as there is currently no outside interface that would allow building
# such a transaction.
for cp in self.positions.filter(addon_to__isnull=True):
self._operations.append(self.RemoveOperation(position=cp))
    def set_addons(self, addons):
        """Reconcile the cart's add-on positions with the desired *addons*
        (list of dicts with keys ``addon_to``, ``item``, ``variation``).

        Creates AddOperations for newly selected add-ons, RemoveOperations
        for deselected ones, and validates per-category min/max counts.
        """
        self._update_items_cache(
            [a['item'] for a in addons],
            [a['variation'] for a in addons],
        )
        # Prepare various containers to hold data later
        current_addons = defaultdict(dict)  # CartPos -> currently attached add-ons
        input_addons = defaultdict(set)  # CartPos -> add-ons according to input
        selected_addons = defaultdict(set)  # CartPos -> final desired set of add-ons
        cpcache = {}  # CartPos.pk -> CartPos
        quota_diff = Counter()  # Quota -> Number of usages
        operations = []
        available_categories = defaultdict(set)  # CartPos -> Category IDs to choose from
        price_included = defaultdict(dict)  # CartPos -> CategoryID -> bool(price is included)
        toplevel_cp = self.positions.filter(
            addon_to__isnull=True
        ).prefetch_related(
            'addons', 'item__addons', 'item__addons__addon_category'
        ).select_related('item', 'variation')
        # Prefill some of the cache containers
        for cp in toplevel_cp:
            available_categories[cp.pk] = {iao.addon_category_id for iao in cp.item.addons.all()}
            price_included[cp.pk] = {iao.addon_category_id: iao.price_included for iao in cp.item.addons.all()}
            cpcache[cp.pk] = cp
            current_addons[cp] = {
                (a.item_id, a.variation_id): a
                for a in cp.addons.all()
                if not a.is_bundled
            }
        # Create operations, perform various checks
        for a in addons:
            # Check whether the specified items are part of what we just fetched from the database
            # If they are not, the user supplied item IDs which either do not exist or belong to
            # a different event
            if a['item'] not in self._items_cache or (a['variation'] and a['variation'] not in self._variations_cache):
                raise CartError(error_messages['not_for_sale'])
            # Only attach addons to things that are actually in this user's cart
            if a['addon_to'] not in cpcache:
                raise CartError(error_messages['addon_invalid_base'])
            cp = cpcache[a['addon_to']]
            item = self._items_cache[a['item']]
            variation = self._variations_cache[a['variation']] if a['variation'] is not None else None
            if item.category_id not in available_categories[cp.pk]:
                raise CartError(error_messages['addon_invalid_base'])
            # Fetch all quotas. If there are no quotas, this item is not allowed to be sold.
            quotas = list(item.quotas.filter(subevent=cp.subevent)
                          if variation is None else variation.quotas.filter(subevent=cp.subevent))
            if not quotas:
                raise CartError(error_messages['unavailable'])
            # Every item can be attached to every CartPosition at most once
            if a['item'] in ([_a[0] for _a in input_addons[cp.id]]):
                raise CartError(error_messages['addon_duplicate_item'])
            input_addons[cp.id].add((a['item'], a['variation']))
            selected_addons[cp.id, item.category_id].add((a['item'], a['variation']))
            if (a['item'], a['variation']) not in current_addons[cp]:
                # This add-on is new, add it to the cart
                for quota in quotas:
                    quota_diff[quota] += 1
                if price_included[cp.pk].get(item.category_id):
                    price = TAXED_ZERO
                else:
                    price = self._get_price(item, variation, None, None, cp.subevent)
                op = self.AddOperation(
                    count=1, item=item, variation=variation, price=price, voucher=None, quotas=quotas,
                    addon_to=cp, subevent=cp.subevent, includes_tax=bool(price.rate), bundled=[], seat=None,
                    price_before_voucher=None
                )
                self._check_item_constraints(op, operations)
                operations.append(op)
        # Check constraints on the add-on combinations
        for cp in toplevel_cp:
            item = cp.item
            for iao in item.addons.all():
                selected = selected_addons[cp.id, iao.addon_category_id]
                if len(selected) > iao.max_count:
                    # TODO: Proper i18n
                    # TODO: Proper pluralization
                    raise CartError(
                        error_messages['addon_max_count'],
                        {
                            'base': str(item.name),
                            'max': iao.max_count,
                            'cat': str(iao.addon_category.name),
                        }
                    )
                elif len(selected) < iao.min_count:
                    # TODO: Proper i18n
                    # TODO: Proper pluralization
                    raise CartError(
                        error_messages['addon_min_count'],
                        {
                            'base': str(item.name),
                            'min': iao.min_count,
                            'cat': str(iao.addon_category.name),
                        }
                    )
                # Let plugins veto the selected combination
                validate_cart_addons.send(
                    sender=self.event,
                    addons={
                        (self._items_cache[s[0]], self._variations_cache[s[1]] if s[1] else None)
                        for s in selected
                    },
                    base_position=cp,
                    iao=iao
                )
        # Detect removed add-ons and create RemoveOperations
        for cp, al in current_addons.items():
            for k, v in al.items():
                if k not in input_addons[cp.id]:
                    if v.expires > self.now_dt:
                        quotas = list(v.quotas)
                        for quota in quotas:
                            quota_diff[quota] -= 1
                    op = self.RemoveOperation(position=v)
                    operations.append(op)
        self._quota_diff.update(quota_diff)
        self._operations += operations
def _get_quota_availability(self):
quotas_ok = defaultdict(int)
for quota, count in self._quota_diff.items():
if count <= 0:
quotas_ok[quota] = 0
avail = quota.availability(self.now_dt)
if avail[1] is not None and avail[1] < count:
quotas_ok[quota] = min(count, avail[1])
else:
quotas_ok[quota] = count
return quotas_ok
    def _get_voucher_availability(self):
        """Return, per voucher, how many further usages are available, taking
        redemptions blocked by other active carts into account.

        Also fills ``self._voucher_depend_on_cart`` with vouchers whose
        availability depends on other carts' (possibly expiring) positions.
        Raises CartError for vouchers that expired in the meantime.
        """
        vouchers_ok = {}
        self._voucher_depend_on_cart = set()
        for voucher, count in self._voucher_use_diff.items():
            voucher.refresh_from_db()
            if voucher.valid_until is not None and voucher.valid_until < self.now_dt:
                raise CartError(error_messages['voucher_expired'])
            # Usages held by active cart positions, excluding the positions
            # we are currently extending ourselves
            redeemed_in_carts = CartPosition.objects.filter(
                Q(voucher=voucher) & Q(event=self.event) &
                Q(expires__gte=self.now_dt)
            ).exclude(pk__in=[
                op.position.id for op in self._operations if isinstance(op, self.ExtendOperation)
            ])
            cart_count = redeemed_in_carts.count()
            v_avail = voucher.max_usages - voucher.redeemed - cart_count
            if cart_count > 0:
                self._voucher_depend_on_cart.add(voucher)
            vouchers_ok[voucher] = v_avail
        return vouchers_ok
    def _check_min_max_per_product(self):
        """Enforce per-product minimum/maximum quantities on the resulting cart.

        Exceeding a maximum raises CartError immediately; falling below a
        minimum removes the product from the cart entirely and returns an
        error message (or raises, if nothing was removed).
        """
        # Project the item counts the cart will have after all operations
        items = Counter()
        for p in self.positions:
            items[p.item] += 1
        for op in self._operations:
            if isinstance(op, self.AddOperation):
                items[op.item] += op.count
            elif isinstance(op, self.RemoveOperation):
                items[op.position.item] -= 1
        err = None
        for item, count in items.items():
            if count == 0:
                continue
            if item.max_per_order and count > item.max_per_order:
                raise CartError(
                    _(error_messages['max_items_per_product']) % {
                        'max': item.max_per_order,
                        'product': item.name
                    }
                )
            if item.min_per_order and count < item.min_per_order:
                # Drop all planned additions of this product ...
                self._operations = [o for o in self._operations if not (
                    isinstance(o, self.AddOperation) and o.item.pk == item.pk
                )]
                removals = [o.position.pk for o in self._operations if isinstance(o, self.RemoveOperation)]
                # ... and remove its existing cart positions as well
                for p in self.positions:
                    if p.item_id == item.pk and p.pk not in removals:
                        self._operations.append(self.RemoveOperation(position=p))
                        err = _(error_messages['min_items_per_product_removed']) % {
                            'min': item.min_per_order,
                            'product': item.name
                        }
                if not err:
                    raise CartError(
                        _(error_messages['min_items_per_product']) % {
                            'min': item.min_per_order,
                            'product': item.name
                        }
                    )
        return err
    def _perform_operations(self):
        """Execute all queued operations against the database.

        Assumes quotas/vouchers are already locked if required. Operations
        run in the fixed order defined by ``self.order`` (removals first, so
        their quota is freed for subsequent additions) and degrade gracefully
        when quota or voucher availability is insufficient. Returns an error
        message (or None) describing any partial failure.
        """
        vouchers_ok = self._get_voucher_availability()
        quotas_ok = self._get_quota_availability()
        err = None
        new_cart_positions = []
        err = err or self._check_min_max_per_product()
        self._operations.sort(key=lambda a: self.order[type(a)])
        seats_seen = set()
        for iop, op in enumerate(self._operations):
            if isinstance(op, self.RemoveOperation):
                if op.position.expires > self.now_dt:
                    # The reservation was still valid: give the quota units back
                    for q in op.position.quotas:
                        quotas_ok[q] += 1
                op.position.addons.all().delete()
                op.position.delete()
            elif isinstance(op, self.AddOperation) or isinstance(op, self.ExtendOperation):
                # Create a CartPosition for as much items as we can
                requested_count = quota_available_count = voucher_available_count = op.count
                if op.seat:
                    if op.seat in seats_seen:
                        err = err or error_messages['seat_multiple']
                    seats_seen.add(op.seat)
                if op.quotas:
                    quota_available_count = min(requested_count, min(quotas_ok[q] for q in op.quotas))
                if op.voucher:
                    voucher_available_count = min(voucher_available_count, vouchers_ok[op.voucher])
                if quota_available_count < 1:
                    err = err or error_messages['unavailable']
                elif quota_available_count < requested_count:
                    err = err or error_messages['in_part']
                if voucher_available_count < 1:
                    if op.voucher in self._voucher_depend_on_cart:
                        err = err or error_messages['voucher_redeemed_cart'] % self.event.settings.reservation_time
                    else:
                        err = err or error_messages['voucher_redeemed']
                elif voucher_available_count < requested_count:
                    err = err or error_messages['voucher_redeemed_partial'] % voucher_available_count
                available_count = min(quota_available_count, voucher_available_count)
                if isinstance(op, self.AddOperation):
                    # Bundled products further constrain how many we can add
                    for b in op.bundled:
                        b_quotas = list(b.quotas)
                        if not b_quotas:
                            if not op.voucher or not op.voucher.allow_ignore_quota:
                                err = err or error_messages['unavailable']
                                available_count = 0
                            continue
                        b_quota_available_count = min(available_count * b.count, min(quotas_ok[q] for q in b_quotas))
                        if b_quota_available_count < b.count:
                            err = err or error_messages['unavailable']
                            available_count = 0
                        elif b_quota_available_count < available_count * b.count:
                            err = err or error_messages['in_part']
                            available_count = b_quota_available_count // b.count
                        for q in b_quotas:
                            quotas_ok[q] -= available_count * b.count
                            # TODO: is this correct?
                for q in op.quotas:
                    quotas_ok[q] -= available_count
                if op.voucher:
                    vouchers_ok[op.voucher] -= available_count
                if any(qa < 0 for qa in quotas_ok.values()):
                    # Safeguard, shouldn't happen
                    err = err or error_messages['unavailable']
                    available_count = 0
                if isinstance(op, self.AddOperation):
                    if op.seat and not op.seat.is_available(ignore_voucher_id=op.voucher.id if op.voucher else None, sales_channel=self._sales_channel):
                        available_count = 0
                        err = err or error_messages['seat_unavailable']
                    for k in range(available_count):
                        cp = CartPosition(
                            event=self.event, item=op.item, variation=op.variation,
                            price=op.price.gross, expires=self._expiry, cart_id=self.cart_id,
                            voucher=op.voucher, addon_to=op.addon_to if op.addon_to else None,
                            subevent=op.subevent, includes_tax=op.includes_tax, seat=op.seat,
                            price_before_voucher=op.price_before_voucher.gross if op.price_before_voucher is not None else None
                        )
                        # Pre-fill attendee data passed in through the widget
                        if self.event.settings.attendee_names_asked:
                            scheme = PERSON_NAME_SCHEMES.get(self.event.settings.name_scheme)
                            if 'attendee-name' in self._widget_data:
                                cp.attendee_name_parts = {'_legacy': self._widget_data['attendee-name']}
                            if any('attendee-name-{}'.format(k.replace('_', '-')) in self._widget_data for k, l, w
                                   in scheme['fields']):
                                cp.attendee_name_parts = {
                                    k: self._widget_data.get('attendee-name-{}'.format(k.replace('_', '-')), '')
                                    for k, l, w in scheme['fields']
                                }
                        if self.event.settings.attendee_emails_asked and 'email' in self._widget_data:
                            cp.attendee_email = self._widget_data.get('email')
                        # Pre-fill question answers passed in through the widget
                        cp._answers = {}
                        for k, v in self._widget_data.items():
                            if not k.startswith('question-'):
                                continue
                            q = cp.item.questions.filter(ask_during_checkin=False, identifier__iexact=k[9:]).first()
                            if q:
                                try:
                                    cp._answers[q] = q.clean_answer(v)
                                except ValidationError:
                                    pass
                        if op.bundled:
                            cp.save()  # Needs to be in the database already so we have a PK that we can reference
                            for b in op.bundled:
                                for j in range(b.count):
                                    new_cart_positions.append(CartPosition(
                                        event=self.event, item=b.item, variation=b.variation,
                                        price=b.price.gross, expires=self._expiry, cart_id=self.cart_id,
                                        voucher=None, addon_to=cp,
                                        subevent=b.subevent, includes_tax=b.includes_tax, is_bundled=True
                                    ))
                        new_cart_positions.append(cp)
                elif isinstance(op, self.ExtendOperation):
                    if op.seat and not op.seat.is_available(ignore_cart=op.position, sales_channel=self._sales_channel,
                                                           ignore_voucher_id=op.position.voucher_id):
                        err = err or error_messages['seat_unavailable']
                        op.position.addons.all().delete()
                        op.position.delete()
                    elif available_count == 1:
                        op.position.expires = self._expiry
                        op.position.price = op.price.gross
                        if op.price_before_voucher is not None:
                            op.position.price_before_voucher = op.price_before_voucher.gross
                        try:
                            op.position.save(force_update=True)
                        except DatabaseError:
                            # Best effort... The position might have been deleted in the meantime!
                            pass
                    elif available_count == 0:
                        op.position.addons.all().delete()
                        op.position.delete()
                    else:
                        raise AssertionError("ExtendOperation cannot affect more than one item")
            elif isinstance(op, self.VoucherOperation):
                if vouchers_ok[op.voucher] < 1:
                    if iop == 0:
                        raise CartError(error_messages['voucher_redeemed'])
                    else:
                        # We fail silently if we could only apply the voucher to part of the cart, since that might
                        # be expected
                        continue
                op.position.price_before_voucher = op.position.price
                op.position.price = op.price.gross
                op.position.voucher = op.voucher
                op.position.save()
                vouchers_ok[op.voucher] -= 1
        # Positions with answers must be saved one-by-one so the answers can
        # reference them; the rest is bulk-created
        for p in new_cart_positions:
            if getattr(p, '_answers', None):
                if not p.pk:  # We stored some to the database already before
                    p.save()
                _save_answers(p, {}, p._answers)
        CartPosition.objects.bulk_create([p for p in new_cart_positions if not getattr(p, '_answers', None) and not p.pk])
        return err
def _require_locking(self):
if self._voucher_use_diff:
# If any vouchers are used, we lock to make sure we don't redeem them to often
return True
if self._quota_diff and any(q.size is not None for q in self._quota_diff):
# If any quotas are affected that are not unlimited, we lock
return True
if any(getattr(o, 'seat', False) for o in self._operations):
return True
return False
    def commit(self):
        """Validate and apply all queued operations in one database transaction.

        Takes the event lock when quotas, vouchers or seats are affected.
        Raises CartError when any (partial) failure occurred.
        """
        self._check_presale_dates()
        self._check_max_cart_size()
        self._calculate_expiry()
        err = self._delete_out_of_timeframe()
        err = self.extend_expired_positions() or err
        lockfn = NoLockManager
        if self._require_locking():
            lockfn = self.event.lock
        with lockfn() as now_dt:
            with transaction.atomic():
                # Use the lock's timestamp for all subsequent comparisons
                self.now_dt = now_dt
                self._extend_expiry_of_valid_existing_positions()
                err = self._perform_operations() or err
        if err:
            raise CartError(err)
def update_tax_rates(event: Event, cart_id: str, invoice_address: InvoiceAddress):
    """Re-evaluate tax applicability of all cart positions for the given
    invoice address and adjust their prices accordingly.

    Returns the total gross price difference caused by the update.
    """
    positions = CartPosition.objects.filter(
        cart_id=cart_id, event=event
    ).select_related('item', 'item__tax_rule')
    totaldiff = Decimal('0.00')
    for pos in positions:
        rule = pos.item.tax_rule
        if not rule:
            continue
        charge_tax = rule.tax_applicable(invoice_address)
        if pos.includes_tax and not charge_tax:
            # Tax no longer applies: strip it from the gross price.
            new_price = pos.item.tax(pos.price, base_price_is='gross').net
            pos.includes_tax = False
        elif charge_tax and not pos.includes_tax:
            # Tax now applies: add it on top of the net price.
            new_price = pos.item.tax(pos.price, base_price_is='net').gross
            pos.includes_tax = True
        else:
            continue
        totaldiff += new_price - pos.price
        pos.price = new_price
        pos.save(update_fields=['price', 'includes_tax'])
    return totaldiff
def get_fees(event, request, total, invoice_address, provider, positions):
    """Compute the list of OrderFee objects for the given cart total:
    plugin-provided fees, gift card deductions and payment provider fees.

    May mutate the cart session (dropping gift cards of the wrong test mode).
    """
    from pretix.presale.views.cart import cart_session
    fees = []
    # Fees contributed by plugins via signal
    for recv, resp in fee_calculation_for_cart.send(sender=event, request=request, invoice_address=invoice_address,
                                                    total=total, positions=positions):
        if resp:
            fees += resp
    total = total + sum(f.value for f in fees)
    cs = cart_session(request)
    if cs.get('gift_cards'):
        gcs = cs['gift_cards']
        gc_qs = event.organizer.accepted_gift_cards.filter(pk__in=cs.get('gift_cards'), currency=event.currency)
        summed = 0
        for gc in gc_qs:
            if gc.testmode != event.testmode:
                # Gift card belongs to the wrong test mode: drop it from the session
                gcs.remove(gc.pk)
                continue
            fval = Decimal(gc.value)  # TODO: don't require an extra query
            # Never deduct more than what is left of the total
            fval = min(fval, total - summed)
            if fval > 0:
                total -= fval
                summed += fval
                fees.append(OrderFee(
                    fee_type=OrderFee.FEE_TYPE_GIFTCARD,
                    internal_type='giftcard',
                    description=gc.secret,
                    value=-1 * fval,
                    tax_rate=Decimal('0.00'),
                    tax_value=Decimal('0.00'),
                    tax_rule=TaxRule.zero()
                ))
        cs['gift_cards'] = gcs
    if provider and total != 0:
        provider = event.get_payment_providers().get(provider)
        if provider:
            payment_fee = provider.calculate_fee(total)
            if payment_fee:
                payment_fee_tax_rule = event.settings.tax_rate_default or TaxRule.zero()
                if payment_fee_tax_rule.tax_applicable(invoice_address):
                    payment_fee_tax = payment_fee_tax_rule.tax(payment_fee, base_price_is='gross')
                    fees.append(OrderFee(
                        fee_type=OrderFee.FEE_TYPE_PAYMENT,
                        value=payment_fee,
                        tax_rate=payment_fee_tax.rate,
                        tax_value=payment_fee_tax.tax,
                        tax_rule=payment_fee_tax_rule
                    ))
                else:
                    fees.append(OrderFee(
                        fee_type=OrderFee.FEE_TYPE_PAYMENT,
                        value=payment_fee,
                        tax_rate=Decimal('0.00'),
                        tax_value=Decimal('0.00'),
                        tax_rule=payment_fee_tax_rule
                    ))
    return fees
@app.task(base=ProfiledEventTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def add_items_to_cart(self, event: int, items: List[dict], cart_id: str=None, locale='en',
                      invoice_address: int=None, widget_data=None, sales_channel='web') -> None:
    """
    Adds a list of items to a user's cart.

    :param event: The event ID in question
    :param items: A list of dicts with the keys item, variation, count, custom_price, voucher, seat ID
    :param cart_id: Session ID of a guest
    :raises CartError: On any error that occurred
    """
    with language(locale):
        # Resolve the referenced invoice address, if any; False means "none".
        address = False
        if invoice_address:
            try:
                with scopes_disabled():
                    address = InvoiceAddress.objects.get(pk=invoice_address)
            except InvoiceAddress.DoesNotExist:
                pass
        try:
            try:
                manager = CartManager(event=event, cart_id=cart_id, invoice_address=address,
                                      widget_data=widget_data, sales_channel=sales_channel)
                manager.add_new_items(items)
                manager.commit()
            except LockTimeoutException:
                # The event is locked by someone else; let celery retry shortly.
                self.retry()
        except (MaxRetriesExceededError, LockTimeoutException):
            raise CartError(error_messages['busy'])
@app.task(base=ProfiledEventTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def apply_voucher(self, event: Event, voucher: str, cart_id: str=None, locale='en', sales_channel='web') -> None:
    """
    Applies a voucher code to the applicable positions in a user's cart.

    :param event: The event ID in question
    :param voucher: A voucher code
    :param cart_id: Session ID of a guest
    :raises CartError: On any error that occurred
    """
    # NOTE: the previous docstring was copy-pasted from the removal task and
    # documented a nonexistent ``session`` parameter; fixed above.
    with language(locale):
        try:
            try:
                cm = CartManager(event=event, cart_id=cart_id, sales_channel=sales_channel)
                cm.apply_voucher(voucher)
                cm.commit()
            except LockTimeoutException:
                # The event is locked by someone else; let celery retry shortly.
                self.retry()
        except (MaxRetriesExceededError, LockTimeoutException):
            raise CartError(error_messages['busy'])
@app.task(base=ProfiledEventTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def remove_cart_position(self, event: Event, position: int, cart_id: str=None, locale='en', sales_channel='web') -> None:
    """
    Removes a single position from a user's cart.

    :param event: The event ID in question
    :param position: A cart position ID
    :param cart_id: Session ID of a guest
    :raises CartError: On any error that occurred
    """
    # NOTE: the previous docstring claimed this removes "a list of items" and
    # documented a nonexistent ``session`` parameter; fixed above.
    with language(locale):
        try:
            try:
                cm = CartManager(event=event, cart_id=cart_id, sales_channel=sales_channel)
                cm.remove_item(position)
                cm.commit()
            except LockTimeoutException:
                # The event is locked by someone else; let celery retry shortly.
                self.retry()
        except (MaxRetriesExceededError, LockTimeoutException):
            raise CartError(error_messages['busy'])
@app.task(base=ProfiledEventTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def clear_cart(self, event: Event, cart_id: str=None, locale='en', sales_channel='web') -> None:
    """
    Removes all items from a user's cart.

    :param event: The event ID in question
    :param cart_id: Session ID of a guest
    :raises CartError: On any error that occurred
    """
    # NOTE: the previous docstring was copy-pasted from the removal task and
    # documented a nonexistent ``session`` parameter; fixed above.
    with language(locale):
        try:
            try:
                cm = CartManager(event=event, cart_id=cart_id, sales_channel=sales_channel)
                cm.clear()
                cm.commit()
            except LockTimeoutException:
                # The event is locked by someone else; let celery retry shortly.
                self.retry()
        except (MaxRetriesExceededError, LockTimeoutException):
            raise CartError(error_messages['busy'])
@app.task(base=ProfiledEventTask, bind=True, max_retries=5, default_retry_delay=1, throws=(CartError,))
def set_cart_addons(self, event: Event, addons: List[dict], cart_id: str=None, locale='en',
                    invoice_address: int=None, sales_channel='web') -> None:
    """
    Sets the add-on products chosen for the positions in a user's cart.

    :param event: The event ID in question
    :param addons: A list of dicts with the keys addon_to, item, variation
    :param cart_id: Session ID of a guest
    :raises CartError: On any error that occurred
    """
    # NOTE: the previous docstring claimed this removes items and documented a
    # nonexistent ``session`` parameter; fixed above.
    with language(locale):
        # Resolve the referenced invoice address, if any; False means "none".
        ia = False
        if invoice_address:
            try:
                with scopes_disabled():
                    ia = InvoiceAddress.objects.get(pk=invoice_address)
            except InvoiceAddress.DoesNotExist:
                pass
        try:
            try:
                cm = CartManager(event=event, cart_id=cart_id, invoice_address=ia, sales_channel=sales_channel)
                cm.set_addons(addons)
                cm.commit()
            except LockTimeoutException:
                # The event is locked by someone else; let celery retry shortly.
                self.retry()
        except (MaxRetriesExceededError, LockTimeoutException):
            raise CartError(error_messages['busy'])
@receiver(checkout_confirm_messages, dispatch_uid="cart_confirm_messages")
def confirm_messages(sender, *args, **kwargs):
    """Contribute the event's configured confirmation text to the checkout
    confirmation step; returns an empty dict when none is configured."""
    confirm_text = sender.settings.confirm_text
    if confirm_text:
        return {'confirm_text': rich_text(str(confirm_text))}
    return {}
| 47.823434 | 152 | 0.578375 |
d2f23b7d3121207be55d1e274d0dbfd666691cad | 119 | py | Python | monitor/users/admin.py | SunWind2000/battery-monitor | ec8c58382389dc5bfe791aee0ab609a3c514ed8a | [
"MIT"
] | 1 | 2021-10-20T16:19:15.000Z | 2021-10-20T16:19:15.000Z | monitor/users/admin.py | SunWind2000/battery-monitor | ec8c58382389dc5bfe791aee0ab609a3c514ed8a | [
"MIT"
] | 8 | 2020-02-12T00:05:43.000Z | 2021-09-22T17:52:01.000Z | monitor/users/admin.py | SunWind2000/battery-monitor | ec8c58382389dc5bfe791aee0ab609a3c514ed8a | [
"MIT"
] | null | null | null | from django.contrib import admin
from users.models import User
# Expose the project's custom User model in Django's admin interface.
admin.site.register(User)
| 19.833333 | 32 | 0.806723 |
74abfe03e9384f6d0ca1d8f193e746aca3025f98 | 4,766 | py | Python | tests/test_io.py | cclauss/vergeml | 3403574db9c1df232809ddf85bbc415c60d35c7c | [
"MIT"
] | 324 | 2018-10-28T19:29:47.000Z | 2020-01-24T20:22:07.000Z | tests/test_io.py | cclauss/vergeml | 3403574db9c1df232809ddf85bbc415c60d35c7c | [
"MIT"
] | 8 | 2018-10-30T10:57:19.000Z | 2019-06-05T10:21:30.000Z | tests/test_io.py | cclauss/vergeml | 3403574db9c1df232809ddf85bbc415c60d35c7c | [
"MIT"
] | 19 | 2018-10-29T18:43:03.000Z | 2019-02-21T15:08:41.000Z | from vergeml.io import SourcePlugin, Source, source, Sample
from vergeml.option import option
import random
def test_source_scan(tmpdir):
    """With no explicit split config, ten files land in 8/1/1 train/test/val."""
    _prepare_dir(tmpdir)
    src = SourceTest({'samples-dir': str(tmpdir)})
    src.begin_read_samples()
    counts = {split: src.num_samples(split) for split in ('train', 'test', 'val')}
    assert counts == {'train': 8, 'test': 1, 'val': 1}
def test_source_config_split_perc(tmpdir):
    """A percentage val-split ('20%') moves two of the ten files into val."""
    _prepare_dir(tmpdir)
    src = SourceTest({'samples-dir': str(tmpdir), 'val-split': '20%'})
    src.begin_read_samples()
    counts = {split: src.num_samples(split) for split in ('train', 'test', 'val')}
    assert counts == {'train': 7, 'test': 1, 'val': 2}
def test_source_config_split_num(tmpdir):
    """A numeric test-split reserves exactly that many samples for testing."""
    _prepare_dir(tmpdir)
    src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 4})
    src.begin_read_samples()
    counts = {split: src.num_samples(split) for split in ('train', 'test', 'val')}
    assert counts == {'train': 5, 'test': 4, 'val': 1}
def test_source_config_split_dir(tmpdir):
    """A directory test-split uses every file in that directory for testing."""
    _prepare_dir(tmpdir)
    test_dir = tmpdir.mkdir('test')
    _prepare_dir(test_dir)
    src = SourceTest({'samples-dir': str(tmpdir), 'test-split': str(test_dir)})
    src.begin_read_samples()
    counts = {split: src.num_samples(split) for split in ('train', 'test', 'val')}
    assert counts == {'train': 9, 'test': 10, 'val': 1}
def test_reproduce_hash(tmpdir):
    """hash() is deterministic for equal parameters and differs when they change."""
    _prepare_dir(tmpdir)
    src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 4})
    src.begin_read_samples()
    reference = src.hash("abc")

    # A second instance with identical configuration reproduces the hash.
    src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 4})
    src.begin_read_samples()
    assert src.hash("abc") == reference
    # Hashing twice on the same instance is stable too.
    assert src.hash("abc") == reference

    # Different parameters must yield a different hash.
    src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 3})
    src.begin_read_samples()
    assert src.hash("abc") != reference
def test_read_source(tmpdir):
    """read_samples returns the expected file contents for each split."""
    _prepare_dir(tmpdir)
    src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
    src.begin_read_samples()

    train = [s.x for s in src.read_samples('train', 0, src.num_samples('train'))]
    assert train == ['content8', 'content2', 'content9', 'content3', 'content5', 'content7']
    assert len(train) == 6

    # NOTE(review): the original test sizes the val read by the *train* count;
    # read_samples slices, so only the two val samples come back anyway.
    val = [s.x for s in src.read_samples('val', 0, src.num_samples('train'))]
    assert val == ['content0', 'content1']
    assert len(val) == 2

    test = [s.x for s in src.read_samples('test', 0, src.num_samples('test'))]
    assert test == ['content4', 'content6']
    assert len(test) == 2
def test_repeatable_split(tmpdir):
    """The file-to-split assignment is stable across runs."""
    _prepare_dir(tmpdir)
    src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
    src.begin_read_samples()

    names = [s.meta['filename'] for s in src.read_samples('train', 0, src.num_samples('train'))]
    assert names == ['file8.test', 'file2.test', 'file9.test', 'file3.test', 'file5.test', 'file7.test']

    names = [s.meta['filename'] for s in src.read_samples('val', 0, src.num_samples('train'))]
    assert names == ['file0.test', 'file1.test']

    names = [s.meta['filename'] for s in src.read_samples('test', 0, src.num_samples('test'))]
    assert names == ['file4.test', 'file6.test']
@source('test-source', 'A test source.', input_patterns="**/*.test")
class SourceTest(SourcePlugin):
    """Minimal SourcePlugin used by these tests: reads *.test text files."""

    def __init__(self, args: dict={}):
        # NOTE(review): mutable default kept to preserve the public signature.
        self.files = None
        super().__init__(args)

    def begin_read_samples(self):
        # Scan and split only once; repeated calls are no-ops.
        if self.files:
            return
        self.files = self.scan_and_split_files()

    def num_samples(self, split: str) -> int:
        return len(self.files[split])

    def read_samples(self, split, index, n=1):
        samples = []
        for filename, meta in self.files[split][index:index + n]:
            # Seed per filename so per-sample randomness is reproducible.
            rng = random.Random(str(self.random_seed) + meta['filename'])
            samples.append(Sample(self.read_file(filename), None, meta.copy(), rng))
        return samples

    def read_file(self, path):
        with open(path, "r") as f:
            return f.read()

    def transform(self, sample):
        sample.x = list(sample)
        sample.y = None
        return sample

    def hash(self, state: str) -> str:
        # Mix the scanned file list into the configuration hash.
        return super().hash(state + self.hash_files(self.files))
def _prepare_dir(tmpdir):
for i in range(0, 10):
p = tmpdir.join(f"file{i}.test")
p.write("content" + str(i))
| 34.536232 | 108 | 0.638481 |
30003202af42ddc3c47e7565dbac682877a3904c | 2,339 | py | Python | tests/storage/test_storage.py | pir8aye/DEXBot | cb963391174850796cc5cf76e4c73eb02da8000d | [
"MIT"
] | 1 | 2021-04-22T09:18:55.000Z | 2021-04-22T09:18:55.000Z | tests/storage/test_storage.py | pir8aye/DEXBot | cb963391174850796cc5cf76e4c73eb02da8000d | [
"MIT"
] | null | null | null | tests/storage/test_storage.py | pir8aye/DEXBot | cb963391174850796cc5cf76e4c73eb02da8000d | [
"MIT"
] | 2 | 2021-02-13T10:58:33.000Z | 2022-03-04T14:01:58.000Z | import logging
import pytest
# Module-level logger for the dexbot test suite; DEBUG so storage internals show up.
log = logging.getLogger("dexbot")
log.setLevel(logging.DEBUG)
@pytest.mark.mandatory
def test_fetch_orders(storage):
    """A saved order can be fetched back, keyed by its id."""
    sample = {'id': '111', 'base': '10 CNY', 'quote': '1 BTS'}
    storage.save_order(sample)
    stored = storage.fetch_orders()
    # fetch_orders() returns a dict mapping order id -> order
    assert stored[sample['id']] == sample
@pytest.mark.mandatory
def test_fetch_orders_extended(storage):
    """Extended fetch supports virtual/real filters, custom tags, and ids-only mode."""
    sample = {'id': '111', 'base': '10 CNY', 'quote': '1 BTS'}
    tag = 'foo bar'
    storage.save_order_extended(sample, virtual=True, custom=tag)

    assert len(storage.fetch_orders_extended(only_real=True)) == 0
    assert len(storage.fetch_orders_extended(only_virtual=True)) == 1
    assert len(storage.fetch_orders_extended(custom=tag)) == 1
    assert storage.fetch_orders_extended(return_ids_only=True) == ['111']

    rows = storage.fetch_orders_extended()
    assert isinstance(rows, list)
    row = rows[0]
    assert row['custom'] == 'foo bar'
    assert row['virtual'] is True
    assert row['order'] == sample
@pytest.mark.mandatory
def test_clear_orders(storage):
    """clear_orders() removes everything previously saved."""
    sample = {'id': '111', 'base': '10 CNY', 'quote': '1 BTS'}
    storage.save_order(sample)
    storage.clear_orders()
    assert storage.fetch_orders() is None
@pytest.mark.mandatory
def test_clear_orders_extended(storage):
    """Extended clear honors the same virtual/custom filters as fetch."""
    sample = {'id': '111', 'base': '10 CNY', 'quote': '1 BTS'}

    storage.save_order_extended(sample, virtual=True)
    storage.clear_orders_extended(only_virtual=True)
    assert storage.fetch_orders_extended() == []

    storage.save_order_extended(sample, custom='foo')
    storage.clear_orders_extended(custom='foo')
    assert storage.fetch_orders_extended() == []
@pytest.mark.mandatory
def test_remove_order(storage):
    """remove_order() accepts the full order dict."""
    sample = {'id': '111', 'base': '10 CNY', 'quote': '1 BTS'}
    storage.save_order(sample)
    storage.remove_order(sample)
    assert storage.fetch_orders() is None
@pytest.mark.mandatory
def test_remove_order_by_id(storage):
    """remove_order() also accepts just the order id."""
    sample = {'id': '111', 'base': '10 CNY', 'quote': '1 BTS'}
    storage.save_order(sample)
    storage.remove_order(sample['id'])
    assert storage.fetch_orders() is None
| 30.376623 | 65 | 0.689611 |
f751b039578a7c622060bbd33808632d2dc01c69 | 8,787 | py | Python | tests/h/routes_test.py | julien-cheng/h | 36c8ec044725720cf36f0986cdf025395aca8929 | [
"BSD-2-Clause"
] | 2 | 2019-08-04T07:22:11.000Z | 2020-07-17T05:01:41.000Z | tests/h/routes_test.py | 11-eleven-11/h | 91c7a4504ad7471ed3e30246763a03e6c1cc531b | [
"BSD-2-Clause"
] | null | null | null | tests/h/routes_test.py | 11-eleven-11/h | 91c7a4504ad7471ed3e30246763a03e6c1cc531b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from mock import Mock, call
from h.routes import includeme
def test_includeme():
    """Snapshot-test every route registered by h.routes.includeme.

    The expected list must match the registration order exactly; update it
    by running the tests with -vv and copying the new value.
    """
    config = Mock(spec_set=["add_route"])
    includeme(config)
    # This may look like a ridiculous test, but the cost of keeping it
    # up-to-date is hopefully pretty low (run the tests with -vv, copy the new
    # expected value, strip out any Unicode prefixes) and it serves as a check
    # to ensure that any changes made to the routes were intended.
    assert config.add_route.mock_calls == [
        call("index", "/"),
        call("robots", "/robots.txt"),
        call("via_redirect", "/via"),
        call("login", "/login"),
        call("logout", "/logout"),
        call("signup", "/signup"),
        call("activate", "/activate/{id}/{code}"),
        call("forgot_password", "/forgot-password"),
        call("account_reset", "/account/reset"),
        call("account_reset_with_code", "/account/reset/{code}"),
        call("account", "/account/settings"),
        call("account_profile", "/account/profile"),
        call("account_notifications", "/account/settings/notifications"),
        call("account_developer", "/account/developer"),
        call("claim_account_legacy", "/claim_account/{token}"),
        call("dismiss_sidebar_tutorial", "/app/dismiss_sidebar_tutorial"),
        call("activity.search", "/search"),
        call(
            "activity.user_search",
            "/users/{username}",
            factory="h.traversal:UserRoot",
            traverse="/{username}",
        ),
        # Admin pages.
        call("admin.index", "/admin/"),
        call("admin.admins", "/admin/admins"),
        call("admin.badge", "/admin/badge"),
        call("admin.features", "/admin/features"),
        call("admin.cohorts", "/admin/features/cohorts"),
        call("admin.cohorts_edit", "/admin/features/cohorts/{id}"),
        call("admin.groups", "/admin/groups"),
        call("admin.groups_create", "/admin/groups/new"),
        call(
            "admin.groups_delete",
            "/admin/groups/delete/{id}",
            factory="h.traversal.GroupRoot",
            traverse="/{id}",
        ),
        call(
            "admin.groups_edit",
            "/admin/groups/{id}",
            factory="h.traversal.GroupRoot",
            traverse="/{id}",
        ),
        call("admin.mailer", "/admin/mailer"),
        call("admin.mailer_test", "/admin/mailer/test"),
        call("admin.nipsa", "/admin/nipsa"),
        call("admin.oauthclients", "/admin/oauthclients"),
        call("admin.oauthclients_create", "/admin/oauthclients/new"),
        call(
            "admin.oauthclients_edit",
            "/admin/oauthclients/{id}",
            factory="h.traversal.AuthClientRoot",
            traverse="/{id}",
        ),
        call("admin.organizations", "/admin/organizations"),
        call("admin.organizations_create", "/admin/organizations/new"),
        call(
            "admin.organizations_delete",
            "/admin/organizations/delete/{pubid}",
            factory="h.traversal.OrganizationRoot",
            traverse="/{pubid}",
        ),
        call(
            "admin.organizations_edit",
            "/admin/organizations/{pubid}",
            factory="h.traversal.OrganizationRoot",
            traverse="/{pubid}",
        ),
        call("admin.staff", "/admin/staff"),
        call("admin.users", "/admin/users"),
        call("admin.users_activate", "/admin/users/activate"),
        call("admin.users_delete", "/admin/users/delete"),
        call("admin.users_rename", "/admin/users/rename"),
        call(
            "annotation",
            "/a/{id}",
            factory="h.traversal:AnnotationRoot",
            traverse="/{id}",
        ),
        call("stream", "/stream"),
        call("stream.user_query", "/u/{user}"),
        call("stream.tag_query", "/t/{tag}"),
        call("assets", "/assets/*subpath"),
        # JSON API routes.
        call("api.index", "/api/"),
        call("api.links", "/api/links"),
        call(
            "api.annotations", "/api/annotations", factory="h.traversal:AnnotationRoot"
        ),
        call(
            "api.annotation",
            "/api/annotations/{id:[A-Za-z0-9_-]{20,22}}",
            factory="h.traversal:AnnotationRoot",
            traverse="/{id}",
        ),
        call(
            "api.annotation_flag",
            "/api/annotations/{id:[A-Za-z0-9_-]{20,22}}/flag",
            factory="h.traversal:AnnotationRoot",
            traverse="/{id}",
        ),
        call(
            "api.annotation_hide",
            "/api/annotations/{id:[A-Za-z0-9_-]{20,22}}/hide",
            factory="h.traversal:AnnotationRoot",
            traverse="/{id}",
        ),
        call(
            "api.annotation.jsonld",
            "/api/annotations/{id:[A-Za-z0-9_-]{20,22}}.jsonld",
            factory="h.traversal:AnnotationRoot",
            traverse="/{id}",
        ),
        call("api.groups", "/api/groups", factory="h.traversal.GroupRoot"),
        call(
            "api.group_upsert",
            "/api/groups/{id}",
            request_method="PUT",
            factory="h.traversal.GroupUpsertRoot",
            traverse="/{id}",
        ),
        call(
            "api.group",
            "/api/groups/{id}",
            request_method=("GET", "PATCH"),
            factory="h.traversal.GroupRoot",
            traverse="/{id}",
        ),
        call("api.profile", "/api/profile", factory="h.traversal.ProfileRoot"),
        call("api.profile_groups", "/api/profile/groups"),
        call("api.debug_token", "/api/debug-token"),
        call(
            "api.group_members",
            "/api/groups/{pubid}/members",
            factory="h.traversal.GroupRoot",
            traverse="/{pubid}",
        ),
        call(
            "api.group_member",
            "/api/groups/{pubid}/members/{userid}",
            factory="h.traversal.GroupRoot",
            traverse="/{pubid}",
        ),
        call("api.search", "/api/search"),
        call("api.users", "/api/users", factory="h.traversal.UserRoot"),
        call(
            "api.user",
            "/api/users/{username}",
            factory="h.traversal.UserRoot",
            traverse="/{username}",
        ),
        call("badge", "/api/badge"),
        call("token", "/api/token"),
        call("oauth_authorize", "/oauth/authorize"),
        call("oauth_revoke", "/oauth/revoke"),
        call("sidebar_app", "/app.html"),
        call("embed", "/embed.js"),
        call("stream_atom", "/stream.atom"),
        call("stream_rss", "/stream.rss"),
        call(
            "organization_logo",
            "/organizations/{pubid}/logo",
            factory="h.traversal.OrganizationLogoRoot",
            traverse="/{pubid}",
        ),
        call("group_create", "/groups/new"),
        call(
            "group_edit",
            "/groups/{pubid}/edit",
            factory="h.traversal.GroupRoot",
            traverse="/{pubid}",
        ),
        call(
            "group_read",
            "/groups/{pubid}/{slug:[^/]*}",
            factory="h.traversal.GroupRoot",
            traverse="/{pubid}",
        ),
        call(
            "group_read_noslug",
            "/groups/{pubid}",
            factory="h.traversal.GroupRoot",
            traverse="/{pubid}",
        ),
        call("help", "/docs/help"),
        call("onboarding", "/welcome/"),
        call("custom_onboarding", "/welcome/{slug}"),
        call("unsubscribe", "/notification/unsubscribe/{token}"),
        call("status", "/_status"),
        # Static marketing-site redirects.
        call("about", "/about/", static=True),
        call("bioscience", "/bioscience/", static=True),
        call("blog", "/blog/", static=True),
        call(
            "chrome-extension",
            "https://chrome.google.com/webstore/detail/bjfhmglciegochdpefhhlphglcehbmek",
            static=True,
        ),
        call("contact", "/contact/", static=True),
        call("contribute", "/contribute/", static=True),
        call("education", "/education/", static=True),
        call("for-publishers", "/for-publishers/", static=True),
        call("fund", "/fund/", static=True),
        call("help-center", "/help/", static=True),
        call("hypothesis-github", "https://github.com/hypothesis", static=True),
        call("hypothesis-twitter", "https://twitter.com/hypothes_is", static=True),
        call("jobs", "/jobs/", static=True),
        call("press", "/press/", static=True),
        call("privacy", "/privacy/", static=True),
        call("roadmap", "/roadmap/", static=True),
        call("team", "/team/", static=True),
        call("terms-of-service", "/terms-of-service/", static=True),
        call(
            "wordpress-plugin", "https://wordpress.org/plugins/hypothesis/", static=True
        ),
    ]
| 37.551282 | 89 | 0.528963 |
6de75f3a0de7216b50e97803371f07b6e2128543 | 4,182 | py | Python | script/numerate.py | markus-schoen/arcgis-pro-geometry | 7f65515366535162d2eab461f4254dd06507e53e | [
"Apache-2.0"
] | null | null | null | script/numerate.py | markus-schoen/arcgis-pro-geometry | 7f65515366535162d2eab461f4254dd06507e53e | [
"Apache-2.0"
] | null | null | null | script/numerate.py | markus-schoen/arcgis-pro-geometry | 7f65515366535162d2eab461f4254dd06507e53e | [
"Apache-2.0"
] | null | null | null | # SCRIPT --------------------------------------------------------------------------------------------------------------
# Numerate all features by it's coordinates.
# To store the results, this method creates a short integer field. The lowest value is 1.
# ---------------------------------------------------------------------------------------------------------------------
# LICENSE -------------------------------------------------------------------------------------------------------------
# Copyright 2021 by Markus Schön
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------------------------------------------------------
# CREDITS -------------------------------------------------------------------------------------------------------------
# Thanks to all developers who created the used modules.
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# TODO /
# ---------------------------------------------------------------------------------------------------------------------
# MODULES -------------------------------------------------------------------------------------------------------------
import arcpy
from Geometry import Geometry
# ---------------------------------------------------------------------------------------------------------------------
# GENERAL INFORMATION -------------------------------------------------------------------------------------------------
__author__ = 'Markus Schön'
__version__ = '1.0.0'
__copyright__ = 'Copyright 2021 by Markus Schön'
__license__ = 'Apache License, Version 2.0'
# ---------------------------------------------------------------------------------------------------------------------
# VARIABLES -----------------------------------------------------------------------------------------------------------
# Script tool parameters (supplied via the ArcGIS Pro tool dialog):
point_fc = arcpy.GetParameterAsText(0)  # input point feature class to numerate
sort_by = arcpy.GetParameterAsText(1)  # coordinate sort order passed to Geometry.numerate
id_field_name = arcpy.GetParameterAsText(2)  # name of the short-integer field receiving the numbers
# ---------------------------------------------------------------------------------------------------------------------
# PATHS ---------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# CLASSES -------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# FUNCTIONS -----------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# PREPARATION ---------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# MAIN PROGRAM --------------------------------------------------------------------------------------------------------
# Numerate all features by coordinates; Geometry is used as a context manager
# so its resources are released when the script finishes.
with Geometry(point_fc) as point_fc_geom:
    point_fc_geom.numerate(sort_by=sort_by, field_name=id_field_name)
# ---------------------------------------------------------------------------------------------------------------------
| 55.76 | 120 | 0.246055 |
ba5604d6cbe8921b85bcd001827068b9c4bd6569 | 6,928 | py | Python | data/p3BR/R2/benchmark/startQiskit_Class284.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_Class284.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_Class284.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=60
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings position-by-position and return the result
    REVERSED (least-significant bit first, matching qiskit's bit order)."""
    length = len(s)
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(length)]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the oracle O_f as a multi-controlled Toffoli for every input
    with f(input) == '1', using X-conjugation to match '0' control bits."""
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")

    def _flip_zero_controls(bits):
        # Apply X to every control whose desired value is '0', so the MCT
        # fires exactly on the bit pattern `bits`.
        for pos, bit in enumerate(bits):
            if bit == "0":
                oracle.x(controls[pos])

    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) == "1":
            _flip_zero_controls(bits)
            oracle.mct(controls, target[0], None, mode='noancilla')
            _flip_zero_controls(bits)
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the (mutated) Bernstein-Vazirani circuit for oracle f on n qubits.

    The long run of numbered gates below looks auto-generated (note the
    ``# number=…`` tags); its exact order defines the benchmark and must
    not be changed.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)  # NOTE(review): b is computed but never used in this function
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[2]) # number=38
    prog.cz(input_qubit[0],input_qubit[2]) # number=39
    prog.h(input_qubit[2]) # number=40
    prog.cx(input_qubit[0],input_qubit[2]) # number=31
    prog.h(input_qubit[2]) # number=42
    prog.cz(input_qubit[0],input_qubit[2]) # number=43
    prog.h(input_qubit[2]) # number=44
    prog.h(input_qubit[2]) # number=48
    prog.cz(input_qubit[0],input_qubit[2]) # number=49
    prog.h(input_qubit[2]) # number=50
    prog.cx(input_qubit[0],input_qubit[2]) # number=54
    prog.x(input_qubit[2]) # number=55
    prog.cx(input_qubit[0],input_qubit[2]) # number=56
    prog.cx(input_qubit[0],input_qubit[2]) # number=47
    prog.cx(input_qubit[0],input_qubit[2]) # number=37
    prog.h(input_qubit[2]) # number=51
    prog.cz(input_qubit[0],input_qubit[2]) # number=52
    prog.h(input_qubit[2]) # number=53
    prog.h(input_qubit[2]) # number=25
    prog.cz(input_qubit[0],input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=27
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.rx(0.17592918860102857,input_qubit[2]) # number=34
    prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=18
    prog.cz(input_qubit[2],input_qubit[1]) # number=19
    prog.h(input_qubit[1]) # number=20
    prog.y(input_qubit[1]) # number=14
    prog.h(input_qubit[1]) # number=22
    prog.cz(input_qubit[2],input_qubit[1]) # number=23
    prog.h(input_qubit[1]) # number=24
    prog.cx(input_qubit[2],input_qubit[0]) # number=57
    prog.z(input_qubit[2]) # number=58
    prog.cx(input_qubit[2],input_qubit[0]) # number=59
    prog.z(input_qubit[1]) # number=41
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    prog.x(input_qubit[2]) # number=21
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* and return a dict mapping '|bits>' kets to amplitudes."""
    backend = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, backend).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    return {
        "|" + np.binary_repr(index, n_qubits) + ">": amplitudes[index]
        for index in range(2 ** n_qubits)
    }
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run *prog* on the named simulator backend and return a summary dict:
    measurement counts, the full statevector, the most frequent outcome
    (bit-reversed) as 'a', and the pass-through bias 'b'."""
    quantum_state = get_statevector(prog)
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    sim_result = execute(prog, backend, shots=shots).result()
    counts = sim_result.get_counts()
    # Most common bitstring, reversed to match the helpers' bit ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """Oracle function f(x) = 011 . x + 1 (mod 2)."""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """Oracle function f(x) = 000 . x + 0 (mod 2), i.e. constant zero."""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """Oracle function f(x) = 111 . x + 1 (mod 2)."""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Hidden string a and bias b define the Bernstein-Vazirani oracle
    # f(x) = a.x + b (mod 2).
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_Class284.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('statevector_simulator')
    # Transpile for a fake 5-qubit device, then append the extra benchmark gates.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    # Dump counts, depth and the circuit drawing into the benchmark CSV.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 31.779817 | 140 | 0.638279 |
7ac1ee0a1d3bf9c85e52c7c2a4200e13e40b65ac | 1,246 | py | Python | Draft_Phuong/ternary_ver2/test.py | phuong27102000/NTRU_HRSS_KEM_SV | fe4fd095134a41f4131a3aa953197e3933b303ad | [
"MIT"
] | null | null | null | Draft_Phuong/ternary_ver2/test.py | phuong27102000/NTRU_HRSS_KEM_SV | fe4fd095134a41f4131a3aa953197e3933b303ad | [
"MIT"
] | null | null | null | Draft_Phuong/ternary_ver2/test.py | phuong27102000/NTRU_HRSS_KEM_SV | fe4fd095134a41f4131a3aa953197e3933b303ad | [
"MIT"
] | null | null | null | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.triggers import Timer
from secrets import token_bytes as prg
from ter_model import ter_model
# Module-level report file: opened at import time, written by the test
# coroutine below, closed after the test is registered. NOTE(review): the
# close() runs at import, before cocotb executes the coroutine — presumably
# cocotb keeps the test alive in the same process; confirm the handle is
# still valid when the scheduler runs the test.
report = open('report.txt','w')
@cocotb.test()
async def test_ternary(dut):
    """Try accessing the design."""
    dut._log.info("Running test...")
    # Drive a 1 ns clock on dut.clk for the whole test.
    cocotb.fork(Clock(dut.clk, 1, units="ns").start())
    fail = 0
    # Ten randomized trials: feed 700 random bytes, let the DUT settle,
    # then compare its output against the Python reference model.
    for i in range(10):
        dut.rst <= 1
        bit_str = prg(700)  # cryptographically random stimulus
        dut.bit_str <= int.from_bytes(bit_str, "big")
        await RisingEdge(dut.clk)
        await Timer(1, units="ns")
        dut.rst <= 0
        await Timer(1500, units="ns")  # settle time before sampling dut.out
        expect = ter_model(bit_str)  # golden model result
        try:
            if dut.out.value != expect:
                fail = 1
                report.write( "When bit_str = %X, v = %s, but i expect it = %s\n" %( dut.bit_str.value, bin( int(dut.out.value) ), bin(expect) ) )
        except:
            # dut.out held X/Z bits, so int() conversion failed.
            report.write( "When bit_str = %X, v = ...xxx, but i expect it = %s\n" %( dut.bit_str.value, bin(expect) ) )
    # Summarize pass/fail in the report file.
    if fail == 0: report.write("------VERIFICATION SUCCEED------")
    else: report.write("------VERIFICATION FAIL------")
    dut._log.info("Running test...done")
report.close()
| 34.611111 | 146 | 0.589888 |
84bc2e0e2407e158d931c8071f9d380c33a3e535 | 218 | py | Python | exercises/de/test_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/de/test_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/de/test_03_16_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
def test():
    # Accept either way of running only the tokenizer on the text.
    accepted = (
        "doc = nlp.make_doc(text)",
        "doc = nlp.tokenizer(text)",
    )
    assert any(snippet in __solution__ for snippet in accepted), (
        "Verwendest du tatsächlich nur den Tokenizer?"
    )
    __msg__.good("Sehr schön!")
| 27.25 | 54 | 0.633028 |
19aa585e14a2927e3ae96020a912f625dc293a39 | 10,307 | py | Python | model.py | atch841/one-shot-texture-segmentation | 5f3bc3d2ca103fee56f8957b22a89b8a4e032933 | [
"MIT"
] | 6 | 2019-05-08T11:16:38.000Z | 2021-01-07T19:35:58.000Z | model.py | atch841/one-shot-texture-segmentation | 5f3bc3d2ca103fee56f8957b22a89b8a4e032933 | [
"MIT"
] | null | null | null | model.py | atch841/one-shot-texture-segmentation | 5f3bc3d2ca103fee56f8957b22a89b8a4e032933 | [
"MIT"
] | 3 | 2020-07-22T02:43:47.000Z | 2022-01-17T05:33:01.000Z | import tensorflow as tf
from tensorflow.contrib import autograph
def get_model(is_training):
    """Build the one-shot texture segmentation graph.

    Three stages, each in its own variable scope:
      * 'VGG'              - truncated VGG feature extractor, run on both the
                             256x256 texture image and the 64x64 reference
                             patch with shared weights (``reuse=True``).
      * 'encoding_network' - U-Net-style decoder that fuses the VGG stages
                             back up to full resolution, again weight-shared
                             between texture and reference branches.
      * 'decoding_network' - combines VGG features, the correlation map and
                             encoded texture features into a 1-channel
                             sigmoid segmentation mask.

    Args:
        is_training: forwarded to ``resblock`` (only consumed by the
            commented-out batch-norm layers, so currently inert).

    Returns:
        (texture, ref, label, decode_mask) - the three input placeholders
        and the predicted mask tensor.
    """
    texture = tf.placeholder(tf.float32, shape=(None, 256, 256, 3), name='texture_input')
    ref = tf.placeholder(tf.float32, shape=(None, 64, 64, 3), name='ref_input')
    label = tf.placeholder(tf.float32, shape=(None, 256, 256, 1), name='label')
    with tf.variable_scope('VGG'):
        # texture vgg
        vgg1 = tf.layers.conv2d(texture, 64, 3, padding='SAME', activation=tf.nn.relu, name='conv1_1')
        # vgg1 = tf.layers.batch_normalization(vgg1, training=is_training)
        v = tf.layers.conv2d(vgg1, 64, 3, padding='SAME', activation=tf.nn.relu, name='conv1_2')
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg2 = tf.layers.conv2d(v, 128, 3, padding='SAME', activation=tf.nn.relu, name='conv2_1')
        # vgg2 = tf.layers.batch_normalization(vgg2, training=is_training)
        v = tf.layers.conv2d(vgg2, 128, 3, padding='SAME', activation=tf.nn.relu, name='conv2_2')
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg3 = tf.layers.conv2d(v, 256, 3, padding='SAME', activation=tf.nn.relu, name='conv3_1')
        # vgg3 = tf.layers.batch_normalization(vgg3, training=is_training)
        v = tf.layers.conv2d(vgg3, 256, 3, padding='SAME', activation=tf.nn.relu, name='conv3_2')
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.conv2d(v, 256, 3, padding='SAME', activation=tf.nn.relu, name='conv3_3')
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg4 = tf.layers.conv2d(v, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv4_1')
        # vgg4 = tf.layers.batch_normalization(vgg4, training=is_training)
        v = tf.layers.conv2d(vgg4, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv4_2')
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.conv2d(v, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv4_3')
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg5 = tf.layers.conv2d(v, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv5_1')
        # vgg5 = tf.layers.batch_normalization(vgg5, training=is_training)
        print('vgg:', vgg1.shape, vgg2.shape, vgg3.shape, vgg4.shape, vgg5.shape)
        # reference texture vgg -- identical stack, weights shared via reuse=True
        vgg1_ref = tf.layers.conv2d(ref, 64, 3, padding='SAME', activation=tf.nn.relu, name='conv1_1', reuse=True)
        # vgg1_ref = tf.layers.batch_normalization(vgg1_ref, training=is_training)
        v = tf.layers.conv2d(vgg1_ref, 64, 3, padding='SAME', activation=tf.nn.relu, name='conv1_2', reuse=True)
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg2_ref = tf.layers.conv2d(v, 128, 3, padding='SAME', activation=tf.nn.relu, name='conv2_1', reuse=True)
        # vgg2_ref = tf.layers.batch_normalization(vgg2_ref, training=is_training)
        v = tf.layers.conv2d(vgg2_ref, 128, 3, padding='SAME', activation=tf.nn.relu, name='conv2_2', reuse=True)
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg3_ref = tf.layers.conv2d(v, 256, 3, padding='SAME', activation=tf.nn.relu, name='conv3_1', reuse=True)
        # vgg3_ref = tf.layers.batch_normalization(vgg3_ref, training=is_training)
        v = tf.layers.conv2d(vgg3_ref, 256, 3, padding='SAME', activation=tf.nn.relu, name='conv3_2', reuse=True)
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.conv2d(v, 256, 3, padding='SAME', activation=tf.nn.relu, name='conv3_3', reuse=True)
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg4_ref = tf.layers.conv2d(v, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv4_1', reuse=True)
        # vgg4_ref = tf.layers.batch_normalization(vgg4_ref, training=is_training)
        v = tf.layers.conv2d(vgg4_ref, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv4_2', reuse=True)
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.conv2d(v, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv4_3', reuse=True)
        # v = tf.layers.batch_normalization(v, training=is_training)
        v = tf.layers.max_pooling2d(v, 2, 2, padding='SAME')
        vgg5_ref = tf.layers.conv2d(v, 512, 3, padding='SAME', activation=tf.nn.relu, name='conv5_1', reuse=True)
        # vgg5_ref = tf.layers.batch_normalization(vgg5_ref, training=is_training)
        print('vgg_ref:', vgg1_ref.shape, vgg2_ref.shape, vgg3_ref.shape, vgg4_ref.shape, vgg5_ref.shape)
    with tf.variable_scope('encoding_network'):
        print('encoding')
        # texture encode: upsample from vgg5 back up, concatenating each VGG
        # stage as a skip connection.
        v = tf.layers.conv2d(vgg5, 512, 1, padding='SAME', name='conv1')
        v = resblock(v, 512, name='res1', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg4], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 512, 1, padding='SAME', name='conv2')
        v = resblock(v, 512, name='res2', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg3], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 256, 1, padding='SAME', name='conv3')
        v = resblock(v, 256, name='res3', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg2], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 128, 1, padding='SAME', name='conv4')
        v = resblock(v, 128, name='res4', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg1], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 128, 1, padding='SAME', name='conv5')
        v = resblock(v, 128, name='res5', is_training=is_training)
        encode_texture = tf.layers.conv2d(v, 64, 1, padding='SAME', name='conv_out')
        print('encode_texture:', encode_texture.shape)
        # reference texture encode -- same tower on the _ref features, shared weights
        v = tf.layers.conv2d(vgg5_ref, 512, 1, padding='SAME', name='conv1', reuse=True)
        v = resblock(v, 512, name='res1', reuse=True, is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg4_ref], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 512, 1, padding='SAME', name='conv2', reuse=True)
        v = resblock(v, 512, name='res2', reuse=True, is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg3_ref], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 256, 1, padding='SAME', name='conv3', reuse=True)
        v = resblock(v, 256, name='res3', reuse=True, is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg2_ref], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 128, 1, padding='SAME', name='conv4', reuse=True)
        v = resblock(v, 128, name='res4', reuse=True, is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        v = tf.concat([v, vgg1_ref], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 128, 1, padding='SAME', name='conv5', reuse=True)
        v = resblock(v, 128, name='res5', reuse=True, is_training=is_training)
        encode_ref = tf.layers.conv2d(v, 64, 1, padding='SAME', name='conv_out', reuse=True)
        print('encode_ref:', encode_ref.shape)
    # Correlate the two encodings; the map is resized to match each decoder stage.
    cor = correlation(encode_texture, encode_ref)
    print('cor:', cor.shape)
    with tf.variable_scope('decoding_network'):
        print('decoding')
        d_cor = tf.image.resize_images(cor, [16, 16])
        d_texture = tf.image.resize_images(encode_texture, [16, 16])
        d_v = tf.layers.conv2d(vgg5, 64, 1, padding='SAME', name='conv1_1')
        v = tf.concat([d_v, d_cor, d_texture], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 64, 1, padding='SAME', name='conv2_1')
        v = resblock(v, 64, name='res2', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        d_cor = tf.image.resize_images(cor, [32, 32])
        d_v = tf.layers.conv2d(vgg4, 64, 1, padding='SAME', name='conv2_2')
        v = tf.concat([d_v, d_cor, v], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 64, 1, padding='SAME', name='conv3_1')
        v = resblock(v, 64, name='res3', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        d_cor = tf.image.resize_images(cor, [64, 64])
        d_v = tf.layers.conv2d(vgg3, 64, 1, padding='SAME', name='conv3_2')
        v = tf.concat([d_v, d_cor, v], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 64, 1, padding='SAME', name='conv4_1')
        v = resblock(v, 64, name='res4', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        d_cor = tf.image.resize_images(cor, [128, 128])
        d_v = tf.layers.conv2d(vgg2, 64, 1, padding='SAME', name='conv4_2')
        v = tf.concat([d_v, d_cor, v], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 64, 1, padding='SAME', name='conv5_1')
        v = resblock(v, 64, name='res5', is_training=is_training)
        v = tf.keras.layers.UpSampling2D()(v)
        # d_cor = tf.image.resize_images(cor, [256, 256])
        d_v = tf.layers.conv2d(vgg1, 64, 1, padding='SAME', name='conv5_2')
        v = tf.concat([d_v, cor, v], axis=3)
        print(v.shape)
        v = tf.layers.conv2d(v, 64, 1, padding='SAME', name='conv6_1')
        v = resblock(v, 64, name='res6', is_training=is_training)
        # Final 1-channel sigmoid mask at full 256x256 resolution.
        decode_mask = tf.layers.conv2d(v, 1, 1, padding='SAME', activation=tf.sigmoid, name='conv_out')
        print('decode_out:', decode_mask.shape)
    return texture, ref, label, decode_mask
def correlation(texture, ref):
    """Correlation map between texture and reference feature maps.

    Both inputs are L2-normalized along the channel axis, then the reference
    tensor is used as the convolution filter bank, yielding a per-position
    cosine-similarity response.
    """
    texture_n = tf.nn.l2_normalize(texture, 3)
    ref_n = tf.nn.l2_normalize(ref, 3)
    return tf.nn.conv2d(
        texture_n, ref_n, [1, 1, 1, 1], padding='SAME', name='correlation'
    )
@autograph.convert()
def norm_to_one(t):
    """Rescale `t` so each channel vector (axis 3) sums to one."""
    return t / tf.reduce_sum(t, axis=3, keepdims=True)
def resblock(input, filters, name, reuse=None, is_training=True):
    """Residual block: three 3x3 convolutions plus an identity skip.

    `is_training` is accepted for interface compatibility (it fed the
    now-disabled batch-norm layers); `reuse` is forwarded so the block can
    share weights between the texture and reference branches.
    """
    with tf.variable_scope(name):
        out = tf.layers.conv2d(input, filters, 3, padding='SAME', activation=tf.nn.relu, name='conv1', reuse=reuse)
        out = tf.layers.conv2d(out, filters, 3, padding='SAME', activation=tf.nn.relu, name='conv2', reuse=reuse)
        out = tf.layers.conv2d(out, filters, 3, padding='SAME', name='conv3', reuse=reuse)
        out = tf.add(input, out)
    return out
db97a4c3b955262d5ebb9aa0a5f46c0330b51653 | 62,844 | py | Python | saleor/graphql/meta/tests/test_meta_mutations.py | maersu/saleor | 7e1d340fe5c36eab2a1170f1567b6af43a112ea5 | [
"CC-BY-4.0"
] | 1 | 2020-09-02T00:14:04.000Z | 2020-09-02T00:14:04.000Z | saleor/graphql/meta/tests/test_meta_mutations.py | maersu/saleor | 7e1d340fe5c36eab2a1170f1567b6af43a112ea5 | [
"CC-BY-4.0"
] | 1 | 2022-02-15T03:31:12.000Z | 2022-02-15T03:31:12.000Z | saleor/graphql/meta/tests/test_meta_mutations.py | maersu/saleor | 7e1d340fe5c36eab2a1170f1567b6af43a112ea5 | [
"CC-BY-4.0"
] | null | null | null | import base64
import uuid
from unittest.mock import patch
import graphene
import pytest
from ....core.error_codes import MetadataErrorCode
from ....core.models import ModelWithMetadata
from ....invoice.models import Invoice
from ...tests.utils import assert_no_permission, get_graphql_content
# Shared metadata fixtures: one private and one public key/value pair, plus a
# second public pair for the "multiple items" test variants.
PRIVATE_KEY = "private_key"
# NOTE(review): "private_vale" looks like a typo for "private_value"; it is
# harmless because every test compares through this constant.
PRIVATE_VALUE = "private_vale"
PUBLIC_KEY = "key"
PUBLIC_KEY2 = "key2"
PUBLIC_VALUE = "value"
PUBLIC_VALUE2 = "value2"
UPDATE_PUBLIC_METADATA_MUTATION = """
mutation UpdatePublicMetadata($id: ID!, $input: [MetadataInput!]!) {
updateMetadata(
id: $id
input: $input
) {
metadataErrors{
field
code
message
}
item {
metadata{
key
value
}
...on %s{
id
}
}
}
}
"""
def execute_update_public_metadata_for_item(
    client, permissions, item_id, item_type, key=PUBLIC_KEY, value=PUBLIC_VALUE,
):
    """Post an updateMetadata mutation for one key/value pair.

    Returns the parsed GraphQL response; `permissions` may be None for an
    unauthenticated/unprivileged request.
    """
    perms = [permissions] if permissions else None
    payload = {"id": item_id, "input": [{"key": key, "value": value}]}
    raw = client.post_graphql(
        UPDATE_PUBLIC_METADATA_MUTATION % item_type, payload, permissions=perms
    )
    return get_graphql_content(raw)
def execute_update_public_metadata_for_multiple_items(
    client,
    permissions,
    item_id,
    item_type,
    key=PUBLIC_KEY,
    value=PUBLIC_VALUE,
    key2=PUBLIC_KEY2,
    value2=PUBLIC_VALUE2,
):
    """Post an updateMetadata mutation writing two key/value pairs at once
    and return the parsed GraphQL response."""
    perms = [permissions] if permissions else None
    payload = {
        "id": item_id,
        "input": [{"key": key, "value": value}, {"key": key2, "value": value2}],
    }
    raw = client.post_graphql(
        UPDATE_PUBLIC_METADATA_MUTATION % item_type, payload, permissions=perms
    )
    return get_graphql_content(raw)
def item_contains_proper_public_metadata(
    item_from_response, item, item_id, key=PUBLIC_KEY, value=PUBLIC_VALUE,
):
    """True when the response echoes the expected id and the refreshed DB
    object stores `value` under `key` in its public metadata."""
    if item_id != item_from_response["id"]:
        return False
    item.refresh_from_db()
    stored = item.get_value_from_metadata(key)
    return stored == value
def item_contains_multiple_proper_public_metadata(
    item_from_response,
    item,
    item_id,
    key=PUBLIC_KEY,
    value=PUBLIC_VALUE,
    key2=PUBLIC_KEY2,
    value2=PUBLIC_VALUE2,
):
    """True when the response id matches and the refreshed object stores
    both expected key/value pairs in its public metadata."""
    if item_id != item_from_response["id"]:
        return False
    item.refresh_from_db()
    expected = ((key, value), (key2, value2))
    return all(item.get_value_from_metadata(k) == v for k, v in expected)
def test_add_public_metadata_for_customer_as_staff(
    staff_api_client, permission_manage_users, customer_user
):
    """Staff with MANAGE_USERS can attach public metadata to a customer."""
    customer_id = graphene.Node.to_global_id("User", customer_user.pk)

    response = execute_update_public_metadata_for_item(
        staff_api_client, permission_manage_users, customer_id, "User"
    )

    item = response["data"]["updateMetadata"]["item"]
    assert item_contains_proper_public_metadata(item, customer_user, customer_id)
def test_add_public_metadata_for_customer_as_app(
app_api_client, permission_manage_users, customer_user
):
# given
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_update_public_metadata_for_item(
app_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], customer_user, customer_id
)
def test_add_multiple_public_metadata_for_customer_as_app(
app_api_client, permission_manage_users, customer_user
):
# given
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_update_public_metadata_for_multiple_items(
app_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_contains_multiple_proper_public_metadata(
response["data"]["updateMetadata"]["item"], customer_user, customer_id
)
def test_add_public_metadata_for_other_staff_as_staff(
staff_api_client, permission_manage_staff, admin_user
):
# given
assert admin_user.pk != staff_api_client.user.pk
admin_id = graphene.Node.to_global_id("User", admin_user.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_staff, admin_id, "User"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], admin_user, admin_id
)
def test_add_public_metadata_for_staff_as_app_no_permission(
    app_api_client, permission_manage_staff, admin_user
):
    """An app holding only MANAGE_STAFF must not edit a staff user's metadata."""
    # given
    admin_id = graphene.Node.to_global_id("User", admin_user.pk)
    variables = {
        "id": admin_id,
        "input": [{"key": PUBLIC_KEY, "value": PUBLIC_VALUE}],
    }
    # when
    # NOTE(review): despite the "public" in the test name, this posts the
    # *private* metadata mutation (defined elsewhere in this file) — confirm
    # whether UPDATE_PUBLIC_METADATA_MUTATION was intended here.
    response = app_api_client.post_graphql(
        UPDATE_PRIVATE_METADATA_MUTATION % "User",
        variables,
        permissions=[permission_manage_staff],
    )
    # then
    assert_no_permission(response)
@pytest.mark.parametrize(
    "input", [{"key": " ", "value": "test"}, {"key": " ", "value": ""}],
)
def test_staff_update_metadata_empty_key(
    input, staff_api_client, permission_manage_staff, admin_user
):
    """A whitespace-only metadata key is rejected with a REQUIRED error
    and no item payload is returned."""
    # given
    admin_id = graphene.Node.to_global_id("User", admin_user.pk)
    # when
    response = execute_update_public_metadata_for_item(
        staff_api_client,
        permission_manage_staff,
        admin_id,
        "User",
        input["key"],
        input["value"],
    )
    # then
    data = response["data"]["updateMetadata"]
    errors = data["metadataErrors"]
    assert not data["item"]
    assert len(errors) == 1
    assert errors[0]["code"] == MetadataErrorCode.REQUIRED.name
    assert errors[0]["field"] == "input"
def test_add_public_metadata_for_myself_as_customer(user_api_client):
# given
customer = user_api_client.user
customer_id = graphene.Node.to_global_id("User", customer.pk)
# when
response = execute_update_public_metadata_for_item(
user_api_client, None, customer_id, "User"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], customer, customer_id
)
def test_add_private_metadata_for_invoice(staff_api_client, permission_manage_orders):
# given
invoice = Invoice.objects.create(number="1/7/2020")
invoice_id = graphene.Node.to_global_id("Invoice", invoice.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_orders, invoice_id, "Invoice"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], invoice, invoice_id
)
def test_add_public_metadata_for_invoice(staff_api_client, permission_manage_orders):
# given
invoice = Invoice.objects.create(number="1/7/2020")
invoice_id = graphene.Node.to_global_id("Invoice", invoice.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_orders, invoice_id, "Invoice"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], invoice, invoice_id
)
def test_add_public_metadata_for_myself_as_staff(staff_api_client):
# given
staff = staff_api_client.user
staff_id = graphene.Node.to_global_id("User", staff.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, None, staff_id, "User"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], staff, staff_id
)
def test_add_public_metadata_for_checkout(api_client, checkout):
# given
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
response = execute_update_public_metadata_for_item(
api_client, None, checkout_id, "Checkout"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], checkout, checkout_id
)
def test_add_public_metadata_for_order(api_client, order):
# given
order_id = graphene.Node.to_global_id("Order", order.pk)
# when
response = execute_update_public_metadata_for_item(
api_client, None, order_id, "Order"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], order, order_id
)
def test_add_public_metadata_for_draft_order(api_client, draft_order):
# given
draft_order_id = graphene.Node.to_global_id("Order", draft_order.pk)
# when
response = execute_update_public_metadata_for_item(
api_client, None, draft_order_id, "Order"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], draft_order, draft_order_id
)
def test_add_public_metadata_for_attribute(
staff_api_client, permission_manage_products, color_attribute
):
# given
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_products, attribute_id, "Attribute"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], color_attribute, attribute_id
)
def test_add_public_metadata_for_category(
staff_api_client, permission_manage_products, category
):
# given
category_id = graphene.Node.to_global_id("Category", category.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_products, category_id, "Category"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], category, category_id
)
def test_add_public_metadata_for_collection(
staff_api_client, permission_manage_products, collection
):
# given
collection_id = graphene.Node.to_global_id("Collection", collection.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_products, collection_id, "Collection"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], collection, collection_id
)
def test_add_public_metadata_for_digital_content(
staff_api_client, permission_manage_products, digital_content
):
# given
digital_content_id = graphene.Node.to_global_id(
"DigitalContent", digital_content.pk
)
# when
response = execute_update_public_metadata_for_item(
staff_api_client,
permission_manage_products,
digital_content_id,
"DigitalContent",
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], digital_content, digital_content_id
)
def test_add_public_metadata_for_fulfillment(
staff_api_client, permission_manage_orders, fulfillment
):
# given
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_orders, fulfillment_id, "Fulfillment"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], fulfillment, fulfillment_id
)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_add_public_metadata_for_product(
updated_webhook_mock, staff_api_client, permission_manage_products, product
):
# given
product_id = graphene.Node.to_global_id("Product", product.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_products, product_id, "Product"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], product, product_id
)
updated_webhook_mock.assert_called_once_with(product)
def test_add_public_metadata_for_product_type(
staff_api_client, permission_manage_products, product_type
):
# given
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_products, product_type_id, "ProductType"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], product_type, product_type_id
)
def test_add_public_metadata_for_product_variant(
staff_api_client, permission_manage_products, variant
):
# given
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_products, variant_id, "ProductVariant",
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], variant, variant_id
)
def test_add_public_metadata_for_app(staff_api_client, permission_manage_apps, app):
# given
app_id = graphene.Node.to_global_id("App", app.pk)
# when
response = execute_update_public_metadata_for_item(
staff_api_client, permission_manage_apps, app_id, "App",
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"], app, app_id
)
def test_update_public_metadata_for_item(api_client, checkout):
# given
checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
checkout.save(update_fields=["metadata"])
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
response = execute_update_public_metadata_for_item(
api_client, None, checkout_id, "Checkout", value="NewMetaValue"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["updateMetadata"]["item"],
checkout,
checkout_id,
value="NewMetaValue",
)
def test_update_public_metadata_for_non_exist_item(api_client):
    """Updating metadata of an unknown checkout id yields a NOT_FOUND error."""
    raw_id = "Checkout:" + str(uuid.uuid4())
    missing_id = base64.b64encode(raw_id.encode()).decode("utf-8")

    response = execute_update_public_metadata_for_item(
        api_client, None, missing_id, "Checkout"
    )

    errors = response["data"]["updateMetadata"]["metadataErrors"]
    assert errors[0]["field"] == "id"
    assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
def test_update_public_metadata_for_item_without_meta(api_client, address):
# given
assert not issubclass(type(address), ModelWithMetadata)
address_id = graphene.Node.to_global_id("Address", address.pk)
# when
# We use "User" type inside mutation for valid graphql query with fragment
# without this we are not able to reuse UPDATE_PUBLIC_METADATA_MUTATION
response = execute_update_public_metadata_for_item(
api_client, None, address_id, "User"
)
# then
errors = response["data"]["updateMetadata"]["metadataErrors"]
assert errors[0]["field"] == "id"
assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
DELETE_PUBLIC_METADATA_MUTATION = """
mutation DeletePublicMetadata($id: ID!, $keys: [String!]!) {
deleteMetadata(
id: $id
keys: $keys
) {
metadataErrors{
field
code
}
item {
metadata{
key
value
}
...on %s{
id
}
}
}
}
"""
def execute_clear_public_metadata_for_item(
    client, permissions, item_id, item_type, key=PUBLIC_KEY,
):
    """Post a deleteMetadata mutation removing a single key and return the
    parsed GraphQL response."""
    perms = [permissions] if permissions else None
    payload = {"id": item_id, "keys": [key]}
    raw = client.post_graphql(
        DELETE_PUBLIC_METADATA_MUTATION % item_type, payload, permissions=perms
    )
    return get_graphql_content(raw)
def execute_clear_public_metadata_for_multiple_items(
    client, permissions, item_id, item_type, key=PUBLIC_KEY, key2=PUBLIC_KEY2
):
    """Post a deleteMetadata mutation removing two keys at once and return
    the parsed GraphQL response."""
    perms = [permissions] if permissions else None
    payload = {"id": item_id, "keys": [key, key2]}
    raw = client.post_graphql(
        DELETE_PUBLIC_METADATA_MUTATION % item_type, payload, permissions=perms
    )
    return get_graphql_content(raw)
def item_without_public_metadata(
    item_from_response, item, item_id, key=PUBLIC_KEY, value=PUBLIC_VALUE,
):
    """True when the response id matches and the refreshed object no longer
    holds `value` under `key` in its public metadata."""
    if item_id != item_from_response["id"]:
        return False
    item.refresh_from_db()
    return value != item.get_value_from_metadata(key)
def item_without_multiple_public_metadata(
    item_from_response,
    item,
    item_id,
    key=PUBLIC_KEY,
    value=PUBLIC_VALUE,
    key2=PUBLIC_KEY2,
    value2=PUBLIC_VALUE2,
):
    """True when the response id matches and neither expected key/value pair
    survives in the refreshed object's public metadata."""
    if item_id != item_from_response["id"]:
        return False
    item.refresh_from_db()
    cleared = ((key, value), (key2, value2))
    return all(item.get_value_from_metadata(k) != v for k, v in cleared)
def test_delete_public_metadata_for_customer_as_staff(
staff_api_client, permission_manage_users, customer_user
):
# given
customer_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
customer_user.save(update_fields=["metadata"])
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], customer_user, customer_id
)
def test_delete_public_metadata_for_customer_as_app(
app_api_client, permission_manage_users, customer_user
):
# given
customer_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
customer_user.save(update_fields=["metadata"])
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_clear_public_metadata_for_item(
app_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], customer_user, customer_id
)
def test_delete_multiple_public_metadata_for_customer_as_app(
app_api_client, permission_manage_users, customer_user
):
# given
customer_user.store_value_in_metadata(
{PUBLIC_KEY: PUBLIC_VALUE, PUBLIC_KEY2: PUBLIC_VALUE2}
)
customer_user.save(update_fields=["metadata"])
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_clear_public_metadata_for_multiple_items(
app_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_without_multiple_public_metadata(
response["data"]["deleteMetadata"]["item"], customer_user, customer_id
)
def test_delete_public_metadata_for_other_staff_as_staff(
staff_api_client, permission_manage_staff, admin_user
):
# given
assert admin_user.pk != staff_api_client.user.pk
admin_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
admin_user.save(update_fields=["metadata"])
admin_id = graphene.Node.to_global_id("User", admin_user.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_staff, admin_id, "User"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], admin_user, admin_id
)
def test_delete_public_metadata_for_staff_as_app_no_permission(
    app_api_client, permission_manage_staff, admin_user
):
    """An app holding only MANAGE_STAFF must not delete a staff user's metadata."""
    # given
    admin_user.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    admin_user.save(update_fields=["metadata"])
    admin_id = graphene.Node.to_global_id("User", admin_user.pk)
    variables = {
        "id": admin_id,
        "keys": [PRIVATE_KEY],
    }
    # when
    # NOTE(review): this seeds *public* metadata but posts the *private*
    # delete mutation with PRIVATE_KEY — confirm whether the public mutation
    # and PUBLIC_KEY were intended here.
    response = app_api_client.post_graphql(
        DELETE_PRIVATE_METADATA_MUTATION % "User",
        variables,
        permissions=[permission_manage_staff],
    )
    # then
    assert_no_permission(response)
def test_delete_public_metadata_for_myself_as_customer(user_api_client):
# given
customer = user_api_client.user
customer.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
customer.save(update_fields=["metadata"])
customer_id = graphene.Node.to_global_id("User", customer.pk)
# when
response = execute_clear_public_metadata_for_item(
user_api_client, None, customer_id, "User"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], customer, customer_id
)
def test_delete_public_metadata_for_myself_as_staff(staff_api_client):
# given
staff = staff_api_client.user
staff.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
staff.save(update_fields=["metadata"])
staff_id = graphene.Node.to_global_id("User", staff.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, None, staff_id, "User"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], staff, staff_id
)
def test_delete_public_metadata_for_checkout(api_client, checkout):
# given
checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
checkout.save(update_fields=["metadata"])
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
response = execute_clear_public_metadata_for_item(
api_client, None, checkout_id, "Checkout"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], checkout, checkout_id
)
def test_delete_public_metadata_for_order(api_client, order):
# given
order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
order.save(update_fields=["metadata"])
order_id = graphene.Node.to_global_id("Order", order.pk)
# when
response = execute_clear_public_metadata_for_item(
api_client, None, order_id, "Order"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], order, order_id
)
def test_delete_public_metadata_for_draft_order(api_client, draft_order):
# given
draft_order.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
draft_order.save(update_fields=["metadata"])
draft_order_id = graphene.Node.to_global_id("Order", draft_order.pk)
# when
response = execute_clear_public_metadata_for_item(
api_client, None, draft_order_id, "Order"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], draft_order, draft_order_id
)
def test_delete_public_metadata_for_attribute(
staff_api_client, permission_manage_products, color_attribute
):
# given
color_attribute.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
color_attribute.save(update_fields=["metadata"])
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_products, attribute_id, "Attribute"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], color_attribute, attribute_id
)
def test_delete_public_metadata_for_category(
staff_api_client, permission_manage_products, category
):
# given
category.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
category.save(update_fields=["metadata"])
category_id = graphene.Node.to_global_id("Category", category.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_products, category_id, "Category"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], category, category_id
)
def test_delete_public_metadata_for_collection(
staff_api_client, permission_manage_products, collection
):
# given
collection.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
collection.save(update_fields=["metadata"])
collection_id = graphene.Node.to_global_id("Collection", collection.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_products, collection_id, "Collection"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], collection, collection_id
)
def test_delete_public_metadata_for_digital_content(
staff_api_client, permission_manage_products, digital_content
):
# given
digital_content.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
digital_content.save(update_fields=["metadata"])
digital_content_id = graphene.Node.to_global_id(
"DigitalContent", digital_content.pk
)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client,
permission_manage_products,
digital_content_id,
"DigitalContent",
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], digital_content, digital_content_id
)
def test_delete_public_metadata_for_fulfillment(
staff_api_client, permission_manage_orders, fulfillment
):
# given
fulfillment.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
fulfillment.save(update_fields=["metadata"])
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_orders, fulfillment_id, "Fulfillment"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], fulfillment, fulfillment_id
)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_delete_public_metadata_for_product(
updated_webhook_mock, staff_api_client, permission_manage_products, product
):
# given
product.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
product.save(update_fields=["metadata"])
product_id = graphene.Node.to_global_id("Product", product.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_products, product_id, "Product"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], product, product_id
)
updated_webhook_mock.assert_called_once_with(product)
def test_delete_public_metadata_for_product_type(
staff_api_client, permission_manage_products, product_type
):
# given
product_type.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
product_type.save(update_fields=["metadata"])
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_products, product_type_id, "ProductType"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], product_type, product_type_id
)
def test_delete_public_metadata_for_product_variant(
staff_api_client, permission_manage_products, variant
):
# given
variant.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
variant.save(update_fields=["metadata"])
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_products, variant_id, "ProductVariant"
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], variant, variant_id
)
def test_delete_public_metadata_for_app(staff_api_client, permission_manage_apps, app):
# given
app_id = graphene.Node.to_global_id("App", app.pk)
# when
response = execute_clear_public_metadata_for_item(
staff_api_client, permission_manage_apps, app_id, "App",
)
# then
assert item_without_public_metadata(
response["data"]["deleteMetadata"]["item"], app, app_id
)
def test_delete_public_metadata_for_non_exist_item(api_client):
    """Deleting metadata of an unknown object id reports NOT_FOUND on "id"."""
    # given
    raw_global_id = "Checkout:" + str(uuid.uuid4())
    checkout_id = base64.b64encode(raw_global_id.encode()).decode("utf-8")
    # when
    response = execute_clear_public_metadata_for_item(
        api_client, None, checkout_id, "Checkout"
    )
    # then
    errors = response["data"]["deleteMetadata"]["metadataErrors"]
    assert errors[0]["field"] == "id"
    assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
def test_delete_public_metadata_for_item_without_meta(api_client, address):
# given
assert not issubclass(type(address), ModelWithMetadata)
address_id = graphene.Node.to_global_id("Address", address.pk)
# when
# We use "User" type inside mutation for valid graphql query with fragment
# without this we are not able to reuse DELETE_PUBLIC_METADATA_MUTATION
response = execute_clear_public_metadata_for_item(
api_client, None, address_id, "User"
)
# then
errors = response["data"]["deleteMetadata"]["metadataErrors"]
assert errors[0]["field"] == "id"
assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
def test_delete_public_metadata_for_not_exist_key(api_client, checkout):
# given
checkout.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
checkout.save(update_fields=["metadata"])
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
response = execute_clear_public_metadata_for_item(
api_client, None, checkout_id, "Checkout", key="Not-exits"
)
# then
assert item_contains_proper_public_metadata(
response["data"]["deleteMetadata"]["item"], checkout, checkout_id
)
def test_delete_public_metadata_for_one_key(api_client, checkout):
    """Deleting one key must leave the other public metadata entries intact."""
    # given
    checkout.store_value_in_metadata(
        {PUBLIC_KEY: PUBLIC_VALUE, "to_clear": PUBLIC_VALUE},
    )
    checkout.save(update_fields=["metadata"])
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    # when
    response = execute_clear_public_metadata_for_item(
        api_client, None, checkout_id, "Checkout", key="to_clear"
    )
    # then
    returned_item = response["data"]["deleteMetadata"]["item"]
    # the untouched key is still present ...
    assert item_contains_proper_public_metadata(returned_item, checkout, checkout_id)
    # ... while the requested key is gone
    assert item_without_public_metadata(
        returned_item,
        checkout,
        checkout_id,
        key="to_clear",
    )
UPDATE_PRIVATE_METADATA_MUTATION = """
mutation UpdatePrivateMetadata($id: ID!, $input: [MetadataInput!]!) {
updatePrivateMetadata(
id: $id
input: $input
) {
metadataErrors{
field
code
}
item {
privateMetadata{
key
value
}
...on %s{
id
}
}
}
}
"""
def execute_update_private_metadata_for_item(
    client, permissions, item_id, item_type, key=PRIVATE_KEY, value=PRIVATE_VALUE,
):
    """Send ``updatePrivateMetadata`` for one key/value pair.

    Returns the parsed GraphQL content of the response.
    """
    payload = {"id": item_id, "input": [{"key": key, "value": value}]}
    granted = [permissions] if permissions else None
    raw_response = client.post_graphql(
        UPDATE_PRIVATE_METADATA_MUTATION % item_type,
        payload,
        permissions=granted,
    )
    return get_graphql_content(raw_response)
def execute_update_private_metadata_for_multiple_items(
    client,
    permissions,
    item_id,
    item_type,
    key=PUBLIC_KEY,
    value=PUBLIC_VALUE,
    key2=PUBLIC_KEY2,
    value2=PUBLIC_VALUE2,
):
    """Send ``updatePrivateMetadata`` with two key/value pairs and return the
    parsed GraphQL content.

    NOTE(review): the defaults are the PUBLIC_* constants even though this
    helper targets *private* metadata (its single-item sibling defaults to
    PRIVATE_*) — presumably a copy-paste from the public variant; confirm
    whether PRIVATE_* defaults were intended.
    """
    variables = {
        "id": item_id,
        "input": [{"key": key, "value": value}, {"key": key2, "value": value2}],
    }
    response = client.post_graphql(
        UPDATE_PRIVATE_METADATA_MUTATION % item_type,
        variables,
        permissions=[permissions] if permissions else None,
    )
    response = get_graphql_content(response)
    return response
def item_contains_proper_private_metadata(
    item_from_response, item, item_id, key=PRIVATE_KEY, value=PRIVATE_VALUE,
):
    """Return True when the response item matches ``item_id`` and the freshly
    reloaded model stores ``value`` under ``key`` in its private metadata."""
    if item_id != item_from_response["id"]:
        return False
    item.refresh_from_db()
    return value == item.get_value_from_private_metadata(key)
def item_contains_multiple_proper_private_metadata(
    item_from_response,
    item,
    item_id,
    key=PUBLIC_KEY,
    value=PUBLIC_VALUE,
    key2=PUBLIC_KEY2,
    value2=PUBLIC_VALUE2,
):
    """Return True when the response id matches and, after reload, both keys
    map to their expected values in the item's private metadata.

    NOTE(review): defaults are the PUBLIC_* constants — presumably inherited
    from the public-metadata variant; verify intent.
    """
    if item_from_response["id"] != item_id:
        return False
    item.refresh_from_db()
    expected = ((key, value), (key2, value2))
    return all(item.get_value_from_private_metadata(k) == v for k, v in expected)
def test_add_private_metadata_for_customer_as_staff(
staff_api_client, permission_manage_users, customer_user
):
# given
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], customer_user, customer_id
)
def test_add_private_metadata_for_customer_as_app(
app_api_client, permission_manage_users, customer_user
):
# given
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_update_private_metadata_for_item(
app_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], customer_user, customer_id
)
def test_add_multiple_private_metadata_for_customer_as_app(
app_api_client, permission_manage_users, customer_user
):
# given
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_update_private_metadata_for_multiple_items(
app_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_contains_multiple_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], customer_user, customer_id
)
def test_add_private_metadata_for_other_staff_as_staff(
staff_api_client, permission_manage_staff, admin_user
):
# given
assert admin_user.pk != staff_api_client.user.pk
admin_id = graphene.Node.to_global_id("User", admin_user.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_staff, admin_id, "User"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], admin_user, admin_id
)
def test_add_private_metadata_for_staff_as_app_no_permission(
app_api_client, permission_manage_staff, admin_user
):
# given
admin_id = graphene.Node.to_global_id("User", admin_user.pk)
variables = {
"id": admin_id,
"input": [{"key": PRIVATE_KEY, "value": PRIVATE_VALUE}],
}
# when
response = app_api_client.post_graphql(
UPDATE_PRIVATE_METADATA_MUTATION % "User",
variables,
permissions=[permission_manage_staff],
)
# then
assert_no_permission(response)
def test_add_private_metadata_for_myself_as_customer_no_permission(user_api_client):
# given
customer = user_api_client.user
variables = {
"id": graphene.Node.to_global_id("User", customer.pk),
"input": [{"key": PRIVATE_KEY, "value": PRIVATE_VALUE}],
}
# when
response = user_api_client.post_graphql(
UPDATE_PRIVATE_METADATA_MUTATION % "User", variables, permissions=[],
)
# then
assert_no_permission(response)
@pytest.mark.parametrize(
    # ``input`` shadows the builtin, but the name must match the parameter.
    "input", [{"key": " ", "value": "test"}, {"key": " ", "value": ""}],
)
def test_staff_update_private_metadata_empty_key(
    input, staff_api_client, permission_manage_staff, admin_user
):
    """A blank (whitespace-only) metadata key is rejected with REQUIRED."""
    # given
    admin_id = graphene.Node.to_global_id("User", admin_user.pk)
    # when
    # Fixed: the original had a duplicated assignment ("response = response =").
    response = execute_update_private_metadata_for_item(
        staff_api_client,
        permission_manage_staff,
        admin_id,
        "User",
        input["key"],
        input["value"],
    )
    # then
    data = response["data"]["updatePrivateMetadata"]
    errors = data["metadataErrors"]
    assert not data["item"]
    assert len(errors) == 1
    assert errors[0]["code"] == MetadataErrorCode.REQUIRED.name
    assert errors[0]["field"] == "input"
def test_add_private_metadata_for_myself_as_staff(staff_api_client):
    """Staff users may not set private metadata on their own account."""
    # given
    me = staff_api_client.user
    variables = {
        "id": graphene.Node.to_global_id("User", me.pk),
        "input": [{"key": PRIVATE_KEY, "value": PRIVATE_VALUE}],
    }
    # when
    response = staff_api_client.post_graphql(
        UPDATE_PRIVATE_METADATA_MUTATION % "User", variables, permissions=[],
    )
    # then
    assert_no_permission(response)
def test_add_private_metadata_for_checkout(
staff_api_client, checkout, permission_manage_checkouts
):
# given
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_checkouts, checkout_id, "Checkout"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], checkout, checkout_id
)
def test_add_private_metadata_for_order(
staff_api_client, order, permission_manage_orders
):
# given
order_id = graphene.Node.to_global_id("Order", order.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_orders, order_id, "Order"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], order, order_id
)
def test_add_private_metadata_for_draft_order(
staff_api_client, draft_order, permission_manage_orders
):
# given
draft_order_id = graphene.Node.to_global_id("Order", draft_order.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_orders, draft_order_id, "Order"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], draft_order, draft_order_id
)
def test_add_private_metadata_for_attribute(
staff_api_client, permission_manage_products, color_attribute
):
# given
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_products, attribute_id, "Attribute"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], color_attribute, attribute_id
)
def test_add_private_metadata_for_category(
staff_api_client, permission_manage_products, category
):
# given
category_id = graphene.Node.to_global_id("Category", category.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_products, category_id, "Category"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], category, category_id
)
def test_add_private_metadata_for_collection(
staff_api_client, permission_manage_products, collection
):
# given
collection_id = graphene.Node.to_global_id("Collection", collection.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_products, collection_id, "Collection"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], collection, collection_id
)
def test_add_private_metadata_for_digital_content(
staff_api_client, permission_manage_products, digital_content
):
# given
digital_content_id = graphene.Node.to_global_id(
"DigitalContent", digital_content.pk
)
# when
response = execute_update_private_metadata_for_item(
staff_api_client,
permission_manage_products,
digital_content_id,
"DigitalContent",
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"],
digital_content,
digital_content_id,
)
def test_add_private_metadata_for_fulfillment(
staff_api_client, permission_manage_orders, fulfillment
):
# given
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_orders, fulfillment_id, "Fulfillment"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], fulfillment, fulfillment_id
)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_add_private_metadata_for_product(
updated_webhook_mock, staff_api_client, permission_manage_products, product
):
# given
product_id = graphene.Node.to_global_id("Product", product.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_products, product_id, "Product"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], product, product_id
)
updated_webhook_mock.assert_called_once_with(product)
def test_add_private_metadata_for_product_type(
staff_api_client, permission_manage_products, product_type
):
# given
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_products, product_type_id, "ProductType"
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], product_type, product_type_id
)
def test_add_private_metadata_for_product_variant(
staff_api_client, permission_manage_products, variant
):
# given
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_products, variant_id, "ProductVariant",
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], variant, variant_id
)
def test_add_private_metadata_for_app(staff_api_client, permission_manage_apps, app):
# given
app_id = graphene.Node.to_global_id("App", app.pk)
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_apps, app_id, "App",
)
# then
assert item_contains_proper_private_metadata(
response["data"]["updatePrivateMetadata"]["item"], app, app_id,
)
def test_update_private_metadata_for_item(
    staff_api_client, checkout, permission_manage_checkouts
):
    """Updating an existing private-metadata key overwrites its value."""
    # given
    # Fixed: seed the key with PRIVATE_VALUE — the original stored PRIVATE_KEY
    # as its own value ({PRIVATE_KEY: PRIVATE_KEY}), an apparent typo that
    # weakened the "existing value gets replaced" precondition.
    checkout.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    checkout.save(update_fields=["private_metadata"])
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    # when
    response = execute_update_private_metadata_for_item(
        staff_api_client,
        permission_manage_checkouts,
        checkout_id,
        "Checkout",
        value="NewMetaValue",
    )
    # then
    assert item_contains_proper_private_metadata(
        response["data"]["updatePrivateMetadata"]["item"],
        checkout,
        checkout_id,
        value="NewMetaValue",
    )
def test_update_private_metadata_for_non_exist_item(
staff_api_client, permission_manage_checkouts
):
# given
checkout_id = "Checkout:" + str(uuid.uuid4())
checkout_id = base64.b64encode(str.encode(checkout_id)).decode("utf-8")
# when
response = execute_update_private_metadata_for_item(
staff_api_client, permission_manage_checkouts, checkout_id, "Checkout"
)
# then
errors = response["data"]["updatePrivateMetadata"]["metadataErrors"]
assert errors[0]["field"] == "id"
assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
def test_update_private_metadata_for_item_without_meta(api_client, address):
# given
assert not issubclass(type(address), ModelWithMetadata)
address_id = graphene.Node.to_global_id("Address", address.pk)
# when
# We use "User" type inside mutation for valid graphql query with fragment
# without this we are not able to reuse UPDATE_PRIVATE_METADATA_MUTATION
response = execute_update_private_metadata_for_item(
api_client, None, address_id, "User"
)
# then
errors = response["data"]["updatePrivateMetadata"]["metadataErrors"]
assert errors[0]["field"] == "id"
assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
DELETE_PRIVATE_METADATA_MUTATION = """
mutation DeletePrivateMetadata($id: ID!, $keys: [String!]!) {
deletePrivateMetadata(
id: $id
keys: $keys
) {
metadataErrors{
field
code
}
item {
privateMetadata{
key
value
}
...on %s{
id
}
}
}
}
"""
def execute_clear_private_metadata_for_item(
    client, permissions, item_id, item_type, key=PRIVATE_KEY,
):
    """Send ``deletePrivateMetadata`` for a single key.

    Returns the parsed GraphQL content of the response.
    """
    payload = {"id": item_id, "keys": [key]}
    granted = [permissions] if permissions else None
    raw_response = client.post_graphql(
        DELETE_PRIVATE_METADATA_MUTATION % item_type,
        payload,
        permissions=granted,
    )
    return get_graphql_content(raw_response)
def execute_clear_private_metadata_for_multiple_items(
    client, permissions, item_id, item_type, key=PUBLIC_KEY, key2=PUBLIC_KEY2
):
    """Request deletion of two metadata keys and return the parsed content.

    NOTE(review): despite the "private" name this posts
    DELETE_PUBLIC_METADATA_MUTATION with PUBLIC_* key defaults, and its only
    caller (test_delete_multiple_private_metadata_for_customer_as_app) reads
    the ``deleteMetadata`` payload — so the trio consistently exercises
    *public* metadata.  Confirm whether a genuine private-metadata variant
    was intended; fixing either piece alone would break the other.
    """
    variables = {
        "id": item_id,
        "keys": [key, key2],
    }
    response = client.post_graphql(
        DELETE_PUBLIC_METADATA_MUTATION % item_type,
        variables,
        permissions=[permissions] if permissions else None,
    )
    response = get_graphql_content(response)
    return response
def item_without_private_metadata(
    item_from_response, item, item_id, key=PRIVATE_KEY, value=PRIVATE_VALUE,
):
    """Return True when the response id matches and the reloaded model no
    longer maps ``key`` to ``value`` in its private metadata."""
    if item_id != item_from_response["id"]:
        return False
    item.refresh_from_db()
    return value != item.get_value_from_private_metadata(key)
def item_without_multiple_private_metadata(
    item_from_response,
    item,
    item_id,
    key=PUBLIC_KEY,
    value=PUBLIC_VALUE,
    key2=PUBLIC_KEY2,
    value2=PUBLIC_VALUE2,
):
    """Return True when the response id matches and, after reload, neither
    key still maps to its old value in the item's private metadata.

    NOTE(review): defaults are the PUBLIC_* constants — presumably inherited
    from the public-metadata variant; verify intent.
    """
    if item_from_response["id"] != item_id:
        return False
    item.refresh_from_db()
    cleared = ((key, value), (key2, value2))
    return all(item.get_value_from_private_metadata(k) != v for k, v in cleared)
def test_delete_private_metadata_for_customer_as_staff(
staff_api_client, permission_manage_users, customer_user
):
# given
customer_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
customer_user.save(update_fields=["private_metadata"])
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], customer_user, customer_id
)
def test_delete_private_metadata_for_customer_as_app(
app_api_client, permission_manage_users, customer_user
):
# given
customer_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
customer_user.save(update_fields=["private_metadata"])
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
# when
response = execute_clear_private_metadata_for_item(
app_api_client, permission_manage_users, customer_id, "User"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], customer_user, customer_id
)
def test_delete_multiple_private_metadata_for_customer_as_app(
    app_api_client, permission_manage_users, customer_user
):
    """Two metadata keys can be deleted for a customer in one request.

    NOTE(review): despite the "private" name, this stores PUBLIC metadata and
    reads the ``deleteMetadata`` payload — consistent with the mislabeled
    execute_clear_private_metadata_for_multiple_items helper, which posts the
    public mutation.  Confirm whether this was meant to cover private
    metadata.
    """
    # given
    customer_user.store_value_in_metadata(
        {PUBLIC_KEY: PUBLIC_VALUE, PUBLIC_KEY2: PUBLIC_VALUE2}
    )
    customer_user.save(update_fields=["metadata"])
    customer_id = graphene.Node.to_global_id("User", customer_user.pk)
    # when
    response = execute_clear_private_metadata_for_multiple_items(
        app_api_client, permission_manage_users, customer_id, "User"
    )
    # then
    assert item_without_multiple_private_metadata(
        response["data"]["deleteMetadata"]["item"], customer_user, customer_id
    )
def test_delete_private_metadata_for_other_staff_as_staff(
staff_api_client, permission_manage_staff, admin_user
):
# given
assert admin_user.pk != staff_api_client.user.pk
admin_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
admin_user.save(update_fields=["private_metadata"])
admin_id = graphene.Node.to_global_id("User", admin_user.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_staff, admin_id, "User"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], admin_user, admin_id
)
def test_delete_private_metadata_for_staff_as_app_no_permission(
app_api_client, permission_manage_staff, admin_user
):
# given
admin_user.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
admin_user.save(update_fields=["private_metadata"])
admin_id = graphene.Node.to_global_id("User", admin_user.pk)
variables = {
"id": admin_id,
"keys": [PRIVATE_KEY],
}
# when
response = app_api_client.post_graphql(
DELETE_PRIVATE_METADATA_MUTATION % "User",
variables,
permissions=[permission_manage_staff],
)
# then
assert_no_permission(response)
def test_delete_private_metadata_for_myself_as_customer_no_permission(user_api_client):
# given
customer = user_api_client.user
customer.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
customer.save(update_fields=["private_metadata"])
variables = {
"id": graphene.Node.to_global_id("User", customer.pk),
"keys": [PRIVATE_KEY],
}
# when
response = user_api_client.post_graphql(
DELETE_PRIVATE_METADATA_MUTATION % "User", variables, permissions=[]
)
# then
assert_no_permission(response)
def test_delete_private_metadata_for_myself_as_staff_no_permission(
staff_api_client, permission_manage_users
):
# given
staff = staff_api_client.user
staff.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
staff.save(update_fields=["private_metadata"])
variables = {
"id": graphene.Node.to_global_id("User", staff.pk),
"keys": [PRIVATE_KEY],
}
# when
response = staff_api_client.post_graphql(
DELETE_PRIVATE_METADATA_MUTATION % "User",
variables,
permissions=[permission_manage_users],
)
# then
assert_no_permission(response)
def test_delete_private_metadata_for_checkout(
staff_api_client, checkout, permission_manage_checkouts
):
# given
checkout.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
checkout.save(update_fields=["private_metadata"])
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_checkouts, checkout_id, "Checkout"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], checkout, checkout_id
)
def test_delete_private_metadata_for_order(
staff_api_client, order, permission_manage_orders
):
# given
order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
order.save(update_fields=["private_metadata"])
order_id = graphene.Node.to_global_id("Order", order.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_orders, order_id, "Order"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], order, order_id
)
def test_delete_private_metadata_for_draft_order(
staff_api_client, draft_order, permission_manage_orders
):
# given
draft_order.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
draft_order.save(update_fields=["private_metadata"])
draft_order_id = graphene.Node.to_global_id("Order", draft_order.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_orders, draft_order_id, "Order"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], draft_order, draft_order_id
)
def test_delete_private_metadata_for_attribute(
staff_api_client, permission_manage_products, color_attribute
):
# given
color_attribute.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
color_attribute.save(update_fields=["private_metadata"])
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_products, attribute_id, "Attribute"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], color_attribute, attribute_id
)
def test_delete_private_metadata_for_category(
staff_api_client, permission_manage_products, category
):
# given
category.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
category.save(update_fields=["private_metadata"])
category_id = graphene.Node.to_global_id("Category", category.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_products, category_id, "Category"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], category, category_id
)
def test_delete_private_metadata_for_collection(
staff_api_client, permission_manage_products, collection
):
# given
collection.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
collection.save(update_fields=["private_metadata"])
collection_id = graphene.Node.to_global_id("Collection", collection.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_products, collection_id, "Collection"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], collection, collection_id
)
def test_delete_private_metadata_for_digital_content(
staff_api_client, permission_manage_products, digital_content
):
# given
digital_content.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
digital_content.save(update_fields=["private_metadata"])
digital_content_id = graphene.Node.to_global_id(
"DigitalContent", digital_content.pk
)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client,
permission_manage_products,
digital_content_id,
"DigitalContent",
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"],
digital_content,
digital_content_id,
)
def test_delete_private_metadata_for_fulfillment(
staff_api_client, permission_manage_orders, fulfillment
):
# given
fulfillment.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
fulfillment.save(update_fields=["private_metadata"])
fulfillment_id = graphene.Node.to_global_id("Fulfillment", fulfillment.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_orders, fulfillment_id, "Fulfillment"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], fulfillment, fulfillment_id
)
@patch("saleor.plugins.manager.PluginsManager.product_updated")
def test_delete_private_metadata_for_product(
updated_webhook_mock, staff_api_client, permission_manage_products, product
):
# given
product.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
product.save(update_fields=["private_metadata"])
product_id = graphene.Node.to_global_id("Product", product.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_products, product_id, "Product"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], product, product_id
)
updated_webhook_mock.assert_called_once_with(product)
def test_delete_private_metadata_for_product_type(
staff_api_client, permission_manage_products, product_type
):
# given
product_type.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
product_type.save(update_fields=["private_metadata"])
product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk)
# when
response = execute_clear_private_metadata_for_item(
staff_api_client, permission_manage_products, product_type_id, "ProductType"
)
# then
assert item_without_private_metadata(
response["data"]["deletePrivateMetadata"]["item"], product_type, product_type_id
)
def test_delete_private_metadata_for_product_variant(
    staff_api_client, permission_manage_products, variant
):
    """Clearing private metadata on a ProductVariant removes the stored key."""
    # given
    variant.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    variant.save(update_fields=["private_metadata"])
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    # when
    response = execute_clear_private_metadata_for_item(
        staff_api_client, permission_manage_products, variant_id, "ProductVariant"
    )
    # then
    assert item_without_private_metadata(
        response["data"]["deletePrivateMetadata"]["item"], variant, variant_id
    )
def test_delete_private_metadata_for_app(staff_api_client, permission_manage_apps, app):
    """Clearing private metadata on an App succeeds even when none was set."""
    # given
    node_id = graphene.Node.to_global_id("App", app.pk)

    # when
    response = execute_clear_private_metadata_for_item(
        staff_api_client, permission_manage_apps, node_id, "App",
    )

    # then
    item = response["data"]["deletePrivateMetadata"]["item"]
    assert item_without_private_metadata(item, app, node_id)
def test_delete_private_metadata_for_non_exist_item(
    staff_api_client, permission_manage_checkouts
):
    """An ID that matches no object must yield a NOT_FOUND metadata error."""
    # given: a syntactically valid but unknown global ID
    raw_id = "Checkout:" + str(uuid.uuid4())
    unknown_id = base64.b64encode(raw_id.encode()).decode("utf-8")

    # when
    response = execute_clear_private_metadata_for_item(
        staff_api_client, permission_manage_checkouts, unknown_id, "Checkout"
    )

    # then
    errors = response["data"]["deletePrivateMetadata"]["metadataErrors"]
    assert errors[0]["field"] == "id"
    assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
def test_delete_private_metadata_for_item_without_meta(api_client, address):
    """Deleting metadata on a model that does not support metadata must
    produce a NOT_FOUND error instead of crashing."""
    # given: Address does not inherit from ModelWithMetadata
    assert not issubclass(type(address), ModelWithMetadata)
    address_id = graphene.Node.to_global_id("Address", address.pk)
    # when
    # We use "User" type inside mutation for valid graphql query with fragment
    # without this we are not able to reuse DELETE_PRIVATE_METADATA_MUTATION
    response = execute_clear_private_metadata_for_item(
        api_client, None, address_id, "User"
    )
    # then
    errors = response["data"]["deletePrivateMetadata"]["metadataErrors"]
    assert errors[0]["field"] == "id"
    assert errors[0]["code"] == MetadataErrorCode.NOT_FOUND.name
def test_delete_private_metadata_for_not_exist_key(
    staff_api_client, checkout, permission_manage_checkouts
):
    """Deleting a key that is absent must leave existing metadata intact."""
    # given: a checkout holding one private metadata entry
    checkout.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    checkout.save(update_fields=["private_metadata"])
    node_id = graphene.Node.to_global_id("Checkout", checkout.pk)

    # when: the mutation targets a key that was never stored
    # (any key other than PRIVATE_KEY would do here)
    response = execute_clear_private_metadata_for_item(
        staff_api_client,
        permission_manage_checkouts,
        node_id,
        "Checkout",
        key="Not-exits",
    )

    # then: the original entry is still present on the returned item
    item = response["data"]["deletePrivateMetadata"]["item"]
    assert item_contains_proper_private_metadata(item, checkout, node_id)
def test_delete_private_metadata_for_one_key(
    staff_api_client, checkout, permission_manage_checkouts
):
    """Deleting one key removes only that entry and keeps the others."""
    # given: two private metadata entries, one of which will be cleared
    checkout.store_value_in_private_metadata(
        {PRIVATE_KEY: PRIVATE_VALUE, "to_clear": PRIVATE_VALUE},
    )
    checkout.save(update_fields=["private_metadata"])
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    # when
    response = execute_clear_private_metadata_for_item(
        staff_api_client,
        permission_manage_checkouts,
        checkout_id,
        "Checkout",
        key="to_clear",
    )
    # then: the untouched key survives...
    assert item_contains_proper_private_metadata(
        response["data"]["deletePrivateMetadata"]["item"], checkout, checkout_id
    )
    # ...while the targeted key is gone
    assert item_without_private_metadata(
        response["data"]["deletePrivateMetadata"]["item"],
        checkout,
        checkout_id,
        key="to_clear",
    )
| 29.148423 | 88 | 0.71442 |
6b34d713b3cdcce8cf4a26da087bc5c9d1ffdcab | 852 | py | Python | examples/get_windows.py | avivazran/UnrealEnginePython | 758ad1e5b3a871442d00bdc3144e246fa443098f | [
"MIT"
] | 2,350 | 2016-08-08T17:00:16.000Z | 2022-03-31T22:37:15.000Z | examples/get_windows.py | avivazran/UnrealEnginePython | 758ad1e5b3a871442d00bdc3144e246fa443098f | [
"MIT"
] | 820 | 2016-08-08T16:35:26.000Z | 2022-03-24T05:09:51.000Z | examples/get_windows.py | avivazran/UnrealEnginePython | 758ad1e5b3a871442d00bdc3144e246fa443098f | [
"MIT"
] | 658 | 2016-08-10T16:26:24.000Z | 2022-03-30T02:42:22.000Z | import unreal_engine as ue
from unreal_engine import FSlateApplication
from unreal_engine.classes import GameViewportClient
def iterate_window(window):
    """Print the title of *window* and, recursively, of every descendant.

    Performs a pre-order, depth-first walk of the window tree.
    """
    title = window.get_title()
    print(title)
    children = window.get_child_windows()
    for child in children:
        iterate_window(child)
# get the active top level window
top_window = FSlateApplication.get_active_top_level_window()
iterate_window(top_window)
# get Game GameViewportClient
# NOTE: these lookups are best-effort -- a viewport only exists while the
# matching session is running.  Catch Exception (not a bare "except:") so
# that KeyboardInterrupt/SystemExit still propagate.
try:
    iterate_window(ue.get_game_viewport_client().game_viewport_client_get_window())
except Exception:
    pass
# get PIE GameViewportClient
try:
    iterate_window(ue.get_editor_pie_game_viewport_client().game_viewport_client_get_window())
except Exception:
    pass
# iterate all GameViewportClient uobject's
for game_viewport_client in ue.tobject_iterator(GameViewportClient):
    iterate_window(game_viewport_client.game_viewport_client_get_window())
e48da0bfe0875770f4f4de0fcbacbb85e5325a25 | 14,934 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/cloud/docker/docker_network.py | aburan28/ansible-devops-pipeline | 50aa801632ca0828c16faac55732f1e79085f932 | [
"Apache-2.0"
] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | venv/lib/python2.7/site-packages/ansible/modules/cloud/docker/docker_network.py | aburan28/ansible-devops-pipeline | 50aa801632ca0828c16faac55732f1e79085f932 | [
"Apache-2.0"
] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | venv/lib/python2.7/site-packages/ansible/modules/cloud/docker/docker_network.py | aburan28/ansible-devops-pipeline | 50aa801632ca0828c16faac55732f1e79085f932 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: a community-supported preview module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: docker_network
version_added: "2.2"
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
required: true
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
force:
description:
- With state I(absent) forces disconnecting all containers from the
network prior to deleting the network. With state I(present) will
disconnect all containers, delete the network and re-create the
network. This option is required if you have changed the IPAM or
driver options and want an existing network to be updated to use the
new options.
type: bool
default: 'no'
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
Use C(appends) to leave existing containers connected.
type: bool
default: 'no'
aliases:
- incremental
ipam_driver:
description:
- Specify an IPAM driver.
ipam_options:
description:
- Dictionary of IPAM options.
state:
description:
- I(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the C(force) option to disconnect all containers
and delete the network.
- I(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
C(appends) option to leave existing containers connected. Use the C(force)
options to force re-creation of the network.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Please note that the L(docker-py,https://pypi.org/project/docker-py/) Python
module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
install the C(docker) Python module. Note that both modules should I(not)
be installed at the same time. Also note that when both modules are installed
and one of them is uninstalled, the other might no longer function and a
reinstall of it is required."
- "The docker server >= 1.9.0"
'''
EXAMPLES = '''
- name: Create a network
docker_network:
name: network_one
- name: Remove all but selected list of containers
docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with options
docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
ipam_options:
subnet: '172.3.26.0/16'
gateway: 172.3.26.1
iprange: '192.168.1.0/24'
- name: Delete a network, disconnecting all containers
docker_network:
name: network_one
state: absent
force: yes
'''
RETURN = '''
facts:
description: Network inspection results for the affected network.
returned: success
type: dict
sample: {}
'''
from ansible.module_utils.docker_common import AnsibleDockerClient, DockerBaseClass, HAS_DOCKER_PY_2, HAS_DOCKER_PY_3
try:
from docker import utils
from docker.errors import NotFound
if HAS_DOCKER_PY_2 or HAS_DOCKER_PY_3:
from docker.types import IPAMPool, IPAMConfig
except Exception as dummy:
# missing docker-py handled in ansible.module_utils.docker_common
pass
class TaskParameters(DockerBaseClass):
    """Bag of task options populated from the Ansible module parameters."""

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client
        # Pre-declare every supported option so attribute access is always
        # safe, even for parameters the playbook did not supply.
        for option in (
            'network_name', 'connected', 'driver', 'driver_options',
            'ipam_driver', 'ipam_options', 'appends', 'force', 'debug',
        ):
            setattr(self, option, None)
        # Overwrite the defaults with whatever the module actually received.
        for key, value in client.module.params.items():
            setattr(self, key, value)
def container_names_in_network(network):
    """Return the names of all containers attached to *network*.

    ``network`` is the dict produced by ``docker network inspect``; its
    ``Containers`` entry may be ``None`` or empty, in which case an empty
    list is returned.
    """
    containers = network['Containers']
    if not containers:
        return []
    return [entry['Name'] for entry in containers.values()]
def get_driver_options(driver_options):
    """Normalise driver options into a plain ``str -> str`` mapping.

    The Docker daemon (written in Go) rejects Python's ``True``/``False``
    spellings, so booleans are lowered to ``'true'``/``'false'``; every
    other value is stringified.  ``None`` yields an empty dict.
    """
    if driver_options is None:
        return {}
    normalised = {}
    for key, value in driver_options.items():
        if value is True:
            text = 'true'
        elif value is False:
            text = 'false'
        else:
            text = str(value)
        normalised[str(key)] = text
    return normalised
class DockerNetworkManager(object):
    """State machine for a single Docker network.

    Construction inspects the current network, normalises the task
    parameters, immediately executes the requested state ('present' or
    'absent') and accumulates the outcome in ``self.results``.
    """
    def __init__(self, client):
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.existing_network = self.get_existing_network()
        # With no explicit 'connected' list, preserve whatever is already
        # attached instead of disconnecting everything.
        if not self.parameters.connected and self.existing_network:
            self.parameters.connected = container_names_in_network(self.existing_network)
        if self.parameters.driver_options:
            self.parameters.driver_options = get_driver_options(self.parameters.driver_options)
        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()
    def get_existing_network(self):
        """Return inspect data for the network, or None when it is absent."""
        try:
            return self.client.inspect_network(self.parameters.network_name)
        except NotFound:
            return None
    def has_different_config(self, net):
        '''
        Evaluates an existing network and returns a tuple containing a boolean
        indicating if the configuration is different and a list of differences.
        :param net: the inspection output for an existing network
        :return: (bool, list)
        '''
        different = False
        differences = []
        if self.parameters.driver and self.parameters.driver != net['Driver']:
            different = True
            differences.append('driver')
        if self.parameters.driver_options:
            if not net.get('Options'):
                different = True
                differences.append('driver_options')
            else:
                for key, value in self.parameters.driver_options.items():
                    if not (key in net['Options']) or value != net['Options'][key]:
                        different = True
                        differences.append('driver_options.%s' % key)
        if self.parameters.ipam_driver:
            if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
                different = True
                differences.append('ipam_driver')
        if self.parameters.ipam_options:
            if not net.get('IPAM') or not net['IPAM'].get('Config'):
                different = True
                differences.append('ipam_options')
            else:
                for key, value in self.parameters.ipam_options.items():
                    camelkey = None
                    if value is None:
                        # due to recursive argument_spec, all keys are always present
                        # (but have default value None if not specified)
                        continue
                    # The daemon reports IPAM keys in CamelCase; match them
                    # case-insensitively against our lowercase option names.
                    for net_key in net['IPAM']['Config'][0]:
                        if key == net_key.lower():
                            camelkey = net_key
                            break
                    if not camelkey:
                        # key not found
                        different = True
                        differences.append('ipam_options.%s' % key)
                    elif net['IPAM']['Config'][0].get(camelkey) != value:
                        # key has different value
                        different = True
                        differences.append('ipam_options.%s' % key)
        return different, differences
    def create_network(self):
        """Create the network unless it already exists; honours check mode."""
        if not self.existing_network:
            ipam_pools = []
            if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
                    self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
                # docker-py 1.x and 2.x/3.x expose different IPAM helper APIs.
                if HAS_DOCKER_PY_2 or HAS_DOCKER_PY_3:
                    ipam_pools.append(IPAMPool(**self.parameters.ipam_options))
                else:
                    ipam_pools.append(utils.create_ipam_pool(**self.parameters.ipam_options))
            if HAS_DOCKER_PY_2 or HAS_DOCKER_PY_3:
                ipam_config = IPAMConfig(driver=self.parameters.ipam_driver,
                                         pool_configs=ipam_pools)
            else:
                ipam_config = utils.create_ipam_config(driver=self.parameters.ipam_driver,
                                                       pool_configs=ipam_pools)
            if not self.check_mode:
                resp = self.client.create_network(self.parameters.network_name,
                                                  driver=self.parameters.driver,
                                                  options=self.parameters.driver_options,
                                                  ipam=ipam_config)
                self.existing_network = self.client.inspect_network(resp['Id'])
            self.results['actions'].append("Created network %s with driver %s" % (self.parameters.network_name, self.parameters.driver))
            self.results['changed'] = True
    def remove_network(self):
        """Disconnect every attached container, then delete the network."""
        if self.existing_network:
            self.disconnect_all_containers()
            if not self.check_mode:
                self.client.remove_network(self.parameters.network_name)
            self.results['actions'].append("Removed network %s" % (self.parameters.network_name,))
            self.results['changed'] = True
    def is_container_connected(self, container_name):
        """Return True when *container_name* is attached to the network."""
        return container_name in container_names_in_network(self.existing_network)
    def connect_containers(self):
        """Attach every requested container that is not yet connected."""
        for name in self.parameters.connected:
            if not self.is_container_connected(name):
                if not self.check_mode:
                    self.client.connect_container_to_network(name, self.parameters.network_name)
                self.results['actions'].append("Connected container %s" % (name,))
                self.results['changed'] = True
    def disconnect_missing(self):
        """Detach containers that are connected but not in the wanted list."""
        if not self.existing_network:
            return
        containers = self.existing_network['Containers']
        if not containers:
            return
        for c in containers.values():
            name = c['Name']
            if name not in self.parameters.connected:
                self.disconnect_container(name)
    def disconnect_all_containers(self):
        """Detach every container currently attached to the network."""
        containers = self.client.inspect_network(self.parameters.network_name)['Containers']
        if not containers:
            return
        for cont in containers.values():
            self.disconnect_container(cont['Name'])
    def disconnect_container(self, container_name):
        """Detach a single container; honours check mode."""
        if not self.check_mode:
            self.client.disconnect_container_from_network(container_name, self.parameters.network_name)
        self.results['actions'].append("Disconnected container %s" % (container_name,))
        self.results['changed'] = True
    def present(self):
        """Ensure the network exists with the requested configuration."""
        different = False
        differences = []
        if self.existing_network:
            different, differences = self.has_different_config(self.existing_network)
        # 'force' (or any config drift) requires re-creating the network.
        if self.parameters.force or different:
            self.remove_network()
            self.existing_network = None
        self.create_network()
        self.connect_containers()
        if not self.parameters.appends:
            self.disconnect_missing()
        if self.diff or self.check_mode or self.parameters.debug:
            self.results['diff'] = differences
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')
        self.results['ansible_facts'] = {u'docker_network': self.get_existing_network()}
    def absent(self):
        """Ensure the network does not exist."""
        self.remove_network()
def main():
    """Module entry point: declare the argument spec and run the manager."""
    argument_spec = dict(
        network_name=dict(type='str', required=True, aliases=['name']),
        connected=dict(type='list', default=[], aliases=['containers'], elements='str'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        driver=dict(type='str', default='bridge'),
        driver_options=dict(type='dict', default={}),
        force=dict(type='bool', default=False),
        appends=dict(type='bool', default=False, aliases=['incremental']),
        ipam_driver=dict(type='str'),
        ipam_options=dict(type='dict', default={}, options=dict(
            subnet=dict(type='str'),
            iprange=dict(type='str'),
            gateway=dict(type='str'),
            aux_addresses=dict(type='dict'),
        )),
        debug=dict(type='bool', default=False)
    )
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True
        # "The docker server >= 1.9.0"
    )
    # The manager executes the requested state change in its constructor;
    # afterwards its results dict is ready to report.
    cm = DockerNetworkManager(client)
    client.module.exit_json(**cm.results)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| 35.472684 | 136 | 0.619593 |
41cecde9e9b393fc65673f61c224bd13aecad011 | 682 | py | Python | halogen/halogencli.py | AUCR/halogen | 33a26035fbd0a4a7f5d4ace482bb5a20cea9e5c4 | [
"MIT"
] | null | null | null | halogen/halogencli.py | AUCR/halogen | 33a26035fbd0a4a7f5d4ace482bb5a20cea9e5c4 | [
"MIT"
] | null | null | null | halogen/halogencli.py | AUCR/halogen | 33a26035fbd0a4a7f5d4ace482bb5a20cea9e5c4 | [
"MIT"
] | null | null | null | # coding=utf-8
""" The mfbot Python3 CLI script """
from halogen.mfbot import MFBot
def main() -> None:
    """Command-line entry point: run MFBot and print any generated rules."""
    bot = MFBot()
    bot.parse_args()
    # Directory mode scans a whole folder; otherwise a single target is
    # processed.  Each mode keeps its own "nothing found" message.
    if bot.dir:
        rules = bot.dir_run()
        empty_message = "No images found within that directory"
    else:
        rules = bot.run()
        empty_message = 'No image found.'
    if len(rules) > 0:
        bot.print_yara_rule(rules)
    else:
        print(empty_message)
if __name__ == "__main__":
    # Allow running the CLI directly, e.g. "python halogencli.py".
    main()
| 26.230769 | 78 | 0.611437 |
8d6cfcf77de016ef536279f1958b9c620f91dcf6 | 1,465 | py | Python | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/alarm/storage/impl_log.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 2 | 2018-11-07T20:52:53.000Z | 2019-10-20T15:57:01.000Z | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/alarm/storage/impl_log.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 3 | 2021-12-14T20:57:54.000Z | 2022-01-21T23:50:36.000Z | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/alarm/storage/impl_log.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 2 | 2018-11-16T04:20:06.000Z | 2019-03-28T23:49:13.000Z | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simple logging storage backend.
"""
from ceilometer.alarm.storage import base
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class Connection(base.Connection):
    """Log the data.

    A no-op alarm storage driver: every operation succeeds without
    persisting anything.
    """
    def upgrade(self):
        # No schema to create or migrate.
        pass
    def clear(self):
        # Nothing is ever stored, so there is nothing to clear.
        pass
    def get_alarms(self, name=None, user=None, state=None, meter=None,
                   project=None, enabled=None, alarm_id=None, pagination=None):
        """Return a list of alarms matching the filters (always empty here)."""
        return []
    def create_alarm(self, alarm):
        """Create alarm."""
        # Echo the alarm back unchanged; nothing is written anywhere.
        return alarm
    def update_alarm(self, alarm):
        """Update alarm."""
        return alarm
    def delete_alarm(self, alarm_id):
        """Delete an alarm."""
| 29.3 | 80 | 0.660751 |
ae21ef01ecf50bd9ba0911ab0dbf1becf395e830 | 438 | py | Python | handover/migrations/0007_auto_20190603_0328.py | utkarsh3mehta/todo | 8b1b22a3b016658050572abf818beabac8658f12 | [
"MIT"
] | null | null | null | handover/migrations/0007_auto_20190603_0328.py | utkarsh3mehta/todo | 8b1b22a3b016658050572abf818beabac8658f12 | [
"MIT"
] | null | null | null | handover/migrations/0007_auto_20190603_0328.py | utkarsh3mehta/todo | 8b1b22a3b016658050572abf818beabac8658f12 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-06-03 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); edit only via new migrations.
    # Must be applied after the previous 'handover' migration.
    dependencies = [
        ('handover', '0006_auto_20190603_0327'),
    ]
    operations = [
        # Redefine User.userlastname as an optional 50-char field mapped to
        # the legacy "userLastName" database column.
        migrations.AlterField(
            model_name='user',
            name='userlastname',
            field=models.CharField(blank=True, db_column='userLastName', max_length=50, null=True),
        ),
    ]
| 23.052632 | 99 | 0.623288 |
c835e4b92306928ed8c23dbad5e97168e97aeee0 | 3,046 | py | Python | hlutils/tools/pyrep.py | huleiak47/python-hlutils | 2178bfaeff71081082051d229b6dfb8ad510fb6f | [
"Apache-2.0"
] | 2 | 2016-05-21T14:10:32.000Z | 2019-02-20T22:15:14.000Z | hlutils/tools/pyrep.py | huleiak47/python-hlutils | 2178bfaeff71081082051d229b6dfb8ad510fb6f | [
"Apache-2.0"
] | null | null | null | hlutils/tools/pyrep.py | huleiak47/python-hlutils | 2178bfaeff71081082051d229b6dfb8ad510fb6f | [
"Apache-2.0"
] | 1 | 2019-02-20T08:44:18.000Z | 2019-02-20T08:44:18.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Replace str from file or stdin
'''
import os
import sys
import argparse
import re
import locale
SYSENC = locale.getdefaultlocale()[1]
stdin = os.fdopen(sys.stdin.fileno(), 'rb')
stdout = os.fdopen(sys.stdout.fileno(), 'wb')
def parse_command(argv=None):
    """Parse the command-line options for the replace tool.

    :param argv: argument vector to parse; defaults to ``sys.argv[1:]`` so
        all existing callers are unaffected.  Accepting it as a parameter
        makes the parser usable (and testable) without touching the real
        process arguments.
    :return: the parsed ``argparse.Namespace``.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--from', metavar='pattern', dest='pattern', action='store', help='pattern of string to be replaced')
    parser.add_argument('-t', '--to', metavar='str', action='store', help='pattern of string to replace')
    # NOTE(review): if locale detection yielded SYSENC=None, this help-string
    # concatenation would raise; assumed non-None on supported platforms.
    parser.add_argument('-e', '--encoding', action='store', default=SYSENC, help='encoding of file, default ' + SYSENC)
    parser.add_argument('-i', '--inplace', action='store_true', help='change file, not print to stdout')
    parser.add_argument('-u', '--backup', metavar='ext', action='store', help='backup file before change and set file extend name')
    parser.add_argument('-I', '--ignorecase', action='store_true', help='ignore case')
    parser.add_argument('-M', '--multiline', action='store_true', help='`^` and `$` match beginning and end of each line')
    parser.add_argument('-D', '--dotall', action='store_true', help='Make the `.` special character match any character at all, including a newline; without this flag, `.` will match anything except a newline')
    parser.add_argument('filename', nargs='*', help='files, if not specified, read strings from stdin')
    ns = parser.parse_args(argv)
    return ns
def do_replace(string, reobj, to_str):
    """Return *string* with every match of *reobj* replaced by *to_str*.

    When either the compiled pattern or the replacement is ``None`` the
    input is returned unchanged (the caller may have supplied only one of
    ``-f`` / ``-t``).
    """
    can_substitute = reobj is not None and to_str is not None
    return reobj.sub(to_str, string) if can_substitute else string
def main():
    """Drive the replacement: parse options, build the regex, process input."""
    ns = parse_command()
    reobj = None
    if ns.pattern is not None:
        # Work on bytes throughout so arbitrary file encodings round-trip.
        ns.pattern = ns.pattern.encode(ns.encoding)
        flags = 0
        if ns.ignorecase:
            flags |= re.IGNORECASE
        if ns.multiline:
            flags |= re.MULTILINE
        if ns.dotall:
            flags |= re.DOTALL
        reobj = re.compile(ns.pattern, flags)
    to_str = None
    if ns.to is not None:
        to_str = ns.to.encode(ns.encoding)
    if ns.filename:
        for fname in ns.filename:
            with open(fname, 'rb') as f:
                string = f.read()
            retstr = do_replace(string, reobj, to_str)
            changed = (retstr != string)
            if ns.inplace:
                # Only rewrite (and back up) files whose content changed.
                if changed:
                    if ns.backup:
                        # NOTE(review): ns.backup is appended verbatim, so the
                        # user is expected to include the dot (e.g. ".bak").
                        if os.path.exists(fname + ns.backup):
                            os.remove(fname + ns.backup)
                        os.rename(fname, fname + ns.backup)
                    with open(fname, 'wb') as f:
                        f.write(retstr)
            else:
                # Without -i, the (possibly rewritten) content goes to stdout.
                stdout.write(retstr)
    else:
        # No files given: act as a filter from stdin to stdout.
        retstr = do_replace(stdin.read(), reobj, to_str)
        stdout.write(retstr)
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        # Top-level boundary: show the full traceback for debugging, echo
        # the bare message, and signal failure to the calling shell.
        import traceback
        traceback.print_exc()
        print(str(e))
        sys.exit(1)
229dc8355cb215165fd1abbcdc21361f7132e863 | 5,174 | py | Python | skimage/feature/tests/test_orb.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | 2 | 2020-02-24T02:24:43.000Z | 2021-12-19T11:44:34.000Z | skimage/feature/tests/test_orb.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | null | null | null | skimage/feature/tests/test_orb.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | 2 | 2019-06-16T06:38:28.000Z | 2021-12-19T11:44:48.000Z | import numpy as np
from skimage._shared.testing import assert_equal, assert_almost_equal
from skimage.feature import ORB
from skimage._shared import testing
from skimage import data
from skimage._shared.testing import test_parallel
img = data.coins()
@test_parallel()
def test_keypoints_orb_desired_no_of_keypoints():
    """ORB returns exactly the requested number (10) of keypoints here."""
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)
    # Hard-coded expectations act as a regression baseline (presumably
    # captured from a reference run of this implementation).
    exp_rows = np.array([ 141. , 108. , 214.56 , 131. , 214.272,
                          67. , 206. , 177. , 108. , 141. ])
    exp_cols = np.array([ 323. , 328. , 282.24 , 292. , 281.664,
                          85. , 260. , 284. , 328.8 , 267. ])
    exp_scales = np.array([1, 1, 1.44, 1, 1.728, 1, 1, 1, 1.2, 1])
    exp_orientations = np.array([ -53.97446153, 59.5055285 , -96.01885186,
                                 -149.70789506, -94.70171899, -45.76429535,
                                 -51.49752849, 113.57081195, 63.30428063,
                                 -79.56091118])
    exp_response = np.array([ 1.01168357, 0.82934145, 0.67784179, 0.57176438,
                              0.56637459, 0.52248355, 0.43696175, 0.42992376,
                              0.37700486, 0.36126832])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    # Orientations are stored in radians; compare in degrees to 5 decimals.
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)
    # detect_and_extract must locate the same keypoints as detect alone.
    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def test_keypoints_orb_less_than_desired_no_of_keypoints():
    """When fewer keypoints exist than requested (15), ORB returns only
    what it found (5 here) instead of padding or failing."""
    detector_extractor = ORB(n_keypoints=15, fast_n=12,
                             fast_threshold=0.33, downscale=2, n_scales=2)
    detector_extractor.detect(img)
    exp_rows = np.array([ 58., 65., 108., 140., 203.])
    exp_cols = np.array([ 291., 130., 293., 202., 267.])
    exp_scales = np.array([1., 1., 1., 1., 1.])
    exp_orientations = np.array([-158.26941428, -59.42996346, 151.93905955,
                                 -79.46341354, -56.90052451])
    exp_response = np.array([ 0.2667641 , 0.04009017, -0.17641695, -0.03243431,
                              0.26521259])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    # Orientations are stored in radians; compare in degrees to 5 decimals.
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)
    # detect_and_extract must locate the same keypoints as detect alone.
    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def test_descriptor_orb():
    """Binary descriptors for a fixed image must match the stored baseline."""
    detector_extractor = ORB(fast_n=12, fast_threshold=0.20)
    # Expected slice of the descriptor matrix (rows 100:120, bits 10:20),
    # presumably captured from a reference run of this implementation.
    exp_descriptors = np.array([[0, 1, 1, 1, 0, 1, 0, 1, 0, 1],
                                [1, 1, 1, 0, 0, 1, 0, 0, 1, 1],
                                [1, 0, 1, 1, 0, 0, 1, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
                                [1, 1, 0, 1, 1, 1, 0, 0, 1, 1],
                                [1, 1, 0, 1, 0, 0, 1, 0, 1, 1],
                                [0, 0, 1, 0, 1, 0, 0, 1, 1, 0],
                                [1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
                                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                                [1, 1, 0, 1, 0, 1, 0, 0, 1, 1],
                                [1, 1, 1, 0, 0, 0, 1, 1, 1, 0],
                                [1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                                [1, 1, 1, 0, 1, 1, 1, 1, 0, 0],
                                [1, 1, 0, 0, 1, 0, 0, 1, 0, 1],
                                [1, 1, 0, 0, 0, 0, 1, 0, 0, 1],
                                [0, 0, 0, 0, 1, 1, 1, 0, 1, 0],
                                [0, 0, 0, 0, 1, 1, 1, 0, 0, 1],
                                [0, 0, 0, 0, 0, 1, 1, 0, 1, 1],
                                [0, 0, 0, 0, 1, 0, 1, 0, 1, 1]], dtype=bool)
    detector_extractor.detect(img)
    detector_extractor.extract(img, detector_extractor.keypoints,
                               detector_extractor.scales,
                               detector_extractor.orientations)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])
    # The combined detect_and_extract path must yield identical descriptors.
    detector_extractor.detect_and_extract(img)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])
def test_no_descriptors_extracted_orb():
    """A featureless (constant) image has no keypoints, which must raise."""
    blank_image = np.ones((128, 128))
    orb = ORB()
    with testing.raises(RuntimeError):
        orb.detect_and_extract(blank_image)
| 46.196429 | 80 | 0.528605 |
97594c6f1177b5b05a3e94a14ea78f4dac704139 | 2,613 | py | Python | lib/surface/firebase/test/__init__.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/firebase/test/__init__.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/firebase/test/__init__.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The 'gcloud firebase test' sub-group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from googlecloudsdk.api_lib.firebase.test import endpoints
from googlecloudsdk.api_lib.firebase.test import exceptions
from googlecloudsdk.api_lib.firebase.test import util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class Test(base.Group):
  """Interact with Firebase Test Lab.
  Explore devices and OS versions available as test targets, run tests, monitor
  test progress, and view detailed test results in the Firebase console.
  """
  def Filter(self, context, args):
    """Modify the context that will be given to this group's commands when run.
    Args:
      context: {str:object}, The current context, which is a set of key-value
          pairs that can be used for common initialization among commands.
      args: argparse.Namespace: The same Namespace given to the corresponding
          .Run() invocation.
    Returns:
      The refined command context.
    """
    # Make sure service endpoints are compatible with each other.
    endpoints.ValidateTestServiceEndpoints()
    # Create the client for the Testing service.
    # The shared clients/messages are stashed in the context so every
    # subcommand in this group can reuse them.
    testing_client = apis.GetClientInstance('testing', 'v1')
    testing_client.num_retries = 9  # Add extra retries due to b/76429898.
    context['testing_client'] = testing_client
    context['testing_messages'] = apis.GetMessagesModule('testing', 'v1')
    # Create the client for the Tool Results service.
    context['toolresults_client'] = apis.GetClientInstance(
        'toolresults', 'v1beta3')
    context['toolresults_messages'] = apis.GetMessagesModule(
        'toolresults', 'v1beta3')
    # Create the client for the Storage service.
    context['storage_client'] = apis.GetClientInstance('storage', 'v1')
    return context
| 36.802817 | 79 | 0.746651 |
158d8437ce6a326b22916b0c5584e05ef7323fdf | 21,048 | py | Python | sample_xblocks/basic/problem.py | saharm1/xblock-sdk | 8d261eaadaf46be17f141a9db56cf9f9675fceea | [
"Apache-2.0"
] | null | null | null | sample_xblocks/basic/problem.py | saharm1/xblock-sdk | 8d261eaadaf46be17f141a9db56cf9f9675fceea | [
"Apache-2.0"
] | null | null | null | sample_xblocks/basic/problem.py | saharm1/xblock-sdk | 8d261eaadaf46be17f141a9db56cf9f9675fceea | [
"Apache-2.0"
] | null | null | null | """Problem XBlock, and friends.
Please note that this is a demonstrative implementation of how XBlocks can
be used, and is not an example of how problems are implemented in the
edx-platform runtime.
These implement a general mechanism for problems containing input fields
and checkers, wired together in interesting ways.
This code is in the XBlock layer.
A rough sequence diagram::
BROWSER (Javascript) SERVER (Python)
Problem Input Checker Problem Input Checker
| | | | | |
| submit()| | | | |
+-------->| | | | |
|<--------+ submit()| | | |
+------------------->| | | |
|<-------------------+ | | |
| | | "check" | | |
+------------------------------------>| submit()| |
| | | +-------->| |
| | | |<--------+ check()|
| | | +------------------>|
| | | |<------------------|
|<------------------------------------+ | |
| handleSubmit() | | | |
+-------->| handleCheck() | | |
+------------------->| | | |
| | | | | |
"""
import inspect
import random
import string
import time
from xblock.core import XBlock
from xblock.fields import Any, Boolean, Dict, Integer, Scope, String
from xblock.fragment import Fragment
from xblock.run_script import run_script
class ProblemBlock(XBlock):
"""A generalized container of InputBlocks and Checkers.
"""
script = String(help="Python code to compute values", scope=Scope.content, default="")
seed = Integer(help="Random seed for this student", scope=Scope.user_state, default=0)
problem_attempted = Boolean(help="Has the student attempted this problem?", scope=Scope.user_state, default=False)
has_children = True
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
block = runtime.construct_xblock_from_class(cls, keys)
# Find <script> children, turn them into script content.
for child in node:
if child.tag == "script":
block.script += child.text
else:
block.runtime.add_node_as_child(block, child, id_generator)
return block
def set_student_seed(self):
"""Set a random seed for the student so they each have different but repeatable data."""
# Don't return zero, that's the default, and the sign that we should make a new seed.
self.seed = int(time.time() * 1000) % 100 + 1
def calc_context(self, context):
"""If we have a script, run it, and return the resulting context."""
if self.script:
# Seed the random number for the student
if not self.seed:
self.set_student_seed()
random.seed(self.seed)
script_vals = run_script(self.script)
context = dict(context)
context.update(script_vals)
return context
# The content controls how the Inputs attach to Graders
def student_view(self, context=None):
"""Provide the default student view."""
if context is None:
context = {}
context = self.calc_context(context)
result = Fragment()
named_child_frags = []
# self.children is an attribute obtained from ChildrenModelMetaclass, so disable the
# static pylint checking warning about this.
for child_id in self.children: # pylint: disable=E1101
child = self.runtime.get_block(child_id)
frag = self.runtime.render_child(child, "problem_view", context)
result.add_frag_resources(frag)
named_child_frags.append((child.name, frag))
result.add_css("""
.problem {
border: solid 1px #888; padding: 3px;
}
""")
result.add_content(self.runtime.render_template(
"problem.html",
named_children=named_child_frags
))
result.add_javascript("""
function ProblemBlock(runtime, element) {
function callIfExists(obj, fn) {
if (typeof obj[fn] == 'function') {
return obj[fn].apply(obj, Array.prototype.slice.call(arguments, 2));
} else {
return undefined;
}
}
function handleCheckResults(results) {
$.each(results.submitResults || {}, function(input, result) {
callIfExists(runtime.childMap(element, input), 'handleSubmit', result);
});
$.each(results.checkResults || {}, function(checker, result) {
callIfExists(runtime.childMap(element, checker), 'handleCheck', result);
});
}
// To submit a problem, call all the named children's submit()
// function, collect their return values, and post that object
// to the check handler.
$(element).find('.check').bind('click', function() {
var data = {};
var children = runtime.children(element);
for (var i = 0; i < children.length; i++) {
var child = children[i];
if (child.name !== undefined) {
data[child.name] = callIfExists(child, 'submit');
}
}
var handlerUrl = runtime.handlerUrl(element, 'check')
$.post(handlerUrl, JSON.stringify(data)).success(handleCheckResults);
});
$(element).find('.rerandomize').bind('click', function() {
var handlerUrl = runtime.handlerUrl(element, 'rerandomize');
$.post(handlerUrl, JSON.stringify({}));
});
}
""")
result.initialize_js('ProblemBlock')
return result
@XBlock.json_handler
def check(self, submissions, suffix=''): # pylint: disable=unused-argument
"""
Processess the `submissions` with each provided Checker.
First calls the submit() method on each InputBlock. Then, for each Checker,
finds the values it needs and passes them to the appropriate `check()` method.
Returns a dictionary of 'submitResults': {input_name: user_submitted_results},
'checkResults': {checker_name: results_passed_through_checker}
"""
self.problem_attempted = True
context = self.calc_context({})
child_map = {}
# self.children is an attribute obtained from ChildrenModelMetaclass, so disable the
# static pylint checking warning about this.
for child_id in self.children: # pylint: disable=E1101
child = self.runtime.get_block(child_id)
if child.name:
child_map[child.name] = child
# For each InputBlock, call the submit() method with the browser-sent
# input data.
submit_results = {}
for input_name, submission in submissions.items():
child = child_map[input_name]
submit_results[input_name] = child.submit(submission)
child.save()
# For each Checker, find the values it wants, and pass them to its
# check() method.
checkers = list(self.runtime.querypath(self, "./checker"))
check_results = {}
for checker in checkers:
arguments = checker.arguments
kwargs = {}
kwargs.update(arguments)
for arg_name, arg_value in arguments.items():
if arg_value.startswith("."):
values = list(self.runtime.querypath(self, arg_value))
# TODO: What is the specific promised semantic of the iterability
# of the value returned by querypath?
kwargs[arg_name] = values[0]
elif arg_value.startswith("$"):
kwargs[arg_name] = context.get(arg_value[1:])
elif arg_value.startswith("="):
kwargs[arg_name] = int(arg_value[1:])
else:
raise ValueError("Couldn't interpret checker argument: %r" % arg_value)
result = checker.check(**kwargs)
if checker.name:
check_results[checker.name] = result
return {
'submitResults': submit_results,
'checkResults': check_results,
}
@XBlock.json_handler
def rerandomize(self, unused, suffix=''): # pylint: disable=unused-argument
"""Set a new random seed for the student."""
self.set_student_seed()
return {'status': 'ok'}
@staticmethod
def workbench_scenarios():
"""A few canned scenarios for display in the workbench."""
return [
("problem with thumbs and textbox",
"""\
<problem_demo>
<html_demo>
<p>You have three constraints to satisfy:</p>
<ol>
<li>The upvotes and downvotes must be equal.</li>
<li>You must enter the number of upvotes into the text field.</li>
<li>The number of upvotes must be $numvotes.</li>
</ol>
</html_demo>
<thumbs name='thumb'/>
<textinput_demo name='vote_count' input_type='int'/>
<script>
# Compute the random answer.
import random
numvotes = random.randrange(2,5)
</script>
<equality_demo name='votes_equal' left='./thumb/@upvotes' right='./thumb/@downvotes'>
Upvotes match downvotes
</equality_demo>
<equality_demo name='votes_named' left='./thumb/@upvotes' right='./vote_count/@student_input'>
Number of upvotes matches entered string
</equality_demo>
<equality_demo name='votes_specified' left='./thumb/@upvotes' right='$numvotes'>
Number of upvotes is $numvotes
</equality_demo>
</problem_demo>
"""),
("three problems 2",
"""
<vertical_demo>
<attempts_scoreboard_demo/>
<problem_demo>
<html_demo><p>What is $a+$b?</p></html_demo>
<textinput_demo name="sum_input" input_type="int" />
<equality_demo name="sum_checker" left="./sum_input/@student_input" right="$c" />
<script>
import random
a = random.randint(2, 5)
b = random.randint(1, 4)
c = a + b
</script>
</problem_demo>
<sidebar_demo>
<problem_demo>
<html_demo><p>What is $a × $b?</p></html_demo>
<textinput_demo name="sum_input" input_type="int" />
<equality_demo name="sum_checker" left="./sum_input/@student_input" right="$c" />
<script>
import random
a = random.randint(2, 6)
b = random.randint(3, 7)
c = a * b
</script>
</problem_demo>
</sidebar_demo>
<problem_demo>
<html_demo><p>What is $a+$b?</p></html_demo>
<textinput_demo name="sum_input" input_type="int" />
<equality_demo name="sum_checker" left="./sum_input/@student_input" right="$c" />
<script>
import random
a = random.randint(3, 5)
b = random.randint(2, 6)
c = a + b
</script>
</problem_demo>
</vertical_demo>
"""),
]
class InputBlock(XBlock):
"""Base class for blocks that accept inputs.
"""
def submit(self, submission):
"""
Called with the result of the javascript Block's submit() function.
Returns any data, which is passed to the Javascript handle_submit
function.
"""
pass
@XBlock.tag("checker")
class CheckerBlock(XBlock):
"""Base class for blocks that check answers.
"""
arguments = Dict(help="The arguments expected by `check`")
def set_arguments_from_xml(self, node):
"""
Set the `arguments` field from XML attributes based on `check` arguments.
"""
# Introspect the .check() method, and collect arguments it expects.
argspec = inspect.getargspec(self.check)
arguments = {}
for arg in argspec.args[1:]:
arguments[arg] = node.attrib.pop(arg)
self.arguments = arguments
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""
Parse the XML for a checker. A few arguments are handled specially,
then the rest get the usual treatment.
"""
block = super(CheckerBlock, cls).parse_xml(node, runtime, keys, id_generator)
block.set_arguments_from_xml(node)
return block
def check(self, **kwargs):
"""
Called with the data provided by the ProblemBlock.
Returns any data, which will be passed to the Javascript handle_check
function.
"""
raise NotImplementedError()
class TextInputBlock(InputBlock):
"""An XBlock that accepts text input."""
input_type = String(help="Type of conversion to attempt on input string")
student_input = Any(help="Last input submitted by the student", default="", scope=Scope.user_state)
def student_view(self, context=None): # pylint: disable=W0613
"""Returns default student view."""
return Fragment(u"<p>I can only appear inside problems.</p>")
def problem_view(self, context=None): # pylint: disable=W0613
"""Returns a view of the problem - a javascript text input field."""
html = u"<input type='text' name='input' value='{0}'><span class='message'></span>".format(self.student_input)
result = Fragment(html)
result.add_javascript("""
function TextInputBlock(runtime, element) {
return {
submit: function() {
return $(element).find(':input').serializeArray();
},
handleSubmit: function(result) {
$(element).find('.message').text((result || {}).error || '');
}
}
}
""")
result.initialize_js('TextInputBlock')
return result
def submit(self, submission):
self.student_input = submission[0]['value']
if self.input_type == 'int':
try:
self.student_input = int(submission[0]['value'])
except ValueError:
return {'error': '"%s" is not an integer' % self.student_input}
class EqualityCheckerBlock(CheckerBlock):
"""An XBlock that checks the equality of two student data fields."""
# Content: the problem will hook us up with our data.
content = String(help="Message describing the equality test", scope=Scope.content, default="Equality test")
# Student data
left = Any(scope=Scope.user_state)
right = Any(scope=Scope.user_state)
attempted = Boolean(scope=Scope.user_state)
def problem_view(self, context=None):
"""Renders the problem view.
The view is specific to whether or not this problem was attempted, and, if so,
if it was answered correctly.
"""
correct = self.left == self.right
# TODO: I originally named this class="data", but that conflicted with
# the CSS on the page! :( We might have to do something to namespace
# things.
# TODO: Should we have a way to spit out JSON islands full of data?
# Note the horror of mixed Python-Javascript data below...
content = string.Template(self.content).substitute(**context)
result = Fragment(
u"""
<span class="mydata" data-attempted='{ecb.attempted}' data-correct='{correct}'>
{content}
<span class='indicator'></span>
</span>
""".format(ecb=self, content=content, correct=correct)
)
# TODO: This is a runtime-specific URL. But if each XBlock ships their
# own copy of underscore.js, we won't be able to uniquify them.
# Perhaps runtimes can offer a palette of popular libraries so that
# XBlocks can refer to them in XBlock-standard ways?
result.add_javascript_url(
self.runtime.resource_url("js/vendor/underscore-min.js")
)
# TODO: The image tag here needs a magic URL, not a hard-coded one.
format_data = {
'correct': self.runtime.local_resource_url(
self, 'public/images/correct-icon.png'),
'incorrect': self.runtime.local_resource_url(
self, 'public/images/incorrect-icon.png'),
}
result.add_resource(
u"""
<script type="text/template" id="xblock-equality-template">
<% if (attempted !== "True") {{ %>
(Not attempted)
<% }} else if (correct === "True") {{ %>
<img src="{correct}">
<% }} else {{ %>
<img src="{incorrect}">
<% }} %>
</script>
""".format(**format_data),
"text/html"
)
result.add_javascript(
"""
function EqualityCheckerBlock(runtime, element) {
var template = _.template($("#xblock-equality-template").html());
function render() {
var data = $("span.mydata", element).data();
$("span.indicator", element).html(template(data));
}
render();
return {
handleCheck: function(result) {
$("span.mydata", element)
.data("correct", result ? "True" : "False")
.data("attempted", "True");
render();
}
}
}
"""
)
result.initialize_js('EqualityCheckerBlock')
return result
def check(self, left, right): # pylint: disable=W0221
self.attempted = True
self.left = left
self.right = right
event_data = {'value': 1 if left == right else 0, 'max_value': 1}
self.runtime.publish(self, 'grade', event_data)
return left == right
class AttemptsScoreboardBlock(XBlock):
"""
Show attempts on problems in my nieces.
"""
def student_view(self, context=None): # pylint: disable=W0613
"""Provide default student view."""
# Get the attempts for all problems in my parent.
if self.parent:
# these two lines are equivalent, and both work:
attempts = list(self.runtime.query(self).parent().descendants().attr("problem_attempted"))
attempts = list(self.runtime.querypath(self, "..//@problem_attempted"))
num_problems = len(attempts)
attempted = sum(attempts)
if num_problems == 0:
content = u"There are no problems here..."
elif attempted == num_problems:
content = u"Great! You attempted all %d problems!" % num_problems
else:
content = u"Hmm, you've only tried %d out of %d problems..." % (attempted, num_problems)
else:
content = u"I have nothing to live for! :("
return Fragment(content)
| 40.633205 | 118 | 0.514348 |
547b6654a8476e9135306a03ce35eb233d349f50 | 1,561 | py | Python | PyBank/main.py | ericwaxler/python-challenge | 09f65c218be94340e1812fe290fcaaf0c53c3a5c | [
"ADSL"
] | null | null | null | PyBank/main.py | ericwaxler/python-challenge | 09f65c218be94340e1812fe290fcaaf0c53c3a5c | [
"ADSL"
] | null | null | null | PyBank/main.py | ericwaxler/python-challenge | 09f65c218be94340e1812fe290fcaaf0c53c3a5c | [
"ADSL"
] | null | null | null | import csv
file = "Resources/budget_data.csv"
total = 0
avg = 0
months = 0
old = 0
delta = []
with open(file, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
first_row = next(csvreader)
months = sum(1 for row in csvreader)
with open(file, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
first_row = next(csvreader)
#second_row = next(csvreader)
for row in csvreader:
total = total + int(row[1])
with open(file, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
first_row = next(csvreader)
rows = list(csvreader)
linecount = 0
old = rows[0][1]
for row in rows:
#print(str(row[1]) + "\t" + str(int(row[1])-int(old)))
delta.append(int(row[1]) - int(old))
old = row[1]
with open("FinAnalysis.txt","w+") as outfile:
outfile.write("Financial Analysis \n-----------------------")
outfile.write("\nMonths: " + str(months))
outfile.write("\nTotal: $" + str(total))
outfile.write("\nAverage Change: $" + str(round(sum(delta)/(len(delta)-1),2)))
outfile.write("\nGreatest Increase in Profits : " + str(rows[delta.index(max(delta))][0])[0:4] + str(20) + str(rows[delta.index(max(delta))][0])[4:6] + " ($" + str(max(delta)) + ")")
outfile.write("\nGreatest Decrease in Profits : " + str(rows[delta.index(min(delta))][0])[0:4] + str(20) + str(rows[delta.index(min(delta))][0])[4:6] + " ($" + str(min(delta)) + ")")
with open("FinAnalysis.txt","r") as outfile:
print(outfile.read())
| 28.381818 | 185 | 0.591928 |
55adb8554c37df72d03ff7c91ac0a9f40f0a650a | 7,188 | py | Python | ingestion/src/metadata/ingestion/source/redash.py | mosiac1/OpenMetadata | 21c14d257c6ae7ed2aad2a9ccff2c3a3f1594681 | [
"Apache-2.0"
] | 1 | 2022-03-30T06:29:29.000Z | 2022-03-30T06:29:29.000Z | ingestion/src/metadata/ingestion/source/redash.py | mosiac1/OpenMetadata | 21c14d257c6ae7ed2aad2a9ccff2c3a3f1594681 | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/ingestion/source/redash.py | mosiac1/OpenMetadata | 21c14d257c6ae7ed2aad2a9ccff2c3a3f1594681 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from dataclasses import dataclass, field
from typing import Dict, Iterable, List
import requests
from redash_toolbelt import Redash
from metadata.generated.schema.entity.data.chart import Chart
from metadata.generated.schema.entity.services.connections.dashboard.redashConnection import (
RedashConnection,
)
from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
OpenMetadataConnection,
)
from metadata.generated.schema.entity.services.dashboardService import DashboardService
from metadata.generated.schema.metadataIngestion.workflow import (
Source as WorkflowSource,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.api.common import Entity
from metadata.ingestion.api.source import InvalidSourceException, Source, SourceStatus
from metadata.ingestion.models.table_metadata import Chart as ModelChart
from metadata.ingestion.models.table_metadata import Dashboard
from metadata.ingestion.ometa.ometa_api import OpenMetadata
@dataclass
class RedashSourceStatus(SourceStatus):
items_scanned: int = 0
filtered: List[str] = field(default_factory=list)
def item_scanned_status(self) -> None:
self.items_scanned += 1
def item_dropped_status(self, item: str) -> None:
self.filtered.append(item)
class RedashSource(Source[Entity]):
config: WorkflowSource
metadata_config: OpenMetadataConnection
status: RedashSourceStatus
platform = "redash"
dashboards_to_charts: Dict[str, List[str]]
def __init__(
self,
config: WorkflowSource,
metadata_config: OpenMetadataConnection,
):
super().__init__()
self.config = config
self.metadata_config = metadata_config
self.metadata = OpenMetadata(metadata_config)
self.connection_config = self.config.serviceConnection.__root__.config
self.status = RedashSourceStatus()
self.client = Redash(
self.connection_config.hostPort, self.connection_config.apiKey
)
self.service = self.metadata.get_service_or_create(
entity=DashboardService, config=config
)
self.dashboards_to_charts = {}
@classmethod
def create(cls, config_dict: dict, metadata_config: OpenMetadataConnection):
config: WorkflowSource = WorkflowSource.parse_obj(config_dict)
connection: RedashConnection = config.serviceConnection.__root__.config
if not isinstance(connection, RedashConnection):
raise InvalidSourceException(
f"Expected RedashConnection, but got {connection}"
)
return cls(config, metadata_config)
def prepare(self):
pass
def next_record(self) -> Iterable[Entity]:
yield from self.get_redash_charts()
dashboard_info = self.client.dashboards()
yield from self.get_redash_dashboard_charts(dashboard_info)
yield from self.get_redash_dashboard(dashboard_info)
def get_redash_charts(self) -> Chart:
query_info = self.client.queries()
for query_info in query_info["results"]:
query_id = query_info["id"]
query_name = query_info["name"]
query_data = requests.get(
f"{self.connection_config.hostPort}/api/queries/{query_id}"
).json()
for visualization in query_data.get("Visualizations", []):
chart_type = visualization.get("type", "")
chart_description = (
visualization.get("description", "")
if visualization.get("description", "")
else ""
)
yield Chart(
id=uuid.uuid4(),
name=query_id,
displayName=query_name,
chartType=chart_type,
service=EntityReference(
id=self.service.id, type="dashboardService"
),
description=chart_description,
)
def get_redash_dashboard_charts(self, dashboard_info) -> Chart:
for dashboard_info in dashboard_info["results"]:
dashboard_id = dashboard_info["id"]
if dashboard_id is not None:
dashboard_data = self.client.get_dashboard(dashboard_id)
self.dashboards_to_charts[dashboard_id] = []
for widgets in dashboard_data.get("widgets", []):
visualization = widgets.get("visualization")
self.dashboards_to_charts[dashboard_id].append(widgets["id"])
yield ModelChart(
name=widgets["id"],
displayName=visualization["query"]["name"],
chart_type=visualization["type"],
service=EntityReference(
id=self.service.id, type="dashboardService"
),
url=(
f"{self.connection_config.hostPort}/dashboard/{dashboard_data.get('slug', '')}"
),
description=visualization["description"],
)
def get_redash_dashboard(self, dashboard_info) -> Dashboard:
for dashboard_info in dashboard_info["results"]:
dashboard_id = dashboard_info["id"]
if dashboard_id is not None:
self.status.item_scanned_status()
dashboard_data = self.client.get_dashboard(dashboard_id)
dashboard_url = f"{self.connection_config.hostPort}/dashboard/{dashboard_data.get('slug', '')}"
dashboard_description = ""
for widgets in dashboard_data.get("widgets", []):
dashboard_description = widgets.get("text")
yield Dashboard(
id=uuid.uuid4(),
name=dashboard_id,
displayName=dashboard_info["name"],
description=dashboard_description if dashboard_info else "",
charts=self.dashboards_to_charts[dashboard_id],
usageSummary=None,
service=EntityReference(
id=self.service.id, type="dashboardService"
),
url=dashboard_url,
)
def get_status(self) -> SourceStatus:
return self.status
def close(self):
self.client.session.close()
def test_connection(self) -> None:
pass
| 41.074286 | 111 | 0.631886 |
d52d2eef529891d28ac38fb5e5e31c75ab640c3e | 471 | py | Python | src/fs/config.py | severus21/PycLi | 7160c725a88e527bff3b13cb9bcf0486d55d2b52 | [
"Apache-2.0"
] | null | null | null | src/fs/config.py | severus21/PycLi | 7160c725a88e527bff3b13cb9bcf0486d55d2b52 | [
"Apache-2.0"
] | null | null | null | src/fs/config.py | severus21/PycLi | 7160c725a88e527bff3b13cb9bcf0486d55d2b52 | [
"Apache-2.0"
] | 1 | 2017-05-21T16:07:21.000Z | 2017-05-21T16:07:21.000Z | #speed use only to select : read replicat
#nom => identifiant unique pour l'utilisateur(au sein d'un mêm parent)
import os
from pkg_resources import resource_filename, resource_exists
max_ratio = 100 #on ne peut multiplier la proba que par 5 au plus
counter = 0
#delay_snapshot = 100#nombre d'operations entre deux snapshots
size_transaction = 100
delay_transaction = 1.
default_replicat = 2
location_pgs_default = resource_filename("lipyc.data", "default-pgs.json")
| 33.642857 | 74 | 0.791932 |
b84606ea11d4e2df2277c3800dbde481a9ac59ed | 11,794 | py | Python | Python/example_controllers/audio/rube_goldberg.py | ricklentz/tdw | da40eec151acae20b28d6486defb4358d96adb0e | [
"BSD-2-Clause"
] | null | null | null | Python/example_controllers/audio/rube_goldberg.py | ricklentz/tdw | da40eec151acae20b28d6486defb4358d96adb0e | [
"BSD-2-Clause"
] | null | null | null | Python/example_controllers/audio/rube_goldberg.py | ricklentz/tdw | da40eec151acae20b28d6486defb4358d96adb0e | [
"BSD-2-Clause"
] | null | null | null | from typing import List, Dict
from pathlib import Path
import json
from argparse import ArgumentParser
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.librarian import ModelLibrarian
from tdw.add_ons.py_impact import PyImpact
from tdw.add_ons.audio_initializer import AudioInitializer
from tdw.add_ons.third_person_camera import ThirdPersonCamera
from tdw.add_ons.physics_audio_recorder import PhysicsAudioRecorder
from tdw.backend.paths import EXAMPLE_CONTROLLER_OUTPUT_PATH
from tdw.physics_audio.object_audio_static import ObjectAudioStatic, DEFAULT_OBJECT_AUDIO_STATIC_DATA
class RubeGoldbergDemo(Controller):
"""
Create a "Rube Goldberg machine" from a set of objects that will collide when the first is struck by a ball.
Impact sounds are generated for each collision.
Scene setup is handled through a json file -- rube_goldberg_object.json -- which defines the id number, position,
rotation and scale for every object in the scene. For some objects, it also has non-default physics values.
All other objects use default physics/audio values.
This controller will output two files per trial:
1. A log of the mode properties from PyImpact
2. A .wav file of the trial
"""
BALL_ID: int = 0
BOARD_ID: int = 2
def __init__(self, port: int = 1071, check_version: bool = True, launch_build: bool = True):
super().__init__(port=port, check_version=check_version, launch_build=launch_build)
# Cached commands to add the objects.
self.init_object_commands: List[dict] = list()
# Cached commands to destroy the objects.
self.destroy_object_commands: List[dict] = list()
# Cached audio override data.
self.static_audio_data_overrides: Dict[int, ObjectAudioStatic] = dict()
# Get commands to initialize the objects.
object_setup_data = json.loads(Path("rube_goldberg_objects.json").read_text())
for o in object_setup_data:
object_id = int(o)
# Cache the command to destroy the object.
self.destroy_object_commands.append({"$type": "destroy_object",
"id": object_id})
# Use non-default physics values.
if "physics" in object_setup_data[o]:
self.init_object_commands.extend(self.get_add_physics_object(model_name=object_setup_data[o]["model_name"],
object_id=object_id,
position=object_setup_data[o]["position"],
rotation=object_setup_data[o]["rotation"],
scale_factor=object_setup_data[o]["scale"],
library=object_setup_data[o]["library"],
default_physics_values=False,
scale_mass=False,
kinematic=object_setup_data[o]["physics"]["kinematic"],
mass=object_setup_data[o]["physics"]["mass"],
dynamic_friction=object_setup_data[o]["physics"]["dynamic_friction"],
static_friction=object_setup_data[o]["physics"]["static_friction"],
bounciness=object_setup_data[o]["physics"]["bounciness"]))
object_audio = DEFAULT_OBJECT_AUDIO_STATIC_DATA[object_setup_data[o]["model_name"]]
object_audio.mass = object_setup_data[o]["physics"]["mass"]
object_audio.bounciness = object_setup_data[o]["physics"]["bounciness"]
self.static_audio_data_overrides[object_id] = object_audio
# Use default physics values.
else:
self.init_object_commands.extend(self.get_add_physics_object(model_name=object_setup_data[o]["model_name"],
object_id=object_id,
position=object_setup_data[o]["position"],
rotation=object_setup_data[o]["rotation"],
scale_factor=object_setup_data[o]["scale"],
library=object_setup_data[o]["library"]))
# Set the collision detection mode.
self.init_object_commands.append({"$type": "set_object_collision_detection_mode",
"id": object_id,
"mode": "continuous_speculative"})
# "Aim" the ball at the monkey and apply the force.
# Note that this force value was arrived at through a number of trial-and-error iterations.
# Set a suitable drag value to "tune" how hard it will hit the monkey.
self.init_object_commands.extend([{"$type": "object_look_at_position",
"id": RubeGoldbergDemo.BALL_ID,
"position": {"x": -12.95, "y": 1.8, "z": -5.1}},
{"$type": "apply_force_magnitude_to_object",
"id": RubeGoldbergDemo.BALL_ID,
"magnitude": 20},
{"$type": "set_object_drag",
"angular_drag": 5.0,
"drag": 1.0,
"id": RubeGoldbergDemo.BALL_ID}])
# Set the visual material of the ball to metal and the board to a different wood than the bench.
self.init_object_commands.extend(TDWUtils.set_visual_material(self,
ModelLibrarian("models_special.json").get_record("prim_sphere").substructure,
RubeGoldbergDemo.BALL_ID,
"dmd_metallic_fine",
quality="high"))
self.init_object_commands.extend(TDWUtils.set_visual_material(self,
ModelLibrarian("models_core.json").get_record("wood_board").substructure,
RubeGoldbergDemo.BOARD_ID,
"wood_tropical_hardwood",
quality="high"))
# Add a camera.
camera = ThirdPersonCamera(position={"x": -15.57, "y": 1.886, "z": -4.97},
avatar_id="a",
rotation={"x": 6.36, "y": 109.13, "z": 0})
# Initialize audio.
audio_initializer = AudioInitializer(avatar_id="a", framerate=60)
# Add PyImpact.
# Here we have a large number of closely-occuring collisions resulting in a rapid series of "clustered"
# impact sounds, as opposed to a single object falling from a height.
# Using a higher value such as the 0.5 used in the example controller will definitely result in unpleasant
# distortion of the audio.
# Note that logging is also enabled.
self.py_impact = PyImpact(initial_amp=0.25, logging=True,
static_audio_data_overrides=self.static_audio_data_overrides, scrape=False)
# Add a recorder.
self.recorder: PhysicsAudioRecorder = PhysicsAudioRecorder()
# Add the add-ons.
self.add_ons.extend([camera, audio_initializer, self.py_impact, self.recorder])
# Keep track of the current trial number, for logging purposes.
self.current_trial_num = 0
# Set path to write out logging info.
self.output_directory = EXAMPLE_CONTROLLER_OUTPUT_PATH.joinpath("rube_goldberg")
print(f"Logs and .wav files will be output to: {self.output_directory}")
if not self.output_directory.exists():
self.output_directory.mkdir(parents=True)
def run(self, num_trials: int) -> None:
"""
Build a "Rube Goldberg" machine to produce impact sounds.
"""
# Load the photorealistic "archviz_house" environment.
# Set global values, including the desired screen size and aspect ratio (720P).
# Adjust post-processing settings.
# Set the shadow strength to maximum.
self.communicate([self.get_add_scene(scene_name="archviz_house"),
{"$type": "set_render_quality",
"render_quality": 5},
{"$type": "set_screen_size",
"width": 1280,
"height": 720},
{"$type": "set_time_step",
"time_step": 0.02},
{"$type": "set_post_exposure",
"post_exposure": 0.35},
{"$type": "set_screen_space_reflections",
"enabled": True},
{"$type": "set_vignette",
"enabled": False},
{"$type": "set_ambient_occlusion_intensity",
"intensity": 0.175},
{"$type": "set_ambient_occlusion_thickness_modifier",
"thickness": 5.0},
{"$type": "set_shadow_strength",
"strength": 1.0}])
for i in range(num_trials):
self.do_trial()
self.communicate({"$type": "terminate"})
def do_trial(self):
# Keep track of trial number.
self.current_trial_num += 1
# Create folder for this trial's logging info.
dest_dir = self.output_directory.joinpath(str(self.current_trial_num))
if not dest_dir.exists():
dest_dir.mkdir(parents=True)
# Reset PyImpact.
self.py_impact.reset(initial_amp=0.25, static_audio_data_overrides=self.static_audio_data_overrides)
# Initialize the objects.
self.communicate(self.init_object_commands)
# Start recording audio.
self.recorder.start(path=dest_dir.joinpath("audio.wav"))
# Record audio.
while self.recorder.recording:
self.communicate([])
# Save the log.
dest_dir.joinpath("mode_properties_log.json").write_text(json.dumps(self.py_impact.mode_properties_log, indent=2))
# Destroy the objects.
self.communicate(self.destroy_object_commands)
if __name__ == "__main__":
    # Command-line entry point: run the demo for the requested number of trials.
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--num", type=int, default=5, help="Number of trials.")
    arg_parser.add_argument("--launch_build", action="store_true", help="Auto-launch the build")
    cli_args = arg_parser.parse_args()
    RubeGoldbergDemo(launch_build=cli_args.launch_build).run(cli_args.num)
| 59.86802 | 147 | 0.527556 |
36adac71e783fef3070c688e2f865ca97ef28bbb | 691 | py | Python | src/handbook2/part7/snippet17.py | apurvmishra99/keras-idiomatic-programmer | 40ee5482615eadff0a74c349706ae1184c904c13 | [
"Apache-2.0"
] | 2 | 2019-10-11T15:09:03.000Z | 2021-08-01T12:09:10.000Z | src/handbook2/part7/snippet17.py | apurvmishra99/keras-idiomatic-programmer | 40ee5482615eadff0a74c349706ae1184c904c13 | [
"Apache-2.0"
] | null | null | null | src/handbook2/part7/snippet17.py | apurvmishra99/keras-idiomatic-programmer | 40ee5482615eadff0a74c349706ae1184c904c13 | [
"Apache-2.0"
] | 1 | 2021-08-01T12:09:38.000Z | 2021-08-01T12:09:38.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standardize per channel: subtract each channel's mean and divide by its
# standard deviation, computed over the first two axes.
# NOTE(review): assumes `images` is a channels-last array (H, W, C) or
# (N, W, C) so that axis=(0, 1) reduces over everything but channels — confirm.
channel_mean = np.mean(images, axis=(0, 1))
channel_std = np.std(images, axis=(0, 1))
images = (images - channel_mean) / channel_std
| 40.647059 | 76 | 0.748191 |
9b8fc4c984fbd8d17f2fe9711c4043d424188a8f | 1,552 | py | Python | Data-Visulation-Using-matplotlib/code.py | peaceshadow07/greyatom-python-for-data-science | 5ea6d38ac0b1937b5678749efd810ecc401386c1 | [
"MIT"
] | null | null | null | Data-Visulation-Using-matplotlib/code.py | peaceshadow07/greyatom-python-for-data-science | 5ea6d38ac0b1937b5678749efd810ecc401386c1 | [
"MIT"
] | null | null | null | Data-Visulation-Using-matplotlib/code.py | peaceshadow07/greyatom-python-for-data-science | 5ea6d38ac0b1937b5678749efd810ecc401386c1 | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv(path)
loan_status = data['Loan_Status'].value_counts()
loan_status.plot(kind='bar')
#Code starts here
# --------------
#Code starts here
property_and_loan = data.groupby(['Property_Area','Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar',stacked=False)
plt.xlabel('Property Area')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
education_and_loan = data.groupby(['Education','Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar')
plt.xlabel('Education Status')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
# --------------
#Code starts here
graduate = data[data['Education']=='Graduate']
not_graduate = data[data['Education']=='Not Graduate']
graduate.plot(kind='density',label='Graduate')
not_graduate.plot(kind='density',label='Not Grasduate')
#Code ends here
#For automatic legend display
plt.legend()
# --------------
#Code starts here
fig,(ax_1,ax_2,ax_3) = plt.subplots(3,1,figsize=(20,10))
data.plot.scatter(x='ApplicantIncome',y='LoanAmount',ax=ax_1)
ax_1.set_title('Applicant Income')
data.plot.scatter(x='CoapplicantIncome',y='LoanAmount',ax=ax_2)
ax_2.set_title('Coapplicant Income')
data['TotalIncome'] = data['ApplicantIncome']+data['CoapplicantIncome']
data.plot.scatter(x='TotalIncome',y='LoanAmount',ax=ax_3)
ax_3.set_title('Total Income')
| 21.260274 | 83 | 0.684923 |
dc020e52b3a77a4b3e6c463a8eac93482c1b9371 | 5,915 | py | Python | src/webpubsub/azext_webpubsub/_params.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/webpubsub/azext_webpubsub/_params.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/webpubsub/azext_webpubsub/_params.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
get_enum_type
)
from .vendored_sdks.azure_mgmt_webpubsub.models import WebPubSubRequestType
from ._actions import (
EventHandlerTemplateUpdateAction
)
from ._validator import validate_network_rule
WEBPUBSUB_KEY_TYPE = ['primary', 'secondary', 'salt']  # access-key names accepted by `webpubsub key regenerate`
SKU_TYPE = ['Standard_S1', 'Free_F1']  # SKUs offered on create/update
PERMISSION_TYPE = ['joinLeaveGroup', 'sendToGroup']  # permissions handled by `webpubsub service permission`
def load_arguments(self, _):
    """
    Register argument metadata for every `az webpubsub ...` command.
    Called by the azure-cli framework with the command loader as `self`;
    the second positional argument (the command string) is unused here.
    """
    from azure.cli.core.commands.validators import get_default_location_from_resource_group
    # Reusable argument types for the resource name and the hub name.
    # NOTE(review): options_list '--webpubsub-name-name' looks like a typo for
    # '--webpubsub-name'; confirm before changing (every use below overrides it
    # with ['--name', '-n'] anyway).
    webpubsub_name_type = CLIArgumentType(options_list='--webpubsub-name-name', help='Name of the Webpubsub.', id_part='name')
    webpubsubhub_name_type = CLIArgumentType(help='Name of the hub.', id_part='child_name_1')
    # Arguments shared by all `webpubsub` commands.
    with self.argument_context('webpubsub') as c:
        c.argument('tags', tags_type)
        c.argument('location', validator=get_default_location_from_resource_group)
        c.argument('webpubsub_name', webpubsub_name_type, options_list=['--name', '-n'])
    with self.argument_context('webpubsub create') as c:
        c.argument('sku', arg_type=get_enum_type(SKU_TYPE), help='The sku name of the signalr service.')
        c.argument('unit_count', help='The number of signalr service unit count', type=int)
    with self.argument_context('webpubsub update') as c:
        c.argument('sku', arg_type=get_enum_type(SKU_TYPE), help='The sku name of the signalr service.')
        c.argument('unit_count', help='The number of signalr service unit count', type=int)
    with self.argument_context('webpubsub key regenerate') as c:
        c.argument('key_type', arg_type=get_enum_type(WEBPUBSUB_KEY_TYPE), help='The name of access key to regenerate')
    # Network Rule
    with self.argument_context('webpubsub network-rule update', validator=validate_network_rule) as c:
        c.argument('connection_name', nargs='*', help='Space-separeted list of private endpoint connection name.', required=False, arg_group='Private Endpoint Connection')
        c.argument('public_network', arg_type=get_three_state_flag(), help='Set rules for public network.', required=False, arg_group='Public Network')
        c.argument('allow', arg_type=get_enum_type(WebPubSubRequestType), nargs='*', help='The allowed virtual network rule. Space-separeted list of scope to assign.', type=WebPubSubRequestType, required=False)
        c.argument('deny', arg_type=get_enum_type(WebPubSubRequestType), nargs='*', help='The denied virtual network rule. Space-separeted list of scope to assign.', type=WebPubSubRequestType, required=False)
    # Hub commands: delete/show take the id-part hub type; update/create take
    # the full event-handler template arguments.
    for scope in ['webpubsub hub delete',
                  'webpubsub hub show']:
        with self.argument_context(scope) as c:
            c.argument('hub_name', webpubsubhub_name_type)
    for scope in ['webpubsub hub update',
                  'webpubsub hub create']:
        with self.argument_context(scope) as c:
            c.argument('hub_name', help='The hub to manage')
            c.argument('event_handler', action=EventHandlerTemplateUpdateAction, nargs='*', help='Template item for event handler settings. Use key=value pattern to set properties. Supported keys are "url-template", "user-event-pattern", "system-event", "auth-type" and "auth-resource". Setting multiple "system-event" results in an array and for other properties, only last set takes active.')
            c.argument('allow_anonymous', arg_type=get_three_state_flag(), help='Set if anonymous connections are allowed for this hub. True means allow and False means deny.')
    with self.argument_context('webpubsub hub list') as c:
        c.argument('webpubsub_name', webpubsub_name_type, options_list=['--name', '-n'], id_part=None)
    with self.argument_context('webpubsub client') as c:
        c.argument('hub_name', help='The hub which client connects to')
    with self.argument_context('webpubsub service') as c:
        c.argument('hub_name', help='The hub to manage.')
    # Data-plane commands that send a payload.
    for scope in ['webpubsub service broadcast', 'webpubsub service connection send', 'webpubsub service group send', 'webpubsub service user send']:
        with self.argument_context(scope) as c:
            c.argument('payload', help='A string payload to send.')
    # Commands addressed to a single connection.
    for scope in ['webpubsub service connection',
                  'webpubsub service group add-connection',
                  'webpubsub service group remove-connection',
                  'webpubsub service permission grant',
                  'webpubsub service permission revoke',
                  'webpubsub service permission check']:
        with self.argument_context(scope) as c:
            c.argument('connection_id', help='The connection id.')
    # Commands addressed to a group.
    for scope in ['webpubsub service group',
                  'webpubsub service permission grant',
                  'webpubsub service permission revoke',
                  'webpubsub service permission check']:
        with self.argument_context(scope) as c:
            c.argument('group_name', help='The group name.')
    # Commands addressed to a user.
    for scope in ['webpubsub client',
                  'webpubsub service group add-user',
                  'webpubsub service group remove-user',
                  'webpubsub service user']:
        with self.argument_context(scope) as c:
            c.argument('user_id', help='The user id.')
    with self.argument_context('webpubsub service permission') as c:
        c.argument('permission', arg_type=get_enum_type(PERMISSION_TYPE), help='The permission')
| 57.427184 | 394 | 0.680473 |
eb03b90c20a955247d0ed384bcffaf077c0962cc | 1,259 | py | Python | bot/utils/tools.py | markelovstyle/duty-bot | 3c8686e1ab99402e3481b10876e3a529c132e1d6 | [
"Apache-2.0"
] | null | null | null | bot/utils/tools.py | markelovstyle/duty-bot | 3c8686e1ab99402e3481b10876e3a529c132e1d6 | [
"Apache-2.0"
] | null | null | null | bot/utils/tools.py | markelovstyle/duty-bot | 3c8686e1ab99402e3481b10876e3a529c132e1d6 | [
"Apache-2.0"
] | null | null | null | import re
from typing import Union
from pymorphy2 import MorphAnalyzer
from .display import *
m = MorphAnalyzer()
def get_case(
num: Union[float, int],
word: str,
case: str = "nomn",
include: bool = True
) -> str:
inflected = m.parse(word)[0].inflect({case})[0]
p = m.parse(inflected)[0]
agree = p.make_agree_with_number(int(num)).word
if include:
return "{} {}".format(num, agree)
return agree
def display_time(seconds: int, case: str = "nomn", cut: bool = False) -> str:
result = []
for name, count in list(display_intervals.items()):
value = int(seconds // count)
if not value:
continue
seconds -= value * count
if value == 1 and cut:
result.append(name)
else:
result.append(get_case(value, name, case))
return ' '.join(result[:3])
def parse_interval(text: str) -> int:
""" Парсинг ключевых слов (день, час, мин, сек ...)
в секунды.
:param text: -> string
:return: unix (total seconds)
"""
unix = 0
tags = re.findall(r'(\d+)[. ](день|дн|час|мин|сек|мес|г)', text)
for k, v in tags:
if not k:
continue
unix += int(k) * display_intervals[v]
return unix | 24.211538 | 77 | 0.576648 |
65f885d05db630a027c0d26c3bf07283a6cd0d48 | 6,673 | py | Python | docs/tutorials/utils/generate_cow_renders.py | jkxing/pytorch3d | 71dbebe8010a0dac3e56be464778aa48fbd3bcd3 | [
"BSD-3-Clause"
] | 6,041 | 2020-01-23T23:29:41.000Z | 2022-03-31T21:35:13.000Z | docs/tutorials/utils/generate_cow_renders.py | jkxing/pytorch3d | 71dbebe8010a0dac3e56be464778aa48fbd3bcd3 | [
"BSD-3-Clause"
] | 1,054 | 2020-01-24T15:23:15.000Z | 2022-03-31T15:31:59.000Z | docs/tutorials/utils/generate_cow_renders.py | jkxing/pytorch3d | 71dbebe8010a0dac3e56be464778aa48fbd3bcd3 | [
"BSD-3-Clause"
] | 943 | 2020-01-24T00:10:30.000Z | 2022-03-31T04:02:35.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.renderer import (
BlendParams,
FoVPerspectiveCameras,
MeshRasterizer,
MeshRenderer,
PointLights,
RasterizationSettings,
SoftPhongShader,
SoftSilhouetteShader,
look_at_view_transform,
)
# create the default data directory
# (resolved relative to this file: ../data/cow_mesh next to the tutorials dir)
current_dir = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(current_dir, "..", "data", "cow_mesh")
def generate_cow_renders(
    num_views: int = 40, data_dir: str = DATA_DIR, azimuth_range: float = 180
):
    """
    This function generates `num_views` renders of a cow mesh.
    The renders are generated from viewpoints sampled at uniformly distributed
    azimuth intervals. The elevation is kept constant so that the camera's
    vertical position coincides with the equator.
    For a more detailed explanation of this code, please refer to the
    docs/tutorials/fit_textured_mesh.ipynb notebook.
    Args:
        num_views: The number of generated renders.
        data_dir: The folder that contains the cow mesh files. If the cow mesh
            files do not exist in the folder, this function will automatically
            download them.
        azimuth_range: Half-width, in degrees, of the azimuth interval the
            viewpoints are sampled from (centered via the +180 offset below).
    Returns:
        cameras: A batch of `num_views` `FoVPerspectiveCameras` from which the
            images are rendered.
        images: A tensor of shape `(num_views, height, width, 3)` containing
            the rendered images.
        silhouettes: A tensor of shape `(num_views, height, width)` containing
            the rendered silhouettes.
    """
    # set the paths
    # download the cow mesh if not done before
    # NOTE(review): data_dir is interpolated into a shell command below; only
    # pass trusted paths (os.system gives no quoting/escaping).
    cow_mesh_files = [
        os.path.join(data_dir, fl) for fl in ("cow.obj", "cow.mtl", "cow_texture.png")
    ]
    if any(not os.path.isfile(f) for f in cow_mesh_files):
        os.makedirs(data_dir, exist_ok=True)
        os.system(
            f"wget -P {data_dir} "
            + "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj"
        )
        os.system(
            f"wget -P {data_dir} "
            + "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl"
        )
        os.system(
            f"wget -P {data_dir} "
            + "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png"
        )
    # Setup: render on the first CUDA device if available, else on CPU.
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")
    # Load obj file
    obj_filename = os.path.join(data_dir, "cow.obj")
    mesh = load_objs_as_meshes([obj_filename], device=device)
    # We scale normalize and center the target mesh to fit in a sphere of radius 1
    # centered at (0,0,0). (scale, center) will be used to bring the predicted mesh
    # to its original center and scale. Note that normalizing the target mesh,
    # speeds up the optimization but is not necessary!
    verts = mesh.verts_packed()
    N = verts.shape[0]
    center = verts.mean(0)
    scale = max((verts - center).abs().max(0)[0])
    mesh.offset_verts_(-(center.expand(N, 3)))
    mesh.scale_verts_((1.0 / float(scale)))
    # Get a batch of viewing angles.
    elev = torch.linspace(0, 0, num_views)  # keep constant
    azim = torch.linspace(-azimuth_range, azimuth_range, num_views) + 180.0
    # Place a point light in front of the object. As mentioned above, the front of
    # the cow is facing the -z direction.
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
    # Initialize an OpenGL perspective camera that represents a batch of different
    # viewing angles. All the cameras helper methods support mixed type inputs and
    # broadcasting. So we can view the camera from the a distance of dist=2.7, and
    # then specify elevation and azimuth angles for each viewpoint as tensors.
    R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    # Define the settings for rasterization and shading. Here we set the output
    # image to be of size 128X128. As we are rendering images for visualization
    # purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to
    # rasterize_meshes.py for explanations of these parameters. We also leave
    # bin_size and max_faces_per_bin to their default values of None, which sets
    # their values using heuristics and ensures that the faster coarse-to-fine
    # rasterization method is used. Refer to docs/notes/renderer.md for an
    # explanation of the difference between naive and coarse-to-fine rasterization.
    raster_settings = RasterizationSettings(
        image_size=128, blur_radius=0.0, faces_per_pixel=1
    )
    # Create a Phong renderer by composing a rasterizer and a shader. The textured
    # Phong shader will interpolate the texture uv coordinates for each vertex,
    # sample from a texture image and apply the Phong lighting model
    blend_params = BlendParams(sigma=1e-4, gamma=1e-4, background_color=(0.0, 0.0, 0.0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftPhongShader(
            device=device, cameras=cameras, lights=lights, blend_params=blend_params
        ),
    )
    # Create a batch of meshes by repeating the cow mesh and associated textures.
    # Meshes has a useful `extend` method which allows us do this very easily.
    # This also extends the textures.
    meshes = mesh.extend(num_views)
    # Render the cow mesh from each viewing angle
    target_images = renderer(meshes, cameras=cameras, lights=lights)
    # Rasterization settings for silhouette rendering
    sigma = 1e-4
    raster_settings_silhouette = RasterizationSettings(
        image_size=128, blur_radius=np.log(1.0 / 1e-4 - 1.0) * sigma, faces_per_pixel=50
    )
    # Silhouette renderer
    renderer_silhouette = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings_silhouette
        ),
        shader=SoftSilhouetteShader(),
    )
    # Render silhouette images. The 3rd channel of the rendering output is
    # the alpha/silhouette channel
    silhouette_images = renderer_silhouette(meshes, cameras=cameras, lights=lights)
    # binary silhouettes
    silhouette_binary = (silhouette_images[..., 3] > 1e-4).float()
    return cameras, target_images[..., :3], silhouette_binary
| 39.720238 | 88 | 0.69474 |
9b370be840f1dd50e93c2bd481b58fb1afc0f6b1 | 208 | py | Python | py_tdlib/constructors/validate_order_info.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/validate_order_info.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/validate_order_info.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Method
class validateOrderInfo(Method):
chat_id = None # type: "int53"
message_id = None # type: "int53"
order_info = None # type: "orderInfo"
allow_save = None # type: "Bool"
| 23.111111 | 39 | 0.6875 |
15461590a4b08d324a23fcc7522d36f8c235b69f | 8,743 | py | Python | deprecated/update_fc_dataset_record.py | jeremyh/agdc | 2e22c6bdd9305555db3615305ff6a5df6219cd51 | [
"BSD-3-Clause"
] | 34 | 2015-03-10T22:27:50.000Z | 2021-09-27T09:26:31.000Z | deprecated/update_fc_dataset_record.py | jeremyh/agdc | 2e22c6bdd9305555db3615305ff6a5df6219cd51 | [
"BSD-3-Clause"
] | 81 | 2015-01-28T07:41:58.000Z | 2016-01-28T23:01:22.000Z | deprecated/update_fc_dataset_record.py | jeremyh/agdc | 2e22c6bdd9305555db3615305ff6a5df6219cd51 | [
"BSD-3-Clause"
] | 29 | 2015-01-08T05:06:40.000Z | 2021-09-27T09:26:34.000Z | #!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Sub-module to catalogue FC dataset (unpackaged) - called by dbupdater.py
N.B: This functionality is now provided by landsat_ingester.py
Created on 05/10/2012
@author: Alex Ip
'''
import os
import sys
import logging
from osgeo import gdal, osr, gdalconst
import re
import psycopg2
from datetime import datetime
from pytz import timezone
from glob import glob
from EOtools.execute import execute
from EOtools.utils import log_multiline
# Set top level standard output
console_handler = logging.StreamHandler(sys.stdout)  # log to stdout rather than stderr
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')  # bare messages, no timestamps/levels
console_handler.setFormatter(console_formatter)
logger = logging.getLogger(__name__)
if not logger.level:  # 0 == NOTSET: only set a level if none was configured elsewhere
    logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
def update_dataset_record(dataset_dir, db_cursor, refresh=True, debug=False):
    """
    Catalogue a single unpackaged FC (Fractional Cover) dataset directory.
    Parses satellite/sensor/path/row/date from the directory name, looks up
    the matching NBAR dataset record (whose acquisition and extents are
    reused), then inserts a new FC dataset row or, when ``refresh`` is True,
    updates the existing one.
    Arguments:
        dataset_dir: path to the FC dataset directory; its name must match the
            LS<sat>_<sensor>_FC_..._<path>_<row>_<yyyymmdd> pattern below.
        db_cursor: open psycopg2 cursor used for all queries (commit is the
            caller's responsibility).
        refresh: when True an already-catalogued dataset is updated in place;
            when False it is skipped.
        debug: when True, raises the console handler to DEBUG verbosity.
    Raises:
        AssertionError: if no FC datafile is found, the `du` command fails, or
            no matching NBAR dataset record exists.
    """
    if debug:
        console_handler.setLevel(logging.DEBUG)
    logger.debug('update_dataset_record(dataset_dir=%s, db_cursor=%s, refresh=%s, debug=%s) called', dataset_dir, db_cursor, refresh, debug)
    def get_directory_size(directory):
        # Total size of the dataset directory in kilobytes, via `du -sk`.
        command = "du -sk %s | cut -f1" % directory
        logger.debug('executing "%s"', command)
        result = execute(command)
        assert not result['returncode'], '"%s" failed: %s' % (command, result['stderr'])
        logger.debug('stdout = %s', result['stdout'])
        return int(result['stdout'])
    dataset_dir = os.path.abspath(dataset_dir)
    # Parse satellite tag, sensor, level, path/row and acquisition date from
    # the directory name.
    # NOTE(review): the pattern is a non-raw string; \d and \w should be in a
    # raw string (r'...') to avoid invalid-escape warnings on modern Python.
    m = re.match('.*(LS\d)_(\w*)_(FC)_.+_(\d{3})_(\d{3})_(\d{4})(\d{2})(\d{2})$', dataset_dir)
    satellite_tag = m.groups()[0]
    sensor_name = m.groups()[1]
    processing_level = m.groups()[2]
    path = int(m.groups()[3])
    row = int(m.groups()[4])
    date_string = m.groups()[5] + '-' + m.groups()[6] + '-' + m.groups()[7]
    dataset_size = get_directory_size(dataset_dir)
    datafile = glob(os.path.join(dataset_dir, 'scene01', 'L*.tif'))
    assert datafile, 'No FC datafile found in %s' % dataset_dir
    datafile = datafile[0]
    # Convert local time to UTC and strip timestamp
    # NOTE(review): datetime.replace(tzinfo=pytz.timezone(...)) attaches the
    # zone's LMT offset rather than the correct AEST/AEDT offset; the
    # documented pytz idiom is timezone('Australia/ACT').localize(file_mtime).
    # Confirm before changing stored timestamps.
    file_mtime = datetime.fromtimestamp(os.path.getmtime(datafile))
    file_mtime = file_mtime.replace(tzinfo=timezone('Australia/ACT'))
    file_mtime = file_mtime.astimezone(timezone('UTC'))
    file_mtime = file_mtime.replace(tzinfo=None)
    sql = """-- Get scene values from existing NBAR dataset record
select
  coalesce(fc.dataset_id, nextval('dataset_id_seq')) as dataset_id,
  acquisition_id,
  %(dataset_path)s as dataset_path,
  coalesce(fc.level_id, (select level_id from processing_level where upper(level_name) like upper(%(level_name)s) || '%%')) as level_id,
  cast(%(datetime_processed)s as timestamp without time zone) as datetime_processed,
  %(dataset_size)s as dataset_size,
  nbar.crs,
  nbar.ll_x,
  nbar.ll_y,
  nbar.lr_x,
  nbar.lr_y,
  nbar.ul_x,
  nbar.ul_y,
  nbar.ur_x,
  nbar.ur_y,
  nbar.x_pixels,
  nbar.y_pixels,
  fc.dataset_id as fc_dataset_id
from (select * from acquisition
  where satellite_id = (select satellite_id from satellite where upper(satellite_tag) = upper(%(satellite_tag)s))
    and sensor_id = (select sensor_id from sensor inner join satellite using(satellite_id)
      where upper(satellite_tag) = upper(%(satellite_tag)s) and upper(sensor_name) like upper(%(sensor_name)s) || '%%')
    and x_ref = %(x_ref)s
    and y_ref = %(y_ref)s
    and start_datetime between cast(%(date_string)s || ' 00:00:00' as timestamp without time zone)
      and cast(%(date_string)s || ' 23:59:59.999' as timestamp without time zone)
  ) acquisition
inner join (select * from dataset where level_id = 2) nbar using(acquisition_id)
left join (select * from dataset where level_id = 4 -- FC
  and dataset_path = %(dataset_path)s) fc using (acquisition_id)
"""
    params = {'satellite_tag': satellite_tag,
              'sensor_name': sensor_name,
              'x_ref': path,
              'y_ref': row,
              'dataset_path': dataset_dir,
              'level_name': processing_level,
              'datetime_processed': file_mtime,
              'dataset_size': dataset_size,
              'date_string': date_string
              }
    log_multiline(logger.debug, db_cursor.mogrify(sql, params), 'SQL', '\t')
    db_cursor.execute(sql, params)
    result = db_cursor.fetchone()
    assert result, 'NBAR dataset not found for FC dataset %s' % dataset_dir
    # Optional scene metadata.xml is stored verbatim in the dataset record.
    try:
        xml_path = glob(os.path.join(dataset_dir, 'metadata.xml'))[0]
        xml_file = open(xml_path)
        xml_text = xml_file.read()
        xml_file.close()
    except IndexError: # No XML file exists
        logger.debug('No metadata.xml file found')
        xml_text = None
    params = {'dataset_id': result[0],
              'acquisition_id': result[1],
              'dataset_path': result[2],
              'level_id': result[3],
              'datetime_processed': result[4],
              'dataset_size': result[5],
              'crs': result[6],
              'll_x': result[7],
              'll_y': result[8],
              'lr_x': result[9],
              'lr_y': result[10],
              'ul_x': result[11],
              'ul_y': result[12],
              'ur_x': result[13],
              'ur_y': result[14],
              'x_pixels': result[15],
              'y_pixels': result[16],
              'fc_dataset_id': result[17],
              'xml_text': xml_text
              }
    if params['fc_dataset_id']: # FC record already exists
        if refresh:
            logger.info('Updating existing record for %s', dataset_dir)
            sql = """-- Update any values in dataset record not used to find record
update dataset
  set
  datetime_processed = %(datetime_processed)s,
  dataset_size = %(dataset_size)s,
  crs = %(crs)s,
  ll_x = %(ll_x)s,
  ll_y = %(ll_y)s,
  lr_x = %(lr_x)s,
  lr_y = %(lr_y)s,
  ul_x = %(ul_x)s,
  ul_y = %(ul_y)s,
  ur_x = %(ur_x)s,
  ur_y = %(ur_y)s,
  x_pixels = %(x_pixels)s,
  y_pixels = %(y_pixels)s,
  xml_text = %(xml_text)s
where dataset_id = %(dataset_id)s
"""
        else:
            logger.info('Skipping existing record for %s', dataset_dir)
            return
    else: # Record doesn't already exist - insert it
        logger.info('Creating new record for %s', dataset_dir)
        sql = """-- Create new dataset record - acquisition record should already exist for nbar dataset
insert into dataset(
  dataset_id,
  acquisition_id,
  dataset_path,
  level_id,
  datetime_processed,
  dataset_size,
  crs,
  ll_x,
  ll_y,
  lr_x,
  lr_y,
  ul_x,
  ul_y,
  ur_x,
  ur_y,
  x_pixels,
  y_pixels,
  xml_text
  )
values (
  %(dataset_id)s,
  %(acquisition_id)s,
  %(dataset_path)s,
  %(level_id)s,
  %(datetime_processed)s,
  %(dataset_size)s,
  %(crs)s,
  %(ll_x)s,
  %(ll_y)s,
  %(lr_x)s,
  %(lr_y)s,
  %(ul_x)s,
  %(ul_y)s,
  %(ur_x)s,
  %(ur_y)s,
  %(x_pixels)s,
  %(y_pixels)s,
  %(xml_text)s
  )
"""
    log_multiline(logger.debug, db_cursor.mogrify(sql, params), 'SQL', '\t')
    db_cursor.execute(sql, params)
| 33.626923 | 140 | 0.657211 |
26f089a80aa78f0891b780b2a5eba61f08702920 | 1,877 | py | Python | graphs/graphs.py | ravewillow6383/data-structures-and-algorithms-python | 98533ee241a3ae452dab1ecb87aab39742005e35 | [
"MIT"
] | null | null | null | graphs/graphs.py | ravewillow6383/data-structures-and-algorithms-python | 98533ee241a3ae452dab1ecb87aab39742005e35 | [
"MIT"
] | null | null | null | graphs/graphs.py | ravewillow6383/data-structures-and-algorithms-python | 98533ee241a3ae452dab1ecb87aab39742005e35 | [
"MIT"
] | null | null | null | from collections import deque
from stacks import Stack
class Graph:
def __init__(self):
self._vertices = []
def add_vertex(self, value):
vert = Vertex(value)
self._vertices.append(vert)
return vert
def add_edge(self, vert1, vert2, weight=0):
if vert1 in self._vertices and vert2 in self._vertices:
vert1.neighbors.append(Edge(vert2, weight))
def get_neighbors(self, vertex):
return vertex.neighbors
def get_vertices(self):
if len(self._vertices) == 0:
return None
return self._vertices
def __len__(self):
return len(self._vertices)
def breadth_first(self, root, operate):
q = deque()
q.appendleft(root)
to_reset = set()
while q:
current = q.pop()
current.visited = True
to_reset.add(current)
operate(current)
for edge in current.neighbors:
if not edge.vertex.visited:
q.appendleft(edge.vertex)
for vertex in to_reset:
vertex.visited = False
def depth_first(self, root):
s = Stack()
visited = []
vertices = []
s.push(root)
visited.append(root)
while s.top:
current = s.top
vertices.append(current)
neighs = self.get_neighbors(current)
for neighbor[0] in neighs:
if neighbor[0] not in visited:
s.push(neighbor[0])
visited.append(neighbor[0])
return visited
class Edge:
    """A directed, weighted connection to a destination vertex."""
    def __init__(self, vertex, weight=0):
        """Store the destination vertex and an optional weight (default 0)."""
        self.weight = weight
        self.vertex = vertex
class Vertex:
    """A graph node holding a value, its outgoing edges, and a traversal flag."""
    def __init__(self, value):
        """Wrap value with an empty neighbor list and an unset visited flag."""
        self.value = value
        self.visited = False
        self.neighbors = []
65a89d7c22f38fd51847627106e7d043149e86c6 | 8,924 | py | Python | gram/views.py | KingVulkan/Instagram | c15ba6ba13d9fdfca6ed85f52e65ebe78aa04b2c | [
"Unlicense"
] | null | null | null | gram/views.py | KingVulkan/Instagram | c15ba6ba13d9fdfca6ed85f52e65ebe78aa04b2c | [
"Unlicense"
] | null | null | null | gram/views.py | KingVulkan/Instagram | c15ba6ba13d9fdfca6ed85f52e65ebe78aa04b2c | [
"Unlicense"
] | null | null | null | from django.http import Http404
from django.shortcuts import render,redirect
from . models import Image ,Profile, Like, Follow, Comment
import datetime as dt
from django.contrib.auth.decorators import login_required
from django.conf import settings
from . forms import ImageForm, CommentForm, ProfileUpdateForm,UpdateImageCaption
import os
from django.template.defaulttags import register
# Create your views here.
@login_required(login_url='/accounts/login/')
def timeline(request):
    """
    Render the logged-in user's timeline: every image posted by people the
    user follows, newest first. A user who follows nobody gets an empty
    timeline. Any lookup failure is surfaced as a 404.
    """
    date = dt.date.today()
    current_user = request.user
    # One query instead of two: the original re-ran the same Follow filter
    # just to call .count() for a guard that an empty loop already handles.
    following = Follow.objects.filter(follower=current_user)
    images1 = []
    try:
        for follow in following:
            # Resolve each followed user's profile, then collect their posts.
            for followed_profile in Profile.objects.filter(id=follow.user.id):
                for item in Image.objects.filter(user_key=followed_profile.user):
                    images1.append(item)
        images = list(reversed(images1))  # newest posts first
        return render(request, 'all-grams/timeline.html', {"date": date, "timeline_images": images})
    except Exception:
        # Narrowed from a bare `except:`; also removed the unreachable
        # `return render(request, 'all-grams/time.html')` that followed this raise.
        raise Http404()
@login_required(login_url='/accounts/login/')
def search_results(request):
    """Render profile search results for the 'name' query parameter."""
    query = request.GET.get("name")
    if not query:
        # Missing or empty search term: prompt for a valid one.
        return render(request, 'all-grams/search_results.html',
                      {"message": "Please enter a valid username"})
    matches = Profile.find_profile(query)
    return render(request, 'all-grams/search_results.html',
                  {"message": f"{query}", "found_users": matches})
@login_required(login_url='/accounts/login/')
def single_user(request,id):
    """Render the page of a single user profile.

    Raises Http404 when the Profile lookup fails for any reason
    (missing row, malformed id, ...), matching the original behaviour.
    """
    try:
        user = Profile.objects.get(id=id)
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; `except Exception` keeps the 404 semantics
        # without masking interpreter-level signals.
        raise Http404()
    return render(request,'all-grams/single.html',{"user":user})
@login_required(login_url='/accounts/login/')
def single_image(request,image_id):
    """Render the detail page for one Image.

    Raises Http404 when the Image lookup fails, as before.
    """
    try:
        image = Image.objects.get(id= image_id)
    except Exception:
        # Narrowed from a bare `except:` so interpreter-level signals
        # (SystemExit, KeyboardInterrupt) are no longer swallowed.
        raise Http404()
    return render(request, 'all-grams/single_image.html',{"image":image})
@login_required(login_url='/accounts/login/')
def post(request):
    '''
    Display the image-upload form and create a new Image post.

    GET renders an empty ImageForm; a valid POST saves the uploaded image
    with the logged-in user as owner and redirects to the timeline.
    '''
    current_user = request.user
    if request.method == 'POST':
        form = ImageForm(request.POST ,request.FILES)
        if form.is_valid():
            image = form.save(commit = False)
            image.user_key = current_user
            image.likes +=0  # no-op: keeps the like counter at its default
            image.save()
            return redirect( timeline)
    else:
        form = ImageForm()
    # Reached on GET or when the submitted form was invalid (re-renders
    # the bound form with its errors).
    return render(request, 'all-grams/post.html',{"form" : form})
@login_required(login_url='/accounts/login/')
def comment(request, image_id):
    """Show and create comments for the Image identified by image_id.

    GET renders the existing comments with an empty CommentForm; a valid
    POST stores the new comment, bumps the image's comment counter and
    redirects back to the timeline.
    """
    comments = Comment.objects.filter(image_id=image_id)
    current_image = Image.objects.get(id=image_id)
    current_user = request.user
    if request.method == 'POST':
        form = CommentForm(request.POST)
        # (removed: unused `logger_in = request.user` local)
        if form.is_valid():
            comment = form.save(commit = False)
            comment.user_id= current_user
            comment.image_id = current_image
            current_image.comments_number+=1
            current_image.save_image()
            comment.save()
            return redirect(timeline)
    else:
        form = CommentForm()
    return render(request,'all-grams/comment.html',{"form":form,"comments":comments})
@login_required(login_url='/accounts/login/')
def update_profile(request):
    """Create or update the logged-in user's Profile.

    The `try` block handles the "profile already exists" case (update in
    place); the `except` branch runs when the Profile lookup fails and
    creates a fresh Profile instead.
    """
    current_user = request.user
    title = 'Update Profile'
    try:
        requested_profile = Profile.objects.get(user_id = current_user.id)
        if request.method == 'POST':
            form = ProfileUpdateForm(request.POST,request.FILES)
            if form.is_valid():
                requested_profile.profile_photo = form.cleaned_data['profile_photo']
                requested_profile.bio = form.cleaned_data['bio']
                requested_profile.username = form.cleaned_data['username']
                requested_profile.save_profile()
                return redirect( profile )
        else:
            form = ProfileUpdateForm()
    except:
        # NOTE(review): this bare except is used as control flow for
        # "no Profile row yet", but it also hides form/database errors
        # raised inside the try block above - consider catching
        # Profile.DoesNotExist explicitly.
        if request.method == 'POST':
            form = ProfileUpdateForm(request.POST,request.FILES)
            if form.is_valid():
                new_profile = Profile(profile_photo= form.cleaned_data['profile_photo'],bio = form.cleaned_data['bio'],username = form.cleaned_data['username'],user = current_user)
                new_profile.save_profile()
                return redirect( profile )
        else:
            form = ProfileUpdateForm()
    return render(request,'profile/update_profile.html',{"title":title,"current_user":current_user,"form":form})
@login_required(login_url='/accounts/login/')
def profile(request):
    """Render the logged-in user's own profile page.

    Falls back to the 'default_user' Profile when the user has no
    Profile row yet (original behaviour preserved).

    Clean-ups vs. the original: removed the unused `title` local and the
    duplicated `following`/`followers` queries; narrowed the bare
    `except:` to `except Exception:`.
    """
    current_user = request.user
    following = Follow.objects.filter(follower = current_user)
    try:
        profile = Profile.objects.get(user = current_user)
    except Exception:
        # No Profile for this user (or the lookup failed): show the
        # placeholder profile instead.
        profile = Profile.objects.get(username = 'default_user')
    followers = Follow.objects.filter(user = profile)
    return render(request, 'profile/profile.html',{"profile":profile,"current_user":current_user,"following":following,"followers":followers})
@login_required(login_url='/accounts/login/')
def more(request,image_id):
    """Show an image's detail page and allow updating its caption.

    A valid POST stores the new caption and redirects back here; GET (or
    an invalid POST) renders the page with the form.

    Clean-ups vs. the original: the same Image row was fetched twice
    (`image` and `update_image`) - one query suffices - and the unused
    `current_user` local was removed.
    """
    image = Image.objects.get(id = image_id)
    if request.method == 'POST':
        form = UpdateImageCaption(request.POST)
        if form.is_valid():
            image.image_caption = form.cleaned_data['image_caption']
            image.save_image()
            return redirect( more ,image_id)
    else:
        form = UpdateImageCaption()
    return render(request,'all-grams/more.html',{"image":image, "form":form})
@login_required(login_url='/accounts/login/')
def view_profiles(request):
    """List every user profile."""
    return render(request, 'profile/all.html',
                  {"all_profiles": Profile.objects.all()})
@login_required(login_url='/accounts/login/')
def follow(request,profile_id):
    """Toggle the follow relation between the viewer and a profile.

    Not following yet: create a Follow row; already following: delete
    the existing row(s).  Either way, redirect to the profile list.
    """
    viewer = request.user
    target = Profile.objects.get(id = profile_id)
    existing = Follow.objects.filter(follower = viewer, user = target)
    if existing.count() == 0:
        Follow(follower = viewer, user = target).save()
    else:
        existing.delete()
    return redirect(view_profiles)
@login_required(login_url='/accounts/login/')
def like(request,image_id):
    """Toggle the logged-in user's like on an Image.

    First like: increment the image counter and store a Like row.
    Second call: decrement the counter and remove the Like row(s).
    Either way the user is redirected to the timeline.

    Clean-up vs. the original: the trailing `return render(...)` after
    two returning branches was unreachable and has been removed; the
    duplicated Like queryset was collapsed into one.
    """
    requested_image = Image.objects.get(id = image_id)
    current_user = request.user
    existing_likes = Like.objects.filter(image = requested_image,user = current_user)
    if existing_likes.count() == 0:
        requested_image.likes +=1
        requested_image.save_image()
        like = Like(user = current_user, image = requested_image )
        like.save_like()
    else:
        requested_image.likes -=1
        requested_image.save_image()
        for single_unlike in existing_likes:
            single_unlike.unlike()
    return redirect(timeline)
@login_required(login_url='/accounts/login/')
def time(request):
    # Empty/fallback timeline page (shown when there is nothing to list).
    # NOTE(review): the view name shadows the stdlib `time` module name;
    # harmless here since this module does not import `time`.
    return render(request, 'all-grams/time.html')
def test(request):
    # Plain test page.  NOTE(review): unlike the other views this one has
    # no @login_required decorator - confirm that is intended.
    return render(request, 'all-grams/test.html')
| 38.465517 | 405 | 0.613402 |
0127767790510255d141cdf9523020c2de70f3e3 | 1,654 | py | Python | function_grab.py | brianvan555/Stock | f0115ef0773153cd8df361556e398935cdd1bdde | [
"BSD-2-Clause"
] | null | null | null | function_grab.py | brianvan555/Stock | f0115ef0773153cd8df361556e398935cdd1bdde | [
"BSD-2-Clause"
] | null | null | null | function_grab.py | brianvan555/Stock | f0115ef0773153cd8df361556e398935cdd1bdde | [
"BSD-2-Clause"
] | null | null | null | import requests
from io import StringIO
import pandas as pd
import numpy as np
# Browser-like request headers sent to the TWSE (Taiwan Stock Exchange)
# endpoint so it serves the CSV export.
# NOTE(review): the Cookie value embeds captured analytics ids and a
# JSESSIONID - these are stale session artifacts; avoid committing real
# session cookies to source control.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Cookie": "_ga=GA1.3.4382342.1624324217; _ga_F4L5BYPQDJ=GS1.1.1627522340.1.1.1627522368.0; _gid=GA1.3.2021134005.1627800072; JSESSIONID=7EB287947AAEDBB78D3F2CF861E2E493",
    "Host": "www.twse.com.tw",
    "Referer": "https://www.twse.com.tw/zh/",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "same-origin",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
}
def grab_price(date):
    """Download the TWSE daily quote CSV (MI_INDEX, type=ALL) for `date`
    and return it as a DataFrame indexed by the security-code column.

    `date` may be anything whose str() starts with YYYY-MM-DD (e.g. a
    pandas Timestamp); it is reformatted to YYYYMMDD for the query.
    """
    r = requests.post('http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' +
                      str(date).split(' ')[0].replace('-', '') + '&type=ALL', headers=headers)
    # Keep only the quote rows: lines with exactly 17 quoted fields that do
    # not start with '=', stripping embedded spaces from each line first.
    ret = pd.read_csv(StringIO("\n".join([i.translate({ord(c): None for c in ' '})
                                          for i in r.text.split('\n')
                                          if len(i.split('",')) == 17 and i[0] != '='])), header=0)
    ret = ret.set_index('證券代號')
    # (commented out - author noted it was ineffective / "無效")
    # ret['成交金額'] = ret['成交金額'].str.replace(',', '')
    #ret['成交股數'] = ret['成交股數'].str.replace(',', '')
    return ret
| 48.647059 | 175 | 0.588875 |
286b2d7939422958241ed42c2f26195a112d0d33 | 8,093 | py | Python | port/modules/gui.py | diskman88/mpython-desktop-robot | 01cd15fbeeba521ab874cf66f94d3909c4f8c39a | [
"MIT"
] | 53 | 2018-10-15T12:01:24.000Z | 2019-11-22T09:31:02.000Z | port/modules/gui.py | diskman88/mpython-desktop-robot | 01cd15fbeeba521ab874cf66f94d3909c4f8c39a | [
"MIT"
] | 10 | 2018-10-17T13:42:19.000Z | 2019-11-25T06:42:40.000Z | port/modules/gui.py | diskman88/mpython-desktop-robot | 01cd15fbeeba521ab874cf66f94d3909c4f8c39a | [
"MIT"
] | 26 | 2018-12-04T03:53:39.000Z | 2019-11-22T03:40:05.000Z | # gui for mpython
# MIT license; Copyright (c) 2019 Zhang Kaihua(apple_eat@126.com)
import time, math, struct, gc
from framebuf import FrameBuffer
import adafruit_miniqr,gc
class UI():
    """Small widget helpers drawn onto an OLED framebuffer-style display.

    The `oled` object is expected to provide rect/fill_rect/fill_circle/
    RoundRect/Bitmap drawing primitives (as used below) - confirm against
    the display driver.
    """
    def __init__(self, oled):
        self.display = oled
    def ProgressBar(self, x, y, width, height, progress):
        # Rounded "capsule" progress bar: outline plus a filled portion
        # whose length is proportional to `progress` (0-100).
        radius = int(height / 2)
        xRadius = x + radius
        yRadius = y + radius
        doubleRadius = 2 * radius
        innerRadius = radius - 2
        self.display.RoundRect(x, y, width, height, radius, 1)
        maxProgressWidth = int((width - doubleRadius + 1) * progress / 100)
        self.display.fill_circle(xRadius, yRadius, innerRadius, 1)
        self.display.fill_rect(xRadius + 1, y + 2, maxProgressWidth, height - 3, 1)
        self.display.fill_circle(xRadius + maxProgressWidth, yRadius, innerRadius, 1)
    def stripBar(self, x, y, width, height, progress, dir=1, frame=1):
        # Rectangular bar; dir=1 fills left-to-right, dir=0 bottom-to-top.
        self.display.rect(x, y, width, height, frame)
        if dir:
            Progress = int(progress / 100 * width)
            self.display.fill_rect(x, y, Progress, height, 1)
        else:
            Progress = int(progress / 100 * height)
            self.display.fill_rect(x, y + (height - Progress), width, Progress, 1)
    def qr_code(self,str,x,y,scale=2):
        # Render the text as a type-3 QR code at (x, y), `scale` pixels per
        # module (dark modules drawn as colour 0 on a colour-1 background).
        # NOTE(review): the parameter shadows the builtin `str`; renaming
        # it would break keyword callers, so it is left as-is.
        qr = adafruit_miniqr.QRCode(qr_type=3, error_correct=adafruit_miniqr.L)
        qr.add_data(str.encode())
        qr.make()
        for _y in range(qr.matrix.height): # each scanline in the height
            for _x in range(qr.matrix.width):
                if qr.matrix[_x, _y]:
                    self.display.fill_rect(_x*scale + x,_y*scale + y ,scale,scale,0)
                else:
                    self.display.fill_rect(_x*scale + x ,_y*scale + y,scale,scale,1)
        gc.collect()
class multiScreen():
    """Paged "carousel" of bitmap frames with a row of page-indicator dots.

    Draws one frame centred on a 128-pixel-wide display and a dot per
    frame below it (filled dot = active page).
    """
    def __init__(self, oled, framelist, w, h):
        self.display = oled
        self.framelist = framelist
        self.width = w
        self.hight = h          # (sic) frame height
        self.frameCount = len(framelist)
        # 8x8 indicator glyphs: filled ring for the active page, small
        # square for inactive pages.
        self.activeSymbol = bytearray([0x00, 0x18, 0x3c, 0x7e, 0x7e, 0x3c, 0x18, 0x00])
        self.inactiveSymbol = bytearray([0x00, 0x0, 0x0, 0x18, 0x18, 0x0, 0x0, 0x00])
        self.SymbolInterval = 1
    def drawScreen(self, index):
        # Draw frame `index` plus the centred indicator row beneath it.
        self.index = index
        self.display.fill(0)
        self.display.Bitmap(int(64 - self.width / 2), int(0.3 * self.hight), self.framelist[self.index], self.width,
                            self.hight, 1)
        SymbolWidth = self.frameCount * 8 + (self.frameCount - 1) * self.SymbolInterval
        SymbolCenter = int(SymbolWidth / 2)
        starX = 64 - SymbolCenter
        for i in range(self.frameCount):
            x = starX + i * 8 + i * self.SymbolInterval
            y = int(1.1 * self.hight) + 8
            if i == self.index:
                self.display.Bitmap(x, y, self.activeSymbol, 8, 8, 1)
            else:
                self.display.Bitmap(x, y, self.inactiveSymbol, 8, 8, 1)
    def nextScreen(self):
        # Advance to the next frame, wrapping around at the end.
        self.index = (self.index + 1) % self.frameCount
        self.drawScreen(self.index)
class Clock:
    """Analog clock drawn on an OLED display: dial plus hour/minute/second
    hands computed from the local time."""
    def __init__(self, oled, x, y, radius): # clock centre point and radius
        self.display = oled
        self.xc = x
        self.yc = y
        self.r = radius
    def settime(self): # capture the current local time
        t = time.localtime()
        self.hour = t[3]
        self.min = t[4]
        self.sec = t[5]
    def drawDial(self): # draw the dial: outline, hub and 12 hour ticks
        r_tic1 = self.r - 1
        r_tic2 = self.r - 2
        self.display.circle(self.xc, self.yc, self.r, 1)
        self.display.fill_circle(self.xc, self.yc, 2, 1)
        for h in range(12):
            at = math.pi * 2.0 * h / 12.0
            x1 = round(self.xc + r_tic1 * math.sin(at))
            x2 = round(self.xc + r_tic2 * math.sin(at))
            y1 = round(self.yc - r_tic1 * math.cos(at))
            y2 = round(self.yc - r_tic2 * math.cos(at))
            self.display.line(x1, y1, x2, y2, 1)
    def drawHour(self): # draw the hour hand (50% of radius)
        r_hour = int(self.r / 10.0 * 5)
        ah = math.pi * 2.0 * ((self.hour % 12) + self.min / 60.0) / 12.0
        xh = int(self.xc + r_hour * math.sin(ah))
        yh = int(self.yc - r_hour * math.cos(ah))
        self.display.line(self.xc, self.yc, xh, yh, 1)
    def drawMin(self): # draw the minute hand (70% of radius)
        r_min = int(self.r / 10.0 * 7)
        am = math.pi * 2.0 * self.min / 60.0
        xm = round(self.xc + r_min * math.sin(am))
        ym = round(self.yc - r_min * math.cos(am))
        self.display.line(self.xc, self.yc, xm, ym, 1)
    def drawSec(self): # draw the second hand (90% of radius)
        r_sec = int(self.r / 10.0 * 9)
        asec = math.pi * 2.0 * self.sec / 60.0
        xs = round(self.xc + r_sec * math.sin(asec))
        ys = round(self.yc - r_sec * math.cos(asec))
        self.display.line(self.xc, self.yc, xs, ys, 1)
    def drawClock(self): # draw the complete clock
        self.drawDial()
        self.drawHour()
        self.drawMin()
        self.drawSec()
    def clear(self): # erase the clock face
        self.display.fill_circle(self.xc, self.yc, self.r, 0)
class Image():
    """Loader for 1-bit images (binary PBM 'P4' and monochrome BMP) into a
    framebuf.FrameBuffer for drawing on the display."""
    def __init__(self):
        self.image_type = None
    def load(self, path, invert=0):
        """Read the file at `path`, detect its format from the first two
        bytes ('P4' = binary PBM, 'BM' = BMP) and decode it.

        `invert` flips black/white; raises TypeError on other formats.
        """
        self.invert = invert
        with open(path, 'rb') as file:
            self.image_type = file.read(2).decode()
            file.seek(0)
            img_arrays = bytearray(file.read())
        if self.image_type == 'P4':
            fb = self._pbm_decode(img_arrays)
        elif self.image_type == 'BM':
            fb = self._bmp_decode(img_arrays)
        else:
            raise TypeError("Unsupported image format {}".format(self.image_type))
        gc.collect()
        return fb
    def _pbm_decode(self, img_arrays):
        # Parse the PBM header after the 'P4' magic: skip '#' comment
        # lines and collect the two ASCII integers (width, height); the
        # raw packed bitmap follows immediately.
        next_value = bytearray()
        pnm_header = []
        stat = True
        index = 3
        while stat:
            next_byte = bytes([img_arrays[index]])
            if next_byte == b"#":
                while bytes([img_arrays[index]]) not in [b"", b"\n"]:
                    index += 1
            if not next_byte.isdigit():
                if next_value:
                    pnm_header.append(int("".join(["%c" % char for char in next_value])))
                    next_value = bytearray()
            else:
                next_value += next_byte
            if len(pnm_header) == 2:
                stat = False
            index += 1
        pixel_arrays = img_arrays[index:]
        if self.invert == 1:
            # Flip every bit (white <-> black).
            for i in range(len(pixel_arrays)):
                pixel_arrays[i] = (~pixel_arrays[i]) & 0xff
        # Format constant 3 - MONO_HLSB in MicroPython's framebuf.
        return FrameBuffer(pixel_arrays, pnm_header[0], pnm_header[1], 3)
    def _bmp_decode(self, img_arrays):
        # Pull the relevant little-endian fields out of the BMP headers.
        file_size = int.from_bytes(img_arrays[2:6], 'little')
        offset = int.from_bytes(img_arrays[10:14], 'little')
        width = int.from_bytes(img_arrays[18:22], 'little')
        height = int.from_bytes(img_arrays[22:26], 'little')
        bpp = int.from_bytes(img_arrays[28:30], 'little')
        if bpp != 1:
            raise TypeError("Only support 1 bit color bmp")
        # BMP rows are padded to 4-byte boundaries and stored bottom-up;
        # copy them top-down into a tightly packed buffer.
        line_bytes_size = (bpp * width + 31) // 32 * 4
        array_size = width * abs(height) // 8
        pixel_arrays = bytearray(array_size)
        if width % 8:
            array_row = width // 8 + 1
        else:
            array_row = width // 8
        array_col = height
        # print("fileSize:{}, offset: {} ".format(file_size, offset))
        # print("width:{}, height: {},bit_count:{},line_bytes_size:{},array_size:{},".format(
        #     width, height, bpp, line_bytes_size, array_size))
        # print('array_col:{},array_row:{}'.format(array_col, array_row))
        for i in range(array_col):
            for j in range(array_row):
                index = -(array_row * (i + 1) - j)
                _offset = offset + i * line_bytes_size + j
                # NOTE(review): invert==0 applies the bit-flip here (the
                # opposite of _pbm_decode) - presumably to map BMP's
                # palette convention onto the display; confirm intended.
                if self.invert == 0:
                    pixel_byte = (~img_arrays[_offset]) & 0xff
                else:
                    pixel_byte = img_arrays[_offset]
                pixel_arrays[index] = pixel_byte
        return FrameBuffer(pixel_arrays, width, height, 3)
00ac06b4edfcba2a79eb56381245a4cd9f57dac7 | 1,123 | py | Python | hmmyfier.py | dtrain157/Hmmyfier | a1b3a93472691f9ba8d990b89e9cb5fcb9ce1a16 | [
"MIT"
] | null | null | null | hmmyfier.py | dtrain157/Hmmyfier | a1b3a93472691f9ba8d990b89e9cb5fcb9ce1a16 | [
"MIT"
] | null | null | null | hmmyfier.py | dtrain157/Hmmyfier | a1b3a93472691f9ba8d990b89e9cb5fcb9ce1a16 | [
"MIT"
] | null | null | null | import requests
import shutil
import os
import constants
from datetime import datetime, timedelta
from psaw import PushshiftAPI
# Fetch the top-scored submissions of the configured subreddit from the
# last 7 days (via Pushshift) and download each linked image into a
# per-day output folder.
api = PushshiftAPI()
today = datetime.today()
# NOTE(review): this rebinds the imported `timedelta` name with a
# timedelta *instance*; harmless here, but confusing to readers.
timedelta = timedelta(days=-7)
date = today + timedelta
submissions = api.search_submissions(after=int(date.timestamp()),
                                     subreddit=constants.SUBREDDIT,
                                     filter=['url'],
                                     limit=25,
                                     sort_type='score',
                                     sort='desc')
for submission in submissions:
    image_url = submission.url
    # One folder per run day, e.g. <OUTPUT_FOLDER>/20210801/
    folder = constants.OUTPUT_FOLDER + today.strftime('%Y%m%d/')
    if not os.path.exists(folder):
        os.makedirs(folder)
    filename = folder + image_url.split("/")[-1]
    r = requests.get(image_url, stream=True)
    if r.status_code == 200:
        # Stream the response body straight to disk, undecoded.
        r.raw.decode_content = True
        with open(filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
        print('Image sucessfully Downloaded: ', filename)
    else:
        print('Image Couldn\'t be retreived')
| 28.075 | 67 | 0.575245 |
80f6d266fcdd12a1e9cda28991633a4f1a2aaa76 | 123 | py | Python | login/admin.py | BeeZed/BucketList-Travels | 10445137df3a7b3394394d8c70a1b3e006011ae9 | [
"MIT"
] | null | null | null | login/admin.py | BeeZed/BucketList-Travels | 10445137df3a7b3394394d8c70a1b3e006011ae9 | [
"MIT"
] | null | null | null | login/admin.py | BeeZed/BucketList-Travels | 10445137df3a7b3394394d8c70a1b3e006011ae9 | [
"MIT"
] | 1 | 2019-12-02T19:26:36.000Z | 2019-12-02T19:26:36.000Z | from django.contrib import admin
from .models import UserInfo
# Expose the UserInfo model in the Django admin site.
admin.site.register(UserInfo)
| 17.571429 | 32 | 0.804878 |
1ba07ebfce28117e4ad4db3f95de8e0f619cc1b9 | 7,934 | py | Python | item_engine/bnf/grammars/bnf_1_0_0/materials.py | GabrielAmare/ItemEngine | 10277626c3724ad9ae7b934f53e11e305dc34da5 | [
"MIT"
] | null | null | null | item_engine/bnf/grammars/bnf_1_0_0/materials.py | GabrielAmare/ItemEngine | 10277626c3724ad9ae7b934f53e11e305dc34da5 | [
"MIT"
] | null | null | null | item_engine/bnf/grammars/bnf_1_0_0/materials.py | GabrielAmare/ItemEngine | 10277626c3724ad9ae7b934f53e11e305dc34da5 | [
"MIT"
] | null | null | null | from __future__ import annotations
from item_engine.textbase.items.lemmas import Lemma
from item_engine.textbase.items.tokens import Token
from typing import List
# this module has been auto-generated by ItemEngine
__all__ = ['Any_', 'All_', 'GrammarRule', 'Atom_', 'Str', 'Match', 'MatchAs', 'MatchIn', 'All', 'Any', 'Optional', 'Repeat', 'Enum', 'Operator', 'Group', 'Grammar', 'build']
class Any_:
    """Root marker class of the grammar-expression hierarchy (auto-generated)."""
    pass
class All_(Any_):
    """Base for nodes that may appear as one alternative inside an Any."""
    pass
class GrammarRule:
    """Base for top-level grammar statements stored in Grammar.branches."""
    pass
class Atom_(All_):
    """Base for atomic expressions usable inside an All sequence."""
    pass
class Str(Atom_):
    """Literal-string atom of the grammar (auto-generated node type)."""

    def __init__(self, content: str):
        self.content: str = content

    def __str__(self):
        return str(self.content)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.content!r})'

    def __eq__(self, other):
        # Only instances of the exact same node type compare equal.
        if type(self) is not type(other):
            return NotImplemented
        return self.content == other.content

    # Node defines __eq__ without a matching __hash__: unhashable.
    __hash__ = None
class Match(Atom_):
    """Atom referencing another rule by name; renders as '{name}'."""
    def __init__(self, name: str):
        self.name: str = name
    def __str__(self):
        return '{' + str(self.name) + '}'
    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r})'
    def __eq__(self, other):
        if type(self) is type(other):
            return self.name == other.name
        else:
            return NotImplemented
    __hash__ = None
class MatchAs(Atom_):
    """Rule reference captured under a key; renders as '{name !key}'."""

    def __init__(self, name: str, key: str):
        self.name: str = name
        self.key: str = key

    def __str__(self):
        return ''.join(['{', str(self.name), ' !', str(self.key), '}'])

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, {self.key!r})'

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.name, self.key) == (other.name, other.key)

    __hash__ = None
class MatchIn(Atom_):
    """Rule reference collected into a keyed list; renders as '{name *key}'."""
    def __init__(self, name: str, key: str):
        self.name: str = name
        self.key: str = key
    def __str__(self):
        return '{' + str(self.name) + ' *' + str(self.key) + '}'
    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, {self.key!r})'
    def __eq__(self, other):
        if type(self) is type(other):
            return self.name == other.name and self.key == other.key
        else:
            return NotImplemented
    __hash__ = None
class All(All_):
    """Sequence node: every argument must match, in order."""

    def __init__(self, args: List[Atom_]):
        self.args: List[Atom_] = args

    def __str__(self):
        # Space-separated concatenation of the sequence members.
        return ' '.join(str(arg) for arg in self.args)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.args!r})'

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return self.args == other.args

    __hash__ = None
class Any(Any_):
    """Alternation node: any one argument may match; renders as 'a | b | ...'."""
    def __init__(self, args: List[All_]):
        self.args: List[All_] = args
    def __str__(self):
        return ' | '.join(map(str, self.args))
    def __repr__(self):
        return f'{self.__class__.__name__}({self.args!r})'
    def __eq__(self, other):
        if type(self) is type(other):
            return self.args == other.args
        else:
            return NotImplemented
    __hash__ = None
class Optional(Atom_):
    """Optional occurrence of a sub-expression; renders as '[child]'."""

    def __init__(self, child: Any_):
        self.child: Any_ = child

    def __str__(self):
        return '[{0}]'.format(self.child)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.child!r})'

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return self.child == other.child

    __hash__ = None
class Repeat(Atom_):
    """Repetition of a sub-expression; renders as '(child)'."""
    def __init__(self, child: Any_):
        self.child: Any_ = child
    def __str__(self):
        return '(' + str(self.child) + ')'
    def __repr__(self):
        return f'{self.__class__.__name__}({self.child!r})'
    def __eq__(self, other):
        if type(self) is type(other):
            return self.child == other.child
        else:
            return NotImplemented
    __hash__ = None
class Enum(Atom_):
    """Separator-joined repetition; renders as '<separator>.<child>'."""
    def __init__(self, separator: Str, child: MatchIn):
        self.separator: Str = separator
        self.child: MatchIn = child
    def __str__(self):
        return str(self.separator) + '.' + str(self.child)
    def __repr__(self):
        return f'{self.__class__.__name__}({self.separator!r}, {self.child!r})'
    def __eq__(self, other):
        if type(self) is type(other):
            return self.separator == other.separator and self.child == other.child
        else:
            return NotImplemented
    __hash__ = None
class Operator(GrammarRule):
    """Named rule definition; renders as 'name = rule'."""
    def __init__(self, name: str, rule: Any_):
        self.name: str = name
        self.rule: Any_ = rule
    def __str__(self):
        return str(self.name) + ' = ' + str(self.rule)
    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, {self.rule!r})'
    def __eq__(self, other):
        if type(self) is type(other):
            return self.name == other.name and self.rule == other.rule
        else:
            return NotImplemented
    __hash__ = None
class Group(GrammarRule):
    """Named union of other rule names; renders as 'name > a | b | ...'."""
    def __init__(self, name: str, names: List[str]):
        self.name: str = name
        self.names: List[str] = names
    def __str__(self):
        return str(self.name) + ' > ' + ' | '.join(map(str, self.names))
    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, {self.names!r})'
    def __eq__(self, other):
        if type(self) is type(other):
            return self.name == other.name and self.names == other.names
        else:
            return NotImplemented
    __hash__ = None
class Grammar:
    """Root node of a parsed grammar: an ordered list of rule branches."""

    def __init__(self, branches: List[GrammarRule]):
        self.branches: List[GrammarRule] = branches

    def __str__(self):
        # One rule per line, in declaration order.
        return '\n'.join(str(branch) for branch in self.branches)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.branches!r})'

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return self.branches == other.branches

    # Defines __eq__ without __hash__: instances are unhashable.
    __hash__ = None
def build(obj):
    """Recursively convert a parsed Lemma/Token tree into the node classes
    defined above.

    A Lemma dispatches on its `value` (the node-type name), rebuilding its
    `data` children; an unknown name raises ValueError.  A Token leaf
    collapses to its raw `content` string.  Anything else raises TypeError.
    """
    if isinstance(obj, Lemma):
        if obj.value == 'Str':
            return Str(content=build(obj.data['content']))
        elif obj.value == 'Match':
            return Match(name=build(obj.data['name']))
        elif obj.value == 'MatchAs':
            return MatchAs(name=build(obj.data['name']), key=build(obj.data['key']))
        elif obj.value == 'MatchIn':
            return MatchIn(name=build(obj.data['name']), key=build(obj.data['key']))
        elif obj.value == 'All':
            return All(args=list(map(build, obj.data['args'])))
        elif obj.value == 'Any':
            return Any(args=list(map(build, obj.data['args'])))
        elif obj.value == 'Optional':
            return Optional(child=build(obj.data['child']))
        elif obj.value == 'Repeat':
            return Repeat(child=build(obj.data['child']))
        elif obj.value == 'Enum':
            return Enum(separator=build(obj.data['separator']), child=build(obj.data['child']))
        elif obj.value == 'Operator':
            return Operator(name=build(obj.data['name']), rule=build(obj.data['rule']))
        elif obj.value == 'Group':
            return Group(name=build(obj.data['name']), names=list(map(build, obj.data['names'])))
        elif obj.value == 'Grammar':
            return Grammar(branches=list(map(build, obj.data['branches'])))
        else:
            raise ValueError(obj.value)
    elif isinstance(obj, Token):
        return obj.content
    else:
        raise TypeError(type(obj))
bb43fcaa12e2c416db4a6d06624b5c97a072ef07 | 884 | py | Python | tests/fixtures/legacy-backend-package/setup.py | offby1/pipenv | ce83c629385b4e7a791bd6857c2cf1e26ceb2351 | [
"MIT"
] | 18,636 | 2017-12-06T14:53:18.000Z | 2022-03-31T13:12:34.000Z | tests/fixtures/legacy-backend-package/setup.py | offby1/pipenv | ce83c629385b4e7a791bd6857c2cf1e26ceb2351 | [
"MIT"
] | 3,640 | 2017-12-06T16:58:35.000Z | 2022-03-31T22:20:57.000Z | tests/fixtures/legacy-backend-package/setup.py | offby1/pipenv | ce83c629385b4e7a791bd6857c2cf1e26ceb2351 | [
"MIT"
] | 1,987 | 2017-12-06T15:04:51.000Z | 2022-03-26T10:05:15.000Z | import ast
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# Read the package version out of src/<package>/__init__.py by scanning
# for the `__version__ = ...` line, instead of importing the package
# (which could trigger import-time side effects).
ROOT = os.path.dirname(__file__)
PACKAGE_NAME = 'legacy_backend_package'
VERSION = None
with open(os.path.join(ROOT, 'src', PACKAGE_NAME.replace("-", "_"), '__init__.py')) as f:
    for line in f:
        if line.startswith('__version__ = '):
            VERSION = ast.literal_eval(line[len('__version__ = '):].strip())
            break
if VERSION is None:
    raise OSError('failed to read version')
# Put everything in setup.cfg, except those that don't actually work?
setup(
    # These really don't work.
    package_dir={'': 'src'},
    packages=find_packages('src'),
    # I don't know how to specify an empty key in setup.cfg.
    package_data={
        '': ['LICENSE*', 'README*'],
    },
    # I need this to be dynamic.
    version=VERSION,
)
| 24.555556 | 89 | 0.656109 |
fca7957d1749effcd159bf99a1e4a2b93e3914d2 | 463 | py | Python | demo_importing_Ms_packages.py | mrabdsaif/DS-training | aa2b2cb6e25d5f00baafcd699fb6ed64817f4ac3 | [
"MIT"
] | null | null | null | demo_importing_Ms_packages.py | mrabdsaif/DS-training | aa2b2cb6e25d5f00baafcd699fb6ed64817f4ac3 | [
"MIT"
] | null | null | null | demo_importing_Ms_packages.py | mrabdsaif/DS-training | aa2b2cb6e25d5f00baafcd699fb6ed64817f4ac3 | [
"MIT"
] | null | null | null | # importing modules that I have created by importing just the module
# Alternative: import the whole module and use qualified access:
#import helpers
#helpers.display('Sample msg', True)
# Import just the needed item from inside the module:
from helpers import display
display('Sample msg',True)
# A second identical import is a no-op (modules are cached in sys.modules).
from helpers import display
display('Sample msg',False)
# To create a virtual environment, run `python -m venv venv`, then
# activate it from PowerShell so it applies to your workspace.
13ba721fef4a6d5b377b4fbc2b86c3c567229c61 | 21,373 | py | Python | OmniDB/OmniDB_app/views/plugins.py | bylee5/OmniDB-AgensManager | 04da397694b2c7c47e0a9f43c049fab98b4e5fc6 | [
"MIT"
] | null | null | null | OmniDB/OmniDB_app/views/plugins.py | bylee5/OmniDB-AgensManager | 04da397694b2c7c47e0a9f43c049fab98b4e5fc6 | [
"MIT"
] | null | null | null | OmniDB/OmniDB_app/views/plugins.py | bylee5/OmniDB-AgensManager | 04da397694b2c7c47e0a9f43c049fab98b4e5fc6 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.template import loader
from django.http import JsonResponse
from django.core import serializers
from django.shortcuts import redirect
from datetime import datetime
from math import ceil
import json
from os import listdir, makedirs, remove
from os.path import isfile, join, isdir
from OmniDB import settings
import importlib
from configparser import ConfigParser
from itertools import chain
import time
import shutil
import os
import OmniDB_app.include.OmniDatabase as OmniDatabase
from django import forms
class UploadFileForm(forms.Form):
    """Form with a single file-upload field (used by the plugin views)."""
    file = forms.FileField()
#loading python plugins
# Registry of inspected plugins, keyed by plugin name (see load_plugin).
plugins = {}
# Plugins whose plugin.conf was missing/unparsable, keyed by folder name.
failed_plugins = {}
# Monitoring units contributed by loaded plugins (each dict is tagged
# with its 'plugin_name' in load_plugin).
monitoring_units = []
# Handle to OmniDB's own SQLite metadata database.
omnidb_database = OmniDatabase.Generic.InstantiateDatabase(
    'sqlite',
    '',
    '',
    settings.OMNIDB_DATABASE,
    '',
    '',
    '0',
    '',
    True
)
def load_plugin(plugin_folder, p_load):
    """Inspect one plugin folder and register it in `plugins` (or
    `failed_plugins` when its plugin.conf is missing/unparsable).

    Checks for plugin.conf / plugin.js / plugin.py / plugin.css, parses
    name and version from plugin.conf, and - when `p_load` is true and a
    plugin.py exists - copies the folder into a timestamped temp_loaded
    directory and imports its `plugin` module, collecting any
    `monitoring_units` it declares.  Hidden folders (leading '.') are
    skipped.  NOTE(review): assumes `plugin_folder` is non-empty
    (`plugin_folder[0]` would raise IndexError otherwise).
    """
    plugin_name = ''
    plugin_version = ''
    enabled_message = ''
    py_loaded = False
    if plugin_folder[0]=='.':
        return
    if isfile(join(settings.PLUGINS_DIR,plugin_folder,'plugin.conf')):
        conf_exists = True
    else:
        # No configuration file at all: record the failure and bail out.
        failed_plugins[plugin_folder] = {
            'module' : None,
            'folder' : plugin_folder,
            'name' : '',
            'version' : '',
            'conf_exists' : False,
            'js_exists' : False,
            'py_exists' : False,
            'py_loaded' : False,
            'css_exists' : False,
            'message': 'Missing plugin.conf file.',
            'javascript_file': '',
            'css_file' : '',
            'plugin_folder' : ''
        }
        print('Missing plugin.conf file.')
        return
    if isfile(join(settings.PLUGINS_STATIC_DIR,plugin_folder,'plugin.js')):
        js_exists = True
    else:
        js_exists = False
    if isfile(join(settings.PLUGINS_DIR,plugin_folder,'plugin.py')):
        py_exists = True
    else:
        py_exists = False
    if isfile(join(settings.PLUGINS_STATIC_DIR,plugin_folder,'plugin.css')):
        css_exists = True
    else:
        css_exists = False
    module = None
    #if is directory, try to import plugin.py inside it
    if isdir(join(settings.PLUGINS_DIR,plugin_folder)):
        try:
            # plugin.conf is INI-style but headerless; prepend a dummy
            # '[top]' section so ConfigParser accepts it.
            parser = ConfigParser()
            with open(join(settings.PLUGINS_DIR,plugin_folder,'plugin.conf')) as lines:
                lines = chain(("[top]",), lines)
                parser.read_file(lines)
            plugin_name = parser.get('top', 'name')
            plugin_version = parser.get('top', 'version')
            conf_parsed = True
        except Exception as exc:
            enabled = False
            enabled_message = 'Failed to parse plugin configuration file.'
            print('Failed to parse plugin configuration file.')
            failed_plugins[plugin_folder] = {
                'module' : None,
                'folder' : plugin_folder,
                'name' : '',
                'version' : '',
                'conf_exists' : conf_exists,
                'js_exists' : js_exists,
                'py_exists' : py_exists,
                'py_loaded' : False,
                'css_exists' : css_exists,
                'message': 'Failed to parse plugin configuration file.',
                'javascript_file': '/static/plugins/{0}/plugin.js'.format(plugin_folder),
                'css_file' : '/static/plugins/{0}/plugin.css'.format(plugin_folder) if css_exists else '',
                'plugin_folder' : '/static/plugins/{0}/'.format(plugin_folder)
            }
            return
        #check that the plugin name wasn't loaded yet
        try:
            plugin_object = plugins[plugin_name]
            #didn't raise exception, so plugin was already loaded. Exit
            return
        except:
            None
        if p_load and py_exists:
            try:
                # Copy the plugin into a unique temp_loaded/<name>_<ts>
                # folder so a fresh module path can be imported each time.
                loaded_folder_name = '{0}_{1}'.format(plugin_name,str(time.time()).replace('.','_'))
                loaded_folder_complete_name = join(settings.PLUGINS_DIR,'temp_loaded',loaded_folder_name)
                os.mkdir(loaded_folder_complete_name)
                shutil.copytree(join(settings.PLUGINS_DIR,plugin_folder),join(loaded_folder_complete_name,plugin_name))
                module = importlib.import_module('OmniDB_app.plugins.temp_loaded.{0}.{1}.plugin'.format(loaded_folder_name,plugin_name))
                try:
                    # Optional hook: collect the module's monitoring units,
                    # tagging each with the owning plugin's name.
                    mon_units = getattr(module, 'monitoring_units')
                    for mon_unit in mon_units:
                        mon_unit['plugin_name'] = plugin_name
                        monitoring_units.append(mon_unit)
                except Exception as exc:
                    None
                print('Loaded plugin {0}.'.format(plugin_name),flush=True)
                py_loaded = True
            except Exception as exc:
                # Import failed: register the plugin with the error text.
                print('Failed to load plugin {0}: {1}.'.format(plugin_name, str(exc)),flush=True)
                plugins[plugin_name] = {
                    'module' : None,
                    'folder' : plugin_folder,
                    'name' : plugin_name,
                    'version' : plugin_version,
                    'conf_exists' : conf_exists,
                    'js_exists' : js_exists,
                    'py_exists' : py_exists,
                    'py_loaded' : False,
                    'css_exists' : css_exists,
                    'message': str(exc),
                    'javascript_file': '/static/plugins/{0}/plugin.js'.format(plugin_folder),
                    'css_file' : '/static/plugins/{0}/plugin.css'.format(plugin_folder) if css_exists else '',
                    'plugin_folder' : '/static/plugins/{0}/'.format(plugin_folder)
                }
                return
        elif py_exists:
            enabled_message = 'OmniDB needs to be restarted to load plugin python file.'
        plugins[plugin_name] = {
            'module' : module,
            'folder' : plugin_folder,
            'name' : plugin_name,
            'version' : plugin_version,
            'conf_exists' : conf_exists,
            'js_exists' : js_exists,
            'py_exists' : py_exists,
            'py_loaded' : py_loaded,
            'css_exists' : css_exists,
            'message' : enabled_message,
            'javascript_file': '/static/plugins/{0}/plugin.js'.format(plugin_folder),
            'css_file' : '/static/plugins/{0}/plugin.css'.format(plugin_folder) if css_exists else '',
            'plugin_folder' : '/static/plugins/{0}/'.format(plugin_folder)
        }
        return
#loading javascript plugins
def _plugin_status_icon(ok, onclick=''):
    """Return the HTML status icon used on the plugins grid.

    ok: True renders a green check icon, False a warning triangle.
    onclick: optional javascript handler attached to the icon element.
    """
    attr = ' onclick="{0}"'.format(onclick) if onclick else ''
    if ok:
        return '<i class="fas fa-check-circle action-grid action-check"{0}></i>'.format(attr)
    return '<i class="fas fa-exclamation-triangle action-grid action-close"{0}></i>'.format(attr)
def _plugin_grid_row(plugin, enabled_onclick=''):
    """Build one grid row (list of cells) describing *plugin*.

    Columns: folder, name, version, conf/js/py/css presence icons, an
    enabled/failed icon (carrying *enabled_onclick* if given) and a delete
    action cell.
    """
    return [
        plugin['folder'],
        plugin['name'],
        plugin['version'],
        _plugin_status_icon(plugin['conf_exists']),
        _plugin_status_icon(plugin['js_exists']),
        _plugin_status_icon(plugin['py_exists']),
        _plugin_status_icon(plugin['css_exists']),
        _plugin_status_icon(plugin['message'] == '', enabled_onclick),
        '''<i title='Delete Plugin' class='fas fa-times action-grid action-close' onclick='deletePlugin("{0}","{1}")'></i>'''.format(plugin['name'],plugin['folder'])
    ]
def list_plugins(request):
    """Return grid data for both failed and successfully loaded plugins.

    Response 'v_data' carries 'list' (one row per plugin, see
    _plugin_grid_row) and 'message' (the status message matching each row).
    Previously the row-building code was duplicated for both plugin
    registries; it is now shared through the helpers above.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    plugin_list = []
    plugin_message_list = []
    # Plugins that failed to load: the enabled icon has no click handler.
    for key, plugin in failed_plugins.items():
        plugin_list.append(_plugin_grid_row(plugin))
        plugin_message_list.append(plugin['message'])
    # Registered plugins: clicking the enabled icon shows the plugin message.
    for key, plugin in plugins.items():
        plugin_list.append(_plugin_grid_row(plugin, enabled_onclick='getPluginMessage()'))
        plugin_message_list.append(plugin['message'])
    v_return['v_data'] = {
        'list': plugin_list,
        'message': plugin_message_list
    }
    return JsonResponse(v_return)
#loading javascript plugins
def get_plugins(request):
    """List the javascript-enabled, successfully loaded plugins.

    Each entry exposes the plugin name plus the static file locations the
    frontend needs in order to load the plugin.
    """
    v_return = {'v_data': '', 'v_error': False, 'v_error_id': -1}
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    v_return['v_data'] = [
        {
            'name': plugin['name'],
            'file': plugin['javascript_file'],
            'cssfile': plugin['css_file'],
            'folder': plugin['plugin_folder'],
        }
        for plugin in plugins.values()
        if plugin['message'] == '' and plugin['js_exists']
    ]
    return JsonResponse(v_return)
def load_plugins():
    """(Re)load every plugin from disk.

    Steps: remove previously extracted python files from the temp_loaded
    directory, load each plugin folder, then delete monitoring units that
    belong to plugins which no longer exist.
    """
    #delete temp loaded python files
    plugin_temp_files = listdir(join(settings.PLUGINS_DIR,'temp_loaded'))
    for plugin_temp_file in plugin_temp_files:
        try:
            if plugin_temp_file!='.gitkeep':
                item_name = join(settings.PLUGINS_DIR,'temp_loaded',plugin_temp_file)
                if isfile(item_name):
                    os.remove(item_name)
                else:
                    shutil.rmtree(item_name)
        except Exception:
            # Best-effort cleanup: a leftover we cannot delete is not fatal.
            pass
    plugins_folders = listdir(settings.PLUGINS_DIR)
    for plugin_folder in plugins_folders:
        if plugin_folder != 'temp_loaded':
            load_plugin(plugin_folder,True)
    #delete existing monitoring units from plugins that don't exist anymore
    plugin_string = ','.join("'" + plugin['name'] + "'" for plugin in plugins.values())
    # BUGFIX: skip the query entirely when no plugins are loaded; an empty
    # list produced the invalid SQL "not in ()" (previously silently swallowed).
    if plugin_string:
        try:
            omnidb_database.v_connection.Execute('''
                delete
                from units_users_connections
                where plugin_name <> ''
                and plugin_name not in ({0})
            '''.format(plugin_string))
        except Exception:
            # Database may be unavailable during startup; stale units are
            # cleaned up on the next successful reload.
            pass
load_plugins()  # load all plugins once at module import time
#upload plugin
def upload_view(request):
    """Receive a plugin package upload and install it (superuser only).

    The actual extraction and validation is delegated to
    handle_uploaded_file(); this view only performs session and
    permission checks.
    """
    return_object = {
        'v_error': False
    }
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            #Invalid session
            if not request.session.get('omnidb_session'):
                return_object = {
                    'v_error': True,
                    'v_message': 'Session object was destroyed, click <a href="Login.aspx">here</a> to be redirected to login screen or finish what you were doing and reload the page.'
                }
                return JsonResponse(return_object)
            v_session = request.session.get('omnidb_session')
            if not v_session.v_super_user:
                # BUGFIX: the message used to say "delete a plugin" even
                # though this is the upload endpoint.
                return_object = {
                    'v_error': True,
                    'v_message': 'You must be superuser to upload a plugin.'
                }
                return JsonResponse(return_object)
            try:
                return_object = handle_uploaded_file(request.FILES['file'])
            except Exception as exc:
                return_object = {
                    'v_error': True,
                    'v_message': str(exc)
                }
    else:
        form = UploadFileForm()
    return JsonResponse(return_object)
#upload plugin helper
def handle_uploaded_file(f):
    """Extract an uploaded plugin package and install its files.

    The archive must contain exactly one directory under 'plugins/' (the
    python code) and one under 'static/plugins/' (the frontend assets),
    both with the same name.  Returns a dict with 'v_error' and, on
    failure, 'v_message'.  All work happens in a fresh temp directory
    which is removed before returning.
    """
    v_dir_name = join(settings.TEMP_DIR,'{0}'.format(str(time.time()).replace('.','_')))
    makedirs(v_dir_name)
    v_file = join(v_dir_name,f.name)
    with open(v_file, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
    #extracting
    shutil.unpack_archive(v_file,v_dir_name)
    #remove uploaded file
    remove(v_file)
    v_has_plugins_folder = isdir(join(v_dir_name,'plugins'))
    v_has_static_folder = isdir(join(v_dir_name,'static','plugins'))
    v_plugin_folder_name = ''
    if not v_has_plugins_folder:
        shutil.rmtree(v_dir_name)
        return {
            'v_error': True,
            'v_message': '''Package doesn't have the plugins directory.'''
        }
    elif not v_has_static_folder:
        shutil.rmtree(v_dir_name)
        return {
            'v_error': True,
            'v_message': '''Package doesn't have the static/plugins directory.'''
        }
    else:
        try:
            files = listdir(join(v_dir_name,'plugins'))
            if len(files)==0:
                shutil.rmtree(v_dir_name)
                return {
                    'v_error': True,
                    'v_message': '''plugins directory is empty.'''
                }
            elif len(files)>1:
                shutil.rmtree(v_dir_name)
                return {
                    'v_error': True,
                    'v_message': '''plugins directory contains more than one directory.'''
                }
            plugin_dir_name = files[0]
            v_plugin_folder_name = plugin_dir_name
            shutil.move(join(v_dir_name,'plugins',plugin_dir_name), settings.PLUGINS_DIR)
        except Exception as exc:
            # Keep the original best-effort behaviour: a failed move is not
            # reported here; the plugin simply won't be loaded below.
            pass
        try:
            files = listdir(join(v_dir_name,'static','plugins'))
            if len(files)==0:
                shutil.rmtree(v_dir_name)
                return {
                    'v_error': True,
                    'v_message': '''static/plugins directory is empty.'''
                }
            elif len(files)>1:
                shutil.rmtree(v_dir_name)
                return {
                    'v_error': True,
                    'v_message': '''static/plugins directory contains more than one directory.'''
                }
            # BUGFIX: read files[0] only after the length checks above;
            # previously it was indexed before them, raising IndexError on
            # an empty static/plugins directory.
            dir_name = files[0]
            if dir_name!=plugin_dir_name:
                shutil.rmtree(v_dir_name)
                return {
                    'v_error': True,
                    'v_message': '''Folders plugins and static/plugins contain directories with different names.'''
                }
            else:
                shutil.move(join(v_dir_name,'static','plugins',dir_name), settings.PLUGINS_STATIC_DIR)
        except Exception as exc:
            pass
    shutil.rmtree(v_dir_name)
    if v_plugin_folder_name!='':
        try:
            load_plugin(v_plugin_folder_name,False)
        except Exception as exc:
            return {
                'v_error': True,
                'v_message': str(exc)
            }
    return {
        'v_error': False
    }
#reloading plugins
def reload_plugins(request):
    """Re-scan the plugins directory and reload every plugin."""
    response = {
        'v_data': '',
        'v_error': False,
        'v_error_id': -1
    }
    #Invalid session
    if not request.session.get('omnidb_session'):
        response['v_error'] = True
        response['v_error_id'] = 1
        return JsonResponse(response)
    v_session = request.session.get('omnidb_session')
    load_plugins()
    response['v_data'] = True
    return JsonResponse(response)
def delete_plugin(request):
    """Delete a plugin (superuser only), removing its code and static files.

    The plugin is looked up both in the loaded registry (by name) and in the
    failed registry (by folder), so broken plugins can also be removed.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    if not v_session.v_super_user:
        v_return['v_error'] = True
        v_return['v_data'] = 'You must be superuser to delete a plugin.'
        return JsonResponse(v_return)
    json_object = json.loads(request.POST.get('data', None))
    p_plugin_name = json_object['p_plugin_name']
    p_plugin_folder = json_object['p_plugin_folder']
    try:
        plugin = plugins[p_plugin_name]
        try:
            shutil.rmtree(join(settings.PLUGINS_STATIC_DIR,plugin['folder']))
        except:
            pass
        try:
            shutil.rmtree(join(settings.PLUGINS_DIR,plugin['folder']))
        except:
            pass
        del plugins[p_plugin_name]
    except:
        pass
    try:
        plugin = failed_plugins[p_plugin_folder]
        try:
            shutil.rmtree(join(settings.PLUGINS_STATIC_DIR,plugin['folder']))
        except:
            pass
        try:
            shutil.rmtree(join(settings.PLUGINS_DIR,plugin['folder']))
        except:
            pass
        # BUGFIX: remove the entry from failed_plugins; previously this
        # tried `del plugins[p_plugin_name]` again, leaving the failed
        # entry behind in the registry.
        del failed_plugins[p_plugin_folder]
    except:
        pass
    v_return['v_data'] = 'Please restart OmniDB to unload plugin libraries.'
    return JsonResponse(v_return)
def exec_plugin_function(request):
    """Execute a named function exported by a loaded plugin module.

    Expects POST 'data' JSON with: p_plugin_name, p_function_name, p_data,
    p_check_database_connection, p_database_index and p_tab_id.  The plugin
    function receives a dedicated database connection (or None) plus p_data;
    its return value is sent back in 'v_data'.
    """
    v_return = {}
    v_return['v_data'] = ''
    v_return['v_error'] = False
    v_return['v_error_id'] = -1
    #Invalid session
    if not request.session.get('omnidb_session'):
        v_return['v_error'] = True
        v_return['v_error_id'] = 1
        return JsonResponse(v_return)
    v_session = request.session.get('omnidb_session')
    json_object = json.loads(request.POST.get('data', None))
    p_plugin_name = json_object['p_plugin_name']
    p_function_name = json_object['p_function_name']
    p_data = json_object['p_data']
    p_check_database_connection = json_object['p_check_database_connection']
    p_database_index = json_object['p_database_index']
    p_tab_id = json_object['p_tab_id']
    try:
        # Clone the connection bound to the requesting tab so plugin queries
        # run on their own connection instead of sharing the tab's.
        v_database_orig = v_session.v_tab_connections[p_tab_id]
        v_database = OmniDatabase.Generic.InstantiateDatabase(
            v_database_orig.v_db_type,
            v_database_orig.v_connection.v_host,
            str(v_database_orig.v_connection.v_port),
            v_database_orig.v_active_service,
            v_database_orig.v_active_user,
            v_database_orig.v_connection.v_password,
            v_database_orig.v_conn_id,
            v_database_orig.v_alias,
            'OmniDB / {0}'.format(p_plugin_name),
            p_conn_string = v_database_orig.v_conn_string,
            p_parse_conn_string = False
        )
    except:
        # No connection for this tab; the plugin function receives None.
        v_database = None
    #Check database prompt timeout
    if p_check_database_connection and p_database_index:
        v_timeout = v_session.DatabaseReachPasswordTimeout(int(p_database_index))
        if v_timeout['timeout']:
            v_return['v_data'] = {'password_timeout': True, 'message': v_timeout['message'] }
            v_return['v_error'] = True
            return JsonResponse(v_return)
    try:
        print(plugins[p_plugin_name]['module'])
        # Resolve the function by name on the plugin module and call it.
        v_return['v_data'] = getattr(plugins[p_plugin_name]['module'], p_function_name)(v_database,p_data)
    except Exception as exc:
        # NOTE(review): failures are reported with 'password_timeout': True,
        # mirroring the timeout payload shape — presumably the frontend
        # handles both the same way; confirm before changing.
        v_return['v_data'] = {'password_timeout': True, 'message': str(exc) }
        v_return['v_error'] = True
        return JsonResponse(v_return)
    return JsonResponse(v_return)
| 36.410562 | 287 | 0.583727 |
a508b7265813cbd35e60dbaf9685fae34f33a15d | 5,512 | py | Python | app/wqFull/G200/tsMap.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/wqFull/G200/tsMap.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/wqFull/G200/tsMap.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | 2 | 2021-04-04T02:45:59.000Z | 2022-03-19T09:41:39.000Z |
import pandas as pd
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
from hydroDL.master import basinFull
from hydroDL.app.waterQuality import WRTDS
import warnings
# warnings.simplefilter('error')
# Exploratory analysis script: compare LSTM models (several input-label
# combinations) against a WRTDS baseline on the G200N water-quality dataset.
dataName = 'G200N'
# with warnings.catch_warnings():
#     warnings.simplefilter('ignore', category=RuntimeWarning)
#     DF = dbBasin.DataFrameBasin(dataName)
DF = dbBasin.DataFrameBasin(dataName)
codeLst = usgs.newC
# Paired train/test subset names (each train subset has a matching test one).
trainLst = ['rmR20', 'rmL20', 'rmRT20', 'rmYr5', 'B10']
testLst = ['pkR20', 'pkL20', 'pkRT20', 'pkYr5', 'A10']
trainSet = 'rmR20'
testSet = 'pkR20'
# trainSet = 'B10'
# testSet = 'A10'
# Model variants identified by their input-variable label strings.
labelLst = ['QFPRT2C', 'QFRT2C', 'QFPT2C', 'FPRT2C']
nL = len(labelLst)
yLst = list()
for label in labelLst:
    outName = '{}-{}-{}'.format(dataName, label, trainSet)
    yP, ycP = basinFull.testModel(
        outName, DF=DF, testSet=testSet, ep=500)
    yOut = np.ndarray(yP.shape)
    # De-normalize predictions using the per-code mean/std stored in DF.g.
    for k, code in enumerate(codeLst):
        m = DF.g[:, DF.varG.index(code+'-M')]
        s = DF.g[:, DF.varG.index(code+'-S')]
        yOut[:, :, k] = yP[:, :, k]*s+m
    yLst.append(yOut)
# WRTDS
# yW = WRTDS.testWRTDS(dataName, trainSet, testSet, codeLst)
dirRoot = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-dbBasin')
fileName = '{}-{}-{}'.format(dataName, trainSet, 'all')
yW = np.load(os.path.join(dirRoot, fileName)+'.npz')['arr_0']
code = '00945'
indC = codeLst.index(code)
# correlation matrix
d1 = dbBasin.DataModelBasin(DF, subset=trainSet, varY=codeLst)
d2 = dbBasin.DataModelBasin(DF, subset=testSet, varY=codeLst)
siteNoLst = DF.siteNoLst
matW = np.full([len(siteNoLst), 4], np.nan)
matLst = [np.full([len(siteNoLst), 4], np.nan) for x in labelLst]
# Per-site error statistics; sites need enough observations in both subsets
# (>=160 train, >=40 test) to be evaluated.
for indS, siteNo in enumerate(siteNoLst):
    n1 = np.sum(~np.isnan(d1.Y[:, indS, indC]), axis=0)
    n2 = np.sum(~np.isnan(d2.Y[:, indS, indC]), axis=0)
    if n1 >= 160 and n2 >= 40:
        statW = utils.stat.calStat(yW[:, indS, indC], d2.Y[:, indS, indC])
        matW[indS, :] = list(statW.values())
        for k in range(nL):
            yL = yLst[k]
            statL = utils.stat.calStat(
                yL[:, indS, indC], d2.Y[:, indS, indC])
            matLst[k][indS, :] = list(statL.values())
dfCrd = gageII.readData(
    varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
# Map of the last statistic (presumably correlation — confirm against
# utils.stat.calStat ordering) for each model and the WRTDS baseline.
figM, axM = plt.subplots(nL+1, 1, figsize=(8, 6))
for k, label in enumerate(labelLst):
    axplot.mapPoint(axM[k], lat, lon, matLst[k][:, -1], s=12)
axplot.mapPoint(axM[-1], lat, lon, matW[:, -1], s=12)
figM.show()
# Time series plot for one code at one site.
code = '00955'
indC = codeLst.index(code)
indS = 0
figP, axP = plt.subplots(1, 1, figsize=(12, 3))
dataTS = [y[:, indS, indC] for y in yLst[:3]] + \
    [DF.c[:, indS, indC]]
# dataTS = [yLst[2][:, indS, indC], yLst[1][:, indS, indC]] + \
#     [yW[:, indS, indC]]+[DF.c[:, indS, indC]]
axplot.plotTS(axP, DF.t, dataTS, cLst='bcgk')
figP.show()
def funcMap():
    """Build the map figure (per-variable error maps) and an empty detail figure.

    Returns (figM, axM, figP, axP, lon, lat) for use with figplot.clickMap.
    NOTE(review): relies on module-level globals (plotVar, errMatQ2, errMatC2,
    master, codePdf, lat, lon) that are not defined in this file chunk —
    presumably left over from a related script; confirm before running.
    """
    nM = len(plotVar)
    figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
    # A single subplot comes back as a bare Axes; wrap for uniform indexing.
    axM = np.array([axM]) if nM == 1 else axM
    for k, var in enumerate(plotVar):
        if var == '00060':
            # USGS code 00060 is streamflow; plotted from the Q error matrix.
            axplot.mapPoint(axM[k], lat, lon, errMatQ2[:, 0, 1], s=12)
            axM[k].set_title('streamflow')
        else:
            ic = master['varYC'].index(var)
            shortName = codePdf.loc[var]['shortName']
            title = '{} {}'.format(shortName, var)
            axplot.mapPoint(axM[k], lat, lon, errMatC2[:, ic, 1], s=12)
            axM[k].set_title(title)
    figP, axP = plt.subplots(nM, 1, figsize=(8, 6))
    axP = np.array([axP]) if nM == 1 else axP
    return figM, axM, figP, axP, lon, lat
def funcPoint(iP, axP):
    """Plot time series for the site at index *iP* on the detail axes *axP*.

    Called by figplot.clickMap when a map point is clicked.
    NOTE(review): depends on module-level globals (siteNoLst, outName, wqData,
    trainset, testset, ycP1, ycP2, plotVar, basins, waterQuality, codePdf)
    not defined in this file chunk — confirm they exist in the full script.
    """
    siteNo = siteNoLst[iP]
    dfPred, dfObs = basins.loadSeq(outName, siteNo)
    t = dfPred.index.values.astype(np.datetime64)
    # Vertical bar marking the train/test split date.
    tBar = np.datetime64('2000-01-01')
    info1 = wqData.subsetInfo(trainset)
    info2 = wqData.subsetInfo(testset)
    ind1 = info1[info1['siteNo'] == siteNo].index
    ind2 = info2[info2['siteNo'] == siteNo].index
    t1 = info1['date'][ind1].values.astype(np.datetime64)
    t2 = info2['date'][ind2].values.astype(np.datetime64)
    # Sample-wise predictions from both subsets, concatenated in time.
    tp = np.concatenate([t1, t2])
    yp = np.concatenate([ycP1[ind1], ycP2[ind2]])
    for k, var in enumerate(plotVar):
        rmse, corr = waterQuality.calErrSeq(dfPred[var], dfObs[var])
        tStr = '{}, rmse [{:.2f} {:.2f}], corr [{:.2f} {:.2f}]'.format(
            siteNo, rmse[0], rmse[1], corr[0], corr[1])
        if var == '00060':
            styLst = '--'
            title = 'streamflow '+tStr
            axplot.plotTS(axP[k], t, [dfPred[var], dfObs[var]], tBar=tBar,
                          legLst=['LSTM', 'observation'], styLst=styLst, cLst='br')
            axP[k].set_title(title)
        else:
            styLst = '-*'
            shortName = codePdf.loc[var]['shortName']
            title = shortName + ' ' + tStr
            axplot.plotTS(axP[k], t, dfPred[var], tBar=tBar,
                          legLst=['LSTM-sequence'], styLst='-', cLst='b')
            axplot.plotTS(axP[k], tp, yp, legLst=[
                'LSTM-sample'], styLst='*', cLst='g')
            axplot.plotTS(axP[k], t, dfObs[var],
                          legLst=['observation'], styLst='*', cLst='r')
            axP[k].set_title(title)
importlib.reload(figplot)  # pick up any edits to figplot during the session
# Wire the map/point callbacks into an interactive click-to-inspect figure.
figM, figP = figplot.clickMap(funcMap, funcPoint)
| 35.10828 | 83 | 0.596698 |
8e6130c08ab88a516259b8d25172f260f1ec1c29 | 2,285 | py | Python | src/DicomIMG.py | CFOP2357/Support-system-for-reading-mammograms | 97078b95fdcf144e4155a4d0d0eeefb727262e25 | [
"MIT"
] | null | null | null | src/DicomIMG.py | CFOP2357/Support-system-for-reading-mammograms | 97078b95fdcf144e4155a4d0d0eeefb727262e25 | [
"MIT"
] | null | null | null | src/DicomIMG.py | CFOP2357/Support-system-for-reading-mammograms | 97078b95fdcf144e4155a4d0d0eeefb727262e25 | [
"MIT"
] | null | null | null | from pydicom import dcmread
import cv2 as cv
import numpy as np
from PIL import Image
def unsharp_mask(image, kernel_size: tuple = (5, 5), sigma: float = 10000.0, amount: float = 100.0,
threshold: float = 10000.0) -> np.array:
"""Return a sharpened version of the image, using an unsharp mask."""
# Blur image
blurred = cv.GaussianBlur(image, kernel_size, sigma)
# Sharp using the difference between the images (unsharp_mask)
sharpened = float(amount + 1) * image - float(amount) * blurred
# Make sure that all pixel values are in the interval [0, 255]
sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
if threshold > 0:
low_contrast_mask = np.absolute(image - blurred) < threshold
np.copyto(sharpened, image, where=low_contrast_mask) # Do not change pixels where low_contrast_mask is true
sharpened = sharpened.round().astype(np.uint8)
return sharpened
def pixel_array_to_gray(pixel_array):
    """Return a uint8 copy of *pixel_array* linearly rescaled to [0, 255].

    The array minimum maps to 0 and the maximum to 255.  A constant input
    (zero dynamic range) yields an all-zero image instead of dividing by
    zero (which previously produced NaNs).
    """
    pixel_array = pixel_array.astype("float32")
    pixel_array -= np.amin(pixel_array)
    max_val = np.amax(pixel_array)
    # BUGFIX: guard against a constant image, where max_val == 0.
    if max_val == 0:
        return pixel_array.astype("uint8")
    pixel_array *= 255
    pixel_array /= max_val
    return pixel_array.astype("uint8")
def apply_clahe(img):
    """Apply CLAHE filter using GPU"""
    clahe_filter = cv.createCLAHE()          # default clip limit / tile grid
    gpu_img = cv.UMat(img)                   # upload image to the GPU
    gpu_img = clahe_filter.apply(gpu_img)
    # Normalize image to the interval [0, 255]
    gpu_img = cv.normalize(gpu_img, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
    return gpu_img.get()                     # download result back from the GPU
def dcm_to_pil_image_gray(file_path):
    """Read a DICOM file and return it as a gray scale PIL image"""
    dataset = dcmread(file_path)
    pixels = dataset.pixel_array
    # CLAHE-enhanced version of the image.
    img_filtered = Image.fromarray(apply_clahe(pixels).astype("uint8"))
    # Plain version, normalized to the interval [0, 255].
    normalized = cv.normalize(pixels, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
    img = Image.fromarray(normalized.astype("uint8"))
    return [img, img_filtered]
| 35.703125 | 116 | 0.698031 |
6c9e2170c3ce11a30612680fd4a0916ad4a89efd | 215 | py | Python | tests/testresources/pytest_code.py | yukihiko-shinoda/pytest-resource-path | bc56c4b5f2c8f3138baeac7f145717f6a70af7b6 | [
"MIT"
] | 5 | 2020-09-06T01:54:28.000Z | 2021-06-14T11:10:09.000Z | tests/testresources/pytest_code.py | yukihiko-shinoda/pytest-resource-path | bc56c4b5f2c8f3138baeac7f145717f6a70af7b6 | [
"MIT"
] | 5 | 2020-05-23T08:30:01.000Z | 2021-05-01T04:58:06.000Z | tests/testresources/pytest_code.py | yukihiko-shinoda/pytest-resource-path | bc56c4b5f2c8f3138baeac7f145717f6a70af7b6 | [
"MIT"
] | null | null | null | """This module is run in test_dir on pytest for test pytest FixtureRequest."""
def test_function_something():
    """This test is run in test_dir on pytest for test pytest FixtureRequest."""
    assert 2 == 1 + 1
| 30.714286 | 80 | 0.711628 |
a83f1ea5ec94801c215c2ddc2b32063db3724be5 | 2,885 | py | Python | var/spack/repos/builtin/packages/r-rtracklayer/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-11-28T10:14:14.000Z | 2019-11-28T10:14:14.000Z | var/spack/repos/builtin/packages/r-rtracklayer/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/r-rtracklayer/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2017-01-21T17:19:32.000Z | 2017-01-21T17:19:32.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRtracklayer(RPackage):
    """R interface to genome annotation files and the UCSC genome browser.

    Extensible framework for interacting with multiple genome browsers
    (currently UCSC built-in) and manipulating annotation tracks in various
    formats (currently GFF, BED, bedGraph, BED15, WIG, BigWig and 2bit
    built-in). The user may export/import tracks to/from the supported
    browsers, as well as query and modify the browser state, such as the
    current viewport."""
    homepage = "https://bioconductor.org/packages/rtracklayer"
    git      = "https://git.bioconductor.org/packages/rtracklayer.git"
    # Versions are pinned to Bioconductor release commits (no tarballs).
    version('1.44.4', commit='aec96e85daf53b5c5eb2e89250d2755352be4de3')
    version('1.42.2', commit='76702f671faea736807d54aeecfbadcd152d94c5')
    version('1.40.6', commit='ba9a6e711504a702147383bc7abfcc36eb304df7')
    version('1.38.3', commit='f20db703c09dc7e808c09e9b78c15aec9e546248')
    version('1.36.6', commit='8c0ac7230f94e0c5a981acbb178c8de70e968131')
    # Baseline dependencies (minimum versions for the oldest supported release).
    depends_on('r@3.3:', type=('build', 'run'))
    depends_on('r-genomicranges@1.21.20:', type=('build', 'run'))
    depends_on('r-xml@1.98-0:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.13.8:', type=('build', 'run'))
    depends_on('r-s4vectors@0.13.13:', type=('build', 'run'))
    depends_on('r-iranges@2.3.7:', type=('build', 'run'))
    depends_on('r-xvector@0.9.4:', type=('build', 'run'))
    depends_on('r-genomeinfodb@1.3.14:', type=('build', 'run'))
    depends_on('r-biostrings@2.43.7:', type=('build', 'run'))
    depends_on('r-zlibbioc', type=('build', 'run'))
    depends_on('r-rcurl@1.4-2:', type=('build', 'run'))
    depends_on('r-rsamtools@1.17.8:', type=('build', 'run'))
    depends_on('r-genomicalignments@1.5.4:', type=('build', 'run'))
    # Tightened minimum versions required by newer rtracklayer releases.
    depends_on('r-iranges@2.11.12:', when='@1.38.3:', type=('build', 'run'))
    depends_on('r-genomicranges@1.31.8:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.25.1:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-s4vectors@0.17.25:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-iranges@2.13.13:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-xvector@0.19.7:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-genomeinfodb@1.15.2:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-biostrings@2.47.6:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-rsamtools@1.31.2:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-genomicalignments@1.15.6:', when='@1.40.6:', type=('build', 'run'))
    depends_on('r-s4vectors@0.19.22:', when='@1.42.2:', type=('build', 'run'))
ad4e53b0cb536d20f8c1264eaff305135e191859 | 8,929 | py | Python | src/chia_log/log_consumer.py | alexking/chiadog | c6d3fc108dc19266981582402e499636016b9ceb | [
"MIT"
] | 1 | 2022-03-10T08:25:16.000Z | 2022-03-10T08:25:16.000Z | src/chia_log/log_consumer.py | alexking/chiadog | c6d3fc108dc19266981582402e499636016b9ceb | [
"MIT"
] | null | null | null | src/chia_log/log_consumer.py | alexking/chiadog | c6d3fc108dc19266981582402e499636016b9ceb | [
"MIT"
] | null | null | null | """Log consumers are responsible for fetching chia logs
and propagating them to subscribers for further handling.
This abstraction should provide an easy ability to switch between
local file reader and fetching logs from a remote machine.
The latter has not been implemented yet. Feel free to add it.
"""
# std
import logging
from abc import ABC, abstractmethod
from pathlib import Path, PurePosixPath, PureWindowsPath, PurePath
from tempfile import mkdtemp
from threading import Thread
from time import sleep
from typing import List, Optional, Tuple
# project
from src.config import Config
from src.config import check_keys
from src.util import OS
# lib
import paramiko
from paramiko.channel import ChannelStdinFile, ChannelStderrFile, ChannelFile
from pygtail import Pygtail # type: ignore
from retry import retry
class LogConsumerSubscriber(ABC):
    """Interface for log consumer subscribers (i.e. handlers)."""
    @abstractmethod
    def consume_logs(self, logs: str):
        """Called by the consumer with each new log line as it arrives."""
        pass
class LogConsumer(ABC):
    """Abstract class providing common interface for log consumers.

    Maintains the subscriber list and fans out new log lines to every
    registered handler via _notify_subscribers.
    """
    def __init__(self):
        # Handlers that receive each new log line, in subscription order.
        self._subscribers: List[LogConsumerSubscriber] = []
    @abstractmethod
    def stop(self):
        """Stop the consume loop and release any resources."""
        pass
    def subscribe(self, subscriber: LogConsumerSubscriber):
        """Register *subscriber* to receive future log lines."""
        self._subscribers.append(subscriber)
    def _notify_subscribers(self, logs: str):
        # Deliver one log line to every subscriber, in order.
        for subscriber in self._subscribers:
            subscriber.consume_logs(logs)
class FileLogConsumer(LogConsumer):
    """Tail a local log file (via Pygtail) on a background thread."""
    def __init__(self, log_path: Path):
        super().__init__()
        logging.info("Enabled local file log consumer.")
        self._expanded_log_path = str(log_path.expanduser())
        # mkdtemp() returns a str; str / Path works via PurePath.__rtruediv__.
        self._offset_path = mkdtemp() / Config.get_log_offset_path()
        logging.info(f"Using temporary directory {self._offset_path}")
        self._is_running = True
        self._thread = Thread(target=self._consume_loop)
        self._thread.start()
        self._log_size = 0
    def stop(self):
        logging.info("Stopping")
        # Cleanup the temporary file
        if self._offset_path.exists():
            logging.info(f"Deleting {self._offset_path}")
            self._offset_path.unlink()
        self._is_running = False
    # Retried because the log file may briefly disappear or be locked
    # during log rotation.
    @retry((FileNotFoundError, PermissionError), delay=2)
    def _consume_loop(self):
        # Poll the file for new lines while running; Pygtail tracks its
        # position in the offset file so lines are not re-delivered.
        while self._is_running:
            sleep(1)  # throttle polling for new logs
            for log_line in Pygtail(self._expanded_log_path, read_from_end=True, offset_file=self._offset_path):
                self._notify_subscribers(log_line)
class NetworkLogConsumer(LogConsumer):
    """Consume logs over SSH from a remote harvester.

    Base class: opens the SSH session and starts the background consume
    thread; platform-specific subclasses implement the actual tailing in
    their _consume_loop overrides.
    """
    def __init__(
        self, remote_log_path: PurePath, remote_user: str, remote_host: str, remote_port: int, remote_platform: OS
    ):
        super().__init__()
        self._remote_user = remote_user
        self._remote_host = remote_host
        self._remote_port = remote_port
        self._remote_log_path = remote_log_path
        self._remote_platform = remote_platform
        # Tracks the remote file size for rotation detection (see Windows subclass).
        self._log_size = 0
        # Long-lived SSH session used by the consume loop.
        self._ssh_client = paramiko.client.SSHClient()
        self._ssh_client.load_system_host_keys()
        self._ssh_client.connect(hostname=self._remote_host, username=self._remote_user, port=self._remote_port)
        # Start thread
        self._is_running = True
        self._thread = Thread(target=self._consume_loop)
        self._thread.start()
    def stop(self):
        logging.info("Stopping")
        self._is_running = False
    def _consume_loop(self):
        # Base implementation only logs; subclasses call this then tail the file.
        logging.info(
            f"Consuming remote log file {self._remote_log_path}"
            + f" from {self._remote_host}:{self._remote_port} ({self._remote_platform})"
        )
class PosixNetworkLogConsumer(NetworkLogConsumer):
    """Consume logs over SSH from a remote Linux/MacOS harvester."""
    def __init__(
        self, remote_log_path: PurePath, remote_user: str, remote_host: str, remote_port: int, remote_platform: OS
    ):
        logging.info("Enabled Posix network log consumer.")
        super(PosixNetworkLogConsumer, self).__init__(
            remote_log_path, remote_user, remote_host, remote_port, remote_platform
        )
    def _consume_loop(self):
        super(PosixNetworkLogConsumer, self)._consume_loop()
        # `tail -F` follows the file across rotations, so one command suffices.
        stdin, stdout, stderr = self._ssh_client.exec_command(f"tail -F {self._remote_log_path}")
        while self._is_running:
            log_line = stdout.readline()
            self._notify_subscribers(log_line)
class WindowsNetworkLogConsumer(NetworkLogConsumer):
    """Consume logs over SSH from a remote Windows harvester.

    PowerShell's Get-Content -Wait does not follow the file across log
    rotation, so the loop polls the remote file size and restarts the
    tail command whenever the file shrinks.
    """
    def __init__(
        self, remote_log_path: PurePath, remote_user: str, remote_host: str, remote_port: int, remote_platform: OS
    ):
        logging.info("Enabled Windows network log consumer.")
        super(WindowsNetworkLogConsumer, self).__init__(
            remote_log_path, remote_user, remote_host, remote_port, remote_platform
        )
    def _consume_loop(self):
        super(WindowsNetworkLogConsumer, self)._consume_loop()
        stdin, stdout, stderr = self._read_log()
        while self._is_running:
            if self._has_rotated(self._remote_log_path):
                # Give the rotation a moment to settle, then re-open the tail.
                sleep(1)
                stdin, stdout, stderr = self._read_log()
            log_line = stdout.readline()
            self._notify_subscribers(log_line)
    def _read_log(self) -> Tuple[ChannelStdinFile, ChannelFile, ChannelStderrFile]:
        """Start a remote tail of the log file; returns the command's streams."""
        stdin, stdout, stderr = self._ssh_client.exec_command(
            f"powershell.exe Get-Content {self._remote_log_path} -Wait -Tail 1"
        )
        return stdin, stdout, stderr
    def _has_rotated(self, path: PurePath) -> bool:
        """Return True when the remote file size shrank since the last check."""
        stdin, stdout, stderr = self._ssh_client.exec_command(f"powershell.exe Write-Host(Get-Item {str(path)}).length")
        old_size = self._log_size
        self._log_size = int(stdout.readline())
        # A smaller file than before means it was rotated/truncated.
        return old_size > self._log_size
def get_host_info(host: str, user: str, path: str, port: int) -> Tuple[OS, PurePath]:
    """Probe a remote host over SSH to detect its OS and path flavour.

    Runs ``uname -a`` remotely: Linux/macOS answer on stdout, while Windows
    reports the command as "not recognized" on stderr.  Returns the detected
    platform and *path* wrapped in the matching PurePath subclass.  Unknown
    platforms fall back to Linux with an error log.
    """
    client = paramiko.client.SSHClient()
    client.load_system_host_keys()
    client.connect(hostname=host, username=user, port=port)
    try:
        stdin, stdout, stderr = client.exec_command("uname -a")
        fout: str = stdout.readline().lower()
        ferr: str = stderr.readline().lower()
    finally:
        # BUGFIX: close the probe connection; it was previously leaked.  The
        # consumer opens its own long-lived SSH session afterwards.
        client.close()
    if "linux" in fout:
        return OS.LINUX, PurePosixPath(path)
    elif "darwin" in fout:
        return OS.MACOS, PurePosixPath(path)
    elif "not recognized" in ferr:
        return OS.WINDOWS, PureWindowsPath(path)
    else:
        logging.error("Found unsupported platform on remote host, assuming Linux and hope for the best.")
        return OS.LINUX, PurePosixPath(path)
def create_log_consumer_from_config(config: dict) -> Optional[LogConsumer]:
    """Instantiate the single enabled log consumer described by *config*.

    Exactly one consumer section must have "enable": true.  Returns None on
    any misconfiguration (multiple enabled consumers, none enabled, missing
    required keys, or an unknown consumer type).
    """
    enabled_consumer = None
    for consumer in config.keys():
        if config[consumer]["enable"]:
            if enabled_consumer:
                logging.error("Detected multiple enabled consumers. This is unsupported configuration!")
                return None
            enabled_consumer = consumer
    if enabled_consumer is None:
        logging.error("Couldn't find enabled log consumer in config.yaml")
        return None
    enabled_consumer_config = config[enabled_consumer]
    if enabled_consumer == "file_log_consumer":
        if not check_keys(required_keys=["file_path"], config=enabled_consumer_config):
            return None
        return FileLogConsumer(log_path=Path(enabled_consumer_config["file_path"]))
    if enabled_consumer == "network_log_consumer":
        if not check_keys(
            required_keys=["remote_file_path", "remote_host", "remote_user"], config=enabled_consumer_config
        ):
            return None
        # default SSH Port : 22
        remote_port = enabled_consumer_config.get("remote_port", 22)
        # Probe the remote host to pick the right platform-specific consumer.
        platform, path = get_host_info(
            enabled_consumer_config["remote_host"],
            enabled_consumer_config["remote_user"],
            enabled_consumer_config["remote_file_path"],
            remote_port,
        )
        if platform == OS.WINDOWS:
            return WindowsNetworkLogConsumer(
                remote_log_path=path,
                remote_host=enabled_consumer_config["remote_host"],
                remote_user=enabled_consumer_config["remote_user"],
                remote_port=remote_port,
                remote_platform=platform,
            )
        else:
            return PosixNetworkLogConsumer(
                remote_log_path=path,
                remote_host=enabled_consumer_config["remote_host"],
                remote_user=enabled_consumer_config["remote_user"],
                remote_port=remote_port,
                remote_platform=platform,
            )
    logging.error("Unhandled consumer type")
    return None
57ad433b681e21472697960151c3e38848cb80c0 | 660 | py | Python | smserver/controllers/legacy/hello.py | CorySanin/stepmania-server | 0573436abcd5951533f049dc56d91878a40726ad | [
"MIT"
] | 17 | 2016-04-19T18:37:31.000Z | 2020-12-15T04:14:20.000Z | smserver/controllers/legacy/hello.py | CorySanin/stepmania-server | 0573436abcd5951533f049dc56d91878a40726ad | [
"MIT"
] | 40 | 2017-02-05T17:17:13.000Z | 2018-04-08T10:31:39.000Z | smserver/controllers/legacy/hello.py | CorySanin/stepmania-server | 0573436abcd5951533f049dc56d91878a40726ad | [
"MIT"
] | 3 | 2021-03-13T08:40:20.000Z | 2021-09-17T07:31:22.000Z | """ Hello controller """
from smserver.smutils.smpacket import smcommand
from smserver.smutils.smpacket import smpacket
from smserver.stepmania_controller import StepmaniaController
class HelloController(StepmaniaController):
    """Respond to the client's initial HELLO handshake packet."""

    command = smcommand.SMClientCommand.NSCHello
    require_login = False

    def handle(self):
        """Record the client's declared name/version and reply with ours."""
        # Remember what the client told us about itself.
        self.connection.client_version = self.packet["version"]
        self.connection.client_name = self.packet["name"]

        # Answer with the server's own HELLO (protocol version 128).
        reply = smpacket.SMPacketServerNSCHello(
            version=128,
            name=self.server.config.server["name"],
        )
        self.conn.send(reply)
| 31.428571 | 74 | 0.722727 |
b12a67a99b71045ed4a974906b48e05d3cd0ba7c | 1,666 | py | Python | setup.py | kevinhowbrook/wagtail-factories | 2026711169d3cce799fed75d15420d74e2c4afeb | [
"MIT"
] | null | null | null | setup.py | kevinhowbrook/wagtail-factories | 2026711169d3cce799fed75d15420d74e2c4afeb | [
"MIT"
] | null | null | null | setup.py | kevinhowbrook/wagtail-factories | 2026711169d3cce799fed75d15420d74e2c4afeb | [
"MIT"
] | null | null | null | import re
from setuptools import find_packages, setup
install_requires = [
"factory-boy>=2.8.0",
"wagtail>=2.0",
]
docs_require = [
"sphinx>=1.4.0",
]
tests_require = [
"pytest==6.0.1",
"pytest-django==3.9.0",
"pytest-cov==2.7.1",
"pytest-pythonpath==0.7.3",
"psycopg2>=2.3.1",
"coverage==4.5.3",
"isort==4.3.21",
"flake8==3.7.8",
"flake8-blind-except==0.1.1",
"flake8-debugger==3.1.0",
]
with open("README.rst") as fh:
long_description = re.sub(
"^.. start-no-pypi.*^.. end-no-pypi", "", fh.read(), flags=re.M | re.S
)
setup(
name="wagtail_factories",
version="2.0.1",
description="Factory boy classes for wagtail",
long_description=long_description,
author="Michael van Tellingen",
author_email="michaelvantellingen@gmail.com",
url="https://github.com/wagtail/wagtail-factories/",
install_requires=install_requires,
tests_require=tests_require,
extras_require={
"docs": docs_require,
"test": tests_require,
},
entry_points={},
package_dir={"": "src"},
packages=find_packages("src"),
include_package_data=True,
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
zip_safe=False,
)
| 26.444444 | 78 | 0.606242 |
11782cc82324864db8a63219fb350178ee33713b | 7,778 | py | Python | utils.py | 12-wu/- | 69362fa00b3956e1d56caced340389e31cb944a0 | [
"MIT"
] | null | null | null | utils.py | 12-wu/- | 69362fa00b3956e1d56caced340389e31cb944a0 | [
"MIT"
] | null | null | null | utils.py | 12-wu/- | 69362fa00b3956e1d56caced340389e31cb944a0 | [
"MIT"
] | null | null | null | import torch
import random
import torchvision.transforms as transforms
import numpy as np
import cv2
from poissonblending import blend
def gen_input_mask(
        shape, hole_size,
        hole_area=None, max_holes=1):
    """
    * inputs:
        - shape (sequence, required):
                Shape of the mask tensor to generate; length 4 (N, C, H, W).
        - hole_size (sequence, required):
                (W, H) of a hole. NOTE(review): read into hole_w/hole_h
                below but currently unused -- the hole region is hard-coded
                (see the mask assignment at the bottom of the loop).
        - hole_area (sequence, optional):
                ((X, Y), (W, H)) area that would constrain hole placement.
                Currently ignored: the placement logic is commented out.
                The default value is None.
        - max_holes (int, optional):
                Upper bound on the number of holes per sample; the count is
                drawn uniformly from [1, max_holes]. Because the hole region
                is fixed, extra holes just repaint the same pixels, but the
                draw still advances the global `random` state.
                The default value is 1.
    * returns:
            A mask tensor of shape [N, C, H, W]: pixel values are 1.0 inside
            the fixed rectangle rows 35:90, cols 51:111, and 0.0 elsewhere.
    """
    mask = torch.zeros(shape)
    bsize, _, mask_h, mask_w = mask.shape
    for i in range(bsize):
        # Number of holes per sample is random even though placement is fixed.
        n_holes = random.choice(list(range(1, max_holes+1)))
        for _ in range(n_holes):
            # choose patch width (retained but unused; see NOTE below)
            """if isinstance(hole_size[0], tuple) and len(hole_size[0]) == 2:
                hole_w = random.randint(hole_size[0][0], hole_size[0][1])
            else:"""
            hole_w = hole_size[0]
            # choose patch height (retained but unused; see NOTE below)
            """if isinstance(hole_size[1], tuple) and len(hole_size[1]) == 2:
                hole_h = random.randint(hole_size[1][0], hole_size[1][1])
            else:"""
            hole_h = hole_size[1]
            # choose offset upper-left coordinate
            """if hole_area:
                harea_xmin, harea_ymin = hole_area[0]
                harea_w, harea_h = hole_area[1]
                offset_x = random.randint(harea_xmin, harea_xmin + harea_w - hole_w)
                offset_y = random.randint(harea_ymin, harea_ymin + harea_h - hole_h)
            else:"""
            #offset_x = (hole_w[0]//2,hole_w[0]//2)
            #offset_y = (hole_h[0]//2,hole_h[0]//2)
            #print(offset_x)
            #print(offset_y)
            # NOTE(review): hole placement is hard-coded (presumably for one
            # specific dataset) -- the generic random-placement logic above
            # is kept, commented out, for reference. Confirm before reuse.
            mask[i, :, 35 : 90, 51: 111] = 1.0
    return mask
def gen_hole_area(size, mask_size):
    """Pick a random placement for a hole area inside a mask.

    * inputs:
        - size (sequence, required):
                (W, H) of the hole area.
        - mask_size (sequence, required):
                (W, H) of the mask the area must fit inside.
    * returns:
        ((offset_x, offset_y), (W, H)): top-left corner plus extent,
        suitable as the 'hole_area' argument of gen_input_mask.
    """
    area_w, area_h = size
    limit_w, limit_h = mask_size
    # Draw the top-left corner uniformly so the area stays fully inside.
    left = random.randint(0, limit_w - area_w)
    top = random.randint(0, limit_h - area_h)
    return ((left, top), (area_w, area_h))
def crop(x, area):
    """Slice a spatial window out of a 4-D (N, C, H, W) tensor.

    * inputs:
        - x (torch.Tensor, required):
                Tensor of shape (N, C, H, W).
        - area (sequence, required):
                ((X, Y), (W, H)): top-left corner and extent of the window.
    * returns:
        The (N, C, H, W) tensor restricted to the requested window.
    """
    (left, top), (width, height) = area
    return x[:, :, top : top + height, left : left + width]
def sample_random_batch(dataset, batch_size=32):
    """Draw a random mini-batch (with replacement) from a dataset.

    * inputs:
        - dataset (torch.utils.data.Dataset, required):
                Indexable collection whose items are (C, H, W) tensors.
        - batch_size (int, optional):
                Number of items to draw, capped at len(dataset).
    * returns:
        A (B, C, H, W) tensor stacking the sampled items.
    """
    total = len(dataset)
    draws = min(batch_size, total)
    # Each draw is independent, so the same item may appear more than once.
    picks = [
        torch.unsqueeze(dataset[random.choice(range(0, total))], dim=0)
        for _ in range(draws)
    ]
    return torch.cat(picks, dim=0)
def poisson_blend(x, output, mask):
    """
    * inputs:
        - x (torch.Tensor, required)
                Input image tensor of shape (N, 3, H, W).
        - output (torch.Tensor, required)
                Output tensor from Completion Network of shape (N, 3, H, W).
        - mask (torch.Tensor, required)
                Input mask tensor of shape (N, 1, H, W).
    * returns:
            An image tensor of shape (N, 3, H, W) inpainted
            using poisson image editing (cv2.seamlessClone).

    Raises ValueError if a mask contains no 255-valued pixels (nothing to
    locate a hole bounding box from), matching the original behaviour.
    """
    x = x.clone().cpu()
    output = output.clone().cpu()
    mask = mask.clone().cpu()
    mask = torch.cat((mask, mask, mask), dim=1)  # convert to 3-channel format
    num_samples = x.shape[0]
    ret = []
    for i in range(num_samples):
        # PIL conversion, then RGB -> BGR channel swap for OpenCV.
        dstimg = transforms.functional.to_pil_image(x[i])
        dstimg = np.array(dstimg)[:, :, [2, 1, 0]]
        srcimg = transforms.functional.to_pil_image(output[i])
        srcimg = np.array(srcimg)[:, :, [2, 1, 0]]
        msk = transforms.functional.to_pil_image(mask[i])
        msk = np.array(msk)[:, :, [2, 1, 0]]
        # Compute the bounding-box center of the hole. Vectorized with
        # numpy: the original per-pixel Python loop was O(H*W) and also
        # shadowed the sample index `i` with its row counter.
        ys, xs = np.where(msk[:, :, 0] == 255)
        xmin, xmax = int(xs.min()), int(xs.max())
        ymin, ymax = int(ys.min()), int(ys.max())
        center = ((xmax + xmin) // 2, (ymax + ymin) // 2)
        out = cv2.seamlessClone(srcimg, dstimg, msk, center, cv2.NORMAL_CLONE)
        out = out[:, :, [2, 1, 0]]  # BGR -> RGB
        out = transforms.functional.to_tensor(out)
        out = torch.unsqueeze(out, dim=0)
        ret.append(out)
    ret = torch.cat(ret, dim=0)
    return ret
def poisson_blend_old(input, output, mask):
    """
    * inputs:
        - input (torch.Tensor, required)
                Input tensor of Completion Network.
        - output (torch.Tensor, required)
                Output tensor of Completion Network.
        - mask (torch.Tensor, required)
                Input mask tensor of Completion Network.
    * returns:
        Image tensor inpainted using poisson image editing method.
    """
    batch = input.shape[0]
    # Copy each tensor to numpy, converting 'channel first' (N, C, H, W)
    # into 'channel last' (N, H, W, C) layout for the blend() helper.
    input_np = np.transpose(np.copy(input.cpu().numpy()), axes=(0, 2, 3, 1))
    output_np = np.transpose(np.copy(output.cpu().numpy()), axes=(0, 2, 3, 1))
    mask_np = np.transpose(np.copy(mask.cpu().numpy()), axes=(0, 2, 3, 1))
    blended = []
    # Blend each sample independently, then restore channel-first layout.
    for idx in range(batch):
        merged = blend(input_np[idx], output_np[idx], mask_np[idx])
        tensor = torch.from_numpy(np.transpose(merged, axes=(2, 0, 1)))
        blended.append(torch.unsqueeze(tensor, dim=0))
    return torch.cat(blended, dim=0)
| 37.574879 | 93 | 0.559784 |
85b7b22b2db7d245d8cfafc316df2f1d778a00a2 | 7,112 | py | Python | riskslim/tests/test_loss_functions.py | jnirschl/risk-slim | f0dc978780923e38d24e8219766c9f13d82a500f | [
"BSD-3-Clause"
] | 117 | 2017-02-15T01:44:53.000Z | 2022-03-26T14:11:51.000Z | riskslim/tests/test_loss_functions.py | vishalbelsare/risk-slim | 100aca0fc0475e1ffb22f5ecf8ba240384a10adb | [
"BSD-3-Clause"
] | 13 | 2017-07-19T14:34:31.000Z | 2021-11-02T18:28:02.000Z | riskslim/tests/test_loss_functions.py | jnirschl/risk-slim | f0dc978780923e38d24e8219766c9f13d82a500f | [
"BSD-3-Clause"
] | 30 | 2017-04-01T07:21:32.000Z | 2022-03-17T19:27:52.000Z | #noinspection
import numpy as np
import riskslim.loss_functions.fast_log_loss as fast
import riskslim.loss_functions.log_loss as normal
import riskslim.loss_functions.log_loss_weighted as weighted
import riskslim.loss_functions.lookup_log_loss as lookup
from riskslim.setup_functions import _setup_training_weights
np.random.seed(seed = 0)
#initialize data matrix X and label vector Y
n_rows = 1000000
n_cols = 20
rho_ub = 100
rho_lb = -100
#helper function s
def generate_binary_data(n_rows = 1000000, n_cols = 20):
    """Simulate a random binary feature matrix and a signed label vector.

    Returns:
        X: (n_rows, n_cols) matrix of 0/1 features.
        Y: (n_rows, 1) vector with entries in {-1, +1}.
    """
    X = np.random.randint(low=0, high=2, size=(n_rows, n_cols))
    Y = np.random.randint(low=0, high=2, size=(n_rows, 1))
    # Remap the 0 labels to -1 so Y is a signed label vector.
    np.place(Y, Y == 0, -1)
    return X, Y
def generate_integer_model(n_cols = 20, rho_ub = 100, rho_lb = -100, sparse_pct = 0.5, dtype = None):
    """Generate a random, partially-sparse integer coefficient vector.

    Arguments:
        n_cols     -- number of coefficients
        rho_ub     -- exclusive upper bound for np.random.randint
        rho_lb     -- inclusive lower bound for np.random.randint
        sparse_pct -- fraction of floor(n_cols / 2) coefficients forced to 0
        dtype      -- output dtype; defaults to the module-level data matrix
                      Z's dtype (backward compatible with the original,
                      which read the global Z implicitly)

    Returns:
        A Fortran-ordered numpy array of length n_cols.
    """
    if dtype is None:
        # NOTE: relies on the module-level matrix Z, as the original did.
        dtype = Z.dtype
    rho = np.random.randint(low=rho_lb, high=rho_ub, size=n_cols)
    rho = np.require(rho, dtype=dtype, requirements=['F'])
    # Zero out a random subset of coefficients to make the model sparse.
    nnz_count = int(sparse_pct * np.floor(n_cols / 2))
    set_to_zero = np.random.choice(range(0, n_cols), size=nnz_count, replace=False)
    rho[set_to_zero] = 0.0
    return rho
def get_score_bounds(Z_min, Z_max, rho):
    """Return the (min, max) achievable score <rho, z> given per-feature
    bounds Z_min <= z <= Z_max.

    Positive coefficients reach their extremes at Z_min/Z_max, negative
    coefficients at the opposite ends, and zero coefficients contribute
    nothing to either bound.
    """
    lo = np.minimum(rho * Z_min, rho * Z_max)
    hi = np.maximum(rho * Z_min, rho * Z_max)
    nonzero = rho != 0.0
    return lo[nonzero].sum(), hi[nonzero].sum()
def get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = None):
    """Score bounds when every coefficient may range over [rho_lb, rho_ub].

    Uses the module-level boolean mask ``L0_reg_ind`` (regularized features)
    whenever an L0 budget ``L0_max`` restricts how many regularized
    coefficients can be nonzero.
    """
    # Each column's extreme contribution lies at one of the four corners.
    corners = np.vstack((Z_min * rho_lb, Z_max * rho_lb,
                         Z_min * rho_ub, Z_max * rho_ub))
    per_col_min = np.min(corners, axis=0)
    per_col_max = np.max(corners, axis=0)
    if L0_max is None or L0_max == Z_min.shape[0]:
        # No effective sparsity budget: every feature contributes.
        return np.sum(per_col_min), np.sum(per_col_max)
    # Budgeted case: only the L0_max most extreme regularized columns count;
    # unregularized columns always contribute.
    s_min = (np.sum(np.sort(per_col_min[L0_reg_ind])[0:L0_max])
             + np.sum(per_col_min[~L0_reg_ind]))
    s_max = (np.sum(-np.sort(-per_col_max[L0_reg_ind])[0:L0_max])
             + np.sum(per_col_max[~L0_reg_ind]))
    return s_min, s_max
#generate data
X, Y = generate_binary_data(n_rows, n_cols)
Z = X * Y
Z = np.require(Z, requirements=['F'], dtype=np.float64)
rho = generate_integer_model(n_cols, rho_ub, rho_lb)
L0_reg_ind = np.ones(n_cols, dtype='bool')
L0_reg_ind[0] = False
Z_min = np.min(Z, axis = 0)
Z_max = np.max(Z, axis = 0)
#setup weights
weights = _setup_training_weights(Y, w_pos = 1.0, w_neg = 1.0, w_total_target = 2.0)
#create lookup table
min_score, max_score = get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = n_cols)
loss_value_tbl, prob_value_tbl, loss_tbl_offset = lookup.get_loss_value_and_prob_tables(min_score, max_score)
loss_tbl_offset = int(loss_tbl_offset)
#assert correctnes of log_loss from scores function
for s in range(int(min_score), int(max_score)+1):
normal_value = normal.log_loss_value_from_scores(np.array(s, dtype = Z.dtype, ndmin = 1)) #loss_value_tbl[s+loss_tbl_offset]
cython_value = fast.log_loss_value_from_scores(np.array(s, dtype = Z.dtype, ndmin = 1))
table_value = loss_value_tbl[s+loss_tbl_offset]
lookup_value = lookup.log_loss_value_from_scores(np.array(s,dtype = Z.dtype, ndmin = 1), loss_value_tbl, loss_tbl_offset)
assert(np.isclose(normal_value, cython_value, rtol = 1e-06))
assert(np.isclose(table_value, cython_value, rtol = 1e-06))
assert(np.isclose(table_value, normal_value, rtol = 1e-06))
assert(np.equal(table_value, lookup_value))
#python implementations need to be 'C' aligned instead of D aligned
Z_py = np.require(Z, requirements = ['C'])
rho_py = np.require(rho, requirements = ['C'])
scores_py = Z_py.dot(rho_py)
#define tests
def normal_value_test(): return normal.log_loss_value(Z_py, rho_py)
def fast_value_test(): return fast.log_loss_value(Z, rho)
def lookup_value_test(): return lookup.log_loss_value(Z, rho, loss_value_tbl, loss_tbl_offset)
def normal_cut_test(): return normal.log_loss_value_and_slope(Z_py, rho_py)
def fast_cut_test(): return fast.log_loss_value_and_slope(Z, rho)
def lookup_cut_test(): return lookup.log_loss_value_and_slope(Z, rho, loss_value_tbl, prob_value_tbl, loss_tbl_offset)
# def dynamic_lookup_value_test():
# s_min_dynamic, s_max_dynamic = get_score_bounds(Z_min, Z_max, rho)
# tbl, offset = lookup.get_loss_value_table(s_min_dynamic, s_max_dynamic)
# return lookup.log_loss_value(Z, rho, tbl, offset)
#check values and cuts
normal_cut = normal_cut_test()
cython_cut = fast_cut_test()
lookup_cut = lookup_cut_test()
assert(np.isclose(fast_value_test(), lookup_value_test()))
assert(np.isclose(normal_cut[0], cython_cut[0]))
assert(np.isclose(lookup_cut[0], cython_cut[0]))
assert(all(np.isclose(normal_cut[1], cython_cut[1])))
assert(all(np.isclose(lookup_cut[1], cython_cut[1])))
print("passed cut tests")
#weighted tests
def weighted_value_test(weights): return weighted.log_loss_value(Z_py, weights, np.sum(weights), rho_py)
def weighted_cut_test(weights): return weighted.log_loss_value_and_slope(Z_py, weights, np.sum(weights), rho_py)
def weighted_scores_test(weights): return weighted.log_loss_value_from_scores(weights, np.sum(weights), scores_py)
#w_pos = w_neg = 1.0
weights = _setup_training_weights(Y, w_pos = 1.0, w_neg = 1.0, w_total_target = 2.0)
weights_match_unit_weights = all(weights == 1.0)
if weights_match_unit_weights:
print("tests for match between normal and weighted loss function")
#value
assert(np.isclose(normal_value_test(), weighted_value_test(weights)))
assert(np.isclose(normal_value_test(), weighted_scores_test(weights)))
#cut
normal_cut = normal_cut_test()
weighted_cut = weighted_cut_test(weights)
assert(np.isclose(normal_cut[0], weighted_cut[0]))
assert(all(np.isclose(normal_cut[1], weighted_cut[1])))
print("passed all tests for weighted implementations when w_pos = w_neg = 1.0")
#w_pos = w_neg = 1.0
w_pos = 0.5 + np.random.rand()
w_neg = 1.0
weights = _setup_training_weights(Y, w_pos = 0.5 + np.random.rand(), w_neg = 1.0, w_total_target = 2.0)
weighted_value = weighted_value_test(weights)
weighted_cut = weighted_cut_test(weights)
weighted_value_from_scores = weighted_scores_test(weights)
assert(np.isclose(weighted_value, weighted_value_from_scores))
assert(np.isclose(weighted_value, weighted_cut[0]))
print("passed all tests for weighted loss functions when w_pos = %1.2f and w_neg = %1.2f" % (w_pos, w_neg))
# print 'timing for loss value computation \n'
# %timeit -n 20 normal_value = normal_value_test()
# %timeit -n 20 cython_value = fast_value_test()
# %timeit -n 20 lookup_value = lookup_value_test()
#
# print 'timing for loss cut computation \n'
# %timeit -n 20 normal_cut = normal_cut_test()
# %timeit -n 20 cython_cut = fast_cut_test()
# %timeit -n 20 lookup_cut = lookup_cut_test()
| 38.652174 | 128 | 0.724269 |
2eb21f542889b27a5ba82a8ab7bc716cef9f98b7 | 3,461 | py | Python | src/programy/parser/template/nodes/extension.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/parser/template/nodes/extension.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/parser/template/nodes/extension.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.utils.classes.loader import ClassLoader
from programy.parser.template.nodes.base import TemplateNode
from programy.utils.text.text import TextUtils
from programy.parser.exceptions import ParserException
######################################################################################################################
#
class TemplateExtensionNode(TemplateNode):
    """AIML template node that delegates resolution to a user extension class.

    The node's ``path`` (taken from the ``path`` attribute or a child
    ``<path>`` element) names a Python class with an
    ``execute(client_context, data)`` method; the node's resolved children
    are passed to it as ``data``.
    """

    def __init__(self):
        TemplateNode.__init__(self)
        # Filled in later by parse_expression().
        self._path = None

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        self._path = path

    def resolve_to_string(self, client_context):
        """Resolve child nodes, then run the extension class on the result."""
        data = self.resolve_children_to_string(client_context)
        # A fresh extension instance is created for every resolution.
        new_class = ClassLoader.instantiate_class(self._path)
        instance = new_class()
        resolved = instance.execute(client_context, data)
        YLogger.debug(client_context, "[%s] resolved to [%s]", self.to_string(), resolved)
        return resolved

    def to_string(self):
        return "[EXTENSION (%s)]" % self._path

    def to_xml(self, client_context):
        """Serialise this node (and its children) back to AIML XML."""
        xml = '<extension'
        xml += ' path="%s"' % self._path
        xml += '>'
        xml += self.children_to_xml(client_context)
        xml += '</extension>'
        return xml

    #######################################################################################################
    # EXTENSION_EXPRESSION ::== <extension>
    #                               <path>programy.etension.SomeModule</path>
    #                               parameters
    #                           </extension>

    def parse_expression(self, graph, expression):
        """Parse an <extension> element; path may be an attribute or a child.

        Raises:
            ParserException: if no path was supplied either way.
        """
        if 'path' in expression.attrib:
            self.path = expression.attrib['path']
        # Text before the first child is parsed as template content.
        head_text = self.get_text_from_element(expression)
        self.parse_text(graph, head_text)
        for child in expression:
            tag_name = TextUtils.tag_from_text(child.tag)
            if tag_name == 'path':
                self.path = self.get_text_from_element(child)
            else:
                graph.parse_tag_expression(child, self)
            # Trailing text after each child is also template content.
            tail_text = self.get_tail_from_element(child)
            self.parse_text(graph, tail_text)
        if self.path is None:
            raise ParserException("EXTENSION node, path attribute missing !")
| 38.455556 | 120 | 0.6423 |
aec7bf983d79435d0e6e4bc7d02a2c12ee82315d | 4,147 | py | Python | LipidFinder/PeakFilter/SolventCalcs.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | [
"MIT"
] | 12 | 2018-03-12T06:43:29.000Z | 2021-12-30T03:01:12.000Z | LipidFinder/PeakFilter/SolventCalcs.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | [
"MIT"
] | 6 | 2019-05-07T02:40:19.000Z | 2022-01-25T18:58:16.000Z | LipidFinder/PeakFilter/SolventCalcs.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | [
"MIT"
] | 12 | 2019-01-15T10:48:17.000Z | 2021-12-30T03:01:13.000Z | # Copyright (c) 2019 J. Alvarez-Jarreta and C.J. Brasher
#
# This file is part of the LipidFinder software tool and governed by the
# 'MIT License'. Please see the LICENSE file that should have been
# included as part of this software.
"""Set of methods focused on adjust sample replicate intensities that
may be altered during the experiment:
> remove_solvent_effect():
Remove the effect of solvent samples on biological samples.
> remove_low_intensity_frames():
Discard features (rows) where intensity is below the set
threshold.
Examples:
>>> from Configuration import LFParameters
>>> from LFDataFrame import LFDataFrame
>>> from PeakFilter import SolventCalcs
>>> parameters = LFParameters('peakfilter', 'parameters.json')
>>> data = LFDataFrame('dataset.csv', parameters)
>>> SolventCalcs.remove_solvent_effect(data, parameters)
>>> SolventCalcs.remove_low_intensity_frames(data, parameters)
"""
import numpy
import re
import pandas
from LipidFinder.PeakFilter import OutlierCorrection
def remove_solvent_effect(data, parameters):
    # type: (LFDataFrame, LFParameters) -> None
    """Remove the effect of solvent samples on biological samples.

    Computes a per-feature mean solvent intensity, drops features whose
    samples never exceed the configured fold difference over that mean, and
    subtracts the solvent mean from the remaining sample replicates
    (clamped at zero). Operates on ``data`` in place.

    Keyword Arguments:
        data -- LFDataFrame instance
        parameters -- LipidFinder's PeakFilter parameters instance
    """
    # Column index of the first solvent sample: solvent replicates come
    # after all sample replicates and the QC replicates
    firstIndex = parameters['firstSampleIndex'] \
            + (parameters['numSamples'] * parameters['numTechReps']) \
            + parameters['numQCReps'] - 1
    # Column index one past the last solvent sample (exclusive bound)
    lastIndex = firstIndex + parameters['numSolventReps']
    if (parameters['numSolventReps'] >= 3):
        # Outlier correction requires at least 3 solvent replicates
        OutlierCorrection.remove_outliers(data, parameters, src='blanks')
    # Name for the mean solvent intensity column: the first solvent column
    # name with its trailing replicate number stripped, plus '_mean'
    solMeanCol = re.sub('\d+$', "", data.columns[firstIndex]) + '_mean'
    # Get means (not taking into account zeros) of solvent samples;
    # features with no positive solvent reading get a mean of 0
    solMeans = data.iloc[:, firstIndex : lastIndex].apply(
            lambda x: x[numpy.where(x>0)[0]].mean(), axis=1).fillna(0)
    # Store the solvent means as a new column of the dataframe
    data[solMeanCol] = solMeans
    # Subtracts solvent mean intensity from each sample replicate
    firstIndex = parameters['firstSampleIndex'] - 1
    lastIndex = firstIndex \
            + (parameters['numSamples'] * parameters['numTechReps'])
    # Solvent fold elimination: remove frames where all technical
    # replicates of all samples are less than "solventMinFoldDiff"
    # parameter times the solvent mean
    toRemove = numpy.where(data.iloc[:, firstIndex : lastIndex].max(axis=1)
                < parameters['solventMinFoldDiff'] * data[solMeanCol]
                )[0]
    data.drop('Solvent removal', labels=toRemove, inplace=True)
    data.reset_index(inplace=True, drop=True)
    # Remove solvent from all remaining samples (negatives clamp to zero)
    data.iloc[:, firstIndex : lastIndex] = numpy.maximum(0.0,
        data.iloc[:, firstIndex : lastIndex].sub(data[solMeanCol], axis=0))
    # Drop empty frames (if any)
    data.drop_empty_frames('Empty frames after Solvent removal', parameters)
def remove_low_intensity_frames(data, parameters):
    # type: (LFDataFrame, LFParameters) -> None
    """Discard features (rows) where intensity is below the threshold.

    Intensities under the "intenSignifCutOff" parameter are zeroed, and any
    feature whose sample replicates all end up zero is dropped. Operates on
    ``data`` in place.

    Keyword Arguments:
        data -- LFDataFrame instance
        parameters -- LipidFinder's PeakFilter parameters instance
    """
    start = parameters['firstSampleIndex'] - 1
    stop = start + (parameters['numSamples'] * parameters['numTechReps'])
    # Zero out every replicate intensity below the significance cut-off.
    replicates = data.iloc[:, start : stop]
    replicates[replicates < parameters['intenSignifCutOff']] = 0
    data.iloc[:, start : stop] = replicates
    # Remove features where every sample replicate is now zero.
    data.drop_empty_frames('Background correction', parameters)
| 43.197917 | 80 | 0.693755 |
73764668f4a292487d563544ede2e201cef7d39b | 1,827 | py | Python | func_json.py | yavord/flaskREST_API | 4b667b8975121c0b0eeffb907c2d33ec39208762 | [
"MIT"
] | null | null | null | func_json.py | yavord/flaskREST_API | 4b667b8975121c0b0eeffb907c2d33ec39208762 | [
"MIT"
] | null | null | null | func_json.py | yavord/flaskREST_API | 4b667b8975121c0b0eeffb907c2d33ec39208762 | [
"MIT"
] | null | null | null | import json
def __get_recipes():
    """Return the list stored under 'recipes' in data.json."""
    with open('data.json') as json_f:
        contents = json.load(json_f)
    return contents['recipes']
def __write_recipe(recipe):
    """Append *recipe* to the 'recipes' list inside data.json.

    The file is re-serialized in place. The truncate() after the dump
    guarantees no stale bytes survive from the previous content; the
    original seek(0) + dump alone is only safe while the file grows.
    """
    with open('data.json', 'r+') as json_f:
        file_data = json.load(json_f)
        file_data['recipes'].append(recipe)
        json_f.seek(0)
        json.dump(file_data, json_f, indent=2)
        json_f.truncate()  # drop any leftover bytes from the old content
def __update_recipe(recipe):
    """Persist changes to an existing recipe in data.json.

    NOTE(review): unimplemented stub -- update_recipe() currently reports
    success (204) without this function modifying the file at all.
    """
    pass
def __check_if_exists(data, post):
    """Return True when *post* exactly equals an item already in *data*."""
    for existing in data:
        if existing == post:
            return True
    return False
def get_recipe_names():
    """Return {'recipeNames': [...]} listing every recipe name on file."""
    recipes = __get_recipes()
    return {'recipeNames': [entry['name'] for entry in recipes]}
def get_ingredients(x):
    """Return ingredient details for the recipe named *x*.

    Returns:
        {'details': {'ingredients': [...], 'numsteps': N}} when a recipe
        with that name exists, otherwise an empty dict (preserving the
        original's implicit {} fallback).
    """
    for recipe in __get_recipes():
        if recipe['name'] == x:
            ingredients = recipe['ingredients']
            # Stop at the first match instead of scanning the whole list.
            # NOTE(review): 'numsteps' counts ingredients, not instruction
            # steps -- kept for API compatibility; confirm intent.
            return {'details': {
                "ingredients": ingredients,
                'numsteps': len(ingredients),
            }}
    return {}
def add_recipe(x):
    """Add recipe *x* to the store; return a 400 error if it already exists."""
    existing = __get_recipes()
    if __check_if_exists(data=existing, post=x):
        return {"error": "Recipe already exists"}, 400
    __write_recipe(x)
    return "", 201
def update_recipe(x):
    """Update an existing recipe; return a 404 error if it does not exist.

    NOTE(review): two flaws worth confirming/fixing upstream --
    1. __check_if_exists matches the *entire* recipe payload, so a request
       whose fields actually changed is reported as missing (404).
    2. __update_recipe() is an empty stub, so the 204 success response is
       returned without anything being written to data.json.
    """
    data = __get_recipes()
    error = {"error": "Recipe does not exist"}
    check = __check_if_exists(data=data, post=x)
    if check == True:
        __update_recipe(x)
        return("",204)
    elif check == False:
        return(error, 404)
### TEST
# print(get_recipe_names())
# print(get_ingredients('chai'))
# __recipe_post = {"name": "butteredBagel",
# "ingredients": ["1 bagel","butter"],
# "instructions": ["cut the bagel", "spread butter on bagel"]
# }
# add_recipe(__recipe_post) | 27.681818 | 63 | 0.61029 |
63ba9f34786283f918a98cac4ded58144ed77560 | 7,020 | py | Python | scGCN/GWNN/train_GWNN.py | Dee-chen/scGCN | 604818fbaf32ef2fd6ee7bd601f4fe8eff26ac94 | [
"MIT"
] | 24 | 2020-10-06T00:56:02.000Z | 2022-03-04T06:41:18.000Z | scGCN/GWNN/train_GWNN.py | Dee-chen/scGCN | 604818fbaf32ef2fd6ee7bd601f4fe8eff26ac94 | [
"MIT"
] | 6 | 2020-10-11T14:47:11.000Z | 2022-03-08T10:07:39.000Z | scGCN/GWNN/train_GWNN.py | Dee-chen/scGCN | 604818fbaf32ef2fd6ee7bd601f4fe8eff26ac94 | [
"MIT"
] | 8 | 2020-12-15T09:14:56.000Z | 2022-03-04T01:26:19.000Z | from __future__ import division
from __future__ import print_function
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
from utils import *
from models import GCN, MLP, GCN_WeightShare, Wavelet_Neural_Network, Spectral_CNN
from sc_utilities import *
import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
# Set random seed
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'input', 'Dataset string.') # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_string('model', 'wavelet_neural_network', 'Model string.') # 'wavelet_basis', 'spectral_basis', 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('wavelet_s', 1.0, 'wavelet s .')
flags.DEFINE_float('threshold', 1e-4, 'sparseness threshold .')
flags.DEFINE_bool('weight_share', True, 'Weight share string.') # 'gcn', 'gcn_cheby','wavelet','nmf', 'dense'
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_bool('alldata', False, 'All data string.')
flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')#1000
flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
flags.DEFINE_float('dropout', 0, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 0 , 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')#200
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
flags.DEFINE_bool('mask', False, 'mask string.')
flags.DEFINE_bool('normalize', False, 'normalize string.')
flags.DEFINE_bool('laplacian_normalize', True, 'laplacian normalize string.')
flags.DEFINE_bool('sparse_ness', True, 'wavelet sparse_ness string.')
flags.DEFINE_integer('order', 2, 'neighborhood order .')
flags.DEFINE_bool('weight_normalize', False, 'weight normalize string.')
# Load data
adj, features, y_train, y_val, y_test, y_pred, train_mask, pred_mask, val_mask, test_mask, labels, true_label = load_data(FLAGS.dataset)
#' labels, adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset,alldata=FLAGS.alldata)
# Some preprocessing, normalization
features = preprocess_features(features)
print("************Loading data finished, Begin constructing wavelet************")
if FLAGS.model == 'gcn':
support = [preprocess_adj(adj)]
num_supports = 1
model_func = GCN
elif FLAGS.model == 'gcn_cheby':
support = chebyshev_polynomials(adj, FLAGS.max_degree)
num_supports = 1 + FLAGS.max_degree
if (FLAGS.weight_share):
model_func = GCN_WeightShare
else:
model_func = GCN
elif FLAGS.model == "spectral_basis":
dataset = FLAGS.dataset
s = FLAGS.wavelet_s
laplacian_normalize = FLAGS.laplacian_normalize
sparse_ness = FLAGS.sparse_ness
threshold = FLAGS.threshold
weight_normalize = FLAGS.weight_normalize
support = spectral_basis(dataset,adj, s, laplacian_normalize,sparse_ness,threshold,weight_normalize)
num_supports = len(support)
model_func = Spectral_CNN
elif FLAGS.model == "wavelet_neural_network":
dataset = FLAGS.dataset
s = FLAGS.wavelet_s
laplacian_normalize = FLAGS.laplacian_normalize
sparse_ness = FLAGS.sparse_ness
threshold = FLAGS.threshold
weight_normalize = FLAGS.weight_normalize
support = wavelet_basis(dataset,adj, s, laplacian_normalize,sparse_ness,threshold,weight_normalize)
num_supports = len(support)
model_func = Wavelet_Neural_Network
elif FLAGS.model == 'dense':
support = [preprocess_adj(adj)] # Not used
num_supports = 1
model_func = MLP
else:
raise ValueError('Invalid argument for model: ' + str(FLAGS.model))
# Define placeholders
# TF1-style graph inputs; every training/eval step feeds them through
# construct_feed_dict (defined earlier in this script).
placeholders = {
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
    'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
}
# Create model
# model_func was selected above based on FLAGS.model.
weight_normalize = FLAGS.weight_normalize
node_num = adj.shape[0]
model = model_func(node_num,weight_normalize, placeholders, input_dim=features[2][1], logging=True)
print("**************Constructing wavelet finished, Begin training**************")
# Initialize session
sess = tf.Session()
# Define model evaluation function
# evaluate() closes over `sess` and `model`; dropout defaults to 0 in the
# placeholders, so evaluation runs without dropout.
def evaluate(features, support, labels, mask, placeholders):
    feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
    outs_val = sess.run([model.outputs,model.loss, model.accuracy], feed_dict=feed_dict_val)
    return outs_val[0], outs_val[1], outs_val[2]
#' saver = tf.train.Saver()
# Init variables
sess.run(tf.global_variables_initializer())
# Train model
cost_val = []
best_val_acc = 0.0
output_test_acc = 0.0
for epoch in range(FLAGS.epochs):
    # One optimizer step on the training split (with dropout enabled).
    feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)
    feed_dict.update({placeholders['dropout']: FLAGS.dropout})
    outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)
    # Evaluate on validation / test / pred splits after every epoch.
    val_output,cost, acc = evaluate(features, support, y_val, val_mask, placeholders)
    cost_val.append(cost)
    test_output, test_cost, test_acc = evaluate(features, support, y_test, test_mask, placeholders)
    pred_output, pred_cost, pred_acc = evaluate(features, support, y_pred, pred_mask, placeholders)
    # Report the test/pred accuracy obtained at the best validation accuracy.
    # NOTE(review): output_pred_acc is only bound inside this branch; safe as
    # long as accuracy is non-negative (the branch then fires on epoch 1).
    if(best_val_acc <= acc):
        best_val_acc = acc
        output_test_acc = test_acc
        output_pred_acc = pred_acc
    print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
          "train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
          "val_acc=", "{:.5f}".format(acc), "test_loss=", "{:.5f}".format(test_cost), "test_acc=", "{:.5f}".format(test_acc),
          "pred_loss=", "{:.5f}".format(pred_cost), "pred_acc=", "{:.5f}".format(pred_acc))
    # Early stopping: stop when validation loss exceeds the mean of the last
    # FLAGS.early_stopping validation losses.
    if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
        print("Early stopping...")
        break
print("Optimization Finished!")
print("dataset: ", FLAGS.dataset," model: ",FLAGS.model,"order: ",FLAGS.order,",sparse_ness: ",FLAGS.sparse_ness,
      ",laplacian_normalize: ",FLAGS.laplacian_normalize,",threshold",FLAGS.threshold,",wavelet_s:",FLAGS.wavelet_s,",mask:",FLAGS.mask,
      ",normalize:",FLAGS.normalize,",weight_normalize:",FLAGS.weight_normalize," weight_share:",FLAGS.weight_share,
      ",learning_rate:",FLAGS.learning_rate,",hidden1:",FLAGS.hidden1,",dropout:",FLAGS.dropout,",max_degree:",FLAGS.max_degree,",alldata:",FLAGS.alldata)
print("Val accuracy:", best_val_acc, " Test accuracy: ", output_test_acc, "Pred accuracy: ", output_pred_acc)
print("********************************************************")
| 43.875 | 158 | 0.717521 |
bf4067eba083ea03acfaec44c46fd4bccb02a34c | 1,724 | py | Python | airflow/providers/apache/cassandra/example_dags/example_cassandra_dag.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | airflow/providers/apache/cassandra/example_dags/example_cassandra_dag.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | airflow/providers/apache/cassandra/example_dags/example_cassandra_dag.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Ignore missing args provided by default_args
# type: ignore[call-arg]
"""
Example Airflow DAG to check if a Cassandra Table and a Records exists
or not using `CassandraTableSensor` and `CassandraRecordSensor`.
"""
from datetime import datetime
from airflow.models import DAG
from airflow.providers.apache.cassandra.sensors.record import CassandraRecordSensor
from airflow.providers.apache.cassandra.sensors.table import CassandraTableSensor
# [START howto_operator_cassandra_sensors]
with DAG(
    dag_id='example_cassandra_operator',
    schedule_interval=None,  # manual trigger only
    start_date=datetime(2021, 1, 1),
    # Both sensors read their target table from default_args.
    default_args={'table': 'keyspace_name.table_name'},
    catchup=False,
    tags=['example'],
) as dag:
    # Waits for the Cassandra table to exist.
    table_sensor = CassandraTableSensor(task_id="cassandra_table_sensor")
    # Waits for a record matching the given primary-key values to exist.
    record_sensor = CassandraRecordSensor(task_id="cassandra_record_sensor", keys={"p1": "v1", "p2": "v2"})
| 38.311111 | 107 | 0.774942 |
cd92dbba8b9dc6ede4e2e40eb80d8d982afcaccd | 4,672 | py | Python | model_methods/model_methods.py | shivamshinde123/flight-fare-prediction | 630c8f41419c751c54ac8f1637748a548a426482 | [
"MIT"
] | null | null | null | model_methods/model_methods.py | shivamshinde123/flight-fare-prediction | 630c8f41419c751c54ac8f1637748a548a426482 | [
"MIT"
] | null | null | null | model_methods/model_methods.py | shivamshinde123/flight-fare-prediction | 630c8f41419c751c54ac8f1637748a548a426482 | [
"MIT"
] | null | null | null | import os
import pathlib
import pickle
import shutil
import warnings
from Logging.logging import Logger
warnings.simplefilter(action='ignore', category=FutureWarning)
class modelMethods:
    """
    Description: This class contains the methods used for saving, loading and finding the correct model for
                 the correct cluster

    Written By: Shivam Shinde

    Version: 1.0

    Revision: None
    """

    def __init__(self):
        # Root directory under which every model pickle is stored.
        self.model_directory = "Models"
        self.logger = Logger()
        self.file_object = open("TrainingLogs/modelMethodsLogs.txt", "a+")

    def modelSaving(self, model, filename, clusterno):
        """
        Description: This method is used to save the created model as a python pickle file

        :param model: Reference of the created model
        :param filename: Name of the model after saving (without extension)
        :param clusterno: Cluster number the model belongs to (ignored for "KMeansCluster")
        :return: None
        """
        self.logger.log(self.file_object, "Saving the created model into the python pickle file")
        try:
            # The K-Means clustering model gets its own folder; every other
            # model is stored in a folder suffixed with its cluster number.
            if filename == "KMeansCluster":
                path = os.path.join(self.model_directory, "ClusteringModel")
            else:
                path = os.path.join(self.model_directory, "ModelForClusterNo" + str(clusterno))

            # Removing the previously created models.
            # NOTE(review): this wipes the whole model directory (all clusters),
            # not only this cluster's folder — confirm this is intended.
            if os.path.exists(path):
                shutil.rmtree(self.model_directory)
            os.makedirs(path)

            # Saving the model as a python pickle file. Bug fix: the file name
            # was a literal "(unknown).pkl" instead of using the 'filename'
            # parameter; a 'with' block also guarantees the handle is closed.
            with open(os.path.join(path, f"{filename}.pkl"), "wb") as model_file:
                pickle.dump(model, model_file)

            self.logger.log(self.file_object, f"Model {model} saved successfully in {path}")
        except Exception as e:
            self.logger.log(self.file_object, f"Exception occurred while saving the model {model}. Exception: {str(e)}")
            raise e

    def loadingSavedModel(self, filename, clusterno):
        """
        Description: This method is used to load the saved model for the respective cluster

        Written By: Shivam Shinde

        Version: 1.0

        Revision: None

        :param clusterno: Cluster number for which the model is to be loaded
        :param filename: Name of the model that needs to be loaded (without extension)
        :return: Model
        """
        try:
            self.logger.log(self.file_object, f"Loading the model {filename}.pkl")
            if filename == "KMeansCluster":
                path = os.path.join(self.model_directory, "ClusteringModel")
            else:
                path = os.path.join(self.model_directory, "ModelForClusterNo" + str(clusterno))

            # Loading the saved model; 'with' closes the handle after reading.
            model_path = os.path.join(path, f"{filename}.pkl")
            with open(model_path, "rb") as model_file:
                model = pickle.load(model_file)

            self.logger.log(self.file_object, f"Model {filename} loaded successfully")
            # returning the model
            return model
        except Exception as e:
            self.logger.log(self.file_object, f"Exception occurred while loading the model {filename}. "
                                              f"Exception: {str(e)}")
            raise e

    def findCorrectModel(self, clusterNumber):
        """
        Description: This method is used to find the correct model given the cluster number

        Written By: Shivam Shinde

        Version: 1.0

        Revision: None

        :param clusterNumber: Cluster number
        :return: Model name (file name without extension), or None if the folder is empty
        """
        self.logger.log(self.file_object, f"Finding the appropriate model for cluster number {clusterNumber}")
        self.clusterNumber = clusterNumber
        try:
            # Per-cluster folders end with the cluster number, so the last
            # character of the folder name identifies the cluster.
            # NOTE(review): this single-character match only works for
            # cluster numbers 0-9 — confirm the cluster count stays below 10.
            for folder in os.listdir(self.model_directory):
                folder_path = pathlib.Path(os.path.join(self.model_directory, folder))
                if folder_path.stem[-1] != str(clusterNumber):
                    continue
                model_name = None  # avoids a NameError when the folder is empty
                for model_file in os.listdir(folder_path):
                    model_name = model_file.split('.')[0]
                self.logger.log(self.file_object,
                                f"Successfully found the name of the model for the cluster number "
                                f"{clusterNumber}")
                # returning the model name (last file found in the folder)
                return model_name
        except Exception as e:
            self.logger.log(self.file_object, f"Exception occurred while finding the name of the model for the "
                                              f"cluster number {clusterNumber}. Exception: {str(e)}")
            raise e
7fe25bcadc950758407f9d217a408428d0c461b8 | 45,987 | py | Python | urdf2webots/parserURDF.py | omichel/urdf2webots | 30d70198e492cccf9f5a149643ab55e31185c9fe | [
"Apache-2.0"
] | 7 | 2018-10-09T16:38:26.000Z | 2019-07-27T03:02:50.000Z | urdf2webots/parserURDF.py | omichel/urdf2webots | 30d70198e492cccf9f5a149643ab55e31185c9fe | [
"Apache-2.0"
] | 18 | 2018-10-10T06:44:42.000Z | 2019-08-06T13:22:30.000Z | urdf2webots/parserURDF.py | omichel/urdf2webots | 30d70198e492cccf9f5a149643ab55e31185c9fe | [
"Apache-2.0"
] | 4 | 2018-10-20T03:06:30.000Z | 2019-04-13T02:03:08.000Z | """Import modules."""
import math
import os
import sys
import numpy
try:
from PIL import Image
except ImportError as e:
if sys.platform == 'linux2':
sys.stderr.write("PIL module not found, please install it with:\n")
sys.stderr.write("apt-get install python-pip\n")
sys.stderr.write("pip install pillow\n")
raise e
import numbers
from urdf2webots.gazebo_materials import materials
from urdf2webots.math_utils import convertRPYtoEulerAxis, rotateVector, combineRotations, combineTranslations
try:
from collada import Collada, lineset
colladaIsAvailable = True
except ImportError:
colladaIsAvailable = False
# to pass from external
# NOTE(review): these module-level settings are mutated from outside this
# module by the converter entry point — confirm against the caller.
robotName = ''  # name of the robot being converted (used for texture folders)
extensionListSingleAppearance = ['.stl', '.obj']  # mesh formats referenced by URL as a single appearance
disableMeshOptimization = False  # presumably skips mesh deduplication when True — TODO confirm
class Trimesh():
    """Indexed triangle mesh: vertices, texture coordinates and normals."""

    def __init__(self):
        """Create an empty mesh (all buffers start empty)."""
        self.coord = []          # vertex coordinate points
        self.coordIndex = []     # per-face vertex indices
        self.texCoord = []       # texture (UV) coordinate points
        self.texCoordIndex = []  # per-face UV indices
        self.normal = []         # normal vectors
        self.normalIndex = []    # per-face normal indices
class Inertia():
    """Mass distribution of a link (URDF <inertial> tag)."""

    def __init__(self):
        """Identity pose, unknown mass, unit diagonal inertia matrix."""
        self.position = [0.0] * 3
        self.rotation = [0.0, 0.0, 1.0, 0.0]  # axis-angle
        self.mass = None  # None until parsed from the URDF
        # Symmetric inertia matrix components.
        self.ixx, self.iyy, self.izz = 1.0, 1.0, 1.0
        self.ixy, self.ixz, self.iyz = 0.0, 0.0, 0.0
class Box():
    """Axis-aligned box primitive (URDF <box> size)."""

    def __init__(self):
        """Create a zero-sized box."""
        self.x, self.y, self.z = 0.0, 0.0, 0.0
class Cylinder():
    """Cylinder primitive (URDF <cylinder> radius/length)."""

    def __init__(self):
        """Create a zero-sized cylinder."""
        self.radius, self.length = 0.0, 0.0
class Sphere():
    """Sphere primitive (URDF <sphere> radius)."""

    def __init__(self):
        """Create a zero-radius sphere."""
        self.radius = 0.0
class Mesh():
    """External mesh resource referenced by URL."""

    def __init__(self):
        """Start with an empty URL."""
        self.url = ''
class Geometry():
    """Geometric description of a visual/collision element."""

    # Shared cache of geometries keyed by name, so identical meshes are
    # stored once and reused by reference.
    reference = {}

    def __init__(self):
        """Create an empty geometry: all primitives zero-sized, unit scale."""
        self.name = None
        self.defName = None
        self.scale = [1.0, 1.0, 1.0]
        self.lineset = False
        self.box = Box()
        self.cylinder = Cylinder()
        self.sphere = Sphere()
        self.trimesh = Trimesh()
        self.mesh = Mesh()
class Color():
    """RGBA color with per-channel values (defaults to an opaque dark red)."""

    def __init__(self, red=0.5, green=0.0, blue=0.0, alpha=1.0):
        """Store the four channels as attributes."""
        self.red, self.green, self.blue, self.alpha = red, green, blue, alpha
class Material():
    """Surface material (colors/texture) of a visual element."""

    # Materials declared with a name, shared by reference across links
    # (URDF allows a <material> to be referenced by name only).
    namedMaterial = {}

    def __init__(self):
        """Initialize with URDF's default grey diffuse color and no texture."""
        self.emission = Color(0.0, 0.0, 0.0, 1.0)
        self.ambient = Color(0.0, 0.0, 0.0, 0.0)
        self.diffuse = Color(0.5, 0.5, 0.5, 1.0)
        self.specular = Color(0.0, 0.0, 0.0, 1.0)
        self.shininess = None
        self.index_of_refraction = 1.0
        self.texture = ""
        self.name = None
        self.defName = None

    def parseFromMaterialNode(self, node):
        """Parse a named URDF <material> node and register it in namedMaterial."""
        if hasElement(node, 'color'):
            colorElement = node.getElementsByTagName('color')[0]
            colors = colorElement.getAttribute('rgba').split()
            # Bug fix: assign the Color instance's actual attributes
            # ('red'/'green'/'blue'); the previous '.r'/'.g'/'.b' created new
            # attributes and left the diffuse color at its default grey.
            self.diffuse.red = float(colors[0])
            self.diffuse.green = float(colors[1])
            self.diffuse.blue = float(colors[2])
            self.diffuse.alpha = float(colors[3])
        if node.hasAttribute('name'):
            self.name = node.getAttribute('name')
            # First declaration wins; later nodes with the same name reuse it.
            if self.name not in Material.namedMaterial:
                Material.namedMaterial[self.name] = self
        else:
            # This parser only handles named materials.
            assert False
class Visual():
    """Visual element of a link: pose, shape and surface material."""

    def __init__(self):
        """Start at the identity pose with default geometry and material."""
        self.position = [0.0] * 3
        self.rotation = [0.0, 0.0, 1.0, 0.0]  # axis-angle
        self.geometry = Geometry()
        self.material = Material()
class Collision():
    """Collision element of a link: pose and shape (no material)."""

    def __init__(self):
        """Start at the identity pose with default geometry."""
        self.position = [0.0] * 3
        self.rotation = [0.0, 0.0, 1.0, 0.0]  # axis-angle
        self.geometry = Geometry()
class Calibration():
    """Joint calibration reference (URDF <calibration> tag)."""

    def __init__(self):
        """Default: zero reference position, rising edge."""
        self.limit = 0.0
        self.rising = True
class Dynamics():
    """Joint dynamics parameters (URDF <dynamics> tag)."""

    def __init__(self):
        """Default: no damping and no friction."""
        self.damping, self.friction = 0.0, 0.0
class Limit():
    """Joint limits (URDF <limit> tag)."""

    def __init__(self):
        """Default: zero position range and velocity."""
        self.lower = 0.0
        self.upper = 0.0
        # if not specified in the URDF, there is no limit
        self.effort = 10000
        self.velocity = 0.0
class Safety():
    """Joint safety controller parameters (URDF <safety_controller> tag)."""

    def __init__(self):
        """Default: all safety bounds and gains at zero."""
        self.lower = 0.0
        self.upper = 0.0
        self.kPosition = 0.0
        self.kVelocity = 0.0
class Link():
    """URDF link: name, inertia and its visual/collision elements."""

    def __init__(self):
        """Create an unnamed link with default inertia and no geometry."""
        self.name = 'default'
        self.inertia = Inertia()
        self.visual = []
        self.collision = []
        self.forceSensor = False
class Joint():
    """URDF joint connecting a parent link to a child link."""

    def __init__(self):
        """Create a default joint at the identity pose with no axis."""
        self.name = 'default'
        self.type = 'default'
        self.parent = 'default'
        self.child = 'default'
        self.position = [0.0] * 3
        self.rotation = [0.0, 0.0, 1.0, 0.0]  # axis-angle
        self.axis = []
        self.calibration = Calibration()
        self.dynamics = Dynamics()
        self.limit = Limit()
        self.safety = Safety()
class IMU():
    """IMU sensor, exported as four separate Webots devices."""

    # All IMU sensors found while parsing.
    list = []

    def __init__(self):
        """Create a noiseless IMU not yet attached to a link."""
        self.name = 'imu'
        self.gaussianNoise = 0
        self.parentLink = None

    def export(self, file, indentationLevel):
        """Write InertialUnit, Accelerometer, Gyro and Compass nodes to *file*."""
        indent = '  '
        pad = indentationLevel * indent
        noise = self.gaussianNoise
        # InertialUnit (orientation); its noise field is normalized by pi/2.
        file.write(pad + 'InertialUnit {\n')
        file.write(pad + f'  name "{self.name} inertial"\n')
        if noise > 0:
            file.write(pad + f'  noise {noise / (math.pi / 2):f}\n')
        file.write(pad + '}\n')
        # Accelerometer: noise expressed through a lookup table over +/-100.
        file.write(pad + 'Accelerometer {\n')
        file.write(pad + f'  name "{self.name} accelerometer"\n')
        if noise > 0:
            file.write(pad + f'  lookupTable [-100 -100 {-noise / 100.0:f}, 100 100 {noise / 100.0:f}]\n')
        file.write(pad + '}\n')
        # Gyro: same +/-100 lookup table scaling.
        file.write(pad + 'Gyro {\n')
        file.write(pad + f'  name "{self.name} gyro"\n')
        if noise > 0:
            file.write(pad + f'  lookupTable [-100 -100 {-noise / 100.0:f}, 100 100 {noise / 100.0:f}]\n')
        file.write(pad + '}\n')
        # Compass: lookup table over +/-1.
        file.write(pad + 'Compass {\n')
        file.write(pad + f'  name "{self.name} compass"\n')
        if noise > 0:
            file.write(pad + f'  lookupTable [-1 -1 {-noise:f}, 1 1 {noise:f}]\n')
        file.write(pad + '}\n')
class P3D():
    """Ground-truth pose sensor, mapped to GPS + InertialUnit + Gyro."""

    # All P3D sensors found while parsing.
    list = []

    def __init__(self):
        """Create a noiseless P3D not yet attached to a link."""
        self.name = 'p3d'
        self.gaussianNoise = 0
        self.noiseCorrelation = 0
        self.speedNoise = 0
        self.parentLink = None

    def export(self, file, indentationLevel):
        """Write GPS, InertialUnit and Gyro nodes to *file*."""
        indent = '  '
        pad = indentationLevel * indent
        noise = self.gaussianNoise
        # GPS provides the position part of the pose.
        file.write(pad + 'GPS {\n')
        file.write(pad + f'  name "{self.name} gps"\n')
        if self.noiseCorrelation > 0:
            file.write(pad + f'  noiseCorrelation {self.noiseCorrelation:f}\n')
        if self.speedNoise > 0:
            file.write(pad + f'  speedNoise {self.speedNoise:f}\n')
        file.write(pad + '}\n')
        # InertialUnit provides orientation; noise normalized by pi/2.
        file.write(pad + 'InertialUnit {\n')
        file.write(pad + f'  name "{self.name} inertial"\n')
        if noise > 0:
            file.write(pad + f'  noise {noise / (math.pi / 2):f}\n')
        file.write(pad + '}\n')
        # Gyro provides angular velocity via a +/-100 lookup table.
        file.write(pad + 'Gyro {\n')
        file.write(pad + f'  name "{self.name} gyro"\n')
        if noise > 0:
            file.write(pad + f'  lookupTable [-100 -100 {-noise / 100.0:f}, 100 100 {noise / 100.0:f}]\n')
        file.write(pad + '}\n')
class Camera():
    """RGB camera sensor."""

    # All camera sensors found while parsing.
    list = []

    def __init__(self):
        """Create a camera whose optics are unset (None) until parsed."""
        self.name = 'camera'
        self.fov = None
        self.width = None
        self.height = None
        self.noise = None
        self.isImager = True

    def export(self, file, indentationLevel):
        """Write a Camera node to *file*, emitting only the fields that are set."""
        indent = '  '
        pad = indentationLevel * indent
        file.write(pad + 'Camera {\n')
        file.write(pad + f'  name "{self.name}"\n')
        if self.fov:
            file.write(pad + f'  fieldOfView {self.fov:f}\n')
        if self.width:
            file.write(pad + f'  width {int(self.width)}\n')
        if self.height:
            file.write(pad + f'  height {int(self.height)}\n')
        if self.noise:
            file.write(pad + f'  noise {self.noise:f}\n')
        file.write(pad + '}\n')
class Lidar():
    """Laser range finder sensor."""

    # All lidar sensors found while parsing.
    list = []

    def __init__(self):
        """Create a single-layer lidar whose optics are unset until parsed."""
        self.name = 'lidar'
        self.fov = None
        self.verticalFieldOfView = None
        self.horizontalResolution = None
        self.numberOfLayers = 1
        self.minRange = None
        self.maxRange = None
        self.resolution = None
        self.noise = None

    def export(self, file, indentationLevel):
        """Write a Lidar node to *file*, emitting only the fields that are set."""
        indent = '  '
        pad = indentationLevel * indent
        file.write(pad + 'Lidar {\n')
        file.write(pad + f'  name "{self.name}"\n')
        if self.fov:
            file.write(pad + f'  fieldOfView {self.fov:f}\n')
        if self.verticalFieldOfView:
            file.write(pad + f'  verticalFieldOfView {self.verticalFieldOfView:f}\n')
        if self.horizontalResolution:
            file.write(pad + f'  horizontalResolution {int(self.horizontalResolution)}\n')
        if self.numberOfLayers:
            file.write(pad + f'  numberOfLayers {int(self.numberOfLayers)}\n')
        if self.minRange:
            # Very close minimum ranges also shrink the 'near' clipping plane
            # (presumably to keep it below minRange — TODO confirm).
            if self.minRange < 0.01:
                file.write(pad + f'  near {self.minRange:f}\n')
            file.write(pad + f'  minRange {self.minRange:f}\n')
        if self.maxRange:
            file.write(pad + f'  maxRange {self.maxRange:f}\n')
        if self.noise:
            file.write(pad + f'  noise {self.noise:f}\n')
        if self.resolution:
            file.write(pad + f'  resolution {self.resolution:f}\n')
        file.write(pad + '}\n')
def colorVector2Instance(cv, alpha_last=True):
    """Build a Color instance from a 4-vector (RGBA, or ARGB when alpha_last is False)."""
    if alpha_last:
        red, green, blue, alpha = cv[0], cv[1], cv[2], cv[3]
    else:
        alpha, red, green, blue = cv[0], cv[1], cv[2], cv[3]
    instance = Color()
    instance.red = red
    instance.green = green
    instance.blue = blue
    instance.alpha = alpha
    return instance
def getRobotName(node):
    """Return the robot name from the <robot> root node (and echo it)."""
    robotName = node.getAttribute('name')
    print(f'Robot name: {robotName}')
    return robotName
def hasElement(node, element):
    """Return True if *node* contains at least one child tag named *element*.

    :param node: a DOM element
    :param element: tag name to look for
    """
    # DOM NodeList exposes 'length'; returning the comparison directly
    # replaces the redundant if/else returning True/False.
    return node.getElementsByTagName(element).length > 0
def getColladaMesh(filename, node, link):
    """Parse a Collada (.dae) file and append its geometry to *link*.

    If *node* carries a material (it is a Visual), one Visual per Collada
    primitive is appended to link.visual, including materials and textures;
    otherwise one Collision per primitive is appended to link.collision.
    Requires the optional 'pycollada' package; skipped with a warning if absent.
    """
    if not colladaIsAvailable:
        sys.stderr.write('Collada module not found, please install it with:\n')
        sys.stderr.write(' python -m pip install pycollada\n')
        sys.stderr.write('Skipping "%s"\n' % filename)
        return
    print('Parsing Mesh: ' + filename)
    colladaMesh = Collada(filename)
    index = -1
    # A truthy 'material' attribute distinguishes Visual nodes from Collisions.
    if hasattr(node, 'material') and node.material:
        for geometry in list(colladaMesh.scene.objects('geometry')):
            for data in list(geometry.primitives()):
                visual = Visual()
                index += 1
                # Pose and base material are inherited from the URDF node.
                visual.position = node.position
                visual.rotation = node.rotation
                visual.material.diffuse.red = node.material.diffuse.red
                visual.material.diffuse.green = node.material.diffuse.green
                visual.material.diffuse.blue = node.material.diffuse.blue
                visual.material.diffuse.alpha = node.material.diffuse.alpha
                visual.material.texture = node.material.texture
                # Cache key: '<file base name>_<primitive index>'.
                name = '%s_%d' % (os.path.splitext(os.path.basename(filename))[0], index)
                if type(data.original) is lineset.LineSet:
                    visual.geometry.lineset = True
                # Reuse the cached geometry when this primitive was already parsed.
                if name in Geometry.reference:
                    visual.geometry = Geometry.reference[name]
                else:
                    Geometry.reference[name] = visual.geometry
                    visual.geometry.name = name
                    visual.geometry.scale = node.geometry.scale
                    # Copy vertex/texture/normal buffers and their indices.
                    for val in data.vertex:
                        visual.geometry.trimesh.coord.append(numpy.array(val))
                    for val in data.vertex_index:
                        visual.geometry.trimesh.coordIndex.append(val)
                    if data.texcoordset:  # non-empty
                        for val in data.texcoordset[0]:
                            visual.geometry.trimesh.texCoord.append(val)
                    if data.texcoord_indexset:  # non-empty
                        for val in data.texcoord_indexset[0]:
                            visual.geometry.trimesh.texCoordIndex.append(val)
                    if hasattr(data, '_normal') and data._normal is not None and data._normal.size > 0:
                        for val in data._normal:
                            visual.geometry.trimesh.normal.append(numpy.array(val))
                    if hasattr(data, '_normal_index') and data._normal_index is not None and data._normal_index.size > 0:
                        for val in data._normal_index:
                            visual.geometry.trimesh.normalIndex.append(val)
                # A Collada effect overrides the inherited material when present.
                if data.material and data.material.effect:
                    if data.material.effect.emission and isinstance(data.material.effect.emission, tuple):
                        visual.material.emission = colorVector2Instance(data.material.effect.emission)
                    if data.material.effect.ambient and isinstance(data.material.effect.ambient, tuple):
                        visual.material.ambient = colorVector2Instance(data.material.effect.ambient)
                    if data.material.effect.specular and isinstance(data.material.effect.specular, tuple):
                        visual.material.specular = colorVector2Instance(data.material.effect.specular)
                    if data.material.effect.shininess:
                        visual.material.shininess = data.material.effect.shininess
                    if data.material.effect.index_of_refraction:
                        visual.material.index_of_refraction = data.material.effect.index_of_refraction
                    if data.material.effect.diffuse:
                        if numpy.size(data.material.effect.diffuse) > 1\
                                and all([isinstance(x, numbers.Number) for x in data.material.effect.diffuse]):
                            # diffuse is defined by values
                            visual.material.diffuse = colorVector2Instance(data.material.effect.diffuse)
                        else:
                            # diffuse is defined by *.tif files
                            visual.material.texture = 'textures/' + \
                                data.material.effect.diffuse.sampler.surface.image.path.split('/')[-1]
                            txt = os.path.splitext(visual.material.texture)[1]
                            if txt == '.tiff' or txt == '.tif':
                                # Convert TIFF textures to PNG in '<robotName>_textures'
                                # (presumably because the target format cannot load TIFF).
                                for dirname, dirnames, filenames in os.walk('.'):
                                    for file in filenames:
                                        if file == str(visual.material.texture.split('/')[-1]):
                                            try:
                                                tifImage = Image.open(os.path.join(dirname, file))
                                                img = './' + robotName + '_textures'
                                                tifImage.save(os.path.splitext(os.path.join(img, file))[0] + '.png')
                                                visual.material.texture = (robotName +
                                                                           '_textures/' + os.path.splitext(file)[0] + '.png')
                                                print('translated image ' + visual.material.texture)
                                            except IOError:
                                                visual.material.texture = ""
                                                print('failed to open ' + os.path.join(dirname, file))
                    else:
                        visual.material.diffuse = colorVector2Instance([1.0, 1.0, 1.0, 1.0])
                link.visual.append(visual)
    else:
        # No material: treat every primitive as collision geometry only.
        for geometry in list(colladaMesh.scene.objects('geometry')):
            for data in list(geometry.primitives()):
                collision = Collision()
                collision.position = node.position
                collision.rotation = node.rotation
                collision.geometry.scale = node.geometry.scale
                for value in data.vertex:
                    collision.geometry.trimesh.coord.append(numpy.array(value))
                for value in data.vertex_index:
                    collision.geometry.trimesh.coordIndex.append(value)
                link.collision.append(collision)
def getPosition(node):
    """Parse the 'xyz' attribute of a node's <origin> tag into [x, y, z] floats."""
    values = node.getElementsByTagName('origin')[0].getAttribute('xyz').split()
    return [float(values[0]), float(values[1]), float(values[2])]
def getRotation(node):
    """Parse a node's <origin rpy> angles and convert them to axis-angle.

    Falls back to zero roll/pitch/yaw (identity rotation) when no <origin>
    tag is present.
    """
    rpy = [0.0, 0.0, 0.0]
    if hasElement(node, 'origin'):
        tokens = node.getElementsByTagName('origin')[0].getAttribute('rpy').split()
        rpy = [float(tokens[0]), float(tokens[1]), float(tokens[2])]
    return convertRPYtoEulerAxis(rpy)
def getInertia(node):
    """Build an Inertia object from a link's <inertial> element."""
    inertia = Inertia()
    inertialElement = node.getElementsByTagName('inertial')[0]
    # Pose of the center of mass, when an <origin> tag provides it.
    if hasElement(inertialElement, 'origin'):
        originElement = inertialElement.getElementsByTagName('origin')[0]
        if originElement.getAttribute('xyz'):
            inertia.position = getPosition(inertialElement)
        if originElement.getAttribute('rpy'):
            inertia.rotation = getRotation(inertialElement)
    if hasElement(inertialElement, 'mass'):
        inertia.mass = float(inertialElement.getElementsByTagName('mass')[0].getAttribute('value'))
    # Six components of the symmetric inertia matrix.
    if hasElement(inertialElement, 'inertia'):
        matrixNode = inertialElement.getElementsByTagName('inertia')[0]
        for component in ('ixx', 'ixy', 'ixz', 'iyy', 'iyz', 'izz'):
            setattr(inertia, component, float(matrixNode.getAttribute(component)))
    return inertia
def getVisual(link, node, path):
    """Parse every <visual> element of a link and append Visual objects to it.

    :param link: the Link being populated
    :param node: the link's DOM element
    :param path: directory used to resolve relative mesh filenames
    Meshes in .stl/.obj are referenced by URL (cached by base name in
    Geometry.reference); .dae files are expanded through getColladaMesh().
    """
    for index in range(0, len(node.getElementsByTagName('visual'))):
        visual = Visual()
        visualElement = node.getElementsByTagName('visual')[index]
        if hasElement(visualElement, 'origin'):
            if visualElement.getElementsByTagName('origin')[0].getAttribute('xyz'):
                visual.position = getPosition(visualElement)
            if visualElement.getElementsByTagName('origin')[0].getAttribute('rpy'):
                visual.rotation = getRotation(visualElement)
        # NOTE(review): without an <origin>, getRotation() yields the identity
        # rotation, so this cylinder-only branch looks like a no-op — confirm.
        elif hasElement(visualElement.getElementsByTagName('geometry')[0], 'cylinder'):
            visual.rotation = getRotation(visualElement)
        geometryElement = visualElement.getElementsByTagName('geometry')[0]
        if hasElement(visualElement, 'material'):
            material = visualElement.getElementsByTagName('material')[0]
            # Material resolution order: previously declared named material,
            # inline <color>, then a named Gazebo material from the table.
            if material.hasAttribute('name') and material.getAttribute('name') in Material.namedMaterial:
                visual.material = Material.namedMaterial[material.getAttribute('name')]
            elif hasElement(material, 'color'):
                colorElement = material.getElementsByTagName('color')[0].getAttribute('rgba').split()
                visual.material.diffuse.red = float(colorElement[0])
                visual.material.diffuse.green = float(colorElement[1])
                visual.material.diffuse.blue = float(colorElement[2])
                visual.material.diffuse.alpha = float(colorElement[3])
                # Register the material under its name (or a derived one) for reuse.
                if material.hasAttribute('name'):
                    if material.getAttribute('name'):
                        visual.material.name = material.getAttribute('name')
                    else:
                        visual.material.name = node.getAttribute('name') + '_material'
                    Material.namedMaterial[visual.material.name] = visual.material
            elif material.firstChild and material.firstChild.nodeValue in materials:
                # Copy the colors from the bundled Gazebo materials table.
                materialName = material.firstChild.nodeValue
                visual.material.diffuse.red = float(materials[materialName]['diffuse'][0])
                visual.material.diffuse.green = float(materials[materialName]['diffuse'][1])
                visual.material.diffuse.blue = float(materials[materialName]['diffuse'][2])
                visual.material.diffuse.alpha = float(materials[materialName]['diffuse'][3])
                visual.material.ambient.red = float(materials[materialName]['ambient'][0])
                visual.material.ambient.green = float(materials[materialName]['ambient'][1])
                visual.material.ambient.blue = float(materials[materialName]['ambient'][2])
                visual.material.ambient.alpha = float(materials[materialName]['ambient'][3])
                visual.material.specular.red = float(materials[materialName]['specular'][0])
                visual.material.specular.green = float(materials[materialName]['specular'][1])
                visual.material.specular.blue = float(materials[materialName]['specular'][2])
                visual.material.specular.alpha = float(materials[materialName]['specular'][3])
                visual.material.name = materialName
                Material.namedMaterial[materialName] = visual.material
            if hasElement(material, 'texture'):
                visual.material.texture = material.getElementsByTagName('texture')[0].getAttribute('filename')
                # Convert TIFF textures to PNG in '<robotName>_textures'
                # (presumably because the target format cannot load TIFF).
                if os.path.splitext(visual.material.texture)[1] == '.tiff' \
                        or os.path.splitext(visual.material.texture)[1] == '.tif':
                    for dirname, dirnames, filenames in os.walk('.'):
                        for filename in filenames:
                            if filename == str(visual.material.texture.split('/')[-1]):
                                print('try to translate image ' + filename)
                                try:
                                    tifImage = Image.open(os.path.join(dirname, filename))
                                    tifImage.save(os.path.splitext(os.path.join('./' + robotName + '_' + 'textures',
                                                                                filename))[0] + '.png')
                                    visual.material.texture = (robotName + '_' + 'textures/' +
                                                               os.path.splitext(filename)[0] + '.png')
                                except IOError:
                                    visual.material.texture = ""
                                    print('failed to open ' + os.path.join(dirname, filename))
        # Geometry dispatch: one of box / cylinder / sphere / mesh.
        if hasElement(geometryElement, 'box'):
            visual.geometry.box.x = float(geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()[0])
            visual.geometry.box.y = float(geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()[1])
            visual.geometry.box.z = float(geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()[2])
            link.visual.append(visual)
        elif hasElement(geometryElement, 'cylinder'):
            visual.geometry.cylinder.radius = float(geometryElement.getElementsByTagName('cylinder')[0].getAttribute('radius'))
            visual.geometry.cylinder.length = float(geometryElement.getElementsByTagName('cylinder')[0].getAttribute('length'))
            link.visual.append(visual)
        elif hasElement(geometryElement, 'sphere'):
            visual.geometry.sphere.radius = float(geometryElement.getElementsByTagName('sphere')[0].getAttribute('radius'))
            link.visual.append(visual)
        elif hasElement(geometryElement, 'mesh'):
            meshfile = geometryElement.getElementsByTagName('mesh')[0].getAttribute('filename')
            if not os.path.isabs(meshfile):
                meshfile = os.path.normpath(os.path.join(path, meshfile))
            # hack for gazebo mesh database
            if meshfile.count('package'):
                idx0 = meshfile.find('package://')
                meshfile = meshfile[idx0 + len('package://'):]
            if geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale'):
                meshScale = geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale').split()
                visual.geometry.scale[0] = float(meshScale[0])
                visual.geometry.scale[1] = float(meshScale[1])
                visual.geometry.scale[2] = float(meshScale[2])
            extension = os.path.splitext(meshfile)[1].lower()
            if extension in extensionListSingleAppearance:
                # .stl/.obj are referenced by URL, cached by base name.
                name = os.path.splitext(os.path.basename(meshfile))[0]
                if name in Geometry.reference:
                    visual.geometry = Geometry.reference[name]
                else:
                    visual.geometry.mesh.url = '"' + meshfile + '"'
                    visual.geometry.name = name
                    Geometry.reference[name] = visual.geometry
                link.visual.append(visual)
            elif extension == '.dae':
                # Collada meshes append their own Visual objects to the link.
                getColladaMesh(meshfile, visual, link)
            else:
                print('Unsupported mesh format: \"' + extension + '\"')
def getCollision(link, node, path):
    """Parse the <collision> elements of a URDF link.

    Each parsed Collision is appended to ``link.collision``.

    Args:
        link: target Link object (mutated in place).
        node: XML element of the <link> (xml.dom node).
        path: directory used to resolve relative mesh filenames.
    """
    for index in range(0, len(node.getElementsByTagName('collision'))):
        collision = Collision()
        collisionElement = node.getElementsByTagName('collision')[index]
        if hasElement(collisionElement, 'origin'):
            # 'xyz' and 'rpy' are both optional attributes of <origin>.
            if collisionElement.getElementsByTagName('origin')[0].getAttribute('xyz'):
                collision.position = getPosition(collisionElement)
            if collisionElement.getElementsByTagName('origin')[0].getAttribute('rpy'):
                collision.rotation = getRotation(collisionElement)
        # NOTE(review): cylinders without an <origin> still call getRotation
        # here — presumably to apply a default rotation; confirm getRotation
        # copes with a missing <origin> child.
        elif hasElement(collisionElement.getElementsByTagName('geometry')[0], 'cylinder'):
            collision.rotation = getRotation(collisionElement)
        geometryElement = collisionElement.getElementsByTagName('geometry')[0]
        if hasElement(geometryElement, 'box'):
            size = geometryElement.getElementsByTagName('box')[0].getAttribute('size').split()
            collision.geometry.box.x = float(size[0])
            collision.geometry.box.y = float(size[1])
            collision.geometry.box.z = float(size[2])
            link.collision.append(collision)
        elif hasElement(geometryElement, 'cylinder'):
            element = geometryElement.getElementsByTagName('cylinder')[0]
            collision.geometry.cylinder.radius = float(element.getAttribute('radius'))
            collision.geometry.cylinder.length = float(element.getAttribute('length'))
            link.collision.append(collision)
        elif hasElement(geometryElement, 'sphere'):
            collision.geometry.sphere.radius = float(geometryElement.getElementsByTagName('sphere')[0].getAttribute('radius'))
            link.collision.append(collision)
        elif hasElement(geometryElement, 'mesh'):
            meshfile = os.path.normpath(os.path.join(path,
                                                     geometryElement.getElementsByTagName('mesh')[0].getAttribute('filename')))
            if geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale'):
                meshScale = geometryElement.getElementsByTagName('mesh')[0].getAttribute('scale').split()
                collision.geometry.scale[0] = float(meshScale[0])
                collision.geometry.scale[1] = float(meshScale[1])
                collision.geometry.scale[2] = float(meshScale[2])
            # hack for gazebo mesh database: strip the 'package://' prefix
            if meshfile.count('package'):
                idx0 = meshfile.find('package://')
                meshfile = meshfile[idx0 + len('package://'):]
            extension = os.path.splitext(meshfile)[1].lower()
            if extension in extensionListSingleAppearance:
                name = os.path.splitext(os.path.basename(meshfile))[0]
                # Geometry.reference caches geometries by mesh base name so
                # identical meshes are shared instead of re-parsed.
                if name in Geometry.reference:
                    collision.geometry = Geometry.reference[name]
                else:
                    collision.geometry.mesh.url = '"' + meshfile + '"'
                    collision.geometry.name = name
                    Geometry.reference[name] = collision.geometry
                link.collision.append(collision)
            elif extension == '.dae':
                getColladaMesh(meshfile, collision, link)
            else:
                print('Unsupported mesh format for collision: \"' + extension + '\"')
def getAxis(node):
    """Return the rotation axis of a joint as a list of three floats.

    Reads the ``xyz`` attribute of the first <axis> child of *node*.
    """
    components = node.getElementsByTagName('axis')[0].getAttribute('xyz').split()
    # Exactly three components are expected; indexing keeps the original
    # IndexError behavior for malformed input.
    return [float(components[i]) for i in range(3)]
def getCalibration(node):
    """Parse the URDF <calibration> tag of a joint into a Calibration object."""
    element = node.getElementsByTagName('calibration')[0]
    calibration = Calibration()
    # A 'rising' child selects the rising reference; otherwise fall back to
    # the 'falling' one.
    calibration.rising = hasElement(element, 'rising')
    if calibration.rising:
        calibration.limit = element.getAttribute('rising')
    else:
        calibration.limit = element.getAttribute('falling')
    return calibration
def getDynamics(node):
    """Parse the <dynamics> tag of a joint into a Dynamics object.

    Only attributes that are present (non-empty) override the defaults.
    """
    element = node.getElementsByTagName('dynamics')[0]
    dynamics = Dynamics()
    for attribute in ('damping', 'friction'):
        value = element.getAttribute(attribute)
        if value:  # getAttribute returns '' when the attribute is absent
            setattr(dynamics, attribute, float(value))
    return dynamics
def getLimit(node):
    """Parse the <limit> tag of a joint into a Limit object.

    'lower' and 'upper' are optional; 'effort' and 'velocity' are read
    unconditionally (a missing one raises ValueError, as before).
    """
    element = node.getElementsByTagName('limit')[0]
    limit = Limit()
    lower = element.getAttribute('lower')
    if lower:
        limit.lower = float(lower)
    upper = element.getAttribute('upper')
    if upper:
        limit.upper = float(upper)
    # A zero effort keeps the Limit default untouched.
    effort = float(element.getAttribute('effort'))
    if effort != 0:
        limit.effort = effort
    limit.velocity = float(element.getAttribute('velocity'))
    return limit
def getSafety(node):
    """Parse the <safety_controller> tag of a joint into a Safety object."""
    element = node.getElementsByTagName('safety_controller')[0]
    safety = Safety()
    # The three soft limits are optional; empty attribute strings are skipped.
    if element.getAttribute('soft_lower_limit'):
        safety.lower = float(element.getAttribute('soft_lower_limit'))
    if element.getAttribute('soft_upper_limit'):
        safety.upper = float(element.getAttribute('soft_upper_limit'))
    if element.getAttribute('k_position'):
        safety.kPosition = float(element.getAttribute('k_position'))
    # 'k_velocity' is read unconditionally, as in the original code.
    safety.kVelocity = float(element.getAttribute('k_velocity'))
    return safety
def getLink(node, path):
    """Parse a URDF <link> element into a Link object."""
    link = Link()
    link.name = node.getAttribute('name')
    hasInertial = hasElement(node, 'inertial')
    hasVisual = hasElement(node, 'visual')
    hasCollision = hasElement(node, 'collision')
    if hasInertial:
        link.inertia = getInertia(node)
    if hasVisual:
        getVisual(link, node, path)
    if hasCollision:
        getCollision(link, node, path)
    # A completely empty link is marked mass-less; that marker is used later
    # to identify and remove dummy links.
    if not (hasInertial or hasVisual or hasCollision):
        link.inertia.mass = None
    return link
def getJoint(node):
    """Parse a URDF <joint> element into a Joint object."""
    joint = Joint()
    joint.name = node.getAttribute('name')
    joint.type = node.getAttribute('type')
    if hasElement(node, 'origin'):
        originElement = node.getElementsByTagName('origin')[0]
        if originElement.getAttribute('xyz'):
            joint.position = getPosition(node)
        if originElement.getAttribute('rpy'):
            joint.rotation = getRotation(node)
    joint.parent = node.getElementsByTagName('parent')[0].getAttribute('link')
    joint.child = node.getElementsByTagName('child')[0].getAttribute('link')
    # The remaining sub-elements are all optional.
    if hasElement(node, 'axis'):
        joint.axis = getAxis(node)
    if hasElement(node, 'calibration'):
        joint.calibration = getCalibration(node)
    if hasElement(node, 'dynamics'):
        joint.dynamics = getDynamics(node)
    if hasElement(node, 'limit'):
        joint.limit = getLimit(node)
    if hasElement(node, 'safety_controller'):
        joint.safety = getSafety(node)
    return joint
def isRootLink(link, childList):
    """Return True if *link* is a root link (not the child of any joint).

    Args:
        link: name of the link (string).
        childList: names of every link that appears as a joint child.
    """
    # Membership test replaces the original hand-rolled scan loop.
    return link not in childList
def removeDummyLinksAndStaticBaseFlag(linkList, jointList, toolSlot):
    """Remove dummy links (mass-less, collision-less links) in place.

    Joints around a removed link are merged so the kinematic chain stays
    intact. Links named *toolSlot* are always kept. Returns True when a
    removed dummy link indicates that the robot base should be static.
    """
    staticBase = False
    linkIndex = 0
    childList = []
    for joint in jointList:
        childList.append(joint.child)
    while linkIndex < len(linkList):
        link = linkList[linkIndex]
        # We want to skip links between the robot root and the static environment.
        if isRootLink(link.name, childList):
            linkIndex += 1
            continue
        # This link will not have a 'physics' field and is not used to have a toolSlot or a static base -> remove it
        if link.inertia.mass is None and not link.collision and link.name != toolSlot:
            # Find the joint above (parentJointIndex) and below
            # (childJointIndex) this dummy link.
            parentJointIndex = None
            childJointIndex = None
            index = -1
            for joint in jointList:
                index += 1
                if joint.parent == link.name:
                    childJointIndex = index
                elif joint.child == link.name:
                    parentJointIndex = index
            if parentJointIndex is not None:
                if childJointIndex is not None:
                    # Merge the two joints: the upper joint adopts the lower
                    # joint's child and the composed transform.
                    jointList[parentJointIndex].child = jointList[childJointIndex].child
                    jointList[parentJointIndex].position = combineTranslations(jointList[parentJointIndex].position, rotateVector(jointList[childJointIndex].position, jointList[parentJointIndex].rotation))
                    jointList[parentJointIndex].rotation = combineRotations(jointList[childJointIndex].rotation, jointList[parentJointIndex].rotation)
                    jointList[parentJointIndex].name = jointList[parentJointIndex].parent + "-" + jointList[parentJointIndex].child
                    jointList.remove(jointList[childJointIndex])
                else:
                    # Special case for dummy non-root links used to fix the base of the robot
                    parentLink = jointList[parentJointIndex].parent
                    if isRootLink(parentLink, childList):
                        # Ensure the parent link does not have physics, if it does, it should be kept as-is
                        # since some sensors require the parent to have physics
                        for l in linkList:
                            if l.name == parentLink and l.inertia.mass is None:
                                staticBase = True
                        jointList.remove(jointList[parentJointIndex])
            # This link can be removed
            linkList.remove(link)
            # linkIndex is intentionally NOT incremented after a removal: the
            # next element has shifted into the current index.
        else:
            linkIndex += 1
    # NOTE(review): childList is local, so clearing it here has no effect
    # outside this function.
    childList.clear()
    return staticBase
def parseGazeboElement(element, parentLink, linkList):
    """Parse a <gazebo> extension element.

    Recognized plugins (IMU, F3D force sensor, P3D) and sensors (camera,
    (gpu_)ray lidar) are extracted into the global ``IMU.list``,
    ``P3D.list``, ``Camera.list`` and ``Lidar.list`` registries.

    Args:
        element: the <gazebo> XML element.
        parentLink: name of the link the element applies to by default.
        linkList: all parsed Link objects (used to resolve 'reference').
    """
    # A valid 'reference' attribute re-targets everything in this element.
    if element.hasAttribute("reference") and any([link.name == element.getAttribute('reference') for link in linkList]):
        parentLink = element.getAttribute("reference")
    for plugin in element.getElementsByTagName('plugin'):
        if plugin.hasAttribute('filename') and plugin.getAttribute('filename').startswith('libgazebo_ros_imu'):
            imu = IMU()
            imu.parentLink = parentLink
            if hasElement(plugin, 'topicName'):
                imu.name = plugin.getElementsByTagName('topicName')[0].firstChild.nodeValue
            if hasElement(plugin, 'gaussianNoise'):
                imu.gaussianNoise = float(plugin.getElementsByTagName('gaussianNoise')[0].firstChild.nodeValue)
            IMU.list.append(imu)
        elif plugin.hasAttribute('filename') and plugin.getAttribute('filename').startswith('libgazebo_ros_f3d'):
            # F3D plugin: mark the referenced link as a force sensor.
            if hasElement(plugin, "bodyName"):
                name = plugin.getElementsByTagName('bodyName')[0].firstChild.nodeValue
                for link in linkList:
                    if link.name == name:
                        link.forceSensor = True
                        break
        elif plugin.hasAttribute('filename') and plugin.getAttribute('filename').startswith('libgazebo_ros_p3d'):
            p3d = P3D()
            p3d.parentLink = parentLink
            if hasElement(plugin, 'topicName'):
                p3d.name = plugin.getElementsByTagName('topicName')[0].firstChild.nodeValue
            # Offsets are not supported; warn so the user knows they are dropped.
            if hasElement(plugin, "xyzOffsets"):
                print('\033[1;33mWarning: URDF parser cannot handle \"xyzOffsets\" from p3d!\033[0m')
            if hasElement(plugin, "rpyOffsets"):
                print('\033[1;33mWarning: URDF parser cannot handle \"rpyOffsets\" from p3d!\033[0m')
            P3D.list.append(p3d)
    for sensorElement in element.getElementsByTagName('sensor'):
        if sensorElement.getAttribute('type') == 'camera':
            camera = Camera()
            camera.parentLink = parentLink
            camera.name = sensorElement.getAttribute('name')
            if hasElement(sensorElement, 'camera'):
                cameraElement = sensorElement.getElementsByTagName('camera')[0]
                if hasElement(cameraElement, 'horizontal_fov'):
                    camera.fov = float(cameraElement.getElementsByTagName('horizontal_fov')[0].firstChild.nodeValue)
                if hasElement(cameraElement, 'image'):
                    imageElement = cameraElement.getElementsByTagName('image')[0]
                    if hasElement(imageElement, 'width'):
                        camera.width = int(imageElement.getElementsByTagName('width')[0].firstChild.nodeValue)
                    if hasElement(imageElement, 'height'):
                        camera.height = int(imageElement.getElementsByTagName('height')[0].firstChild.nodeValue)
                    # Only R8G8B8A8 is supported; other formats are coerced.
                    if hasElement(imageElement, 'format') \
                            and imageElement.getElementsByTagName('format')[0].firstChild.nodeValue != 'R8G8B8A8':
                        print('Unsupported "%s" image format, using "R8G8B8A8" instead.' %
                              str(imageElement.getElementsByTagName('format')[0].firstChild.nodeValue))
            if hasElement(sensorElement, 'noise'):
                noiseElement = sensorElement.getElementsByTagName('noise')[0]
                if hasElement(noiseElement, 'stddev'):
                    camera.noise = float(noiseElement.getElementsByTagName('stddev')[0].firstChild.nodeValue)
            Camera.list.append(camera)
        elif sensorElement.getAttribute('type') == 'ray' or sensorElement.getAttribute('type') == 'gpu_ray':
            lidar = Lidar()
            lidar.parentLink = parentLink
            lidar.name = sensorElement.getAttribute('name')
            if hasElement(sensorElement, 'ray'):
                rayElement = sensorElement.getElementsByTagName('ray')[0]
                if hasElement(rayElement, 'scan'):
                    scanElement = rayElement.getElementsByTagName('scan')[0]
                    if hasElement(scanElement, 'horizontal'):
                        horizontalElement = scanElement.getElementsByTagName('horizontal')[0]
                        if hasElement(horizontalElement, 'samples'):
                            lidar.horizontalResolution = \
                                int(float(horizontalElement.getElementsByTagName('samples')[0].firstChild.nodeValue))
                        # Horizontal field of view is derived from the angle range.
                        if hasElement(horizontalElement, 'min_angle') and hasElement(horizontalElement, 'max_angle'):
                            minAngle = float(horizontalElement.getElementsByTagName('min_angle')[0].firstChild.nodeValue)
                            maxAngle = float(horizontalElement.getElementsByTagName('max_angle')[0].firstChild.nodeValue)
                            lidar.fov = maxAngle - minAngle
                    if hasElement(scanElement, 'vertical'):
                        verticalElement = scanElement.getElementsByTagName('vertical')[0]
                        if hasElement(verticalElement, 'samples'):
                            lidar.numberOfLayers = \
                                int(verticalElement.getElementsByTagName('samples')[0].firstChild.nodeValue)
                        if hasElement(verticalElement, 'min_angle') and hasElement(verticalElement, 'max_angle'):
                            minAngle = float(verticalElement.getElementsByTagName('min_angle')[0].firstChild.nodeValue)
                            maxAngle = float(verticalElement.getElementsByTagName('max_angle')[0].firstChild.nodeValue)
                            lidar.verticalFieldOfView = maxAngle - minAngle
                if hasElement(rayElement, 'range'):
                    rangeElement = rayElement.getElementsByTagName('range')[0]
                    if hasElement(rangeElement, 'min'):
                        lidar.minRange = float(rangeElement.getElementsByTagName('min')[0].firstChild.nodeValue)
                    if hasElement(rangeElement, 'max'):
                        lidar.maxRange = float(rangeElement.getElementsByTagName('max')[0].firstChild.nodeValue)
                    if hasElement(rangeElement, 'resolution'):
                        lidar.resolution = float(rangeElement.getElementsByTagName('resolution')[0].firstChild.nodeValue)
            if hasElement(sensorElement, 'noise'):
                noiseElement = sensorElement.getElementsByTagName('noise')[0]
                if hasElement(noiseElement, 'stddev'):
                    lidar.noise = float(noiseElement.getElementsByTagName('stddev')[0].firstChild.nodeValue)
                    # Noise is normalized by the maximum range when available.
                    if lidar.maxRange:
                        lidar.noise /= lidar.maxRange
            Lidar.list.append(lidar)
| 45.576809 | 205 | 0.591776 |
431f0bfdd41900f7510432c2cb6d10113adde3d4 | 1,121 | py | Python | server/djangoapp/urls.py | Dee-Sol/agfzb-CloudAppDevelopment_Capstone | ad3c25acdf7b8f9643e4698462079fe8347d1256 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/urls.py | Dee-Sol/agfzb-CloudAppDevelopment_Capstone | ad3c25acdf7b8f9643e4698462079fe8347d1256 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/urls.py | Dee-Sol/agfzb-CloudAppDevelopment_Capstone | ad3c25acdf7b8f9643e4698462079fe8347d1256 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
app_name = 'djangoapp'
urlpatterns = [
# route is a string contains a URL pattern
# view refers to the view function
# name the URL
# path for about view
path(route='about/', view=views.about, name='about'),
# path for contact us view
path(route='contact/', view=views.contact, name='contact'),
# path for registration
path(route='registration/', view=views.registration_request, name='registration'),
# path for login
path('login/', views.login_request, name='login'),
# path for logout
path('logout/', views.logout_request, name='logout'),
# path for landing page
path(route='', view=views.get_dealerships, name='index'),
# path for dealer reviews view
path('dealer/<int:dealerId>/', views.get_dealer_details, name='dealer_details'),
# path for add a review view
path('dealer/<int:dealerId>/add_review', views.add_review, name='add_review'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 33.969697 | 86 | 0.698483 |
64286b3590b7ea586a84e45db679779948b89e0c | 4,921 | py | Python | core/sawtooth_poet/poet_consensus/wait_timer.py | peterschwarz/sawtooth-poet | 6bce9a7c11bd1bdb54e24224b69b75e29b08f1a1 | [
"Apache-2.0"
] | 14 | 2018-07-27T06:06:31.000Z | 2021-12-03T00:30:39.000Z | core/sawtooth_poet/poet_consensus/wait_timer.py | peterschwarz/sawtooth-poet | 6bce9a7c11bd1bdb54e24224b69b75e29b08f1a1 | [
"Apache-2.0"
] | 20 | 2018-07-24T00:58:10.000Z | 2021-12-05T22:42:14.000Z | core/sawtooth_poet/poet_consensus/wait_timer.py | peterschwarz/sawtooth-poet | 6bce9a7c11bd1bdb54e24224b69b75e29b08f1a1 | [
"Apache-2.0"
] | 38 | 2018-07-20T19:54:48.000Z | 2022-03-20T07:27:46.000Z | # Copyright 2016, 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
LOGGER = logging.getLogger(__name__)
class WaitTimer:
    """A random wait duration that gets incorporated into a wait certificate.

    Attributes:
        previous_certificate_id (str): ID of the wait certificate this timer
            builds upon.
        local_mean (float): Local mean wait time computed from the
            certificate history.
        request_time (float): Time at which the timer was requested.
        duration (float): Random duration of the timer.
        validator_address (str): Address of the validator that created the
            timer.
    """

    def __init__(self, enclave_timer):
        """Wrap an enclave timer object.

        Args:
            enclave_timer: timer object returned by the PoET enclave; its
                fields are mirrored here as plain Python types.
        """
        self._enclave_wait_timer = enclave_timer
        self.previous_certificate_id = \
            str(enclave_timer.previous_certificate_id)
        self.local_mean = float(enclave_timer.local_mean)
        self.request_time = float(enclave_timer.request_time)
        self.duration = float(enclave_timer.duration)
        self.validator_address = str(enclave_timer.validator_address)
        # Small 0.1s grace period before the enclave is even consulted about
        # expiration.
        self._expires = self.request_time + self.duration + 0.1
        # Cache for the (expensive) enclave serialization; filled lazily.
        self._serialized_timer = None

    @classmethod
    def create_wait_timer(cls,
                          poet_enclave_module,
                          sealed_signup_data,
                          validator_address,
                          previous_certificate_id,
                          consensus_state,
                          poet_settings_view):
        """Create a timer inside the enclave and wrap it in a WaitTimer.

        Args:
            poet_enclave_module (module): implementation of the underlying
                PoET enclave.
            sealed_signup_data (str): serialized secret blob from
                create_signup_info, used to authenticate with the enclave.
            validator_address (str): address of the validator creating the
                timer.
            previous_certificate_id (str): ID of the wait certificate of the
                block being built upon.
            consensus_state (ConsensusState): current PoET consensus state.
            poet_settings_view (PoetSettingsView): current PoET config view.

        Returns:
            WaitTimer: a new wait timer.
        """
        local_mean = consensus_state.compute_local_mean(
            poet_settings_view=poet_settings_view)
        enclave_timer = poet_enclave_module.create_wait_timer(
            sealed_signup_data,
            validator_address,
            previous_certificate_id,
            local_mean)
        return cls(enclave_timer)

    @property
    def enclave_wait_timer(self):
        """The underlying enclave timer object this WaitTimer wraps."""
        return self._enclave_wait_timer

    def __str__(self):
        return \
            'TIMER, {:0.2f}, {:0.2f}, {}'.format(
                self.local_mean,
                self.duration,
                self.previous_certificate_id)

    def population_estimate(self, poet_settings_view):
        """Return the population estimate for the associated block.

        Args:
            poet_settings_view (PoetSettingsView): current PoET config view.

        Returns:
            float: local mean divided by the configured target wait time.
        """
        return self.local_mean / poet_settings_view.target_wait_time

    def serialize(self):
        """Serialize the underlying enclave wait timer (cached)."""
        if self._serialized_timer is None:
            self._serialized_timer = self._enclave_wait_timer.serialize()
        return self._serialized_timer

    def has_expired(self, now):
        """Determine whether the timer has expired.

        Args:
            now (float): the current time.

        Returns:
            bool: True if the timer has expired, False otherwise.
        """
        if now >= self._expires:
            # The local clock says expired; the enclave has the final word.
            return self._enclave_wait_timer.has_expired()
        return False
| 36.183824 | 80 | 0.629547 |
dba1104153a91a29db6d1a1e87aa96a27de7fc8e | 1,823 | py | Python | models/encoders/map2style.py | RahulBhalley/restyle-encoder-light | 9111ee9caa1d82d7510c800850f7fe2e5a573f11 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | null | null | null | models/encoders/map2style.py | RahulBhalley/restyle-encoder-light | 9111ee9caa1d82d7510c800850f7fe2e5a573f11 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | null | null | null | models/encoders/map2style.py | RahulBhalley/restyle-encoder-light | 9111ee9caa1d82d7510c800850f7fe2e5a573f11 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | null | null | null | import numpy as np
from torch import nn
from torch.nn import Conv2d, Module, MaxPool2d
from models.stylegan2.model import EqualLinear
class GradualStyleBlock(Module):
    """Map2style block: reduces a feature map to a single style vector.

    Applies ``log2(spatial)`` stride-2 convolutions, each followed by a
    LeakyReLU, then flattens and projects through an equalized linear layer.
    """

    def __init__(self, in_c, out_c, spatial):
        super(GradualStyleBlock, self).__init__()
        self.out_c = out_c
        self.spatial = spatial
        num_pools = int(np.log2(spatial))
        # First stage changes channel count; the rest keep it constant.
        layers = [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
                  nn.LeakyReLU()]
        for _ in range(num_pools - 1):
            layers.append(Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1))
            layers.append(nn.LeakyReLU())
        self.convs = nn.Sequential(*layers)
        self.linear = EqualLinear(out_c, out_c, lr_mul=1)

    def forward(self, x):
        features = self.convs(x)
        flat = features.view(-1, self.out_c)
        return self.linear(flat)
class GradualStyleBlockLight(Module):
    """Lighter map2style block using conv + max-pool pairs.

    Each stage halves the resolution twice (stride-2 conv followed by a
    stride-2 max-pool), so only ``log2(spatial) / 2`` stages are needed.
    """

    def __init__(self, in_c, out_c, spatial):
        super(GradualStyleBlockLight, self).__init__()
        self.out_c = out_c
        self.spatial = spatial
        num_pools = int(np.log2(spatial) / 2)
        # The first stage pools with kernel 3, the remaining stages with
        # kernel 2.
        layers = [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
                  MaxPool2d(kernel_size=3, stride=2),
                  nn.LeakyReLU()]
        for _ in range(num_pools - 1):
            layers.extend([
                Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),
                MaxPool2d(kernel_size=2, stride=2),
                nn.LeakyReLU(),
            ])
        self.convs = nn.Sequential(*layers)
        self.linear = EqualLinear(out_c, out_c, lr_mul=1)

    def forward(self, x):
        features = self.convs(x)
        flat = features.view(-1, self.out_c)
        return self.linear(flat)
| 32.553571 | 76 | 0.565003 |
e5c5455badf1f6ed7f77a52d89c94aced808d4ae | 351 | py | Python | hpobench/benchmarks/od/od_benchmarks.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 78 | 2017-01-14T14:25:55.000Z | 2020-09-30T22:57:14.000Z | hpobench/benchmarks/od/od_benchmarks.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 84 | 2016-11-24T15:19:20.000Z | 2020-11-09T11:34:19.000Z | hpobench/benchmarks/od/od_benchmarks.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 31 | 2016-11-29T19:56:06.000Z | 2020-07-10T04:13:33.000Z | """
This is just an entry point for starting the benchmarks.
"""
from hpobench.benchmarks.od.od_ae import ODAutoencoder
from hpobench.benchmarks.od.od_kde import ODKernelDensityEstimation
from hpobench.benchmarks.od.od_ocsvm import ODOneClassSupportVectorMachine
# __all__ must list the *names* (strings) of the public objects; putting the
# class objects themselves in it breaks ``from ... import *``, because every
# item in __all__ is required to be a string.
__all__ = ['ODAutoencoder', 'ODKernelDensityEstimation', 'ODOneClassSupportVectorMachine']
| 31.909091 | 84 | 0.849003 |
05d78f0f78d3cad0b90a11968ce52a9c2501cadb | 10,397 | py | Python | calculates_results_stats.py | ian0549/Developing-a-Command-line-Image-Classifier-Application-with-Deep-Learning | 4e4d05933aedd716311dac4ec71e61a7878d290e | [
"MIT"
] | null | null | null | calculates_results_stats.py | ian0549/Developing-a-Command-line-Image-Classifier-Application-with-Deep-Learning | 4e4d05933aedd716311dac4ec71e61a7878d290e | [
"MIT"
] | null | null | null | calculates_results_stats.py | ian0549/Developing-a-Command-line-Image-Classifier-Application-with-Deep-Learning | 4e4d05933aedd716311dac4ec71e61a7878d290e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/calculates_results_stats.py
#
# PROGRAMMER: Ian Akoto
# DATE CREATED: 03/02/2020
# REVISED DATE: 03/04/2020
# PURPOSE: Create a function calculates_results_stats that calculates the
# statistics of the results of the programrun using the classifier's model
# architecture to classify the images. This function will use the
# results in the results dictionary to calculate these statistics.
# This function will then put the results statistics in a dictionary
# (results_stats_dic) that's created and returned by this function.
# This will allow the user of the program to determine the 'best'
# model for classifying the images. The statistics that are calculated
# will be counts and percentages. Please see "Intro to Python - Project
# classifying Images - xx Calculating Results" for details on the
# how to calculate the counts and percentages for this function.
# This function inputs:
# -The results dictionary as results_dic within calculates_results_stats
# function and results for the function call within main.
# This function creates and returns the Results Statistics Dictionary -
# results_stats_dic. This dictionary contains the results statistics
# (either a percentage or a count) where the key is the statistic's
# name (starting with 'pct' for percentage or 'n' for count) and value
# is the statistic's value. This dictionary should contain the
# following keys:
# n_images - number of images
# n_dogs_img - number of dog images
# n_notdogs_img - number of NON-dog images
# n_match - number of matches between pet & classifier labels
# n_correct_dogs - number of correctly classified dog images
# n_correct_notdogs - number of correctly classified NON-dog images
# n_correct_breed - number of correctly classified dog breeds
# pct_match - percentage of correct matches
# pct_correct_dogs - percentage of correctly classified dogs
# pct_correct_breed - percentage of correctly classified dog breeds
# pct_correct_notdogs - percentage of correctly classified NON-dogs
#
##
# TODO 5: Define calculates_results_stats function below, please be certain to replace None
# in the return statement with the results_stats_dic dictionary that you create
# with this function
#
def calculates_results_stats(results_dic):
    """
    Calculates statistics of the results of the program run using classifier's
    model architecture to classify the pet images. The statistics (counts and
    percentages) are returned in a dictionary so that the user can determine
    the 'best' model for classifying the images.

    Parameters:
      results_dic - Dictionary with key as image filename and value as a List
             (index)idx 0 = pet image label (string)
                    idx 1 = classifier label (string)
                    idx 2 = 1/0 (int)  where 1 = match between pet image and
                            classifer labels and 0 = no match between labels
                    idx 3 = 1/0 (int)  where 1 = pet image 'is-a' dog and
                            0 = pet Image 'is-NOT-a' dog.
                    idx 4 = 1/0 (int)  where 1 = Classifier classifies image
                            'as-a' dog and 0 = Classifier classifies image
                            'as-NOT-a' dog.
    Returns:
     results_stats_dic - Dictionary that contains the results statistics
                     (either a percentage, key starting with 'pct', or a
                     count, key starting with 'n') keyed by statistic name:
                     n_images, n_dogs_img, n_notdogs_img, n_match,
                     n_correct_dogs, n_correct_notdogs, n_correct_breed,
                     pct_match, pct_correct_dogs, pct_correct_breed,
                     pct_correct_notdogs.
    """
    results_stats_dic = dict()

    # Counters incremented while scanning the classification results.
    results_stats_dic['n_dogs_img'] = 0
    results_stats_dic['n_match'] = 0
    results_stats_dic['n_correct_dogs'] = 0
    results_stats_dic['n_correct_notdogs'] = 0
    results_stats_dic['n_correct_breed'] = 0

    for key in results_dic:
        labels_match = results_dic[key][2] == 1
        is_dog = results_dic[key][3] == 1
        classified_as_dog = results_dic[key][4] == 1

        if labels_match:
            results_stats_dic['n_match'] += 1
        # Correct breed: the image is a dog AND the labels match exactly.
        if is_dog and labels_match:
            results_stats_dic['n_correct_breed'] += 1
        if is_dog:
            results_stats_dic['n_dogs_img'] += 1
            # Classifier also says "dog" -> correct dog classification.
            if classified_as_dog:
                results_stats_dic['n_correct_dogs'] += 1
        elif not classified_as_dog:
            # Image is NOT a dog and the classifier agrees.
            results_stats_dic['n_correct_notdogs'] += 1

    results_stats_dic['n_images'] = len(results_dic)
    results_stats_dic['n_notdogs_img'] = (results_stats_dic['n_images'] -
                                          results_stats_dic['n_dogs_img'])

    # Percentages. Each denominator is guarded so the function also works for
    # result sets with no images, no dog images, or no non-dog images
    # (previously such inputs raised ZeroDivisionError).
    if results_stats_dic['n_images'] > 0:
        results_stats_dic['pct_match'] = (results_stats_dic['n_match'] /
                                          results_stats_dic['n_images']) * 100.0
    else:
        results_stats_dic['pct_match'] = 0.0

    if results_stats_dic['n_dogs_img'] > 0:
        results_stats_dic['pct_correct_dogs'] = (
            results_stats_dic['n_correct_dogs'] /
            results_stats_dic['n_dogs_img']) * 100.0
        results_stats_dic['pct_correct_breed'] = (
            results_stats_dic['n_correct_breed'] /
            results_stats_dic['n_dogs_img']) * 100.0
    else:
        results_stats_dic['pct_correct_dogs'] = 0.0
        results_stats_dic['pct_correct_breed'] = 0.0

    if results_stats_dic['n_notdogs_img'] > 0:
        results_stats_dic['pct_correct_notdogs'] = (
            results_stats_dic['n_correct_notdogs'] /
            results_stats_dic['n_notdogs_img']) * 100.0
    else:
        results_stats_dic['pct_correct_notdogs'] = 0.0

    return results_stats_dic
| 55.897849 | 123 | 0.63326 |
5c8576e5211283b339f9ec0b098042a64ce8e7a7 | 359 | py | Python | GBP/urls.py | Scankiran/CurrencyAPI | 1fc987e267210796813028f773d8ea631d5b8da6 | [
"MIT"
] | null | null | null | GBP/urls.py | Scankiran/CurrencyAPI | 1fc987e267210796813028f773d8ea631d5b8da6 | [
"MIT"
] | null | null | null | GBP/urls.py | Scankiran/CurrencyAPI | 1fc987e267210796813028f773d8ea631d5b8da6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path,include
from . import views
app_name = "GBP"
urlpatterns = [
path('last/', views.lastData, name = 'gbpLast'),
# path('hourly/',views.dashboard, name = 'gbpHourly'),
# path('daily/',views.addArticle,name='gbpoDaily'),
# path('mounthly',views.detailArticle,name = 'gbpMonthly'),
] | 27.615385 | 63 | 0.685237 |
f6510547d0467fb391ff467e3bc55c67b1c5780d | 6,773 | py | Python | book/imdb_keras.py | qitsweauca/pyprobml | 59a1191896fbb7408fb589f0b8170a42c0f55969 | [
"MIT"
] | 1 | 2019-03-04T05:43:10.000Z | 2019-03-04T05:43:10.000Z | book/imdb_keras.py | YihaoHu/pyprobml | 59a1191896fbb7408fb589f0b8170a42c0f55969 | [
"MIT"
] | null | null | null | book/imdb_keras.py | YihaoHu/pyprobml | 59a1191896fbb7408fb589f0b8170a42c0f55969 | [
"MIT"
] | null | null | null |
# Movie review classifier using keras. Based on
# https://www.tensorflow.org/tutorials/keras/basic_text_classification
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
from utils import save_fig
# Print the TF version in use and fix the NumPy seed for reproducibility.
print(tf.__version__)
np.random.seed(0)
# Load the IMDB review dataset, keeping only the `vocab_size` most frequent
# words; each review arrives as a variable-length list of integer word ids.
imdb = keras.datasets.imdb
vocab_size = 10000
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=vocab_size)
print(np.shape(train_data)) # (25000)
print(len(train_data[0]))
print(len(train_data[1]))
print(train_data[0])
# [1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941...]
# Build the word -> id mapping. Ids 0-3 are reserved for control tokens, so
# every id from the raw index is shifted up by 3 before the specials go in.
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
# Inverse mapping (id -> word), used to turn encoded reviews back into text.
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
    """Turn a sequence of integer word ids back into a readable string.

    Ids missing from the module-level ``reverse_word_index`` table are
    rendered as ``'?'``.
    """
    words = (reverse_word_index.get(word_id, '?') for word_id in text)
    return ' '.join(words)
decode_review(train_data[0])
"""
<START> this film was just brilliant casting location scenery story direction everyone's really suited the part
they played and you could just imagine being there robert <UNK> is an amazing actor and now the same being director <UNK>
father came from the same scottish island as myself ...
"""
train_data = keras.preprocessing.sequence.pad_sequences(
train_data, value=word_index["<PAD>"], padding='post', maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(
test_data, value=word_index["<PAD>"], padding='post', maxlen=256)
print(train_data[0])
"""
[ 1 14 22 16 43 530 973 1622 1385 65 458 4468 66 3941
4 173 36 256 5 25 100 43 838 112 50 670 2 9
35 480 284 5 150 4 172 112 167 2 336 385 39 4
172 4536 1111 17 546 38 13 447 4 192 50 16 6 147
2025 19 14 22 4 1920 4613 469 4 22 71 87 12 16
43 530 38 76 15 13 1247 4 22 17 515 17 12 16
626 18 2 5 62 386 12 8 316 8 106 5 4 2223
5244 16 480 66 3785 33 4 130 12 16 38 619 5 25
124 51 36 135 48 25 1415 33 6 22 12 215 28 77
52 5 14 407 16 82 2 8 4 107 117 5952 15 256
4 2 7 3766 5 723 36 71 43 530 476 26 400 317
46 7 4 2 1029 13 104 88 4 381 15 297 98 32
2071 56 26 141 6 194 7486 18 4 226 22 21 134 476
26 480 5 144 30 5535 18 51 36 28 224 92 25 104
4 226 65 16 38 1334 88 12 16 283 5 16 4472 113
103 32 15 16 5345 19 178 32 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0]
"""
embed_size = 16
def make_model(embed_size):
    """Build and compile the bag-of-embeddings sentiment classifier.

    Architecture: embedding -> global average pooling -> 16-unit ReLU
    hidden layer -> single sigmoid output (probability of a positive
    review). Uses the module-level ``vocab_size`` for the embedding table.

    Args:
        embed_size: dimensionality of the word-embedding vectors.

    Returns:
        A compiled ``keras.Sequential`` model (Adam optimizer, binary
        cross-entropy loss, accuracy metric).
    """
    layer_stack = [
        keras.layers.Embedding(vocab_size, embed_size),
        keras.layers.GlobalAveragePooling1D(),
        keras.layers.Dense(16, activation=tf.nn.relu),
        keras.layers.Dense(1, activation=tf.nn.sigmoid),
    ]
    model = keras.Sequential(layer_stack)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return model
model = make_model(embed_size)
model.summary()
"""
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, None, 16) 160000
_________________________________________________________________
global_average_pooling1d (Gl (None, 16) 0
_________________________________________________________________
dense (Dense) (None, 16) 272
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 160,289
Trainable params: 160,289
Non-trainable params: 0
_________________________________________________________________
"""
# Hold out the first 10k training reviews as a validation set.
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
# Train for a fixed 40 epochs, recording validation loss/accuracy per epoch.
history = model.fit(partial_x_train,
                partial_y_train,
                epochs=40,
                batch_size=512,
                validation_data=(x_val, y_val),
                verbose=1)
# Final [loss, accuracy] on the held-out test set.
results = model.evaluate(test_data, test_labels)
print(results)
history_dict = history.history
print(history_dict.keys())
import matplotlib.pyplot as plt
# Per-epoch curves recorded by fit().
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# Training vs validation loss; diverging curves indicate overfitting.
fig, ax = plt.subplots()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'r-', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
save_fig("imdb-loss.pdf")
plt.show()
# Training vs validation accuracy for the same run.
fig, ax = plt.subplots()
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
save_fig("imdb-acc.pdf")
plt.show()
# Now turn on early stopping
# https://chrisalbon.com/deep_learning/keras/neural_network_early_stopping/
class PrintDot(keras.callbacks.Callback):
    """Keras callback that prints one dot per epoch as a compact progress bar.

    A newline is emitted every 100 epochs so the dots wrap instead of
    producing one endless line.
    """

    # `logs=None` matches the Keras Callback API; the original signature
    # (`logs` with no default) raised TypeError if called without it.
    def on_epoch_end(self, epoch, logs=None):
        """Print '.' for the finished epoch; start a new line every 100 epochs."""
        if epoch % 100 == 0:
            print('')
        print('.', end='')
# Callbacks: dot-per-epoch progress, stop once val_loss has not improved
# for 2 consecutive epochs, and checkpoint the best model seen so far.
callbacks = [PrintDot(),
        keras.callbacks.EarlyStopping(monitor='val_loss', patience=2),
        keras.callbacks.ModelCheckpoint(filepath='imdb_keras_best_model.ckpt',
                        monitor='val_loss', save_best_only=True)]
# Reset parameters to a new random state
model = make_model(embed_size)
history = model.fit(
  partial_x_train, partial_y_train, epochs=40, batch_size=512,
  validation_data=(x_val, y_val), verbose=0, callbacks=callbacks)
print(history)
history_dict = history.history
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# Loss curves again; early stopping should truncate the epochs axis.
fig, ax = plt.subplots()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'r-', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
save_fig("imdb-loss-early-stop.pdf")
plt.show() | 35.835979 | 122 | 0.636498 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.