hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f30ba55c15f29bf30749e56c200370e76ea9326f | 10,749 | py | Python | sdk/python/pulumi_aws/cloudformation/get_cloud_formation_type.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cloudformation/get_cloud_formation_type.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cloudformation/get_cloud_formation_type.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetCloudFormationTypeResult',
'AwaitableGetCloudFormationTypeResult',
'get_cloud_formation_type',
]
@pulumi.output_type
class GetCloudFormationTypeResult:
    """
    A collection of values returned by getCloudFormationType.
    """
    # Generated result container: the constructor type-checks each field and
    # stores it with pulumi.set; the @property getters below read the stored
    # values back with pulumi.get.
    def __init__(__self__, arn=None, default_version_id=None, deprecated_status=None, description=None, documentation_url=None, execution_role_arn=None, id=None, is_default_version=None, logging_configs=None, provisioning_type=None, schema=None, source_url=None, type=None, type_arn=None, type_name=None, version_id=None, visibility=None):
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if default_version_id and not isinstance(default_version_id, str):
            raise TypeError("Expected argument 'default_version_id' to be a str")
        pulumi.set(__self__, "default_version_id", default_version_id)
        if deprecated_status and not isinstance(deprecated_status, str):
            raise TypeError("Expected argument 'deprecated_status' to be a str")
        pulumi.set(__self__, "deprecated_status", deprecated_status)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if documentation_url and not isinstance(documentation_url, str):
            raise TypeError("Expected argument 'documentation_url' to be a str")
        pulumi.set(__self__, "documentation_url", documentation_url)
        if execution_role_arn and not isinstance(execution_role_arn, str):
            raise TypeError("Expected argument 'execution_role_arn' to be a str")
        pulumi.set(__self__, "execution_role_arn", execution_role_arn)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_default_version and not isinstance(is_default_version, bool):
            raise TypeError("Expected argument 'is_default_version' to be a bool")
        pulumi.set(__self__, "is_default_version", is_default_version)
        if logging_configs and not isinstance(logging_configs, list):
            raise TypeError("Expected argument 'logging_configs' to be a list")
        pulumi.set(__self__, "logging_configs", logging_configs)
        if provisioning_type and not isinstance(provisioning_type, str):
            raise TypeError("Expected argument 'provisioning_type' to be a str")
        pulumi.set(__self__, "provisioning_type", provisioning_type)
        if schema and not isinstance(schema, str):
            raise TypeError("Expected argument 'schema' to be a str")
        pulumi.set(__self__, "schema", schema)
        if source_url and not isinstance(source_url, str):
            raise TypeError("Expected argument 'source_url' to be a str")
        pulumi.set(__self__, "source_url", source_url)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if type_arn and not isinstance(type_arn, str):
            raise TypeError("Expected argument 'type_arn' to be a str")
        pulumi.set(__self__, "type_arn", type_arn)
        if type_name and not isinstance(type_name, str):
            raise TypeError("Expected argument 'type_name' to be a str")
        pulumi.set(__self__, "type_name", type_name)
        if version_id and not isinstance(version_id, str):
            raise TypeError("Expected argument 'version_id' to be a str")
        pulumi.set(__self__, "version_id", version_id)
        if visibility and not isinstance(visibility, str):
            raise TypeError("Expected argument 'visibility' to be a str")
        pulumi.set(__self__, "visibility", visibility)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        Amazon Resource Name (ARN) of the CloudFormation Type.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="defaultVersionId")
    def default_version_id(self) -> str:
        """
        Identifier of the CloudFormation Type default version.
        """
        return pulumi.get(self, "default_version_id")
    @property
    @pulumi.getter(name="deprecatedStatus")
    def deprecated_status(self) -> str:
        """
        Deprecation status of the CloudFormation Type.
        """
        return pulumi.get(self, "deprecated_status")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        Description of the CloudFormation Type.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="documentationUrl")
    def documentation_url(self) -> str:
        """
        URL of the documentation for the CloudFormation Type.
        """
        return pulumi.get(self, "documentation_url")
    @property
    @pulumi.getter(name="executionRoleArn")
    def execution_role_arn(self) -> str:
        """
        Amazon Resource Name (ARN) of the IAM Role used to register the CloudFormation Type.
        """
        return pulumi.get(self, "execution_role_arn")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="isDefaultVersion")
    def is_default_version(self) -> bool:
        """
        Whether the CloudFormation Type version is the default version.
        """
        return pulumi.get(self, "is_default_version")
    @property
    @pulumi.getter(name="loggingConfigs")
    def logging_configs(self) -> Sequence['outputs.GetCloudFormationTypeLoggingConfigResult']:
        """
        List of objects containing logging configuration.
        """
        return pulumi.get(self, "logging_configs")
    @property
    @pulumi.getter(name="provisioningType")
    def provisioning_type(self) -> str:
        """
        Provisioning behavior of the CloudFormation Type.
        """
        return pulumi.get(self, "provisioning_type")
    @property
    @pulumi.getter
    def schema(self) -> str:
        """
        JSON document of the CloudFormation Type schema.
        """
        return pulumi.get(self, "schema")
    @property
    @pulumi.getter(name="sourceUrl")
    def source_url(self) -> str:
        """
        URL of the source code for the CloudFormation Type.
        """
        return pulumi.get(self, "source_url")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        CloudFormation Registry Type. For example, `RESOURCE`.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="typeArn")
    def type_arn(self) -> str:
        # NOTE(review): undocumented upstream; presumably the ARN of the type
        # itself as opposed to the versioned `arn` -- confirm against the AWS
        # provider schema before relying on it.
        return pulumi.get(self, "type_arn")
    @property
    @pulumi.getter(name="typeName")
    def type_name(self) -> str:
        """
        CloudFormation Type name. For example, `AWS::EC2::VPC`.
        """
        return pulumi.get(self, "type_name")
    @property
    @pulumi.getter(name="versionId")
    def version_id(self) -> Optional[str]:
        """
        Identifier of the CloudFormation Type version.
        """
        return pulumi.get(self, "version_id")
    @property
    @pulumi.getter
    def visibility(self) -> str:
        """
        Scope of the CloudFormation Type.
        """
        return pulumi.get(self, "visibility")
class AwaitableGetCloudFormationTypeResult(GetCloudFormationTypeResult):
    # Awaitable wrapper so the result can be used with `await` in async code.
    # The `if False: yield` makes __await__ a generator without ever yielding;
    # awaiting it immediately returns a plain GetCloudFormationTypeResult
    # carrying the same field values.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetCloudFormationTypeResult(
            arn=self.arn,
            default_version_id=self.default_version_id,
            deprecated_status=self.deprecated_status,
            description=self.description,
            documentation_url=self.documentation_url,
            execution_role_arn=self.execution_role_arn,
            id=self.id,
            is_default_version=self.is_default_version,
            logging_configs=self.logging_configs,
            provisioning_type=self.provisioning_type,
            schema=self.schema,
            source_url=self.source_url,
            type=self.type,
            type_arn=self.type_arn,
            type_name=self.type_name,
            version_id=self.version_id,
            visibility=self.visibility)
def get_cloud_formation_type(arn: Optional[str] = None,
                             type: Optional[str] = None,
                             type_name: Optional[str] = None,
                             version_id: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCloudFormationTypeResult:
    """
    Provides details about a CloudFormation Type.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.cloudformation.get_cloud_formation_type(type="RESOURCE",
        type_name="AWS::Athena::WorkGroup")
    ```
    :param str arn: Amazon Resource Name (ARN) of the CloudFormation Type. For example, `arn:aws:cloudformation:us-west-2::type/resource/AWS-EC2-VPC`.
    :param str type: CloudFormation Registry Type. For example, `RESOURCE`.
    :param str type_name: CloudFormation Type name. For example, `AWS::EC2::VPC`.
    :param str version_id: Identifier of the CloudFormation Type version.
    """
    # Map the Python-style arguments onto the provider's camelCase keys.
    __args__ = {
        'arn': arn,
        'type': type,
        'typeName': type_name,
        'versionId': version_id,
    }
    # Make sure invoke options exist and carry the SDK version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke('aws:cloudformation/getCloudFormationType:getCloudFormationType', __args__, opts=opts, typ=GetCloudFormationTypeResult).value
    # Re-wrap the raw result so callers may `await` it if they wish.
    return AwaitableGetCloudFormationTypeResult(
        arn=invoke_result.arn,
        default_version_id=invoke_result.default_version_id,
        deprecated_status=invoke_result.deprecated_status,
        description=invoke_result.description,
        documentation_url=invoke_result.documentation_url,
        execution_role_arn=invoke_result.execution_role_arn,
        id=invoke_result.id,
        is_default_version=invoke_result.is_default_version,
        logging_configs=invoke_result.logging_configs,
        provisioning_type=invoke_result.provisioning_type,
        schema=invoke_result.schema,
        source_url=invoke_result.source_url,
        type=invoke_result.type,
        type_arn=invoke_result.type_arn,
        type_name=invoke_result.type_name,
        version_id=invoke_result.version_id,
        visibility=invoke_result.visibility)
| 38.945652 | 339 | 0.662852 |
cbbcce447f1dfc70d2bc71bcef6894756b8fb880 | 905 | py | Python | code/ordered_radicals/sol_124.py | bhavinjawade/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | 2 | 2020-07-16T08:16:32.000Z | 2020-10-01T07:16:48.000Z | code/ordered_radicals/sol_124.py | Psingh12354/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | null | null | null | code/ordered_radicals/sol_124.py | Psingh12354/project-euler-solutions | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | [
"Apache-2.0"
] | 1 | 2021-05-07T18:06:08.000Z | 2021-05-07T18:06:08.000Z |
# -*- coding: utf-8 -*-
'''
File name: code\ordered_radicals\sol_124.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #124 :: Ordered radicals
#
# For more information see:
# https://projecteuler.net/problem=124
# Problem Statement
'''
The radical of n, rad(n), is the product of the distinct prime factors of n. For example, 504 = 2^3 × 3^2 × 7, so rad(504) = 2 × 3 × 7 = 42.
If we calculate rad(n) for 1 ≤ n ≤ 10, then sort them on rad(n), and sorting on n if the radical values are equal, we get:
    Unsorted        Sorted
    n   rad(n)      n   rad(n)  k
    1   1           1   1       1
    2   2           2   2       2
    3   3           4   2       3
    4   2           8   2       4
    5   5           3   3       5
    6   6           9   3       6
    7   7           5   5       7
    8   2           6   6       8
    9   3           7   7       9
    10  10          10  10      10
Let E(k) be the kth element in the sorted n column; for example, E(4) = 8 and E(6) = 9.
If rad(n) is sorted for 1 ≤ n ≤ 100000, find E(10000).
'''
# Solution
# Solution Approach
'''
'''
| 13.507463 | 138 | 0.644199 |
4c69a66ab24eaffecad214700d54db557c797884 | 7,219 | py | Python | models/model.py | alexcwsmith/TRAILMAP | 3f5adcc34341add14561be7b44d240aa712444e9 | [
"MIT"
] | 29 | 2019-11-12T22:36:51.000Z | 2021-12-16T00:11:44.000Z | models/model.py | alexcwsmith/TRAILMAP | 3f5adcc34341add14561be7b44d240aa712444e9 | [
"MIT"
] | 14 | 2019-11-06T19:19:00.000Z | 2022-01-25T21:14:13.000Z | models/model.py | alexcwsmith/TRAILMAP | 3f5adcc34341add14561be7b44d240aa712444e9 | [
"MIT"
] | 13 | 2019-10-22T12:53:33.000Z | 2022-03-15T20:15:52.000Z | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv3D, MaxPooling3D, BatchNormalization, Conv3DTranspose, concatenate, \
Cropping3D, Input
from tensorflow.keras.optimizers import Adam
# Side length (voxels) of the cubic patch fed to the network's Input layer.
input_dim = 64
# Side length of the network's output cube; smaller than the input because the
# decoder's unpadded Conv3D layers crop the borders (see get_net).
output_dim = 36
def create_weighted_binary_crossentropy(axon_weight, background_weight, artifact_weight, edge_weight):
    """Build a per-voxel weighted binary cross-entropy loss.

    y_true is expected to carry four one-hot label channels in its last axis:
    0 = axon, 1 = background, 2 = artifact, 3 = edge. Voxels whose channel sum
    is not exactly 1 (i.e. unlabeled voxels) are masked out of the loss. Each
    remaining voxel's cross-entropy is scaled by the weight of its class.
    Returns a closure suitable as a Keras `loss` argument.
    """
    def weighted_binary_crossentropy(y_true, y_pred):
        # Keep only voxels with exactly one active label channel.
        weights = tf.reduce_sum(y_true, axis=-1, keepdims=True)
        mask = tf.equal(weights, 1)
        # Extract each label channel and flatten it over the labeled voxels.
        axon_true = y_true[:, :, :, :, 0]
        axon_true = tf.expand_dims(axon_true, -1)
        axon_mask = tf.boolean_mask(axon_true, mask)
        background_true = y_true[:, :, :, :, 1]
        background_true = tf.expand_dims(background_true, -1)
        background_mask = tf.boolean_mask(background_true, mask)
        artifact_true = y_true[:, :, :, :, 2]
        artifact_true = tf.expand_dims(artifact_true, -1)
        artifact_mask = tf.boolean_mask(artifact_true, mask)
        edge_true = y_true[:, :, :, :, 3]
        edge_true = tf.expand_dims(edge_true, -1)
        edge_mask = tf.boolean_mask(edge_true, mask)
        # The binary target is the axon channel; predictions are masked the
        # same way so shapes line up.
        mask_true = tf.boolean_mask(axon_true, mask)
        mask_pred = tf.boolean_mask(y_pred, mask)
        crossentropy = K.binary_crossentropy(mask_true, mask_pred)
        # Exactly one of the four indicator masks is 1 per voxel, so this sum
        # selects that voxel's class weight.
        weight_vector = (axon_mask * axon_weight) + (background_mask * background_weight) + \
                        (artifact_mask * artifact_weight) + (edge_mask * edge_weight)
        weighted_crossentropy = weight_vector * crossentropy
        return K.mean(weighted_crossentropy)
    return weighted_binary_crossentropy
def weighted_binary_crossentropy(y_true, y_pred):
    """Weighted BCE with the project's default class weights
    (axon=1.5, background=0.2, artifact=0.8, edge=0.05)."""
    loss_fn = create_weighted_binary_crossentropy(1.5, 0.2, 0.8, 0.05)
    return loss_fn(y_true, y_pred)
def adjusted_accuracy(y_true, y_pred):
    """Binary accuracy on the axon channel, computed only over labeled voxels
    (those whose four label channels sum to exactly 1)."""
    weights = tf.reduce_sum(y_true, axis=-1, keepdims=True)
    mask = K.equal(weights, 1)
    # Target is channel 0 (axon) of the one-hot ground truth.
    axons_true = y_true[:, :, :, :, 0]
    axons_true = K.expand_dims(axons_true, -1)
    mask_true = tf.boolean_mask(axons_true, mask)
    mask_pred = tf.boolean_mask(y_pred, mask)
    # Round sigmoid outputs to 0/1 and compare.
    return K.mean(K.equal(mask_true, K.round(mask_pred)))
def axon_precision(y_true, y_pred):
    """Precision of axon predictions (TP / predicted positives), restricted to
    labeled voxels; K.epsilon() guards against division by zero."""
    weights = tf.reduce_sum(y_true, axis=-1)
    mask = tf.equal(weights, 1)
    mask_true = tf.boolean_mask(y_true[:, :, :, :, 0], mask)
    mask_pred = tf.boolean_mask(y_pred[:, :, :, :, 0], mask)
    true_positives = K.sum(K.round(K.clip(mask_true * mask_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(mask_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
def axon_recall(y_true, y_pred):
    """Recall of axon predictions (TP / actual positives), restricted to
    labeled voxels; K.epsilon() guards against division by zero."""
    weights = tf.reduce_sum(y_true, axis=-1)
    mask = tf.equal(weights, 1)
    mask_true = tf.boolean_mask(y_true[:, :, :, :, 0], mask)
    mask_pred = tf.boolean_mask(y_pred[:, :, :, :, 0], mask)
    true_positives = K.sum(K.round(K.clip(mask_true * mask_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(mask_true, 0, 1)))
    recall = true_positives / (actual_positives + K.epsilon())
    return recall
def artifact_precision(y_true, y_pred):
    """Precision of "not axon" predictions over voxels labeled as artifact
    (channel 2): how often the model correctly suppresses artifacts."""
    # Mask on the artifact channel itself rather than the label sum.
    weights = y_true[:, :, :, :, 2]
    mask = tf.equal(weights, 1)
    mask_true = tf.boolean_mask(y_true[:, :, :, :, 2], mask)
    # 1 - p(axon): the predicted probability the voxel is NOT an axon.
    mask_pred = tf.boolean_mask(1 - y_pred[:, :, :, :, 0], mask)
    true_positives = K.sum(K.round(K.clip(mask_true * mask_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(mask_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
def f1_score(y_true, y_pred):
    """F1 score for the axon class: harmonic mean of axon_precision and
    axon_recall, with K.epsilon() preventing division by zero."""
    p = axon_precision(y_true, y_pred)
    r = axon_recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
def edge_axon_precision(y_true, y_pred):
    """Axon precision that forgives edge hits: predicted positives falling on
    edge-labeled voxels (channel 3) are subtracted from the denominator, so
    the model is not penalized for firing on axon borders."""
    weights = tf.reduce_sum(y_true, axis=-1)
    mask = tf.equal(weights, 1)
    mask_true = tf.boolean_mask(y_true[:, :, :, :, 0], mask)
    mask_pred = tf.boolean_mask(y_pred[:, :, :, :, 0], mask)
    mask_edge_true = tf.boolean_mask(y_true[:, :, :, :, 3], mask)
    true_positives = K.sum(K.round(K.clip(mask_true * mask_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(mask_pred, 0, 1)))
    # Positive predictions that landed on edge voxels.
    edge_count = K.sum(K.round(K.clip(mask_edge_true * mask_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon() - edge_count)
    return precision
def get_net():
    """Build and compile the 3D U-Net used by TRAILMAP.

    Encoder: four levels of padded 3x3x3 Conv3D + BatchNorm with 2x2x2 max
    pooling between levels. Decoder: transposed convolutions followed by
    UNPADDED convolutions, so skip connections from the encoder must be
    cropped to match -- this shrinks a (64, 64, 64) input to a
    (36, 36, 36) single-channel sigmoid output.

    NOTE(review): `input` shadows the builtin, and Adam's `lr=`/`decay=`
    arguments are deprecated in newer Keras (use `learning_rate=` and a
    schedule) -- left as-is for compatibility with the TF version in use.
    """
    # Level 1
    input = Input((input_dim, input_dim, input_dim, 1))
    conv1 = Conv3D(32, (3, 3, 3), activation="relu", padding="same")(input)
    batch1 = BatchNormalization()(conv1)
    conv1 = Conv3D(64, (3, 3, 3), activation="relu", padding="same")(batch1)
    batch1 = BatchNormalization()(conv1)
    # Level 2
    pool2 = MaxPooling3D((2, 2, 2))(batch1)
    conv2 = Conv3D(64, (3, 3, 3), activation="relu", padding="same")(pool2)
    batch2 = BatchNormalization()(conv2)
    conv2 = Conv3D(128, (3, 3, 3), activation="relu", padding="same")(batch2)
    batch2 = BatchNormalization()(conv2)
    # Level 3
    pool3 = MaxPooling3D((2, 2, 2))(batch2)
    conv3 = Conv3D(128, (3, 3, 3), activation="relu", padding="same")(pool3)
    batch3 = BatchNormalization()(conv3)
    conv3 = Conv3D(256, (3, 3, 3), activation="relu", padding="same")(batch3)
    batch3 = BatchNormalization()(conv3)
    # Level 4 (bottleneck)
    pool4 = MaxPooling3D((2, 2, 2))(batch3)
    conv4 = Conv3D(256, (3, 3, 3), activation="relu", padding="same")(pool4)
    batch4 = BatchNormalization()(conv4)
    conv4 = Conv3D(512, (3, 3, 3), activation="relu", padding="same")(batch4)
    batch4 = BatchNormalization()(conv4)
    # Level 3 (decoder) -- unpadded convs from here on shrink the volume.
    up5 = Conv3DTranspose(512, (2, 2, 2), strides=(2, 2, 2), padding="same", activation="relu")(batch4)
    merge5 = concatenate([up5, batch3])
    conv5 = Conv3D(256, (3, 3, 3), activation="relu")(merge5)
    batch5 = BatchNormalization()(conv5)
    conv5 = Conv3D(256, (3, 3, 3), activation="relu")(batch5)
    batch5 = BatchNormalization()(conv5)
    # Level 2 -- skip connection cropped by 4 on each face to align shapes.
    up6 = Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), activation="relu")(batch5)
    merge6 = concatenate([up6, Cropping3D(cropping=((4, 4), (4, 4), (4, 4)))(batch2)])
    conv6 = Conv3D(128, (3, 3, 3), activation="relu")(merge6)
    batch6 = BatchNormalization()(conv6)
    conv6 = Conv3D(128, (3, 3, 3), activation="relu")(batch6)
    batch6 = BatchNormalization()(conv6)
    # Level 1 -- skip connection cropped by 12 on each face.
    up7 = Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same", activation="relu")(batch6)
    merge7 = concatenate([up7, Cropping3D(cropping=((12, 12), (12, 12), (12, 12)))(batch1)])
    conv7 = Conv3D(64, (3, 3, 3), activation="relu")(merge7)
    batch7 = BatchNormalization()(conv7)
    conv7 = Conv3D(64, (3, 3, 3), activation="relu")(batch7)
    batch7 = BatchNormalization()(conv7)
    # Output dim is (36, 36, 36)
    preds = Conv3D(1, (1, 1, 1), activation="sigmoid")(batch7)
    model = Model(inputs=input, outputs=preds)
    model.compile(optimizer=Adam(lr=0.001, decay=0.00), loss=weighted_binary_crossentropy,
                  metrics=[axon_precision, axon_recall, f1_score, artifact_precision, edge_axon_precision, adjusted_accuracy])
    return model
| 35.915423 | 126 | 0.655769 |
a1263acea3b976b7cf9136be5c6ccbf7ee3d1452 | 282 | py | Python | utils/extract_version.py | Piumal1999/asset-registry | ecf291f963571fc3933f3b91b867f1b42e4885bb | [
"MIT"
] | null | null | null | utils/extract_version.py | Piumal1999/asset-registry | ecf291f963571fc3933f3b91b867f1b42e4885bb | [
"MIT"
] | 3 | 2021-11-24T17:53:08.000Z | 2022-01-10T05:18:05.000Z | utils/extract_version.py | Piumal1999/asset-registry | ecf291f963571fc3933f3b91b867f1b42e4885bb | [
"MIT"
] | 1 | 2021-11-14T10:05:51.000Z | 2021-11-14T10:05:51.000Z | import os
import sys
env_file = os.getenv('GITHUB_ENV')
def set_actions_env_var(var_name, value):
    """Append ``VAR_NAME=value`` to the GitHub Actions environment file
    (the path held in the module-level ``env_file``)."""
    line = str(var_name) + "=" + str(value) + "\n"
    with open(env_file, "a") as fh:
        fh.write(line)
# Export everything after the last ":" of argv[1] as VERSION -- assumes the
# argument embeds the version after a final colon (e.g. "image:1.2.3");
# confirm against the workflow that invokes this script.
string = sys.argv[1]
set_actions_env_var("VERSION", string.split(":")[-1])
| 20.142857 | 62 | 0.666667 |
c5cf926bea6c7b931bf2fa5d098eb2d5d412fa0d | 9,970 | py | Python | models/RelationNetworks/relation_rcnn/operator_py/proposal.py | RamsteinWR/PneumoniaRSNA1 | 08bdba51292307a78ef711c6be4a63faea240ddf | [
"MIT"
] | null | null | null | models/RelationNetworks/relation_rcnn/operator_py/proposal.py | RamsteinWR/PneumoniaRSNA1 | 08bdba51292307a78ef711c6be4a63faea240ddf | [
"MIT"
] | null | null | null | models/RelationNetworks/relation_rcnn/operator_py/proposal.py | RamsteinWR/PneumoniaRSNA1 | 08bdba51292307a78ef711c6be4a63faea240ddf | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Relation Networks for Object Detection
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
"""
Proposal Operator transform anchor coordinates into ROI coordinates with prediction results on
classification probability and bounding box prediction results, and image size and scale information.
"""
from distutils.util import strtobool
import mxnet as mx
import numpy as np
import numpy.random as npr
from bbox.bbox_transform import bbox_pred, clip_boxes
from nms.nms import gpu_nms_wrapper
from rpn.generate_anchor import generate_anchors
DEBUG = False
class ProposalOperator(mx.operator.CustomOp):
    """Custom MXNet operator that turns RPN outputs into region proposals.

    Given per-anchor objectness scores and bbox regression deltas, it decodes,
    clips and filters candidate boxes, then applies NMS and returns the top
    post-NMS proposals (optionally with their scores).
    """
    def __init__(self, feat_stride, scales, ratios, output_score,
                 rpn_pre_nms_top_n, rpn_post_nms_top_n, threshold, rpn_min_size):
        super(ProposalOperator, self).__init__()
        self._feat_stride = feat_stride
        # scales/ratios arrive as stringified tuples like "(8, 16, 32)";
        # strip the parens and parse the comma-separated numbers.
        # NOTE(review): np.fromstring with sep= is deprecated in newer NumPy.
        self._scales = np.fromstring(scales[1:-1], dtype=float, sep=',')
        self._ratios = np.fromstring(ratios[1:-1], dtype=float, sep=',')
        self._anchors = generate_anchors(base_size=self._feat_stride, scales=self._scales, ratios=self._ratios)
        self._num_anchors = self._anchors.shape[0]
        self._output_score = output_score
        self._rpn_pre_nms_top_n = rpn_pre_nms_top_n
        self._rpn_post_nms_top_n = rpn_post_nms_top_n
        self._threshold = threshold
        self._rpn_min_size = rpn_min_size
        if DEBUG:
            # Fixed: these were 2to3-mangled bare `print` statements followed
            # by orphaned string expressions, which printed nothing useful.
            print('feat_stride: {}'.format(self._feat_stride))
            print('anchors:')
            print(self._anchors)

    def forward(self, is_train, req, in_data, out_data, aux):
        """Decode anchors + deltas into proposals and write them to out_data.

        in_data: [0] objectness scores (1, 2A, H, W), [1] bbox deltas
        (1, 4A, H, W), [2] im_info (height, width, scale).
        """
        nms = gpu_nms_wrapper(self._threshold, in_data[0].context.device_id)

        batch_size = in_data[0].shape[0]
        if batch_size > 1:
            raise ValueError("Sorry, multiple images each device is not implemented")

        # for each (H, W) location i
        # generate A anchor boxes centered on cell i
        # apply predicted bbox deltas at cell i to each of the A anchors
        # clip predicted boxes to image
        # remove predicted boxes with either height or width < threshold
        # sort all (proposal, score) pairs by score from highest to lowest
        # take top pre_nms_topN proposals before NMS
        # apply NMS with threshold 0.7 to remaining proposals
        # take after_nms_topN proposals after NMS
        # return the top proposals (-> RoIs top, scores top)
        pre_nms_topN = self._rpn_pre_nms_top_n
        post_nms_topN = self._rpn_post_nms_top_n
        min_size = self._rpn_min_size

        # the first set of anchors are background probabilities
        # keep the second part
        scores = in_data[0].asnumpy()[:, self._num_anchors:, :, :]
        bbox_deltas = in_data[1].asnumpy()
        im_info = in_data[2].asnumpy()[0, :]

        if DEBUG:
            print('im_size: ({}, {})'.format(im_info[0], im_info[1]))
            print('scale: {}'.format(im_info[2]))

        # 1. Generate proposals from bbox_deltas and shifted anchors
        # use real image size instead of padded feature map sizes
        height, width = int(im_info[0] / self._feat_stride), int(im_info[1] / self._feat_stride)

        if DEBUG:
            print('score map size: {}'.format(scores.shape))
            print("resudial: {}".format((scores.shape[2] - height, scores.shape[3] - width)))

        # Enumerate all shifts
        shift_x = np.arange(0, width) * self._feat_stride
        shift_y = np.arange(0, height) * self._feat_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()

        # Enumerate all shifted anchors:
        #
        # add A anchors (1, A, 4) to
        # cell K shifts (K, 1, 4) to get
        # shift anchors (K, A, 4)
        # reshape to (K*A, 4) shifted anchors
        A = self._num_anchors
        K = shifts.shape[0]
        anchors = self._anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
        anchors = anchors.reshape((K * A, 4))

        # Transpose and reshape predicted bbox transformations to get them
        # into the same order as the anchors:
        #
        # bbox deltas will be (1, 4 * A, H, W) format
        # transpose to (1, H, W, 4 * A)
        # reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
        # in slowest to fastest order
        bbox_deltas = self._clip_pad(bbox_deltas, (height, width))
        bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))

        # Same story for the scores:
        #
        # scores are (1, A, H, W) format
        # transpose to (1, H, W, A)
        # reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
        scores = self._clip_pad(scores, (height, width))
        scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))

        # Convert anchors into proposals via bbox transformations
        proposals = bbox_pred(anchors, bbox_deltas)

        # 2. clip predicted boxes to image
        proposals = clip_boxes(proposals, im_info[:2])

        # 3. remove predicted boxes with either height or width < threshold
        # (NOTE: convert min_size to input image scale stored in im_info[2])
        keep = self._filter_boxes(proposals, min_size * im_info[2])
        proposals = proposals[keep, :]
        scores = scores[keep]

        # 4. sort all (proposal, score) pairs by score from highest to lowest
        # 5. take top pre_nms_topN (e.g. 6000)
        order = scores.ravel().argsort()[::-1]
        if pre_nms_topN > 0:
            order = order[:pre_nms_topN]
        proposals = proposals[order, :]
        scores = scores[order]

        # 6. apply nms (e.g. threshold = 0.7)
        # 7. take after_nms_topN (e.g. 300)
        # 8. return the top proposals (-> RoIs top)
        det = np.hstack((proposals, scores)).astype(np.float32)
        keep = nms(det)
        if post_nms_topN > 0:
            keep = keep[:post_nms_topN]
        # pad to ensure output size remains unchanged
        if len(keep) < post_nms_topN:
            pad = npr.choice(keep, size=post_nms_topN - len(keep))
            keep = np.hstack((keep, pad))
        proposals = proposals[keep, :]
        scores = scores[keep]

        # Output rois array
        # Our RPN implementation only supports a single input image, so all
        # batch inds are 0
        batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
        blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
        self.assign(out_data[0], req[0], blob)

        if self._output_score:
            self.assign(out_data[1], req[1], scores.astype(np.float32, copy=False))

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Proposal generation is not differentiable; propagate zero gradients.
        self.assign(in_grad[0], req[0], 0)
        self.assign(in_grad[1], req[1], 0)
        self.assign(in_grad[2], req[2], 0)

    @staticmethod
    def _filter_boxes(boxes, min_size):
        """ Remove all boxes with any side smaller than min_size """
        ws = boxes[:, 2] - boxes[:, 0] + 1
        hs = boxes[:, 3] - boxes[:, 1] + 1
        keep = np.where((ws >= min_size) & (hs >= min_size))[0]
        return keep

    @staticmethod
    def _clip_pad(tensor, pad_shape):
        """
        Clip boxes of the pad area.
        :param tensor: [n, c, H, W]
        :param pad_shape: [h, w]
        :return: [n, c, h, w]
        """
        H, W = tensor.shape[2:]
        h, w = pad_shape
        if h < H or w < W:
            tensor = tensor[:, :, :h, :w].copy()
        return tensor
@mx.operator.register("proposal")
class ProposalProp(mx.operator.CustomOpProp):
    """Operator registration/shape-inference wrapper for ProposalOperator.

    All constructor arguments arrive as strings from the MXNet symbol API and
    are parsed into their real types here.
    """
    def __init__(self, feat_stride='16', scales='(8, 16, 32)', ratios='(0.5, 1, 2)', output_score='False',
                 rpn_pre_nms_top_n='6000', rpn_post_nms_top_n='300', threshold='0.3', rpn_min_size='16'):
        # need_top_grad=False: proposal generation produces no gradient.
        super(ProposalProp, self).__init__(need_top_grad=False)
        self._feat_stride = int(feat_stride)
        self._scales = scales
        self._ratios = ratios
        self._output_score = strtobool(output_score)
        self._rpn_pre_nms_top_n = int(rpn_pre_nms_top_n)
        self._rpn_post_nms_top_n = int(rpn_post_nms_top_n)
        self._threshold = float(threshold)
        self._rpn_min_size = int(rpn_min_size)
    def list_arguments(self):
        # Inputs: RPN class probabilities, bbox deltas, and image info.
        return ['cls_prob', 'bbox_pred', 'im_info']
    def list_outputs(self):
        # Scores become a second output only when requested.
        if self._output_score:
            return ['output', 'score']
        else:
            return ['output']
    def infer_shape(self, in_shape):
        """Derive output shapes: always rpn_post_nms_top_n rows of 5 (batch
        index + 4 box coords), plus an optional (N, 1) score column."""
        cls_prob_shape = in_shape[0]
        bbox_pred_shape = in_shape[1]
        assert cls_prob_shape[0] == bbox_pred_shape[0], 'ROI number does not equal in cls and reg'
        batch_size = cls_prob_shape[0]
        im_info_shape = (batch_size, 3)
        output_shape = (self._rpn_post_nms_top_n, 5)
        score_shape = (self._rpn_post_nms_top_n, 1)
        if self._output_score:
            return [cls_prob_shape, bbox_pred_shape, im_info_shape], [output_shape, score_shape]
        else:
            return [cls_prob_shape, bbox_pred_shape, im_info_shape], [output_shape]
    def create_operator(self, ctx, shapes, dtypes):
        return ProposalOperator(self._feat_stride, self._scales, self._ratios, self._output_score,
                                self._rpn_pre_nms_top_n, self._rpn_post_nms_top_n, self._threshold, self._rpn_min_size)
    def declare_backward_dependency(self, out_grad, in_data, out_data):
        # No inputs/outputs are needed for backward (no gradient flows).
        return []
| 39.721116 | 119 | 0.611535 |
b9b8f86949db8544ed43d8767b0ac873ddfb6c57 | 1,660 | py | Python | db.py | Shreyas-pandith/mlh-localhost-build-and-deploy-aws | a163dd305af45c7a0b89aab58c1c5db9b6a31386 | [
"MIT"
] | null | null | null | db.py | Shreyas-pandith/mlh-localhost-build-and-deploy-aws | a163dd305af45c7a0b89aab58c1c5db9b6a31386 | [
"MIT"
] | null | null | null | db.py | Shreyas-pandith/mlh-localhost-build-and-deploy-aws | a163dd305af45c7a0b89aab58c1c5db9b6a31386 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, BigInteger ,String ,ForeignKey
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import config
# Engine for the configured database; max_overflow=-1 allows an unbounded
# number of overflow connections beyond the pool size.
database = create_engine(config.DATABASE_URL,max_overflow=-1)
# Declarative base class that the ORM models below inherit from.
base = declarative_base()
class Articles(base):
    """Model for user-authored articles."""
    __tablename__ = "Articles"
    # Surrogate primary key.
    id = Column(BigInteger, primary_key=True)
    title = Column(String(100), index=True)
    content = Column(String(100), index=True)
    # Author of the article (foreign key into the users table).
    user_id = Column(BigInteger, ForeignKey('users.id'))
class User(UserMixin, base):
    """Model for user accounts.

    Passwords are never stored in plaintext: set_password() stores a salted
    hash and check_password() verifies a candidate against it.
    """
    __tablename__ = 'users'
    # Surrogate primary key.
    id = Column(BigInteger,
                primary_key=True)
    # Display name; intentionally not unique.
    name = Column(String(50),
                  nullable=False,
                  unique=False)
    # Login identifier; must be unique.
    email = Column(String(40),
                   unique=True,
                   nullable=False)
    # Salted password hash produced by set_password().
    password = Column(String(200),
                      unique=False,
                      nullable=False)

    def set_password(self, password):
        """Create hashed password.

        'pbkdf2:sha256' replaces the plain 'sha256' method, which Werkzeug
        >= 2.3 rejects. Verification stays backward compatible because
        check_password_hash reads the method from the stored hash string.
        """
        self.password = generate_password_hash(password, method='pbkdf2:sha256')

    def check_password(self, password):
        """Check a candidate password against the stored hash."""
        return check_password_hash(self.password, password)

    def __repr__(self):
        return '<User {}>'.format(self.name)
# Create any missing tables for the models defined above.
base.metadata.create_all(database)
# Session factory bound to the engine; use get_session() to obtain sessions.
Session = sessionmaker(database)
def get_session():
    """Return a new SQLAlchemy session bound to the module's engine."""
    return Session()
| 26.349206 | 73 | 0.65 |
88c6c2743d91c659914bbdc844966cbb3c4c47df | 6,682 | py | Python | game.py | Siedler/Owela | cdaa3218846d78cf93a90ff6c4740ac3020275ee | [
"Apache-2.0"
] | null | null | null | game.py | Siedler/Owela | cdaa3218846d78cf93a90ff6c4740ac3020275ee | [
"Apache-2.0"
] | null | null | null | game.py | Siedler/Owela | cdaa3218846d78cf93a90ff6c4740ac3020275ee | [
"Apache-2.0"
] | null | null | null | from textwrap import dedent
import time
import random
max_field_count = -1
class Game:
    """Board state and rules for a two-player Owela game.

    state[p] holds player p's 16 pits; judging by the capture rule in
    move(), pits 8-15 act as the front row facing the opponent.
    """
    def __init__(self):
        # Opening layout for both players: two stones in each of the
        # first eleven pits, the remaining five pits empty.
        self.state = [
            [2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 0, 0, 0, 0, 0],
            [2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 0, 0, 0, 0, 0]]
    def copy(self):
        """Return an independent copy of this game (rows are slice-copied)."""
        copied_game = Game()
        copied_game.state[0] = self.state[0][:]
        copied_game.state[1] = self.state[1][:]
        return copied_game
    def play(self, player1, player2):
        """
        Simulate an Owela game between two given players.

        Given as inputs are two functions that determine the behaviour of
        the player/bot. Each function receives the game state and the
        index of the player to move, and must return the pit index the
        move starts from.
        """
        players = [player1, player2]
        current_player = 0
        # print(self)
        current_round = 1
        while not self.game_finished():
            player = players[current_player]
            start_position = player(self, current_player)
            self.move(current_player, start_position)
            # Alternate turns between player 0 and player 1.
            current_player = 1 - current_player
            current_round += 1
        # Return which player has won the game
        if self.player_has_won(0):
            return 0
        else:
            return 1
    def game_finished(self):
        # The game is over as soon as either side has won.
        return self.player_has_won(0) or self.player_has_won(1)
    def move(self, player, position):
        """
        Calculates the resulting board according to the current game state,
        player and position to be played
        """
        # Catch the case that the player tries to make a move with an empty field
        if self.state[player][position] <= 0:
            raise Exception("invalid")
        other_player = 1 - player
        my_state = self.state[player]
        other_state = self.state[other_player]
        # While the player is still alowed to make moves
        while True:
            # Stop relaying once the opponent is already down to at most
            # one stone (the win condition, see player_has_won).
            if self.player_has_won(player):
                break
            amount = my_state[position] # amount of stones in that field
            my_state[position] = 0 # set the number of stones to 0 in field
            # Add one stone to each following field
            for i in range(1, amount + 1):
                my_state[(position + i) % 16] += 1
            new_position = (position + amount) % 16
            # If the last field already got a stone
            if my_state[new_position] > 1:
                # If the filed was in the front row: steal stones of opponent
                if new_position >= 8:
                    # Front-row pit k faces the opponent's pits (k - 8)
                    # and 15 - (k - 8); both are emptied into our pit.
                    steal_position_1 = new_position - 8
                    steal_position_2 = 15 - steal_position_1
                    stolen = other_state[steal_position_1] + other_state[steal_position_2]
                    other_state[steal_position_1] = 0
                    other_state[steal_position_2] = 0
                    my_state[new_position] += stolen
                # Continue move from new starting position
                position = new_position
            else:
                break
    def move_recursive(self, player, position):
        """
        Recursive implementation of the described move function.
        Not in use anymore because of recursion-depth-problems
        """
        if self.state[player][position] <= 0:
            raise Exception("invalid")
        other_player = 1 - player
        my_state = self.state[player]
        other_state = self.state[other_player]
        amount = my_state[position]
        my_state[position] = 0
        for i in range(1, amount + 1):
            my_state[(position + i) % 16] += 1
        new_position = (position + amount) % 16
        if my_state[new_position] > 1:
            if new_position >= 8:
                steal_position_1 = new_position - 8
                steal_position_2 = 15 - steal_position_1
                stolen = other_state[steal_position_1] + other_state[steal_position_2]
                other_state[steal_position_1] = 0
                other_state[steal_position_2] = 0
                my_state[new_position] += stolen
            self.move_recursive(player, new_position)
    def max_field_count(self, player):
        """
        Calculate the filed with maximum amount of stones.
        If I remember correctly this was a support function to approximate
        the possible maximum number of stones in one field.
        """
        # NOTE: `max_field_count` below refers to the module-level global
        # (a running maximum across all calls), not to this method.
        global max_field_count
        n = max(self.state[player])
        if max_field_count < n:
            max_field_count = n
        return n
    def stone_count(self, player):
        """Return the total number of stones the given player still owns."""
        return sum(self.state[player])
    def player_has_won(self, player):
        # A player has won once the opponent has at most one stone left.
        return self.stone_count(1 - player) <= 1
    def used_fields_count(self, player):
        """Return how many of the player's 16 pits hold at least one stone."""
        return len([i for i in range(16) if self.state[player][i] > 0])
    def __repr__(self):
        """
        Represent the current state of the board.
        """
        return dedent(f"""\
            State: {list(reversed(self.state[0][:8]))}
                   {self.state[0][8:]}
            -----------------------------------------
            {list(reversed(self.state[1][8:]))}
            {self.state[1][:8]}\
            """)
    def print_player_perspective(self, player):
        """
        Represent the current state of the board according to the given player
        """
        # NOTE(review): the opponent's rows are sliced differently here
        # ([:8] reversed, then [8:]) than in __repr__ ([8:] reversed,
        # then [:8]) -- confirm which orientation is intended.
        print(dedent(f"""\
            {list(reversed(self.state[1-player][:8]))}
            {self.state[1-player][8:]}
            -----------------------------------------
            {list(reversed(self.state[player][8:]))}
            {self.state[player][:8]}\
            """))
    def __hash__(self):
        # Hash immutable snapshots of both rows so equal boards hash equal.
        return hash((tuple(self.state[0]), tuple(self.state[1])))
    def __eq__(self, other):
        return self.state == other.state
    def possible_moves(self, player):
        """
        Returns a list of all possible moves a player can make
        """
        return [i for i in range(16) if self.state[player][i] > 0]
def trackGames(number_of_games, player1, player2):
    """Play `number_of_games` games and return [player1 wins, player2 wins]."""
    wins = [0, 0]
    for _ in range(number_of_games):
        match = Game()
        wins[match.play(player1, player2)] += 1
    return wins
def trackGamesRandStart(number_of_games, player1, player2):
    """Like trackGames, but randomly choose who moves first in each game."""
    wins = [0, 0]
    for _ in range(number_of_games):
        match = Game()
        player1_starts = random.choice([True, False])
        if player1_starts:
            wins[match.play(player1, player2)] += 1
        else:
            # Seats are swapped, so invert the winner index back to the
            # player1/player2 ordering of the result list.
            wins[1 - match.play(player2, player1)] += 1
    return wins
| 31.668246 | 90 | 0.555672 |
bd5a2769a140376efb9a2c5ac936e32bef53aaf6 | 2,455 | py | Python | app.py | bhavsarpratik/jina-icd10-entity-search | c7c1225a63d9e89eb7d9c0458661fd630d824e8d | [
"MIT"
] | 1 | 2021-08-08T09:41:59.000Z | 2021-08-08T09:41:59.000Z | app.py | bhavsarpratik/jina-icd10-entity-search | c7c1225a63d9e89eb7d9c0458661fd630d824e8d | [
"MIT"
] | null | null | null | app.py | bhavsarpratik/jina-icd10-entity-search | c7c1225a63d9e89eb7d9c0458661fd630d824e8d | [
"MIT"
] | 1 | 2021-11-13T06:48:36.000Z | 2021-11-13T06:48:36.000Z | import os
import shutil
import click
from jina.flow import Flow
def clean_workdir():
    """Delete the Jina workspace directory if it exists."""
    workdir = os.environ['JINA_WORKSPACE']
    if os.path.exists(workdir):
        shutil.rmtree(workdir)
def config():
    """Populate the environment variables read by the index/query flows."""
    settings = {
        'JINA_DATA_FILE': 'data/icd10.csv',
        'JINA_WORKSPACE': 'workspace',
        'JINA_PORT': str(45678),
    }
    os.environ.update(settings)
def print_topk(resp, sentence):
    """Pretty-print the matches for *sentence* from a Jina search response."""
    for doc in resp.search.docs:
        print(f'Ta-Dah🔮, here are what we found for: {sentence}')
        for idx, match in enumerate(doc.matches):
            score = match.score.value
            # Negative scores are treated as non-matches and skipped.
            if score >= 0.0:
                code = match.meta_info.decode().upper().ljust(6)
                label = match.text.strip()
                print(f'> {idx:>2d}({score:.2f}) | {code} | {label}')
def index(num_docs):
    """Index up to *num_docs* lines from the configured data file."""
    flow = Flow().load_config('flow-index.yml')
    with flow:
        flow.index_lines(
            filepath=os.environ['JINA_DATA_FILE'],
            batch_size=8,
            size=num_docs,
        )
def query(top_k):
    """Interactively query the index until an empty line is entered."""
    flow = Flow().load_config('flow-query.yml')
    with flow:
        while True:
            sentence = input('please type a sentence: ')
            if not sentence:
                break
            # Bind the current sentence into the callback via a default arg.
            flow.search_lines(
                lines=[sentence],
                output_fn=lambda resp, s=sentence: print_topk(resp, s),
                top_k=top_k)
def query_restful():
    """Serve the query flow over the REST gateway until interrupted."""
    flow = Flow().load_config('flow-query.yml')
    flow.use_rest_gateway()
    with flow:
        flow.block()
def dryrun():
    """Sanity-check the index flow configuration without processing data."""
    flow = Flow().load_config('flow-index.yml')
    with flow:
        flow.dry_run()
@click.command()
@click.option(
    '--task',
    '-t',
    type=click.Choice(
        ['index', 'query', 'query_restful', 'dryrun'], case_sensitive=False
    ),
)
@click.option('--num_docs', '-n', default=70000)
@click.option('--top_k', '-k', default=5)
def main(task, num_docs, top_k):
    """CLI entry point: dispatch to the requested task.

    :param task: one of 'index', 'query', 'query_restful', 'dryrun'
    :param num_docs: maximum number of documents to index
    :param top_k: number of results returned per query
    """
    config()
    workspace = os.environ['JINA_WORKSPACE']
    if task == 'index':
        # Re-indexing into an existing workspace would mix old and new data.
        clean_workdir()
        index(num_docs)
    if task == 'query':
        if not os.path.exists(workspace):
            print(f'The directory {workspace} does not exist. Please index first via `python app.py -t index`')
            # Bug fix: previously execution fell through and queried the
            # missing index anyway; bail out after warning instead.
            return
        query(top_k)
    if task == 'query_restful':
        if not os.path.exists(workspace):
            print(f'The directory {workspace} does not exist. Please index first via `python app.py -t index`')
            return
        query_restful()
    if task == 'dryrun':
        dryrun()
if __name__ == '__main__':
main()
| 25.572917 | 111 | 0.576782 |
6375fe081324045bf083c2e6456de5f9335f5dfc | 2,331 | py | Python | 02.1a) F to C temp; Force, Energy and Work.py | malikcaukiel/malikcaukiel-Some_Physics | afe1cb56c08255bd348b7096e979c848f2e5c7dd | [
"MIT"
] | 1 | 2020-04-02T16:52:23.000Z | 2020-04-02T16:52:23.000Z | 02.1a) F to C temp; Force, Energy and Work.py | malikcaukiel/malikcaukiel-Some_Physics | afe1cb56c08255bd348b7096e979c848f2e5c7dd | [
"MIT"
] | null | null | null | 02.1a) F to C temp; Force, Energy and Work.py | malikcaukiel/malikcaukiel-Some_Physics | afe1cb56c08255bd348b7096e979c848f2e5c7dd | [
"MIT"
] | null | null | null | ### fahrenheit to centigrade ###
"""
def f_to_c(f_temp):
c_temp = (f_temp - 32) * 5/9
return c_temp
print(f_to_c(98.6))
"""
####################################################################################################
### centigrade to fahrenheit ###
"""
def c_to_f(c_temp):
f_temp = 9/5*(c_temp) + 32
return f_temp
print(c_to_f(37))
### Getting 0 centigrade in fahrenheit, now
c0_in_fahrenheit = c_to_f(0)
print(c0_in_fahrenheit)
"""
####################################################################################################
### Global Environment ###
# Train parameters used by the force/work examples below
# (SI units presumed: kg, m/s^2, m -- TODO confirm).
train_mass = 22680
train_acceleration = 10
train_distance = 100
# Mass (kg) used for the E = m*c^2 demonstration.
bomb_mass = 1
### Calculate force ###
def get_force(mass, acceleration):
    """Return the force in Newtons via Newton's second law (F = m * a)."""
    return mass * acceleration
#print(get_force(10,10))
train_force = get_force(train_mass, train_acceleration)
print(train_force)
### Printing string and a number together
print("The GE train supplies " + str(train_force) + "Newtons of force.")
########################################
### calculate energy ###
c = 3*10**8
def get_energy(mass=10):
    """Return the rest energy in Joules of *mass* (kg) via E = m * c**2.

    Bug fix: the original computed mass * c, omitting the square of the
    speed of light required by mass-energy equivalence; for bomb_mass = 1
    that returned 3e8 J instead of the correct 9e16 J.
    """
    return mass * c ** 2
print(get_energy(20))
# Testing the get_energy function with bomb_mass = 1 defined above
print(get_energy(bomb_mass))
### calculate work ###
def get_work(mass, acceleration, distance):
    """Return the work in Joules: W = F * d, with F from get_force."""
    # get_force is defined earlier at module level, so it is visible here
    # (the reverse would not hold at definition time).
    applied_force = get_force(mass, acceleration)
    return applied_force * distance
#y = get_work(10,10,10)
#print(y)
# Testing with train's variables
#get_work(train_mass, train_acceleration, train_distance)
#print(get_work(train_mass, train_acceleration, train_distance))
train_work = get_work(train_mass, train_acceleration, train_distance)
#print(trian_work)
print("The GE train does " +str(train_work)+ " Joules of work over " +str(train_distance)+ "meters.")
####################################################################################################
| 35.318182 | 140 | 0.493779 |
d2ba0d1b9e5da8af17c434ea8d4243430a72264f | 769 | py | Python | setup.py | mrmh2/dtool-azure | efc24feb5ebb8bdb663ceabfa4b0a7b15d985b67 | [
"MIT"
] | null | null | null | setup.py | mrmh2/dtool-azure | efc24feb5ebb8bdb663ceabfa4b0a7b15d985b67 | [
"MIT"
] | 1 | 2020-01-24T14:24:01.000Z | 2020-01-24T14:24:01.000Z | setup.py | mrmh2/dtool-azure | efc24feb5ebb8bdb663ceabfa4b0a7b15d985b67 | [
"MIT"
] | null | null | null | from setuptools import setup
# Project metadata consumed by setup() below.
url = "https://github.com/jic-dtool/dtool-azure"
version = "0.7.1"
# Bug fix: read the README with a context manager instead of leaving the
# file handle to be closed by garbage collection.
with open('README.rst') as fh:
    readme = fh.read()
setup(
    name="dtool-azure",
    packages=["dtool_azure"],
    version=version,
    description="Add Azure dataset support to dtool",
    long_description=readme,
    include_package_data=True,
    author="Matthew Hartley",
    author_email="Matthew.Hartley@jic.ac.uk",
    url=url,
    install_requires=[
        "dtoolcore>=3.17",
        # NOTE(review): 2.1.x is the legacy split azure-storage SDK; the
        # 12.x SDK has a different API -- confirm before upgrading pins.
        "azure-storage-blob==2.1.0",
        "azure-storage-common==2.1.0"
    ],
    entry_points={
        # Registers the broker with dtool's storage-broker plugin discovery.
        "dtool.storage_brokers": [
            "AzureStorageBroker=dtool_azure.storagebroker:AzureStorageBroker",
        ],
    },
    download_url="{}/tarball/{}".format(url, version),
    license="MIT"
)
| 25.633333 | 78 | 0.643693 |
734f38ec0e60e710bc7f6b2182a52970080c17d9 | 35,632 | py | Python | nova/tests/api/openstack/compute/contrib/test_floating_ips.py | berrange/nova | 2dea6662cdf15558edd3f0bf33642e7c6e18cb5c | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/contrib/test_floating_ips.py | berrange/nova | 2dea6662cdf15558edd3f0bf33642e7c6e18cb5c | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/contrib/test_floating_ips.py | berrange/nova | 2dea6662cdf15558edd3f0bf33642e7c6e18cb5c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
from lxml import etree
import mock
import webob
from nova.api.openstack.compute.contrib import floating_ips
from nova.api.openstack import extensions
from nova import compute
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import network
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
def network_api_get_floating_ip(self, context, id):
    """Stub for network API get_floating_ip: one unassociated address."""
    return dict(id=1, address='10.10.10.10', pool='nova', fixed_ip_id=None)
def network_api_get_floating_ip_by_address(self, context, address):
    """Stub for get_floating_ip_by_address: ip associated to fixed ip 10."""
    return dict(id=1, address='10.10.10.10', pool='nova', fixed_ip_id=10)
def network_api_get_floating_ips_by_project(self, context):
    """Stub: one instance-associated and one unassociated floating ip."""
    return [{'id': 1,
             'address': '10.10.10.10',
             'pool': 'nova',
             'fixed_ip': {'address': '10.0.0.1',
                          'instance_uuid': FAKE_UUID,
                          'instance': {'uuid': FAKE_UUID}}},
            {'id': 2,
             'pool': 'nova', 'interface': 'eth0',
             'address': '10.10.10.11',
             'fixed_ip': None}]
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Stub for compute.api.API.get returning a minimal fake instance."""
    return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
def network_api_allocate(self, context):
    """Stub: always allocate the fixed test address."""
    return '10.10.10.10'
def network_api_release(self, context, address):
    """Stub: releasing a floating ip is a no-op in these tests."""
    pass
def compute_api_associate(self, context, instance_id, address):
    """Stub: associating a floating ip with an instance is a no-op."""
    pass
def network_api_associate(self, context, floating_address, fixed_address):
    """Stub: associating floating and fixed addresses is a no-op."""
    pass
def network_api_disassociate(self, context, instance, floating_address):
    """Stub: disassociating a floating ip is a no-op in these tests."""
    pass
def fake_instance_get(context, instance_id):
    """Stub for db.instance_get returning a minimal fake instance record."""
    # Note: the passed instance_id is ignored; a fresh uuid is generated
    # on every call, mirroring the original stub's behaviour.
    instance = dict(
        id=1,
        uuid=uuid.uuid4(),
        name='fake',
        user_id='fakeuser',
        project_id='123',
    )
    return instance
def stub_nw_info(stubs):
    """Return a callable usable as compute_utils.get_nw_info_for_instance."""
    def get_nw_info_for_instance(instance):
        # The instance argument is ignored; fake network info is returned.
        return fake_network.fake_get_instance_nw_info(stubs)
    return get_nw_info_for_instance
def get_instance_by_floating_ip_addr(self, context, address):
    """Stub: no instance is associated with any floating ip address."""
    return None
class FloatingIpTestNeutron(test.NoDBTestCase):
    """Checks FloatingIPController behaviour when neutron is the network API."""
    def setUp(self):
        super(FloatingIpTestNeutron, self).setUp()
        # Force the neutron v2 network API implementation for this class.
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.controller = floating_ips.FloatingIPController()
    def test_floatingip_delete(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
        fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
        # NOTE(review): contextlib.nested is Python-2 only (removed in
        # Python 3) -- fine while this tree targets py2, revisit on port.
        with contextlib.nested(
            mock.patch.object(self.controller.network_api,
                              'disassociate_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'disassociate_and_release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'get_instance_id_by_floating_address',
                              return_value=None),
            mock.patch.object(self.controller.network_api,
                              'get_floating_ip',
                              return_value=fip_val)) as (
                disoc_fip, dis_and_del, rel_fip, _, _):
            self.controller.delete(req, 1)
            # With neutron the delete path must use the combined
            # disassociate-and-release call, not the two separate calls.
            self.assertFalse(disoc_fip.called)
            self.assertFalse(rel_fip.called)
            # Only disassociate_and_release_floating_ip is
            # called if using neutron
            self.assertTrue(dis_and_del.called)
class FloatingIpTest(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
def _create_floating_ips(self, floating_ips=None):
"""Create a floating ip object."""
if floating_ips is None:
floating_ips = [self.floating_ip]
elif not isinstance(floating_ips, (list, tuple)):
floating_ips = [floating_ips]
def make_ip_dict(ip):
"""Shortcut for creating floating ip dict."""
return
dict_ = {'pool': 'nova', 'host': 'fake_host'}
return db.floating_ip_bulk_create(
self.context, [dict(address=ip, **dict_) for ip in floating_ips],
)
    def _delete_floating_ip(self):
        # Remove the record created by _create_floating_ips for teardown.
        db.floating_ip_destroy(self.context, self.floating_ip)
def setUp(self):
super(FloatingIpTest, self).setUp()
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
self.stubs.Set(network.api.API, "get_floating_ips_by_project",
network_api_get_floating_ips_by_project)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
stub_nw_info(self.stubs))
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Floating_ips'])
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
self.stubs.Set(db, 'instance_get',
fake_instance_get)
self.context = context.get_admin_context()
self._create_floating_ips()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = floating_ips.FloatingIPController()
self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)
def tearDown(self):
self._delete_floating_ip()
super(FloatingIpTest, self).tearDown()
def test_floatingip_delete(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
with contextlib.nested(
mock.patch.object(self.controller.network_api,
'disassociate_floating_ip'),
mock.patch.object(self.controller.network_api,
'release_floating_ip'),
mock.patch.object(self.controller.network_api,
'get_instance_id_by_floating_address',
return_value=None),
mock.patch.object(self.controller.network_api,
'get_floating_ip',
return_value=fip_val)) as (
disoc_fip, rel_fip, _, _):
self.controller.delete(req, 1)
self.assertTrue(disoc_fip.called)
self.assertTrue(rel_fip.called)
def test_translate_floating_ip_view(self):
floating_ip_address = self.floating_ip
floating_ip = db.floating_ip_get_by_address(self.context,
floating_ip_address)
# NOTE(vish): network_get uses the id not the address
floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
view = floating_ips._translate_floating_ip_view(floating_ip)
self.assertIn('floating_ip', view)
self.assertTrue(view['floating_ip']['id'])
self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
self.assertIsNone(view['floating_ip']['fixed_ip'])
self.assertIsNone(view['floating_ip']['instance_id'])
def test_translate_floating_ip_view_dict(self):
floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
'fixed_ip': None}
view = floating_ips._translate_floating_ip_view(floating_ip)
self.assertIn('floating_ip', view)
def test_floating_ips_list(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
res_dict = self.controller.index(req)
response = {'floating_ips': [{'instance_id': FAKE_UUID,
'ip': '10.10.10.10',
'pool': 'nova',
'fixed_ip': '10.0.0.1',
'id': 1},
{'instance_id': None,
'ip': '10.10.10.11',
'pool': 'nova',
'fixed_ip': None,
'id': 2}]}
self.assertEqual(res_dict, response)
def test_floating_ip_release_nonexisting(self):
def fake_get_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotFound(id=id)
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/9876')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app(init_only=('os-floating-ips',)))
self.assertEqual(res.status_int, 404)
expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
'for id 9876", "code": 404}}')
self.assertEqual(res.body, expected_msg)
def test_floating_ip_release_race_cond(self):
def fake_get_floating_ip(*args, **kwargs):
return {'fixed_ip_id': 1, 'address': self.floating_ip}
def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
return 'test-inst'
def fake_disassociate_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotAssociated(args[3])
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
self.stubs.Set(floating_ips, "get_instance_by_floating_ip_addr",
fake_get_instance_by_floating_ip_addr)
self.stubs.Set(floating_ips, "disassociate_floating_ip",
fake_disassociate_floating_ip)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app(init_only=('os-floating-ips',)))
self.assertEqual(res.status_int, 202)
def test_floating_ip_show(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertIsNone(res_dict['floating_ip']['instance_id'])
def test_floating_ip_show_not_found(self):
def fake_get_floating_ip(*args, **kwargs):
raise exception.FloatingIpNotFound(id='fake')
self.stubs.Set(network.api.API, "get_floating_ip",
fake_get_floating_ip)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/9876')
res = req.get_response(fakes.wsgi_app(init_only=('os-floating-ips',)))
self.assertEqual(res.status_int, 404)
expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
'for id 9876", "code": 404}}')
self.assertEqual(res.body, expected_msg)
def test_show_associated_floating_ip(self):
def get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip': {'address': '10.0.0.1',
'instance_uuid': FAKE_UUID,
'instance': {'uuid': FAKE_UUID}}}
self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
def test_recreation_of_floating_ip(self):
self._delete_floating_ip()
self._create_floating_ips()
def test_floating_ip_in_bulk_creation(self):
self._delete_floating_ip()
self._create_floating_ips([self.floating_ip, self.floating_ip_2])
all_ips = db.floating_ip_get_all(self.context)
ip_list = [ip['address'] for ip in all_ips]
self.assertIn(self.floating_ip, ip_list)
self.assertIn(self.floating_ip_2, ip_list)
def test_fail_floating_ip_in_bulk_creation(self):
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ips,
[self.floating_ip, self.floating_ip_2])
all_ips = db.floating_ip_get_all(self.context)
ip_list = [ip['address'] for ip in all_ips]
self.assertIn(self.floating_ip, ip_list)
self.assertNotIn(self.floating_ip_2, ip_list)
def test_floating_ip_allocate_no_free_ips(self):
def fake_allocate(*args, **kwargs):
raise exception.NoMoreFloatingIps()
self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req)
self.assertIn('No more floating ips', ex.explanation)
def test_floating_ip_allocate_no_free_ips_pool(self):
def fake_allocate(*args, **kwargs):
raise exception.NoMoreFloatingIps()
self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, {'pool': 'non_existent_pool'})
self.assertIn('No more floating ips in pool non_existent_pool',
ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_over_quota(self, allocate_mock):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req)
self.assertIn('IP allocation over quota', ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, {'pool': 'non_existent_pool'})
self.assertIn('IP allocation over quota in pool non_existent_pool.',
ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpPoolNotFound())
def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, {'pool': 'non_existent_pool'})
self.assertIn('Floating ip pool not found.', ex.explanation)
def test_floating_ip_allocate(self):
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}
self.stubs.Set(network.api.API, "allocate_floating_ip",
fake1)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake2)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
res_dict = self.controller.create(req)
ip = res_dict['floating_ip']
expected = {
"id": 1,
"instance_id": None,
"ip": "10.10.10.10",
"fixed_ip": None,
"pool": 'nova'}
self.assertEqual(ip, expected)
def test_floating_ip_release(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
self.controller.delete(req, 1)
def test_floating_ip_associate(self):
fixed_address = '192.168.1.100'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address, kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
rsp = self.manager._add_floating_ip(req, 'test_inst', body)
self.assertEqual(202, rsp.status_int)
def test_floating_ip_associate_invalid_instance(self):
def fake_get(self, context, id, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=id)
self.stubs.Set(compute.api.API, "get", fake_get)
body = dict(addFloatingIp=dict(address=self.floating_ip))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip, req, 'test_inst',
body)
def test_not_extended_floating_ip_associate_fixed(self):
# Check that fixed_address is ignored if os-extended-floating-ips
# is not loaded
fixed_address_requested = '192.168.1.101'
fixed_address_allocated = '192.168.1.100'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address_allocated,
kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip,
fixed_address=fixed_address_requested))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
rsp = self.manager._add_floating_ip(req, 'test_inst', body)
self.assertEqual(202, rsp.status_int)
def test_associate_not_allocated_floating_ip_to_instance(self):
def fake_associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
floating_ip = '10.10.10.11'
body = dict(addFloatingIp=dict(address=floating_ip))
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(fakes.wsgi_app(init_only=('servers',)))
res_dict = jsonutils.loads(resp.body)
self.assertEqual(resp.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['message'],
"floating ip not found")
@mock.patch.object(network.api.API, 'associate_floating_ip',
side_effect=exception.Forbidden)
def test_associate_floating_ip_forbidden(self, associate_mock):
body = dict(addFloatingIp=dict(address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._add_floating_ip, req, 'test_inst',
body)
def test_associate_floating_ip_bad_address_key(self):
body = dict(addFloatingIp=dict(bad_address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._add_floating_ip, req, 'test_inst',
body)
def test_associate_floating_ip_bad_addfloatingip_key(self):
body = dict(bad_addFloatingIp=dict(address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._add_floating_ip, req, 'test_inst',
body)
def test_floating_ip_disassociate(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return 'test_inst'
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
rsp = self.manager._remove_floating_ip(req, 'test_inst', body)
self.assertEqual(202, rsp.status_int)
def test_floating_ip_disassociate_missing(self):
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.manager._remove_floating_ip,
req, 'test_inst', body)
def test_floating_ip_associate_non_existent_ip(self):
def fake_network_api_associate(self, context, instance,
floating_address=None,
fixed_address=None):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_network_api_associate)
body = dict(addFloatingIp=dict(address='1.1.1.1'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip,
req, 'test_inst', body)
def test_floating_ip_disassociate_non_existent_ip(self):
def network_api_get_floating_ip_by_address(self, context,
floating_address):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
body = dict(removeFloatingIp=dict(address='1.1.1.1'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._remove_floating_ip,
req, 'test_inst', body)
def test_floating_ip_disassociate_wrong_instance_uuid(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return 'test_inst'
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.manager._remove_floating_ip,
req, wrong_uuid, body)
def test_floating_ip_disassociate_wrong_instance_id(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return 'wrong_inst'
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.manager._remove_floating_ip,
req, 'test_inst', body)
def test_floating_ip_disassociate_auto_assigned(self):
    # An auto-assigned floating ip cannot be disassociated through the
    # API; CannotDisassociateAutoAssignedFloatingIP from the network
    # layer must map to HTTP 403 Forbidden.
    def fake_get_floating_ip_addr_auto_assigned(self, context, address):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip_id': 10, 'auto_assigned': 1}

    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'

    def network_api_disassociate(self, context, instance,
                                 floating_address):
        raise exception.CannotDisassociateAutoAssignedFloatingIP()

    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_floating_ip_addr_auto_assigned)
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    self.stubs.Set(network.api.API, "disassociate_floating_ip",
                   network_api_disassociate)

    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_disassociate_map_authorization_exc(self):
    # A Forbidden exception raised by the network layer during
    # disassociation must map to HTTP 403 Forbidden.
    def fake_get_floating_ip_addr_auto_assigned(self, context, address):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip_id': 10, 'auto_assigned': 1}

    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'

    def network_api_disassociate(self, context, instance, address):
        raise exception.Forbidden()

    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_floating_ip_addr_auto_assigned)
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    self.stubs.Set(network.api.API, "disassociate_floating_ip",
                   network_api_disassociate)

    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
# these are a few bad param tests

def test_bad_address_param_in_remove_floating_ip(self):
    # A removeFloatingIp body without an 'address' key is a 400.
    body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._remove_floating_ip, req, 'test_inst',
                      body)
def test_missing_dict_param_in_remove_floating_ip(self):
    # removeFloatingIp must map to a dict, not a bare string -> 400.
    body = dict(removeFloatingIp='11.0.0.1')
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._remove_floating_ip, req, 'test_inst',
                      body)
def test_missing_dict_param_in_add_floating_ip(self):
    # addFloatingIp must map to a dict, not a bare string -> 400.
    body = dict(addFloatingIp='11.0.0.1')
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._add_floating_ip, req, 'test_inst',
                      body)
class ExtendedFloatingIpTest(test.TestCase):
    """Tests for the os-extended-floating-ips extension, which adds the
    optional ``fixed_address`` argument to the addFloatingIp action."""

    floating_ip = "10.10.10.10"
    floating_ip_2 = "10.10.10.11"

    def _create_floating_ips(self, floating_ips=None):
        """Create floating ip records in the test database.

        Accepts a single address or a list/tuple of addresses; defaults
        to ``self.floating_ip``.
        """
        if floating_ips is None:
            floating_ips = [self.floating_ip]
        elif not isinstance(floating_ips, (list, tuple)):
            floating_ips = [floating_ips]
        # NOTE: a dead nested helper (``make_ip_dict``) whose body was a
        # bare ``return`` — so it returned None and was never called —
        # was removed here; behavior is unchanged.
        dict_ = {'pool': 'nova', 'host': 'fake_host'}
        return db.floating_ip_bulk_create(
            self.context, [dict(address=ip, **dict_) for ip in floating_ips],
        )

    def _delete_floating_ip(self):
        """Remove the default floating ip created by setUp()."""
        db.floating_ip_destroy(self.context, self.floating_ip)

    def setUp(self):
        super(ExtendedFloatingIpTest, self).setUp()
        # Stub out the compute and network APIs so the tests run without
        # real services.
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self.stubs))

        # Load both the base and the extended floating ip extensions.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Floating_ips', 'Extended_floating_ips'])
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
        self.stubs.Set(db, 'instance_get',
                       fake_instance_get)

        self.context = context.get_admin_context()
        self._create_floating_ips()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.ext_mgr.extensions['os-floating-ips'] = True
        self.ext_mgr.extensions['os-extended-floating-ips'] = True
        self.controller = floating_ips.FloatingIPController()
        self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)

    def tearDown(self):
        self._delete_floating_ip()
        super(ExtendedFloatingIpTest, self).tearDown()

    def test_extended_floating_ip_associate_fixed(self):
        """A fixed_address in the body is forwarded to the network API's
        associate_floating_ip call and the action returns 202."""
        fixed_address = '192.168.1.101'

        def fake_associate_floating_ip(*args, **kwargs):
            self.assertEqual(fixed_address, kwargs['fixed_address'])

        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address=fixed_address))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        rsp = self.manager._add_floating_ip(req, 'test_inst', body)
        self.assertEqual(202, rsp.status_int)

    def test_extended_floating_ip_associate_fixed_not_allocated(self):
        """Associating to a fixed address not assigned to the instance
        returns 400 with an explanatory message."""
        def fake_associate_floating_ip(*args, **kwargs):
            pass

        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address='11.11.11.11'))
        req = webob.Request.blank('/v2/fake/servers/test_inst/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        resp = req.get_response(fakes.wsgi_app(init_only=('servers',)))
        res_dict = jsonutils.loads(resp.body)
        self.assertEqual(resp.status_int, 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         "Specified fixed address not assigned to instance")
class FloatingIpSerializerTest(test.TestCase):
    """XML serialization tests for the floating ip templates."""

    def test_default_serializer(self):
        payload = dict(floating_ip=dict(instance_id=1,
                                        ip='10.10.10.10',
                                        fixed_ip='10.0.0.1',
                                        id=1))
        tree = etree.fromstring(
            floating_ips.FloatingIPTemplate().serialize(payload))

        self.assertEqual('floating_ip', tree.tag)
        # Every value is rendered as an XML attribute string.
        for attr, expected in (('instance_id', '1'),
                               ('ip', '10.10.10.10'),
                               ('fixed_ip', '10.0.0.1'),
                               ('id', '1')):
            self.assertEqual(expected, tree.get(attr))

    def test_index_serializer(self):
        payload = dict(floating_ips=[
            dict(instance_id=1, ip='10.10.10.10', fixed_ip='10.0.0.1', id=1),
            dict(instance_id=None, ip='10.10.10.11', fixed_ip=None, id=2)])
        tree = etree.fromstring(
            floating_ips.FloatingIPsTemplate().serialize(payload))

        self.assertEqual('floating_ips', tree.tag)
        self.assertEqual(2, len(tree))
        # None values are stringified as 'None' by the serializer.
        expected_rows = [('1', '10.10.10.10', '10.0.0.1', '1'),
                         ('None', '10.10.10.11', 'None', '2')]
        for child, (instance_id, ip, fixed_ip, id_) in zip(tree,
                                                           expected_rows):
            self.assertEqual('floating_ip', child.tag)
            self.assertEqual(instance_id, child.get('instance_id'))
            self.assertEqual(ip, child.get('ip'))
            self.assertEqual(fixed_ip, child.get('fixed_ip'))
            self.assertEqual(id_, child.get('id'))
| 42.62201 | 79 | 0.61992 |
4aea0bf189f5d292214ee90960b8b39c9f37a1da | 15,339 | py | Python | buildscripts/resmokelib/testing/fixtures/shardedcluster.py | MartinNeupauer/mongo | 6cc2dfe7edd312b8596355edef454e15988e350e | [
"Apache-2.0"
] | null | null | null | buildscripts/resmokelib/testing/fixtures/shardedcluster.py | MartinNeupauer/mongo | 6cc2dfe7edd312b8596355edef454e15988e350e | [
"Apache-2.0"
] | 2 | 2021-03-26T00:01:11.000Z | 2021-03-26T00:02:19.000Z | buildscripts/resmokelib/testing/fixtures/shardedcluster.py | MartinNeupauer/mongo | 6cc2dfe7edd312b8596355edef454e15988e350e | [
"Apache-2.0"
] | null | null | null | """
Sharded cluster fixture for executing JSTests against.
"""
from __future__ import absolute_import
import copy
import os.path
import socket
import time
import pymongo
from . import interface
from . import standalone
from . import replicaset
from ... import config
from ... import core
from ... import errors
from ... import utils
from ...utils import registry
class ShardedClusterFixture(interface.Fixture):
    """
    Fixture which provides JSTests with a sharded cluster to run
    against.

    The cluster is composed of an optional dedicated config server
    replica set, ``num_shards`` shards (standalone mongod processes, or
    replica sets when ``num_rs_nodes_per_shard`` is given) and a single
    mongos router that tests connect to.
    """

    # Replica set names used for the config server and for each shard.
    _CONFIGSVR_REPLSET_NAME = "config-rs"
    _SHARD_REPLSET_NAME_PREFIX = "shard-rs"

    def __init__(self,
                 logger,
                 job_num,
                 mongos_executable=None,
                 mongos_options=None,
                 mongod_executable=None,
                 mongod_options=None,
                 dbpath_prefix=None,
                 preserve_dbpath=False,
                 num_shards=1,
                 num_rs_nodes_per_shard=None,
                 separate_configsvr=True,
                 enable_sharding=None,
                 auth_options=None):
        """
        Initializes ShardedClusterFixture with the different options to
        the mongod and mongos processes.
        """

        interface.Fixture.__init__(self, logger, job_num)

        # Each node derives its own dbpath under _dbpath_prefix, so a
        # caller-supplied dbpath would be ambiguous.
        if "dbpath" in mongod_options:
            raise ValueError("Cannot specify mongod_options.dbpath")

        self.mongos_executable = mongos_executable
        self.mongos_options = utils.default_if_none(mongos_options, {})
        self.mongod_executable = mongod_executable
        self.mongod_options = utils.default_if_none(mongod_options, {})
        self.preserve_dbpath = preserve_dbpath
        self.num_shards = num_shards
        self.num_rs_nodes_per_shard = num_rs_nodes_per_shard
        self.separate_configsvr = separate_configsvr
        self.enable_sharding = utils.default_if_none(enable_sharding, [])
        self.auth_options = auth_options

        # Command line options override the YAML configuration.
        dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
        dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
        self._dbpath_prefix = os.path.join(dbpath_prefix,
                                           "job%d" % (self.job_num),
                                           config.FIXTURE_SUBDIR)

        # Child fixtures; created lazily in setup()/await_ready().
        self.configsvr = None
        self.mongos = None
        self.shards = []

    def setup(self):
        """Create (on first call) and start the config server and all shards."""
        if self.separate_configsvr:
            if self.configsvr is None:
                self.configsvr = self._new_configsvr()
            self.configsvr.setup()

        if not self.shards:
            for i in xrange(self.num_shards):
                # num_rs_nodes_per_shard selects between standalone and
                # replica-set shards.
                if self.num_rs_nodes_per_shard is None:
                    shard = self._new_standalone_shard(i)
                elif isinstance(self.num_rs_nodes_per_shard, int):
                    if self.num_rs_nodes_per_shard <= 0:
                        raise ValueError("num_rs_nodes_per_shard must be a positive integer")
                    shard = self._new_rs_shard(i, self.num_rs_nodes_per_shard)
                else:
                    raise TypeError("num_rs_nodes_per_shard must be an integer or None")
                self.shards.append(shard)

        # Start up each of the shards
        for shard in self.shards:
            shard.setup()

    def await_ready(self):
        """Block until every component is ready, then start the mongos,
        register the shards with it, and enable sharding on the
        requested databases."""
        # Wait for the config server
        if self.configsvr is not None:
            self.configsvr.await_ready()

        # Wait for each of the shards
        for shard in self.shards:
            shard.await_ready()

        # The mongos is created here (not in setup()) because it needs
        # the config server's port to be known.
        if self.mongos is None:
            self.mongos = self._new_mongos()

        # Start up the mongos
        self.mongos.setup()

        # Wait for the mongos
        self.mongos.await_ready()
        # Tests connect to the cluster through the mongos' port.
        self.port = self.mongos.port

        client = utils.new_mongo_client(port=self.port)
        if self.auth_options is not None:
            auth_db = client[self.auth_options["authenticationDatabase"]]
            auth_db.authenticate(self.auth_options["username"],
                                 password=self.auth_options["password"],
                                 mechanism=self.auth_options["authenticationMechanism"])

        # Inform mongos about each of the shards
        for shard in self.shards:
            self._add_shard(client, shard)

        # Enable sharding on each of the specified databases
        for db_name in self.enable_sharding:
            self.logger.info("Enabling sharding for '%s' database...", db_name)
            client.admin.command({"enablesharding": db_name})

    def _do_teardown(self):
        """
        Shuts down the sharded cluster.

        Returns True only if every component shuts down cleanly;
        teardown of the remaining components is attempted even after a
        failure.
        """
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.

        if not running_at_start:
            self.logger.info(
                "Sharded cluster was expected to be running in _do_teardown(), but wasn't.")

        if self.configsvr is not None:
            if running_at_start:
                self.logger.info("Stopping config server...")

            success = self.configsvr.teardown() and success

            if running_at_start:
                self.logger.info("Successfully terminated the config server.")

        if self.mongos is not None:
            if running_at_start:
                self.logger.info("Stopping mongos...")

            success = self.mongos.teardown() and success

            if running_at_start:
                self.logger.info("Successfully terminated the mongos.")

        if running_at_start:
            self.logger.info("Stopping shards...")
        for shard in self.shards:
            success = shard.teardown() and success
        if running_at_start:
            self.logger.info("Successfully terminated all shards.")

        return success

    def is_running(self):
        """
        Returns true if the config server, all shards, and the mongos
        are all still operating, and false otherwise.
        """
        return (self.configsvr is not None and self.configsvr.is_running() and
                all(shard.is_running() for shard in self.shards) and
                self.mongos is not None and self.mongos.is_running())

    def get_connection_string(self):
        """Return a 'host:port' string for connecting to the mongos."""
        if self.mongos is None:
            raise ValueError("Must call setup() before calling get_connection_string()")

        return "%s:%d" % (socket.gethostname(), self.mongos.port)

    def _new_configsvr(self):
        """
        Returns a replicaset.ReplicaSetFixture configured to be used as
        the config server of a sharded cluster.
        """

        mongod_logger = self.logger.new_fixture_node_logger("configsvr")

        mongod_options = copy.deepcopy(self.mongod_options)
        mongod_options["configsvr"] = ""
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "config")
        mongod_options["replSet"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
        # The config server is forced onto WiredTiger here regardless of
        # the storage engine the suite runs the shards with.
        mongod_options["storageEngine"] = "wiredTiger"

        return replicaset.ReplicaSetFixture(mongod_logger,
                                            self.job_num,
                                            mongod_executable=self.mongod_executable,
                                            mongod_options=mongod_options,
                                            preserve_dbpath=self.preserve_dbpath,
                                            num_nodes=3,
                                            auth_options=self.auth_options,
                                            replset_config_options={"configsvr": True})

    def _new_rs_shard(self, index, num_rs_nodes_per_shard):
        """
        Returns a replicaset.ReplicaSetFixture configured to be used as a
        shard in a sharded cluster.
        """

        mongod_logger = self.logger.new_fixture_node_logger("shard%d" % index)

        mongod_options = copy.deepcopy(self.mongod_options)
        mongod_options["shardsvr"] = ""
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard%d" % (index))
        mongod_options["replSet"] = ShardedClusterFixture._SHARD_REPLSET_NAME_PREFIX + str(index)

        return replicaset.ReplicaSetFixture(mongod_logger,
                                            self.job_num,
                                            mongod_executable=self.mongod_executable,
                                            mongod_options=mongod_options,
                                            preserve_dbpath=self.preserve_dbpath,
                                            num_nodes=num_rs_nodes_per_shard,
                                            auth_options=self.auth_options,
                                            replset_config_options={"configsvr": False})

    def _new_standalone_shard(self, index):
        """
        Returns a standalone.MongoDFixture configured to be used as a
        shard in a sharded cluster.
        """

        mongod_logger = self.logger.new_fixture_node_logger("shard%d" % index)

        mongod_options = copy.deepcopy(self.mongod_options)
        mongod_options["shardsvr"] = ""
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard%d" % (index))

        return standalone.MongoDFixture(mongod_logger,
                                        self.job_num,
                                        mongod_executable=self.mongod_executable,
                                        mongod_options=mongod_options,
                                        preserve_dbpath=self.preserve_dbpath)

    def _new_mongos(self):
        """
        Returns a _MongoSFixture configured to be used as the mongos for
        a sharded cluster.
        """

        mongos_logger = self.logger.new_fixture_node_logger("mongos")

        mongos_options = copy.deepcopy(self.mongos_options)
        configdb_hostname = socket.gethostname()

        if self.separate_configsvr:
            # Point the mongos at the config server replica set.
            configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
            configdb_port = self.configsvr.port
            mongos_options["configdb"] = "%s/%s:%d" % (configdb_replset,
                                                       configdb_hostname,
                                                       configdb_port)
        else:
            # Without a dedicated config server the first shard is used
            # as the config database.
            mongos_options["configdb"] = "%s:%d" % (configdb_hostname, self.shards[0].port)

        return _MongoSFixture(mongos_logger,
                              self.job_num,
                              mongos_executable=self.mongos_executable,
                              mongos_options=mongos_options)

    def _add_shard(self, client, shard):
        """
        Add the specified program as a shard by executing the addShard
        command.

        See https://docs.mongodb.org/manual/reference/command/addShard
        for more details.
        """

        connection_string = shard.get_connection_string()
        self.logger.info("Adding %s as a shard..." % (connection_string))
        client.admin.command({"addShard": "%s" % (connection_string)})
class _MongoSFixture(interface.Fixture):
    """
    Fixture which provides JSTests with a mongos to connect to.
    """

    # Not looked up by name in the fixture registry; instances are only
    # created internally by ShardedClusterFixture.
    REGISTERED_NAME = registry.LEAVE_UNREGISTERED

    def __init__(self,
                 logger,
                 job_num,
                 mongos_executable=None,
                 mongos_options=None):

        interface.Fixture.__init__(self, logger, job_num)

        # Command line options override the YAML configuration.
        self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE, mongos_executable)

        self.mongos_options = utils.default_if_none(mongos_options, {}).copy()

        # Handle to the running mongos process; None until setup().
        self.mongos = None

    def setup(self):
        """Launch the mongos process, allocating a port if none was given."""
        if "port" not in self.mongos_options:
            self.mongos_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
        self.port = self.mongos_options["port"]

        mongos = core.programs.mongos_program(self.logger,
                                              executable=self.mongos_executable,
                                              **self.mongos_options)
        try:
            self.logger.info("Starting mongos on port %d...\n%s", self.port, mongos.as_command())
            mongos.start()
            self.logger.info("mongos started on port %d with pid %d.", self.port, mongos.pid)
        except:
            self.logger.exception("Failed to start mongos on port %d.", self.port)
            raise

        self.mongos = mongos

    def await_ready(self):
        """Poll until the mongos accepts connections; raises
        ServerFailure if the process exits or the timeout elapses."""
        deadline = time.time() + standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS

        # Wait until the mongos is accepting connections. The retry logic is necessary to support
        # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
        # be established.
        while True:
            # Check whether the mongos exited for some reason.
            exit_code = self.mongos.poll()
            if exit_code is not None:
                raise errors.ServerFailure("Could not connect to mongos on port %d, process ended"
                                           " unexpectedly with code %d." % (self.port, exit_code))

            try:
                # Use a shorter connection timeout to more closely satisfy the requested deadline.
                client = utils.new_mongo_client(self.port, timeout_millis=500)
                client.admin.command("ping")
                break
            except pymongo.errors.ConnectionFailure:
                remaining = deadline - time.time()
                if remaining <= 0.0:
                    raise errors.ServerFailure(
                        "Failed to connect to mongos on port %d after %d seconds"
                        % (self.port, standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS))

                self.logger.info("Waiting to connect to mongos on port %d.", self.port)
                time.sleep(0.1)  # Wait a little bit before trying again.

        self.logger.info("Successfully contacted the mongos on port %d.", self.port)

    def _do_teardown(self):
        """Stop the mongos process; returns True on a clean (code 0) exit."""
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.

        if not running_at_start and self.mongos is not None:
            self.logger.info(
                "mongos on port %d was expected to be running in _do_teardown(), but wasn't. "
                "Exited with code %d.",
                self.port, self.mongos.poll())

        if self.mongos is not None:
            if running_at_start:
                self.logger.info("Stopping mongos on port %d with pid %d...",
                                 self.port,
                                 self.mongos.pid)
                self.mongos.stop()

            exit_code = self.mongos.wait()
            success = exit_code == 0

            if running_at_start:
                self.logger.info("Successfully terminated the mongos on port %d, exited with code"
                                 " %d",
                                 self.port,
                                 exit_code)

        return success

    def is_running(self):
        """Return True while the mongos process is alive."""
        return self.mongos is not None and self.mongos.poll() is None
399839b0a9c4082937e7f5c7df09ad044314c45e | 804 | py | Python | 05-first-class_functions/argument.py | sexyjoon/fluent-python | 8635960f99cd3c46bd8b839e34a148885180164d | [
"CNRI-Python"
] | null | null | null | 05-first-class_functions/argument.py | sexyjoon/fluent-python | 8635960f99cd3c46bd8b839e34a148885180164d | [
"CNRI-Python"
] | 1 | 2021-06-02T00:33:53.000Z | 2021-06-02T00:33:53.000Z | 05-first-class_functions/argument.py | sexyjoon/fluent-python | 8635960f99cd3c46bd8b839e34a148885180164d | [
"CNRI-Python"
] | null | null | null | def tag(name, *content, cls=None, **attrs):
'''Create tags at least 1'''
if cls is not None:
attrs['class'] = cls
if attrs:
attr_str = ''.join(' %s="%s"' % (attr, value) for attr, value in sorted(attrs.items()))
else:
attr_str = ''
if content:
return '\n'.join('<%s%s>%s</%s>' % (name, attr_str, c, name) for c in content)
else:
return '<%s%s />' % (name, attr_str)
if __name__ == '__main__':
    # Demonstrate the different ways tag() can be invoked.
    print(tag('br'))                                  # no content -> self-closing tag
    print(tag('p', 'hello'))
    print(tag('p', 'hello', 'world'))                 # one tag per content item
    print(tag('p', 'hello', id=33))
    print(tag('p', 'hello', 'world', cls='sidebar'))  # cls renders as class="..."
    print(tag(content='testing', name='img'))         # 'content' lands in **attrs here
    my_tag = {'name': 'img', 'title': 'Sunset Boulervard', 'src': 'sunset.jpg', 'cls': 'framed'}
    print(tag(**my_tag))                              # dict expanded into keyword args
7a2b045520b728650055af4c303c320d368f3332 | 4,369 | py | Python | python/GafferUI/StringPlugValueWidget.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | python/GafferUI/StringPlugValueWidget.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | python/GafferUI/StringPlugValueWidget.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2011-2013, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import Gaffer
import GafferUI
## User docs :
#
# Return commits any changes onto the plug.
class StringPlugValueWidget( GafferUI.PlugValueWidget ) :
	"""PlugValueWidget that edits a StringPlug via a TextWidget.

	Finishing an edit commits the text onto the plug; pressing Escape
	abandons the edit and restores the plug's current value. With
	continuousUpdate=True the plug is also updated on every keystroke.
	"""

	def __init__( self, plug, continuousUpdate=False, **kw ) :

		self.__textWidget = GafferUI.TextWidget()

		GafferUI.PlugValueWidget.__init__( self, self.__textWidget, plug, **kw )

		self._addPopupMenu( self.__textWidget )

		# Connections are stored on self so they stay alive for the
		# widget's lifetime.
		self.__keyPressConnection = self.__textWidget.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
		self.__editingFinishedConnection = self.__textWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__textChanged ) )
		if continuousUpdate :
			self.__textChangedConnection = self.__textWidget.textChangedSignal().connect( Gaffer.WeakMethod( self.__textChanged ) )

		self._updateFromPlug()

	def textWidget( self ) :
		"""Returns the internal TextWidget used for editing."""

		return self.__textWidget

	def setHighlighted( self, highlighted ) :

		GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
		# Forward the highlight state to the text widget as well.
		self.textWidget().setHighlighted( highlighted )

	def _updateFromPlug( self ) :

		if self.getPlug() is not None :
			with self.getContext() :
				value = self.getPlug().getValue()

			if value != self.__textWidget.getText() :
				# Setting the text moves the cursor to the end,
				# even if the new text is the same. We must avoid
				# calling setText() in this situation, otherwise the
				# cursor is always moving to the end whenever a key is
				# pressed in continuousUpdate mode.
				self.__textWidget.setText( value )

		self.__textWidget.setEditable( self._editable() )

	def __keyPress( self, widget, event ) :

		assert( widget is self.__textWidget )

		if not self.__textWidget.getEditable() :
			return False

		# escape abandons everything
		if event.key=="Escape" :
			self._updateFromPlug()
			return True

		return False

	def __textChanged( self, textWidget ) :

		assert( textWidget is self.__textWidget )

		if self._editable() :
			text = self.__textWidget.getText()
			with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
				self.getPlug().setValue( text )

			# now we've transferred the text changes to the global undo queue, we remove them
			# from the widget's private text editing undo queue. it will then ignore undo shortcuts,
			# allowing them to fall through to the global undo shortcut.
			self.__textWidget.clearUndo()
| 37.663793 | 129 | 0.710689 |
8b4285db1363ef7d660b6f63a335db8e45e8202d | 480 | py | Python | vyper/types/check.py | siraben/vyper | fc9348b997b571e2b608e89b899362143f78d754 | [
"MIT"
] | null | null | null | vyper/types/check.py | siraben/vyper | fc9348b997b571e2b608e89b899362143f78d754 | [
"MIT"
] | null | null | null | vyper/types/check.py | siraben/vyper | fc9348b997b571e2b608e89b899362143f78d754 | [
"MIT"
] | null | null | null | # stub file to factor type checker into
# for now just call into existing code
from vyper.parser.parser_utils import make_setter
# Check assignment from rhs to lhs.
# For now use make_setter for its typechecking side effects
def check_assign(lhs, rhs, pos, in_function_call=False):
    """Check that ``rhs`` may be assigned to ``lhs``.

    Per the module note above, this delegates to ``make_setter`` purely
    for its type-checking side effects; the value it builds is
    discarded.

    :param lhs: assignment target node
    :param rhs: value node being assigned
    :param pos: source position, forwarded for error reporting
    :param in_function_call: forwarded to make_setter unchanged
    """
    make_setter(lhs, rhs,
                location='memory',
                pos=pos,
                in_function_call=in_function_call)
# TODO Refactor into an actual type-checking function
| 32 | 59 | 0.70625 |
623cc019fa11e741159d1fd2029685050df610f4 | 194 | py | Python | checkScriptRunning.py | destro-2698/CowinPortalOTPRequestBot | 70041c19e10a18ccc87dbb71f9a9be567e439340 | [
"Apache-2.0"
] | null | null | null | checkScriptRunning.py | destro-2698/CowinPortalOTPRequestBot | 70041c19e10a18ccc87dbb71f9a9be567e439340 | [
"Apache-2.0"
] | null | null | null | checkScriptRunning.py | destro-2698/CowinPortalOTPRequestBot | 70041c19e10a18ccc87dbb71f9a9be567e439340 | [
"Apache-2.0"
] | null | null | null | import subprocess
pytonProcess = subprocess.check_output("ps -ef | grep botStart.py",shell=True).decode()
pytonProcess = pytonProcess.split('\n')
for process in pytonProcess:
print(process)
| 21.555556 | 87 | 0.768041 |
23ac69d38d825576ceeb77e3bc35b9b6e3cab86e | 3,082 | py | Python | visualization/mpl_curve3d_tangents.py | orbingol/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 48 | 2017-12-14T09:54:48.000Z | 2020-03-30T13:34:44.000Z | visualization/mpl_curve3d_tangents.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 7 | 2020-05-27T04:27:24.000Z | 2021-05-25T16:11:39.000Z | visualization/mpl_curve3d_tangents.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 37 | 2017-10-14T08:11:11.000Z | 2020-05-04T02:51:58.000Z | # -*- coding: utf-8 -*-
"""
Visualization Examples for the NURBS-Python Package
Released under The MIT License
Developed by Onur Rauf Bingol (c) 2018
Creates a 3-dimensional curve and plots tangent vectors
"""
import os
from geomdl import BSpline
from geomdl import utilities
from geomdl import exchange
from geomdl import operations
import numpy as np
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Fix file path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
#
# Curve Evaluation
#
# Create a BSpline curve instance
curve = BSpline.Curve()
# Set degree
curve.degree = 3
# Set control points
curve.ctrlpts = exchange.import_txt("../curve3d/ex_curve3d01.cpt")
# Auto-generate knot vector
curve.knotvector = utilities.generate_knot_vector(curve.degree, len(curve.ctrlpts))
# Set evaluation delta
curve.delta = 0.001
# Evaulate curve
curve.evaluate()
#
# Tangent Vector Evaluation
#
# Store tangent vectors in a list for plotting
curvetan = []
# Evaluate curve tangent at u = 0.0175
ct1 = operations.tangent(curve, 0.0175, normalize=True)
curvetan.append(ct1)
# Evaluate curve tangent at u = 0.075
ct2 = operations.tangent(curve, 0.075, normalize=True)
curvetan.append(ct2)
# Evaluate curve tangent at u = 0.375
ct3 = operations.tangent(curve, 0.375, normalize=True)
curvetan.append(ct3)
# Evaluate curve tangent at u = 0.535
ct4 = operations.tangent(curve, 0.535, normalize=True)
curvetan.append(ct4)
# Evaluate curve tangent at u = 0.65
ct5 = operations.tangent(curve, 0.65, normalize=True)
curvetan.append(ct5)
# Evaluate curve tangent at u = 0.85
ct6 = operations.tangent(curve, 0.85, normalize=True)
curvetan.append(ct6)
# Evaluate curve tangent at u = 0.975
ct7 = operations.tangent(curve, 0.975, normalize=True)
curvetan.append(ct7)
#
# Control Points, Curve and Tangent Vector Plotting using Matplotlib
#
# Arrange control points and evaluated curve points for plotting
ctrlpts = np.array(curve.ctrlpts)
curvepts = np.array(curve.evalpts)
# Convert tangent list into a NumPy array
ctarr = np.array(curvetan)
# Draw the control points polygon, the 3D curve and the tangent vectors
fig = plt.figure(figsize=(10.67, 8), dpi=96)
ax = Axes3D(fig)
# Plot 3D lines
ax.plot(ctrlpts[:, 0], ctrlpts[:, 1], ctrlpts[:, 2], color='black', linestyle='-.', marker='o')
ax.plot(curvepts[:, 0], curvepts[:, 1], curvepts[:, 2], color='green', linestyle='-')
# Plot tangent vectors
ax.quiver(ctarr[:, 0, 0], ctarr[:, 0, 1], ctarr[:, 0, 2], ctarr[:, 1, 0], ctarr[:, 1, 1], ctarr[:, 1, 2], color='blue')
# Add legend to 3D plot, @ref: https://stackoverflow.com/a/20505720
ctrlpts_proxy = matplotlib.lines.Line2D([0], [0], linestyle='-.', color='black', marker='o')
curvepts_proxy = matplotlib.lines.Line2D([0], [0], linestyle='none', color='green', marker='o')
tangent_proxy = matplotlib.lines.Line2D([0], [0], linestyle='none', color='blue', marker='>')
ax.legend([ctrlpts_proxy, curvepts_proxy, tangent_proxy], ['Control Points', 'Curve', 'Tangents'], numpoints=1)
# Display the 3D plot
plt.show()
| 27.517857 | 119 | 0.725503 |
a25a8d9a1889341615e9530faa698a2fc4aba4f8 | 1,900 | py | Python | src/use_cases/user/register.py | WebisD/chat-irc-protocol | 6720d1789a366bfd7943b81c7c84cb0941c66e80 | [
"MIT"
] | null | null | null | src/use_cases/user/register.py | WebisD/chat-irc-protocol | 6720d1789a366bfd7943b81c7c84cb0941c66e80 | [
"MIT"
] | null | null | null | src/use_cases/user/register.py | WebisD/chat-irc-protocol | 6720d1789a366bfd7943b81c7c84cb0941c66e80 | [
"MIT"
] | 3 | 2021-06-03T12:27:27.000Z | 2021-06-14T22:48:36.000Z | import sys
from entities.ent_user import *
from util import *
__all__ = ['Register']
class Register:
    """Class to register the user in the server"""

    @staticmethod
    def response(user, server, args) -> User:
        """Performs the register of user in the server

        :param user: connected user issuing the register command
        :param server: server holding registered users and the repository
        :param args: ``[name, nickname, password]``
        :returns: user obj with the changes
        """
        try:
            # Guard before indexing: the previous version indexed
            # args[0..2] directly, so a short args list raised
            # IndexError and the except handler then crashed again on
            # args[0].
            if len(args) < 3:
                raise Exception("Invalid command")
            name = args[0]
            nickname = args[1]
            password = args[2]

            if user.is_logged:
                raise Exception("Already logged")
            if name == '' or nickname == '' or password == '':
                raise Exception("Invalid command")

            user_to_register = User(name, nickname, password, user.connection_socket)

            # Nicknames must be unique among registered users.
            for registered_user in server.registered_users:
                if registered_user.nickname == nickname:
                    user.connection_socket.send(
                        (PrettyPrint.pretty_print(
                            "Client '" + str(name) + "' is already registered \n\n", Colors.FAIL
                        )).encode()
                    )
                    return user

            server.registered_users.append(user_to_register)
            # Persist the new user so registration survives restarts.
            server.user_repository.put(user_to_register.to_dto())

            user.connection_socket.send(
                (PrettyPrint.pretty_print(
                    "Client " + str(name) + " successfully registered \n\n", Colors.OKGREEN
                )).encode()
            )

            return user
        except Exception as exp:
            print(exp.with_traceback(sys.exc_info()[2]))
            # args may be empty here; fall back to a placeholder so the
            # error report itself cannot raise.
            client_name = str(args[0]) if args else '<unknown>'
            user.connection_socket.send(
                (PrettyPrint.pretty_print("Error in register client '" + client_name + "'\n\n", Colors.FAIL)).encode())
            return user
356e37450a839267575091ce157496af99770eb2 | 3,524 | py | Python | source/hive-year1-summary.py | jdwapman/docs | c6d9979803c7b91ddfe02b61018d30038d47abf8 | [
"Apache-2.0"
] | null | null | null | source/hive-year1-summary.py | jdwapman/docs | c6d9979803c7b91ddfe02b61018d30038d47abf8 | [
"Apache-2.0"
] | null | null | null | source/hive-year1-summary.py | jdwapman/docs | c6d9979803c7b91ddfe02b61018d30038d47abf8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# run as
# ./hive-year1-summary.py && open darpa.pdf
import os
import tempfile
import subprocess
import re
# Collect the per-application report sources, excluding meta/template pages.
# hive_scaling is excluded from the glob and appended explicitly so it lands
# at the end of the summary (and still makes it into the PDF).
files = sorted([f for f in os.listdir('.')
                if ((f.startswith('hive_') and
                     f.endswith('.html.md') and
                     f != 'hive_year1_summary.html.md' and
                     f != 'hive_template.html.md' and
                     f != 'hive_scaling.html.md' and
                     f != 'hive_sandbox.html.md'))])
# I put this back since it doesn't get included in the PDF otherwise
files.append('hive_scaling.html.md')

# BUGFIX: the summary file was previously opened twice -- once via
# print(..., file=open(..., 'w')), which leaked the handle (never closed, so
# the header could still sit in an unflushed buffer), and once more in append
# mode.  A single ``with`` block now owns the handle for the whole write.
with open('hive_year1_summary.html.md', 'w') as dest:
    print("""---
title: HIVE Year 1 Report: Executive Summary
toc_footers:
- <a href='https://github.com/gunrock/gunrock'>Gunrock&#58; GPU Graph Analytics</a>
- Gunrock &copy; 2018 The Regents of the University of California.
search: true
full_length: true
---

# HIVE Year 1 Report: Executive Summary

This report is located online at the following URL: <https://gunrock.github.io/docs/hive_year1_summary.html>.

Herein UC Davis produces the following three deliverables that it promised to deliver in Year 1:

1. **7--9 kernels running on a single GPU on DGX-1**. The PM had indicated that the application targets are the graph-specific kernels of larger applications, and that our effort should target these kernels. These kernels run on one GPU of the DGX-1. These kernels are in Gunrock's GitHub repository as standalone kernels. While we committed to delivering 7--9 kernels, as of the date of this addendum, we deliver all 11 v0 kernels.
2. **(High-level) performance analysis of these kernels**. In this report we analyze the performance of these kernels.
3. **Separable communication benchmark predicting latency and throughput for a multi-GPU implementation**. This report (and associated code, also in the Gunrock GitHub repository) analyzes the DGX-1's communication capabilities and projects how single-GPU benchmarks will scale on this machine to 8 GPUs.

Specific notes on applications and scaling follow:
""", file=dest)

    # Append a linked title + "Summary of Results" extract for each report.
    for f in files:
        fname = f[:-3]  # strip trailing ".md": links point at the .html page
        with open(f) as src:  # renamed from ``file`` (shadowed the builtin)
            contents = src.read()
        # First top-level heading is the report title.
        title = re.search('\n# (.*)\n', contents).group(1)
        # Grab everything between "## Summary of Results" and the next heading.
        summary = re.search(
            '\n## Summary of Results\n\n([^#]*)\n\n#', contents).group(1)
        dest.write(f'## {title} \n**[{title}](https://gunrock.github.io/docs/{fname})** \n{summary}\n\n')

# The summary itself leads the PDF, followed by every individual report.
files.insert(0, 'hive_year1_summary.html.md')

pandoc_cmd = ['pandoc',
              '--template=darpa-template.tex',
              '--variable', 'title=A Commodity Performance Baseline for HIVE Graph Applications:\\\\Year 1 Report',
              '--variable', 'subtitle=(Addendum, 16 November 2018)',
              '--variable', 'author=Ben Johnson \\and Weitang Liu \\and Agnieszka Łupińska \\and Muhammad Osama \\and John D. Owens \\and Yuechao Pan \\and Leyuan Wang \\and Xiaoyun Wang \\and Carl Yang',
              '--variable', 'postauthor=UC Davis',
              '--variable', 'documentclass=memoir',
              '--variable', 'fontsize=10pt',
              '--variable', 'classoption=oneside',
              # '--variable', 'classoption=article',
              '--variable', 'toc-depth=0',
              '--toc',
              '-o', 'darpa.pdf',
              # '-o', 'darpa.tex',
              ]
pandoc_cmd.extend(files)
print(pandoc_cmd)
# NOTE(review): exit status of pandoc is not checked (no check=True) --
# behaviour kept; failures are only visible in pandoc's own stderr.
subprocess.run(pandoc_cmd)
| 43.506173 | 432 | 0.644722 |
e3597be4a6d8b0ec372a969ef8e550a9f6bf1204 | 1,254 | py | Python | Leetcode-Practices-By-Topic/Sorting/56.merge-intervals.py | billzhonggz/Algorithms | ca6c469576765caa1f5796c85e44c8dc00b05171 | [
"MIT"
] | null | null | null | Leetcode-Practices-By-Topic/Sorting/56.merge-intervals.py | billzhonggz/Algorithms | ca6c469576765caa1f5796c85e44c8dc00b05171 | [
"MIT"
] | null | null | null | Leetcode-Practices-By-Topic/Sorting/56.merge-intervals.py | billzhonggz/Algorithms | ca6c469576765caa1f5796c85e44c8dc00b05171 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=56 lang=python3
#
# [56] Merge Intervals
#
# @lc code=start
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge all overlapping intervals and return the merged list.

        BUGFIX: the previous attempt indexed loop counters as if they were
        intervals (``j[0]`` with ``j`` an int) and never merged anything.

        Approach: sort by start point, then sweep once.  An interval
        overlaps the last merged one iff its start is <= that interval's
        end; in that case extend the end, otherwise open a new interval.
        O(n log n) time, O(n) output space.

        :param intervals: list of [start, end] pairs (start <= end)
        :returns: non-overlapping intervals in ascending order
        """
        merged: List[List[int]] = []
        for start, end in sorted(intervals):
            if merged and start <= merged[-1][1]:
                # Overlap with the previous interval: extend its right edge.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged
# @lc code=end
| 35.828571 | 114 | 0.515949 |
c173a56d7e182771eba4f62d69a76073dd981cca | 1,301 | py | Python | sissy_university/data_wrangler.py | ruffiana/SissyUniversity | 06df8f07844742429831ab004ff5fc0489c1a7f9 | [
"BSD-2-Clause"
] | 6 | 2020-11-06T03:45:25.000Z | 2021-10-13T07:34:21.000Z | sissy_university/data_wrangler.py | ruffiana/SissyUniversity | 06df8f07844742429831ab004ff5fc0489c1a7f9 | [
"BSD-2-Clause"
] | null | null | null | sissy_university/data_wrangler.py | ruffiana/SissyUniversity | 06df8f07844742429831ab004ff5fc0489c1a7f9 | [
"BSD-2-Clause"
] | null | null | null | """
Collection of tools and functions for managing/updating local data files
**Author:**
Ruffiana, ruffiana.plays@gmail.com, 9/28/2020
"""
import json
from pprint import pprint
try:
from .data import Json
from .const import *
except ImportError:
from data import Json
from const import *
def upate_imageUrl_values():
    """
    Rewrite every category .json data file so each record's ``imgUrl``
    holds the image id mapped for it in ``image_map.json``.

    **Arguments:**

        None

    **Keyword Arguments:**

        None

    **Author:**

        Ruffiana, ruffiana.plays@gmail.com, 9/27/2020
    """
    id_lookup = Json.read_json(PATH_DATA / "image_map.json")

    # Which key of the image map feeds which category data file.
    targets = {
        'MajorsImages': DATA_MAJORS,
        'ClassesImages': DATA_CLASSES,
        'PartnersImages': DATA_PARTNERS,
        'ClubsImages': DATA_CLUBS,
        'PunishmentsImages': DATA_PUNISHMENTS,
    }

    for lookup_key, data_path in targets.items():
        records = Json.read_json(data_path)
        images_for_key = id_lookup.get(lookup_key)

        # Stamp each record with its mapped image id (string-typed).
        for record_id in records:
            records[record_id]["imgUrl"] = str(images_for_key.get(record_id))

        pprint(records)
        Json.write_json(records, data_path)
if __name__ == "__main__":
pass
# upate_imageUrl_values() | 20.983871 | 72 | 0.614143 |
7af6474171814db6685d94458fd5c5475f065b5b | 65,055 | py | Python | mongoengine/fields.py | malderete/mongoengine | 2803404360332d1e2c951415b7a72402bce8b113 | [
"MIT"
] | null | null | null | mongoengine/fields.py | malderete/mongoengine | 2803404360332d1e2c951415b7a72402bce8b113 | [
"MIT"
] | null | null | null | mongoengine/fields.py | malderete/mongoengine | 2803404360332d1e2c951415b7a72402bce8b113 | [
"MIT"
] | null | null | null | import datetime
import decimal
import itertools
import re
import time
import urllib2
import uuid
import warnings
from operator import itemgetter
try:
import dateutil
except ImportError:
dateutil = None
else:
import dateutil.parser
import pymongo
import gridfs
from bson import Binary, DBRef, SON, ObjectId
from mongoengine.errors import ValidationError
from mongoengine.python_support import (PY3, bin_type, txt_type,
str_types, StringIO)
from base import (BaseField, ComplexBaseField, ObjectIdField, GeoJsonBaseField,
get_document, BaseDocument)
from queryset import DO_NOTHING, QuerySet
from document import Document, EmbeddedDocument
from connection import get_db, DEFAULT_CONNECTION_NAME
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = [
'StringField', 'URLField', 'EmailField', 'IntField', 'LongField',
'FloatField', 'DecimalField', 'BooleanField', 'DateTimeField',
'ComplexDateTimeField', 'EmbeddedDocumentField', 'ObjectIdField',
'GenericEmbeddedDocumentField', 'DynamicField', 'ListField',
'SortedListField', 'DictField', 'MapField', 'ReferenceField',
'CachedReferenceField', 'GenericReferenceField', 'BinaryField',
'GridFSError', 'GridFSProxy', 'FileField', 'ImageGridFsProxy',
'ImproperlyConfigured', 'ImageField', 'GeoPointField', 'PointField',
'LineStringField', 'PolygonField', 'SequenceField', 'UUIDField',
'MultiPointField', 'MultiLineStringField', 'MultiPolygonField',
'GeoJsonBaseField']
RECURSIVE_REFERENCE_CONSTANT = 'self'
class StringField(BaseField):
    """A unicode string field with optional regex and length validation."""

    def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
        # Compile the validation regex once, at field definition time.
        self.regex = re.compile(regex) if regex else None
        self.max_length, self.min_length = max_length, min_length
        super(StringField, self).__init__(**kwargs)

    def to_python(self, value):
        # Unicode passes through; otherwise try a best-effort UTF-8 decode.
        if isinstance(value, unicode):
            return value
        try:
            return value.decode('utf-8')
        except:
            return value

    def validate(self, value):
        if not isinstance(value, basestring):
            self.error('StringField only accepts string values')

        if self.max_length is not None and len(value) > self.max_length:
            self.error('String value is too long')

        if self.min_length is not None and len(value) < self.min_length:
            self.error('String value is too short')

        if self.regex is not None and self.regex.match(value) is None:
            self.error('String value did not match validation regex')

    def lookup_member(self, member_name):
        # Plain strings have no sub-members to query into.
        return None

    def prepare_query_value(self, op, value):
        if not isinstance(op, basestring):
            return value

        if op.lstrip('i') in ('startswith', 'endswith', 'contains', 'exact'):
            # A leading 'i' selects the case-insensitive variant.
            flags = re.IGNORECASE if op.startswith('i') else 0
            op = op.lstrip('i')
            # Anchored template per operator; 'contains' keeps the bare form.
            templates = {'startswith': r'^%s',
                         'endswith': r'%s$',
                         'exact': r'^%s$'}
            template = templates.get(op, r'%s')
            # escape unsafe characters which could lead to a re.error
            value = re.compile(template % re.escape(value), flags)
        return value
class URLField(StringField):
    """A field that validates input as an URL.

    :param verify_exists: issue an HTTP request during validation to check
        the URL resolves (deprecated -- slow and a security risk, see the
        warning emitted in :meth:`validate`).
    :param url_regex: optional pre-compiled regex used instead of the
        default scheme/host/port/path pattern below.

    .. versionadded:: 0.3
    """

    _URL_REGEX = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        # domain...
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    def __init__(self, verify_exists=False, url_regex=None, **kwargs):
        self.verify_exists = verify_exists
        self.url_regex = url_regex or self._URL_REGEX
        super(URLField, self).__init__(**kwargs)

    def validate(self, value):
        # Syntactic check first; a non-matching value short-circuits so the
        # deprecated network probe below never runs on malformed input.
        if not self.url_regex.match(value):
            self.error('Invalid URL: %s' % value)
            return

        if self.verify_exists:
            warnings.warn(
                "The URLField verify_exists argument has intractable security "
                "and performance issues. Accordingly, it has been deprecated.",
                DeprecationWarning)
            try:
                # Any failure (DNS, HTTP error, timeout) counts as broken.
                request = urllib2.Request(value)
                urllib2.urlopen(request)
            except Exception, e:
                self.error('This URL appears to be a broken link: %s' % e)
class EmailField(StringField):
    """A field that validates input as an E-Mail-Address.

    .. versionadded:: 0.4
    """

    EMAIL_REGEX = re.compile(
        # dot-atom
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
        # quoted-string
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"'
        # domain (max length of an ICAAN TLD is 22 characters)
        r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,253}[A-Z0-9])?\.)+[A-Z]{2,22}$', re.IGNORECASE
    )

    def validate(self, value):
        # Shape check first, then delegate to StringField.validate for any
        # regex / min_length / max_length options configured on the field.
        if not EmailField.EMAIL_REGEX.match(value):
            self.error('Invalid Mail-address: %s' % value)
        super(EmailField, self).validate(value)
class IntField(BaseField):
    """A 32-bit integer field.

    :param min_value: lower bound (inclusive) enforced by :meth:`validate`.
    :param max_value: upper bound (inclusive) enforced by :meth:`validate`.
    """

    def __init__(self, min_value=None, max_value=None, **kwargs):
        self.min_value, self.max_value = min_value, max_value
        super(IntField, self).__init__(**kwargs)

    def to_python(self, value):
        """Best-effort conversion to ``int``; unconvertible values are
        returned unchanged so :meth:`validate` can report them."""
        try:
            value = int(value)
        except (TypeError, ValueError):
            # BUGFIX: TypeError (e.g. int(None)) previously escaped here,
            # breaking the best-effort contract the other fields follow.
            pass
        return value

    def validate(self, value):
        try:
            value = int(value)
        except (TypeError, ValueError):
            # BUGFIX: was a bare ``except`` which also swallowed
            # SystemExit / KeyboardInterrupt.
            self.error('%s could not be converted to int' % value)

        if self.min_value is not None and value < self.min_value:
            self.error('Integer value is too small')

        if self.max_value is not None and value > self.max_value:
            self.error('Integer value is too large')

    def prepare_query_value(self, op, value):
        # None is a valid query value (e.g. exists-style comparisons).
        if value is None:
            return value

        return int(value)
class LongField(BaseField):
    """A 64-bit integer field.

    :param min_value: lower bound (inclusive) enforced by :meth:`validate`.
    :param max_value: upper bound (inclusive) enforced by :meth:`validate`.
    """

    def __init__(self, min_value=None, max_value=None, **kwargs):
        self.min_value, self.max_value = min_value, max_value
        super(LongField, self).__init__(**kwargs)

    def to_python(self, value):
        """Best-effort conversion to ``long``; unconvertible values are
        returned unchanged so :meth:`validate` can report them."""
        try:
            value = long(value)
        except (TypeError, ValueError):
            # BUGFIX: TypeError (e.g. long(None)) previously escaped here,
            # breaking the best-effort contract the other fields follow.
            pass
        return value

    def validate(self, value):
        try:
            value = long(value)
        except (TypeError, ValueError):
            # BUGFIX: was a bare ``except`` which also swallowed
            # SystemExit / KeyboardInterrupt.
            self.error('%s could not be converted to long' % value)

        if self.min_value is not None and value < self.min_value:
            self.error('Long value is too small')

        if self.max_value is not None and value > self.max_value:
            self.error('Long value is too large')

    def prepare_query_value(self, op, value):
        # None is a valid query value (e.g. exists-style comparisons).
        if value is None:
            return value

        return long(value)
class FloatField(BaseField):
    """A floating point number field.

    :param min_value: lower bound (inclusive) enforced by :meth:`validate`.
    :param max_value: upper bound (inclusive) enforced by :meth:`validate`.
    """

    def __init__(self, min_value=None, max_value=None, **kwargs):
        self.min_value, self.max_value = min_value, max_value
        super(FloatField, self).__init__(**kwargs)

    def to_python(self, value):
        """Best-effort conversion to ``float``; unconvertible values are
        returned unchanged so :meth:`validate` can report them."""
        try:
            value = float(value)
        except (TypeError, ValueError):
            # BUGFIX: only ValueError was caught before, so float(None) and
            # other non-numeric types raised instead of falling through to
            # validation like the other numeric fields.
            pass
        return value

    def validate(self, value):
        # Accept ints transparently by promoting them to float first.
        if isinstance(value, int):
            value = float(value)
        if not isinstance(value, float):
            self.error('FloatField only accepts float values')

        if self.min_value is not None and value < self.min_value:
            self.error('Float value is too small')

        if self.max_value is not None and value > self.max_value:
            self.error('Float value is too large')

    def prepare_query_value(self, op, value):
        # None is a valid query value (e.g. exists-style comparisons).
        if value is None:
            return value

        return float(value)
class DecimalField(BaseField):
    """A fixed-point decimal number field.

    Values are stored as floats by default (lossy for some decimals) or as
    strings when ``force_string`` is set; either way they are quantized to
    ``precision`` decimal places using the configured ``rounding`` mode.

    .. versionchanged:: 0.8
    .. versionadded:: 0.3
    """

    def __init__(self, min_value=None, max_value=None, force_string=False,
                 precision=2, rounding=decimal.ROUND_HALF_UP, **kwargs):
        """
        :param min_value: Validation rule for the minimum acceptable value.
        :param max_value: Validation rule for the maximum acceptable value.
        :param force_string: Store as a string.
        :param precision: Number of decimal places to store.
        :param rounding: The rounding rule from the python decimal library:

            - decimal.ROUND_CEILING (towards Infinity)
            - decimal.ROUND_DOWN (towards zero)
            - decimal.ROUND_FLOOR (towards -Infinity)
            - decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
            - decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
            - decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
            - decimal.ROUND_UP (away from zero)
            - decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)

            Defaults to: ``decimal.ROUND_HALF_UP``

        """
        self.min_value = min_value
        self.max_value = max_value
        self.force_string = force_string
        self.precision = precision
        self.rounding = rounding

        super(DecimalField, self).__init__(**kwargs)

    def to_python(self, value):
        # None passes through; everything else is quantized to ``precision``
        # decimal places with the configured rounding mode.
        if value is None:
            return value

        # Convert to string for python 2.6 before casting to Decimal
        try:
            value = decimal.Decimal("%s" % value)
        except decimal.InvalidOperation:
            # Unparseable input is returned untouched; validate() reports it.
            return value
        return value.quantize(decimal.Decimal(".%s" % ("0" * self.precision)), rounding=self.rounding)

    def to_mongo(self, value, use_db_field=True):
        # ``use_db_field`` is accepted for interface symmetry with the other
        # fields but has no effect here.
        if value is None:
            return value
        if self.force_string:
            return unicode(value)
        return float(self.to_python(value))

    def validate(self, value):
        if not isinstance(value, decimal.Decimal):
            if not isinstance(value, basestring):
                # Stringify first so floats convert the same way on
                # Python 2.6 as on later versions.
                value = unicode(value)
            try:
                value = decimal.Decimal(value)
            except Exception, exc:
                self.error('Could not convert value to decimal: %s' % exc)

        if self.min_value is not None and value < self.min_value:
            self.error('Decimal value is too small')

        if self.max_value is not None and value > self.max_value:
            self.error('Decimal value is too large')

    def prepare_query_value(self, op, value):
        return self.to_mongo(value)
class BooleanField(BaseField):
    """A boolean field type.

    .. versionadded:: 0.1.2
    """

    def to_python(self, value):
        # Best-effort coercion; values that refuse to become bool are
        # returned untouched and rejected later by validate().
        try:
            return bool(value)
        except ValueError:
            return value

    def validate(self, value):
        # Only genuine bools pass -- truthy non-bools are rejected.
        if isinstance(value, bool):
            return
        self.error('BooleanField only accepts boolean values')
class DateTimeField(BaseField):
    """A datetime field.

    Uses the python-dateutil library if available alternatively use time.strptime
    to parse the dates.  Note: python-dateutil's parser is fully featured and when
    installed you can utilise it to convert varying types of date formats into valid
    python datetime objects.

    Note: Microseconds are rounded to the nearest millisecond.
      Pre UTC microsecond support is effectively broken.
      Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
      need accurate microsecond support.
    """

    def validate(self, value):
        # Round-trip through to_mongo(): anything that cannot be coerced
        # into a datetime/date is a validation error.
        new_value = self.to_mongo(value)
        if not isinstance(new_value, (datetime.datetime, datetime.date)):
            self.error(u'cannot parse date "%s"' % value)

    def to_mongo(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            # Promote a bare date to midnight of that day.
            return datetime.datetime(value.year, value.month, value.day)
        if callable(value):
            # e.g. a callable default such as datetime.utcnow -- call it now.
            return value()

        # Only strings can be parsed; anything else coerces to None here
        # (which validate() then reports).
        if not isinstance(value, basestring):
            return None

        # Attempt to parse a datetime:
        if dateutil:
            try:
                return dateutil.parser.parse(value)
            except (TypeError, ValueError):
                return None

        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                return None
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        # Fallback parsing tiers: full timestamp, then minutes-resolution,
        # then date-only -- each tried in turn before giving up with None.
        try:  # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value,
                                     '%Y-%m-%d %H:%M:%S')[:6], **kwargs)
        except ValueError:
            try:  # Try without seconds.
                return datetime.datetime(*time.strptime(value,
                                         '%Y-%m-%d %H:%M')[:5], **kwargs)
            except ValueError:  # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value,
                                             '%Y-%m-%d')[:3], **kwargs)
                except ValueError:
                    return None

    def prepare_query_value(self, op, value):
        return self.to_mongo(value)
class ComplexDateTimeField(StringField):
    """
    ComplexDateTimeField handles microseconds exactly instead of rounding
    like DateTimeField does.

    Derives from a StringField so you can do `gte` and `lte` filtering by
    using lexicographical comparison when filtering / sorting strings.

    The stored string has the following format:

        YYYY,MM,DD,HH,MM,SS,NNNNNN

    Where NNNNNN is the number of microseconds of the represented `datetime`.
    The `,` as the separator can be easily modified by passing the `separator`
    keyword when initializing the field.

    .. versionadded:: 0.5
    """

    def __init__(self, separator=',', **kwargs):
        """
        :param separator: character placed between the date/time components
            in the stored string.
        """
        self.names = ['year', 'month', 'day', 'hour', 'minute', 'second',
                      'microsecond']
        # BUGFIX: ``separator`` used to be stored only under a misspelled
        # attribute (``separtor``) and the conversion helpers hard-coded
        # ',', so the documented keyword silently had no effect.  It is now
        # honoured; the misspelled attribute is kept as an alias in case
        # external code read it.
        self.separator = separator
        self.separtor = separator
        super(ComplexDateTimeField, self).__init__(**kwargs)

    def _leading_zero(self, number):
        """
        Converts the given number to a string.

        If it has only one digit, a leading zero so as it has always at least
        two digits.
        """
        if int(number) < 10:
            return "0%s" % number
        else:
            return str(number)

    def _convert_from_datetime(self, val):
        """
        Convert a `datetime` object to a string representation (which will be
        stored in MongoDB). This is the reverse function of
        `_convert_from_string`.

        >>> a = datetime(2011, 6, 8, 20, 26, 24, 192284)
        >>> ComplexDateTimeField()._convert_from_datetime(a)
        '2011,06,08,20,26,24,192284'
        """
        data = []
        for name in self.names:
            data.append(self._leading_zero(getattr(val, name)))
        return self.separator.join(data)

    def _convert_from_string(self, data):
        """
        Convert a string representation to a `datetime` object (the object you
        will manipulate). This is the reverse function of
        `_convert_from_datetime`.

        >>> a = '2011,06,08,20,26,24,192284'
        >>> ComplexDateTimeField()._convert_from_string(a)
        datetime.datetime(2011, 6, 8, 20, 26, 24, 192284)
        """
        data = data.split(self.separator)
        data = map(int, data)

        values = {}
        for i in range(7):
            values[self.names[i]] = data[i]

        return datetime.datetime(**values)

    def __get__(self, instance, owner):
        # NOTE: an unset field reads back as the *current* time rather than
        # None -- long-standing behaviour, kept for compatibility.
        data = super(ComplexDateTimeField, self).__get__(instance, owner)
        if data is None:
            return datetime.datetime.now()
        if isinstance(data, datetime.datetime):
            return data
        return self._convert_from_string(data)

    def __set__(self, instance, value):
        # Store the serialised string; falsy values (None) pass through.
        value = self._convert_from_datetime(value) if value else value
        return super(ComplexDateTimeField, self).__set__(instance, value)

    def validate(self, value):
        value = self.to_python(value)
        if not isinstance(value, datetime.datetime):
            self.error('Only datetime objects may used in a '
                       'ComplexDateTimeField')

    def to_python(self, value):
        # Best effort: unparseable values are returned unchanged so
        # validate() can report them.
        original_value = value
        try:
            return self._convert_from_string(value)
        except:
            return original_value

    def to_mongo(self, value):
        value = self.to_python(value)
        return self._convert_from_datetime(value)

    def prepare_query_value(self, op, value):
        return self._convert_from_datetime(value)
class EmbeddedDocumentField(BaseField):
    """An embedded document field - with a declared document_type.

    Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
    """

    def __init__(self, document_type, **kwargs):
        # ``document_type`` may be a class or a string name (including the
        # 'self' sentinel); strings are resolved lazily by the
        # ``document_type`` property below.
        if not isinstance(document_type, basestring):
            if not issubclass(document_type, EmbeddedDocument):
                self.error('Invalid embedded document class provided to an '
                           'EmbeddedDocumentField')
        self.document_type_obj = document_type
        super(EmbeddedDocumentField, self).__init__(**kwargs)

    @property
    def document_type(self):
        # Resolve string references on first access; 'self' points back at
        # the document that owns this field (recursive embedding).
        if isinstance(self.document_type_obj, basestring):
            if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
                self.document_type_obj = self.owner_document
            else:
                self.document_type_obj = get_document(self.document_type_obj)
        return self.document_type_obj

    def to_python(self, value):
        # Raw SON from the driver is inflated into the declared document
        # class; already-inflated instances pass through.
        if not isinstance(value, self.document_type):
            return self.document_type._from_son(value)
        return value

    def to_mongo(self, value, use_db_field=True, fields=[]):
        # NOTE(review): the mutable ``fields=[]`` default is shared across
        # calls; it appears to be read-only here (passed straight through),
        # never mutated -- confirm before relying on it.
        if not isinstance(value, self.document_type):
            return value
        return self.document_type.to_mongo(value, use_db_field,
                                           fields=fields)

    def validate(self, value, clean=True):
        """Make sure that the document instance is an instance of the
        EmbeddedDocument subclass provided when the document was defined.
        """
        # Using isinstance also works for subclasses of self.document
        if not isinstance(value, self.document_type):
            self.error('Invalid embedded document instance provided to an '
                       'EmbeddedDocumentField')
        self.document_type.validate(value, clean)

    def lookup_member(self, member_name):
        # Allows dotted-field queries to resolve members of the embedded doc.
        return self.document_type._fields.get(member_name)

    def prepare_query_value(self, op, value):
        return self.to_mongo(value)
class GenericEmbeddedDocumentField(BaseField):
    """A generic embedded document field - allows any
    :class:`~mongoengine.EmbeddedDocument` to be stored.

    Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.

    .. note ::
        You can use the choices param to limit the acceptable
        EmbeddedDocument types
    """

    def prepare_query_value(self, op, value):
        return self.to_mongo(value)

    def to_python(self, value):
        # Inflate raw SON back into the class recorded under '_cls'.
        if isinstance(value, dict):
            embedded_cls = get_document(value['_cls'])
            return embedded_cls._from_son(value)
        return value

    def validate(self, value, clean=True):
        if not isinstance(value, EmbeddedDocument):
            self.error('Invalid embedded document instance provided to an '
                       'GenericEmbeddedDocumentField')

        value.validate(clean=clean)

    def to_mongo(self, document, use_db_field=True):
        if document is None:
            return None

        data = document.to_mongo(use_db_field)
        # Record the concrete class so to_python() can round-trip it.
        if '_cls' not in data:
            data['_cls'] = document._class_name
        return data
class DynamicField(BaseField):
    """A truly dynamic field type capable of handling different and varying
    types of data.

    Used by :class:`~mongoengine.DynamicDocument` to handle dynamic data"""

    def to_mongo(self, value):
        """Convert a Python type to a MongoDB compatible type.
        """

        if isinstance(value, basestring):
            return value

        if hasattr(value, 'to_mongo'):
            cls = value.__class__
            val = value.to_mongo()
            # If we its a document thats not inherited add _cls
            if (isinstance(value, Document)):
                # Top-level documents are stored as a DBRef envelope so
                # to_python() can dereference them again.
                val = {"_ref": value.to_dbref(), "_cls": cls.__name__}
            if (isinstance(value, EmbeddedDocument)):
                val['_cls'] = cls.__name__
            return val

        if not isinstance(value, (dict, list, tuple)):
            return value

        # Normalise sequences to an index-keyed dict so a single recursive
        # pass below handles both mappings and sequences; the list ordering
        # is restored afterwards by sorting on the integer keys.
        is_list = False
        if not hasattr(value, 'items'):
            is_list = True
            value = dict([(k, v) for k, v in enumerate(value)])

        data = {}
        for k, v in value.iteritems():
            data[k] = self.to_mongo(v)

        value = data
        if is_list:  # Convert back to a list
            value = [v for k, v in sorted(data.iteritems(), key=itemgetter(0))]
        return value

    def to_python(self, value):
        # Inflate the {"_ref": ..., "_cls": ...} envelopes produced by
        # to_mongo() back into document instances.
        if isinstance(value, dict) and '_cls' in value:
            doc_cls = get_document(value['_cls'])
            if '_ref' in value:
                value = doc_cls._get_db().dereference(value['_ref'])
            return doc_cls._from_son(value)

        return super(DynamicField, self).to_python(value)

    def lookup_member(self, member_name):
        # Any member name is valid on a dynamic field.
        return member_name

    def prepare_query_value(self, op, value):
        if isinstance(value, basestring):
            # Delegate so string operators (contains, startswith, ...)
            # build the proper regex.
            from mongoengine.fields import StringField
            return StringField().prepare_query_value(op, value)
        return self.to_mongo(value)

    def validate(self, value, clean=True):
        if hasattr(value, "validate"):
            value.validate(clean=clean)
class ListField(ComplexBaseField):
    """A list field wrapping a standard field, so that multiple instances
    of the wrapped field can be stored as a list in the database.

    If using with ReferenceFields see: :ref:`one-to-many-with-listfields`

    .. note::
        Required means it cannot be empty - as the default for ListFields is []
    """

    def __init__(self, field=None, **kwargs):
        self.field = field
        kwargs.setdefault('default', lambda: [])
        super(ListField, self).__init__(**kwargs)

    def validate(self, value):
        """Make sure that a list of valid fields is being used."""
        is_sequence = isinstance(value, (list, tuple, QuerySet))
        if not is_sequence or isinstance(value, basestring):
            self.error('Only lists and tuples may be used in a list field')
        super(ListField, self).validate(value)

    def prepare_query_value(self, op, value):
        # Without a wrapped field, fall back to the generic behaviour.
        if not self.field:
            return super(ListField, self).prepare_query_value(op, value)

        if op in ('set', 'unset'):
            # Whole-list assignment: prepare each element individually
            # (strings and documents are iterable but count as scalars).
            is_iterable = (hasattr(value, '__iter__')
                           and not isinstance(value, basestring)
                           and not isinstance(value, BaseDocument))
            if is_iterable:
                return [self.field.prepare_query_value(op, v) for v in value]
        return self.field.prepare_query_value(op, value)
class SortedListField(ListField):
    """A ListField whose contents are sorted before being written to the
    database, so a sorted list is always retrieved.

    .. warning::
        There is a potential race condition when handling lists. If you set /
        save the whole list then other processes trying to save the whole list
        as well could overwrite changes. The safest way to append to a list is
        to perform a push operation.

    .. versionadded:: 0.4
    .. versionchanged:: 0.6 - added reverse keyword
    """

    _ordering = None
    _order_reverse = False

    def __init__(self, field, **kwargs):
        # ``ordering`` names an item key to sort by; ``reverse`` flips the
        # sort direction.  Both are consumed here so ListField never sees
        # them.
        self._ordering = kwargs.pop('ordering', None)
        self._order_reverse = kwargs.pop('reverse', False)
        super(SortedListField, self).__init__(field, **kwargs)

    def to_mongo(self, value):
        value = super(SortedListField, self).to_mongo(value)
        if self._ordering is None:
            return sorted(value, reverse=self._order_reverse)
        return sorted(value, key=itemgetter(self._ordering),
                      reverse=self._order_reverse)
def key_not_string(d):
    """Recursively check whether any key in *d* (or any nested dict value)
    is not a string.

    :returns: True when a non-string key is found; False otherwise.
        (BUGFIX: previously fell off the end and implicitly returned
        ``None`` in the all-strings case; it now returns an explicit
        ``False`` -- identical truthiness for existing callers.)
    """
    for k, v in d.items():
        if not isinstance(k, basestring) or (isinstance(v, dict) and key_not_string(v)):
            return True
    return False
def key_has_dot_or_dollar(d):
    """Recursively check whether any key in *d* (or any nested dict value)
    contains a ``.`` or a ``$`` -- characters MongoDB forbids in keys.

    :returns: True when such a key is found; False otherwise.
        (BUGFIX: previously fell off the end and implicitly returned
        ``None`` when no bad key existed; it now returns an explicit
        ``False`` -- identical truthiness for existing callers.)
    """
    for k, v in d.items():
        if ('.' in k or '$' in k) or (isinstance(v, dict) and key_has_dot_or_dollar(v)):
            return True
    return False
class DictField(ComplexBaseField):
    """A dictionary field wrapping a standard Python ``dict``.  Similar to
    an embedded document, but the structure is not declared up front.

    .. note::
        Required means it cannot be empty - as the default for DictFields is {}

    .. versionadded:: 0.3
    .. versionchanged:: 0.5 - Can now handle complex / varying types of data
    """

    def __init__(self, basecls=None, field=None, *args, **kwargs):
        self.field = field
        self.basecls = basecls or BaseField
        # The base class used for value fields must itself be a field type.
        if not issubclass(self.basecls, BaseField):
            self.error('DictField only accepts dict values')
        kwargs.setdefault('default', lambda: {})
        super(DictField, self).__init__(*args, **kwargs)

    def validate(self, value):
        """Make sure that a list of valid fields is being used."""
        if not isinstance(value, dict):
            self.error('Only dictionaries may be used in a DictField')

        if key_not_string(value):
            self.error("Invalid dictionary key - documents must "
                       "have only string keys")

        if key_has_dot_or_dollar(value):
            self.error('Invalid dictionary key name - keys may not contain "."'
                       ' or "$" characters')
        super(DictField, self).validate(value)

    def lookup_member(self, member_name):
        # Nested lookups produce another DictField keyed on the member name.
        return DictField(basecls=self.basecls, db_field=member_name)

    def prepare_query_value(self, op, value):
        match_operators = ('contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith',
                           'exact', 'iexact')

        if op in match_operators and isinstance(value, basestring):
            # String matching against dict values -- reuse StringField's
            # regex construction.
            return StringField().prepare_query_value(op, value)

        if hasattr(self.field, 'field'):  # complex value field, e.g. ListField
            if op in ('set', 'unset') and isinstance(value, dict):
                return dict(
                    (k, self.field.prepare_query_value(op, v))
                    for k, v in value.items())
            return self.field.prepare_query_value(op, value)

        return super(DictField, self).prepare_query_value(op, value)
class MapField(DictField):
    """A field that maps a name to a specified field type. Similar to
    a DictField, except the 'value' of each item must match the specified
    field type.

    .. versionadded:: 0.5
    """

    def __init__(self, field=None, *args, **kwargs):
        # Unlike DictField, the value field is mandatory and must be a
        # concrete field *instance* (not a class).
        if not isinstance(field, BaseField):
            self.error('Argument to MapField constructor must be a valid '
                       'field')
        super(MapField, self).__init__(field=field, *args, **kwargs)
class ReferenceField(BaseField):
"""A reference to a document that will be automatically dereferenced on
access (lazily).
Use the `reverse_delete_rule` to handle what should happen if the document
the field is referencing is deleted. EmbeddedDocuments, DictFields and
MapFields does not support reverse_delete_rule and an `InvalidDocumentError`
will be raised if trying to set on one of these Document / Field types.
The options are:
* DO_NOTHING - don't do anything (default).
* NULLIFY - Updates the reference to null.
* CASCADE - Deletes the documents associated with the reference.
* DENY - Prevent the deletion of the reference object.
* PULL - Pull the reference from a :class:`~mongoengine.fields.ListField`
of references
Alternative syntax for registering delete rules (useful when implementing
bi-directional delete rules)
.. code-block:: python
class Bar(Document):
content = StringField()
foo = ReferenceField('Foo')
Bar.register_delete_rule(Foo, 'bar', NULLIFY)
.. note ::
`reverse_delete_rule` does not trigger pre / post delete signals to be
triggered.
.. versionchanged:: 0.5 added `reverse_delete_rule`
"""
def __init__(self, document_type, dbref=False,
reverse_delete_rule=DO_NOTHING, **kwargs):
"""Initialises the Reference Field.
:param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
or as the :class:`~pymongo.objectid.ObjectId`.id .
:param reverse_delete_rule: Determines what to do when the referring
object is deleted
"""
if not isinstance(document_type, basestring):
if not issubclass(document_type, (Document, basestring)):
self.error('Argument to ReferenceField constructor must be a '
'document class or a string')
self.dbref = dbref
self.document_type_obj = document_type
self.reverse_delete_rule = reverse_delete_rule
super(ReferenceField, self).__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, basestring):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
def __get__(self, instance, owner):
"""Descriptor to allow lazy dereferencing.
"""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
value = instance._data.get(self.name)
self._auto_dereference = instance._fields[self.name]._auto_dereference
# Dereference DBRefs
if self._auto_dereference and isinstance(value, DBRef):
value = self.document_type._get_db().dereference(value)
if value is not None:
instance._data[self.name] = self.document_type._from_son(value)
return super(ReferenceField, self).__get__(instance, owner)
    def to_mongo(self, document):
        """Serialise the reference: a bare primary key, or a ``DBRef``
        when ``self.dbref`` is True.  Unsaved documents are rejected
        because they have no primary key yet.
        """
        if isinstance(document, DBRef):
            if not self.dbref:
                # Field stores bare ids: unwrap the DBRef.
                return document.id
            return document
        id_field_name = self.document_type._meta['id_field']
        id_field = self.document_type._fields[id_field_name]
        if isinstance(document, Document):
            # We need the id from the saved object to create the DBRef
            id_ = document.pk
            if id_ is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')
        else:
            # Caller passed a raw primary-key value rather than a document.
            id_ = document
        # Let the id field serialise the key (e.g. str -> ObjectId).
        id_ = id_field.to_mongo(id_)
        if self.dbref:
            collection = self.document_type._get_collection_name()
            return DBRef(collection, id_)
        return id_
    def to_python(self, value):
        """Convert a MongoDB-compatible type to a Python type.
        """
        # A bare stored id is wrapped in a DBRef so the dereferencing
        # machinery has a collection to work with; documents and existing
        # DBRefs pass through untouched.
        if (not self.dbref and
           not isinstance(value, (DBRef, Document, EmbeddedDocument))):
            collection = self.document_type._get_collection_name()
            value = DBRef(collection, self.document_type.id.to_python(value))
        return value
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
    def validate(self, value):
        """Ensure *value* is a DBRef or a saved document of the right type."""
        if not isinstance(value, (self.document_type, DBRef)):
            self.error("A ReferenceField only accepts DBRef or documents")
        # An unsaved document has no primary key and cannot be referenced.
        if isinstance(value, Document) and value.id is None:
            self.error('You can only reference documents once they have been '
                       'saved to the database')
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
class CachedReferenceField(BaseField):
    """A reference field that additionally caches a chosen subset of the
    referenced document's fields on the referring document, allowing
    pseudo-joins without a second query.
    .. versionadded:: 0.9
    """
    def __init__(self, document_type, fields=None, auto_sync=True, **kwargs):
        """Initialises the Cached Reference Field.
        :param document_type: The referenced document class, or its name as
            a string (resolved lazily).
        :param fields: A list of field names to be cached in the document.
        :param auto_sync: if True documents are auto updated.
        """
        if fields is None:
            # Avoid the shared-mutable-default-argument pitfall.
            fields = []
        if not isinstance(document_type, basestring) and \
                not issubclass(document_type, (Document, basestring)):
            self.error('Argument to CachedReferenceField constructor must be a'
                       ' document class or a string')
        self.auto_sync = auto_sync
        self.document_type_obj = document_type
        self.fields = fields
        super(CachedReferenceField, self).__init__(**kwargs)
    def start_listener(self):
        """Hook post-save of the referenced class to keep caches in sync."""
        from mongoengine import signals
        signals.post_save.connect(self.on_document_pre_save,
                                  sender=self.document_type)
    def on_document_pre_save(self, sender, document, created, **kwargs):
        """Propagate changes of cached fields to all referring documents."""
        if created:
            # A brand-new document cannot be referenced by anything yet.
            return
        # Build set__<field>__<subfield> updates for changed cached fields.
        update_kwargs = dict(
            ('set__%s__%s' % (self.name, k), v)
            for k, v in document._delta()[0].items()
            if k in self.fields)
        if update_kwargs:
            filter_kwargs = {}
            filter_kwargs[self.name] = document
            self.owner_document.objects(
                **filter_kwargs).update(**update_kwargs)
    def to_python(self, value):
        """Convert a stored dict back into a ``DBRef`` placeholder."""
        if isinstance(value, dict):
            collection = self.document_type._get_collection_name()
            value = DBRef(
                collection, self.document_type.id.to_python(value['_id']))
        return value
    @property
    def document_type(self):
        """Lazily resolve (and cache) the referenced document class."""
        if isinstance(self.document_type_obj, basestring):
            if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
                self.document_type_obj = self.owner_document
            else:
                self.document_type_obj = get_document(self.document_type_obj)
        return self.document_type_obj
    def __get__(self, instance, owner):
        if instance is None:
            # Accessed on the class rather than an instance.
            return self
        value = instance._data.get(self.name)
        self._auto_dereference = instance._fields[self.name]._auto_dereference
        # Dereference DBRefs into full documents on demand, caching the
        # result on the instance.
        if self._auto_dereference and isinstance(value, DBRef):
            value = self.document_type._get_db().dereference(value)
            if value is not None:
                instance._data[self.name] = self.document_type._from_son(value)
        return super(CachedReferenceField, self).__get__(instance, owner)
    def to_mongo(self, document):
        """Serialise as ``SON({'_id': ..., <cached fields>...})``."""
        id_field_name = self.document_type._meta['id_field']
        id_field = self.document_type._fields[id_field_name]
        if isinstance(document, Document):
            # We need the id from the saved object to create the DBRef
            id_ = document.pk
            if id_ is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')
        else:
            self.error('Only accept a document object')
        value = SON((
            ("_id", id_field.to_mongo(id_)),
        ))
        # Append the cached projection of the referenced document.
        value.update(dict(document.to_mongo(fields=self.fields)))
        return value
    def prepare_query_value(self, op, value):
        if value is None:
            return None
        if isinstance(value, Document):
            if value.pk is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')
            # Cached references are matched on the embedded '_id'.
            return {'_id': value.pk}
        raise NotImplementedError
    def validate(self, value):
        if not isinstance(value, self.document_type):
            self.error("A CachedReferenceField only accepts documents")
        if isinstance(value, Document) and value.id is None:
            self.error('You can only reference documents once they have been '
                       'saved to the database')
    def lookup_member(self, member_name):
        return self.document_type._fields.get(member_name)
    def sync_all(self):
        """
        Sync all cached fields on demand.
        Caution: this operation may be slower.
        """
        update_key = 'set__%s' % self.name
        for doc in self.document_type.objects:
            filter_kwargs = {}
            filter_kwargs[self.name] = doc
            update_kwargs = {}
            update_kwargs[update_key] = doc
            self.owner_document.objects(
                **filter_kwargs).update(**update_kwargs)
class GenericReferenceField(BaseField):
    """A reference to *any* :class:`~mongoengine.document.Document` subclass
    that will be automatically dereferenced on access (lazily).

    The stored form is ``{'_cls': <class name>, '_ref': DBRef}``.
    .. note ::
        * Any documents used as a generic reference must be registered in the
          document registry.  Importing the model will automatically register
          it.
        * You can use the choices param to limit the acceptable Document types
    .. versionadded:: 0.3
    """
    def __get__(self, instance, owner):
        if instance is None:
            # Accessed on the class rather than an instance.
            return self
        value = instance._data.get(self.name)
        self._auto_dereference = instance._fields[self.name]._auto_dereference
        # Stored form is a mapping; swap it for the real document when
        # auto-dereferencing is enabled.
        if self._auto_dereference and isinstance(value, (dict, SON)):
            instance._data[self.name] = self.dereference(value)
        return super(GenericReferenceField, self).__get__(instance, owner)
    def validate(self, value):
        """Accept documents, DBRefs or already-serialised reference dicts."""
        if not isinstance(value, (Document, DBRef, dict, SON)):
            self.error('GenericReferences can only contain documents')
        if isinstance(value, (dict, SON)):
            # A serialised generic reference must carry both keys.
            if '_ref' not in value or '_cls' not in value:
                self.error('GenericReferences can only contain documents')
        # We need the id from the saved object to create the DBRef
        elif isinstance(value, Document) and value.id is None:
            self.error('You can only reference documents once they have been'
                       ' saved to the database')
    def dereference(self, value):
        """Load and instantiate the document described by *value*."""
        doc_cls = get_document(value['_cls'])
        reference = value['_ref']
        doc = doc_cls._get_db().dereference(reference)
        if doc is not None:
            doc = doc_cls._from_son(doc)
        return doc
    def to_mongo(self, document, use_db_field=True):
        """Serialise as ``SON({'_cls': ..., '_ref': DBRef})``."""
        if document is None:
            return None
        if isinstance(document, (dict, SON)):
            # Already in stored form.
            return document
        id_field_name = document.__class__._meta['id_field']
        id_field = document.__class__._fields[id_field_name]
        if isinstance(document, Document):
            # We need the id from the saved object to create the DBRef
            id_ = document.id
            if id_ is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')
        else:
            # Caller passed a raw primary-key value rather than a document.
            id_ = document
        id_ = id_field.to_mongo(id_)
        collection = document._get_collection_name()
        ref = DBRef(collection, id_)
        return SON((
            ('_cls', document._class_name),
            ('_ref', ref)
        ))
    def prepare_query_value(self, op, value):
        if value is None:
            return None
        return self.to_mongo(value)
class BinaryField(BaseField):
    """A binary data field, stored as BSON ``Binary``.

    :param max_bytes: optional upper bound on the payload size, in bytes.
    """
    def __init__(self, max_bytes=None, **kwargs):
        self.max_bytes = max_bytes
        super(BinaryField, self).__init__(**kwargs)
    def __set__(self, instance, value):
        """Handle bytearrays in python 3.1"""
        if PY3 and isinstance(value, bytearray):
            # Coerce to bytes so the value can be BSON-encoded.
            value = bin_type(value)
        return super(BinaryField, self).__set__(instance, value)
    def to_mongo(self, value):
        return Binary(value)
    def validate(self, value):
        if not isinstance(value, (bin_type, txt_type, Binary)):
            self.error("BinaryField only accepts instances of "
                       "(%s, %s, Binary)" % (
                           bin_type.__name__, txt_type.__name__))
        if self.max_bytes is not None and len(value) > self.max_bytes:
            self.error('Binary value is too long')
class GridFSError(Exception):
    """Raised for invalid GridFS file operations, e.g. putting or writing
    to a proxy that already holds a file without deleting it first."""
class GridFSProxy(object):
    """Proxy object to handle writing and reading of files to and from GridFS
    .. versionadded:: 0.4
    .. versionchanged:: 0.5 - added optional size param to read
    .. versionchanged:: 0.6 - added collection name param
    """
    _fs = None
    def __init__(self, grid_id=None, key=None,
                 instance=None,
                 db_alias=DEFAULT_CONNECTION_NAME,
                 collection_name='fs'):
        self.grid_id = grid_id  # Store GridFS id for file
        self.key = key
        self.instance = instance
        self.db_alias = db_alias
        self.collection_name = collection_name
        self.newfile = None  # Used for partial writes
        self.gridout = None
    def __getattr__(self, name):
        # Own attributes are served directly; everything else is delegated
        # to the underlying GridOut object (filename, length, ...).
        attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
                 'collection_name', 'newfile', 'gridout')
        if name in attrs:
            return self.__getattribute__(name)
        obj = self.get()
        if hasattr(obj, name):
            return getattr(obj, name)
        raise AttributeError
    def __get__(self, instance, value):
        return self
    def __nonzero__(self):
        # Truthy only when a file is attached.
        return bool(self.grid_id)
    def __getstate__(self):
        # Copy the instance dict so pickling/copying does not clobber the
        # live object's cached GridFS connection (the previous version
        # mutated self.__dict__ in place).
        self_dict = self.__dict__.copy()
        self_dict['_fs'] = None
        return self_dict
    def __copy__(self):
        copied = GridFSProxy()
        copied.__dict__.update(self.__getstate__())
        return copied
    def __deepcopy__(self, memo):
        # File data lives in GridFS; copying the proxy is always shallow.
        return self.__copy__()
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
    def __str__(self):
        name = getattr(
            self.get(), 'filename', self.grid_id) if self.get() else '(no file)'
        return '<%s: %s>' % (self.__class__.__name__, name)
    def __eq__(self, other):
        if isinstance(other, GridFSProxy):
            return ((self.grid_id == other.grid_id) and
                    (self.collection_name == other.collection_name) and
                    (self.db_alias == other.db_alias))
        else:
            return False
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; keep them consistent.
        return not self.__eq__(other)
    @property
    def fs(self):
        # Lazily create (and cache) the GridFS handle.
        if not self._fs:
            self._fs = gridfs.GridFS(
                get_db(self.db_alias), self.collection_name)
        return self._fs
    def get(self, id=None):
        """Return the underlying GridOut, or None if there is no file."""
        if id:
            self.grid_id = id
        if self.grid_id is None:
            return None
        try:
            if self.gridout is None:
                self.gridout = self.fs.get(self.grid_id)
            return self.gridout
        except Exception:
            # File has been deleted
            return None
    def new_file(self, **kwargs):
        self.newfile = self.fs.new_file(**kwargs)
        self.grid_id = self.newfile._id
    def put(self, file_obj, **kwargs):
        if self.grid_id:
            raise GridFSError('This document already has a file. Either delete '
                              'it or call replace to overwrite it')
        self.grid_id = self.fs.put(file_obj, **kwargs)
        self._mark_as_changed()
    def write(self, string):
        if self.grid_id:
            if not self.newfile:
                raise GridFSError('This document already has a file. Either '
                                  'delete it or call replace to overwrite it')
        else:
            self.new_file()
        self.newfile.write(string)
    def writelines(self, lines):
        if not self.newfile:
            self.new_file()
            self.grid_id = self.newfile._id
        self.newfile.writelines(lines)
    def read(self, size=-1):
        gridout = self.get()
        if gridout is None:
            return None
        else:
            try:
                return gridout.read(size)
            except Exception:
                # Best effort: a broken/partial file reads as empty.
                return ""
    def delete(self):
        # Delete file from GridFS, FileField still remains
        self.fs.delete(self.grid_id)
        self.grid_id = None
        self.gridout = None
        self._mark_as_changed()
    def replace(self, file_obj, **kwargs):
        self.delete()
        self.put(file_obj, **kwargs)
    def close(self):
        if self.newfile:
            self.newfile.close()
    def _mark_as_changed(self):
        """Inform the instance that `self.key` has been changed"""
        if self.instance:
            self.instance._mark_as_changed(self.key)
class FileField(BaseField):
    """A GridFS storage field.
    .. versionadded:: 0.4
    .. versionchanged:: 0.5 added optional size param for read
    .. versionchanged:: 0.6 added db_alias for multidb support
    """
    proxy_class = GridFSProxy
    def __init__(self,
                 db_alias=DEFAULT_CONNECTION_NAME,
                 collection_name="fs", **kwargs):
        super(FileField, self).__init__(**kwargs)
        self.collection_name = collection_name
        self.db_alias = db_alias
    def __get__(self, instance, owner):
        if instance is None:
            return self
        # Check if a file already exists for this model
        grid_file = instance._data.get(self.name)
        if not isinstance(grid_file, self.proxy_class):
            grid_file = self.get_proxy_obj(key=self.name, instance=instance)
            instance._data[self.name] = grid_file
        if not grid_file.key:
            grid_file.key = self.name
            grid_file.instance = instance
        return grid_file
    def __set__(self, instance, value):
        key = self.name
        if ((hasattr(value, 'read') and not
             isinstance(value, GridFSProxy)) or isinstance(value, str_types)):
            # using "FileField() = file/string" notation
            grid_file = instance._data.get(self.name)
            # If a file already exists, delete it
            if grid_file:
                try:
                    grid_file.delete()
                except Exception:
                    # Best effort: a missing/broken old file should not
                    # block assignment of the new one.
                    pass
            # Create a new proxy object as we don't already have one
            instance._data[key] = self.get_proxy_obj(
                key=key, instance=instance)
            instance._data[key].put(value)
        else:
            instance._data[key] = value
        instance._mark_as_changed(key)
    def get_proxy_obj(self, key, instance, db_alias=None, collection_name=None):
        """Build a proxy bound to *instance*, defaulting alias/collection
        to this field's configuration."""
        if db_alias is None:
            db_alias = self.db_alias
        if collection_name is None:
            collection_name = self.collection_name
        return self.proxy_class(key=key, instance=instance,
                                db_alias=db_alias,
                                collection_name=collection_name)
    def to_mongo(self, value):
        # Store the GridFS file id in MongoDB
        if isinstance(value, self.proxy_class) and value.grid_id is not None:
            return value.grid_id
        return None
    def to_python(self, value):
        if value is not None:
            return self.proxy_class(value,
                                    collection_name=self.collection_name,
                                    db_alias=self.db_alias)
    def validate(self, value):
        # Check the type first: the previous version read value.grid_id
        # before knowing value was a proxy, raising AttributeError instead
        # of a validation error for bad input.
        if not isinstance(value, self.proxy_class):
            self.error('FileField only accepts GridFSProxy values')
        if (value.grid_id is not None and
                not isinstance(value.grid_id, ObjectId)):
            self.error('Invalid GridFSProxy value')
class ImageGridFsProxy(GridFSProxy):
    """
    Proxy for ImageField
    versionadded: 0.6
    """
    def put(self, file_obj, **kwargs):
        """
        Insert a image in database
        applying field properties (size, thumbnail_size)
        """
        field = self.instance._fields[self.key]
        # Handle nested fields
        if hasattr(field, 'field') and isinstance(field.field, FileField):
            field = field.field
        try:
            img = Image.open(file_obj)
            img_format = img.format
        # 'except Exception, e' was Python-2-only syntax; 'as' works on
        # Python 2.6+ and Python 3.
        except Exception as e:
            raise ValidationError('Invalid image: %s' % e)
        # Progressive JPEG: only honoured when explicitly requested via a
        # boolean kwarg and the source image is a JPEG.  (The old
        # 'img.info.get("progressive")' pre-assignment was dead code --
        # it was unconditionally overwritten here.)
        if (kwargs.get('progressive') and
                isinstance(kwargs.get('progressive'), bool) and
                img_format == 'JPEG'):
            progressive = True
        else:
            progressive = False
        # Resize the image when it exceeds the configured maximum size.
        if (field.size and (img.size[0] > field.size['width'] or
                            img.size[1] > field.size['height'])):
            size = field.size
            if size['force']:
                # Crop-and-fit to the exact dimensions.
                img = ImageOps.fit(img,
                                   (size['width'],
                                    size['height']),
                                   Image.ANTIALIAS)
            else:
                # Shrink in place preserving aspect ratio.
                img.thumbnail((size['width'],
                               size['height']),
                              Image.ANTIALIAS)
        thumbnail = None
        if field.thumbnail_size:
            size = field.thumbnail_size
            if size['force']:
                thumbnail = ImageOps.fit(
                    img, (size['width'], size['height']), Image.ANTIALIAS)
            else:
                thumbnail = img.copy()
                thumbnail.thumbnail((size['width'],
                                     size['height']),
                                    Image.ANTIALIAS)
        if thumbnail:
            thumb_id = self._put_thumbnail(thumbnail, img_format, progressive)
        else:
            thumb_id = None
        w, h = img.size
        io = StringIO()
        img.save(io, img_format, progressive=progressive)
        io.seek(0)
        return super(ImageGridFsProxy, self).put(io,
                                                 width=w,
                                                 height=h,
                                                 format=img_format,
                                                 thumbnail_id=thumb_id,
                                                 **kwargs)
    def delete(self, *args, **kwargs):
        # deletes thumbnail
        out = self.get()
        if out and out.thumbnail_id:
            self.fs.delete(out.thumbnail_id)
        return super(ImageGridFsProxy, self).delete(*args, **kwargs)
    def _put_thumbnail(self, thumbnail, format, progressive, **kwargs):
        """Store *thumbnail* in GridFS and return its file id."""
        w, h = thumbnail.size
        io = StringIO()
        thumbnail.save(io, format, progressive=progressive)
        io.seek(0)
        return self.fs.put(io, width=w,
                           height=h,
                           format=format,
                           **kwargs)
    @property
    def size(self):
        """
        return a width, height of image
        """
        out = self.get()
        if out:
            return out.width, out.height
    @property
    def format(self):
        """
        return format of image
        ex: PNG, JPEG, GIF, etc
        """
        out = self.get()
        if out:
            return out.format
    @property
    def thumbnail(self):
        """
        return a gridfs.grid_file.GridOut
        representing a thumbnail of Image
        """
        out = self.get()
        if out and out.thumbnail_id:
            return self.fs.get(out.thumbnail_id)
    def write(self, *args, **kwargs):
        raise RuntimeError("Please use \"put\" method instead")
    def writelines(self, *args, **kwargs):
        raise RuntimeError("Please use \"put\" method instead")
class ImproperlyConfigured(Exception):
    """Raised when an optional dependency required by a field is missing
    (e.g. PIL for :class:`ImageField`)."""
class ImageField(FileField):
    """
    A Image File storage field.
    @size (width, height, force):
        max size to store images, if larger will be automatically resized
        ex: size=(800, 600, True)
    @thumbnail (width, height, force):
        size to generate a thumbnail
    .. versionadded:: 0.6
    """
    proxy_class = ImageGridFsProxy
    def __init__(self, size=None, thumbnail_size=None,
                 collection_name='images', **kwargs):
        if not Image:
            raise ImproperlyConfigured("PIL library was not found")
        params_size = ('width', 'height', 'force')
        # Normalise the (width, height, force) tuples into dicts keyed by
        # params_size; missing trailing entries become None.
        extra_args = dict(size=size, thumbnail_size=thumbnail_size)
        for att_name, att in extra_args.items():
            value = None
            if isinstance(att, (tuple, list)):
                if PY3:
                    value = dict(itertools.zip_longest(params_size, att,
                                                       fillvalue=None))
                else:
                    # Python 2 only: map(None, ...) zips with None padding.
                    value = dict(map(None, params_size, att))
            setattr(self, att_name, value)
        super(ImageField, self).__init__(
            collection_name=collection_name,
            **kwargs)
class SequenceField(BaseField):
    """Provides a sequential counter see:
    http://www.mongodb.org/display/DOCS/Object+IDs#ObjectIDs-SequenceNumbers
    .. note::
        Although traditional databases often use increasing sequence
        numbers for primary keys. In MongoDB, the preferred approach is to
        use Object IDs instead. The concept is that in a very large
        cluster of machines, it is easier to create an object ID than have
        global, uniformly increasing sequence numbers.
    Use any callable as `value_decorator` to transform calculated counter into
    any value suitable for your needs, e.g. string or hexadecimal
    representation of the default integer counter value.
    .. versionadded:: 0.5
    .. versionchanged:: 0.8 added `value_decorator`
    """
    _auto_gen = True
    COLLECTION_NAME = 'mongoengine.counters'
    VALUE_DECORATOR = int
    def __init__(self, collection_name=None, db_alias=None, sequence_name=None,
                 value_decorator=None, *args, **kwargs):
        self.collection_name = collection_name or self.COLLECTION_NAME
        self.db_alias = db_alias or DEFAULT_CONNECTION_NAME
        self.sequence_name = sequence_name
        # Fall back to plain int when no usable callable is supplied.
        self.value_decorator = (callable(value_decorator) and
                                value_decorator or self.VALUE_DECORATOR)
        return super(SequenceField, self).__init__(*args, **kwargs)
    def generate(self):
        """
        Generate and Increment the counter
        """
        sequence_name = self.get_sequence_name()
        sequence_id = "%s.%s" % (sequence_name, self.name)
        collection = get_db(alias=self.db_alias)[self.collection_name]
        # Atomic increment; upsert creates the counter document on first use.
        # NOTE(review): find_and_modify was deprecated in pymongo 3 in favour
        # of find_one_and_update -- confirm the supported driver versions.
        counter = collection.find_and_modify(query={"_id": sequence_id},
                                             update={"$inc": {"next": 1}},
                                             new=True,
                                             upsert=True)
        return self.value_decorator(counter['next'])
    def set_next_value(self, value):
        """Helper method to set the next sequence value"""
        sequence_name = self.get_sequence_name()
        sequence_id = "%s.%s" % (sequence_name, self.name)
        collection = get_db(alias=self.db_alias)[self.collection_name]
        counter = collection.find_and_modify(query={"_id": sequence_id},
                                             update={"$set": {"next": value}},
                                             new=True,
                                             upsert=True)
        return self.value_decorator(counter['next'])
    def get_next_value(self):
        """Helper method to get the next value for previewing.
        .. warning:: There is no guarantee this will be the next value
        as it is only fixed on set.
        """
        sequence_name = self.get_sequence_name()
        sequence_id = "%s.%s" % (sequence_name, self.name)
        collection = get_db(alias=self.db_alias)[self.collection_name]
        data = collection.find_one({"_id": sequence_id})
        if data:
            return self.value_decorator(data['next'] + 1)
        return self.value_decorator(1)
    def get_sequence_name(self):
        """Return the counter name: explicit, owner-collection-based, or a
        snake-cased version of the owning class name."""
        if self.sequence_name:
            return self.sequence_name
        owner = self.owner_document
        if issubclass(owner, Document):
            return owner._get_collection_name()
        else:
            # CamelCase -> snake_case for non-Document owners.
            return ''.join('_%s' % c if c.isupper() else c
                           for c in owner._class_name).strip('_').lower()
    def __get__(self, instance, owner):
        value = super(SequenceField, self).__get__(instance, owner)
        if value is None and instance._initialised:
            # First access on an initialised document: allocate a number
            # and record it as changed so it gets persisted.
            value = self.generate()
            instance._data[self.name] = value
            instance._mark_as_changed(self.name)
        return value
    def __set__(self, instance, value):
        if value is None and instance._initialised:
            # Setting None re-allocates a fresh counter value.
            value = self.generate()
        return super(SequenceField, self).__set__(instance, value)
    def prepare_query_value(self, op, value):
        """
        This method is overridden in order to convert the query value into to required
        type. We need to do this in order to be able to successfully compare query
        values passed as string, the base implementation returns the value as is.
        """
        return self.value_decorator(value)
    def to_python(self, value):
        if value is None:
            value = self.generate()
        return value
class UUIDField(BaseField):
    """A UUID field.
    .. versionadded:: 0.6
    """
    _binary = None
    def __init__(self, binary=True, **kwargs):
        """
        Store UUID data in the database
        :param binary: if False store as a string.
        .. versionchanged:: 0.8.0
        .. versionchanged:: 0.6.19
        """
        self._binary = binary
        super(UUIDField, self).__init__(**kwargs)
    def to_python(self, value):
        if not self._binary:
            original_value = value
            try:
                if not isinstance(value, basestring):
                    value = unicode(value)
                return uuid.UUID(value)
            except (ValueError, TypeError, AttributeError):
                # Not parseable as a UUID: hand back the raw value and let
                # validate() report the problem.  (Was a bare 'except'.)
                return original_value
        return value
    def to_mongo(self, value):
        if not self._binary:
            # Stored as its canonical string representation.
            return unicode(value)
        elif isinstance(value, basestring):
            # Binary storage: parse strings into UUID objects first.
            return uuid.UUID(value)
        return value
    def prepare_query_value(self, op, value):
        if value is None:
            return None
        return self.to_mongo(value)
    def validate(self, value):
        if not isinstance(value, uuid.UUID):
            if not isinstance(value, basestring):
                value = str(value)
            try:
                value = uuid.UUID(value)
            # 'except Exception, exc' was Python-2-only syntax; 'as' works
            # on Python 2.6+ and Python 3.
            except Exception as exc:
                self.error('Could not convert to UUID: %s' % exc)
class GeoPointField(BaseField):
    """A list storing a longitude and latitude coordinate.
    .. note:: this represents a generic point in a 2D plane and a legacy way of
        representing a geo point. It admits 2d indexes but not "2dsphere" indexes
        in MongoDB > 2.4 which are more natural for modeling geospatial points.
        See :ref:`geospatial-indexes`
    .. versionadded:: 0.4
    """
    _geo_index = pymongo.GEO2D
    def validate(self, value):
        """Make sure that a geo-value is of type (x, y)
        """
        if not isinstance(value, (list, tuple)):
            self.error('GeoPointField can only accept tuples or lists '
                       'of (x, y)')
        if len(value) != 2:
            self.error("Value (%s) must be a two-dimensional point" %
                       repr(value))
        elif not all(isinstance(coord, (float, int)) for coord in value):
            self.error(
                "Both values (%s) in point must be float or int" % repr(value))
class PointField(GeoJsonBaseField):
    """A GeoJSON field storing a longitude and latitude coordinate.
    The data is represented as:
    .. code-block:: js
        {"type": "Point",
         "coordinates": [x, y]}
    You can either pass a dict with the full information or a list
    to set the value.
    Requires mongodb >= 2.4
    .. versionadded:: 0.8
    """
    # GeoJSON type name used when serialising / validating.
    _type = "Point"
class LineStringField(GeoJsonBaseField):
    """A GeoJSON field storing a line of longitude and latitude coordinates.
    The data is represented as:
    .. code-block:: js
        {"type": "LineString",
         "coordinates": [[x1, y1], [x2, y2] ... [xn, yn]]}
    You can either pass a dict with the full information or a list of points.
    Requires mongodb >= 2.4
    .. versionadded:: 0.8
    """
    # GeoJSON type name used when serialising / validating.
    _type = "LineString"
class PolygonField(GeoJsonBaseField):
    """A GeoJSON field storing a polygon of longitude and latitude coordinates.
    The data is represented as:
    .. code-block:: js
        {"type": "Polygon",
         "coordinates": [[[x1, y1], [x2, y2] ... [xn, yn]],
                         [[x1, y1], [x2, y2] ... [xn, yn]]]}
    You can either pass a dict with the full information or a list
    of LineStrings. The first LineString being the outside and the rest being
    holes.
    Requires mongodb >= 2.4
    .. versionadded:: 0.8
    """
    # GeoJSON type name used when serialising / validating.
    _type = "Polygon"
class MultiPointField(GeoJsonBaseField):
    """A GeoJSON field storing a list of Points.
    The data is represented as:
    .. code-block:: js
        {"type": "MultiPoint",
         "coordinates": [[x1, y1], [x2, y2]]}
    You can either pass a dict with the full information or a list
    to set the value.
    Requires mongodb >= 2.6
    .. versionadded:: 0.9
    """
    # GeoJSON type name used when serialising / validating.
    _type = "MultiPoint"
class MultiLineStringField(GeoJsonBaseField):
    """A GeoJSON field storing a list of LineStrings.
    The data is represented as:
    .. code-block:: js
        {"type": "MultiLineString",
         "coordinates": [[[x1, y1], [x2, y2] ... [xn, yn]],
                         [[x1, y1], [x2, y2] ... [xn, yn]]]}
    You can either pass a dict with the full information or a list of points.
    Requires mongodb >= 2.6
    .. versionadded:: 0.9
    """
    # GeoJSON type name used when serialising / validating.
    _type = "MultiLineString"
class MultiPolygonField(GeoJsonBaseField):
    """A GeoJSON field storing list of Polygons.
    The data is represented as:
    .. code-block:: js
        {"type": "MultiPolygon",
         "coordinates": [[
             [[x1, y1], [x2, y2] ... [xn, yn]],
             [[x1, y1], [x2, y2] ... [xn, yn]]
         ], [
             [[x1, y1], [x2, y2] ... [xn, yn]],
             [[x1, y1], [x2, y2] ... [xn, yn]]
         ]]}
    You can either pass a dict with the full information or a list
    of Polygons.
    Requires mongodb >= 2.6
    .. versionadded:: 0.9
    """
    # GeoJSON type name used when serialising / validating.
    _type = "MultiPolygon"
| 32.856061 | 138 | 0.592176 |
d0dc89cc92b3ac9ffe80fdf73b17ee659ea9085d | 21,615 | py | Python | python_on_whales/components/buildx/cli_wrapper.py | ucam-department-of-psychiatry/python-on-whales | f3171814089b16b88c407f316048f830f45eaa4e | [
"MIT"
] | 191 | 2020-12-02T19:35:00.000Z | 2022-03-31T22:41:48.000Z | python_on_whales/components/buildx/cli_wrapper.py | ucam-department-of-psychiatry/python-on-whales | f3171814089b16b88c407f316048f830f45eaa4e | [
"MIT"
] | 94 | 2020-12-18T16:36:38.000Z | 2022-03-31T00:06:39.000Z | python_on_whales/components/buildx/cli_wrapper.py | ucam-department-of-psychiatry/python-on-whales | f3171814089b16b88c407f316048f830f45eaa4e | [
"MIT"
] | 33 | 2020-12-17T20:32:31.000Z | 2022-03-29T10:23:06.000Z | from __future__ import annotations
import json
import tempfile
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union
import python_on_whales.components.image.cli_wrapper
from python_on_whales.client_config import (
ClientConfig,
DockerCLICaller,
ReloadableObject,
)
from python_on_whales.components.buildx.imagetools.cli_wrapper import ImagetoolsCLI
from python_on_whales.components.buildx.models import BuilderInspectResult
from python_on_whales.utils import (
ValidPath,
format_dict_for_cli,
run,
stream_stdout_and_stderr,
to_list,
)
class GetImageMethod(Enum):
    """Strategies for recovering a built image's identifier: via a tag
    given to the build, or via an iid file written by buildx."""
    TAG = 1
    IIDFILE = 2
class Builder(ReloadableObject):
    """A Docker buildx builder.

    Can be used as a context manager, in which case the builder is removed
    when the context exits (see ``remove``).
    """
    def __init__(
        self,
        client_config: ClientConfig,
        reference: Optional[str],
        is_immutable_id=False,
    ):
        # The builder is identified by its "name" attribute.
        super().__init__(client_config, "name", reference, is_immutable_id)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Leaving the context removes the builder from the Docker engine.
        self.remove()
    def _fetch_and_parse_inspect_result(
        self, reference: Optional[str]
    ) -> BuilderInspectResult:
        """Run ``docker buildx inspect [reference]`` and parse the output."""
        full_cmd = self.docker_cmd + ["buildx", "inspect"]
        if reference is not None:
            full_cmd.append(reference)
        inspect_str = run(full_cmd)
        return BuilderInspectResult.from_str(inspect_str)
    @property
    def name(self) -> str:
        # The name doubles as the immutable identifier of the builder.
        return self._get_immutable_id()
    @property
    def driver(self) -> str:
        # Fetched lazily from `docker buildx inspect`.
        return self._get_inspect_result().driver
    def remove(self):
        """Removes this builder. After this operation the builder cannot be used anymore.
        If you use the builder as a context manager, it will call this function when
        you exit the context manager.
        ```python
        from python_on_whales import docker
        buildx_builder = docker.buildx.create(use=True)
        with buildx_builder:
            docker.build(".")
        # now the variable buildx_builder is not usable since we're out of the context manager.
        # the .remove() method was called behind the scenes
        # since it was the current builder, 'default' is now the current builder.
        ```
        """
        BuildxCLI(self.client_config).remove(self)
ValidBuilder = Union[str, Builder]
class BuildxCLI(DockerCLICaller):
    """Entry point for the `docker buildx` group of subcommands."""
    def __init__(self, client_config: ClientConfig):
        super().__init__(client_config)
        # Expose `docker buildx imagetools` under `.imagetools`.
        self.imagetools = ImagetoolsCLI(self.client_config)
def bake(
self,
targets: Union[str, List[str]] = [],
builder: Optional[ValidBuilder] = None,
files: Union[ValidPath, List[ValidPath]] = [],
load: bool = False,
cache: bool = True,
print: bool = False,
progress: Union[str, bool] = "auto",
pull: bool = False,
push: bool = False,
set: Dict[str, str] = {},
variables: Dict[str, str] = {},
stream_logs: bool = False,
) -> Union[Dict[str, Dict[str, Dict[str, Any]]], Iterator[str]]:
"""Bake is similar to make, it allows you to build things declared in a file.
For example it allows you to build multiple docker image in parallel.
The CLI docs is [here](https://github.com/docker/buildx#buildx-bake-options-target)
and it contains a lot more information.
# Arguments
targets: Targets or groups of targets to build.
builder: The builder to use.
files: Build definition file(s)
load: Shorthand for `set=["*.output=type=docker"]`
cache: Whether to use the cache or not.
print: Do nothing, just returns the config.
progress: Set type of progress output (`"auto"`, `"plain"`, `"tty"`,
or `False`). Use plain to keep the container output on screen
pull: Always try to pull the newer version of the image
push: Shorthand for `set=["*.output=type=registry"]`
set: A list of overrides in the form `"targetpattern.key=value"`.
variables: A dict containing the values of the variables defined in the
hcl file. See <https://github.com/docker/buildx#hcl-variables-and-functions>
# Returns
The configuration used for the bake (files merged + override with
the arguments used in the function). It's the loaded json you would
obtain by running `docker buildx bake --print --load my_target` if
your command was `docker buildx bake --load my_target`. Some example here.
```python
from python_on_whales import docker
# returns the config used and runs the builds
config = docker.buildx.bake(["my_target1", "my_target2"], load=True)
assert config == {
"target": {
"my_target1": {
"context": "./",
"dockerfile": "Dockerfile",
"tags": ["pretty_image1:1.0.0"],
"target": "out1",
"output": ["type=docker"]
},
"my_target2": {
"context": "./",
"dockerfile": "Dockerfile",
"tags": ["pretty_image2:1.0.0"],
"target": "out2",
"output": ["type=docker"]
}
}
}
# returns the config only, doesn't run the builds
config = docker.buildx.bake(["my_target1", "my_target2"], load=True, print=True)
```
"""
full_cmd = self.docker_cmd + ["buildx", "bake"]
full_cmd.add_flag("--no-cache", not cache)
full_cmd.add_simple_arg("--builder", builder)
full_cmd.add_flag("--load", load)
full_cmd.add_flag("--pull", pull)
full_cmd.add_flag("--push", push)
full_cmd.add_flag("--print", print)
if progress != "auto" and isinstance(progress, str):
full_cmd += ["--progress", progress]
for file in to_list(files):
full_cmd.add_simple_arg("--file", file)
full_cmd.add_args_list("--set", format_dict_for_cli(set))
targets = to_list(targets)
env = dict(variables)
if print:
if stream_logs:
ValueError(
"Getting the config of the bake and streaming "
"logs at the same time is not possible."
)
return json.loads(run(full_cmd + targets, env=env))
elif stream_logs:
return stream_buildx_logs(full_cmd + targets, env=env)
else:
run(full_cmd + targets, capture_stderr=progress is False, env=env)
return json.loads(run(full_cmd + ["--print"] + targets, env=env))
def build(
    self,
    context_path: ValidPath,
    add_hosts: Dict[str, str] = {},
    allow: List[str] = [],
    build_args: Dict[str, str] = {},
    builder: Optional[ValidBuilder] = None,
    cache: bool = True,
    cache_from: Union[str, Dict[str, str], List[Dict[str, str]], None] = None,
    cache_to: Union[str, Dict[str, str], None] = None,
    file: Optional[ValidPath] = None,
    labels: Dict[str, str] = {},
    load: bool = False,
    network: Optional[str] = None,
    output: Dict[str, str] = {},
    platforms: Optional[List[str]] = None,
    progress: Union[str, bool] = "auto",
    pull: bool = False,
    push: bool = False,
    secrets: Union[str, List[str]] = [],
    ssh: Optional[str] = None,
    tags: Union[str, List[str]] = [],
    target: Optional[str] = None,
    stream_logs: bool = False,
) -> Union[
    None, python_on_whales.components.image.cli_wrapper.Image, Iterator[str]
]:
    """Build a Docker image with buildkit as backend.

    Alias: `docker.build(...)`

    A `python_on_whales.Image` is returned, even when using multiple tags.
    That is because it will produce a single image with multiple tags.
    If no image is loaded into the Docker daemon (if `push=True` for ex),
    then `None` is returned.

    # Arguments
        context_path: The path of the build context.
        add_hosts: Hosts to add. `add_hosts={"my_host1": "192.168.32.35"}`
        allow: List of extra privileges.
            Eg `allow=["network.host", "security.insecure"]`
        build_args: The build arguments.
            ex `build_args={"PY_VERSION": "3.7.8", "UBUNTU_VERSION": "20.04"}`.
        builder: Specify which builder to use.
        cache: Whether or not to use the cache
        cache_from: Works only with the container driver. Loads the cache
            (if needed) from a registry `cache_from="user/app:cache"` or
            a directory on the client `cache_from="type=local,src=path/to/dir"`.
            It's also possible to use a dict or list of dict form for this
            argument. e.g.
            `cache_from=dict(type="local", src="path/to/dir")`
        cache_to: Works only with the container driver. Sends the resulting
            docker cache either to a registry `cache_to="user/app:cache"`,
            or to a local directory `cache_to="type=local,dest=path/to/dir"`.
            It's also possible to use a dict form for this argument. e.g.
            `cache_to=dict(type="local", dest="path/to/dir", mode="max")`
        file: The path of the Dockerfile
        labels: Dict of labels to add to the image.
            `labels={"very-secure": "1", "needs-gpu": "0"}` for example.
        load: Shortcut for `output=dict(type="docker")` If `True`,
            `docker.buildx.build` will return a `python_on_whales.Image`.
        network: which network to use when building the Docker image
        output: Output destination
            (format: `output={"type": "local", "dest": "path"}`
            Possible output types are
            `["local", "tar", "oci", "docker", "image", "registry"]`.
            See [this link](https://github.com/docker/buildx#-o---outputpath-typetypekeyvalue)
            for more details about each exporter.
        platforms: List of target platforms when building the image. Ex:
            `platforms=["linux/amd64", "linux/arm64"]`
        progress: Set type of progress output (auto, plain, tty, or False).
            Use plain to keep the container output on screen
        pull: Always attempt to pull a newer version of the image
        push: Shorthand for `output=dict(type="registry")`.
        secrets: One or more secrets passed as string(s). For example
            `secrets="id=aws,src=/home/my_user/.aws/credentials"`
        ssh: SSH agent socket or keys to expose to the build
            (format is `default|<id>[=<socket>|<key>[,<key>]]` as a string)
        tags: Tag or tags to put on the resulting image.
        target: Set the target build stage to build.
        stream_logs: If `True` this function will return an iterator of strings.
            You can then read the logs as they arrive.

    # Returns
        A `python_on_whales.Image` if a Docker image is loaded
        in the daemon after the build (the default behavior when
        calling `docker.build(...)`). Otherwise, `None`.
    """
    # NOTE(review): the mutable default arguments above ({} and []) are only
    # read, never mutated, so the shared-default pitfall does not apply here.
    tags = to_list(tags)
    full_cmd = self.docker_cmd + ["buildx", "build"]

    # "auto" is buildx's own default, so it is only forwarded when overridden
    # with another string; a bool `progress` is handled via capture_stderr below.
    if progress != "auto" and isinstance(progress, str):
        full_cmd += ["--progress", progress]

    full_cmd.add_args_list(
        "--add-host", format_dict_for_cli(add_hosts, separator=":")
    )
    full_cmd.add_args_list("--allow", allow)
    full_cmd.add_args_list("--build-arg", format_dict_for_cli(build_args))
    full_cmd.add_simple_arg("--builder", builder)
    full_cmd.add_args_list("--label", format_dict_for_cli(labels))

    full_cmd.add_simple_arg("--ssh", ssh)

    full_cmd.add_flag("--pull", pull)
    full_cmd.add_flag("--push", push)
    full_cmd.add_flag("--load", load)
    full_cmd.add_simple_arg("--file", file)
    full_cmd.add_simple_arg("--target", target)
    # cache_from may be a raw string, a single dict, or a list of dicts;
    # dicts are rendered to buildx's "key=value,..." syntax.
    if isinstance(cache_from, list):
        for item in cache_from:
            full_cmd.add_simple_arg("--cache-from", format_dict_for_buildx(item))
    elif isinstance(cache_from, dict):
        full_cmd.add_simple_arg("--cache-from", format_dict_for_buildx(cache_from))
    else:
        full_cmd.add_simple_arg("--cache-from", cache_from)
    if isinstance(cache_to, dict):
        full_cmd.add_simple_arg("--cache-to", format_dict_for_buildx(cache_to))
    else:
        full_cmd.add_simple_arg("--cache-to", cache_to)
    full_cmd.add_args_list("--secret", to_list(secrets))
    if output != {}:
        full_cmd += ["--output", format_dict_for_buildx(output)]
    if platforms is not None:
        full_cmd += ["--platform", ",".join(platforms)]
    full_cmd.add_simple_arg("--network", network)
    full_cmd.add_flag("--no-cache", not cache)
    full_cmd.add_args_list("--tag", tags)

    if stream_logs:
        # Streaming needs a textual progress output: a tty or fully silenced
        # progress cannot be turned into an iterator of log lines.
        if progress in (False, "tty"):
            raise ValueError(
                "You want to stream logs, but it's not possible if a tty is used "
                "as 'progress'. It's also not possible if 'progress' is False. "
                "Make sure the function arguments of 'docker.build' are "
                "coherent."
            )
        full_cmd.append(context_path)
        return stream_buildx_logs(full_cmd)

    will_load_image = self._build_will_load_image(builder, push, load, output)
    # very special_case, must be fixed https://github.com/docker/buildx/issues/420
    if (
        will_load_image
        and not tags
        and self.inspect(builder).driver == "docker-container"
    ):
        # we have no way of fetching the image because iidfile is wrong in this case.
        will_load_image = False

    if not will_load_image:
        # Nothing to return: run the build and stop (stderr holds the progress
        # output and is only captured when progress is disabled).
        full_cmd.append(context_path)
        run(full_cmd, capture_stderr=progress is False)
        return

    docker_image = python_on_whales.components.image.cli_wrapper.ImageCLI(
        self.client_config
    )
    if self._method_to_get_image(builder) == GetImageMethod.TAG:
        # The loaded image can be fetched back by its first tag.
        full_cmd.append(context_path)
        run(full_cmd, capture_stderr=progress is False)
        return docker_image.inspect(tags[0])
    else:
        # Fall back to an iid file: buildx writes the image id there, which
        # works even when the image carries no tag.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_dir = Path(tmp_dir)
            iidfile = tmp_dir / "id_file.txt"
            full_cmd.add_simple_arg("--iidfile", iidfile)
            full_cmd.append(context_path)
            run(full_cmd, capture_stderr=progress is False)
            image_id = iidfile.read_text()
            return docker_image.inspect(image_id)
def _build_will_load_image(
self,
builder: Optional[str],
push: bool,
load: bool,
output: Optional[Dict[str, str]],
) -> bool:
if load:
return True
if push:
return False
if output != {}:
if output.get("type") == "docker" and "dest" not in output:
return True
else:
return False
# now load push and output are not set.
if self.inspect(builder).driver == "docker":
return True
return False
def _method_to_get_image(self, builder: Optional[str]) -> GetImageMethod:
    """Getting around https://github.com/docker/buildx/issues/420

    Picks how the freshly built image should be fetched back: through an
    iid file for the "docker" driver, through its tag otherwise.
    """
    # Avoid shadowing the `builder` parameter with the inspected object.
    builder_info = self.inspect(builder)
    if builder_info.driver == "docker":
        return GetImageMethod.IIDFILE
    return GetImageMethod.TAG
def create(
    self,
    context_or_endpoint: Optional[str] = None,
    buildkitd_flags: Optional[str] = None,
    config: Optional[ValidPath] = None,
    driver: Optional[str] = None,
    driver_options: Dict[str, str] = {},
    name: Optional[str] = None,
    use: bool = False,
) -> Builder:
    """Create a new builder instance

    # Arguments
        context_or_endpoint:
        buildkitd_flags: Flags for buildkitd daemon
        config: BuildKit config file
        driver: Driver to use (available: [kubernetes docker docker-container])
        driver_options: Options for the driver.
            e.g `driver_options=dict(network="host")`
        name: Builder instance name
        use: Set the current builder instance to this builder

    # Returns
        A `python_on_whales.Builder` object.
    """
    create_cmd = self.docker_cmd + ["buildx", "create"]

    # Plain single-valued options, forwarded only when provided.
    for cli_option, option_value in (
        ("--buildkitd-flags", buildkitd_flags),
        ("--config", config),
        ("--driver", driver),
    ):
        create_cmd.add_simple_arg(cli_option, option_value)

    if driver_options != {}:
        create_cmd.add_simple_arg(
            "--driver-opt", format_dict_for_buildx(driver_options)
        )
    create_cmd.add_simple_arg("--name", name)
    create_cmd.add_flag("--use", use)

    if context_or_endpoint is not None:
        create_cmd.append(context_or_endpoint)
    return Builder(self.client_config, run(create_cmd))
def disk_usage(self):
    """Disk usage of the builder cache (`docker buildx du`).

    # Raises
        NotImplementedError: always, this wrapper is not yet written.
    """
    # An explicit message makes the failure self-explanatory for callers.
    raise NotImplementedError(
        "'docker buildx du' is not yet implemented in python-on-whales."
    )
def inspect(self, x: Optional[str] = None) -> Builder:
    """Fetch a builder instance by name.

    # Arguments
        x: Name of the builder to return. When `None` (the default), the
            builder currently in use is returned instead.

    # Returns
        A `python_on_whales.Builder` object.
    """
    return Builder(self.client_config, x, is_immutable_id=False)
def list(self) -> List[Builder]:
    """Returns the list of `python_on_whales.Builder` available."""
    ls_output = run(self.docker_cmd + ["buildx", "ls"])
    # Drop the header line; rows starting with a space describe nodes of a
    # builder, not builders themselves.
    builder_rows = [
        row for row in ls_output.splitlines()[1:] if not row.startswith(" ")
    ]
    builder_names = [row.split(" ")[0] for row in builder_rows]
    return [
        Builder(self.client_config, builder_name, is_immutable_id=True)
        for builder_name in builder_names
    ]
def prune(self, all: bool = False, filters: Dict[str, str] = {}) -> None:
    """Remove build cache on the current builder.

    # Arguments
        all: Remove all cache, not just dangling layers
        filters: Filters to use, for example `filters=dict(until="24h")`
    """
    # --force skips the interactive confirmation prompt.
    prune_cmd = self.docker_cmd + ["buildx", "prune", "--force"]
    prune_cmd.add_flag("--all", all)
    prune_cmd.add_args_list("--filter", format_dict_for_cli(filters))
    run(prune_cmd)
def remove(self, builder: Union[Builder, str]) -> None:
    """Remove a builder

    # Arguments
        builder: The builder to remove
    """
    rm_cmd = self.docker_cmd + ["buildx", "rm"]
    rm_cmd.append(builder)
    run(rm_cmd)
def stop(self, builder: Optional[ValidBuilder]) -> None:
    """Stop the builder instance

    # Arguments:
        builder: The builder to stop. If `None` (the default value),
            the current builder is stopped.
    """
    stop_cmd = self.docker_cmd + ["buildx", "stop"]
    # Omitting the name makes the CLI act on the current builder.
    if builder is not None:
        stop_cmd.append(builder)
    run(stop_cmd)
def use(
    self, builder: Union[Builder, str], default: bool = False, global_: bool = False
) -> None:
    """Set the current builder instance

    # Arguments
        builder: The builder to use
        default: Set builder as default for the current context
        global_: Builder will be used even when changing contexts
    """
    use_cmd = self.docker_cmd + ["buildx", "use"]
    use_cmd.add_flag("--default", default)
    # "global" is a Python keyword, hence the trailing underscore in the API.
    use_cmd.add_flag("--global", global_)
    use_cmd.append(builder)
    run(use_cmd)
def version(self) -> str:
    """Returns the docker buildx version as a string.

    ```python
    from python_on_whales import docker

    print(docker.buildx.version())
    # "github.com/docker/buildx v0.4.2 fb7b670b764764dc4716df3eba07ffdae4cc47b2"
    ```
    """
    version_cmd = self.docker_cmd + ["buildx", "version"]
    return run(version_cmd)
def is_installed(self) -> bool:
    """Returns `True` if docker buildx is installed and working.

    If it's not installed, head
    to [the installation page](https://github.com/docker/buildx#installing)
    and follow the instructions.
    """
    # When the plugin is present, its help text mentions "buildx".
    help_text = run(self.docker_cmd + ["buildx", "--help"])
    return "buildx" in help_text
def format_dict_for_buildx(options: Dict[str, str]) -> str:
    """Render *options* as the single comma-separated ``key=value,...``
    string expected by buildx CLI flags."""
    key_value_pairs = format_dict_for_cli(options, separator="=")
    return ",".join(key_value_pairs)
def stream_buildx_logs(
    full_cmd: list, env: Optional[Dict[str, str]] = None
) -> Iterator[str]:
    """Run *full_cmd* and yield its combined stdout/stderr output, decoded,
    line by line as it arrives.

    # Arguments
        full_cmd: The complete command to execute.
        env: Extra environment variables for the subprocess, if any.
    """
    # Fix: the annotation was `Dict[str, str] = None` (implicit Optional),
    # which is rejected by modern type checkers (PEP 484).
    for _origin, value in stream_stdout_and_stderr(full_cmd, env=env):
        yield value.decode()
| 39.086799 | 98 | 0.591904 |
f173c5294d0c18ea2ffbeb6c49acca514a616bdb | 7,837 | py | Python | twilio/docs/conf.py | vinothpofi/1bookingz | 053bebb0792c2be8779e8a472ca9ab1e2c760916 | [
"MIT"
] | null | null | null | twilio/docs/conf.py | vinothpofi/1bookingz | 053bebb0792c2be8779e8a472ca9ab1e2c760916 | [
"MIT"
] | null | null | null | twilio/docs/conf.py | vinothpofi/1bookingz | 053bebb0792c2be8779e8a472ca9ab1e2c760916 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Services_Twilio documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 8 04:02:01 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.phpdomain', 'sphinxcontrib_phpautodoc']
primary_domain = 'php'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Services_Twilio'
copyright = unicode(datetime.utcnow().year) + u', Twilio Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.12'
# The full version, including alpha/beta/rc tags.
release = '3.12.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'kr'
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Services_Twiliodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Services_Twilio.tex', u'Services\\_Twilio Documentation',
u'Neuman Vong', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'services_twilio', u'Services_Twilio Documentation',
[u'Neuman Vong'], 1)
]
| 34.524229 | 82 | 0.681766 |
38da1ed2386a2bb8b67828c93d357bfd9518f922 | 1,120 | py | Python | voxel_globe/meta/fields.py | ngageoint/voxel-globe | 91f386de652b704942165889c10468b2c4cf4eec | [
"MIT"
] | 28 | 2015-07-27T23:57:24.000Z | 2020-04-05T15:10:52.000Z | voxel_globe/meta/fields.py | VisionSystemsInc/voxel_globe | 6eb3fca5586726428e9d914f7b730ca164c64a52 | [
"MIT"
] | 50 | 2016-02-11T15:50:22.000Z | 2016-10-27T22:38:27.000Z | voxel_globe/meta/fields.py | ngageoint/voxel-globe | 91f386de652b704942165889c10468b2c4cf4eec | [
"MIT"
] | 8 | 2015-07-27T19:22:03.000Z | 2021-01-04T09:44:48.000Z | import os
from django.contrib.gis.db import models
from django.utils.translation import ugettext_lazy
def validate_file(value):
return os.path.isfile(os.path.expandvars(value)) or \
os.path.isdir(os.path.expandvars(value))
class FileNameField(models.TextField):
#Field for a file or directory
default_validators = [validate_file]
description = ugettext_lazy("File Name")
def __init__(self, *args, **kwargs):
self.path = kwargs.pop('path', None)
super(FileNameField, self).__init__(*args, **kwargs)
def check(self, *args, **kwargs):
errors = super(FileNameField, self).check(*args, **kwargs)
errors.extend(self._check_path_attribute(*args, **kwargs))
return errors
def _check_path_attribute(self, **kwargs):
if self.path is None:
return [checks.Error("FileNameField must define a 'path' attribute.",
obj=self, id='voxel_globe.E1')]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
kwargs['path'] = self.path
return name, path, args, kwargs
| 32 | 75 | 0.680357 |
64c52a0b480cca6e8693e1645c070878007e884a | 2,819 | py | Python | src/demuxfb/_progress_reporter.py | nick-killeen/demuxfb | 9c9a89c3b3116add018f98ef9e11ae335395692a | [
"MIT"
] | null | null | null | src/demuxfb/_progress_reporter.py | nick-killeen/demuxfb | 9c9a89c3b3116add018f98ef9e11ae335395692a | [
"MIT"
] | null | null | null | src/demuxfb/_progress_reporter.py | nick-killeen/demuxfb | 9c9a89c3b3116add018f98ef9e11ae335395692a | [
"MIT"
] | null | null | null | """Module for logic about reporting on the long progress of `Chat` creation."""
from abc import ABC, abstractmethod
from typing import Callable, Any
import datetime
from .message import Message
class ProgressReporter(ABC):
"""
Interface for reporting on progress during the construction of a chat, which
can take a while. This is an optional argument to `demuxfb.build_chat`.
See Also
--------
demuxfb.IntervalProgressReporter
"""
@abstractmethod
def finish_message(self, message: Message) -> None:
"""
Called when a message has finished being constructed.
Parameters
----------
message: demuxfb.mesage.Message
The message that was just constructed.
"""
raise NotImplementedError
@abstractmethod
def start(self) -> None:
"""Called when Chat construction begins."""
raise NotImplementedError
@abstractmethod
def finish(self) -> None:
"""Called when Chat construction finishes."""
raise NotImplementedError
class IntervalProgressReporter(ProgressReporter):
    """
    ProgressReporter that logs time and number of messages processed at a
    regular interval.
    """
    # Wall-clock timestamp recorded by start(); read by finish().
    _start_time: float
    # Number of messages completed so far.
    _message_count: int
    # Earliest timestamp at which the next report may be emitted.
    _next_report_time: float
    # Seconds between successive reports.
    _report_interval: float
    # Sink used to emit report strings (print by default).
    _report_function: Callable[[str], Any]

    def __init__(self, report_interval_seconds: float = 1.0,
                 report_function: Callable[[str], Any] = print) -> None:
        """
        Create reporter.

        Parameters
        ----------
        report_interval_seconds : float, defaults to 1.0
            Interval (in seconds) to report at.
        report_function : function, defaults to print
            Function that takes in a str and logs its value via some
            side-effect. This function will be used to make the reports.
        """
        self._message_count = 0
        # 0.0 guarantees the very first finish_message() call reports.
        self._next_report_time = 0.0
        self._report_interval = report_interval_seconds
        self._report_function = report_function

    def finish_message(self, message: Message) -> None:
        self._message_count += 1
        # Report our progress if we are due to.
        current_time = datetime.datetime.now().timestamp()
        if current_time >= self._next_report_time:
            self._next_report_time = current_time + self._report_interval
            self._report_function(
                'Messages processed: {}'.format(self._message_count))

    def start(self) -> None:
        # NOTE(review): finish() reads _start_time, so start() must be called
        # first or finish() raises AttributeError -- confirm callers do so.
        self._start_time = datetime.datetime.now().timestamp()

    def finish(self) -> None:
        end_time = datetime.datetime.now().timestamp()
        d_time = end_time - self._start_time
        self._report_function('Processed {} messages\nTook: {} seconds'.format(
            self._message_count, d_time))
ebcc31226b895fb9fd5f7de0f1b8298345da8075 | 11,036 | py | Python | venv/Lib/site-packages/pandas/tests/groupby/test_quantile.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | venv/Lib/site-packages/pandas/tests/groupby/test_quantile.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 8 | 2021-09-22T12:47:32.000Z | 2022-01-14T21:30:38.000Z | venv/Lib/site-packages/pandas/tests/groupby/test_quantile.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 3 | 2020-08-04T02:48:32.000Z | 2020-08-17T01:20:09.000Z | import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"a_vals,b_vals",
[
# Ints
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]),
([1, 2, 3, 4], [4, 3, 2, 1]),
([1, 2, 3, 4, 5], [4, 3, 2, 1]),
# Floats
([1.0, 2.0, 3.0, 4.0, 5.0], [5.0, 4.0, 3.0, 2.0, 1.0]),
# Missing data
([1.0, np.nan, 3.0, np.nan, 5.0], [5.0, np.nan, 3.0, np.nan, 1.0]),
([np.nan, 4.0, np.nan, 2.0, np.nan], [np.nan, 4.0, np.nan, 2.0, np.nan]),
# Timestamps
(
list(pd.date_range("1/1/18", freq="D", periods=5)),
list(pd.date_range("1/1/18", freq="D", periods=5))[::-1],
),
# All NA
([np.nan] * 5, [np.nan] * 5),
],
)
@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
def test_quantile(interpolation, a_vals, b_vals, q):
if interpolation == "nearest" and q == 0.5 and b_vals == [4, 3, 2, 1]:
pytest.skip(
"Unclear numpy expectation for nearest result with equidistant data"
)
a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)
b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation)
df = DataFrame(
{"key": ["a"] * len(a_vals) + ["b"] * len(b_vals), "val": a_vals + b_vals}
)
expected = DataFrame(
[a_expected, b_expected], columns=["val"], index=Index(["a", "b"], name="key")
)
result = df.groupby("key").quantile(q, interpolation=interpolation)
tm.assert_frame_equal(result, expected)
def test_quantile_array():
    # GH#27526: a list-like q yields a (group, quantile) MultiIndex result.
    frame = DataFrame({"A": [0, 1, 2, 3, 4]})
    expected = DataFrame(
        {"A": [0.25, 2.50]}, index=pd.MultiIndex.from_product([[0, 1], [0.25]])
    )
    tm.assert_frame_equal(
        frame.groupby([0, 0, 1, 1, 1]).quantile([0.25]), expected
    )

    frame = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]})
    expected = DataFrame(
        {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]},
        index=pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]]),
    )
    tm.assert_frame_equal(
        frame.groupby([0, 0, 1, 1]).quantile([0.25, 0.75]), expected
    )
def test_quantile_array2():
    # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959
    rng = np.random.RandomState(0)  # fixed seed -> deterministic expectations
    df = DataFrame(rng.randint(0, 5, size=(10, 3)), columns=list("ABC"))
    result = df.groupby("A").quantile([0.3, 0.7])
    expected_index = pd.MultiIndex.from_product(
        [[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None]
    )
    expected = DataFrame(
        {
            "B": [0.9, 2.1, 2.2, 3.4, 1.6, 2.4, 2.3, 2.7, 0.0, 0.0],
            "C": [1.2, 2.8, 1.8, 3.0, 0.0, 0.0, 1.9, 3.1, 3.0, 3.0],
        },
        index=expected_index,
    )
    tm.assert_frame_equal(result, expected)
def test_quantile_array_no_sort():
    # With sort=False, groups keep first-seen order in the result index.
    df = DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]})
    gb = df.groupby([1, 0, 1], sort=False)

    expected = DataFrame(
        {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]},
        index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]),
    )
    tm.assert_frame_equal(gb.quantile([0.25, 0.5, 0.75]), expected)

    # The quantile level also keeps the order in which q was given.
    expected = DataFrame(
        {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]},
        index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]),
    )
    tm.assert_frame_equal(gb.quantile([0.75, 0.25]), expected)
def test_quantile_array_multiple_levels():
    # Grouping by two keys + list-like q -> three-level MultiIndex.
    df = DataFrame(
        {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]}
    )
    expected_index = pd.MultiIndex.from_tuples(
        [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)],
        names=["c", "d", None],
    )
    expected = DataFrame(
        {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]},
        index=expected_index,
    )
    result = df.groupby(["c", "d"]).quantile([0.25, 0.75])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("frame_size", [(2, 3), (100, 10)])
@pytest.mark.parametrize("groupby", [[0], [0, 1]])
@pytest.mark.parametrize("q", [[0.5, 0.6]])
def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, q):
    # GH30289: integer column labels + list-like q must not break quantile.
    nrow, ncol = frame_size
    # Each row is constant (row index mod 4), so every quantile of a group
    # equals that constant value.
    df = DataFrame(np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol))

    idx_levels = [list(range(min(nrow, 4)))] * len(groupby) + [q]
    idx_codes = [[x for x in range(min(nrow, 4)) for _ in q]] * len(groupby) + [
        list(range(len(q))) * min(nrow, 4)
    ]
    expected_index = pd.MultiIndex(
        levels=idx_levels, codes=idx_codes, names=groupby + [None]
    )
    expected_values = [
        [float(x)] * (ncol - len(groupby)) for x in range(min(nrow, 4)) for _ in q
    ]
    # Grouping columns are consumed as index levels, not result columns.
    expected_columns = [x for x in range(ncol) if x not in groupby]
    expected = DataFrame(
        expected_values, index=expected_index, columns=expected_columns
    )
    result = df.groupby(groupby).quantile(q)

    tm.assert_frame_equal(result, expected)
def test_quantile_raises():
    # quantile on an all-object frame must raise (after warning that the
    # invalid 'val' column is being dropped).
    df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"])

    with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"):
        with tm.assert_produces_warning(
            FutureWarning, match="Dropping invalid columns"
        ):
            df.groupby("key").quantile()
def test_quantile_out_of_bounds_q_raises():
    # https://github.com/pandas-dev/pandas/issues/27470
    # q outside [0, 1] must raise, and the message echoes the bad value.
    df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)})
    g = df.groupby([0, 0, 0, 1, 1, 1])
    with pytest.raises(ValueError, match="Got '50.0' instead"):
        g.quantile(50)

    with pytest.raises(ValueError, match="Got '-1.0' instead"):
        g.quantile(-1)
def test_quantile_missing_group_values_no_segfaults():
    # GH 28662: NaN group keys used to cause intermittent segfaults, so the
    # call is repeated many times to make any crash near-certain.
    key_values = np.array([1.0, np.nan, 1.0])
    df = DataFrame({"key": key_values, "val": range(3)})

    grouped = df.groupby("key")
    for _ in range(100):
        grouped.quantile()
@pytest.mark.parametrize(
    "key, val, expected_key, expected_val",
    [
        ([1.0, np.nan, 3.0, np.nan], range(4), [1.0, 3.0], [0.0, 2.0]),
        ([1.0, np.nan, 2.0, 2.0], range(4), [1.0, 2.0], [0.0, 2.5]),
        (["a", "b", "b", np.nan], range(4), ["a", "b"], [0, 1.5]),
        ([0], [42], [0], [42.0]),
        ([], [], np.array([], dtype="float64"), np.array([], dtype="float64")),
    ],
)
def test_quantile_missing_group_values_correct_results(
    key, val, expected_key, expected_val
):
    # GH 28662, GH 33200, GH 33569
    # Rows whose group key is NaN are excluded from the result entirely.
    df = DataFrame({"key": key, "val": val})

    expected = DataFrame(
        expected_val, index=Index(expected_key, name="key"), columns=["val"]
    )

    grp = df.groupby("key")
    # Both the explicit q and the default (0.5) must drop NaN groups.
    result = grp.quantile(0.5)
    tm.assert_frame_equal(result, expected)

    result = grp.quantile()
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "values",
    [
        pd.array([1, 0, None] * 2, dtype="Int64"),
        pd.array([True, False, None] * 2, dtype="boolean"),
    ],
)
@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
def test_groupby_quantile_nullable_array(values, q):
    # https://github.com/pandas-dev/pandas/issues/33136
    # Nullable (masked) arrays: pd.NA entries are skipped per group.
    df = DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values})
    result = df.groupby("a")["b"].quantile(q)

    if isinstance(q, list):
        idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None])
        true_quantiles = [0.0, 0.5, 1.0]
    else:
        idx = Index(["x", "y"], name="a")
        true_quantiles = [0.5]

    # Both groups hold the same values, hence the expectations repeat.
    expected = pd.Series(true_quantiles * 2, index=idx, name="b")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
def test_groupby_quantile_skips_invalid_dtype(q):
    # Object column "c" is dropped; a deprecation warning is emitted for
    # scalar q only (the list-q path already silently drops it).
    df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]})
    warn = None if isinstance(q, list) else FutureWarning
    with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
        result = df.groupby("a").quantile(q)

    expected = df.groupby("a")[["b"]].quantile(q)
    tm.assert_frame_equal(result, expected)
def test_groupby_quantile_NA_float(any_float_allowed_nullable_dtype):
    # GH#42849
    # A NA entry in a nullable-float column is ignored by quantile.
    df = DataFrame(
        {"x": [1, 1], "y": [0.2, np.nan]}, dtype=any_float_allowed_nullable_dtype
    )
    scalar_result = df.groupby("x")["y"].quantile(0.5)
    scalar_expected = pd.Series(
        [0.2], dtype=float, index=Index(df["x"][:1]), name="y"
    )
    tm.assert_series_equal(scalar_expected, scalar_result)

    list_result = df.groupby("x")["y"].quantile([0.5, 0.75])
    list_expected = pd.Series(
        [0.2] * 2,
        index=pd.MultiIndex.from_arrays(
            [Index(df["x"]), [0.5, 0.75]], names=["x", None]
        ),
        name="y",
    )
    tm.assert_series_equal(list_result, list_expected)
def test_groupby_quantile_NA_int(any_nullable_int_dtype):
    # GH#42849
    # Nullable-integer columns should quantile like plain ints.
    df = DataFrame({"x": [1, 1], "y": [2, 5]}, dtype=any_nullable_int_dtype)
    series_result = df.groupby("x")["y"].quantile(0.5)
    series_expected = pd.Series(
        [3.5], dtype=float, index=Index(df["x"][:1]), name="y"
    )
    tm.assert_series_equal(series_expected, series_result)

    frame_result = df.groupby("x").quantile(0.5)
    frame_expected = DataFrame({"y": 3.5}, index=Index(df["x"][:1]))
    tm.assert_frame_equal(frame_result, frame_expected)
@pytest.mark.parametrize("dtype", ["Float64", "Float32"])
def test_groupby_quantile_allNA_column(dtype):
    # GH#42849
    # A column that is entirely NA yields NaN for every group quantile.
    frame = DataFrame({"x": [1, 1], "y": [pd.NA] * 2}, dtype=dtype)
    result = frame.groupby("x")["y"].quantile(0.5)
    expected = pd.Series(
        [np.nan], dtype=float, index=Index(frame["x"][:1]), name="y"
    )
    tm.assert_series_equal(expected, result)
def test_groupby_timedelta_quantile():
    # GH: 29485
    # Quantiles must interpolate within timedelta-valued columns.
    frame = DataFrame(
        {"value": pd.to_timedelta(np.arange(4), unit="s"), "group": [1, 1, 2, 2]}
    )
    observed = frame.groupby("group").quantile(0.99)
    expected_values = [
        pd.Timedelta("0 days 00:00:00.990000"),
        pd.Timedelta("0 days 00:00:02.990000"),
    ]
    expected = DataFrame(
        {"value": expected_values},
        index=Index([1, 2], name="group"),
    )
    tm.assert_frame_equal(observed, expected)
def test_columns_groupby_quantile():
    # GH 33795
    # Grouping along the column axis should yield one quantile block per
    # column group while preserving the original row index.
    frame = DataFrame(
        np.arange(12).reshape(3, -1),
        index=list("XYZ"),
        columns=pd.Series(list("ABAB"), name="col"),
    )
    observed = frame.groupby("col", axis=1).quantile(q=[0.8, 0.2])
    expected_columns = pd.MultiIndex.from_tuples(
        [("A", 0.8), ("A", 0.2), ("B", 0.8), ("B", 0.2)], names=["col", None]
    )
    expected = DataFrame(
        [
            [1.6, 0.4, 2.6, 1.4],
            [5.6, 4.4, 6.6, 5.4],
            [9.6, 8.4, 10.6, 9.4],
        ],
        index=list("XYZ"),
        columns=expected_columns,
    )
    tm.assert_frame_equal(observed, expected)
| 33.34139 | 88 | 0.561707 |
ba073c1d9e85bb876647d0880ee9d2619f03518b | 3,875 | py | Python | google-datacatalog-sqlserver-connector/tests/google/datacatalog_connectors/sqlserver/scrape/metadata_scraper_test.py | brucearctor/datacatalog-connectors-rdbms | 7ff5dc858ea7aa21486343304fc281692480cdb8 | [
"Apache-2.0"
] | 46 | 2020-04-27T21:55:50.000Z | 2022-02-06T04:34:06.000Z | google-datacatalog-sqlserver-connector/tests/google/datacatalog_connectors/sqlserver/scrape/metadata_scraper_test.py | brucearctor/datacatalog-connectors-rdbms | 7ff5dc858ea7aa21486343304fc281692480cdb8 | [
"Apache-2.0"
] | 45 | 2020-05-20T21:09:04.000Z | 2022-03-24T00:14:30.000Z | google-datacatalog-sqlserver-connector/tests/google/datacatalog_connectors/sqlserver/scrape/metadata_scraper_test.py | brucearctor/datacatalog-connectors-rdbms | 7ff5dc858ea7aa21486343304fc281692480cdb8 | [
"Apache-2.0"
] | 47 | 2020-05-02T14:48:06.000Z | 2022-03-28T22:12:22.000Z | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest.mock import patch, Mock
from google.datacatalog_connectors.sqlserver.scrape import metadata_scraper
from google.datacatalog_connectors.commons_test import utils
class MetadataScraperTestCase(unittest.TestCase):
    """Tests for the SQL Server MetadataScraper.

    Each test patches the rdbms MetadataNormalizer plus the data source
    (pandas CSV reader or pyodbc connection) so no real database or file is
    touched.  Note: @patch decorators are applied bottom-up, so the mock for
    the decorator closest to the method arrives first in the argument list.
    """

    # Directory of this test module; used to locate the JSON fixtures.
    __MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
    # Package path of the shared rdbms scrape module being patched.
    __SCRAPE_PACKAGE = 'google.datacatalog_connectors.rdbms.scrape'

    @patch('pandas.read_csv')
    @patch('{}.metadata_normalizer.MetadataNormalizer'
           '.normalize'.format(__SCRAPE_PACKAGE))
    def test_scrape_schemas_metadata_with_csv_should_return_objects(
            self, normalize, read_csv):  # noqa
        """Scraping from a CSV path should yield the normalized metadata."""
        metadata = \
            utils.Utils.convert_json_to_object(
                self.__MODULE_PATH,
                'metadata.json')
        # Both the CSV read and the normalization step return the fixture.
        read_csv.return_value = metadata
        normalize.return_value = metadata

        scraper = metadata_scraper.MetadataScraper()
        schemas_metadata = scraper.scrape({}, csv_path='csv')

        self.assertEqual(1, len(schemas_metadata))

    @patch('pyodbc.connect')
    @patch('{}.metadata_normalizer.MetadataNormalizer'
           '.normalize'.format(__SCRAPE_PACKAGE))
    def test_scrape_schemas_metadata_with_credentials_should_return_objects(
            self, normalize, connect):  # noqa
        """Scraping over a (mocked) pyodbc connection should succeed."""
        metadata = \
            utils.Utils.convert_json_to_object(
                self.__MODULE_PATH,
                'metadata.json')

        # Fake connection -> cursor chain with fixture rows/description.
        con = Mock()
        connect.return_value = con
        cursor = Mock()
        con.cursor.return_value = cursor
        cursor.fetchall.return_value = \
            utils.Utils.convert_json_to_object(
                self.__MODULE_PATH,
                'rows.json')
        cursor.description =\
            utils.Utils.convert_json_to_object(
                self.__MODULE_PATH,
                'description.json')

        normalize.return_value = metadata

        scraper = metadata_scraper.MetadataScraper()
        schemas_metadata = scraper.scrape({},
                                          connection_args={
                                              'database': 'db',
                                              'host': 'mysql_host',
                                              'user': 'dbc',
                                              'pass': 'dbc'
                                          })

        self.assertEqual(1, len(schemas_metadata))
        self.assertEqual(connect.call_count, 1)

    @patch('pyodbc.connect')
    @patch('{}.metadata_normalizer.MetadataNormalizer'
           '.normalize'.format(__SCRAPE_PACKAGE))
    def test_scrape_schemas_metadata_on_exception_should_re_raise(
            self, normalize, connect):  # noqa
        """A connection failure must propagate and skip normalization."""
        connect.side_effect = Exception('Error when connecting to Server')

        scraper = metadata_scraper.MetadataScraper()

        self.assertRaises(Exception,
                          scraper.scrape, {},
                          connection_args={
                              'database': 'db',
                              'host': 'mysql_host',
                              'user': 'dbc',
                              'pass': 'dbc'
                          })

        self.assertEqual(connect.call_count, 1)
        self.assertEqual(normalize.call_count, 0)
37a3c517e3d2c41653bd698443f6508040b57312 | 6,724 | py | Python | testing/test_split_dataset.py | AshkanTaghipour/ivadomed | 84c4e01831265b311c7b053ffdb19fb393fb135d | [
"MIT"
] | null | null | null | testing/test_split_dataset.py | AshkanTaghipour/ivadomed | 84c4e01831265b311c7b053ffdb19fb393fb135d | [
"MIT"
] | null | null | null | testing/test_split_dataset.py | AshkanTaghipour/ivadomed | 84c4e01831265b311c7b053ffdb19fb393fb135d | [
"MIT"
] | null | null | null | import os
import csv
import json
import shutil
import pytest
import numpy as np
from ivadomed.loader import utils as imed_loader_utils
BIDS_PATH = 'bids'
LOG_PATH = 'log'
N = 200
N_CENTERS = 5
@pytest.mark.parametrize('split_params', [{
"fname_split": None,
"random_seed": 6,
"center_test": ['0'],
"method": "per_center",
"train_fraction": 0.6,
"test_fraction": 0.2
}, {
"fname_split": None,
"random_seed": 6,
"center_test": [],
"method": "per_center",
"train_fraction": 0.75,
"test_fraction": 0.25
}])
def load_dataset(split_params):
patient_mapping = create_tsvfile()
create_jsonfile()
# Create log path
if not os.path.isdir(LOG_PATH):
os.mkdir(LOG_PATH)
train, val, test = imed_loader_utils.get_subdatasets_subjects_list(split_params, BIDS_PATH, LOG_PATH)
return train, val, test, patient_mapping
@pytest.mark.parametrize('split_params', [{
    "fname_split": None,
    "random_seed": 6,
    "center_test": ['0'],
    "method": "per_center",
    "train_fraction": 0.6,
    "test_fraction": 0.2
}])
def test_per_center_testcenter_0(split_params):
    """Per-center split with an explicit test center keeps center '0' in test."""
    train, _, test, patient_mapping = load_dataset(split_params)
    # The train fraction applies to the subjects left after removing the
    # test centers.
    assert len(train) == round(0.6 * (N - len(test)))
    # Every test subject must come from the requested center.
    assert all(patient_mapping[sub]['center'] == '0' for sub in test)
@pytest.mark.parametrize('split_params', [{
    "fname_split": None,
    "random_seed": 6,
    "center_test": [],
    "method": "per_center",
    "train_fraction": 0.2,
    "test_fraction": 0.4
}])
def test_per_center_without_testcenter(split_params):
    """Without explicit test centers, centers are sampled by test_fraction."""
    train, _, test, patient_mapping = load_dataset(split_params)

    test_centers = {patient_mapping[sub]['center'] for sub in test}
    training_centers = {patient_mapping[sub]['center'] for sub in train}

    # The test split should hold test_fraction of the available centers.
    assert len(test_centers) == round(N_CENTERS * 0.4)
    # No center may appear in both the training and the test splits.
    assert training_centers.isdisjoint(test_centers)
@pytest.mark.parametrize('split_params', [{
    "fname_split": None,
    "random_seed": 6,
    "center_test": [],
    "method": "per_patient",
    "train_fraction": 0.45,
    "test_fraction": 0.35
}])
def test_per_patient(split_params):
    """Per-patient split honours train/test fractions within one subject."""
    train, _, test, _ = load_dataset(split_params)
    assert np.isclose(len(train), round(N * 0.45), atol=1)
    assert np.isclose(len(test), round(N * 0.35), atol=1)
@pytest.mark.parametrize('split_params', [{
    "fname_split": None,
    "random_seed": 6,
    "center_test": [],
    "method": "per_patient",
    "train_fraction": 0.6,
    "test_fraction": 0
}])
def test_per_patient_no_test_fraction(split_params):
    """Per-patient split with test_fraction=0 puts everything in train/val.

    Renamed from ``test_per_patient``: the module already defines a function
    with that name a few lines above, so this redefinition silently shadowed
    it and pytest only ever collected one of the two tests.
    """
    train, val, test, patient_mapping = load_dataset(split_params)
    assert np.isclose(len(train), round(N * 0.6), atol=1)
    assert np.isclose(len(val), round(N * 0.4), atol=1)
    assert np.isclose(len(test), 0, atol=1)
def check_balance(train, val, test, patient_mapping):
    """Assert each split holds near-equal counts of the three disabilities."""
    for split in (train, val, test):
        counts = {'0': 0, '1': 0, '2': 0}
        for subject in split:
            counts[patient_mapping[subject]['disability']] += 1
        # Every pair of disability counts may differ by at most one subject.
        for left, right in (('0', '1'), ('1', '2'), ('0', '2')):
            assert np.isclose(counts[left], counts[right], atol=1)
@pytest.mark.parametrize('split_params', [{
    "fname_split": None,
    "random_seed": 6,
    "center_test": [],
    "balance": "disability",
    "method": "per_patient",
    "train_fraction": 0.45,
    "test_fraction": 0.35
}])
def test_per_patient_balance(split_params):
    """Balanced per-patient split keeps fractions and disability balance."""
    train, val, test, patient_mapping = load_dataset(split_params)
    expected_train = round(N * 0.45)
    expected_test = round(N * 0.35)
    assert np.isclose(len(train), expected_train, atol=1)
    assert np.isclose(len(test), expected_test, atol=1)
    check_balance(train, val, test, patient_mapping)
@pytest.mark.parametrize('split_params', [{
    "fname_split": None,
    "random_seed": 6,
    "center_test": ['0'],
    "balance": "disability",
    "method": "per_center",
    "train_fraction": 0.4,
    "test_fraction": 0.2
}])
def test_per_center_balance(split_params):
    """Balanced per-center split: proportions, test center, and cleanup."""
    train, val, test, patient_mapping = load_dataset(split_params)
    # The train fraction is taken from the subjects outside the test centers.
    assert np.isclose(len(train), round(0.4 * (N - len(test))), atol=1)
    # Only center '0' may contribute test subjects.
    assert all(patient_mapping[sub]['center'] == '0' for sub in test)
    check_balance(train, val, test, patient_mapping)
    # Last test in the module: remove the synthetic dataset folders.
    delete_test_folders()
def create_tsvfile():
    """Write a synthetic participants.tsv and return {patient: attributes}.

    Each of the N subjects gets a disability (cycling over 0/1/2) and a
    center (cycling over N_CENTERS values); the same assignment is returned
    as a mapping so tests can verify the split afterwards.
    """
    if not os.path.isdir(BIDS_PATH):
        os.mkdir(BIDS_PATH)

    patient_mapping = {}
    rows = []
    for index in range(N):
        patient_id = 'sub-00' + str(index)
        disability_id = str(index % 3)
        center_id = str(index % N_CENTERS)
        rows.append([patient_id, disability_id, center_id])
        patient_mapping[patient_id] = {
            'disability': disability_id,
            'center': center_id,
        }

    # Save participants.tsv, sorted by participant id.
    with open(os.path.join(BIDS_PATH, "participants.tsv"), 'w') as tsv_file:
        writer = csv.writer(tsv_file, delimiter='\t', lineterminator='\n')
        writer.writerow(["participant_id", "disability", "institution_id"])
        for row in sorted(rows):
            writer.writerow(row)

    return patient_mapping
def create_jsonfile():
    """Write a minimal dataset_description.json into the BIDS folder."""
    dataset_description = {u'Name': 'Test', u'BIDSVersion': '1.2.1'}
    description_path = os.path.join(BIDS_PATH, "dataset_description.json")
    # The context manager closes the file; no explicit close() is needed.
    with open(description_path, 'w') as outfile:
        outfile.write(json.dumps(dataset_description, indent=2, sort_keys=True))
shutil.rmtree(BIDS_PATH)
shutil.rmtree(LOG_PATH)
| 30.425339 | 105 | 0.64307 |
279fe064210fdbceaf174bb71d4940b0f3dcaa65 | 2,212 | py | Python | kibom/xml_writer.py | optimiseddesign/KiBoM | 499cd979e82a2242f78b94569df04966234104f9 | [
"MIT"
] | 274 | 2016-05-17T07:57:33.000Z | 2022-03-30T15:58:52.000Z | kibom/xml_writer.py | optimiseddesign/KiBoM | 499cd979e82a2242f78b94569df04966234104f9 | [
"MIT"
] | 141 | 2016-08-01T19:04:40.000Z | 2022-03-31T14:29:00.000Z | kibom/xml_writer.py | optimiseddesign/KiBoM | 499cd979e82a2242f78b94569df04966234104f9 | [
"MIT"
] | 91 | 2016-05-15T11:26:26.000Z | 2022-02-23T16:02:35.000Z | """
Write BoM out to an XML file
filename = path to output file (must be a .xml)
groups = [list of ComponentGroup groups]
net = netlist object
headings = [list of headings to display in the BoM file]
prefs = BomPref object
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from xml.etree import ElementTree
from xml.dom import minidom
def WriteXML(filename, groups, net, headings, head_names, prefs):
    """Write the BoM out to an XML file.

    filename -- path to the output file (must end in ".xml")
    groups -- list of ComponentGroup objects
    net -- netlist object (supplies source/version/date/tool strings)
    headings -- column keys passed to each group's getRow()
    head_names -- display names for the columns (become XML attribute names)
    prefs -- BomPref object (boards, pcbConfig, ignoreDNF)

    Returns True on success, False if the filename has the wrong extension.
    """
    if not filename.endswith(".xml"):
        return False

    nGroups = len(groups)
    nTotal = sum([g.getCount() for g in groups])
    nFitted = sum([g.getCount() for g in groups if g.isFitted()])
    nBuild = nFitted * prefs.boards

    attrib = {}
    attrib['Schematic_Source'] = net.getSource()
    attrib['Schematic_Version'] = net.getVersion()
    attrib['Schematic_Date'] = net.getSheetDate()
    attrib['PCB_Variant'] = ', '.join(prefs.pcbConfig)
    attrib['BOM_Date'] = net.getDate()
    attrib['KiCad_Version'] = net.getTool()
    attrib['Component_Groups'] = str(nGroups)
    attrib['Component_Count'] = str(nTotal)
    attrib['Fitted_Components'] = str(nFitted)
    attrib['Number_of_PCBs'] = str(prefs.boards)
    attrib['Total_Components'] = str(nBuild)

    # Bugfix: the previous call also passed encoding='utf-8' here, which
    # ElementTree.Element treats as an extra *attribute* — the root element
    # ended up with a bogus encoding="utf-8" attribute. Text encoding is
    # handled below when the tree is serialized.
    xml = ElementTree.Element('KiCad_BOM', attrib=attrib)

    for group in groups:
        if prefs.ignoreDNF and not group.isFitted():
            continue

        row = group.getRow(headings)

        attrib = {}

        for i, h in enumerate(head_names):
            # Sanitize the heading into a valid XML attribute name.
            h = h.replace(' ', '_')  # Replace spaces, xml no likey
            h = h.replace('"', '')
            h = h.replace("'", '')
            attrib[h] = str(row[i])

        ElementTree.SubElement(xml, "group", attrib=attrib)

    with open(filename, "w", encoding="utf-8") as output:
        out = ElementTree.tostring(xml, encoding="utf-8")
        # There is probably a better way to write the data to file (without so many encoding/decoding steps),
        # but toprettyxml() without specifying UTF-8 will chew up non-ASCII chars. Perhaps revisit if performance here
        # is ever a concern
        output.write(minidom.parseString(out).toprettyxml(indent="\t", encoding="utf-8").decode("utf-8"))

    return True
ca976332df60dc81023bebc845f38eaab1e60406 | 2,278 | py | Python | bstore/config.py | LEB-EPFL/bstore | 471a24b84f18c7efe0c3e52632fc14fa27611e50 | [
"BSD-3-Clause"
] | 5 | 2016-08-29T10:01:43.000Z | 2017-09-14T12:12:33.000Z | bstore/config.py | LEB-EPFL/bstore | 471a24b84f18c7efe0c3e52632fc14fa27611e50 | [
"BSD-3-Clause"
] | 63 | 2016-07-25T06:49:00.000Z | 2018-04-25T17:14:21.000Z | bstore/config.py | LEB-EPFL/bstore | 471a24b84f18c7efe0c3e52632fc14fa27611e50 | [
"BSD-3-Clause"
] | 1 | 2019-06-24T07:40:28.000Z | 2019-06-24T07:40:28.000Z | __bstore_Version__ = '1.3.0-dev'
"""__HDF_AtomID_Prefix__ : str
String that precedes all attributes marking dataset
identifiers in an HDF datastore.
"""
__HDF_AtomID_Prefix__ = 'SMLM_'
"""___HDF_Metadata_Prefix : str
String that precedes all attributes marking metadata elements in
an HDF datastore.
"""
__HDF_Metadata_Prefix__ = __HDF_AtomID_Prefix__ + 'Metadata_'
"""__Custom_Dir__ : str
The name of the directory containing customization files.
"""
__Custom_Dir__ = ['~', '.bstore']
"""__Plugin_Dir__ : str
The name of the directory containing B-Store plugins.
"""
__Plugin_Dir__ = __Custom_Dir__ + ['bsplugins']
"""FormatDefault : dict
The default mapping for converting between column header names
when using the ConvertHeader processor.
"""
__Format_Default__ = {}
__Format_Default__['x [nm]'] = 'x'
__Format_Default__['y [nm]'] = 'y'
__Format_Default__['z [nm]'] = 'z'
__Format_Default__['frame'] = 'frame'
__Format_Default__['uncertainty [nm]'] = 'precision'
__Format_Default__['intensity [photon]'] = 'photons'
__Format_Default__['offset [photon]'] = 'background'
__Format_Default__['loglikelihood'] = 'loglikelihood'
__Format_Default__['sigma [nm]'] = 'sigma'
__Format_Default__['dx [nm]'] = 'dx'
__Format_Default__['dy [nm]'] = 'dy'
__Format_Default__['length [frames]'] = 'length'
"""__Path_To_Test_Data__ : str
Path relative to the bstore project root directory that
contains the data for running the automated tests.
"""
__Path_To_Test_Data__ = '../bstore_test_files/'
"""__MM_PixelSize__ : str
Name of the field in the Micro-Manager metadata containing the pixel size.
"""
__MM_PixelSize__ = 'PixelSize_um'
"""__Registered_DatasetTypes__ : list of str
The list of datasetTypes currently recognized by B-Store.
"""
__Registered_DatasetTypes__ = ['Localizations']
"""__Verbose__ : bool
Controls how much detail is provided when errors occur.
"""
__Verbose__ = False
"""__Persistence_Key__ : str
The location in the HDF file where the HDFDatastore object's state is kept.
"""
__Persistence_Key__ = '/bstore'
| 29.205128 | 79 | 0.676471 |
03d5b121b252fb303b58849d9c63fda69f245482 | 135,572 | py | Python | lrs/tests/test_Statement.py | DamavandiKamali/ADL_LRS | b0a0f894de02976c69938b9e883fd7b05bbf6d30 | [
"Apache-2.0"
] | null | null | null | lrs/tests/test_Statement.py | DamavandiKamali/ADL_LRS | b0a0f894de02976c69938b9e883fd7b05bbf6d30 | [
"Apache-2.0"
] | null | null | null | lrs/tests/test_Statement.py | DamavandiKamali/ADL_LRS | b0a0f894de02976c69938b9e883fd7b05bbf6d30 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import base64
import uuid
import urllib.request, urllib.parse, urllib.error
import hashlib
from datetime import datetime, timedelta
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import utc
from django.conf import settings
from django.test.utils import override_settings
from ..models import Statement, Activity, Agent, Verb, SubStatement
from ..utils import retrieve_statement
from adl_lrs.views import register
class StatementTests(TestCase):
@classmethod
def setUpClass(cls):
    """Print the module name before running the class's tests."""
    print("\n" + __name__)
    super(StatementTests, cls).setUpClass()
def setUp(self):
    """Register two test users and record a start timestamp and a guid."""
    # (username, email, password, attribute-name suffix)
    accounts = [
        ("tester1", "test1@tester.com", "test", ""),
        ("tester2", "test2@tester.com", "test2", "2"),
    ]
    for username, email, password, suffix in accounts:
        setattr(self, "username" + suffix, username)
        setattr(self, "email" + suffix, email)
        setattr(self, "password" + suffix, password)
        setattr(self, "auth" + suffix,
                "Basic %s" % base64.b64encode("%s:%s" % (username, password)))
        form = {"username": username, "email": email,
                "password": password, "password2": password}
        self.client.post(reverse(register), form,
                         X_Experience_API_Version=settings.XAPI_VERSION)

    self.firstTime = str(datetime.utcnow().replace(tzinfo=utc).isoformat())
    self.guid1 = uuid.uuid4()
def bunchostmts(self):
    """Populate the LRS with a fixed batch of fixture statements.

    POSTs one seed statement (saved as self.existStmt) and then PUTs ten
    more (self.existStmt1..10) under self.guid1..10. After each PUT the
    statement's 'stored' timestamp is bumped by a known number of seconds
    so tests can rely on a deterministic stored-time ordering.
    """
    # Statement ids (guid1 is created in setUp) and context registrations.
    self.guid2 = uuid.uuid4()
    self.guid3 = uuid.uuid4()
    self.guid4 = uuid.uuid4()
    self.guid5 = uuid.uuid4()
    self.guid6 = uuid.uuid4()
    self.guid7 = uuid.uuid4()
    self.guid8 = uuid.uuid4()
    self.guid9 = uuid.uuid4()
    self.guid10 = str(uuid.uuid4())
    self.cguid1 = str(uuid.uuid4())
    self.cguid2 = str(uuid.uuid4())
    self.cguid3 = str(uuid.uuid4())
    self.cguid4 = str(uuid.uuid4())
    self.cguid5 = str(uuid.uuid4())
    self.cguid6 = str(uuid.uuid4())

    # Seed statement, POSTed so the LRS assigns its id; existStmt10 below
    # references it via a StatementRef.
    stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
                                "display": {"en-US": "created"}}, "object": {"id": "act:activity"},
                       "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
                       "authority": {"objectType": "Agent", "name": "tester1", "mbox": "mailto:test1@tester.com"}})
    response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
                                Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(response.status_code, 200)
    stmt_id = uuid.UUID(json.loads(response.content)[0])
    self.existStmt = Statement.objects.get(statement_id=stmt_id)
    self.exist_stmt_id = self.existStmt.statement_id

    # Full statement: activity with definition/extensions, result, context.
    self.existStmt1 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
                                           "display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
                                  "object": {"objectType": "Activity", "id": "act:foogie",
                                             "definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
                                                            "description": {"en-US": "testdesc2", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
                                                            "interactionType": "fill-in", "correctResponsesPattern": ["answer"],
                                                            "extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
                                  "result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
                                             "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
                                  "context": {"registration": self.cguid1, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
                                              "revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
                                                                                                                          "ext:ckey2": "cval2"}}})

    # Same activity id as existStmt1 but different actor and definition.
    self.existStmt2 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
                                           "display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@t.com"},
                                  "object": {"objectType": "Activity", "id": "act:foogie",
                                             "definition": {"name": {"en-US": "testname3", "en-GB": "altname"},
                                                            "description": {"en-US": "testdesc3", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
                                                            "interactionType": "fill-in", "correctResponsesPattern": ["answers"],
                                                            "extensions": {"ext:key11": "value11", "ext:key22": "value22", "ext:key33": "value33"}}},
                                  "result": {"score": {"scaled": .75}, "completion": True, "success": True, "response": "shouted",
                                             "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
                                  "context": {"registration": self.cguid2, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
                                              "revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey11": "cval11",
                                                                                                                          "ext:ckey22": "cval22"}}})

    # Context includes an instructor agent (with objectType).
    self.existStmt3 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
                                           "display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
                                  "object": {"objectType": "Activity", "id": "act:foogals",
                                             "definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
                                                            "interactionType": "fill-in", "correctResponsesPattern": ["answers"],
                                                            "extensions": {"ext:key111": "value111", "ext:key222": "value222", "ext:key333": "value333"}}},
                                  "result": {"score": {"scaled": .79}, "completion": True, "success": True, "response": "shouted",
                                             "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
                                  "context": {"registration": self.cguid3, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
                                              "revision": "food", "platform": "bard", "language": "en-US",
                                              "instructor": {"objectType": "Agent", "name": "bob", "mbox": "mailto:bob@bob.com"},
                                              "extensions": {"ext:ckey111": "cval111", "ext:ckey222": "cval222"}}})

    # Instructor given without an explicit objectType.
    self.existStmt4 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
                                           "display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
                                  "object": {"objectType": "Activity", "id": "act:foogal",
                                             "definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
                                                            "interactionType": "fill-in", "correctResponsesPattern": ["answers"],
                                                            "extensions": {"ext:key111": "value111", "ext:key222": "value222", "ext:key333": "value333"}}},
                                  "result": {"score": {"scaled": .79}, "completion": True, "success": True, "response": "shouted",
                                             "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
                                  "context": {"registration": self.cguid4, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
                                              "revision": "food", "platform": "bard", "language": "en-US", "instructor": {"name": "bill", "mbox": "mailto:bill@bill.com"},
                                              "extensions": {"ext:ckey111": "cval111", "ext:ckey222": "cval222"}}})

    # Statements whose object is an Agent rather than an Activity.
    self.existStmt5 = json.dumps({"object": {"objectType": "Agent", "name": "jon", "mbox": "mailto:jon@jon.com"},
                                  "verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
                                  "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}})

    self.existStmt6 = json.dumps({"actor": {"objectType": "Agent", "name": "max", "mbox": "mailto:max@max.com"},
                                  "object": {"id": "act:test_activity"}, "verb": {"id": "http://example.com/verbs/created",
                                                                                 "display": {"en-US": "created"}}})

    self.existStmt7 = json.dumps({"object": {"objectType": "Agent", "name": "max", "mbox": "mailto:max@max.com"},
                                  "verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
                                  "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}})

    self.existStmt8 = json.dumps({"object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"},
                                  "verb": {"id": "http://example.com/verbs/missed", "display": {"en-US": "missed"}},
                                  "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}})

    # Statement whose object is a SubStatement with its own result/context.
    self.existStmt9 = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:sub@sub.com"},
                                  "verb": {"id": "http://example.com/verbs/missed"}, "object": {"objectType": "SubStatement",
                                                                                               "actor": {"objectType": "Agent", "mbox": "mailto:ss@ss.com"}, "verb": {"id": "verb:verb/url/nested"},
                                                                                               "object": {"objectType": "Activity", "id": "act:testex.com"}, "result": {"completion": True, "success": True,
                                                                                                                                                                        "response": "kicked"}, "context": {"registration": self.cguid5,
                                                                                                                                                                                                           "contextActivities": {"other": {"id": "act:NewActivityID"}}, "revision": "foo", "platform": "bar",
                                                                                                                                                                                                           "language": "en-US", "extensions": {"ext:k1": "v1", "ext:k2": "v2"}}}})

    # Statement referencing the seed statement via a StatementRef.
    self.existStmt10 = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:ref@ref.com"},
                                   "verb": {"id": "http://example.com/verbs/missed"}, "object": {"objectType": "StatementRef",
                                                                                                 "id": str(self.exist_stmt_id)}})

    # Put statements
    # Each PUT is followed by a direct DB update that sets the statement's
    # 'stored' time a fixed number of seconds in the future, giving the
    # fixtures a deterministic stored-time ordering for "since"/"until"
    # style queries.
    param = {"statementId": str(self.guid1)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt1
    self.putresponse1 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse1.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=2)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid1).update(stored=time)

    param = {"statementId": str(self.guid3)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt3
    self.putresponse3 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse3.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=3)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid3).update(stored=time)

    param = {"statementId": str(self.guid4)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt4
    self.putresponse4 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse4.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=4)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid4).update(stored=time)

    # Midpoint timestamp tests can use to split the fixture timeline.
    self.secondTime = str(
        (datetime.utcnow() + timedelta(seconds=4)).replace(tzinfo=utc).isoformat())

    param = {"statementId": str(self.guid2)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt2
    self.putresponse2 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse2.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=6)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid2).update(stored=time)

    param = {"statementId": str(self.guid5)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt5
    self.putresponse5 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse5.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=7)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid5).update(stored=time)

    param = {"statementId": str(self.guid6)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt6
    self.putresponse6 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse6.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=8)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid6).update(stored=time)

    param = {"statementId": str(self.guid7)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt7
    self.putresponse7 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse7.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=9)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid7).update(stored=time)

    param = {"statementId": str(self.guid8)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt8
    self.putresponse8 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse8.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid8).update(stored=time)

    param = {"statementId": str(self.guid9)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt9
    self.putresponse9 = self.client.put(path, stmt_payload, content_type="application/json",
                                        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse9.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid9).update(stored=time)

    param = {"statementId": str(self.guid10)}
    path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
    stmt_payload = self.existStmt10
    self.putresponse10 = self.client.put(path, stmt_payload, content_type="application/json",
                                         Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(self.putresponse10.status_code, 204)
    time = retrieve_statement.convert_to_datetime_object(
        str((datetime.utcnow() + timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
    stmt = Statement.objects.filter(
        statement_id=self.guid10).update(stored=time)
def test_invalid_result_fields(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogie"},
"result": {"bad": "fields", "foo": "bar", "score": {"scaled": .85}, "completion": True, "success": True,
"response": "kicked", "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1",
"ext:key2": "value2"}}})
resp = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 400)
self.assertEqual(
resp.content, 'Invalid field(s) found in Result - bad, foo')
def test_invalid_context_fields(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogals",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222", "ext:key333": "value333"}}},
"result": {"score": {"scaled": .79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
"context": {"contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "bad": "foo", "platform": "bard", "language": "en-US",
"instructor": {"objectType": "Agent", "name": "bob", "mbox": "mailto:bob@bob.com"},
"extensions": {"ext:ckey111": "cval111", "ext:ckey222": "cval222"}}})
resp = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 400)
self.assertEqual(
resp.content, 'Invalid field(s) found in Context - bad')
def test_post_with_no_valid_params(self):
# Error will be thrown in statements class
resp = self.client.post(reverse('lrs:statements'), {"feet": "yes", "hands": {"id": "http://example.com/test_post"}},
content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 400)
def test_post(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
act = Activity.objects.get(activity_id="act:test_post")
self.assertEqual(act.activity_id, "act:test_post")
agent = Agent.objects.get(mbox="mailto:t@t.com")
self.assertEqual(agent.name, "bob")
def test_post_wrong_crp_type(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
"description": {"en-US": "testdesc2", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": "wrong"}},
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content, 'Activity definition correctResponsesPattern is not a properly formatted array')
def test_post_wrong_choice_type(self):
stmt = json.dumps(
{"verb": {"id": "http://example.com/verbs/created"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
"description": {"en-US": "testdesc2", "en-GB": "altdesc"},
"type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "choice", "correctResponsesPattern": ["a1[,]a3[,]a6[,]a7"],
"choices": "wrong"}},
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content, 'Activity definition choices is not a properly formatted array')
def test_openid(self):
stmt = json.dumps({'object': {'objectType': 'Agent', 'name': 'lulu', 'openid': 'id:luluid'},
'verb': {"id": "verb:verb/url"}, 'actor': {'objectType': 'Agent', 'mbox': 'mailto:t@t.com'}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
agent = Agent.objects.get(name='lulu')
self.assertEqual(agent.openid, 'id:luluid')
def test_invalid_actor_fields(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob", "bad": "blah",
"foo": "bar"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'Invalid field(s) found in Agent/Group - bad, foo')
def test_invalid_activity_fields(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post", "bad": "foo", "foo": "bar"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
"Invalid field(s) found in Activity - bad, foo")
def test_blank_object_definition(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:def@def.com", "name": "D"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {
"definition": { },
"id": "http://object.com/",
"objectType": "Activity"
}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
agent = Agent.objects.get(mbox="mailto:def@def.com")
self.assertEqual(agent.name, "D")
get_response = self.client.get(reverse('lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("definition", rsp)
json_object = json.loads(rsp)
jdef = json_object['statements'][0]['object']['definition']
self.assertEqual(jdef, {})
param = {"format": 'canonical'}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
self.assertNotIn('definition', get_response.content)
def test_blank_score(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogie2"},
"result": {"score": {}, "completion": True, "success": True,
"response": "kicked", "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1",
"ext:key2": "value2"}}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
get_response = self.client.get(reverse('lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("score", rsp)
json_object = json.loads(rsp)
jscore = json_object['statements'][0]['result']['score']
self.assertEqual(jscore, {})
param = {"format": 'canonical'}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
self.assertNotIn('score', get_response.content)
def test_blank_result(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:foo@foo.com"},
"object": {"objectType": "Activity", "id": "act:foop"},
"result": {}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
get_response = self.client.get(reverse('lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("result", rsp)
json_object = json.loads(rsp)
jresult = json_object['statements'][0]['result']
self.assertEqual(jresult, {})
param = {"format": 'canonical'}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
self.assertNotIn('result', get_response.content)
def test_blank_context(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foobaz"},
"context": {}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
get_response = self.client.get(reverse('lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("context", rsp)
json_object = json.loads(rsp)
jcontext = json_object['statements'][0]['context']
self.assertEqual(jcontext, {})
param = {"format": 'canonical'}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
self.assertNotIn('result', get_response.content)
def test_invalid_activity_def_fields(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {'objectType': 'Activity', 'id': 'act:food',
'definition': {'bad': 'foo', 'name': {'en-FR': 'testname2', 'en-US': 'testnameEN'}, 'description': {'en-CH': 'testdesc2',
'en-GB': 'testdescGB'}, 'type': 'type:course', 'interactionType': 'intType2', 'extensions': {'ext:key1': 'value1',
'ext:key2': 'value2', 'ext:key3': 'value3'}}}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'Invalid field(s) found in Activity definition - bad')
def test_post_wrong_duration(self):
stmt = json.dumps({"actor": {'name': 'jon',
'mbox': 'mailto:jon@example.com'}, 'verb': {"id": "verb:verb/url"}, "object": {'id': 'act:activity13'},
"result": {'completion': True, 'success': True, 'response': 'yes', 'duration': 'wrongduration',
'extensions': {'ext:key1': 'value1', 'ext:key2': 'value2'}}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content, "Error with result duration")
def test_post_stmt_ref_no_existing_stmt(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:ref@ref.com"},
"verb": {"id": "http://example.com/verbs/missed"}, "object": {"objectType": "StatementRef",
"id": "12345678-1234-5678-1234-567812345678"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
def test_post_with_actor(self):
stmt = json.dumps({"actor": {"mbox": "mailto:mr.t@example.com"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
Agent.objects.get(mbox="mailto:mr.t@example.com")
def test_context_bad_language(self):
stmt = json.dumps({"actor": {"mbox": "mailto:mr.t@example.com"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:i.pity.the.fool"},
"context":{"language": "thisisnotalanguage"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
def test_list_post(self):
stmts = json.dumps([{"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_list_post"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}},
{"verb": {"id": "http://example.com/verbs/failed", "display": {"en-GB": "failed"}},
"object": {"id": "act:test_list_post1"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}}])
response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
activity1 = Activity.objects.get(activity_id="act:test_list_post")
activity2 = Activity.objects.get(activity_id="act:test_list_post1")
stmt1 = Statement.objects.get(object_activity=activity1)
stmt2 = Statement.objects.get(object_activity=activity2)
verb1 = Verb.objects.get(id=stmt1.verb.id)
verb2 = Verb.objects.get(id=stmt2.verb.id)
lang_map1 = verb1.canonical_data['display']
lang_map2 = verb2.canonical_data['display']
self.assertEqual(response.status_code, 200)
self.assertEqual(stmt1.verb.verb_id, "http://example.com/verbs/passed")
self.assertEqual(stmt2.verb.verb_id, "http://example.com/verbs/failed")
self.assertEqual(list(lang_map1.keys())[0], "en-US")
self.assertEqual(list(lang_map1.values())[0], "passed")
self.assertEqual(list(lang_map2.keys())[0], "en-GB")
self.assertEqual(list(lang_map2.values())[0], "failed")
def test_put(self):
guid = uuid.uuid4()
param = {"statementId": str(guid)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 204)
stmt = Statement.objects.get(statement_id=guid)
act = Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:t@t.com")
self.assertEqual(stmt.authority.name, "tester1")
self.assertEqual(stmt.authority.mbox, "mailto:test1@tester.com")
self.assertEqual(stmt.version, '1.0.0')
self.assertEqual(stmt.verb.verb_id, "http://example.com/verbs/passed")
def test_put_1_0_0(self):
guid = uuid.uuid4()
param = {"statementId": str(guid)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(putResponse.status_code, 204)
stmt = Statement.objects.get(statement_id=guid)
act = Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:t@t.com")
self.assertEqual(stmt.authority.name, "tester1")
self.assertEqual(stmt.authority.mbox, "mailto:test1@tester.com")
self.assertEqual(stmt.version, "1.0.0")
self.assertEqual(stmt.verb.verb_id, "http://example.com/verbs/passed")
def test_put_id_in_stmt(self):
guid = uuid.uuid4()
stmt = json.dumps({"id": str(guid), "verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 400)
def test_put_id_in_both_same(self):
guid = uuid.uuid4()
param = {"statementId": str(guid)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
stmt = json.dumps({"id": str(guid), "verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 204)
stmt = Statement.objects.get(statement_id=guid)
act = Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:t@t.com")
self.assertEqual(stmt.authority.name, "tester1")
self.assertEqual(stmt.authority.mbox, "mailto:test1@tester.com")
self.assertEqual(stmt.version, '1.0.0')
self.assertEqual(stmt.verb.verb_id, "http://example.com/verbs/passed")
def test_put_id_in_both_different(self):
guid1 = str(uuid.uuid4())
guid2 = str(uuid.uuid4())
param = {"statementId": guid1}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
stmt = json.dumps({"id": guid2, "verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 400)
self.assertEqual(
putResponse.content, "Error -- statements - method = PUT, param and body ID both given, but do not match")
def test_put_with_substatement(self):
con_guid = str(uuid.uuid4())
st_guid = str(uuid.uuid4())
param = {"statementId": st_guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:sass@sass.com"},
"verb": {"id": "verb:verb/url/tested"}, "object": {"objectType": "SubStatement",
"actor": {"objectType": "Agent", "mbox": "mailto:ss@ss.com"}, "verb": {"id": "verb:verb/url/nested"},
"object": {"objectType": "Activity", "id": "act:testex.com"}, "result": {"completion": True, "success": True,
"response": "kicked"}, "context": {"registration": con_guid,
"contextActivities": {"other": {"id": "act:NewActivityID"}}, "revision": "foo", "platform": "bar",
"language": "en-US", "extensions": {"ext:k1": "v1", "ext:k2": "v2"}}}})
response = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
get_response = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("objectType", rsp)
self.assertIn("SubStatement", rsp)
self.assertIn("actor", rsp)
self.assertIn("ss@ss.com", rsp)
self.assertIn("verb", rsp)
self.assertIn("verb:verb/url/nested", rsp)
self.assertIn("Activity", rsp)
self.assertIn("act:testex.com", rsp)
self.assertIn("result", rsp)
self.assertIn("completion", rsp)
self.assertIn("success", rsp)
self.assertIn("response", rsp)
self.assertIn("kicked", rsp)
self.assertIn("context", rsp)
self.assertIn(con_guid, rsp)
self.assertIn("contextActivities", rsp)
self.assertIn("other", rsp)
self.assertIn("revision", rsp)
self.assertIn("foo", rsp)
self.assertIn("platform", rsp)
self.assertIn("bar", rsp)
self.assertIn("language", rsp)
self.assertIn("en-US", rsp)
self.assertIn("extensions", rsp)
self.assertIn("ext:k1", rsp)
self.assertIn("v1", rsp)
self.assertIn("ext:k2", rsp)
self.assertIn("v2", rsp)
def test_no_content_put(self):
guid = str(uuid.uuid4())
param = {"statementId": guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
stmt = json.dumps({})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 400)
def test_existing_stmtID_put(self):
guid = str(uuid.uuid4())
exist_stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:activity"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
path = "%s?%s" % (reverse('lrs:statements'),
urllib.parse.urlencode({"statementId": guid}))
response = self.client.put(path, exist_stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
param = {"statementId": guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_existing_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 409)
def test_missing_stmtID_put(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
response = self.client.put(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn(
response.content, "Error -- statements - method = PUT, but no statementId parameter or ID given in statement")
def test_get(self):
self.bunchostmts()
param = {"statementId": str(self.guid1)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
getResponse = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
rsp = getResponse.content
self.assertIn(str(self.guid1), rsp)
self.assertIn('content-length', getResponse._headers)
def test_get_no_params(self):
self.bunchostmts()
getResponse = self.client.get(reverse('lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
self.assertIn('content-length', getResponse._headers)
rsp = json.loads(getResponse.content)
self.assertEqual(len(rsp['statements']), 11)
def test_head(self):
self.bunchostmts()
param = {"statementId": str(self.guid1)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
head_resp = self.client.head(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(head_resp.status_code, 200)
self.assertEqual(head_resp.content, '')
self.assertIn('content-length', head_resp._headers)
def test_get_no_existing_ID(self):
param = {"statementId": "aaaaaa"}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
getResponse = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 400)
def test_get_no_statementid(self):
self.bunchostmts()
getResponse = self.client.get(reverse(
'lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
jsn = json.loads(getResponse.content)
self.assertEqual(len(jsn["statements"]), 11)
self.assertIn('content-length', getResponse._headers)
def test_head_no_statementid(self):
self.bunchostmts()
head_resp = self.client.head(reverse(
'lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(head_resp.status_code, 200)
self.assertEqual(head_resp.content, '')
self.assertIn('content-length', head_resp._headers)
    # Several activities are PUT - contextActivities create 3 more
def test_number_of_activities(self):
self.bunchostmts()
acts = len(Activity.objects.all())
self.assertEqual(9, acts)
    def test_timeout_snafu(self):
        """Regression test (payloads captured from a real client): two statements
        sharing the same activity id but with different definitions should both
        POST successfully.

        NOTE(review): payloads are kept verbatim from the originating bug report;
        only the status codes are asserted.
        """
        # First statement: plain activity with name/description language maps.
        stmt = json.dumps({
            "timestamp": "2013-11-05T07:33:49.512119+00:00",
            "object": {
                "definition": {
                    "name": {
                        "en-US": "news.google.com",
                        "ja": "news.google.com"
                    },
                    "description": {
                        "en-US": "",
                        "ja": ""
                    }
                },
                "id": "http://garewelswe.com/",
                "objectType": "Activity"
            },
            "authority": {
                "mbox": "mailto:kazutaka_kamiya@test.local",
                "name": "adllrs",
                "objectType": "Agent"
            },
            "verb": {
                "id": "http://example.com/verbs/experienced",
                "display": {
                    "en-US": "experienced"
                }
            },
            "actor": {
                "openid": "http://test.local/PEab76617d1d21d725d358a7ad5231bd6e",
                "name": "dev2-001",
                "objectType": "Agent"
            },
            "id": "9cb78e42-45ec-11e3-b8dc-0af904863508"
        })
        response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 200)
        # Second statement: same activity id, now an interaction definition, plus
        # result and contextActivities referencing the same activity.
        stmt = json.dumps({
            "timestamp": "2013-11-08T08:41:55.985064+00:00",
            "object": {
                "definition": {
                    "interactionType": "fill-in",
                    "correctResponsesPattern": [],
                    "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
                    "name": {
                        "ja": "SCORM20110721_12"
                    },
                    "description": {
                        "ja": ""
                    }
                },
                "id": "http://garewelswe.com/",
                "objectType": "Activity"
            },
            "actor": {
                "openid": "http://test.local/EAGLE/PEab76617d1d21d725d358a7ad5231bd6e",
                "name": "dev2-001",
                "objectType": "Agent"
            },
            "verb": {
                "id": "http://example.com/verbs/answered",
                "display": {
                    "en-US": "answered"
                }
            },
            "result": {
                "response": "TEST0",
                "success": True
            },
            "context": {
                "contextActivities": {
                    "parent": [
                        {
                            "id": "http://garewelswe.com/"
                        }
                    ],
                    "grouping": [
                        {
                            "id": "http://garewelswe.com/"
                        }
                    ]
                }
            },
            "id": "9faf143c-4851-11e3-b1a1-000c29bfba11",
            "authority": {
                "mbox": "mailto:kazutaka_kamiya@test.local",
                "name": "adllrs",
                "objectType": "Agent"
            }
        })
        response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 200)
    def test_amsterdam_snafu(self):
        """Regression test (payload captured from a real client): a statement whose
        contextActivities 'parent' is a single object rather than a list should
        still POST successfully.

        NOTE(review): payload kept verbatim from the originating report; only the
        status code is asserted.
        """
        stmt = json.dumps({
            "timestamp": "2013-05-23T10:46:39+02:00",
            "verb": {"id": "http://www.adlnet.gov/expapi/verbs/experienced"},
            "context": {
                "contextActivities": {
                    # Single object (not wrapped in a list) — the point of the test.
                    "parent": {
                        "id": "http://localhost:8080/portal/site/~88a4933d-99d2-4a35-8906-993fdcdf2259"
                    }
                }
            },
            "object": {
                "id": "http://localhost:8080/portal/web/~88a4933d-99d2-4a35-8906-993fdcdf2259/id/c50bf034-0f3e-4055-a1e7-8d1cf92be353/url/%2Flibrary%2Fcontent%2Fmyworkspace_info.html",
                "definition": {
                    "type": "http://adlnet.gov/expapi/activities/view-web-content"
                },
                "objectType": "Activity"
            },
            "actor": {
                "name": "Alan Tester",
                "objectType": "Agent",
                "mbox": "mailto:tester@dev.nl"
            }
        })
        post_response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
                                         Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(post_response.status_code, 200)
def test_update_activity_wrong_auth(self):
existStmt1 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"}, "description": {"en-US": "testdesc2", "en-GB": "altdesc"},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
"context": {"registration": str(uuid.uuid4()), "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}})
param = {"statementId": str(self.guid1)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
putresponse1 = self.client.put(path, existStmt1, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putresponse1.status_code, 204)
wrong_username = "tester2"
wrong_email = "test2@tester.com"
wrong_password = "test2"
wrong_auth = "Basic %s" % base64.b64encode(
"%s:%s" % (wrong_username, wrong_password))
form = {"username": wrong_username, "email": wrong_email, "password": wrong_password,
"password2": wrong_password}
self.client.post(reverse(register), form,
X_Experience_API_Version=settings.XAPI_VERSION)
stmt = json.dumps({"verb": {"id": "verb:verb/iri/attempted"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
"context": {"registration": str(uuid.uuid4()), "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}, "authority": {"objectType": "Agent", "name": "auth", "mbox": "mailto:auth@example.com"}})
post_response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=wrong_auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post_response.status_code, 200)
acts = Activity.objects.filter(activity_id="act:foogie").count()
self.assertEqual(acts, 1)
def test_update_activity_correct_auth(self):
self.bunchostmts()
stmt = json.dumps({"verb": {"id": "verb:verb/url/changed-act"}, "actor": {"objectType": "Agent", "mbox": "mailto:l@l.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction", "interactionType": "fill-in", "correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
"context": {"registration": self.cguid6, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}, "authority": {"objectType": "Agent", "name": "auth", "mbox": "mailto:auth@example.com"}})
post_response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post_response.status_code, 200)
act = Activity.objects.get(activity_id="act:foogie")
name_set = act.canonical_data['definition']['name']
desc_set = act.canonical_data['definition']['description']
self.assertEqual(list(name_set.keys())[1], "en-US")
self.assertEqual(list(name_set.values())[1], "testname3")
self.assertEqual(list(name_set.keys())[0], "en-GB")
self.assertEqual(list(name_set.values())[0], "altname")
self.assertEqual(list(desc_set.keys())[1], "en-US")
self.assertEqual(list(desc_set.values())[1], "testdesc3")
self.assertEqual(list(desc_set.keys())[0], "en-GB")
self.assertEqual(list(desc_set.values())[0], "altdesc")
def test_cors_post_put(self):
content = ('{"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},'
'"object": {"id": "act:test_cors_post_put"}}')
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5d&content=%s&Authorization=%s&Content-Type=application/json&X-Experience-API-Version=%s" % (
urllib.parse.quote(content), self.auth, settings.XAPI_VERSION)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.parse.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
act = Activity.objects.get(activity_id="act:test_cors_post_put")
self.assertEqual(act.activity_id, "act:test_cors_post_put")
agent = Agent.objects.get(mbox="mailto:test1@tester.com")
self.assertEqual(agent.name, "tester1")
self.assertEqual(agent.mbox, "mailto:test1@tester.com")
def test_cors_post_put_1_0_0(self):
content = {"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"id": "act:test_cors_post_put"}}
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5d&content=%s&Authorization=%s&Content-Type=application/json&X-Experience-API-Version=1.0.0" % (
urllib.parse.quote(str(content)), self.auth)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.parse.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
act = Activity.objects.get(activity_id="act:test_cors_post_put")
self.assertEqual(act.activity_id, "act:test_cors_post_put")
agent = Agent.objects.get(mbox="mailto:test1@tester.com")
self.assertEqual(agent.name, "tester1")
self.assertEqual(agent.mbox, "mailto:test1@tester.com")
def test_cors_post_put_wrong_version(self):
content = {"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"id": "act:test_cors_post_put"}}
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5b&content=%s&Authorization=%s&X-Experience-API-Version=1.0.33&Content-Type=application/json" % (
content, self.auth)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.parse.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
"X-Experience-API-Version is not supported")
def test_cors_post_put_correct_version(self):
content = {"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"id": "act:test_cors_post_put"}}
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5a&content=%s&Authorization=%s&X-Experience-API-Version=1.0.1&Content-Type=application/json" % (
urllib.parse.quote(str(content)), self.auth)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.parse.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
def test_issue_put(self):
stmt_id = "33f60b35-e1b2-4ddc-9c6f-7b3f65244430"
stmt = json.dumps({"verb": {"id": "verb:verb/iri"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI", "definition": {"type": "type:media",
"name": {"en-US": "Js Tetris - Tin Can Prototype"}, "description": {"en-US": "A game of tetris."}}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "6b1091be-2833-4886-b4a6-59e5e0b3c3f4"},
"actor": {"mbox": "mailto:tom.creighton.ctr@adlnet.gov", "name": "Tom Creighton"}})
path = "%s?%s" % (reverse('lrs:statements'),
urllib.parse.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
def test_post_with_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox, "member": [{"name": "agentA", "mbox": "mailto:agentA@example.com"}, {"name": "agentB", "mbox": "mailto:agentB@example.com"}]}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
g = Agent.objects.get(mbox="mailto:the.groupST@example.com")
self.assertEqual(g.name, name)
self.assertEqual(g.mbox, mbox)
mems = g.member.values_list("name", flat=True)
self.assertEqual(len(mems), 2)
self.assertIn("agentA", mems)
self.assertIn("agentB", mems)
def test_post_with_group_no_members_listed(self):
ot = "Group"
name = "the group ML"
mbox = "mailto:the.groupML@example.com"
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
g = Agent.objects.get(mbox="mailto:the.groupML@example.com")
self.assertEqual(g.name, name)
self.assertEqual(g.mbox, mbox)
mems = g.member.values_list("name", flat=True)
self.assertEqual(len(mems), 0)
def test_post_with_group_member_not_array(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
members = "wrong"
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox, "member": members}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'Members is not a properly formatted array')
def test_post_with_group_member_empty_array(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
members = []
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox, "member": members}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
"Member property must contain agents")
def test_issue_put_no_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244431'
stmt = json.dumps({"verb": "verb:completed", "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.parse.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(
path, stmt, content_type="application/json", Authorization=self.auth)
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb": {"id": "verb:completed"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.parse.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="0.90")
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header_again(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb": {"id": "verb:completed"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.parse.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.")
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header_1_1(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb": {"id": "verb:completed"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.parse.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.1.")
self.assertEqual(put_stmt.status_code, 400)
# Use this test to make sure stmts are being returned correctly with all
# data - doesn't check timestamp and stored fields
    def test_all_fields_activity_as_object(self):
        """Round-trip a maximally populated statement whose object is an Activity.

        First PUTs a simple nested statement (referenced later via a
        StatementRef in the main statement's context), then PUTs a statement
        exercising every actor/verb/object/result/context field, GETs it back
        by id, and asserts each field is returned verbatim. Timestamp and
        stored fields are intentionally not checked.
        """
        self.bunchostmts()
        # Statement to be referenced from context.statement (StatementRef).
        nested_st_id = str(uuid.uuid4())
        nest_param = {"statementId": nested_st_id}
        nest_path = "%s?%s" % (reverse('lrs:statements'),
                               urllib.parse.urlencode(nest_param))
        nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincan@adlnet.gov"},
                                  "verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed"}},
                                  "object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
        put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
                                       Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(put_sub_stmt.status_code, 204)
        # Main statement: every optional field populated.
        stmt_id = str(uuid.uuid4())
        context_id = str(uuid.uuid4())
        param = {"statementId": stmt_id}
        path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
        stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford", "account": {"homePage": "http://example.com", "name": "uniqueName"}},
                           "verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created", "en-GB": "made"}},
                           "object": {"objectType": "Activity", "id": "http:adlnet.gov/my/Activity/URL",
                                      "definition": {"name": {"en-US": "actName", "en-GB": "anotherActName"},
                                                     "description": {"en-US": "This is my activity description.", "en-GB": "This is another activity description."},
                                                     "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
                                                     "moreInfo": "http://some/activity/url",
                                                     "interactionType": "choice",
                                                     "correctResponsesPattern": ["golf", "tetris"],
                                                     "choices": [{"id": "golf", "description": {"en-US": "Golf Example", "en-GB": "GOLF"}},
                                                                 {"id": "tetris", "description": {
                                                                     "en-US": "Tetris Example", "en-GB": "TETRIS"}},
                                                                 {"id": "facebook", "description": {
                                                                     "en-US": "Facebook App", "en-GB": "FACEBOOK"}},
                                                                 {"id": "scrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
                                                     "extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
                           "result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100}, "completion": True, "success": False, "response": "Well done",
                                      "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
                           "context": {"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"},
                                                                                         "grouping": {"id": "http://groupingID"}},
                                       "revision": "Spelling error in choices.", "platform": "Platform is web browser.", "language": "en-US",
                                       "statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
                                       "extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
                           "timestamp": self.firstTime})
        put_stmt = self.client.put(path, stmt, content_type="application/json",
                                   Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(put_stmt.status_code, 204)
        # Fetch the statement back and verify each field round-tripped.
        param = {"statementId": stmt_id}
        get_response = self.client.get(
            path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
        the_returned = json.loads(get_response.content)
        self.assertEqual(the_returned['id'], stmt_id)
        self.assertEqual(the_returned['version'], '1.0.0')
        self.assertEqual(the_returned['actor']['objectType'], 'Agent')
        self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
        self.assertEqual(the_returned['actor'][
                         'account']['name'], 'uniqueName')
        self.assertEqual(the_returned['actor']['account'][
                         'homePage'], 'http://example.com')
        self.assertEqual(the_returned['verb']['id'],
                         'http://example.com/verbs/created')
        self.assertEqual(the_returned['verb']['display']['en-GB'], 'made')
        self.assertEqual(the_returned['verb']['display']['en-US'], 'created')
        self.assertEqual(the_returned['result']['completion'], True)
        self.assertEqual(the_returned['result'][
                         'duration'], 'P3Y6M4DT12H30M5S')
        self.assertEqual(the_returned['result']['extensions'][
                         'ext:resultKey1'], 'resultValue1')
        self.assertEqual(the_returned['result']['extensions'][
                         'ext:resultKey2'], 'resultValue2')
        self.assertEqual(the_returned['result']['response'], 'Well done')
        self.assertEqual(the_returned['result']['score']['max'], 100)
        self.assertEqual(the_returned['result']['score']['min'], 0)
        self.assertEqual(the_returned['result']['score']['raw'], 85)
        self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
        self.assertEqual(the_returned['result']['success'], False)
        # contextActivities values are normalized to lists by the LRS,
        # hence the [0] index here.
        self.assertEqual(the_returned['context']['contextActivities']['other'][0][
                         'id'], 'http://example.adlnet.gov/tincan/example/test')
        self.assertEqual(the_returned['context']['extensions'][
                         'ext:contextKey1'], 'contextVal1')
        self.assertEqual(the_returned['context']['extensions'][
                         'ext:contextKey2'], 'contextVal2')
        self.assertEqual(the_returned['context']['language'], 'en-US')
        self.assertEqual(the_returned['context'][
                         'platform'], 'Platform is web browser.')
        self.assertEqual(the_returned['context']['registration'], context_id)
        self.assertEqual(the_returned['context'][
                         'revision'], 'Spelling error in choices.')
        self.assertEqual(the_returned['context']['statement'][
                         'id'], str(nested_st_id))
        self.assertEqual(the_returned['context']['statement'][
                         'objectType'], 'StatementRef')
        self.assertEqual(the_returned['authority']['objectType'], 'Agent')
        self.assertEqual(the_returned['authority']['name'], 'tester1')
        self.assertEqual(the_returned['authority'][
                         'mbox'], 'mailto:test1@tester.com')
        self.assertEqual(the_returned['object'][
                         'id'], 'http:adlnet.gov/my/Activity/URL')
        self.assertEqual(the_returned['object']['objectType'], 'Activity')
        self.assertEqual(the_returned['object']['definition']['description'][
                         'en-US'], 'This is my activity description.')
        self.assertEqual(the_returned['object']['definition']['description'][
                         'en-GB'], 'This is another activity description.')
        self.assertEqual(the_returned['object']['definition'][
                         'interactionType'], 'choice')
        self.assertEqual(the_returned['object']['definition'][
                         'name']['en-US'], 'actName')
        self.assertEqual(the_returned['object']['definition'][
                         'name']['en-GB'], 'anotherActName')
        self.assertEqual(the_returned['object']['definition'][
                         'type'], 'http://adlnet.gov/expapi/activities/cmi.interaction')
        self.assertEqual(the_returned['object']['definition'][
                         'moreInfo'], 'http://some/activity/url')
        self.assertEqual(the_returned['object']['definition'][
                         'extensions']['ext:key1'], 'value1')
        self.assertEqual(the_returned['object']['definition'][
                         'extensions']['ext:key2'], 'value2')
        self.assertEqual(the_returned['object']['definition'][
                         'extensions']['ext:key3'], 'value3')
        # arrays.. testing slightly differently
        choices_str = json.dumps(the_returned['object'][
                                 'definition']['choices'])
        self.assertIn('description', choices_str)
        self.assertIn('id', choices_str)
        self.assertIn('GOLF', choices_str)
        self.assertIn('Golf Example', choices_str)
        self.assertIn('golf', choices_str)
        self.assertIn('TETRIS', choices_str)
        self.assertIn('Tetris Example', choices_str)
        self.assertIn('tetris', choices_str)
        self.assertIn('FACEBOOK', choices_str)
        self.assertIn('Facebook App', choices_str)
        self.assertIn('Facebook', choices_str)
        self.assertIn('SCRABBLE', choices_str)
        self.assertIn('Scrabble Example', choices_str)
        self.assertIn('scrabble', choices_str)
        crp_str = json.dumps(the_returned['object']['definition'][
                             'correctResponsesPattern'])
        self.assertIn('golf', crp_str)
        self.assertIn('tetris', crp_str)
# Use this test to make sure stmts are being returned correctly with all
# data - doesn't check timestamp, stored fields
def test_all_fields_agent_as_object(self):
nested_st_id = str(uuid.uuid4())
nest_param = {"statementId": nested_st_id}
nest_path = "%s?%s" % (reverse('lrs:statements'),
urllib.parse.urlencode(nest_param))
nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincan@adlnet.gov"},
"verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed"}},
"object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid4())
context_id = str(uuid.uuid4())
param = {"statementId": stmt_id}
path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
msha = hashlib.sha1("mailto:tom@example.com").hexdigest()
stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford", "account": {"homePage": "http://example.com", "name": "louUniqueName"}},
"verb": {"id": "http://example.com/verbs/helped", "display": {"en-US": "helped", "en-GB": "assisted"}},
"object": {"objectType": "Agent", "name": "Tom Creighton", "mbox_sha1sum": msha},
"result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
"context": {"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
"language": "en-US",
"statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
"extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
"timestamp": self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
param = {"statementId": stmt_id}
get_response = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['version'], '1.0.0')
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account'][
'name'], 'louUniqueName')
self.assertEqual(the_returned['actor']['account'][
'homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'],
'http://example.com/verbs/helped')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'assisted')
self.assertEqual(the_returned['verb']['display']['en-US'], 'helped')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result'][
'duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0][
'id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['statement'][
'id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement'][
'objectType'], 'StatementRef')
self.assertEqual(the_returned['authority']['objectType'], 'Agent')
self.assertEqual(the_returned['authority']['name'], 'tester1')
self.assertEqual(the_returned['authority'][
'mbox'], 'mailto:test1@tester.com')
self.assertEqual(the_returned['object']['objectType'], 'Agent')
self.assertEqual(the_returned['object']['name'], 'Tom Creighton')
self.assertEqual(the_returned['object']['mbox_sha1sum'], msha)
# Use this test to make sure stmts are being returned correctly with all
# data - doesn't check timestamps or stored fields
    def test_all_fields_substatement_as_object(self):
        """Round-trip a maximally populated statement whose object is a
        SubStatement.

        Stores two simple statements first (referenced via StatementRefs from
        the outer and inner contexts), then PUTs a statement whose object is a
        fully populated SubStatement (matching interaction definition with
        source/target arrays), GETs it back, and asserts every outer and inner
        field is returned verbatim. Timestamps and stored fields are
        intentionally not checked.
        """
        # Statement referenced from the OUTER statement's context.statement.
        nested_st_id = str(uuid.uuid4())
        nest_param = {"statementId": nested_st_id}
        nest_path = "%s?%s" % (reverse('lrs:statements'),
                               urllib.parse.urlencode(nest_param))
        nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincannest@adlnet.gov"},
                                  "verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed", "en-GB": "graded"}},
                                  "object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
        put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
                                      Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(put_sub_stmt.status_code, 204)
        # Statement referenced from the SubStatement's context.statement.
        nested_sub_st_id = str(uuid.uuid4())
        nest_sub_param = {"statementId": nested_sub_st_id}
        nest_sub_path = "%s?%s" % (
            reverse('lrs:statements'), urllib.parse.urlencode(nest_sub_param))
        nested_sub_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincannestsub@adlnet.gov"},
                                      "verb": {"id": "http://example.com/verbs/verb", "display": {"en-US": "verb", "en-GB": "altVerb"}},
                                      "object": {"id": "http://example.adlnet.gov/tincan/example/simplenestedsubstatement"}})
        put_nest_sub_stmt = self.client.put(nest_sub_path, nested_sub_stmt, content_type="application/json",
                                            Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(put_nest_sub_stmt.status_code, 204)
        # Main statement: object is a SubStatement with its own actor, verb,
        # interaction-activity object, result and context.
        stmt_id = str(uuid.uuid4())
        context_id = str(uuid.uuid4())
        sub_context_id = str(uuid.uuid4())
        param = {"statementId": stmt_id}
        path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
        stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford", "account": {"homePage": "http://example.com", "name": "louUniqueName"}},
                           "verb": {"id": "http://example.com/verbs/said", "display": {"en-US": "said", "en-GB": "talked"}},
                           "object": {"objectType": "SubStatement", "actor": {"objectType": "Agent", "name": "Tom Creighton", "mbox": "mailto:tom@adlnet.gov"},
                                      "verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed", "en-GB": "Graded"}},
                                      "object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement",
                                                 'definition': {'name': {'en-US': 'SubStatement name'},
                                                                'description': {'en-US': 'SubStatement description'},
                                                                'type': 'http://adlnet.gov/expapi/activities/cmi.interaction', 'interactionType': 'matching',
                                                                'correctResponsesPattern': ['lou.3,tom.2,andy.1'], 'source': [{'id': 'lou',
                                                                                                                              'description': {'en-US': 'Lou', 'it': 'Luigi'}}, {'id': 'tom', 'description': {'en-US': 'Tom', 'it': 'Tim'}},
                                                                                                                             {'id': 'andy', 'description': {'en-US': 'Andy'}}], 'target': [{'id': '1',
                                                                                                                                                                                           'description': {'en-US': 'ADL LRS'}}, {'id': '2', 'description': {'en-US': 'lrs'}},
                                                                                                                                                                                          {'id': '3', 'description': {'en-US': 'the adl lrs', 'en-CH': 'the lrs'}}]}},
                                      "result": {"score": {"scaled": .50, "raw": 50, "min": 1, "max": 51}, "completion": True,
                                                 "success": True, "response": "Poorly done",
                                                 "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey11": "resultValue11", "ext:resultKey22": "resultValue22"}},
                                      "context": {"registration": sub_context_id,
                                                  "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test/nest"}},
                                                  "revision": "Spelling error in target.", "platform": "Ipad.", "language": "en-US",
                                                  "statement": {"objectType": "StatementRef", "id": str(nested_sub_st_id)},
                                                  "extensions": {"ext:contextKey11": "contextVal11", "ext:contextKey22": "contextVal22"}}},
                           "result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100}, "completion": True, "success": True, "response": "Well done",
                                      "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
                           "context": {"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
                                       "language": "en-US",
                                       "statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
                                       "extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
                           "timestamp": self.firstTime})
        put_stmt = self.client.put(path, stmt, content_type="application/json",
                                   Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(put_stmt.status_code, 204)
        # Fetch the statement back and verify outer fields, then the nested
        # SubStatement's fields.
        param = {"statementId": stmt_id}
        get_response = self.client.get(
            path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
        the_returned = json.loads(get_response.content)
        self.assertEqual(the_returned['id'], stmt_id)
        self.assertEqual(the_returned['version'], '1.0.0')
        self.assertEqual(the_returned['actor']['objectType'], 'Agent')
        self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
        self.assertEqual(the_returned['actor']['account'][
                         'name'], 'louUniqueName')
        self.assertEqual(the_returned['actor']['account'][
                         'homePage'], 'http://example.com')
        self.assertEqual(the_returned['verb']['id'],
                         'http://example.com/verbs/said')
        self.assertEqual(the_returned['verb']['display']['en-GB'], 'talked')
        self.assertEqual(the_returned['verb']['display']['en-US'], 'said')
        # SubStatement (the_returned['object']) internals.
        self.assertEqual(the_returned['object'][
                         'actor']['objectType'], 'Agent')
        self.assertEqual(the_returned['object']['actor'][
                         'name'], 'Tom Creighton')
        self.assertEqual(the_returned['object']['actor'][
                         'mbox'], 'mailto:tom@adlnet.gov')
        self.assertEqual(the_returned['object']['context'][
                         'registration'], sub_context_id)
        self.assertEqual(the_returned['object'][
                         'context']['language'], 'en-US')
        self.assertEqual(the_returned['object'][
                         'context']['platform'], 'Ipad.')
        self.assertEqual(the_returned['object']['context'][
                         'revision'], 'Spelling error in target.')
        self.assertEqual(the_returned['object']['context'][
                         'statement']['id'], str(nested_sub_st_id))
        self.assertEqual(the_returned['object']['context'][
                         'statement']['objectType'], 'StatementRef')
        self.assertEqual(the_returned['object']['context']['contextActivities']['other'][
                         0]['id'], 'http://example.adlnet.gov/tincan/example/test/nest')
        self.assertEqual(the_returned['object']['context']['extensions'][
                         'ext:contextKey11'], 'contextVal11')
        self.assertEqual(the_returned['object']['context']['extensions'][
                         'ext:contextKey22'], 'contextVal22')
        self.assertEqual(the_returned['object']['object'][
                         'id'], 'http://example.adlnet.gov/tincan/example/simplestatement')
        self.assertEqual(the_returned['object']['object']['definition'][
                         'type'], 'http://adlnet.gov/expapi/activities/cmi.interaction')
        self.assertEqual(the_returned['object']['object']['definition'][
                         'description']['en-US'], 'SubStatement description')
        self.assertEqual(the_returned['object']['object'][
                         'definition']['interactionType'], 'matching')
        self.assertEqual(the_returned['object']['object']['definition'][
                         'name']['en-US'], 'SubStatement name')
        # arrays.. testing slightly differently
        source_str = json.dumps(the_returned['object']['object'][
                                'definition']['source'])
        self.assertIn('description', source_str)
        self.assertIn('id', source_str)
        self.assertIn('Lou', source_str)
        self.assertIn('Luigi', source_str)
        self.assertIn('lou', source_str)
        self.assertIn('Tom', source_str)
        self.assertIn('Tim', source_str)
        self.assertIn('tom', source_str)
        self.assertIn('Andy', source_str)
        self.assertIn('andy', source_str)
        target_str = json.dumps(the_returned['object']['object'][
                                'definition']['target'])
        self.assertIn('description', target_str)
        self.assertIn('id', target_str)
        self.assertIn('ADL LRS', target_str)
        self.assertIn('1', target_str)
        self.assertIn('lrs', target_str)
        self.assertIn('2', target_str)
        self.assertIn('the lrs', target_str)
        self.assertIn('the adl lrs', target_str)
        self.assertIn('3', target_str)
        self.assertEqual(the_returned['object']['objectType'], 'SubStatement')
        self.assertEqual(the_returned['object']['result']['completion'], True)
        self.assertEqual(the_returned['object']['result'][
                         'duration'], 'P3Y6M4DT12H30M5S')
        self.assertEqual(the_returned['object']['result']['extensions'][
                         'ext:resultKey11'], 'resultValue11')
        self.assertEqual(the_returned['object']['result']['extensions'][
                         'ext:resultKey22'], 'resultValue22')
        self.assertEqual(the_returned['object']['result'][
                         'response'], 'Poorly done')
        self.assertEqual(the_returned['object']['result']['score']['max'], 51)
        self.assertEqual(the_returned['object']['result']['score']['min'], 1)
        self.assertEqual(the_returned['object']['result']['score']['raw'], 50)
        self.assertEqual(the_returned['object']['result'][
                         'score']['scaled'], 0.5)
        self.assertEqual(the_returned['object']['result']['success'], True)
        self.assertEqual(the_returned['object']['verb'][
                         'id'], 'http://example.com/verbs/assess')
        self.assertEqual(the_returned['object']['verb'][
                         'display']['en-GB'], 'Graded')
        self.assertEqual(the_returned['object']['verb'][
                         'display']['en-US'], 'assessed')
        # Outer statement's own result/context/authority.
        self.assertEqual(the_returned['result']['completion'], True)
        self.assertEqual(the_returned['result'][
                         'duration'], 'P3Y6M4DT12H30M5S')
        self.assertEqual(the_returned['result']['extensions'][
                         'ext:resultKey1'], 'resultValue1')
        self.assertEqual(the_returned['result']['extensions'][
                         'ext:resultKey2'], 'resultValue2')
        self.assertEqual(the_returned['result']['response'], 'Well done')
        self.assertEqual(the_returned['result']['score']['max'], 100)
        self.assertEqual(the_returned['result']['score']['min'], 0)
        self.assertEqual(the_returned['result']['score']['raw'], 85)
        self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
        self.assertEqual(the_returned['result']['success'], True)
        self.assertEqual(the_returned['context']['contextActivities']['other'][0][
                         'id'], 'http://example.adlnet.gov/tincan/example/test')
        self.assertEqual(the_returned['context']['extensions'][
                         'ext:contextKey1'], 'contextVal1')
        self.assertEqual(the_returned['context']['extensions'][
                         'ext:contextKey2'], 'contextVal2')
        self.assertEqual(the_returned['context']['language'], 'en-US')
        self.assertEqual(the_returned['context']['registration'], context_id)
        self.assertEqual(the_returned['context']['statement'][
                         'id'], str(nested_st_id))
        self.assertEqual(the_returned['context']['statement'][
                         'objectType'], 'StatementRef')
        self.assertEqual(the_returned['authority']['objectType'], 'Agent')
        self.assertEqual(the_returned['authority']['name'], 'tester1')
        self.assertEqual(the_returned['authority'][
                         'mbox'], 'mailto:test1@tester.com')
    # Third stmt in list is missing actor - should throw error and perform
    # cascading delete on first three statements
    def test_post_list_rollback(self):
        """Batch POST containing one invalid statement must roll back fully.

        The fourth statement in the batch has no ``actor``, so the whole POST
        is rejected with 400 and none of the verbs/activities introduced by
        the earlier (valid) statements may remain in the database.
        """
        self.bunchostmts()
        cguid1 = str(uuid.uuid4())
        # Five statements; only the first three are valid, the 4th lacks an
        # actor and the 5th has a malformed mbox.
        stmts = json.dumps([
            {"verb": {"id": "http://example.com/verbs/wrong-failed", "display": {"en-US": "wrong-failed"}},
             "object": {"id": "act:test_wrong_list_post2"}, "actor": {"objectType": "Agent",
                                                                      "mbox": "mailto:wrong-t@t.com"}, "result": {"score": {"scaled": .99}, "completion": True, "success": True,
                                                                                                                 "response": "wrong", "extensions": {"ext:resultwrongkey1": "value1", "ext:resultwrongkey2": "value2"}}},
            {"verb": {"id": "http://example.com/verbs/wrong-kicked", "display": {"en-US": "wrong-kicked"}},
             "object": {"objectType": "Activity", "id": "act:test_wrong_list_post",
                        "definition": {"name": {"en-US": "wrongactName", "en-GB": "anotherActName"},
                                       "description": {"en-US": "This is my activity description.", "en-GB": "This is another activity description."},
                                       "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
                                       "interactionType": "choice",
                                       "correctResponsesPattern": ["wronggolf", "wrongtetris"],
                                       "choices":[{"id": "wronggolf", "description": {"en-US": "Golf Example", "en-GB": "GOLF"}},
                                                  {"id": "wrongtetris", "description": {
                                                      "en-US": "Tetris Example", "en-GB": "TETRIS"}},
                                                  {"id": "wrongfacebook", "description": {
                                                      "en-US": "Facebook App", "en-GB": "FACEBOOK"}},
                                                  {"id": "wrongscrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
                                       "extensions": {"ext:wrongkey1": "wrongvalue1", "ext:wrongkey2": "wrongvalue2", "ext:wrongkey3": "wrongvalue3"}}},
             "actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}},
            {"verb": {"id": "http://example.com/verbs/wrong-passed", "display": {"en-US": "wrong-passed"}}, "object": {"id": "act:test_wrong_list_post1"},
             "actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}, "context": {"registration": cguid1, "contextActivities": {"other": {"id": "act:wrongActivityID2"}},
                                                                                           "revision": "wrong", "platform": "wrong", "language": "en-US", "extensions": {"ext:wrongkey1": "wrongval1",
                                                                                                                                                                         "ext:wrongkey2": "wrongval2"}}},
            {"verb": {"id": "http://example.com/verbs/wrong-kicked", "display": {
                "en-US": "wrong-kicked"}}, "object": {"id": "act:test_wrong_list_post2"}},
            {"verb": {"id": "http://example.com/verbs/wrong-kicked", "display": {"en-US": "wrong-kicked"}}, "object": {"id": "act:test_wrong_list_post4"}, "actor": {"objectType": "Agent", "mbox": "wrong-t@t.com"}}])
        response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 400)
        # NOTE(review): under Python 3, ``response.content`` is bytes; a str
        # membership test on it raises TypeError — confirm the client decodes.
        self.assertIn('actor is missing in Statement', response.content)
        # Nothing tagged "wrong" should have survived the rollback.
        verbs = Verb.objects.filter(verb_id__contains='wrong')
        activities = Activity.objects.filter(
            activity_id__contains='test_wrong_list_post')
        stmts = Statement.objects.all()
        # 11 statements from setup
        self.assertEqual(len(stmts), 11)
        self.assertEqual(len(verbs), 0)
        self.assertEqual(len(activities), 0)
    def test_post_list_rollback_part_2(self):
        """Rollback must not delete objects that existed before the batch.

        The batch reuses the pre-existing ``act:foogie`` activity and the
        ``created`` verb from ``bunchostmts``; after the failed POST those
        must still exist while the batch-only "wrong" objects are gone.
        """
        self.bunchostmts()
        # Last statement is missing its actor, forcing a 400 + rollback.
        stmts = json.dumps([{"object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"},
                             "verb": {"id": "http://example.com/verbs/wrong", "display": {"en-US": "wrong"}},
                             "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}},
                            {"verb": {"id": "http://example.com/verbs/created"},
                             "object": {"objectType": "Activity", "id": "act:foogie",
                                        "definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
                                                       "description": {"en-US": "testdesc2", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
                                                       "interactionType": "fill-in", "correctResponsesPattern": ["answer"]}},
                             "actor":{"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}},
                            {"verb": {"id": "http://example.com/verbs/wrong-kicked"}, "object": {"id": "act:test_wrong_list_post2"}}])
        response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 400)
        # NOTE(review): response.content is bytes under Python 3 — confirm.
        self.assertIn('actor is missing in Statement', response.content)
        created_verbs = Verb.objects.filter(
            verb_id__contains='http://example.com/verbs/created')
        wrong_verbs = Verb.objects.filter(
            verb_id__contains='http://example.com/verbs/wrong')
        activities = Activity.objects.filter(activity_id='act:foogie')
        stmts = Statement.objects.all()
        wrong_agent = Agent.objects.filter(mbox='mailto:wrong-t@t.com')
        john_agent = Agent.objects.filter(mbox='mailto:john@john.com')
        s_agent = Agent.objects.filter(mbox='mailto:s@s.com')
        auth_agent = Agent.objects.filter(mbox='mailto:test1@tester.com')
        # Pre-existing objects survive; batch-only "wrong" objects do not.
        self.assertEqual(len(created_verbs), 1)
        self.assertEqual(len(wrong_verbs), 0)
        self.assertEqual(len(activities), 1)
        self.assertEqual(len(stmts), 11)
        self.assertEqual(len(wrong_agent), 0)
        self.assertEqual(len(john_agent), 1)
        self.assertEqual(len(s_agent), 1)
        self.assertEqual(len(auth_agent), 1)
    def test_post_list_rollback_with_void(self):
        """A rolled-back batch must also undo a voiding statement it contained.

        The first statement voids an existing statement; the second is invalid
        (no actor).  After the 400 the target statement must remain un-voided
        and the voiding verb/agent must not have been persisted.
        """
        self.bunchostmts()
        stmts = json.dumps([{"actor": {"objectType": "Agent", "mbox": "mailto:only-s@s.com"},
                             "object": {"objectType": "StatementRef", "id": str(self.exist_stmt_id)},
                             "verb": {"id": "http://adlnet.gov/expapi/verbs/voided", "display": {"en-US": "voided"}}},
                            {"verb": {"id": "http://example.com/verbs/wrong-kicked"}, "object": {"id": "act:test_wrong_list_post2"}}])
        response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 400)
        # NOTE(review): response.content is bytes under Python 3 — confirm.
        self.assertIn('actor is missing in Statement', response.content)
        voided_st = Statement.objects.get(statement_id=self.exist_stmt_id)
        voided_verb = Verb.objects.filter(verb_id__contains='voided')
        only_actor = Agent.objects.filter(mbox="mailto:only-s@s.com")
        stmts = Statement.objects.all()
        self.assertEqual(len(stmts), 11)
        # The void was rolled back: target statement is still active.
        self.assertEqual(voided_st.voided, False)
        self.assertEqual(len(voided_verb), 0)
        self.assertEqual(len(only_actor), 0)
    def test_post_list_rollback_with_subs(self):
        """Rollback must cascade through SubStatements created in the batch.

        A SubStatement (with its own agent, verb, activity and context) is
        nested in the second statement; the third statement is invalid, so
        everything introduced by the batch — including the sub-objects —
        must be removed.
        """
        self.bunchostmts()
        sub_context_id = str(uuid.uuid4())
        stmts = json.dumps([{"actor": {"objectType": "Agent", "mbox": "mailto:wrong-s@s.com"},
                             "verb": {"id": "http://example.com/verbs/wrong", "display": {"en-US": "wrong"}},
                             "object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"}},
                            {"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
                             "verb": {"id": "http://example.com/verbs/wrong-next", "display": {"en-US": "wrong-next"}},
                             "object": {"objectType": "SubStatement",
                                        "actor": {"objectType": "Agent", "mbox": "mailto:wrong-ss@ss.com"}, "verb": {"id": "http://example.com/verbs/wrong-sub"},
                                        "object": {"objectType": "Activity", "id": "act:wrong-testex.com"}, "result": {"completion": True, "success": True,
                                                                                                                      "response": "sub-wrong-kicked"}, "context": {"registration": sub_context_id,
                                                                                                                                                                   "contextActivities": {"other": {"id": "act:sub-wrong-ActivityID"}}, "revision": "foo", "platform": "bar",
                                                                                                                                                                   "language": "en-US", "extensions": {"ext:wrong-k1": "v1", "ext:wrong-k2": "v2"}}}},
                            {"verb": {"id": "http://example.com/verbs/wrong-kicked"}, "object": {"id": "act:test_wrong_list_post2"}}])
        response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 400)
        # NOTE(review): response.content is bytes under Python 3 — confirm.
        self.assertIn('actor is missing in Statement', response.content)
        s_agent = Agent.objects.filter(mbox="mailto:wrong-s@s.com")
        ss_agent = Agent.objects.filter(mbox="mailto:wrong-ss@ss.com")
        john_agent = Agent.objects.filter(mbox="mailto:john@john.com")
        subs = SubStatement.objects.all()
        wrong_verb = Verb.objects.filter(verb_id__contains="wrong")
        activities = Activity.objects.filter(activity_id__contains="wrong")
        stmts = Statement.objects.all()
        self.assertEqual(len(stmts), 11)
        self.assertEqual(len(s_agent), 0)
        self.assertEqual(len(ss_agent), 0)
        # "john" already existed from setup, so he survives the rollback.
        self.assertEqual(len(john_agent), 1)
        # Only 1 sub from setup
        self.assertEqual(len(subs), 1)
        self.assertEqual(len(wrong_verb), 0)
        self.assertEqual(len(activities), 0)
    def test_post_list_rollback_context_activities(self):
        """Rollback must spare pre-existing context activities of a sub.

        The SubStatement's contextActivities reference both a batch-only
        activity and the pre-existing ``act:foogie``; after the rollback only
        the batch-only one is removed.
        """
        self.bunchostmts()
        sub_context_id = str(uuid.uuid4())
        # Will throw error and need to rollback b/c last stmt is missing actor
        stmts = json.dumps([{
            "actor": {"objectType": "Agent", "mbox": "mailto:wrong-s@s.com"},
            "verb": {"id": "http://example.com/verbs/wrong", "display": {"en-US": "wrong"}},
            "object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"}},
            {
            "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
            "verb": {"id": "http://example.com/verbs/wrong-next", "display": {"en-US": "wrong-next"}},
            "object": {
                "objectType": "SubStatement",
                "actor": {"objectType": "Agent", "mbox": "mailto:wrong-ss@ss.com"},
                "verb": {"id": "http://example.com/verbs/wrong-sub"},
                "object": {"objectType": "Activity", "id": "act:wrong-testex.com"},
                "result": {"completion": True, "success": True, "response": "sub-wrong-kicked"},
                "context": {
                    "registration": sub_context_id,
                    "contextActivities": {
                        "other": [{"id": "act:subwrongActivityID"}, {"id": "act:foogie"}]},
                    "revision": "foo", "platform": "bar", "language": "en-US",
                    "extensions": {"ext:wrong-k1": "v1", "ext:wrong-k2": "v2"}}
            }
        },
            {
            "verb": {"id": "http://example.com/verbs/wrong-kicked"},
            "object": {"id": "act:test_wrong_list_post2"}}])
        response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 400)
        # NOTE(review): response.content is bytes under Python 3 — confirm.
        self.assertIn('actor is missing in Statement', response.content)
        s_agent = Agent.objects.filter(mbox="mailto:wrong-s@s.com")
        ss_agent = Agent.objects.filter(mbox="mailto:wrong-ss@ss.com")
        john_agent = Agent.objects.filter(mbox="mailto:john@john.com")
        subs = SubStatement.objects.all()
        wrong_verb = Verb.objects.filter(verb_id__contains="wrong")
        wrong_activities = Activity.objects.filter(
            activity_id__contains="wrong")
        foogie_activities = Activity.objects.filter(
            activity_id__exact="act:foogie")
        stmts = Statement.objects.all()
        self.assertEqual(len(stmts), 11)
        self.assertEqual(len(s_agent), 0)
        self.assertEqual(len(ss_agent), 0)
        self.assertEqual(len(john_agent), 1)
        # Only 1 sub from setup
        self.assertEqual(len(subs), 1)
        self.assertEqual(len(wrong_verb), 0)
        self.assertEqual(len(wrong_activities), 0)
        # Pre-existing foogie activity must survive the rollback.
        self.assertEqual(len(foogie_activities), 1)
def test_unique_actor_authority(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:timmay@timmay.com", "name": "timmay"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
response2 = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth2, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response2.status_code, 200)
acts = Activity.objects.filter(activity_id='act:test_post').count()
self.assertEqual(acts, 1)
def test_stmts_w_same_regid(self):
stmt1_guid = str(uuid.uuid4())
stmt2_guid = str(uuid.uuid4())
reg_guid = str(uuid.uuid4())
stmt1 = json.dumps({"actor": {"mbox": "mailto:tom@example.com"},
"verb": {"id": "http:adlnet.gov/expapi/verbs/tested",
"display": {"en-US": "tested"}},
"object": {"id": "test:same.regid"},
"context": {"registration": reg_guid}
})
stmt2 = json.dumps({"actor": {"mbox": "mailto:tom@example.com"},
"verb": {"id": "http:adlnet.gov/expapi/verbs/tested",
"display": {"en-US": "tested"}},
"object": {"id": "test:same.regid.again"},
"context": {"registration": reg_guid}
})
param1 = {"statementId": stmt1_guid}
path1 = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param1))
stmt_payload1 = stmt1
resp1 = self.client.put(path1, stmt_payload1, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp1.status_code, 204)
param2 = {"statementId": stmt2_guid}
path2 = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param2))
stmt_payload2 = stmt2
resp2 = self.client.put(path2, stmt_payload2, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp2.status_code, 204)
    @override_settings(CELERY_ALWAYS_EAGER=True,
                       TEST_RUNNER='djcelery.contrib.test_runner.CeleryTestSuiteRunner')
    def test_void(self):
        """End-to-end voiding flow.

        1. PUT a statement and verify it is returned by GET.
        2. PUT a second statement that references the first via StatementRef.
        3. PUT a voiding statement targeting the first; afterwards the voided
           statement disappears from the normal GET list, is retrievable via
           ``voidedStatementId``, and returns 404 for ``statementId``.
        """
        stmt_guid = str(uuid.uuid4())
        stmt = {"actor": {"mbox": "mailto:tinytom@example.com"},
                "verb": {"id": "http://tommy.com/my-testverbs/danced",
                         "display": {"en-US": "danced"}},
                "object": {"id": "act:the-macarena"}}
        param = {"statementId": stmt_guid}
        path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
        payload = json.dumps(stmt)

        r = self.client.put(path, payload, content_type="application/json",
                            Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 204)
        # Statement 1 is now visible via plain GET.
        r = self.client.get(reverse('lrs:statements'), Authorization=self.auth,
                            X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 200)
        obj = json.loads(r.content)
        self.assertEqual(len(obj['statements']), 1)
        obj = obj['statements'][0]
        self.assertEqual(obj['id'], stmt_guid)
        self.assertEqual(obj['actor']['mbox'], stmt['actor']['mbox'])
        self.assertEqual(obj['verb'], stmt['verb'])
        self.assertEqual(obj['object']['id'], stmt['object']['id'])

        # Statement 2 references statement 1 via StatementRef.
        stmt2_guid = str(uuid.uuid4())
        stmt2 = {"actor": {"mbox": "mailto:louo@example.com"},
                 "verb": {"id": "http://tommy.com/my-testverbs/laughed",
                          "display": {"en-US": "laughed at"}},
                 "object": {"objectType": "StatementRef", "id": stmt_guid}}
        param = {"statementId": stmt2_guid}
        path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(param))
        payload2 = json.dumps(stmt2)

        r = self.client.put(path, payload2, content_type="application/json",
                            Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 204)
        # Both statements visible before the void.
        r = self.client.get(reverse('lrs:statements'), Authorization=self.auth,
                            X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 200)

        obj = json.loads(r.content)
        self.assertEqual(len(obj['statements']), 2)
        objs = obj['statements']
        for o in objs:
            if o['id'] == stmt_guid:
                self.assertEqual(o['actor']['mbox'], stmt['actor']['mbox'])
                self.assertEqual(o['verb']['id'], stmt['verb']['id'])
                self.assertEqual(o['object']['id'], stmt['object']['id'])
            else:
                self.assertEqual(o['actor']['mbox'], stmt2['actor']['mbox'])
                self.assertEqual(o['verb']['id'], stmt2['verb']['id'])
                self.assertEqual(o['object']['id'], stmt2['object']['id'])

        # Void statement 1.
        stmtv = {"actor": {"mbox": "mailto:hulk@example.com"},
                 "verb": {"id": "http://adlnet.gov/expapi/verbs/voided"},
                 "object": {"objectType": "StatementRef",
                            "id": "%s" % stmt_guid}}
        v_guid = str(uuid.uuid4())
        paramv = {"statementId": v_guid}
        path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(paramv))
        vpayload = json.dumps(stmtv)

        r = self.client.put(path, vpayload, content_type="application/json",
                            Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 204)
        # The voided statement is replaced in the list by the voiding one.
        r = self.client.get(reverse('lrs:statements'), Authorization=self.auth,
                            X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 200)

        obj = json.loads(r.content)
        self.assertEqual(len(obj['statements']), 2)
        objs = obj['statements']
        for o in objs:
            if o['id'] == v_guid:
                self.assertEqual(o['actor']['mbox'], stmtv['actor']['mbox'])
                self.assertEqual(o['verb']['id'], stmtv['verb']['id'])
                self.assertEqual(o['object']['id'], stmtv['object']['id'])
            else:
                self.assertEqual(o['actor']['mbox'], stmt2['actor']['mbox'])
                self.assertEqual(o['verb']['id'], stmt2['verb']['id'])
                self.assertEqual(o['object']['id'], stmt2['object']['id'])

        # get voided statement via voidedStatementId
        path = "%s?%s" % (reverse('lrs:statements'), urllib.parse.urlencode(
            {"voidedStatementId": stmt_guid}))
        r = self.client.get(path, Authorization=self.auth,
                            X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 200)
        obj = json.loads(r.content)
        self.assertEqual(obj['id'], stmt_guid)
        self.assertEqual(obj['actor']['mbox'], stmt['actor']['mbox'])
        self.assertEqual(obj['verb']['id'], stmt['verb']['id'])
        self.assertEqual(obj['object']['id'], stmt['object']['id'])

        # make sure voided statement returns a 404 on get w/ statementId req
        path = "%s?%s" % (reverse('lrs:statements'),
                          urllib.parse.urlencode({"statementId": stmt_guid}))
        r = self.client.get(path, Authorization=self.auth,
                            X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 404)
def test_act_id_iri(self):
act_id = "act:Flügel"
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
"object": {"id": act_id}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
stmt_db = Statement.objects.get(
statement_id=uuid.UUID(json.loads(response.content)[0]))
act = Activity.objects.get(id=stmt_db.object_activity.id)
self.assertEqual(act.activity_id.encode('utf-8'), act_id)
    def test_invalid_act_id_iri(self):
        """A bare string with no scheme is rejected as an activity id."""
        act_id = "Flügel"
        stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
                           "verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
                           "object": {"id": act_id}})
        response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
                                    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 400)
        # NOTE(review): response.content is bytes under Python 3; a str
        # membership test raises TypeError — confirm the client decodes.
        self.assertIn('not a valid IRI', response.content)
def test_tag_act_id_uri(self):
act_id = "tag:adlnet.gov,2013:expapi:0.9:activities"
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
"object": {"id": act_id}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
stmt_db = Statement.objects.get(
statement_id=uuid.UUID(json.loads(response.content)[0]))
act = Activity.objects.get(id=stmt_db.object_activity.id)
self.assertEqual(act.activity_id, act_id)
@override_settings(CELERY_ALWAYS_EAGER=True,
TEST_RUNNER='djcelery.contrib.test_runner.CeleryTestSuiteRunner')
def test_large_batch(self):
import random
post_payload = []
acts = ["http://tom.com/act/1/foo", "http://adlnet.gov/act/arrgs/2",
"http://google.com/activity/eats/ants", "http://tom.com/act/3/boo"]
ctxs = ["http://ctx.com/one", "http://ctx.com/two"]
for x in range(1, 500):
s = {"verb": {"id": "http://example.com/verbs/passed"}, "object": {"id": ""}, "actor": {"mbox": "mailto:t@t.com"},
"context": {"contextActivities": {"grouping": [{"id": ""}]}}}
s['object']['id'] = acts[random.randrange(0, len(acts) - 1)]
s['context']['contextActivities']['grouping'][0][
'id'] = ctxs[random.randrange(0, len(ctxs) - 1)]
post_payload.append(s)
response = self.client.post(reverse('lrs:statements'), json.dumps(post_payload), content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
| 64.588852 | 306 | 0.538843 |
49542bca83fb31a1394bc0ce675e4a920261f598 | 1,021 | py | Python | neon/__init__.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | 3 | 2017-02-02T05:20:48.000Z | 2021-07-07T16:50:41.000Z | neon/__init__.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | null | null | null | neon/__init__.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | 2 | 2016-06-09T13:05:00.000Z | 2021-02-18T14:18:15.000Z | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
"""
# Expose the installed package version as ``neon.__version__``.  The
# ``neon/version.py`` module is generated at install time, so its absence
# means the package was never installed; bail out with guidance.
try:
    from neon.version import VERSION as __version__  # noqa
except ImportError:
    import sys
    print("ERROR: Version information not found.  Ensure you have installed "
          "the software.\n    From the top level dir issue: 'make install'")
    sys.exit(1)
3acec4405c183d3d327928e6c737e3773ac13e19 | 6,619 | py | Python | programs/util/newplugin.py | mukai154/webblen-io | 307a7fc212c321ebedbc8ebcc113cc3cf4744085 | [
"MIT"
] | 10 | 2017-11-15T05:15:57.000Z | 2021-05-21T06:35:12.000Z | programs/util/newplugin.py | mukai154/webblen-io | 307a7fc212c321ebedbc8ebcc113cc3cf4744085 | [
"MIT"
] | 2 | 2018-07-09T09:44:50.000Z | 2018-10-08T14:12:11.000Z | programs/util/newplugin.py | mukai154/webblen-io | 307a7fc212c321ebedbc8ebcc113cc3cf4744085 | [
"MIT"
] | 6 | 2017-12-05T03:42:06.000Z | 2018-06-05T09:42:11.000Z | #!/usr/bin/env python3
templates = {
"plugin.json" :
"""{{
"plugin_name": "{plugin_name}",
"plugin_project": "{plugin_provider}_{plugin_name}"
}}
""",
"CMakeLists.txt" :
"""file(GLOB HEADERS "include/{plugin_provider}/plugins/{plugin_name}/*.hpp")
add_library( {plugin_provider}_{plugin_name}
${{HEADERS}}
{plugin_name}_plugin.cpp
{plugin_name}_api.cpp
)
target_link_libraries( {plugin_provider}_{plugin_name} steemit_app steemit_chain steemit_protocol )
target_include_directories( {plugin_provider}_{plugin_name}
PUBLIC "${{CMAKE_CURRENT_SOURCE_DIR}}/include" )
""",
"include/{plugin_provider}/plugins/{plugin_name}/{plugin_name}_api.hpp" :
"""
#pragma once
#include <fc/api.hpp>
namespace steemit {{ namespace app {{
struct api_context;
}} }}
namespace {plugin_provider} {{ namespace plugin {{ namespace {plugin_name} {{
namespace detail {{
class {plugin_name}_api_impl;
}}
class {plugin_name}_api
{{
public:
{plugin_name}_api( const steemit::app::api_context& ctx );
void on_api_startup();
// TODO: Add API methods here
private:
std::shared_ptr< detail::{plugin_name}_api_impl > my;
}};
}} }} }}
FC_API( {plugin_provider}::plugin::{plugin_name}::{plugin_name}_api,
// TODO: Add method bubble list here
)
""",
"include/{plugin_provider}/plugins/{plugin_name}/{plugin_name}_plugin.hpp" :
"""
#pragma once
#include <steemit/app/plugin.hpp>
namespace {plugin_provider} {{ namespace plugin {{ namespace {plugin_name} {{
namespace detail {{
class {plugin_name}_plugin_impl;
}}
class {plugin_name}_plugin : public steemit::app::plugin
{{
public:
{plugin_name}_plugin( steemit::app::application* app );
virtual ~{plugin_name}_plugin();
virtual std::string plugin_name()const override;
virtual void plugin_initialize( const boost::program_options::variables_map& options ) override;
virtual void plugin_startup() override;
virtual void plugin_shutdown() override;
private:
std::shared_ptr< detail::{plugin_name}_plugin_impl > my;
}};
}} }} }}
""",
"{plugin_name}_api.cpp" :
"""
#include <steemit/app/api_context.hpp>
#include <steemit/app/application.hpp>
#include <{plugin_provider}/plugins/{plugin_name}/{plugin_name}_api.hpp>
#include <{plugin_provider}/plugins/{plugin_name}/{plugin_name}_plugin.hpp>
namespace {plugin_provider} {{ namespace plugin {{ namespace {plugin_name} {{
namespace detail {{
class {plugin_name}_api_impl
{{
public:
{plugin_name}_api_impl( steemit::app::application& _app );
std::shared_ptr< {plugin_provider}::plugin::{plugin_name}::{plugin_name}_plugin > get_plugin();
steemit::app::application& app;
}};
{plugin_name}_api_impl::{plugin_name}_api_impl( steemit::app::application& _app ) : app( _app )
{{}}
std::shared_ptr< {plugin_provider}::plugin::{plugin_name}::{plugin_name}_plugin > {plugin_name}_api_impl::get_plugin()
{{
return app.get_plugin< {plugin_name}_plugin >( "{plugin_name}" );
}}
}} // detail
{plugin_name}_api::{plugin_name}_api( const steemit::app::api_context& ctx )
{{
my = std::make_shared< detail::{plugin_name}_api_impl >(ctx.app);
}}
void {plugin_name}_api::on_api_startup() {{ }}
}} }} }} // {plugin_provider}::plugin::{plugin_name}
""",
"{plugin_name}_plugin.cpp" :
"""
#include <{plugin_provider}/plugins/{plugin_name}/{plugin_name}_api.hpp>
#include <{plugin_provider}/plugins/{plugin_name}/{plugin_name}_plugin.hpp>
#include <string>
namespace {plugin_provider} {{ namespace plugin {{ namespace {plugin_name} {{
namespace detail {{
class {plugin_name}_plugin_impl
{{
public:
{plugin_name}_plugin_impl( steemit::app::application& app );
virtual ~{plugin_name}_plugin_impl();
virtual std::string plugin_name()const;
virtual void plugin_initialize( const boost::program_options::variables_map& options );
virtual void plugin_startup();
virtual void plugin_shutdown();
void on_applied_block( const chain::signed_block& b );
steemit::app::application& _app;
boost::signals2::scoped_connection _applied_block_conn;
}};
{plugin_name}_plugin_impl::{plugin_name}_plugin_impl( steemit::app::application& app )
: _app(app) {{}}
{plugin_name}_plugin_impl::~{plugin_name}_plugin_impl() {{}}
std::string {plugin_name}_plugin_impl::plugin_name()const
{{
return "{plugin_name}";
}}
void {plugin_name}_plugin_impl::plugin_initialize( const boost::program_options::variables_map& options )
{{
}}
void {plugin_name}_plugin_impl::plugin_startup()
{{
_app.register_api_factory< {plugin_name}_api >( "{plugin_name}_api" );
_applied_block_conn = _app.chain_database()->applied_block.connect(
[this](const chain::signed_block& b){{ on_applied_block(b); }});
}}
void {plugin_name}_plugin_impl::plugin_shutdown()
{{
}}
void {plugin_name}_plugin_impl::on_applied_block( const chain::signed_block& b )
{{
}}
}}
{plugin_name}_plugin::{plugin_name}_plugin( steemit::app::application* app )
: plugin(app)
{{
FC_ASSERT( app != nullptr );
my = std::make_shared< detail::{plugin_name}_plugin_impl >( *app );
}}
{plugin_name}_plugin::~{plugin_name}_plugin() {{}}
std::string {plugin_name}_plugin::plugin_name()const
{{
return my->plugin_name();
}}
void {plugin_name}_plugin::plugin_initialize( const boost::program_options::variables_map& options )
{{
my->plugin_initialize( options );
}}
void {plugin_name}_plugin::plugin_startup()
{{
my->plugin_startup();
}}
void {plugin_name}_plugin::plugin_shutdown()
{{
my->plugin_shutdown();
}}
}} }} }} // {plugin_provider}::plugin::{plugin_name}
STEEMIT_DEFINE_PLUGIN( {plugin_name}, {plugin_provider}::plugin::{plugin_name}::{plugin_name}_plugin )
""",
}
import argparse
import os
import sys
def main(argv):
    """Generate skeleton source files for a new plugin.

    Parses ``argv[1:]`` for the plugin provider and plugin name, then renders
    every entry of the module-level ``templates`` mapping (both filenames and
    file bodies are ``str.format`` templates) into
    ``libraries/plugins/<name>/`` relative to the current directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("provider", help="Name of plugin provider (steemit for plugins developed by Steemit)")
    parser.add_argument("name", help="Name of plugin to create")
    args = parser.parse_args(argv[1:])

    ctx = {
        "plugin_provider" : args.provider,
        "plugin_name" : args.name,
    }

    outdir = os.path.join("libraries", "plugins", ctx["plugin_name"])
    for t_fn, t_content in templates.items():
        content = t_content.format(**ctx)
        fn = os.path.join(outdir, t_fn.format(**ctx))
        dn = os.path.dirname(fn)
        # exist_ok avoids the check-then-create race of os.path.exists().
        os.makedirs(dn, exist_ok=True)
        with open(fn, "w") as f:
            f.write(content)
# Script entry point: forward the raw argv to main().
if __name__ == "__main__":
    main(sys.argv)
| 25.956863 | 118 | 0.689379 |
612428851104e714b1e76be35028cadd857c3f0c | 5,010 | py | Python | Collections-a-installer/community-general-2.4.0/plugins/modules/net_tools/gandi_livedns.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/net_tools/gandi_livedns.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/net_tools/gandi_livedns.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: gandi_livedns
author:
- Gregory Thiemonge (@gthiemonge)
version_added: "2.3.0"
short_description: Manage Gandi LiveDNS records
description:
- "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)."
options:
api_key:
description:
- Account API token.
type: str
required: true
record:
description:
- Record to add.
type: str
required: true
state:
description:
- Whether the record(s) should exist or not.
type: str
choices: [ absent, present ]
default: present
ttl:
description:
- The TTL to give the new record.
- Required when I(state=present).
type: int
type:
description:
- The type of DNS record to create.
type: str
required: true
values:
description:
- The record values.
- Required when I(state=present).
type: list
elements: str
domain:
description:
- The name of the Domain to work with (for example, "example.com").
required: true
type: str
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Create a test A record to point to 127.0.0.1 in the my.com domain
community.general.gandi_livedns:
domain: my.com
record: test
type: A
values:
- 127.0.0.1
ttl: 7200
api_key: dummyapitoken
register: record
- name: Create a mail CNAME record to www.my.com domain
community.general.gandi_livedns:
domain: my.com
type: CNAME
record: mail
values:
- www
ttl: 7200
api_key: dummyapitoken
state: present
- name: Change its TTL
community.general.gandi_livedns:
domain: my.com
type: CNAME
record: mail
values:
- www
ttl: 10800
api_key: dummyapitoken
state: present
- name: Delete the record
community.general.gandi_livedns:
domain: my.com
type: CNAME
record: mail
api_key: dummyapitoken
state: absent
'''
RETURN = r'''
record:
description: A dictionary containing the record data.
returned: success, except on record deletion
type: dict
contains:
values:
description: The record content (details depend on record type).
returned: success
type: list
elements: str
sample:
- 192.0.2.91
- 192.0.2.92
record:
description: The record name.
returned: success
type: str
sample: www
ttl:
description: The time-to-live for the record.
returned: success
type: int
sample: 300
type:
description: The record type.
returned: success
type: str
sample: A
domain:
description: The domain associated with the record.
returned: success
type: str
sample: my.com
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI
def main():
    """Entry point: apply the requested DNS record state via the Gandi API."""
    argument_spec = dict(
        api_key=dict(type='str', required=True, no_log=True),
        record=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        ttl=dict(type='int'),
        type=dict(type='str', required=True),
        values=dict(type='list', elements='str'),
        domain=dict(type='str', required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['values', 'ttl']),
        ],
    )

    gandi_api = GandiLiveDNSAPI(module)

    params = module.params
    # state=present creates/updates the record; state=absent removes it.
    if params['state'] == 'present':
        record, changed = gandi_api.ensure_dns_record(params['record'],
                                                      params['type'],
                                                      params['ttl'],
                                                      params['values'],
                                                      params['domain'])
    else:
        record, changed = gandi_api.delete_dns_record(params['record'],
                                                      params['type'],
                                                      params['values'],
                                                      params['domain'])

    result = {'changed': changed}
    if record:
        # Only deletions return no record payload.
        result['record'] = gandi_api.build_result(record, params['domain'])

    module.exit_json(**result)
if __name__ == '__main__':
main()
| 26.648936 | 104 | 0.568862 |
84ea4b429ab70e43ac26457c8cf64f98db7a3a94 | 4,904 | py | Python | calaccess_processed/models/filings/campaign/form460/base.py | dwillis/django-calaccess-processed-data | f228252df1b390967468b41d336839f1bd9ca192 | [
"MIT"
] | 1 | 2021-01-13T12:06:25.000Z | 2021-01-13T12:06:25.000Z | calaccess_processed/models/filings/campaign/form460/base.py | anthonyjpesce/django-calaccess-processed-data | d99b461abb7b7f7973f90b49634c9262efcbe7bf | [
"MIT"
] | null | null | null | calaccess_processed/models/filings/campaign/form460/base.py | anthonyjpesce/django-calaccess-processed-data | d99b461abb7b7f7973f90b49634c9262efcbe7bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for storing data from Campaign Disclosure Statements (Form 460).
"""
from __future__ import unicode_literals
from django.db import models
from calaccess_processed.models.filings.campaign import CampaignFinanceFilingBase
class Form460FilingBase(CampaignFinanceFilingBase):
    """
    Base and abstract model for Form 460 filings.
    """
    # NOTE(review): each field below mirrors a numbered line on the Form 460
    # summary page (see the per-field help_text). Amounts are stored as
    # IntegerField — presumably whole dollars; confirm against the loader.
    from_date = models.DateField(
        verbose_name='from date',
        db_index=True,
        null=False,
        help_text="The first date of the filing period covered by the statement "
                  "(from CVR_CAMPAIGN_DISCLOSURE.FROM_DATE)",
    )
    thru_date = models.DateField(
        verbose_name='thru date',
        db_index=True,
        null=False,
        help_text="The last date of the filing period covered by the statement "
                  "(from CVR_CAMPAIGN_DISCLOSURE.THRU_DATE)",
    )
    monetary_contributions = models.IntegerField(
        verbose_name='monetary contributions',
        null=True,
        help_text="Total monetary contributions (from line 1, column A)",
    )
    loans_received = models.IntegerField(
        verbose_name='loans received',
        null=True,
        help_text="Total loans received (from line 2, column A)",
    )
    subtotal_cash_contributions = models.IntegerField(
        verbose_name='subtotal cash contributions',
        null=True,
        help_text="Monetary contributions and loans received combined (from "
                  "line 3, column A)",
    )
    nonmonetary_contributions = models.IntegerField(
        verbose_name='nonmonetary contributions',
        null=True,
        help_text="Non-monetary contributions (from line 4, column A)",
    )
    total_contributions = models.IntegerField(
        verbose_name='total contributions',
        null=True,
        help_text="Total contributions (from line 5, column A)",
    )
    payments_made = models.IntegerField(
        verbose_name='payments made',
        null=True,
        help_text="Payments made (from line 6, column A)",
    )
    loans_made = models.IntegerField(
        verbose_name='loans made',
        null=True,
        help_text="Loans made (from line 7, column A)",
    )
    subtotal_cash_payments = models.IntegerField(
        verbose_name='subtotal cash payments',
        null=True,
        help_text="Sub-total of cash payments (from line 8, column A)",
    )
    unpaid_bills = models.IntegerField(
        verbose_name='unpaid bills',
        null=True,
        help_text="Unpaid bills / accrued expenses (from line 9, column A)",
    )
    nonmonetary_adjustment = models.IntegerField(
        verbose_name='nonmonetary adjustment',
        null=True,
        help_text="Non-monetary adjustment (from line 10, column A), which is "
                  "equal to the total of non-monetary contributions",
    )
    total_expenditures_made = models.IntegerField(
        verbose_name='total expenditures made',
        null=True,
        help_text="Total expenditures made (from line 11, column A)",
    )
    begin_cash_balance = models.IntegerField(
        verbose_name='begin cash balance',
        null=True,
        help_text="Beginning cash balance (from line 12), which is equal to "
                  "the Ending Cash Balance (line 16) reported on the summary "
                  "page of the previous Form 460 filing"
    )
    cash_receipts = models.IntegerField(
        verbose_name='cash receipts',
        null=True,
        help_text="Cash receipts (from line 13)",
    )
    miscellaneous_cash_increases = models.IntegerField(
        verbose_name='miscellaneous cash increases',
        null=True,
        help_text="Miscellaneous cash increases (from line 14)",
    )
    cash_payments = models.IntegerField(
        verbose_name='cash payments',
        null=True,
        help_text="Cash payments (from line 15)",
    )
    ending_cash_balance = models.IntegerField(
        verbose_name='ending cash balance',
        null=True,
        help_text="Ending cash balance (from line 16)",
    )
    loan_guarantees_received = models.IntegerField(
        verbose_name='loan guarantees received',
        null=True,
        help_text="Loan guarantees received (from line 17)",
    )
    cash_equivalents = models.IntegerField(
        verbose_name='cash equivalents',
        null=True,
        help_text="Cash equivalents (from line 18), which includes investments "
                  "that can't be readily converted to cash, such as outstanding "
                  "loans the committee has made to others"
    )
    outstanding_debts = models.IntegerField(
        verbose_name='outstanding debts',
        null=True,
        help_text="Outstanding debts on loans owed by the committee (from line "
                  "19)",
    )
    class Meta:
        """
        Model options.
        """
        # Abstract: no table is created for this model; concrete Form 460
        # filing/version models inherit these fields.
        abstract = True
| 35.79562 | 81 | 0.641109 |
a0af6d0234850b8bc5d57b517e38fa0efffbe57d | 16,862 | py | Python | bottom/pack.py | larsks/bottom | eddceacbaef6fda4160ee7f6f1c375e84fbb99fc | [
"MIT"
] | 63 | 2015-01-03T05:38:35.000Z | 2022-03-28T23:59:13.000Z | bottom/pack.py | larsks/bottom | eddceacbaef6fda4160ee7f6f1c375e84fbb99fc | [
"MIT"
] | 51 | 2015-01-13T04:41:58.000Z | 2022-02-24T04:58:17.000Z | bottom/pack.py | larsks/bottom | eddceacbaef6fda4160ee7f6f1c375e84fbb99fc | [
"MIT"
] | 24 | 2015-01-27T23:15:44.000Z | 2021-06-14T20:31:58.000Z | """ Simplified support for rfc2812 """
# https://tools.ietf.org/html/rfc2812
import collections.abc
from typing import Any, Dict, Optional
def b(field: str, kwargs: Dict[str, Any],
      present: Optional[Any] = None, missing: Any = '') -> str:
    """
    Boolean-flag formatter: when ``field`` maps to a truthy value in
    ``kwargs``, render ``present`` (defaulting to the field name itself);
    otherwise render ``missing``.
    """
    if not kwargs.get(field):
        return str(missing)
    return field if present is None else str(present)
def f(field: str, kwargs: Dict[str, Any],
      default: Optional[Any] = None) -> str:
    """
    Field formatter: stringify ``kwargs[field]``, falling back to
    ``default`` when one is supplied.  With no default, a missing field
    raises KeyError (the field is required).
    """
    if default is None:
        return str(kwargs[field])
    return str(kwargs.get(field, default))
def pack(field: str, kwargs: Dict[str, Any],
         default: Optional[Any] = None, sep: str = ',') -> str:
    """
    Join a possibly-multi-valued field into one string.

    Strings pass through unchanged (checked first, since str is itself
    iterable); other iterables are joined with ``sep``; anything else is
    stringified.  Without a default, a missing field raises KeyError.
    """
    value = kwargs[field] if default is None else kwargs.get(field, default)
    if isinstance(value, str):
        return value
    if isinstance(value, collections.abc.Iterable):
        # `item`, not `f`, so the helper function above is not shadowed.
        return sep.join(str(item) for item in value)
    return str(value)
def pack_command(command: str, **kwargs: Any) -> str:
    """
    Pack a command to send to an IRC server.

    Parameters
    ----------
    command : str
        Case-insensitive rfc2812 command name (e.g. ``"PRIVMSG"``).
    **kwargs : Any
        Fields for the command's *normalized* grammar, documented in the
        per-command comment blocks below.  Multi-valued fields (channel,
        nick, key, ...) may be a single value or any iterable.

    Returns
    -------
    str
        The serialized command line (no trailing CRLF appended here).

    Raises
    ------
    ValueError
        If ``command`` is falsy, not a string, or not a known command.
    KeyError
        If a field required by the command is missing from ``kwargs``
        (raised inside the ``f``/``pack`` helpers).
    """
    if not command:
        raise ValueError("Must provide a command")
    if not isinstance(command, str):
        raise ValueError("Command must be a string")
    command = command.upper()
    # ========================================================================
    # For each command, provide:
    # 1. a link to the definition in rfc2812
    # 2. the normalized grammar, which may not equate to the rfc grammar
    # the normalized grammar will use the keys expected in kwargs,
    # which usually do NOT line up with rfc2812. They may also make
    # optional fields which are required in rfc2812, by providing
    # the most common or reasonable defaults.
    # 3. exhaustive examples, preferring normalized form of
    # the rfc2812 examples
    # ========================================================================
    # ========================================================================
    # Normalized grammar:
    # : should not be provided; it denotes the beginning of the last
    # field, which may contain spaces
    # [] indicates an optional field
    # <> denote the key that the field will be filled with
    # because fields are filled from a dict, required fields may follow
    # optional fields - see USER command, where mode is optional
    # (and defaults to 0)
    # "" indicates a literal value that is inserted if present
    # ========================================================================
    # PASS
    # https://tools.ietf.org/html/rfc2812#section-3.1.1
    # PASS <password>
    # ----------
    # PASS secretpasswordhere
    if command == "PASS":
        return "PASS " + f("password", kwargs)
    # NICK
    # https://tools.ietf.org/html/rfc2812#section-3.1.2
    # NICK <nick>
    # ----------
    # NICK Wiz
    elif command == "NICK":
        return "NICK " + f("nick", kwargs)
    # USER
    # https://tools.ietf.org/html/rfc2812#section-3.1.3
    # USER <user> [<mode>] :<realname>
    # ----------
    # USER guest 8 :Ronnie Reagan
    # USER guest :Ronnie Reagan
    elif command == "USER":
        return "USER {} {} * :{}".format(
            f("user", kwargs),
            f("mode", kwargs, 0),
            f("realname", kwargs))
    # OPER
    # https://tools.ietf.org/html/rfc2812#section-3.1.4
    # OPER <user> <password>
    # ----------
    # OPER AzureDiamond hunter2
    elif command == "OPER":
        return "OPER {} {}".format(f("user", kwargs), f("password", kwargs))
    # USERMODE (renamed from MODE)
    # https://tools.ietf.org/html/rfc2812#section-3.1.5
    # MODE <nick> [<modes>]
    # ----------
    # MODE WiZ -w
    # MODE Angel +i
    # MODE
    elif command == "USERMODE":
        return "MODE {} {}".format(f("nick", kwargs), f("modes", kwargs, ''))
    # SERVICE
    # https://tools.ietf.org/html/rfc2812#section-3.1.6
    # SERVICE <nick> <distribution> <type> :<info>
    # ----------
    # SERVICE dict *.fr 0 :French
    elif command == "SERVICE":
        return "SERVICE {} * {} {} 0 :{}".format(
            f("nick", kwargs),
            f("distribution", kwargs),
            f("type", kwargs),
            f("info", kwargs))
    # QUIT
    # https://tools.ietf.org/html/rfc2812#section-3.1.7
    # QUIT :[<message>]
    # ----------
    # QUIT :Gone to lunch
    # QUIT
    elif command == "QUIT":
        if "message" in kwargs:
            return "QUIT :" + f("message", kwargs)
        return "QUIT"
    # SQUIT
    # https://tools.ietf.org/html/rfc2812#section-3.1.8
    # SQUIT <server> [<message>]
    # ----------
    # SQUIT tolsun.oulu.fi :Bad Link
    # SQUIT tolsun.oulu.fi
    elif command == "SQUIT":
        base = "SQUIT " + f("server", kwargs)
        if "message" in kwargs:
            return base + " :" + f("message", kwargs)
        return base
    # JOIN
    # https://tools.ietf.org/html/rfc2812#section-3.2.1
    # JOIN <channel> [<key>]
    # ----------
    # JOIN #foo fookey
    # JOIN #foo
    # JOIN 0
    elif command == "JOIN":
        return "JOIN {} {}".format(pack("channel", kwargs),
                                   pack("key", kwargs, ''))
    # PART
    # https://tools.ietf.org/html/rfc2812#section-3.2.2
    # PART <channel> :[<message>]
    # ----------
    # PART #foo :I lost
    # PART #foo
    elif command == "PART":
        base = "PART " + pack("channel", kwargs)
        if "message" in kwargs:
            return base + " :" + f("message", kwargs)
        return base
    # CHANNELMODE (renamed from MODE)
    # https://tools.ietf.org/html/rfc2812#section-3.2.3
    # MODE <channel> <modes> [<params>]
    # ----------
    # MODE #Finnish +imI *!*@*.fi
    # MODE #en-ops +v WiZ
    # MODE #Fins -s
    elif command == "CHANNELMODE":
        return "MODE {} {} {}".format(f("channel", kwargs),
                                      f("modes", kwargs),
                                      f("params", kwargs, ''))
    # TOPIC
    # https://tools.ietf.org/html/rfc2812#section-3.2.4
    # TOPIC <channel> :[<message>]
    # ----------
    # TOPIC #test :New topic
    # TOPIC #test :
    # TOPIC #test
    elif command == "TOPIC":
        base = "TOPIC " + f("channel", kwargs)
        if "message" in kwargs:
            return base + " :" + f("message", kwargs)
        return base
    # NAMES
    # https://tools.ietf.org/html/rfc2812#section-3.2.5
    # NAMES [<channel>] [<target>]
    # ----------
    # NAMES #twilight_zone remote.*.edu
    # NAMES #twilight_zone
    # NAMES
    elif command == "NAMES":
        if "channel" in kwargs:
            return "NAMES {} {}".format(pack("channel", kwargs),
                                        f("target", kwargs, ''))
        return "NAMES"
    # LIST
    # https://tools.ietf.org/html/rfc2812#section-3.2.6
    # LIST [<channel>] [<target>]
    # ----------
    # LIST #twilight_zone remote.*.edu
    # LIST #twilight_zone
    # LIST
    elif command == "LIST":
        if "channel" in kwargs:
            return "LIST {} {}".format(pack("channel", kwargs),
                                       f("target", kwargs, ''))
        return "LIST"
    # INVITE
    # https://tools.ietf.org/html/rfc2812#section-3.2.7
    # INVITE <nick> <channel>
    # ----------
    # INVITE Wiz #Twilight_Zone
    elif command == "INVITE":
        return "INVITE {} {}".format(f("nick", kwargs),
                                     f("channel", kwargs))
    # KICK
    # https://tools.ietf.org/html/rfc2812#section-3.2.8
    # KICK <channel> <nick> :[<message>]
    # ----------
    # KICK #Finnish WiZ :Speaking English
    # KICK #Finnish WiZ,Wiz-Bot :Both speaking English
    # KICK #Finnish,#English WiZ,ZiW :Speaking wrong language
    elif command == "KICK":
        base = "KICK {} {}".format(pack("channel", kwargs),
                                   pack("nick", kwargs))
        if "message" in kwargs:
            return base + " :" + pack("message", kwargs)
        return base
    # PRIVMSG
    # https://tools.ietf.org/html/rfc2812#section-3.3.1
    # PRIVMSG <target> :<message>
    # ----------
    # PRIVMSG Angel :yes I'm receiving it !
    # PRIVMSG $*.fi :Server tolsun.oulu.fi rebooting.
    # PRIVMSG #Finnish :This message is in english
    elif command == "PRIVMSG":
        return "PRIVMSG {} :{}".format(f("target", kwargs),
                                       f("message", kwargs))
    # NOTICE
    # https://tools.ietf.org/html/rfc2812#section-3.3.2
    # NOTICE <target> :<message>
    # ----------
    # NOTICE Angel :yes I'm receiving it !
    # NOTICE $*.fi :Server tolsun.oulu.fi rebooting.
    # NOTICE #Finnish :This message is in english
    elif command == "NOTICE":
        return "NOTICE {} :{}".format(f("target", kwargs),
                                      f("message", kwargs))
    # MOTD
    # https://tools.ietf.org/html/rfc2812#section-3.4.1
    # MOTD [<target>]
    # ----------
    # MOTD remote.*.edu
    # MOTD
    elif command == "MOTD":
        return "MOTD " + f("target", kwargs, '')
    # LUSERS
    # https://tools.ietf.org/html/rfc2812#section-3.4.2
    # LUSERS [<mask>] [<target>]
    # ----------
    # LUSERS *.edu remote.*.edu
    # LUSERS *.edu
    # LUSERS
    elif command == "LUSERS":
        if "mask" in kwargs:
            return "LUSERS {} {}".format(f("mask", kwargs),
                                         f("target", kwargs, ''))
        return "LUSERS"
    # VERSION
    # https://tools.ietf.org/html/rfc2812#section-3.4.3
    # VERSION [<target>]
    # ----------
    # VERSION remote.*.edu
    # VERSION
    elif command == "VERSION":
        return "VERSION " + f("target", kwargs, '')
    # STATS
    # https://tools.ietf.org/html/rfc2812#section-3.4.4
    # STATS [<query>] [<target>]
    # ----------
    # STATS m remote.*.edu
    # STATS m
    # STATS
    elif command == "STATS":
        if "query" in kwargs:
            return "STATS {} {}".format(f("query", kwargs),
                                        f("target", kwargs, ''))
        return "STATS"
    # LINKS
    # https://tools.ietf.org/html/rfc2812#section-3.4.5
    # LINKS [<remote>] [<mask>]
    # ----------
    # LINKS *.edu *.bu.edu
    # LINKS *.au
    # LINKS
    elif command == "LINKS":
        if "remote" in kwargs:
            return "LINKS {} {}".format(f("remote", kwargs), f("mask", kwargs))
        elif "mask" in kwargs:
            return "LINKS " + f("mask", kwargs)
        return "LINKS"
    # TIME
    # https://tools.ietf.org/html/rfc2812#section-3.4.6
    # TIME [<target>]
    # ----------
    # TIME remote.*.edu
    # TIME
    elif command == "TIME":
        return "TIME " + f("target", kwargs, '')
    # CONNECT
    # https://tools.ietf.org/html/rfc2812#section-3.4.7
    # CONNECT <target> <port> [<remote>]
    # ----------
    # CONNECT tolsun.oulu.fi 6667 *.edu
    # CONNECT tolsun.oulu.fi 6667
    elif command == "CONNECT":
        return "CONNECT {} {} {}".format(f("target", kwargs),
                                         f("port", kwargs),
                                         f("remote", kwargs, ''))
    # TRACE
    # https://tools.ietf.org/html/rfc2812#section-3.4.8
    # TRACE [<target>]
    # ----------
    # TRACE
    elif command == "TRACE":
        return "TRACE " + f("target", kwargs, '')
    # ADMIN
    # https://tools.ietf.org/html/rfc2812#section-3.4.9
    # ADMIN [<target>]
    # ----------
    # ADMIN
    elif command == "ADMIN":
        return "ADMIN " + f("target", kwargs, '')
    # INFO
    # https://tools.ietf.org/html/rfc2812#section-3.4.10
    # INFO [<target>]
    # ----------
    # INFO
    elif command == "INFO":
        return "INFO " + f("target", kwargs, '')
    # SERVLIST
    # https://tools.ietf.org/html/rfc2812#section-3.5.1
    # SERVLIST [<mask>] [<type>]
    # ----------
    # SERVLIST *SERV 3
    # SERVLIST *SERV
    # SERVLIST
    elif command == "SERVLIST":
        return "SERVLIST {} {}".format(f("mask", kwargs, ''),
                                       f("type", kwargs, ''))
    # SQUERY
    # https://tools.ietf.org/html/rfc2812#section-3.5.2
    # SQUERY <target> :<message>
    # ----------
    # SQUERY irchelp :HELP privmsg
    elif command == "SQUERY":
        return "SQUERY {} :{}".format(f("target", kwargs),
                                      f("message", kwargs))
    # WHO
    # https://tools.ietf.org/html/rfc2812#section-3.6.1
    # WHO [<mask>] ["o"]
    # ----------
    # WHO jto* o
    # WHO *.fi
    # WHO
    elif command == "WHO":
        return "WHO {} {}".format(f("mask", kwargs, ''), b("o", kwargs))
    # WHOIS
    # https://tools.ietf.org/html/rfc2812#section-3.6.2
    # WHOIS <mask> [<target>]
    # ----------
    # WHOIS jto* o remote.*.edu
    # WHOIS jto* o
    # WHOIS *.fi
    elif command == "WHOIS":
        return "WHOIS {} {}".format(pack("mask", kwargs),
                                    f("target", kwargs, ''))
    # WHOWAS
    # https://tools.ietf.org/html/rfc2812#section-3.6.3
    # WHOWAS <nick> [<count>] [<target>]
    # ----------
    # WHOWAS Wiz 9 remote.*.edu
    # WHOWAS Wiz 9
    # WHOWAS Mermaid
    elif command == "WHOWAS":
        if "count" in kwargs:
            return "WHOWAS {} {} {}".format(pack("nick", kwargs),
                                            f("count", kwargs),
                                            f("target", kwargs, ''))
        return "WHOWAS " + pack("nick", kwargs)
    # KILL
    # https://tools.ietf.org/html/rfc2812#section-3.7.1
    # KILL <nick> :<message>
    # ----------
    # KILL WiZ :Spamming joins
    elif command == "KILL":
        return "KILL {} :{}".format(f("nick", kwargs), f("message", kwargs))
    # PING
    # https://tools.ietf.org/html/rfc2812#section-3.7.2
    # PING :[<message>]
    # ----------
    # PING :I'm still here
    # PING
    elif command == "PING":
        if "message" in kwargs:
            return "PING :{}".format(f("message", kwargs))
        else:
            return "PING"
    # PONG
    # https://tools.ietf.org/html/rfc2812#section-3.7.3
    # PONG :[<message>]
    # ----------
    # PONG :I'm still here
    # PONG
    elif command == "PONG":
        if "message" in kwargs:
            return "PONG :{}".format(f("message", kwargs))
        else:
            return "PONG"
    # AWAY
    # https://tools.ietf.org/html/rfc2812#section-4.1
    # AWAY :[<message>]
    # ----------
    # AWAY :Gone to lunch.
    # AWAY
    elif command == "AWAY":
        if "message" in kwargs:
            return "AWAY :" + f("message", kwargs)
        return "AWAY"
    # REHASH
    # https://tools.ietf.org/html/rfc2812#section-4.2
    # REHASH
    # ----------
    # REHASH
    elif command == "REHASH":
        return "REHASH"
    # DIE
    # https://tools.ietf.org/html/rfc2812#section-4.3
    # DIE
    # ----------
    # DIE
    elif command == "DIE":
        return "DIE"
    # RESTART
    # https://tools.ietf.org/html/rfc2812#section-4.4
    # RESTART
    # ----------
    # RESTART
    elif command == "RESTART":
        return "RESTART"
    # SUMMON
    # https://tools.ietf.org/html/rfc2812#section-4.5
    # SUMMON <nick> [<target>] [<channel>]
    # ----------
    # SUMMON Wiz remote.*.edu #Finnish
    # SUMMON Wiz remote.*.edu
    # SUMMON Wiz
    elif command == "SUMMON":
        if "target" in kwargs:
            return "SUMMON {} {} {}".format(f("nick", kwargs),
                                            f("target", kwargs),
                                            f("channel", kwargs, ''))
        return "SUMMON " + f("nick", kwargs)
    # USERS
    # https://tools.ietf.org/html/rfc2812#section-4.6
    # USERS [<target>]
    # ----------
    # USERS remote.*.edu
    # USERS
    elif command == "USERS":
        return "USERS " + f("target", kwargs, '')
    # WALLOPS
    # https://tools.ietf.org/html/rfc2812#section-4.7
    # WALLOPS :<message>
    # ----------
    # WALLOPS :Maintenance in 5 minutes
    elif command == "WALLOPS":
        return "WALLOPS :" + f("message", kwargs)
    # USERHOST
    # https://tools.ietf.org/html/rfc2812#section-4.8
    # USERHOST <nick>
    # ----------
    # USERHOST Wiz Michael syrk
    # USERHOST syrk
    elif command == "USERHOST":
        return "USERHOST " + pack("nick", kwargs, sep=" ")
    # ISON
    # https://tools.ietf.org/html/rfc2812#section-4.9
    # ISON <nick>
    # ----------
    # ISON Wiz Michael syrk
    # ISON syrk
    elif command == "ISON":
        return "ISON " + pack("nick", kwargs, sep=" ")
    else:
        raise ValueError("Unknown command '{}'".format(command))
| 30.770073 | 79 | 0.506701 |
237d571507a3a636a3119c29772ab32df77548a2 | 12,633 | py | Python | pandas/core/arrays/numpy_.py | mayank1897/pandas | c9e4ba146053f0b59160fd7fad70d9e0f6dab463 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/arrays/numpy_.py | mayank1897/pandas | c9e4ba146053f0b59160fd7fad70d9e0f6dab463 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/arrays/numpy_.py | mayank1897/pandas | c9e4ba146053f0b59160fd7fad70d9e0f6dab463 | [
"BSD-3-Clause"
] | 1 | 2021-04-11T21:22:00.000Z | 2021-04-11T21:22:00.000Z | import numbers
from typing import Tuple, Type, Union
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
from pandas._typing import Scalar
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import isna
from pandas import compat
from pandas.core import nanops, ops
from pandas.core.array_algos import masked_reductions
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.arrays.base import ExtensionOpsMixin
from pandas.core.strings.object_array import ObjectStringArrayMixin
class PandasDtype(ExtensionDtype):
    """
    ExtensionDtype wrapper around a plain NumPy dtype.

    .. versionadded:: 0.24.0

    Exists mainly so NumPy-backed data can flow through pandas'
    extension-array machinery; it is rarely useful on its own.

    Parameters
    ----------
    dtype : object
        Anything :func:`numpy.dtype` accepts.

    See Also
    --------
    numpy.dtype
    """

    # Attributes that participate in ExtensionDtype equality and hashing.
    _metadata = ("_dtype",)

    def __init__(self, dtype: object):
        # Normalize whatever was passed into a real np.dtype up front.
        self._dtype = np.dtype(dtype)

    def __repr__(self) -> str:
        return f"PandasDtype({self.name!r})"

    @property
    def numpy_dtype(self) -> np.dtype:
        """The NumPy dtype this PandasDtype wraps."""
        return self._dtype

    @property
    def name(self) -> str:
        """A bit-width name for this data-type."""
        return self._dtype.name

    @property
    def type(self) -> Type[np.generic]:
        """The scalar type used to instantiate values of this data-type."""
        return self._dtype.type

    @property
    def kind(self) -> str:
        """Single-character NumPy kind code identifying the general kind of data."""
        return self._dtype.kind

    @property
    def itemsize(self) -> int:
        """The element size of this data-type object."""
        return self._dtype.itemsize

    @property
    def _is_numeric(self) -> bool:
        # bool, (un)signed integer, float and complex kinds count as numeric;
        # object, str, unicode and void do not.
        return self.kind in set("biufc")

    @property
    def _is_boolean(self) -> bool:
        return self.kind == "b"

    @classmethod
    def construct_from_string(cls, string: str) -> "PandasDtype":
        try:
            dtype = np.dtype(string)
        except TypeError as err:
            if isinstance(string, str):
                msg = f"Cannot construct a 'PandasDtype' from '{string}'"
            else:
                msg = f"'construct_from_string' expects a string, got {type(string)}"
            raise TypeError(msg) from err
        return cls(dtype)

    @classmethod
    def construct_array_type(cls) -> Type["PandasArray"]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return PandasArray
class PandasArray(
    NDArrayBackedExtensionArray,
    ExtensionOpsMixin,
    NDArrayOperatorsMixin,
    ObjectStringArrayMixin,
):
    """
    A pandas ExtensionArray for NumPy data.
    .. versionadded:: 0.24.0
    This is mostly for internal compatibility, and is not especially
    useful on its own.
    Parameters
    ----------
    values : ndarray
        The NumPy ndarray to wrap. Must be 1-dimensional.
    copy : bool, default False
        Whether to copy `values`.
    Attributes
    ----------
    None
    Methods
    -------
    None
    """
    # If you're wondering why pd.Series(cls) doesn't put the array in an
    # ExtensionBlock, search for `ABCPandasArray`. We check for
    # that _typ to ensure that that users don't unnecessarily use EAs inside
    # pandas internals, which turns off things like block consolidation.
    _typ = "npy_extension"
    # High priority so that in mixed operations NumPy defers to this class's
    # reflected operators instead of handling the ndarray operand itself.
    __array_priority__ = 1000
    # The wrapped 1-dimensional ndarray; assigned in __init__.
    _ndarray: np.ndarray
    # ------------------------------------------------------------------------
    # Constructors
    def __init__(self, values: Union[np.ndarray, "PandasArray"], copy: bool = False):
        if isinstance(values, type(self)):
            values = values._ndarray
        if not isinstance(values, np.ndarray):
            raise ValueError(
                f"'values' must be a NumPy array, not {type(values).__name__}"
            )
        if values.ndim != 1:
            raise ValueError("PandasArray must be 1-dimensional.")
        if copy:
            values = values.copy()
        self._ndarray = values
        self._dtype = PandasDtype(values.dtype)
    @classmethod
    def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "PandasArray":
        if isinstance(dtype, PandasDtype):
            dtype = dtype._dtype
        result = np.asarray(scalars, dtype=dtype)
        # np.asarray may return the input unchanged; only then is an explicit
        # copy still needed to honor copy=True.
        if copy and result is scalars:
            result = result.copy()
        return cls(result)
    @classmethod
    def _from_factorized(cls, values, original) -> "PandasArray":
        return cls(values)
    def _from_backing_data(self, arr: np.ndarray) -> "PandasArray":
        return type(self)(arr)
    # ------------------------------------------------------------------------
    # Data
    @property
    def dtype(self) -> PandasDtype:
        return self._dtype
    # ------------------------------------------------------------------------
    # NumPy Array Interface
    def __array__(self, dtype=None) -> np.ndarray:
        return np.asarray(self._ndarray, dtype=dtype)
    _HANDLED_TYPES = (np.ndarray, numbers.Number)
    def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
        # Lightly modified version of
        # https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html
        # The primary modification is not boxing scalar return values
        # in PandasArray, since pandas' ExtensionArrays are 1-d.
        out = kwargs.get("out", ())
        for x in inputs + out:
            # Only support operations with instances of _HANDLED_TYPES.
            # Use PandasArray instead of type(self) for isinstance to
            # allow subclasses that don't override __array_ufunc__ to
            # handle PandasArray objects.
            if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
                return NotImplemented
        # Defer to the implementation of the ufunc on unwrapped values.
        inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x for x in inputs)
        if out:
            kwargs["out"] = tuple(
                x._ndarray if isinstance(x, PandasArray) else x for x in out
            )
        result = getattr(ufunc, method)(*inputs, **kwargs)
        if type(result) is tuple and len(result):
            # multiple return values
            if not lib.is_scalar(result[0]):
                # re-box array-like results
                return tuple(type(self)(x) for x in result)
            else:
                # but not scalar reductions
                return result
        elif method == "at":
            # no return value
            return None
        else:
            # one return value
            if not lib.is_scalar(result):
                # re-box array-like results, but not scalar reductions
                result = type(self)(result)
            return result
    # ------------------------------------------------------------------------
    # Pandas ExtensionArray Interface
    def isna(self) -> np.ndarray:
        return isna(self._ndarray)
    def _validate_fill_value(self, fill_value):
        if fill_value is None:
            # Primarily for subclasses
            fill_value = self.dtype.na_value
        return fill_value
    def _values_for_factorize(self) -> Tuple[np.ndarray, int]:
        # -1 is the sentinel used to mark NA codes during factorization.
        return self._ndarray, -1
    # ------------------------------------------------------------------------
    # Reductions
    # Each reduction first calls an nv.validate_* helper, which rejects
    # NumPy-compatibility kwargs that were changed from their defaults, then
    # delegates to the nanops/masked_reductions implementation.
    def any(self, axis=None, out=None, keepdims=False, skipna=True):
        nv.validate_any((), dict(out=out, keepdims=keepdims))
        return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
    def all(self, axis=None, out=None, keepdims=False, skipna=True):
        nv.validate_all((), dict(out=out, keepdims=keepdims))
        return nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
    def min(self, skipna: bool = True, **kwargs) -> Scalar:
        nv.validate_min((), kwargs)
        result = masked_reductions.min(
            values=self.to_numpy(), mask=self.isna(), skipna=skipna
        )
        return result
    def max(self, skipna: bool = True, **kwargs) -> Scalar:
        nv.validate_max((), kwargs)
        result = masked_reductions.max(
            values=self.to_numpy(), mask=self.isna(), skipna=skipna
        )
        return result
    def sum(self, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar:
        nv.validate_sum((), kwargs)
        return nanops.nansum(
            self._ndarray, axis=axis, skipna=skipna, min_count=min_count
        )
    def prod(self, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar:
        nv.validate_prod((), kwargs)
        return nanops.nanprod(
            self._ndarray, axis=axis, skipna=skipna, min_count=min_count
        )
    def mean(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
        nv.validate_mean((), dict(dtype=dtype, out=out, keepdims=keepdims))
        return nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
    def median(
        self, axis=None, out=None, overwrite_input=False, keepdims=False, skipna=True
    ):
        nv.validate_median(
            (), dict(out=out, overwrite_input=overwrite_input, keepdims=keepdims)
        )
        return nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
    def std(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
        nv.validate_stat_ddof_func(
            (), dict(dtype=dtype, out=out, keepdims=keepdims), fname="std"
        )
        return nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
    def var(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
        nv.validate_stat_ddof_func(
            (), dict(dtype=dtype, out=out, keepdims=keepdims), fname="var"
        )
        return nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
    def sem(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True):
        nv.validate_stat_ddof_func(
            (), dict(dtype=dtype, out=out, keepdims=keepdims), fname="sem"
        )
        return nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
    def kurt(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
        nv.validate_stat_ddof_func(
            (), dict(dtype=dtype, out=out, keepdims=keepdims), fname="kurt"
        )
        return nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
    def skew(self, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
        nv.validate_stat_ddof_func(
            (), dict(dtype=dtype, out=out, keepdims=keepdims), fname="skew"
        )
        return nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
    # ------------------------------------------------------------------------
    # Additional Methods
    def to_numpy(
        self, dtype=None, copy: bool = False, na_value=lib.no_default
    ) -> np.ndarray:
        result = np.asarray(self._ndarray, dtype=dtype)
        # Copy when explicitly requested, or when na_value must be written
        # into a result that still shares memory with self._ndarray.
        if (copy or na_value is not lib.no_default) and result is self._ndarray:
            result = result.copy()
        if na_value is not lib.no_default:
            result[self.isna()] = na_value
        return result
    # ------------------------------------------------------------------------
    # Ops
    def __invert__(self):
        return type(self)(~self._ndarray)
    @classmethod
    def _create_arithmetic_method(cls, op):
        @ops.unpack_zerodim_and_defer(op.__name__)
        def arithmetic_method(self, other):
            if isinstance(other, cls):
                other = other._ndarray
            with np.errstate(all="ignore"):
                result = op(self._ndarray, other)
            if op is divmod:
                # divmod returns a pair; re-box each half separately.
                a, b = result
                return cls(a), cls(b)
            return cls(result)
        return compat.set_function_name(arithmetic_method, f"__{op.__name__}__", cls)
    _create_comparison_method = _create_arithmetic_method
    # ------------------------------------------------------------------------
    # String methods interface
    # NA value used by the ObjectStringArrayMixin string methods.
    _str_na_value = np.nan
# Attach the arithmetic/comparison dunders produced by the
# _create_arithmetic_method/_create_comparison_method factories above
# (ExtensionOpsMixin wiring); must run after the class body is defined.
PandasArray._add_arithmetic_ops()
PandasArray._add_comparison_ops()
| 32.392308 | 102 | 0.591942 |
3a2a9201055cf6c678c1fa094d5a43bbf2a88550 | 835 | py | Python | application/flask_math/calculation/Sieve_of_Eratosthenes.py | kouki-0926/Flask_RaspberryPi | 874dc160af038ee717b90fe4a587a42082eb6664 | [
"MIT"
] | null | null | null | application/flask_math/calculation/Sieve_of_Eratosthenes.py | kouki-0926/Flask_RaspberryPi | 874dc160af038ee717b90fe4a587a42082eb6664 | [
"MIT"
] | null | null | null | application/flask_math/calculation/Sieve_of_Eratosthenes.py | kouki-0926/Flask_RaspberryPi | 874dc160af038ee717b90fe4a587a42082eb6664 | [
"MIT"
] | null | null | null | from flask_math.calculation.common.NEWTON_METHOD import NEWTON_METHOD
from flask import flash
def Sieve_of_Eratosthenes(N):
    """Return the primes up to ``N``, formatted for template display.

    The first element of the returned list is a Japanese header string
    ("primes up to N"); the remaining elements are lists of at most 15
    primes each.  On invalid input, ``["Error"]`` is returned and an
    explanatory message is flashed to the Flask session.

    Parameters
    ----------
    N : int or str
        Inclusive upper bound; anything ``int()`` accepts.
    """
    try:
        N = int(N)
        if N >= 2:
            candidates = list(range(2, N + 1))
            primes = []
            # NEWTON_METHOD(N) approximates sqrt(N); assumed pure, so it is
            # hoisted out of the loop instead of being recomputed each pass
            # (TODO confirm NEWTON_METHOD has no side effects).
            limit = NEWTON_METHOD(N)
            smallest = 0
            while smallest < limit:
                # The smallest remaining candidate is always prime; remove
                # all of its multiples from the pool.
                smallest = min(candidates)
                primes.append(smallest)
                candidates = [i for i in candidates if i % smallest != 0]
            # Everything left after sieving up to sqrt(N) is prime too.
            primes = primes + candidates
            n = 15
            Anser = [str(N) + "以下の素数"]
            # Chunk into rows of 15. Stepping by n avoids the trailing empty
            # row the previous range(len//n + 1) loop appended whenever the
            # number of primes was an exact multiple of 15.
            for start in range(0, len(primes), n):
                Anser.append(primes[start:start + n])
        else:
            Anser = ["Error"]
            flash("エラー:2以上の自然数を入力してください")
    except Exception:
        # `except Exception` (not a bare `except:`) so KeyboardInterrupt and
        # SystemExit are not swallowed; int() failures still land here.
        Anser = ["Error"]
        flash("エラー:もう一度入力してください")
    return Anser
| 27.833333 | 70 | 0.48024 |
a58aad6c359cb75d0193531b0f1d00c8e492b17a | 233 | py | Python | src/wsgi_benchmark/eventlet_server.py | jamespic/wsgi_benchmark | d5c02fb7530501a46c22765bf3da7c564a68872d | [
"MIT"
] | null | null | null | src/wsgi_benchmark/eventlet_server.py | jamespic/wsgi_benchmark | d5c02fb7530501a46c22765bf3da7c564a68872d | [
"MIT"
] | null | null | null | src/wsgi_benchmark/eventlet_server.py | jamespic/wsgi_benchmark | d5c02fb7530501a46c22765bf3da7c564a68872d | [
"MIT"
] | null | null | null | if __name__ == '__main__':
from eventlet import monkey_patch, wsgi
import eventlet
eventlet.sleep()
monkey_patch()
from wsgi_benchmark.handlers import app
wsgi.server(eventlet.listen(('0.0.0.0', 8765)), app)
| 25.888889 | 56 | 0.690987 |
b33d1c8a635fd7aaaed181621c4e12a958b990e1 | 117 | py | Python | mmcls/models/classifiers/__init__.py | ZwwWayne/mmclassification | 2ccc55ce4f783ca34892fe7d91f247d18906a994 | [
"Apache-2.0"
] | 31 | 2020-11-14T02:47:54.000Z | 2021-12-14T06:26:10.000Z | mmcls/models/classifiers/__init__.py | ZwwWayne/mmclassification | 2ccc55ce4f783ca34892fe7d91f247d18906a994 | [
"Apache-2.0"
] | 2 | 2020-09-01T00:53:39.000Z | 2022-01-27T20:26:11.000Z | mmcls/models/classifiers/__init__.py | ZwwWayne/mmclassification | 2ccc55ce4f783ca34892fe7d91f247d18906a994 | [
"Apache-2.0"
] | 4 | 2021-01-14T18:12:38.000Z | 2021-11-11T11:46:50.000Z | from .base import BaseClassifier
from .image import ImageClassifier
__all__ = ['BaseClassifier', 'ImageClassifier']
| 23.4 | 47 | 0.803419 |
c8d07e3a0d77370224c80c642afff6fabbac993c | 2,592 | py | Python | up/tasks/cls/models/postprocess/cls_postprocess.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
] | 196 | 2021-10-30T05:15:36.000Z | 2022-03-30T18:43:40.000Z | up/tasks/cls/models/postprocess/cls_postprocess.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
] | 12 | 2021-10-30T11:33:28.000Z | 2022-03-31T14:22:58.000Z | up/tasks/cls/models/postprocess/cls_postprocess.py | ModelTC/EOD | 164bff80486e9ae6a095a97667b365c46ceabd86 | [
"Apache-2.0"
] | 23 | 2021-11-01T07:26:17.000Z | 2022-03-27T05:55:37.000Z | import torch.nn as nn
import torch.nn.functional as F
from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
from up.utils.model import accuracy as A
from up.models.losses import build_loss
__all__ = ['BaseClsPostProcess']
@MODULE_ZOO_REGISTRY.register('base_cls_postprocess')
class BaseClsPostProcess(nn.Module):
    """Classification post-processing module.

    In training mode it computes classification loss and top-1 accuracy
    (per head when multiple heads are present); in eval mode it returns
    softmax scores and argmax predictions.
    """

    def __init__(self, cls_loss, prefix=None):
        """
        Args:
            cls_loss: loss config dict, or a list of config dicts (one per
                classification head), each resolved via ``build_loss``.
            prefix: key prefix used in the returned loss/accuracy dict;
                defaults to the class name.
        """
        super(BaseClsPostProcess, self).__init__()
        if isinstance(cls_loss, list):
            self.cls_loss = nn.ModuleList()
            for _loss in cls_loss:
                self.cls_loss.append(build_loss(_loss))
        else:
            self.cls_loss = build_loss(cls_loss)
        self.prefix = prefix if prefix is not None else self.__class__.__name__

    def get_acc(self, logits, targets):
        """Top-1 accuracy for a single head."""
        acc = A.accuracy(logits, targets)[0]
        return acc

    def get_loss(self, logits, targets):
        """Return a dict of per-head loss and accuracy entries.

        When ``logits`` is a list (multi-head), ``targets`` is expected to
        be 2-D with one target column per head -- TODO confirm upstream.
        """
        loss_info = {}
        if isinstance(logits, list):
            # Note: the previous dead ``loss = 0`` accumulator was removed;
            # only the per-head entries in loss_info are ever returned.
            for idx, logit in enumerate(logits):
                if isinstance(self.cls_loss, nn.ModuleList):
                    # One dedicated loss module per head.
                    assert len(logits) == len(self.cls_loss)
                    loss = self.cls_loss[idx](logit, targets[:, idx])
                else:
                    # A single shared loss applied to every head.
                    loss = self.cls_loss(logit, targets[:, idx])
                loss_info[f"{self.prefix}_head_{idx}.loss"] = loss
                loss_info[f"{self.prefix}_head_{idx}.accuracy"] = self.get_acc(logit, targets[:, idx])
        else:
            loss = self.cls_loss(logits, targets)
            loss_info[f"{self.prefix}.loss"] = loss
            loss_info[f"{self.prefix}.accuracy"] = self.get_acc(logits, targets)
        return loss_info

    def get_single_pred(self, logit):
        """Return (softmax scores, flattened argmax class index) for one head."""
        score = F.softmax(logit, dim=1)
        _, pred = logit.data.topk(k=1, dim=1)
        pred = pred.view(-1)
        return score, pred

    def get_test_output(self, logits):
        """Inference-time outputs: predictions and scores.

        Both values are lists (one entry per head) when ``logits`` is a list.
        """
        if isinstance(logits, list):
            scores = []
            preds = []
            for logit in logits:
                score, pred = self.get_single_pred(logit)
                preds.append(pred)
                scores.append(score)
        else:
            scores, preds = self.get_single_pred(logits)
        return {"preds": preds, "scores": scores}

    def forward(self, input):
        """Dispatch to loss computation (training) or prediction (eval)."""
        logits = input['logits']
        output = {}
        if self.training:
            targets = input['gt']
            return self.get_loss(logits, targets)
        else:
            results = self.get_test_output(logits)
            output.update(results)
            return output
| 36 | 102 | 0.58912 |
d009eb8d901f7ff3e02b72ccf5942e2c34a932d1 | 662 | py | Python | app/core/management/commands/wait_for_db.py | abexamir/api-blueprint | 8d5c02766c03e9e031aa2ba6233241b2b8af2999 | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | abexamir/api-blueprint | 8d5c02766c03e9e031aa2ba6233241b2b8af2999 | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | abexamir/api-blueprint | 8d5c02766c03e9e031aa2ba6233241b2b8af2999 | [
"MIT"
] | null | null | null | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django command to pause execution until database is available"""

    def _attempt_connection(self):
        # One probe of the default database: hand back the connection
        # wrapper on success, or sleep a second and report None while the
        # database is still refusing connections.
        try:
            return connections['default']
        except OperationalError:
            self.stdout.write('Database not available, waiting 1 sec')
            time.sleep(1)
            return None

    def handle(self, *args, **options):
        """Block (retrying once per second) until the database answers."""
        self.stdout.write('Waiting for database')
        conn = self._attempt_connection()
        while not conn:
            conn = self._attempt_connection()
        self.stdout.write(self.style.SUCCESS('Database available'))
| 33.1 | 74 | 0.65861 |
3c89d9987b16415774bbee64e5accc2b8ccc6a17 | 1,953 | py | Python | lldb/test/API/linux/add-symbols/TestTargetSymbolsAddCommand.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | 605 | 2019-10-18T01:15:54.000Z | 2022-03-31T14:31:04.000Z | lldb/test/API/linux/add-symbols/TestTargetSymbolsAddCommand.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | 3,180 | 2019-10-18T01:21:21.000Z | 2022-03-31T23:25:41.000Z | lldb/test/API/linux/add-symbols/TestTargetSymbolsAddCommand.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | 275 | 2019-10-18T05:27:22.000Z | 2022-03-30T09:04:21.000Z | """ Testing explicit symbol loading via target symbols add. """
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TargetSymbolsAddCommand(TestBase):
    """Tests explicit symbol loading via the 'target symbols add' command."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)
        self.source = 'main.c'

    @no_debug_info_test # Prevent the generation of the dwarf version of this test
    @skipUnlessPlatform(['linux'])
    @skipIf(bugnumber="rdar://38550275")
    def test_target_symbols_add(self):
        """Test that 'target symbols add' can load the symbols
        even if gnu.build-id and gnu_debuglink are not present in the module.
        Similar to test_add_dsym_mid_execution test for macos."""
        self.build()
        exe = self.getBuildArtifact("stripped.out")

        self.target = self.dbg.CreateTarget(exe)
        self.assertTrue(self.target, VALID_TARGET)

        main_bp = self.target.BreakpointCreateByName("main", "stripped.out")
        self.assertTrue(main_bp, VALID_BREAKPOINT)

        self.process = self.target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(self.process, PROCESS_IS_VALID)

        # The stop reason of the thread should be breakpoint.
        # (assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.)
        self.assertEqual(self.process.GetState(), lldb.eStateStopped,
                         STOPPED_DUE_TO_BREAKPOINT)

        exe_module = self.target.GetModuleAtIndex(0)

        # Check that symbols are not loaded and main.c is not known to be
        # the source file.
        self.expect("frame select", substrs=['main.c'], matching=False)

        # Tell LLDB that a.out has symbols for stripped.out
        self.runCmd("target symbols add -s %s %s" %
                    (exe, self.getBuildArtifact("a.out")))

        # Check that symbols are now loaded and main.c is in the output.
        self.expect("frame select", substrs=['main.c'])
| 37.557692 | 83 | 0.673323 |
943dc74984b1132b1c77d2fd612cac04adc48ae0 | 9,568 | py | Python | app/models/collection.py | Midas0615/react-flask-E-commerce | 0d18409e9b58363b8035cce96b930602ec648fbd | [
"MIT"
] | 54 | 2017-08-22T05:59:11.000Z | 2021-09-28T06:48:23.000Z | app/models/collection.py | Midas0615/react-flask-E-commerce | 0d18409e9b58363b8035cce96b930602ec648fbd | [
"MIT"
] | 5 | 2021-02-08T20:14:18.000Z | 2021-12-13T19:36:25.000Z | app/models/collection.py | Midas0615/react-flask-E-commerce | 0d18409e9b58363b8035cce96b930602ec648fbd | [
"MIT"
] | 30 | 2017-10-24T19:36:17.000Z | 2021-11-03T04:41:50.000Z | from app import mysql, webapp
from app.models import *
from app.scripts import Indexer
import json
from slugify import slugify
class Collection(Prototype):
    """A curated collection of items backed by the ``collections`` tables,
    with per-collection caching of the assembled record."""

    def __init__(self, collection_id):
        # Eagerly load (or fetch from cache) the full collection record.
        self.data = self.getData(collection_id)

    def getData(self, collection_id):
        """Return the collection row plus parsed metadata and resolved items.

        Results are cached under ``collection_<id>``; the cache entry is
        invalidated by saveCollectionData().
        """
        from app import cache
        cache_key = 'collection_'+str(collection_id)
        collection_data = cache.get(cache_key)
        if collection_data:
            return collection_data
        cursor = mysql.connect().cursor()
        # Fetch the collection row; correlated subqueries flatten item ids
        # (sorted) and metadata key:value pairs into delimited strings.
        cursor.execute("""SELECT c.*,
            (select group_concat(ci.item_id order by ci.sort_order asc separator ',') from collections_items ci
            where ci.collection_id = c.collection_id) as item_ids,
            (select group_concat(concat(cm.meta_key,":",cm.meta_value) separator '&') from collections_metadata cm
            where cm.collection_id = c.collection_id) as metadata
            FROM collections c WHERE c.collection_id = %s""", (collection_id,))
        data = Utils.fetchOneAssoc(cursor)
        if data['metadata']:
            # Expand "key:value&key:value" into a dict.  NOTE(review): this
            # assumes neither keys nor values contain ':' or '&'.
            collections_metadata_raw = data['metadata']
            data['metadata'] = {}
            for props in collections_metadata_raw.split('&'):
                props_formatted = props.split(':')
                data['metadata'][props_formatted[0]] = props_formatted[1]
        if data['item_ids']:
            # Resolve the comma-separated item id list into item objects.
            data['item_ids'] = [int(_) for _ in data['item_ids'].split(',')]
            data['items'] = Search().getById(data['item_ids'])
        else:
            data['items'] = []
        if not data:
            data = {}
        cache.set(cache_key, data)
        return data

    @staticmethod
    def getByCategory():
        """Return every collection category with its active collections
        expanded into full objects."""
        cursor = mysql.connect().cursor()
        cursor.execute("""SELECT cc.*,
            (select group_concat(c.collection_id separator ',') from collections c
            where c.category_id = cc.category_id and c.active=1) as collection_ids
            FROM collections_category cc""")
        num_rows = cursor.rowcount
        collections_categories = []
        for i in range(num_rows):
            category = Utils.fetchOneAssoc(cursor)
            category['collections'] = []
            if category['collection_ids'] is not None:
                for col_id in category['collection_ids'].split(','):
                    items = Collection(col_id).getObj()
                    if items:
                        category['collections'].append(items)
            collections_categories.append(category)
        return collections_categories

    @staticmethod
    def getPreview():
        """Return lightweight (id + name) listings of active collections
        and all categories, e.g. for admin dropdowns."""
        collections_data = {
            'collections_list': [],
            'collections_categories': []
        }
        cursor = mysql.connect().cursor()
        cursor.execute("""SELECT collection_id, name FROM collections WHERE active = 1""")
        num_rows = cursor.rowcount
        collections = []
        for i in range(num_rows):
            collections_data['collections_list'].append(Utils.fetchOneAssoc(cursor))
        cursor.execute("""SELECT category_id, category_name FROM collections_category""")
        num_rows = cursor.rowcount
        collections = []
        for i in range(num_rows):
            collections_data['collections_categories'].append(Utils.fetchOneAssoc(cursor))
        return collections_data

    @staticmethod
    def saveCollectionData(data, collection_item_ids=''):
        """Create or update a collection, then rewrite its metadata and
        item ordering, re-index it, and drop its cache entry.

        Args:
            data: dict of collection fields; ``metadata`` is a
                "key:value;key:value" string and ``items`` an
                "item_id:sort_order;..." string.
            collection_item_ids: ids already attached to the collection,
                used to decide between UPDATE and INSERT of orderings.
        """
        conn = mysql.connect()
        cursor = conn.cursor()
        slug_url = slugify(data['name'])[:100]
        # collection_id of 0 (falsy) means "create new".
        if not int(data['collection_id']):
            cursor.execute("""INSERT INTO collections (name, description, price,
                return_days, partial_order, category_id, slug_url) VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                (data['name'], data['description'], data['price'], data['return_days'],
                data['partial_order'], data['category_id'], slug_url))
            conn.commit()
            collection_id = cursor.lastrowid
        else:
            collection_id = data['collection_id']
            cursor.execute("""UPDATE collections SET name = %s, description = %s,
                price = %s, return_days = %s, category_id = %s, date_edited = CURRENT_TIMESTAMP,
                partial_order = %s, slug_url = %s WHERE collection_id = %s""", (
                data['name'],
                data['description'],
                data['price'],
                data['return_days'],
                data['category_id'],
                data['partial_order'],
                slug_url,
                collection_id))
            conn.commit()
        # Metadata is fully replaced: delete then re-insert.
        cursor.execute("""DELETE FROM collections_metadata WHERE collection_id = %s""",
            (collection_id,))
        conn.commit()
        if data['metadata']:
            metadata_pairs = []
            for meta in data['metadata'].split(";"):
                key, value = meta.split(":")
                metadata_pairs.append(tuple([collection_id, key, value]))
            cursor.executemany("""INSERT INTO collections_metadata (collection_id, meta_key, meta_value)
                VALUES (%s, %s, %s)""", metadata_pairs)
            conn.commit()
        # Split incoming item orderings into updates (already attached)
        # and inserts (newly attached).
        update_item_order = []
        insert_item_order = []
        item_ids = []
        original_items = collection_item_ids
        for item in data['items'].split(";"):
            key, value = item.split(":")
            key = int(key)
            item_ids.append(key)
            if key in original_items:
                update_item_order.append(tuple([value, collection_id, key]))
            else:
                insert_item_order.append(tuple([value, collection_id, key]))
        cursor.executemany("""UPDATE collections_items SET sort_order = %s,
            date_edited = CURRENT_TIMESTAMP WHERE collection_id = %s AND item_id = %s""",
            update_item_order)
        conn.commit()
        cursor.executemany("""INSERT INTO collections_items (sort_order, collection_id, item_id)
            VALUES (%s, %s, %s)""", insert_item_order)
        conn.commit()
        # Remove items no longer in the submitted list.  The IN(...) clause
        # is built from "%s" placeholders only, so this stays parameterized.
        format_chars = ",".join(["%s"] * len(item_ids))
        cursor.execute("""DELETE FROM collections_items
            WHERE collection_id = %s AND item_id NOT IN ("""+format_chars+""")""",
            (tuple([collection_id]) + tuple(item_ids)))
        conn.commit()
        # Refresh the search index for just this collection.
        Indexer().indexCollections(query_condition='c.collection_id='+str(collection_id))
        #NOTE for start session cals
        # NOTE(review): collections 4 and 5 appear to be special-cased for
        # session-start notifications -- confirm these hard-coded ids.
        if collection_id in [4, 5]:
            Notifications().startDataUpdate()
        # Invalidate the per-collection cache populated by getData().
        from app import cache
        cache_key = 'collection_'+str(collection_id)
        cache.set(cache_key, None)
        return True

    @staticmethod
    def removeCollection(collection_id):
        """Soft-delete a collection by clearing its active flag."""
        conn = mysql.connect()
        cursor = conn.cursor()
        cursor.execute("""UPDATE collections SET active = 0, date_edited = CURRENT_TIMESTAMP
            WHERE collection_id = %s""", (collection_id,))
        conn.commit()
        return True

    @staticmethod
    def addCategory(data):
        """Insert a new collection category and return its name and new id."""
        conn = mysql.connect()
        cursor = conn.cursor()
        cursor.execute("""INSERT INTO collections_category (category_name, image) VALUES (%s, %s)""", (data['name'], data['img_url']))
        conn.commit()
        response = {'category_name': data['name']}
        response['category_id'] = cursor.lastrowid
        return response

    '''
    Website Related functions
    '''
    @staticmethod
    def getHomepageCollections(items=False):
        # List of collections to be displayed on homepage
        """Return the homepage collections, optionally with their (trimmed)
        item lists, with web URLs attached.  Results are cached."""
        from app import cache
        cache_key = 'homepage_collections'+('_items' if items else '')
        homepage_collections = cache.get(cache_key)
        if homepage_collections:
            return homepage_collections
        # NOTE temp
        # NOTE(review): the env-dependent branch below is immediately
        # overwritten by the hard-coded list two lines later -- dead code
        # left over from a temporary override.
        if webapp.config['APP_ENV'] != 'dev':
            homepage_collection_ids = [38, 40, 41, 42]
        else:
            homepage_collection_ids = [25, 26, 27, 28]
        homepage_collection_ids = [38, 40, 41, 42]
        homepage_collections = []
        for col_id in homepage_collection_ids:
            col_obj = Collection(col_id)
            if items:
                col_obj = col_obj.getObj()
                col_obj['items'] = WebUtils.extendItemWebProperties(col_obj['items'])
                # NOTE temp case
                col_obj['items'] = col_obj['items'][:5]
            else:
                col_obj = col_obj.getObj()
            # Build the public collection URL; slug_url is repurposed to
            # hold the full link.
            url = webapp.config['HOST'] + '/books/collection/' + str(col_obj['collection_id'])
            if col_obj['slug_url']:
                url = url + '-' + col_obj['slug_url']
            col_obj['slug_url'] = url
            if col_obj['image']:
                col_obj['image'] = webapp.config['S3_HOST'] + 'website/collections/' + col_obj['image']
            more_url = '/books/category' + col_obj['more_url'] if col_obj['more_url'] else ''
            col_obj['more_url'] = webapp.config['HOST'] + more_url
            homepage_collections.append(col_obj)
        if not items:
            # Prepend a synthetic "Browse" tile linking to the catalogue.
            mock_collection = {
                'slug_url': webapp.config['HOST'] + '/books',
                'collection_id': 0,
                'name': 'Browse',
                'image': webapp.config['S3_HOST'] + 'website/collections/Browse.png'
            }
            homepage_collections = [mock_collection] + homepage_collections
        cache.set(cache_key, homepage_collections)
        return homepage_collections
| 40.714894 | 134 | 0.576505 |
a20764ff763f58fd084f6ca17d0635466ff63eac | 2,075 | py | Python | splunk_add_on_ucc_framework/uccrestbuilder/endpoint/multiple_model.py | artemrys/addonfactory-ucc-generator | 6d2ffc3f46d67fd136dbbb009bb7e7d50aecbbd9 | [
"Apache-2.0"
] | 16 | 2020-10-27T18:51:23.000Z | 2022-03-15T10:01:51.000Z | splunk_add_on_ucc_framework/uccrestbuilder/endpoint/multiple_model.py | artemrys/addonfactory-ucc-generator | 6d2ffc3f46d67fd136dbbb009bb7e7d50aecbbd9 | [
"Apache-2.0"
] | 375 | 2020-09-19T13:03:00.000Z | 2022-03-31T17:12:24.000Z | splunk_add_on_ucc_framework/uccrestbuilder/endpoint/multiple_model.py | artemrys/addonfactory-ucc-generator | 6d2ffc3f46d67fd136dbbb009bb7e7d50aecbbd9 | [
"Apache-2.0"
] | 11 | 2021-01-02T03:25:00.000Z | 2022-03-16T15:50:49.000Z | #
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .base import indent
from .single_model import RestEndpointBuilder, RestEntityBuilder
class MultipleModelEntityBuilder(RestEntityBuilder):
    """Entity builder for multiple-model endpoints.

    Spec and default stanzas are named after the entity itself; the
    generated REST-handler model variable carries a leading underscore.
    """

    @property
    def name_rh(self):
        """Suffix used for the generated RestModel variable name."""
        return "_" + self._name

    @property
    def name_spec(self):
        """Stanza name used in the .conf.spec file."""
        return self.name

    @property
    def name_default(self):
        """Stanza name used in the default .conf file."""
        return self.name
class MultipleModelEndpointBuilder(RestEndpointBuilder):
    """Endpoint builder that renders the REST handler module for a
    MultipleModel endpoint (one RestModel per entity, shared conf file)."""

    # Template for the generated handler module.  Placeholders are filled
    # in by generate_rh(); the text itself must stay exactly as emitted.
    _rh_template = """
from splunktaucclib.rest_handler.endpoint import (
    field,
    validator,
    RestModel,
    MultipleModel,
)
from splunktaucclib.rest_handler import admin_external, util
from {handler_module} import {handler_name}
import logging

util.remove_http_proxy_env_vars()


{entities}

endpoint = MultipleModel(
    '{conf_name}',
    models=[
        {models}
    ],
)


if __name__ == '__main__':
    logging.getLogger().addHandler(logging.NullHandler())
    admin_external.handle(
        endpoint,
        handler={handler_name},
    )
"""

    def actions(self):
        """REST actions supported by a multiple-model endpoint."""
        return ["edit", "list"]

    def generate_rh(self, handler):
        """Render the handler module source for this endpoint's entities."""
        entity_sources = []
        model_names = []
        for entity in self._entities:
            entity_sources.append(entity.generate_rh())
            model_names.append("model" + entity.name_rh)
        joined_models = ", \n".join(model_names)
        return self._rh_template.format(
            handler_module=handler.module,
            handler_name=handler.name,
            entities="\n".join(entity_sources),
            models=indent(joined_models, 2),
            conf_name=self.conf_name,
        )
| 25 | 74 | 0.692048 |
bcee0eb010173d883b9d4d53297fe555cbd4d373 | 20,139 | py | Python | virtual/lib/python3.6/site-packages/pylint/test/test_self.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | 3 | 2018-10-21T14:01:01.000Z | 2018-10-22T14:42:22.000Z | virtual/lib/python3.6/site-packages/pylint/test/test_self.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | 12 | 2018-10-03T19:45:36.000Z | 2022-03-11T23:54:25.000Z | virtual/lib/python3.6/site-packages/pylint/test/test_self.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | 3 | 2020-01-19T21:26:14.000Z | 2020-11-04T08:37:38.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Vlad Temian <vladtemian@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2016 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2017 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 Daniel Miller <millerdev@gmail.com>
# Copyright (c) 2017 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2017 Thomas Hisch <t.hisch@gmail.com>
# Copyright (c) 2017 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Jason Owen <jason.a.owen@gmail.com>
# Copyright (c) 2018 Jace Browning <jacebrowning@gmail.com>
# Copyright (c) 2018 Reverb C <reverbc@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import contextlib
import json
import re
import sys
import os
from os.path import join, dirname, abspath
import tempfile
import textwrap
import configparser
from io import StringIO
from pylint.lint import Run
from pylint.reporters import BaseReporter
from pylint.reporters.text import *
from pylint.reporters.json import JSONReporter
import pytest
from pylint import utils
HERE = abspath(dirname(__file__))
@contextlib.contextmanager
def _patch_streams(out):
sys.stderr = sys.stdout = out
try:
yield
finally:
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
@contextlib.contextmanager
def _configure_lc_ctype(lc_ctype):
lc_ctype_env = 'LC_CTYPE'
original_lctype = os.environ.get(lc_ctype_env)
os.environ[lc_ctype_env] = lc_ctype
try:
yield
finally:
os.environ.pop(lc_ctype_env)
if original_lctype:
os.environ[lc_ctype_env] = original_lctype
class MultiReporter(BaseReporter):
    """Reporter that fans every event out to a list of child reporters."""

    def __init__(self, reporters):
        self._reporters = reporters
        self.path_strip_prefix = os.getcwd() + os.sep

    def on_set_current_module(self, *args, **kwargs):
        """Forward the module-change notification to every child."""
        for reporter in self._reporters:
            reporter.on_set_current_module(*args, **kwargs)

    def handle_message(self, msg):
        """Forward each lint message to every child."""
        for reporter in self._reporters:
            reporter.handle_message(msg)

    def display_reports(self, layout):
        # Report sections are intentionally not rendered here.
        pass

    @property
    def out(self):
        """Output stream of the first child reporter."""
        return self._reporters[0].out

    @property
    def linter(self):
        return self._linter

    @linter.setter
    def linter(self, value):
        # Keep every child reporter pointed at the same linter instance.
        self._linter = value
        for reporter in self._reporters:
            reporter.linter = value
class TestRunTC(object):
def _runtest(self, args, reporter=None, out=None, code=None):
if out is None:
out = StringIO()
pylint_code = self._run_pylint(args, reporter=reporter, out=out)
if reporter:
output = reporter.out.getvalue()
elif hasattr(out, 'getvalue'):
output = out.getvalue()
else:
output = None
msg = 'expected output status %s, got %s' % (code, pylint_code)
if output is not None:
msg = '%s. Below pylint output: \n%s' % (msg, output)
assert pylint_code == code, msg
def _run_pylint(self, args, out, reporter=None):
args = args + ['--persistent=no']
with _patch_streams(out):
with pytest.raises(SystemExit) as cm:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Run(args, reporter=reporter)
return cm.value.code
def _clean_paths(self, output):
"""Remove version-specific tox parent directories from paths."""
return re.sub('^py.+/site-packages/', '', output.replace('\\', '/'), flags=re.MULTILINE)
def _test_output(self, args, expected_output):
out = StringIO()
self._run_pylint(args, out=out)
actual_output = self._clean_paths(out.getvalue())
assert expected_output.strip() in actual_output.strip()
def test_pkginfo(self):
"""Make pylint check itself."""
self._runtest(['pylint.__pkginfo__'], reporter=TextReporter(StringIO()),
code=0)
def test_all(self):
"""Make pylint check itself."""
reporters = [
TextReporter(StringIO()),
ColorizedTextReporter(StringIO()),
JSONReporter(StringIO())
]
self._runtest([join(HERE, 'functional/arguments.py')],
reporter=MultiReporter(reporters), code=2)
def test_no_ext_file(self):
self._runtest([join(HERE, 'input', 'noext')], code=0)
def test_w0704_ignored(self):
self._runtest([join(HERE, 'input', 'ignore_except_pass_by_default.py')], code=0)
def test_exit_zero(self):
self._runtest([
'--exit-zero',
join(HERE, 'regrtest_data', 'syntax_error.py')
], code=0)
def test_generate_config_option(self):
self._runtest(['--generate-rcfile'], code=0)
def test_generate_config_option_order(self):
out1 = StringIO()
out2 = StringIO()
self._runtest(['--generate-rcfile'], code=0, out=out1)
self._runtest(['--generate-rcfile'], code=0, out=out2)
output1 = out1.getvalue()
output2 = out2.getvalue()
assert output1 == output2
def test_generate_config_disable_symbolic_names(self):
# Test that --generate-rcfile puts symbolic names in the --disable
# option.
out = StringIO()
self._run_pylint(["--generate-rcfile", "--rcfile="], out=out)
output = out.getvalue()
# Get rid of the pesky messages that pylint emits if the
# configuration file is not found.
master = re.search(r"\[MASTER", output)
out = StringIO(output[master.start():])
parser = configparser.RawConfigParser()
parser.readfp(out)
messages = utils._splitstrip(parser.get('MESSAGES CONTROL', 'disable'))
assert 'suppressed-message' in messages
def test_generate_rcfile_no_obsolete_methods(self):
out = StringIO()
self._run_pylint(["--generate-rcfile"], out=out)
output = out.getvalue()
assert "profile" not in output
def test_inexisting_rcfile(self):
out = StringIO()
with pytest.raises(IOError) as excinfo:
self._run_pylint(["--rcfile=/tmp/norcfile.txt"], out=out)
assert "The config file /tmp/norcfile.txt doesn't exist!" == str(excinfo.value)
def test_help_message_option(self):
self._runtest(['--help-msg', 'W0101'], code=0)
def test_error_help_message_option(self):
self._runtest(['--help-msg', 'WX101'], code=0)
def test_error_missing_arguments(self):
self._runtest([], code=32)
def test_no_out_encoding(self):
"""test redirection of stdout with non ascii caracters
"""
#This test reproduces bug #48066 ; it happens when stdout is redirected
# through '>' : the sys.stdout.encoding becomes then None, and if the
# output contains non ascii, pylint will crash
if sys.version_info < (3, 0):
strio = tempfile.TemporaryFile()
else:
strio = StringIO()
assert strio.encoding is None
self._runtest([join(HERE, 'regrtest_data/no_stdout_encoding.py'),
'--enable=all'],
out=strio, code=28)
def test_parallel_execution(self):
self._runtest(['-j 2',
join(HERE, 'functional/arguments.py'),
join(HERE, 'functional/bad_continuation.py')], code=18)
def test_parallel_execution_missing_arguments(self):
self._runtest(['-j 2', 'not_here', 'not_here_too'], code=1)
def test_py3k_option(self):
# Test that --py3k flag works.
rc_code = 0
self._runtest([join(HERE, 'functional', 'unpacked_exceptions.py'),
'--py3k'],
code=rc_code)
def test_py3k_jobs_option(self):
rc_code = 0
self._runtest([join(HERE, 'functional', 'unpacked_exceptions.py'),
'--py3k', '-j 2'],
code=rc_code)
@pytest.mark.skipif(sys.version_info[0] > 2, reason="Requires the --py3k flag.")
def test_py3k_commutative_with_errors_only(self):
# Test what gets emitted with -E only
module = join(HERE, 'regrtest_data', 'py3k_error_flag.py')
expected = textwrap.dedent("""
************* Module py3k_error_flag
Explicit return in __init__
""")
self._test_output([module, "-E", "--msg-template='{msg}'"],
expected_output=expected)
# Test what gets emitted with -E --py3k
expected = textwrap.dedent("""
************* Module py3k_error_flag
Use raise ErrorClass(args) instead of raise ErrorClass, args.
""")
self._test_output([module, "-E", "--py3k", "--msg-template='{msg}'"],
expected_output=expected)
# Test what gets emitted with --py3k -E
self._test_output([module, "--py3k", "-E", "--msg-template='{msg}'"],
expected_output=expected)
@pytest.mark.skipif(sys.version_info[0] > 2, reason="Requires the --py3k flag.")
def test_py3k_commutative_with_config_disable(self):
module = join(HERE, 'regrtest_data', 'py3k_errors_and_warnings.py')
rcfile = join(HERE, 'regrtest_data', 'py3k-disabled.rc')
cmd = [module, "--msg-template='{msg}'", "--reports=n"]
expected = textwrap.dedent("""
************* Module py3k_errors_and_warnings
import missing `from __future__ import absolute_import`
Use raise ErrorClass(args) instead of raise ErrorClass, args.
Calling a dict.iter*() method
print statement used
""")
self._test_output(cmd + ["--py3k"], expected_output=expected)
expected = textwrap.dedent("""
************* Module py3k_errors_and_warnings
Use raise ErrorClass(args) instead of raise ErrorClass, args.
Calling a dict.iter*() method
print statement used
""")
self._test_output(cmd + ["--py3k", "--rcfile", rcfile],
expected_output=expected)
expected = textwrap.dedent("""
************* Module py3k_errors_and_warnings
Use raise ErrorClass(args) instead of raise ErrorClass, args.
print statement used
""")
self._test_output(cmd + ["--py3k", "-E", "--rcfile", rcfile],
expected_output=expected)
self._test_output(cmd + ["-E", "--py3k", "--rcfile", rcfile],
expected_output=expected)
def test_abbreviations_are_not_supported(self):
expected = "no such option: --load-plugin"
self._test_output([".", "--load-plugin"], expected_output=expected)
def test_enable_all_works(self):
module = join(HERE, 'data', 'clientmodule_test.py')
expected = textwrap.dedent("""
************* Module data.clientmodule_test
pylint/test/data/clientmodule_test.py:10:8: W0612: Unused variable 'local_variable' (unused-variable)
pylint/test/data/clientmodule_test.py:18:4: C0111: Missing method docstring (missing-docstring)
pylint/test/data/clientmodule_test.py:22:0: C0111: Missing class docstring (missing-docstring)
""")
self._test_output([module, "--disable=all", "--enable=all", "-rn"],
expected_output=expected)
def test_wrong_import_position_when_others_disabled(self):
expected_output = textwrap.dedent('''
************* Module wrong_import_position
pylint/test/regrtest_data/wrong_import_position.py:11:0: C0413: Import "import os" should be placed at the top of the module (wrong-import-position)
''')
module1 = join(HERE, 'regrtest_data', 'import_something.py')
module2 = join(HERE, 'regrtest_data', 'wrong_import_position.py')
args = [module2, module1,
"--disable=all", "--enable=wrong-import-position",
"-rn", "-sn"]
out = StringIO()
self._run_pylint(args, out=out)
actual_output = self._clean_paths(out.getvalue().strip())
to_remove = "No config file found, using default configuration"
if to_remove in actual_output:
actual_output = actual_output[len(to_remove):]
if actual_output.startswith("Using config file "):
# If ~/.pylintrc is present remove the
# Using config file... line
actual_output = actual_output[actual_output.find("\n"):]
assert expected_output.strip() == actual_output.strip()
def test_import_itself_not_accounted_for_relative_imports(self):
expected = 'Your code has been rated at 10.00/10'
package = join(HERE, 'regrtest_data', 'dummy')
self._test_output([package, '--disable=locally-disabled', '-rn'],
expected_output=expected)
def test_reject_empty_indent_strings(self):
expected = "indent string can't be empty"
module = join(HERE, 'data', 'clientmodule_test.py')
self._test_output([module, '--indent-string='],
expected_output=expected)
def test_json_report_when_file_has_syntax_error(self):
out = StringIO()
module = join(HERE, 'regrtest_data', 'syntax_error.py')
self._runtest([module], code=2, reporter=JSONReporter(out))
output = json.loads(out.getvalue())
assert isinstance(output, list)
assert len(output) == 1
assert isinstance(output[0], dict)
expected = {
"obj": "",
"column": 0,
"line": 1,
"type": "error",
"symbol": "syntax-error",
"module": "syntax_error"
}
message = output[0]
for key, value in expected.items():
assert key in message
assert message[key] == value
assert 'invalid syntax' in message['message'].lower()
def test_json_report_when_file_is_missing(self):
out = StringIO()
module = join(HERE, 'regrtest_data', 'totally_missing.py')
self._runtest([module], code=1, reporter=JSONReporter(out))
output = json.loads(out.getvalue())
assert isinstance(output, list)
assert len(output) == 1
assert isinstance(output[0], dict)
expected = {
"obj": "",
"column": 0,
"line": 1,
"type": "fatal",
"symbol": "fatal",
"module": module
}
message = output[0]
for key, value in expected.items():
assert key in message
assert message[key] == value
assert message['message'].startswith("No module named")
def test_information_category_disabled_by_default(self):
expected = 'Your code has been rated at 10.00/10'
path = join(HERE, 'regrtest_data', 'meta.py')
self._test_output([path], expected_output=expected)
def test_error_mode_shows_no_score(self):
expected_output = textwrap.dedent('''
************* Module application_crash
pylint/test/regrtest_data/application_crash.py:1:6: E0602: Undefined variable 'something_undefined' (undefined-variable)
''')
module = join(HERE, 'regrtest_data', 'application_crash.py')
self._test_output([module, "-E"], expected_output=expected_output)
def test_evaluation_score_shown_by_default(self):
expected_output = 'Your code has been rated at '
module = join(HERE, 'regrtest_data', 'application_crash.py')
self._test_output([module], expected_output=expected_output)
def test_confidence_levels(self):
expected = 'Your code has been rated at'
path = join(HERE, 'regrtest_data', 'meta.py')
self._test_output([path, "--confidence=HIGH,INFERENCE"],
expected_output=expected)
def test_bom_marker(self):
path = join(HERE, 'regrtest_data', 'meta.py')
config_path = join(HERE, 'regrtest_data', '.pylintrc')
expected = 'Your code has been rated at 10.00/10'
self._test_output([path, "--rcfile=%s" % config_path, "-rn"],
expected_output=expected)
    def test_pylintrc_plugin_duplicate_options(self):
        """Plugin messages/options appear once in --help-msg and --generate-rcfile."""
        dummy_plugin_path = join(HERE, 'regrtest_data', 'dummy_plugin')
        # Enable --load-plugins=dummy_plugin
        sys.path.append(dummy_plugin_path)
        config_path = join(HERE, 'regrtest_data', 'dummy_plugin.rc')
        # Both plugin messages documented exactly once.
        expected = (
            ":dummy-message-01 (I9061): *Dummy short desc 01*\n"
            "  Dummy long desc This message belongs to the dummy_plugin checker.\n\n"
            ":dummy-message-02 (I9060): *Dummy short desc 02*\n"
            "  Dummy long desc This message belongs to the dummy_plugin checker.")
        self._test_output(["--rcfile=%s" % config_path,
                           "--help-msg=dummy-message-01,dummy-message-02"],
                          expected_output=expected)
        # Both plugin options emitted exactly once in the generated rcfile.
        expected = (
            "[DUMMY_PLUGIN]\n\n# Dummy option 1\ndummy_option_1=dummy value 1\n\n"
            "# Dummy option 2\ndummy_option_2=dummy value 2")
        self._test_output(["--rcfile=%s" % config_path, "--generate-rcfile"],
                          expected_output=expected)
        # Undo the sys.path mutation made above so other tests are unaffected.
        sys.path.remove(dummy_plugin_path)
    def test_pylintrc_comments_in_values(self):
        """Inline '#' comments inside rcfile values are stripped when parsing."""
        path = join(HERE, 'regrtest_data', 'test_pylintrc_comments.py')
        config_path = join(HERE, 'regrtest_data', 'comments_pylintrc')
        expected = textwrap.dedent('''
        ************* Module test_pylintrc_comments
        pylint/test/regrtest_data/test_pylintrc_comments.py:2:0: W0311: Bad indentation. Found 1 spaces, expected 4 (bad-indentation)
        pylint/test/regrtest_data/test_pylintrc_comments.py:1:0: C0111: Missing module docstring (missing-docstring)
        pylint/test/regrtest_data/test_pylintrc_comments.py:1:0: C0111: Missing function docstring (missing-docstring)
        ''')
        self._test_output([path, "--rcfile=%s" % config_path, "-rn"],
                          expected_output=expected)
def test_no_crash_with_formatting_regex_defaults(self):
self._runtest(["--ignore-patterns=a"], reporter=TextReporter(StringIO()),
code=32)
    def test_getdefaultencoding_crashes_with_lc_ctype_utf8(self):
        """Running with LC_CTYPE=UTF-8 must not crash encoding detection."""
        expected_output = textwrap.dedent('''
        ************* Module application_crash
        pylint/test/regrtest_data/application_crash.py:1:6: E0602: Undefined variable 'something_undefined' (undefined-variable)
        ''')
        module = join(HERE, 'regrtest_data', 'application_crash.py')
        # _configure_lc_ctype temporarily overrides the locale env var.
        with _configure_lc_ctype('UTF-8'):
            self._test_output([module, '-E'], expected_output=expected_output)
    @pytest.mark.skipif(sys.platform == 'win32', reason='only occurs on *nix')
    def test_parseable_file_path(self):
        """Parseable output keeps the full path even under path_strip_prefix dirs."""
        file_name = 'test_target.py'
        fake_path = HERE + os.getcwd()
        module = join(fake_path, file_name)
        try:
            # create module under directories which have the same name as reporter.path_strip_prefix
            # e.g. /src/some/path/src/test_target.py when reporter.path_strip_prefix = /src/
            os.makedirs(fake_path)
            with open(module, 'w') as test_target:
                test_target.write('a = object()')
            self._test_output(
                [module, '--output-format=parseable'],
                expected_output=join(os.getcwd(), file_name))
        finally:
            # Always clean up the fake directory tree, even if the run fails.
            os.remove(module)
            os.removedirs(fake_path)
| 40.602823 | 156 | 0.621083 |
f6725025866ffe73263887aeb26b98990d069327 | 2,707 | py | Python | azure-servicefabric/azure/servicefabric/models/node_close_event_py3.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-servicefabric/azure/servicefabric/models/node_close_event_py3.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-servicefabric/azure/servicefabric/models/node_close_event_py3.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2020-07-25T20:36:02.000Z | 2020-07-25T20:36:02.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .node_event_py3 import NodeEvent
class NodeCloseEvent(NodeEvent):
    """Node Close event.
    All required parameters must be populated in order to send to Azure.
    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_id: Required. Id of Node.
    :type node_id: str
    :param node_instance: Required. Id of Node instance.
    :type node_instance: str
    :param error: Required. Describes error.
    :type error: str
    """
    # Swagger validation constraints (code is AutoRest-generated; edits here
    # would be overwritten on regeneration).
    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_id': {'required': True},
        'node_instance': {'required': True},
        'error': {'required': True},
    }
    # Maps Python attribute names to REST wire names and serialization types.
    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_id': {'key': 'NodeId', 'type': 'str'},
        'node_instance': {'key': 'NodeInstance', 'type': 'str'},
        'error': {'key': 'Error', 'type': 'str'},
    }
    def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: str, error: str, has_correlated_events: bool=None, **kwargs) -> None:
        super(NodeCloseEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs)
        self.node_id = node_id
        self.node_instance = node_instance
        self.error = error
        # Polymorphic discriminator identifying this FabricEvent subtype.
        self.kind = 'NodeClose'
| 40.402985 | 180 | 0.619874 |
9b82e220c1d461c364ce5bb585236552c1a9524d | 804 | py | Python | solving/sorting/bubblesort.py | williamlagos/chess | 7470479e352bf6fa28215e745af8c42dc20d7a1f | [
"MIT"
] | null | null | null | solving/sorting/bubblesort.py | williamlagos/chess | 7470479e352bf6fa28215e745af8c42dc20d7a1f | [
"MIT"
] | 4 | 2020-04-23T23:17:54.000Z | 2021-07-06T17:44:45.000Z | solving/sorting/bubblesort.py | williamlagos/chess | 7470479e352bf6fa28215e745af8c42dc20d7a1f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
import os
import random
import re
import sys
# Complete the countSwaps function below.
def countSwaps(a):
    """Bubble-sort *a* in place and report the swap count and extremes.

    Prints the number of element swaps performed, then the first and last
    elements of the sorted array (HackerRank "Sorting: Bubble Sort" format).

    Silently returns without sorting when len(a) > 600 (problem constraint).
    """
    n = len(a)
    # Constraints check
    if n > 600:
        return
    last = n - 1
    numSwaps = 0
    arraySorted = False
    while not arraySorted:
        # Assume sorted until a swap in this pass proves otherwise.  The
        # previous code reset the flag on *every* inner iteration, so
        # termination depended only on the final comparison of a pass: it
        # could stop early with an unsorted array (e.g. [3, 2, 1, 9]) and
        # looped forever for a single-element input.
        arraySorted = True
        for i in range(last):
            if a[i] > a[i + 1]:
                a[i], a[i + 1] = a[i + 1], a[i]
                arraySorted = False
                numSwaps += 1
    print("Array is sorted in %d swaps.\nFirst Element: %d\nLast Element: %d" % (numSwaps, a[0], a[last]))
if __name__ == '__main__':
n = int(input())
a = list(map(int, input().rstrip().split()))
countSwaps(a)
| 22.971429 | 106 | 0.539801 |
11488214c3f0082fbbc12674eff545e2ea956288 | 50 | py | Python | day3/exercise/p2.py | AkshayManchanda/Python_Training | 5a50472d118ac6d40145bf1dd60f26864bf9fb6c | [
"MIT"
] | null | null | null | day3/exercise/p2.py | AkshayManchanda/Python_Training | 5a50472d118ac6d40145bf1dd60f26864bf9fb6c | [
"MIT"
] | null | null | null | day3/exercise/p2.py | AkshayManchanda/Python_Training | 5a50472d118ac6d40145bf1dd60f26864bf9fb6c | [
"MIT"
] | null | null | null | l = [3,7,[1,4,'hello']]
l[2][2]='goodbye'
print(l) | 16.666667 | 23 | 0.52 |
a539bbce8383978fa85c6d1371f2b2f4c99c337b | 5,820 | py | Python | tests/dhcpv4/classification/test_v4_classification_release.py | isc-projects/forge | dfec8b41003d6b5a229f69ee93616e0e5cc6d71b | [
"0BSD"
] | 22 | 2015-02-27T11:51:05.000Z | 2022-02-28T12:39:29.000Z | tests/dhcpv4/classification/test_v4_classification_release.py | isc-projects/forge | dfec8b41003d6b5a229f69ee93616e0e5cc6d71b | [
"0BSD"
] | 16 | 2018-10-30T15:00:12.000Z | 2019-01-11T17:55:13.000Z | tests/dhcpv4/classification/test_v4_classification_release.py | isc-projects/forge | dfec8b41003d6b5a229f69ee93616e0e5cc6d71b | [
"0BSD"
] | 11 | 2015-02-27T11:51:36.000Z | 2021-03-30T08:33:54.000Z | """DHCPv4 Client Classification release process"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import misc
import srv_control
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.release
def test_v4_client_classification_release_same_chaddr_client_id():
    """RELEASE with matching chaddr/client-id returns the lease to the pool.

    The pool holds exactly one address.  After DISCOVER/REQUEST assign it to
    a classified client, a RELEASE carrying the same chaddr and client-id
    must free it, so a later DISCOVER (different chaddr, same client-id)
    is offered the same address again.
    """
    misc.test_setup()
    # Single-address pool so lease reuse is observable.
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    # Only clients sending vendor class 'my-own-class' match this subnet.
    srv_control.config_client_classification(0, 'VENDOR_CLASS_my-own-class')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER from the classified client; expect an OFFER of the sole lease.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'my-own-class')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
    # REQUEST the offered address; expect an ACK binding the lease.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_does_include_with_value('vendor_class_id', 'my-own-class')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ACK')
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
    # RELEASE using the *same* chaddr and client-id; no reply is expected
    # to a RELEASE, so just send it.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
    # New DISCOVER (different chaddr, same client-id) must be offered the
    # just-released address, proving the release took effect.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:1F:D0:11:22:33')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'my-own-class')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.release
def test_v4_client_classification_release_different_chaddr_client_id():
    """RELEASE with mismatched chaddr/client-id must NOT free the lease.

    Same single-address setup as the companion test, but the RELEASE is sent
    with a different chaddr and client-id than the lease holder's.  The
    server should keep the binding, so the final DISCOVER gets no OFFER
    (only a no-wait send is performed at the end).
    """
    misc.test_setup()
    # Single-address pool so lease exhaustion is observable.
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    # Only clients sending vendor class 'my-own-class' match this subnet.
    srv_control.config_client_classification(0, 'VENDOR_CLASS_my-own-class')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER from the classified client; expect an OFFER of the sole lease.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'my-own-class')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
    # REQUEST the offered address; expect an ACK binding the lease.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_does_include_with_value('vendor_class_id', 'my-own-class')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ACK')
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
    # RELEASE sent with *different* chaddr and client-id than the lease
    # holder's — the server should ignore it and keep the binding.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:11:22:33')
    srv_msg.client_does_include_with_value('client_id', '00010203123456')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
    # Final DISCOVER: the pool is still exhausted, so no OFFER is expected
    # (hence send_dont_wait_for_message rather than send_wait_for_message).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:1F:D0:11:22:33')
    # Client adds to the message client_id with value 00010203040506.
    srv_msg.client_does_include_with_value('vendor_class_id', 'my-own-class')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
    # we should check logs here..
| 41.870504 | 81 | 0.750172 |
81e8e4ba652fd94f192afd0cf0142a9a96746156 | 8,041 | py | Python | akshare/stock/hk_stock_sina.py | repos-cl/akshare | 94fa42fb095ac4bfa5d8d58673b805d36cc0128e | [
"MIT"
] | 1 | 2021-07-13T01:29:49.000Z | 2021-07-13T01:29:49.000Z | akshare/stock/hk_stock_sina.py | repos-cl/akshare | 94fa42fb095ac4bfa5d8d58673b805d36cc0128e | [
"MIT"
] | null | null | null | akshare/stock/hk_stock_sina.py | repos-cl/akshare | 94fa42fb095ac4bfa5d8d58673b805d36cc0128e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/5/29 15:28
Desc: 新浪财经-港股-实时行情数据和历史行情数据(包含前复权和后复权因子)
http://stock.finance.sina.com.cn/hkstock/quotes/00700.html
"""
import requests
import demjson
import pandas as pd
from py_mini_racer import py_mini_racer
from akshare.stock.cons import (
hk_js_decode,
hk_sina_stock_dict_payload,
hk_sina_stock_list_url,
hk_sina_stock_hist_url,
hk_sina_stock_hist_hfq_url,
hk_sina_stock_hist_qfq_url,
)
def stock_hk_spot() -> pd.DataFrame:
    """
    Sina Finance - real-time quotes for all Hong Kong stocks.
    http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
    :return: real-time quote data, one row per listed symbol
    :rtype: pandas.DataFrame
    """
    res = requests.get(hk_sina_stock_list_url, params=hk_sina_stock_dict_payload)
    # The endpoint returns a quasi-JSON array.  Strip the surrounding
    # brackets, re-split on '},' and restore the trailing brace that the
    # split consumed, then decode each record individually with demjson
    # (the payload uses unquoted keys, which json.loads rejects).
    data_json = [
        demjson.decode(tt)
        for tt in [
            item + "}" for item in res.text[1:-1].split("},") if not item.endswith("}")
        ]
    ]
    data_df = pd.DataFrame(data_json)
    # Keep a fixed, documented subset/order of the quote fields.
    data_df = data_df[
        [
            "symbol",
            "name",
            "engname",
            "tradetype",
            "lasttrade",
            "prevclose",
            "open",
            "high",
            "low",
            "volume",
            "amount",
            "ticktime",
            "buy",
            "sell",
            "pricechange",
            "changepercent",
        ]
    ]
    return data_df
def stock_hk_daily(symbol: str = "00981", adjust: str = "") -> pd.DataFrame:
    """
    Sina Finance - Hong Kong stocks - historical daily data for one symbol.
    https://stock.finance.sina.com.cn/hkstock/quotes/02912.html
    :param symbol: stock code; valid codes can be obtained via stock_hk_spot
    :type symbol: str
    :param adjust: "": unadjusted data; "qfq": forward-adjusted data;
        "hfq": backward-adjusted data; "qfq-factor"/"hfq-factor": return
        the adjustment factors themselves
    :type adjust: str
    :return: data for the requested *adjust* mode
    :rtype: pandas.DataFrame
    """
    res = requests.get(hk_sina_stock_hist_url.format(symbol))
    js_code = py_mini_racer.MiniRacer()
    js_code.eval(hk_js_decode)
    dict_list = js_code.call(
        "d", res.text.split("=")[1].split(";")[0].replace('"', "")
    )  # run Sina's own JS decoder to unpack the obfuscated kline payload
    data_df = pd.DataFrame(dict_list)
    data_df.index = pd.to_datetime(data_df["date"]).dt.date
    del data_df["date"]
    data_df = data_df.astype("float")
    if adjust == "":
        data_df.reset_index(inplace=True)
        return data_df
    if adjust == "hfq":
        res = requests.get(hk_sina_stock_hist_hfq_url.format(symbol))
        # NOTE(review): eval() on a remote response body is unsafe if the
        # endpoint is ever compromised — consider a proper parser.  Flagged
        # only; behavior unchanged here.
        try:
            hfq_factor_df = pd.DataFrame(
                eval(res.text.split("=")[1].split("\n")[0])["data"]
            )
            # A single factor row means no adjustments exist: return raw data.
            if len(hfq_factor_df) == 1:
                data_df.reset_index(inplace=True)
                return data_df
        except SyntaxError as e:
            # Unparseable factor payload: fall back to unadjusted data.
            data_df.reset_index(inplace=True)
            return data_df
        hfq_factor_df.columns = ["date", "hfq_factor", "cash"]
        hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
        del hfq_factor_df["date"]
        # Process the adjustment factors: expand them onto a daily calendar
        # via outer-merge + forward-fill so every trading day gets a factor.
        temp_date_range = pd.date_range(
            "1900-01-01", hfq_factor_df.index[0].isoformat()
        )
        temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
        new_range = pd.merge(
            temp_df, hfq_factor_df, left_index=True, right_index=True, how="outer"
        )
        new_range = new_range.fillna(method="ffill")
        new_range = new_range.iloc[:, [1, 2]]
        temp_df = pd.merge(
            data_df, new_range, left_index=True, right_index=True, how="outer"
        )
        temp_df.fillna(method="ffill", inplace=True)
        temp_df.drop_duplicates(
            subset=["open", "high", "low", "close", "volume"], inplace=True
        )
        temp_df = temp_df.astype(float)
        # Backward adjustment: price * factor + cash dividend component.
        temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"] + temp_df["cash"]
        temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"] + temp_df["cash"]
        temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"] + temp_df["cash"]
        temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"] + temp_df["cash"]
        temp_df = temp_df.apply(lambda x: round(x, 4))
        temp_df.dropna(how="any", inplace=True)
        temp_df = temp_df.iloc[:, :-2]
        temp_df.reset_index(inplace=True)
        temp_df.rename({"index": "date"}, axis='columns', inplace=True)
        temp_df['date'] = temp_df['date'].astype(str)
        return temp_df
    if adjust == "qfq":
        res = requests.get(hk_sina_stock_hist_qfq_url.format(symbol))
        # Same eval() caveat and single-row/parse-failure fallbacks as above.
        try:
            qfq_factor_df = pd.DataFrame(
                eval(res.text.split("=")[1].split("\n")[0])["data"]
            )
            if len(qfq_factor_df) == 1:
                data_df.reset_index(inplace=True)
                return data_df
        except SyntaxError as e:
            data_df.reset_index(inplace=True)
            return data_df
        qfq_factor_df.columns = ["date", "qfq_factor"]
        qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
        del qfq_factor_df["date"]
        temp_date_range = pd.date_range(
            "1900-01-01", qfq_factor_df.index[0].isoformat()
        )
        temp_df = pd.DataFrame(range(len(temp_date_range)), temp_date_range)
        new_range = pd.merge(
            temp_df, qfq_factor_df, left_index=True, right_index=True, how="outer"
        )
        new_range = new_range.fillna(method="ffill")
        new_range = new_range.iloc[:, [1]]
        temp_df = pd.merge(
            data_df, new_range, left_index=True, right_index=True, how="outer"
        )
        temp_df.fillna(method="ffill", inplace=True)
        temp_df.drop_duplicates(
            subset=["open", "high", "low", "close", "volume"], inplace=True
        )
        temp_df = temp_df.astype(float)
        # Forward adjustment: price * factor (no cash component).
        temp_df["open"] = temp_df["open"] * temp_df["qfq_factor"]
        temp_df["high"] = temp_df["high"] * temp_df["qfq_factor"]
        temp_df["close"] = temp_df["close"] * temp_df["qfq_factor"]
        temp_df["low"] = temp_df["low"] * temp_df["qfq_factor"]
        temp_df = temp_df.apply(lambda x: round(x, 4))
        temp_df.dropna(how="any", inplace=True)
        temp_df = temp_df.iloc[:, :-1]
        temp_df.reset_index(inplace=True)
        temp_df.rename({"index": "date"}, axis='columns', inplace=True)
        temp_df['date'] = temp_df['date'].astype(str)
        return temp_df
    if adjust == "hfq-factor":
        res = requests.get(hk_sina_stock_hist_hfq_url.format(symbol))
        hfq_factor_df = pd.DataFrame(
            eval(res.text.split("=")[1].split("\n")[0])["data"]
        )
        hfq_factor_df.columns = ["date", "hfq_factor", "cash"]
        hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
        del hfq_factor_df["date"]
        hfq_factor_df.reset_index(inplace=True)
        hfq_factor_df['date'] = hfq_factor_df['date'].astype(str)
        return hfq_factor_df
    if adjust == "qfq-factor":
        res = requests.get(hk_sina_stock_hist_qfq_url.format(symbol))
        qfq_factor_df = pd.DataFrame(
            eval(res.text.split("=")[1].split("\n")[0])["data"]
        )
        qfq_factor_df.columns = ["date", "qfq_factor"]
        qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
        del qfq_factor_df["date"]
        qfq_factor_df.reset_index(inplace=True)
        qfq_factor_df['date'] = qfq_factor_df['date'].astype(str)
        return qfq_factor_df
if __name__ == "__main__":
stock_hk_daily_hfq_df = stock_hk_daily(symbol="00700", adjust="")
print(stock_hk_daily_hfq_df)
stock_hk_daily_hfq_df = stock_hk_daily(symbol="00700", adjust="hfq")
print(stock_hk_daily_hfq_df)
stock_hk_daily_hfq_df = stock_hk_daily(symbol="01591", adjust="hfq")
print(stock_hk_daily_hfq_df)
stock_hk_daily_hfq_df = stock_hk_daily(symbol="00700", adjust="qfq")
print(stock_hk_daily_hfq_df)
stock_hk_daily_df = stock_hk_daily(symbol="01302", adjust="qfq")
print(stock_hk_daily_df)
stock_hk_daily_hfq_factor_df = stock_hk_daily(symbol="00700", adjust="hfq-factor")
print(stock_hk_daily_hfq_factor_df)
stock_hk_spot_df = stock_hk_spot()
print(stock_hk_spot_df)
| 35.422907 | 87 | 0.607138 |
ea3c3171cbc5ed9e81dd791780dcf438a6e5d331 | 9,392 | py | Python | Utils.py | PashaIanko/Covid19Classifier | ee75a2b17babb8c9701351dfaa6052afa083168f | [
"MIT"
] | null | null | null | Utils.py | PashaIanko/Covid19Classifier | ee75a2b17babb8c9701351dfaa6052afa083168f | [
"MIT"
] | 1 | 2022-01-27T13:30:38.000Z | 2022-01-27T13:30:38.000Z | Utils.py | PashaIanko/Covid19Classifier | ee75a2b17babb8c9701351dfaa6052afa083168f | [
"MIT"
] | null | null | null | # Load filenames
import os
import numpy as np
from DataProperties import DataProperties
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from scipy.signal import convolve2d
import pandas as pd
from ModelUtils import ModelUtils
import time
def load_filenames(data_path, max_files=None):
    """Return the full paths of the entries inside *data_path*.

    Parameters
    ----------
    data_path : str
        Directory to scan.  Paths are built with ``os.path.join``, so this
        now works whether or not *data_path* ends with a separator (the old
        ``data_path + name`` concatenation silently produced broken paths
        when the trailing separator was missing).
    max_files : int, optional
        If given, at most this many entries are returned.

    Returns
    -------
    list of str
        One path per directory entry, in ``os.listdir`` order.
    """
    names = os.listdir(data_path)
    if max_files is not None:
        # Slicing clamps to len(names) on its own, no min() needed.
        names = names[:max_files]
    return [os.path.join(data_path, name) for name in names]
def get_filenames(
    covid_path, pneumonia_path, normal_path
):
    """Collect image file paths for the three classes.

    Returns a ``(covid, pneumonia, normal)`` tuple of path lists — one
    :func:`load_filenames` call per class directory.
    """
    class_dirs = (covid_path, pneumonia_path, normal_path)
    return tuple(load_filenames(directory) for directory in class_dirs)
def get_labels(
    covid_fnames,
    pn_fnames,
    normal_fnames
):
    """Build one label array per class, each matching its file list in length.

    Returns a ``(covid, pneumonia, normal)`` tuple of 1-D numpy arrays filled
    with the class ids declared on :class:`DataProperties`.
    """
    pairs = (
        (covid_fnames, DataProperties.covid_class),
        (pn_fnames, DataProperties.pneumonia_class),
        (normal_fnames, DataProperties.healthy_class),
    )
    return tuple(
        np.full(len(fnames), fill_value=class_id) for fnames, class_id in pairs
    )
def getXY(covid_fnames, pn_fnames, normal_fnames,
          covid_labels, pn_labels, normal_labels):
    """Flatten the per-class file lists and label sequences into X and Y.

    Returns ``(X, Y)``: X concatenates the three file-name sequences and Y
    concatenates the matching label sequences, in the same class order.
    """
    X = list(covid_fnames) + list(pn_fnames) + list(normal_fnames)
    Y = list(covid_labels) + list(pn_labels) + list(normal_labels)
    return X, Y
def load_image(full_path):
    """Read the image at *full_path* as a 3-channel array (cv2 BGR order)."""
    return cv2.imread(full_path, cv2.IMREAD_COLOR)
def plot_history(history, metrics_name, plot_validation, figsize = (12, 8)):
    """Plot one training metric over epochs, optionally with its val_ curve.

    history: dict-like keyed by metric name (e.g. a Keras History.history);
    metrics_name: key to plot; plot_validation: also plot 'val_<metric>'.
    Draws on the current pyplot figure state; y-axis is clipped to (0.01, 1).
    """
    fig, ax = plt.subplots(figsize = figsize)
    ax.plot(
        history[metrics_name],
        label = metrics_name,
        marker = 'o',
        markersize = 11,
        markerfacecolor = 'white'
    )
    if plot_validation:
        plt.plot(
            history['val_' + metrics_name],
            label = metrics_name + ' (validation)',
            marker = 'o',
            markersize = 11,
            markerfacecolor = 'white'
        )
    plt.xlabel('Epoch')
    plt.ylabel(metrics_name)
    plt.ylim([0.01, 1])
    plt.legend(loc = 'lower right')
    plt.grid()
def get_class_name(class_indices, label):
    """Reverse lookup: return the class name mapped to *label*.

    *class_indices* maps name -> integer label (Keras ``class_indices``
    style).  Returns None when no entry matches.
    """
    return next(
        (name for name, index in class_indices.items() if index == label),
        None,
    )
def visualize(batch, labels, n_subplots, class_indices, figsize = (15, 15)):
    """Show the first *n_subplots* images of a batch in a square grid.

    Each subplot is titled with the class name looked up from its label.
    The sqrt-based grid assumes n_subplots is a perfect square — TODO confirm
    callers only pass squares.
    """
    plt.figure(figsize = figsize)
    for i in range(n_subplots): #(batch_size):
        ax = plt.subplot(
            int(np.sqrt(n_subplots)),
            int(np.sqrt(n_subplots)),
            i + 1
        )
        plt.imshow(batch[i])
        plt.title(get_class_name(class_indices, labels[i]))
        plt.axis("off")
def plot_confusion_matrix(Y_true, Y_pred, class_indices):
    """Display a row-normalized confusion matrix for the given predictions.

    Ticks are labelled with the keys of *class_indices*; normalization is
    per true class ('true'), so each row sums to 1.
    """
    #fig, axes = plt.subplots(1, 1, figsize = (5, 5))
    #axes.set_ylabel('True', fontdict={'size': '16'})
    #axes.set_xlabel('Predicted', fontdict={'size': '16'})
    #axes.tick_params(axis='both', labelsize=17)
    cm = confusion_matrix(
        Y_true,
        Y_pred,
        normalize = 'true'
    )
    disp = ConfusionMatrixDisplay(
        confusion_matrix = cm,
        display_labels = [k for k in class_indices.keys()] #translate_labels(class_indices),
    )
    disp.plot(
        cmap = 'Oranges',
        xticks_rotation = 'vertical',
    )
    #plt.title(f'Confusion matrix for {model_name}', fontsize = 18)
    plt.show()
def visualize_convolutions(
    image,
    kernels,
    label,
    n_color_channels
):
    """Convolve each color channel of *image* with its kernel and plot all.

    Plots the source image plus one subplot per convolved channel and
    returns the list of convolved 2-D arrays.  The fixed 1x4 subplot grid
    assumes n_color_channels == 3 — TODO confirm.  *label* is currently
    unused by the plotting code.
    """
    rgb_components = [
        image[:, :, channel] for channel in range(n_color_channels)
    ]
    assert len(rgb_components) == len(kernels)
    convolved = [
        convolve2d(
            rgb_components[i],
            kernels[i],
            mode = 'same'
        )
        for i in range(n_color_channels)
    ]
    _, axes = plt.subplots(1, 4, figsize = (15, 15))
    axes[0].imshow(image)
    axes[0].set_title('Source image')
    for i in range(len(convolved)):
        axes[i + 1].imshow(convolved[i])
        axes[i + 1].set_title(f'Color channel {i + 1}')
    return convolved
def visualize_kernels(kernels):
    """Show each per-channel kernel in its own subplot, side by side."""
    _, axes = plt.subplots(1, len(kernels))
    for channel, (ax, kernel) in enumerate(zip(axes, kernels), start=1):
        ax.imshow(kernel)
        ax.set_title(f'Color channel {channel}')
def fit_(model, train_flow, train_steps, val_flow, val_steps, epochs, callbacks):
    """Train *model* on the given data flows and return its History object.

    Thin wrapper over ``model.fit`` that wires up the training/validation
    generators, step counts, epoch count and callbacks.
    """
    fit_kwargs = dict(
        steps_per_epoch=train_steps,
        validation_data=val_flow,
        validation_steps=val_steps,
        epochs=epochs,
        callbacks=callbacks,
    )
    return model.fit(train_flow, **fit_kwargs)
def visualize_kernel_work(model, n_layer, n_kernel, image, label, n_color_channels):
    """Show kernel *n_kernel* of conv layer *n_layer* and its effect on *image*.

    Extracts the per-channel weights of the chosen kernel, prints the weight
    tensor shape, plots the kernels themselves, then plots the channel-wise
    convolutions of *image* with them.
    """
    conv_layer = model.layers[n_layer]
    # get_weights()[0] is the kernel tensor: (h, w, in_channels, n_kernels).
    kernels = conv_layer.get_weights()[0]
    color_kernels = [
        kernels[:, :, color_ch, n_kernel]
        for color_ch in range(n_color_channels)
    ]
    kern_shape = kernels.shape
    print(
        f'''We have:
        {kern_shape[0]} by {kern_shape[1]} kernel,
        of {kern_shape[2]} color channels,
        total: {kern_shape[3]} kernels'''
    )
    visualize_kernels(color_kernels)
    _ = visualize_convolutions(
        image,
        color_kernels,
        label = label,
        n_color_channels = n_color_channels
    )
def collect_metrics(models_dict, data_flow, data_steps):
    """Evaluate every model on *data_flow* and gather metrics per model.

    Returns {model_name: {metric_name: value}} including evaluation time,
    loss/accuracy from ``evaluate``, F1/precision/recall from the model
    wrapper's ``evaluate_metrics``, and the trainable-parameter count.
    The flow is reset before each pass so every model sees the same data.
    """
    res_dict = {k: {} for k in models_dict.keys()}
    for name, model in models_dict.items():
        data_flow.reset()
        t_start = time.time()
        eval_res = model.model.evaluate(
            data_flow,
            steps = data_steps
        )
        t_end = time.time()
        res_dict[name]['data_eval_time_sec'] = t_end - t_start
        res_dict[name]['data_size'] = data_flow.n
        # NOTE(review): key says loss^(-1) but the raw loss is stored —
        # confirm which one downstream consumers expect.
        res_dict[name]['test_loss^(-1)'] = eval_res[0] # loss
        res_dict[name]['test_accuracy'] = eval_res[1] # accuracy
        data_flow.reset()
        metrics = model.evaluate_metrics(
            data_flow,
            data_steps
        )
        res_dict[name]['F1'] = metrics['F1']
        res_dict[name]['precision'] = metrics['Precision']
        res_dict[name]['recall'] = metrics['Recall']
        # number of trainable parameters
        trainable_params = np.sum(
            [np.prod(v.get_shape()) for v in model.model.trainable_weights]
        )
        res_dict[name]['tr_params'] = int(trainable_params)
        # nonTrainableParams = np.sum([np.prod(v.get_shape()) for v in model.non_trainable_weights])
        # totalParams = trainableParams + nonTrainableParams
    return res_dict
def calc_files(directory):
    """Count all files under *directory*, recursively (directories excluded)."""
    return sum(len(files) for _, _, files in os.walk(directory))
def visualize_full_train_time(models_dict):
    """Plot cumulative training time vs. epoch for every model, on one axes.

    Expects {name: {'model': wrapper}} where the wrapper exposes
    ``epoch_time_callback.times`` as (epoch, cumulative_seconds) pairs.
    Does nothing when the dict is None or empty.
    """
    if not(models_dict is None) and len(models_dict):
        legends = []
        for model_name, model in models_dict.items():
            model = model['model']
            fit_times = model.epoch_time_callback.times
            plt.xlabel('Epoch')
            plt.ylabel('Total time taken until an epoch in seconds')
            # zip(*pairs) splits the (epoch, time) pairs into x and y series.
            plt.plot(
                *zip(*fit_times),
                marker = 'o',
                linestyle = '--',
                markerfacecolor = 'white',
                markersize = 12
            )
            legends.append(model_name)
        plt.legend(legends)
        plt.show()
def extract_dt(epochs_times):
    """Convert cumulative per-epoch times into per-epoch durations.

    Input: list of (epoch, cumulative_time) pairs, in epoch order.
    Output: list of (epoch, delta_time) pairs, where the first delta equals
    the first cumulative value.
    """
    deltas = []
    previous = 0
    for epoch, cumulative in epochs_times:
        deltas.append((epoch, cumulative - previous))
        previous = cumulative
    return deltas
def visualize_epoch_time(models_dict):
    """Plot per-epoch training duration for every model, on one axes.

    Converts each model's cumulative ``epoch_time_callback.times`` into
    per-epoch deltas via :func:`extract_dt` before plotting.
    NOTE(review): the y-label still says 'total time until an epoch' even
    though per-epoch deltas are plotted — confirm intended wording.
    """
    legends = []
    for model_name, model in models_dict.items():
        model = model['model']
        fit_times = model.epoch_time_callback.times
        epochs_delta_ts = extract_dt(fit_times)
        plt.xlabel('Epoch')
        plt.ylabel('Total time taken until an epoch in seconds')
        plt.plot(
            *zip(*epochs_delta_ts),
            marker = 'o',
            linestyle = '--',
            markerfacecolor = 'white',
            markersize = 12
        )
        legends.append(model_name)
    plt.legend(legends)
    plt.show()
def save_train_times(models_dict, save_dir):
    """Write the per-epoch cumulative training times of all models to one CSV.

    Each model contributes one column named after the model; row *i* holds
    the cumulative time recorded after epoch *i* by the model wrapper's
    ``epoch_time_callback`` ((epoch, seconds) pairs — only seconds are kept).

    Parameters
    ----------
    models_dict : dict
        Maps model name -> {'model': wrapper exposing
        ``epoch_time_callback.times``}.
    save_dir : str
        Destination CSV path, passed straight to ``DataFrame.to_csv``.
    """
    res_df = pd.DataFrame()
    for name, model in models_dict.items():
        times = [pair[1] for pair in model['model'].epoch_time_callback.times]
        # axis=1 places each model in its own column.  The previous
        # axis-0 concat stacked the models' rows vertically, padding every
        # other model's column with NaN.
        res_df = pd.concat([res_df, pd.DataFrame({name: times})], axis=1)
    res_df.to_csv(save_dir)
def save_histories(hist_dict, save_dir):
    """Dump every model's Keras ``History.history`` dict to its own CSV.

    Files are written as '<save_dir><model_name>_history.csv', so *save_dir*
    should include a trailing path separator.
    """
    for model_name, hist in hist_dict.items():
        frame = pd.DataFrame(hist.history)
        frame.to_csv('{}{}_history.csv'.format(save_dir, model_name))
def print_summary(models_dict, model_name):
    """Build the named model and print its Keras summary.

    Prints a diagnostic message instead when *model_name* is not a key of
    *models_dict*.
    """
    if model_name not in models_dict:
        print(f'Model {model_name} is not in the models dictionary')
        return
    wrapper = models_dict[model_name]['model']
    wrapper.construct_model()
    print(wrapper.model.summary())
| 27.223188 | 100 | 0.606687 |
b88f0922be73df21e36c5b7c066b667d6da6fcaf | 137 | py | Python | Flappybird/main.py | Yuconium/Flappy-Bird | 2fe6c4e6004d85c2267577e9a548021510f41e84 | [
"MIT"
] | null | null | null | Flappybird/main.py | Yuconium/Flappy-Bird | 2fe6c4e6004d85c2267577e9a548021510f41e84 | [
"MIT"
] | null | null | null | Flappybird/main.py | Yuconium/Flappy-Bird | 2fe6c4e6004d85c2267577e9a548021510f41e84 | [
"MIT"
] | null | null | null | import pygame
import mainwindow
if __name__ == "__main__":
pygame.init()
Screen = mainwindow.Screen(700, 500)
Screen.mainloop() | 19.571429 | 38 | 0.715328 |
a666d6e18b53320ef67e53f9a8055f32f6eba413 | 54,840 | py | Python | src/Meshing.py | livino/PyAero | 11a6d50ed640fdfdcda74423d47f113fdc7e5e3f | [
"MIT"
] | null | null | null | src/Meshing.py | livino/PyAero | 11a6d50ed640fdfdcda74423d47f113fdc7e5e3f | [
"MIT"
] | null | null | null | src/Meshing.py | livino/PyAero | 11a6d50ed640fdfdcda74423d47f113fdc7e5e3f | [
"MIT"
] | null | null | null |
import os
import copy
from datetime import date
import locale
import itertools
import numpy as np
from scipy import interpolate
from PySide2 import QtGui, QtCore, QtWidgets
import PyAero
import GraphicsItemsCollection as gic
import GraphicsItem
import Connect
from Utils import Utils
from Settings import OUTPUTDATA
import logging
logger = logging.getLogger(__name__)
class Windtunnel:
"""docstring for Windtunnel"""
    def __init__(self):
        """Initialise an empty windtunnel mesh container."""
        # contains list of BlockMesh objects
        self.blocks = []
        # get MainWindow instance (overcomes handling parents)
        self.mainwindow = QtCore.QCoreApplication.instance().mainwindow
    def AirfoilMesh(self, name='', contour=None, divisions=15, ratio=3.0,
                    thickness=0.04):
        """Create the block mesh hugging the airfoil contour.

        contour is an (x, y) coordinate pair of arrays; the block is
        extruded normal to the contour (direction 3) with a first-cell
        thickness of *thickness*, growth *ratio* and *divisions* layers.
        Stores the block as self.block_airfoil and appends it to self.blocks.
        """
        # get airfoil contour coordinates
        x, y = contour
        # make a list of point tuples
        # [(x1, y1), (x2, y2), (x3, y3), ... , (xn, yn)]
        line = list(zip(x, y))
        # block mesh around airfoil contour
        self.block_airfoil = BlockMesh(name=name)
        self.block_airfoil.addLine(line)
        # self.block_airfoil.extrudeLine(line, length=thickness, direction=3,
        #                                divisions=divisions, ratio=ratio)
        self.block_airfoil.extrudeLine_cell_thickness(line,
                                                      cell_thickness=thickness,
                                                      growth=ratio,
                                                      divisions=divisions,
                                                      direction=3)
        self.blocks.append(self.block_airfoil)
    def TrailingEdgeMesh(self, name='', te_divisions=3,
                         thickness=0.04, divisions=10, ratio=1.05):
        """Create the mesh block that fills the trailing edge gap.

        Args:
            name (str): name of the block
            te_divisions (int): subdivisions across the blunt trailing edge
            thickness (float): thickness of the first extruded cell layer
            divisions (int): number of extruded layers
            ratio (float): growth rate of the layer thickness
        """
        # compile first line of trailing edge block
        first = self.block_airfoil.getLine(number=0, direction='v')
        last = self.block_airfoil.getLine(number=-1, direction='v')
        last_reversed = copy.deepcopy(last)
        last_reversed.reverse()
        vec = np.array(first[0]) - np.array(last[0])
        line = copy.deepcopy(last_reversed)
        # in case of TE add the points from the TE
        if self.mainwindow.airfoil.has_TE:
            for i in range(1, te_divisions):
                p = last_reversed[-1] + float(i) / te_divisions * vec
                # p is type numpy.float, so convert it to float
                line.append((float(p[0]), float(p[1])))
            line += first
        # handle case with sharp trailing edge
        else:
            line += first[1:]
        # trailing edge block mesh
        block_te = BlockMesh(name=name)
        block_te.addLine(line)
        block_te.extrudeLine_cell_thickness(line,
                                            cell_thickness=thickness,
                                            growth=ratio,
                                            divisions=divisions,
                                            direction=4)
        # equidistant point distribution
        block_te.distribute(direction='u', number=-1)
        # make a transfinite interpolation
        # i.e. recreate points inside the block
        block_te.transfinite()
        self.block_te = block_te
        self.blocks.append(block_te)
    def TunnelMesh(self, name='', tunnel_height=2.0, divisions_height=100,
                   ratio_height=10.0, dist='symmetric'):
        """Create the mesh block between airfoil/TE blocks and the
        windtunnel wall (upper/lower walls plus front half circle).

        Args:
            name (str): name of the block
            tunnel_height (float): half height of the windtunnel
            divisions_height (int): cells between contour and tunnel wall
            ratio_height (float): cell size ratio towards the wall
            dist (str): point clustering on the outer boundary;
                one of 'symmetric', 'lower', 'upper'
                (any other value leaves ld/ud unset -> NameError;
                TODO confirm the GUI restricts the choices)
        """
        block_tunnel = BlockMesh(name=name)
        self.tunnel_height = tunnel_height
        # line composed of trailing edge and airfoil meshes
        line = self.block_te.getVLines()[-1]
        line.reverse()
        del line[-1]
        line += self.block_airfoil.getULines()[-1]
        del line[-1]
        line += self.block_te.getVLines()[0]
        block_tunnel.addLine(line)
        # line composed of upper, lower and front line segments
        p1 = np.array((block_tunnel.getULines()[0][0][0], tunnel_height))
        p2 = np.array((0.0, tunnel_height))
        p3 = np.array((0.0, -tunnel_height))
        p4 = np.array((block_tunnel.getULines()[0][-1][0], -tunnel_height))
        # upper line of wind tunnel
        line = list()
        vec = p2 - p1
        for t in np.linspace(0.0, 1.0, 10):
            p = p1 + t * vec
            line.append(p.tolist())
        del line[-1]
        # front half circle of wind tunnel
        for phi in np.linspace(90.0, 270.0, 200):
            phir = np.radians(phi)
            x = tunnel_height * np.cos(phir)
            y = tunnel_height * np.sin(phir)
            line.append((x, y))
        del line[-1]
        # lower line of wind tunnel
        vec = p4 - p3
        for t in np.linspace(0.0, 1.0, 10):
            p = p3 + t * vec
            line.append(p.tolist())
        line = np.array(line)
        # linear parametric spline through the outer boundary points
        tck, u = interpolate.splprep(line.T, s=0, k=1)
        # point distribution on upper, front and lower part
        # ld/ud are tanh-argument bounds that cluster points towards
        # the upper or lower side
        if dist == 'symmetric':
            ld = -1.3
            ud = 1.3
        if dist == 'lower':
            ld = -1.2
            ud = 1.5
        if dist == 'upper':
            ld = -1.5
            ud = 1.2
        xx = np.linspace(ld, ud, len(block_tunnel.getULines()[0]))
        t = (np.tanh(xx) + 1.0) / 2.0
        line = interpolate.splev(t, tck, der=0)
        line = list(zip(line[0].tolist(), line[1].tolist()))
        block_tunnel.addLine(line)
        p5 = np.array(block_tunnel.getULines()[0][0])
        p6 = np.array(block_tunnel.getULines()[0][-1])
        # first vline
        vline1 = BlockMesh.makeLine(p5, p1, divisions=divisions_height,
                                    ratio=ratio_height)
        # last vline
        vline2 = BlockMesh.makeLine(p6, p4, divisions=divisions_height,
                                    ratio=ratio_height)
        boundary = [block_tunnel.getULines()[0],
                    block_tunnel.getULines()[-1],
                    vline1,
                    vline2]
        block_tunnel.transfinite(boundary=boundary)
        # blending between normals (inner lines) and transfinite (outer lines)
        ulines = list()
        old_ulines = block_tunnel.getULines()
        for j, uline in enumerate(block_tunnel.getULines()):
            # skip first and last line
            if j == 0 or j == len(block_tunnel.getULines()) - 1:
                ulines.append(uline)
                continue
            line = list()
            xo, yo = list(zip(*old_ulines[0]))
            xo = np.array(xo)
            yo = np.array(yo)
            normals = BlockMesh.curveNormals(xo, yo)
            for i, point in enumerate(uline):
                # skip first and last point
                if i == 0 or i == len(uline) - 1:
                    line.append(point)
                    continue
                pt = np.array(old_ulines[j][i])
                pto = np.array(old_ulines[0][i])
                vec = pt - pto
                # projection of vec into normal
                # NOTE: local 'dist' shadows the 'dist' parameter from here on
                dist = np.dot(vec, normals[i]) / np.linalg.norm(normals[i])
                pn = pto + dist * normals[i]
                v = float(j) / float(len(block_tunnel.getULines()))
                exp = 0.6
                pnew = (1.0 - v**exp) * pn + v**exp * pt
                line.append((pnew.tolist()[0], pnew.tolist()[1]))
            ulines.append(line)
        # rebuild the block from the blended u-lines
        block_tunnel = BlockMesh(name=name)
        for uline in ulines:
            block_tunnel.addLine(uline)
        # re-interpolate the first 30 and last 30 v-line columns
        ij = [0, 30, 0, len(block_tunnel.getULines()) - 1]
        block_tunnel.transfinite(ij=ij)
        ij = [len(block_tunnel.getVLines()) - 31,
              len(block_tunnel.getVLines()) - 1,
              0,
              len(block_tunnel.getULines()) - 1]
        block_tunnel.transfinite(ij=ij)
        # Laplace smoothing of the interior and the re-interpolated strips
        sm = 1
        if sm == 1:
            smooth = Smooth(block_tunnel)
            nodes = smooth.selectNodes(domain='interior')
            block_tunnel = smooth.smooth(nodes, iterations=1,
                                         algorithm='laplace')
            ij = [1, 30, 1, len(block_tunnel.getULines()) - 2]
            nodes = smooth.selectNodes(domain='ij', ij=ij)
            block_tunnel = smooth.smooth(nodes, iterations=2,
                                         algorithm='laplace')
            ij = [len(block_tunnel.getVLines()) - 31,
                  len(block_tunnel.getVLines()) - 2,
                  1,
                  len(block_tunnel.getULines()) - 2]
            nodes = smooth.selectNodes(domain='ij', ij=ij)
            block_tunnel = smooth.smooth(nodes, iterations=3,
                                         algorithm='laplace')
        self.block_tunnel = block_tunnel
        self.blocks.append(block_tunnel)
    def TunnelMeshWake(self, name='', tunnel_wake=2.0,
                       divisions=100, ratio=0.1, spread=0.4):
        """Create the mesh block downstream of the airfoil (the wake).

        Args:
            name (str): name of the block
            tunnel_wake (float): wake length behind the trailing edge
            divisions (int): streamwise subdivisions of the wake
            ratio (float): cell size ratio in streamwise direction
            spread (float): fraction of the wake length (0..1) at which the
                equalized division line is placed
        """
        # chord length is normalized to 1 in PyAero — TODO confirm
        chord = 1.0
        block_tunnel_wake = BlockMesh(name=name)
        # line composed of trailing edge and block_tunnel meshes
        line = self.block_tunnel.getVLines()[-1]
        line.reverse()
        del line[-1]
        line += self.block_te.getULines()[-1]
        del line[-1]
        line += self.block_tunnel.getVLines()[0]
        block_tunnel_wake.addLine(line)
        # corner points of the wake block
        p1 = np.array((self.block_te.getULines()[-1][0][0],
                       self.tunnel_height))
        p4 = np.array((self.block_te.getULines()[-1][-1][0],
                       - self.tunnel_height))
        p7 = np.array((tunnel_wake + chord, self.tunnel_height))
        p8 = np.array((tunnel_wake + chord, -self.tunnel_height))
        upper = BlockMesh.makeLine(p7, p1, divisions=divisions,
                                   ratio=1.0 / ratio)
        lower = BlockMesh.makeLine(p8, p4, divisions=divisions,
                                   ratio=1.0 / ratio)
        left = line
        right = BlockMesh.makeLine(p8, p7, divisions=len(left) - 1, ratio=1.0)
        boundary = [upper, lower, right, left]
        block_tunnel_wake.transfinite(boundary=boundary)
        # equalize division line in wake
        # NOTE(review): if no point satisfies the condition, line_no stays
        # unbound and the next call raises NameError — verify inputs
        for i, u in enumerate(block_tunnel_wake.getULines()[0]):
            if u[0] < chord + tunnel_wake * spread:
                ll = len(block_tunnel_wake.getULines()[0])
                line_no = -ll + i
                break
        block_tunnel_wake.distribute(direction='v', number=line_no)
        # transfinite left of division line
        ij = [len(block_tunnel_wake.getVLines()) + line_no,
              len(block_tunnel_wake.getVLines()) - 1,
              0,
              len(block_tunnel_wake.getULines()) - 1]
        block_tunnel_wake.transfinite(ij=ij)
        # transfinite right of division line
        ij = [0,
              len(block_tunnel_wake.getVLines()) + line_no,
              0,
              len(block_tunnel_wake.getULines()) - 1]
        block_tunnel_wake.transfinite(ij=ij)
        self.block_tunnel_wake = block_tunnel_wake
        self.blocks.append(block_tunnel_wake)
    def makeMesh(self):
        """Drive the complete mesh generation from the GUI.

        Reads all parameters from the toolbox widgets, builds the four
        mesh blocks, connects them into one mesh, derives connectivity
        and boundary information, and draws the result into the scene.
        Shows a progress dialog and aborts silently when the user cancels
        or prerequisites (airfoil, spline) are missing.
        """
        toolbox = self.mainwindow.centralwidget.toolbox
        if self.mainwindow.airfoil:
            if not hasattr(self.mainwindow.airfoil, 'spline_data'):
                message = 'Splining needs to be done first.'
                self.mainwindow.slots.messageBox(message)
                return
            contour = self.mainwindow.airfoil.spline_data[0]
        else:
            self.mainwindow.slots.messageBox('No airfoil loaded.')
            return
        # delete blocks outline if existing
        # because a new one will be generated
        if hasattr(self.mainwindow.airfoil, 'mesh_blocks'):
            self.mainwindow.scene.removeItem(
                self.mainwindow.airfoil.mesh_blocks)
            del self.mainwindow.airfoil.mesh_blocks
        progdialog = QtWidgets.QProgressDialog(
            "", "Cancel", 0, 100, self.mainwindow)
        progdialog.setFixedWidth(300)
        progdialog.setMinimumDuration(0)
        progdialog.setWindowTitle('Generating the CFD mesh')
        progdialog.setWindowModality(QtCore.Qt.WindowModal)
        progdialog.show()
        progdialog.setValue(10)
        # progdialog.setLabelText('making blocks')
        self.AirfoilMesh(name='block_airfoil',
                         contour=contour,
                         divisions=toolbox.points_n.value(),
                         ratio=toolbox.ratio.value(),
                         thickness=toolbox.normal_thickness.value())
        progdialog.setValue(20)
        if progdialog.wasCanceled():
            return
        self.TrailingEdgeMesh(name='block_TE',
                              te_divisions=toolbox.te_div.value(),
                              thickness=toolbox.length_te.value(),
                              divisions=toolbox.points_te.value(),
                              ratio=toolbox.ratio_te.value())
        progdialog.setValue(30)
        if progdialog.wasCanceled():
            return
        self.TunnelMesh(name='block_tunnel',
                        tunnel_height=toolbox.tunnel_height.value(),
                        divisions_height=toolbox.divisions_height.value(),
                        ratio_height=toolbox.ratio_height.value(),
                        dist=toolbox.dist.currentText())
        progdialog.setValue(50)
        if progdialog.wasCanceled():
            return
        self.TunnelMeshWake(name='block_tunnel_wake',
                            tunnel_wake=toolbox.tunnel_wake.value(),
                            divisions=toolbox.divisions_wake.value(),
                            ratio=toolbox.ratio_wake.value(),
                            spread=toolbox.spread.value() / 100.0)
        progdialog.setValue(70)
        if progdialog.wasCanceled():
            return
        # connect mesh blocks
        connect = Connect.Connect(progdialog)
        vertices, connectivity, progdialog = \
            connect.connectAllBlocks(self.blocks)
        # add mesh to Windtunnel instance
        self.mesh = vertices, connectivity
        # generate cell to vertex connectivity from mesh
        self.makeLCV()
        # generate cell to edge connectivity from mesh
        self.makeLCE()
        # generate boundaries from mesh connectivity
        unique, seen, doubles, boundary_edges = self.makeBoundaries()
        # find loops inside boundary_edges
        self.boundary_loops = self.findLoops(boundary_edges)
        logger.info('Mesh around {} created'.
                    format(self.mainwindow.airfoil.name))
        logger.info('Mesh has {} vertices and {} elements'.
                    format(len(vertices), len(connectivity)))
        self.drawMesh(self.mainwindow.airfoil)
        self.drawBlockOutline(self.mainwindow.airfoil)
        progdialog.setValue(100)
        # enable mesh export and set filename and boundary definitions
        toolbox.box_meshexport.setEnabled(True)
def makeLCV(self):
"""Make cell to vertex connectivity for the mesh
LCV is identical to connectivity
"""
_, connectivity = self.mesh
self.LCV = connectivity
def makeLCE(self):
"""Make cell to edge connectivity for the mesh"""
_, connectivity = self.mesh
self.LCE = dict()
self.edges = list()
for i, cell in enumerate(connectivity):
# example for Qudrilateral:
# cell: [0, 1, 5, 4]
# local_edges: [(0,1), (1,5), (5,4), (4,0)]
local_edges = [(cell[j], cell[(j + 1) % len(cell)])
for j in range(len(cell))]
# all edges for cell i
self.LCE[i] = local_edges
# all edges in one list
self.edges += [tuple(sorted(edge)) for edge in local_edges]
    def makeLCC(self):
        """Make cell to cell connectivity for the mesh (not implemented)."""
        pass
def makeBoundaries(self):
"""A boundary edge is an edge that belongs only to one cell"""
seen = set()
unique = list()
doubles = set()
for edge in self.edges:
if edge not in seen:
seen.add(edge)
unique.append(edge)
else:
doubles.add(edge)
boundary_edges = [edge for edge in unique if edge not in doubles]
return unique, seen, doubles, boundary_edges
    def findLoops(self, edges):
        """Find closed loops ("cycles") in a list of boundary edges.

        Uses a disjoint set to compute connected components of the edge
        graph, then reorders the component nodes back into edge lists.
        Also classifies outer-boundary edges as outlet edges (vertical
        edges downstream of the airfoil) in self.is_outlet.

        Args:
            edges (list of tuples): boundary edges as (node_a, node_b)

        Returns:
            dict: loop index -> list of [node_a, node_b] edges;
            index 0 is the airfoil loop, index 1 the outer boundary
            (assumed ordering — TODO confirm DisjointSet guarantees it)
        """
        vertices, connectivity = self.mesh
        # make disjoint set object
        djs = DisjointSet()
        # add all edges to the disjoint set
        for edge in edges:
            djs.add(edge[0], edge[1])
        # get the boundary loops (airfoil, outer boundary)
        # djs.group returns a dictionary containing all loops
        # the key is an arbitrary node of the loop
        # the values per key are a list of unordered nodes
        # belonging to the loop
        boundary_loops = djs.group
        # in order to order the returned nodes again, their corresponding edges
        # have to be found first
        new_loops = dict()
        for i, loop in enumerate(boundary_loops):
            l_edges = list()
            for node in boundary_loops[loop]:
                l_edges += [sorted(edge) for edge in edges if node in edge]
            # remove duplicate list elements from l_edges
            loop_edges = [k for k, _ in itertools.groupby(sorted(l_edges))]
            new_loops[i] = loop_edges
        # new_loops[0] is airfoil
        # new_loops[1] is complete outer boundary
        # split outer boundary into inlet and outlet
        self.is_outlet = list()
        # edge is e.g.: (27, 53)
        for i, edge in enumerate(new_loops[1]):
            self.is_outlet.append(0)
            # vertices[edge[0]] is e.g.: (1.0453006577029285, 3.5)
            vector = Utils.vector(vertices[edge[0]], vertices[edge[1]])
            # check angle against y-axis
            angle = Utils.angle_between(vector, (0., 1.), degree=True)
            # FIXME
            # FIXME find better criterions or at leat refactor
            # FIXME
            # angle tolerance
            tol = 0.5
            # check only for edges downstream the airfoil
            tol_wake = 1.1
            if ((angle > - tol) and (angle < tol)) or \
                ((angle > 180. - tol) and (angle < 180. + tol)) and \
                    vertices[edge[0]][0] > tol_wake:
                self.is_outlet[i] = 1
        return new_loops
    def drawMesh(self, airfoil):
        """Add the mesh as ItemGroup to the scene

        Draws every u- and v-line of every block as a thin black
        polyline and stores the resulting item group on the airfoil.

        Args:
            airfoil (TYPE): object containing all airfoil properties and data
        """
        # toggle spline points
        self.mainwindow.centralwidget.cb3.click()
        # delete old mesh if existing
        if hasattr(airfoil, 'mesh'):
            logger.debug('MESH item type: {}'.format(type(airfoil.mesh)))
            self.mainwindow.scene.removeItem(airfoil.mesh)
        mesh = list()
        for block in self.blocks:
            for lines in [block.getULines(),
                          block.getVLines()]:
                for line in lines:
                    # instantiate a graphics item
                    contour = gic.GraphicsCollection()
                    # make it polygon type and populate its points
                    points = [QtCore.QPointF(x, y) for x, y in line]
                    contour.Polyline(QtGui.QPolygonF(points), '')
                    # set its properties
                    contour.pen.setColor(QtGui.QColor(0, 0, 0, 255))
                    contour.pen.setWidthF(0.8)
                    contour.pen.setCosmetic(True)
                    contour.brush.setStyle(QtCore.Qt.NoBrush)
                    # add contour as a GraphicsItem to the scene
                    # these are the objects which are drawn in the GraphicsView
                    meshline = GraphicsItem.GraphicsItem(contour)
                    mesh.append(meshline)
        airfoil.mesh = self.mainwindow.scene.createItemGroup(mesh)
        # activate viewing options if mesh is created and displayed
        self.mainwindow.centralwidget.cb6.setChecked(True)
        self.mainwindow.centralwidget.cb6.setEnabled(True)
def drawBlockOutline(self, airfoil):
"""Add the mesh block outlines to the scene
Args:
airfoil (TYPE): object containing all airfoil properties and data
"""
# FIXME
# FIXME Refactroing of code duplication here and in drawMesh
# FIXME
mesh_blocks = list()
for block in self.blocks:
for lines in [block.getULines()]:
for line in [lines[0], lines[-1]]:
# instantiate a graphics item
contour = gic.GraphicsCollection()
# make it polygon type and populate its points
points = [QtCore.QPointF(x, y) for x, y in line]
contour.Polyline(QtGui.QPolygonF(points), '')
# set its properties
contour.pen.setColor(QtGui.QColor(202, 31, 123, 255))
contour.pen.setWidthF(3.0)
contour.pen.setCosmetic(True)
contour.brush.setStyle(QtCore.Qt.NoBrush)
# add contour as a GraphicsItem to the scene
# these are the objects which are drawn in the GraphicsView
meshline = GraphicsItem.GraphicsItem(contour)
mesh_blocks.append(meshline)
for lines in [block.getVLines()]:
for line in [lines[0], lines[-1]]:
# instantiate a graphics item
contour = gic.GraphicsCollection()
# make it polygon type and populate its points
points = [QtCore.QPointF(x, y) for x, y in line]
contour.Polyline(QtGui.QPolygonF(points), '')
# set its properties
contour.pen.setColor(QtGui.QColor(202, 31, 123, 255))
contour.pen.setWidthF(3.0)
contour.pen.setCosmetic(True)
contour.brush.setStyle(QtCore.Qt.NoBrush)
# add contour as a GraphicsItem to the scene
# these are the objects which are drawn in the GraphicsView
meshline = GraphicsItem.GraphicsItem(contour)
mesh_blocks.append(meshline)
airfoil.mesh_blocks = self.mainwindow.scene \
.createItemGroup(mesh_blocks)
# activate viewing options if mesh is created and displayed
self.mainwindow.centralwidget.cb8.setChecked(True)
self.mainwindow.centralwidget.cb8.setEnabled(True)
# after instantiating everything above switch it off
# as blocks should not be shown as a default
# now visibility of blocks fits to checkbox setting
self.mainwindow.centralwidget.cb8.click()
class BlockMesh:
    def __init__(self, name='block'):
        """Create an empty block mesh with the given name."""
        self.name = name
        # list of u-lines; each u-line is a list of (x, y) tuples
        self.ULines = list()
    def addLine(self, line):
        """Append one u-line to the block."""
        # line is a list of (x, y) tuples
        self.ULines.append(line)
    def getULines(self):
        """Return the list of u-lines of the block."""
        return self.ULines
def getVLines(self):
vlines = list()
U, V = self.getDivUV()
# loop over all u-lines
for i in range(U + 1):
# prepare new v-line
vline = list()
# collect i-th point on each u-line
for uline in self.getULines():
vline.append(uline[i])
vlines.append(vline)
return vlines
def getLine(self, number=0, direction='u'):
if direction.lower() == 'u':
lines = self.getULines()
if direction.lower() == 'v':
lines = self.getVLines()
return lines[number]
def getDivUV(self):
u = len(self.getULines()[0]) - 1
v = len(self.getULines()) - 1
return u, v
def getNodeCoo(self, node):
I, J = node[0], node[1]
uline = self.getULines()[J]
point = uline[I]
return np.array(point)
def setNodeCoo(self, node, new_pos):
I, J = node[0], node[1]
uline = self.getULines()[J]
uline[I] = new_pos
return
    @staticmethod
    def makeLine(p1, p2, divisions=1, ratio=1.0):
        """Create a straight line from p1 to p2 as a list of (x, y) tuples.

        Args:
            p1 (np.ndarray): start point
            p2 (np.ndarray): end point
            divisions (int): number of subdivisions
            ratio (float): ratio of last to first subdivision size

        Returns:
            list: points on the line, including both end points
        """
        vec = p2 - p1
        dist = np.linalg.norm(vec)
        spacing = BlockMesh.spacing(divisions=divisions,
                                    ratio=ratio, length=dist)
        line = list()
        line.append((p1.tolist()[0], p1.tolist()[1]))
        for i in range(1, len(spacing)):
            p = p1 + spacing[i] * Utils.unit_vector(vec)
            line.append((p.tolist()[0], p.tolist()[1]))
        # replace the last computed point with the exact end point
        # to avoid round-off drift
        del line[-1]
        line.append((p2.tolist()[0], p2.tolist()[1]))
        return line
    def extrudeLine_cell_thickness(self, line, cell_thickness=0.04,
                                   growth=1.05,
                                   divisions=1,
                                   direction=3):
        """Extrude a line into cell layers with a given first-cell thickness.

        Args:
            line (list): (x, y) tuples to extrude from
            cell_thickness (float): thickness of the first cell layer
            growth (float): growth factor of subsequent layers
            divisions (int): number of layers
            direction (int): 3 extrudes along the local curve normals,
                4 along the averaged (mean) normal; other values do nothing
        """
        x, y = list(zip(*line))
        x = np.array(x)
        y = np.array(y)
        if direction == 3:
            spacing, _ = self.spacing_cell_thickness(
                cell_thickness=cell_thickness,
                growth=growth,
                divisions=divisions)
            normals = self.curveNormals(x, y)
            for i in range(1, len(spacing)):
                xo = x + spacing[i] * normals[:, 0]
                yo = y + spacing[i] * normals[:, 1]
                line = list(zip(xo.tolist(), yo.tolist()))
                self.addLine(line)
        elif direction == 4:
            spacing, _ = self.spacing_cell_thickness(
                cell_thickness=cell_thickness,
                growth=growth,
                divisions=divisions)
            normals = self.curveNormals(x, y)
            # use one average normal for the whole line
            normalx = normals[:, 0].mean()
            normaly = normals[:, 1].mean()
            for i in range(1, len(spacing)):
                xo = x + spacing[i] * normalx
                yo = y + spacing[i] * normaly
                line = list(zip(xo.tolist(), yo.tolist()))
                self.addLine(line)
    def extrudeLine(self, line, direction=0, length=0.1, divisions=1,
                    ratio=1.00001, constant=False):
        """Extrude a line by a total length.

        Args:
            line (list): (x, y) tuples to extrude from
            direction (int): 0/1 with constant=True fills x/y with length;
                3 extrudes along the local curve normals,
                4 along the averaged (mean) normal
            length (float): total extrusion length (or the constant value)
            divisions (int): number of layers
            ratio (float): ratio of last to first layer thickness
            constant (bool): replace a coordinate instead of extruding
        """
        x, y = list(zip(*line))
        x = np.array(x)
        y = np.array(y)
        if constant and direction == 0:
            x.fill(length)
            line = list(zip(x.tolist(), y.tolist()))
            self.addLine(line)
        elif constant and direction == 1:
            y.fill(length)
            line = list(zip(x.tolist(), y.tolist()))
            self.addLine(line)
        elif direction == 3:
            spacing = self.spacing(divisions=divisions,
                                   ratio=ratio,
                                   length=length)
            normals = self.curveNormals(x, y)
            for i in range(1, len(spacing)):
                xo = x + spacing[i] * normals[:, 0]
                yo = y + spacing[i] * normals[:, 1]
                line = list(zip(xo.tolist(), yo.tolist()))
                self.addLine(line)
        elif direction == 4:
            spacing = self.spacing(divisions=divisions,
                                   ratio=ratio,
                                   length=length)
            normals = self.curveNormals(x, y)
            # use one average normal for the whole line
            normalx = normals[:, 0].mean()
            normaly = normals[:, 1].mean()
            for i in range(1, len(spacing)):
                xo = x + spacing[i] * normalx
                yo = y + spacing[i] * normaly
                line = list(zip(xo.tolist(), yo.tolist()))
                self.addLine(line)
    def distribute(self, direction='u', number=0, type='constant'):
        """Redistribute the points of one mesh line.

        Args:
            direction (str): 'u' or 'v' line family
            number (int): index of the line to redistribute
            type (str): 'constant' for equidistant parameters or
                'transition' to blend between the parameterizations of
                the first and last u-line
        """
        if direction == 'u':
            line = np.array(self.getULines()[number])
        elif direction == 'v':
            line = np.array(self.getVLines()[number])
        # interpolate B-spline through data points
        # here, a linear interpolant is derived "k=1"
        # splprep returns:
        # tck ... tuple (t,c,k) containing the vector of knots,
        #         the B-spline coefficients, and the degree of the spline.
        # u ... array of the parameters for each given point (knot)
        tck, u = interpolate.splprep(line.T, s=0, k=1)
        if type == 'constant':
            t = np.linspace(0.0, 1.0, num=len(line))
        if type == 'transition':
            first = np.array(self.getULines()[0])
            last = np.array(self.getULines()[-1])
            tck_first, u_first = interpolate.splprep(first.T, s=0, k=1)
            tck_last, u_last = interpolate.splprep(last.T, s=0, k=1)
            # NOTE(review): for negative 'number' v becomes exactly 1.0
            # here, so only u_last contributes — confirm intended
            if number < 0.0:
                number = len(self.getVLines())
            v = float(number) / float(len(self.getVLines()))
            t = (1.0 - v) * u_first + v * u_last
        # evaluate function at any parameter "0<=t<=1"
        line = interpolate.splev(t, tck, der=0)
        line = list(zip(line[0].tolist(), line[1].tolist()))
        if direction == 'u':
            self.getULines()[number] = line
        elif direction == 'v':
            for i, uline in enumerate(self.getULines()):
                self.getULines()[i][number] = line[i]
@staticmethod
def spacing_cell_thickness(cell_thickness=0.04, growth=1.1, divisions=10):
# add cell thickness of first layer
spacing = [cell_thickness]
for i in range(divisions - 1):
spacing.append(spacing[0] + spacing[-1] * growth)
spacing.insert(0, 0.0)
length = np.sum(spacing)
return spacing, length
@staticmethod
def spacing(divisions=10, ratio=1.0, length=1.0):
"""Calculate point distribution on a line
Args:
divisions (int, optional): Number of subdivisions
ratio (float, optional): Ratio of last to first subdivision size
length (float, optional): length of line
Returns:
array: individual line segment lengths
"""
if divisions == 1:
sp = [0.0, 1.0]
return np.array(sp)
growth = ratio**(1.0 / (float(divisions) - 1.0))
if growth == 1.0:
growth = 1.0 + 1.0e-10
s = [1.0]
for i in range(1, divisions + 1):
s.append(growth**i)
spacing = np.array(s)
spacing -= spacing[0]
spacing /= spacing[-1]
spacing *= length
return spacing
    def mapLines(self, line_1, line_2):
        """Map the distribution of points from one line to another line
        (not implemented).

        Args:
            line_1 (LIST): Source line (will be mapped)
            line_2 (LIST): Destination line (upon this line_1 is mapped)
        """
        pass
    @staticmethod
    def curveNormals(x, y, closed=False):
        """Compute a unit normal per point of a polyline.

        Uses central differences for interior points and one-sided
        differences at the ends (or wrap-around when closed). The
        normal is the tangent rotated by -90 degrees.

        Args:
            x, y (array-like): point coordinates
            closed (bool): treat the polyline as a closed loop

        Returns:
            np.ndarray: array of shape (len(x), 2) with unit normals
        """
        istart = 0
        iend = 0
        n = list()
        for i, _ in enumerate(x):
            if closed:
                if i == len(x) - 1:
                    iend = -i - 1
            else:
                if i == 0:
                    istart = 1
                if i == len(x) - 1:
                    iend = -1
            a = np.array([x[i + 1 + iend] - x[i - 1 + istart],
                          y[i + 1 + iend] - y[i - 1 + istart]])
            e = Utils.unit_vector(a)
            n.append([e[1], -e[0]])
            istart = 0
            iend = 0
        return np.array(n)
    def transfinite(self, boundary=[], ij=[]):
        """Make a transfinite interpolation.

        http://en.wikipedia.org/wiki/Transfinite_interpolation

                       upper
                --------------------
                |                  |
                |                  |
           left |                  | right
                |                  |
                |                  |
                --------------------
                       lower

        The interior points of the block are recreated from the four
        boundary curves. Either explicit boundary curves, an (i, j)
        index window into the existing block, or the full block
        boundary (default) can be used.

        Args:
            boundary (list): [lower, upper, left, right] boundary curves,
                each a list of (x, y) tuples, e.g.
                lower = [(0.0, 0.0), (0.1, 0.3), (0.5, 0.4)]
            ij (list): [i_start, i_end, j_start, j_end] window indices
        """
        if boundary:
            lower = boundary[0]
            upper = boundary[1]
            left = boundary[2]
            right = boundary[3]
        elif ij:
            lower = self.getULines()[ij[2]][ij[0]:ij[1] + 1]
            upper = self.getULines()[ij[3]][ij[0]:ij[1] + 1]
            left = self.getVLines()[ij[0]][ij[2]:ij[3] + 1]
            right = self.getVLines()[ij[1]][ij[2]:ij[3] + 1]
        else:
            lower = self.getULines()[0]
            upper = self.getULines()[-1]
            left = self.getVLines()[0]
            right = self.getVLines()[-1]
        # FIXME
        # FIXME left and right need to swapped from input
        # FIXME
        # FIXME like: left, right = right, left
        # FIXME
        lower = np.array(lower)
        upper = np.array(upper)
        left = np.array(left)
        right = np.array(right)
        # convert the block boundary curves into parametric form
        # as curves need to be between 0 and 1
        # interpolate B-spline through data points
        # here, a linear interpolant is derived "k=1"
        # splprep returns:
        # tck ... tuple (t,c,k) containing the vector of knots,
        #         the B-spline coefficients, and the degree of the spline.
        # u ... array of the parameters for each given point (knot)
        tck_lower, u_lower = interpolate.splprep(lower.T, s=0, k=1)
        tck_upper, u_upper = interpolate.splprep(upper.T, s=0, k=1)
        tck_left, u_left = interpolate.splprep(left.T, s=0, k=1)
        tck_right, u_right = interpolate.splprep(right.T, s=0, k=1)
        nodes = np.zeros((len(left) * len(lower), 2))
        # corner points
        c1 = lower[0]
        c2 = upper[0]
        c3 = lower[-1]
        c4 = upper[-1]
        # bilinear blending of the four boundary curves minus the
        # doubly-counted corner contribution (Coons patch)
        for i, xi in enumerate(u_lower):
            for j, eta in enumerate(u_left):
                node = i * len(u_left) + j
                point = (1.0 - xi) * left[j] + xi * right[j] + \
                    (1.0 - eta) * lower[i] + eta * upper[i] - \
                    ((1.0 - xi) * (1.0 - eta) * c1 + (1.0 - xi) * eta * c2 +
                     xi * (1.0 - eta) * c3 + xi * eta * c4)
                nodes[node, 0] = point[0]
                nodes[node, 1] = point[1]
        vlines = list()
        vline = list()
        i = 0
        for node in nodes:
            i += 1
            vline.append(node)
            if i % len(left) == 0:
                vlines.append(vline)
                vline = list()
        vlines.reverse()
        if ij:
            # write the interpolated window back into the existing u-lines
            ulines = self.makeUfromV(vlines)
            n = -1
            for k in range(ij[2], ij[3] + 1):
                n += 1
                self.ULines[k][ij[0]:ij[1] + 1] = ulines[n]
        else:
            self.ULines = self.makeUfromV(vlines)
        return
@staticmethod
def makeUfromV(vlines):
ulines = list()
uline = list()
for i in range(len(vlines[0])):
for vline in vlines:
x, y = vline[i][0], vline[i][1]
uline.append((x, y))
ulines.append(uline[::-1])
uline = list()
return ulines
    @staticmethod
    def writeFLMA(wind_tunnel, name='', depth=0.3):
        """Export the mesh in AVL FIRE FLMA format.

        The 2D mesh is extruded to one cell in z-direction (symmetric
        around z=0 with total thickness 'depth') to form HEX elements.

        Args:
            wind_tunnel (Windtunnel): mesh container with .mesh set
            name (str): output file path
            depth (float): extrusion depth in z-direction
        """
        basename = os.path.basename(name)
        nameroot, extension = os.path.splitext(basename)
        mesh = wind_tunnel.mesh
        vertices, connectivity = mesh
        with open(name, 'w') as f:
            number_of_vertices_2D = len(vertices)
            numvertex = '8'
            # write number of points to FLMA file (*2 for z-direction)
            f.write(str(2 * number_of_vertices_2D) + '\n')
            signum = -1.
            # write x-, y- and z-coordinates to FLMA file
            # loop 1D direction (symmetry)
            for _ in range(2):
                for vertex in vertices:
                    f.write(str(vertex[0]) + ' ' + str(vertex[1]) +
                            ' ' + str(signum * depth / 2.0) + ' ')
                signum = 1.
            # write number of cells to FLMA file
            cells = len(connectivity)
            f.write('\n' + str(cells) + '\n')
            # write cell connectivity to FLMA file
            for cell in connectivity:
                cell_connect = str(cell[0]) + ' ' + \
                    str(cell[1]) + ' ' + \
                    str(cell[2]) + ' ' + \
                    str(cell[3]) + ' ' + \
                    str(cell[0] + number_of_vertices_2D) + ' ' + \
                    str(cell[1] + number_of_vertices_2D) + ' ' + \
                    str(cell[2] + number_of_vertices_2D) + ' ' + \
                    str(cell[3] + number_of_vertices_2D) + '\n'
                f.write(numvertex + '\n')
                f.write(cell_connect)
            # FIRE element type (FET) for HEX element
            fetHEX = '5'
            f.write('\n' + str(cells) + '\n')
            for i in range(cells):
                f.write(fetHEX + ' ')
            f.write('\n\n')
            # FIRE element type (FET) for Quad element
            fetQuad = '3\n'
            # write FIRE selections to FLMA file
            f.write('6\n')
            f.write('right\n')
            f.write(fetQuad)
            f.write(str(2 * len(connectivity)) + '\n')
            for i in range(len(connectivity)):
                f.write(' %s 0' % (i))
            f.write('\n')
            f.write('\n')
            f.write('left\n')
            f.write(fetQuad)
            f.write(str(2 * len(connectivity)) + '\n')
            for i in range(len(connectivity)):
                f.write(' %s 1' % (i))
            f.write('\n')
            f.write('\n')
            f.write('bottom\n')
            f.write(fetQuad)
            f.write('2\n')
            f.write('0 2\n')
            f.write('\n')
            f.write('top\n')
            f.write(fetQuad)
            f.write('2\n')
            f.write('0 3\n')
            f.write('\n')
            f.write('back\n')
            f.write(fetQuad)
            f.write('2\n')
            f.write('0 4\n')
            f.write('\n')
            f.write('front\n')
            f.write(fetQuad)
            f.write('2\n')
            f.write('0 5\n')
        logger.info('FIRE type mesh saved as {}'.
                    format(os.path.join(OUTPUTDATA, basename)))
    @staticmethod
    def writeSU2(wind_tunnel, name=''):
        """Export the mesh in SU2 native format.

        Writes element connectivity, node coordinates and the boundary
        markers for the airfoil and the farfield.

        Args:
            wind_tunnel (Windtunnel): mesh container with .mesh,
                .blocks, .boundary_loops and boundary names set
            name (str): output file path
        """
        basename = os.path.basename(name)
        nameroot, extension = os.path.splitext(basename)
        mesh = wind_tunnel.mesh
        blocks = wind_tunnel.blocks
        boundary_loops = wind_tunnel.boundary_loops
        vertices, connectivity = mesh
        airfoil_subdivisions, v = blocks[0].getDivUV()
        trailing_edge_subdivisions, _ = blocks[1].getDivUV()
        # SU2 element types
        element_type_quadrilateral = '9'
        _date = date.today().strftime("%A %d. %B %Y")
        with open(name, 'w') as f:
            f.write('%\n')
            f.write('% Airfoil contour: ' + nameroot + ' \n')
            f.write('%\n')
            f.write('% File created with ' + PyAero.__appname__ + '\n')
            f.write('% Version: ' + PyAero.__version__ + '\n')
            f.write('% Author: ' + PyAero.__author__ + '\n')
            f.write('% Date: ' + _date + '\n')
            # dimension of the problem
            f.write('%\n')
            f.write('% Problem dimension\n')
            f.write('%\n')
            f.write('NDIME= 2\n')
            # element connectivity
            f.write('%\n')
            f.write('% Inner element connectivity\n')
            f.write('%\n')
            # number of elements
            f.write('NELEM= %s\n' % (len(connectivity)))
            for cell_id, cell in enumerate(connectivity):
                cell_connect = element_type_quadrilateral + ' ' + \
                    str(cell[0]) + ' ' + \
                    str(cell[1]) + ' ' + \
                    str(cell[2]) + ' ' + \
                    str(cell[3]) + ' ' + str(cell_id) + '\n'
                f.write(cell_connect)
            # comment for vertices
            f.write('%\n')
            f.write('% Node coordinates\n')
            f.write('%\n')
            f.write('NPOIN=%s\n' % (len(vertices)))
            # x- and y-coordinates
            for node, vertex in enumerate(vertices):
                x, y = vertex[0], vertex[1]
                f.write(' {:24.16e} {:24.16e} {}\n'.format(x, y, node))
            # boundaries
            f.write('%\n')
            f.write('% Boundary elements\n')
            f.write('%\n')
            # number of vertices
            # number of marks (Airfoil, Farfield, Symmetry)
            # f.write('NMARK= 3\n')
            f.write('NMARK= 2\n')
            # boundary definition (tag) for the airfoil
            f.write('MARKER_TAG= {}\n'.format(wind_tunnel.boundary_airfoil))
            f.write('MARKER_ELEMS= {}\n'.format(len(boundary_loops[0])))
            for edge in boundary_loops[0]:
                f.write('3 {} {}\n'.format(edge[0], edge[1]))
            # boundary definition (tag) for the farfield
            # this loops the complete outer boundary
            f.write('MARKER_TAG= {}\n'.format(wind_tunnel.boundary_inlet))
            f.write('MARKER_ELEMS= {}\n'.format(len(boundary_loops[1])))
            for edge in boundary_loops[1]:
                f.write('3 {} {}\n'.format(edge[0], edge[1]))
            # boundary definition (tag) for the symmetry
            # f.write('MARKER_TAG= {}\n'.format(wind_tunnel.boundary_symmetry))
            # f.write('MARKER_ELEMS= {}\n'.format(len(connectivity)))
            # for cell_id, cell in enumerate(connectivity):
            #     cell_connect = element_type_quadrilateral + ' ' + \
            #                    str(cell[0]) + ' ' + \
            #                    str(cell[1]) + ' ' + \
            #                    str(cell[2]) + ' ' + \
            #                    str(cell[3])
            #     f.write('{}\n'.format(cell_connect))
        logger.info('SU2 type mesh saved as {}'.
                    format(name))
@staticmethod
def writeGMSH(wind_tunnel, name=''):
"""export mesh in GMSH format 2
http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format-version-2-_0028Legacy_0029
Args:
mesh (TYPE): Description
blocks (TYPE): Description
name (str, optional): Description
"""
basename = os.path.basename(name)
nameroot, extension = os.path.splitext(basename)
mesh = wind_tunnel.mesh
boundary_loops = wind_tunnel.boundary_loops
bnd_airfoil = wind_tunnel.lineedit_airfoil
bnd_inlet = wind_tunnel.lineedit_inlet
bnd_outlet = wind_tunnel.lineedit_outlet
bnd_symmetry = wind_tunnel.lineedit_symmetry
is_outlet = wind_tunnel.is_outlet
vertices, connectivity = mesh
# element type "1" is GMSH 2-node line
# element type "2" is GMSH 3-node triangle
# element type "3" is GMSH 4-node quadrangle
element_type_line = '1'
# element_type_triangle = '2'
element_type_quadrangle = '3'
# write date in English
locale.setlocale(locale.LC_ALL, 'en')
_date = date.today().strftime("%A %d. %B %Y")
with open(name, 'w') as f:
f.write('$MeshFormat\n')
f.write('2.2 0 8\n')
f.write('$EndMeshFormat\n')
f.write('$Comments\n')
f.write(' Airfoil contour: ' + nameroot + ' \n')
f.write(' File created with ' + PyAero.__appname__ + '\n')
f.write(' Version: ' + PyAero.__version__ + '\n')
f.write(' Author: ' + PyAero.__author__ + '\n')
f.write(' Date: ' + _date + '\n')
f.write('$EndComments\n')
'''
$PhysicalNames
number-of-names
physical-dimension physical-tag "physical-name"
$EndPhysicalNames
'''
f.write('$PhysicalNames\n')
f.write('4\n')
f.write('1 1 "{}"\n'.format(bnd_airfoil))
f.write('1 2 "{}"\n'.format(bnd_inlet))
f.write('1 3 "{}"\n'.format(bnd_outlet))
f.write('2 4 "{}"\n'.format(bnd_symmetry))
f.write('$EndPhysicalNames\n')
f.write('$Nodes\n')
f.write('%s\n' % (len(vertices)))
# x- and y-coordinates
for node, vertex in enumerate(vertices, start=1):
x, y = vertex[0], vertex[1]
f.write(' {:} {:16.8} {:16.8} 0.0\n'.format(node, x, y))
f.write('$EndNodes\n')
'''
$Elements
number-of-elements
elm-number elm-type number-of-tags < tag > … node-number-list
$EndElements
'''
f.write('$Elements\n')
# boundary_loops is a disjoint set groups element
# key for each loop is one arbitrary vertex of the loop
# one loop is made by the airfoil
# the other loop is made by the windtunnel outer boundary
keys = list(boundary_loops.keys())
# print('Number of boundary loops', len(keys))
elements_loop1 = len(list(boundary_loops[keys[0]]))
elements_loop2 = len(list(boundary_loops[keys[1]]))
number_of_cells = len(connectivity)
# number of elements
# compiled of airfoil, outer boundary and mesh itself
f.write('{}\n'.format(elements_loop1 + elements_loop2 +
number_of_cells))
element_id = 0
# FIXME
# FIXME refactor dicts and their usage
# FIXME
# write boundary elements (as per physical names)
physical = {0: '1', 1: '2'}
elementary_entities = {0: '8', 1: '7'}
for j, loop in enumerate(boundary_loops):
for i, node in enumerate(boundary_loops[loop]):
element_id += 1
# an element consists of:
# element_id
# element_type
#
if is_outlet[i]:
physical_l = '3'
elementary_entities_l = '9'
else:
physical_l = physical[j]
elementary_entities_l = elementary_entities[j]
element = ' ' + str(element_id) + ' ' + \
element_type_line + ' 3 ' + physical_l + ' ' + \
elementary_entities_l + ' 0 ' + str(node[0] + 1) + \
' ' + str(node[1] + 1) + '\n'
f.write(element)
# write mesh elements
# includes physical tag for symmetry "4"
for cell in connectivity:
element_id += 1
element = ' ' + str(element_id) + ' ' + \
element_type_quadrangle + ' 3 4 6 0 ' + \
str(cell[0] + 1) + ' ' + \
str(cell[1] + 1) + ' ' + \
str(cell[2] + 1) + ' ' + \
str(cell[3] + 1) + '\n'
f.write(element)
f.write('$EndElements')
logger.info('GMSH type mesh saved as {}'.
format(os.path.join(OUTPUTDATA, basename)))
class Smooth:
    """Smoothing operations on a structured (square-lattice) mesh block.

    The wrapped ``block`` is expected to provide ``getNodeCoo``,
    ``setNodeCoo`` and ``getDivUV``; ``getNodeCoo`` presumably returns a
    numpy array (results are divided and ``.tolist()``-ed) — TODO confirm.
    """

    def __init__(self, block):
        self.block = block

    def getNeighbours(self, node):
        """Get a list of neighbours around a node """
        i, j = node[0], node[1]
        # neighbours are numbered 1..8 around the 3x3 stencil of (i, j),
        # starting at the lower-left corner and going counter-clockwise
        return {1: (i - 1, j - 1), 2: (i, j - 1), 3: (i + 1, j - 1),
                4: (i + 1, j), 5: (i + 1, j + 1), 6: (i, j + 1),
                7: (i - 1, j + 1), 8: (i - 1, j)}

    def smooth(self, nodes, iterations=1, algorithm='laplace'):
        """Smoothing of a square lattice mesh

        Algorithms:
         - Angle based
           Tian Zhou:
           AN ANGLE-BASED APPROACH TO TWO-DIMENSIONAL MESH SMOOTHING
         - Laplace
           Mean of surrounding node coordinates
         - Parallelogram smoothing
           Sanjay Kumar Khattri:
           A NEW SMOOTHING ALGORITHM FOR QUADRILATERAL AND HEXAHEDRAL MESHES

        Args:
            nodes (TYPE): List of (i, j) tuples for the nodes to be smoothed
            iterations (int, optional): Number of smoothing iterations
            algorithm (str, optional): Smoothing algorithm
        """
        coo = self.block.getNodeCoo
        for _ in range(iterations):
            new_pos = list()
            for node in nodes:
                nb = self.getNeighbours(node)
                if algorithm == 'laplace':
                    # average of the four edge-connected neighbours
                    new_pos = (coo(nb[2]) + coo(nb[4]) +
                               coo(nb[6]) + coo(nb[8])) / 4.0
                if algorithm == 'parallelogram':
                    # corner average minus half the edge average
                    new_pos = (coo(nb[1]) + coo(nb[3]) +
                               coo(nb[5]) + coo(nb[7])) / 4.0 - \
                              (coo(nb[2]) + coo(nb[4]) +
                               coo(nb[6]) + coo(nb[8])) / 2.0
                if algorithm == 'angle_based':
                    # not implemented (original left this as a no-op)
                    pass
                self.block.setNodeCoo(node, new_pos.tolist())
        return self.block

    def selectNodes(self, domain='interior', ij=[]):
        """Generate a node index list

        Args:
            domain (str, optional): 'interior' selects all non-boundary
                nodes; 'ij' selects the window given by ``ij``
            ij (list, optional): [istart, iend, jstart, jend] window used
                when domain == 'ij' (read-only, never mutated)

        Returns:
            List: Indices as (i, j) tuples
        """
        U, V = self.block.getDivUV()
        if domain == 'interior':
            istart, iend, jstart, jend = 1, U, 1, V
        if domain == 'ij':
            istart, iend, jstart, jend = ij[0], ij[1], ij[2], ij[3]
        # NOTE(review): any other domain value raises NameError here
        # (bounds never set), matching the original behaviour
        return [(i, j) for i in range(istart, iend)
                for j in range(jstart, jend)]
class DisjointSet:
    """Union-find structure with a single level of undo.

    Attributes:
        group (dict): maps a group leader to the set of its members
        leader (dict): maps a member to its group's leader
        oldgroup (dict): snapshot of `group` taken before the last add()
        oldleader (dict): snapshot of `leader` taken before the last add()

    from: https://stackoverflow.com/a/3067672/2264936
    """
    def __init__(self, size=None):
        if size is None:
            # maps a member to the group's leader
            self.leader = {}
            # maps a group leader to the group (which is a set)
            self.group = {}
            self.oldgroup = {}
            self.oldleader = {}
        else:
            self.group = {i: set([i]) for i in range(0, size)}
            self.leader = {i: i for i in range(0, size)}
            self.oldgroup = {i: set([i]) for i in range(0, size)}
            self.oldleader = {i: i for i in range(0, size)}

    def add(self, a, b):
        """Union the groups containing `a` and `b`, remembering the
        previous state so a single undo() can restore it."""
        # BUGFIX: snapshot must deep-copy the member sets.  The merge paths
        # below mutate the sets in place (`groupa |= groupb`, `.add(b)`), so
        # a shallow dict.copy() would leave oldgroup pointing at the very
        # sets that get mutated, and undo() would restore corrupted groups.
        self.oldgroup = {k: set(v) for k, v in self.group.items()}
        self.oldleader = self.leader.copy()
        leadera = self.leader.get(a)
        leaderb = self.leader.get(b)
        if leadera is not None:
            if leaderb is not None:
                if leadera == leaderb:
                    return  # nothing to do
                groupa = self.group[leadera]
                groupb = self.group[leaderb]
                # union-by-size: always merge the smaller group into the
                # larger one to keep the relabeling loop short
                if len(groupa) < len(groupb):
                    a, leadera, groupa, b, leaderb, groupb = \
                        b, leaderb, groupb, a, leadera, groupa
                groupa |= groupb
                del self.group[leaderb]
                for k in groupb:
                    self.leader[k] = leadera
            else:
                self.group[leadera].add(b)
                self.leader[b] = leadera
        else:
            if leaderb is not None:
                self.group[leaderb].add(a)
                self.leader[a] = leaderb
            else:
                # neither member is known yet: start a fresh group led by `a`
                self.leader[a] = self.leader[b] = a
                self.group[a] = set([a, b])

    def connected(self, a, b):
        """Return True iff `a` and `b` are currently in the same group."""
        leadera = self.leader.get(a)
        leaderb = self.leader.get(b)
        if leadera is not None and leaderb is not None:
            return leadera == leaderb
        return False

    def undo(self):
        """Revert the effect of the most recent add()."""
        self.group = self.oldgroup.copy()
        self.leader = self.oldleader.copy()
| 36.245869 | 90 | 0.494894 |
f4227a6f7aed2b266e65012a2f2eaf19fdccd1b3 | 1,513 | py | Python | airflow/serialization/enums.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | airflow/serialization/enums.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | airflow/serialization/enums.py | takuti/airflow | 0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Enums for DAG serialization."""
from enum import Enum, unique
# Fields of an encoded object in serialization.
@unique
class Encoding(str, Enum):
    """Field names used inside an encoded (serialized) object."""

    TYPE = '__type'
    VAR = '__var'
# Supported types for encoding. primitives and list are not encoded.
@unique
class DagAttributeTypes(str, Enum):
    """Discriminator tags for the supported DAG attribute types.

    Each member's value is the string stored in the encoded object's
    type field during DAG serialization.
    """

    DAG = 'dag'
    OP = 'operator'
    DATETIME = 'datetime'
    TIMEDELTA = 'timedelta'
    TIMEZONE = 'timezone'
    RELATIVEDELTA = 'relativedelta'
    DICT = 'dict'
    SET = 'set'
    TUPLE = 'tuple'
    POD = 'k8s.V1Pod'
    TASK_GROUP = 'taskgroup'
    EDGE_INFO = 'edgeinfo'
    PARAM = 'param'
    XCOM_REF = 'xcomref'
| 29.096154 | 68 | 0.708526 |
fb8d445c21ea2073d5afa0ca29db6d90e807b27a | 1,221 | py | Python | nativepython/type_wrappers/exceptions.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | nativepython/type_wrappers/exceptions.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | nativepython/type_wrappers/exceptions.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | # Coyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nativepython.native_ast as native_ast
import nativepython.type_wrappers.runtime_functions as runtime_functions
def generateThrowException(context, exception):
    """Build native AST that raises *exception* at runtime.

    The textual form of the exception is stashed via a runtime helper (a
    short-term hack so the callsite can pick it up), then a throw of a
    null UInt8 pointer is emitted.

    Args:
        context: compilation context (unused here, kept for interface parity)
        exception: the Python exception to render into the native throw
    """
    message = native_ast.const_utf8_cstr(str(exception))
    # as a short-term hack, use a runtime function to stash this where the
    # callsite can pick it up.
    stash = runtime_functions.stash_exception_ptr.call(message)
    null_exc_ptr = native_ast.Expression.Constant(
        val=native_ast.Constant.NullPointer(
            value_type=native_ast.UInt8.pointer()
        )
    )
    return stash >> native_ast.Expression.Throw(expr=null_exc_ptr)
| 39.387097 | 103 | 0.717445 |
bd003adbb7b859f141ee4c4c6b2319cd9594c072 | 13,183 | py | Python | openstack_dashboard/test/api_tests/vpnaas_tests.py | rtpro/horizon | 654724dccc3bf5d224eba10fa8f1e45ef7762c95 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/api_tests/vpnaas_tests.py | rtpro/horizon | 654724dccc3bf5d224eba10fa8f1e45ef7762c95 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/api_tests/vpnaas_tests.py | rtpro/horizon | 654724dccc3bf5d224eba10fa8f1e45ef7762c95 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from neutronclient.v2_0 import client
neutronclient = client.Client
class VPNaasApiTests(test.APITestCase):
    """Unit tests for the VPNaaS wrappers in ``api.vpn``.
    Each test registers mox stubs for the neutronclient calls it expects
    (`create_stubs`), records the expected call sequence, calls
    ``self.mox.ReplayAll()``, then invokes the ``api.vpn`` helper under
    test and checks the type of the returned wrapper object(s).
    NOTE: the recorded expectation order is significant — mox verifies
    calls in the order they were recorded.
    """
    @test.create_stubs({neutronclient: ('create_vpnservice',)})
    def test_vpnservice_create(self):
        """vpnservice_create forwards the form data and wraps the result
        in a VPNService."""
        vpnservice1 = self.api_vpnservices.first()
        form_data = {
            'name': vpnservice1['name'],
            'description': vpnservice1['description'],
            'subnet_id': vpnservice1['subnet_id'],
            'router_id': vpnservice1['router_id'],
            'admin_state_up': vpnservice1['admin_state_up']
        }
        vpnservice = {'vpnservice': self.api_vpnservices.first()}
        neutronclient.create_vpnservice(
            {'vpnservice': form_data}).AndReturn(vpnservice)
        self.mox.ReplayAll()
        ret_val = api.vpn.vpnservice_create(self.request, **form_data)
        self.assertIsInstance(ret_val, api.vpn.VPNService)
    @test.create_stubs({neutronclient: ('list_vpnservices',
                                        'list_ipsec_site_connections'),
                        api.neutron: ('subnet_list', 'router_list')})
    def test_vpnservice_list(self):
        """vpnservice_list returns one VPNService wrapper per service."""
        vpnservices = {'vpnservices': self.vpnservices.list()}
        vpnservices_dict = {'vpnservices': self.api_vpnservices.list()}
        subnets = self.subnets.list()
        routers = self.routers.list()
        ipsecsiteconnections_dict = {
            'ipsec_site_connections': self.api_ipsecsiteconnections.list()}
        neutronclient.list_vpnservices().AndReturn(vpnservices_dict)
        api.neutron.subnet_list(self.request).AndReturn(subnets)
        api.neutron.router_list(self.request).AndReturn(routers)
        neutronclient.list_ipsec_site_connections().AndReturn(
            ipsecsiteconnections_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.vpnservice_list(self.request)
        for (v, d) in zip(ret_val, vpnservices['vpnservices']):
            self.assertIsInstance(v, api.vpn.VPNService)
            self.assertTrue(v.name, d.name)
            self.assertTrue(v.id)
    @test.create_stubs({neutronclient: ('show_vpnservice',
                                        'list_ipsec_site_connections'),
                        api.neutron: ('subnet_get', 'router_get')})
    def test_vpnservice_get(self):
        """vpnservice_get resolves subnet/router details and wraps the
        service."""
        vpnservice = self.vpnservices.first()
        vpnservice_dict = {'vpnservice': self.api_vpnservices.first()}
        subnet = self.subnets.first()
        router = self.routers.first()
        ipsecsiteconnections_dict = {
            'ipsec_site_connections': self.api_ipsecsiteconnections.list()}
        neutronclient.show_vpnservice(
            vpnservice.id).AndReturn(vpnservice_dict)
        api.neutron.subnet_get(self.request, subnet.id).AndReturn(subnet)
        api.neutron.router_get(self.request, router.id).AndReturn(router)
        neutronclient.list_ipsec_site_connections().AndReturn(
            ipsecsiteconnections_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.vpnservice_get(self.request, vpnservice.id)
        self.assertIsInstance(ret_val, api.vpn.VPNService)
    @test.create_stubs({neutronclient: ('create_ikepolicy',)})
    def test_ikepolicy_create(self):
        """ikepolicy_create forwards the form data and wraps the result
        in an IKEPolicy."""
        ikepolicy1 = self.api_ikepolicies.first()
        form_data = {
            'name': ikepolicy1['name'],
            'description': ikepolicy1['description'],
            'auth_algorithm': ikepolicy1['auth_algorithm'],
            'encryption_algorithm': ikepolicy1['encryption_algorithm'],
            'ike_version': ikepolicy1['ike_version'],
            'lifetime': ikepolicy1['lifetime'],
            'phase1_negotiation_mode': ikepolicy1['phase1_negotiation_mode'],
            'pfs': ikepolicy1['pfs']
        }
        ikepolicy = {'ikepolicy': self.api_ikepolicies.first()}
        neutronclient.create_ikepolicy(
            {'ikepolicy': form_data}).AndReturn(ikepolicy)
        self.mox.ReplayAll()
        ret_val = api.vpn.ikepolicy_create(self.request, **form_data)
        self.assertIsInstance(ret_val, api.vpn.IKEPolicy)
    @test.create_stubs({neutronclient: ('list_ikepolicies',
                                        'list_ipsec_site_connections')})
    def test_ikepolicy_list(self):
        """ikepolicy_list returns one IKEPolicy wrapper per policy."""
        ikepolicies = {'ikepolicies': self.ikepolicies.list()}
        ikepolicies_dict = {'ikepolicies': self.api_ikepolicies.list()}
        ipsecsiteconnections_dict = {
            'ipsec_site_connections': self.api_ipsecsiteconnections.list()}
        neutronclient.list_ikepolicies().AndReturn(ikepolicies_dict)
        neutronclient.list_ipsec_site_connections().AndReturn(
            ipsecsiteconnections_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.ikepolicy_list(self.request)
        for (v, d) in zip(ret_val, ikepolicies['ikepolicies']):
            self.assertIsInstance(v, api.vpn.IKEPolicy)
            self.assertTrue(v.name, d.name)
            self.assertTrue(v.id)
    @test.create_stubs({neutronclient: ('show_ikepolicy',
                                        'list_ipsec_site_connections')})
    def test_ikepolicy_get(self):
        """ikepolicy_get wraps a single policy in an IKEPolicy."""
        ikepolicy = self.ikepolicies.first()
        ikepolicy_dict = {'ikepolicy': self.api_ikepolicies.first()}
        ipsecsiteconnections_dict = {
            'ipsec_site_connections': self.api_ipsecsiteconnections.list()}
        neutronclient.show_ikepolicy(
            ikepolicy.id).AndReturn(ikepolicy_dict)
        neutronclient.list_ipsec_site_connections().AndReturn(
            ipsecsiteconnections_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.ikepolicy_get(self.request, ikepolicy.id)
        self.assertIsInstance(ret_val, api.vpn.IKEPolicy)
    @test.create_stubs({neutronclient: ('create_ipsecpolicy',)})
    def test_ipsecpolicy_create(self):
        """ipsecpolicy_create forwards the form data and wraps the result
        in an IPSecPolicy."""
        ipsecpolicy1 = self.api_ipsecpolicies.first()
        form_data = {
            'name': ipsecpolicy1['name'],
            'description': ipsecpolicy1['description'],
            'auth_algorithm': ipsecpolicy1['auth_algorithm'],
            'encryption_algorithm': ipsecpolicy1['encryption_algorithm'],
            'encapsulation_mode': ipsecpolicy1['encapsulation_mode'],
            'lifetime': ipsecpolicy1['lifetime'],
            'pfs': ipsecpolicy1['pfs'],
            'transform_protocol': ipsecpolicy1['transform_protocol']
        }
        ipsecpolicy = {'ipsecpolicy': self.api_ipsecpolicies.first()}
        neutronclient.create_ipsecpolicy(
            {'ipsecpolicy': form_data}).AndReturn(ipsecpolicy)
        self.mox.ReplayAll()
        ret_val = api.vpn.ipsecpolicy_create(self.request, **form_data)
        self.assertIsInstance(ret_val, api.vpn.IPSecPolicy)
    @test.create_stubs({neutronclient: ('list_ipsecpolicies',
                                        'list_ipsec_site_connections')})
    def test_ipsecpolicy_list(self):
        """ipsecpolicy_list returns one IPSecPolicy wrapper per policy."""
        ipsecpolicies = {'ipsecpolicies': self.ipsecpolicies.list()}
        ipsecpolicies_dict = {'ipsecpolicies': self.api_ipsecpolicies.list()}
        ipsecsiteconnections_dict = {
            'ipsec_site_connections': self.api_ipsecsiteconnections.list()}
        neutronclient.list_ipsecpolicies().AndReturn(ipsecpolicies_dict)
        neutronclient.list_ipsec_site_connections().AndReturn(
            ipsecsiteconnections_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.ipsecpolicy_list(self.request)
        for (v, d) in zip(ret_val, ipsecpolicies['ipsecpolicies']):
            self.assertIsInstance(v, api.vpn.IPSecPolicy)
            self.assertTrue(v.name, d.name)
            self.assertTrue(v.id)
    @test.create_stubs({neutronclient: ('show_ipsecpolicy',
                                        'list_ipsec_site_connections')})
    def test_ipsecpolicy_get(self):
        """ipsecpolicy_get wraps a single policy in an IPSecPolicy."""
        ipsecpolicy = self.ipsecpolicies.first()
        ipsecpolicy_dict = {'ipsecpolicy': self.api_ipsecpolicies.first()}
        ipsecsiteconnections_dict = {
            'ipsec_site_connections': self.api_ipsecsiteconnections.list()}
        neutronclient.show_ipsecpolicy(
            ipsecpolicy.id).AndReturn(ipsecpolicy_dict)
        neutronclient.list_ipsec_site_connections().AndReturn(
            ipsecsiteconnections_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.ipsecpolicy_get(self.request, ipsecpolicy.id)
        self.assertIsInstance(ret_val, api.vpn.IPSecPolicy)
    @test.create_stubs({neutronclient: ('create_ipsec_site_connection',)})
    def test_ipsecsiteconnection_create(self):
        """ipsecsiteconnection_create forwards the form data and wraps the
        result in an IPSecSiteConnection."""
        ipsecsiteconnection1 = self.api_ipsecsiteconnections.first()
        form_data = {
            'name': ipsecsiteconnection1['name'],
            'description': ipsecsiteconnection1['description'],
            'dpd': ipsecsiteconnection1['dpd'],
            'ikepolicy_id': ipsecsiteconnection1['ikepolicy_id'],
            'initiator': ipsecsiteconnection1['initiator'],
            'ipsecpolicy_id': ipsecsiteconnection1['ipsecpolicy_id'],
            'mtu': ipsecsiteconnection1['mtu'],
            'peer_address': ipsecsiteconnection1['peer_address'],
            'peer_cidrs': ipsecsiteconnection1['peer_cidrs'],
            'peer_id': ipsecsiteconnection1['peer_id'],
            'psk': ipsecsiteconnection1['psk'],
            'vpnservice_id': ipsecsiteconnection1['vpnservice_id'],
            'admin_state_up': ipsecsiteconnection1['admin_state_up']
        }
        ipsecsiteconnection = {'ipsec_site_connection':
                               self.api_ipsecsiteconnections.first()}
        neutronclient.create_ipsec_site_connection(
            {'ipsec_site_connection':
             form_data}).AndReturn(ipsecsiteconnection)
        self.mox.ReplayAll()
        ret_val = api.vpn.ipsecsiteconnection_create(
            self.request, **form_data)
        self.assertIsInstance(ret_val, api.vpn.IPSecSiteConnection)
    @test.create_stubs({neutronclient: ('list_ipsec_site_connections',
                                        'list_ikepolicies',
                                        'list_ipsecpolicies',
                                        'list_vpnservices')})
    def test_ipsecsiteconnection_list(self):
        """ipsecsiteconnection_list returns one IPSecSiteConnection wrapper
        per connection."""
        ipsecsiteconnections = {
            'ipsec_site_connections': self.ipsecsiteconnections.list()}
        ipsecsiteconnections_dict = {
            'ipsec_site_connections': self.api_ipsecsiteconnections.list()}
        ikepolicies_dict = {'ikepolicies': self.api_ikepolicies.list()}
        ipsecpolicies_dict = {'ipsecpolicies': self.api_ipsecpolicies.list()}
        vpnservices_dict = {'vpnservices': self.api_vpnservices.list()}
        neutronclient.list_ipsec_site_connections().AndReturn(
            ipsecsiteconnections_dict)
        neutronclient.list_ikepolicies().AndReturn(ikepolicies_dict)
        neutronclient.list_ipsecpolicies().AndReturn(ipsecpolicies_dict)
        neutronclient.list_vpnservices().AndReturn(vpnservices_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.ipsecsiteconnection_list(self.request)
        for (v, d) in zip(ret_val,
                          ipsecsiteconnections['ipsec_site_connections']):
            self.assertIsInstance(v, api.vpn.IPSecSiteConnection)
            self.assertTrue(v.name, d.name)
            self.assertTrue(v.id)
    @test.create_stubs({neutronclient: ('show_ipsec_site_connection',
                                        'show_ikepolicy', 'show_ipsecpolicy',
                                        'show_vpnservice')})
    def test_ipsecsiteconnection_get(self):
        """ipsecsiteconnection_get resolves the related policies/service and
        wraps the connection."""
        ipsecsiteconnection = self.ipsecsiteconnections.first()
        connection_dict = {'ipsec_site_connection':
                           self.api_ipsecsiteconnections.first()}
        ikepolicy_dict = {'ikepolicy': self.api_ikepolicies.first()}
        ipsecpolicy_dict = {'ipsecpolicy': self.api_ipsecpolicies.first()}
        vpnservice_dict = {'vpnservice': self.api_vpnservices.first()}
        neutronclient.show_ipsec_site_connection(
            ipsecsiteconnection.id).AndReturn(connection_dict)
        neutronclient.show_ikepolicy(
            ipsecsiteconnection.ikepolicy_id).AndReturn(ikepolicy_dict)
        neutronclient.show_ipsecpolicy(
            ipsecsiteconnection.ipsecpolicy_id).AndReturn(ipsecpolicy_dict)
        neutronclient.show_vpnservice(
            ipsecsiteconnection.vpnservice_id).AndReturn(vpnservice_dict)
        self.mox.ReplayAll()
        ret_val = api.vpn.ipsecsiteconnection_get(self.request,
                                                  ipsecsiteconnection.id)
        self.assertIsInstance(ret_val, api.vpn.IPSecSiteConnection)
| 44.840136 | 78 | 0.655996 |
bf2ab78ba51204bc38e00310ed0900cbe8d572de | 44,395 | py | Python | web/db.py | jackylee53/spiderproxypool | f9e298aa420baea0194da6176a3f1ef976a04c44 | [
"MIT"
] | null | null | null | web/db.py | jackylee53/spiderproxypool | f9e298aa420baea0194da6176a3f1ef976a04c44 | [
"MIT"
] | null | null | null | web/db.py | jackylee53/spiderproxypool | f9e298aa420baea0194da6176a3f1ef976a04c44 | [
"MIT"
] | null | null | null | """
Database API
(part of web.py)
"""
from __future__ import print_function
from .utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
import datetime, time, os, urllib, re
from .py3helpers import PY2, string_types, numeric_types, iteritems
try:
from urllib import parse as urlparse
from urllib.parse import unquote
except ImportError:
import urlparse
from urllib import unquote
try:
# db module can work independent of web.py
from .webapi import debug, config
except:
import sys
debug = sys.stderr
config = storage()
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
TOKEN = '[ \\f\\t]*(\\\\\\r?\\n[ \\f\\t]*)*(#[^\\r\\n]*)?(((\\d+[jJ]|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)[jJ])|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)|(0[xX][\\da-fA-F]+[lL]?|0[bB][01]+[lL]?|(0[oO][0-7]+)|(0[0-7]*)[lL]?|[1-9]\\d*[lL]?))|((\\*\\*=?|>>=?|<<=?|<>|!=|//=?|[+\\-*/%&|^=<>]=?|~)|[][(){}]|(\\r?\\n|[:;.,`@]))|([uUbB]?[rR]?\'[^\\n\'\\\\]*(?:\\\\.[^\\n\'\\\\]*)*\'|[uUbB]?[rR]?"[^\\n"\\\\]*(?:\\\\.[^\\n"\\\\]*)*")|[a-zA-Z_]\\w*)'
tokenprog = re.compile(TOKEN)
class UnknownDB(Exception):
    """Raised when an unsupported database engine is requested."""
class _ItplError(ValueError):
    """Raised when string interpolation encounters an unterminated
    expression; records the offending text and the failure position."""
    def __init__(self, text, pos):
        super(_ItplError, self).__init__()
        self.text = text
        self.pos = pos

    def __str__(self):
        return "unfinished expression in %s at char %d" % (
            repr(self.text), self.pos)
class TransactionError(Exception):
    """Error raised for database transaction problems."""
class UnknownParamstyle(Exception):
    """
    Raised for unsupported db paramstyles.

    Currently supported: qmark, numeric, format, pyformat.
    """
class SQLParam(object):
    """
    Parameter in SQLQuery.

    >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
    >>> q
    <sql: "SELECT * FROM test WHERE name='joe'">
    >>> q.query()
    'SELECT * FROM test WHERE name=%s'
    >>> q.values()
    ['joe']
    """
    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def get_marker(self, paramstyle='pyformat'):
        """Return the DB-API placeholder text for *paramstyle*."""
        if paramstyle == 'qmark':
            return '?'
        if paramstyle == 'numeric':
            return ':1'
        # None is treated like the default format/pyformat styles.
        if paramstyle in (None, 'format', 'pyformat'):
            return '%s'
        raise UnknownParamstyle(paramstyle)

    def sqlquery(self):
        """Wrap this parameter in a one-item SQLQuery."""
        return SQLQuery([self])

    def __add__(self, other):
        return self.sqlquery() + other

    def __radd__(self, other):
        return other + self.sqlquery()

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return '<param: %s>' % repr(self.value)

sqlparam = SQLParam
class SQLQuery(object):
    """
    You can pass this sort of thing as a clause in any db function.
    Otherwise, you can pass a dictionary to the keyword argument `vars`
    and the function will call reparam for you.
    Internally, consists of `items`, which is a list of strings and
    SQLParams, which get concatenated to produce the actual query.
    """
    __slots__ = ["items"]
    # tested in sqlquote's docstring
    def __init__(self, items=None):
        r"""Creates a new SQLQuery.
        >>> SQLQuery("x")
        <sql: 'x'>
        >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
        >>> q
        <sql: 'SELECT * FROM test WHERE x=1'>
        >>> q.query(), q.values()
        ('SELECT * FROM test WHERE x=%s', [1])
        >>> SQLQuery(SQLParam(1))
        <sql: '1'>
        """
        if items is None:
            self.items = []
        elif isinstance(items, list):
            self.items = items
        elif isinstance(items, SQLParam):
            self.items = [items]
        elif isinstance(items, SQLQuery):
            self.items = list(items.items)
        else:
            self.items = [items]
        # Take care of SQLLiterals
        # (unwrap them so their raw SQL text is embedded, not bound/quoted)
        for i, item in enumerate(self.items):
            if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
                self.items[i] = item.value.v
    def append(self, value):
        # Append a raw string or SQLParam to this query in place.
        self.items.append(value)
    def __add__(self, other):
        # `query + other` -> new SQLQuery; accepts str or SQLQuery.
        if isinstance(other, string_types):
            items = [other]
        elif isinstance(other, SQLQuery):
            items = other.items
        else:
            return NotImplemented
        return SQLQuery(self.items + items)
    def __radd__(self, other):
        # `str + query` -> new SQLQuery with the string prepended.
        if isinstance(other, string_types):
            items = [other]
        else:
            return NotImplemented
        return SQLQuery(items + self.items)
    def __iadd__(self, other):
        # `query += other`: extends in place; accepts str, SQLParam, SQLQuery.
        if isinstance(other, (string_types, SQLParam)):
            self.items.append(other)
        elif isinstance(other, SQLQuery):
            self.items.extend(other.items)
        else:
            return NotImplemented
        return self
    def __len__(self):
        # Length of the rendered query text (parameters shown as markers).
        return len(self.query())
    def query(self, paramstyle=None):
        """
        Returns the query part of the sql query.
        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
        >>> q.query()
        'SELECT * FROM test WHERE name=%s'
        >>> q.query(paramstyle='qmark')
        'SELECT * FROM test WHERE name=?'
        """
        s = []
        for x in self.items:
            if isinstance(x, SQLParam):
                x = x.get_marker(paramstyle)
                s.append(safestr(x))
            else:
                x = safestr(x)
                # automatically escape % characters in the query
                # For backward compatability, ignore escaping when the query looks already escaped
                if paramstyle in ['format', 'pyformat']:
                    if '%' in x and '%%' not in x:
                        x = x.replace('%', '%%')
                s.append(x)
        return "".join(s)
    def values(self):
        """
        Returns the values of the parameters used in the sql query.
        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
        >>> q.values()
        ['joe']
        """
        return [i.value for i in self.items if isinstance(i, SQLParam)]
    def join(items, sep=' ', prefix=None, suffix=None, target=None):
        """
        Joins multiple queries.
        >>> SQLQuery.join(['a', 'b'], ', ')
        <sql: 'a, b'>
        Optinally, prefix and suffix arguments can be provided.
        >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
        <sql: '(a, b)'>
        If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
        """
        if target is None:
            target = SQLQuery()
        target_items = target.items
        if prefix:
            target_items.append(prefix)
        for i, item in enumerate(items):
            if i != 0:
                target_items.append(sep)
            if isinstance(item, SQLQuery):
                target_items.extend(item.items)
            else:
                target_items.append(item)
        if suffix:
            target_items.append(suffix)
        return target
    # make join callable as SQLQuery.join(...) without an instance
    # (old-style staticmethod assignment, kept for Python 2 compatibility)
    join = staticmethod(join)
    def _str(self):
        # Best-effort interpolation of the values into the query for display;
        # falls back to the raw (marker) query when interpolation fails.
        try:
            return self.query() % tuple([sqlify(x) for x in self.values()])
        except (ValueError, TypeError):
            return self.query()
    def __str__(self):
        return safestr(self._str())
    def __unicode__(self):
        return safeunicode(self._str())
    def __repr__(self):
        return '<sql: %s>' % repr(str(self))
class SQLLiteral:
    """
    Protects a string from `sqlquote`.
    >>> sqlquote('NOW()')
    <sql: "'NOW()'">
    >>> sqlquote(SQLLiteral('NOW()'))
    <sql: 'NOW()'>
    """
    def __init__(self, v):
        # v: raw SQL text to be embedded verbatim (never quoted/escaped);
        # SQLQuery.__init__ unwraps SQLParam(SQLLiteral(...)) back to this.
        self.v = v
    def __repr__(self):
        return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
    """
    >>> _sqllist([1, 2, 3])
    <sql: '(1, 2, 3)'>
    """
    # Build the item sequence "(", p1, ", ", p2, ..., ")" with every value
    # bound as an SQLParam.
    items = ['(']
    for v in values:
        items.append(sqlparam(v))
        items.append(', ')
    if len(items) > 1:
        items.pop()  # drop the trailing separator
    items.append(')')
    return SQLQuery(items)
def reparam(string_, dictionary):
    """
    Takes a string and a dictionary and interpolates the string
    using values from the dictionary. Returns an `SQLQuery` for the result.

    >>> reparam("s = $s", dict(s=True))
    <sql: "s = 't'">
    >>> reparam("s IN $s", dict(s=[1, 2]))
    <sql: 's IN (1, 2)'>
    """
    env = dictionary.copy()  # eval mucks with its namespace
    # Disable builtins so template expressions cannot reach dangerous names
    # (reduces the risk of remote code execution through templates).
    env['__builtins__'] = object()
    # Live chunks are Python expressions to evaluate and quote; the rest is
    # passed through as literal SQL text.
    parts = [sqlquote(eval(chunk, env)) if live else chunk
             for live, chunk in _interpolate(string_)]
    return SQLQuery.join(parts, '')
def sqlify(obj):
    """
    converts `obj` to its proper SQL version

    >>> sqlify(None)
    'NULL'
    >>> sqlify(True)
    "'t'"
    >>> sqlify(3)
    '3'
    """
    # Identity checks come first: `1 == True` and `hash(1) == hash(True)`,
    # so booleans must be recognized before the numeric branch.
    if obj is None:
        return 'NULL'
    if obj is True:
        return "'t'"
    if obj is False:
        return "'f'"
    if isinstance(obj, numeric_types):
        return str(obj)
    if isinstance(obj, datetime.datetime):
        return repr(obj.isoformat())
    if PY2 and isinstance(obj, unicode):  # Strings are always UTF8 in Py3
        obj = obj.encode('utf8')
    return repr(obj)
def sqllist(lst):
    """
    Converts the arguments for use in something like a WHERE clause.

    >>> sqllist(['a', 'b'])
    'a, b'
    >>> sqllist('a')
    'a'
    """
    # A bare string passes through untouched; any other iterable of column
    # names is comma-joined.
    if isinstance(lst, string_types):
        return lst
    return ', '.join(lst)
def sqlors(left, lst):
    """
    `left is a SQL clause like `tablename.arg = `
    and `lst` is a list of values. Returns a reparam-style
    pair featuring the SQL that ORs together the clause
    for each item in the lst.

    >>> sqlors('foo = ', [])
    <sql: '1=2'>
    >>> sqlors('foo = ', [1])
    <sql: 'foo = 1'>
    >>> sqlors('foo = ', 1)
    <sql: 'foo = 1'>
    >>> sqlors('foo = ', [1,2,3])
    <sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
    """
    if isinstance(lst, iters):
        lst = list(lst)
        n = len(lst)
        if n == 0:
            # empty IN/OR list can never match
            return SQLQuery("1=2")
        if n == 1:
            # single-element list collapses to a plain equality
            lst = lst[0]
    if isinstance(lst, iters):
        # (left p1 OR left p2 OR ... OR 1=2) -- the trailing 1=2 absorbs
        # the final OR separator
        pieces = []
        for x in lst:
            pieces.extend([left, sqlparam(x), ' OR '])
        return SQLQuery(['('] + pieces + ['1=2)'])
    return left + sqlparam(lst)
def sqlwhere(data, grouping=' AND '):
    """
    Converts a two-tuple (key, value) iterable `data` to an SQL WHERE clause `SQLQuery`.

    >>> sqlwhere((('cust_id', 2), ('order_id',3)))
    <sql: 'cust_id = 2 AND order_id = 3'>
    >>> sqlwhere((('order_id', 3), ('cust_id', 2)), grouping=', ')
    <sql: 'order_id = 3, cust_id = 2'>
    >>> sqlwhere((('a', 'a'), ('b', 'b'))).query()
    'a = %s AND b = %s'
    """
    # Each pair becomes "key = <param>"; the clauses are joined by `grouping`.
    clauses = [k + ' = ' + sqlparam(v) for k, v in data]
    return SQLQuery.join(clauses, grouping)
def sqlquote(a):
    """
    Ensures `a` is quoted properly for use in a SQL query.

    >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
    <sql: "WHERE x = 't' AND y = 3">
    >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
    <sql: "WHERE x = 't' AND y IN (2, 3)">
    """
    # Lists become a parenthesized, parameterized tuple; anything else is a
    # single bound parameter.
    if isinstance(a, list):
        return _sqllist(a)
    return sqlparam(a).sqlquery()
class Transaction:
    """Database transaction."""
    def __init__(self, ctx):
        # ctx is the per-thread DB context; the engine classes below close
        # over it (and over this transaction's stack depth).
        self.ctx = ctx
        self.transaction_count = transaction_count = len(ctx.transactions)
        class transaction_engine:
            """Transaction Engine used in top level transactions."""
            def do_transact(self):
                # keep the connection alive: top-level BEGIN is implicit,
                # so just flush without unloading the pooled connection
                ctx.commit(unload=False)
            def do_commit(self):
                ctx.commit()
            def do_rollback(self):
                ctx.rollback()
        class subtransaction_engine:
            """Transaction Engine used in sub transactions."""
            def query(self, q):
                # savepoint names are suffixed with this transaction's depth
                db_cursor = ctx.db.cursor()
                ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
            def do_transact(self):
                self.query('SAVEPOINT webpy_sp_%s')
            def do_commit(self):
                self.query('RELEASE SAVEPOINT webpy_sp_%s')
            def do_rollback(self):
                self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
        class dummy_engine:
            """Transaction Engine used instead of subtransaction_engine
            when sub transactions are not supported."""
            do_transact = do_commit = do_rollback = lambda self: None
        if self.transaction_count:
            # nested transactions are not supported in some databases
            if self.ctx.get('ignore_nested_transactions'):
                self.engine = dummy_engine()
            else:
                self.engine = subtransaction_engine()
        else:
            self.engine = transaction_engine()
        self.engine.do_transact()
        self.ctx.transactions.append(self)
    def __enter__(self):
        # allows `with db.transaction(): ...`
        return self
    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, roll back if the body raised
        if exctype is not None:
            self.rollback()
        else:
            self.commit()
    def commit(self):
        # the guard makes commit/rollback idempotent: only act if this
        # transaction is still on the stack
        if len(self.ctx.transactions) > self.transaction_count:
            self.engine.do_commit()
            self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
    def rollback(self):
        if len(self.ctx.transactions) > self.transaction_count:
            self.engine.do_rollback()
            self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
class DB:
"""Database"""
    def __init__(self, db_module, keywords):
        """Creates a database.
        """
        # some DB implementations take an optional parameter `driver` to use
        # a specific driver module, but it should not be passed to connect
        # NOTE(review): `keywords` is mutated in place (driver/pooling are
        # popped), so callers sharing the dict see the change
        keywords.pop('driver', None)
        self.db_module = db_module
        self.keywords = keywords
        self._ctx = threadeddict()
        # flag to enable/disable printing queries
        self.printing = config.get('debug_sql', config.get('debug', False))
        self.supports_multiple_insert = False
        try:
            import DBUtils
            # enable pooling if DBUtils module is available.
            self.has_pooling = True
        except ImportError:
            self.has_pooling = False
        # Pooling can be disabled by passing pooling=False in the keywords.
        self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
    def _getctx(self):
        # Lazily (re)create the per-thread DB context on first access, or
        # after it was unloaded by commit/rollback when pooling is enabled.
        if not self._ctx.get('db'):
            self._load_context(self._ctx)
        return self._ctx
    ctx = property(_getctx)
    def _load_context(self, ctx):
        # Initialize a fresh per-thread context: query counter, transaction
        # stack and a live connection (pooled when DBUtils is available).
        ctx.dbq_count = 0
        ctx.transactions = [] # stack of transactions
        if self.has_pooling:
            ctx.db = self._connect_with_pooling(self.keywords)
        else:
            ctx.db = self._connect(self.keywords)
        ctx.db_execute = self._db_execute
        # some drivers expose no commit/rollback; install no-ops so callers
        # don't have to special-case them
        if not hasattr(ctx.db, 'commit'):
            ctx.db.commit = lambda: None
        if not hasattr(ctx.db, 'rollback'):
            ctx.db.rollback = lambda: None
        def commit(unload=True):
            # do db commit and release the connection if pooling is enabled.
            ctx.db.commit()
            if unload and self.has_pooling:
                self._unload_context(self._ctx)
        def rollback():
            # do db rollback and release the connection if pooling is enabled.
            ctx.db.rollback()
            if self.has_pooling:
                self._unload_context(self._ctx)
        ctx.commit = commit
        ctx.rollback = rollback
    def _unload_context(self, ctx):
        # Drop the connection from the per-thread context; a new one is
        # created (or fetched from the pool) on the next `ctx` access.
        del ctx.db
def _connect(self, keywords):
return self.db_module.connect(**keywords)
def _connect_with_pooling(self, keywords):
def get_pooled_db():
from DBUtils import PooledDB
# In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
# see Bug#122112
if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
else:
return PooledDB.PooledDB(creator=self.db_module, **keywords)
if getattr(self, '_pooleddb', None) is None:
self._pooleddb = get_pooled_db()
return self._pooleddb.connection()
def _db_cursor(self):
return self.ctx.db.cursor()
def _param_marker(self):
"""Returns parameter marker based on paramstyle attribute if this database."""
style = getattr(self, 'paramstyle', 'pyformat')
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle(style)
def _db_execute(self, cur, sql_query):
"""executes an sql query"""
self.ctx.dbq_count += 1
try:
a = time.time()
query, params = self._process_query(sql_query)
out = cur.execute(query, params)
b = time.time()
except:
if self.printing:
print('ERR:', str(sql_query), file=debug)
if self.ctx.transactions:
self.ctx.transactions[-1].rollback()
else:
self.ctx.rollback()
raise
if self.printing:
print('%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query)), file=debug)
return out
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, params
def _where(self, where, vars):
if isinstance(where, numeric_types):
where = "id = " + sqlparam(where)
#@@@ for backward-compatibility
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, dict):
where = self._where_dict(where)
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, vars)
return where
def _where_dict(self, where):
where_clauses = []
for k, v in sorted(iteritems(where), key= lambda t:t[0]):
where_clauses.append(k + ' = ' + sqlquote(v))
if where_clauses:
return SQLQuery.join(where_clauses, " AND ")
else:
return None
def query(self, sql_query, vars=None, processed=False, _test=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if vars is None: vars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, vars)
if _test: return sql_query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, sql_query)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = iterbetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not self.ctx.transactions:
self.ctx.commit()
return out
def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
>>> db.select('foo', where={'id': 5}, _test=True)
<sql: 'SELECT * FROM foo WHERE id = 5'>
"""
if vars is None: vars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def where(self, table, what='*', order=None, group=None, limit=None,
offset=None, _test=False, **kwargs):
"""
Selects from `table` where keys are equal to values in `kwargs`.
>>> db = DB(None, {})
>>> db.where('foo', bar_id=3, _test=True)
<sql: 'SELECT * FROM foo WHERE bar_id = 3'>
>>> db.where('foo', source=2, crust='dewey', _test=True)
<sql: "SELECT * FROM foo WHERE crust = 'dewey' AND source = 2">
>>> db.where('foo', _test=True)
<sql: 'SELECT * FROM foo'>
"""
where = self._where_dict(kwargs)
return self.select(table, what=what, order=order,
group=group, limit=limit, offset=offset, _test=_test,
where=where)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, vars):
if isinstance(val, numeric_types):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
#@@@
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif sql == 'WHERE' and isinstance(val, dict):
nout = self._where_dict(val)
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, vars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, created, name) VALUES (2, NOW(), 'bob')">
>>> q.query()
'INSERT INTO foo (age, created, name) VALUES (%s, NOW(), %s)'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
#needed for Py3 compatibility with the above doctests
sorted_values = sorted(values.items(), key=lambda t: t[0])
_keys = SQLQuery.join(map(lambda t: t[0], sorted_values), ', ')
_values = SQLQuery.join([sqlparam(v) for v in map(lambda t: t[1], sorted_values)], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
Inserts multiple rows into `tablename`. The `values` must be a list of dictioanries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (email, name) VALUES ('foo@example.com', 'foo'), ('bar@example.com', 'bar')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
for v in values:
if v.keys() != keys:
raise ValueError('Not all rows have the same keys')
keys = sorted(keys) #enforce query order for the above doctest compatibility with Py3
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, vars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, created = NOW(), name = 'bob' WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, created = NOW(), name = %s WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if vars is None: vars = {}
where = self._where(where, vars)
values = sorted(values.items(), key=lambda t: t[0])
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, vars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if vars is None: vars = {}
where = self._where(where, vars)
q = 'DELETE FROM ' + table
if using: q += ' USING ' + sqllist(using)
if where: q += ' WHERE ' + where
if _test: return q
db_cursor = self._db_cursor()
self._db_execute(db_cursor, q)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def _process_insert_query(self, query, tablename, seqname):
return query
def transaction(self):
"""Start a transaction."""
return Transaction(self.ctx)
class PostgresDB(DB):
    """Postgres driver (psycopg2 / psycopg / pgdb)."""
    def __init__(self, **keywords):
        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')

        db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
        if db_module.__name__ == "psycopg2":
            import psycopg2.extensions
            psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
        # pgdb has no `port` keyword; it expects "host:port" instead.
        if db_module.__name__ == "pgdb" and 'port' in keywords:
            keywords["host"] += ":" + str(keywords.pop('port'))

        # if db is not provided postgres driver will take it from PGDATABASE environment variable
        if 'db' in keywords:
            keywords['database'] = keywords.pop('db')

        self.dbname = "postgres"
        self.paramstyle = db_module.paramstyle

        DB.__init__(self, db_module, keywords)
        self.supports_multiple_insert = True
        self._sequences = None

    def _process_insert_query(self, query, tablename, seqname):
        if seqname is None:
            # when seqname is not provided guess the seqname and make sure it exists
            seqname = tablename + "_id_seq"
            if seqname not in self._get_all_sequences():
                seqname = None

        if seqname:
            query += "; SELECT currval('%s')" % seqname

        return query

    def _get_all_sequences(self):
        """Query postgres to find names of all sequences used in this database."""
        if self._sequences is None:
            q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
            self._sequences = set([c.relname for c in self.query(q)])
        return self._sequences

    def _connect(self, keywords):
        conn = DB._connect(self, keywords)
        try:
            conn.set_client_encoding('UTF8')
        except AttributeError:
            # fallback for pgdb driver
            conn.cursor().execute("set client_encoding to 'UTF-8'")
        return conn

    def _connect_with_pooling(self, keywords):
        conn = DB._connect_with_pooling(self, keywords)
        # reach through the pooled-connection wrappers to the raw connection
        conn._con._con.set_client_encoding('UTF8')
        return conn
class MySQLDB(DB):
    """MySQL driver (MySQLdb / pymysql / mysql.connector)."""

    # Keyword each driver uses for the connection password.
    _PW_KEYWORDS = {
        'MySQLdb': 'passwd',
        'pymysql': 'password',
        'mysql.connector': 'password',
    }

    def __init__(self, **keywords):
        db = import_driver(["MySQLdb", "pymysql", "mysql.connector"],
                           preferred=keywords.pop('driver', None))

        # Translate the generic `pw` keyword into the driver-specific name.
        pw_key = self._PW_KEYWORDS.get(db.__name__)
        if pw_key is not None and 'pw' in keywords:
            keywords[pw_key] = keywords.pop('pw')

        if 'charset' not in keywords:
            keywords['charset'] = 'utf8'
        elif keywords['charset'] is None:
            # charset=None means "let the driver decide".
            del keywords['charset']

        self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
        self.dbname = "mysql"
        DB.__init__(self, db, keywords)
        self.supports_multiple_insert = True

    def _process_insert_query(self, query, tablename, seqname):
        # MySQL requires a follow-up query to fetch the inserted row's id.
        return query, SQLQuery('SELECT last_insert_id();')

    def _get_insert_default_values_query(self, table):
        # MySQL's spelling of an all-defaults INSERT.
        return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
    """Import and return the first importable module from *drivers*.

    :param drivers: candidate module names, tried in order.
    :param preferred: when given, only this module is tried.
    :return: the imported module; for dotted names the leaf submodule is
        returned (matching the old ``__import__(name, ..., ['x'])`` trick).
    :raises ImportError: if none of the candidates can be imported.
    """
    import importlib

    if preferred:
        drivers = [preferred]
    for name in drivers:
        try:
            return importlib.import_module(name)
        except ImportError:
            continue
    raise ImportError("Unable to import " + " or ".join(drivers))
class SqliteDB(DB):
    """SQLite driver (sqlite3 / pysqlite2 / sqlite)."""
    def __init__(self, **keywords):
        db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))

        if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
            db.paramstyle = 'qmark'

            # sqlite driver doesn't create datetime objects for timestamp columns unless `detect_types` option is passed.
            # It seems to be supported in sqlite3 and pysqlite2 drivers, not sure about sqlite.
            keywords.setdefault('detect_types', db.PARSE_DECLTYPES)

        self.paramstyle = db.paramstyle
        keywords['database'] = keywords.pop('db')
        keywords['pooling'] = False # sqlite don't allows connections to be shared by threads
        self.dbname = "sqlite"
        DB.__init__(self, db, keywords)

    def _process_insert_query(self, query, tablename, seqname):
        return query, SQLQuery('SELECT last_insert_rowid();')

    def query(self, *a, **kw):
        out = DB.query(self, *a, **kw)
        if isinstance(out, iterbetter):
            # sqlite cursors report rowcount as -1 for SELECTs, so the
            # __len__ installed by DB.query would be wrong; remove it.
            del out.__len__
        return out
class FirebirdDB(DB):
    """Firebird Database (via the `kinterbasdb` driver)."""
    def __init__(self, **keywords):
        # Previously a failed import was silently swallowed (db = None),
        # which later surfaced as a confusing AttributeError on
        # `db.paramstyle`.  Let the ImportError propagate instead, matching
        # the behaviour of the other drivers.
        import kinterbasdb as db

        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')
        keywords['database'] = keywords.pop('db')

        self.paramstyle = db.paramstyle
        DB.__init__(self, db, keywords)

    def delete(self, table, where=None, using=None, vars=None, _test=False):
        # firebird doesn't support using clause
        using=None
        return DB.delete(self, table, where, using, vars, _test)

    def sql_clauses(self, what, tables, where, group, order, limit, offset):
        # Firebird spells LIMIT/OFFSET as FIRST/SKIP, placed right after SELECT.
        return (
            ('SELECT', ''),
            ('FIRST', limit),
            ('SKIP', offset),
            ('', what),
            ('FROM', sqllist(tables)),
            ('WHERE', where),
            ('GROUP BY', group),
            ('ORDER BY', order)
        )
class MSSQLDB(DB):
    """Microsoft SQL Server driver (via pymssql)."""
    def __init__(self, **keywords):
        import pymssql as db
        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')
        keywords['database'] = keywords.pop('db')
        self.dbname = "mssql"
        DB.__init__(self, db, keywords)

    def _process_query(self, sql_query):
        """Takes the SQLQuery object and returns query string and parameters.
        """
        # MSSQLDB expects params to be a tuple.
        # Overwriting the default implementation to convert params to tuple.
        paramstyle = getattr(self, 'paramstyle', 'pyformat')
        query = sql_query.query(paramstyle)
        params = sql_query.values()
        return query, tuple(params)

    def sql_clauses(self, what, tables, where, group, order, limit, offset):
        # SQL Server uses TOP instead of LIMIT.
        return (
            ('SELECT', what),
            ('TOP', limit),
            ('FROM', sqllist(tables)),
            ('WHERE', where),
            ('GROUP BY', group),
            ('ORDER BY', order),
            ('OFFSET', offset))

    def _test(self):
        """Test LIMIT.

        Fake presence of pymssql module for running tests.
        >>> import sys
        >>> sys.modules['pymssql'] = sys.modules['sys']

        MSSQL has TOP clause instead of LIMIT clause.
        >>> db = MSSQLDB(db='test', user='joe', pw='secret')
        >>> db.select('foo', limit=4, _test=True)
        <sql: 'SELECT * TOP 4 FROM foo'>
        """
        pass
class OracleDB(DB):
    """Oracle driver (via cx_Oracle)."""
    def __init__(self, **keywords):
        import cx_Oracle as db
        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')

        #@@ TODO: use db.makedsn if host, port is specified
        keywords['dsn'] = keywords.pop('db')
        self.dbname = 'oracle'
        db.paramstyle = 'numeric'
        self.paramstyle = db.paramstyle

        # oracle doesn't support pooling
        keywords.pop('pooling', None)
        DB.__init__(self, db, keywords)

    def _process_insert_query(self, query, tablename, seqname):
        if seqname is None:
            # It is not possible to get seq name from table name in Oracle
            return query
        else:
            return query + "; SELECT %s.currval FROM dual" % seqname
def dburl2dict(url):
    """
    Takes a URL to a database and parses it into an equivalent dictionary.

    >>> dburl2dict('postgres:///mygreatdb') == {'pw': None, 'dbn': 'postgres', 'db': 'mygreatdb', 'host': None, 'user': None, 'port': None}
    True
    >>> dburl2dict('postgres://james:day@serverfarm.example.net:5432/mygreatdb') == {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': 5432}
    True
    >>> dburl2dict('postgres://james:day@serverfarm.example.net/mygreatdb') == {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
    True
    >>> dburl2dict('postgres://james:d%40y@serverfarm.example.net/mygreatdb') == {'pw': 'd@y', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
    True
    >>> dburl2dict('mysql://james:d%40y@serverfarm.example.net/mygreatdb') == {'pw': 'd@y', 'dbn': 'mysql', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
    True
    """
    # Percent-decode the whole URL first, then split it into components.
    parsed = urlparse.urlparse(unquote(url))
    return dict(
        dbn=parsed.scheme,
        user=parsed.username,
        pw=parsed.password,
        db=parsed.path[1:],      # strip the leading '/'
        host=parsed.hostname,
        port=parsed.port,
    )
# Registry mapping a `dbn` scheme name to its driver class; populated via
# register_database().
_databases = {}

def database(dburl=None, **params):
    """Creates appropriate database using params.

    Pooling will be enabled if DBUtils module is available.
    Pooling can be disabled by passing pooling=False in params.
    """
    # With no arguments at all, fall back to the conventional env variable.
    if not dburl and not params:
        dburl = os.environ['DATABASE_URL']
    if dburl:
        params = dburl2dict(dburl)
    dbn = params.pop('dbn')
    if dbn not in _databases:
        raise UnknownDB(dbn)
    return _databases[dbn](**params)
def register_database(name, clazz):
    """
    Register a database.

    >>> class LegacyDB(DB):
    ...     def __init__(self, **params):
    ...        pass
    ...
    >>> register_database('legacy', LegacyDB)
    >>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
    """
    _databases[name] = clazz

# Built-in drivers, keyed by the `dbn` / URL scheme name.
register_database('mysql', MySQLDB)
register_database('postgres', PostgresDB)
register_database('sqlite', SqliteDB)
register_database('firebird', FirebirdDB)
register_database('mssql', MSSQLDB)
register_database('oracle', OracleDB)
def _interpolate(format):
    """
    Takes a format string and returns a list of 2-tuples of the form
    (boolean, string) where boolean says whether string should be evaled
    or not.

    from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
    """
    def matchorfail(text, pos):
        # Match the next Python token at `pos`, or raise a parse error.
        match = tokenprog.match(text, pos)
        if match is None:
            raise _ItplError(text, pos)
        return match, match.end()

    namechars = "abcdefghijklmnopqrstuvwxyz" \
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
    chunks = []
    pos = 0

    while 1:
        dollar = format.find("$", pos)
        if dollar < 0:
            break
        # NOTE(review): indexes past the end when the string terminates in
        # a bare '$' -- confirm callers never pass such input.
        nextchar = format[dollar + 1]

        if nextchar == "{":
            # ${expr} form: scan tokens until the braces balance.
            chunks.append((0, format[pos:dollar]))
            pos, level = dollar + 2, 1
            while level:
                match, pos = matchorfail(format, pos)
                tstart, tend = match.regs[3]
                token = format[tstart:tend]
                if token == "{":
                    level = level + 1
                elif token == "}":
                    level = level - 1
            chunks.append((1, format[dollar + 2:pos - 1]))
        elif nextchar in namechars:
            # $name form, possibly followed by attribute access, calls or
            # subscripts: $a.b.c(x)[i]
            chunks.append((0, format[pos:dollar]))
            match, pos = matchorfail(format, dollar + 1)
            while pos < len(format):
                if format[pos] == "." and \
                    pos + 1 < len(format) and format[pos + 1] in namechars:
                    match, pos = matchorfail(format, pos + 1)
                elif format[pos] in "([":
                    # consume a balanced (...) or [...] trailer
                    pos, level = pos + 1, 1
                    while level:
                        match, pos = matchorfail(format, pos)
                        tstart, tend = match.regs[3]
                        token = format[tstart:tend]
                        if token[0] in "([":
                            level = level + 1
                        elif token[0] in ")]":
                            level = level - 1
                else:
                    break
            chunks.append((1, format[dollar + 1:pos]))
        else:
            # lone '$' (or '$$' escape): emit literally
            chunks.append((0, format[pos:dollar + 1]))
            pos = dollar + 1 + (nextchar == "$")

    if pos < len(format):
        chunks.append((0, format[pos:]))
    return chunks
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 34.045245 | 477 | 0.538326 |
8ad56120ef43809ad1e1f236241904bcc3d569b4 | 455 | py | Python | exercicio18.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | exercicio18.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | exercicio18.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | #Faça um programa que peça o tamanho de um arquivo para download (em MB) e a velocidade de um link de Internet (em Mbps), calcule e informe o tempo aproximado de download do arquivo usando este link (em minutos).
print('Tempo para download de arquivos')
mb=float(input('QUal o tamanho do arquivo em MB?: '))
net=float(input('Qual a velocidade da internet em mbps?: '))
ts=mb/(net/8)
tm=ts/60
print('O seu arquivo vai baixar em {:.2f} minutos'.format(tm)) | 56.875 | 212 | 0.738462 |
09aed436bc13f13e54344d69d8e1286d9bc00486 | 609 | py | Python | conary/repository/netrepos/__init__.py | sassoftware/conary | d418968acd5e11ee17ed6d91ca395ea10a040222 | [
"Apache-2.0"
] | 43 | 2015-03-31T01:37:10.000Z | 2021-11-14T16:26:48.000Z | conary/repository/netrepos/__init__.py | sassoftware/conary | d418968acd5e11ee17ed6d91ca395ea10a040222 | [
"Apache-2.0"
] | 9 | 2015-06-10T16:39:41.000Z | 2020-01-27T16:35:01.000Z | conary/repository/netrepos/__init__.py | sassoftware/conary | d418968acd5e11ee17ed6d91ca395ea10a040222 | [
"Apache-2.0"
] | 9 | 2015-04-07T08:12:37.000Z | 2020-01-26T09:54:18.000Z | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = [ 'repos' ]
| 32.052632 | 74 | 0.743842 |
1f94ebf11f393920b3c0087d3a98a7fc065df974 | 4,760 | py | Python | lib/models/cifar_resnet.py | jeffreyzpan/adversarial-playground | 6df17e7b8b2e41bfbe5966006604805e199d2f87 | [
"MIT"
] | 21 | 2020-01-30T00:22:45.000Z | 2021-11-30T03:43:28.000Z | lib/models/cifar_resnet.py | jeffreyzpan/adversarial-playground | 6df17e7b8b2e41bfbe5966006604805e199d2f87 | [
"MIT"
] | 3 | 2020-01-12T06:02:11.000Z | 2020-06-05T17:55:13.000Z | lib/models/cifar_resnet.py | jeffreyzpan/adversarial-playground | 6df17e7b8b2e41bfbe5966006604805e199d2f87 | [
"MIT"
] | null | null | null | '''
Cifar ResNet implementation modified from https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet-18/34)."""
    # output channels = expansion * planes
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        # two 3x3 convolutions; the first may downsample via `stride`
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # identity shortcut by default; 1x1 projection when the spatial size
        # or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # residual connection, then final activation
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152)."""
    # output channels = expansion * planes
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        # 1x1 reduce, 3x3 (possibly strided), 1x1 expand
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        # identity shortcut by default; 1x1 projection when the spatial size
        # or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone for CIFAR-sized (32x32) inputs."""
    def __init__(self, block, num_blocks, num_classes=10, thermometer_encode=False, level=-1):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.thermometer_encode = thermometer_encode
        if thermometer_encode:
            # assumes thermometer-encoded input arrives as 3 tensors with
            # `level` channels each (see forward) -- TODO confirm with the
            # data pipeline
            self.conv1 = nn.Conv2d(3*level, 64, kernel_size=3, stride=1, padding=1, bias=False)
        else:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # four stages; stages 2-4 halve the spatial resolution
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # only the first block of a stage may downsample; the rest use stride 1
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        if self.thermometer_encode:
            # concatenate the three per-channel encodings along the channel dim
            x = torch.cat((x[0], x[1], x[2]), dim=1)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # global average pool to 1x1, flatten, classify
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def cifar_resnet18(num_classes=10, thermometer_encode=False, level=-1):
    """ResNet-18 (BasicBlock, 2-2-2-2) for CIFAR-sized inputs."""
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes,
                  thermometer_encode=thermometer_encode, level=level)


def cifar_resnet34(num_classes=10, thermometer_encode=False, level=-1):
    """ResNet-34 (BasicBlock, 3-4-6-3) for CIFAR-sized inputs."""
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes,
                  thermometer_encode=thermometer_encode, level=level)


def cifar_resnet50(num_classes=10, thermometer_encode=False, level=-1):
    """ResNet-50 (Bottleneck, 3-4-6-3) for CIFAR-sized inputs."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes,
                  thermometer_encode=thermometer_encode, level=level)


def cifar_resnet101(num_classes=10, thermometer_encode=False, level=-1):
    """ResNet-101 (Bottleneck, 3-4-23-3) for CIFAR-sized inputs."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes,
                  thermometer_encode=thermometer_encode, level=level)


def cifar_resnet152(num_classes=10, thermometer_encode=False, level=-1):
    """ResNet-152 (Bottleneck, 3-8-36-3) for CIFAR-sized inputs."""
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes,
                  thermometer_encode=thermometer_encode, level=level)
990cee2ea14dcc8f722d20cb8d904d1b84b5824d | 1,249 | py | Python | tests/test_inchi2gv.py | Midnighter/component-contribution | e580480a1979fa7b57b378c9a02a99f2f0b5bde6 | [
"MIT"
] | 1 | 2018-01-31T13:44:03.000Z | 2018-01-31T13:44:03.000Z | tests/test_inchi2gv.py | Midnighter/component-contribution | e580480a1979fa7b57b378c9a02a99f2f0b5bde6 | [
"MIT"
] | 19 | 2017-06-07T06:28:55.000Z | 2018-06-05T13:14:17.000Z | tests/test_inchi2gv.py | Midnighter/component-contribution | e580480a1979fa7b57b378c9a02a99f2f0b5bde6 | [
"MIT"
] | 1 | 2016-12-12T14:33:25.000Z | 2016-12-12T14:33:25.000Z | import sys
# NOTE(review): legacy Python 2 script (uses print statements).
# Smoke-tests group decomposition of compounds for group-contribution
# free-energy estimation.
sys.path.append('../python')

import inchi2gv
from compound_cacher import CompoundCacher
from molecule import Molecule

#logger = logging.getLogger('')
#logger.setLevel(logging.DEBUG)

ccache = CompoundCacher('../cache/compounds.json')
groups_data = inchi2gv.init_groups_data()
group_list = groups_data.GetGroupNames()
group_names = groups_data.GetGroupNames()
decomposer = inchi2gv.InChIDecomposer(groups_data)

# test the decomposition of ATP into groups
ATP_inchi = ccache.get_compound('C00002').inchi
group_def = decomposer.inchi_to_groupvec(ATP_inchi)
for j, group_name in enumerate(group_names):
    if group_def[j] != 0:
        print group_name, ' x %d' % group_def[j]

# SMARTS patterns for aromatic oxygen/nitrogen in various charge states
patterns = ['c~[O;+0]', 'c~[O;+1]', 'c~[n;+1]~c', 'c~[n;+0]~c', 'c~[n;-1]~c']
for cid in ['C00255', 'C01007']:
    comp = ccache.get_compound(cid)
    print "-"*50, '\n%s' % cid
    inchi = comp.inchi
    mol = Molecule.FromInChI(inchi)
    print mol.ToSmiles()
    print mol.FindSmarts("c~[n;+1]~c")
    try:
        groupvec = decomposer.inchi_to_groupvec(inchi)
        sys.stdout.write(str(groupvec) + '\n')
    except inchi2gv.GroupDecompositionError as e:
        # decomposition can legitimately fail; report the debug table
        sys.stderr.write(str(e) + '\n')
        sys.stderr.write(e.GetDebugTable())
6c84df373964059e728c72755ac83b10d2abd9b4 | 652 | py | Python | utils/environment.py | hbontempo-br/registration-validator-api | 7bb2b5cd7727e798bf0d4cd6c925cfec65dabff8 | [
"MIT"
] | null | null | null | utils/environment.py | hbontempo-br/registration-validator-api | 7bb2b5cd7727e798bf0d4cd6c925cfec65dabff8 | [
"MIT"
] | null | null | null | utils/environment.py | hbontempo-br/registration-validator-api | 7bb2b5cd7727e798bf0d4cd6c925cfec65dabff8 | [
"MIT"
] | null | null | null | import os
def get_environment_variable(variable_name: str, default_value: str = None):
"""
Gets an environment variable value, assuming the default value if it is not
already defined on the underlying operating system.
:param variable_name: Environment variable name.
:param default_value: Environment variable default value.
:return: Environment variable value or default value.
"""
variable_value = os.environ.get(variable_name, default_value)
if not variable_value:
raise AttributeError(
f"Can't find a value for environment variable '{variable_name}'!"
)
return variable_value
| 34.315789 | 79 | 0.722393 |
2b338d680f8423fa7d68b15272cd4695b9d3ad0e | 21,294 | py | Python | ptflops/flops_counter.py | zamling/flops-counter.pytorch | c4d510659e20d4b9f1e597d4fae852b412a23d6a | [
"MIT"
] | null | null | null | ptflops/flops_counter.py | zamling/flops-counter.pytorch | c4d510659e20d4b9f1e597d4fae852b412a23d6a | [
"MIT"
] | null | null | null | ptflops/flops_counter.py | zamling/flops-counter.pytorch | c4d510659e20d4b9f1e597d4fae852b412a23d6a | [
"MIT"
] | null | null | null | '''
Copyright (C) 2019 Sovrasov V. - All Rights Reserved
* You may use, distribute and modify this code under the
* terms of the MIT license.
* You should have received a copy of the MIT license with
* this file. If not visit https://opensource.org/licenses/MIT
'''
import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
class ModelFormat(object):
    def __init__(self, model):
        # Wrap a model; presumably one already annotated with __flops__ /
        # __params__ / __batch_counter__ by the flops-counting hooks --
        # TODO confirm with the counting entry point.
        self.model = model
def get_total_flops(self, ignore_batch = True, is_string = True, unit ='GMac' ):
'''
param:
ignore_batch: ignore_batch = True, the flops is divided by batch size, otherwise consider the batch size.
is_string: is_string=False, return a int value
'''
flops_sum = 0
if ignore_batch:
batches_count = self.model.__batch_counter__
else:
batches_count = 1
for module in self.model.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
if is_string:
return flops_to_string(flops_sum/batches_count,units=unit)
else:
return flops_sum/batches_count
def get_total_param(self,is_string = True, unit =None):
params = get_model_parameters_number(self.model)
if is_string:
return params_to_string(params,units=unit)
else:
return params
def get_layer_flops(self,name,ignore_batch = True, is_string = True, unit ='GMac'):
'''
it is recommended that add the name when using nn.Sequential
'''
if ignore_batch:
batches_count = self.model.__batch_counter__
else:
batches_count = 1
def accumulate_flops(model):
if is_supported_instance(model):
return model.__flops__ / batches_count
else:
sum = 0
for m in model.children():
sum += accumulate_flops(m)
return sum
for name_, module in self.model.named_modules():
if name_ == name:
if is_string:
return flops_to_string(accumulate_flops(module) / batches_count, units=unit)
else:
return accumulate_flops(module)
print("Can not find this layer, please check the layer's name")
return None
def get_layer_params(self,name,is_string = True,unit =None):
def accumulate_params(model):
if is_supported_instance(model):
return model.__params__
else:
sum = 0
for m in model.children():
sum += accumulate_params(m)
# recursive
return sum
for name_, module in self.model.named_modules():
if name_ == name:
params = accumulate_params(module)
if is_string:
return params_to_string(params,units=unit)
else:
return params
def output_info_to_file(self,file_path):
total_flops = self.get_total_flops(is_string=False)
total_params = self.get_total_param(is_string=False)
with open(file_path,'w+') as f:
print_model_with_flops(model=self.model,total_flops=total_flops,total_params=total_params,ost=f)
def get_model_info(model, input_res,
                   input_constructor=None, ost=sys.stdout,
                   verbose=False, ignore_modules=[],
                   custom_modules_hooks={}):
    """Instrument *model*, run one dummy forward pass, and return a
    ModelFormat wrapper for querying FLOP/parameter statistics.

    :param model: nn.Module to analyse (put into eval mode; hooks stay
        attached afterwards, unlike get_model_complexity_info).
    :param input_res: input shape WITHOUT the batch dim, e.g. (3, 224, 224).
    :param input_constructor: optional callable mapping input_res to a
        kwargs dict, used instead of the default all-ones batch.
    :param ost: output stream for warnings.
    :param verbose: warn about module types treated as zero-op.
    :param ignore_modules: module types excluded from counting.
    :param custom_modules_hooks: {module_type: hook} overrides.

    NOTE(review): ignore_modules/custom_modules_hooks are mutable default
    arguments shared across calls; also mutates the module-level
    CUSTOM_MODULES_MAPPING global.
    """
    assert type(input_res) is tuple
    assert len(input_res) >= 1
    assert isinstance(model, nn.Module)
    global CUSTOM_MODULES_MAPPING
    CUSTOM_MODULES_MAPPING = custom_modules_hooks
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count(ost=ost, verbose=verbose,
                                  ignore_list=ignore_modules)
    if input_constructor:
        input = input_constructor(input_res)
        _ = flops_model(**input)
    else:
        try:
            # Dummy batch of size 1 matching the model's dtype/device.
            batch = torch.ones(()).new_empty((1, *input_res),
                                             dtype=next(flops_model.parameters()).dtype,
                                             device=next(flops_model.parameters()).device)
        except StopIteration:
            # Model has no parameters: fall back to default dtype/device.
            batch = torch.ones(()).new_empty((1, *input_res))

        _ = flops_model(batch)
    return ModelFormat(flops_model)
def get_model_complexity_info(model, input_res,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None, ost=sys.stdout,
                              verbose=False, ignore_modules=[],
                              custom_modules_hooks={}):
    """Compute (flops, params) for one dummy forward pass of *model*.

    Unlike get_model_info, this removes the counting hooks and resets
    CUSTOM_MODULES_MAPPING before returning.

    :param print_per_layer_stat: print the annotated model repr to *ost*.
    :param as_strings: return formatted strings instead of numbers.
    :return: (flops, params) as strings or numbers.

    NOTE(review): the setup portion duplicates get_model_info almost
    line-for-line — a shared helper would avoid divergence.
    """
    assert type(input_res) is tuple
    assert len(input_res) >= 1
    assert isinstance(model, nn.Module)
    global CUSTOM_MODULES_MAPPING
    CUSTOM_MODULES_MAPPING = custom_modules_hooks
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count(ost=ost, verbose=verbose,
                                  ignore_list=ignore_modules)
    if input_constructor:
        input = input_constructor(input_res)
        _ = flops_model(**input)
    else:
        try:
            # Dummy batch of size 1 matching the model's dtype/device.
            batch = torch.ones(()).new_empty((1, *input_res),
                                             dtype=next(flops_model.parameters()).dtype,
                                             device=next(flops_model.parameters()).device)
        except StopIteration:
            # Model has no parameters: fall back to default dtype/device.
            batch = torch.ones(()).new_empty((1, *input_res))

        _ = flops_model(batch)
    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(flops_model, flops_count, params_count, ost=ost)
    flops_model.stop_flops_count()
    CUSTOM_MODULES_MAPPING = {}

    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)

    return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=2):
    """Render a MAC count as text, e.g. ``'1.23 GMac'``.

    When *units* is None the largest fitting unit is chosen automatically;
    otherwise the requested unit ('GMac', 'MMac', 'KMac') is used, and any
    unrecognised unit falls back to plain ' Mac'.
    """
    divisors = (('GMac', 10. ** 9), ('MMac', 10. ** 6), ('KMac', 10. ** 3))
    if units is None:
        # Pick the biggest unit whose magnitude the value reaches.
        for name, div in divisors:
            if flops // div > 0:
                return str(round(flops / div, precision)) + ' ' + name
        return str(flops) + ' Mac'
    for name, div in divisors:
        if units == name:
            return str(round(flops / div, precision)) + ' ' + units
    return str(flops) + ' Mac'
def params_to_string(params_num, units=None, precision=2):
    """Render a parameter count as text, e.g. ``'1.5 M'``.

    With *units* None the unit ('M'/'k') is chosen automatically; an
    explicit 'M' or 'K' forces that unit, and any other value returns the
    bare number.

    Fixes: the automatic branch previously hard-coded a precision of 2
    (ignoring *precision*) and used integer division of the count, which
    truncated under Python 2; float divisors now match the explicit-unit
    branch.
    """
    if units is None:
        if params_num // 10 ** 6 > 0:
            return str(round(params_num / 10. ** 6, precision)) + ' M'
        elif params_num // 10 ** 3 > 0:
            return str(round(params_num / 10. ** 3, precision)) + ' k'
        else:
            return str(params_num)
    else:
        if units == 'M':
            return str(round(params_num / 10.**6, precision)) + ' ' + units
        elif units == 'K':
            return str(round(params_num / 10.**3, precision)) + ' ' + units
        else:
            return str(params_num)
def print_model_with_flops(model, total_flops, total_params, units='GMac',
                           precision=3, ost=sys.stdout):
    """Print the model repr annotated with per-module parameter/MAC counts
    and their share of the totals, then restore the original extra_repr.

    :param total_flops: total MACs used for the percentage shares.
    :param total_params: total parameter count used for the shares.
    :param ost: stream the repr is printed to.
    """
    def accumulate_params(self):
        # Supported leaves report their own count; containers sum children.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            # recursive
            return sum

    def accumulate_flops(self):
        # Per-batch figure: divide by the number of batches seen so far.
        if is_supported_instance(self):
            return self.__flops__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        # Builds "<params>, <share> Params, <macs>, <share> MACs, <orig>".
        accumulated_params_num = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([params_to_string(accumulated_params_num,
                                           units='M', precision=precision),
                          '{:.3%} Params'.format(accumulated_params_num / total_params),
                          flops_to_string(accumulated_flops_cost,
                                          units=units, precision=precision),
                          '{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
                          self.original_extra_repr()])

    def add_extra_repr(m):
        # Bind the helpers onto each module and swap in the augmented repr;
        # the original extra_repr is stashed so it can be restored later.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr

    def del_extra_repr(m):
        # Undo add_extra_repr, returning the module to its original state.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(repr(model), file=ost)
    model.apply(del_extra_repr)
def get_model_parameters_number(model):
    """Total number of trainable (requires_grad) parameters in *model*."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
def add_flops_counting_methods(net_main_module):
    """Bind the start/stop/reset/compute FLOP-counting functions onto the
    given module instance and reset its counters; returns the same module."""
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object;
    # __get__ produces a bound method with the module as `self`.
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(
        net_main_module)

    net_main_module.reset_flops_count()

    return net_main_module
def compute_average_flops_cost(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Returns current mean flops consumption per image.

    """
    # Average over every batch seen since the last reset.
    batches_count = self.__batch_counter__
    flops_sum = 0
    params_sum = 0
    for module in self.modules():
        if is_supported_instance(module):
            flops_sum += module.__flops__
    # Parameters are counted once for the whole model, not accumulated.
    params_sum = get_model_parameters_number(self)
    return flops_sum / batches_count, params_sum
def start_flops_count(self, **kwargs):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Activates the computation of mean flops consumption per image.
    Call it before you run the network.

    """
    add_batch_counter_hook_function(self)

    # Tracks module types already handled so each warning fires only once.
    seen_types = set()

    def add_flops_counter_hook_function(module, ost, verbose, ignore_list):
        # Explicitly ignored supported types get a zero param count, no hook.
        if type(module) in ignore_list:
            seen_types.add(type(module))
            if is_supported_instance(module):
                module.__params__ = 0
        elif is_supported_instance(module):
            # Idempotent: don't register a second hook on the same module.
            if hasattr(module, '__flops_handle__'):
                return
            # Custom hooks take precedence over the built-in registry.
            if type(module) in CUSTOM_MODULES_MAPPING:
                handle = module.register_forward_hook(
                    CUSTOM_MODULES_MAPPING[type(module)])
            else:
                handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
            module.__flops_handle__ = handle
            seen_types.add(type(module))
        else:
            # Warn once per unsupported (zero-op) type; containers excluded.
            if verbose and not type(module) in (nn.Sequential, nn.ModuleList) and \
                    not type(module) in seen_types:
                print('Warning: module ' + type(module).__name__ +
                      ' is treated as a zero-op.', file=ost)
            seen_types.add(type(module))

    self.apply(partial(add_flops_counter_hook_function, **kwargs))
def stop_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Stops computing the mean flops consumption per image.
    Call whenever you want to pause the computation.

    """
    # Detach the batch counter hook and every per-module FLOP hook.
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.

    Resets statistics computed so far.

    """
    # Zero the batch counter and every module's __flops__/__params__.
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
# ---- Internal functions
def empty_flops_counter_hook(module, input, output):
    # Hook for modules treated as zero-cost: leaves the counter unchanged.
    module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
    """Upsampling: count one operation per produced output element."""
    out = output[0]
    elements = 1
    for dim in out.shape:
        elements *= dim
    module.__flops__ += int(elements)
def relu_flops_counter_hook(module, input, output):
    """Elementwise activation: one operation per output element."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """Fully connected layer: one MAC per input element per output feature."""
    first_input = input[0]
    # pytorch checks dimensions, so here we don't care much
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(first_input.shape) * out_features)
def pool_flops_counter_hook(module, input, output):
    """Pooling: counted as one operation per input element."""
    module.__flops__ += int(np.prod(input[0].shape))
def bn_flops_counter_hook(module, input, output):
    """BatchNorm: ~1 operation per input element for the normalisation,
    doubled when the affine (scale + shift) transform is enabled.

    Fix: removed a stray no-op expression statement (`module.affine` on its
    own line) that had no effect.
    """
    first_input = input[0]
    batch_flops = np.prod(first_input.shape)
    if module.affine:
        batch_flops *= 2
    module.__flops__ += int(batch_flops)
def conv_flops_counter_hook(conv_module, input, output):
    """Convolution MACs: kernel volume x in-channels x (out-channels/groups)
    per output position, times the number of output positions; a bias adds
    one extra operation per output element."""
    # Can have multiple inputs, getting the first one
    first_input = input[0]
    batch_size = first_input.shape[0]
    output_dims = list(output.shape[2:])

    kernel_volume = int(np.prod(list(conv_module.kernel_size)))
    filters_per_channel = conv_module.out_channels // conv_module.groups
    conv_per_position_flops = kernel_volume * conv_module.in_channels * filters_per_channel

    active_elements_count = batch_size * int(np.prod(output_dims))
    overall_flops = conv_per_position_flops * active_elements_count

    if conv_module.bias is not None:
        overall_flops += conv_module.out_channels * active_elements_count

    conv_module.__flops__ += int(overall_flops)
def batch_counter_hook(module, input, output):
    """Accumulate the number of samples (batch dimension of the first
    positional input) the module has processed.

    Fix: removed a dead `pass` statement and confined the warning to the
    branch where no positional inputs are present.
    """
    batch_size = 1
    if len(input) > 0:
        # Can have multiple inputs, getting the first one
        batch_size = len(input[0])
    else:
        print('Warning! No positional inputs found for a module,'
              ' assuming batch size is 1.')
    module.__batch_counter__ += batch_size
def rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
    """Add the per-timestep FLOPs of one recurrent layer to *flops* and
    return the new total; gate costs depend on the cell type."""
    hidden = rnn_module.hidden_size
    # matrix mult of input->hidden and hidden->hidden weights
    flops += w_ih.shape[0] * w_ih.shape[1]
    flops += w_hh.shape[0] * w_hh.shape[1]
    if isinstance(rnn_module, (nn.RNN, nn.RNNCell)):
        # add both contributions
        flops += hidden
    elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)):
        # hadamard of r
        flops += hidden
        # adding operations from both states
        flops += hidden * 3
        # last two hadamard products and add
        flops += hidden * 3
    elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)):
        # adding operations from both states (four gates)
        flops += hidden * 4
        # two hadamard products and add for the C state
        flops += hidden * 3
        # final hadamard
        flops += hidden * 3
    return flops
def rnn_flops_counter_hook(rnn_module, input, output):
    """
    Takes into account batch goes at first position, contrary
    to pytorch common rule (but actually it doesn't matter).

    IF sigmoid and tanh are made hard, only a comparison FLOPS should be accurate
    """
    flops = 0
    # input is a tuple containing a sequence to process and (optionally) hidden state
    inp = input[0]
    # assumes input layout (batch, seq, features) — see docstring.
    batch_size = inp.shape[0]
    seq_length = inp.shape[1]
    num_layers = rnn_module.num_layers

    for i in range(num_layers):
        # Per-layer weight matrices follow PyTorch's naming convention.
        w_ih = rnn_module.__getattr__('weight_ih_l' + str(i))
        w_hh = rnn_module.__getattr__('weight_hh_l' + str(i))
        if i == 0:
            input_size = rnn_module.input_size
        else:
            input_size = rnn_module.hidden_size
        flops = rnn_flops(flops, rnn_module, w_ih, w_hh, input_size)
        if rnn_module.bias:
            # One addition per bias element.
            b_ih = rnn_module.__getattr__('bias_ih_l' + str(i))
            b_hh = rnn_module.__getattr__('bias_hh_l' + str(i))
            flops += b_ih.shape[0] + b_hh.shape[0]

    # Per-timestep cost scaled by batch size and sequence length.
    flops *= batch_size
    flops *= seq_length
    if rnn_module.bidirectional:
        # Two directions double the work.
        flops *= 2
    rnn_module.__flops__ += int(flops)
def rnn_cell_flops_counter_hook(rnn_cell_module, input, output):
    # Single-step cell variant of rnn_flops_counter_hook: no sequence
    # dimension and no layer loop, scaled by batch size only.
    flops = 0
    inp = input[0]
    batch_size = inp.shape[0]
    w_ih = rnn_cell_module.__getattr__('weight_ih')
    w_hh = rnn_cell_module.__getattr__('weight_hh')
    input_size = inp.shape[1]
    flops = rnn_flops(flops, rnn_cell_module, w_ih, w_hh, input_size)
    if rnn_cell_module.bias:
        # One addition per bias element.
        b_ih = rnn_cell_module.__getattr__('bias_ih')
        b_hh = rnn_cell_module.__getattr__('bias_hh')
        flops += b_ih.shape[0] + b_hh.shape[0]

    flops *= batch_size
    rnn_cell_module.__flops__ += int(flops)
def add_batch_counter_variables_or_reset(module):
    # Initialise (or reset) the count of samples seen by this module.
    module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
    # Idempotent: skip if a batch-counter hook is already attached.
    if hasattr(module, '__batch_counter_handle__'):
        return

    handle = module.register_forward_hook(batch_counter_hook)
    module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
    """Detach the batch-counter forward hook, if one is attached."""
    if not hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__.remove()
    del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
    """Initialise (or reset) __flops__/__params__ on supported modules,
    warning when the names were already present (they may belong to user
    code and will be clobbered).

    Fix: the warning message was missing a space before the module name
    ("...the moduleConv2d...").
    """
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
            print('Warning: variables __flops__ or __params__ are already '
                  'defined for the module ' + type(module).__name__ +
                  ' ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
# User-supplied hook overrides; populated by get_model_info /
# get_model_complexity_info via their custom_modules_hooks argument.
CUSTOM_MODULES_MAPPING = {}

# Built-in registry: torch module type -> FLOP-counting forward hook.
MODULES_MAPPING = {
    # convolutions
    nn.Conv1d: conv_flops_counter_hook,
    nn.Conv2d: conv_flops_counter_hook,
    nn.Conv3d: conv_flops_counter_hook,
    # activations
    nn.ReLU: relu_flops_counter_hook,
    nn.PReLU: relu_flops_counter_hook,
    nn.ELU: relu_flops_counter_hook,
    nn.LeakyReLU: relu_flops_counter_hook,
    nn.ReLU6: relu_flops_counter_hook,
    # poolings
    nn.MaxPool1d: pool_flops_counter_hook,
    nn.AvgPool1d: pool_flops_counter_hook,
    nn.AvgPool2d: pool_flops_counter_hook,
    nn.MaxPool2d: pool_flops_counter_hook,
    nn.MaxPool3d: pool_flops_counter_hook,
    nn.AvgPool3d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
    # BNs
    nn.BatchNorm1d: bn_flops_counter_hook,
    nn.BatchNorm2d: bn_flops_counter_hook,
    nn.BatchNorm3d: bn_flops_counter_hook,
    # FC
    nn.Linear: linear_flops_counter_hook,
    # Upscale
    nn.Upsample: upsample_flops_counter_hook,
    # Deconvolution
    nn.ConvTranspose1d: conv_flops_counter_hook,
    nn.ConvTranspose2d: conv_flops_counter_hook,
    nn.ConvTranspose3d: conv_flops_counter_hook,
    # RNN
    nn.RNN: rnn_flops_counter_hook,
    nn.GRU: rnn_flops_counter_hook,
    nn.LSTM: rnn_flops_counter_hook,
    nn.RNNCell: rnn_cell_flops_counter_hook,
    nn.LSTMCell: rnn_cell_flops_counter_hook,
    nn.GRUCell: rnn_cell_flops_counter_hook
}
def is_supported_instance(module):
    """Whether a FLOP-counting hook is registered for this module's type."""
    module_type = type(module)
    return module_type in MODULES_MAPPING or module_type in CUSTOM_MODULES_MAPPING
def remove_flops_counter_hook_function(module):
    # Detach the per-module FLOP hook, if one is attached (idempotent).
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            module.__flops_handle__.remove()
            del module.__flops_handle__
| 35.196694 | 113 | 0.646238 |
9799fc23a5ed98da76d23f1e2c53f7c27002c8bc | 632 | py | Python | submission/insurancesubmission/migrations/0019_auto_20210714_2322.py | simonprast/wopi-engine | b3f59782659c8be42f4064bce5281afd391833be | [
"BSD-Source-Code"
] | null | null | null | submission/insurancesubmission/migrations/0019_auto_20210714_2322.py | simonprast/wopi-engine | b3f59782659c8be42f4064bce5281afd391833be | [
"BSD-Source-Code"
] | null | null | null | submission/insurancesubmission/migrations/0019_auto_20210714_2322.py | simonprast/wopi-engine | b3f59782659c8be42f4064bce5281afd391833be | [
"BSD-Source-Code"
] | null | null | null | # Generated by Django 3.1.2 on 2021-07-14 21:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional banking fields (BIC code, IBAN ending) to the
    InsuranceSubmission model."""

    dependencies = [
        ('insurancesubmission', '0018_document_payment_document'),
    ]

    operations = [
        migrations.AddField(
            model_name='insurancesubmission',
            name='bic_code',
            # max_length=11 accommodates the longest BIC format — presumably
            # full 11-character BICs; verify against the form validation.
            field=models.CharField(blank=True, max_length=11, null=True),
        ),
        migrations.AddField(
            model_name='insurancesubmission',
            name='iban_ending',
            # max_length=4: presumably stores only the last four characters
            # of the IBAN — confirm with the submitting view.
            field=models.CharField(blank=True, max_length=4, null=True),
        ),
    ]
| 26.333333 | 73 | 0.617089 |
086f4c1b46f6a3c06f0ef148ac7664fe13f1c807 | 710 | py | Python | flask_mongoengine/wtf/models.py | corydolphin/flask-mongoengine | 689b10e6f17e8db4ec15fc87ed03e504bca757a2 | [
"BSD-3-Clause"
] | 2 | 2015-08-25T04:40:13.000Z | 2016-06-16T00:11:26.000Z | flask_mongoengine/wtf/models.py | corydolphin/flask-mongoengine | 689b10e6f17e8db4ec15fc87ed03e504bca757a2 | [
"BSD-3-Clause"
] | null | null | null | flask_mongoengine/wtf/models.py | corydolphin/flask-mongoengine | 689b10e6f17e8db4ec15fc87ed03e504bca757a2 | [
"BSD-3-Clause"
] | null | null | null | from flask.ext.wtf import Form
class ModelForm(Form):
    """A WTForms mongoengine model form."""

    def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
        # An explicit ``instance`` keyword takes precedence; otherwise fall
        # back to an ``obj`` keyword if one was supplied.
        instance = kwargs.pop('instance', None) or kwargs.get('obj', None)
        self.instance = instance
        # Editing an existing document with no submitted data: pre-populate
        # the form fields from that document.
        if instance and not formdata:
            obj = instance
        self.formdata = formdata
        super(ModelForm, self).__init__(formdata, obj, prefix, **kwargs)

    def save(self, commit=True, **kwargs):
        """Write the form data into a document and optionally persist it."""
        # Either update the wrapped document in place or build a fresh one
        # from the submitted form data.
        if not self.instance:
            self.instance = self.model_class(**self.data)
        else:
            self.populate_obj(self.instance)
        if commit:
            self.instance.save(**kwargs)
        return self.instance
| 30.869565 | 81 | 0.614085 |
fa125eb50fa8a4d5f7f157395c2c0034edc9562e | 24,188 | py | Python | Lucky/src_Mike_GUI_Total/Lucky/Calculations.py | ruggiamp/Lucky | 9c89d2ae4129b3603da7d2c3f73b67995a23797a | [
"Apache-2.0"
] | null | null | null | Lucky/src_Mike_GUI_Total/Lucky/Calculations.py | ruggiamp/Lucky | 9c89d2ae4129b3603da7d2c3f73b67995a23797a | [
"Apache-2.0"
] | null | null | null | Lucky/src_Mike_GUI_Total/Lucky/Calculations.py | ruggiamp/Lucky | 9c89d2ae4129b3603da7d2c3f73b67995a23797a | [
"Apache-2.0"
] | null | null | null | '''
Created on 24 Nov 2015
@author: wnm24546
'''
from scipy.constants import c, h, k, pi
from scipy.optimize import curve_fit
from collections import OrderedDict
import numpy as np
from Lucky.LuckyExceptions import BadModelStateException
#k is kb
class CalculationService(object):
    """Coordinates the downstream/upstream LuckyCalculations instances and
    their plots, and aggregates the temperature results from both sides.

    Fixes: updateData previously loaded the *upstream* file when updating
    the downstream side, and passed the raw file path (instead of the
    loaded array) when updating the upstream side; dict ``keys()`` indexing
    in openCalib is now Python-3-safe.
    """

    def __init__(self, pp):
        self.parentPresenter = pp
        # Each result tuple/list is (downstream, upstream, average, |difference|).
        self.planckResults = (0, 0, 0, 0)
        self.wienResults = (0, 0, 0, 0)
        self.twoColResults = (0, 0, 0, 0)

    #TODO Spawn calculations and plots in a separate thread
    def createCalcs(self, dM, debug=False):
        """Build calculation and plot objects for both measurement sides."""
        self.updateModel(dM)
        self.dsCalcs = LuckyCalculations(self.dsData, self.dsCalib,
                                         self.integConf, self.bulbTemp, "Downstream Measurement")
        self.usCalcs = LuckyCalculations(self.usData, self.usCalib,
                                         self.integConf, self.bulbTemp, "Upstream Measurement")
        self.dsCalcs.runCalculations()
        self.usCalcs.runCalculations()
        self.updateResults()

        #Create plot objects once we've got some data to plot
        self.dsPlots = LuckyPlots(self.dsCalcs, 'DS')
        self.usPlots = LuckyPlots(self.usCalcs, 'US')

    def updateCalcs(self):
        """Re-run both calculations and refresh results and plots."""
        #Perhaps add updateModel call?
        self.dsCalcs.runCalculations()
        self.usCalcs.runCalculations()
        self.updateResults()

        #Update the plots with new values from the calculations
        self.dsPlots.updatePlots()
        self.usPlots.updatePlots()

    def updateResults(self):
        """Recompute (ds, us, average, |difference|) for each fit type."""
        def calculateResults(dsVal, usVal):
            avs = (dsVal + usVal)/2
            diff = abs(dsVal - usVal)
            return [dsVal, usVal, avs, diff]

        self.planckResults = calculateResults(self.dsCalcs.planckTemp, self.usCalcs.planckTemp)
        self.wienResults = calculateResults(self.dsCalcs.wienTemp, self.usCalcs.wienTemp)
        self.twoColResults = calculateResults(self.dsCalcs.twoColTemp, self.usCalcs.twoColTemp)

    def updateModel(self, dM):
        """Reload data, calibration and configuration from the data model."""
        self.dsData, self.usData = self.openData(dM)
        self.dsCalib, self.usCalib = self.openCalib(dM.calibType, dM.calibConfigData)
        self.integConf = dM.integrationConf
        self.bulbTemp = dM.calibConfigData.bulbTemp

    def updateData(self, usData=None, dsData=None):
        """Replace one or both data sets from files.

        :param usData: path of the new upstream data file (optional).
        :param dsData: path of the new downstream data file (optional).
        :raises BadModelStateException: when neither path is given.
        """
        if (usData is None) and (dsData is None):
            raise BadModelStateException("No data given for data update")
        if dsData is not None:
            # Bug fix: previously loaded the upstream file here.
            newData = np.loadtxt(dsData)
            self.dsCalcs.update(data=newData)
        if usData is not None:
            newData = np.loadtxt(usData)
            # Bug fix: previously passed the file path, not the array.
            self.usCalcs.update(data=newData)

    def updateIntegration(self, integConf):
        """Push a new integration configuration to both sides."""
        self.dsCalcs.update(integConf=integConf)
        self.usCalcs.update(integConf=integConf)

    def updateCalibration(self, calibType, calibConf):
        """Reload calibration files and push them to both sides."""
        self.dsCalib, self.usCalib = self.openCalib(calibType, calibConf)
        self.bulbTemp = calibConf.bulbTemp

        self.dsCalcs.update(calib=self.dsCalib, bulbTemp=self.bulbTemp)
        self.usCalcs.update(calib=self.usCalib, bulbTemp=self.bulbTemp)

    def openCalib(self, calibType, calibConfig):
        """Load the selected (ds, us) calibration file pair.

        calibType is a one-hot style sequence: the first entry equal to 1
        selects which consecutive pair of calibConfig.calibFiles entries is
        used (presumably an ordered mapping — key order matters).
        """
        # list() so the keys are indexable under Python 3 as well.
        calibFileLabels = list(calibConfig.calibFiles.keys())
        dsCalib, usCalib = None, None
        for i in range(len(calibType)):
            if calibType[i] == 1:
                dsCalib = str(calibConfig.calibFiles[calibFileLabels[2*i]])
                usCalib = str(calibConfig.calibFiles[calibFileLabels[2*i+1]])
            if None not in [dsCalib, usCalib]:
                break
        return np.loadtxt(dsCalib, unpack=True), np.loadtxt(usCalib, unpack=True)

    def openData(self, dM):
        """Load the (ds, us) data file pair referenced by the data model."""
        return np.loadtxt(dM.usdsPair[0], unpack=True), np.loadtxt(dM.usdsPair[1], unpack=True)

    def disposePlots(self):
        """Close both plot windows."""
        self.dsPlots.dispose()
        self.usPlots.dispose()
class LuckyCalculations(object): #TODO Make calcs use calcserv to get bulbTemp, integConf & calibset
    """Temperature analysis of one spectral measurement (Python 2 code).

    Fits the calibration-normalised spectrum with a Planck function, a
    linearised Wien function and a sliding two-colour method, yielding the
    temperature estimates planckTemp, wienTemp and twoColTemp.

    intConf is assumed to be (start index, end index, two-colour delta)
    — TODO confirm against the data model.
    """

    def __init__(self, data, calib, integConf, bulbTemp, label, debug=False):
        """
        :param data: 2-row array: wavelength [nm] and measured intensity.
        :param calib: calibration spectrum on the same wavelength axis.
        :param integConf: integration-range configuration (see class doc).
        :param bulbTemp: known temperature of the calibration bulb [K].
        :param label: human-readable name, used for the plot window title.
        """
        self.dataSet = data
        self.calibSet = calib
        self.intConf = integConf
        self.bulbTemp = bulbTemp
        self.label = label

        # Display ranges: wavelength [nm] for Planck, 1/wavelength [1/m] for Wien.
        self.planckPlotRange = [550, 900]
        self.wienPlotRange = [1e9 / self.planckPlotRange[1], 1e9/self.planckPlotRange[0]]

        #Prepare the data
        self.normaliseData()

    def update(self, data=None, integConf=None, calib=None, bulbTemp=None):
        """Replace any subset of inputs and re-derive dependent state."""
        self.dataSet = data if (data != None) else self.dataSet
        self.intConf = integConf if (integConf != None) else self.intConf
        self.calibSet = calib if (calib != None) else self.calibSet
        self.bulbTemp = bulbTemp if (bulbTemp != None) else self.bulbTemp

        # Any change to the spectra requires renormalisation; a new
        # integration range only requires recalculating the slices.
        if (data != None) or (calib != None) or (bulbTemp != None):
            self.normaliseData()
        if integConf != None:
            self.calculateRanges()

    def normaliseData(self):
        """Append a calibration-normalised row to dataSet.

        The measured spectrum is divided by the calibration spectrum and
        scaled by an ideal Planck curve at the known bulb temperature.
        """
        self.planckIdeal = self.planck(self.dataSet[0], 1, self.bulbTemp)
        self.planckIdeal = np.reshape(self.planckIdeal, (1, len(self.planckIdeal)))
        #This step computes the normalised dataset & concatenates it with the original data array
        self.dataSet = np.concatenate((self.dataSet, self.dataSet[1] / self.calibSet[1] * self.planckIdeal), axis=0)

        #We've changed the data so we need to recalculate the ranges:
        self.calculateRanges()

    def calculateRanges(self):
        """Slice the data to the configured integration range."""
        #Data sets for fitting or plotting, limited by integration range
        self.invWL = 1e9 / self.dataSet[0]# For Wien function
        self.invWLIntegLim = self.invWL[self.intConf[0]:self.intConf[1]]
        self.wlIntegLim = self.dataSet[0][self.intConf[0]:self.intConf[1]]
        self.RawIntegLim= self.dataSet[1][self.intConf[0]:self.intConf[1]]
        self.normIntegLim = self.dataSet[2][self.intConf[0]:self.intConf[1]]

    def runCalculations(self):
        """Evaluate the Wien and two-colour transforms and run all fits."""
        #Calculate functions over the range of data
        self.wienData = self.wien(self.dataSet[0], self.dataSet[2])
        self.wienDataIntegLim = self.wienData[self.intConf[0]:self.intConf[1]]
        self.twoColData = self.twoColour(self.dataSet[0], self.dataSet[2], self.intConf[2])
        self.twoColDataLim = self.twoColData[self.intConf[0]:self.intConf[1]] #twoColData limited between the integration boundaries
        self.wavelengthredLim = self.wavelengthred[self.intConf[0]:self.intConf[1]]
        #print "here are the two colours"
        #print self.twoColDataLim
        # Histogram the two-colour temperatures in bins of 30 over their range.
        self.a = int(round(min(self.twoColDataLim)))
        self.b = int(round(max(self.twoColDataLim)))
        self.binning = range(self.a, self.b, 30)
        self.twoColHistFreq, self.twoColHistValues = np.histogram(self.twoColDataLim, bins= self.binning, density=False)
        #old
        #self.twoColHistFreq, self.twoColHistValues = np.histogram(self.twoColDataLim, bins=range(1500,5000,1), density=False)
        #self.twoColHistValues = np.delete(self.twoColHistValues, len(self.twoColHistFreq), 0)

        #Do fits
        self.fitPlanck()
        self.fitWien()
        self.fitHistogram()

    def fitPlanck(self):
        """Fit the Planck function to the normalised, range-limited data.

        On ValueError/RuntimeError the fit is flagged failed
        (fitOkPlanck = 0) and planckTemp falls back to 2000.
        """
        #Do some fitting for Planck...
        ###
        self.fitOkPlanck = 1
        try:
            self.planckFit, planckCov = curve_fit(self.planck, self.wlIntegLim, self.normIntegLim, [1,2000])
        except ValueError:
            print "Value Error Planck fit"
            self.fitOkPlanck = 0
        except RuntimeError:
            print "Runtime Error Planck fit"
            self.fitOkPlanck = 0

        if self.fitOkPlanck == 1:
            # Fit parameters are [emissivity, temperature].
            self.planckTemp = self.planckFit[1]
            self.planckEmiss = self.planckFit[0]
            #Planck evaluated with the fitted parameters (for plotting)
            self.planckFitData = self.planck(self.wlIntegLim, self.planckEmiss, self.planckTemp)
        else:
            self.planckTemp = 2000

    #new method defined to operate a sliding average. useful for the histogram fit
    def moving_average(self, a, n=2) :
        """Sliding mean of *a* over windows of length *n* (via cumsum)."""
        self.ret = np.cumsum(a, dtype=float)
        self.ret[n:] = self.ret[n:] - self.ret[:-n]
        return self.ret[n - 1:] / n

    def fitWien(self):
        """Fit the linearised Wien function, seeded with the Planck result.

        Skipped (wienTemp = 2000) when the Planck fit failed; non-finite
        Wien values are masked out of the fit.
        """
        #Do some fitting for Wien...
        ###
        self.fitOkWien = 1
        if self.fitOkPlanck == 1:
            try:
                self.wienFit, wienCov = curve_fit(self.fWien, self.invWLIntegLim[(np.isfinite(self.wienDataIntegLim))], self.wienDataIntegLim[(np.isfinite(self.wienDataIntegLim))], p0=[1, self.planckTemp])
                self.wienResidual = self.wienDataIntegLim - self.fWien(self.invWLIntegLim[(np.isfinite(self.wienDataIntegLim))], *self.wienFit)
            except ValueError:
                print "Value Error Wien fit"
                self.fitOkWien = 0
            except RuntimeError:
                print "Runtime Error Wien fit"
                self.fitOkWien = 0
            if self.fitOkWien == 1:
                self.wienTemp = self.wienFit[1]
            else:
                self.wienTemp = 2000
        else:
            self.wienTemp = 2000

    def fitHistogram(self):
        """Gaussian fit of the two-colour temperature histogram.

        Falls back to the mean/std of the two-colour data when the Planck
        fit or the Gaussian fit failed.
        """
        #Gaussian fit of two colour histogram
        ###
        #print('averaged twocolhistvalues:')
        #print self.moving_average(self.twoColHistValues)
        self.fitOkGauss = 1
        if self.fitOkPlanck == 1:
            try:
                # Bin edges are averaged pairwise to get bin centres.
                self.histFit, histCov = curve_fit(self.gaus, self.moving_average(self.twoColHistValues), self.twoColHistFreq, p0=[1000,self.planckTemp,100])
            except ValueError:
                print "Value Error Gauss fit"
                self.fitOkGauss = 0
            except RuntimeError:
                print "Runtime Error Gauss fit"
                self.fitOkGauss = 0
            if self.fitOkGauss == 1:
                # Gaussian parameters are [amplitude, centre, sigma].
                self.twoColTemp = self.histFit[1]
                self.twoColErr = self.histFit[2]
            else:
                self.twoColTemp = np.mean(self.twoColDataLim)
                self.twoColErr = np.std(self.twoColDataLim)
        else:
            self.twoColTemp = np.mean(self.twoColDataLim)
            self.twoColErr = np.std(self.twoColDataLim)

    #old
    #def fitHistogram(self):
        #Gaussian fit of two colour histogram
        ###
        #self.histFit, histCov = curve_fit(self.gaus, self.twoColHistValues, self.twoColHistFreq, p0=[1000,self.planckTemp,100])
        #self.twoColTemp = self.histFit[1]
        #self.twoColErr = self.histFit[2]

    #Planck function
    def planck(self, wavelength, emiss, temp):
        """Planck spectral radiance at *temp* [K], scaled by *emiss*;
        *wavelength* is given in nm and converted to metres internally."""
        wavelength = wavelength * 1e-9
        return emiss / np.power(wavelength, 5) * (2 * pi * h * np.power(c, 2)) / np.expm1((h * c)/(k * wavelength * temp))

    #Wien function
    def wien(self, wavelength, intens):
        """Wien linearisation of an intensity spectrum (wavelength in nm)."""
        wavelength = wavelength * 1e-9
        return self.wienBase(np.power(wavelength, 5) * intens / (2 * pi * h * np.power(c, 2)))

    #Linear Wien function: straight line in 1/wavelength with slope -1/T
    def fWien(self, wavelength, emiss, temp):
#        wavelength = wavelength * 1e-9
        return self.wienBase(emiss) - (1/temp) * wavelength

    #Wien support function (this is just recycling code)
    def wienBase(self, exponent):
        return k / (h * c) * np.log(exponent)

    #Two colour function
    def twoColour(self, wavelength, intens, delta):
        """Sliding two-colour temperatures from wavelength pairs *delta*
        samples apart; also stores the shortened wavelength axis as
        self.wavelengthred. Returns a list of length len(wavelength)-delta."""
        #wavelength = wavelength * 1e-9
        nPoints = len(wavelength)
        nWindows = nPoints - delta
        twoCol = []

        #def twoColCalc(wavelength, intens):
        #    return np.log(intens * np.power(wavelength, 5) / (2 * pi * h * np.power(c, 2))) * (k / (h *c))

        for i in range(nWindows):
            # f1/f2 are the two inverse wavelengths [1/m] of the pair.
            f1 = 1 / (wavelength[i]* 1e-9)
            f2 = 1/ (wavelength[i + delta]* 1e-9)
            i1 = np.log(intens[i]/2/pi/h/c**2/f1**5)*k/h/c #twoColCalc(wavelength[i], intens[i])
            i2 = np.log(intens[i+delta]/2/pi/h/c**2/f2**5)*k/h/c #twoColCalc(wavelength[i + delta], intens[i+delta])
            twoCol.append(abs((f2 - f1) / (i2 - i1)))

        #for i in range(nWindows, nPoints):
        #    twoCol.append(float('nan'))
        self.wavelengthred = wavelength[0:nPoints - delta]
        return twoCol

    #Gaussian for fit
    def gaus(self, x, a, x0, sigma):
        return a*np.exp(-(x-x0)**2/(2*sigma**2))
###
import matplotlib.pyplot as plt
class LuckyPlots(object):
    def __init__(self, calcs, US_DS, debug=False):
        """Build the five-panel diagnostics figure for one measurement.

        :param calcs: LuckyCalculations instance providing the data.
        :param US_DS: 'US' or 'DS'; only affects the window position.
        :param debug: when True, skip ALL setup.
        """
        if debug:
            # NOTE(review): returns before self.debug/self.luckyCalcs are
            # assigned, so a debug instance has no attributes at all.
            return
        self.debug = debug
        self.luckyCalcs = calcs
        self.fig = plt.figure(self.luckyCalcs.label)
        self.fig.suptitle(self.luckyCalcs.label, fontsize="16", weight="bold", color = 'b')
        self.ax1 = self.fig.add_subplot(3, 2, 1)#Raw+Calib
        self.ax2 = self.fig.add_subplot(3, 2, 3)#Planck
        self.ax3 = self.fig.add_subplot(3, 2, 4)#Wien
        self.ax3.xaxis.get_major_formatter().set_powerlimits((0, 1))
        self.ax4 = self.fig.add_subplot(3, 2, 5)#2Colour
        self.ax5 = self.fig.add_subplot(3, 2, 6)#Histogram
        self.ax5.xaxis.get_major_formatter().set_powerlimits((0, 1))
        # Second y-axis on the Wien panel for the fit residual.
        self.ax6 = self.ax3.twinx()

        #Layout settings for the plots
        plt.subplots_adjust(wspace=0.3, hspace=0.7)

        #One-time configuration of plots
        self.ax1.set_title('Raw (blue) & Calibration Data (green)', fontsize= 13, style='italic', weight="bold")
        self.ax1.set_xlabel('Wavelength [nm]', fontsize= 13)
        self.ax1.grid(True, linestyle='-')

        self.ax2.set_title('Planck Function Data', fontsize='13', style='italic', weight="bold")
        self.ax2.set_xlabel('Wavelength [nm]', fontsize= 13)
        # NOTE(review): this Planck y-label is set on ax3 (the Wien panel)
        # inside the ax2 configuration group — possibly meant ax2; it is
        # overwritten by the Wien label a few lines below. Verify.
        self.ax3.set_ylabel("Planck Function [a.u.]", fontsize= 13)
        #self.ax2.set_yticks([])
        self.ax2.set_yticks([0.1, 0.3, 0.5, 0.7, 0.9])

        self.ax3.set_title('Wien Function Data', fontsize='13', style='italic', weight="bold")
        self.ax3.set_xlabel(r'1/Wavelength [m$^{-1}$]', fontsize= 13)
        self.ax3.set_ylabel("Wien Function", fontsize= 13)
        self.ax3.set_yticks([])

        self.ax4.set_title('Two-Colour Plot', fontsize='13', style='italic', weight="bold")
        self.ax4.set_xlabel('Wavelength [nm]', fontsize= 13)
        self.ax4.set_ylabel('Temperature [K]', fontsize= 13)
        self.ax4.grid(True, linestyle='-')

        self.ax5.set_title('Two-colour Histogram', fontsize='13', style='italic', weight="bold")
        self.ax5.set_xlabel('Temperature [K]', fontsize= 13)
        self.ax5.set_ylabel('Counts [a.u.]', fontsize= 13)

        self.ax6.set_ylabel('Wien Residual', color='g', fontsize= 13)

        self.updatePlots(redraw=False)

        #ax1 = calibration and raw spectrum
        #ax2 = planck spectrum
        #ax3 = wien
        #ax4 = 2-col
        #ax5 =histogram
        #ax6 = residuals in subplot (3,2,4)
        if not self.debug:
            #Draw the plots if we're not debugging
            plt.ion()
            plt.show()
            mngr = plt.get_current_fig_manager()
            # Place US and DS windows side by side on screen.
            if US_DS == 'US':
                mngr.window.setGeometry(20,280,700, 700)
            if US_DS == 'DS':
                mngr.window.setGeometry(1000,280,700, 700)

            #Needed to make plt appear!
            # http://stackoverflow.com/questions/28269157/plotting-in-a-non-blocking-way-with-matplotlib
            plt.pause(0.001)
def updatePlots(self, redraw=True):
    """Refresh all six diagnostic subplots from the current ``self.luckyCalcs``
    results and redraw the figure.

    Subplots: ax1 raw+calibration, ax2 Planck fit, ax3 Wien fit, ax4
    two-colour temperature, ax5 two-colour histogram, ax6 Wien residual
    (twin axis of ax3). Temperature labels are written with ``plt.figtext``.

    FIX: the original used ``dict.iteritems()``, which does not exist in
    Python 3 (AttributeError). ``dict.items()`` behaves the same here and
    works on both Python 2 and 3.

    :param redraw: if True (and not in debug mode), force a redraw of the
                   figure via ``plt.draw()``/``plt.pause``.
    """
    # Raw and calibration data subgraph
    self.ax1.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[1],
                  self.luckyCalcs.dataSet[0], self.luckyCalcs.calibSet[1], 'green',
                  self.luckyCalcs.wlIntegLim, self.luckyCalcs.RawIntegLim, 'red')
    self.ax1.set_ylim(0, self.getYMax(self.luckyCalcs.dataSet[1],
                                      self.luckyCalcs.calibSet[1]))

    # Planck data subgraph (normalized to the data maximum); the fitted
    # curve is only overlaid when the Planck fit succeeded.
    if self.luckyCalcs.fitOkPlanck == 1:
        self.ax2.plot(self.luckyCalcs.dataSet[0],
                      self.luckyCalcs.dataSet[2] / max(self.luckyCalcs.dataSet[2]),
                      self.luckyCalcs.wlIntegLim,
                      self.luckyCalcs.planckFitData / max(self.luckyCalcs.dataSet[2]),
                      'red')
        self.ax2.set_xlim(*self.luckyCalcs.planckPlotRange)
        self.ax2.set_ylim([0, 1])
    else:
        self.ax2.plot(self.luckyCalcs.dataSet[0],
                      self.luckyCalcs.dataSet[2] / max(self.luckyCalcs.dataSet[2]))
        self.ax2.set_xlim(*self.luckyCalcs.planckPlotRange)
        self.ax2.set_ylim([0, 1])

    # Wien data subgraph; overlay the Wien fit only when both fits succeeded
    if self.luckyCalcs.fitOkWien == 1 and self.luckyCalcs.fitOkPlanck == 1:
        self.ax3.plot(self.luckyCalcs.invWL, self.luckyCalcs.wienData,
                      self.luckyCalcs.invWLIntegLim,
                      self.luckyCalcs.fWien(self.luckyCalcs.invWLIntegLim,
                                            *self.luckyCalcs.wienFit), 'red')
        self.ax3.set_xlim(*self.luckyCalcs.wienPlotRange)
    else:
        self.ax3.plot(self.luckyCalcs.invWL, self.luckyCalcs.wienData)
        self.ax3.set_xlim(*self.luckyCalcs.wienPlotRange)

    # Two Colour data subgraph
    self.ax4.plot(self.luckyCalcs.wavelengthred, self.luckyCalcs.twoColData, 'b:',
                  self.luckyCalcs.wavelengthredLim, self.luckyCalcs.twoColDataLim, 'r:')
    self.ax4.set_xlim(*self.luckyCalcs.planckPlotRange)
    # Center the y-range on the two-colour temperature (+/- 500 K)
    self.ax4.set_ylim(self.luckyCalcs.twoColTemp - 500,
                      self.luckyCalcs.twoColTemp + 500)

    # Histogram subgraph; Gaussian overlay only when that fit succeeded
    self.ax5.hist(self.luckyCalcs.twoColDataLim, self.luckyCalcs.binning)
    if self.luckyCalcs.fitOkGauss == 1 and self.luckyCalcs.fitOkPlanck == 1:
        self.ax5.plot(self.luckyCalcs.twoColHistValues,
                      self.luckyCalcs.gaus(self.luckyCalcs.twoColHistValues,
                                           *self.luckyCalcs.histFit), 'red')
    self.ax5.set_xlim([self.luckyCalcs.twoColTemp - 400,
                       self.luckyCalcs.twoColTemp + 400])

    # Residual subgraph of the Wien fit (green), with a black zero line
    if self.luckyCalcs.fitOkPlanck == 1 and self.luckyCalcs.fitOkWien == 1:
        ordin = len(self.luckyCalcs.invWL) * [0]
        self.ax6.plot(self.luckyCalcs.invWLIntegLim, self.luckyCalcs.wienResidual,
                      'green', self.luckyCalcs.invWL, ordin, 'black')

    # Create text label for calculated T values; "ERROR" replaces a value
    # whose fit failed.
    # NOTE(review): if fitOkPlanck == 1 but fitOkWien is neither 1 nor 0,
    # textLabel stays unbound — presumably the flags are always 0/1; confirm.
    if self.luckyCalcs.fitOkPlanck == 1 and self.luckyCalcs.fitOkWien == 1:
        textLabel = OrderedDict([
            ("T" + r"$_{Planck}$" + "[K]", "{0:9d}".format(int(self.luckyCalcs.planckTemp))),
            ("T" + r"$_{Wien}$" + "[K]", "{0:9d}".format(int(self.luckyCalcs.wienTemp))),
            ("T" + r"$_{2col}$" + "[K]", "{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])
    else:
        if self.luckyCalcs.fitOkPlanck == 0:
            textLabel = OrderedDict([
                ("T" + r"$_{Planck}$" + "[K]", "{0:9s}".format("ERROR")),
                ("T" + r"$_{Wien}$" + "[K]", "{0:9s}".format("ERROR")),
                ("T" + r"$_{2col}$" + "[K]", "{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])
        if self.luckyCalcs.fitOkWien == 0:
            textLabel = OrderedDict([
                ("T" + r"$_{Planck}$" + "[K]", "{0:9d}".format(int(self.luckyCalcs.planckTemp))),
                ("T" + r"$_{Wien}$" + "[K]", "{0:9s}".format("ERROR")),
                ("T" + r"$_{2col}$" + "[K]", "{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])

    # Relative Wien/Planck discrepancy (percent) and two-colour std. dev.
    self.errWienPlanck = (abs(self.luckyCalcs.planckTemp - self.luckyCalcs.wienTemp)
                          / (self.luckyCalcs.planckTemp)) * 100
    self.std2col = self.luckyCalcs.twoColErr
    textLabel1 = OrderedDict([
        ("ERR" + "$_{2col}$" + "[K]", "{0:9d}".format(int(self.std2col))),
        ("ERR" + "$_{W-P}$", "{0:9.2f}".format(self.errWienPlanck))
    ])

    labelPosition = (0.54, 0.85)
    rowNr = 0
    # FIX: .items() instead of Python-2-only .iteritems()
    for label, tVal in textLabel.items():
        plt.figtext(labelPosition[0], labelPosition[1] - (0.05 * rowNr),
                    label, fontdict=None, size='large')
        plt.figtext(labelPosition[0] + 0.080, labelPosition[1] - (0.05 * rowNr),
                    tVal, fontdict=None, size='large')
        rowNr += 1

    labelPosition1 = (0.78, 0.85)
    rowNr = 0
    for label, tVal in textLabel1.items():
        # Highlight the W-P discrepancy in red when it exceeds 1 %
        if self.errWienPlanck < 1 or rowNr == 0:
            plt.figtext(labelPosition1[0], labelPosition1[1] - (0.05 * rowNr),
                        label, fontdict=None, size='large')
            plt.figtext(labelPosition1[0] + 0.080, labelPosition1[1] - (0.05 * rowNr),
                        tVal, fontdict=None, size='large')
        else:
            plt.figtext(labelPosition1[0], labelPosition1[1] - (0.05 * rowNr),
                        label, fontdict=None, size='large')
            plt.figtext(labelPosition1[0] + 0.080, labelPosition1[1] - (0.05 * rowNr),
                        tVal, fontdict=None, size='large', color='r')
        rowNr += 1

    if redraw and not self.debug:
        plt.draw()
        # Needed to make plt appear!
        # http://stackoverflow.com/questions/28269157/plotting-in-a-non-blocking-way-with-matplotlib
        plt.pause(0.001)
def getYMax(self, *data):
    """Return 1.1x the largest value found across all given arrays.

    Used to pick a y-axis upper limit with 10 % headroom above the data.
    """
    peak = max(np.amax(series) for series in data)
    return peak * 1.1
def dispose(self):
    """Close the matplotlib figure associated with this plot set."""
    figure_label = self.luckyCalcs.label
    plt.close(figure_label)
2a3c1367f712abaa3c0b7d5d90861fbe68671d2f | 1,019 | py | Python | web_dev/apps/test_model.py | tssahota/CMPT-732---Big-Data-Project | 27238543f0d62a0f2639317c042ab328b377cd63 | [
"Apache-2.0"
] | null | null | null | web_dev/apps/test_model.py | tssahota/CMPT-732---Big-Data-Project | 27238543f0d62a0f2639317c042ab328b377cd63 | [
"Apache-2.0"
] | null | null | null | web_dev/apps/test_model.py | tssahota/CMPT-732---Big-Data-Project | 27238543f0d62a0f2639317c042ab328b377cd63 | [
"Apache-2.0"
] | 1 | 2021-03-04T23:13:58.000Z | 2021-03-04T23:13:58.000Z | import sys
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
from pyspark.sql import SparkSession, functions, types, Row
spark = SparkSession.builder.appName('tmax model tester').getOrCreate()
assert spark.version >= '2.3' # make sure we have Spark 2.3+
spark.sparkContext.setLogLevel('WARN')
from pyspark.ml.tuning import TrainValidationSplitModel
from pyspark.ml import PipelineModel
def test_model():
    """Load the best saved pipeline model and score one synthetic row.

    Builds a single-row DataFrame, loads the persisted PipelineModel from
    ``./best_model/bestModel``, runs it, and prints the predicted value.
    """
    # get the data: one synthetic feature row
    temp_res = {'budget': 100, 'vote_count': 100, 'popularity': 100, 'collection': True}
    sc_df = spark.createDataFrame(Row(**i) for i in [temp_res])
    sc_df.show()
    # load the model
    model = PipelineModel.load('./best_model/bestModel')
    # use the model to make predictions
    # FIX: the original called model.transform(test_tomorrow), but
    # `test_tomorrow` is never defined (NameError); the DataFrame built
    # above is what should be scored.
    predictions = model.transform(sc_df)
    predictions.show()
    # 1 element collected
    prediction = predictions.collect()[0].asDict()['prediction']
    # print the prediction
    print('Predicted tmax tomorrow:', prediction)


if __name__ == '__main__':
    test_model()
| 33.966667 | 88 | 0.7105 |
95420dfe6adae1898062a6daa14ec01ce135a468 | 4,022 | py | Python | tests/unit/resources/test_statistics.py | primitybio/cellengine-python-toolk | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | 4 | 2021-01-12T17:03:37.000Z | 2021-12-16T13:23:57.000Z | tests/unit/resources/test_statistics.py | primitybio/cellengine-python-toolk | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | 61 | 2021-01-11T05:27:16.000Z | 2022-03-08T01:50:09.000Z | tests/unit/resources/test_statistics.py | primitybio/cellengine-python-toolkit | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | null | null | null | import json
import pytest
import responses
EXP_ID = "5d38a6f79fae87499999a74b"
@pytest.fixture(scope="module")
def statistic_response(experiment, statistics):
    # Module-scoped alias for the ``statistics`` fixture. Depending on
    # ``experiment`` forces that fixture to be set up first, even though its
    # value is not used directly here.
    return statistics
@pytest.mark.usefixtures("block_request")
class TestStatistics:
    """Tests for bulk statistics retrieval via ``client.get_statistics``.

    Mocked-HTTP tests use the ``responses`` library; ``@pytest.mark.vcr``
    tests replay recorded cassettes against a fixed experiment id.
    """

    @responses.activate
    def test_should_get_statistics(self, client, ENDPOINT_BASE, statistics):
        """All keyword arguments should be forwarded in the POST body."""
        responses.add(
            responses.POST,
            f"{ENDPOINT_BASE}/experiments/{EXP_ID}/bulkstatistics",
            json=statistics,
        )
        expected_query_body = {
            "statistics": "mean",
            "q": 1,
            "channels": "FSC-A",
            "annotations": False,
            "compensationId": "some id",
            "fcsFileIds": "some file id",
            "format": "json",
            "layout": "medium",
            "percentOf": "PARENT",
            "populationIds": "some population id",
        }
        client.get_statistics(
            EXP_ID,
            "mean",
            "FSC-A",
            q=1,
            compensation_id="some id",
            fcs_file_ids="some file id",
            format="json",
            layout="medium",
            percent_of="PARENT",
            population_ids="some population id",
        )
        # NOTE(review): only the *key sets* of the request body are compared,
        # not the values — presumably intentional; confirm.
        assert set(expected_query_body) == set(
            json.loads(responses.calls[0].request.body)
        )

    @pytest.mark.vcr
    def test_should_get_list_of_stats(self, ENDPOINT_BASE, client):
        """Passing a list of statistic methods returns all of them per row."""
        methods_to_get = ["mean", "mad", "stddev"]
        stats = client.get_statistics(
            "5e4fcb98bdd7ea051d703652", methods_to_get, "FSC-A"
        )
        assert all([method in stats[0].keys() for method in methods_to_get])

    @pytest.mark.vcr
    def test_should_get_list_of_channels(self, ENDPOINT_BASE, client):
        """Passing a list of channels returns statistics for each channel."""
        channels_to_get = ["FSC-A", "FSC-H"]
        stats = client.get_statistics(
            "5e4fcb98bdd7ea051d703652", "mean", channels_to_get
        )
        assert any([channels_to_get[0] in stat["channel"] for stat in stats])
        assert any([channels_to_get[1] in stat["channel"] for stat in stats])

    @pytest.mark.vcr
    def test_quantile_should_require_q(self, ENDPOINT_BASE, client):
        """The "quantile" method must raise unless ``q`` is supplied."""
        with pytest.raises(ValueError):
            client.get_statistics("5e4fcb98bdd7ea051d703652", "quantile", "FSC-A")
        # passes with q set
        client.get_statistics("5e4fcb98bdd7ea051d703652", "quantile", "FSC-A", q=0.75)

    @pytest.mark.vcr
    def test_should_get_every_statistics_type(self, ENDPOINT_BASE, client):
        """Each supported method should be accepted individually."""
        methods = [
            "mean",
            "median",
            "mad",
            "geometricMean",
            "eventCount",
            "cv",
            "stddev",
            "percent",
        ]
        for method in methods:
            stats = client.get_statistics("5e4fcb98bdd7ea051d703652", method, "FSC-A")
            # NOTE(review): asserting a list comprehension is truthy — always
            # True for a non-empty list; likely all(...) was intended.
            assert [method in stat.keys() for stat in stats]

    @pytest.mark.vcr
    def test_should_get_formatted_csv(self, ENDPOINT_BASE, client):
        """CSV format should return a string with CSV row delimiters."""
        stats = client.get_statistics(
            "5e4fcb98bdd7ea051d703652",
            "mean",
            "FSC-A",
            format="csv",
            layout="short-wide",
        )
        # count rows by row delimiter:
        # NOTE(review): str.find always returns an int; this only checks the
        # result is a string, not that "\r" is present.
        assert type(stats.find("\r")) == int

    @responses.activate
    def test_should_return_pandas_dataframe(
        self, ENDPOINT_BASE, client, statistic_response
    ):
        """format="pandas" should yield a DataFrame with annotation columns."""
        responses.add(
            responses.POST,
            f"{ENDPOINT_BASE}/experiments/5e4fcb98bdd7ea051d703653/bulkstatistics",
            json=statistic_response,
        )
        stats = client.get_statistics(
            "5e4fcb98bdd7ea051d703653", "mean", "FSC-A", format="pandas"
        )
        properties = [
            "fcsFileId",
            "filename",
            "populationId",
            "population",
            "uniquePopulationName",
            "parentPopulation",
            "parentPopulationId",
        ]
        assert all(prop in stats.columns.to_list() for prop in properties)
4c04db3d97fdbf5c5c21be44da5e9edd6c388629 | 35,866 | py | Python | dace/codegen/targets/xilinx.py | andreaskuster/dace | f2c16430543bb56c54a833beeb626b8c30967428 | [
"BSD-3-Clause"
] | null | null | null | dace/codegen/targets/xilinx.py | andreaskuster/dace | f2c16430543bb56c54a833beeb626b8c30967428 | [
"BSD-3-Clause"
] | null | null | null | dace/codegen/targets/xilinx.py | andreaskuster/dace | f2c16430543bb56c54a833beeb626b8c30967428 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import collections
import itertools
import os
import re
import numpy as np
import dace
from dace import registry, dtypes
from dace.config import Config
from dace.frontend import operations
from dace.sdfg import nodes
from dace.sdfg import find_input_arraynode, find_output_arraynode
from dace.codegen import exceptions as cgx
from dace.codegen.codeobject import CodeObject
from dace.codegen.dispatcher import DefinedType
from dace.codegen.prettycode import CodeIOStream
from dace.codegen.targets.target import make_absolute
from dace.codegen.targets import cpp, fpga
# Maps detected DaCe reduction types to the corresponding hlslib operator
# functor used when emitting reduction code for Xilinx HLS.
REDUCTION_TYPE_TO_HLSLIB = {
    dace.dtypes.ReductionType.Min: "hlslib::op::Min",
    dace.dtypes.ReductionType.Max: "hlslib::op::Max",
    dace.dtypes.ReductionType.Sum: "hlslib::op::Sum",
    dace.dtypes.ReductionType.Product: "hlslib::op::Product",
    dace.dtypes.ReductionType.Logical_And: "hlslib::op::And",
}
@registry.autoregister_params(name='xilinx')
class XilinxCodeGen(fpga.FPGACodeGen):
""" Xilinx FPGA code generator. """
target_name = 'xilinx'
title = 'Xilinx'
language = 'hls'
def __init__(self, *args, **kwargs):
    """Initialize the Xilinx code generator.

    Initialization only happens when Xilinx is the configured FPGA
    vendor; for any other vendor construction is a no-op so this
    backend effectively stays unregistered.
    """
    vendor = Config.get("compiler", "fpga_vendor")
    if vendor.lower() == "xilinx":
        super().__init__(*args, **kwargs)
        # {(kernel name, interface name): (memory type, memory bank)}
        self._interface_assignments = {}
@staticmethod
def cmake_options():
    """Build the list of ``-D`` definitions passed to CMake for Xilinx builds.

    All values are read from the DaCe configuration tree (mostly under
    ``compiler.xilinx``). Returns a list of CMake option strings.
    """
    host_flags = Config.get("compiler", "xilinx", "host_flags")
    synthesis_flags = Config.get("compiler", "xilinx", "synthesis_flags")
    build_flags = Config.get("compiler", "xilinx", "build_flags")
    mode = Config.get("compiler", "xilinx", "mode")
    target_platform = Config.get("compiler", "xilinx", "platform")
    enable_debugging = ("ON" if Config.get_bool(
        "compiler", "xilinx", "enable_debugging") else "OFF")
    autobuild = ("ON" if Config.get_bool("compiler", "autobuild_bitstreams")
                 else "OFF")
    frequency = Config.get("compiler", "xilinx", "frequency").strip()
    options = [
        "-DDACE_XILINX_HOST_FLAGS=\"{}\"".format(host_flags),
        "-DDACE_XILINX_SYNTHESIS_FLAGS=\"{}\"".format(synthesis_flags),
        "-DDACE_XILINX_BUILD_FLAGS=\"{}\"".format(build_flags),
        "-DDACE_XILINX_MODE={}".format(mode),
        "-DDACE_XILINX_TARGET_PLATFORM=\"{}\"".format(target_platform),
        "-DDACE_XILINX_ENABLE_DEBUGGING={}".format(enable_debugging),
        "-DDACE_FPGA_AUTOBUILD_BITSTREAM={}".format(autobuild),
        f"-DDACE_XILINX_TARGET_CLOCK={frequency}"
    ]
    # Override Vitis/SDx/SDAccel installation directory
    if Config.get("compiler", "xilinx", "path"):
        options.append("-DVITIS_ROOT_DIR=\"{}\"".format(
            Config.get("compiler", "xilinx", "path").replace("\\", "/")))
    return options
def get_generated_codeobjects(self):
    """Assemble all code objects produced by this backend.

    Returns a list containing: the host-side C++ code object, one device
    (HLS) code object per kernel, and one CSV code object per kernel with
    its memory-interface/bank assignments.

    Raises ``cgx.CodegenError`` for an unrecognized execution mode.
    """
    execution_mode = Config.get("compiler", "xilinx", "mode")

    # Pick the .xclbin file name and the emulation environment variables
    # matching the configured execution mode.
    kernel_file_name = "DACE_BINARY_DIR \"/{}".format(self._program_name)
    if execution_mode == "software_emulation":
        kernel_file_name += "_sw_emu.xclbin\""
        xcl_emulation_mode = "\"sw_emu\""
        xilinx_sdx = "DACE_VITIS_DIR"
    elif execution_mode == "hardware_emulation":
        kernel_file_name += "_hw_emu.xclbin\""
        xcl_emulation_mode = "\"hw_emu\""
        xilinx_sdx = "DACE_VITIS_DIR"
    elif execution_mode == "hardware" or execution_mode == "simulation":
        kernel_file_name += "_hw.xclbin\""
        xcl_emulation_mode = None
        xilinx_sdx = None
    else:
        raise cgx.CodegenError(
            "Unknown Xilinx execution mode: {}".format(execution_mode))

    # Emit code that sets (or unsets) XCL_EMULATION_MODE and XILINX_SDX in
    # the host initializer, so the runtime sees the right mode.
    set_env_vars = ""
    set_str = "dace::set_environment_variable(\"{}\", {});\n"
    unset_str = "dace::unset_environment_variable(\"{}\");\n"
    set_env_vars += (set_str.format("XCL_EMULATION_MODE",
                                    xcl_emulation_mode)
                     if xcl_emulation_mode is not None else
                     unset_str.format("XCL_EMULATION_MODE"))
    set_env_vars += (set_str.format("XILINX_SDX", xilinx_sdx) if xilinx_sdx
                     is not None else unset_str.format("XILINX_SDX"))

    host_code = CodeIOStream()
    host_code.write("""\
#include "dace/xilinx/host.h"
#include "dace/dace.h"
#include <iostream>\n\n""")

    self._frame.generate_fileheader(self._global_sdfg, host_code,
                                    'xilinx_host')

    params_comma = self._global_sdfg.signature(with_arrays=False)
    if params_comma:
        params_comma = ', ' + params_comma

    # Host-side init/exit functions plus the per-kernel host code collected
    # in self._host_codes, separated by banner comments.
    host_code.write("""
DACE_EXPORTED int __dace_init_xilinx({sdfg.name}_t *__state{signature}) {{
{environment_variables}
__state->fpga_context = new dace::fpga::Context();
__state->fpga_context->Get().MakeProgram({kernel_file_name});
return 0;
}}
DACE_EXPORTED void __dace_exit_xilinx({sdfg.name}_t *__state) {{
delete __state->fpga_context;
}}
{host_code}""".format(signature=params_comma,
                      sdfg=self._global_sdfg,
                      environment_variables=set_env_vars,
                      kernel_file_name=kernel_file_name,
                      host_code="".join([
                          "{separator}\n// Kernel: {kernel_name}"
                          "\n{separator}\n\n{code}\n\n".format(separator="/" *
                                                               79,
                                                               kernel_name=name,
                                                               code=code)
                          for (name, code) in self._host_codes
                      ])))

    host_code_obj = CodeObject(self._program_name,
                               host_code.getvalue(),
                               "cpp",
                               XilinxCodeGen,
                               "Xilinx",
                               target_type="host")

    kernel_code_objs = [
        CodeObject(kernel_name,
                   code,
                   "cpp",
                   XilinxCodeGen,
                   "Xilinx",
                   target_type="device")
        for (kernel_name, code) in self._kernel_codes
    ]

    # Configuration file with interface assignments
    # Either every global-memory interface has an assignment or none does;
    # a partial assignment is an error.
    are_assigned = [
        v is not None for v in self._interface_assignments.values()
    ]
    bank_assignment_code = []
    if any(are_assigned):
        if not all(are_assigned):
            raise RuntimeError("Some, but not all global memory arrays "
                               "were assigned to memory banks: {}".format(
                                   self._interface_assignments))
        are_assigned = True
    else:
        are_assigned = False
    for name, _ in self._host_codes:
        # Only iterate over assignments if any exist
        if are_assigned:
            for (kernel_name, interface_name), (
                    memory_type,
                    memory_bank) in self._interface_assignments.items():
                if kernel_name != name:
                    continue
                bank_assignment_code.append("{},{},{}".format(
                    interface_name, memory_type.name, memory_bank))
        # Create file even if there are no assignments
        kernel_code_objs.append(
            CodeObject("{}_memory_interfaces".format(name),
                       "\n".join(bank_assignment_code),
                       "csv",
                       XilinxCodeGen,
                       "Xilinx",
                       target_type="device"))

    return [host_code_obj] + kernel_code_objs
@staticmethod
def define_stream(dtype, buffer_size, var_name, array_size, function_stream,
                  kernel_stream):
    """
    Defines a stream (hlslib FIFO) in the generated HLS code.

    Single streams are constructed with their name for debugging; stream
    arrays are declared and then named via ``dace::SetNames``.

    :return: a tuple containing the type of the created variable, and a
             boolean indicating whether this is a global variable or not.
    """
    ctype = "dace::FIFO<{}, {}, {}>".format(dtype.base_type.ctype,
                                            dtype.veclen, buffer_size)
    if cpp.sym2cpp(array_size) == "1":
        kernel_stream.write("{} {}(\"{}\");".format(ctype, var_name,
                                                    var_name))
    else:
        kernel_stream.write("{} {}[{}];\n".format(ctype, var_name,
                                                  cpp.sym2cpp(array_size)))
        kernel_stream.write("dace::SetNames({}, \"{}\", {});".format(
            var_name, var_name, cpp.sym2cpp(array_size)))

    # In Xilinx, streams are defined as local variables
    # Return value is used for adding to defined_vars in fpga.py
    return ctype, False
def define_local_array(self, var_name, desc, array_size, function_stream,
                       kernel_stream, sdfg, state_id, node):
    """Declare an on-chip local array and emit its partitioning pragma.

    FPGA_Registers arrays are fully partitioned; multi-dimensional
    FPGA_Local arrays are block-partitioned by the second-to-last
    dimension. Raises ``ValueError`` for other storage types.
    """
    dtype = desc.dtype
    kernel_stream.write("{} {}[{}];\n".format(dtype.ctype, var_name,
                                              cpp.sym2cpp(array_size)))
    if desc.storage == dace.dtypes.StorageType.FPGA_Registers:
        kernel_stream.write("#pragma HLS ARRAY_PARTITION variable={} "
                            "complete\n".format(var_name))
    elif desc.storage == dace.dtypes.StorageType.FPGA_Local:
        # NOTE(review): 1-D FPGA_Local arrays get no partition pragma at
        # all — presumably intentional (default BRAM mapping); confirm.
        if len(desc.shape) > 1:
            kernel_stream.write("#pragma HLS ARRAY_PARTITION variable={} "
                                "block factor={}\n".format(
                                    var_name, desc.shape[-2]))
    else:
        raise ValueError("Unsupported storage type: {}".format(
            desc.storage.name))
    self._dispatcher.defined_vars.add(var_name, DefinedType.Pointer,
                                      '%s *' % dtype.ctype)
def define_shift_register(*args, **kwargs):
    """Shift registers are not implemented by the Xilinx backend."""
    raise NotImplementedError("Xilinx shift registers NYI")
@staticmethod
def make_vector_type(dtype, is_const):
    """Render the C type name for ``dtype``, optionally const-qualified."""
    qualifier = "const " if is_const else ""
    return "{}{}".format(qualifier, dtype.ctype)
@staticmethod
def make_kernel_argument(data,
                         var_name,
                         is_output,
                         with_vectorization,
                         interface_id=None):
    """Render one entry of the kernel signature.

    Arrays become raw pointers with an ``_in``/``_out`` suffix (plus an
    optional interface id); all other data types delegate to their own
    ``as_arg`` rendering.
    """
    if not isinstance(data, dace.data.Array):
        return data.as_arg(with_types=True, name=var_name)
    # Arrays are passed as separate input/output pointers
    name = var_name + "_" + ("out" if is_output else "in")
    if interface_id is not None:
        name += "_%d" % interface_id
    # Optionally strip vectorization and use the scalar base type
    dtype = data.dtype if with_vectorization else data.dtype.base_type
    return "{} *{}".format(dtype.ctype, name)
def generate_unroll_loop_pre(self, kernel_stream, factor, sdfg, state_id,
                             node):
    # Nothing to emit before the loop: Xilinx HLS places the UNROLL pragma
    # inside the loop body (see generate_unroll_loop_post).
    pass
@staticmethod
def generate_unroll_loop_post(kernel_stream, factor, sdfg, state_id, node):
    """Emit the HLS UNROLL pragma, with an explicit factor when given."""
    if factor is None:
        pragma = "#pragma HLS UNROLL"
    else:
        pragma = "#pragma HLS UNROLL factor={}".format(factor)
    kernel_stream.write(pragma, sdfg, state_id, node)
@staticmethod
def generate_pipeline_loop_pre(kernel_stream, sdfg, state_id, node):
    # Nothing to emit before the loop: the PIPELINE pragma goes inside the
    # loop body (see generate_pipeline_loop_post).
    pass
@staticmethod
def generate_pipeline_loop_post(kernel_stream, sdfg, state_id, node):
    # Request an initiation interval of 1 for the enclosing loop.
    kernel_stream.write("#pragma HLS PIPELINE II=1", sdfg, state_id, node)
@staticmethod
def generate_flatten_loop_pre(kernel_stream, sdfg, state_id, node):
    # Nothing to emit before the loop: the LOOP_FLATTEN pragma goes inside
    # the loop body (see generate_flatten_loop_post).
    pass
@staticmethod
def generate_flatten_loop_post(kernel_stream, sdfg, state_id, node):
    # Ask HLS to flatten the nested loop into its parent.
    kernel_stream.write("#pragma HLS LOOP_FLATTEN")
def generate_nsdfg_header(self, sdfg, state, state_id, node,
                          memlet_references, sdfg_label):
    """Build the C++ function header for a nested SDFG.

    The function is force-inlined so it adds no call hierarchy in HLS.
    Arguments are the memlet references followed by the node's mapped
    symbols (sorted by name, excluding SDFG constants).
    """
    # TODO: Use a single method for GPU kernels, FPGA modules, and NSDFGs
    arg_list = ['{} {}'.format(atype, aname)
                for atype, aname, _ in memlet_references]
    for aname in sorted(node.symbol_mapping.keys()):
        if aname not in sdfg.constants:
            arg_list.append(node.sdfg.symbols[aname].as_arg(aname))
    return 'void {}({}) {{\n#pragma HLS INLINE'.format(
        sdfg_label, ', '.join(arg_list))
def write_and_resolve_expr(self,
                           sdfg,
                           memlet,
                           nc,
                           outname,
                           inname,
                           indices=None,
                           dtype=None):
    """
    Emits a conflict resolution call from a memlet.

    Detects the reduction type encoded in ``memlet.wcr`` and emits a call
    to the matching ``dace::xilinx_wcr_fixed`` (or ``..._vec`` for vector
    types) reducer on the target pointer. Custom reductions are not yet
    supported and raise ``NotImplementedError``.
    """
    redtype = operations.detect_reduction_type(memlet.wcr)
    # Build the destination pointer expression, appending a string index
    # verbatim if one was given.
    if isinstance(indices, str):
        ptr = '%s + %s' % (cpp.cpp_ptr_expr(sdfg, memlet), indices)
    else:
        ptr = cpp.cpp_ptr_expr(sdfg, memlet, indices=indices)
    if isinstance(dtype, dtypes.pointer):
        dtype = dtype.base_type

    # Special call for detected reduction types
    if redtype != dtypes.ReductionType.Custom:
        # e.g. "ReductionType.Sum" -> "dace::ReductionType::Sum"
        credtype = "dace::ReductionType::" + str(
            redtype)[str(redtype).find(".") + 1:]
        if isinstance(dtype, dtypes.vector):
            return (f'dace::xilinx_wcr_fixed_vec<{credtype}, '
                    f'{dtype.vtype.ctype}, {dtype.veclen}>::reduce('
                    f'{ptr}, {inname})')
        return (
            f'dace::xilinx_wcr_fixed<{credtype}, {dtype.ctype}>::reduce('
            f'{ptr}, {inname})')

    # General reduction
    raise NotImplementedError('General reductions not yet implemented')
@staticmethod
def make_read(defined_type, dtype, var_name, expr, index, is_pack,
              packing_factor):
    """Emit the C++ expression that reads a value from ``expr``.

    Streams are popped, scalars are read by name, and pointers are
    offset by ``index``. The result is wrapped in ``dace::Pack`` when
    packing or ``dace::Read`` otherwise.
    """
    if defined_type in (DefinedType.Stream, DefinedType.StreamArray):
        # Parenthesize compound expressions before appending .pop()
        source = "(" + expr + ")" if " " in expr else expr
        read_expr = "{}.pop()".format(source)
    elif defined_type == DefinedType.Scalar:
        read_expr = var_name
    elif index is not None and index != "0":
        read_expr = "{} + {}".format(expr, index)
    else:
        read_expr = expr
    if is_pack:
        return "dace::Pack<{}, {}>({})".format(dtype.base_type.ctype,
                                               packing_factor, read_expr)
    return "dace::Read<{}, {}>({})".format(dtype.base_type.ctype,
                                           dtype.veclen, read_expr)
def generate_converter(*args, **kwargs):
    # No Python-side conversion code is needed for Xilinx.
    pass  # Handled in C++
@staticmethod
def make_write(defined_type, dtype, var_name, write_expr, index, read_expr,
               wcr, is_unpack, packing_factor):
    """Emit the C++ statement(s) that write ``read_expr`` to the target.

    Streams are pushed (element-wise when unpacking); other targets use
    ``dace::Unpack`` or ``dace::Write`` on an optionally offset pointer.
    """
    if defined_type in (DefinedType.Stream, DefinedType.StreamArray):
        if defined_type == DefinedType.StreamArray:
            # Default to element 0 when no index was provided
            write_expr = "{}[{}]".format(write_expr,
                                         "0" if not index else index)
        if is_unpack:
            # One push per packed element
            pushes = ("{}.push({}[{}]);".format(write_expr, read_expr, i)
                      for i in range(packing_factor))
            return "\n".join(pushes)
        return "{}.push({});".format(write_expr, read_expr)
    if defined_type == DefinedType.Scalar:
        write_expr = var_name
    elif index and index != "0":
        write_expr = "{} + {}".format(write_expr, index)
    if is_unpack:
        return "dace::Unpack<{}, {}>({}, {});".format(
            dtype.base_type.ctype, packing_factor, read_expr, write_expr)
    # TODO: Temporary hack because we don't have the output
    # vector length.
    veclen = max(dtype.veclen, packing_factor)
    return "dace::Write<{}, {}>({}, {});".format(dtype.base_type.ctype,
                                                 veclen, write_expr,
                                                 read_expr)
def make_shift_register_write(self, defined_type, dtype, var_name,
                              write_expr, index, read_expr, wcr, is_unpack,
                              packing_factor, sdfg):
    """Shift registers are not implemented by the Xilinx backend."""
    raise NotImplementedError("Xilinx shift registers NYI")
@staticmethod
def generate_no_dependence_pre(kernel_stream,
                               sdfg,
                               state_id,
                               node,
                               var_name=None):
    # Nothing to emit before the loop: the DEPENDENCE pragma goes inside
    # the loop body (see generate_no_dependence_post).
    pass
@staticmethod
def generate_no_dependence_post(kernel_stream, sdfg, state_id, node,
                                var_name):
    '''
    Adds post loop pragma for ignoring loop carried dependencies on a given
    variable, allowing HLS to pipeline the loop more aggressively.
    '''
    kernel_stream.write(
        "#pragma HLS DEPENDENCE variable={} false".format(var_name), sdfg,
        state_id, node)
def generate_kernel_boilerplate_pre(self, sdfg, state_id, kernel_name,
                                    global_data_parameters,
                                    scalar_parameters, symbol_parameters,
                                    module_stream, kernel_stream):
    """Emit the device-side kernel prologue.

    Writes the device header includes, the kernel's C signature, the
    per-argument AXI interface pragmas (recording memory-bank assignments
    for the compiler), and the DATAFLOW setup.
    """
    # Write header
    module_stream.write(
        """#include <dace/xilinx/device.h>
#include <dace/math.h>
#include <dace/complex.h>""", sdfg)
    self._frame.generate_fileheader(sdfg, module_stream, 'xilinx_device')
    module_stream.write("\n", sdfg)

    # NOTE(review): symbol_params is built but never used below — confirm
    # whether it is dead code.
    symbol_params = [
        v.as_arg(with_types=True, name=k)
        for k, v in symbol_parameters.items()
    ]
    arrays = list(sorted(global_data_parameters, key=lambda t: t[1]))
    scalars = scalar_parameters + list(symbol_parameters.items())
    scalars = list(sorted(scalars, key=lambda t: t[0]))

    # Build kernel signature
    array_args = []
    for is_output, dataname, data, interface in arrays:
        kernel_arg = self.make_kernel_argument(data, dataname, is_output,
                                               True, interface)
        if kernel_arg:
            array_args.append(kernel_arg)
    kernel_args = array_args + [
        v.as_arg(with_types=True, name=k) for k, v in scalars
    ]
    kernel_args = dace.dtypes.deduplicate(kernel_args)

    # Write kernel signature
    kernel_stream.write(
        "DACE_EXPORTED void {}({}) {{\n".format(kernel_name,
                                                ', '.join(kernel_args)),
        sdfg, state_id)

    # Insert interface pragmas: each pointer argument gets its own m_axi
    # bundle named gmem<N>.
    num_mapped_args = 0
    for arg, (_, dataname, _, _) in zip(array_args, arrays):
        var_name = re.findall(r"\w+", arg)[-1]
        if "*" in arg:
            interface_name = "gmem{}".format(num_mapped_args)
            kernel_stream.write(
                "#pragma HLS INTERFACE m_axi port={} "
                "offset=slave bundle={}".format(var_name, interface_name),
                sdfg, state_id)
            # Map this interface to the corresponding location
            # specification to be passed to the Xilinx compiler
            assignment = self._bank_assignments[(dataname, sdfg)] if (
                dataname, sdfg) in self._bank_assignments else None
            if assignment is not None:
                mem_type, mem_bank = assignment
                self._interface_assignments[(kernel_name,
                                             interface_name)] = (mem_type,
                                                                 mem_bank)
            else:
                self._interface_assignments[(kernel_name,
                                             interface_name)] = None
            num_mapped_args += 1

    # All arguments (and the return) are exposed over the s_axilite
    # control bundle.
    for arg in kernel_args + ["return"]:
        var_name = re.findall(r"\w+", arg)[-1]
        kernel_stream.write(
            "#pragma HLS INTERFACE s_axilite port={} bundle=control".format(
                var_name))

    # TODO: add special case if there's only one module for niceness
    kernel_stream.write("\n#pragma HLS DATAFLOW")
    kernel_stream.write("\nHLSLIB_DATAFLOW_INIT();")
@staticmethod
def generate_kernel_boilerplate_post(kernel_stream, sdfg, state_id):
    # Close the dataflow region and the kernel function body.
    kernel_stream.write("HLSLIB_DATAFLOW_FINALIZE();\n}\n", sdfg, state_id)
def generate_host_function_body(self, sdfg, state, kernel_name, parameters,
                                symbol_parameters, kernel_stream):
    """Emit the host-side code that launches the kernel and times it.

    Collects all argument names (arrays first, then deduplicated
    scalars/symbols) and writes a ``MakeKernel(...).ExecuteTask()`` call.
    """
    # Just collect all variable names for calling the kernel function
    added = set()
    arrays = list(
        sorted([
            p for p in parameters if not isinstance(p[2], dace.data.Scalar)
        ],
               key=lambda t: t[1]))
    scalars = [p for p in parameters if isinstance(p[2], dace.data.Scalar)]
    scalars += ((False, k, v, None) for k, v in symbol_parameters.items())
    scalars = dace.dtypes.deduplicate(sorted(scalars, key=lambda t: t[1]))
    kernel_args = []
    for _, name, p, _ in itertools.chain(arrays, scalars):
        # Arrays may repeat (separate in/out interfaces); scalars must not
        if not isinstance(p, dace.data.Array) and name in added:
            continue
        added.add(name)
        kernel_args.append(p.as_arg(False, name=name))

    kernel_function_name = kernel_name
    # FIX: removed unused local `kernel_file_name` — the .xclbin name is
    # resolved in get_generated_codeobjects, not here.

    kernel_stream.write(
        """\
auto kernel = program.MakeKernel({kernel_function_name}, "{kernel_function_name}", {kernel_args});
const std::pair<double, double> elapsed = kernel.ExecuteTask();
std::cout << "Kernel executed in " << elapsed.second << " seconds.\\n" << std::flush;
}}""".format(kernel_function_name=kernel_function_name,
             kernel_args=", ".join(kernel_args)), sdfg, sdfg.node_id(state))
def generate_module(self, sdfg, state, name, subgraph, parameters,
                    symbol_parameters, module_stream, entry_stream,
                    host_stream):
    """Generates a module that will run as a dataflow function in the FPGA
    kernel.

    Emits (a) a caller in the top-level kernel via
    ``HLSLIB_DATAFLOW_FUNCTION``, wrapped in unrolled loops when the
    module's top scope is an unrolled map, and (b) the module function
    itself, with ArrayInterface wrappers, local transient allocations,
    and the dispatched subgraph body.
    """
    state_id = sdfg.node_id(state)
    # NOTE(review): dfg is computed but not referenced below — confirm
    # whether it is dead code.
    dfg = sdfg.nodes()[state_id]

    kernel_args_call = []
    kernel_args_module = []
    added = set()
    parameters = list(sorted(parameters, key=lambda t: t[1]))
    arrays = dtypes.deduplicate(
        [p for p in parameters if not isinstance(p[2], dace.data.Scalar)])
    scalars = [p for p in parameters if isinstance(p[2], dace.data.Scalar)]
    scalars += ((False, k, v, None) for k, v in symbol_parameters.items())
    scalars = dace.dtypes.deduplicate(sorted(scalars, key=lambda t: t[1]))
    for is_output, pname, p, interface_id in itertools.chain(
            arrays, scalars):
        if isinstance(p, dace.data.Array):
            arr_name = "{}_{}".format(pname, "out" if is_output else "in")
            # Add interface ID to called module, but not to the module
            # arguments
            argname = arr_name
            if interface_id is not None:
                argname = arr_name + "_%d" % interface_id

            kernel_args_call.append(argname)
            dtype = p.dtype
            kernel_args_module.append("{} {}*{}".format(
                dtype.ctype, "const " if not is_output else "", arr_name))
        else:
            # Don't make duplicate arguments for other types than arrays
            if pname in added:
                continue
            added.add(pname)
            if isinstance(p, dace.data.Stream):
                kernel_args_call.append(
                    p.as_arg(with_types=False, name=pname))
                if p.is_stream_array():
                    kernel_args_module.append(
                        "dace::FIFO<{}, {}, {}> {}[{}]".format(
                            p.dtype.base_type.ctype, p.veclen,
                            p.buffer_size, pname, p.size_string()))
                else:
                    kernel_args_module.append(
                        "dace::FIFO<{}, {}, {}> &{}".format(
                            p.dtype.base_type.ctype, p.veclen,
                            p.buffer_size, pname))
            else:
                kernel_args_call.append(
                    p.as_arg(with_types=False, name=pname))
                kernel_args_module.append(
                    p.as_arg(with_types=True, name=pname))

    # create a unique module name to prevent name clashes
    module_function_name = f"module_{name}_{sdfg.sdfg_id}"

    # Unrolling processing elements: if there first scope of the subgraph
    # is an unrolled map, generate a processing element for each iteration
    scope_children = subgraph.scope_children()
    top_scopes = [
        n for n in scope_children[None]
        if isinstance(n, dace.sdfg.nodes.EntryNode)
    ]
    unrolled_loops = 0
    if len(top_scopes) == 1:
        scope = top_scopes[0]
        if scope.unroll:
            self._unrolled_pes.add(scope.map)
            kernel_args_call += ", ".join(scope.map.params)
            kernel_args_module += ["int " + p for p in scope.params]
            for p, r in zip(scope.map.params, scope.map.range):
                if len(r) > 3:
                    raise cgx.CodegenError("Strided unroll not supported")
                entry_stream.write(
                    "for (size_t {param} = {begin}; {param} < {end}; "
                    "{param} += {increment}) {{\n#pragma HLS UNROLL".format(
                        param=p, begin=r[0], end=r[1] + 1, increment=r[2]))
                unrolled_loops += 1

    # Generate caller code in top-level function
    entry_stream.write(
        "HLSLIB_DATAFLOW_FUNCTION({}, {});".format(
            module_function_name, ", ".join(kernel_args_call)), sdfg,
        state_id)

    for _ in range(unrolled_loops):
        entry_stream.write("}")

    # ----------------------------------------------------------------------
    # Generate kernel code
    # ----------------------------------------------------------------------

    self._dispatcher.defined_vars.enter_scope(subgraph)

    module_body_stream = CodeIOStream()

    module_body_stream.write(
        "void {}({}) {{".format(module_function_name,
                                ", ".join(kernel_args_module)), sdfg,
        state_id)

    # Construct ArrayInterface wrappers to pack input and output pointers
    # to the same global array
    in_args = {
        argname
        for out, argname, arg, _ in parameters
        if isinstance(arg, dace.data.Array)
        and arg.storage == dace.dtypes.StorageType.FPGA_Global and not out
    }
    out_args = {
        argname
        for out, argname, arg, _ in parameters
        if isinstance(arg, dace.data.Array)
        and arg.storage == dace.dtypes.StorageType.FPGA_Global and out
    }
    if len(in_args) > 0 or len(out_args) > 0:
        # Add ArrayInterface objects to wrap input and output pointers to
        # the same array
        module_body_stream.write("\n")
        interfaces_added = set()
        for _, argname, arg, _ in parameters:
            if argname in interfaces_added:
                continue
            interfaces_added.add(argname)
            has_in_ptr = argname in in_args
            has_out_ptr = argname in out_args
            if not has_in_ptr and not has_out_ptr:
                continue
            in_ptr = ("{}_in".format(argname) if has_in_ptr else "nullptr")
            out_ptr = ("{}_out".format(argname)
                       if has_out_ptr else "nullptr")
            ctype = "dace::ArrayInterface<{}>".format(arg.dtype.ctype)
            module_body_stream.write("{} {}({}, {});".format(
                ctype, argname, in_ptr, out_ptr))
            self._dispatcher.defined_vars.add(argname,
                                              DefinedType.ArrayInterface,
                                              ctype,
                                              allow_shadowing=True)
        module_body_stream.write("\n")

    # Allocate local transients
    data_to_allocate = (set(subgraph.top_level_transients()) -
                        set(sdfg.shared_transients()) -
                        set([p[1] for p in parameters]))
    allocated = set()
    for node in subgraph.nodes():
        if not isinstance(node, dace.sdfg.nodes.AccessNode):
            continue
        if node.data not in data_to_allocate or node.data in allocated:
            continue
        allocated.add(node.data)
        self._dispatcher.dispatch_allocate(sdfg, state, state_id, node,
                                           module_stream,
                                           module_body_stream)

    self._dispatcher.dispatch_subgraph(sdfg,
                                       subgraph,
                                       state_id,
                                       module_stream,
                                       module_body_stream,
                                       skip_entry_node=False)

    module_stream.write(module_body_stream.getvalue(), sdfg, state_id)
    module_stream.write("}\n\n")

    self._dispatcher.defined_vars.exit_scope(subgraph)
    def generate_kernel_internal(self, sdfg, state, kernel_name, subgraphs,
                                 kernel_stream, function_stream,
                                 callsite_stream):
        """Main entry function for generating a Xilinx kernel.

        Emits host-side wrapper code (header, boilerplate, function body)
        into an internal stream stored in ``self._host_codes`` for the
        compilation phase, then emits the device-side kernel code (module
        definitions followed by the kernel entry function) into
        ``kernel_stream``.

        :param sdfg: The SDFG being generated.
        :param state: The state to generate a kernel for.
        :param kernel_name: Name used for both the kernel and its host wrapper.
        :param subgraphs: Connected subgraphs of ``state``, one module each.
        :param kernel_stream: Output stream for the device code.
        :param function_stream: Output stream for host-side declarations.
        :param callsite_stream: Output stream at the host call site.
        """
        (global_data_parameters, top_level_local_data, subgraph_parameters,
         scalar_parameters, symbol_parameters,
         nested_global_transients) = self.make_parameters(
             sdfg, state, subgraphs)
        # Scalar parameters are never output
        sc_parameters = [(False, pname, param, None)
                         for pname, param in scalar_parameters]
        host_code_stream = CodeIOStream()
        # Generate host code
        self.generate_host_header(sdfg, kernel_name,
                                  global_data_parameters + sc_parameters,
                                  symbol_parameters, host_code_stream)
        self.generate_host_function_boilerplate(
            sdfg, state, kernel_name, global_data_parameters + sc_parameters,
            symbol_parameters, nested_global_transients, host_code_stream,
            function_stream, callsite_stream)
        self.generate_host_function_body(sdfg, state, kernel_name,
                                         global_data_parameters + sc_parameters,
                                         symbol_parameters, host_code_stream)
        # Store code to be passed to compilation phase
        self._host_codes.append((kernel_name, host_code_stream.getvalue()))
        # Now we write the device code
        module_stream = CodeIOStream()
        entry_stream = CodeIOStream()
        state_id = sdfg.node_id(state)
        self.generate_kernel_boilerplate_pre(sdfg, state_id, kernel_name,
                                             global_data_parameters,
                                             scalar_parameters,
                                             symbol_parameters, module_stream,
                                             entry_stream)
        # Emit allocations for transients that live at the kernel top level
        for node in top_level_local_data:
            self._dispatcher.dispatch_allocate(sdfg, state, state_id, node,
                                               module_stream, entry_stream)
        self.generate_modules(sdfg, state, kernel_name, subgraphs,
                              subgraph_parameters, sc_parameters,
                              symbol_parameters, module_stream, entry_stream,
                              host_code_stream)
        # Modules (definitions) must precede the entry function that calls them
        kernel_stream.write(module_stream.getvalue())
        kernel_stream.write(entry_stream.getvalue())
        self.generate_kernel_boilerplate_post(kernel_stream, sdfg, state_id)
def generate_host_header(self, sdfg, kernel_function_name, parameters,
symbol_parameters, host_code_stream):
arrays = [
p for p in parameters if not isinstance(p[2], dace.data.Scalar)
]
arrays = list(sorted(arrays, key=lambda t: t[1]))
scalars = [p for p in parameters if isinstance(p[2], dace.data.Scalar)]
scalars += ((False, k, v, None) for k, v in symbol_parameters.items())
scalars = list(sorted(scalars, key=lambda t: t[1]))
kernel_args = []
seen = set()
for is_output, name, arg, if_id in itertools.chain(arrays, scalars):
if isinstance(arg, dace.data.Array):
argname = name + ("_out" if is_output else "_in")
if if_id is not None:
argname += "_%d" % if_id
kernel_args.append(arg.as_arg(with_types=True, name=argname))
else:
if name in seen:
continue
seen.add(name)
kernel_args.append(arg.as_arg(with_types=True, name=name))
host_code_stream.write(
"""\
// Signature of kernel function (with raw pointers) for argument matching
DACE_EXPORTED void {kernel_function_name}({kernel_args});\n\n""".format(
kernel_function_name=kernel_function_name,
kernel_args=", ".join(kernel_args)), sdfg)
def generate_memlet_definition(self, sdfg, dfg, state_id, src_node,
dst_node, edge, callsite_stream):
memlet = edge.data
if (self._dispatcher.defined_vars.get(
memlet.data)[0] == DefinedType.FPGA_ShiftRegister):
raise NotImplementedError("Shift register for Xilinx NYI")
else:
self._cpu_codegen.copy_memory(sdfg, dfg, state_id, src_node,
dst_node, edge, None, callsite_stream)
    def unparse_tasklet(self, *args, **kwargs):
        """Unparse tasklet code via the shared C++ backend.

        Forwards all arguments unchanged, additionally passing this object
        as ``codegen`` so the backend can call back into the Xilinx code
        generator for dialect-specific constructs.
        """
        # Pass this object for callbacks into the Xilinx codegen
        cpp.unparse_tasklet(*args, codegen=self, **kwargs)
def make_ptr_assignment(self, src_expr, src_dtype, dst_expr, dst_dtype):
"""
Write source to destination, where the source is a scalar, and the
destination is a pointer.
:return: String of C++ performing the write.
"""
return self.make_write(DefinedType.Pointer, dst_dtype, None,
"&" + dst_expr, None, src_expr, None,
dst_dtype.veclen < src_dtype.veclen,
src_dtype.veclen)
| 44.224414 | 100 | 0.540122 |
0b75d5582d94244453d144d37690813f25e53bab | 1,612 | py | Python | src/poetry/vcs/git/system.py | yokomotod/poetry | 4838c9fe9645c62353be569a96765c693f03f1a3 | [
"MIT"
] | null | null | null | src/poetry/vcs/git/system.py | yokomotod/poetry | 4838c9fe9645c62353be569a96765c693f03f1a3 | [
"MIT"
] | null | null | null | src/poetry/vcs/git/system.py | yokomotod/poetry | 4838c9fe9645c62353be569a96765c693f03f1a3 | [
"MIT"
] | null | null | null | from __future__ import annotations
import subprocess
from typing import TYPE_CHECKING
from dulwich.client import find_git_command
if TYPE_CHECKING:
from pathlib import Path
from typing import Any
class SystemGit:
@classmethod
def clone(cls, repository: str, dest: Path) -> str:
cls._check_parameter(repository)
return cls.run("clone", "--recurse-submodules", "--", repository, str(dest))
@classmethod
def checkout(cls, rev: str, target: Path | None = None) -> str:
args = []
if target:
args += [
"--git-dir",
(target / ".git").as_posix(),
"--work-tree",
target.as_posix(),
]
cls._check_parameter(rev)
args += ["checkout", rev]
return cls.run(*args)
@staticmethod
def run(*args: Any, **kwargs: Any) -> str:
folder = kwargs.pop("folder", None)
if folder:
args = (
"--git-dir",
(folder / ".git").as_posix(),
"--work-tree",
folder.as_posix(),
) + args
return (
subprocess.check_output(
find_git_command() + list(args), stderr=subprocess.STDOUT
)
.decode()
.strip()
)
@staticmethod
def _check_parameter(parameter: str) -> None:
"""
Checks a git parameter to avoid unwanted code execution.
"""
if parameter.strip().startswith("-"):
raise RuntimeError(f"Invalid Git parameter: {parameter}")
| 24.424242 | 84 | 0.527295 |
2641ab438e7ea32df4e425792e560d5b18cb0d22 | 732 | py | Python | spectra/utilities.py | kristianeschenburg/spectra | 53304458a9cd265b40426f50d0aa7114627982d4 | [
"BSD-3-Clause"
] | null | null | null | spectra/utilities.py | kristianeschenburg/spectra | 53304458a9cd265b40426f50d0aa7114627982d4 | [
"BSD-3-Clause"
] | null | null | null | spectra/utilities.py | kristianeschenburg/spectra | 53304458a9cd265b40426f50d0aa7114627982d4 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
def filter(V, F, inds):
    """
    Filter vertices and faces by list of indices.

    Keeps only the vertices listed in ``inds`` and the faces whose corners
    all survive the filtering; surviving faces are re-indexed into the
    reduced vertex array.

    Parameters:
    - - - - -
    V: float, array
        x,y,z coordinates of the surface mesh
    F: int, array
        triangles of the surface mesh
    inds: int, list
        list of indices to keep (sorted in place)

    Returns:
    - - - - -
    [V, gface]: filtered vertex array and re-indexed face array; the face
        array is empty with shape (0, 3) when no face survives.
    """

    inds.sort()
    indmap = dict(zip(inds, np.arange(len(inds))))

    # Set membership is O(1) per corner, vs. O(len(inds)) with the list.
    keep = set(inds)

    V = V[inds, :]

    gface = []
    for face in F:
        # Keep a face only if all of its corners are retained.
        if all(v in keep for v in face):
            gface.append([indmap[v] for v in face])

    if not gface:
        # np.vstack raises on an empty sequence; return an empty face
        # array instead so meshes with no surviving faces are usable.
        return [V, np.empty((0, 3), dtype=int)]

    # np.row_stack was removed in NumPy 2.0; vstack is identical here.
    return [V, np.vstack(gface)]
| 19.783784 | 50 | 0.505464 |
eb0fe5be64500d64bb0fa7519437215cf1559253 | 2,659 | py | Python | pysnmp/BENU-GENERAL-NOTIFICATION-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/BENU-GENERAL-NOTIFICATION-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/BENU-GENERAL-NOTIFICATION-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module BENU-GENERAL-NOTIFICATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BENU-GENERAL-NOTIFICATION-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:20:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Auto-generated pysnmp MIB module; ``mibBuilder`` is injected into the
# module namespace by the pysnmp loader at import time.
# Pull base ASN.1 / SMI symbol classes from their source MIB modules.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
benuPlatform, = mibBuilder.importSymbols("BENU-PLATFORM-MIB", "benuPlatform")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, NotificationType, Unsigned32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, Counter32, iso, Bits, ObjectIdentity, mib_2, Counter64, TimeTicks, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "NotificationType", "Unsigned32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "Counter32", "iso", "Bits", "ObjectIdentity", "mib-2", "Counter64", "TimeTicks", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity: enterprise OID 1.3.6.1.4.1.39406 (Benu Networks), subtree .1.4
benuGeneralNotif = ModuleIdentity((1, 3, 6, 1, 4, 1, 39406, 1, 4))
benuGeneralNotif.setRevisions(('2014-12-15 00:00',))
if mibBuilder.loadTexts: benuGeneralNotif.setLastUpdated('201412150000Z')
if mibBuilder.loadTexts: benuGeneralNotif.setOrganization('Benu Networks')
# Object and trap subtrees under the module identity
bGeneralNotifyMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 39406, 1, 4, 1))
bGeneralNotifyMIBTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 39406, 1, 4, 0))
# Notifications (traps) for SNMP agent shutdown/restart
bNotifyAgentShutDown = NotificationType((1, 3, 6, 1, 4, 1, 39406, 1, 4, 0, 1))
if mibBuilder.loadTexts: bNotifyAgentShutDown.setStatus('current')
bNotifyAgentRestart = NotificationType((1, 3, 6, 1, 4, 1, 39406, 1, 4, 0, 2))
if mibBuilder.loadTexts: bNotifyAgentRestart.setStatus('current')
# Make the defined symbols importable by other MIB modules
mibBuilder.exportSymbols("BENU-GENERAL-NOTIFICATION-MIB", bNotifyAgentShutDown=bNotifyAgentShutDown, bGeneralNotifyMIBObjects=bGeneralNotifyMIBObjects, benuGeneralNotif=benuGeneralNotif, bNotifyAgentRestart=bNotifyAgentRestart, PYSNMP_MODULE_ID=benuGeneralNotif, bGeneralNotifyMIBTraps=bGeneralNotifyMIBTraps)
| 102.269231 | 493 | 0.790523 |
e0471af5fb00ef2667654c253c03b5f6a5bdeb5d | 6,521 | py | Python | gameboard.py | JP-DataScienceProjects/2048AI | 77901b01414da6e2edd74e5faca70fa8b0944f00 | [
"BSD-2-Clause"
] | 1 | 2018-03-28T07:51:10.000Z | 2018-03-28T07:51:10.000Z | gameboard.py | JP-DataScienceProjects/2048AI | 77901b01414da6e2edd74e5faca70fa8b0944f00 | [
"BSD-2-Clause"
] | null | null | null | gameboard.py | JP-DataScienceProjects/2048AI | 77901b01414da6e2edd74e5faca70fa8b0944f00 | [
"BSD-2-Clause"
] | 1 | 2018-11-06T01:53:50.000Z | 2018-11-06T01:53:50.000Z | import numpy as np
from enum import Enum
from zope.event import notify
class GameStates(Enum):
    """Overall state of a 2048 game: won, lost, or still playable."""
    WIN = 1
    LOSE = 2
    IN_PROGRESS = 3
class GameActions(Enum):
    """The four moves a player can make on the board."""
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3
class OnBoardChanged():
    """Event payload published via zope.event whenever the board changes."""
    def __init__(self, board):
        # The GameBoard instance that was updated
        self.board = board
class GameBoard():
    """An n-by-n 2048 game board.

    Tracks the tile grid, the set of currently legal moves, a positional
    score (tiles near the edges score higher via ``bonus_mask``), and the
    win/lose/in-progress state.  Observers are notified through
    zope.event whenever the board changes.
    """

    def __init__(self, n, max_tile=2048):
        self.n = n
        self.max_tile = max_tile
        # FIX: np.int was removed in NumPy 1.24+; the builtin int gives the
        # same platform-default integer dtype.
        self.board = np.zeros((n, n), dtype=int)
        self._game_state = GameStates.IN_PROGRESS
        self.action_set = set()
        self._free_tiles = n ** 2
        self._largest_tile_placed = 2
        self._score = 0
        # Positional weight mask: 0 at the center cross, 1 at the corners
        center = (self.n - 1) / 2
        self.bonus_mask = np.array([(i - center) * (j - center) for i in range(self.n) for j in range(self.n)]).reshape(self.n, self.n)
        self.bonus_mask = np.abs(self.bonus_mask) / np.max(self.bonus_mask)
        # Standard 2048 opening: two tiles of value 2
        self.add_tile(value=2)
        self.add_tile(value=2)
        self.on_board_updated()

    def __getitem__(self, item):
        return self.board[item]

    @property
    def game_state(self):
        return self._game_state

    @property
    def largest_tile_placed(self):
        return self._largest_tile_placed

    @property
    def actions(self):
        """Set of GameActions currently legal on this board."""
        return self.action_set

    @property
    def score(self):
        #return self._score + self._free_tiles
        return self._score

    @property
    def free_tiles(self):
        return self._free_tiles

    def on_board_updated(self):
        """Recompute derived state and notify observers after any change."""
        self.update_action_set()
        self.calc_score()
        notify(OnBoardChanged(self))

    def update_action_set(self):
        """
        Updates the set of available actions that can be taken on this board
        This function iterates over the matrix only once but checks both rows and columns
        for available actions simultaneously by interchanging the indices i,j (exploits the
        fact that the board is always square)
        """
        self.action_set.clear()
        for i in range(self.n):
            h_zeroSeen, v_zeroSeen, v_digitSeen, h_digitSeen = False, False, False, False
            for j in range(self.n):
                if self.board[i][j] >= self.max_tile:
                    # Reaching the maximum tile wins immediately
                    self._game_state = GameStates.WIN
                    self.action_set.clear()
                    return
                # User can move tiles to the right if first a digit then a zero are seen when moving left-right in a row
                if self.board[i][j] == 0:
                    h_zeroSeen = True
                    if h_digitSeen: self.action_set.add(GameActions.RIGHT)
                # User can move tiles to the left if first a zero then a digit are seen when moving left-right in a row
                if self.board[i][j] != 0:
                    h_digitSeen = True
                    if h_zeroSeen: self.action_set.add(GameActions.LEFT)
                # If two adjacent horizontal tiles have the same value, either a left or right action can be performed
                if (j < self.n - 1 and self.board[i][j] == self.board[i][j+1]): self.action_set.update([GameActions.LEFT, GameActions.RIGHT])
                # User can move tiles down if first a digit then a zero are seen when moving top-bottom in a column
                if self.board[j][i] == 0:
                    v_zeroSeen = True
                    if v_digitSeen: self.action_set.add(GameActions.DOWN)
                # User can move tiles up if first a zero then a digit are seen when moving top-bottom in a column
                if self.board[j][i] != 0:
                    v_digitSeen = True
                    if v_zeroSeen: self.action_set.add(GameActions.UP)
                # If two adjacent vertical tiles have the same value, either an up or down action can be performed
                if (j < self.n - 1 and self.board[j][i] == self.board[j+1][i]): self.action_set.update([GameActions.UP, GameActions.DOWN])
        # No legal actions left means the game is lost
        self._game_state = GameStates.LOSE if len(self.action_set) <= 0 else GameStates.IN_PROGRESS

    def add_tile(self, value=None):
        """Place a tile (given value, or randomly 2/4) on a random empty cell.

        NOTE: loops until an empty cell is found, so the caller must ensure
        at least one free tile exists.
        """
        found = False
        while not found:
            i, j = np.random.randint(0, len(self.board), 2)
            found = (self.board[i][j] == 0)
        self.board[i][j] = value if isinstance(value, int) else np.random.randint(1, 3) * 2
        self._free_tiles -= 1

    def compress(self):
        """Slide all non-zero tiles of every row to the left.

        Returns True if any tile moved.

        FIX: the original tracked the next free slot as the position of the
        tile it had just moved (``newindex = j``), which skipped interior
        zeros — e.g. [0,0,2,2] compressed to [2,0,2,0], preventing merge()
        from combining the pair.  A simple write pointer fixes this.
        """
        change_flag = False
        for i in range(self.n):
            write = 0  # next slot to fill in row i
            for j in range(self.n):
                if self.board[i][j] == 0:
                    continue
                if write != j:
                    self.board[i][write] = self.board[i][j]
                    self.board[i][j] = 0
                    change_flag = True
                write += 1
        return change_flag

    def merge(self):
        """Merge equal adjacent tiles (left tile doubles, right becomes 0)."""
        for i in range(self.n):
            for j in range(self.n - 1):
                if self.board[i][j] == 0 or self.board[i][j] != self.board[i][j + 1]: continue
                self.board[i][j] *= 2
                self.board[i][j + 1] = 0
                self._free_tiles += 1
                self._largest_tile_placed = max(self.board[i][j], self._largest_tile_placed)
                #self._score += self.board[i][j]
                #self._score += self.board[i][j] // 4
                #self._score += int(np.log2(self.board[i][j])) - 1

    def calc_score(self):
        """Score = sum of tile values weighted by their positional bonus."""
        self._score = int(np.sum(self.bonus_mask * self.board))

    def make_move(self, action):
        """Apply a legal action, spawn a new tile, and refresh board state."""
        if not action in self.action_set: return
        {GameActions.UP: self.up, GameActions.DOWN: self.down, GameActions.LEFT: self.left, GameActions.RIGHT: self.right}[action]()
        self.add_tile()
        self.on_board_updated()
        #print('Score: {0}, Remaining tiles: {1}'.format(self.score, self._free_tiles))

    def up(self):
        # Rotate so "up" becomes "left", act, rotate back
        self.board = np.rot90(self.board, axes=(0, 1))
        self.perform_action()
        self.board = np.rot90(self.board, axes=(1, 0))

    def down(self):
        self.board = np.rot90(self.board, axes=(1, 0))
        self.perform_action()
        self.board = np.rot90(self.board, axes=(0, 1))

    def left(self):
        self.perform_action()

    def right(self):
        # Mirror so "right" becomes "left", act, mirror back
        self.board = np.flip(self.board, axis=1)
        self.perform_action()
        self.board = np.flip(self.board, axis=1)

    def perform_action(self):
        # Compress, merge pairs, then compress again to close merge gaps
        self.compress()
        self.merge()
        self.compress()
| 36.227778 | 145 | 0.571385 |
2944aeb90e665243ae19bfac98b0a3839691e11f | 58,727 | py | Python | tlrm2e/planet.py | LittleDevil0x29A/SectorGen | 5940bc6ea30279a5efac19e770ab635dac254a1c | [
"CC0-1.0"
] | null | null | null | tlrm2e/planet.py | LittleDevil0x29A/SectorGen | 5940bc6ea30279a5efac19e770ab635dac254a1c | [
"CC0-1.0"
] | null | null | null | tlrm2e/planet.py | LittleDevil0x29A/SectorGen | 5940bc6ea30279a5efac19e770ab635dac254a1c | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv,exit
from platform import system
import random
try:
from tools.db_parse import App as XML_Parse
except:
try:
from lib.tools.db_parse import App as XML_Parse
except ModuleNotFoundError:
from testing_tools.db_parse import App as XML_Parse
# in-file settings: platform-appropriate path separator
SLASH = "\\" if system().lower().startswith("win") else "/"

# critical paths, derived from the invoked script's location
path = argv[0][:argv[0].rfind(SLASH) + 1]
config_src = "config" + SLASH + "planet.ini"
def main():
    """Smoke-test driver: generate one Planet and print a sector-file row.

    NOTE(review): ``Planet()`` is called without a ``db`` argument here, and
    ``Planet.__init__`` subscripts ``db`` unconditionally — this presumably
    raises TypeError unless a default database is injected elsewhere;
    confirm before relying on this entry point.  The ``star`` keyword is
    accepted by ``format`` but unused by the template.
    """
    planet=Planet()
    # Columns: hex location, UWP, COG, PBJ, WDITTP, extended TL, culture
    out_template="{hex:>4} - {uwp:<9} {cog:>4} {pbj:>3} {widtt:>5} {extl:>21} {culture:>3}"
    s = out_template.format( \
        hex     = planet.location_code,\
        uwp     = planet.getUWP()     ,\
        cog     = planet.getCOG()     ,\
        pbj     = planet.getPBJ()     ,\
        widtt   = planet.getWDITTP()  ,\
        extl    = planet.getExTL()    ,\
        culture = planet.getC()       ,\
        star    = ""                  \
        )
    print(s)
    pass
class Planet:
    """A generated world (planet, satellite, or gas giant) in a sector.

    Holds the UWP-style characteristics (size/atmosphere/hydrographics,
    population/government/law, tech levels) plus extended sequences (COG,
    PBJ, WDITTP, CTM) used by the sector generator.
    """
    # Extended-hex digits used for UWP-style encodings (0-9 then a-p)
    HEX_EXPANDED  =["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p"]
    # Starport class indexed by (2d6 roll - 2)
    STARPORT_CHART=["x","e","e","d","d","c","c","b","b","a","a"]
    # Generation-mode identifiers (vanilla / space opera / hard sci-fi)
    VN='vn'
    SO='so'
    HS='hs'
    def __init__(self,location="0000",parent=None,type='planet',config=config_src,mode=None,band=None,mainworld=True,populated=True,new=True,db=None,AUtoStar=None,GoldieDist=None,name="unnamed"):
        """Create a planetoid.

        :param location: 4-digit hex location code (row then column).
        :param parent: Parent body for satellites (supplies climate, etc.).
        :param type: 'planet', 'satellite', or a gas-giant code
            ('sgg'/'lgg'/'rgg' = small/large/random gas giant).
        :param config: Path (relative) to the planet.ini config file.
        :param mode: Generation mode (VN/SO/HS); read from config if None.
        :param band: Orbital band ('near'/'mid'/'far'/'rogue').
        :param db: Parsed XML database — assumed to be a nested dict with
            tradecode and culture tables (TODO confirm against db_parse).
        :param new: When True, immediately roll all characteristics.
        """
        from configparser import ConfigParser
        self.mode=mode
        self.band=band
        self.name=name
        self.isGasGiant=False
        self.isGasGiantSmall=False
        self.isGasGiantLarge=False
        # config: try the plugin location first, then two fallback paths.
        # NOTE(review): bare ``except`` also swallows KeyboardInterrupt etc.
        self.config = ConfigParser()
        try:
            with open(path+"plugins"+SLASH+"SectorGen"+SLASH+config) as f:
                self.config.read_file(f)
        except:
            try:
                with open(path+config) as f:
                    self.config.read_file(f)
            except:
                with open(path[:path[:-1].rfind(SLASH)+1]+config) as f:
                    self.config.read_file(f)
        # DEFAULT: fill unspecified mode/band from the config file
        if type=="planet" :
            if mode==None: self.mode=self.config.get("DEFAULT","mode" ).strip()
            if band==None: self.band=self.config.get("DEFAULT","orbital band").strip()
        elif type=="satellite" :
            # NOTE(review): the config value is read but never assigned —
            # looks like ``self.mode=`` is missing here; confirm intent.
            if mode==None: self.config.get("DEFAULT","mode (satellites)").strip()
        # parenting
        self.parent = parent
        # tables: size glyphs for Ring / Debris / Small satellites
        self.list_table_size_R_S = ["R","D","S"]
        # determine mode flags (HS implies SO behavior plus extras)
        self.SOpera=False
        self.HScifi=False
        if self.mode==self.SO:
            self.SOpera=True
        elif self.mode==self.HS:
            self.SOpera=True
            self.HScifi=True
        # db imports (raises if db is None — see main())
        self.TRADECODES =db[XML_Parse.TOP_LAYER]['traveller']['tradecodes']
        self.CULTURE    =db[XML_Parse.TOP_LAYER]['traveller']['culture'   ]
        self.db=db
        # save metainfo
        self.populated = populated
        self.adjacent_to =[]
        self.location_code=location
        self.row =int(self.location_code[:2])
        self.col =int(self.location_code[2:])
        self.isMainworld = mainworld
        # Gas giants: 'rgg' resolves randomly to small (5/6) or large (1/6)
        if type!=None and type.lower() in ["sgg","lgg","rgg"]:
            self.isGasGiant=True
            if type.lower()=='sgg': self.isGasGiantSmall=True
            elif type.lower()=='lgg': self.isGasGiantLarge=True
            elif type.lower()=='rgg':
                if roll(1,6)==1: self.isGasGiantLarge=True
                else: self.isGasGiantSmall=True
        self.type=type
        self.clear()
        if AUtoStar !=None: self.AUtoStar =AUtoStar
        if GoldieDist!=None: self.GoldieDist=GoldieDist
        # Inherit the Goldilocks-zone distance from the parent if unset
        if self.GoldieDist==None and self.parent!=None:
            self.GoldieDist=self.parent.GoldieDist
        if new: self.new()
    def clear(self):
        """Reset every generated characteristic to its empty/undefined value.

        Called from ``__init__`` before generation; safe to call again to
        re-roll a body from scratch.
        """
        # undefined starport
        self.starport = "X"
        # empty SAH-sequence
        self.size = 0
        self.size_str = ""
        self.atmosphere = 0
        self.hydrographics = 0
        # empty PGL-sequence
        self.population = 0
        self.government = 0
        self.law_level = 0
        # undefined tech_level
        self.tech_level = 0
        # empty COG-sequence
        self.climate = 0
        self.orbit = 0
        self.gravity = 0
        # empty PBJ-sequence
        self.population_mod = 0
        self.asteroid_belts = 0
        self.jovian_planets = 0
        # empty WDITTP-sequence
        self.law_level_weapons = 0
        self.law_level_drugs = 0
        self.law_level_information = 0
        self.law_level_technology = 0
        self.law_level_travellers = 0
        self.law_level_powers = 0
        # empty CTM-sequence
        # tECCME
        self.tech_level_civilian = 0
        self.tech_level_civilian_energy = 0
        self.tech_level_civilian_computing = 0
        self.tech_level_civilian_communication = 0
        self.tech_level_civilian_medicine = 0
        self.tech_level_civilian_environment = 0
        # tLWAS
        self.tech_level_transportation = 0
        self.tech_level_transportation_land = 0
        self.tech_level_transportation_water = 0
        self.tech_level_transportation_air = 0
        self.tech_level_transportation_space = 0
        # tPPHH
        self.tech_level_military = 0
        self.tech_level_military_personalweapons = 0
        self.tech_level_military_personalarmour = 0
        self.tech_level_military_heavyweapons = 0
        self.tech_level_military_heavyarmour = 0
        # undefined Peripheral Information
        self.trade_codes = ""
        self.travel_code = ""
        self.quirk = ""
        self.bases = "" ## GENERATION NOT IMPLEMENTED
        self.allegiance = "" ## GENERATION NOT IMPLEMENTED
        # trade information
        self.trade_number = 0 ## GENERATION NOT IMPLEMENTED
        self.imports = 0 ## GENERATION NOT IMPLEMENTED
        self.exports = 0 ## GENERATION NOT IMPLEMENTED
        self.transient_trade = 0 ## GENERATION NOT IMPLEMENTED
        self.port_size = 0 ## GENERATION NOT IMPLEMENTED
        # orbit info
        self.pos = 0.0
        self.orbital_period = 0.0
        self.rotations_per_orbit = 0.0
        self.weekly_traversal = 0.0
        self.isTideLocked = False
        # intra-system info
        self.AUtoStar = 0.0 # star_diam * self.orbit * 166
        self.GoldieDist = None
        # satelites/moons
        self.satellites = []
        # comment
        self.comment = "---"
    def new(self,from_scratch=True,distFromGZ=None,max_pop=20,max_tl=20,quirk_chance=0):
        """Generate all characteristics for this body.

        Gas giants take fixed size/atmosphere/hydrographics values from the
        config; other bodies roll the SAH/PGL/TL/law sequences.  The
        trailing ``#`` marks on the calls below indicate generation-order
        dependencies (each call relies on the ones marked to its left).

        :param from_scratch: Re-roll physical (COG/SAH/orbit) data too.
        :param distFromGZ: Distance from the Goldilocks zone (passed to
            newCOG); None derives it from band/GoldieDist.
        :param max_pop: Cap on the population digit.
        :param max_tl: Cap on all technology levels.
        :param quirk_chance: Passed through to newInfo.
        """
        if self.isGasGiant:
            self.newCOG(distFromGZ)
            if self.isGasGiantLarge: self.size=int( self.config.get("GAS GIANTS","size (lgg)") )
            elif self.isGasGiantSmall: self.size=int( self.config.get("GAS GIANTS","size (sgg)") )
            self.gravity=self.size
            self.atmosphere =int( self.config.get("GAS GIANTS","atmosphere") )
            self.hydrographics =int( self.config.get("GAS GIANTS","hydrographics") )
            self.newSat()
            self.newOrbitInfo()
        else:
            if from_scratch:
                self.newCOG(distFromGZ)
                self.newSAH() # # # Will get weird results if newCOG() is not run first
                if self.type!="satellite": self.newSat() # #
                self.newOrbitInfo() #
            if self.populated:
                self.newPGL(max_pop=max_pop) # # # # self.populated dependent
                self.newSPort() # # #
                self.newExTL(max_tl=max_tl) # #
                self.newWDITTP() #
        self.newInfo(quirk_chance)
        pass
    def newCOG(self,distFromGZ=None):
        """Roll the Climate/Orbit/Gravity sequence.

        Planets derive climate from 4d6 modified by distance from the
        Goldilocks zone; satellites inherit the parent's climate and roll
        an orbit number from their band.

        :param distFromGZ: Signed distance from the habitable zone
            (negative = closer to the star); defaulted from GoldieDist or
            the orbital band when None.
        """
        if self.type=="planet":
            # Defaults
            if distFromGZ==None:
                if self.GoldieDist!=None : distFromGZ=self.GoldieDist
                elif self.band.startswith("near" ): distFromGZ=- 100
                elif self.band.startswith("mid" ): distFromGZ= 0
                elif self.band.startswith("far" ): distFromGZ= 100
                elif self.band.startswith("rogue"): distFromGZ= 1000
            # C(O)G-sequence (habitability)
            self.climate = roll(4,6)//2-2
            self.gravity =min(max(roll(6,6)//3-7,-3),3)
            # Climate shifts grow towards either habitable-zone edge;
            # far beyond the outer edge climate pins to 0, inside the
            # inner edge it pins to 20 (before the 0..25 clamp below).
            if int( distFromGZ ) > 12 : self.climate = 0
            elif int( distFromGZ ) > 8 : self.climate-= 8 # Outer HZ Edge
            elif int( distFromGZ ) > 6 : self.climate-= 4
            elif int( distFromGZ ) > 4 : self.climate-= 2
            elif int( distFromGZ ) > 2 : self.climate-= 1
            elif int( distFromGZ ) > -1 : self.climate+= 0
            elif int( distFromGZ ) > -3 : self.climate+= 1
            elif int( distFromGZ ) > -5 : self.climate+= 2
            elif int( distFromGZ ) > -7 : self.climate+= 4
            elif int( distFromGZ ) > -11 : self.climate+= 8 # Inner HZ Edge
            else: self.climate =20
            self.climate =min(max(self.climate,0),25)
            self.orbit =int(self.AUtoStar*100)
        elif self.type=="satellite":
            # (C)OG-sequence: climate comes from the parent body
            self.climate = self.parent.climate
            if self.band.startswith("n"): self.orbit = 0+roll(1, 6)
            elif self.band.startswith("m"): self.orbit = 6+roll(1, 29)
            elif self.band.startswith("f"): self.orbit = 35+roll(1, 35)
            elif self.band.startswith("r"): self.orbit = 70+roll(1,140)
            self.gravity =min(max(roll(6,6)//3-7,-2),2)
        pass
    def newSAH(self):
        """Roll the Size/Atmosphere/Hydrographics sequence.

        Requires newCOG() to have run first (uses climate and gravity).
        Satellites may collapse to Ring ("R"), Debris ("D") or Small ("S")
        size classes; SOpera/HScifi modes apply genre-specific overrides.
        """
        if self.type=="planet":
            # SAH-sequence
            if self.band!=None:
                if self.band.startswith("near" ): self.size = roll(1,6)
                elif self.band.startswith("mid" ): self.size = roll(2,6)-2
                elif self.band.startswith("far" ): self.size = max(roll(3,6)-5,0)
                elif self.band.startswith("rogue"): self.size = roll(1,6)
            self.size_str=self.HEX_EXPANDED[max(self.size,0)]
        elif self.type=="satellite":
            # SAH-sequence; satellites cannot exceed half the parent's size
            if self.band!=None:
                if self.band.startswith("near" ): self.size = -2
                elif self.band.startswith("mid" ): self.size = roll(2,6)-5
                elif self.band.startswith("far" ): self.size = roll(3,6)-6
                elif self.band.startswith("rogue"): self.size = roll(1,6)-3
            self.size = min(self.size,self.parent.size//2)
            if self.orbit < 7: self.size = -1
            # Negative sizes map to ring/debris glyphs depending on orbit
            if self.size <=-1:
                if self.orbit <=7: self.size_str=self.list_table_size_R_S[0]
                elif self.orbit >=8: self.size_str=self.list_table_size_R_S[1]
            elif self.size <= 0: self.size_str=self.list_table_size_R_S[2]
            else:
                self.size_str=self.HEX_EXPANDED[max(self.size,0)]
            self.size = max(self.size,0)
        self.gravity += self.size
        self.gravity =min(max(self.gravity,0),12)
        self.atmosphere =max(roll(2,6)-7+self.gravity+max(self.size-8,-4),0) # using gravity instead of size
        # Rings and debris fields cannot hold an atmosphere
        if self.type=="satellite" and self.size_str.upper() in ["R","D"]: self.atmosphere=0
        # SOpera change: small worlds get trace/thin/dense extremes only
        if self.SOpera and self.size <= 4:
            if self.size <= 2:
                self.atmosphere=0
            if self.size > 2:
                if self.atmosphere <= 2:
                    self.atmosphere=0
                elif self.atmosphere > 2 and self.atmosphere <= 5:
                    self.atmosphere=1
                elif self.atmosphere > 5:
                    self.atmosphere=10
        # HScifi change (custom): chance of exotic atmosphere codes
        if self.HScifi:
            if self.atmosphere > 1 and self.atmosphere < 10 and roll(1,6)<5:
                possibilities=[10,10,10,11,11,12,12,15]
                self.atmosphere=possibilities[random.randrange(len(possibilities))]
            # NOTE(review): hydrographics is tested here but is only rolled
            # *below*, so this reads the value left over from clear()/an
            # earlier generation, and the result is then overwritten by the
            # unconditional roll that follows — confirm intended order.
            if self.hydrographics>7 and self.atmosphere==15 and roll(1,6)<4:
                self.atmosphere =15
                self.hydrographics=10
        self.hydrographics=max(roll(2,6)-7+self.size,0)
        if self.size <= 1: self.hydrographics=0
        elif self.atmosphere <= 1 or ( self.atmosphere >= 10 and self.atmosphere <= 12 ):
            self.hydrographics-=4
        # Hot climates evaporate surface water (except atmosphere 13)
        if self.atmosphere != 13:
            if self.climate > 7: self.hydrographics-=2
            if self.climate > 9: self.hydrographics-=6
        self.atmosphere =min(max(self.atmosphere,0),15)
        # SOpera change
        if self.SOpera:
            if self.size >= 3 and self.size <= 4 and self.atmosphere==10:
                self.hydrographics-=6
            if self.atmosphere <= 1:
                self.hydrographics-=6
            elif self.atmosphere in (2,3,11,12):
                self.hydrographics-=4
        self.hydrographics =min(max(self.hydrographics,0),10)
    def newSat(self):
        """Roll this body's satellites.

        Satellite count scales with gravity; near-band planets lose two.
        Each satellite gets a band from 3d6, then duplicates on the same
        orbit are purged and the survivors renamed in orbit order.
        """
        # SATELLITES
        satellites=0
        if self.size != 0:
            if self.gravity<= 4: satellites=max(roll(1,6)-4,0)
            elif self.gravity<= 6: satellites=max(roll(1,6)-3,0)
            elif self.gravity<= 8: satellites=max(roll(1,6)-2,0)
            elif self.gravity<=12: satellites=max(roll(1,6)-1,0)
            elif self.gravity<=16: satellites=max(roll(2,6)-2,0)
            elif self.gravity<=20: satellites=max(roll(2,6)+roll(1,6)//2-2,0)
            if self.band=="near" : satellites=max(satellites-2,0)
        for i in range(satellites):
            r = roll(3,6)
            if r >= 15: band="rogue"
            elif r >= 10: band="mid"
            elif r >= 6: band="far"
            elif r >= 3: band="near"
            self.satellites.append(Planet(parent=self,type="satellite",band=band,populated=False,mainworld=False,db=self.db))
        #for satellite in self.satellites:
        # print(" ",satellite.name,satellite.orbit,satellite.band)
        self.purge()
        self.name_satellites()
        pass
def name_satellites(self):
i=0
for satellite in sorted(self.satellites,key=lambda x: x.orbit):
sequential_satellite_name=self.name+" - "+int_to_roman(i+1)
satellite.name=sequential_satellite_name
i+=1
pass
    def purge(self):
        """Remove satellites that share an orbit, keeping one per orbit.

        NOTE(review): ``r = random.randrange(2)`` is computed but never
        used — presumably it was meant to pick *which* duplicate to delete;
        as written the later entry is always removed.  Also note the outer
        ``for`` iterates ``self.satellites`` while the inner loop deletes
        from it, which can skip elements — confirm both are acceptable.
        """
        for planetoid_one in self.satellites:
            i=0
            # Manual index walk so deletions don't skip the next candidate
            while i < len(self.satellites):
                if planetoid_one==self.satellites[i]:
                    i+=1
                    continue
                if planetoid_one.orbit==self.satellites[i].orbit:
                    r=random.randrange(2)
                    del(self.satellites[i])
                    #print("Purged satellite!")
                else:
                    i+=1
        pass
    def newPGL(self,max_pop=20):
        """Roll the Population/Government/Law sequence.

        Government requires population > 0; law level requires government
        > 0.  HScifi mode penalises very small/large worlds and rewards
        breathable atmospheres (5, 6 or 8).

        :param max_pop: Upper clamp for the population digit.
        """
        # PGL-sequence
        if self.populated:
            self.population = roll(2,6)-2
            # HScifi change
            if self.HScifi:
                if self.size <= 2:
                    self.population-=1
                elif self.size == 10:
                    self.population-=1
                if self.atmosphere in (5,6,8):
                    self.population+=1
                else:
                    self.population-=1
            self.population =min(max(self.population,0),max_pop)
            if self.population != 0:
                self.government =max(roll(2,6)-7+self.population,0)
                if self.government != 0:
                    self.law_level =max(roll(2,6)-7+self.government,0)
                else:
                    self.law_level =0
            else:
                self.government =0
                self.law_level =0
        else:
            self.population =0
            self.government =0
            self.law_level =0
def newSPort(self):
# starport
if self.populated and self.population > 0:
self.starport =self.STARPORT_CHART[roll(2,6)-2]
else:
self.starport ="X"
def newExTL(self,max_tl=20):
# TL calculation
if self.population > 0:
self.tech_level = roll(2,6)//2
if self.starport == "x": self.tech_level-=4
elif self.starport == "c": self.tech_level+=2
elif self.starport == "b": self.tech_level+=4
elif self.starport == "a": self.tech_level+=6
if self.size <= 1 : self.tech_level+=2
elif self.size <= 4 : self.tech_level+=1
if self.atmosphere <= 3 : self.tech_level+=1
elif self.atmosphere >= 10 : self.tech_level+=1
if self.hydrographics in (0,9) : self.tech_level+=1
elif self.hydrographics == 10 : self.tech_level+=2
if self.population >= 1 \
and self.population <= 5 : self.tech_level+=1
if self.population >= 10 : self.tech_level+=1
if self.population >= 11 : self.tech_level+=1
if self.population >= 12 : self.tech_level+=1
if self.population >= 13 : self.tech_level+=1
if self.government in (0,5) : self.tech_level+1
elif self.government == 7 : self.tech_level+=2
elif self.government == 13 : self.tech_level-=2
elif self.government == 14 : self.tech_level-=2
else:
self.tech_level =0
self.tech_level=min(self.tech_level,max_tl)
# CTM-sequence (specialized technology levels)
if self.population > 0:
# tECCME
self.tech_level_civilian = min(max(self.tech_level-7+roll(6,6)//3,0),max_tl)
self.tech_level_civilian_energy = min(max(self.tech_level_civilian-3+roll(4,6)//2//2,0),max_tl)
self.tech_level_civilian_computing = min(max(self.tech_level_civilian-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_civilian_communication = min(max(self.tech_level_civilian-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_civilian_medicine = min(max(self.tech_level_civilian-3+roll(2,6)//1//2,0),max_tl)
self.tech_level_civilian_environment = min(max(self.tech_level_civilian-3+roll(4,6)//2//2,0),max_tl)
# tLWAS
self.tech_level_transportation = min(max(self.tech_level-7+roll(6,6)//3,0),max_tl)
self.tech_level_transportation_land = min(max(self.tech_level_transportation-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_transportation_water = min(max(self.tech_level_transportation-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_transportation_air = min(max(self.tech_level_transportation-3+roll(4,6)//2//2,0),max_tl)
self.tech_level_transportation_space = min(max(self.tech_level_transportation-3+roll(4,6)//2//2,0),max_tl)
# tPPHH
self.tech_level_military = min(max(self.tech_level-7+roll(6,6)//3,0),max_tl)
self.tech_level_military_personalweapons = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_military_personalarmour = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_military_heavyweapons = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_military_heavyarmour = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
else:
# tECCME
self.tech_level_civilian = 0
self.tech_level_civilian_energy = 0
self.tech_level_civilian_computing = 0
self.tech_level_civilian_communication = 0
self.tech_level_civilian_medicine = 0
self.tech_level_civilian_environment = 0
# tLWAS
self.tech_level_transportation = 0
self.tech_level_transportation_land = 0
self.tech_level_transportation_water = 0
self.tech_level_transportation_air = 0
self.tech_level_transportation_space = 0
# tPPHH
self.tech_level_military = 0
self.tech_level_military_personalweapons = 0
self.tech_level_military_personalarmour = 0
self.tech_level_military_heavyweapons = 0
self.tech_level_military_heavyarmour = 0
def newPBJ(self):
# PBJ-sequence (resources)
if self.population==0: self.population_mod=0
self.asteroid_belts =max(roll(2,6)//2-3,0)
if self.size==0:
self.asteroid_belts+=1
self.jovian_planets =max(roll(2,6)-5,0)
def newWDITTP(self):
# WDITTP-sequence (specialized law levels)
if self.population > 0:
self.law_level_weapons = self.law_level-2+roll(1,6)//2
self.law_level_drugs = self.law_level-2+roll(1,6)//2
self.law_level_information = self.law_level-2+roll(1,6)//2
self.law_level_technology = self.law_level-2+roll(1,6)//2
self.law_level_travellers = self.law_level
self.law_level_powers = self.law_level-2+roll(1,6)//2
# Government-ammended WDITTP-sequence
if self.government in [0,10]:
self.law_level -= 1
if self.government in [1,3,4,5,6,8,9,11,12]:
self.law_level_weapons += roll(1,6)
if self.government in [1,2,4,8,9]:
self.law_level_drugs += roll(1,6)
if self.government in [5,9,11]:
self.law_level_information+= roll(1,6)
if self.government in [1,3,5,6,9,11]:
self.law_level_technology += roll(1,6)
if self.government in [1,3,6,9]:
self.law_level_travellers += roll(1,6)
if self.government in [1,3,4,9]:
self.law_level_powers += roll(1,6)
if self.government>=13 or self.government==7:
for i in range(roll(1,6)-1):
es=random.choice(("self.law_level_weapons ",\
"self.law_level_drugs ",\
"self.law_level_information",\
"self.law_level_technology ",\
"self.law_level_travellers ",\
"self.law_level_powers "))
exec(es+"+=(roll(2,6)+1)//2")
self.law_level = self.law_level_travellers
else:
self.law_level_weapons = 0
self.law_level_drugs = 0
self.law_level_information = 0
self.law_level_technology = 0
self.law_level_travellers = 0
self.law_level_powers = 0
# reset law levels
self.law_level_weapons = max(self.law_level_weapons ,0)
self.law_level_drugs = max(self.law_level_drugs ,0)
self.law_level_information= max(self.law_level_information,0)
self.law_level_technology = max(self.law_level_technology ,0)
self.law_level_travellers = max(self.law_level_travellers ,0)
self.law_level_powers = max(self.law_level_powers ,0)
pass
def newInfo(self,quirk_chance):
# travel code
self.travel_code=" "
if self.population > 0 \
and self.populated \
and (self.government == 0 \
or self.government == 7 \
or self.government == 10 \
or self.law_level == 0 \
or self.law_level_weapons == 0 \
or self.law_level_information >= 9 \
or self.law_level_technology >= 9 \
or self.law_level_travellers >= 9 \
or self.government == 0 \
or self.atmosphere >= 10 ):
self.travel_code="a"
self.trade_codes = self.getTradeCodes()
if self.population>0 \
and quirk_chance<random.randrange(100):
self.quirk = self.getQuirk()
else:
self.quirk = ""
pass
def getTradeCodes(self):
rc = ''
from traceback import print_exc
from sys import argv
path = argv[0][:argv[0].rfind("\\")+1]
NAME = 'name'
TAG = 'tag'
SIZE = 'size'
ATMOSPHERE = 'atmosphere'
HYDROGRAPHICS = 'hydrographics'
POPULATION = 'population'
GOVERNMENT = 'government'
LAWLEVEL = 'lawlevel'
TECHLEVEL = 'techlevel'
for tradecode in self.TRADECODES['tradecode']:
try:
tcode = True
for requirement in tradecode['requirements'].keys():
if requirement.endswith('__info') \
or requirement == XML_Parse.CDATA \
or requirement == XML_Parse.ATTR_TAG\
or tradecode['requirements'][requirement][XML_Parse.CDATA] == None \
or tradecode['requirements'][requirement][XML_Parse.CDATA].strip() == '' :
continue
else:
req_info = tradecode['requirements'][requirement][XML_Parse.CDATA]
if requirement == SIZE:
req = splitup( req_info )
if self.size in req: pass
else: tcode = False
elif requirement == ATMOSPHERE:
req = splitup( req_info )
if self.atmosphere in req: pass
else: tcode = False
elif requirement == HYDROGRAPHICS:
req = splitup( req_info )
if self.hydrographics in req: pass
else: tcode = False
elif requirement == POPULATION:
req = splitup( req_info )
if self.population in req: pass
else: tcode = False
elif requirement == GOVERNMENT:
req = splitup( req_info )
if self.government in req: pass
else: tcode = False
elif requirement == LAWLEVEL:
req = splitup( req_info )
if self.law_level in req: pass
else: tcode = False
elif requirement == TECHLEVEL:
req = splitup( req_info )
if self.tech_level in req: pass
else: tcode = False
# print( ' ' + tradecode['name'] + ' is ' + str(tcode) )
if tradecode[XML_Parse.ATTR_TAG][TAG]=="As" and self.type=="satellite": tcode=False
if tradecode[XML_Parse.ATTR_TAG][TAG]=="Ga" and abs(self.GoldieDist)>10: tcode=False
if tradecode[XML_Parse.ATTR_TAG][TAG]=="Ic" and self.GoldieDist>12 and self.hydrographics>0: tcode=True
if tcode:
rc += " " + tradecode[XML_Parse.ATTR_TAG][TAG]
except Exception as e:
print(tradecode[XML_Parse.ATTR_TAG][TAG])
print_exc()#print(e)
return rc
def getTag(self):
r = roll(1,100)
rc = ''
tags = [ 'Abandoned Colony', \
'Alien Ruins', \
'Altered Humanity', \
'Anthromorphs', \
'Battleground', \
'Bubble Cities', \
'Cheap Life', \
'Civil War', \
'Cold War', \
'Colony', \
'Cyclical Doom', \
'Doomed World', \
'Dying Race', \
'Eugenics Cult', \
'Feral World', \
'Flying Cities', \
'Forbidden Tech', \
'Freak Geology', \
'Freak Weather', \
'Friendly Foe', \
'Gold Rush', \
'Great Work', \
'Hatred', \
'Hivemind', \
'Holy War', \
'Hostile Biosphere', \
'Hostile Space', \
'Immortals', \
'Local Specialty', \
'Local Tech', \
'Major Spaceyard', \
'Megacorps', \
'Mercenaries', \
'Minimal Contact', \
'Misandry/Misogyny', \
'Night World', \
'Nomads', \
'Out of Contact', \
'Outpost World', \
'Pilgrimage Site', \
'Pleasure World', \
'Police State', \
'Post-Scarcity', \
'Tech Cultists', \
'Primitive Aliens', \
'Quarantined World', \
'Radioactive World', \
'Refugees', \
'Regional Hegemon', \
'Restrictive Laws', \
'Revanchists', \
'Revolutionaries', \
'Rigid Culture', \
'Rising Hegemon', \
'Ritual Combat', \
'Robots', \
'Seagoing Cities', \
'Sealed Menace', \
'Secret Masters', \
'Sectarians', \
'Seismic Instability', \
'Shackled World', \
'Societal Despair', \
'Sole Supplier', \
'Taboo Treasure', \
'Terraform Failure', \
'Tomb World', \
'Unshackled AI', \
'Urbanized Surface', \
'Utopia', \
'Xenophiles', \
'Xenophobes', \
'Zombies' ]
rc = random.choice( tags )
return rc
def getQuirk(self):
rc = ''
from traceback import print_exc
from sys import argv
dom_quirk=random.choice(self.CULTURE['quirk'])
rc+=dom_quirk[XML_Parse.ATTR_TAG]['index']
return rc
    def newOrbitInfo(self):
        """Randomly generate orbital data for this body: angular
        position in orbit, orbital period (in days; a negative period
        denotes a retrograde orbit), rotations per orbit (day length)
        and tide-locking.  Satellites and planets take separate
        branches, each further split by orbit band
        (near/mid/far/rogue).  Exotic atmospheres (code 15) may append
        a remark to ``self.comment``.
        """
        # Time units expressed in days.
        year=365.25
        week=7
        day =1
        self.isTideLocked=False
        if self.type=="satellite":
            # position in orbit
            if self.isMainworld:
                self.parent.pos=0
                self.pos=0
            else:
                # random angle in 5-degree steps
                self.pos=random.randrange(73)*5
            # orbital period
            if self.band.startswith("near" ):
                self.orbital_period=1.0
            elif self.band.startswith("mid" ):
                # chance of prograde vs retrograde (negative, 25% longer)
                if roll(1,6)<=1: retrograde= 1.00
                else: retrograde=-1.25
                self.orbital_period=year*max(self.parent.gravity,1)/10*(3+roll(2,6))/10*retrograde
            elif self.band.startswith("far" ):
                if roll(1,6)<=3: retrograde= 1.00
                else: retrograde=-1.25
                self.orbital_period=year*max(self.parent.gravity*1.1,1)/10*(roll(1,2)*0.5+0.15)*retrograde
            elif self.band.startswith("rogue"):
                if roll(1,6)<=5: retrograde= 1.00
                else: retrograde=-1.25
                self.orbital_period=year*max(self.parent.gravity*1.5,1)/10*(roll(1,2)*0.15+0.15)*retrograde
            else:
                self.orbital_period=1.0
            # length of day; heavier parents tide-lock moons more easily
            if self.band.startswith("near" ):
                self.isTideLocked =True
                self.rotations_per_orbit=1.0
            elif self.band.startswith("mid" ):
                if self.parent.gravity+roll(2,6) > 12:
                    self.isTideLocked=True
                    self.rotations_per_orbit=1.0
                else:
                    self.rotations_per_orbit=day*32*(3+roll(2,6))/10
            elif self.band.startswith("far" ):
                if self.parent.gravity+roll(2,6) > 16:
                    self.isTideLocked=True
                    self.rotations_per_orbit=1.0
                else:
                    self.rotations_per_orbit=day*32*(3+roll(2,6))/7
            elif self.band.startswith("rogue"):
                if self.parent.gravity+roll(2,6) > 20:
                    self.isTideLocked=True
                    self.rotations_per_orbit=1.0
                else:
                    self.rotations_per_orbit=day*32*(3+roll(2,6))/5
            # traversal (degrees swept per week)
            self.weekly_traversal=360*(7/self.orbital_period)
        else:
            # re-get orbit
            self.orbit=int(self.AUtoStar*100)
            # position in orbit
            if self.isMainworld:
                self.pos=0
            else:
                self.pos=random.randrange(73)*5
            # orbital period
            if self.band.startswith("near" ): self.orbital_period=year*self.AUtoStar*(2+roll(2,4))/10
            elif self.band.startswith("mid" ): self.orbital_period=year*self.AUtoStar*(7+roll(1,6))/10
            elif self.band.startswith("far" ):
                if roll(1,6)<=1: retrograde= 1.00
                else: retrograde=-1.25
                self.orbital_period=year*self.AUtoStar*(2+roll(2,6))*retrograde
            elif self.band.startswith("rogue"):
                if roll(1,6)<=2: retrograde= 1.00
                else: retrograde=-1.25
                self.orbital_period=year*self.AUtoStar*(4+4*roll(4,6))*retrograde
            else:
                self.orbital_period=1.0
            # length of day; gas giants are never tide-locked here
            if self.band.startswith("near" ):
                if roll(2,6) > 5:
                    self.isTideLocked=True
                    self.rotations_per_orbit=1.0
                else:
                    self.rotations_per_orbit=day*self.AUtoStar*roll( 1,6)
            elif self.band.startswith("mid" ):
                if not self.isGasGiant and roll(2,6) > 9:
                    self.isTideLocked=True
                    self.rotations_per_orbit=1.0
                else:
                    self.rotations_per_orbit=day*self.AUtoStar*roll(10,6)*10
            elif self.band.startswith("far" ):
                if not self.isGasGiant and roll(2,6) > 11:
                    self.isTideLocked=True
                    self.rotations_per_orbit=1.0
                else:
                    self.rotations_per_orbit=day*self.AUtoStar*roll( 3,6)*100
            elif self.band.startswith("rogue"):
                self.rotations_per_orbit=day*self.AUtoStar*roll( 3,6)*100
            # traversal (degrees swept per week)
            self.weekly_traversal=360*(7/self.orbital_period)
        # exotic atmosphere (code 15): add a descriptive remark
        if self.atmosphere==15:
            if self.hydrographics==10:
                self.comment +="Atmo:\"Panthalassic world (>85% water by volume)\","
            elif not self.isTideLocked and roll(1,6)==2:
                self.comment +="Atmo:\"Constant Storms (pressure changes wildly)\","
            elif not self.isTideLocked and self.size>2 and self.size<7:
                self.comment +="Atmo:\"Ellipsoidal Atmosphere (viable density only at equator)\","
        pass
    def load(self,uwppp):
        """Parse a serialized world line (the format shown in the ref
        comment below) back into this object's attributes.

        A moving pointer ``p`` walks fixed-width fields (mainworld
        marker, orbit, UWP, COG, WDITTP, extended tech levels, trade
        info, quirk), then delimiter-framed sections are located by
        ``find``: ``|n|name|n|``, ``|a|allegiance + trade codes``,
        ``|o|orbital data|o|`` and ``|c|comment|c|``.
        """
        # #
        ## ref: M 49 - X420000-0 KN3 00000 0-00000 0-0000 0-0000 00000 |n|noname I|n| |a| Ba De Po |o|143.18|1.00|0.00|o| |c|---|c|
        # create pointer
        p=0
        # Mainworld Status
        self.isMainworld=False
        p+=0
        length=1
        marker=uwppp[p:p+length]
        if marker in ("M","m"): self.isMainworld=True
        # "•" or uppercase "M" marks a planet; anything else a satellite
        if marker in ("•","M"): self.type="planet"
        else: self.type="satellite"
        # AUtoStar
        p+=length
        length=4
        if self.type!="satellite":
            self.AUtoStar = float( uwppp[p:p+length] ) / 100
            self.orbit = int( float( uwppp[p:p+length] ) )
        else:
            self.AUtoStar = -1
            self.orbit = int( float( uwppp[p:p+length] ) )
        # UWP
        p+=length+3
        length=9
        uwp=uwppp[p:p+length]
        if uwp.lower().startswith("sgg"):
            # small gas giant: stats come from the config, not the UWP
            self.isGasGiant=True
            self.isGasGiantSmall=True
            self.size =int( self.config.get("GAS GIANTS","size (sgg)") )
            self.gravity = self.size
            self.atmosphere =int( self.config.get("GAS GIANTS","atmosphere") )
            self.hydrographics =int( self.config.get("GAS GIANTS","hydrographics") )
            self.population =0
            self.government =0
            self.law_level =0
            self.tech_level =0
        elif uwp.lower().startswith("lgg"):
            # large gas giant
            self.size =int( self.config.get("GAS GIANTS","size (lgg)") )
            self.gravity =self.size
            self.atmosphere =int( self.config.get("GAS GIANTS","atmosphere") )
            self.hydrographics =int( self.config.get("GAS GIANTS","hydrographics") )
            self.population =0
            self.government =0
            self.law_level =0
            self.tech_level =0
        else:
            # ordinary world: decode each UWP digit via HEX_EXPANDED
            self.starport = uwp[ 0: 1].lower()
            self.size = uwp[ 1: 2].lower()
            self.atmosphere =findPosInList(self.HEX_EXPANDED,uwp[ 2: 3].lower())[0]
            self.hydrographics =findPosInList(self.HEX_EXPANDED,uwp[ 3: 4].lower())[0]
            self.population =findPosInList(self.HEX_EXPANDED,uwp[ 4: 5].lower())[0]
            self.government =findPosInList(self.HEX_EXPANDED,uwp[ 5: 6].lower())[0]
            self.law_level =findPosInList(self.HEX_EXPANDED,uwp[ 6: 7].lower())[0]
            # -
            self.tech_level =findPosInList(self.HEX_EXPANDED,uwp[ 8: 9].lower())[0]
            try:
                # non-standard size codes (ring/small-body markers) get
                # mapped to negative size values below
                if self.size.upper() in self.list_table_size_R_S: raise Exception("non-standard size")
                self.size=findPosInList(self.HEX_EXPANDED,self.size)[0]
                self.size_str=self.HEX_EXPANDED[max(self.size,0)]
            except:
                for i in range(3):
                    if self.size == self.list_table_size_R_S[i].lower():
                        self.size_str=self.size
                        self.size =i-3
                        break
        if self.population==0: self.populated=False
        else: self.populated=True
        # TC(O)G
        p+=length+1
        length=4
        tcog=uwppp[p:p+length]
        self.travel_code = tcog[ 0: 1].lower()
        self.climate =findPosInList(self.HEX_EXPANDED,tcog[ 1: 2].lower())[0]
        band = tcog[ 2: 3].lower()
        if band.startswith("n"): self.band="near"
        elif band.startswith("m"): self.band="mid"
        elif band.startswith("f"): self.band="far"
        elif band.startswith("r"): self.band="rogue"
        #self.orbit = tcog[ 2: 3].lower()
        self.gravity =findPosInList(self.HEX_EXPANDED,tcog[ 3: 4].lower())[0]
        # WDITTP
        p+=length+1
        length=6
        wdittp=uwppp[p:p+length]
        self.law_level_weapons =findPosInList(self.HEX_EXPANDED,wdittp[ 0: 1].lower())[0]
        self.law_level_drugs =findPosInList(self.HEX_EXPANDED,wdittp[ 1: 2].lower())[0]
        self.law_level_information=findPosInList(self.HEX_EXPANDED,wdittp[ 2: 3].lower())[0]
        self.law_level_technology =findPosInList(self.HEX_EXPANDED,wdittp[ 3: 4].lower())[0]
        self.law_level_travellers =findPosInList(self.HEX_EXPANDED,wdittp[ 4: 5].lower())[0]
        self.law_level_powers =findPosInList(self.HEX_EXPANDED,wdittp[ 5: 6].lower())[0]
        # ExTL - Civilian (position 1 is the "-" separator, so skipped)
        p+=length+1
        length=7
        ctm_c=uwppp[p:p+length]
        self.tech_level_civilian =findPosInList(self.HEX_EXPANDED,ctm_c[ 0: 1].lower())[0]
        self.tech_level_civilian_energy =findPosInList(self.HEX_EXPANDED,ctm_c[ 2: 3].lower())[0]
        self.tech_level_civilian_computing =findPosInList(self.HEX_EXPANDED,ctm_c[ 3: 4].lower())[0]
        self.tech_level_civilian_communication =findPosInList(self.HEX_EXPANDED,ctm_c[ 4: 5].lower())[0]
        self.tech_level_civilian_medicine =findPosInList(self.HEX_EXPANDED,ctm_c[ 5: 6].lower())[0]
        self.tech_level_civilian_environment =findPosInList(self.HEX_EXPANDED,ctm_c[ 6: 7].lower())[0]
        # ExTL - Transportation
        p+=length+1
        length=6
        ctm_t=uwppp[p:p+length]
        self.tech_level_transportation =findPosInList(self.HEX_EXPANDED,ctm_t[ 0: 1].lower())[0]
        self.tech_level_transportation_land =findPosInList(self.HEX_EXPANDED,ctm_t[ 2: 3].lower())[0]
        self.tech_level_transportation_water =findPosInList(self.HEX_EXPANDED,ctm_t[ 3: 4].lower())[0]
        self.tech_level_transportation_air =findPosInList(self.HEX_EXPANDED,ctm_t[ 4: 5].lower())[0]
        self.tech_level_transportation_space =findPosInList(self.HEX_EXPANDED,ctm_t[ 5: 6].lower())[0]
        # ExTL - Military
        p+=length+1
        length=6
        ctm_m=uwppp[p:p+length]
        self.tech_level_military =findPosInList(self.HEX_EXPANDED,ctm_m[ 0: 1].lower())[0]
        self.tech_level_military_personalweapons=findPosInList(self.HEX_EXPANDED,ctm_m[ 2: 3].lower())[0]
        self.tech_level_military_personalarmour =findPosInList(self.HEX_EXPANDED,ctm_m[ 3: 4].lower())[0]
        self.tech_level_military_heavyweapons =findPosInList(self.HEX_EXPANDED,ctm_m[ 4: 5].lower())[0]
        self.tech_level_military_heavyarmour =findPosInList(self.HEX_EXPANDED,ctm_m[ 5: 6].lower())[0]
        # Trade Info
        p+=length+1
        length=5
        t_info=uwppp[p:p+length]
        self.trade_number =findPosInList(self.HEX_EXPANDED,t_info[0:1].lower())[0]
        self.imports =findPosInList(self.HEX_EXPANDED,t_info[1:2].lower())[0]
        self.exports =findPosInList(self.HEX_EXPANDED,t_info[2:3].lower())[0]
        self.transient_trade =findPosInList(self.HEX_EXPANDED,t_info[3:4].lower())[0]
        self.port_size =findPosInList(self.HEX_EXPANDED,t_info[4:5].lower())[0]
        # Cultural Quirk
        p+=length+1
        length=3
        quirk=uwppp[p:p+length]
        self.quirk =quirk
        # Name (framed by |n|...|n|)
        p=uwppp.find("|n|")+3
        length=uwppp[p:].find("|n|")
        name=uwppp[p:p+length]
        self.name =name
        # bases [WIP]
        self.bases = "" ## LOADING NOT IMPLEMENTED
        # allegiance (four characters after |a|)
        p=uwppp.find("|a|")+3
        length=4
        allegiance=uwppp[p:p+length]
        self.allegiance = allegiance
        # Orbit Information (three |-separated floats inside |o|...|o|)
        p=uwppp.find("|o|")+3
        length=uwppp[p:].find("|")
        orbital_period=uwppp[p:p+length]
        self.orbital_period = float( orbital_period )
        p+=length+1
        length=uwppp[p:].find("|")
        rotations_per_orbit=uwppp[p:p+length]
        self.rotations_per_orbit = float( rotations_per_orbit )
        p+=length+1
        length=uwppp[p:].find("|o|")
        pos=uwppp[p:p+length]
        self.pos = float( pos )
        self.weekly_traversal = 360*(7/self.orbital_period)
        self.isTideLocked = False
        if self.rotations_per_orbit==1.0: self.isTideLocked=True
        # Comment
        p=uwppp.find("|c|")+3
        length=uwppp[p:].find("|c|")
        comment=uwppp[p:p+length]
        self.comment = comment
        # Trade Codes (the text between the allegiance and |o|)
        p=uwppp.find("|a|")+3+4
        length=uwppp[p:].find("|o|")
        trade_codes=uwppp[p:p+length]
        if self.isGasGiant: self.trade_codes=""
        else: self.trade_codes=trade_codes
        pass
    def import_planet(self,planetcode):
        """Reset this object and populate it from a fixed-width planet
        code string: hex location, UWP, COG, PBJ, WDITTP and the three
        extended tech-level groups, each at a hard-coded offset.

        NOTE(review): offsets are positional — a planet code in any
        other layout will silently mis-parse; confirm against the
        writer that produces these strings.
        """
        self.clear()
        # hex location: first two digits row, next two column
        s_HID=planetcode[0: 4]
        self.row=int(s_HID[:2])
        self.col=int(s_HID[2:])
        self.location_code=s_HID
        # Get UWP
        s_UWP=planetcode[7:16]
        self.starport = s_UWP[ 0: 1].lower()
        self.size =findPosInList(self.HEX_EXPANDED,s_UWP[ 1: 2].lower())[0]
        self.atmosphere =findPosInList(self.HEX_EXPANDED,s_UWP[ 2: 3].lower())[0]
        self.hydrographics =findPosInList(self.HEX_EXPANDED,s_UWP[ 3: 4].lower())[0]
        self.population =findPosInList(self.HEX_EXPANDED,s_UWP[ 4: 5].lower())[0]
        self.government =findPosInList(self.HEX_EXPANDED,s_UWP[ 5: 6].lower())[0]
        self.law_level =findPosInList(self.HEX_EXPANDED,s_UWP[ 6: 7].lower())[0]
        # -
        self.tech_level =findPosInList(self.HEX_EXPANDED,s_UWP[ 8: 9].lower())[0]
        try:
            self.size=int(self.size)
        except:
            # non-numeric size codes map to negative size values
            for i in range(2):
                if self.size == self.list_table_size_R_S[i]:
                    self.size_str=self.size
                    self.size =i-2
                    break
        # Get COG-sequence
        s_COG=planetcode[17:21]
        self.travel_code = s_COG[ 0: 1].lower()
        self.climate =findPosInList(self.HEX_EXPANDED,s_COG[ 1: 2].lower())[0]
        self.orbit =findPosInList(self.HEX_EXPANDED,s_COG[ 2: 3].lower())[0]
        self.gravity =findPosInList(self.HEX_EXPANDED,s_COG[ 3: 4].lower())[0]
        # Get PBJ-sequence
        s_PBJ=planetcode[22:25]
        self.population_mod=findPosInList(self.HEX_EXPANDED,s_PBJ[ 0: 1].lower())[0]
        self.asteroid_belts=findPosInList(self.HEX_EXPANDED,s_PBJ[ 1: 2].lower())[0]
        self.jovian_planets=findPosInList(self.HEX_EXPANDED,s_PBJ[ 2: 3].lower())[0]
        # Get WDITTP-sequence
        s_WDITTP=planetcode[26:32]
        self.law_level_weapons =findPosInList(self.HEX_EXPANDED,s_WDITTP[ 0: 1].lower())[0]
        self.law_level_drugs =findPosInList(self.HEX_EXPANDED,s_WDITTP[ 1: 2].lower())[0]
        self.law_level_information=findPosInList(self.HEX_EXPANDED,s_WDITTP[ 2: 3].lower())[0]
        self.law_level_technology =findPosInList(self.HEX_EXPANDED,s_WDITTP[ 3: 4].lower())[0]
        self.law_level_travellers =findPosInList(self.HEX_EXPANDED,s_WDITTP[ 4: 5].lower())[0]
        self.law_level_powers =findPosInList(self.HEX_EXPANDED,s_WDITTP[ 5: 6].lower())[0]
        # Get CTM-sequence (three space-separated groups; index 1 of
        # each group is the "-" separator and is skipped)
        s_CTM=planetcode[33:54].split(" ")
        # tECCME
        self.tech_level_civilian =findPosInList(self.HEX_EXPANDED,s_CTM[0][ 0: 1].lower())[0]
        self.tech_level_civilian_energy =findPosInList(self.HEX_EXPANDED,s_CTM[0][ 2: 3].lower())[0]
        self.tech_level_civilian_computing =findPosInList(self.HEX_EXPANDED,s_CTM[0][ 3: 4].lower())[0]
        self.tech_level_civilian_communication =findPosInList(self.HEX_EXPANDED,s_CTM[0][ 4: 5].lower())[0]
        self.tech_level_civilian_medicine =findPosInList(self.HEX_EXPANDED,s_CTM[0][ 5: 6].lower())[0]
        self.tech_level_civilian_environment =findPosInList(self.HEX_EXPANDED,s_CTM[0][ 6: 7].lower())[0]
        # tLWAS
        self.tech_level_transportation =findPosInList(self.HEX_EXPANDED,s_CTM[1][ 0: 1].lower())[0]
        self.tech_level_transportation_land =findPosInList(self.HEX_EXPANDED,s_CTM[1][ 2: 3].lower())[0]
        self.tech_level_transportation_water =findPosInList(self.HEX_EXPANDED,s_CTM[1][ 3: 4].lower())[0]
        self.tech_level_transportation_air =findPosInList(self.HEX_EXPANDED,s_CTM[1][ 4: 5].lower())[0]
        self.tech_level_transportation_space =findPosInList(self.HEX_EXPANDED,s_CTM[1][ 5: 6].lower())[0]
        # tPPHH
        self.tech_level_military =findPosInList(self.HEX_EXPANDED,s_CTM[2][ 0: 1].lower())[0]
        self.tech_level_military_personalweapons=findPosInList(self.HEX_EXPANDED,s_CTM[2][ 2: 3].lower())[0]
        self.tech_level_military_personalarmour =findPosInList(self.HEX_EXPANDED,s_CTM[2][ 3: 4].lower())[0]
        self.tech_level_military_heavyweapons =findPosInList(self.HEX_EXPANDED,s_CTM[2][ 4: 5].lower())[0]
        self.tech_level_military_heavyarmour =findPosInList(self.HEX_EXPANDED,s_CTM[2][ 5: 6].lower())[0]
        # derived: a world is populated iff any PGLT digit is non-zero
        if self.population==0 \
           and self.government==0 \
           and self.law_level ==0 \
           and self.tech_level==0 :
            self.populated=False
        else:
            self.populated=True
        # Trade Codes
        self.trade_codes=self.getTradeCodes()
        self.quirk=planetcode[55:58]
        pass
def getUWP(self):
if self.isGasGiant:
if self.isGasGiantLarge: return "LGG "
elif self.isGasGiantSmall: return "SGG "
else:
uwp="{}{}{}{}{}{}{}-{}"
result = uwp.format( self.starport .upper() ,\
self.size_str .upper() ,\
self.HEX_EXPANDED[max(self.atmosphere ,0)].upper() ,\
self.HEX_EXPANDED[max(self.hydrographics,0)].upper() ,\
self.HEX_EXPANDED[max(self.population ,0)].upper() ,\
self.HEX_EXPANDED[max(self.government ,0)].upper() ,\
self.HEX_EXPANDED[max(self.law_level ,0)].upper() ,\
self.HEX_EXPANDED[max(self.tech_level ,0)].upper() )
return result
def getCOG(self):
cog="{}{}{}{}"
if self.band.startswith("near" ): orbit="n"
elif self.band.startswith("mid" ): orbit="m"
elif self.band.startswith("far" ): orbit="f"
elif self.band.startswith("rogue"): orbit="r"
else: orbit="e"
result = cog.format(self.travel_code .upper() ,\
self.HEX_EXPANDED[max(min(self.climate,25),0)].upper() ,\
orbit .upper() ,\
self.HEX_EXPANDED[max(min(self.gravity,25),0)].upper() )
return result
def getPBJ(self):
pbj="{}{}{}"
result = pbj.format(self.HEX_EXPANDED[self.population_mod].upper() ,\
self.HEX_EXPANDED[self.asteroid_belts].upper() ,\
self.HEX_EXPANDED[self.jovian_planets].upper() )
return result
def getWDITTP(self):
wdittp="{}{}{}{}{}{}"
result = wdittp.format(self.HEX_EXPANDED[max(min(self.law_level_weapons ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.law_level_drugs ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.law_level_information,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.law_level_technology ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.law_level_travellers ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.law_level_powers ,25),0)].upper() )
return result
def getExTL(self):
extl="{}-{}{}{}{}{} {}-{}{}{}{} {}-{}{}{}{}"
result = extl.format(self.HEX_EXPANDED[max(min(self.tech_level_civilian ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_civilian_energy ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_civilian_computing ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_civilian_communication ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_civilian_medicine ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_civilian_environment ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_transportation ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_transportation_land ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_transportation_water ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_transportation_air ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_transportation_space ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_military ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_military_personalweapons ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_military_personalarmour ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_military_heavyweapons ,25),0)].upper() ,\
self.HEX_EXPANDED[max(min(self.tech_level_military_heavyarmour ,25),0)].upper() )
return result
def getTrade(self):
trade_template="{}{}{}{}{}"
trade=trade_template.format(\
self.trade_number ,\
self.imports ,\
self.exports ,\
self.transient_trade ,\
self.port_size )
return trade
def getC(self):
culture="{}"
result =culture.format(self.quirk)
return result
def getOrbitInfo(self):
rObitInfo_template="|o|{:.2f}|{:.2f}|{:.2f}|o|"
rObitInfo=rObitInfo_template.format(\
self.orbital_period, \
self.rotations_per_orbit, \
self.pos )
return rObitInfo
def roll(dice, sides):
    """Roll ``dice`` dice with ``sides`` faces each and return the sum
    (0 when ``dice`` is 0)."""
    return sum(random.randint(1, sides) for _ in range(dice))
def findPosInList(list, item):
    """Return all indices of ``item`` in ``list``; ``[-1]`` when absent
    or when ``list`` is not iterable.

    Fix: the original abused a raised bare ``Exception`` for the
    not-found path; this version uses plain data flow.  The parameter
    name (which shadows the builtin) is kept for interface
    compatibility but aliased immediately.
    """
    seq = list
    try:
        matches = [i for i, x in enumerate(seq) if x == item]
    except TypeError:
        # Non-iterable input behaves like "not found", as before.
        return [-1]
    return matches or [-1]
def splitup(string):
    """Split a requirement spec like ``"1,3,5-7"`` into a list of ints,
    expanding dash ranges inclusively.  ``None`` yields ``[]``; parts
    that cannot be parsed as ints are kept as strings.

    Fix: the original mutated ``sl`` while iterating over it, which
    skipped elements (e.g. the second range in ``"1-3,5-7"`` was never
    expanded).  This version builds a fresh result list instead.
    """
    if string is None:
        return []
    result = []
    for part in string.split(','):
        if '-' in part:
            try:
                lo, hi = part.split('-', 1)
                result.extend(range(int(lo), int(hi) + 1))
                continue
            except ValueError:
                # Not a valid range (e.g. a negative number like "-2");
                # fall through to the single-value parse below.
                pass
        try:
            result.append(int(part))
        except ValueError:
            result.append(part)
    return result
def int_to_roman(input):
    """Convert an integer in 1..3999 to a Roman numeral string.

    Raises ``TypeError`` for non-integers and ``ValueError`` outside
    the representable range.

    Fixes: the original's doctests used Python-2 ``print`` statements,
    and the exceptions were raised without the messages the docstring
    promised.

    Examples:

    >>> int_to_roman(1999)
    'MCMXCIX'
    >>> int_to_roman(2000)
    'MM'
    >>> int_to_roman(4)
    'IV'
    >>> int_to_roman(0)
    Traceback (most recent call last):
        ...
    ValueError: Argument must be between 1 and 3999
    >>> int_to_roman(1.5)
    Traceback (most recent call last):
        ...
    TypeError: expected integer, got <class 'float'>
    """
    # Strict type check as in the original (also rejects bool).
    if type(input) != type(1):
        raise TypeError("expected integer, got %s" % type(input))
    if not 0 < input < 4000:
        raise ValueError("Argument must be between 1 and 3999")
    ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX',
            'V', 'IV', 'I')
    result = ""
    for value, numeral in zip(ints, nums):
        # Greedy subtraction: emit as many of each numeral as fit.
        count, input = divmod(input, value)
        result += numeral * count
    return result
# Script entry point; main() is defined elsewhere in this file.
if __name__=="__main__": main()
28c4a8bf9bf00c45351a4a947775f78f55790bbf | 16,732 | py | Python | numba/cuda/api.py | luk-f-a/numba | 3a682bd827e416335e3574bc7b10f0ec69adb701 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2021-08-10T05:33:29.000Z | 2021-08-10T05:33:29.000Z | numba/cuda/api.py | luk-f-a/numba | 3a682bd827e416335e3574bc7b10f0ec69adb701 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | numba/cuda/api.py | luk-f-a/numba | 3a682bd827e416335e3574bc7b10f0ec69adb701 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """
API that are reported to numba.cuda
"""
import contextlib
import numpy as np
from .cudadrv import devicearray, devices, driver
# NDarray device helper
require_context = devices.require_context
current_context = devices.get_context
gpus = devices.gpus
@require_context
def from_cuda_array_interface(desc, owner=None):
    """Create a DeviceNDArray from a cuda-array-interface description.
    The *owner* is the owner of the underlying memory.
    The resulting DeviceNDArray will acquire a reference from it.

    Fix: a description without a 'version' key used to raise a
    confusing ``TypeError`` from ``1 <= None``; it is now treated as a
    pre-mask (version 0) interface.
    """
    version = desc.get('version')
    if version is None:
        version = 0

    # Mask introduced in version 1
    if 1 <= version:
        mask = desc.get('mask')
        # Would ideally be better to detect if the mask is all valid
        if mask is not None:
            raise NotImplementedError('Masked arrays are not supported')

    shape = desc['shape']
    strides = desc.get('strides')
    dtype = np.dtype(desc['typestr'])

    shape, strides, dtype = _prepare_shape_strides_dtype(
        shape, strides, dtype, order='C')
    size = driver.memory_size_from_info(shape, strides, dtype.itemsize)

    # Wrap the raw device pointer for the active context; the resulting
    # array holds a reference to *owner* so the memory stays alive.
    devptr = driver.get_devptr_for_active_ctx(desc['data'][0])
    data = driver.MemoryPointer(
        current_context(), devptr, size=size, owner=owner)
    da = devicearray.DeviceNDArray(shape=shape, strides=strides,
                                   dtype=dtype, gpu_data=data)
    return da
def as_cuda_array(obj):
    """Create a DeviceNDArray from any object that implements
    the :ref:`cuda array interface <cuda-array-interface>`.

    A view of the underlying GPU buffer is created; no data is copied.
    The resulting DeviceNDArray acquires a reference from *obj*.
    """
    if is_cuda_array(obj):
        return from_cuda_array_interface(obj.__cuda_array_interface__,
                                         owner=obj)
    raise TypeError("*obj* doesn't implement the cuda array interface.")
def is_cuda_array(obj):
    """Test if the object has defined the `__cuda_array_interface__`
    attribute.  Does not verify the validity of the interface.
    """
    try:
        obj.__cuda_array_interface__
    except AttributeError:
        return False
    return True
@require_context
def to_device(obj, stream=0, copy=True, to=None):
    """to_device(obj, stream=0, copy=True, to=None)

    Allocate and transfer a numpy ndarray or structured scalar to the
    device.  When *to* is given, *obj* is copied into it (subject to
    *copy*) and *to* is returned; otherwise a new device array is
    created (and filled from *obj* when *copy* is true).

    Host->device::

        ary = np.arange(10)
        d_ary = cuda.to_device(ary)

    Enqueue the transfer on a stream::

        stream = cuda.stream()
        d_ary = cuda.to_device(ary, stream=stream)

    Copy back with ``d_ary.copy_to_host()`` — optionally into an
    existing array, optionally on a stream.
    """
    if to is not None:
        if copy:
            to.copy_to_device(obj, stream=stream)
        return to
    devary, _ = devicearray.auto_device(obj, stream=stream, copy=copy)
    return devary
@require_context
def device_array(shape, dtype=float, strides=None, order='C', stream=0):
    """device_array(shape, dtype=float, strides=None, order='C', stream=0)

    Allocate an empty device ndarray. Similar to :meth:`numpy.empty`.

    Fix: the default dtype was ``np.float``, an alias of the builtin
    ``float`` that NumPy removed in 1.24; the builtin is used directly,
    which is behaviorally identical.
    """
    shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
                                                         order)
    return devicearray.DeviceNDArray(shape=shape, strides=strides, dtype=dtype,
                                     stream=stream)
@require_context
def managed_array(shape, dtype=float, strides=None, order='C', stream=0,
                  attach_global=True):
    """managed_array(shape, dtype=float, strides=None, order='C', stream=0,
                     attach_global=True)

    Allocate a np.ndarray with a buffer that is managed.
    Similar to np.empty().

    Fix: the default dtype was ``np.float`` (removed in NumPy 1.24);
    the equivalent builtin ``float`` is used instead.

    :param attach_global: A flag indicating whether to attach globally. Global
                          attachment implies that the memory is accessible from
                          any stream on any device. If ``False``, attachment is
                          *host*, and memory is only accessible by devices
                          with Compute Capability 6.0 and later.
    """
    shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
                                                         order)
    bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
    buffer = current_context().memallocmanaged(bytesize,
                                               attach_global=attach_global)
    npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
                       buffer=buffer)
    # View the host-visible ndarray as a ManagedNDArray and attach the
    # device-side bookkeeping to it.
    managedview = np.ndarray.view(npary, type=devicearray.ManagedNDArray)
    managedview.device_setup(buffer, stream=stream)
    return managedview
@require_context
def pinned_array(shape, dtype=float, strides=None, order='C'):
    """pinned_array(shape, dtype=float, strides=None, order='C')

    Allocate an :class:`ndarray <numpy.ndarray>` with a buffer that is pinned
    (pagelocked). Similar to :func:`np.empty() <numpy.empty>`.

    Fix: the default dtype was ``np.float`` (removed in NumPy 1.24);
    the equivalent builtin ``float`` is used instead.
    """
    shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
                                                         order)
    bytesize = driver.memory_size_from_info(shape, strides,
                                            dtype.itemsize)
    buffer = current_context().memhostalloc(bytesize)
    return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
                      buffer=buffer)
@require_context
def mapped_array(shape, dtype=float, strides=None, order='C', stream=0,
                 portable=False, wc=False):
    """mapped_array(shape, dtype=float, strides=None, order='C', stream=0,
                    portable=False, wc=False)

    Allocate a mapped ndarray with a buffer that is pinned and mapped on
    to the device. Similar to np.empty()

    Fixes: the default dtype was ``np.float`` (removed in NumPy 1.24);
    and the *wc* description contradicted itself ("faster to write by
    the host ... slower to write by the host") — corrected per the CUDA
    documentation on write-combined pinned memory.

    :param portable: a boolean flag to allow the allocated device memory to be
                     usable in multiple devices.
    :param wc: a boolean flag to enable writecombined allocation which is
               faster for the host to write and for the device to read, but
               slower for the host to read and for the device to write.
    """
    shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
                                                         order)
    bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
    buffer = current_context().memhostalloc(bytesize, mapped=True)
    npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
                       buffer=buffer)
    # View as a MappedNDArray and attach the device-side mapping.
    mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray)
    mappedview.device_setup(buffer, stream=stream)
    return mappedview
@contextlib.contextmanager
@require_context
def open_ipc_array(handle, shape, dtype, strides=None, offset=0):
    """
    A context manager that opens an IPC *handle* (*CUipcMemHandle*) that is
    represented as a sequence of bytes (e.g. *bytes*, tuple of int)
    and represents it as an array of the given *shape*, *strides* and *dtype*.
    The *strides* can be omitted.  In that case, it is assumed to be a 1D
    C contiguous array.

    Yields a device array.

    The IPC handle is closed automatically when the context manager
    exits — including when the body raises.  (Fix: the original only
    closed the handle on a clean exit, leaking it on exceptions.)
    """
    dtype = np.dtype(dtype)
    # compute size
    size = np.prod(shape) * dtype.itemsize
    # manually recreate the IPC mem handle
    handle = driver.drvapi.cu_ipc_mem_handle(*handle)
    # use *IpcHandle* to open the IPC memory
    ipchandle = driver.IpcHandle(None, handle, size, offset=offset)
    try:
        yield ipchandle.open_array(current_context(), shape=shape,
                                   strides=strides, dtype=dtype)
    finally:
        ipchandle.close()
def synchronize():
    "Synchronize the current context."
    # Blocks the calling host thread until all preceding work queued on the
    # current CUDA context has completed.
    return current_context().synchronize()
def _prepare_shape_strides_dtype(shape, strides, dtype, order):
dtype = np.dtype(dtype)
if isinstance(shape, int):
shape = (shape,)
if isinstance(strides, int):
strides = (strides,)
else:
if shape == ():
shape = (1,)
strides = strides or _fill_stride_by_order(shape, dtype, order)
return shape, strides, dtype
def _fill_stride_by_order(shape, dtype, order):
nd = len(shape)
strides = [0] * nd
if order == 'C':
strides[-1] = dtype.itemsize
for d in reversed(range(nd - 1)):
strides[d] = strides[d + 1] * shape[d + 1]
elif order == 'F':
strides[0] = dtype.itemsize
for d in range(1, nd):
strides[d] = strides[d - 1] * shape[d - 1]
else:
raise ValueError('must be either C/F order')
return tuple(strides)
def _contiguous_strides_like_array(ary):
"""
Given an array, compute strides for a new contiguous array of the same
shape.
"""
# Don't recompute strides if the default strides will be sufficient to
# create a contiguous array.
if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:
return None
# Otherwise, we need to compute new strides using an algorithm adapted from
# NumPy v1.17.4's PyArray_NewLikeArrayWithShape in
# core/src/multiarray/ctors.c. We permute the strides in ascending order
# then compute the stride for the dimensions with the same permutation.
# Stride permutation. E.g. a stride array (4, -2, 12) becomes
# [(1, -2), (0, 4), (2, 12)]
strideperm = [ x for x in enumerate(ary.strides) ]
strideperm.sort(key=lambda x: x[1])
# Compute new strides using permutation
strides = [0] * len(ary.strides)
stride = ary.dtype.itemsize
for i_perm, _ in strideperm:
strides[i_perm] = stride
stride *= ary.shape[i_perm]
return tuple(strides)
def _order_like_array(ary):
if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:
return 'F'
else:
return 'C'
def device_array_like(ary, stream=0):
    """
    Call :func:`device_array() <numba.cuda.device_array>` with information from
    the array.
    """
    # Forward shape/dtype and a contiguous stride/order derived from `ary`.
    return device_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary), stream=stream)
def mapped_array_like(ary, stream=0, portable=False, wc=False):
    """
    Call :func:`mapped_array() <numba.cuda.mapped_array>` with the information
    from the array.
    """
    # Forward shape/dtype and a contiguous stride/order derived from `ary`.
    return mapped_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary), stream=stream,
                        portable=portable, wc=wc)
def pinned_array_like(ary):
    """
    Call :func:`pinned_array() <numba.cuda.pinned_array>` with the information
    from the array.
    """
    # Forward shape/dtype and a contiguous stride/order derived from `ary`.
    return pinned_array(shape=ary.shape, dtype=ary.dtype,
                        strides=_contiguous_strides_like_array(ary),
                        order=_order_like_array(ary))
# Stream helper
@require_context
def stream():
    """
    Create a CUDA stream that represents a command queue for the device.
    """
    # A fresh (non-default) stream on the current context.
    return current_context().create_stream()
@require_context
def default_stream():
    """
    Get the default CUDA stream. CUDA semantics in general are that the default
    stream is either the legacy default stream or the per-thread default stream
    depending on which CUDA APIs are in use. In Numba, the APIs for the legacy
    default stream are always the ones in use, but an option to use APIs for
    the per-thread default stream may be provided in future.
    """
    # The context decides which default-stream handle to hand out.
    return current_context().get_default_stream()
@require_context
def legacy_default_stream():
    """
    Get the legacy default CUDA stream.
    """
    # Explicitly request the legacy default-stream handle from the context.
    return current_context().get_legacy_default_stream()
@require_context
def per_thread_default_stream():
    """
    Get the per-thread default CUDA stream.
    """
    # Explicitly request the per-thread default-stream handle from the context.
    return current_context().get_per_thread_default_stream()
@require_context
def external_stream(ptr):
    """Create a Numba stream object for a stream allocated outside Numba.

    :param ptr: Pointer to the external stream to wrap in a Numba Stream
    :type ptr: int
    """
    # NOTE(review): presumably the caller retains ownership/lifetime of the
    # external stream -- confirm against the driver implementation.
    return current_context().create_external_stream(ptr)
# Page lock
@require_context
@contextlib.contextmanager
def pinned(*arylist):
    """A context manager for temporary pinning a sequence of host ndarrays.
    """
    pmlist = []
    for ary in arylist:
        # Page-lock the host buffer without mapping it into the device
        # address space (mapped=False).
        pm = current_context().mempin(ary, driver.host_pointer(ary),
                                      driver.host_memory_size(ary),
                                      mapped=False)
        pmlist.append(pm)
    yield
    # NOTE(review): unlike `mapped`, no explicit free happens here -- unpinning
    # appears to rely on `pmlist` dropping its references when the generator
    # finishes; confirm that is the intended cleanup path.
@require_context
@contextlib.contextmanager
def mapped(*arylist, **kws):
    """A context manager for temporarily mapping a sequence of host ndarrays.
    """
    assert not kws or 'stream' in kws, "Only accept 'stream' as keyword."
    stream = kws.get('stream', 0)
    pmlist = []
    devarylist = []
    for ary in arylist:
        # Pin the host buffer AND map it into the device address space.
        pm = current_context().mempin(ary, driver.host_pointer(ary),
                                      driver.host_memory_size(ary),
                                      mapped=True)
        pmlist.append(pm)
        # Device-side view backed by the pinned host memory.
        devary = devicearray.from_array_like(ary, gpu_data=pm, stream=stream)
        devarylist.append(devary)
    try:
        # A single input yields the lone device array for convenience.
        if len(devarylist) == 1:
            yield devarylist[0]
        else:
            yield devarylist
    finally:
        # When exiting from `with cuda.mapped(*arrs) as mapped_arrs:`, the name
        # `mapped_arrs` stays in scope, blocking automatic unmapping based on
        # reference count. We therefore invoke the finalizer manually.
        for pm in pmlist:
            pm.free()
def event(timing=True):
    """
    Create a CUDA event. Timing data is only recorded by the event if it is
    created with ``timing=True``.
    """
    return current_context().create_event(timing=timing)
# Re-export the driver's event elapsed-time query at module level.
event_elapsed_time = driver.event_elapsed_time

# Device selection
def select_device(device_id):
    """
    Make the context associated with device *device_id* the current context.

    Returns a Device instance.

    Raises exception on error.
    """
    # `get_context` activates the context as a side effect; we return the
    # device it belongs to.
    return devices.get_context(device_id).device
def get_current_device():
    "Get current device associated with the current thread"
    # The device owning the calling thread's active context.
    return current_context().device
def list_devices():
    "Return a list of all detected devices"
    # Delegates to the module-level device registry.
    return devices.gpus
def close():
    """
    Explicitly clears all contexts in the current thread, and destroys all
    contexts if the current thread is the main thread.
    """
    devices.reset()
def _auto_device(ary, stream=0, copy=True):
    # Internal helper: delegate host->device conversion of `ary` to
    # devicearray.auto_device and return its result unchanged.
    return devicearray.auto_device(ary, stream=stream, copy=copy)
def detect():
    """
    Detect supported CUDA hardware and print a summary of the detected hardware.

    Returns a boolean indicating whether any supported devices were detected.
    """
    devlist = list_devices()
    print('Found %d CUDA devices' % len(devlist))
    supported_count = 0
    for dev in devlist:
        # Collect (label, value) attribute pairs to print under each device.
        attrs = []
        cc = dev.compute_capability
        attrs += [('compute capability', '%d.%d' % cc)]
        attrs += [('pci device id', dev.PCI_DEVICE_ID)]
        attrs += [('pci bus id', dev.PCI_BUS_ID)]
        # Anything below compute capability 2.0 is reported as unsupported.
        if cc < (2, 0):
            support = '[NOT SUPPORTED: CC < 2.0]'
        else:
            support = '[SUPPORTED]'
            supported_count += 1
        print('id %d %20s %40s' % (dev.id, dev.name, support))
        for key, val in attrs:
            print('%40s: %s' % (key, val))
    print('Summary:')
    print('\t%d/%d devices are supported' % (supported_count, len(devlist)))
    return supported_count > 0
@contextlib.contextmanager
def defer_cleanup():
    """
    Temporarily disable memory deallocation.
    Use this to prevent resource deallocation breaking asynchronous execution.
    For example::

        with defer_cleanup():
            # all cleanup is deferred in here
            do_speed_critical_code()
        # cleanup can occur here

    Note: this context manager can be nested.
    """
    # The context object implements the actual deferral; we only forward.
    with current_context().defer_cleanup():
        yield
# Profiler entry points re-exported from the driver, wrapped so that each
# call requires an active CUDA context.
profiling = require_context(driver.profiling)
profile_start = require_context(driver.profile_start)
profile_stop = require_context(driver.profile_stop)
| 32.679688 | 80 | 0.649235 |
f528f3eaa37e43021b01db52277bcdc759a353ae | 9,745 | py | Python | tests/unit/utils/network.py | bogdanr/salt | 4f198525873a1b7da3fbb9994dbb40d381494922 | [
"Apache-2.0"
] | 2 | 2015-11-07T12:05:15.000Z | 2018-10-29T13:21:06.000Z | tests/unit/utils/network.py | bogdanr/salt | 4f198525873a1b7da3fbb9994dbb40d381494922 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/network.py | bogdanr/salt | 4f198525873a1b7da3fbb9994dbb40d381494922 | [
"Apache-2.0"
] | 1 | 2020-10-19T11:49:50.000Z | 2020-10-19T11:49:50.000Z | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import skipIf
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import network
# Canned `ifconfig` output from a Linux host; parser input for the tests below.
LINUX = '''\
eth0 Link encap:Ethernet HWaddr e0:3f:49:85:6a:af
inet addr:10.10.10.56 Bcast:10.10.10.255 Mask:255.255.252.0
inet6 addr: fe80::e23f:49ff:fe85:6aaf/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:643363 errors:0 dropped:0 overruns:0 frame:0
TX packets:196539 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:386388355 (368.4 MiB) TX bytes:25600939 (24.4 MiB)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:548901 errors:0 dropped:0 overruns:0 frame:0
TX packets:548901 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:613479895 (585.0 MiB) TX bytes:613479895 (585.0 MiB)
'''

# Canned `ifconfig` output from a FreeBSD host (note the aliased inet on em0
# and the point-to-point tun0 interface).
FREEBSD = '''
em0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=4219b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,VLAN_HWCSUM,TSO4,WOL_MAGIC,VLAN_HWTSO>
ether 00:30:48:ff:ff:ff
inet 10.10.10.250 netmask 0xffffffe0 broadcast 10.10.10.255
inet 10.10.10.56 netmask 0xffffffc0 broadcast 10.10.10.63
media: Ethernet autoselect (1000baseT <full-duplex>)
status: active
em1: flags=8c02<BROADCAST,OACTIVE,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=4219b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,VLAN_HWCSUM,TSO4,WOL_MAGIC,VLAN_HWTSO>
ether 00:30:48:aa:aa:aa
media: Ethernet autoselect
status: no carrier
plip0: flags=8810<POINTOPOINT,SIMPLEX,MULTICAST> metric 0 mtu 1500
lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
options=3<RXCSUM,TXCSUM>
inet6 fe80::1%lo0 prefixlen 64 scopeid 0x8
inet6 ::1 prefixlen 128
inet 127.0.0.1 netmask 0xff000000
nd6 options=3<PERFORMNUD,ACCEPT_RTADV>
tun0: flags=8051<UP,POINTOPOINT,RUNNING,MULTICAST> metric 0 mtu 1500
options=80000<LINKSTATE>
inet 10.12.0.1 --> 10.12.0.2 netmask 0xffffffff
Opened by PID 1964
'''

# Canned `ifconfig` output from a Solaris host (includes IPv6 blocks and
# tunnel interfaces).
SOLARIS = '''\
lo0: flags=2001000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv4,VIRTUAL> mtu 8232 index 1
inet 127.0.0.1 netmask ff000000
net0: flags=100001100943<UP,BROADCAST,RUNNING,PROMISC,MULTICAST,ROUTER,IPv4,PHYSRUNNING> mtu 1500 index 2
inet 10.10.10.38 netmask ffffffe0 broadcast 10.10.10.63
ilbint0: flags=110001100843<UP,BROADCAST,RUNNING,MULTICAST,ROUTER,IPv4,VRRP,PHYSRUNNING> mtu 1500 index 3
inet 10.6.0.11 netmask ffffff00 broadcast 10.6.0.255
ilbext0: flags=110001100843<UP,BROADCAST,RUNNING,MULTICAST,ROUTER,IPv4,VRRP,PHYSRUNNING> mtu 1500 index 4
inet 10.10.11.11 netmask ffffffe0 broadcast 10.10.11.31
ilbext0:1: flags=110001100843<UP,BROADCAST,RUNNING,MULTICAST,ROUTER,IPv4,VRRP,PHYSRUNNING> mtu 1500 index 4
inet 10.10.11.12 netmask ffffffe0 broadcast 10.10.11.31
vpn0: flags=1000011008d1<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST,ROUTER,IPv4,PHYSRUNNING> mtu 1480 index 5
inet tunnel src 10.10.11.12 tunnel dst 10.10.5.5
tunnel hop limit 64
inet 10.6.0.14 --> 10.6.0.15 netmask ff000000
lo0: flags=2002000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv6,VIRTUAL> mtu 8252 index 1
inet6 ::1/128
net0: flags=120002004941<UP,RUNNING,PROMISC,MULTICAST,DHCP,IPv6,PHYSRUNNING> mtu 1500 index 2
inet6 fe80::221:9bff:fefd:2a22/10
ilbint0: flags=120002000840<RUNNING,MULTICAST,IPv6,PHYSRUNNING> mtu 1500 index 3
inet6 ::/0
ilbext0: flags=120002000840<RUNNING,MULTICAST,IPv6,PHYSRUNNING> mtu 1500 index 4
inet6 ::/0
vpn0: flags=120002200850<POINTOPOINT,RUNNING,MULTICAST,NONUD,IPv6,PHYSRUNNING> mtu 1480 index 5
inet tunnel src 10.10.11.12 tunnel dst 10.10.5.5
tunnel hop limit 64
inet6 ::/0 --> fe80::b2d6:7c10
'''

# Canned FreeBSD `sockstat` output; input for the remotes-on test.
FREEBSD_SOCKSTAT = '''\
USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS
root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506
'''
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NetworkTestCase(TestCase):
    """Unit tests for salt.utils.network's ifconfig/sockstat parsers."""

    def test_interfaces_ifconfig_linux(self):
        # Parse the canned Linux ifconfig output and compare against the
        # expected per-interface structure.
        interfaces = network._interfaces_ifconfig(LINUX)
        self.assertEqual(interfaces,
                         {'eth0': {'hwaddr': 'e0:3f:49:85:6a:af',
                                   'inet': [{'address': '10.10.10.56',
                                             'broadcast': '10.10.10.255',
                                             'netmask': '255.255.252.0'}],
                                   'inet6': [{'address': 'fe80::e23f:49ff:fe85:6aaf',
                                              'prefixlen': '64'}],
                                   'up': True},
                          'lo': {'inet': [{'address': '127.0.0.1',
                                           'netmask': '255.0.0.0'}],
                                 'inet6': [{'address': '::1',
                                            'prefixlen': '128'}],
                                 'up': True}}
                         )

    def test_interfaces_ifconfig_freebsd(self):
        # The FreeBSD fixture begins with a blank line, which the parser
        # reports as an empty-name interface entry.
        interfaces = network._interfaces_ifconfig(FREEBSD)
        self.assertEqual(interfaces,
                         {'': {'up': False},
                          'em0': {'hwaddr': '00:30:48:ff:ff:ff',
                                  'inet': [{'address': '10.10.10.250',
                                            'broadcast': '10.10.10.255',
                                            'netmask': '255.255.255.224'},
                                           {'address': '10.10.10.56',
                                            'broadcast': '10.10.10.63',
                                            'netmask': '255.255.255.192'}],
                                  'up': True},
                          'em1': {'hwaddr': '00:30:48:aa:aa:aa',
                                  'up': False},
                          'lo0': {'inet': [{'address': '127.0.0.1',
                                            'netmask': '255.0.0.0'}],
                                  'inet6': [{'address': 'fe80::1',
                                             'prefixlen': '64'},
                                            {'address': '::1',
                                             'prefixlen': '128'}],
                                  'up': True},
                          'plip0': {'up': False},
                          'tun0': {'inet': [{'address': '10.12.0.1',
                                             'netmask': '255.255.255.255'}],
                                   'up': True}}
                         )

    def test_interfaces_ifconfig_solaris(self):
        # The Solaris code path is selected by patching the platform check.
        with patch('salt.utils.is_sunos', lambda: True):
            interfaces = network._interfaces_ifconfig(SOLARIS)
            self.assertEqual(interfaces,
                             {'ilbext0': {'inet': [{'address': '10.10.11.11',
                                                    'broadcast': '10.10.11.31',
                                                    'netmask': '255.255.255.224'}],
                                          'inet6': [{'address': '::',
                                                     'prefixlen': '0'}],
                                          'up': True},
                              'ilbint0': {'inet': [{'address': '10.6.0.11',
                                                    'broadcast': '10.6.0.255',
                                                    'netmask': '255.255.255.0'}],
                                          'inet6': [{'address': '::',
                                                     'prefixlen': '0'}],
                                          'up': True},
                              'lo0': {'inet': [{'address': '127.0.0.1',
                                                'netmask': '255.0.0.0'}],
                                      'inet6': [{'address': '::1',
                                                 'prefixlen': '128'}],
                                      'up': True},
                              'net0': {'inet': [{'address': '10.10.10.38',
                                                 'broadcast': '10.10.10.63',
                                                 'netmask': '255.255.255.224'}],
                                       'inet6': [{'address': 'fe80::221:9bff:fefd:2a22',
                                                  'prefixlen': '10'}],
                                       'up': True},
                              'vpn0': {'inet': [{'address': '10.6.0.14',
                                                 'netmask': '255.0.0.0'}],
                                       'inet6': [{'address': '::',
                                                  'prefixlen': '0'}],
                                       'up': True}}
                             )

    def test_freebsd_remotes_on(self):
        # Force the FreeBSD branch and feed the canned sockstat output to the
        # remotes-on helper; only the remote address should be reported.
        with patch('salt.utils.is_sunos', lambda: False):
            with patch('salt.utils.is_freebsd', lambda: True):
                with patch('subprocess.check_output',
                           return_value=FREEBSD_SOCKSTAT):
                    remotes = network._freebsd_remotes_on('4506', 'remote')
                    self.assertEqual(remotes, set(['127.0.0.1']))
# Allow running this test module directly, outside the full suite runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(NetworkTestCase, needs_daemon=False)
| 51.289474 | 107 | 0.491534 |
996b2f7b0cb4942d7b87d7efeca5ffcebe3681fb | 1,582 | py | Python | 2020/starter.py | iKevinY/advent | d160fb711a0a4d671f53cbd61088117e7ff0276a | [
"MIT"
] | 11 | 2019-12-03T06:32:37.000Z | 2021-12-24T12:23:57.000Z | 2020/starter.py | iKevinY/advent | d160fb711a0a4d671f53cbd61088117e7ff0276a | [
"MIT"
] | null | null | null | 2020/starter.py | iKevinY/advent | d160fb711a0a4d671f53cbd61088117e7ff0276a | [
"MIT"
] | 1 | 2019-12-07T06:21:31.000Z | 2019-12-07T06:21:31.000Z | import os # NOQA
import sys # NOQA
import re # NOQA
import math # NOQA
import copy # NOQA
import fileinput
from string import ascii_uppercase, ascii_lowercase # NOQA
from collections import Counter, defaultdict, deque, namedtuple # NOQA
from itertools import count, product, permutations, combinations, combinations_with_replacement # NOQA
from utils import parse_line, parse_nums, mul, all_unique, factors, memoize, primes, resolve_mapping # NOQA
from utils import chunks, gcd, lcm, print_grid, min_max_xy # NOQA
from utils import new_table, transposed, rotated # NOQA
from utils import md5, sha256, knot_hash # NOQA
from utils import VOWELS, CONSONANTS # NOQA
from utils import Point, DIRS, DIRS_4, DIRS_8 # NOQA # N (0, 1) -> E (1, 0) -> S (0, -1) -> W (-1, 0)
# Itertools Functions:
# product('ABCD', repeat=2) AA AB AC AD BA BB BC BD CA CB CC CD DA DB DC DD
# permutations('ABCD', 2) AB AC AD BA BC BD CA CB CD DA DB DC
# combinations('ABCD', 2) AB AC AD BC BD CD
# combinations_with_replacement('ABCD', 2) AA AB AC AD BB BC BD CC CD DD
# Scratch state commonly needed by a puzzle solution.
tot = 0
res = []
board = {}
table = new_table(None, width=2, height=4)

# Uncomment for multi-group style inputs. :c
# data = ''.join([line for line in fileinput.input()])
# groups = [g.split('\n') for g in data.split('\n\n')]

for y, line in enumerate(fileinput.input()):
    line = line.strip()
    nums = parse_nums(line)
    # NOTE: the pattern is intentionally empty -- fill in the puzzle-specific
    # regex before relying on `data`.
    data = parse_line(r'', line)
    # Record every character on a Point-indexed board for grid puzzles.
    for x, c in enumerate(line):
        board[Point(x, y)] = c
    if y == 0:
        print(data)
57ee10d869e994c127818736544a8de71ae2f3b1 | 2,209 | py | Python | Videos/dataManagement/rootrelative.py | 93TEI/3D_Action_Recognition | b648f4cd8e479872c0cd9488120ada18bc64e5ad | [
"MIT"
] | 33 | 2018-05-22T08:35:59.000Z | 2021-10-06T09:56:07.000Z | Videos/dataManagement/rootrelative.py | 93TEI/3D_Action_Recognition | b648f4cd8e479872c0cd9488120ada18bc64e5ad | [
"MIT"
] | 2 | 2018-09-19T19:32:19.000Z | 2019-05-09T02:27:06.000Z | Videos/dataManagement/rootrelative.py | Naman-ntc/Action-Recognition | b648f4cd8e479872c0cd9488120ada18bc64e5ad | [
"MIT"
] | 5 | 2018-05-06T20:48:38.000Z | 2019-09-01T07:55:09.000Z | import numpy as np
import pickle
import torch
def f(a):
    """Trim trailing all-zero frames from a zero-padded frame sequence.

    `a` is a 2-D array of shape (num_frames, features) whose unused trailing
    frames are all zeros.  Because the zero frames form a suffix, a binary
    search locates the first all-zero frame; the slice of `a` before it is
    returned.  If the last frame contains data there is no padding and `a`
    is returned unchanged.

    (Generalized from the original, which hard-coded 300 frames and carried
    an unused `zeros` local.)
    """
    num_frames = len(a)
    # Fast path: last frame is non-zero, so nothing is padded.
    if not (a[num_frames - 1, :] == 0).all():
        return a
    first = 0
    last = num_frames
    while first < last:
        middle = (first + last) // 2
        if (a[middle, :] == 0).all():
            last = middle
        else:
            first = middle + 1
    firstZeroIndex = min(first, last)
    return a[:firstZeroIndex]
trainData = pickle.load(open('../datasets/toyData/trainData.npy','rb'))
valData = pickle.load(open('../datasets/toyData/valData.npy','rb'))
#trainData = np.swapaxes(trainData, 1,2)
#trainData = np.swapaxes(trainData, 2,3)
"""
for i in range(trainData.shape[0]):
for j in range(300):
trainData[i,j] = trainData[i,j] - trainData[i,j,0]
if ((trainData[i,j,1,:])**2).mean() != 0:
trainData[i,j] = trainData[i,j]*(1.0/np.linalg.norm(trainData[i,j,1]))
if i%50 == 0:
print("Processing", i)
trainData = trainData.reshape(trainData.shape[0], 300, 75)
finalData = []
for i in range(trainData.shape[0]):
finalData.append(torch.from_numpy(f(trainData[i])))
print("Processed!!!")
pickle.dump(finalData, open("../datasets/toyData/lstmProcessedValData.npy", 'wb'))
"""
trainLen = len(trainData)
valLen = len(valData)
for i in range(trainLen):
thisData = trainData[i]
thisData = thisData.reshape((-1,16,3))
numFrames = thisData.shape[0]
divisor = None
for j in range(numFrames):
thisData[j,:,:] = thisData[j,:,:] - this[0,6,:]
if j==0:
divisor = np.linalg.norm(thisData[j,6,:],thisData[j,7,:])
thisData[j] = thisData/divisor
trainData[i] = thisData
print("Training Video %d root relatived", i)
for i in range(valLen):
thisData = valData[i]
thisData = thisData.reshape((-1,16,3))
numFrames = thisData.shape[0]
#divisor = None
for j in range(numFrames):
thisData[j,:,:] = thisData[j,:,:] - this[0,6,:]
if j==0:
divisor = np.linalg.norm(thisData[j,6,:],thisData[j,7,:])
thisData[j] = thisData/divisor
trainData[i] = thisData
print("Validation Video %d root relatived", i)
pickle.dump(trainData,open('../datasets/trainData.npy','wb'))
pickle.dump(valData,open('../datasets/valData.npy','wb'))
| 25.988235 | 82 | 0.635129 |
7ced76bdb4f627a4b91ac83d6e8995761ca3bd85 | 9,780 | py | Python | isi_sdk_8_2_2/isi_sdk_8_2_2/models/dataset_filter_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/dataset_filter_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/dataset_filter_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.dataset_filter import DatasetFilter # noqa: F401,E501
from isi_sdk_8_2_2.models.dataset_filter_metric_values import DatasetFilterMetricValues # noqa: F401,E501
class DatasetFilterExtended(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'creation_time': 'int',
        'dataset_id': 'int',
        'error': 'str',
        'id': 'int',
        'metric_values': 'DatasetFilterMetricValues'
    }

    attribute_map = {
        'name': 'name',
        'creation_time': 'creation_time',
        'dataset_id': 'dataset_id',
        'error': 'error',
        'id': 'id',
        'metric_values': 'metric_values'
    }

    def __init__(self, name=None, creation_time=None, dataset_id=None, error=None, id=None, metric_values=None):  # noqa: E501
        """DatasetFilterExtended - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._creation_time = None
        self._dataset_id = None
        self._error = None
        self._id = None
        self._metric_values = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if creation_time is not None:
            self.creation_time = creation_time
        if dataset_id is not None:
            self.dataset_id = dataset_id
        if error is not None:
            self.error = error
        # `id` is required: its setter raises ValueError when given None, so
        # constructing this model without an id fails by design.
        self.id = id
        if metric_values is not None:
            self.metric_values = metric_values

    @property
    def name(self):
        """Gets the name of this DatasetFilterExtended.  # noqa: E501

        The name of the filter. User specified.  # noqa: E501

        :return: The name of this DatasetFilterExtended.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this DatasetFilterExtended.

        The name of the filter. User specified.  # noqa: E501

        :param name: The name of this DatasetFilterExtended.  # noqa: E501
        :type: str
        """
        # Length bounds (1..80) come from the OneFS API schema.
        if name is not None and len(name) > 80:
            raise ValueError("Invalid value for `name`, length must be less than or equal to `80`")  # noqa: E501
        if name is not None and len(name) < 1:
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")  # noqa: E501

        self._name = name

    @property
    def creation_time(self):
        """Gets the creation_time of this DatasetFilterExtended.  # noqa: E501

        Timestamp of when the filter was applied.  # noqa: E501

        :return: The creation_time of this DatasetFilterExtended.  # noqa: E501
        :rtype: int
        """
        return self._creation_time

    @creation_time.setter
    def creation_time(self, creation_time):
        """Sets the creation_time of this DatasetFilterExtended.

        Timestamp of when the filter was applied.  # noqa: E501

        :param creation_time: The creation_time of this DatasetFilterExtended.  # noqa: E501
        :type: int
        """
        # Bounded to a non-negative signed 64-bit value.
        if creation_time is not None and creation_time > 9223372036854775807:  # noqa: E501
            raise ValueError("Invalid value for `creation_time`, must be a value less than or equal to `9223372036854775807`")  # noqa: E501
        if creation_time is not None and creation_time < 0:  # noqa: E501
            raise ValueError("Invalid value for `creation_time`, must be a value greater than or equal to `0`")  # noqa: E501

        self._creation_time = creation_time

    @property
    def dataset_id(self):
        """Gets the dataset_id of this DatasetFilterExtended.  # noqa: E501

        Unique identifier of the associated dataset.  # noqa: E501

        :return: The dataset_id of this DatasetFilterExtended.  # noqa: E501
        :rtype: int
        """
        return self._dataset_id

    @dataset_id.setter
    def dataset_id(self, dataset_id):
        """Sets the dataset_id of this DatasetFilterExtended.

        Unique identifier of the associated dataset.  # noqa: E501

        :param dataset_id: The dataset_id of this DatasetFilterExtended.  # noqa: E501
        :type: int
        """
        # Bounded to an unsigned 32-bit value.
        if dataset_id is not None and dataset_id > 4294967295:  # noqa: E501
            raise ValueError("Invalid value for `dataset_id`, must be a value less than or equal to `4294967295`")  # noqa: E501
        if dataset_id is not None and dataset_id < 0:  # noqa: E501
            raise ValueError("Invalid value for `dataset_id`, must be a value greater than or equal to `0`")  # noqa: E501

        self._dataset_id = dataset_id

    @property
    def error(self):
        """Gets the error of this DatasetFilterExtended.  # noqa: E501

        If this field is present, then there was an error fetching the filter configuration.  # noqa: E501

        :return: The error of this DatasetFilterExtended.  # noqa: E501
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """Sets the error of this DatasetFilterExtended.

        If this field is present, then there was an error fetching the filter configuration.  # noqa: E501

        :param error: The error of this DatasetFilterExtended.  # noqa: E501
        :type: str
        """
        if error is not None and len(error) > 255:
            raise ValueError("Invalid value for `error`, length must be less than or equal to `255`")  # noqa: E501
        if error is not None and len(error) < 1:
            raise ValueError("Invalid value for `error`, length must be greater than or equal to `1`")  # noqa: E501

        self._error = error

    @property
    def id(self):
        """Gets the id of this DatasetFilterExtended.  # noqa: E501

        The filter ID. Unique and automatically assigned.  # noqa: E501

        :return: The id of this DatasetFilterExtended.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this DatasetFilterExtended.

        The filter ID. Unique and automatically assigned.  # noqa: E501

        :param id: The id of this DatasetFilterExtended.  # noqa: E501
        :type: int
        """
        # Required field: None is rejected, then bounded to unsigned 32-bit.
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        if id is not None and id > 4294967295:  # noqa: E501
            raise ValueError("Invalid value for `id`, must be a value less than or equal to `4294967295`")  # noqa: E501
        if id is not None and id < 0:  # noqa: E501
            raise ValueError("Invalid value for `id`, must be a value greater than or equal to `0`")  # noqa: E501

        self._id = id

    @property
    def metric_values(self):
        """Gets the metric_values of this DatasetFilterExtended.  # noqa: E501

        Performance metric values that can be used to pin workloads and apply filters, and performance metric values that are used to display information about the system performance dataset.  # noqa: E501

        :return: The metric_values of this DatasetFilterExtended.  # noqa: E501
        :rtype: DatasetFilterMetricValues
        """
        return self._metric_values

    @metric_values.setter
    def metric_values(self, metric_values):
        """Sets the metric_values of this DatasetFilterExtended.

        Performance metric values that can be used to pin workloads and apply filters, and performance metric values that are used to display information about the system performance dataset.  # noqa: E501

        :param metric_values: The metric_values of this DatasetFilterExtended.  # noqa: E501
        :type: DatasetFilterMetricValues
        """

        self._metric_values = metric_values

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize lists, nested models and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DatasetFilterExtended):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
f2938ebae09335ecb17ac03fcdbcd51858d9b44a | 121,738 | py | Python | tensorflow/python/keras/layers/recurrent.py | carchrae/tensorflow | 6a69a6b2e286b14ac9ae813998bb0d78b6fee440 | [
"Apache-2.0"
] | 1 | 2020-06-21T23:30:57.000Z | 2020-06-21T23:30:57.000Z | tensorflow/python/keras/layers/recurrent.py | carchrae/tensorflow | 6a69a6b2e286b14ac9ae813998bb0d78b6fee440 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/layers/recurrent.py | carchrae/tensorflow | 6a69a6b2e286b14ac9ae813998bb0d78b6fee440 | [
"Apache-2.0"
] | 1 | 2020-08-28T07:24:37.000Z | 2020-08-28T07:24:37.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# Message logged when `implementation=2` is requested together with
# `recurrent_dropout`, forcing a fallback to `implementation=1`.
RECURRENT_DROPOUT_WARNING_MSG = (
    'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
    'Using `implementation=1`.')
@keras_export('keras.layers.StackedRNNCells')
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Arguments:
cells: List of RNN cell instances.
Examples:
```python
batch_size = 3
sentence_max_length = 5
n_features = 2
new_shape = (batch_size, sentence_max_length, n_features)
x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32)
rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]
stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
lstm_layer = tf.keras.layers.RNN(stacked_lstm)
result = lstm_layer(x)
```
"""
  def __init__(self, cells, **kwargs):
    # Validate that every entry quacks like an RNN cell: each must expose a
    # `call` method and a `state_size` attribute.
    for cell in cells:
      if not 'call' in dir(cell):
        raise ValueError('All cells must have a `call` method. '
                         'received cells:', cells)
      if not 'state_size' in dir(cell):
        raise ValueError('All cells must have a '
                         '`state_size` attribute. '
                         'received cells:', cells)
    self.cells = cells
    # reverse_state_order determines whether the state size will be in a reverse
    # order of the cells' state. User might want to set this to True to keep the
    # existing behavior. This is only useful when use RNN(return_state=True)
    # since the state will be returned as the same order of state_size.
    self.reverse_state_order = kwargs.pop('reverse_state_order', False)
    if self.reverse_state_order:
      logging.warning('reverse_state_order=True in StackedRNNCells will soon '
                      'be deprecated. Please update the code to work with the '
                      'natural order of states if you rely on the RNN states, '
                      'eg RNN(return_state=True).')
    super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
return tuple(c.state_size for c in
(self.cells[::-1] if self.reverse_state_order else self.cells))
@property
def output_size(self):
if getattr(self.cells[-1], 'output_size', None) is not None:
return self.cells[-1].output_size
elif _is_multiple_state(self.cells[-1].state_size):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
initial_states = []
for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
get_initial_state_fn = getattr(cell, 'get_initial_state', None)
if get_initial_state_fn:
initial_states.append(get_initial_state_fn(
inputs=inputs, batch_size=batch_size, dtype=dtype))
else:
initial_states.append(_generate_zero_filled_state_for_cell(
cell, inputs, batch_size, dtype))
return tuple(initial_states)
def call(self, inputs, states, constants=None, training=None, **kwargs):
# Recover per-cell states.
state_size = (self.state_size[::-1]
if self.reverse_state_order else self.state_size)
nested_states = nest.pack_sequence_as(state_size, nest.flatten(states))
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
states = states if nest.is_sequence(states) else [states]
# TF cell does not wrap the state into list when there is only one state.
is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
if generic_utils.has_arg(cell.call, 'training'):
kwargs['training'] = training
else:
kwargs.pop('training', None)
if generic_utils.has_arg(cell.call, 'constants'):
inputs, states = cell.call(inputs, states, constants=constants,
**kwargs)
else:
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
return inputs, nest.pack_sequence_as(state_size,
nest.flatten(new_nested_states))
@tf_utils.shape_type_conversion
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
for cell in self.cells:
if isinstance(cell, Layer):
if not cell.built:
cell.build(input_shape)
if getattr(cell, 'output_size', None) is not None:
output_dim = cell.output_size
elif _is_multiple_state(cell.state_size):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = tuple([input_shape[0]] +
tensor_shape.as_shape(output_dim).as_list())
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({
'class_name': cell.__class__.__name__,
'config': cell.get_config()
})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cells = []
for cell_config in config.pop('cells'):
cells.append(
deserialize_layer(cell_config, custom_objects=custom_objects))
return cls(cells, **config)
@keras_export('keras.layers.RNN')
class RNN(Layer):
  """Base class for recurrent layers.

  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.

  Arguments:
    cell: A RNN cell instance or a list of RNN cell instances.
      A RNN cell is a class that has:
      - A `call(input_at_t, states_at_t)` method, returning
        `(output_at_t, states_at_t_plus_1)`. The call method of the
        cell can also take the optional argument `constants`, see
        section "Note on passing external constants" below.
      - A `state_size` attribute. This can be a single integer
        (single state) in which case it is the size of the recurrent
        state. This can also be a list/tuple of integers (one size per state).
        The `state_size` can also be TensorShape or tuple/list of
        TensorShape, to represent high dimension state.
      - A `output_size` attribute. This can be a single integer or a
        TensorShape, which represent the shape of the output. For backward
        compatible reason, if this attribute is not available for the
        cell, the value will be inferred by the first element of the
        `state_size`.
      - A `get_initial_state(inputs=None, batch_size=None, dtype=None)`
        method that creates a tensor meant to be fed to `call()` as the
        initial state, if the user didn't specify any initial state via other
        means. The returned initial state should have a shape of
        [batch_size, cell.state_size]. The cell might choose to create a
        tensor full of zeros, or full of other values based on the cell's
        implementation.
        `inputs` is the input tensor to the RNN layer, which should
        contain the batch size as its shape[0], and also dtype. Note that
        the shape[0] might be `None` during the graph construction. Either
        the `inputs` or the pair of `batch_size` and `dtype` are provided.
        `batch_size` is a scalar tensor that represents the batch size
        of the inputs. `dtype` is `tf.DType` that represents the dtype of
        the inputs.
        For backward compatible reason, if this method is not implemented
        by the cell, the RNN layer will create a zero filled tensor with the
        size of [batch_size, cell.state_size].
      In the case that `cell` is a list of RNN cell instances, the cells
      will be stacked on top of each other in the RNN, resulting in an
      efficient stacked RNN.
    return_sequences: Boolean (default `False`). Whether to return the last
      output in the output sequence, or the full sequence.
    return_state: Boolean (default `False`). Whether to return the last state
      in addition to the output.
    go_backwards: Boolean (default `False`).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default `False`). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default `False`).
      If True, the network will be unrolled, else a symbolic loop will be used.
      Unrolling can speed-up a RNN, although it tends to be more
      memory-intensive. Unrolling is only suitable for short sequences.
    time_major: The shape format of the `inputs` and `outputs` tensors.
      If True, the inputs and outputs will be in shape
      `(timesteps, batch, ...)`, whereas in the False case, it will be
      `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
      efficient because it avoids transposes at the beginning and end of the
      RNN calculation. However, most TensorFlow data is batch-major, so by
      default this function accepts input and emits output in batch-major
      form.

  Call arguments:
    inputs: Input tensor.
    mask: Binary tensor of shape `[batch_size, timesteps]` indicating whether
      a given timestep should be masked.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is for use with cells that use dropout.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.
    constants: List of constant tensors to be passed to the cell at each
      timestep.

  Input shape:
    N-D tensor with shape `[batch_size, timesteps, ...]` or
    `[timesteps, batch_size, ...]` when time_major is True.

  Output shape:
    - If `return_state`: a list of tensors. The first tensor is
      the output. The remaining tensors are the last states,
      each with shape `[batch_size, state_size]`, where `state_size` could
      be a high dimension tensor shape.
    - If `return_sequences`: N-D tensor with shape
      `[batch_size, timesteps, output_size]`, where `output_size` could
      be a high dimension tensor shape, or
      `[timesteps, batch_size, output_size]` when `time_major` is True.
    - Else, N-D tensor with shape `[batch_size, output_size]`, where
      `output_size` could be a high dimension tensor shape.

  Masking:
    This layer supports masking for input data with a variable number
    of timesteps. To introduce masks to your data,
    use a [tf.keras.layers.Embedding] layer with the `mask_zero` parameter
    set to `True`.

  Note on using statefulness in RNNs:
    You can set RNN layers to be 'stateful', which means that the states
    computed for the samples in one batch will be reused as initial states
    for the samples in the next batch. This assumes a one-to-one mapping
    between samples in different successive batches.

    To enable statefulness:
      - Specify `stateful=True` in the layer constructor.
      - Specify a fixed batch size for your model, by passing
        If sequential model:
          `batch_input_shape=(...)` to the first layer in your model.
        Else for functional model with 1 or more Input layers:
          `batch_shape=(...)` to all the first layers in your model.
        This is the expected shape of your inputs
        *including the batch size*.
        It should be a tuple of integers, e.g. `(32, 10, 100)`.
      - Specify `shuffle=False` when calling fit().

    To reset the states of your model, call `.reset_states()` on either
    a specific layer, or on your entire model.

  Note on specifying the initial state of RNNs:
    You can specify the initial state of RNN layers symbolically by
    calling them with the keyword argument `initial_state`. The value of
    `initial_state` should be a tensor or list of tensors representing
    the initial state of the RNN layer.

    You can specify the initial state of RNN layers numerically by
    calling `reset_states` with the keyword argument `states`. The value of
    `states` should be a numpy array or list of numpy arrays representing
    the initial state of the RNN layer.

  Note on passing external constants to RNNs:
    You can pass "external" constants to the cell using the `constants`
    keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
    requires that the `cell.call` method accepts the same keyword argument
    `constants`. Such constants can be used to condition the cell
    transformation on additional static inputs (not changing over time),
    a.k.a. an attention mechanism.

  Examples:

  ```python
  # First, let's define a RNN Cell, as a layer subclass.

  class MinimalRNNCell(keras.layers.Layer):

      def __init__(self, units, **kwargs):
          self.units = units
          self.state_size = units
          super(MinimalRNNCell, self).__init__(**kwargs)

      def build(self, input_shape):
          self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                        initializer='uniform',
                                        name='kernel')
          self.recurrent_kernel = self.add_weight(
              shape=(self.units, self.units),
              initializer='uniform',
              name='recurrent_kernel')
          self.built = True

      def call(self, inputs, states):
          prev_output = states[0]
          h = K.dot(inputs, self.kernel)
          output = h + K.dot(prev_output, self.recurrent_kernel)
          return output, [output]

  # Let's use this cell in a RNN layer:

  cell = MinimalRNNCell(32)
  x = keras.Input((None, 5))
  layer = RNN(cell)
  y = layer(x)

  # Here's how to use the cell to build a stacked RNN:

  cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
  x = keras.Input((None, 5))
  layer = RNN(cells)
  y = layer(x)
  ```
  """

  def __init__(self,
               cell,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               time_major=False,
               **kwargs):
    if isinstance(cell, (list, tuple)):
      cell = StackedRNNCells(cell)
    # PEP 8 (E713): `'x' not in y`, not `not 'x' in y`.
    if 'call' not in dir(cell):
      raise ValueError('`cell` should have a `call` method. '
                       'The RNN was passed:', cell)
    if 'state_size' not in dir(cell):
      raise ValueError('The RNN cell should have '
                       'an attribute `state_size` '
                       '(tuple of integers, '
                       'one integer per RNN state).')
    # If True, the output for masked timestep will be zeros, whereas in the
    # False case, output from previous timestep is returned for masked timestep.
    self.zero_output_for_mask = kwargs.pop('zero_output_for_mask', False)

    if 'input_shape' not in kwargs and (
        'input_dim' in kwargs or 'input_length' in kwargs):
      input_shape = (kwargs.pop('input_length', None),
                     kwargs.pop('input_dim', None))
      kwargs['input_shape'] = input_shape

    super(RNN, self).__init__(**kwargs)
    self.cell = cell
    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    self.time_major = time_major
    self.supports_masking = True
    # The input shape is unknown yet, it could have nested tensor inputs, and
    # the input spec will be the list of specs for nested inputs, the structure
    # of the input_spec will be the same as the input.
    self.input_spec = None
    self.state_spec = None
    self._states = None
    self.constants_spec = None
    self._num_constants = 0
    self._supports_ragged_inputs = True

    if stateful:
      if ds_context.has_strategy():
        raise ValueError('RNNs with stateful=True not yet supported with '
                         'tf.distribute.Strategy.')

  @property
  def states(self):
    """State tensors of the layer; `None` placeholders before first call."""
    if self._states is None:
      state = nest.map_structure(lambda _: None, self.cell.state_size)
      return state if nest.is_sequence(self.cell.state_size) else [state]
    return self._states

  @states.setter
  # Automatic tracking catches "self._states" which adds an extra weight and
  # breaks HDF5 checkpoints.
  @trackable.no_automatic_dependency_tracking
  def states(self, states):
    self._states = states

  def compute_output_shape(self, input_shape):
    """Computes output (and, if `return_state`, state) shapes."""
    if isinstance(input_shape, list):
      input_shape = input_shape[0]
    # Check whether the input shape contains any nested shapes. It could be
    # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
    # inputs.
    try:
      input_shape = tensor_shape.as_shape(input_shape)
    except (ValueError, TypeError):
      # A nested tensor input
      input_shape = nest.flatten(input_shape)[0]

    batch = input_shape[0]
    time_step = input_shape[1]
    if self.time_major:
      batch, time_step = time_step, batch

    if _is_multiple_state(self.cell.state_size):
      state_size = self.cell.state_size
    else:
      state_size = [self.cell.state_size]

    def _get_output_shape(flat_output_size):
      output_dim = tensor_shape.as_shape(flat_output_size).as_list()
      if self.return_sequences:
        if self.time_major:
          output_shape = tensor_shape.as_shape([time_step, batch] + output_dim)
        else:
          output_shape = tensor_shape.as_shape([batch, time_step] + output_dim)
      else:
        output_shape = tensor_shape.as_shape([batch] + output_dim)
      return output_shape

    if getattr(self.cell, 'output_size', None) is not None:
      # cell.output_size could be nested structure.
      output_shape = nest.flatten(nest.map_structure(
          _get_output_shape, self.cell.output_size))
      output_shape = output_shape[0] if len(output_shape) == 1 else output_shape
    else:
      # Note that state_size[0] could be a tensor_shape or int.
      output_shape = _get_output_shape(state_size[0])

    if self.return_state:
      def _get_state_shape(flat_state):
        state_shape = [batch] + tensor_shape.as_shape(flat_state).as_list()
        return tensor_shape.as_shape(state_shape)
      state_shape = nest.map_structure(_get_state_shape, state_size)
      return generic_utils.to_list(output_shape) + nest.flatten(state_shape)
    else:
      return output_shape

  def compute_mask(self, inputs, mask):
    """Propagates the time-step mask (sequences only; states are unmasked)."""
    # Time step masks must be the same for each input.
    # This is because the mask for an RNN is of size [batch, time_steps, 1],
    # and specifies which time steps should be skipped, and a time step
    # must be skipped for all inputs.
    # TODO(scottzhu): Should we accept multiple different masks?
    mask = nest.flatten(mask)[0]
    output_mask = mask if self.return_sequences else None
    if self.return_state:
      state_mask = [None for _ in self.states]
      return [output_mask] + state_mask
    else:
      return output_mask

  def build(self, input_shape):
    """Builds input specs, the cell, and state specs from `input_shape`."""
    if isinstance(input_shape, list):
      input_shape = input_shape[0]
    # The input_shape here could be a nest structure.

    # do the tensor_shape to shapes here. The input could be single tensor, or a
    # nested structure of tensors.
    def get_input_spec(shape):
      if isinstance(shape, tensor_shape.TensorShape):
        input_spec_shape = shape.as_list()
      else:
        input_spec_shape = list(shape)
      batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
      if not self.stateful:
        input_spec_shape[batch_index] = None
      input_spec_shape[time_step_index] = None
      return InputSpec(shape=tuple(input_spec_shape))

    def get_step_input_shape(shape):
      if isinstance(shape, tensor_shape.TensorShape):
        shape = tuple(shape.as_list())
      # remove the timestep from the input_shape
      return shape[1:] if self.time_major else (shape[0],) + shape[2:]

    # Check whether the input shape contains any nested shapes. It could be
    # (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
    # inputs.
    try:
      input_shape = tensor_shape.as_shape(input_shape)
    except (ValueError, TypeError):
      # A nested tensor input
      pass

    if not nest.is_sequence(input_shape):
      # This indicates the there is only one input.
      if self.input_spec is not None:
        self.input_spec[0] = get_input_spec(input_shape)
      else:
        self.input_spec = [get_input_spec(input_shape)]
      step_input_shape = get_step_input_shape(input_shape)
    else:
      if self.input_spec is not None:
        self.input_spec[0] = nest.map_structure(get_input_spec, input_shape)
      else:
        self.input_spec = generic_utils.to_list(
            nest.map_structure(get_input_spec, input_shape))
      step_input_shape = nest.map_structure(get_step_input_shape, input_shape)

    # allow cell (if layer) to build before we set or validate state_spec
    if isinstance(self.cell, Layer):
      if not self.cell.built:
        self.cell.build(step_input_shape)

    # set or validate state_spec
    if _is_multiple_state(self.cell.state_size):
      state_size = list(self.cell.state_size)
    else:
      state_size = [self.cell.state_size]

    if self.state_spec is not None:
      # initial_state was passed in call, check compatibility
      self._validate_state_spec(state_size, self.state_spec)
    else:
      self.state_spec = [
          InputSpec(shape=[None] + tensor_shape.as_shape(dim).as_list())
          for dim in state_size
      ]
    if self.stateful:
      self.reset_states()
    self.built = True

  @staticmethod
  def _validate_state_spec(cell_state_sizes, init_state_specs):
    """Validate the state spec between the initial_state and the state_size.

    Args:
      cell_state_sizes: list, the `state_size` attribute from the cell.
      init_state_specs: list, the `state_spec` from the initial_state that is
        passed in `call()`.

    Raises:
      ValueError: When initial state spec is not compatible with the state size.
    """
    validation_error = ValueError(
        'An `initial_state` was passed that is not compatible with '
        '`cell.state_size`. Received `state_spec`={}; '
        'however `cell.state_size` is '
        '{}'.format(init_state_specs, cell_state_sizes))
    flat_cell_state_size = nest.flatten(cell_state_sizes)
    flat_state_spec = nest.flatten(init_state_specs)

    if len(flat_cell_state_size) != len(flat_state_spec):
      raise validation_error
    for i in range(len(flat_cell_state_size)):
      if not tensor_shape.TensorShape(
          # Ignore the first axis for init_state which is for batch
          flat_state_spec[i].shape[1:]).is_compatible_with(
              tensor_shape.TensorShape(flat_cell_state_size[i])):
        raise validation_error

  @doc_controls.do_not_doc_inheritable
  def get_initial_state(self, inputs):
    """Builds the default (usually zero-filled) initial state from `inputs`."""
    get_initial_state_fn = getattr(self.cell, 'get_initial_state', None)

    if nest.is_sequence(inputs):
      # The input are nested sequences. Use the first element in the seq to get
      # batch size and dtype.
      inputs = nest.flatten(inputs)[0]

    input_shape = array_ops.shape(inputs)
    batch_size = input_shape[1] if self.time_major else input_shape[0]
    dtype = inputs.dtype
    if get_initial_state_fn:
      init_state = get_initial_state_fn(
          inputs=None, batch_size=batch_size, dtype=dtype)
    else:
      init_state = _generate_zero_filled_state(batch_size, self.cell.state_size,
                                               dtype)
    # Keras RNN expect the states in a list, even if it's a single state tensor.
    if not nest.is_sequence(init_state):
      init_state = [init_state]
    # Force the state to be a list in case it is a namedtuple eg LSTMStateTuple.
    return list(init_state)

  def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
    """Wraps `Layer.__call__` to route `initial_state`/`constants` inputs."""
    inputs, initial_state, constants = _standardize_args(inputs,
                                                         initial_state,
                                                         constants,
                                                         self._num_constants)

    if initial_state is None and constants is None:
      return super(RNN, self).__call__(inputs, **kwargs)

    # If any of `initial_state` or `constants` are specified and are Keras
    # tensors, then add them to the inputs and temporarily modify the
    # input_spec to include them.

    additional_inputs = []
    additional_specs = []
    if initial_state is not None:
      additional_inputs += initial_state
      self.state_spec = nest.map_structure(
          lambda s: InputSpec(shape=K.int_shape(s)), initial_state)
      additional_specs += self.state_spec
    if constants is not None:
      additional_inputs += constants
      self.constants_spec = [
          InputSpec(shape=K.int_shape(constant)) for constant in constants
      ]
      self._num_constants = len(constants)
      additional_specs += self.constants_spec
    # additional_inputs can be empty if initial_state or constants are provided
    # but empty (e.g. the cell is stateless).
    flat_additional_inputs = nest.flatten(additional_inputs)
    is_keras_tensor = K.is_keras_tensor(
        flat_additional_inputs[0]) if flat_additional_inputs else True
    for tensor in flat_additional_inputs:
      if K.is_keras_tensor(tensor) != is_keras_tensor:
        raise ValueError('The initial state or constants of an RNN'
                         ' layer cannot be specified with a mix of'
                         ' Keras tensors and non-Keras tensors'
                         ' (a "Keras tensor" is a tensor that was'
                         ' returned by a Keras layer, or by `Input`)')

    if is_keras_tensor:
      # Compute the full input spec, including state and constants
      full_input = [inputs] + additional_inputs
      # The original input_spec is None since there could be a nested tensor
      # input. Update the input_spec to match the inputs.
      full_input_spec = generic_utils.to_list(
          nest.map_structure(lambda _: None, inputs)) + additional_specs
      # Perform the call with temporarily replaced input_spec
      self.input_spec = full_input_spec
      output = super(RNN, self).__call__(full_input, **kwargs)
      # Remove the additional_specs from input spec and keep the rest. It is
      # important to keep since the input spec was populated by build(), and
      # will be reused in the stateful=True.
      self.input_spec = self.input_spec[:-len(additional_specs)]
      return output
    else:
      if initial_state is not None:
        kwargs['initial_state'] = initial_state
      if constants is not None:
        kwargs['constants'] = constants
      return super(RNN, self).__call__(inputs, **kwargs)

  def call(self,
           inputs,
           mask=None,
           training=None,
           initial_state=None,
           constants=None):
    """Runs the cell over the time dimension via `K.rnn`."""
    # The input should be dense, padded with zeros. If a ragged input is fed
    # into the layer, it is padded and the row lengths are used for masking.
    inputs, row_lengths = K.convert_inputs_if_ragged(inputs)
    is_ragged_input = (row_lengths is not None)
    self._validate_args_if_ragged(is_ragged_input, mask)

    inputs, initial_state, constants = self._process_inputs(
        inputs, initial_state, constants)

    self._maybe_reset_cell_dropout_mask(self.cell)
    if isinstance(self.cell, StackedRNNCells):
      for cell in self.cell.cells:
        self._maybe_reset_cell_dropout_mask(cell)

    if mask is not None:
      # Time step masks must be the same for each input.
      # TODO(scottzhu): Should we accept multiple different masks?
      mask = nest.flatten(mask)[0]

    if nest.is_sequence(inputs):
      # In the case of nested input, use the first element for shape check.
      input_shape = K.int_shape(nest.flatten(inputs)[0])
    else:
      input_shape = K.int_shape(inputs)
    timesteps = input_shape[0] if self.time_major else input_shape[1]
    if self.unroll and timesteps is None:
      raise ValueError('Cannot unroll a RNN if the '
                       'time dimension is undefined. \n'
                       '- If using a Sequential model, '
                       'specify the time dimension by passing '
                       'an `input_shape` or `batch_input_shape` '
                       'argument to your first layer. If your '
                       'first layer is an Embedding, you can '
                       'also use the `input_length` argument.\n'
                       '- If using the functional API, specify '
                       'the time dimension by passing a `shape` '
                       'or `batch_shape` argument to your Input layer.')

    kwargs = {}
    if generic_utils.has_arg(self.cell.call, 'training'):
      kwargs['training'] = training

    # TF RNN cells expect single tensor as state instead of list wrapped tensor.
    is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None
    if constants:
      if not generic_utils.has_arg(self.cell.call, 'constants'):
        raise ValueError('RNN cell does not support constants')

      def step(inputs, states):
        constants = states[-self._num_constants:]  # pylint: disable=invalid-unary-operand-type
        states = states[:-self._num_constants]  # pylint: disable=invalid-unary-operand-type

        states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
        output, new_states = self.cell.call(
            inputs, states, constants=constants, **kwargs)
        if not nest.is_sequence(new_states):
          new_states = [new_states]
        return output, new_states
    else:

      def step(inputs, states):
        states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
        output, new_states = self.cell.call(inputs, states, **kwargs)
        if not nest.is_sequence(new_states):
          new_states = [new_states]
        return output, new_states

    last_output, outputs, states = K.rnn(
        step,
        inputs,
        initial_state,
        constants=constants,
        go_backwards=self.go_backwards,
        mask=mask,
        unroll=self.unroll,
        input_length=row_lengths if row_lengths is not None else timesteps,
        time_major=self.time_major,
        zero_output_for_mask=self.zero_output_for_mask)

    if self.stateful:
      updates = []
      for state_, state in zip(nest.flatten(self.states), nest.flatten(states)):
        updates.append(state_ops.assign(state_, state))
      self.add_update(updates)

    if self.return_sequences:
      output = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths)
    else:
      output = last_output

    if self.return_state:
      if not isinstance(states, (list, tuple)):
        states = [states]
      else:
        states = list(states)
      return generic_utils.to_list(output) + states
    else:
      return output

  def _process_inputs(self, inputs, initial_state, constants):
    """Splits combined inputs and resolves the effective initial state."""
    # input shape: `(samples, time (padded with zeros), input_dim)`
    # note that the .build() method of subclasses MUST define
    # self.input_spec and self.state_spec with complete input shapes.
    # Use `collections.abc.Sequence`: the bare `collections.Sequence` alias
    # is deprecated and removed in Python 3.10.
    if (isinstance(inputs, collections.abc.Sequence)
        and not isinstance(inputs, tuple)):
      # get initial_state from full input spec
      # as they could be copied to multiple GPU.
      if not self._num_constants:
        initial_state = inputs[1:]
      else:
        initial_state = inputs[1:-self._num_constants]
        constants = inputs[-self._num_constants:]
      if len(initial_state) == 0:
        initial_state = None
      inputs = inputs[0]

    if self.stateful:
      if initial_state is not None:
        # When layer is stateful and initial_state is provided, check if the
        # recorded state is same as the default value (zeros). Use the recorded
        # state if it is not same as the default.
        non_zero_count = math_ops.add_n([math_ops.count_nonzero_v2(s)
                                         for s in nest.flatten(self.states)])
        # Set strict = True to keep the original structure of the state.
        initial_state = control_flow_ops.cond(non_zero_count > 0,
                                              true_fn=lambda: self.states,
                                              false_fn=lambda: initial_state,
                                              strict=True)
      else:
        initial_state = self.states
    elif initial_state is None:
      initial_state = self.get_initial_state(inputs)

    if len(initial_state) != len(self.states):
      raise ValueError('Layer has ' + str(len(self.states)) +
                       ' states but was passed ' + str(len(initial_state)) +
                       ' initial states.')
    return inputs, initial_state, constants

  def _validate_args_if_ragged(self, is_ragged_input, mask):
    """Rejects masks and unrolling when the input is a RaggedTensor."""
    if not is_ragged_input:
      return

    if mask is not None:
      raise ValueError('The mask that was passed in was ' + str(mask) +
                       ' and cannot be applied to RaggedTensor inputs. Please '
                       'make sure that there is no mask passed in by upstream '
                       'layers.')
    if self.unroll:
      # Fixed typo in the original message: "constains" -> "contains".
      raise ValueError('The input received contains RaggedTensors and does '
                       'not support unrolling. Disable unrolling by passing '
                       '`unroll=False` in the RNN Layer constructor.')

  def _maybe_reset_cell_dropout_mask(self, cell):
    """Clears cached dropout masks so a fresh mask is drawn per call."""
    if isinstance(cell, DropoutRNNCellMixin):
      cell.reset_dropout_mask()
      cell.reset_recurrent_dropout_mask()

  def reset_states(self, states=None):
    """Reset the recorded states for the stateful RNN layer.

    Can only be used when RNN layer is constructed with `stateful` = `True`.

    Args:
      states: Numpy arrays that contains the value for the initial state, which
        will be feed to cell at the first time step. When the value is None,
        zero filled numpy array will be created based on the cell state size.

    Raises:
      AttributeError: When the RNN layer is not stateful.
      ValueError: When the batch size of the RNN layer is unknown.
      ValueError: When the input numpy array is not compatible with the RNN
        layer state, either size wise or dtype wise.
    """
    if not self.stateful:
      raise AttributeError('Layer must be stateful.')
    spec_shape = None
    if self.input_spec is not None:
      spec_shape = nest.flatten(self.input_spec[0])[0].shape
    if spec_shape is None:
      # It is possible to have spec shape to be None, eg when construct a RNN
      # with a custom cell, or standard RNN layers (LSTM/GRU) which we only know
      # it has 3 dim input, but not its full shape spec before build().
      batch_size = None
    else:
      batch_size = spec_shape[1] if self.time_major else spec_shape[0]
    if not batch_size:
      raise ValueError('If a RNN is stateful, it needs to know '
                       'its batch size. Specify the batch size '
                       'of your input tensors: \n'
                       '- If using a Sequential model, '
                       'specify the batch size by passing '
                       'a `batch_input_shape` '
                       'argument to your first layer.\n'
                       '- If using the functional API, specify '
                       'the batch size by passing a '
                       '`batch_shape` argument to your Input layer.')
    # initialize state if None
    if nest.flatten(self.states)[0] is None:
      def create_state_variable(state):
        return K.zeros([batch_size] + tensor_shape.as_shape(state).as_list())
      self.states = nest.map_structure(
          create_state_variable, self.cell.state_size)
      if not nest.is_sequence(self.states):
        self.states = [self.states]
    elif states is None:
      for state, size in zip(nest.flatten(self.states),
                             nest.flatten(self.cell.state_size)):
        K.set_value(state, np.zeros([batch_size] +
                                    tensor_shape.as_shape(size).as_list()))
    else:
      flat_states = nest.flatten(self.states)
      flat_input_states = nest.flatten(states)
      if len(flat_input_states) != len(flat_states):
        raise ValueError('Layer ' + self.name + ' expects ' +
                         str(len(flat_states)) + ' states, '
                         'but it received ' + str(len(flat_input_states)) +
                         ' state values. Input received: ' + str(states))
      set_value_tuples = []
      for i, (value, state) in enumerate(zip(flat_input_states,
                                             flat_states)):
        if value.shape != state.shape:
          raise ValueError(
              'State ' + str(i) + ' is incompatible with layer ' +
              self.name + ': expected shape=' + str(
                  (batch_size, state)) + ', found shape=' + str(value.shape))
        set_value_tuples.append((state, value))
      K.batch_set_value(set_value_tuples)

  def get_config(self):
    """Serializes layer flags plus the wrapped cell's config."""
    config = {
        'return_sequences': self.return_sequences,
        'return_state': self.return_state,
        'go_backwards': self.go_backwards,
        'stateful': self.stateful,
        'unroll': self.unroll,
        'time_major': self.time_major
    }
    if self._num_constants:
      config['num_constants'] = self._num_constants
    if self.zero_output_for_mask:
      config['zero_output_for_mask'] = self.zero_output_for_mask

    cell_config = self.cell.get_config()
    config['cell'] = {
        'class_name': self.cell.__class__.__name__,
        'config': cell_config
    }
    base_config = super(RNN, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Recreates the layer, deserializing the cell and restoring constants."""
    from tensorflow.python.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)
    num_constants = config.pop('num_constants', 0)
    layer = cls(cell, **config)
    layer._num_constants = num_constants
    return layer
@keras_export('keras.layers.AbstractRNNCell')
class AbstractRNNCell(Layer):
  """Abstract object representing an RNN cell.

  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.

  This is the base class for implementing RNN cells with custom behavior.
  Every `RNNCell` must have the properties below and implement `call` with
  the signature `(output, next_state) = call(input, state)`.

  Examples:

  ```python
    class MinimalRNNCell(AbstractRNNCell):

      def __init__(self, units, **kwargs):
        self.units = units
        super(MinimalRNNCell, self).__init__(**kwargs)

      @property
      def state_size(self):
        return self.units

      def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

      def call(self, inputs, states):
        prev_output = states[0]
        h = K.dot(inputs, self.kernel)
        output = h + K.dot(prev_output, self.recurrent_kernel)
        return output, output
  ```

  This definition of cell differs from the definition used in the literature.
  In the literature, 'cell' refers to an object with a single scalar output.
  This definition refers to a horizontal array of such units.

  An RNN cell, in the most abstract setting, is anything that has
  a state and performs some operation that takes a matrix of inputs.
  This operation results in an output matrix with `self.output_size` columns.
  If `self.state_size` is an integer, this operation also results in a new
  state matrix with `self.state_size` columns. If `self.state_size` is a
  (possibly nested tuple of) TensorShape object(s), then it should return a
  matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
  """
  def call(self, inputs, states):
    """The function that contains the logic for one RNN step calculation.

    Args:
      inputs: the input tensor, which is a slide from the overall RNN input by
        the time dimension (usually the second dimension).
      states: the state tensor from previous step, which has the same shape
        as `(batch, state_size)`. In the case of timestep 0, it will be the
        initial state user specified, or zero filled tensor otherwise.

    Returns:
      A tuple of two tensors:
        1. output tensor for the current timestep, with size `output_size`.
        2. state tensor for next step, which has the shape of `state_size`.
    """
    raise NotImplementedError('Abstract method')
  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.

    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError('Abstract method')
  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError('Abstract method')
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    # Default initial state: zeros shaped according to `state_size`, delegated
    # to the module-level helper shared by all built-in cells.
    return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@doc_controls.do_not_generate_docs
class DropoutRNNCellMixin(object):
  """Object that holds dropout-related fields for an RNN cell.

  This class is not a standalone RNN cell. It is supposed to be used with an
  RNN cell by multiple inheritance. Any cell that mixes in this class should
  have the following fields:

    dropout: a float number within range [0, 1). The ratio that the input
      tensor needs to dropout.
    recurrent_dropout: a float number within range [0, 1). The ratio that the
      recurrent state weights need to dropout.

  This object will create and cache created dropout masks, and reuse them for
  the incoming data, so that the same mask is used for every batch input.
  """
  def __init__(self, *args, **kwargs):
    # Note that the following two masks will be used in "graph function" mode,
    # e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`
    # tensors will be generated differently than in the "graph function" case,
    # and they will be cached.
    # Also note that in graph mode, we still cache those masks only because the
    # RNN could be created with `unroll=True`. In that case, the `cell.call()`
    # function will be invoked multiple times, and we want to ensure same mask
    # is used every time.
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
    self._eager_dropout_mask = None
    self._eager_recurrent_dropout_mask = None
    super(DropoutRNNCellMixin, self).__init__(*args, **kwargs)
  def reset_dropout_mask(self):
    """Reset the cached input-dropout masks, if any.

    This is important for the RNN layer to invoke this in its call() method so
    that the cached mask is cleared before calling the cell.call(). The mask
    should be cached across the timesteps within the same batch, but shouldn't
    be cached between batches. Otherwise it will introduce unreasonable bias
    against certain indices of data within the batch.
    """
    self._dropout_mask = None
    self._eager_dropout_mask = None
  def reset_recurrent_dropout_mask(self):
    """Reset the cached recurrent-dropout masks, if any.

    This is important for the RNN layer to invoke this in its call() method so
    that the cached mask is cleared before calling the cell.call(). The mask
    should be cached across the timesteps within the same batch, but shouldn't
    be cached between batches. Otherwise it will introduce unreasonable bias
    against certain indices of data within the batch.
    """
    self._recurrent_dropout_mask = None
    self._eager_recurrent_dropout_mask = None
  def get_dropout_mask_for_cell(self, inputs, training, count=1):
    """Get the dropout mask for the RNN cell's input.

    It will create a mask based on context if there isn't any existing cached
    mask. If a new mask is generated, it will update the cache in the cell.

    Args:
      inputs: The input tensor whose shape will be used to generate dropout
        mask.
      training: Boolean tensor, whether it is in training mode; dropout will be
        ignored in non-training mode.
      count: Int, how many dropout masks will be generated. It is useful for a
        cell that has internal weights fused together.
    Returns:
      List of mask tensors, generated or cached masks based on context, or
      `None` when `self.dropout == 0` (no dropout to apply).
    """
    if self.dropout == 0:
      return None
    # Graph mode caches in `_dropout_mask`; eager mode in `_eager_dropout_mask`.
    # (`and` binds tighter than `or`, so this reads as (graph and no graph
    # cache) or (eager and no eager cache).)
    if (not context.executing_eagerly() and self._dropout_mask is None
        or context.executing_eagerly() and self._eager_dropout_mask is None):
      # Generate new mask and cache it based on context.
      dp_mask = _generate_dropout_mask(
          array_ops.ones_like(inputs),
          self.dropout,
          training=training,
          count=count)
      if context.executing_eagerly():
        self._eager_dropout_mask = dp_mask
      else:
        self._dropout_mask = dp_mask
    else:
      # Reuse the existing mask.
      dp_mask = (self._eager_dropout_mask
                 if context.executing_eagerly() else self._dropout_mask)
    return dp_mask
  def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
    """Get the recurrent dropout mask for the RNN cell.

    It will create a mask based on context if there isn't any existing cached
    mask. If a new mask is generated, it will update the cache in the cell.

    Args:
      inputs: The input tensor whose shape will be used to generate dropout
        mask.
      training: Boolean tensor, whether it is in training mode; dropout will be
        ignored in non-training mode.
      count: Int, how many dropout masks will be generated. It is useful for a
        cell that has internal weights fused together.
    Returns:
      List of mask tensors, generated or cached masks based on context, or
      `None` when `self.recurrent_dropout == 0`.
    """
    if self.recurrent_dropout == 0:
      return None
    # Same caching scheme as `get_dropout_mask_for_cell`, but keyed on the
    # recurrent-state masks.
    if (not context.executing_eagerly() and self._recurrent_dropout_mask is None
        or context.executing_eagerly()
        and self._eager_recurrent_dropout_mask is None):
      # Generate new mask and cache it based on context.
      rec_dp_mask = _generate_dropout_mask(
          array_ops.ones_like(inputs),
          self.recurrent_dropout,
          training=training,
          count=count)
      if context.executing_eagerly():
        self._eager_recurrent_dropout_mask = rec_dp_mask
      else:
        self._recurrent_dropout_mask = rec_dp_mask
    else:
      # Reuse the existing mask.
      rec_dp_mask = (self._eager_recurrent_dropout_mask
                     if context.executing_eagerly()
                     else self._recurrent_dropout_mask)
    return rec_dp_mask
@keras_export('keras.layers.SimpleRNNCell')
class SimpleRNNCell(DropoutRNNCellMixin, Layer):
  """Cell class for SimpleRNN.

  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.

  This class processes one step within the whole time sequence input, whereas
  `tf.keras.layer.SimpleRNN` processes the whole sequence.

  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs. Default:
      `glorot_uniform`.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the recurrent state.
      Default: `orthogonal`.
    bias_initializer: Initializer for the bias vector. Default: `zeros`.
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_regularizer: Regularizer function applied to the
      `recurrent_kernel` weights matrix. Default: `None`.
    bias_regularizer: Regularizer function applied to the bias vector. Default:
      `None`.
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_constraint: Constraint function applied to the `recurrent_kernel`
      weights matrix. Default: `None`.
    bias_constraint: Constraint function applied to the bias vector. Default:
      `None`.
    dropout: Float between 0 and 1. Fraction of the units to drop for the linear
      transformation of the inputs. Default: 0.
    recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
      the linear transformation of the recurrent state. Default: 0.

  Call arguments:
    inputs: A 2D tensor, with shape of `[batch, feature]`.
    states: A 2D tensor with shape of `[batch, units]`, which is the state from
      the previous time step. For timestep 0, the initial state provided by user
      will be feed to cell.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. Only relevant when `dropout` or
      `recurrent_dropout` is used.

  Examples:

  ```python
  inputs = np.random.random([32, 10, 8]).astype(np.float32)
  rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))

  output = rnn(inputs)  # The output has shape `[32, 4]`.

  rnn = tf.keras.layers.RNN(
      tf.keras.layers.SimpleRNNCell(4),
      return_sequences=True,
      return_state=True)

  # whole_sequence_output has shape `[32, 10, 4]`.
  # final_state has shape `[32, 4]`.
  whole_sequence_output, final_state = rnn(inputs)
  ```
  """
  def __init__(self,
               units,
               activation='tanh',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    # By default use cached variable under v2 mode, see b/143699808.
    if ops.executing_eagerly_outside_functions():
      self._enable_caching_device = kwargs.pop('enable_caching_device', True)
    else:
      self._enable_caching_device = kwargs.pop('enable_caching_device', False)
    super(SimpleRNNCell, self).__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp dropout rates into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = self.units
    self.output_size = self.units
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Creates `kernel` (input projection), `recurrent_kernel` (state
    # projection) and optionally `bias`.
    default_caching_device = _caching_device(self)
    self.kernel = self.add_weight(
        shape=(input_shape[-1], self.units),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        caching_device=default_caching_device)
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint,
        caching_device=default_caching_device)
    if self.use_bias:
      self.bias = self.add_weight(
          shape=(self.units,),
          name='bias',
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          caching_device=default_caching_device)
    else:
      self.bias = None
    self.built = True
  def call(self, inputs, states, training=None):
    # One step: output = activation(W_x . x + b + W_h . h_prev); the output is
    # also the next state.
    prev_output = states[0] if nest.is_sequence(states) else states
    dp_mask = self.get_dropout_mask_for_cell(inputs, training)
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        prev_output, training)
    if dp_mask is not None:
      h = K.dot(inputs * dp_mask, self.kernel)
    else:
      h = K.dot(inputs, self.kernel)
    if self.bias is not None:
      h = K.bias_add(h, self.bias)
    if rec_dp_mask is not None:
      prev_output = prev_output * rec_dp_mask
    output = h + K.dot(prev_output, self.recurrent_kernel)
    if self.activation is not None:
      output = self.activation(output)
    return output, [output]
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    # Zero-filled initial state matching `state_size`.
    return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
  def get_config(self):
    """Returns the cell config; all hyperparameters are serialized by name."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout
    }
    config.update(_config_for_enable_caching_device(self))
    base_config = super(SimpleRNNCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SimpleRNN')
class SimpleRNN(RNN):
  """Fully-connected RNN where the output is to be fed back to input.

  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.

  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass None, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs. Default:
      `glorot_uniform`.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the recurrent state.
      Default: `orthogonal`.
    bias_initializer: Initializer for the bias vector. Default: `zeros`.
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_regularizer: Regularizer function applied to the
      `recurrent_kernel` weights matrix. Default: `None`.
    bias_regularizer: Regularizer function applied to the bias vector. Default:
      `None`.
    activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation"). Default: `None`.
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_constraint: Constraint function applied to the `recurrent_kernel`
      weights matrix. Default: `None`.
    bias_constraint: Constraint function applied to the bias vector. Default:
      `None`.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for the linear transformation of the inputs.
      Default: 0.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for the linear transformation of the
      recurrent state. Default: 0.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence. Default: `False`.
    return_state: Boolean. Whether to return the last state
      in addition to the output. Default: `False`
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default False).
      If True, the network will be unrolled,
      else a symbolic loop will be used.
      Unrolling can speed-up a RNN,
      although it tends to be more memory-intensive.
      Unrolling is only suitable for short sequences.

  Call arguments:
    inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
    mask: Binary tensor of shape `[batch, timesteps]` indicating whether
      a given timestep should be masked.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` is used.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.

  Examples:

  ```python
  inputs = np.random.random([32, 10, 8]).astype(np.float32)
  simple_rnn = tf.keras.layers.SimpleRNN(4)

  output = simple_rnn(inputs)  # The output has shape `[32, 4]`.

  simple_rnn = tf.keras.layers.SimpleRNN(
      4, return_sequences=True, return_state=True)

  # whole_sequence_output has shape `[32, 10, 4]`.
  # final_state has shape `[32, 4]`.
  whole_sequence_output, final_state = simple_rnn(inputs)
  ```
  """
  def __init__(self,
               units,
               activation='tanh',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               **kwargs):
    # `implementation` was a legacy knob; it is accepted and discarded for
    # backward compatibility.
    if 'implementation' in kwargs:
      kwargs.pop('implementation')
      logging.warning('The `implementation` argument '
                      'in `SimpleRNN` has been deprecated. '
                      'Please remove it from your layer call.')
    if 'enable_caching_device' in kwargs:
      cell_kwargs = {'enable_caching_device':
                     kwargs.pop('enable_caching_device')}
    else:
      cell_kwargs = {}
    # The layer is a thin wrapper: all per-step math lives in SimpleRNNCell.
    cell = SimpleRNNCell(
        units,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        dtype=kwargs.get('dtype'),
        trainable=kwargs.get('trainable', True),
        **cell_kwargs)
    super(SimpleRNN, self).__init__(
        cell,
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        unroll=unroll,
        **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = [InputSpec(ndim=3)]
  def call(self, inputs, mask=None, training=None, initial_state=None):
    # Clear cached dropout masks so each batch gets fresh masks; the base RNN
    # drives the per-timestep loop.
    self._maybe_reset_cell_dropout_mask(self.cell)
    return super(SimpleRNN, self).call(
        inputs, mask=mask, training=training, initial_state=initial_state)
  # The following read-only properties delegate to the underlying cell so the
  # layer mirrors the cell's hyperparameters.
  @property
  def units(self):
    return self.cell.units
  @property
  def activation(self):
    return self.cell.activation
  @property
  def use_bias(self):
    return self.cell.use_bias
  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer
  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer
  @property
  def bias_initializer(self):
    return self.cell.bias_initializer
  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer
  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer
  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer
  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint
  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint
  @property
  def bias_constraint(self):
    return self.cell.bias_constraint
  @property
  def dropout(self):
    return self.cell.dropout
  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout
  def get_config(self):
    """Returns the layer config with the cell's hyperparameters flattened in."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout
    }
    base_config = super(SimpleRNN, self).get_config()
    config.update(_config_for_enable_caching_device(self.cell))
    # The base RNN config embeds the cell; SimpleRNN rebuilds its cell from the
    # flat keys above, so the nested entry is dropped.
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config):
    # Drop the deprecated `implementation` key from old configs.
    if 'implementation' in config:
      config.pop('implementation')
    return cls(**config)
@keras_export(v1=['keras.layers.GRUCell'])
class GRUCell(DropoutRNNCellMixin, Layer):
  """Cell class for the GRU layer.

  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass None, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: hard sigmoid (`hard_sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix,
      used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    recurrent_regularizer: Regularizer function applied to
      the `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.
    implementation: Implementation mode, either 1 or 2.
      Mode 1 will structure its operations as a larger number of
      smaller dot products and additions, whereas mode 2 will
      batch them into fewer, larger operations. These modes will
      have different performance profiles on different hardware and
      for different applications.
    reset_after: GRU convention (whether to apply reset gate after or
      before matrix multiplication). False = "before" (default),
      True = "after" (CuDNN compatible).

  Call arguments:
    inputs: A 2D tensor.
    states: List of state tensors corresponding to the previous timestep.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. Only relevant when `dropout` or
      `recurrent_dropout` is used.
  """
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               implementation=1,
               reset_after=False,
               **kwargs):
    # By default use cached variable under v2 mode, see b/143699808.
    if ops.executing_eagerly_outside_functions():
      self._enable_caching_device = kwargs.pop('enable_caching_device', True)
    else:
      self._enable_caching_device = kwargs.pop('enable_caching_device', False)
    super(GRUCell, self).__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp dropout rates into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    # Recurrent dropout requires per-gate masks, which only implementation 1
    # supports; silently fall back (with a debug log) when they conflict.
    if self.recurrent_dropout != 0 and implementation != 1:
      logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
      self.implementation = 1
    else:
      self.implementation = implementation
    self.reset_after = reset_after
    self.state_size = self.units
    self.output_size = self.units
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    input_dim = input_shape[-1]
    default_caching_device = _caching_device(self)
    # Weights are fused for the three gates (z, r, h), laid out side by side
    # along the last axis.
    self.kernel = self.add_weight(
        shape=(input_dim, self.units * 3),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        caching_device=default_caching_device)
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units * 3),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint,
        caching_device=default_caching_device)
    if self.use_bias:
      if not self.reset_after:
        bias_shape = (3 * self.units,)
      else:
        # separate biases for input and recurrent kernels
        # Note: the shape is intentionally different from CuDNNGRU biases
        # `(2 * 3 * self.units,)`, so that we can distinguish the classes
        # when loading and converting saved weights.
        bias_shape = (2, 3 * self.units)
      self.bias = self.add_weight(shape=bias_shape,
                                  name='bias',
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  constraint=self.bias_constraint,
                                  caching_device=default_caching_device)
    else:
      self.bias = None
    self.built = True
  def call(self, inputs, states, training=None):
    h_tm1 = states[0] if nest.is_sequence(states) else states  # previous memory
    dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        h_tm1, training, count=3)
    if self.use_bias:
      if not self.reset_after:
        input_bias, recurrent_bias = self.bias, None
      else:
        input_bias, recurrent_bias = array_ops.unstack(self.bias)
    if self.implementation == 1:
      # Implementation 1: separate (smaller) matmuls per gate; allows
      # per-gate input/recurrent dropout masks.
      if 0. < self.dropout < 1.:
        inputs_z = inputs * dp_mask[0]
        inputs_r = inputs * dp_mask[1]
        inputs_h = inputs * dp_mask[2]
      else:
        inputs_z = inputs
        inputs_r = inputs
        inputs_h = inputs
      x_z = K.dot(inputs_z, self.kernel[:, :self.units])
      x_r = K.dot(inputs_r, self.kernel[:, self.units:self.units * 2])
      x_h = K.dot(inputs_h, self.kernel[:, self.units * 2:])
      if self.use_bias:
        x_z = K.bias_add(x_z, input_bias[:self.units])
        x_r = K.bias_add(x_r, input_bias[self.units: self.units * 2])
        x_h = K.bias_add(x_h, input_bias[self.units * 2:])
      if 0. < self.recurrent_dropout < 1.:
        h_tm1_z = h_tm1 * rec_dp_mask[0]
        h_tm1_r = h_tm1 * rec_dp_mask[1]
        h_tm1_h = h_tm1 * rec_dp_mask[2]
      else:
        h_tm1_z = h_tm1
        h_tm1_r = h_tm1
        h_tm1_h = h_tm1
      recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])
      recurrent_r = K.dot(h_tm1_r,
                          self.recurrent_kernel[:, self.units:self.units * 2])
      if self.reset_after and self.use_bias:
        recurrent_z = K.bias_add(recurrent_z, recurrent_bias[:self.units])
        recurrent_r = K.bias_add(recurrent_r,
                                 recurrent_bias[self.units:self.units * 2])
      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)
      # reset gate applied after/before matrix multiplication
      if self.reset_after:
        recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
        if self.use_bias:
          recurrent_h = K.bias_add(recurrent_h, recurrent_bias[self.units * 2:])
        recurrent_h = r * recurrent_h
      else:
        recurrent_h = K.dot(r * h_tm1_h,
                            self.recurrent_kernel[:, self.units * 2:])
      hh = self.activation(x_h + recurrent_h)
    else:
      # Implementation 2: fewer, larger fused matmuls (single dropout mask).
      if 0. < self.dropout < 1.:
        inputs = inputs * dp_mask[0]
      # inputs projected by all gate matrices at once
      matrix_x = K.dot(inputs, self.kernel)
      if self.use_bias:
        # biases: bias_z_i, bias_r_i, bias_h_i
        matrix_x = K.bias_add(matrix_x, input_bias)
      x_z, x_r, x_h = array_ops.split(matrix_x, 3, axis=-1)
      if self.reset_after:
        # hidden state projected by all gate matrices at once
        matrix_inner = K.dot(h_tm1, self.recurrent_kernel)
        if self.use_bias:
          matrix_inner = K.bias_add(matrix_inner, recurrent_bias)
      else:
        # hidden state projected separately for update/reset and new
        matrix_inner = K.dot(h_tm1, self.recurrent_kernel[:, :2 * self.units])
      # `-1` in the split sizes: the "new" slice may be empty in the
      # reset-before path, where only 2*units columns were projected.
      recurrent_z, recurrent_r, recurrent_h = array_ops.split(
          matrix_inner, [self.units, self.units, -1], axis=-1)
      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)
      if self.reset_after:
        recurrent_h = r * recurrent_h
      else:
        recurrent_h = K.dot(r * h_tm1,
                            self.recurrent_kernel[:, 2 * self.units:])
      hh = self.activation(x_h + recurrent_h)
    # previous and candidate state mixed by update gate
    h = z * h_tm1 + (1 - z) * hh
    return h, [h]
  def get_config(self):
    """Returns the cell config; all hyperparameters are serialized by name."""
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
        'dropout': self.dropout,
        'recurrent_dropout': self.recurrent_dropout,
        'implementation': self.implementation,
        'reset_after': self.reset_after
    }
    config.update(_config_for_enable_caching_device(self))
    base_config = super(GRUCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    # Zero-filled initial state matching `state_size`.
    return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@keras_export(v1=['keras.layers.GRU'])
class GRU(RNN):
  """Gated Recurrent Unit - Cho et al. 2014.
  There are two variants. The default one is based on 1406.1078v3 and
  has reset gate applied to hidden state before matrix multiplication. The
  other one is based on original 1406.1078v1 and has the order reversed.
  The second variant is compatible with CuDNNGRU (GPU-only) and allows
  inference on CPU. Thus it has separate biases for `kernel` and
  `recurrent_kernel`. Use `'reset_after'=True` and
  `recurrent_activation='sigmoid'`.
  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: hard sigmoid (`hard_sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    recurrent_regularizer: Regularizer function applied to
      the `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation")..
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.
    implementation: Implementation mode, either 1 or 2.
      Mode 1 will structure its operations as a larger number of
      smaller dot products and additions, whereas mode 2 will
      batch them into fewer, larger operations. These modes will
      have different performance profiles on different hardware and
      for different applications.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
    return_state: Boolean. Whether to return the last state
      in addition to the output.
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default False).
      If True, the network will be unrolled,
      else a symbolic loop will be used.
      Unrolling can speed-up a RNN,
      although it tends to be more memory-intensive.
      Unrolling is only suitable for short sequences.
    time_major: The shape format of the `inputs` and `outputs` tensors.
      If True, the inputs and outputs will be in shape
      `(timesteps, batch, ...)`, whereas in the False case, it will be
      `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
      efficient because it avoids transposes at the beginning and end of the
      RNN calculation. However, most TensorFlow data is batch-major, so by
      default this function accepts input and emits output in batch-major
      form.
    reset_after: GRU convention (whether to apply reset gate after or
      before matrix multiplication). False = "before" (default),
      True = "after" (CuDNN compatible).
  Call arguments:
    inputs: A 3D tensor.
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` is used.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.
  """
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               implementation=1,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               reset_after=False,
               **kwargs):
    # `implementation=0` no longer exists; only warn here — the value is
    # rewritten to 1 when deserializing via `from_config`.
    if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`.'
                      'Please update your layer call.')
    # `enable_caching_device` is a cell-level option, so forward it to the
    # GRUCell instead of the RNN base class.
    if 'enable_caching_device' in kwargs:
      cell_kwargs = {'enable_caching_device':
                     kwargs.pop('enable_caching_device')}
    else:
      cell_kwargs = {}
    # The layer itself is a thin RNN wrapper; all per-step math lives in the
    # GRUCell constructed here.
    cell = GRUCell(
        units,
        activation=activation,
        recurrent_activation=recurrent_activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        implementation=implementation,
        reset_after=reset_after,
        dtype=kwargs.get('dtype'),
        trainable=kwargs.get('trainable', True),
        **cell_kwargs)
    super(GRU, self).__init__(
        cell,
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        unroll=unroll,
        **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = [InputSpec(ndim=3)]
  def call(self, inputs, mask=None, training=None, initial_state=None):
    """Runs the layer; clears cached cell dropout masks before delegating."""
    self._maybe_reset_cell_dropout_mask(self.cell)
    return super(GRU, self).call(
        inputs, mask=mask, training=training, initial_state=initial_state)
  # The read-only properties below simply delegate to the wrapped GRUCell.
  @property
  def units(self):
    return self.cell.units
  @property
  def activation(self):
    return self.cell.activation
  @property
  def recurrent_activation(self):
    return self.cell.recurrent_activation
  @property
  def use_bias(self):
    return self.cell.use_bias
  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer
  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer
  @property
  def bias_initializer(self):
    return self.cell.bias_initializer
  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer
  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer
  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer
  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint
  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint
  @property
  def bias_constraint(self):
    return self.cell.bias_constraint
  @property
  def dropout(self):
    return self.cell.dropout
  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout
  @property
  def implementation(self):
    return self.cell.implementation
  @property
  def reset_after(self):
    return self.cell.reset_after
  def get_config(self):
    """Returns the layer config; the wrapped `cell` entry is dropped since
    `__init__` re-creates the cell from these same arguments."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout,
        'implementation':
            self.implementation,
        'reset_after':
            self.reset_after
    }
    # Caching-device setting lives on the cell, not the layer.
    config.update(_config_for_enable_caching_device(self.cell))
    base_config = super(GRU, self).get_config()
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config):
    """Creates a `GRU` layer from its config, mapping the legacy
    `implementation=0` value to 1."""
    if 'implementation' in config and config['implementation'] == 0:
      config['implementation'] = 1
    return cls(**config)
@keras_export(v1=['keras.layers.LSTMCell'])
class LSTMCell(DropoutRNNCellMixin, Layer):
  """Cell class for the LSTM layer.
  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: hard sigmoid (`hard_sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix,
      used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    unit_forget_bias: Boolean.
      If True, add 1 to the bias of the forget gate at initialization.
      Setting it to true will also force `bias_initializer="zeros"`.
      This is recommended in [Jozefowicz et
      al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    recurrent_regularizer: Regularizer function applied to
      the `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.
    implementation: Implementation mode, either 1 or 2.
      Mode 1 will structure its operations as a larger number of
      smaller dot products and additions, whereas mode 2 will
      batch them into fewer, larger operations. These modes will
      have different performance profiles on different hardware and
      for different applications.
  Call arguments:
    inputs: A 2D tensor.
    states: List of state tensors corresponding to the previous timestep.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. Only relevant when `dropout` or
      `recurrent_dropout` is used.
  """
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               implementation=1,
               **kwargs):
    # By default use cached variable under v2 mode, see b/143699808.
    if ops.executing_eagerly_outside_functions():
      self._enable_caching_device = kwargs.pop('enable_caching_device', True)
    else:
      self._enable_caching_device = kwargs.pop('enable_caching_device', False)
    super(LSTMCell, self).__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp the dropout fractions into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    # Recurrent dropout is only supported by implementation 1; fall back
    # silently (debug-level log) when both were requested.
    if self.recurrent_dropout != 0 and implementation != 1:
      logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
      self.implementation = 1
    else:
      self.implementation = implementation
    # tuple(_ListWrapper) was silently dropping list content in at least 2.7.10,
    # and fixed after 2.7.16. Converting the state_size to wrapper around
    # NoDependency(), so that the base_layer.__setattr__ will not convert it to
    # ListWrapper. Down the stream, self.states will be a list since it is
    # generated from nest.map_structure with list, and tuple(list) will work
    # properly.
    self.state_size = data_structures.NoDependency([self.units, self.units])
    self.output_size = self.units
  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    """Creates the kernel, recurrent kernel and (optionally) bias weights."""
    default_caching_device = _caching_device(self)
    input_dim = input_shape[-1]
    # Input weights for the four gates, concatenated along the last axis in
    # the order (i, f, c, o) — matching the split performed in `call`.
    self.kernel = self.add_weight(
        shape=(input_dim, self.units * 4),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        caching_device=default_caching_device)
    # Recurrent weights for the four gates, same (i, f, c, o) layout.
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units * 4),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint,
        caching_device=default_caching_device)
    if self.use_bias:
      if self.unit_forget_bias:
        # Initialize the forget-gate slice (second quarter of the bias) to
        # ones, the rest with the configured bias initializer.
        def bias_initializer(_, *args, **kwargs):
          return K.concatenate([
              self.bias_initializer((self.units,), *args, **kwargs),
              initializers.Ones()((self.units,), *args, **kwargs),
              self.bias_initializer((self.units * 2,), *args, **kwargs),
          ])
      else:
        bias_initializer = self.bias_initializer
      self.bias = self.add_weight(
          shape=(self.units * 4,),
          name='bias',
          initializer=bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          caching_device=default_caching_device)
    else:
      self.bias = None
    self.built = True
  def _compute_carry_and_output(self, x, h_tm1, c_tm1):
    """Computes carry and output using split kernels.

    `x` and `h_tm1` are 4-tuples of per-gate input projections and
    (possibly dropout-masked) previous hidden states, in (i, f, c, o) order.
    """
    x_i, x_f, x_c, x_o = x
    h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
    i = self.recurrent_activation(
        x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))
    f = self.recurrent_activation(x_f + K.dot(
        h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))
    c = f * c_tm1 + i * self.activation(x_c + K.dot(
        h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
    o = self.recurrent_activation(
        x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))
    return c, o
  def _compute_carry_and_output_fused(self, z, c_tm1):
    """Computes carry and output using fused kernels.

    `z` is the pre-activation result of one fused matmul, already split into
    four per-gate tensors in (i, f, c, o) order.
    """
    z0, z1, z2, z3 = z
    i = self.recurrent_activation(z0)
    f = self.recurrent_activation(z1)
    c = f * c_tm1 + i * self.activation(z2)
    o = self.recurrent_activation(z3)
    return c, o
  def call(self, inputs, states, training=None):
    """Runs one LSTM step; returns (output, [new_h, new_c])."""
    h_tm1 = states[0]  # previous memory state
    c_tm1 = states[1]  # previous carry state
    dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        h_tm1, training, count=4)
    if self.implementation == 1:
      # Implementation 1: separate per-gate dot products so a distinct
      # dropout mask can be applied per gate.
      if 0 < self.dropout < 1.:
        inputs_i = inputs * dp_mask[0]
        inputs_f = inputs * dp_mask[1]
        inputs_c = inputs * dp_mask[2]
        inputs_o = inputs * dp_mask[3]
      else:
        inputs_i = inputs
        inputs_f = inputs
        inputs_c = inputs
        inputs_o = inputs
      k_i, k_f, k_c, k_o = array_ops.split(
          self.kernel, num_or_size_splits=4, axis=1)
      x_i = K.dot(inputs_i, k_i)
      x_f = K.dot(inputs_f, k_f)
      x_c = K.dot(inputs_c, k_c)
      x_o = K.dot(inputs_o, k_o)
      if self.use_bias:
        b_i, b_f, b_c, b_o = array_ops.split(
            self.bias, num_or_size_splits=4, axis=0)
        x_i = K.bias_add(x_i, b_i)
        x_f = K.bias_add(x_f, b_f)
        x_c = K.bias_add(x_c, b_c)
        x_o = K.bias_add(x_o, b_o)
      if 0 < self.recurrent_dropout < 1.:
        h_tm1_i = h_tm1 * rec_dp_mask[0]
        h_tm1_f = h_tm1 * rec_dp_mask[1]
        h_tm1_c = h_tm1 * rec_dp_mask[2]
        h_tm1_o = h_tm1 * rec_dp_mask[3]
      else:
        h_tm1_i = h_tm1
        h_tm1_f = h_tm1
        h_tm1_c = h_tm1
        h_tm1_o = h_tm1
      x = (x_i, x_f, x_c, x_o)
      h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
      c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
    else:
      # Implementation 2: one fused matmul over the concatenated 4-gate
      # kernels (only a single input dropout mask can be applied).
      if 0. < self.dropout < 1.:
        inputs = inputs * dp_mask[0]
      z = K.dot(inputs, self.kernel)
      z += K.dot(h_tm1, self.recurrent_kernel)
      if self.use_bias:
        z = K.bias_add(z, self.bias)
      z = array_ops.split(z, num_or_size_splits=4, axis=1)
      c, o = self._compute_carry_and_output_fused(z, c_tm1)
    h = o * self.activation(c)
    return h, [h, c]
  def get_config(self):
    """Returns the serializable configuration of this cell."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'unit_forget_bias':
            self.unit_forget_bias,
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout,
        'implementation':
            self.implementation
    }
    config.update(_config_for_enable_caching_device(self))
    base_config = super(LSTMCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    """Returns zero-filled initial `[h, c]` states as a list."""
    return list(_generate_zero_filled_state_for_cell(
        self, inputs, batch_size, dtype))
@keras_export('keras.experimental.PeepholeLSTMCell')
class PeepholeLSTMCell(LSTMCell):
  """Equivalent to LSTMCell class but adds peephole connections.
  Peephole connections allow the gates to utilize the previous internal state as
  well as the previous hidden state (which is what LSTMCell is limited to).
  This allows PeepholeLSTMCell to better learn precise timings over LSTMCell.
  From [Gers et al.](http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):
  "We find that LSTM augmented by 'peephole connections' from its internal
  cells to its multiplicative gates can learn the fine distinction between
  sequences of spikes spaced either 50 or 49 time steps apart without the help
  of any short training exemplars."
  The peephole implementation is based on:
  [Long short-term memory recurrent neural network architectures for
   large scale acoustic modeling.
  ](https://research.google.com/pubs/archive/43905.pdf)
  Example:
  ```python
  # Create 2 PeepholeLSTMCells
  peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]]
  # Create a layer composed sequentially of the peephole LSTM cells.
  layer = RNN(peephole_lstm_cells)
  input = keras.Input((timesteps, input_dim))
  output = layer(input)
  ```
  """
  def build(self, input_shape):
    """Builds the LSTM weights plus the per-gate peephole weight vectors."""
    super(PeepholeLSTMCell, self).build(input_shape)
    # The following are the weight matrices for the peephole connections. These
    # are multiplied with the previous internal state during the computation of
    # carry and output.
    self.input_gate_peephole_weights = self.add_weight(
        shape=(self.units,),
        name='input_gate_peephole_weights',
        initializer=self.kernel_initializer)
    self.forget_gate_peephole_weights = self.add_weight(
        shape=(self.units,),
        name='forget_gate_peephole_weights',
        initializer=self.kernel_initializer)
    self.output_gate_peephole_weights = self.add_weight(
        shape=(self.units,),
        name='output_gate_peephole_weights',
        initializer=self.kernel_initializer)
  def _compute_carry_and_output(self, x, h_tm1, c_tm1):
    """Split-kernel carry/output with peephole terms added to i, f and o.

    The input and forget gates peek at the previous carry `c_tm1`; the
    output gate peeks at the freshly computed carry `c`.
    """
    x_i, x_f, x_c, x_o = x
    h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
    i = self.recurrent_activation(
        x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) +
        self.input_gate_peephole_weights * c_tm1)
    f = self.recurrent_activation(x_f + K.dot(
        h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) +
                                  self.forget_gate_peephole_weights * c_tm1)
    c = f * c_tm1 + i * self.activation(x_c + K.dot(
        h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
    o = self.recurrent_activation(
        x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) +
        self.output_gate_peephole_weights * c)
    return c, o
  def _compute_carry_and_output_fused(self, z, c_tm1):
    """Fused-kernel carry/output with peephole terms added to i, f and o."""
    z0, z1, z2, z3 = z
    i = self.recurrent_activation(z0 +
                                  self.input_gate_peephole_weights * c_tm1)
    f = self.recurrent_activation(z1 +
                                  self.forget_gate_peephole_weights * c_tm1)
    c = f * c_tm1 + i * self.activation(z2)
    o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c)
    return c, o
@keras_export(v1=['keras.layers.LSTM'])
class LSTM(RNN):
  """Long Short-Term Memory layer - Hochreiter 1997.
  Note that this cell is not optimized for performance on GPU. Please use
  `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.
  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: hard sigmoid (`hard_sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs..
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix,
      used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    unit_forget_bias: Boolean.
      If True, add 1 to the bias of the forget gate at initialization.
      Setting it to true will also force `bias_initializer="zeros"`.
      This is recommended in [Jozefowicz et
        al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    recurrent_regularizer: Regularizer function applied to
      the `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation")..
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    recurrent_constraint: Constraint function applied to
      the `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1.
      Fraction of the units to drop for
      the linear transformation of the recurrent state.
    implementation: Implementation mode, either 1 or 2.
      Mode 1 will structure its operations as a larger number of
      smaller dot products and additions, whereas mode 2 will
      batch them into fewer, larger operations. These modes will
      have different performance profiles on different hardware and
      for different applications.
    return_sequences: Boolean. Whether to return the last output.
      in the output sequence, or the full sequence.
    return_state: Boolean. Whether to return the last state
      in addition to the output.
    go_backwards: Boolean (default False).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default False).
      If True, the network will be unrolled,
      else a symbolic loop will be used.
      Unrolling can speed-up a RNN,
      although it tends to be more memory-intensive.
      Unrolling is only suitable for short sequences.
    time_major: The shape format of the `inputs` and `outputs` tensors.
      If True, the inputs and outputs will be in shape
      `(timesteps, batch, ...)`, whereas in the False case, it will be
      `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
      efficient because it avoids transposes at the beginning and end of the
      RNN calculation. However, most TensorFlow data is batch-major, so by
      default this function accepts input and emits output in batch-major
      form.
  Call arguments:
    inputs: A 3D tensor.
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` is used.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.
  """
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               implementation=1,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               **kwargs):
    # `implementation=0` no longer exists; only warn here — it is rewritten
    # to 1 when deserializing via `from_config`.
    if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`.'
                      'Please update your layer call.')
    # `enable_caching_device` is a cell-level option, so forward it to the
    # LSTMCell rather than to the RNN base class.
    if 'enable_caching_device' in kwargs:
      cell_kwargs = {'enable_caching_device':
                     kwargs.pop('enable_caching_device')}
    else:
      cell_kwargs = {}
    # The layer is a thin RNN wrapper; all per-step math lives in this cell.
    cell = LSTMCell(
        units,
        activation=activation,
        recurrent_activation=recurrent_activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        unit_forget_bias=unit_forget_bias,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        implementation=implementation,
        dtype=kwargs.get('dtype'),
        trainable=kwargs.get('trainable', True),
        **cell_kwargs)
    super(LSTM, self).__init__(
        cell,
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        unroll=unroll,
        **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = [InputSpec(ndim=3)]
  def call(self, inputs, mask=None, training=None, initial_state=None):
    """Runs the layer; clears cached cell dropout masks before delegating."""
    self._maybe_reset_cell_dropout_mask(self.cell)
    return super(LSTM, self).call(
        inputs, mask=mask, training=training, initial_state=initial_state)
  # The read-only properties below simply delegate to the wrapped LSTMCell.
  @property
  def units(self):
    return self.cell.units
  @property
  def activation(self):
    return self.cell.activation
  @property
  def recurrent_activation(self):
    return self.cell.recurrent_activation
  @property
  def use_bias(self):
    return self.cell.use_bias
  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer
  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer
  @property
  def bias_initializer(self):
    return self.cell.bias_initializer
  @property
  def unit_forget_bias(self):
    return self.cell.unit_forget_bias
  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer
  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer
  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer
  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint
  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint
  @property
  def bias_constraint(self):
    return self.cell.bias_constraint
  @property
  def dropout(self):
    return self.cell.dropout
  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout
  @property
  def implementation(self):
    return self.cell.implementation
  def get_config(self):
    """Returns the layer config; the wrapped `cell` entry is dropped since
    `__init__` re-creates the cell from these same arguments."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'unit_forget_bias':
            self.unit_forget_bias,
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout,
        'implementation':
            self.implementation
    }
    # Caching-device setting lives on the cell, not the layer.
    config.update(_config_for_enable_caching_device(self.cell))
    base_config = super(LSTM, self).get_config()
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config):
    """Creates an `LSTM` layer from its config, mapping the legacy
    `implementation=0` value to 1."""
    if 'implementation' in config and config['implementation'] == 0:
      config['implementation'] = 1
    return cls(**config)
def _generate_dropout_mask(ones, rate, training=None, count=1):
  """Builds `count` dropout mask tensors shaped like `ones`.

  In the training phase each mask is `ones` with dropout applied at `rate`;
  in inference the mask is just `ones`. Returns a single tensor when
  `count` is 1, otherwise a list of `count` masks (each drawn separately).
  """
  def dropped_inputs():
    return K.dropout(ones, rate)
  if count <= 1:
    return K.in_train_phase(dropped_inputs, ones, training=training)
  return [
      K.in_train_phase(dropped_inputs, ones, training=training)
      for _ in range(count)
  ]
def _standardize_args(inputs, initial_state, constants, num_constants):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Arguments:
inputs: Tensor or list/tuple of tensors. which may include constants
and initial states. In that case `num_constant` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
num_constants: Expected number of constants (if constants are passed as
part of the `inputs` list.
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
"""
if isinstance(inputs, list):
# There are several situations here:
# In the graph mode, __call__ will be only called once. The initial_state
# and constants could be in inputs (from file loading).
# In the eager mode, __call__ will be called twice, once during
# rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be
# model.fit/train_on_batch/predict with real np data. In the second case,
# the inputs will contain initial_state and constants as eager tensor.
#
# For either case, the real input is the first item in the list, which
# could be a nested structure itself. Then followed by initial_states, which
# could be a list of items, or list of list if the initial_state is complex
# structure, and finally followed by constants which is a flat list.
assert initial_state is None and constants is None
if num_constants:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[:1]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def _is_multiple_state(state_size):
  """Check whether the state_size contains multiple states."""
  # A TensorShape also defines __len__, but it describes the shape of a
  # single state rather than a collection of per-state sizes, so it must
  # be excluded explicitly.
  return (hasattr(state_size, '__len__') and
          not isinstance(state_size, tensor_shape.TensorShape))
def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
  """Builds zero-filled initial state(s) matching `cell.state_size`.

  When `inputs` is provided, its leading (batch) dimension and dtype take
  precedence over the explicit `batch_size`/`dtype` arguments.
  """
  if inputs is not None:
    batch_size = array_ops.shape(inputs)[0]
    dtype = inputs.dtype
  return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):
  """Generate a zero filled tensor with shape [batch_size, state_size]."""
  if batch_size_tensor is None or dtype is None:
    raise ValueError(
        'batch_size and dtype cannot be None while constructing initial state: '
        'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))
  def create_zeros(unnested_state_size):
    # Each leaf state gets shape [batch_size] + per-state dims.
    flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
    init_state_size = [batch_size_tensor] + flat_dims
    return array_ops.zeros(init_state_size, dtype=dtype)
  if nest.is_sequence(state_size):
    # state_size may be an arbitrarily nested structure (e.g. LSTM's [h, c]);
    # mirror that structure with zero tensors at every leaf.
    return nest.map_structure(create_zeros, state_size)
  else:
    return create_zeros(state_size)
def _caching_device(rnn_cell):
  """Returns the caching device for the RNN variable.
  This is useful for distributed training, when variable is not located as same
  device as the training worker. By enabling the device cache, this allows
  worker to read the variable once and cache locally, rather than read it every
  time step from remote when it is needed.
  Note that this is assuming the variable that cell needs for each time step is
  having the same value in the forward path, and only gets updated in the
  backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the
  cell body relies on any variable that gets updated every time step, then
  caching device will cause it to read the stall value.
  Args:
    rnn_cell: the rnn cell instance.
  Returns:
    A callable mapping an op to its device (enabling the read cache), or
    None when caching must stay disabled.
  """
  if context.executing_eagerly():
    # caching_device is not supported in eager mode.
    return None
  # Opt-in flag; cells without the attribute never cache.
  if not getattr(rnn_cell, '_enable_caching_device', False):
    return None
  # Don't set a caching device when running in a loop, since it is possible that
  # train steps could be wrapped in a tf.while_loop. In that scenario caching
  # prevents forward computations in loop iterations from re-reading the
  # updated weights.
  if control_flow_util.IsInWhileLoop(ops.get_default_graph()):
    logging.warn('Variable read device caching has been disabled because the '
                 'RNN is in tf.while_loop loop context, which will cause '
                 'reading stalled value in forward path. This could slow down '
                 'the training due to duplicated variable reads. Please '
                 'consider updating your code to remove tf.while_loop if '
                 'possible.')
    return None
  if rnn_cell._dtype_policy.should_cast_variables:
    logging.warn('Variable read device caching has been disabled since it '
                 'doesn\'t work with the mixed precision API. This is '
                 'likely to cause a slowdown for RNN training due to '
                 'duplicated read of variable for each timestep, which '
                 'will be significant in a multi remote worker setting. '
                 'Please consider disabling mixed precision API if '
                 'the performance has been affected.')
    return None
  # Cache the value on the device that access the variable.
  return lambda op: op.device
def _config_for_enable_caching_device(rnn_cell):
  """Serializes `enable_caching_device` only when it differs from the default.

  `enable_caching_device` is an internal knob that speeds up RNN variable
  reads in multi-remote-worker settings, so it is deliberately kept out of
  the JSON config unless the cell was created with a non-default value.

  Args:
    rnn_cell: the RNN cell being serialized.

  Returns:
    `{'enable_caching_device': <value>}` when the cell's setting differs
    from the default, otherwise an empty dict.
  """
  default_value = ops.executing_eagerly_outside_functions()
  if rnn_cell._enable_caching_device == default_value:
    return {}
  return {'enable_caching_device': rnn_cell._enable_caching_device}
| 40.310596 | 118 | 0.678408 |
b571d767f002a222289e9e96594642c37a8705d6 | 6,045 | py | Python | setup.py | ismlkrkmz/Dragonfire | 7a5e22bd07ba9734d68fe76ce77d80164d47249e | [
"MIT"
] | 1,320 | 2017-06-20T21:47:35.000Z | 2022-03-29T08:53:31.000Z | setup.py | ismlkrkmz/Dragonfire | 7a5e22bd07ba9734d68fe76ce77d80164d47249e | [
"MIT"
] | 120 | 2017-06-21T13:16:40.000Z | 2022-03-24T18:12:21.000Z | setup.py | ismlkrkmz/Dragonfire | 7a5e22bd07ba9734d68fe76ce77d80164d47249e | [
"MIT"
] | 229 | 2017-06-21T05:38:43.000Z | 2022-03-14T14:03:10.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages, Extension
# To use a consistent encoding
from codecs import open
from os import path
from subprocess import PIPE, Popen
__location__ = path.abspath(path.dirname(__file__))
def pkgconfig(*packages):
    """Queries ``pkg-config`` and builds keyword arguments for ``Extension``.

    Runs ``pkg-config --cflags --libs`` for the given C libraries and maps
    the emitted flags onto the ``define_macros``, ``include_dirs``,
    ``library_dirs`` and ``libraries`` keyword arguments understood by
    :class:`setuptools.Extension`.

    Args:
        packages: C library names, as known to pkg-config.

    Returns:
        dict: keyword arguments suitable for ``Extension(..., **config)``.

    Raises:
        ValueError: if pkg-config writes anything to stderr.
    """
    flags = {
        '-D': 'define_macros',
        '-I': 'include_dirs',
        '-L': 'library_dirs',
        '-l': 'libraries'
    }
    cmd = ['pkg-config', '--cflags', '--libs'] + list(packages)
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    # communicate() reads both pipes concurrently -- the original read stdout
    # to EOF before touching stderr, which can deadlock if the child fills
    # the stderr pipe -- and it also waits on (reaps) the child process.
    output, error = proc.communicate()
    if error:
        raise ValueError(error)
    config = {}
    for token in output.split():
        token = token.decode('ascii')
        if token != '-pthread':  # -pthread has no flag/value split; skip it.
            flag, value = token[:2], token[2:]
            config.setdefault(flags[flag], []).append(value)
    if 'define_macros' in config:
        # Extension expects (name, value) tuples; pkg-config -D gives bare names.
        macros = [(name, None) for name in config['define_macros']]
        config['define_macros'] = macros
    return config
def read_requirements():
    """Parse requirements.txt into install_requires/dependency_links kwargs."""
    reqs_path = path.join(__location__, 'requirements.txt')
    names, links = [], []
    with open(reqs_path, encoding='utf8') as req_file:
        for raw_line in req_file:
            entry = raw_line.strip()
            if entry.startswith('#'):
                continue  # skip comment lines
            # URL-style requirements go to dependency_links, the rest are names.
            (links if '://' in entry else names).append(entry)
    return {'install_requires': names, 'dependency_links': links}
# Get the long description from the README file
with open(path.join(__location__, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='dragonfire',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.1.1',
    description='Dragonfire is an open source virtual assistant project for Ubuntu based Linux distributions',
    long_description=long_description,
    long_description_content_type='text/markdown',
    # The project's main homepage.
    url='https://github.com/mertyildiran/Dragonfire',
    # Author details
    author='Mehmet Mert Yıldıran',
    author_email='mert.yildiran@bil.omu.edu.tr',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        # Indicate who your project is intended for
        'Intended Audience :: End Users/Desktop',
        'Topic :: Scientific/Engineering :: Human Machine Interfaces',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Intended language
        'Natural Language :: English',
        # Target Operating System
        'Operating System :: POSIX :: Linux',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3 :: Only',
    ],
    # What does your project relate to?
    keywords='virtual assistant machine learining artifical intelligence chat bot',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    # py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # Expands to install_requires= and dependency_links= parsed from
    # requirements.txt by read_requirements() above.
    **read_requirements(),
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'optionals': [
            'pyqtgraph',
            'PeakUtils',
            'flake8',
            'sphinx',
            'sphinx_rtd_theme',
            'recommonmark',
            'm2r',
            'pytest',
            'pytest-cov',
            'codecov'
        ]
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        # If any package contains data files, include them:
        'dragonfire': ['realhud/animation/*', 'sr/models/english/*']
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'dragonfire=dragonfire:initiate',
        ],
    },
    # Native C extension; compile/link flags are discovered via pkg-config.
    # NOTE(review): pkg-config receives 'gtk+-2.0 x11 xext' as one argument;
    # pkg-config itself splits module lists on whitespace, so this works.
    ext_modules=[
        Extension('realhud', ['dragonfire/realhud/realhud.c'],
                  **pkgconfig('gtk+-2.0 x11 xext'))
    ]
)
| 32.326203 | 110 | 0.642514 |
dbbb8eb0219260520c944c613b41bffbf8fad6f8 | 2,454 | py | Python | app/scheduler/default_settings.py | ZoomerAnalytics/chronos | b4418b8be0c2d685533e5699c8d2d49344742365 | [
"BSD-2-Clause"
] | 2 | 2017-02-20T10:28:09.000Z | 2017-09-22T16:45:26.000Z | app/scheduler/default_settings.py | ZoomerAnalytics/chronos | b4418b8be0c2d685533e5699c8d2d49344742365 | [
"BSD-2-Clause"
] | null | null | null | app/scheduler/default_settings.py | ZoomerAnalytics/chronos | b4418b8be0c2d685533e5699c8d2d49344742365 | [
"BSD-2-Clause"
] | 3 | 2017-02-09T19:32:31.000Z | 2017-05-04T05:43:13.000Z | """Default settings."""
import logging
import os
#
# Development mode or production mode
# If DEBUG is True, then auto-reload is enabled, i.e., when code is modified, server will be
# reloaded immediately
#
DEBUG = True
#
# Static Assets
#
# The web UI is a single page app. All javascripts/css files should be in STATIC_DIR_PATH
#
STATIC_DIR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
TEMPLATE_DIR_PATH = STATIC_DIR_PATH
APP_INDEX_PAGE = 'index.html'
URL_PREFIX = os.environ.get('URL_PREFIX', '')
#
# Server setup
#
HTTP_PORT = 8888
HTTP_ADDRESS = '0.0.0.0'
TORNADO_MAX_WORKERS = 8
#
# ApScheduler settings
#
THREAD_POOL_SIZE = 4
JOB_MAX_INSTANCES = 3
JOB_COALESCE = True
TIMEZONE = 'UTC'
# When a job is misfired -- A job were to run at a specific time, but due to some
# reason (e.g., scheduler restart), we miss that run.
#
# By default, if a job is misfired within 1 hour, the scheduler will rerun it.
# Otherwise, if it's misfired over 1 hour, the scheduler will not rerun it.
JOB_MISFIRE_GRACE_SEC = 3600
#
# Database settings
#
JOBS_TABLENAME = 'scheduler_jobs'
EXECUTIONS_TABLENAME = 'scheduler_execution'
AUDIT_LOGS_TABLENAME = 'scheduler_jobauditlog'
# See different database providers in ndscheduler/core/datastore/providers/
# SQLite
#
# DATABASE_CLASS = 'scheduler.core.datastore.providers.sqlite.DatastoreSqlite'
# DATABASE_CONFIG_DICT = {
#     'file_path': 'datastore.db'
# }
# Postgres
#
DATABASE_CLASS = 'scheduler.core.datastore.providers.postgresql.DatastorePostgresql'
# NOTE: the POSTGRES_* variables below use os.environ[...] (not .get), so
# importing this module raises KeyError if any of them is unset.
DATABASE_CONFIG_DICT = {
    'user': os.environ['POSTGRES_USER'],
    'password': os.environ['POSTGRES_PASSWORD'],
    'hostname': os.environ['POSTGRES_HOST'],
    'port': int(os.environ['POSTGRES_PORT']),
    'database': os.environ['POSTGRES_DB'],
    'sslmode': 'disable'
}
# MySQL
#
# DATABASE_CLASS = 'ndscheduler.core.datastore.providers.mysql.DatastoreMysql'
# DATABASE_CONFIG_DICT = {
#     'user': 'username',
#     'password': '',
#     'hostname': 'localhost',
#     'port': 3306,
#     'database': 'scheduler'
# }
# ndschedule is based on apscheduler. Here we can customize the apscheduler's main scheduler class
# Please see ndscheduler/core/scheduler/base.py
SCHEDULER_CLASS = 'scheduler.core.scheduler.base.SingletonScheduler'
#
# Set logging level
#
logging.getLogger().setLevel(logging.INFO)
# Packages that contains job classes, e.g., simple_scheduler.jobs
JOB_CLASS_PACKAGES = ['scheduler.jobs']
| 25.040816 | 98 | 0.735126 |
4d55d0d30d5562fdaca9f9fea6a2697d32419a14 | 1,730 | py | Python | ThreadedPS.py | Rishit-dagli/Network-scanner-Python | 951e8caa0344a388a517250b3e2aac071eea03c3 | [
"Apache-2.0"
] | 1 | 2020-07-24T03:50:18.000Z | 2020-07-24T03:50:18.000Z | ThreadedPS.py | Rishit-dagli/Network-scanner-Python | 951e8caa0344a388a517250b3e2aac071eea03c3 | [
"Apache-2.0"
] | null | null | null | ThreadedPS.py | Rishit-dagli/Network-scanner-Python | 951e8caa0344a388a517250b3e2aac071eea03c3 | [
"Apache-2.0"
] | null | null | null | '''
Threaded Port Scanner 1.0.0:
A python code to demonstrate demonstrates a Threaded Port scanner built using Python 3.x
We here use threading to speed up the process
Note: Port scanning is dangerous, so you are advised to not to use
this script without permission
'''
# Package metadata for the script.
__author__ = "Rishit Dagli"
__copyright__ = ""
__credits__ = ["Rishit Dagli"]
__license__ = "Apache License 2.0"
__version__ = "1.0.0"
__maintainer__ = "Rishit Dagli"
__email__ = "rishit.dagli@gmail.com"
__status__ = "Development"
import socket
import time
import threading
from queue import Queue
# set Timeout time
# A short global timeout keeps probes of closed/filtered ports fast.
socket.setdefaulttimeout(0.25)
# Serializes print() calls from worker threads so output lines don't interleave.
print_lock = threading.Lock()
target = input('Enter the host to be scanned: ')
# Resolve the hostname once; every worker thread reuses the same IP.
t_IP = socket.gethostbyname(target)
print ('Starting scan on host: ', t_IP)
def portscan(port):
    '''
    @author = "Rishit Dagli"
    Attempt a TCP connection to `port` on the global target `t_IP`
    and report the port as open when the connection succeeds.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect() returns None on success; no exception means the port is open.
        # (The original stored that None in `con` and called con.close(), which
        # raised AttributeError and left the socket itself unclosed.)
        s.connect((t_IP, port))
        with print_lock:
            print(port, 'is open')
    except OSError:
        # Closed/filtered ports time out or refuse the connection -- ignore.
        # Narrower than the original bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    finally:
        s.close()  # always release the file descriptor
def threader():
    '''
    @author = "Rishit Dagli"
    Daemon worker loop: repeatedly take a port number from the shared
    queue, scan it, then mark the queue item as processed.
    '''
    while True:
        port_number = q.get()
        portscan(port_number)
        q.task_done()
# Work queue of port numbers shared by all worker threads.
q = Queue()
startTime = time.time()
# Spawn 100 daemon workers; daemon=True lets the process exit without joining them.
for x in range(100):
    t = threading.Thread(target = threader)
    t.daemon = True
    t.start()
# Enqueue ports 1..499 for scanning.
for worker in range(1, 500):
    q.put(worker)
# Join the results from threads
# (blocks until every queued port has been marked task_done)
q.join()
# Print time taken
print('Time taken:', time.time() - startTime)
# print("functions- portscan, threader")
# print(Docs:)
# print(portscan.__doc__)
# print(threader.__doc__)
| 21.358025 | 89 | 0.641618 |
20eee3b38aea940ae6a0daefa4cfec0929fc21ea | 4,209 | py | Python | torchvision/datasets/omniglot.py | SliMM/vision | 101d19b9dec9b4a82ef6c3e2e1d7903e46369fd5 | [
"BSD-3-Clause"
] | 1 | 2020-11-17T07:13:18.000Z | 2020-11-17T07:13:18.000Z | torchvision/datasets/omniglot.py | SliMM/vision | 101d19b9dec9b4a82ef6c3e2e1d7903e46369fd5 | [
"BSD-3-Clause"
] | null | null | null | torchvision/datasets/omniglot.py | SliMM/vision | 101d19b9dec9b4a82ef6c3e2e1d7903e46369fd5 | [
"BSD-3-Clause"
] | 3 | 2020-12-17T22:32:06.000Z | 2022-03-23T01:43:42.000Z | from PIL import Image
from os.path import join
from typing import Any, Callable, List, Optional, Tuple
from .vision import VisionDataset
from .utils import download_and_extract_archive, check_integrity, list_dir, list_files
class Omniglot(VisionDataset):
    """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``omniglot-py`` exists.
        background (bool, optional): If True, creates dataset from the "background" set, otherwise
            creates from the "evaluation" set. This terminology is defined by the authors.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts it in root directory. If the zip files are already downloaded, they are not
            downloaded again.
    """
    # Subdirectory under `root` holding the extracted data.
    folder = 'omniglot-py'
    download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
    # MD5 checksums used to validate the downloaded zip archives.
    zips_md5 = {
        'images_background': '68d2efa1b9178cc56df9314c21c6e718',
        'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'
    }
    def __init__(
        self,
        root: str,
        background: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super(Omniglot, self).__init__(join(root, self.folder), transform=transform,
                                       target_transform=target_transform)
        self.background = background
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        # On-disk layout: <target_folder>/<alphabet>/<character>/<image>.png
        self.target_folder = join(self.root, self._get_target_folder())
        self._alphabets = list_dir(self.target_folder)
        # "alphabet/character" relative paths; position in this list is the class id.
        self._characters: List[str] = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))]
                                           for a in self._alphabets], [])
        # Per-character lists of (filename, class_index) pairs ...
        self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')]
                                  for idx, character in enumerate(self._characters)]
        # ... flattened so the dataset is indexable by a single integer.
        self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
    def __len__(self) -> int:
        return len(self._flat_character_images)
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target character class.
        """
        image_name, character_class = self._flat_character_images[index]
        image_path = join(self.target_folder, self._characters[character_class], image_name)
        # 'L' = 8-bit grayscale; Omniglot glyphs are black-and-white.
        image = Image.open(image_path, mode='r').convert('L')
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            character_class = self.target_transform(character_class)
        return image, character_class
    def _check_integrity(self) -> bool:
        # Only the zip for the currently selected split is verified.
        zip_filename = self._get_target_folder()
        if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]):
            return False
        return True
    def download(self) -> None:
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        filename = self._get_target_folder()
        zip_filename = filename + '.zip'
        url = self.download_url_prefix + '/' + zip_filename
        download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
    def _get_target_folder(self) -> str:
        # Maps the `background` flag to the matching archive/folder name.
        return 'images_background' if self.background else 'images_evaluation'
38de62c8b825fa6b4c7b62c0820951439e41b21e | 286 | py | Python | tests/test_bulk_query.py | Jayzhanscar/SQLBatis | 28561b52f97d30f22b6500fc1be37a1d7cbea2ba | [
"MIT"
] | null | null | null | tests/test_bulk_query.py | Jayzhanscar/SQLBatis | 28561b52f97d30f22b6500fc1be37a1d7cbea2ba | [
"MIT"
] | null | null | null | tests/test_bulk_query.py | Jayzhanscar/SQLBatis | 28561b52f97d30f22b6500fc1be37a1d7cbea2ba | [
"MIT"
] | null | null | null | from tests.basic_test import BasicTestCase, db
from tests.crud import *
class BulkQueryTestCase(BasicTestCase):
    # Exercises bulk insert followed by a full select.
    def test_1_bulk_create(self):
        # `bulk_create`, `select` and `users` come from `from tests.crud import *`.
        bulk_create(users)
        results = select()
        # presumably the `users` fixture holds exactly two rows -- verify in tests.crud
        assert len(results) == 2
if __name__ == '__main__':
    # NOTE(review): `unittest` is never imported explicitly in this file; this
    # relies on the star import above re-exporting it -- confirm, or add
    # `import unittest` directly.
    unittest.main()
| 17.875 | 46 | 0.681818 |
acdcb9e288fecedce2cb2592f6fb676432e05afb | 348 | py | Python | Abbaize.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | Abbaize.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | Abbaize.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | # Define a procedure, abbaize, that takes
# two strings as its inputs, and returns
# a string that is the first input,
# followed by two repetitions of the second input,
# followed by the first input.
def abbaize(a, b):
    """Return string `a`, then two copies of `b`, then `a` again.

    >>> abbaize('a', 'b')
    'abba'
    """
    return ''.join([a, b, b, a])
#print abbaize('a','b')
#>>> 'abba'
#print abbaize('dog','cat')
#>>> 'dogcatcatdog' | 21.75 | 51 | 0.637931 |
07a3b58f1d6e8b381ccae2bd4df7a0c2cd5fee00 | 2,769 | py | Python | atgql/shims.py | ATyped/atgql | 3fa9e09c9cea346dc42c205452487420ceb493e2 | [
"MIT"
] | null | null | null | atgql/shims.py | ATyped/atgql | 3fa9e09c9cea346dc42c205452487420ceb493e2 | [
"MIT"
] | null | null | null | atgql/shims.py | ATyped/atgql | 3fa9e09c9cea346dc42c205452487420ceb493e2 | [
"MIT"
] | null | null | null | __all__ = ['Promise', 'typeof']
import types
from collections.abc import Awaitable, Callable
from inspect import getmembers, isclass
from typing import Any, Literal, TypeVar
T = TypeVar('T')

# JavaScript's Promise corresponds to an awaitable in Python.
Promise = Awaitable[T]

# Exact-type lookup tables consulted by `typeof`. Membership is tested with
# the value's concrete type (no subclass handling), mirroring JS semantics.
boolean_types = {bool}
number_types = {int, float, complex}
string_types = {str}
callable_types = set()
known_object_types = {type(None)}
symbol_types = set()

# These interpreter types are user-manipulable namespaces, so they read as
# plain 'object' (MappingProxyType behaves like a dict / JS object literal).
_OBJECT_LIKE = (
    types.CellType,  # type: ignore[attr-defined]
    types.ModuleType,
    types.MappingProxyType,
    types.SimpleNamespace,
)

# Classify every public class exposed by the `types` module.
for _cls_name, _cls in getmembers(types, isclass):
    if _cls_name.startswith('_'):
        continue
    if issubclass(_cls, Callable):  # type: ignore[arg-type]
        callable_types.add(_cls)
    elif _cls in _OBJECT_LIKE:
        known_object_types.add(_cls)
    else:
        symbol_types.add(_cls)


def typeof(value: Any) -> Literal['object', 'boolean', 'number', 'string', 'function', 'symbol']:
    """The simulator of `typeof` in JavaScript.

    JavaScript-side:
        https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/typeof#description
    Python-side:
        https://docs.python.org/3/library/types.html#standard-interpreter-types

    Mapping:
        - `bool`                                  -> 'boolean'
        - `int` / `float` / `complex`             -> 'number'
        - `str`                                   -> 'string'
        - callable classes from module `types`    -> 'function'
        - remaining classes from module `types`   -> 'symbol'
        - everything else (incl. None, modules,
          cells, mapping proxies, namespaces)     -> 'object'

    Only the value's exact type is consulted; subclasses fall through to
    'object' just like unknown types do.
    """
    lookup = (
        (boolean_types, 'boolean'),
        (number_types, 'number'),
        (string_types, 'string'),
        (callable_types, 'function'),
        (symbol_types, 'symbol'),
    )
    value_type = type(value)
    for type_set, label in lookup:
        if value_type in type_set:
            return label
    return 'object'
| 32.964286 | 98 | 0.558685 |
ba153204cb6ef2a8ec467e147e20fe902be103e8 | 4,769 | py | Python | xmpush/base/APIMessage.py | ULHI-xin/xmpush-python | b88c75d7c5e2f10262a997f6d65ae1defe0af1b0 | [
"Apache-2.0"
] | 1 | 2020-03-10T00:54:26.000Z | 2020-03-10T00:54:26.000Z | xmpush/base/APIMessage.py | ULHI-xin/xmpush-python | b88c75d7c5e2f10262a997f6d65ae1defe0af1b0 | [
"Apache-2.0"
] | null | null | null | xmpush/base/APIMessage.py | ULHI-xin/xmpush-python | b88c75d7c5e2f10262a997f6d65ae1defe0af1b0 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from xmpush.base.APIConstants import Constants
class MessageDict(dict):
    """Dict subclass that exposes its keys as attributes.

    ``msg.title`` reads ``msg['title']`` and ``msg.title = x`` writes
    ``msg['title']``. Missing keys surface as AttributeError so normal
    attribute semantics (e.g. hasattr) keep working.
    """

    def __getattr__(self, item):
        try:
            return self[item]
        except KeyError:
            # Fixed message quoting: the original read "...attribute %s'" with
            # no opening quote around the attribute name.
            raise AttributeError(r"'message' object has no attribute '%s'" % item)

    def __setattr__(self, key, value):
        self[key] = value
class PushTargetMessage(object):
    """Value object pairing a push message with its delivery target."""

    def __init__(self, push_message, target_type, target):
        # Attributes simply mirror the constructor arguments.
        self.target = target
        self.target_type = target_type
        self.push_message = push_message
class PushMessage(object):
    """Chainable builder for a Xiaomi (MiPush) push message.

    Each setter records one HTTP parameter (keys come from `Constants`) in an
    internal `MessageDict` and returns `self`, so calls can be chained, e.g.
    ``PushMessage().title('hi').description('...')``.
    """

    def __init__(self):
        self.__message_dict = MessageDict()

    def collapse_key(self, collapse_key):
        self.__message_dict[Constants.http_param_collapse_key] = collapse_key
        return self

    def payload(self, payload):
        self.__message_dict[Constants.http_param_payload] = payload
        return self

    def title(self, title):
        self.__message_dict[Constants.http_param_title] = title
        return self

    def description(self, description):
        self.__message_dict[Constants.http_param_description] = description
        return self

    def notify_type(self, notify_type):
        self.__message_dict[Constants.http_param_notify_type] = notify_type
        return self

    def time_to_live(self, time_to_live):
        self.__message_dict[Constants.http_param_time_to_live] = time_to_live
        return self

    def restricted_package_name(self, package_name):
        # Stored as a single-element list; the API expects a list of names.
        self.__message_dict[Constants.http_param_restricted_package_name] = [package_name]
        return self

    def restricted_package_names(self, package_names):
        self.__message_dict[Constants.http_param_restricted_package_name] = package_names
        return self

    def pass_through(self, pass_through=0):
        self.__message_dict[Constants.http_param_pass_through] = pass_through
        return self

    def notify_id(self, notify_id=0):
        self.__message_dict[Constants.http_param_notify_id] = notify_id
        return self

    def extra(self, extra):
        """Merge a dict of custom key/value pairs (each key gets the extra prefix)."""
        for k, v in extra.items():
            self.__message_dict['%s%s' % (Constants.http_param_extra_prefix, k)] = v
        return self

    def extra_element(self, key, value):
        self.__message_dict['%s%s' % (Constants.http_param_extra_prefix, key)] = value
        return self

    # --- APS-specific fields for iOS notifications (translated from the
    # --- original Chinese comments) ------------------------------------

    def aps_element(self, key, value):
        self.__message_dict['%s%s' % (Constants.http_param_aps_prefix, key)] = value
        return self

    def aps_title(self, value):
        self.aps_element(Constants.http_param_aps_title, value)
        return self

    def aps_subtitle(self, value):
        self.aps_element(Constants.http_param_aps_subtitle, value)
        return self

    def aps_body(self, value):
        self.aps_element(Constants.http_param_aps_body, value)
        return self

    def aps_mutable_content(self, value):
        self.aps_element(Constants.http_param_aps_mutable_content, value)
        return self

    def enable_flow_control(self):
        """Enable smoothed (flow-controlled) delivery; only effective for Android."""
        self.extra_element(Constants.extra_param_flow_control, '1')
        return self

    def time_to_send(self, time_to_send):
        """Schedule delivery at `time_to_send`, expressed in milliseconds since
        1970-01-01T00:00:00.0 UTC. Note: only times within the next seven days
        are supported."""
        self.__message_dict[Constants.http_param_time_to_send] = time_to_send
        return self

    def badge(self, badge):
        """iOS: custom numeric badge for the app icon."""
        self.extra_element(Constants.extra_param_badge, badge)
        return self

    def category(self, category):
        """iOS 8+: quick-reply category for the notification."""
        self.extra_element(Constants.extra_param_category, category)
        return self

    def sound_url(self, sound_url):
        """iOS: custom notification sound URL."""
        self.extra_element(Constants.extra_param_sound_url, sound_url)
        return self

    def apns_only(self):
        """iOS: route this message through the Apple APNs channel only."""
        self.extra_element(Constants.extra_param_ios_msg_channel,
                           Constants.extra_param_ios_msg_channel_apns_only)
        return self

    def connection_only(self):
        """iOS: route this message through the persistent connection channel only."""
        self.extra_element(Constants.extra_param_ios_msg_channel,
                           Constants.extra_param_ios_msg_channel_connection_only)
        return self

    def message_dict(self):
        """Android build: return the parameter dict, verifying that
        restricted_package_name has been set.

        Raises:
            KeyError: if restricted_package_name was never set. (The original
                code caught AttributeError, but a missing dict key raises
                KeyError, so its except clause was dead code; the KeyError
                still propagated, which this preserves explicitly.)
        """
        if Constants.http_param_restricted_package_name not in self.__message_dict:
            raise KeyError(Constants.http_param_restricted_package_name)
        return self.__message_dict

    def message_dict_ios(self):
        """iOS build: return the parameter dict (no package-name requirement)."""
        return self.__message_dict
| 28.728916 | 120 | 0.67121 |
688b47a858312c158dc6371fde27d9c793b7ff2e | 3,961 | py | Python | model/component.py | RxstydnR/LEA-Net | e163c614a1370b9ee3aba177ccc06b22837091b2 | [
"MIT"
] | null | null | null | model/component.py | RxstydnR/LEA-Net | e163c614a1370b9ee3aba177ccc06b22837091b2 | [
"MIT"
] | null | null | null | model/component.py | RxstydnR/LEA-Net | e163c614a1370b9ee3aba177ccc06b22837091b2 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.layers import Conv2D,Flatten,Dense,Lambda,BatchNormalization, Activation, GlobalAveragePooling2D
from tensorflow.keras.layers import Add, Multiply, Concatenate
from tensorflow.keras import backend as K
def conv_block(n_filter, x):
    """Conv(3x3, stride 2) -> BatchNorm -> ReLU; halves spatial resolution."""
    y = Conv2D(n_filter, kernel_size=(3, 3), padding='same', strides=2,
               kernel_initializer='he_normal')(x)
    y = BatchNormalization()(y)
    return Activation('relu')(y)
def input_attention(x, A, method):
    """Fuse attention map A into input x at the network entry, per `method`."""
    if method == "none":
        return x
    if method == "add":
        return Add()([x, A])
    if method == "multiply":
        return Multiply()([x, A])
    if method == "attention":
        # Residual attention: x + (x * A)
        return Add()([x, Multiply()([x, A])])
    if method == "4ch":
        # Stack A as an extra channel.
        return Concatenate(axis=-1)([x, A])
    raise ValueError(f"Value Error!!: {method} is invalid method.")
def attention_distillation(A, distil_method):
    """Collapse attention map A to a single channel using `distil_method`."""
    if distil_method == "none":
        return A
    if distil_method == "max":
        return Lambda(lambda x: K.max(x, axis=-1, keepdims=True))(A)
    if distil_method == "avg":
        return Lambda(lambda x: K.mean(x, axis=-1, keepdims=True))(A)
    if distil_method == "conv":
        # Learned 1x1 projection down to one channel.
        return Conv2D(1, kernel_size=(1, 1), padding='same', strides=1,
                      kernel_initializer='he_normal')(A)
    raise ValueError(f"Value Error!!: {distil_method} is invalid method.")
def attention_sigmoid(A, sigmoid_apply):
    # Optionally squash the attention map into (0, 1).
    # NOTE(review): `== True` is a strict comparison -- truthy non-bool values
    # (other than 1) would skip the sigmoid; confirm callers pass real bools.
    if sigmoid_apply==True:
        A = Activation('sigmoid')(A)
    return A
def fusion_module(x, A, fusion_method, SE=False):
    """Merge attention-branch output A into backbone features x."""
    if fusion_method == "none":
        return x
    if fusion_method == "concat":
        merged = Concatenate(axis=-1)([x, A])
        if SE:
            print("SE is applied")
            # Recalibrate the concatenated channels with squeeze-and-excitation.
            merged = se_block(input=merged, channels=merged.shape[-1], r=8)
        return merged
    if fusion_method == "add":
        return Add()([x, A])
    if fusion_method == "multiply":
        return Multiply()([x, A])
    if fusion_method == "attention":
        # Residual attention: x + (x * A)
        return Add()([x, Multiply()([x, A])])
    raise ValueError(f"Value Error!!: {fusion_method} is invalid method.")
def final_flat(x, flat_method):
    """Collapse spatial feature maps to a vector before the dense head."""
    if flat_method == "flat":
        return Flatten()(x)
    if flat_method == "gap":
        return GlobalAveragePooling2D()(x)
    raise ValueError(f"Value Error!!: {flat_method} is invalid method.")
def output_block(x, A, output_method, flat_method, num_class):
    """Build the classification head(s) from backbone features x and, when
    provided, attention-branch features A.

    Args:
        x: backbone feature map.
        A: attention-branch feature map, or None for a plain classifier.
        output_method: 'separate' (two sigmoid heads), 'oneway' (single head,
            A unused at the output), or 'merge' (concat + conv, single head).
        flat_method: 'flat' or 'gap', forwarded to final_flat().
        num_class: number of output units per head.

    Returns:
        List of output tensors (two for 'separate', otherwise one).
    """
    # Fixed: was `A != None`, which invokes the tensor's overloaded __ne__;
    # an identity check against None is what is intended here.
    if A is not None:
        if output_method == "separate":
            x = final_flat(x, flat_method)
            A = final_flat(A, flat_method)
            prob_x = Dense(num_class, activation='sigmoid')(x)
            prob_A = Dense(num_class, activation='sigmoid')(A)
            outputs = [prob_x, prob_A]
        elif output_method == "oneway":
            x = final_flat(x, flat_method)
            prob_x = Dense(num_class, activation='sigmoid')(x)
            outputs = [prob_x]
        elif output_method == "merge":
            output_filters = int(x.shape[-1])
            x = Concatenate(axis=-1)([x, A])
            # Project the concatenated maps back to x's channel count.
            x = Conv2D(output_filters, kernel_size=(3, 3), padding='same',
                       strides=1, kernel_initializer='he_normal')(x)
            x = final_flat(x, flat_method)
            prob_x = Dense(num_class, activation='sigmoid')(x)
            outputs = [prob_x]
        else:
            raise ValueError(f"Value Error!!: {output_method} is invalid method.")
    else:
        # No attention branch: a single softmax classifier.
        x = final_flat(x, flat_method)
        prob_x = Dense(num_class, activation='softmax')(x)
        outputs = [prob_x]
    return outputs
def se_block(input, channels, r=8):
    """ Squeeze and Excitation """
    # Squeeze: global spatial average -> one descriptor per channel.
    squeezed = GlobalAveragePooling2D()(input)
    # Excitation: bottleneck (channels // r) then per-channel sigmoid gates.
    gates = Dense(channels//r, activation="relu")(squeezed)
    gates = Dense(channels, activation="sigmoid")(gates)
    # Rescale the input channel-wise by the learned gates.
    return Multiply()([input, gates])
58acca784e9e8edd8436d767c33570a286b0db0a | 5,579 | py | Python | kubernetes/client/models/v2beta2_metric_value_status.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 2 | 2020-06-21T08:03:18.000Z | 2020-06-21T09:53:29.000Z | kubernetes/client/models/v2beta2_metric_value_status.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v2beta2_metric_value_status.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 1 | 2020-12-10T07:28:08.000Z | 2020-12-10T07:28:08.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V2beta2MetricValueStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'average_utilization': 'int',
        'average_value': 'str',
        'value': 'str'
    }

    attribute_map = {
        'average_utilization': 'averageUtilization',
        'average_value': 'averageValue',
        'value': 'value'
    }

    def __init__(self, average_utilization=None, average_value=None, value=None):  # noqa: E501
        """V2beta2MetricValueStatus - a model defined in OpenAPI"""  # noqa: E501
        self._average_utilization = None
        self._average_value = None
        self._value = None
        self.discriminator = None

        if average_utilization is not None:
            self.average_utilization = average_utilization
        if average_value is not None:
            self.average_value = average_value
        if value is not None:
            self.value = value

    @property
    def average_utilization(self):
        """Gets the average_utilization of this V2beta2MetricValueStatus.  # noqa: E501

        currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.  # noqa: E501

        :return: The average_utilization of this V2beta2MetricValueStatus.  # noqa: E501
        :rtype: int
        """
        return self._average_utilization

    @average_utilization.setter
    def average_utilization(self, average_utilization):
        """Sets the average_utilization of this V2beta2MetricValueStatus.

        currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.  # noqa: E501

        :param average_utilization: The average_utilization of this V2beta2MetricValueStatus.  # noqa: E501
        :type: int
        """
        self._average_utilization = average_utilization

    @property
    def average_value(self):
        """Gets the average_value of this V2beta2MetricValueStatus.  # noqa: E501

        averageValue is the current value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :return: The average_value of this V2beta2MetricValueStatus.  # noqa: E501
        :rtype: str
        """
        return self._average_value

    @average_value.setter
    def average_value(self, average_value):
        """Sets the average_value of this V2beta2MetricValueStatus.

        averageValue is the current value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :param average_value: The average_value of this V2beta2MetricValueStatus.  # noqa: E501
        :type: str
        """
        self._average_value = average_value

    @property
    def value(self):
        """Gets the value of this V2beta2MetricValueStatus.  # noqa: E501

        value is the current value of the metric (as a quantity).  # noqa: E501

        :return: The value of this V2beta2MetricValueStatus.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this V2beta2MetricValueStatus.

        value is the current value of the metric (as a quantity).  # noqa: E501

        :param value: The value of this V2beta2MetricValueStatus.  # noqa: E501
        :type: str
        """
        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Native dict.items() replaces the Python-2 six.iteritems shim;
        # identical behavior on Python 3.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2beta2MetricValueStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.625731 | 213 | 0.625381 |
a57fdffa357424ff99aae449d5c51f0efb6304d2 | 3,532 | py | Python | Tutorials/TensorFlow_V1/examples/3_NeuralNetworks/recurrent_network.py | lev1khachatryan/ASDS_CV | c9f0c0412002e929bcb7cc2fc6e5392977a9fa76 | [
"MIT"
] | 5 | 2019-12-13T16:26:10.000Z | 2020-01-10T07:44:05.000Z | Tutorials/TensorFlow_V1/examples/3_NeuralNetworks/recurrent_network.py | lev1khachatryan/ASDS_CV | c9f0c0412002e929bcb7cc2fc6e5392977a9fa76 | [
"MIT"
] | 1 | 2020-01-07T16:48:21.000Z | 2020-03-18T18:43:37.000Z | Tutorials/TensorFlow_V1/examples/3_NeuralNetworks/recurrent_network.py | lev1khachatryan/ASDS_CV | c9f0c0412002e929bcb7cc2fc6e5392977a9fa76 | [
"MIT"
] | null | null | null | from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
# Import MNIST data
# NOTE: tf.contrib and the tutorials dataset API are TensorFlow 1.x only.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
'''
To classify images using a recurrent neural network, we consider every image
row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
handle 28 sequences of 28 steps for every sample.
'''
# Training Parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200
# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
timesteps = 28 # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
# Placeholders: X holds a batch of images as 28 rows of 28 pixels,
# Y holds the one-hot class labels.
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
# Single linear readout layer applied to the LSTM's final hidden state.
weights = {
    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
    """Unrolled single-layer LSTM followed by a linear readout.

    ``x`` arrives as (batch_size, timesteps, num_input); ``static_rnn``
    expects a Python list of ``timesteps`` tensors, each shaped
    (batch_size, num_input), so the time axis is unstacked first.
    """
    timestep_inputs = tf.unstack(x, timesteps, 1)

    # Single LSTM cell shared across all unrolled timesteps.
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    outputs, _ = rnn.static_rnn(lstm_cell, timestep_inputs, dtype=tf.float32)

    # Linear activation on the last timestep's hidden state only.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
# Cross-entropy is computed from the raw logits (not `prediction`) for
# numerical stability.
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    for step in range(1, training_steps+1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
                  "{:.3f}".format(acc))
    print("Optimization Finished!")
    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
| 34.291262 | 81 | 0.689128 |
c23389cc7d45a8d1d2b83a5c7b5fc753995e3e1a | 4,854 | py | Python | external/scons-local-3.0.3/scons-local-3.0.3/SCons/Scanner/C.py | MrAwesomeRocks/caelus-cml | 55b6dc5ba47d0e95c07412d9446ac72ac11d7fd7 | [
"mpich2"
] | null | null | null | external/scons-local-3.0.3/scons-local-3.0.3/SCons/Scanner/C.py | MrAwesomeRocks/caelus-cml | 55b6dc5ba47d0e95c07412d9446ac72ac11d7fd7 | [
"mpich2"
] | null | null | null | external/scons-local-3.0.3/scons-local-3.0.3/SCons/Scanner/C.py | MrAwesomeRocks/caelus-cml | 55b6dc5ba47d0e95c07412d9446ac72ac11d7fd7 | [
"mpich2"
] | null | null | null | """SCons.Scanner.C
This module implements the dependency scanner for C/C++ code.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/C.py 27552f9e8d59c13c3567f2bd380b74e34ee25324 2019-01-08 02:59:02 bdbaddog"
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.cpp
class SConsCPPScanner(SCons.cpp.PreProcessor):
    """
    SCons-specific subclass of the cpp.py module's processing.
    We subclass this so that: 1) we can deal with files represented
    by Nodes, not strings; 2) we can keep track of the files that are
    missing.
    """
    def __init__(self, *args, **kw):
        SCons.cpp.PreProcessor.__init__(self, *args, **kw)
        # List of (fname, includer) pairs for #includes that could not be
        # resolved; reported as warnings by SConsCPPScannerWrapper.
        self.missing = []
    def initialize_result(self, fname):
        """Seed the result list with the scanned file itself (hook called
        by the base PreProcessor)."""
        self.result = SCons.Util.UniqueList([fname])
    def finalize_result(self, fname):
        """Return only the discovered dependencies, dropping the scanned
        file that initialize_result() put at index 0."""
        return self.result[1:]
    def find_include_file(self, t):
        """Resolve an include tuple (keyword, quote, fname) to a Node,
        searching the quote-appropriate path; record failures."""
        keyword, quote, fname = t
        result = SCons.Node.FS.find_file(fname, self.searchpath[quote])
        if not result:
            self.missing.append((fname, self.current_file))
        return result
    def read_file(self, file):
        """Return the contents of a file Node; on any I/O error record it
        as missing and return an empty string so scanning continues."""
        try:
            with open(str(file.rfile())) as fp:
                return fp.read()
        except EnvironmentError as e:
            self.missing.append((file, self.current_file))
            return ''
def dictify_CPPDEFINES(env):
    """Normalize the environment's CPPDEFINES into a plain dict.

    CPPDEFINES may be absent, None, a dict, a bare name, or a sequence of
    names and/or (name, value) pairs; names without a value map to None.
    """
    cppdefines = env.get('CPPDEFINES', {})
    if cppdefines is None:
        return {}
    if SCons.Util.is_Sequence(cppdefines):
        result = {}
        for define in cppdefines:
            if SCons.Util.is_Sequence(define):
                # (name, value) pair.
                result[define[0]] = define[1]
            else:
                # Bare macro name with no value.
                result[define] = None
        return result
    if SCons.Util.is_Dict(cppdefines):
        return cppdefines
    # A single bare define (e.g. a string).
    return {cppdefines: None}
class SConsCPPScannerWrapper(object):
    """
    The SCons wrapper around a cpp.py scanner.
    This is the actual glue between the calling conventions of generic
    SCons scanners, and the (subclass of) cpp.py class that knows how
    to look for #include lines with reasonably real C-preprocessor-like
    evaluation of #if/#ifdef/#else/#elif lines.
    """
    def __init__(self, name, variable):
        self.name = name
        # FindPathDirs lazily resolves the include search path from the
        # named construction variable (e.g. CPPPATH).
        self.path = SCons.Scanner.FindPathDirs(variable)
    def __call__(self, node, env, path = ()):
        """Scan `node` and return its dependencies; warn about any
        #include that could not be resolved on the search path."""
        cpp = SConsCPPScanner(current = node.get_dir(),
                              cpppath = path,
                              dict = dictify_CPPDEFINES(env))
        result = cpp(node)
        for included, includer in cpp.missing:
            fmt = "No dependency generated for file: %s (included from: %s) -- file not found"
            SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                fmt % (included, includer))
        return result
    def recurse_nodes(self, nodes):
        # Scanner protocol: recurse into every node this scanner returns.
        return nodes
    def select(self, node):
        # Scanner protocol: this scanner handles all nodes itself.
        return self
def CScanner():
    """Return a prototype Scanner instance for scanning source files
    that use the C pre-processor"""
    # The SConsCPPScannerWrapper above would give real evaluation of
    # #if/#ifdef/#else/#elif lines while searching for #includes; it stays
    # disabled until users can configure which scanner to use.
    # return SConsCPPScannerWrapper("CScanner", "CPPPATH")
    include_regex = '^[ \t]*#[ \t]*(?:include|import)[ \t]*(<|")([^>"]+)(>|")'
    return SCons.Scanner.ClassicCPP(
        "CScanner",
        "$CPPSUFFIXES",
        "CPPPATH",
        include_regex)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.772727 | 116 | 0.650391 |
ebcac8bd03f8d36809232d96955e2d3d55cf5c22 | 3,992 | py | Python | detr2onnx.py | haozy008/detr_transformer | f2cca52a1ea97a31c9497451714373bb691589e9 | [
"Apache-2.0"
] | 22 | 2020-09-20T15:08:57.000Z | 2022-03-27T14:06:09.000Z | detr2onnx.py | haozy008/detr_transformer | f2cca52a1ea97a31c9497451714373bb691589e9 | [
"Apache-2.0"
] | 4 | 2020-12-16T15:52:13.000Z | 2021-08-14T02:40:07.000Z | detr2onnx.py | haozy008/detr_transformer | f2cca52a1ea97a31c9497451714373bb691589e9 | [
"Apache-2.0"
] | 7 | 2020-08-24T03:12:55.000Z | 2022-03-27T14:06:34.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import unittest
import torch
from util.misc import nested_tensor_from_tensor_list
from hubconf import detr_resnet50, detr_resnet50_panoptic
# onnxruntime requires python 3.5 or above
try:
import onnxruntime
except ImportError:
onnxruntime = None
@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable')
class ONNXExporterTester(unittest.TestCase):
    """Exports a DETR model to ONNX and validates it against ONNX Runtime."""
    @classmethod
    def setUpClass(cls):
        # Fixed seed so model init / dummy inputs are reproducible.
        torch.manual_seed(123)
    def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None,
                  output_names=None, input_names=None):
        """Export `model` with the first input, then check the ONNX model's
        outputs against the eager PyTorch outputs for every input."""
        model.eval()
        onnx_io = io.BytesIO()
        onnx_path = "detr.onnx"
        # export to onnx with the first input
        # Exported twice: once to an in-memory buffer (for validation below)
        # and once to disk as detr.onnx.
        torch.onnx.export(model, inputs_list[0], onnx_io,
                          input_names=input_names, output_names=output_names,export_params=True,training=False)
        torch.onnx.export(model, inputs_list[0], onnx_path,
                          input_names=input_names, output_names=output_names,export_params=True,training=False)
        # validate the exported model with onnx runtime
        for test_inputs in inputs_list:
            with torch.no_grad():
                if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list):
                    # DETR expects a NestedTensor wrapping image batches.
                    test_inputs = (nested_tensor_from_tensor_list(test_inputs),)
                test_ouputs = model(*test_inputs)
                if isinstance(test_ouputs, torch.Tensor):
                    test_ouputs = (test_ouputs,)
            self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch)
    def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False):
        """Run the ONNX model under ONNX Runtime and compare each output to
        the reference PyTorch `outputs` within rtol=1e-3 / atol=1e-5."""
        inputs, _ = torch.jit._flatten(inputs)
        outputs, _ = torch.jit._flatten(outputs)
        def to_numpy(tensor):
            if tensor.requires_grad:
                return tensor.detach().cpu().numpy()
            else:
                return tensor.cpu().numpy()
        inputs = list(map(to_numpy, inputs))
        outputs = list(map(to_numpy, outputs))
        ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())
        # compute onnxruntime output prediction
        ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))
        ort_outs = ort_session.run(None, ort_inputs)
        for i in range(0, len(outputs)):
            try:
                torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)
            except AssertionError as error:
                if tolerate_small_mismatch:
                    # Accept only "0.00%" mismatches when tolerance is on.
                    self.assertIn("(0.00%)", str(error), str(error))
                else:
                    raise
    def test_model_onnx_detection(self):
        model = detr_resnet50(pretrained=False).eval()
        dummy_image = torch.ones(1, 3, 800, 800) * 0.3
        model(dummy_image)
        # Test exported model on images of different size, or dummy input
        self.run_model(
            model,
            [(torch.rand(1, 3, 750, 800),)],
            input_names=["inputs"],
            output_names=["pred_logits", "pred_boxes"],
            tolerate_small_mismatch=True,
        )
if __name__ == '__main__':
    detr = detr_resnet50(pretrained=False,num_classes=3+1).eval()  # <------ num_classes must be your class count + 1 (background)
    state_dict = torch.load('./outputs/checkpoint.pth')  # <----------- change this to the path of the checkpoint to load
    detr.load_state_dict(state_dict["model"])
    dummy_image = [torch.ones(1, 3, 800, 800) ]
    onnx_export = ONNXExporterTester()
    onnx_export.run_model(detr, dummy_image,input_names=['inputs'],
                          output_names=["pred_logits", "pred_boxes"],tolerate_small_mismatch=True)
    # https://colab.research.google.com/drive/18UBY-mY9tuw22I4RdjoTua_JfpTTBcE7?usp=sharing
    # torch.onnx.export(detr, dummy_image, "detr.onnx",
    #                   input_names=['inputs'], output_names=["pred_logits", "pred_boxes"])
| 38.384615 | 119 | 0.650301 |
129ce6021660afe594d02b612a04298fd9c09ec6 | 65 | py | Python | tests/app1.py | gilbrookie/cmdr | ee31e5b75a01f00e45f8181bf78017f232f0287e | [
"ISC"
] | null | null | null | tests/app1.py | gilbrookie/cmdr | ee31e5b75a01f00e45f8181bf78017f232f0287e | [
"ISC"
] | null | null | null | tests/app1.py | gilbrookie/cmdr | ee31e5b75a01f00e45f8181bf78017f232f0287e | [
"ISC"
] | null | null | null | #!/usr/bin/python
from data import CmdrSimple
# Smoke-test entry point: launch the minimal Cmdr app defined by the
# CmdrSimple fixture in the test `data` module.
CmdrSimple.start()
| 16.25 | 27 | 0.784615 |
62f871b3bfaf95541ac9faf7b4cc948a7dfd3355 | 6,288 | py | Python | relentless/data.py | mphoward/relentless | 5f7e8eb62696f45df28a948202b324563805a7f5 | [
"BSD-3-Clause"
] | null | null | null | relentless/data.py | mphoward/relentless | 5f7e8eb62696f45df28a948202b324563805a7f5 | [
"BSD-3-Clause"
] | 8 | 2019-12-19T21:27:25.000Z | 2019-12-20T02:47:00.000Z | relentless/data.py | mphoward/relentless | 5f7e8eb62696f45df28a948202b324563805a7f5 | [
"BSD-3-Clause"
] | null | null | null | """
Data management
===============
The :class:`Directory` class provides an interface for creating hierarchical
filesystem directories and files within those directories using either an absolute
or relative path.
.. autosummary::
:nosignatures:
Directory
.. autoclass:: Directory
:members:
"""
import os
import shutil
from . import mpi
class Directory:
    """Context for a filesystem directory.

    The directory specified by ``path`` (which can be either absolute or relative)
    is created if it does not already exist. This process is recursive, so
    ``path`` may include multiple directories that do not yet exist. This object
    represents the final directory in ``path``.

    A :class:`Directory` is a context that can be used to manage the current
    working directory. Entering the context changes the current working
    directory to ``path``, and exiting restores the working directory before the
    context was entered.

    Parameters
    ----------
    path : str
        Absolute or relative directory path.

    Raises
    ------
    OSError
        If the specified path is not a valid directory.

    Examples
    --------
    Creating a directory::

        d = Directory('foo')

    Using the context to open a file ``foo/bar.txt`` in a directory::

        with Directory('foo') as d:
            f = open('bar.txt')

    """
    def __init__(self, path):
        self._start = []
        # ensure path exists at time directory is created (synchronizing)
        path = os.path.realpath(path)
        if mpi.world.rank_is_root:
            if not os.path.exists(path):
                os.makedirs(path)
            dir_error = not os.path.isdir(path)
        else:
            dir_error = None
        # FIX: the broadcast result was previously discarded, so non-root
        # ranks kept dir_error = None and never raised, while the root rank
        # raised alone. Assign the returned value so every rank sees the
        # same error flag.
        # NOTE(review): assumes mpi.world.bcast returns the broadcast value
        # on all ranks (mpi4py-style) -- confirm against relentless.mpi.
        dir_error = mpi.world.bcast(dir_error)
        if dir_error:
            raise OSError('The specified path is not a valid directory')
        self._path = path

    @classmethod
    def cast(cls, directory):
        """Try to cast an object to a directory.

        Ensure that a `str` or :class:`Directory` is a :class:`Directory`. No
        action is taken if the object is already a :class:`Directory`. Otherwise,
        a new one is constructed.

        Parameters
        ----------
        directory : str or :class:`Directory`
            Object to ensure is a directory

        Returns
        -------
        :class:`Directory`
            The cast object.

        """
        if not isinstance(directory, Directory):
            directory = Directory(directory)
        return directory

    def __enter__(self):
        """Enter the directory context.

        The working directory is changed to the ``path`` of this object.

        Returns
        -------
        :class:`Directory`
            This directory.

        """
        self._start.append(os.getcwd())
        os.chdir(self.path)
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        """Exit the directory context.

        If possible, the working directory is reset to the path before entering
        the context. The change is silently ignored if the original directory
        no longer exists.

        """
        try:
            os.chdir(self._start.pop())
        except OSError:
            pass

    def _in_context(self):
        """bool: True if object is being used as a context."""
        return len(self._start) > 0

    @property
    def path(self):
        """str: Real path to the directory."""
        return self._path

    def file(self, name):
        """Get the absolute path to a file in the directory.

        This method is convenient for abstracting references to a file in the
        directory.

        Parameters
        ----------
        name : str
            Name of the file.

        Returns
        -------
        str
            The absolute path to the file ``name``.

        Examples
        --------
        Opening a file by absolute path::

            d = Directory('foo')
            f = open(d.file('bar.txt'))

        """
        return os.path.join(self.path, name)

    def directory(self, name):
        """Get a child directory.

        This method is convenient for abstracting references to child
        directories.

        Parameters
        ----------
        name : str
            Name of the directory.

        Returns
        -------
        :class:`Directory`
            A new directory relative to this one.

        Examples
        --------
        Making nested directories ``foo/bar``::

            foo = Directory('foo')
            bar = foo.directory('bar')

        """
        return Directory(os.path.join(self.path, name))

    def clear_contents(self):
        r"""Clear the contents of a directory.

        This method **removes** all the contents of a directory (files and
        directories), so it should be used carefully!

        """
        # delete on root rank and wait
        if mpi.world.rank_is_root:
            for entry in os.scandir(self.path):
                if entry.is_file():
                    os.remove(entry.path)
                elif entry.is_dir():
                    shutil.rmtree(entry.path)
        mpi.world.barrier()

    def move_contents(self, dest):
        """Move the contents of the directory.

        Parameters
        ----------
        dest : :class:`Directory` or :class:`str`
            Destination directory.

        """
        dest = Directory.cast(dest)
        # move on root rank and wait
        if mpi.world.rank_is_root:
            for entry in os.scandir(self.path):
                shutil.move(entry.path, dest.path)
        mpi.world.barrier()

    def copy_contents(self, dest):
        """Copy the contents of the directory.

        Parameters
        ----------
        dest : :class:`Directory` or :class:`str`
            Destination directory.

        """
        dest = Directory.cast(dest)
        # copy using root rank and wait
        if mpi.world.rank_is_root:
            for entry in os.scandir(self.path):
                if entry.is_file():
                    shutil.copy2(entry.path, dest.path)
                elif entry.is_dir():
                    shutil.copytree(entry.path, os.path.join(dest.path,entry.name))
        mpi.world.barrier()
| 26.757447 | 83 | 0.566476 |
c7b559c04142bfeba26ddab4738ba5da0b7c8454 | 7,629 | py | Python | Iirc.EnergyLimitsScheduling.Shared/python/vizualization/gantt.py | CTU-IIG/EnergyLimitsScheduling | 4046c5d6f2a6ff39de0a80665a64666938b0928b | [
"MIT"
] | null | null | null | Iirc.EnergyLimitsScheduling.Shared/python/vizualization/gantt.py | CTU-IIG/EnergyLimitsScheduling | 4046c5d6f2a6ff39de0a80665a64666938b0928b | [
"MIT"
] | null | null | null | Iirc.EnergyLimitsScheduling.Shared/python/vizualization/gantt.py | CTU-IIG/EnergyLimitsScheduling | 4046c5d6f2a6ff39de0a80665a64666938b0928b | [
"MIT"
] | null | null | null | from typing import Tuple, List, Dict, Optional
import matplotlib
matplotlib.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
matplotlib.rc('text', usetex=True)
matplotlib.rc('text.latex', preamble=r'\usepackage{amsmath}')
import numpy as np
import matplotlib.pyplot as plt
import struct
from matplotlib.patches import Rectangle
from datastructs.instance import Instance, Operation, Job
__all__ = [
'draw'
]
def _generate_colors(n: int) -> List[Tuple[float, float, float]]:
def scale_rgb_color(r: int, g: int, b: int) -> Tuple[float, float, float]:
return (r / 255.0, g / 255.0, b / 255.0)
# Backup old randomness state.
rand_state = np.random.get_state()
np.random.seed(0)
# Some nice-looking default colors.
colors = [
scale_rgb_color(*struct.unpack('BBB', bytes.fromhex(color[1:])))
for color in plt.rcParams['axes.prop_cycle'].by_key()['color']
]
# If needed, add more random colors.
colors.extend(np.random.rand(3) for _ in range(n - len(colors)))
del colors[n:]
# Restore old randomness state.
np.random.set_state(rand_state)
return colors
def _get_machine_bottom_y(machine_index: int, num_machines: int, machine_height: int) -> int:
return (num_machines - 1 - machine_index) * machine_height
def _get_operation_bottom_y(
        operation: Operation,
        num_machines: int,
        machine_height: float,
        operation_margin: float) -> float:
    """Return the y-coordinate of the bottom edge of an operation's rectangle.

    The rectangle sits inside its machine's row, offset upward by
    ``operation_margin``. (Annotations fixed: ``machine_height`` and
    ``operation_margin`` are floats in ``draw``, so the result is a float.)
    """
    return _get_machine_bottom_y(operation.machine_index, num_machines, machine_height) + operation_margin
def _compute_intervals_overlap(
left1: float,
right1: float,
left2: float,
right2: float) -> float:
return max(0, min(right1, right2) - max(left1, left2))
def draw(
        ins: Instance,
        start_times: Dict[Operation, float],
        title='',
        operation_height: float = 0.8,
        operation_margin: float = 0.1,
        time_units: Optional[str] = None,
        power_consumption_units: Optional[str] = None):
    """Render a two-panel figure: a Gantt chart of the schedule (top) and a
    stacked per-metering-interval energy-consumption bar chart (bottom)."""
    job_colors = _generate_colors(len(ins.jobs)) # By job index.
    machine_height = operation_height + operation_margin
    # Index of the last metering interval touched by any operation.
    last_metering_interval_index = int(round(max([start_time + operation.processing_time
        for operation, start_time in start_times.items()]) / ins.length_metering_interval))
    horizon = (last_metering_interval_index + 1) * ins.length_metering_interval
    gantt_ylim = machine_height * ins.num_machines
    gantt_xlim = horizon
    energy_ylim = ins.energy_limit * 1.1
    energy_xlim = gantt_xlim
    fig = plt.figure(figsize=(8, 4))
    fig.canvas.set_window_title(title)
    gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[2, 3])
    # Gantt.
    gantt_ax = fig.add_subplot(gs[0])
    gantt_ax.set_title(title)
    gantt_ax.spines['top'].set_visible(False)
    gantt_ax.spines['right'].set_visible(False)
    gantt_ax.spines['bottom'].set_visible(True)
    gantt_ax.spines['left'].set_visible(False)
    plt.ylim(0, gantt_ylim)
    plt.xlim(0, gantt_xlim)
    if time_units is None:
        plt.xlabel("time")
    else:
        plt.xlabel(f"time [{time_units}]")
    gantt_ax.yaxis.set_visible(False)
    # Dotted vertical lines mark metering-interval boundaries.
    for metering_interval_index in range(1, last_metering_interval_index + 1):
        x = metering_interval_index * ins.length_metering_interval
        plt.plot([x, x], [0, gantt_ylim], "b:", linewidth=1)
    # One rectangle per scheduled operation, colored by job.
    for operation, start_time in start_times.items():
        rect = Rectangle(
            (start_time, _get_operation_bottom_y(operation, ins.num_machines, machine_height, operation_margin)),
            operation.processing_time,
            operation_height,
            facecolor=job_colors[operation.job_index],
            edgecolor="black",
            linewidth=1
        )
        gantt_ax.add_patch(rect)
    # Energy consumption.
    energy_ax = fig.add_subplot(gs[1])
    plt.ylim(0, energy_ylim)
    plt.xlim(0, energy_xlim)
    plt.xlabel("metering intervals")
    # Ticks at interval centers, labeled 1..N.
    plt.xticks(
        [(n + 0.5) * ins.length_metering_interval for n in range(int(horizon / ins.length_metering_interval))],
        np.array(range(int(horizon / ins.length_metering_interval))) + 1)
    if power_consumption_units is None:
        plt.ylabel(u"energy consumption\nin metering interval")
    else:
        plt.ylabel(u"energy consumption\nin metering interval [{units}]".format(units=power_consumption_units))
    energy_ax.xaxis.set_ticks_position('none')
    energy_ax.yaxis.set_ticks_position('left')
    energy_ax.spines['top'].set_visible(False)
    energy_ax.spines['right'].set_visible(False)
    energy_ax.spines['bottom'].set_visible(True)
    energy_ax.spines['left'].set_visible(True)
    for metering_interval_index in range(1, last_metering_interval_index + 1):
        x = metering_interval_index * ins.length_metering_interval
        plt.plot([x, x], [0, energy_ylim], "b:", linewidth=1)
    # Stack operations in a deterministic order (start time, then machine).
    ordered_operations = sorted(
        start_times.keys(),
        key=lambda operation: (start_times[operation], operation.machine_index))
    for metering_interval_index in range(last_metering_interval_index + 1):
        metering_interval_energy_consumption = 0.0
        metering_interval_start = metering_interval_index * ins.length_metering_interval
        metering_interval_end = (metering_interval_index + 1) * ins.length_metering_interval
        # Dashed red line marks the per-interval energy limit.
        plt.plot(
            [metering_interval_start, metering_interval_start + ins.length_metering_interval],
            [ins.energy_limit, ins.energy_limit],
            "r--", linewidth=2
        )
        for operation in ordered_operations:
            # Time the operation spends inside this metering interval.
            overlap = _compute_intervals_overlap(
                start_times[operation],
                start_times[operation] + operation.processing_time,
                metering_interval_start,
                metering_interval_end
            )
            if not np.isclose(overlap, 0.0):
                # Energy = overlap duration x power; stack bars bottom-up.
                energy_consumption = overlap * operation.power_consumption
                stack_width_percent = 0.6
                stack_width = ins.length_metering_interval * stack_width_percent
                stack_space = ins.length_metering_interval * ((1.0 - stack_width_percent) / 2.0)
                rect = Rectangle(
                    (metering_interval_start + stack_space, metering_interval_energy_consumption),
                    stack_width,
                    energy_consumption,
                    facecolor=job_colors[operation.job_index])
                metering_interval_energy_consumption += energy_consumption
                energy_ax.add_patch(rect)
    plt.tight_layout()
if __name__ == '__main__':
    # Small two-job, three-machine demo instance for eyeballing the plot.
    jobs = [
        Job(
            0, 0,
            [
                Operation(
                    0, 0, 0, 1, 5, 14.0
                ),
                Operation(
                    1, 1, 0, 0, 7, 23.0
                ),
                Operation(
                    2, 2, 0, 2, 3, 13.0
                ),
            ]
        ),
        Job(
            1, 1,
            [
                Operation(
                    3, 0, 1, 2, 2, 12.0
                ),
                Operation(
                    4, 1, 1, 1, 12, 28.0
                ),
                Operation(
                    5, 2, 1, 0, 35, 10.0
                ),
            ]
        ),
    ]
    ins = Instance(3, jobs, 600.0, 90, 15)
    # Hand-picked feasible start times keyed by operation.
    start_times = {
        jobs[0].operations[0]: 0,
        jobs[0].operations[1]: 5,
        jobs[0].operations[2]: 12,
        jobs[1].operations[0]: 0,
        jobs[1].operations[1]: 5,
        jobs[1].operations[2]: 17,
    }
    draw(ins, start_times)
    plt.show()
| 33.460526 | 113 | 0.621051 |
f963005ec0cd36e19b5190a0b14d7954cb59e026 | 339 | py | Python | skfftw/__init__.py | ghisvail/scikit-fftw | 98dd33250794405e4d983c34ccbf27d27572a75b | [
"BSD-3-Clause"
] | null | null | null | skfftw/__init__.py | ghisvail/scikit-fftw | 98dd33250794405e4d983c34ccbf27d27572a75b | [
"BSD-3-Clause"
] | null | null | null | skfftw/__init__.py | ghisvail/scikit-fftw | 98dd33250794405e4d983c34ccbf27d27572a75b | [
"BSD-3-Clause"
] | null | null | null | # coding: utf8
# Copyright (c) 2014, 2015 Ghislain Antony Vaillant.
#
# This file is distributed under the new BSD License, see the LICENSE file or
# checkout the license terms at http://opensource.org/licenses/BSD-3-Clause).
from __future__ import absolute_import, division, print_function
from .version import VERSION as __version__
| 30.818182 | 78 | 0.781711 |
1742a0a5d6ce46ec86276eed7f389dfa7d3c3e89 | 2,773 | py | Python | homeassistant/components/abode/camera.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/components/abode/camera.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/abode/camera.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 2 | 2019-07-05T17:46:08.000Z | 2021-04-25T21:21:02.000Z | """Support for Abode Security System cameras."""
from datetime import timedelta
import logging
import abodepy.helpers.constants as CONST
import abodepy.helpers.timeline as TIMELINE
import requests
from homeassistant.components.camera import Camera
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util import Throttle
from . import AbodeDevice
from .const import DOMAIN, SIGNAL_CAPTURE_IMAGE
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=90)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Abode camera devices.

    Creates one :class:`AbodeCamera` entity per Abode device of the camera
    type; each entity subscribes to image-capture timeline events.
    """
    data = hass.data[DOMAIN]
    # Comprehension instead of the manual append loop (same result).
    entities = [
        AbodeCamera(data, device, TIMELINE.CAPTURE_IMAGE)
        for device in data.abode.get_devices(generic_type=CONST.TYPE_CAMERA)
    ]
    async_add_entities(entities)
class AbodeCamera(AbodeDevice, Camera):
    """Representation of an Abode camera."""

    def __init__(self, data, device, event):
        """Initialize the Abode device.

        :param data: shared Abode integration data (holds the abode client).
        :param device: the abodepy camera device being wrapped.
        :param event: timeline event type that signals a new image capture.
        """
        AbodeDevice.__init__(self, data, device)
        Camera.__init__(self)
        self._event = event
        # Last HTTP response holding the downloaded capture, or None.
        self._response = None

    async def async_added_to_hass(self):
        """Subscribe Abode events."""
        await super().async_added_to_hass()
        self.hass.async_add_job(
            self._data.abode.events.add_timeline_callback,
            self._event,
            self._capture_callback,
        )
        # Per-entity dispatcher signal used by the capture_image service.
        signal = SIGNAL_CAPTURE_IMAGE.format(self.entity_id)
        async_dispatcher_connect(self.hass, signal, self.capture)

    def capture(self):
        """Request a new image capture."""
        return self._device.capture()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def refresh_image(self):
        """Find a new image on the timeline."""
        if self._device.refresh_image():
            self.get_image()

    def get_image(self):
        """Attempt to download the most recent capture."""
        if self._device.image_url:
            try:
                self._response = requests.get(self._device.image_url, stream=True)
                self._response.raise_for_status()
            except requests.RequestException as err:
                # RequestException also covers connection errors and
                # timeouts, not only bad HTTP status codes (previously only
                # HTTPError was caught, so a network hiccup propagated out
                # of camera_image and broke the entity update).
                _LOGGER.warning("Failed to get camera image: %s", err)
                self._response = None
        else:
            self._response = None

    def camera_image(self):
        """Get a camera image."""
        self.refresh_image()
        if self._response:
            return self._response.content
        return None

    def _capture_callback(self, capture):
        """Update the image with the device then refresh device."""
        self._device.update_image_location(capture)
        self.get_image()
        self.schedule_update_ha_state()
| 29.817204 | 82 | 0.670754 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.