hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f708ae9eeb4e2946f5143c922dd31621f9c88830
| 6,093
|
py
|
Python
|
test_model_images.py
|
Ambattz/Intelligent_Traffic_Management_System
|
51c3100ddb3479538d8a6accbcc0ea9f751481a7
|
[
"MIT"
] | null | null | null |
test_model_images.py
|
Ambattz/Intelligent_Traffic_Management_System
|
51c3100ddb3479538d8a6accbcc0ea9f751481a7
|
[
"MIT"
] | null | null | null |
test_model_images.py
|
Ambattz/Intelligent_Traffic_Management_System
|
51c3100ddb3479538d8a6accbcc0ea9f751481a7
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# script repurposed from sentdex's edits and TensorFlow's example script. Pretty messy as not all unnecessary
# parts of the original have been removed
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# Which exported model folder to load.
MODEL_NAME = 'trained_model' # change to whatever folder has the new graph
# MODEL_FILE = MODEL_NAME + '.tar.gz' # these lines not needed as we are using our own model
# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'label.pbtxt') # label map lives at training/label.pbtxt
NUM_CLASSES = 3 # NOTE(review): stale comment claimed one class; the label map is expected to define 3
# ## Download Model
# opener = urllib.request.URLopener() # we don't need to download model since we have our own
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
# file_name = os.path.basename(file.name)
# if 'frozen_inference_graph.pb' in file_name:
# tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    # GraphDef is the serialized (protobuf) form of the frozen graph.
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        # name='' keeps tensor names unprefixed (e.g. 'image_tensor:0' below).
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
# category_index maps class id -> {'id': ..., 'name': ...}, consumed by the visualizer below.
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Convert a PIL image to an (height, width, 3) uint8 numpy array.

    Assumes the image has exactly 3 channels (RGB) — TODO confirm for the
    test set; a grayscale or RGBA image would make the reshape fail.
    """
    (im_width, im_height) = image.size
    # Build the array as uint8 directly instead of materialising an
    # intermediate default-dtype array and copying it with astype().
    return np.asarray(image.getdata(), dtype=np.uint8).reshape(
        (im_height, im_width, 3))
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test'
# NOTE(review): expects test/image0.jpg .. test/image59.jpg to all exist — verify against the folder.
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(0, 60)] # adjust range for # of images in folder
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Tensor handles depend only on the graph, so look them up once
        # instead of on every loop iteration.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        boxes_tensor = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score is the confidence for a detection; it is drawn on the
        # result image together with the class label.
        scores_tensor = detection_graph.get_tensor_by_name('detection_scores:0')
        classes_tensor = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections_tensor = detection_graph.get_tensor_by_name('num_detections:0')
        # enumerate replaces the original hand-rolled `i = 0 ... i = i+1` counter.
        for i, image_path in enumerate(TEST_IMAGE_PATHS):
            image = Image.open(image_path)
            # the array based representation of the image will be used later in order to prepare the
            # result image with boxes and labels on it.
            image_np = load_image_into_numpy_array(image)
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes_tensor, scores_tensor, classes_tensor, num_detections_tensor],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            fig = plt.figure(figsize=IMAGE_SIZE)
            plt.imshow(image_np) # matplotlib is configured for command line only so we save the outputs instead
            plt.savefig("outputs/detection_output{}.png".format(i)) # create an outputs folder for the images to be saved
            # Close the figure so 60 iterations don't accumulate open figures in memory.
            plt.close(fig)
| 42.3125
| 280
| 0.702445
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
sys.path.append("..")
MODEL_NAME = 'trained_model'
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('training', 'label.pbtxt')
NUM_CLASSES = 3
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
# file_name = os.path.basename(file.name)
# if 'frozen_inference_graph.pb' in file_name:
# tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Convert a PIL image to an (height, width, 3) uint8 numpy array.

    Assumes the image has exactly 3 channels (RGB) — TODO confirm for the
    test set; a grayscale or RGBA image would make the reshape fail.
    """
    (im_width, im_height) = image.size
    # Build the array as uint8 directly instead of materialising an
    # intermediate default-dtype array and copying it with astype().
    return np.asarray(image.getdata(), dtype=np.uint8).reshape(
        (im_height, im_width, 3))
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(0, 60)] # adjust range for # of images in folder
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Tensor handles depend only on the graph, so look them up once
        # instead of on every loop iteration.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        boxes_tensor = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score is the confidence for a detection; it is drawn on the
        # result image together with the class label.
        scores_tensor = detection_graph.get_tensor_by_name('detection_scores:0')
        classes_tensor = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections_tensor = detection_graph.get_tensor_by_name('num_detections:0')
        # enumerate replaces the original hand-rolled `i = 0 ... i = i+1` counter.
        for i, image_path in enumerate(TEST_IMAGE_PATHS):
            image = Image.open(image_path)
            # Array-based representation of the image, used to draw boxes/labels on.
            image_np = load_image_into_numpy_array(image)
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes_tensor, scores_tensor, classes_tensor, num_detections_tensor],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            fig = plt.figure(figsize=IMAGE_SIZE)
            plt.imshow(image_np) # matplotlib is configured for command line only so we save the outputs instead
            plt.savefig("outputs/detection_output{}.png".format(i)) # create an outputs folder for the images to be saved
            # Close the figure so 60 iterations don't accumulate open figures in memory.
            plt.close(fig)
| true
| true
|
f708af28ca60caeeae8f3a4f7d0f575926c07fb3
| 12,524
|
py
|
Python
|
vegnoveg/vegnonveg-fulltraining-nnframe.py
|
intel-analytics/WorldBankPoC
|
49c19268601ff1aa7e396ddc5a8a23abfe73880e
|
[
"Apache-2.0"
] | 3
|
2018-07-05T14:15:07.000Z
|
2019-04-29T09:29:11.000Z
|
vegnoveg/vegnonveg-fulltraining-nnframe.py
|
intel-analytics/WorldBankPoC
|
49c19268601ff1aa7e396ddc5a8a23abfe73880e
|
[
"Apache-2.0"
] | null | null | null |
vegnoveg/vegnonveg-fulltraining-nnframe.py
|
intel-analytics/WorldBankPoC
|
49c19268601ff1aa7e396ddc5a8a23abfe73880e
|
[
"Apache-2.0"
] | 3
|
2018-06-19T13:58:12.000Z
|
2019-11-06T01:20:14.000Z
|
# Databricks notebook source
import pandas as pd
from os import listdir
from os.path import join, basename
import struct
import pickle
import json
import os
from scipy import misc
import datetime as dt
from pyspark.sql.types import *
from pyspark.sql.functions import udf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# import matplotlib.pyplot as plt
# %matplotlib inline
# COMMAND ----------
# %pylab inline
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.util.common import *
from bigdl.dataset.transformer import *
from bigdl.dataset import mnist
from bigdl.transform.vision.image import *
from zoo.pipeline.nnframes.nn_image_reader import *
from zoo.pipeline.nnframes.nn_image_transformer import *
from zoo.pipeline.nnframes.nn_classifier import *
from zoo.common.nncontext import *
import urllib
# COMMAND ----------
def scala_T(input_T):
    """
    Helper function for building Inception layers. Transforms a list of numbers to a dictionary with ascending keys
    and 0 appended to the front (mirrors Scala's 1-based Table indexing). Ignores dictionary inputs.
    :param input_T: either list or dict
    :return: dictionary with ascending keys and 0 appended to front {0: 0, 1: realdata_1, 2: realdata_2, ...}
    """
    # isinstance (not `type(...) is list`) so list subclasses are handled too.
    if isinstance(input_T, list):
        # Prepend 0 so the real data starts from index 1.
        return dict(enumerate([0] + input_T))
    # If it is already a dictionary, return it unchanged.
    return input_T
# COMMAND ----------
def Inception_Layer_v1(input_size, config, name_prefix=""):
    """
    Builds the inception-v1 submodule, a local network, that is stacked in the entire architecture when building
    the full model.
    :param input_size: number of input channels coming into the local network
    :param config: nested scala_T table: config[1][1] = 1x1 out channels; config[2] = (3x3 reduce, 3x3 out);
                   config[3] = (5x5 reduce, 5x5 out); config[4][1] = pool-projection out channels
    :param name_prefix: string naming the layers of the particular local network
    :return: concat container object with all of the Sequential layers' output concatenated depthwise
    """
    '''
    Concat is a container which concatenates the output of its submodules along the provided dimension: all submodules
    take the same inputs, and their output is concatenated. Dimension 2 is the channel axis (dim 1 is the batch).
    '''
    concat = Concat(2)
    """
    Below, we first create a container Sequential, then add the layers into the container one by one. The
    order of the layers in the model is the same as the insertion order.
    """
    # Branch 1: plain 1x1 convolution.
    conv1 = Sequential()
    #Adding layers to the conv1 model we just created
    #SpatialConvolution is a module that applies a 2D convolution over an input image.
    conv1.add(SpatialConvolution(input_size, config[1][1], 1, 1, 1, 1).set_name(name_prefix + "1x1"))
    conv1.add(ReLU(True).set_name(name_prefix + "relu_1x1"))
    concat.add(conv1)
    # Branch 2: 1x1 reduction followed by 3x3 convolution (padding 1 keeps spatial size).
    conv3 = Sequential()
    conv3.add(SpatialConvolution(input_size, config[2][1], 1, 1, 1, 1).set_name(name_prefix + "3x3_reduce"))
    conv3.add(ReLU(True).set_name(name_prefix + "relu_3x3_reduce"))
    conv3.add(SpatialConvolution(config[2][1], config[2][2], 3, 3, 1, 1, 1, 1).set_name(name_prefix + "3x3"))
    conv3.add(ReLU(True).set_name(name_prefix + "relu_3x3"))
    concat.add(conv3)
    # Branch 3: 1x1 reduction followed by 5x5 convolution (padding 2 keeps spatial size).
    conv5 = Sequential()
    conv5.add(SpatialConvolution(input_size,config[3][1], 1, 1, 1, 1).set_name(name_prefix + "5x5_reduce"))
    conv5.add(ReLU(True).set_name(name_prefix + "relu_5x5_reduce"))
    conv5.add(SpatialConvolution(config[3][1], config[3][2], 5, 5, 1, 1, 2, 2).set_name(name_prefix + "5x5"))
    conv5.add(ReLU(True).set_name(name_prefix + "relu_5x5"))
    concat.add(conv5)
    # Branch 4: 3x3 max-pool (stride 1, padded) followed by a 1x1 projection.
    pool = Sequential()
    pool.add(SpatialMaxPooling(3, 3, 1, 1, 1, 1, to_ceil=True).set_name(name_prefix + "pool"))
    pool.add(SpatialConvolution(input_size, config[4][1], 1, 1, 1, 1).set_name(name_prefix + "pool_proj"))
    pool.add(ReLU(True).set_name(name_prefix + "relu_pool_proj"))
    concat.add(pool).set_name(name_prefix + "output")
    return concat
# COMMAND ----------
def Inception_v1(class_num):
    """Assemble the full Inception-v1 (GoogLeNet) network from stacked Inception_Layer_v1 blocks.

    :param class_num: number of output classes for the final classifier
    :return: BigDL Sequential model ending in LogSoftMax (pairs with ClassNLLCriterion)
    """
    model = Sequential()
    # Stem: 7x7/2 conv -> pool -> LRN -> 1x1 + 3x3 convs -> LRN -> pool.
    model.add(SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3, 1, False).set_name("conv1/7x7_s2"))
    model.add(ReLU(True).set_name("conv1/relu_7x7"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool1/3x3_s2"))
    model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("pool1/norm1"))
    model.add(SpatialConvolution(64, 64, 1, 1, 1, 1).set_name("conv2/3x3_reduce"))
    model.add(ReLU(True).set_name("conv2/relu_3x3_reduce"))
    model.add(SpatialConvolution(64, 192, 3, 3, 1, 1, 1, 1).set_name("conv2/3x3"))
    model.add(ReLU(True).set_name("conv2/relu_3x3"))
    model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("conv2/norm2"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool2/3x3_s2"))
    # Inception stages 3a-3b.
    model.add(Inception_Layer_v1(192, scala_T([scala_T([64]), scala_T(
    [96, 128]), scala_T([16, 32]), scala_T([32])]), "inception_3a/"))
    model.add(Inception_Layer_v1(256, scala_T([scala_T([128]), scala_T(
    [128, 192]), scala_T([32, 96]), scala_T([64])]), "inception_3b/"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
    # Inception stages 4a-4e.
    model.add(Inception_Layer_v1(480, scala_T([scala_T([192]), scala_T(
    [96, 208]), scala_T([16, 48]), scala_T([64])]), "inception_4a/"))
    model.add(Inception_Layer_v1(512, scala_T([scala_T([160]), scala_T(
    [112, 224]), scala_T([24, 64]), scala_T([64])]), "inception_4b/"))
    model.add(Inception_Layer_v1(512, scala_T([scala_T([128]), scala_T(
    [128, 256]), scala_T([24, 64]), scala_T([64])]), "inception_4c/"))
    model.add(Inception_Layer_v1(512, scala_T([scala_T([112]), scala_T(
    [144, 288]), scala_T([32, 64]), scala_T([64])]), "inception_4d/"))
    model.add(Inception_Layer_v1(528, scala_T([scala_T([256]), scala_T(
    [160, 320]), scala_T([32, 128]), scala_T([128])]), "inception_4e/"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
    # Inception stages 5a-5b.
    model.add(Inception_Layer_v1(832, scala_T([scala_T([256]), scala_T(
    [160, 320]), scala_T([32, 128]), scala_T([128])]), "inception_5a/"))
    model.add(Inception_Layer_v1(832, scala_T([scala_T([384]), scala_T(
    [192, 384]), scala_T([48, 128]), scala_T([128])]), "inception_5b/"))
    # Head: global average pool -> dropout -> linear classifier -> log-softmax.
    model.add(SpatialAveragePooling(7, 7, 1, 1).set_name("pool5/7x7_s1"))
    model.add(Dropout(0.4).set_name("pool5/drop_7x7_s1"))
    model.add(View([1024], num_input_dims=3))
    model.add(Linear(1024, class_num).set_name("loss3/classifier"))
    model.add(LogSoftMax().set_name("loss3/loss3"))
    model.reset()
    return model
# COMMAND ----------
# MAGIC %md ## Download the images from Amazon s3
# MAGIC
# MAGIC Make sure you have AWS command line interface to recursively download all images in s3 folder. You can set up aws cli from this link: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
# COMMAND ----------
import urllib
from os import path
# Root folder for model artefacts; checkpoints are written beneath it.
MODEL_ROOT = "/mnt/nobigdl/few-inceptionv1"
# dbutils.fs.mkdirs(MODEL_ROOT)
#local_folder = DATA_ROOT + '/vegnonveg-samples'
checkpoint_path = path.join(MODEL_ROOT, "checkpoints")
# if not path.isdir(local_folder):
# os.system('aws s3 cp --recursive s3://vegnonveg/vegnonveg-fewsamples %s' % local_folder)
# COMMAND ----------
# MAGIC %md ## Save images and load to Spark as BigDL ImageFrame
# MAGIC
# MAGIC save data to parquet files and load to spark. Add label to each image.
# COMMAND ----------
DATA_ROOT = "/data/worldbank/"
sample_path = DATA_ROOT + 'samples/'
# sample_path = DATA_ROOT + 'imagenet_samples/'
# sample_path = '/mnt/nobigdl/vegnonveg-samples100/'
label_path = DATA_ROOT + 'vegnonveg-samples_labels.csv'
parquet_path = DATA_ROOT + 'sample_parquet/'
# dbutils.fs.rm(parquet_path, True)
# COMMAND ----------
# Local Spark context with 2 cores; initialize BigDL and route its logs through Spark.
sparkConf = create_spark_conf().setMaster("local[2]").setAppName("test_validation")
sc = get_spark_context(sparkConf)
sqlContext = SQLContext(sc)
#initialize bigdl
init_engine()
redire_spark_logs()
# This only runs at the first time to generate parquet files
# NOTE(review): 'minParitions' (sic) appears to be the parameter name this NNImageReader expects — confirm before "fixing" the spelling.
image_frame = NNImageReader.readImages(sample_path, sc, minParitions=32)
# save dataframe to parquet files
# image_frame.write.parquet(parquet_path)
# ImageFrame.write_parquet(sample_path, parquet_path, sc, partition_num=32)
# COMMAND ----------
# load parquet file into spark cluster, timing the read
import time
start = time.time()
image_raw_DF = sqlContext.read.parquet(parquet_path)
end = time.time()
print("Load data time is: " + str(end-start) + " seconds")
# COMMAND ----------
# create dict from item_name to label; labels are 1-based (BigDL's ClassNLLCriterion expects classes starting at 1)
labels_csv = pd.read_csv(label_path)
unique_labels = labels_csv['item_name'].unique().tolist()
label_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))
class_num = len(label_dict)
# COMMAND ----------
# create label dataframe from the CSV; malformed rows are dropped
label_raw_DF = sqlContext.read.format("com.databricks.spark.csv")\
    .option("header", "true")\
    .option("mode", "DROPMALFORMED")\
    .load(label_path)
# UDFs: map item_name -> numeric label, and obs_uid -> the image file name used as join key.
get_label = udf(lambda item_name: float(label_dict[item_name]), FloatType())
change_name = udf(lambda uid: uid+".jpg", StringType())
labelDF = label_raw_DF.withColumn("label", get_label("item_name")).withColumn("image_name", change_name("obs_uid"))
labelDF.show(truncate=False)
# COMMAND ----------
# Derive image_name from the image path's last component and inner-join with the labels.
get_name = udf(lambda row: row[0].split("/")[-1], StringType())
imageDF = image_raw_DF.withColumn("image_name", get_name("image"))
imageDF.show(truncate=False)
dataDF = imageDF.join(labelDF, "image_name", "inner").select("image", "image_name", "label")
dataDF.show(truncate=False)
# COMMAND ----------
# MAGIC %md ## Do Train/Test Split and preprocessing
# MAGIC Split Train/Test split with some ratio and preprocess images.
# COMMAND ----------
# 80/20 split; fixed seed makes the split reproducible.
data = dataDF.randomSplit([0.8, 0.2], seed=10)
train_image = data[0]
val_image = data[1]
type(train_image)
# COMMAND ----------
IMAGE_SIZE = 224
# Training pipeline: resize, random crop (augmentation), per-channel mean subtraction, to tensor.
train_transformer = NNImageTransformer(
    Pipeline([Resize(256, 256), RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
              ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),
              MatToTensor()])
).setInputCol("image").setOutputCol("features")
train_data = train_transformer.transform(train_image)
# COMMAND ----------
train_size = train_image.count()
# COMMAND ----------
print(train_size)
# COMMAND ----------
# Validation pipeline: deterministic center crop instead of the random training crop.
val_transformer = NNImageTransformer(
    Pipeline([Resize(256,256),
              CenterCrop(IMAGE_SIZE, IMAGE_SIZE),
              ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),
              MatToTensor(to_rgb=True)]
             )
).setInputCol("image").setOutputCol("features")
# COMMAND ----------
test_data = val_transformer.transform(val_image)
# COMMAND ----------
# MAGIC %md ## Define Model
# COMMAND ----------
# Network Parameters
n_classes = len(label_dict)# item_name categories
model = Inception_v1(n_classes)
# COMMAND ----------
# Training hyper-parameters
learning_rate = 0.2
# parameters for
batch_size = 2 #depends on dataset
no_epochs = 1 #stop when validation accuracy doesn't improve anymore
# COMMAND ----------
# NLL criterion pairs with the model's final LogSoftMax layer.
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, [3,IMAGE_SIZE,IMAGE_SIZE])\
    .setBatchSize(batch_size)\
    .setMaxEpoch(no_epochs)\
    .setLearningRate(learning_rate)
start = time.time()
trained_model = classifier.fit(train_data)
end = time.time()
print("Optimization Done.")
print("Training time is: %s seconds" % str(end-start))
# + dt.datetime.now().strftime("%Y%m%d-%H%M%S")
# COMMAND ----------
# Throughput in images/second over the whole training run.
throughput = train_size * no_epochs / (end - start)
print("Average throughput is: %s" % str(throughput))
# COMMAND ----------
#predict on the held-out split
predict_model = trained_model.setBatchSize(batch_size)
predictionDF = predict_model.transform(test_data)
predictionDF.show()
# COMMAND ----------
# Spot-check the first few predictions against ground truth.
num_preds = 1
preds = predictionDF.select("label", "prediction").take(num_preds)
for idx in range(num_preds):
    # true_label = str(map_to_label(map_groundtruth_label(truth[idx].label)))
    true_label = preds[idx][0]
    pred_label = preds[idx][1]
    print(idx + 1, ')', 'Ground Truth label: ', true_label)
    print(idx + 1, ')', 'Predicted label: ', pred_label)
    print("correct" if true_label == pred_label else "wrong")
# COMMAND ----------
'''
Measure Test Accuracy w/Test Set
'''
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictionDF)
# expected error should be less than 10%
print("Accuracy = %g " % accuracy)
| 35.478754
| 211
| 0.690913
|
import pandas as pd
from os import listdir
from os.path import join, basename
import struct
import pickle
import json
import os
from scipy import misc
import datetime as dt
from pyspark.sql.types import *
from pyspark.sql.functions import udf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.util.common import *
from bigdl.dataset.transformer import *
from bigdl.dataset import mnist
from bigdl.transform.vision.image import *
from zoo.pipeline.nnframes.nn_image_reader import *
from zoo.pipeline.nnframes.nn_image_transformer import *
from zoo.pipeline.nnframes.nn_classifier import *
from zoo.common.nncontext import *
import urllib
def scala_T(input_T):
    """Transform a list into a dict with ascending keys and 0 prepended at key 0
    (mirrors Scala's 1-based Table indexing); dict inputs are returned unchanged.
    """
    # isinstance (not `type(...) is list`) so list subclasses are handled too.
    if isinstance(input_T, list):
        return dict(enumerate([0] + input_T))
    return input_T
def Inception_Layer_v1(input_size, config, name_prefix=""):
concat = Concat(2)
conv1 = Sequential()
conv1.add(SpatialConvolution(input_size, config[1][1], 1, 1, 1, 1).set_name(name_prefix + "1x1"))
conv1.add(ReLU(True).set_name(name_prefix + "relu_1x1"))
concat.add(conv1)
conv3 = Sequential()
conv3.add(SpatialConvolution(input_size, config[2][1], 1, 1, 1, 1).set_name(name_prefix + "3x3_reduce"))
conv3.add(ReLU(True).set_name(name_prefix + "relu_3x3_reduce"))
conv3.add(SpatialConvolution(config[2][1], config[2][2], 3, 3, 1, 1, 1, 1).set_name(name_prefix + "3x3"))
conv3.add(ReLU(True).set_name(name_prefix + "relu_3x3"))
concat.add(conv3)
conv5 = Sequential()
conv5.add(SpatialConvolution(input_size,config[3][1], 1, 1, 1, 1).set_name(name_prefix + "5x5_reduce"))
conv5.add(ReLU(True).set_name(name_prefix + "relu_5x5_reduce"))
conv5.add(SpatialConvolution(config[3][1], config[3][2], 5, 5, 1, 1, 2, 2).set_name(name_prefix + "5x5"))
conv5.add(ReLU(True).set_name(name_prefix + "relu_5x5"))
concat.add(conv5)
pool = Sequential()
pool.add(SpatialMaxPooling(3, 3, 1, 1, 1, 1, to_ceil=True).set_name(name_prefix + "pool"))
pool.add(SpatialConvolution(input_size, config[4][1], 1, 1, 1, 1).set_name(name_prefix + "pool_proj"))
pool.add(ReLU(True).set_name(name_prefix + "relu_pool_proj"))
concat.add(pool).set_name(name_prefix + "output")
return concat
def Inception_v1(class_num):
model = Sequential()
model.add(SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3, 1, False).set_name("conv1/7x7_s2"))
model.add(ReLU(True).set_name("conv1/relu_7x7"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool1/3x3_s2"))
model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("pool1/norm1"))
model.add(SpatialConvolution(64, 64, 1, 1, 1, 1).set_name("conv2/3x3_reduce"))
model.add(ReLU(True).set_name("conv2/relu_3x3_reduce"))
model.add(SpatialConvolution(64, 192, 3, 3, 1, 1, 1, 1).set_name("conv2/3x3"))
model.add(ReLU(True).set_name("conv2/relu_3x3"))
model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("conv2/norm2"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool2/3x3_s2"))
model.add(Inception_Layer_v1(192, scala_T([scala_T([64]), scala_T(
[96, 128]), scala_T([16, 32]), scala_T([32])]), "inception_3a/"))
model.add(Inception_Layer_v1(256, scala_T([scala_T([128]), scala_T(
[128, 192]), scala_T([32, 96]), scala_T([64])]), "inception_3b/"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
model.add(Inception_Layer_v1(480, scala_T([scala_T([192]), scala_T(
[96, 208]), scala_T([16, 48]), scala_T([64])]), "inception_4a/"))
model.add(Inception_Layer_v1(512, scala_T([scala_T([160]), scala_T(
[112, 224]), scala_T([24, 64]), scala_T([64])]), "inception_4b/"))
model.add(Inception_Layer_v1(512, scala_T([scala_T([128]), scala_T(
[128, 256]), scala_T([24, 64]), scala_T([64])]), "inception_4c/"))
model.add(Inception_Layer_v1(512, scala_T([scala_T([112]), scala_T(
[144, 288]), scala_T([32, 64]), scala_T([64])]), "inception_4d/"))
model.add(Inception_Layer_v1(528, scala_T([scala_T([256]), scala_T(
[160, 320]), scala_T([32, 128]), scala_T([128])]), "inception_4e/"))
model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
model.add(Inception_Layer_v1(832, scala_T([scala_T([256]), scala_T(
[160, 320]), scala_T([32, 128]), scala_T([128])]), "inception_5a/"))
model.add(Inception_Layer_v1(832, scala_T([scala_T([384]), scala_T(
[192, 384]), scala_T([48, 128]), scala_T([128])]), "inception_5b/"))
model.add(SpatialAveragePooling(7, 7, 1, 1).set_name("pool5/7x7_s1"))
model.add(Dropout(0.4).set_name("pool5/drop_7x7_s1"))
model.add(View([1024], num_input_dims=3))
model.add(Linear(1024, class_num).set_name("loss3/classifier"))
model.add(LogSoftMax().set_name("loss3/loss3"))
model.reset()
return model
import urllib
from os import path
MODEL_ROOT = "/mnt/nobigdl/few-inceptionv1"
checkpoint_path = path.join(MODEL_ROOT, "checkpoints")
DATA_ROOT = "/data/worldbank/"
sample_path = DATA_ROOT + 'samples/'
label_path = DATA_ROOT + 'vegnonveg-samples_labels.csv'
parquet_path = DATA_ROOT + 'sample_parquet/'
sparkConf = create_spark_conf().setMaster("local[2]").setAppName("test_validation")
sc = get_spark_context(sparkConf)
sqlContext = SQLContext(sc)
init_engine()
redire_spark_logs()
image_frame = NNImageReader.readImages(sample_path, sc, minParitions=32)
import time
start = time.time()
image_raw_DF = sqlContext.read.parquet(parquet_path)
end = time.time()
print("Load data time is: " + str(end-start) + " seconds")
labels_csv = pd.read_csv(label_path)
unique_labels = labels_csv['item_name'].unique().tolist()
label_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))
class_num = len(label_dict)
label_raw_DF = sqlContext.read.format("com.databricks.spark.csv")\
.option("header", "true")\
.option("mode", "DROPMALFORMED")\
.load(label_path)
get_label = udf(lambda item_name: float(label_dict[item_name]), FloatType())
change_name = udf(lambda uid: uid+".jpg", StringType())
labelDF = label_raw_DF.withColumn("label", get_label("item_name")).withColumn("image_name", change_name("obs_uid"))
labelDF.show(truncate=False)
get_name = udf(lambda row: row[0].split("/")[-1], StringType())
imageDF = image_raw_DF.withColumn("image_name", get_name("image"))
imageDF.show(truncate=False)
dataDF = imageDF.join(labelDF, "image_name", "inner").select("image", "image_name", "label")
dataDF.show(truncate=False)
data = dataDF.randomSplit([0.8, 0.2], seed=10)
train_image = data[0]
val_image = data[1]
type(train_image)
IMAGE_SIZE = 224
train_transformer = NNImageTransformer(
Pipeline([Resize(256, 256), RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),
MatToTensor()])
).setInputCol("image").setOutputCol("features")
train_data = train_transformer.transform(train_image)
train_size = train_image.count()
print(train_size)
val_transformer = NNImageTransformer(
Pipeline([Resize(256,256),
CenterCrop(IMAGE_SIZE, IMAGE_SIZE),
ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),
MatToTensor(to_rgb=True)]
)
).setInputCol("image").setOutputCol("features")
test_data = val_transformer.transform(val_image)
# Network and training parameters.
# NOTE(review): the comment-stripping step fused multiple statements onto
# single lines here, producing invalid syntax; reconstructed one statement per line.
n_classes = len(label_dict)
model = Inception_v1(n_classes)
learning_rate = 0.2
batch_size = 2
no_epochs = 1
# COMMAND ----------
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, [3,IMAGE_SIZE,IMAGE_SIZE])\
.setBatchSize(batch_size)\
.setMaxEpoch(no_epochs)\
.setLearningRate(learning_rate)
start = time.time()
trained_model = classifier.fit(train_data)
end = time.time()
print("Optimization Done.")
print("Training time is: %s seconds" % str(end-start))
# + dt.datetime.now().strftime("%Y%m%d-%H%M%S")
# COMMAND ----------
throughput = train_size * no_epochs / (end - start)
print("Average throughput is: %s" % str(throughput))
# COMMAND ----------
#predict
predict_model = trained_model.setBatchSize(batch_size)
predictionDF = predict_model.transform(test_data)
predictionDF.show()
# COMMAND ----------
num_preds = 1
preds = predictionDF.select("label", "prediction").take(num_preds)
for idx in range(num_preds):
# true_label = str(map_to_label(map_groundtruth_label(truth[idx].label)))
true_label = preds[idx][0]
pred_label = preds[idx][1]
print(idx + 1, ')', 'Ground Truth label: ', true_label)
print(idx + 1, ')', 'Predicted label: ', pred_label)
print("correct" if true_label == pred_label else "wrong")
# COMMAND ----------
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictionDF)
# expected error should be less than 10%
print("Accuracy = %g " % accuracy)
| true
| true
|
f708b00b0a9edc4940fa8641402e3307b8e92005
| 21,917
|
py
|
Python
|
motion/components/structural.py
|
TUM-AAS/motron
|
2f8800d1d6e297fc4baab555ceb2d37f55841406
|
[
"MIT"
] | null | null | null |
motion/components/structural.py
|
TUM-AAS/motron
|
2f8800d1d6e297fc4baab555ceb2d37f55841406
|
[
"MIT"
] | null | null | null |
motion/components/structural.py
|
TUM-AAS/motron
|
2f8800d1d6e297fc4baab555ceb2d37f55841406
|
[
"MIT"
] | null | null | null |
from typing import Tuple, Optional, List, Union
import torch
from torch.nn import *
import math
def gmm(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
    """Apply a per-node weight matrix to batched node features.

    ``w`` has shape ``(nodes, d, o)`` and ``x`` has shape ``(batch, nodes, d)``;
    the result has shape ``(batch, nodes, o)``.
    """
    result = torch.einsum('ndo,bnd->bno', w, x)
    return result
class GraphLinear(Module):
    """Linear layer applied per graph node, mixed by a graph influence matrix.

    Subclasses are expected to provide ``self.weight``, ``self.bias``,
    ``self.G`` (influence matrix), ``self.mm`` (matmul variant) and
    ``self.node_type_index`` before the layer is used.
    """

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features

    def reset_parameters(self) -> None:
        """Kaiming-initialize weights; per-type (3-D) weights start identical across types."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.weight.dim() == 3:
            # Copy node-type 0's initial weights to every other type.
            self.weight.data[1:] = self.weight.data[0]
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input: torch.Tensor, g: Optional[torch.Tensor] = None) -> torch.Tensor:
        if g is None:
            # L1-normalized influence when it is learned, raw matrix otherwise.
            g = torch.nn.functional.normalize(self.G, p=1., dim=1) if self.learn_influence else self.G
        weights = self.weight[self.node_type_index]
        out = self.mm(input, weights.transpose(-2, -1))
        if self.bias is not None:
            out = out + self.bias[self.node_type_index]
        return g.matmul(out)
class DynamicGraphLinear(GraphLinear):
    # Graph linear layer whose influence matrix is chosen at call time: either
    # passed in directly (``g``) or sliced out of ``self.G`` by node types ``t``.
    def __init__(self, num_node_types: int = 1, *args):
        # NOTE(review): ``num_node_types`` is unused and, being the first positional
        # parameter, swallows the first positional argument intended for
        # ``GraphLinear.__init__`` — confirm all call sites pass it explicitly.
        super().__init__(*args)
    def forward(self, input: torch.Tensor, g: torch.Tensor = None, t: torch.Tensor = None) -> torch.Tensor:
        assert g is not None or t is not None, "Either Graph Influence Matrix or Node Type Vector is needed"
        if g is None:
            # Select the influence sub-matrix covering the given node types.
            g = self.G[t][:, t]
        return super().forward(input, g)
class StaticGraphLinear(GraphLinear):
    """Node-wise linear layer with a static (optionally learnable) influence matrix.

    :param in_features: size of each input sample (positional, via ``*args``)
    :param out_features: size of each output sample (positional, via ``*args``)
    :param num_nodes: number of graph nodes; may be inferred from ``graph_influence``
    :param graph_influence: initial influence matrix (identity when omitted)
    :param learn_influence: when True the influence matrix is a learnable Parameter
    :param node_types: per-node type ids; nodes of the same type share weights
    :param weights_per_type: give every node its own weights when no types are given
    :param bias: when False, no additive bias is learned
    """

    def __init__(self, *args, bias: bool = True, num_nodes: int = None,
                 graph_influence: Union[torch.Tensor, Parameter] = None,
                 learn_influence: bool = False, node_types: torch.Tensor = None,
                 weights_per_type: bool = False):
        super().__init__(*args)
        self.learn_influence = learn_influence

        # --- influence matrix G: supplied matrix or identity fallback ---
        if graph_influence is None:
            assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
            identity = torch.eye(num_nodes, num_nodes)
            if learn_influence:
                self.G = Parameter(identity)
            else:
                self.register_buffer('G', identity)
        else:
            assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
            num_nodes = graph_influence.shape[0]
            if type(graph_influence) is Parameter:
                assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
                self.G = graph_influence
            elif learn_influence:
                self.G = Parameter(graph_influence)
            else:
                self.register_buffer('G', graph_influence)

        # --- weights: 3-D per node type, or plain 2-D shared weights ---
        if weights_per_type and node_types is None:
            node_types = torch.arange(num_nodes)
        if node_types is None:
            self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))
            self.mm = torch.matmul
            self.node_type_index = None
            if bias:
                self.bias = Parameter(torch.Tensor(self.out_features))
            else:
                self.register_parameter('bias', None)
        else:
            num_node_types = node_types.max() + 1
            self.weight = Parameter(torch.Tensor(num_node_types, self.out_features, self.in_features))
            self.mm = gmm
            self.node_type_index = node_types
            if bias:
                self.bias = Parameter(torch.Tensor(num_node_types, self.out_features))
            else:
                self.register_parameter('bias', None)

        self.reset_parameters()
# (hidden state, cell state, graph influence); each may be None before the first step.
GraphLSTMState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]
class BN(Module):
    """BatchNorm1d over the flattened (node, feature) pairs of a (batch, nodes, features) tensor."""

    def __init__(self, num_nodes, num_features):
        super().__init__()
        self.num_nodes = num_nodes
        self.num_features = num_features
        self.bn = BatchNorm1d(num_nodes * num_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        flat_dim = self.num_nodes * self.num_features
        normalized = self.bn(x.view(-1, flat_dim))
        return normalized.view(-1, self.num_nodes, self.num_features)
class LinearX(Module):
    # Identity module: forwards its input unchanged (a no-op stand-in for a layer).
    def __init__(self):
        super().__init__()
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # No transformation applied.
        return input
class StaticGraphLSTMCell_(Module):
    def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
                 recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
                 learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
                 learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
                 weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
        """
        LSTM cell whose gate pre-activations are mixed across graph nodes by an
        influence matrix ``G``; ``G`` may be learned and is optionally updated by
        an additive matrix ``G_add`` after every step.

        :param input_size: The number of expected features in the input `x`
        :param hidden_size: The number of features in the hidden state `h`
        :param num_nodes: Number of graph nodes; inferred from ``graph_influence`` when given there.
        :param dropout: Dropout on the input-to-hidden contribution.
        :param recurrent_dropout: Dropout on the previous hidden state.
        :param graph_influence: Initial influence matrix (identity when omitted).
        :param learn_influence: If True, ``G`` is a learnable Parameter.
        :param additive_graph_influence: Matrix added to ``G`` after each step.
        :param learn_additive_graph_influence: If True, the additive matrix is learnable.
        :param node_types: Per-node type ids; nodes with the same type share weights.
        :param weights_per_type: Give each node its own weights when ``node_types`` is None.
        :param clockwork: Clockwork-RNN style masking: hidden units update at different rates.
        :param bias: If False, no additive biases are created.
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.learn_influence = learn_influence
        self.learn_additive_graph_influence = learn_additive_graph_influence
        # Influence matrix G: supplied (Parameter or tensor) or identity fallback.
        if graph_influence is not None:
            assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
            num_nodes = graph_influence.shape[0]
            if type(graph_influence) is Parameter:
                assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
                self.G = graph_influence
            elif learn_influence:
                self.G = Parameter(graph_influence)
            else:
                self.register_buffer('G', graph_influence)
        else:
            assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
            eye_influence = torch.eye(num_nodes, num_nodes)
            if learn_influence:
                self.G = Parameter(eye_influence)
            else:
                self.register_buffer('G', eye_influence)
        # Additive influence update G_add (the scalar 0. disables the update).
        if additive_graph_influence is not None:
            if type(additive_graph_influence) is Parameter:
                self.G_add = additive_graph_influence
            elif learn_additive_graph_influence:
                self.G_add = Parameter(additive_graph_influence)
            else:
                self.register_buffer('G_add', additive_graph_influence)
        else:
            if learn_additive_graph_influence:
                self.G_add = Parameter(torch.zeros_like(self.G))
            else:
                self.G_add = 0.
        # Gate weights: 3-D (per node type) or plain 2-D shared weights.
        if weights_per_type and node_types is None:
            node_types = torch.tensor([i for i in range(num_nodes)])
        if node_types is not None:
            num_node_types = node_types.max() + 1
            self.weight_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, input_size))
            self.weight_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, hidden_size))
            self.mm = gmm
            self.register_buffer('node_type_index', node_types)
        else:
            self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))
            self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))
            self.mm = torch.matmul
            self.register_buffer('node_type_index', None)
        if bias:
            if node_types is not None:
                self.bias_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
                self.bias_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
            else:
                self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))
                self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))
        else:
            self.bias_ih = None
            self.bias_hh = None
        self.clockwork = clockwork
        # Clockwork phases: each hidden unit gets an update period in 1..8;
        # without clockwork every unit has period 1 (updates every step).
        if clockwork:
            phase = torch.arange(0., hidden_size)
            phase = phase - phase.min()
            phase = (phase / phase.max()) * 8.
            phase += 1.
            phase = torch.floor(phase)
            self.register_buffer('phase', phase)
        else:
            phase = torch.ones(hidden_size)
            self.register_buffer('phase', phase)
        self.dropout = Dropout(dropout)
        self.r_dropout = Dropout(recurrent_dropout)
        self.num_nodes = num_nodes
        self.init_weights()

    def init_weights(self):
        """Uniform init for all parameters except the influence matrices; 3-D
        per-type weights start identical across types (copied from type 0)."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            if weight is self.G:
                continue
            if weight is self.G_add:
                continue
            weight.data.uniform_(-stdv, stdv)
            # BUGFIX: the original condition lacked parentheses
            # (`a or b and c` parses as `a or (b and c)`), so for 2-D shared
            # weights `weight is self.weight_hh` alone triggered the copy and
            # overwrote every row of weight_hh with its first row. The row copy
            # is only meant for 3-D per-node-type weights.
            if (weight is self.weight_hh or weight is self.weight_ih) and len(self.weight_ih.shape) == 3:
                weight.data[1:] = weight.data[0]

    def forward(self, input: torch.Tensor, state: GraphLSTMState, t: int = 0) -> Tuple[torch.Tensor, GraphLSTMState]:
        """One LSTM step.

        :param input: (batch, num_nodes, input_size) node features.
        :param state: (h, c, g) from the previous step; each may be None initially.
        :param t: global time index, used only for the clockwork update mask.
        :return: (output, new state); the output is the new hidden state.
        """
        hx, cx, gx = state
        if hx is None:
            hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
        if cx is None:
            cx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
        if gx is None and self.learn_influence:
            gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
        elif gx is None:
            gx = self.G
        hx = self.r_dropout(hx)
        weight_ih = self.weight_ih[self.node_type_index]
        weight_hh = self.weight_hh[self.node_type_index]
        if self.bias_hh is not None:
            bias_hh = self.bias_hh[self.node_type_index]
        else:
            bias_hh = 0.
        # NOTE(review): self.bias_ih is created and initialized but never used in
        # this forward (the GRU cell in this file does use its bias_ih) — confirm
        # whether dropping the input bias here is intentional before changing it.
        # Units whose period divides t+1 update this step (mask 1); others hold.
        c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(cx)
        gates = (self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) +
                 self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh)
        # Mix the gate pre-activations across nodes via the influence matrix.
        gates = torch.matmul(gx, gates)
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 2)
        ingate = torch.sigmoid(ingate)
        forgetgate = torch.sigmoid(forgetgate)
        cellgate = torch.tanh(cellgate)
        outgate = torch.sigmoid(outgate)
        cy = c_mask * ((forgetgate * cx) + (ingate * cellgate)) + (1 - c_mask) * cx
        hy = outgate * torch.tanh(cy)
        # Evolve the influence matrix for the next step.
        gx = gx + self.G_add
        if self.learn_influence or self.learn_additive_graph_influence:
            gx = torch.nn.functional.normalize(gx, p=1., dim=1)
        return hy, (hy, cy, gx)
class StaticGraphLSTM_(Module):
    """Multi-layer static-graph LSTM over (batch, time, nodes, features) sequences."""

    def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
        super().__init__()
        first_cell = StaticGraphLSTMCell_(input_size, hidden_size, **kwargs)
        deeper_cells = [StaticGraphLSTMCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)]
        self.layers = ModuleList([first_cell] + deeper_cells)
        self.dropout = Dropout(layer_dropout)

    def forward(self, input: torch.Tensor, states: Optional[List[GraphLSTMState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphLSTMState]]:
        if states is None:
            n: Optional[torch.Tensor] = None
            states = [(n, n, n)] * len(self.layers)
        output_states: List[GraphLSTMState] = []
        output = input
        # Manual index instead of enumerate: kept TorchScript-friendly.
        i = 0
        for rnn_layer in self.layers:
            state = states[i]
            steps = output.unbind(1)
            collected: List[torch.Tensor] = []
            for t, step_input in enumerate(steps):
                out, state = rnn_layer(step_input, state, t_i + t)
                collected.append(out)
            output = self.dropout(torch.stack(collected, dim=1))
            output_states.append(state)
            i += 1
        return output, output_states
def StaticGraphLSTM(*args, **kwargs):
    """Factory: build a ``StaticGraphLSTM_`` and compile it with TorchScript."""
    module = StaticGraphLSTM_(*args, **kwargs)
    return torch.jit.script(module)
# (hidden state, graph influence); either may be None before the first step.
GraphGRUState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]
class StaticGraphGRUCell_(Module):
    def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
                 recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
                 learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
                 learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
                 weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
        """
        GRU cell whose gate pre-activations are mixed across graph nodes by an
        influence matrix ``G``; ``G`` may be learned and is optionally updated by
        an additive matrix ``G_add`` after every step.

        :param input_size: The number of expected features in the input `x`
        :param hidden_size: The number of features in the hidden state `h`
        :param num_nodes: Number of graph nodes; inferred from ``graph_influence`` when given there.
        :param dropout: Dropout on the input-to-hidden contribution.
        :param recurrent_dropout: Dropout on the previous hidden state.
        :param graph_influence: Initial influence matrix (identity when omitted).
        :param learn_influence: If True, ``G`` is a learnable Parameter.
        :param additive_graph_influence: Matrix added to ``G`` after each step.
        :param learn_additive_graph_influence: If True, the additive matrix is learnable.
        :param node_types: Per-node type ids; nodes with the same type share weights.
        :param weights_per_type: Give each node its own weights when ``node_types`` is None.
        :param clockwork: Clockwork-RNN style masking: hidden units update at different rates.
        :param bias: If False, no additive biases are created.
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.learn_influence = learn_influence
        self.learn_additive_graph_influence = learn_additive_graph_influence
        # Influence matrix G: supplied (Parameter or tensor) or identity fallback.
        if graph_influence is not None:
            assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
            num_nodes = graph_influence.shape[0]
            if type(graph_influence) is Parameter:
                assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
                self.G = graph_influence
            elif learn_influence:
                self.G = Parameter(graph_influence)
            else:
                self.register_buffer('G', graph_influence)
        else:
            assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
            eye_influence = torch.eye(num_nodes, num_nodes)
            if learn_influence:
                self.G = Parameter(eye_influence)
            else:
                self.register_buffer('G', eye_influence)
        # Additive influence update G_add (the scalar 0. disables the update).
        if additive_graph_influence is not None:
            if type(additive_graph_influence) is Parameter:
                self.G_add = additive_graph_influence
            elif learn_additive_graph_influence:
                self.G_add = Parameter(additive_graph_influence)
            else:
                self.register_buffer('G_add', additive_graph_influence)
        else:
            if learn_additive_graph_influence:
                self.G_add = Parameter(torch.zeros_like(self.G))
            else:
                self.G_add = 0.
        # Gate weights: 3-D (per node type) or plain 2-D shared weights.
        if weights_per_type and node_types is None:
            node_types = torch.tensor([i for i in range(num_nodes)])
        if node_types is not None:
            num_node_types = node_types.max() + 1
            self.weight_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, input_size))
            self.weight_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, hidden_size))
            self.mm = gmm
            self.register_buffer('node_type_index', node_types)
        else:
            self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))
            self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size))
            self.mm = torch.matmul
            self.register_buffer('node_type_index', None)
        if bias:
            if node_types is not None:
                self.bias_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
                self.bias_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
            else:
                self.bias_ih = Parameter(torch.Tensor(3 * hidden_size))
                self.bias_hh = Parameter(torch.Tensor(3 * hidden_size))
        else:
            self.bias_ih = None
            self.bias_hh = None
        self.clockwork = clockwork
        # Clockwork phases: each hidden unit gets an update period in 1..8;
        # without clockwork every unit has period 1 (updates every step).
        if clockwork:
            phase = torch.arange(0., hidden_size)
            phase = phase - phase.min()
            phase = (phase / phase.max()) * 8.
            phase += 1.
            phase = torch.floor(phase)
            self.register_buffer('phase', phase)
        else:
            phase = torch.ones(hidden_size)
            self.register_buffer('phase', phase)
        self.dropout = Dropout(dropout)
        self.r_dropout = Dropout(recurrent_dropout)
        self.num_nodes = num_nodes
        self.init_weights()
    def init_weights(self):
        # Uniform init for everything except the influence matrices.
        # NOTE(review): unlike the LSTM cell, the per-type row copy
        # (weight.data[1:] = weight.data[0]) is disabled here — confirm intentional.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            if weight is self.G:
                continue
            if weight is self.G_add:
                continue
            weight.data.uniform_(-stdv, stdv)
    def forward(self, input: torch.Tensor, state: GraphGRUState, t: int = 0) -> Tuple[torch.Tensor, GraphGRUState]:
        # One GRU step. input: (batch, num_nodes, input_size); state: (h, g),
        # either of which may be None initially; t drives the clockwork mask only.
        hx, gx = state
        if hx is None:
            hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
        if gx is None and self.learn_influence:
            # L1-normalize the learned influence matrix.
            gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
        elif gx is None:
            gx = self.G
        hx = self.r_dropout(hx)
        weight_ih = self.weight_ih[self.node_type_index]
        weight_hh = self.weight_hh[self.node_type_index]
        if self.bias_hh is not None:
            bias_hh = self.bias_hh[self.node_type_index]
        else:
            bias_hh = 0.
        if self.bias_ih is not None:
            bias_ih = self.bias_ih[self.node_type_index]
        else:
            bias_ih = 0.
        # Units whose period divides t+1 update this step (mask 1); others hold.
        c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(hx)
        x_results = self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) + bias_ih
        h_results = self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh
        # Mix gate pre-activations across nodes via the influence matrix.
        x_results = torch.matmul(gx, x_results)
        h_results = torch.matmul(gx, h_results)
        i_r, i_z, i_n = x_results.chunk(3, 2)
        h_r, h_z, h_n = h_results.chunk(3, 2)
        r = torch.sigmoid(i_r + h_r)
        z = torch.sigmoid(i_z + h_z)
        n = torch.tanh(i_n + r * h_n)
        # Standard GRU blend: (1 - z) * n + z * hx.
        hy = n - torch.mul(n, z) + torch.mul(z, hx)
        hy = c_mask * hy + (1 - c_mask) * hx
        # Evolve the influence matrix for the next step.
        gx = gx + self.G_add
        if self.learn_influence or self.learn_additive_graph_influence:
            gx = torch.nn.functional.normalize(gx, p=1., dim=1)
        return hy, (hy, gx)
class StaticGraphGRU_(Module):
    """Multi-layer static-graph GRU over (batch, time, nodes, features) sequences."""

    def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
        super().__init__()
        first_cell = StaticGraphGRUCell_(input_size, hidden_size, **kwargs)
        deeper_cells = [StaticGraphGRUCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)]
        self.layers = ModuleList([first_cell] + deeper_cells)
        self.dropout = Dropout(layer_dropout)

    def forward(self, input: torch.Tensor, states: Optional[List[GraphGRUState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphGRUState]]:
        if states is None:
            n: Optional[torch.Tensor] = None
            states = [(n, n)] * len(self.layers)
        output_states: List[GraphGRUState] = []
        output = input
        # Manual index instead of enumerate: kept TorchScript-friendly.
        i = 0
        for rnn_layer in self.layers:
            state = states[i]
            steps = output.unbind(1)
            collected: List[torch.Tensor] = []
            for t, step_input in enumerate(steps):
                out, state = rnn_layer(step_input, state, t_i + t)
                collected.append(out)
            output = self.dropout(torch.stack(collected, dim=1))
            output_states.append(state)
            i += 1
        return output, output_states
def StaticGraphGRU(*args, **kwargs):
    """Factory: build a ``StaticGraphGRU_`` and compile it with TorchScript."""
    module = StaticGraphGRU_(*args, **kwargs)
    return torch.jit.script(module)
| 42.067179
| 149
| 0.60793
|
from typing import Tuple, Optional, List, Union
import torch
from torch.nn import *
import math
def gmm(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
return torch.einsum('ndo,bnd->bno', w, x)
class GraphLinear(Module):
def __init__(self, in_features: int, out_features: int):
super().__init__()
self.in_features = in_features
self.out_features = out_features
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if len(self.weight.shape) == 3:
self.weight.data[1:] = self.weight.data[0]
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: torch.Tensor, g: Optional[torch.Tensor] = None) -> torch.Tensor:
if g is None and self.learn_influence:
g = torch.nn.functional.normalize(self.G, p=1., dim=1)
elif g is None:
g = self.G
w = self.weight[self.node_type_index]
output = self.mm(input, w.transpose(-2, -1))
if self.bias is not None:
bias = self.bias[self.node_type_index]
output += bias
output = g.matmul(output)
return output
class DynamicGraphLinear(GraphLinear):
def __init__(self, num_node_types: int = 1, *args):
super().__init__(*args)
def forward(self, input: torch.Tensor, g: torch.Tensor = None, t: torch.Tensor = None) -> torch.Tensor:
assert g is not None or t is not None, "Either Graph Influence Matrix or Node Type Vector is needed"
if g is None:
g = self.G[t][:, t]
return super().forward(input, g)
class StaticGraphLinear(GraphLinear):
def __init__(self, *args, bias: bool = True, num_nodes: int = None, graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, node_types: torch.Tensor = None, weights_per_type: bool = False):
super().__init__(*args)
self.learn_influence = learn_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight = Parameter(torch.Tensor(num_node_types, self.out_features, self.in_features))
self.mm = gmm
self.node_type_index = node_types
else:
self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))
self.mm = torch.matmul
self.node_type_index = None
if bias:
if node_types is not None:
self.bias = Parameter(torch.Tensor(num_node_types, self.out_features))
else:
self.bias = Parameter(torch.Tensor(self.out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
GraphLSTMState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]
class BN(Module):
def __init__(self, num_nodes, num_features):
super().__init__()
self.num_nodes = num_nodes
self.num_features = num_features
self.bn = BatchNorm1d(num_nodes * num_features)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.bn(x.view(-1, self.num_nodes * self.num_features)).view(-1, self.num_nodes, self.num_features)
class LinearX(Module):
def __init__(self):
super().__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input
class StaticGraphLSTMCell_(Module):
def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.learn_influence = learn_influence
self.learn_additive_graph_influence = learn_additive_graph_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if additive_graph_influence is not None:
if type(additive_graph_influence) is Parameter:
self.G_add = additive_graph_influence
elif learn_additive_graph_influence:
self.G_add = Parameter(additive_graph_influence)
else:
self.register_buffer('G_add', additive_graph_influence)
else:
if learn_additive_graph_influence:
self.G_add = Parameter(torch.zeros_like(self.G))
else:
self.G_add = 0.
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, hidden_size))
self.mm = gmm
self.register_buffer('node_type_index', node_types)
else:
self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))
self.mm = torch.matmul
self.register_buffer('node_type_index', None)
if bias:
if node_types is not None:
self.bias_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
else:
self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))
else:
self.bias_ih = None
self.bias_hh = None
self.clockwork = clockwork
if clockwork:
phase = torch.arange(0., hidden_size)
phase = phase - phase.min()
phase = (phase / phase.max()) * 8.
phase += 1.
phase = torch.floor(phase)
self.register_buffer('phase', phase)
else:
phase = torch.ones(hidden_size)
self.register_buffer('phase', phase)
self.dropout = Dropout(dropout)
self.r_dropout = Dropout(recurrent_dropout)
self.num_nodes = num_nodes
self.init_weights()
def init_weights(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
if weight is self.G:
continue
if weight is self.G_add:
continue
weight.data.uniform_(-stdv, stdv)
if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:
weight.data[1:] = weight.data[0]
def forward(self, input: torch.Tensor, state: GraphLSTMState, t: int = 0) -> Tuple[torch.Tensor, GraphLSTMState]:
hx, cx, gx = state
if hx is None:
hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if cx is None:
cx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if gx is None and self.learn_influence:
gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
elif gx is None:
gx = self.G
hx = self.r_dropout(hx)
weight_ih = self.weight_ih[self.node_type_index]
weight_hh = self.weight_hh[self.node_type_index]
if self.bias_hh is not None:
bias_hh = self.bias_hh[self.node_type_index]
else:
bias_hh = 0.
c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(cx)
gates = (self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) +
self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh)
gates = torch.matmul(gx, gates)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 2)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = c_mask * ((forgetgate * cx) + (ingate * cellgate)) + (1 - c_mask) * cx
hy = outgate * torch.tanh(cy)
gx = gx + self.G_add
if self.learn_influence or self.learn_additive_graph_influence:
gx = torch.nn.functional.normalize(gx, p=1., dim=1)
return hy, (hy, cy, gx)
class StaticGraphLSTM_(Module):
def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
super().__init__()
self.layers = ModuleList([StaticGraphLSTMCell_(input_size, hidden_size, **kwargs)]
+ [StaticGraphLSTMCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])
self.dropout = Dropout(layer_dropout)
def forward(self, input: torch.Tensor, states: Optional[List[GraphLSTMState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphLSTMState]]:
if states is None:
n: Optional[torch.Tensor] = None
states = [(n, n, n)] * len(self.layers)
output_states: List[GraphLSTMState] = []
output = input
i = 0
for rnn_layer in self.layers:
state = states[i]
inputs = output.unbind(1)
outputs: List[torch.Tensor] = []
for t, input in enumerate(inputs):
out, state = rnn_layer(input, state, t_i+t)
outputs += [out]
output = torch.stack(outputs, dim=1)
output = self.dropout(output)
output_states += [state]
i += 1
return output, output_states
def StaticGraphLSTM(*args, **kwargs):
return torch.jit.script(StaticGraphLSTM_(*args, **kwargs))
GraphGRUState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]
class StaticGraphGRUCell_(Module):
def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.learn_influence = learn_influence
self.learn_additive_graph_influence = learn_additive_graph_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if additive_graph_influence is not None:
if type(additive_graph_influence) is Parameter:
self.G_add = additive_graph_influence
elif learn_additive_graph_influence:
self.G_add = Parameter(additive_graph_influence)
else:
self.register_buffer('G_add', additive_graph_influence)
else:
if learn_additive_graph_influence:
self.G_add = Parameter(torch.zeros_like(self.G))
else:
self.G_add = 0.
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, hidden_size))
self.mm = gmm
self.register_buffer('node_type_index', node_types)
else:
self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size))
self.mm = torch.matmul
self.register_buffer('node_type_index', None)
if bias:
if node_types is not None:
self.bias_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
else:
self.bias_ih = Parameter(torch.Tensor(3 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(3 * hidden_size))
else:
self.bias_ih = None
self.bias_hh = None
self.clockwork = clockwork
if clockwork:
phase = torch.arange(0., hidden_size)
phase = phase - phase.min()
phase = (phase / phase.max()) * 8.
phase += 1.
phase = torch.floor(phase)
self.register_buffer('phase', phase)
else:
phase = torch.ones(hidden_size)
self.register_buffer('phase', phase)
self.dropout = Dropout(dropout)
self.r_dropout = Dropout(recurrent_dropout)
self.num_nodes = num_nodes
self.init_weights()
def init_weights(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
if weight is self.G:
continue
if weight is self.G_add:
continue
weight.data.uniform_(-stdv, stdv)
def forward(self, input: torch.Tensor, state: GraphGRUState, t: int = 0) -> Tuple[torch.Tensor, GraphGRUState]:
    """Single graph-GRU step over all nodes.

    :param input: assumed shape (batch, num_nodes, input_size) — confirm with caller.
    :param state: (hidden, influence) pair; either entry may be None and is
        then initialized here.
    :param t: time step index, used only by the clockwork update mask.
    :return: (new_hidden, (new_hidden, new_influence)).
    """
    hx, gx = state
    if hx is None:
        # Lazily create an all-zero hidden state sized from the batch dim.
        hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
    if gx is None and self.learn_influence:
        # L1-normalize rows of the learned influence matrix.
        gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
    elif gx is None:
        gx = self.G
    hx = self.r_dropout(hx)
    # Select per-node-type weights.  When node_type_index is the None buffer
    # (single shared weight set), tensor[None] merely inserts a leading
    # broadcast dimension, so the matmuls below still work.
    weight_ih = self.weight_ih[self.node_type_index]
    weight_hh = self.weight_hh[self.node_type_index]
    if self.bias_hh is not None:
        bias_hh = self.bias_hh[self.node_type_index]
    else:
        bias_hh = 0.
    if self.bias_ih is not None:
        bias_ih = self.bias_ih[self.node_type_index]
    else:
        bias_ih = 0.
    # Clockwork gating: a hidden unit with phase p only updates when (t + 1)
    # is a multiple of p (0.01 tolerance against float remainder error).
    c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(hx)
    # Input/hidden projections, then mixing across nodes through gx.
    x_results = self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) + bias_ih
    h_results = self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh
    x_results = torch.matmul(gx, x_results)
    h_results = torch.matmul(gx, h_results)
    # Standard GRU gates: reset r, update z, candidate n.
    i_r, i_z, i_n = x_results.chunk(3, 2)
    h_r, h_z, h_n = h_results.chunk(3, 2)
    r = torch.sigmoid(i_r + h_r)
    z = torch.sigmoid(i_z + h_z)
    n = torch.tanh(i_n + r * h_n)
    hy = n - torch.mul(n, z) + torch.mul(z, hx)  # == (1 - z) * n + z * hx
    # Units masked out by the clockwork schedule keep their previous value.
    hy = c_mask * hy + (1 - c_mask) * hx
    # Evolve the influence matrix; re-normalize when it is being learned.
    gx = gx + self.G_add
    if self.learn_influence or self.learn_additive_graph_influence:
        gx = torch.nn.functional.normalize(gx, p=1., dim=1)
    return hy, (hy, gx)
class StaticGraphGRU_(Module):
    """Stacked graph GRU: a chain of StaticGraphGRUCell_ layers unrolled over
    the time dimension of the input.

    Written to remain TorchScript-scriptable (see the StaticGraphGRU
    factory) — hence the explicit type annotations and the manual layer
    index counter instead of enumerate().
    """

    def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
        super().__init__()
        # First layer maps input_size -> hidden_size; the rest are hidden -> hidden.
        self.layers = ModuleList([StaticGraphGRUCell_(input_size, hidden_size, **kwargs)]
                                 + [StaticGraphGRUCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])
        self.dropout = Dropout(layer_dropout)

    def forward(self, input: torch.Tensor, states: Optional[List[GraphGRUState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphGRUState]]:
        """Run the whole sequence through every layer.

        Dim 1 of `input` is treated as time (it is unbound per step).  `t_i`
        is the absolute time offset forwarded to the cells for their
        clockwork masks.  Returns the last layer's outputs stacked over time
        and each layer's final (hidden, influence) state.
        """
        if states is None:
            n: Optional[torch.Tensor] = None
            states = [(n, n)] * len(self.layers)
        output_states: List[GraphGRUState] = []
        output = input
        i = 0  # manual counter: TorchScript-friendly layer enumeration
        for rnn_layer in self.layers:
            state = states[i]
            inputs = output.unbind(1)
            outputs: List[torch.Tensor] = []
            for t, input in enumerate(inputs):
                out, state = rnn_layer(input, state, t_i + t)
                outputs += [out]
            output = torch.stack(outputs, dim=1)
            output = self.dropout(output)
            output_states += [state]
            i += 1
        return output, output_states
def StaticGraphGRU(*args, **kwargs):
    """Factory: build a StaticGraphGRU_ and compile it with TorchScript."""
    return torch.jit.script(StaticGraphGRU_(*args, **kwargs))
| true
| true
|
f708b0b4a2fca5c6fdf52063180014ee9d3f169f
| 3,060
|
py
|
Python
|
htmltreediff/test_util.py
|
PolicyStat/htmltreediff
|
8065e39653ac85647a2d8d1f4acf6e2fbb862b17
|
[
"BSD-3-Clause"
] | 3
|
2015-04-04T20:35:17.000Z
|
2021-08-06T16:51:09.000Z
|
htmltreediff/test_util.py
|
tex/htmltreediff
|
ce5a94edd0cfb05ed5130aaed3f06c63668df127
|
[
"BSD-3-Clause"
] | 14
|
2015-01-15T16:03:14.000Z
|
2020-03-23T16:29:02.000Z
|
htmltreediff/test_util.py
|
tex/htmltreediff
|
ce5a94edd0cfb05ed5130aaed3f06c63668df127
|
[
"BSD-3-Clause"
] | 2
|
2017-05-16T04:17:46.000Z
|
2018-04-30T20:05:32.000Z
|
from htmltreediff.diff_core import Differ
from htmltreediff.edit_script_runner import EditScriptRunner
from htmltreediff.changes import (
split_text_nodes,
sort_del_before_ins,
_strip_changes_new,
_strip_changes_old,
)
from htmltreediff.util import (
minidom_tostring,
node_compare,
parse_minidom,
remove_dom_attributes,
walk_dom,
)
def reverse_edit_script(edit_script):
    """Build the inverse edit script: actions in reversed order with
    'delete' and 'insert' swapped.  None input yields None.
    """
    if edit_script is None:
        return None
    # Unknown actions map to None, matching the fall-through of the old
    # if/elif helper.
    opposite = {'delete': 'insert', 'insert': 'delete'}
    return [
        (opposite.get(action), location, node_properties)
        for action, location, node_properties in reversed(edit_script)
    ]
def reverse_changes_html(changes):
    """Return the HTML diff string with <del>/<ins> roles swapped.

    String-level convenience wrapper around reverse_changes().
    """
    dom = parse_minidom(changes)
    reverse_changes(dom)
    return minidom_tostring(dom)
def reverse_changes(dom):
    """Swap every <del> tag with <ins> (and vice versa) in place, then
    restore the del-before-ins ordering invariant.
    """
    swap = {'del': 'ins', 'ins': 'del'}
    # Collect both tag lists up front so renaming does not affect the scan.
    affected = dom.getElementsByTagName('del') + dom.getElementsByTagName('ins')
    for node in affected:
        node.tagName = swap[node.tagName]
    sort_del_before_ins(dom)
def get_edit_script(old_html, new_html):
    """Compute the edit script that transforms old_html into new_html.

    Both documents get the same split_text_nodes preprocessing before
    diffing so node positions are computed over identical granularity.
    """
    old_dom = parse_minidom(old_html)
    new_dom = parse_minidom(new_html)
    split_text_nodes(old_dom)
    split_text_nodes(new_dom)
    differ = Differ(old_dom, new_dom)
    return differ.get_edit_script()
def html_patch(old_html, edit_script):
    """Apply edit_script to old_html and return the resulting HTML string.

    Mirrors the split_text_nodes preprocessing done in get_edit_script so
    the script's node locations line up.
    """
    old_dom = parse_minidom(old_html)
    split_text_nodes(old_dom)
    runner = EditScriptRunner(old_dom, edit_script)
    return minidom_tostring(runner.run_edit_script())
def strip_changes_old(html):
    """Parse html, apply _strip_changes_old in place, serialize back."""
    dom = parse_minidom(html)
    _strip_changes_old(dom)
    return minidom_tostring(dom)
def strip_changes_new(html):
    """Parse html, apply _strip_changes_new in place, serialize back."""
    dom = parse_minidom(html)
    _strip_changes_new(dom)
    return minidom_tostring(dom)
def remove_attributes(html):
    """Return html with all element attributes stripped (for comparisons)."""
    dom = parse_minidom(html)
    remove_dom_attributes(dom)
    return minidom_tostring(dom)
def collapse(html):
    """Remove any indentation and newlines from the html."""
    pieces = (line.strip() for line in html.split('\n'))
    return ''.join(pieces).strip()
class Case(object):
    """Plain attribute container for one diff test specification."""
    pass
def parse_cases(cases):
    """Yield one Case per raw spec tuple.

    A 4-tuple is (name, old_html, new_html, target_changes); a 5-tuple
    additionally carries an explicit edit_script (otherwise None).
    """
    for spec in cases:
        if len(spec) not in (4, 5):
            raise ValueError('Invalid test spec: %r' % (spec,))
        case = Case()
        case.name, case.old_html, case.new_html, case.target_changes = spec[:4]
        case.edit_script = spec[4] if len(spec) == 5 else None
        yield case
def test_node_compare():
    """node_compare must order <del> elements before <ins> elements."""
    # walk_dom yields the document tree; the last node is the element itself.
    del_node = list(walk_dom(parse_minidom('<del/>')))[-1]
    ins_node = list(walk_dom(parse_minidom('<ins/>')))[-1]
    assert -1 == node_compare(del_node, ins_node)
    assert 1 == node_compare(ins_node, del_node)
| 25.932203
| 79
| 0.658824
|
from htmltreediff.diff_core import Differ
from htmltreediff.edit_script_runner import EditScriptRunner
from htmltreediff.changes import (
split_text_nodes,
sort_del_before_ins,
_strip_changes_new,
_strip_changes_old,
)
from htmltreediff.util import (
minidom_tostring,
node_compare,
parse_minidom,
remove_dom_attributes,
walk_dom,
)
def reverse_edit_script(edit_script):
if edit_script is None:
return None
def opposite_action(action):
if action == 'delete':
return 'insert'
elif action == 'insert':
return 'delete'
reverse_script = []
for action, location, node_properties in reversed(edit_script):
reverse_script.append(
(opposite_action(action), location, node_properties),
)
return reverse_script
def reverse_changes_html(changes):
dom = parse_minidom(changes)
reverse_changes(dom)
return minidom_tostring(dom)
def reverse_changes(dom):
nodes = dom.getElementsByTagName('del') + dom.getElementsByTagName('ins')
for node in nodes:
if node.tagName == 'del':
node.tagName = 'ins'
elif node.tagName == 'ins':
node.tagName = 'del'
sort_del_before_ins(dom)
def get_edit_script(old_html, new_html):
old_dom = parse_minidom(old_html)
new_dom = parse_minidom(new_html)
split_text_nodes(old_dom)
split_text_nodes(new_dom)
differ = Differ(old_dom, new_dom)
return differ.get_edit_script()
def html_patch(old_html, edit_script):
old_dom = parse_minidom(old_html)
split_text_nodes(old_dom)
runner = EditScriptRunner(old_dom, edit_script)
return minidom_tostring(runner.run_edit_script())
def strip_changes_old(html):
dom = parse_minidom(html)
_strip_changes_old(dom)
return minidom_tostring(dom)
def strip_changes_new(html):
dom = parse_minidom(html)
_strip_changes_new(dom)
return minidom_tostring(dom)
def remove_attributes(html):
dom = parse_minidom(html)
remove_dom_attributes(dom)
return minidom_tostring(dom)
def collapse(html):
return ''.join([line.strip() for line in html.split('\n')]).strip()
class Case(object):
pass
def parse_cases(cases):
for args in cases:
case = Case()
if len(args) == 4:
case.name, case.old_html, case.new_html, case.target_changes = args
case.edit_script = None
elif len(args) == 5:
(
case.name,
case.old_html,
case.new_html,
case.target_changes,
case.edit_script,
) = args
else:
raise ValueError('Invalid test spec: %r' % (args,))
yield case
def test_node_compare():
del_node = list(walk_dom(parse_minidom('<del/>')))[-1]
ins_node = list(walk_dom(parse_minidom('<ins/>')))[-1]
assert -1 == node_compare(del_node, ins_node)
assert 1 == node_compare(ins_node, del_node)
| true
| true
|
f708b1c76df52ba9d7f3092ae8e625da432ba56c
| 518
|
py
|
Python
|
SimPEG/electromagnetics/natural_source/__init__.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | 1
|
2022-02-18T16:31:27.000Z
|
2022-02-18T16:31:27.000Z
|
SimPEG/electromagnetics/natural_source/__init__.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | null | null | null |
SimPEG/electromagnetics/natural_source/__init__.py
|
ElliotCheung/simpeg
|
ce5bde154179ca63798a62a12787a7ec3535472c
|
[
"MIT"
] | null | null | null |
""" module SimPEG.electromagnetics.natural_source
SimPEG implementation of the natural source problem
(including magenetotelluric, tipper and ZTEM)
"""
from . import utils
from . import sources as Src
from . import receivers as Rx
from .survey import Survey, Data
from .fields import Fields1DPrimarySecondary, Fields3DPrimarySecondary
from .simulation import Simulation1DPrimarySecondary, Simulation3DPrimarySecondary
from . import sources
from . import receivers
from .simulation_1d import Simulation1DRecursive
| 27.263158
| 82
| 0.832046
|
from . import utils
from . import sources as Src
from . import receivers as Rx
from .survey import Survey, Data
from .fields import Fields1DPrimarySecondary, Fields3DPrimarySecondary
from .simulation import Simulation1DPrimarySecondary, Simulation3DPrimarySecondary
from . import sources
from . import receivers
from .simulation_1d import Simulation1DRecursive
| true
| true
|
f708b24eec80e943958c6c09ca5f6ea763affe71
| 7,182
|
py
|
Python
|
uuv_teleop/scripts/vehicle_keyboard_teleop.py
|
pengzhi1998/uuv_simulator
|
42d276fd1cb4cd8ad3166b9d2b434543411c6fdd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-10-20T09:20:34.000Z
|
2021-10-20T09:20:34.000Z
|
uuv_teleop/scripts/vehicle_keyboard_teleop.py
|
pengzhi1998/uuv_simulator
|
42d276fd1cb4cd8ad3166b9d2b434543411c6fdd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
uuv_teleop/scripts/vehicle_keyboard_teleop.py
|
pengzhi1998/uuv_simulator
|
42d276fd1cb4cd8ad3166b9d2b434543411c6fdd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import time
import sys, select, termios, tty
import rospy
import numpy as np
from std_msgs.msg import Bool
from geometry_msgs.msg import Twist, Accel, Vector3
class KeyBoardVehicleTeleop:
    """Keyboard teleoperation node.

    Polls the terminal for single key presses and publishes the resulting
    velocity (Twist) or acceleration (Accel) command on the 'output' topic.
    NOTE(review): the constructor enters the polling loop and never returns
    while the node is running.
    """

    def __init__(self):
        # Terminal settings, saved so raw mode can be undone after each read.
        self.settings = termios.tcgetattr(sys.stdin)
        # Speed setting
        self.speed = 1  # 1 = Slow, 2 = Fast
        self.l = Vector3(0, 0, 0)  # linear velocity to publish
        self.a = Vector3(0, 0, 0)  # angular velocity to publish
        self.linear_increment = 0.05  # per key press, to avoid jerkyness
        self.linear_limit = 0.2  # linear velocity limit = self.linear_limit * self.speed
        self.angular_increment = 0.05
        self.angular_limit = 0.25
        # User interface text printed once at startup.
        self.msg = """
Control Your Vehicle!
---------------------------
Moving around:
W/S: X-Axis
A/D: Y-Axis
X/Z: Z-Axis
Q/E: Yaw
I/K: Pitch
J/L: Roll
Slow / Fast: 1 / 2
CTRL-C to quit
"""
        # Default message type remains twist unless the ~type param says accel.
        self._msg_type = 'twist'
        if rospy.has_param('~type'):
            self._msg_type = rospy.get_param('~type')
            if self._msg_type not in ['twist', 'accel']:
                raise rospy.ROSException('Teleoperation output must be either '
                                         'twist or accel')
        # Publisher type follows the configured message type.
        if self._msg_type == 'twist':
            self._output_pub = rospy.Publisher('output', Twist, queue_size=1)
        else:
            self._output_pub = rospy.Publisher('output', Accel, queue_size=1)
        print(self.msg)
        # Poll the keyboard at 50 Hz until ROS shutdown.
        rate = rospy.Rate(50)  # 50hz
        while not rospy.is_shutdown():
            rate.sleep()
            self._parse_keyboard()

    # Every spin this function will return the key being pressed.
    # Only works for one key per spin currently; exploring alternatives.
    def _get_key(self):
        """Return one pressed key, or '' if none arrives within 0.1 s."""
        tty.setraw(sys.stdin.fileno())
        rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
        if rlist:
            key = sys.stdin.read(1)
        else:
            key = ''
        # Restore cooked mode so normal terminal output stays readable.
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
        return key

    def _speed_windup(self, speed, increment, limit, reverse):
        """Gradually ramp `speed` up (or down when reverse) to avoid
        jerkyness, clamped to +/- limit * self.speed."""
        if reverse == True:
            speed -= increment * self.speed
            if speed < -limit * self.speed:
                speed = -limit * self.speed
        else:
            speed += increment * self.speed
            if speed > limit * self.speed:
                speed = limit * self.speed
        return speed

    def _parse_keyboard(self):
        """Read one key and publish the corresponding command message."""
        # Save key being pressed
        key_press = self._get_key()
        # Vehicle speed scale
        if key_press == "1":
            self.speed = 1
        if key_press == "2":
            self.speed = 2
        # Choose ROS message according to the configured output type.
        if self._msg_type == 'twist':
            cmd = Twist()
        else:
            cmd = Accel()
        if key_press != '':
            # Linear velocities:
            if key_press == "w":  # forward
                self.l.x = self._speed_windup(self.l.x, self.linear_increment, self.linear_limit, False)
            if key_press == "s":  # backwards
                self.l.x = self._speed_windup(self.l.x, self.linear_increment, self.linear_limit, True)
            if key_press == "a":  # left
                self.l.y = self._speed_windup(self.l.y, self.linear_increment, self.linear_limit, False)
            if key_press == "d":  # right
                self.l.y = self._speed_windup(self.l.y, self.linear_increment, self.linear_limit, True)
            if key_press == "x":  # up
                self.l.z = self._speed_windup(self.l.z, self.linear_increment, self.linear_limit, False)
            if key_press == "z":  # down
                self.l.z = self._speed_windup(self.l.z, self.linear_increment, self.linear_limit, True)
            # Angular velocities.  NOTE(review): roll (j/l) and pitch (i/k)
            # use the *linear* increment/limit constants while yaw (q/e) uses
            # the angular ones — confirm whether that is intentional.
            if key_press == "j":  # roll left
                self.a.x = self._speed_windup(self.a.x, self.linear_increment, self.linear_limit, True)
            if key_press == "l":  # roll right
                self.a.x = self._speed_windup(self.a.x, self.linear_increment, self.linear_limit, False)
            if key_press == "i":  # pitch down
                self.a.y = self._speed_windup(self.a.y, self.linear_increment, self.linear_limit, False)
            if key_press == "k":  # pitch up
                self.a.y = self._speed_windup(self.a.y, self.linear_increment, self.linear_limit, True)
            if key_press == "q":  # yaw left
                self.a.z = self._speed_windup(self.a.z, self.angular_increment, self.angular_limit, False)
            if key_press == "e":  # yaw right
                self.a.z = self._speed_windup(self.a.z, self.angular_increment, self.angular_limit, True)
        else:
            # No key pressed: reset velocities to zero.
            self.l = Vector3(0, 0, 0)
            self.a = Vector3(0, 0, 0)
        # Store velocities into the outgoing message.
        cmd.angular = self.a
        cmd.linear = self.l
        # CTRL-C: publish a zero command, then terminate the process.
        if (key_press == '\x03'):
            rospy.loginfo('Keyboard Interrupt Pressed')
            # NOTE(review): node_name is a module-level global that only
            # exists when this file runs as a script; calling this method
            # from another entry point would raise NameError here.
            rospy.loginfo('Shutting down [%s] node' % node_name)
            cmd.angular = Vector3(0, 0, 0)
            cmd.linear = Vector3(0, 0, 0)
            self._output_pub.publish(cmd)
            exit(-1)
        # Publish message
        self._output_pub.publish(cmd)
if __name__ == '__main__':
    # Wait 5 seconds so the teleop instructions are the last thing printed.
    time.sleep(5)
    # Start the node.
    node_name = os.path.splitext(os.path.basename(__file__))[0]
    rospy.init_node(node_name)
    rospy.loginfo('Starting [%s] node' % node_name)
    # Save the terminal settings before the teleop loop puts stdin into raw
    # mode, so they can be restored on shutdown.  The original code passed an
    # undefined module-level name `settings` to tcsetattr, which raised
    # NameError once the teleop loop returned.
    settings = termios.tcgetattr(sys.stdin)
    teleop = KeyBoardVehicleTeleop()
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    rospy.loginfo('Shutting down [%s] node' % node_name)
| 35.91
| 106
| 0.584935
|
from __future__ import print_function
import os
import time
import sys, select, termios, tty
import rospy
import numpy as np
from std_msgs.msg import Bool
from geometry_msgs.msg import Twist, Accel, Vector3
class KeyBoardVehicleTeleop:
def __init__(self):
self.settings = termios.tcgetattr(sys.stdin)
self.speed = 1 self.l = Vector3(0, 0, 0) self.a = Vector3(0, 0, 0) self.linear_increment = 0.05 self.linear_limit = 0.2 self.angular_increment = 0.05
self.angular_limit = 0.25
self.msg = """
Control Your Vehicle!
---------------------------
Moving around:
W/S: X-Axis
A/D: Y-Axis
X/Z: Z-Axis
Q/E: Yaw
I/K: Pitch
J/L: Roll
Slow / Fast: 1 / 2
CTRL-C to quit
"""
self._msg_type = 'twist'
if rospy.has_param('~type'):
self._msg_type = rospy.get_param('~type')
if self._msg_type not in ['twist', 'accel']:
raise rospy.ROSException('Teleoperation output must be either '
'twist or accel')
if self._msg_type == 'twist':
self._output_pub = rospy.Publisher('output', Twist, queue_size=1)
else:
self._output_pub = rospy.Publisher('output', Accel, queue_size=1)
print(self.msg)
rate = rospy.Rate(50) while not rospy.is_shutdown():
rate.sleep()
self._parse_keyboard()
def _get_key(self):
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
return key
def _speed_windup(self, speed, increment, limit, reverse):
if reverse == True:
speed -= increment * self.speed
if speed < -limit * self.speed:
speed = -limit * self.speed
else:
speed += increment * self.speed
if speed > limit * self.speed:
speed = limit * self.speed
return speed
def _parse_keyboard(self):
key_press = self._get_key()
if key_press == "1":
self.speed = 1
if key_press == "2":
self.speed = 2
if self._msg_type == 'twist':
cmd = Twist()
else:
cmd = Accel()
if key_press!='':
if key_press == "w":
self.l.x = self._speed_windup(self.l.x, self.linear_increment, self.linear_limit, False)
if key_press == "s":
self.l.x = self._speed_windup(self.l.x, self.linear_increment, self.linear_limit, True)
if key_press == "a":
self.l.y = self._speed_windup(self.l.y, self.linear_increment, self.linear_limit, False)
if key_press == "d":
self.l.y = self._speed_windup(self.l.y, self.linear_increment, self.linear_limit, True)
if key_press == "x":
self.l.z = self._speed_windup(self.l.z, self.linear_increment, self.linear_limit, False)
if key_press == "z":
self.l.z = self._speed_windup(self.l.z, self.linear_increment, self.linear_limit, True)
if key_press == "j":
self.a.x = self._speed_windup(self.a.x, self.linear_increment, self.linear_limit, True)
if key_press == "l":
self.a.x = self._speed_windup(self.a.x, self.linear_increment, self.linear_limit, False)
if key_press == "i":
self.a.y = self._speed_windup(self.a.y, self.linear_increment, self.linear_limit, False)
if key_press == "k":
self.a.y = self._speed_windup(self.a.y, self.linear_increment, self.linear_limit, True)
if key_press == "q":
self.a.z = self._speed_windup(self.a.z, self.angular_increment, self.angular_limit, False)
if key_press == "e":
self.a.z = self._speed_windup(self.a.z, self.angular_increment, self.angular_limit, True)
else:
self.l = Vector3(0, 0, 0)
self.a = Vector3(0, 0, 0)
cmd.angular = self.a
cmd.linear = self.l
if (key_press == '\x03'):
rospy.loginfo('Keyboard Interrupt Pressed')
rospy.loginfo('Shutting down [%s] node' % node_name)
cmd.angular = Vector3(0, 0, 0)
cmd.linear = Vector3(0, 0, 0)
self._output_pub.publish(cmd)
exit(-1)
self._output_pub.publish(cmd)
if __name__ == '__main__':
time.sleep(5)
node_name = os.path.splitext(os.path.basename(__file__))[0]
rospy.init_node(node_name)
rospy.loginfo('Starting [%s] node' % node_name)
teleop = KeyBoardVehicleTeleop()
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
rospy.loginfo('Shutting down [%s] node' % node_name)
| true
| true
|
f708b3261f5463444587bbbdfaa6a90f62be1e27
| 4,101
|
py
|
Python
|
influxdb_client/domain/variable_links.py
|
MASIFAYUB/influxdb-client-python
|
a067fa5670a6fbc600db2ac4e54e29e1b7124998
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/variable_links.py
|
MASIFAYUB/influxdb-client-python
|
a067fa5670a6fbc600db2ac4e54e29e1b7124998
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/variable_links.py
|
MASIFAYUB/influxdb-client-python
|
a067fa5670a6fbc600db2ac4e54e29e1b7124998
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class VariableLinks(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
    openapi_types (dict): The key is attribute name
    and the value is attribute type.
    attribute_map (dict): The key is attribute name
    and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        '_self': 'str',
        'org': 'str',
        'labels': 'str'
    }
    # Attribute name -> JSON key in the API payload.  '_self' maps to the
    # JSON key 'self', which is unusable as a Python parameter name.
    attribute_map = {
        '_self': 'self',
        'org': 'org',
        'labels': 'labels'
    }

    def __init__(self, _self=None, org=None, labels=None):  # noqa: E501,D401,D403
        """VariableLinks - a model defined in OpenAPI."""  # noqa: E501
        # Backing fields.  `self.__self` name-mangles to
        # `_VariableLinks__self`, deliberately distinct from the `_self`
        # property defined below.
        self.__self = None
        self._org = None
        self._labels = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided.
        if _self is not None:
            self._self = _self
        if org is not None:
            self.org = org
        if labels is not None:
            self.labels = labels

    @property
    def _self(self):
        """Get the _self of this VariableLinks.

        :return: The _self of this VariableLinks.
        :rtype: str
        """  # noqa: E501
        return self.__self

    @_self.setter
    def _self(self, _self):
        """Set the _self of this VariableLinks.

        :param _self: The _self of this VariableLinks.
        :type: str
        """  # noqa: E501
        self.__self = _self

    @property
    def org(self):
        """Get the org of this VariableLinks.

        :return: The org of this VariableLinks.
        :rtype: str
        """  # noqa: E501
        return self._org

    @org.setter
    def org(self, org):
        """Set the org of this VariableLinks.

        :param org: The org of this VariableLinks.
        :type: str
        """  # noqa: E501
        self._org = org

    @property
    def labels(self):
        """Get the labels of this VariableLinks.

        :return: The labels of this VariableLinks.
        :rtype: str
        """  # noqa: E501
        return self._labels

    @labels.setter
    def labels(self, labels):
        """Set the labels of this VariableLinks.

        :param labels: The labels of this VariableLinks.
        :type: str
        """  # noqa: E501
        self._labels = labels

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, VariableLinks):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
| 26.288462
| 159
| 0.547671
|
import pprint
import re
import six
class VariableLinks(object):
openapi_types = {
'_self': 'str',
'org': 'str',
'labels': 'str'
}
attribute_map = {
'_self': 'self',
'org': 'org',
'labels': 'labels'
}
def __init__(self, _self=None, org=None, labels=None): self.__self = None
self._org = None
self._labels = None
self.discriminator = None
if _self is not None:
self._self = _self
if org is not None:
self.org = org
if labels is not None:
self.labels = labels
@property
def _self(self):
return self.__self
@_self.setter
def _self(self, _self):
self.__self = _self
@property
def org(self):
return self._org
@org.setter
def org(self, org):
self._org = org
@property
def labels(self):
return self._labels
@labels.setter
def labels(self, labels):
self._labels = labels
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, VariableLinks):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f708b3c0e401d3177d103ade90529035f7330ba6
| 97
|
py
|
Python
|
tests/run/default_types_fwrap_doctest.py
|
wilsonify/fwrap
|
f2e20eb55eaa3de72905e2ef28198da00eebe262
|
[
"BSD-3-Clause"
] | 23
|
2015-02-25T00:24:15.000Z
|
2021-09-08T01:35:45.000Z
|
tests/run/default_types_fwrap_doctest.py
|
fwrap/fwrap
|
61a56f2d0050096b4973d88e5f11cfac2ef01a4b
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T01:45:02.000Z
|
2021-09-08T01:45:02.000Z
|
tests/run/default_types_fwrap_doctest.py
|
fwrap/fwrap
|
61a56f2d0050096b4973d88e5f11cfac2ef01a4b
|
[
"BSD-3-Clause"
] | 4
|
2015-03-22T01:33:39.000Z
|
2021-09-09T15:25:44.000Z
|
from default_types_fwrap import *
# Doctest module: bar() from the fwrap-generated wrapper is expected to
# return (1, 2.0, 3.0) for these inputs — presumably exercising the default
# Fortran integer/real/double types; confirm against the generator's spec.
__doc__ = u'''
>>> bar(100,200,300) == (1, 2.0, 3.0)
True
'''
| 13.857143
| 37
| 0.597938
|
from default_types_fwrap import *
__doc__ = u'''
>>> bar(100,200,300) == (1, 2.0, 3.0)
True
'''
| true
| true
|
f708b3fb94966bcfac0627771309e72db45d4e20
| 1,538
|
py
|
Python
|
apps/common/models.py
|
kwanj-k/ctrim_api
|
e3ed4afcbcc138400f219f3637b51514e2696e5c
|
[
"MIT"
] | 1
|
2018-03-11T06:08:13.000Z
|
2018-03-11T06:08:13.000Z
|
apps/common/models.py
|
kwanj-k/ctrim_api
|
e3ed4afcbcc138400f219f3637b51514e2696e5c
|
[
"MIT"
] | 4
|
2019-07-22T14:19:35.000Z
|
2022-02-10T09:13:08.000Z
|
apps/common/models.py
|
kwanj-k/ctrim_api
|
e3ed4afcbcc138400f219f3637b51514e2696e5c
|
[
"MIT"
] | null | null | null |
from django.db import models
class CapitalizeField(models.CharField):
    """CharField that capitalizes its value when the model is saved.

    ``str.capitalize`` upper-cases the first character and lower-cases the
    rest.  Falsy values (None, '') fall through to the default CharField
    pre_save behavior.  The redundant pass-through ``__init__`` override of
    the original was removed — it only forwarded to the parent.
    """

    def pre_save(self, model_instance, add):
        """Normalize the field value just before it is written to the DB."""
        value = getattr(model_instance, self.attname, None)
        if value:
            value = value.capitalize()
            # Write the normalized value back so the in-memory instance
            # matches what gets stored.
            setattr(model_instance, self.attname, value)
            return value
        return super(CapitalizeField, self).pre_save(model_instance, add)
class CustomManager(models.Manager):
    """
    Custom manager so as not to return deleted objects
    """

    def get_queryset(self):
        # Exclude rows soft-deleted via AbstractBase.delete().
        return super(CustomManager, self).get_queryset().filter(deleted=False)
class AbstractBase(models.Model):
    """Common base model with timestamps and soft-delete support.

    Every model inherits this class to avoid repetition.  Abstract, hence
    cannot be instantiated directly.
    """
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Soft-delete flag: rows are never removed, only marked deleted.
    deleted = models.BooleanField(
        default=False,
        help_text="This is to make sure deletes are not actual deletes"
    )
    # `everything` will be used to query deleted objects too, e.g. Model.everything.all()
    everything = models.Manager()
    # Default manager hides soft-deleted rows (see CustomManager).
    objects = CustomManager()

    def delete(self, *args, **kwargs):
        # Soft delete: flag the row instead of removing it.
        # NOTE(review): extra args (e.g. Django's `using`, `keep_parents`)
        # are accepted but ignored, and unlike Model.delete this returns
        # None rather than a (count, dict) tuple — confirm callers don't
        # rely on that.
        self.deleted = True
        self.save()

    class Meta:
        ordering = ['-updated_at', '-created_at']
        abstract = True
| 30.156863
| 83
| 0.650845
|
from django.db import models
class CapitalizeField(models.CharField):
def __init__(self, *args, **kwargs):
super(CapitalizeField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname, None)
if value:
value = value.capitalize()
setattr(model_instance, self.attname, value)
return value
else:
return super(CapitalizeField, self).pre_save(model_instance, add)
class CustomManager(models.Manager):
def get_queryset(self):
return super(CustomManager, self).get_queryset().filter(deleted=False)
class AbstractBase(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(
default=False,
help_text="This is to make sure deletes are not actual deletes"
)
everything = models.Manager()
objects = CustomManager()
def delete(self, *args, **kwargs):
self.deleted = True
self.save()
class Meta:
ordering = ['-updated_at', '-created_at']
abstract = True
| true
| true
|
f708b425a838a4e43cb89ce3167062b2ad9a31d7
| 943
|
py
|
Python
|
convert/convert.py
|
qyp1997/leetcoder
|
4c01f11e5138cbb9aa12b4f6ef0c4a60d25b92c2
|
[
"MIT"
] | null | null | null |
convert/convert.py
|
qyp1997/leetcoder
|
4c01f11e5138cbb9aa12b4f6ef0c4a60d25b92c2
|
[
"MIT"
] | null | null | null |
convert/convert.py
|
qyp1997/leetcoder
|
4c01f11e5138cbb9aa12b4f6ef0c4a60d25b92c2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Time : 2020/10/21 10:39
@Auth : Qi
@IDE : PyCharm
@Title: 6. Z 字形变换
@Link : https://leetcode-cn.com/problems/zigzag-conversion/
"""
class Solution:
    def convert(self, s: str, numRows: int) -> str:
        """Return s read row-by-row after laying it out in a zigzag over
        numRows rows (LeetCode 6).

        Non-positive numRows yields ''; a single row returns s unchanged.
        """
        if numRows <= 0:
            return ''
        if numRows == 1:
            return s
        # Walk the string once, bouncing a row cursor between the top and
        # bottom rows, then concatenate the per-row buckets.
        rows = [[] for _ in range(numRows)]
        row, step = 0, 1
        for ch in s:
            rows[row].append(ch)
            if row == 0:
                step = 1
            elif row == numRows - 1:
                step = -1
            row += step
        return ''.join(''.join(bucket) for bucket in rows)
if __name__ == '__main__':
    # Manual smoke test: zigzag of 'ABCDE' over 4 rows should print 'ABCED'.
    s = Solution()
    print(s.convert('ABCDE', 4))
| 24.179487
| 59
| 0.397667
|
class Solution:
def convert(self, s: str, numRows: int) -> str:
if numRows <= 0:
return ''
if numRows == 1:
return s
ret = ''
for i in range(numRows):
tmp = i
time = numRows * 2 - 2
while tmp < len(s):
if i == 0 or i == numRows - 1 and tmp:
ret += s[tmp]
tmp += time
else:
ret += s[tmp]
if tmp + time - i * 2 < len(s):
ret += s[tmp + time - i * 2]
else:
break
tmp += time
return ret
if __name__ == '__main__':
s = Solution()
print(s.convert('ABCDE', 4))
| true
| true
|
f708b523234576243dd72acacbe4d452a5ad4554
| 3,430
|
py
|
Python
|
setup.py
|
stacybrock/nws-wx-client
|
9d557ccf2291e1ebbdb483dcb4fa11b926d5ff94
|
[
"Apache-2.0"
] | 1
|
2019-12-08T16:18:16.000Z
|
2019-12-08T16:18:16.000Z
|
setup.py
|
stacybrock/nws-wx-client
|
9d557ccf2291e1ebbdb483dcb4fa11b926d5ff94
|
[
"Apache-2.0"
] | 4
|
2020-03-24T16:44:22.000Z
|
2021-02-02T21:54:26.000Z
|
setup.py
|
stacybrock/nws-wx-client
|
9d557ccf2291e1ebbdb483dcb4fa11b926d5ff94
|
[
"Apache-2.0"
] | 1
|
2019-03-26T03:01:02.000Z
|
2019-03-26T03:01:02.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Based on Kenneth Reitz's setup.py:
# https://github.com/kennethreitz/setup.py
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'nwswx'
DESCRIPTION = 'A Python 3 client for retrieving data from the NWS Weather Forecast API'
URL = 'https://github.com/stacybrock/nws-wx-client'
EMAIL = 'kalrnux@gmail.com'
AUTHOR = 'Stacy Brock'
REQUIRES_PYTHON = '>=3.4.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'requests',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# ------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=['nwswx'],
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='Apache-2.0',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| 27.007874
| 87
| 0.637609
|
# https://github.com/kennethreitz/setup.py
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'nwswx'
DESCRIPTION = 'A Python 3 client for retrieving data from the NWS Weather Forecast API'
URL = 'https://github.com/stacybrock/nws-wx-client'
EMAIL = 'kalrnux@gmail.com'
AUTHOR = 'Stacy Brock'
REQUIRES_PYTHON = '>=3.4.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'requests',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# ------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=['nwswx'],
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='Apache-2.0',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
cmdclass={
'upload': UploadCommand,
},
)
| true
| true
|
f708b53394c167baaaa7923247a193908ac67370
| 1,028
|
py
|
Python
|
python-client/onesaitplatform/mqttclient/utils.py
|
javieronsurbe/onesait-cloud-platform-clientlibraries
|
832cb058b3144cbe56b1ac2cb88a040573741d66
|
[
"Apache-2.0"
] | 14
|
2019-05-14T13:23:35.000Z
|
2019-12-24T14:49:02.000Z
|
python-client/onesaitplatform/mqttclient/utils.py
|
javieronsurbe/onesait-cloud-platform-clientlibraries
|
832cb058b3144cbe56b1ac2cb88a040573741d66
|
[
"Apache-2.0"
] | 7
|
2019-11-13T09:38:03.000Z
|
2021-04-07T16:24:14.000Z
|
python-client/onesaitplatform/mqttclient/utils.py
|
javieronsurbe/onesait-cloud-platform-clientlibraries
|
832cb058b3144cbe56b1ac2cb88a040573741d66
|
[
"Apache-2.0"
] | 9
|
2019-04-09T15:38:28.000Z
|
2021-03-24T13:10:14.000Z
|
from threading import Event
class Message:
def __init__(self, timeout=10):
self._ready = Event()
self._timeout = timeout
self._response = None
@property
def result(self):
received = self._ready.wait(timeout=self._timeout)
if not received:
raise MqttError("CONNECTION", "No Response Received")
if not self._response['ok']:
raise MqttError(self._response['errorCode'], self._response['error'])
return self._response['data']
@result.setter
def result(self, dato):
self._response = dato
self._ready.set()
def __len__(self):
return len(self.result)
def __getitem__(self, key):
return self.result[key]
def __iter__(self):
return self.result.__iter__()
def __contains__(self, key):
return key in self.result
class MqttError(Exception):
def __init__(self, error_code, description):
self.error_code = error_code
self.description = description
| 25.073171
| 81
| 0.63035
|
from threading import Event
class Message:
def __init__(self, timeout=10):
self._ready = Event()
self._timeout = timeout
self._response = None
@property
def result(self):
received = self._ready.wait(timeout=self._timeout)
if not received:
raise MqttError("CONNECTION", "No Response Received")
if not self._response['ok']:
raise MqttError(self._response['errorCode'], self._response['error'])
return self._response['data']
@result.setter
def result(self, dato):
self._response = dato
self._ready.set()
def __len__(self):
return len(self.result)
def __getitem__(self, key):
return self.result[key]
def __iter__(self):
return self.result.__iter__()
def __contains__(self, key):
return key in self.result
class MqttError(Exception):
def __init__(self, error_code, description):
self.error_code = error_code
self.description = description
| true
| true
|
f708b5fba4d0b3640baa91f053179f9e31692cc9
| 671
|
py
|
Python
|
src/utils/preprocessor.py
|
EternalImmortal/Real-time-emotion-classifier-mini-Xception
|
161f295d4be511f7e4cc700399ca37c48ea81f6a
|
[
"MIT"
] | null | null | null |
src/utils/preprocessor.py
|
EternalImmortal/Real-time-emotion-classifier-mini-Xception
|
161f295d4be511f7e4cc700399ca37c48ea81f6a
|
[
"MIT"
] | null | null | null |
src/utils/preprocessor.py
|
EternalImmortal/Real-time-emotion-classifier-mini-Xception
|
161f295d4be511f7e4cc700399ca37c48ea81f6a
|
[
"MIT"
] | null | null | null |
import numpy as np
# from scipy.misc import imread, imresize
from scipy import misc
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x
def _imread(image_name):
return misc.imread(image_name)
def _imresize(image_array, size):
return misc.imresize(image_array, size)
def to_categorical(integer_classes, num_classes=2):
integer_classes = np.asarray(integer_classes, dtype='int')
num_samples = integer_classes.shape[0]
categorical = np.zeros((num_samples, num_classes))
categorical[np.arange(num_samples), integer_classes] = 1
return categorical
| 23.137931
| 62
| 0.692996
|
import numpy as np
from scipy import misc
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x
def _imread(image_name):
return misc.imread(image_name)
def _imresize(image_array, size):
return misc.imresize(image_array, size)
def to_categorical(integer_classes, num_classes=2):
integer_classes = np.asarray(integer_classes, dtype='int')
num_samples = integer_classes.shape[0]
categorical = np.zeros((num_samples, num_classes))
categorical[np.arange(num_samples), integer_classes] = 1
return categorical
| true
| true
|
f708b63569034b151cc8bc23cfe647bf20e52cb7
| 618
|
py
|
Python
|
var/spack/repos/builtin/packages/py-neurolab/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-neurolab/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/py-neurolab/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyNeurolab(PythonPackage):
"""Simple and powerfull neural network library for python"""
homepage = "http://neurolab.googlecode.com/"
pypi = "neurolab/neurolab-0.3.5.tar.gz"
version('0.3.5', sha256='96ec311988383c63664f3325668f27c30561cf4349e3bc5420665c042a3b9191')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
| 32.526316
| 95
| 0.731392
|
from spack.package import *
class PyNeurolab(PythonPackage):
homepage = "http://neurolab.googlecode.com/"
pypi = "neurolab/neurolab-0.3.5.tar.gz"
version('0.3.5', sha256='96ec311988383c63664f3325668f27c30561cf4349e3bc5420665c042a3b9191')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
| true
| true
|
f708b6dd3656aa570905a5bd46dc4d5ebef18b39
| 7,616
|
py
|
Python
|
conans/test/unittests/client/generators/pkg_config_test.py
|
sigmunjr/conan
|
ce173d25640d5c9cdd62b1c67598291be003633d
|
[
"MIT"
] | 1
|
2020-11-07T21:25:57.000Z
|
2020-11-07T21:25:57.000Z
|
conans/test/unittests/client/generators/pkg_config_test.py
|
ttencate/conan
|
3dc4fb35cc3be9865f0ae480c89e6a58813d5076
|
[
"MIT"
] | null | null | null |
conans/test/unittests/client/generators/pkg_config_test.py
|
ttencate/conan
|
3dc4fb35cc3be9865f0ae480c89e6a58813d5076
|
[
"MIT"
] | null | null | null |
import unittest
from conans.client.conf import get_default_settings_yml
from conans.client.generators.pkg_config import PkgConfigGenerator
from conans.model.build_info import CppInfo
from conans.model.conan_file import ConanFile
from conans.model.env_info import EnvValues
from conans.model.ref import ConanFileReference
from conans.model.settings import Settings
from conans.test.utils.mocks import TestBufferConanOutput
class PkgGeneratorTest(unittest.TestCase):
def variables_setup_test(self):
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "my_pkg"
cpp_info.defines = ["MYDEFINE1"]
cpp_info.cflags.append("-Flag1=23")
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg1/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "MYPKG1"
cpp_info.defines = ["MYDEFINE11"]
cpp_info.cflags.append("-Flag1=21")
cpp_info.version = "1.7"
cpp_info.description = "My other cool description"
cpp_info.public_deps = ["MyPkg"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["MyPkg2.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: MyPkg2
Description: Conan package: MyPkg2
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg
""")
self.assertEqual(files["mypkg1.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: mypkg1
Description: My other cool description
Version: 1.7
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=21 -DMYDEFINE11
Requires: my_pkg
""")
self.assertEqual(files["my_pkg.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg
Description: My cool description
Version: 1.3
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=23 -DMYDEFINE1
""")
def pkg_config_custom_names_test(self):
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "my_pkg"
cpp_info.names["pkg_config"] = "my_pkg_custom_name"
cpp_info.defines = ["MYDEFINE1"]
cpp_info.cflags.append("-Flag1=23")
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg1/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "MYPKG1"
cpp_info.names["pkg_config"] = "my_pkg1_custom_name"
cpp_info.defines = ["MYDEFINE11"]
cpp_info.cflags.append("-Flag1=21")
cpp_info.version = "1.7"
cpp_info.description = "My other cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.names["pkg_config"] = "my_pkg2_custom_name"
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg", "MyPkg1"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("zlib/1.2.11@lasote/stable")
cpp_info = CppInfo(ref.name, "dummy_root_folder_zlib")
cpp_info.name = "ZLIB"
cpp_info.defines = ["MYZLIBDEFINE2"]
cpp_info.version = "1.2.11"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("bzip2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.name = "BZip2"
cpp_info.names["pkg_config"] = "BZip2"
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg", "MyPkg1", "zlib"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["my_pkg2_custom_name.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg2_custom_name
Description: Conan package: my_pkg2_custom_name
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg_custom_name my_pkg1_custom_name
""")
self.assertEqual(files["my_pkg1_custom_name.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg1_custom_name
Description: My other cool description
Version: 1.7
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=21 -DMYDEFINE11
""")
self.assertEqual(files["my_pkg_custom_name.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg_custom_name
Description: My cool description
Version: 1.3
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=23 -DMYDEFINE1
""")
self.assertEqual(files["BZip2.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: BZip2
Description: Conan package: BZip2
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg_custom_name my_pkg1_custom_name zlib
""")
def apple_frameworks_test(self):
settings = Settings.loads(get_default_settings_yml())
settings.compiler = "apple-clang"
settings.os = "Macos"
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
conanfile.settings = settings
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.frameworks = ['AudioUnit', 'AudioToolbox']
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["MyPkg.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: MyPkg
Description: My cool description
Version: 1.3
Libs: -L${libdir} -Wl,-rpath,"${libdir}" -framework AudioUnit -framework AudioToolbox
Cflags: -I${includedir}
""")
| 36.266667
| 86
| 0.693015
|
import unittest
from conans.client.conf import get_default_settings_yml
from conans.client.generators.pkg_config import PkgConfigGenerator
from conans.model.build_info import CppInfo
from conans.model.conan_file import ConanFile
from conans.model.env_info import EnvValues
from conans.model.ref import ConanFileReference
from conans.model.settings import Settings
from conans.test.utils.mocks import TestBufferConanOutput
class PkgGeneratorTest(unittest.TestCase):
def variables_setup_test(self):
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "my_pkg"
cpp_info.defines = ["MYDEFINE1"]
cpp_info.cflags.append("-Flag1=23")
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg1/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "MYPKG1"
cpp_info.defines = ["MYDEFINE11"]
cpp_info.cflags.append("-Flag1=21")
cpp_info.version = "1.7"
cpp_info.description = "My other cool description"
cpp_info.public_deps = ["MyPkg"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["MyPkg2.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: MyPkg2
Description: Conan package: MyPkg2
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg
""")
self.assertEqual(files["mypkg1.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: mypkg1
Description: My other cool description
Version: 1.7
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=21 -DMYDEFINE11
Requires: my_pkg
""")
self.assertEqual(files["my_pkg.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg
Description: My cool description
Version: 1.3
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=23 -DMYDEFINE1
""")
def pkg_config_custom_names_test(self):
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "my_pkg"
cpp_info.names["pkg_config"] = "my_pkg_custom_name"
cpp_info.defines = ["MYDEFINE1"]
cpp_info.cflags.append("-Flag1=23")
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg1/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.name = "MYPKG1"
cpp_info.names["pkg_config"] = "my_pkg1_custom_name"
cpp_info.defines = ["MYDEFINE11"]
cpp_info.cflags.append("-Flag1=21")
cpp_info.version = "1.7"
cpp_info.description = "My other cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.names["pkg_config"] = "my_pkg2_custom_name"
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg", "MyPkg1"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("zlib/1.2.11@lasote/stable")
cpp_info = CppInfo(ref.name, "dummy_root_folder_zlib")
cpp_info.name = "ZLIB"
cpp_info.defines = ["MYZLIBDEFINE2"]
cpp_info.version = "1.2.11"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
ref = ConanFileReference.loads("bzip2/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder2")
cpp_info.name = "BZip2"
cpp_info.names["pkg_config"] = "BZip2"
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg", "MyPkg1", "zlib"]
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["my_pkg2_custom_name.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg2_custom_name
Description: Conan package: my_pkg2_custom_name
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg_custom_name my_pkg1_custom_name
""")
self.assertEqual(files["my_pkg1_custom_name.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg1_custom_name
Description: My other cool description
Version: 1.7
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=21 -DMYDEFINE11
""")
self.assertEqual(files["my_pkg_custom_name.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: my_pkg_custom_name
Description: My cool description
Version: 1.3
Libs: -L${libdir}
Cflags: -I${includedir} -Flag1=23 -DMYDEFINE1
""")
self.assertEqual(files["BZip2.pc"], """prefix=dummy_root_folder2
libdir=${prefix}/lib
includedir=${prefix}/include
Name: BZip2
Description: Conan package: BZip2
Version: 2.3
Libs: -L${libdir} -sharedlinkflag -exelinkflag
Cflags: -I${includedir} -cxxflag -DMYDEFINE2
Requires: my_pkg_custom_name my_pkg1_custom_name zlib
""")
def apple_frameworks_test(self):
settings = Settings.loads(get_default_settings_yml())
settings.compiler = "apple-clang"
settings.os = "Macos"
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
conanfile.settings = settings
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo(ref.name, "dummy_root_folder1")
cpp_info.frameworks = ['AudioUnit', 'AudioToolbox']
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
conanfile.deps_cpp_info.add(ref.name, cpp_info)
generator = PkgConfigGenerator(conanfile)
files = generator.content
self.assertEqual(files["MyPkg.pc"], """prefix=dummy_root_folder1
libdir=${prefix}/lib
includedir=${prefix}/include
Name: MyPkg
Description: My cool description
Version: 1.3
Libs: -L${libdir} -Wl,-rpath,"${libdir}" -framework AudioUnit -framework AudioToolbox
Cflags: -I${includedir}
""")
| true
| true
|
f708b85991c8dfcba354718ee1d392233e0b43f4
| 156
|
py
|
Python
|
src/decisionengine_modules/AWS/sources/BillingInfoSourceProxy.py
|
hyunwoo18/decisionengine_modules
|
a67462628c2074e768d0825edee4ee5d570030e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/decisionengine_modules/AWS/sources/BillingInfoSourceProxy.py
|
hyunwoo18/decisionengine_modules
|
a67462628c2074e768d0825edee4ee5d570030e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/decisionengine_modules/AWS/sources/BillingInfoSourceProxy.py
|
hyunwoo18/decisionengine_modules
|
a67462628c2074e768d0825edee4ee5d570030e0
|
[
"BSD-3-Clause"
] | null | null | null |
from decisionengine.framework.modules import Source, SourceProxy
BillingInfoSourceProxy = SourceProxy.SourceProxy
Source.describe(BillingInfoSourceProxy)
| 26
| 64
| 0.878205
|
from decisionengine.framework.modules import Source, SourceProxy
BillingInfoSourceProxy = SourceProxy.SourceProxy
Source.describe(BillingInfoSourceProxy)
| true
| true
|
f708b96f67ceebcc32a1ac0dc93b639c6567d104
| 27
|
py
|
Python
|
search/serialize/tests/__init__.py
|
ID2797370/arxiv-search
|
889402e8eef9a2faaa8e900978cd27ff2784ce33
|
[
"MIT"
] | 35
|
2018-12-18T02:51:09.000Z
|
2022-03-30T04:43:20.000Z
|
search/serialize/tests/__init__.py
|
ID2797370/arxiv-search
|
889402e8eef9a2faaa8e900978cd27ff2784ce33
|
[
"MIT"
] | 172
|
2018-02-02T14:35:11.000Z
|
2018-12-04T15:35:30.000Z
|
search/serialize/tests/__init__.py
|
ID2797370/arxiv-search
|
889402e8eef9a2faaa8e900978cd27ff2784ce33
|
[
"MIT"
] | 13
|
2019-01-10T22:01:48.000Z
|
2021-11-05T12:25:08.000Z
|
"""Serialization tests."""
| 13.5
| 26
| 0.666667
| true
| true
|
|
f708b9851df648db1d879ef81fdf4b02ee8f4efb
| 493
|
py
|
Python
|
output/models/ms_data/complex_type/ct_e008_xsd/ct_e008.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/complex_type/ct_e008_xsd/ct_e008.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/complex_type/ct_e008_xsd/ct_e008.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Dict
@dataclass
class FooType:
class Meta:
name = "fooType"
value: str = field(
default="",
metadata={
"required": True,
}
)
any_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##any",
}
)
@dataclass
class Root(FooType):
class Meta:
name = "root"
| 17
| 43
| 0.527383
|
from dataclasses import dataclass, field
from typing import Dict
@dataclass
class FooType:
class Meta:
name = "fooType"
value: str = field(
default="",
metadata={
"required": True,
}
)
any_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##any",
}
)
@dataclass
class Root(FooType):
class Meta:
name = "root"
| true
| true
|
f708bab24562ef63a18b37d8b771cc69788e98b2
| 5,371
|
py
|
Python
|
pytorch_lightning/plugins/training_type/parallel.py
|
randommm/pytorch-lightning
|
10e87b7b7acbbad8fc12ec5c07638ed093547ef8
|
[
"Apache-2.0"
] | 1
|
2021-07-22T14:06:43.000Z
|
2021-07-22T14:06:43.000Z
|
pytorch_lightning/plugins/training_type/parallel.py
|
randommm/pytorch-lightning
|
10e87b7b7acbbad8fc12ec5c07638ed093547ef8
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/training_type/parallel.py
|
randommm/pytorch-lightning
|
10e87b7b7acbbad8fc12ec5c07638ed093547ef8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, List, Optional
import torch
from torch.nn.parallel import DistributedDataParallel
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import unwrap_lightning_module
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin
from pytorch_lightning.utilities import _XLA_AVAILABLE
from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, ReduceOp
class ParallelPlugin(TrainingTypePlugin, ABC):
""" Plugin for training with multiple processes in parallel. """
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
):
super().__init__()
self.parallel_devices = parallel_devices
self.cluster_environment = cluster_environment
@property
@abstractmethod
def root_device(self) -> torch.device:
raise NotImplementedError
@property
def on_gpu(self) -> bool:
return self.root_device.type == "cuda" and torch.cuda.is_available()
@property
def on_tpu(self) -> bool:
return self.root_device.type == "xla" and _XLA_AVAILABLE
@property
def lightning_module(self):
return unwrap_lightning_module(self._model)
@property
def global_rank(self) -> int:
return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0
@property
def local_rank(self) -> int:
return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0
@property
def node_rank(self) -> int:
return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0
@property
def world_size(self) -> int:
return self.cluster_environment.world_size() if self.cluster_environment is not None else 1
@property
def is_global_zero(self) -> bool:
return self.global_rank == 0
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=len(self.parallel_devices), rank=self.global_rank)
return distributed_sampler_kwargs
def reconciliate_processes(self, trace: str):
"""
Function to re-conciliate processes on failure
"""
def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:
"""Perform a all_gather on all processes """
return all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)
def reduce_boolean_decision(self, decision: bool) -> bool:
decision = torch.tensor(int(decision), device=self.lightning_module.device)
decision = self.reduce(decision, reduce_op=ReduceOp.SUM)
decision = bool(decision == self.world_size)
return decision
@property
def torch_distributed_backend(self):
torch_backend = os.getenv("PL_TORCH_DISTRIBUTED_BACKEND")
if torch_backend is None:
torch_backend = "nccl" if self.on_gpu else "gloo"
return torch_backend
@staticmethod
def configure_sync_batchnorm(model: 'pl.LightningModule') -> 'pl.LightningModule':
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
Override to synchronize batchnorm between specific process groups instead
of the whole world or use a different sync_bn like `apex`'s version.
Args:
model: pointer to current :class:`LightningModule`.
Return:
LightningModule with batchnorm layers synchronized between process groups
"""
return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    @contextmanager
    def block_backward_sync(self):
        """
        Blocks ddp sync gradients behaviour on backwards pass.
        This is useful for skipping sync when accumulating gradients,
        reducing communication overhead.

        Returns: context manager with sync behaviour off.
        """
        if isinstance(self.model, DistributedDataParallel):
            with self.model.no_sync():
                yield None
        else:
            # Model is not wrapped in DDP: there is no gradient sync to block.
            yield None
    def teardown(self) -> None:
        """Undo the distributed wrapping and release GPU resources."""
        # Un-reference the DDP wrapper (if any) so only the raw module remains.
        # todo (tchaton): Add support for all plugins.
        if isinstance(self.model, DistributedDataParallel):
            self.model = self.lightning_module
        if self.on_gpu:
            # GPU teardown: move the module back to CPU so the device can be
            # reused, then release cached CUDA memory.
            self.lightning_module.cpu()
            torch.cuda.empty_cache()
| 36.787671
| 118
| 0.707876
|
import os
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, List, Optional
import torch
from torch.nn.parallel import DistributedDataParallel
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import unwrap_lightning_module
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin
from pytorch_lightning.utilities import _XLA_AVAILABLE
from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, ReduceOp
class ParallelPlugin(TrainingTypePlugin, ABC):
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
):
super().__init__()
self.parallel_devices = parallel_devices
self.cluster_environment = cluster_environment
@property
@abstractmethod
def root_device(self) -> torch.device:
raise NotImplementedError
@property
def on_gpu(self) -> bool:
return self.root_device.type == "cuda" and torch.cuda.is_available()
@property
def on_tpu(self) -> bool:
return self.root_device.type == "xla" and _XLA_AVAILABLE
@property
def lightning_module(self):
return unwrap_lightning_module(self._model)
@property
def global_rank(self) -> int:
return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0
@property
def local_rank(self) -> int:
return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0
@property
def node_rank(self) -> int:
return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0
@property
def world_size(self) -> int:
return self.cluster_environment.world_size() if self.cluster_environment is not None else 1
@property
def is_global_zero(self) -> bool:
return self.global_rank == 0
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=len(self.parallel_devices), rank=self.global_rank)
return distributed_sampler_kwargs
def reconciliate_processes(self, trace: str):
def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:
return all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)
def reduce_boolean_decision(self, decision: bool) -> bool:
decision = torch.tensor(int(decision), device=self.lightning_module.device)
decision = self.reduce(decision, reduce_op=ReduceOp.SUM)
decision = bool(decision == self.world_size)
return decision
@property
def torch_distributed_backend(self):
torch_backend = os.getenv("PL_TORCH_DISTRIBUTED_BACKEND")
if torch_backend is None:
torch_backend = "nccl" if self.on_gpu else "gloo"
return torch_backend
@staticmethod
def configure_sync_batchnorm(model: 'pl.LightningModule') -> 'pl.LightningModule':
return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
@contextmanager
def block_backward_sync(self):
if isinstance(self.model, DistributedDataParallel):
with self.model.no_sync():
yield None
else:
yield None
def teardown(self) -> None:
if isinstance(self.model, DistributedDataParallel):
self.model = self.lightning_module
if self.on_gpu:
self.lightning_module.cpu()
torch.cuda.empty_cache()
| true
| true
|
f708bacf347d7ba9da3a06c0436000a3c9c5d36a
| 724
|
gyp
|
Python
|
binding.gyp
|
manishmalik/Modsecurity-nodejs
|
fe198394dd4b062b6404a7b7c4000f8888c9a580
|
[
"MIT"
] | 15
|
2017-04-21T20:23:02.000Z
|
2020-12-21T11:56:53.000Z
|
binding.gyp
|
manishmalik/Modsecurity-nodejs
|
fe198394dd4b062b6404a7b7c4000f8888c9a580
|
[
"MIT"
] | 1
|
2016-08-07T05:04:32.000Z
|
2016-08-09T10:36:09.000Z
|
binding.gyp
|
manishmalik/Modsecurity-nodejs
|
fe198394dd4b062b6404a7b7c4000f8888c9a580
|
[
"MIT"
] | 4
|
2016-06-18T21:31:32.000Z
|
2018-11-13T22:40:24.000Z
|
{
"targets": [
{
"target_name": "modsecurity",
"sources": [ "modsecurity_wrap.cxx" ],
"include_dirs": ['/usr/include/modsecurity/',],
"libraries": ['/usr/lib/libmodsecurity.a',
'/usr/lib/libmodsecurity.so',
'/usr/lib/libmodsecurity.a',
'/usr/lib/libmodsecurity.so.3.0.0',
'/usr/lib/x86_64-linux-gnu/libxml2.so',
'/usr/lib/x86_64-linux-gnu/libcurl.so',
'/lib/x86_64-linux-gnu/libpcre.so.3',
'/usr/lib/x86_64-linux-gnu/libyajl.so',
'/usr/lib/x86_64-linux-gnu/libGeoIP.so',
'/usr/lib/x86_64-linux-gnu/liblmdb.so'],
"cflags" : [ "-std=c++11" ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ]
}
]
}
| 32.909091
| 53
| 0.569061
|
{
"targets": [
{
"target_name": "modsecurity",
"sources": [ "modsecurity_wrap.cxx" ],
"include_dirs": ['/usr/include/modsecurity/',],
"libraries": ['/usr/lib/libmodsecurity.a',
'/usr/lib/libmodsecurity.so',
'/usr/lib/libmodsecurity.a',
'/usr/lib/libmodsecurity.so.3.0.0',
'/usr/lib/x86_64-linux-gnu/libxml2.so',
'/usr/lib/x86_64-linux-gnu/libcurl.so',
'/lib/x86_64-linux-gnu/libpcre.so.3',
'/usr/lib/x86_64-linux-gnu/libyajl.so',
'/usr/lib/x86_64-linux-gnu/libGeoIP.so',
'/usr/lib/x86_64-linux-gnu/liblmdb.so'],
"cflags" : [ "-std=c++11" ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ]
}
]
}
| true
| true
|
f708bb067419da2a8f90d27a3e62ee93b9af35d0
| 2,044
|
py
|
Python
|
grab/captcha/backend/gui.py
|
brabadu/grab
|
92b1d68ceeece3087e053064520261a7aef3bd02
|
[
"MIT"
] | 1
|
2021-05-10T16:03:24.000Z
|
2021-05-10T16:03:24.000Z
|
grab/captcha/backend/gui.py
|
brabadu/grab
|
92b1d68ceeece3087e053064520261a7aef3bd02
|
[
"MIT"
] | null | null | null |
grab/captcha/backend/gui.py
|
brabadu/grab
|
92b1d68ceeece3087e053064520261a7aef3bd02
|
[
"MIT"
] | null | null | null |
import tempfile
import webbrowser
import time
import os
import pygtk
import gtk
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from grab import Grab
from .base import CaptchaBackend
pygtk.require('2.0')
class CaptchaWindow(object):
    """GTK dialog that shows a captcha image and collects the user's answer.

    The answer typed by the user is appended to the ``solution`` list the
    caller passes in; the window then hides and the GTK main loop exits.
    """

    def __init__(self, path, solution):
        self.solution = solution
        # Top-level window wired to leave the main loop when closed.
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.show()
        self.window.connect('destroy', self.destroy)
        # Horizontal layout: captcha image | text entry | submit button.
        self.box = gtk.HBox()
        self.image = gtk.Image()
        self.image.set_from_file(path)
        self.entry = gtk.Entry()
        self.entry.connect('activate', self.solve)
        self.button = gtk.Button('Go')
        self.button.connect('clicked', self.solve)
        self.window.add(self.box)
        for widget in (self.image, self.entry, self.button):
            self.box.pack_start(widget)
        self.box.show()
        for widget in (self.image, self.button, self.entry):
            widget.show()
        self.entry.grab_focus()

    def destroy(self, *args):
        """Window closed without an answer: just leave the GTK main loop."""
        gtk.main_quit()

    def solve(self, *args):
        """Record the typed answer, hide the window and leave the main loop."""
        self.solution.append(self.entry.get_text())
        self.window.hide()
        gtk.main_quit()

    def main(self):
        """Block inside the GTK main loop until the window is solved or closed."""
        gtk.main()
class GuiBackend(CaptchaBackend):
    """Captcha backend that displays the captcha in a local GTK window.

    The captcha id is the path of a temporary file holding the image; the
    "requests" exchanged with the backend are file:// URLs wrapped in Grab
    objects so the interface matches remote captcha-solving backends.
    """

    def get_submit_captcha_request(self, data):
        """Persist the captcha image to a temp file and return a request for it."""
        fd, path = tempfile.mkstemp()
        # BUG FIX: the OS-level descriptor returned by mkstemp() was leaked
        # (the path was re-opened separately and fd never closed); write
        # through the descriptor and let the context manager close it.
        with os.fdopen(fd, 'w') as out:
            out.write(data)
        url = 'file://' + path
        g = Grab()
        g.setup(url=url)
        return g

    def parse_submit_captcha_response(self, res):
        """The captcha id is the temp-file path embedded in the file:// URL."""
        return res.url.replace('file://', '')

    def get_check_solution_request(self, captcha_id):
        """Build a file:// request pointing back at the stored captcha image."""
        url = 'file://' + captcha_id
        g = Grab()
        g.setup(url=url)
        return g

    def parse_check_solution_response(self, res):
        """Show the captcha to the user and return the answer they typed.

        Deletes the temp file afterwards.  Raises IndexError when the window
        is closed without submitting an answer (nothing gets recorded).
        """
        path = res.url.replace('file://', '')
        solution = []
        window = CaptchaWindow(path, solution)
        window.main()
        os.unlink(path)
        return solution[0]
| 25.873418
| 53
| 0.606654
|
import tempfile
import webbrowser
import time
import os
import pygtk
import gtk
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from grab import Grab
from .base import CaptchaBackend
pygtk.require('2.0')
class CaptchaWindow(object):
def __init__(self, path, solution):
self.solution = solution
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.show()
self.window.connect('destroy', self.destroy)
self.box = gtk.HBox()
self.image = gtk.Image()
self.image.set_from_file(path)
self.entry = gtk.Entry()
self.entry.connect('activate', self.solve)
self.button = gtk.Button('Go')
self.button.connect('clicked', self.solve)
self.window.add(self.box)
self.box.pack_start(self.image)
self.box.pack_start(self.entry)
self.box.pack_start(self.button)
self.box.show()
self.image.show()
self.button.show()
self.entry.show()
self.entry.grab_focus()
def destroy(self, *args):
gtk.main_quit()
def solve(self, *args):
self.solution.append(self.entry.get_text())
self.window.hide()
gtk.main_quit()
def main(self):
gtk.main()
class GuiBackend(CaptchaBackend):
def get_submit_captcha_request(self, data):
fd, path = tempfile.mkstemp()
with open(path, 'w') as out:
out.write(data)
url = 'file://' + path
g = Grab()
g.setup(url=url)
return g
def parse_submit_captcha_response(self, res):
return res.url.replace('file://', '')
def get_check_solution_request(self, captcha_id):
url = 'file://' + captcha_id
g = Grab()
g.setup(url=url)
return g
def parse_check_solution_response(self, res):
path = res.url.replace('file://', '')
solution = []
window = CaptchaWindow(path, solution)
window.main()
os.unlink(path)
return solution[0]
| true
| true
|
f708bb09bee3270dd8d3eb7e6cd9129f9c54f611
| 54
|
py
|
Python
|
constants.py
|
I-question-this/metame
|
a055afde75e15d97a53731a223bfe5e5ba29c5ee
|
[
"MIT"
] | 484
|
2016-08-08T01:49:49.000Z
|
2022-03-06T05:20:37.000Z
|
constants.py
|
I-question-this/metame
|
a055afde75e15d97a53731a223bfe5e5ba29c5ee
|
[
"MIT"
] | 15
|
2016-08-08T01:59:36.000Z
|
2021-02-01T05:27:54.000Z
|
constants.py
|
I-question-this/metame
|
a055afde75e15d97a53731a223bfe5e5ba29c5ee
|
[
"MIT"
] | 94
|
2016-08-08T02:47:17.000Z
|
2022-02-01T17:44:27.000Z
|
# Architectures and word sizes the tool knows how to handle.
supported_archs = ['x86']
supported_bits = [32, 64]
| 13.5
| 26
| 0.666667
|
supported_archs = ["x86"]
supported_bits = [32, 64]
| true
| true
|
f708bb468e7ab709812ea009bdf654073360cd69
| 2,275
|
py
|
Python
|
code/processing/growth_rates/2021-08-14_r1_DoubleKO_acetate/analysis.py
|
cremerlab/useless_expression
|
a6020674f0ae73b4cc6173de60a0ea93016ee562
|
[
"MIT"
] | null | null | null |
code/processing/growth_rates/2021-08-14_r1_DoubleKO_acetate/analysis.py
|
cremerlab/useless_expression
|
a6020674f0ae73b4cc6173de60a0ea93016ee562
|
[
"MIT"
] | null | null | null |
code/processing/growth_rates/2021-08-14_r1_DoubleKO_acetate/analysis.py
|
cremerlab/useless_expression
|
a6020674f0ae73b4cc6173de60a0ea93016ee562
|
[
"MIT"
] | null | null | null |
#%%
import numpy as np
import pandas as pd
import futileprot.viz
import altair as alt
import altair_saver
import scipy.stats
colors, palette = futileprot.viz.altair_style()

# Experiment metadata
DATE = '2021-08-14'
RUN_NO = 1
STRAINS = 'DoubleKO'
MEDIUM = 'acetate'

# Load the exponential-phase growth measurements.
data = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')
data = data[['strain', 'elapsed_time_hr', 'od_600nm']]

# For each strain, infer the growth rate by linear regression of log(OD)
# on time, and stack a data-plus-fit panel per strain.
# BUG FIX: the accumulator was initialised to False and tested with
# ``layout == False`` -- altair charts overload ``==``, so use a None
# sentinel tested with ``is``.
layout = None
for g, d in data.groupby(['strain']):
    time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)
    # Regress log(OD) against elapsed time; the slope is the growth rate µ.
    popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))
    slope, intercept, err = popt[0], popt[1], popt[-1]
    print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')
    # Evaluate the exponential fit over the plotting range.
    # (renamed from ``fit`` to avoid shadowing the fit chart below)
    fit_vals = np.exp(intercept + slope * time_range)
    fit_df = pd.DataFrame([])
    fit_df['elapsed_time_hr'] = time_range
    fit_df['od_600nm'] = fit_vals
    # Scatter of the raw measurements on a log OD axis.
    points = alt.Chart(
        data=d,
        width=300,
        height=150
    ).mark_point(
        color=colors['primary_blue']
    ).encode(
        x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),
        y=alt.Y('od_600nm:Q', title='optical density [a.u]',
                scale=alt.Scale(type='log'))
    )
    # Line chart of the exponential fit, titled with the inferred rate.
    fit_chart = alt.Chart(data=fit_df,
                          title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'
                          ).mark_line(
        color=colors['primary_blue']
    ).encode(
        x='elapsed_time_hr:Q',
        y='od_600nm:Q'
    )
    merged = points + fit_chart
    layout = merged if layout is None else layout & merged
altair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',
                  scale_factor=2)
# %%
| 32.971014
| 89
| 0.551648
|
import numpy as np
import pandas as pd
import futileprot.viz
import altair as alt
import altair_saver
import scipy.stats
colors, palette = futileprot.viz.altair_style()
DATE = '2021-08-14'
RUN_NO = 1
STRAINS = 'DoubleKO'
MEDIUM = 'acetate'
data = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')
data = data[['strain', 'elapsed_time_hr', 'od_600nm']]
layout = False
for g, d in data.groupby(['strain']):
time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)
popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))
slope, intercept, err = popt[0], popt[1], popt[-1]
print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')
fit = np.exp(intercept + slope * time_range)
fit_df = pd.DataFrame([])
fit_df['elapsed_time_hr'] = time_range
fit_df['od_600nm'] = fit
points = alt.Chart(
data=d,
width=300,
height=150
).mark_point(
color=colors['primary_blue']
).encode(
x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),
y=alt.Y('od_600nm:Q', title='optical density [a.u]',
scale=alt.Scale(type='log'))
)
fit = alt.Chart(data=fit_df,
title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'
).mark_line(
color=colors['primary_blue']
).encode(
x='elapsed_time_hr:Q',
y='od_600nm:Q'
)
merge = points + fit
if layout == False:
layout = merge
else:
layout &= merge
altair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',
scale_factor=2)
| true
| true
|
f708bc3b0e1b8efa4b672733fdae01f2f74c4bfb
| 142
|
py
|
Python
|
wxwork_hr_syncing/wizard/__init__.py
|
rainbow-studio-solution/wxwork
|
344a0a8f8f0ac364101a1bb4a98c132588118839
|
[
"MulanPSL-1.0"
] | 9
|
2021-01-02T15:42:21.000Z
|
2021-08-13T08:09:16.000Z
|
wxwork_hr_syncing/wizard/__init__.py
|
rainbow-studio-solution/wxwork
|
344a0a8f8f0ac364101a1bb4a98c132588118839
|
[
"MulanPSL-1.0"
] | null | null | null |
wxwork_hr_syncing/wizard/__init__.py
|
rainbow-studio-solution/wxwork
|
344a0a8f8f0ac364101a1bb4a98c132588118839
|
[
"MulanPSL-1.0"
] | 4
|
2021-01-11T04:57:07.000Z
|
2021-05-21T06:01:55.000Z
|
# -*- coding: utf-8 -*-
from . import wizard_wxwork_contacts_sync
from . import wizard_wxwork_sync_tag
from . import wizard_wxwork_sync_user
| 23.666667
| 41
| 0.788732
|
from . import wizard_wxwork_contacts_sync
from . import wizard_wxwork_sync_tag
from . import wizard_wxwork_sync_user
| true
| true
|
f708bcd4339a6533749a5be7215ccfd3de77d575
| 1,536
|
py
|
Python
|
rango/models.py
|
StandeBoerIsle/tango_with_django_project
|
bb2e3a54e7dbc10c3e6ab7832a53dc0c75121341
|
[
"MIT"
] | null | null | null |
rango/models.py
|
StandeBoerIsle/tango_with_django_project
|
bb2e3a54e7dbc10c3e6ab7832a53dc0c75121341
|
[
"MIT"
] | null | null | null |
rango/models.py
|
StandeBoerIsle/tango_with_django_project
|
bb2e3a54e7dbc10c3e6ab7832a53dc0c75121341
|
[
"MIT"
] | 1
|
2018-02-20T15:46:10.000Z
|
2018-02-20T15:46:10.000Z
|
from __future__ import unicode_literals
from django.db import models
from django import forms
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.utils import timezone
class Category(models.Model):
    """Category of pages; ``slug`` is recomputed from ``name`` on every save."""
    name = models.CharField(max_length=128, unique=True)
    views = models.IntegerField(default=0)
    likes = models.IntegerField(default=0)
    slug = models.SlugField(unique=True)

    class Meta:
        verbose_name_plural = 'categories'

    def save(self, *args, **kwargs):
        # Derive the slug from the current name and clamp views at zero.
        self.slug = slugify(self.name)
        self.views = max(self.views, 0)
        super(Category, self).save(*args, **kwargs)

    def __str__(self):
        return self.name
class Page(models.Model):
    # A page belongs to exactly one Category.
    category = models.ForeignKey(Category)
    title = models.CharField(max_length=128)
    url = models.URLField()
    # Visit counter -- presumably incremented by a redirect view elsewhere;
    # confirm against the view code (not visible here).
    views = models.IntegerField(default=0)
    first_visit = models.DateTimeField(default=timezone.now)
    last_visit = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.title
class UserProfile(models.Model):
    # This line is required. Links UserProfile to a User model instance.
    user = models.OneToOneField(User)
    # The additional attributes we wish to include.
    website = models.URLField(blank=True)
    picture = models.ImageField(upload_to='profile_images', blank=True)
    # Return something meaningful for admin/debug display.
    # (The original comment mentioned __unicode__, but __str__ is defined.)
    def __str__(self):
        return self.user.username
| 30.117647
| 75
| 0.71224
|
from __future__ import unicode_literals
from django.db import models
from django import forms
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.utils import timezone
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
views = models.IntegerField(default=0)
likes = models.IntegerField(default=0)
slug = models.SlugField(unique=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
if self.views < 0:
self.views = 0
super(Category, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'categories'
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length=128)
url = models.URLField()
views = models.IntegerField(default=0)
first_visit = models.DateTimeField(default=timezone.now)
last_visit = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
class UserProfile(models.Model):
user = models.OneToOneField(User)
website = models.URLField(blank=True)
picture = models.ImageField(upload_to='profile_images', blank=True)
def __str__(self):
return self.user.username
| true
| true
|
f708be84c637a1aff470e51a222399852d8dac30
| 3,127
|
py
|
Python
|
sensor_stick/src/sensor_stick/features.py
|
Fred159/3D-Perception
|
a23a42dc19d0a38e48beb5e7c0725e6d14c542f3
|
[
"MIT"
] | 8
|
2018-12-05T06:18:25.000Z
|
2021-01-15T03:13:50.000Z
|
sensor_stick/src/sensor_stick/features.py
|
Fred159/3D-Perception
|
a23a42dc19d0a38e48beb5e7c0725e6d14c542f3
|
[
"MIT"
] | null | null | null |
sensor_stick/src/sensor_stick/features.py
|
Fred159/3D-Perception
|
a23a42dc19d0a38e48beb5e7c0725e6d14c542f3
|
[
"MIT"
] | 1
|
2020-05-11T02:30:31.000Z
|
2020-05-11T02:30:31.000Z
|
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pcl_helper import *
print('run features.py')
def rgb_to_hsv(rgb_list):
    """Convert an [R, G, B] triple in 0-255 to a normalized HSV triple."""
    scaled = [rgb_list[0] / 255.0, rgb_list[1] / 255.0, rgb_list[2] / 255.0]
    # matplotlib expects an image-shaped (1, 1, 3) array, hence the nesting.
    return matplotlib.colors.rgb_to_hsv([[scaled]])[0][0]
def compute_color_histograms(cloud, using_hsv=False):
    """Compute a normalized 96-bin color histogram feature for a point cloud.

    Args:
        cloud: ROS PointCloud2 message whose 4th field packs an RGB float.
        using_hsv: when True, histogram HSV channels (scaled to 0-255)
            instead of raw RGB.

    Returns:
        np.ndarray of 96 float64 values (3 channels x 32 bins) summing to 1,
        or all zeros for an empty cloud.
    """
    # Collect the per-point color triplets.
    point_colors_list = []
    for point in pc2.read_points(cloud, skip_nans=True):
        rgb_list = float_to_rgb(point[3])
        if using_hsv:
            point_colors_list.append(rgb_to_hsv(rgb_list) * 255)
        else:
            point_colors_list.append(rgb_list)
    # Split into per-channel value lists.
    channel_1_vals = [color[0] for color in point_colors_list]
    channel_2_vals = [color[1] for color in point_colors_list]
    channel_3_vals = [color[2] for color in point_colors_list]
    nbins = 32
    bins_range = (0, 256)
    # Histogram each channel and concatenate into one feature vector.
    hist_features = np.concatenate([
        np.histogram(vals, bins=nbins, range=bins_range)[0]
        for vals in (channel_1_vals, channel_2_vals, channel_3_vals)
    ]).astype(np.float64)
    # ROBUSTNESS: an empty cloud previously produced NaNs (0/0 division);
    # return the zero vector instead.
    total = np.sum(hist_features)
    if total == 0:
        return hist_features
    return hist_features / total
def compute_normal_histograms(normal_cloud):
    """Compute a normalized 96-bin histogram feature of surface normals.

    Args:
        normal_cloud: PointCloud2 with 'normal_x'/'normal_y'/'normal_z' fields.

    Returns:
        np.ndarray of 96 float64 values (3 components x 32 bins over [-1, 1])
        summing to 1, or all zeros for an empty cloud.
    """
    norm_x_vals = []
    norm_y_vals = []
    norm_z_vals = []
    nbins = 32
    bins_range = (-1, 1)
    for norm_component in pc2.read_points(normal_cloud,
                                          field_names=('normal_x', 'normal_y', 'normal_z'),
                                          skip_nans=True):
        norm_x_vals.append(norm_component[0])
        norm_y_vals.append(norm_component[1])
        norm_z_vals.append(norm_component[2])
    # Histogram each normal component and concatenate into one feature vector.
    norm_hist_features = np.concatenate([
        np.histogram(vals, bins=nbins, range=bins_range)[0]
        for vals in (norm_x_vals, norm_y_vals, norm_z_vals)
    ]).astype(np.float64)
    # ROBUSTNESS: avoid 0/0 NaNs for an empty cloud; return the zero vector.
    total = np.sum(norm_hist_features)
    if total == 0:
        return norm_hist_features
    return norm_hist_features / total
| 39.0875
| 116
| 0.683722
|
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pcl_helper import *
print('run features.py')
def rgb_to_hsv(rgb_list):
rgb_normalized = [1.0 * rgb_list[0] / 255, 1.0 * rgb_list[1] / 255, 1.0 * rgb_list[2] / 255]
hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]
return hsv_normalized
def compute_color_histograms(cloud, using_hsv=False):
point_colors_list = []
for point in pc2.read_points(cloud, skip_nans=True):
rgb_list = float_to_rgb(point[3])
if using_hsv:
point_colors_list.append(rgb_to_hsv(rgb_list) * 255)
else:
point_colors_list.append(rgb_list)
channel_1_vals = []
channel_2_vals = []
channel_3_vals = []
for color in point_colors_list:
channel_1_vals.append(color[0])
channel_2_vals.append(color[1])
channel_3_vals.append(color[2])
nbins = 32
bins_range = (0, 256)
channel_1_hist = np.histogram(channel_1_vals, bins=nbins, range=bins_range)
channel_2_hist = np.histogram(channel_2_vals, bins=nbins, range=bins_range)
channel_3_hist = np.histogram(channel_3_vals, bins=nbins, range=bins_range)
hist_features = np.concatenate((channel_1_hist[0], channel_2_hist[0], channel_3_hist[0])).astype(np.float64)
normed_features = hist_features / np.sum(hist_features)
return normed_features
def compute_normal_histograms(normal_cloud):
norm_x_vals = []
norm_y_vals = []
norm_z_vals = []
nbins = 32
bins_range = (-1, 1)
for norm_component in pc2.read_points(normal_cloud,
field_names=('normal_x', 'normal_y', 'normal_z'),
skip_nans=True):
norm_x_vals.append(norm_component[0])
norm_y_vals.append(norm_component[1])
norm_z_vals.append(norm_component[2])
norm_x_hist = np.histogram(norm_x_vals, bins=nbins, range=bins_range)
norm_y_hist = np.histogram(norm_y_vals, bins=nbins, range=bins_range)
norm_z_hist = np.histogram(norm_z_vals, bins=nbins, range=bins_range)
norm_hist_features = np.concatenate((norm_x_hist[0], norm_y_hist[0], norm_z_hist[0])).astype(np.float64)
normed_features = norm_hist_features / np.sum(norm_hist_features)
return normed_features
| true
| true
|
f708bf57521f7d9481aa81d8b11d1bb1fd26633a
| 2,945
|
py
|
Python
|
form_designer/views.py
|
LUKKIEN/django-form-designer
|
009e0870cae19e8570b9a480b6b64aee1dd38dfe
|
[
"BSD-3-Clause"
] | 1
|
2015-03-03T20:37:07.000Z
|
2015-03-03T20:37:07.000Z
|
form_designer/views.py
|
piquadrat/django-form-designer
|
5ae7c3b00e538ada23d830d15424b557cac73017
|
[
"BSD-3-Clause"
] | null | null | null |
form_designer/views.py
|
piquadrat/django-form-designer
|
5ae7c3b00e538ada23d830d15424b557cac73017
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.conf import settings
from django.contrib import messages
from django.core.context_processors import csrf
from form_designer.forms import DesignedForm
from form_designer.models import FormDefinition
def process_form(request, form_definition, context=None, is_cms_plugin=False):
    """Validate and process a submission of *form_definition*.

    Builds a DesignedForm from the request (POST or GET, recognised via the
    definition's submit flag); on a valid submission logs the data, sends
    mail and/or redirects as configured.

    Returns the populated template context dict, or an HttpResponseRedirect
    when a success redirect applies.
    """
    # BUG FIX: the default was ``context={}`` -- a mutable default argument
    # shared across calls, so keys could leak between unrelated requests.
    if context is None:
        context = {}
    success_message = form_definition.success_message or _('Thank you, the data was submitted successfully.')
    error_message = form_definition.error_message or _('The data could not be submitted, please try again.')
    message = None
    form_error = False
    form_success = False
    is_submit = False
    # The form counts as submitted when its submit flag is present.
    if request.method == 'POST' and request.POST.get(form_definition.submit_flag_name):
        form = DesignedForm(form_definition, None, request.POST)
        is_submit = True
    if request.method == 'GET' and request.GET.get(form_definition.submit_flag_name):
        form = DesignedForm(form_definition, None, request.GET)
        is_submit = True
    if is_submit:
        if form.is_valid():
            # Successful submission: record, notify, optionally redirect.
            messages.success(request, success_message)
            message = success_message
            form_success = True
            if form_definition.log_data:
                form_definition.log(form)
            if form_definition.mail_to:
                form_definition.send_mail(form)
            if form_definition.success_redirect and not is_cms_plugin:
                # TODO Redirection does not work for cms plugin
                return HttpResponseRedirect(form_definition.action or '?')
            if form_definition.success_clear:
                form = DesignedForm(form_definition)  # clear form
        else:
            form_error = True
            messages.error(request, error_message)
            message = error_message
    else:
        # Initial GET: optionally pre-fill the form from querystring data.
        if form_definition.allow_get_initial:
            form = DesignedForm(form_definition, initial_data=request.GET)
        else:
            form = DesignedForm(form_definition)
    context.update({
        'message': message,
        'form_error': form_error,
        'form_success': form_success,
        'form': form,
        'form_definition': form_definition
    })
    context.update(csrf(request))
    return context
def detail(request, object_name):
    """Render the detail page for the form definition named *object_name*."""
    form_definition = get_object_or_404(FormDefinition, name=object_name)
    outcome = process_form(request, form_definition)
    # process_form short-circuits with a redirect when so configured.
    if isinstance(outcome, HttpResponseRedirect):
        return outcome
    template_name = form_definition.form_template_name or settings.DEFAULT_FORM_TEMPLATE
    outcome.update({'form_template': template_name})
    return render_to_response('html/formdefinition/detail.html', outcome,
                              context_instance=RequestContext(request))
| 40.902778
| 109
| 0.70017
|
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.conf import settings
from django.contrib import messages
from django.core.context_processors import csrf
from form_designer.forms import DesignedForm
from form_designer.models import FormDefinition
def process_form(request, form_definition, context={}, is_cms_plugin=False):
success_message = form_definition.success_message or _('Thank you, the data was submitted successfully.')
error_message = form_definition.error_message or _('The data could not be submitted, please try again.')
message = None
form_error = False
form_success = False
is_submit = False
if request.method == 'POST' and request.POST.get(form_definition.submit_flag_name):
form = DesignedForm(form_definition, None, request.POST)
is_submit = True
if request.method == 'GET' and request.GET.get(form_definition.submit_flag_name):
form = DesignedForm(form_definition, None, request.GET)
is_submit = True
if is_submit:
if form.is_valid():
messages.success(request, success_message)
message = success_message
form_success = True
if form_definition.log_data:
form_definition.log(form)
if form_definition.mail_to:
form_definition.send_mail(form)
if form_definition.success_redirect and not is_cms_plugin:
return HttpResponseRedirect(form_definition.action or '?')
if form_definition.success_clear:
form = DesignedForm(form_definition) else:
form_error = True
messages.error(request, error_message)
message = error_message
else:
if form_definition.allow_get_initial:
form = DesignedForm(form_definition, initial_data=request.GET)
else:
form = DesignedForm(form_definition)
context.update({
'message': message,
'form_error': form_error,
'form_success': form_success,
'form': form,
'form_definition': form_definition
})
context.update(csrf(request))
return context
def detail(request, object_name):
form_definition = get_object_or_404(FormDefinition, name=object_name)
result = process_form(request, form_definition)
if isinstance(result, HttpResponseRedirect):
return result
result.update({
'form_template': form_definition.form_template_name or settings.DEFAULT_FORM_TEMPLATE
})
return render_to_response('html/formdefinition/detail.html', result,
context_instance=RequestContext(request))
| true
| true
|
f708c3199d4231ae99a6c0e5aafc7662e7c6bc86
| 9,291
|
py
|
Python
|
test/functional/feature_part_usbdevice.py
|
dmuralov/particl-core
|
ac4dc00b7cd6293329ff4bf3acaa65636238910a
|
[
"MIT"
] | null | null | null |
test/functional/feature_part_usbdevice.py
|
dmuralov/particl-core
|
ac4dc00b7cd6293329ff4bf3acaa65636238910a
|
[
"MIT"
] | null | null | null |
test/functional/feature_part_usbdevice.py
|
dmuralov/particl-core
|
ac4dc00b7cd6293329ff4bf3acaa65636238910a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import json
import configparser
from test_framework.test_falcon import (
FalconTestFramework,
isclose,
getIndexAtProperty,
)
from test_framework.test_framework import SkipTest
from test_framework.util import assert_raises_rpc_error
from test_framework.authproxy import JSONRPCException
class USBDeviceTest(FalconTestFramework):
    def set_test_params(self):
        # Fresh regtest chain with three nodes, all with debug/txindex flags.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [ ['-debug','-noacceptnonstdtxn','-reservebalance=10000000', '-txindex'] for i in range(self.num_nodes)]
    def skip_test_if_missing_module(self):
        # Requires wallet support to be compiled in.
        self.skip_if_no_wallet()
    def setup_network(self, split=False):
        # Start all nodes, connect every pair bidirectionally and wait for
        # them to sync.  ``split`` is accepted for framework compatibility
        # but unused here.
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()
        self.connect_nodes_bi(0, 1)
        self.connect_nodes_bi(0, 2)
        self.connect_nodes_bi(1, 2)
        self.sync_all()
def run_test(self):
# Check that falcon has been built with USB device enabled
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_USBDEVICE"):
raise SkipTest("falcond has not been built with usb device enabled.")
nodes = self.nodes
self.import_genesis_coins_a(nodes[0])
ro = nodes[1].listdevices()
assert(len(ro) == 1)
assert(ro[0]['vendor'] == 'Debug')
assert(ro[0]['product'] == 'Device')
ro = nodes[1].getdeviceinfo()
assert(ro['device'] == 'debug')
ro = nodes[1].getdevicepublickey('0')
assert(ro['address'] == 'praish9BVxVdhykpqBYEs6L65AQ7iKd9z1')
assert(ro['path'] == "m/44'/1'/0'/0")
ro = nodes[1].getdevicepublickey('0/1')
assert(ro['address'] == 'peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR')
assert(ro['path'] == "m/44'/1'/0'/0/1")
ro = nodes[1].getdevicexpub("m/44'/1'/0'", "")
assert(ro == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
message = 'This is just a test message'
sig = nodes[1].devicesignmessage('0/1', message)
assert(True == nodes[1].verifymessage('peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR', sig, message))
ro = nodes[1].initaccountfromdevice('test_acc')
assert(ro['extkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro['path'] == "m/44'/1'/0'")
ro = nodes[1].extkey('list', 'true')
assert(len(ro) == 1)
assert(ro[0]['path'] == "m/44h/1h/0h")
assert(ro[0]['epkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro[0]['label'] == 'test_acc')
assert(ro[0]['hardware_device'] == '0xffff 0x0001')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addr1_0 = nodes[1].getnewaddress('lbl1_0')
ro = nodes[1].filteraddresses()
assert(len(ro) == 1)
assert(ro[0]['path'] == 'm/0/0')
assert(ro[0]['owned'] == 'true')
assert(ro[0]['label'] == 'lbl1_0')
va_addr1_0 = nodes[1].getaddressinfo(addr1_0)
assert(va_addr1_0['ismine'] == True)
assert(va_addr1_0['iswatchonly'] == False)
assert(va_addr1_0['isondevice'] == True)
assert(va_addr1_0['path'] == 'm/0/0')
try:
nodes[1].getnewstealthaddress()
raise AssertionError('Should have failed.')
except JSONRPCException as e:
pass
extaddr1_0 = nodes[1].getnewextaddress()
txnid0 = nodes[0].sendtoaddress(addr1_0, 6)
txnid1 = nodes[0].sendtoaddress(extaddr1_0, 6)
self.stakeBlocks(1)
block_txns = nodes[0].getblock(nodes[0].getblockhash(nodes[0].getblockcount()))['tx']
assert(txnid0 in block_txns)
assert(txnid1 in block_txns)
ro = nodes[1].getwalletinfo()
assert(isclose(ro['balance'], 12.0))
addr0_0 = nodes[0].getnewaddress()
hexRaw = nodes[1].createrawtransaction([], {addr0_0:10})
hexFunded = nodes[1].fundrawtransaction(hexRaw)['hex']
txDecoded = nodes[1].decoderawtransaction(hexFunded)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['complete'] == True)
txnid1 = nodes[1].sendrawtransaction(ro['hex'])
self.sync_all()
self.stakeBlocks(1)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['errors'][0]['error'] == 'Input not found or already spent')
prevtxns = []
for vin in txDecoded['vin']:
rtx = nodes[1].getrawtransaction(vin['txid'], True)
prev_out = rtx['vout'][vin['vout']]
prevtxns.append({'txid': vin['txid'], 'vout': vin['vout'], 'scriptPubKey': prev_out['scriptPubKey']['hex'], 'amount': prev_out['value']})
ro = nodes[1].devicesignrawtransaction(hexFunded, prevtxns, ['0/0', '2/0'])
assert(ro['complete'] == True)
ro = nodes[1].listunspent()
assert(ro[0]['ondevice'] == True)
txnid2 = nodes[1].sendtoaddress(addr0_0, 0.1)
self.sync_all()
nodes[0].syncwithvalidationinterfacequeue()
assert(nodes[0].filtertransactions()[0]['txid'] == txnid2)
hwsxaddr = nodes[1].devicegetnewstealthaddress()
assert(hwsxaddr == 'tps1qqpdwu7gqjqz9s9wfek843akvkzvw0xq3tkzs93sj4ceq60cp54mvzgpqf4tp6d7h0nza2xe362am697dax24hcr33yxqwvq58l5cf6j6q5hkqqqgykgrc')
hwsxaddr2 = nodes[1].devicegetnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True)
assert(hwsxaddr2 == 'tps1qqpewyspjp93axk82zahx5xfjyprpvypfgnp95n9aynxxw3w0qs63acpq0s5z2rwk0raczg8jszl9qy5stncud76ahr5etn9hqmp30e3e86w2qqypgh9sgv0')
ro = nodes[1].getaddressinfo(hwsxaddr2)
assert(ro['prefix_num_bits'] == 4)
assert(ro['prefix_bitfield'] == '0x000a')
assert(ro['isondevice'] == True)
ro = nodes[1].liststealthaddresses()
assert(len(ro[0]['Stealth Addresses']) == 2)
ro = nodes[1].filteraddresses()
assert(len(ro) == 3)
txnid3 = nodes[0].sendtoaddress(hwsxaddr, 0.1, '', '', False, 'test msg')
self.stakeBlocks(1)
ro = nodes[1].listtransactions()
assert(len(ro) == 5)
assert('test msg' in self.dumpj(ro[4]))
ro = nodes[1].listunspent()
inputs = []
for output in ro:
if output['txid'] == txnid3:
inputs.append({'txid' : txnid3, 'vout' : output['vout']})
break
assert(len(inputs) > 0)
hexRaw = nodes[1].createrawtransaction(inputs, {addr0_0:0.09})
ro = nodes[1].devicesignrawtransactionwithwallet(hexRaw)
assert(ro['complete'] == True)
# import privkey in node2
rootkey = nodes[2].extkeyaltversion('xparFdrwJK7K2nfYzrkEqAKr5EcJNdY4c6ZNoLFFx1pMXQSQpo5MAufjogrS17RkqsLAijZJaBDHhG3G7SuJjtsTmRRTEKZDzGMnVCeX59cQCiR')
ro = nodes[2].extkey('import', rootkey, 'master key', True)
ro = nodes[2].extkey('setmaster', ro['id'])
assert(ro['result'] == 'Success.')
ro = nodes[2].extkey('deriveaccount', 'test account')
ro = nodes[2].extkey('setdefaultaccount', ro['account'])
assert(ro['result'] == 'Success.')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addrtest = nodes[2].getnewaddress()
ro = nodes[1].getdevicepublickey('0/0')
assert(addrtest == ro['address'])
addrtest = nodes[2].getnewstealthaddress('', '0', '', True, True)
assert(addrtest == hwsxaddr)
addrtest2 = nodes[2].getnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True, True)
assert(addrtest2 == hwsxaddr2)
extaddr2_0 = nodes[2].getnewextaddress()
assert(extaddr1_0 == extaddr2_0)
# Ensure account matches after node restarts
account1 = nodes[1].extkey('account')
self.restart_node(1, extra_args=self.extra_args[1] + ['-wallet=default_wallet',])
account1_r = nodes[1].extkey('account')
assert(json.dumps(account1) == json.dumps(account1_r))
# Test for coverage
assert(nodes[1].promptunlockdevice()['sent'] is True)
assert(nodes[1].unlockdevice('123')['unlocked'] is True)
assert_raises_rpc_error(-8, 'Neither a pin nor a passphraseword was provided.', nodes[1].unlockdevice)
assert('complete' in nodes[1].devicebackup())
assert('complete' in nodes[1].deviceloadmnemonic())
# Entry point when run directly as a functional test script.
if __name__ == '__main__':
    USBDeviceTest().main()
| 38.7125
| 158
| 0.6347
|
import os
import json
import configparser
from test_framework.test_falcon import (
FalconTestFramework,
isclose,
getIndexAtProperty,
)
from test_framework.test_framework import SkipTest
from test_framework.util import assert_raises_rpc_error
from test_framework.authproxy import JSONRPCException
class USBDeviceTest(FalconTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [ ['-debug','-noacceptnonstdtxn','-reservebalance=10000000', '-txindex'] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
self.connect_nodes_bi(0, 1)
self.connect_nodes_bi(0, 2)
self.connect_nodes_bi(1, 2)
self.sync_all()
def run_test(self):
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_USBDEVICE"):
raise SkipTest("falcond has not been built with usb device enabled.")
nodes = self.nodes
self.import_genesis_coins_a(nodes[0])
ro = nodes[1].listdevices()
assert(len(ro) == 1)
assert(ro[0]['vendor'] == 'Debug')
assert(ro[0]['product'] == 'Device')
ro = nodes[1].getdeviceinfo()
assert(ro['device'] == 'debug')
ro = nodes[1].getdevicepublickey('0')
assert(ro['address'] == 'praish9BVxVdhykpqBYEs6L65AQ7iKd9z1')
assert(ro['path'] == "m/44'/1'/0'/0")
ro = nodes[1].getdevicepublickey('0/1')
assert(ro['address'] == 'peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR')
assert(ro['path'] == "m/44'/1'/0'/0/1")
ro = nodes[1].getdevicexpub("m/44'/1'/0'", "")
assert(ro == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
message = 'This is just a test message'
sig = nodes[1].devicesignmessage('0/1', message)
assert(True == nodes[1].verifymessage('peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR', sig, message))
ro = nodes[1].initaccountfromdevice('test_acc')
assert(ro['extkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro['path'] == "m/44'/1'/0'")
ro = nodes[1].extkey('list', 'true')
assert(len(ro) == 1)
assert(ro[0]['path'] == "m/44h/1h/0h")
assert(ro[0]['epkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro[0]['label'] == 'test_acc')
assert(ro[0]['hardware_device'] == '0xffff 0x0001')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addr1_0 = nodes[1].getnewaddress('lbl1_0')
ro = nodes[1].filteraddresses()
assert(len(ro) == 1)
assert(ro[0]['path'] == 'm/0/0')
assert(ro[0]['owned'] == 'true')
assert(ro[0]['label'] == 'lbl1_0')
va_addr1_0 = nodes[1].getaddressinfo(addr1_0)
assert(va_addr1_0['ismine'] == True)
assert(va_addr1_0['iswatchonly'] == False)
assert(va_addr1_0['isondevice'] == True)
assert(va_addr1_0['path'] == 'm/0/0')
try:
nodes[1].getnewstealthaddress()
raise AssertionError('Should have failed.')
except JSONRPCException as e:
pass
extaddr1_0 = nodes[1].getnewextaddress()
txnid0 = nodes[0].sendtoaddress(addr1_0, 6)
txnid1 = nodes[0].sendtoaddress(extaddr1_0, 6)
self.stakeBlocks(1)
block_txns = nodes[0].getblock(nodes[0].getblockhash(nodes[0].getblockcount()))['tx']
assert(txnid0 in block_txns)
assert(txnid1 in block_txns)
ro = nodes[1].getwalletinfo()
assert(isclose(ro['balance'], 12.0))
addr0_0 = nodes[0].getnewaddress()
hexRaw = nodes[1].createrawtransaction([], {addr0_0:10})
hexFunded = nodes[1].fundrawtransaction(hexRaw)['hex']
txDecoded = nodes[1].decoderawtransaction(hexFunded)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['complete'] == True)
txnid1 = nodes[1].sendrawtransaction(ro['hex'])
self.sync_all()
self.stakeBlocks(1)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['errors'][0]['error'] == 'Input not found or already spent')
prevtxns = []
for vin in txDecoded['vin']:
rtx = nodes[1].getrawtransaction(vin['txid'], True)
prev_out = rtx['vout'][vin['vout']]
prevtxns.append({'txid': vin['txid'], 'vout': vin['vout'], 'scriptPubKey': prev_out['scriptPubKey']['hex'], 'amount': prev_out['value']})
ro = nodes[1].devicesignrawtransaction(hexFunded, prevtxns, ['0/0', '2/0'])
assert(ro['complete'] == True)
ro = nodes[1].listunspent()
assert(ro[0]['ondevice'] == True)
txnid2 = nodes[1].sendtoaddress(addr0_0, 0.1)
self.sync_all()
nodes[0].syncwithvalidationinterfacequeue()
assert(nodes[0].filtertransactions()[0]['txid'] == txnid2)
hwsxaddr = nodes[1].devicegetnewstealthaddress()
assert(hwsxaddr == 'tps1qqpdwu7gqjqz9s9wfek843akvkzvw0xq3tkzs93sj4ceq60cp54mvzgpqf4tp6d7h0nza2xe362am697dax24hcr33yxqwvq58l5cf6j6q5hkqqqgykgrc')
hwsxaddr2 = nodes[1].devicegetnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True)
assert(hwsxaddr2 == 'tps1qqpewyspjp93axk82zahx5xfjyprpvypfgnp95n9aynxxw3w0qs63acpq0s5z2rwk0raczg8jszl9qy5stncud76ahr5etn9hqmp30e3e86w2qqypgh9sgv0')
ro = nodes[1].getaddressinfo(hwsxaddr2)
assert(ro['prefix_num_bits'] == 4)
assert(ro['prefix_bitfield'] == '0x000a')
assert(ro['isondevice'] == True)
ro = nodes[1].liststealthaddresses()
assert(len(ro[0]['Stealth Addresses']) == 2)
ro = nodes[1].filteraddresses()
assert(len(ro) == 3)
txnid3 = nodes[0].sendtoaddress(hwsxaddr, 0.1, '', '', False, 'test msg')
self.stakeBlocks(1)
ro = nodes[1].listtransactions()
assert(len(ro) == 5)
assert('test msg' in self.dumpj(ro[4]))
ro = nodes[1].listunspent()
inputs = []
for output in ro:
if output['txid'] == txnid3:
inputs.append({'txid' : txnid3, 'vout' : output['vout']})
break
assert(len(inputs) > 0)
hexRaw = nodes[1].createrawtransaction(inputs, {addr0_0:0.09})
ro = nodes[1].devicesignrawtransactionwithwallet(hexRaw)
assert(ro['complete'] == True)
rootkey = nodes[2].extkeyaltversion('xparFdrwJK7K2nfYzrkEqAKr5EcJNdY4c6ZNoLFFx1pMXQSQpo5MAufjogrS17RkqsLAijZJaBDHhG3G7SuJjtsTmRRTEKZDzGMnVCeX59cQCiR')
ro = nodes[2].extkey('import', rootkey, 'master key', True)
ro = nodes[2].extkey('setmaster', ro['id'])
assert(ro['result'] == 'Success.')
ro = nodes[2].extkey('deriveaccount', 'test account')
ro = nodes[2].extkey('setdefaultaccount', ro['account'])
assert(ro['result'] == 'Success.')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addrtest = nodes[2].getnewaddress()
ro = nodes[1].getdevicepublickey('0/0')
assert(addrtest == ro['address'])
addrtest = nodes[2].getnewstealthaddress('', '0', '', True, True)
assert(addrtest == hwsxaddr)
addrtest2 = nodes[2].getnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True, True)
assert(addrtest2 == hwsxaddr2)
extaddr2_0 = nodes[2].getnewextaddress()
assert(extaddr1_0 == extaddr2_0)
account1 = nodes[1].extkey('account')
self.restart_node(1, extra_args=self.extra_args[1] + ['-wallet=default_wallet',])
account1_r = nodes[1].extkey('account')
assert(json.dumps(account1) == json.dumps(account1_r))
assert(nodes[1].promptunlockdevice()['sent'] is True)
assert(nodes[1].unlockdevice('123')['unlocked'] is True)
assert_raises_rpc_error(-8, 'Neither a pin nor a passphraseword was provided.', nodes[1].unlockdevice)
assert('complete' in nodes[1].devicebackup())
assert('complete' in nodes[1].deviceloadmnemonic())
if __name__ == '__main__':
USBDeviceTest().main()
| true
| true
|
f708c3bb5a529fe11a122490916ffbb446bcaccc
| 5,304
|
py
|
Python
|
submit.py
|
Complicateddd/Complicateddd-ROITransformer
|
2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
[
"Apache-2.0"
] | null | null | null |
submit.py
|
Complicateddd/Complicateddd-ROITransformer
|
2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
[
"Apache-2.0"
] | null | null | null |
submit.py
|
Complicateddd/Complicateddd-ROITransformer
|
2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
[
"Apache-2.0"
] | 1
|
2021-12-17T12:49:06.000Z
|
2021-12-17T12:49:06.000Z
|
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb
def py_cpu_nms_poly_fast_np(dets, thresh):
    """Non-maximum suppression for rotated (polygon) detections.

    A cheap horizontal-bounding-box (HBB) overlap test pre-filters
    candidate pairs; the exact polygon IoU is only computed for pairs
    whose HBBs actually intersect.

    Args:
        dets: (N, 9) array; each row is 8 polygon coordinates
            (x0, y0, ..., x3, y3) followed by the detection score.
        thresh: IoU threshold above which the lower-scored detection
            is suppressed.

    Returns:
        List of row indices into ``dets`` that survive suppression,
        in descending score order.
    """
    obbs = dets[:, 0:-1]
    # Axis-aligned bounding box of each polygon, used for the pre-test.
    x1 = np.min(obbs[:, 0::2], axis=1)
    y1 = np.min(obbs[:, 1::2], axis=1)
    x2 = np.max(obbs[:, 0::2], axis=1)
    y2 = np.max(obbs[:, 1::2], axis=1)
    scores = dets[:, 8]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Build the polygon objects once up front for the exact IoU calls.
    polys = []
    for i in range(len(dets)):
        tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
                                           dets[i][2], dets[i][3],
                                           dets[i][4], dets[i][5],
                                           dets[i][6], dets[i][7]])
        polys.append(tm_polygon)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # HBB intersection-over-union against all remaining candidates.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1)
        h = np.maximum(0.0, yy2 - yy1)
        hbb_inter = w * h
        hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
        # Only pairs whose HBBs overlap need the exact polygon IoU.
        h_inds = np.where(hbb_ovr > 0)[0]
        tmp_order = order[h_inds + 1]
        for j in range(tmp_order.size):
            hbb_ovr[h_inds[j]] = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
        # NOTE: the original contained dead debug code here — an unused
        # ``ovr`` list whose ``math.isnan(ovr[0])`` check always raised
        # IndexError into a bare ``except: pass``; removed (no behavior
        # change, the pdb.set_trace() could never fire).
        inds = np.where(hbb_ovr <= thresh)[0]
        order = order[inds + 1]
    return keep
class DetectorModel():
    """Thin wrapper around an mmdet RoITransformer detector.

    Loads the config and checkpoint once and exposes single-image
    inference with per-class polygon NMS applied to the raw output.
    """
    def __init__(self,
                 config_file,
                 checkpoint_file):
        # init RoITransformer
        self.config_file = config_file
        self.checkpoint_file = checkpoint_file
        self.cfg = Config.fromfile(self.config_file)
        self.data_test = self.cfg.data['test']
        self.dataset = get_dataset(self.data_test)
        # Fixed class ids used by this dataset; the per-class NMS loop
        # below iterates over this tuple, so it can be changed freely.
        self.classnames = ('1', '2', '3', '4', '5')
        self.model = init_detector(config_file, checkpoint_file, device='cuda:0')

    def inference_single(self, imagname):
        """Run the detector on one image and NMS-filter each class.

        Args:
            imagname: Path to the image to run inference on.

        Returns:
            List with one (N, 9) array per class: 8 polygon
            coordinates followed by the detection score.
        """
        img = mmcv.imread(imagname)
        # Unpacking the shape doubles as a sanity check that the image
        # was actually read (mmcv.imread yields None on failure).
        height, width, channel = img.shape
        chip_detections = inference_detector(self.model, img)
        # Per-class polygon NMS. Iterate over all classes instead of the
        # previous hard-coded range(5) so classnames stays the single
        # source of truth.
        for i in range(len(self.classnames)):
            keep = py_cpu_nms_poly_fast_np(chip_detections[i], 0.1)
            chip_detections[i] = chip_detections[i][keep]
        return chip_detections

    def inference_single_vis(self, srcpath, dstpath):
        """Run inference on ``srcpath`` and write a visualisation image
        (detections above score 0.3) to ``dstpath``."""
        detections = self.inference_single(srcpath)
        print(detections)
        img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.3)
        cv2.imwrite(dstpath, img)
# Submission script: run the detector over every test image and write one
# line per kept detection to the competition result file.
if __name__ == '__main__':
    import tqdm
    roitransformer = DetectorModel(r'configs/Huojianjun/faster_rcnn_RoITrans_r101x_fpn_1x_anchors_augs_augfpn.py',
                                   r'work_dirs/faster_rcnn_RoITrans_r101_all_aug_rote_1333_crop_rote/epoch_278.pth')
    # roitransformer.inference_single_vis(r'demo/48.tif',
    #                                     r'demo/48_out.tif',
    #                                     (1024, 1024),
    #                                     (1024, 1024))
    # Keep almost every detection; downstream scoring handles thresholds.
    threshold=0.0001
    class_names=('1', '2', '3', '4', '5')
    import os
    path="/media/ubuntu/data/huojianjun/科目四/科目四/test2"
    file_img_name=os.listdir(path)
    result_file=open("./科目四_莘莘学子.txt",'w')
    # print(file_img_name)
    count=0
    # Clamp a coordinate into the valid image range [0, 1024] as an int
    # (assumes 1024x1024 test images — TODO confirm against the dataset).
    def filer(x):
        x=int(x)
        if x>1024:
            return 1024
        if x<0:
            return 0
        else:
            return x
    for name in tqdm.tqdm(file_img_name):
        # count+=1
        path_img=os.path.join(path,name)
        detection_result=roitransformer.inference_single(path_img)
        # One result line per detection: image, class, score, 8 coords.
        for j, name_cls in enumerate(class_names):
            dets = detection_result[j]
            for det in dets:
                bbox = det[:8]
                score = round(det[-1],2)
                if score < threshold:
                    continue
                bbox = list(map(filer, bbox))
                # print(bbox)
                # print(score)
                # print(name_cls)
                result_file.writelines(name+" "+str(name_cls)+" "+str(score)+" "
                    +str(bbox[0])
                    +" "+str(bbox[1])+" "+str(bbox[2])+" "+str(bbox[3])
                    +" "+str(bbox[4])+" "+str(bbox[5])+" "+str(bbox[6])
                    +" "+str(bbox[7]))
                result_file.writelines("\n")
        count+=1
        # if name=="3.tif":
        # print(count)
        # if count==3:
        #     break
        # print(path_img)
| 34
| 114
| 0.534691
|
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb
def py_cpu_nms_poly_fast_np(dets, thresh):
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
try:
if math.isnan(ovr[0]):
pdb.set_trace()
except:
pass
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
class DetectorModel():
def __init__(self,
config_file,
checkpoint_file):
self.config_file = config_file
self.checkpoint_file = checkpoint_file
self.cfg = Config.fromfile(self.config_file)
self.data_test = self.cfg.data['test']
self.dataset = get_dataset(self.data_test)
self.classnames = ('1', '2', '3', '4', '5')
self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
def inference_single(self, imagname):
img = mmcv.imread(imagname)
height, width, channel = img.shape
total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]
chip_detections = inference_detector(self.model, img)
for i in range(5):
keep = py_cpu_nms_poly_fast_np(chip_detections[i], 0.1)
chip_detections[i] = chip_detections[i][keep]
return chip_detections
def inference_single_vis(self, srcpath, dstpath):
detections = self.inference_single(srcpath)
print(detections)
img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.3)
cv2.imwrite(dstpath, img)
if __name__ == '__main__':
import tqdm
roitransformer = DetectorModel(r'configs/Huojianjun/faster_rcnn_RoITrans_r101x_fpn_1x_anchors_augs_augfpn.py',
r'work_dirs/faster_rcnn_RoITrans_r101_all_aug_rote_1333_crop_rote/epoch_278.pth')
threshold=0.0001
class_names=('1', '2', '3', '4', '5')
import os
path="/media/ubuntu/data/huojianjun/科目四/科目四/test2"
file_img_name=os.listdir(path)
result_file=open("./科目四_莘莘学子.txt",'w')
count=0
def filer(x):
x=int(x)
if x>1024:
return 1024
if x<0:
return 0
else:
return x
for name in tqdm.tqdm(file_img_name):
path_img=os.path.join(path,name)
detection_result=roitransformer.inference_single(path_img)
for j, name_cls in enumerate(class_names):
dets = detection_result[j]
for det in dets:
bbox = det[:8]
score = round(det[-1],2)
if score < threshold:
continue
bbox = list(map(filer, bbox))
result_file.writelines(name+" "+str(name_cls)+" "+str(score)+" "
+str(bbox[0])
+" "+str(bbox[1])+" "+str(bbox[2])+" "+str(bbox[3])
+" "+str(bbox[4])+" "+str(bbox[5])+" "+str(bbox[6])
+" "+str(bbox[7]))
result_file.writelines("\n")
count+=1
| true
| true
|
f708c565a30af39e3fe1c4a21b9dd18553b91c54
| 17,392
|
py
|
Python
|
nsot/api/serializers.py
|
narJH27/nsot
|
22e6a81c76147e55ab9a19eb55cdc741c5723fbc
|
[
"Apache-2.0"
] | null | null | null |
nsot/api/serializers.py
|
narJH27/nsot
|
22e6a81c76147e55ab9a19eb55cdc741c5723fbc
|
[
"Apache-2.0"
] | null | null | null |
nsot/api/serializers.py
|
narJH27/nsot
|
22e6a81c76147e55ab9a19eb55cdc741c5723fbc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import ast
from collections import OrderedDict
import json
import logging
from django.contrib.auth import get_user_model
from rest_framework import fields, serializers
from rest_framework_bulk import BulkSerializerMixin, BulkListSerializer
from . import auth
from .. import exc, models, validators
from ..util import get_field_attr
log = logging.getLogger(__name__)
###############
# Custom Fields
###############
class JSONDataField(fields.Field):
    """
    Base field used to represent attributes as JSON <-> ``field_type``.

    It is an error if ``field_type`` is not defined in a subclass.
    """
    field_type = None  # Subclasses must set this (e.g. dict, list).

    def to_representation(self, value):
        """Emit the stored value unchanged."""
        return value

    def to_internal_value(self, data):
        """Coerce ``data`` into an instance of ``field_type``.

        Falsy input becomes an empty ``field_type``; an instance of
        ``field_type`` passes through; anything else is parsed first as
        JSON and then, failing that, as a Python literal.

        Raises:
            exc.ValidationError: If the value cannot be parsed.
        """
        log.debug('JSONDictField.to_internal_value() data = %r', data)
        if self.field_type is None:
            raise NotImplementedError(
                'You must subclass JSONDataField and define field_type'
            )
        if not data:
            data = self.field_type()
        if isinstance(data, self.field_type):
            return data
        # Try it as a regular JSON object
        try:
            return json.loads(data)
        except ValueError:
            # Or try it as a Python object
            try:
                return ast.literal_eval(data)
            except (SyntaxError, ValueError) as err:
                raise exc.ValidationError(err)
        except Exception as err:
            raise exc.ValidationError(err)
        # The unreachable trailing ``return data`` from the original was
        # removed: every path above either returns or raises.
class JSONDictField(JSONDataField):
    """Field used to represent attributes as JSON <-> Dict."""
    # Parsed/validated values must be dicts.
    field_type = dict
class JSONListField(JSONDataField):
    """Field used to represent attributes as JSON <-> List."""
    # Parsed/validated values must be lists.
    field_type = list
class MACAddressField(fields.Field):
    """Field used to validate MAC address objects as integer or string."""
    def to_representation(self, value):
        # Emit the stored value unchanged.
        return value
    def to_internal_value(self, value):
        # Delegate validation/normalization to the shared validator.
        return validators.validate_mac_address(value)
###################
# Base Serializer #
###################
class NsotSerializer(serializers.ModelSerializer):
    """Base serializer that logs change events."""
    def to_internal_value(self, data):
        """Inject site_pk from view's kwargs if it's not already in data."""
        # The view is available via serializer context; nested routes carry
        # the parent site's pk as the 'site_pk' URL kwarg.
        kwargs = self.context['view'].kwargs
        log.debug(
            'NsotSerializer.to_internal_value() data [before] = %r', data
        )
        if 'site_id' not in data and 'site_pk' in kwargs:
            data['site_id'] = kwargs['site_pk']
        log.debug('NsotSerializer.to_internal_value() data [after] = %r', data)
        return super(NsotSerializer, self).to_internal_value(data)
    def to_representation(self, obj):
        """Always return the dict representation."""
        # An OrderedDict means the object was already serialized upstream.
        if isinstance(obj, OrderedDict):
            return obj
        return obj.to_dict()
######
# User
######
class UserSerializer(serializers.ModelSerializer):
    """
    UserProxy model serializer that takes optional `with_secret_key` argument
    that controls whether the secret_key for the user should be displayed.
    """
    def __init__(self, *args, **kwargs):
        # Don't pass `with_secret_key` up to the superclass
        self.with_secret_key = kwargs.pop('with_secret_key', None)
        super(UserSerializer, self).__init__(*args, **kwargs)
        # If we haven't passed `with_secret_key`, don't show the secret_key
        # field.
        if self.with_secret_key is None:
            self.fields.pop('secret_key')
    # Read-only list of the user's permissions, from get_permissions().
    permissions = fields.ReadOnlyField(source='get_permissions')
    class Meta:
        model = get_user_model()
        fields = ('id', 'email', 'permissions', 'secret_key')
######
# Site
######
class SiteSerializer(serializers.ModelSerializer):
    """Serializer for Site objects (all model fields exposed)."""
    class Meta:
        model = models.Site
        fields = '__all__'
#########
# Changes
#########
class ChangeSerializer(NsotSerializer):
    """Used for displaying Change events."""
    class Meta:
        model = models.Change
        fields = '__all__'
###########
# Attribute
###########
class AttributeSerializer(NsotSerializer):
    """Used for GET, DELETE on Attributes."""
    class Meta:
        model = models.Attribute
        fields = '__all__'
class AttributeCreateSerializer(AttributeSerializer):
    """Used for POST on Attributes."""
    # Constraints arrive as JSON/dict text and are coerced to a dict.
    constraints = JSONDictField(
        required=False,
        label=get_field_attr(models.Attribute, 'constraints', 'verbose_name'),
        help_text=get_field_attr(models.Attribute, 'constraints', 'help_text')
    )
    # Explicit site_id so creation can target a site outside nested routes.
    site_id = fields.IntegerField(
        label=get_field_attr(models.Attribute, 'site', 'verbose_name'),
        help_text=get_field_attr(models.Attribute, 'site', 'help_text')
    )
    class Meta:
        model = models.Attribute
        fields = ('name', 'description', 'resource_name', 'required',
                  'display', 'multi', 'constraints', 'site_id')
class AttributeUpdateSerializer(BulkSerializerMixin,
                                AttributeCreateSerializer):
    """
    Used for PUT, PATCH, on Attributes.
    Currently because Attributes have only one required field (name), and it
    may not be updated, there is not much functional difference between PUT and
    PATCH.
    """
    class Meta:
        model = models.Attribute
        list_serializer_class = BulkListSerializer
        fields = ('id', 'description', 'required', 'display', 'multi',
                  'constraints')
#######
# Value
#######
class ValueSerializer(serializers.ModelSerializer):
    """Used for GET, DELETE on Values."""
    class Meta:
        model = models.Value
        fields = ('id', 'name', 'value', 'attribute', 'resource_name',
                  'resource_id')
    # Not sure if we want to view an attribute value w/ so much context just
    # yet.
    # def to_representation(self, obj):
    #     return obj.to_dict()
class ValueCreateSerializer(ValueSerializer):
    """Used for POST on Values."""
    # name/resource_name are derived server-side, hence read-only here.
    class Meta:
        model = models.Value
        read_only_fields = ('id', 'name', 'resource_name')
        fields = ('id', 'name', 'value', 'attribute', 'resource_name',
                  'resource_id')
###########
# Resources
###########
class ResourceSerializer(NsotSerializer):
    """For any object that can have attributes."""
    attributes = JSONDictField(
        required=False,
        help_text='Dictionary of attributes to set.'
    )
    def create(self, validated_data, commit=True):
        """Create that is aware of attributes.

        ``commit=False`` lets subclasses defer the final save until
        additional related data has been applied.
        """
        # Remove the related fields before we write the object
        attributes = validated_data.pop('attributes', {})
        # Save the base object to the database.
        obj = super(ResourceSerializer, self).create(validated_data)
        # Try to populate the related fields and if there are any validation
        # problems, delete the object and re-raise the error. If not, save the
        # changes.
        try:
            obj.set_attributes(attributes)
        except exc.ValidationError:
            obj.delete()
            raise
        else:
            if commit:
                obj.save()
        return obj
    def update(self, instance, validated_data, commit=True):
        """
        Update that is aware of attributes.
        This will not set attributes if they are not provided during a partial
        update.
        """
        # Remove related fields before we write the object
        attributes = validated_data.pop('attributes', None)
        # Save the object to the database.
        obj = super(ResourceSerializer, self).update(
            instance, validated_data
        )
        # If attributes have been provided, populate them and save the object,
        # allowing any validation errors to raise before saving.
        obj.set_attributes(attributes, partial=self.partial)
        if commit:
            obj.save()
        return obj
########
# Device
########
class DeviceSerializer(ResourceSerializer):
    """Used for GET, DELETE on Devices."""
    class Meta:
        model = models.Device
        fields = '__all__'
class DeviceCreateSerializer(DeviceSerializer):
    """Used for POST on Devices."""
    # Explicit site_id so creation can target a site outside nested routes.
    site_id = fields.IntegerField(
        label=get_field_attr(models.Device, 'site', 'verbose_name'),
        help_text=get_field_attr(models.Device, 'site', 'help_text')
    )
    class Meta:
        model = models.Device
        fields = ('hostname', 'attributes', 'site_id')
class DeviceUpdateSerializer(BulkSerializerMixin, DeviceCreateSerializer):
    """Used for PUT on Devices."""
    # PUT replaces the whole object, so attributes must be supplied.
    attributes = JSONDictField(
        required=True,
        help_text='Dictionary of attributes to set.'
    )
    class Meta:
        model = models.Device
        list_serializer_class = BulkListSerializer
        fields = ('id', 'hostname', 'attributes')
class DevicePartialUpdateSerializer(BulkSerializerMixin,
                                    DeviceCreateSerializer):
    """Used for PATCH on Devices."""
    # PATCH inherits optional attributes, allowing partial updates.
    class Meta:
        model = models.Device
        list_serializer_class = BulkListSerializer
        fields = ('id', 'hostname', 'attributes')
#########
# Network
#########
class NetworkSerializer(ResourceSerializer):
"""Used for GET, DELETE on Networks."""
class Meta:
model = models.Network
fields = '__all__'
class NetworkCreateSerializer(NetworkSerializer):
"""Used for POST on Networks."""
cidr = fields.CharField(
write_only=True, required=False, label='CIDR',
help_text=(
'IPv4/IPv6 CIDR address. If provided, this overrides the value of '
'network_address & prefix_length. If not provided, '
'network_address & prefix_length are required.'
)
)
network_address = fields.ModelField(
model_field=models.Network._meta.get_field('network_address'),
required=False,
label=get_field_attr(
models.Network, 'network_address', 'verbose_name'
),
help_text=get_field_attr(
models.Network, 'network_address', 'help_text'
),
)
prefix_length = fields.IntegerField(
required=False,
label=get_field_attr(models.Network, 'prefix_length', 'verbose_name'),
help_text=get_field_attr(models.Network, 'prefix_length', 'help_text'),
)
site_id = fields.IntegerField(
label=get_field_attr(models.Network, 'site', 'verbose_name'),
help_text=get_field_attr(models.Network, 'site', 'help_text')
)
class Meta:
model = models.Network
fields = ('cidr', 'network_address', 'prefix_length', 'attributes',
'state', 'site_id')
class NetworkUpdateSerializer(BulkSerializerMixin, NetworkCreateSerializer):
    """Used for PUT on Networks."""
    # PUT is a full replace, so attributes must always be supplied.
    attributes = JSONDictField(
        required=True,
        help_text='Dictionary of attributes to set.'
    )
    class Meta:
        model = models.Network
        # Enables bulk (list) PUT of multiple Networks in a single request.
        list_serializer_class = BulkListSerializer
        fields = ('id', 'attributes', 'state')
class NetworkPartialUpdateSerializer(BulkSerializerMixin,
                                     NetworkCreateSerializer):
    """Used for PATCH on Networks."""
    # PATCH: only the supplied fields are modified; attributes stay optional.
    class Meta:
        model = models.Network
        list_serializer_class = BulkListSerializer
        fields = ('id', 'attributes', 'state')
###########
# Interface
###########
class InterfaceSerializer(ResourceSerializer):
    """Used for GET, DELETE on Interfaces."""
    # Optional parent Interface reference by id (nullable).
    parent_id = fields.IntegerField(
        required=False, allow_null=True,
        label=get_field_attr(models.Interface, 'parent', 'verbose_name'),
        help_text=get_field_attr(models.Interface, 'parent', 'help_text'),
    )
    class Meta:
        model = models.Interface
        fields = '__all__'
    def create(self, validated_data):
        """Create an Interface and assign its host addresses.

        The base object is written first with ``commit=False``; if address
        assignment then fails validation, the half-created object is deleted
        so no orphan row is left behind.
        """
        log.debug('InterfaceCreateSerializer.create() validated_data = %r',
                  validated_data)
        # Remove the related fields before we write the object
        addresses = validated_data.pop('addresses', [])
        # Create the base object to the database, but don't save attributes
        # yet.
        obj = super(InterfaceSerializer, self).create(
            validated_data, commit=False
        )
        # Try to populate the related fields and if there are any validation
        # problems, delete the object and re-raise the error. If not, save the
        # changes.
        try:
            obj.set_addresses(addresses)
        except exc.ValidationError:
            obj.delete()
            raise
        else:
            obj.save()
        return obj
    def update(self, instance, validated_data):
        """Update an Interface, overwriting its address assignments."""
        log.debug('InterfaceUpdateSerializer.update() validated_data = %r',
                  validated_data)
        # Remove related fields before we write the object. Attributes are
        # handled by the parent.
        addresses = validated_data.pop('addresses', None)
        # Update the attributes in the database, but don't save them yet.
        obj = super(InterfaceSerializer, self).update(
            instance, validated_data, commit=False
        )
        # Assign the address objects to the Interface. `partial` distinguishes
        # PATCH (merge) from PUT (replace) semantics downstream.
        obj.set_addresses(addresses, overwrite=True, partial=self.partial)
        obj.save()
        return obj
class InterfaceCreateSerializer(InterfaceSerializer):
    """Used for POST on Interfaces."""
    addresses = JSONListField(
        required=False, help_text='List of host addresses to assign.'
    )
    # Validated through validators.validate_mac_address (see MACAddressField).
    mac_address = MACAddressField(
        required=False, allow_null=True,
        label=get_field_attr(models.Interface, 'mac_address', 'verbose_name'),
        help_text=get_field_attr(models.Interface, 'mac_address', 'help_text'),
    )
    class Meta:
        model = models.Interface
        fields = ('device', 'name', 'description', 'type', 'mac_address',
                  'speed', 'parent_id', 'addresses', 'attributes')
class InterfaceUpdateSerializer(BulkSerializerMixin,
                                InterfaceCreateSerializer):
    """Used for PUT on Interfaces."""
    # NOTE: the original docstring was malformed ("Used ... .""" — missing
    # two leading quotes, which made it parse as implicit string
    # concatenation); restored to a proper triple-quoted docstring.
    # PUT is a full replace: both addresses and attributes are required.
    addresses = JSONListField(
        required=True, help_text='List of host addresses to assign.'
    )
    attributes = JSONDictField(
        required=True,
        help_text='Dictionary of attributes to set.'
    )
    class Meta:
        model = models.Interface
        list_serializer_class = BulkListSerializer
        fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
                  'parent_id', 'addresses', 'attributes')
class InterfacePartialUpdateSerializer(BulkSerializerMixin,
                                       InterfaceCreateSerializer):
    """Used for PATCH on Interfaces."""
    # NOTE: the original docstring was malformed (missing two leading
    # quotes); restored to a proper triple-quoted docstring.
    # PATCH: only the supplied fields are modified.
    class Meta:
        model = models.Interface
        list_serializer_class = BulkListSerializer
        fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
                  'parent_id', 'addresses', 'attributes')
#########
# Circuit
#########
class CircuitSerializer(ResourceSerializer):
    """Used for GET, DELETE on Circuits"""
    # Read-oriented serializer: exposes every model field as-is.
    class Meta:
        model = models.Circuit
        fields = '__all__'
class CircuitCreateSerializer(CircuitSerializer):
    """Used for POST on Circuits."""
    class Meta:
        model = models.Circuit
        # Display name and site are auto-generated, don't include them here
        fields = ('endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitUpdateSerializer(BulkSerializerMixin, CircuitCreateSerializer):
    """Used for PUT on Circuits."""
    # PUT is a full replace, so attributes must always be supplied.
    attributes = JSONDictField(
        required=True, help_text='Dictionary of attributes to set.'
    )
    class Meta:
        model = models.Circuit
        # Enables bulk (list) PUT of multiple Circuits in a single request.
        list_serializer_class = BulkListSerializer
        fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitPartialUpdateSerializer(BulkSerializerMixin,
                                     CircuitCreateSerializer):
    """Used for PATCH on Circuits."""
    # PATCH: only the supplied fields are modified; attributes stay optional.
    class Meta:
        model = models.Circuit
        list_serializer_class = BulkListSerializer
        fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
###########
# AuthToken
###########
class AuthTokenSerializer(serializers.Serializer):
    """
    AuthToken authentication serializer to validate username/secret_key inputs.
    """
    email = serializers.CharField(help_text='Email address of the user.')
    secret_key = serializers.CharField(
        label='Secret Key', help_text='Secret key of the user.'
    )
    def validate(self, attrs):
        """Authenticate the email/secret_key pair and stash the user.

        Raises exc.ValidationError when either credential is missing, the
        pair does not authenticate, or the account is inactive. On success
        the authenticated user is stored under ``attrs['user']``.
        """
        email = attrs.get('email')
        secret_key = attrs.get('secret_key')
        # Guard clauses replace the original nested if/else; the error
        # messages are identical.
        if not (email and secret_key):
            raise exc.ValidationError('Must include "email" and "secret_key"')
        auth_func = auth.SecretKeyAuthentication().authenticate_credentials
        user, secret_key = auth_func(email, secret_key)
        if not user:
            raise exc.ValidationError(
                'Unable to login with provided credentials.'
            )
        if not user.is_active:
            raise exc.ValidationError('User account is disabled.')
        attrs['user'] = user
        return attrs
| 30.673721
| 79
| 0.626437
|
from __future__ import unicode_literals
import ast
from collections import OrderedDict
import json
import logging
from django.contrib.auth import get_user_model
from rest_framework import fields, serializers
from rest_framework_bulk import BulkSerializerMixin, BulkListSerializer
from . import auth
from .. import exc, models, validators
from ..util import get_field_attr
log = logging.getLogger(__name__)
class JSONDataField(fields.Field):
field_type = None
def to_representation(self, value):
return value
def to_internal_value(self, data):
log.debug('JSONDictField.to_internal_value() data = %r', data)
if self.field_type is None:
raise NotImplementedError(
'You must subclass JSONDataField and define field_type'
)
if not data:
data = self.field_type()
if isinstance(data, self.field_type):
return data
try:
return json.loads(data)
except ValueError:
try:
return ast.literal_eval(data)
except (SyntaxError, ValueError) as err:
raise exc.ValidationError(err)
except Exception as err:
raise exc.ValidationError(err)
return data
class JSONDictField(JSONDataField):
field_type = dict
class JSONListField(JSONDataField):
field_type = list
class MACAddressField(fields.Field):
def to_representation(self, value):
return value
def to_internal_value(self, value):
return validators.validate_mac_address(value)
class NsotSerializer(serializers.ModelSerializer):
def to_internal_value(self, data):
kwargs = self.context['view'].kwargs
log.debug(
'NsotSerializer.to_internal_value() data [before] = %r', data
)
if 'site_id' not in data and 'site_pk' in kwargs:
data['site_id'] = kwargs['site_pk']
log.debug('NsotSerializer.to_internal_value() data [after] = %r', data)
return super(NsotSerializer, self).to_internal_value(data)
def to_representation(self, obj):
if isinstance(obj, OrderedDict):
return obj
return obj.to_dict()
class UserSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
self.with_secret_key = kwargs.pop('with_secret_key', None)
super(UserSerializer, self).__init__(*args, **kwargs)
# If we haven't passed `with_secret_key`, don't show the secret_key
# field.
if self.with_secret_key is None:
self.fields.pop('secret_key')
permissions = fields.ReadOnlyField(source='get_permissions')
class Meta:
model = get_user_model()
fields = ('id', 'email', 'permissions', 'secret_key')
######
# Site
######
class SiteSerializer(serializers.ModelSerializer):
class Meta:
model = models.Site
fields = '__all__'
#########
# Changes
#########
class ChangeSerializer(NsotSerializer):
class Meta:
model = models.Change
fields = '__all__'
###########
# Attribute
###########
class AttributeSerializer(NsotSerializer):
class Meta:
model = models.Attribute
fields = '__all__'
class AttributeCreateSerializer(AttributeSerializer):
constraints = JSONDictField(
required=False,
label=get_field_attr(models.Attribute, 'constraints', 'verbose_name'),
help_text=get_field_attr(models.Attribute, 'constraints', 'help_text')
)
site_id = fields.IntegerField(
label=get_field_attr(models.Attribute, 'site', 'verbose_name'),
help_text=get_field_attr(models.Attribute, 'site', 'help_text')
)
class Meta:
model = models.Attribute
fields = ('name', 'description', 'resource_name', 'required',
'display', 'multi', 'constraints', 'site_id')
class AttributeUpdateSerializer(BulkSerializerMixin,
AttributeCreateSerializer):
class Meta:
model = models.Attribute
list_serializer_class = BulkListSerializer
fields = ('id', 'description', 'required', 'display', 'multi',
'constraints')
#######
# Value
#######
class ValueSerializer(serializers.ModelSerializer):
class Meta:
model = models.Value
fields = ('id', 'name', 'value', 'attribute', 'resource_name',
'resource_id')
# Not sure if we want to view an attribute value w/ so much context just
# yet.
# def to_representation(self, obj):
# return obj.to_dict()
class ValueCreateSerializer(ValueSerializer):
class Meta:
model = models.Value
read_only_fields = ('id', 'name', 'resource_name')
fields = ('id', 'name', 'value', 'attribute', 'resource_name',
'resource_id')
###########
# Resources
###########
class ResourceSerializer(NsotSerializer):
attributes = JSONDictField(
required=False,
help_text='Dictionary of attributes to set.'
)
def create(self, validated_data, commit=True):
# Remove the related fields before we write the object
attributes = validated_data.pop('attributes', {})
# Save the base object to the database.
obj = super(ResourceSerializer, self).create(validated_data)
# Try to populate the related fields and if there are any validation
# problems, delete the object and re-raise the error. If not, save the
# changes.
try:
obj.set_attributes(attributes)
except exc.ValidationError:
obj.delete()
raise
else:
if commit:
obj.save()
return obj
def update(self, instance, validated_data, commit=True):
# Remove related fields before we write the object
attributes = validated_data.pop('attributes', None)
# Save the object to the database.
obj = super(ResourceSerializer, self).update(
instance, validated_data
)
# If attributes have been provided, populate them and save the object,
# allowing any validation errors to raise before saving.
obj.set_attributes(attributes, partial=self.partial)
if commit:
obj.save()
return obj
########
# Device
########
class DeviceSerializer(ResourceSerializer):
class Meta:
model = models.Device
fields = '__all__'
class DeviceCreateSerializer(DeviceSerializer):
site_id = fields.IntegerField(
label=get_field_attr(models.Device, 'site', 'verbose_name'),
help_text=get_field_attr(models.Device, 'site', 'help_text')
)
class Meta:
model = models.Device
fields = ('hostname', 'attributes', 'site_id')
class DeviceUpdateSerializer(BulkSerializerMixin, DeviceCreateSerializer):
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Device
list_serializer_class = BulkListSerializer
fields = ('id', 'hostname', 'attributes')
class DevicePartialUpdateSerializer(BulkSerializerMixin,
DeviceCreateSerializer):
class Meta:
model = models.Device
list_serializer_class = BulkListSerializer
fields = ('id', 'hostname', 'attributes')
#########
# Network
#########
class NetworkSerializer(ResourceSerializer):
class Meta:
model = models.Network
fields = '__all__'
class NetworkCreateSerializer(NetworkSerializer):
cidr = fields.CharField(
write_only=True, required=False, label='CIDR',
help_text=(
'IPv4/IPv6 CIDR address. If provided, this overrides the value of '
'network_address & prefix_length. If not provided, '
'network_address & prefix_length are required.'
)
)
network_address = fields.ModelField(
model_field=models.Network._meta.get_field('network_address'),
required=False,
label=get_field_attr(
models.Network, 'network_address', 'verbose_name'
),
help_text=get_field_attr(
models.Network, 'network_address', 'help_text'
),
)
prefix_length = fields.IntegerField(
required=False,
label=get_field_attr(models.Network, 'prefix_length', 'verbose_name'),
help_text=get_field_attr(models.Network, 'prefix_length', 'help_text'),
)
site_id = fields.IntegerField(
label=get_field_attr(models.Network, 'site', 'verbose_name'),
help_text=get_field_attr(models.Network, 'site', 'help_text')
)
class Meta:
model = models.Network
fields = ('cidr', 'network_address', 'prefix_length', 'attributes',
'state', 'site_id')
class NetworkUpdateSerializer(BulkSerializerMixin, NetworkCreateSerializer):
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Network
list_serializer_class = BulkListSerializer
fields = ('id', 'attributes', 'state')
class NetworkPartialUpdateSerializer(BulkSerializerMixin,
NetworkCreateSerializer):
class Meta:
model = models.Network
list_serializer_class = BulkListSerializer
fields = ('id', 'attributes', 'state')
###########
# Interface
###########
class InterfaceSerializer(ResourceSerializer):
parent_id = fields.IntegerField(
required=False, allow_null=True,
label=get_field_attr(models.Interface, 'parent', 'verbose_name'),
help_text=get_field_attr(models.Interface, 'parent', 'help_text'),
)
class Meta:
model = models.Interface
fields = '__all__'
def create(self, validated_data):
log.debug('InterfaceCreateSerializer.create() validated_data = %r',
validated_data)
# Remove the related fields before we write the object
addresses = validated_data.pop('addresses', [])
# Create the base object to the database, but don't save attributes
obj = super(InterfaceSerializer, self).create(
validated_data, commit=False
)
try:
obj.set_addresses(addresses)
except exc.ValidationError:
obj.delete()
raise
else:
obj.save()
return obj
def update(self, instance, validated_data):
log.debug('InterfaceUpdateSerializer.update() validated_data = %r',
validated_data)
addresses = validated_data.pop('addresses', None)
obj = super(InterfaceSerializer, self).update(
instance, validated_data, commit=False
)
# Assign the address objects to the Interface.
obj.set_addresses(addresses, overwrite=True, partial=self.partial)
obj.save()
return obj
class InterfaceCreateSerializer(InterfaceSerializer):
addresses = JSONListField(
required=False, help_text='List of host addresses to assign.'
)
mac_address = MACAddressField(
required=False, allow_null=True,
label=get_field_attr(models.Interface, 'mac_address', 'verbose_name'),
help_text=get_field_attr(models.Interface, 'mac_address', 'help_text'),
)
class Meta:
model = models.Interface
fields = ('device', 'name', 'description', 'type', 'mac_address',
'speed', 'parent_id', 'addresses', 'attributes')
class InterfaceUpdateSerializer(BulkSerializerMixin,
InterfaceCreateSerializer):
addresses = JSONListField(
required=True, help_text='List of host addresses to assign.'
)
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Interface
list_serializer_class = BulkListSerializer
fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
'parent_id', 'addresses', 'attributes')
class InterfacePartialUpdateSerializer(BulkSerializerMixin,
InterfaceCreateSerializer):
class Meta:
model = models.Interface
list_serializer_class = BulkListSerializer
fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
'parent_id', 'addresses', 'attributes')
#########
# Circuit
#########
class CircuitSerializer(ResourceSerializer):
class Meta:
model = models.Circuit
fields = '__all__'
class CircuitCreateSerializer(CircuitSerializer):
class Meta:
model = models.Circuit
# Display name and site are auto-generated, don't include them here
fields = ('endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitUpdateSerializer(BulkSerializerMixin, CircuitCreateSerializer):
attributes = JSONDictField(
required=True, help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Circuit
list_serializer_class = BulkListSerializer
fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitPartialUpdateSerializer(BulkSerializerMixin,
CircuitCreateSerializer):
class Meta:
model = models.Circuit
list_serializer_class = BulkListSerializer
fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
class AuthTokenSerializer(serializers.Serializer):
email = serializers.CharField(help_text='Email address of the user.')
secret_key = serializers.CharField(
label='Secret Key', help_text='Secret key of the user.'
)
def validate(self, attrs):
email = attrs.get('email')
secret_key = attrs.get('secret_key')
if email and secret_key:
auth_func = auth.SecretKeyAuthentication().authenticate_credentials
user, secret_key = auth_func(email, secret_key)
if user:
if not user.is_active:
msg = 'User account is disabled.'
raise exc.ValidationError(msg)
attrs['user'] = user
return attrs
else:
msg = 'Unable to login with provided credentials.'
raise exc.ValidationError(msg)
else:
msg = 'Must include "email" and "secret_key"'
raise exc.ValidationError(msg)
| true
| true
|
f708c660e4e2ca50541d552931d0c6fba439a8f4
| 5,069
|
py
|
Python
|
stonesoup/predictor/tests/test_kalman.py
|
Isaac-JenkinsRA/Stone-Soup
|
54c9c7dca8162dadaa58e85933cf10a0f86ce1e1
|
[
"MIT"
] | 1
|
2020-07-21T15:20:20.000Z
|
2020-07-21T15:20:20.000Z
|
stonesoup/predictor/tests/test_kalman.py
|
Isaac-JenkinsRA/Stone-Soup
|
54c9c7dca8162dadaa58e85933cf10a0f86ce1e1
|
[
"MIT"
] | null | null | null |
stonesoup/predictor/tests/test_kalman.py
|
Isaac-JenkinsRA/Stone-Soup
|
54c9c7dca8162dadaa58e85933cf10a0f86ce1e1
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import datetime
import pytest
import numpy as np
from ...models.transition.linear import ConstantVelocity
from ...predictor.kalman import (
KalmanPredictor, ExtendedKalmanPredictor, UnscentedKalmanPredictor,
SqrtKalmanPredictor)
from ...types.prediction import GaussianStatePrediction
from ...types.state import GaussianState, SqrtGaussianState
from ...types.track import Track
@pytest.mark.parametrize(
    "PredictorClass, transition_model, prior_mean, prior_covar",
    [
        (   # Standard Kalman
            KalmanPredictor,
            ConstantVelocity(noise_diff_coeff=0.1),
            np.array([[-6.45], [0.7]]),
            np.array([[4.1123, 0.0013],
                      [0.0013, 0.0365]])
        ),
        (   # Extended Kalman
            ExtendedKalmanPredictor,
            ConstantVelocity(noise_diff_coeff=0.1),
            np.array([[-6.45], [0.7]]),
            np.array([[4.1123, 0.0013],
                      [0.0013, 0.0365]])
        ),
        (   # Unscented Kalman
            UnscentedKalmanPredictor,
            ConstantVelocity(noise_diff_coeff=0.1),
            np.array([[-6.45], [0.7]]),
            np.array([[4.1123, 0.0013],
                      [0.0013, 0.0365]])
        )
    ],
    ids=["standard", "extended", "unscented"]
)
def test_kalman(PredictorClass, transition_model,
                prior_mean, prior_covar):
    """Each predictor variant must reproduce the closed-form KF prediction
    ``F @ x`` and ``F @ P @ F.T + Q`` for the linear constant-velocity model.
    """
    # Define time related variables
    timestamp = datetime.datetime.now()
    timediff = 2  # 2sec
    new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
    time_interval = new_timestamp - timestamp
    # Define prior state
    prior = GaussianState(prior_mean,
                          prior_covar,
                          timestamp=timestamp)
    transition_model_matrix = transition_model.matrix(time_interval=time_interval)
    transition_model_covar = transition_model.covar(time_interval=time_interval)
    # Calculate evaluation variables — the analytic Kalman prediction that
    # every variant should match on a linear model.
    eval_prediction = GaussianStatePrediction(
        transition_model_matrix @ prior.mean,
        transition_model_matrix@prior.covar@transition_model_matrix.T + transition_model_covar)
    # Initialise a kalman predictor
    predictor = PredictorClass(transition_model=transition_model)
    # Perform and assert state prediction
    prediction = predictor.predict(prior=prior,
                                   timestamp=new_timestamp)
    assert np.allclose(prediction.mean,
                       eval_prediction.mean, 0, atol=1.e-14)
    assert np.allclose(prediction.covar,
                       eval_prediction.covar, 0, atol=1.e-14)
    assert prediction.timestamp == new_timestamp
# TODO: Test with Control Model
def test_lru_cache():
    """Predictions are cached per (track, time) and invalidated on append."""
    now = datetime.datetime.now()
    later = now + datetime.timedelta(seconds=1)
    predictor = KalmanPredictor(ConstantVelocity(noise_diff_coeff=0))
    track = Track([GaussianState([[0.], [1.]], np.diag([1., 1.]), now)])
    first = predictor.predict(track, later)
    assert np.array_equal(first.state_vector, np.array([[1.], [1.]]))
    # Repeating the identical query must return the cached object itself.
    assert predictor.predict(track, later) is first
    # Appending a new state to the track invalidates the cached prediction.
    track.append(GaussianState([[1.], [1.]], np.diag([1., 1.]), later))
    assert predictor.predict(track, later) is not first
def test_sqrt_kalman():
    """Square-root Kalman prediction must agree with the standard predictor."""
    # Define time related variables
    timestamp = datetime.datetime.now()
    timediff = 2  # 2sec
    new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
    # Define prior state in both full-covariance and square-root form;
    # the Cholesky factor is the exact square root of prior_covar.
    prior_mean = np.array([[-6.45], [0.7]])
    prior_covar = np.array([[4.1123, 0.0013],
                            [0.0013, 0.0365]])
    prior = GaussianState(prior_mean,
                          prior_covar,
                          timestamp=timestamp)
    sqrt_prior_covar = np.linalg.cholesky(prior_covar)
    sqrt_prior = SqrtGaussianState(prior_mean, sqrt_prior_covar,
                                   timestamp=timestamp)
    transition_model = ConstantVelocity(noise_diff_coeff=0.1)
    # Initialise a kalman predictor
    predictor = KalmanPredictor(transition_model=transition_model)
    sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model)
    # Can swap out this method: the qr_method=True instance below deliberately
    # replaces the one constructed on the previous line.
    sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model, qr_method=True)
    # Perform and assert state prediction
    prediction = predictor.predict(prior=prior, timestamp=new_timestamp)
    sqrt_prediction = sqrt_predictor.predict(prior=sqrt_prior,
                                             timestamp=new_timestamp)
    # The reconstructed covariance (S @ S.T) must match the full-form result.
    assert np.allclose(prediction.mean, sqrt_prediction.mean, 0, atol=1.e-14)
    assert np.allclose(prediction.covar,
                       sqrt_prediction.sqrt_covar@sqrt_prediction.sqrt_covar.T, 0,
                       atol=1.e-14)
    assert np.allclose(prediction.covar, sqrt_prediction.covar, 0, atol=1.e-14)
    assert prediction.timestamp == sqrt_prediction.timestamp
| 37.828358
| 95
| 0.655356
|
import datetime
import pytest
import numpy as np
from ...models.transition.linear import ConstantVelocity
from ...predictor.kalman import (
KalmanPredictor, ExtendedKalmanPredictor, UnscentedKalmanPredictor,
SqrtKalmanPredictor)
from ...types.prediction import GaussianStatePrediction
from ...types.state import GaussianState, SqrtGaussianState
from ...types.track import Track
@pytest.mark.parametrize(
"PredictorClass, transition_model, prior_mean, prior_covar",
[
( KalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
np.array([[-6.45], [0.7]]),
np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
),
( ExtendedKalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
np.array([[-6.45], [0.7]]),
np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
),
( UnscentedKalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
np.array([[-6.45], [0.7]]),
np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
)
],
ids=["standard", "extended", "unscented"]
)
def test_kalman(PredictorClass, transition_model,
prior_mean, prior_covar):
timestamp = datetime.datetime.now()
timediff = 2 new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
time_interval = new_timestamp - timestamp
prior = GaussianState(prior_mean,
prior_covar,
timestamp=timestamp)
transition_model_matrix = transition_model.matrix(time_interval=time_interval)
transition_model_covar = transition_model.covar(time_interval=time_interval)
eval_prediction = GaussianStatePrediction(
transition_model_matrix @ prior.mean,
transition_model_matrix@prior.covar@transition_model_matrix.T + transition_model_covar)
predictor = PredictorClass(transition_model=transition_model)
prediction = predictor.predict(prior=prior,
timestamp=new_timestamp)
assert np.allclose(prediction.mean,
eval_prediction.mean, 0, atol=1.e-14)
assert np.allclose(prediction.covar,
eval_prediction.covar, 0, atol=1.e-14)
assert prediction.timestamp == new_timestamp
def test_lru_cache():
predictor = KalmanPredictor(ConstantVelocity(noise_diff_coeff=0))
timestamp = datetime.datetime.now()
state = GaussianState([[0.], [1.]], np.diag([1., 1.]), timestamp)
track = Track([state])
prediction_time = timestamp + datetime.timedelta(seconds=1)
prediction1 = predictor.predict(track, prediction_time)
assert np.array_equal(prediction1.state_vector, np.array([[1.], [1.]]))
prediction2 = predictor.predict(track, prediction_time)
assert prediction2 is prediction1
track.append(GaussianState([[1.], [1.]], np.diag([1., 1.]), prediction_time))
prediction3 = predictor.predict(track, prediction_time)
assert prediction3 is not prediction1
def test_sqrt_kalman():
timestamp = datetime.datetime.now()
timediff = 2 new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
prior_mean = np.array([[-6.45], [0.7]])
prior_covar = np.array([[4.1123, 0.0013],
[0.0013, 0.0365]])
prior = GaussianState(prior_mean,
prior_covar,
timestamp=timestamp)
sqrt_prior_covar = np.linalg.cholesky(prior_covar)
sqrt_prior = SqrtGaussianState(prior_mean, sqrt_prior_covar,
timestamp=timestamp)
transition_model = ConstantVelocity(noise_diff_coeff=0.1)
predictor = KalmanPredictor(transition_model=transition_model)
sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model)
sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model, qr_method=True)
prediction = predictor.predict(prior=prior, timestamp=new_timestamp)
sqrt_prediction = sqrt_predictor.predict(prior=sqrt_prior,
timestamp=new_timestamp)
assert np.allclose(prediction.mean, sqrt_prediction.mean, 0, atol=1.e-14)
assert np.allclose(prediction.covar,
sqrt_prediction.sqrt_covar@sqrt_prediction.sqrt_covar.T, 0,
atol=1.e-14)
assert np.allclose(prediction.covar, sqrt_prediction.covar, 0, atol=1.e-14)
assert prediction.timestamp == sqrt_prediction.timestamp
| true
| true
|
f708c66d9c43e6918050056e64a62284c29ad04e
| 1,997
|
py
|
Python
|
changeDetection.py
|
jials/CS4243-project
|
100d7ed1cbd379de3b2e65c16e037bf4afec0fb1
|
[
"MIT"
] | null | null | null |
changeDetection.py
|
jials/CS4243-project
|
100d7ed1cbd379de3b2e65c16e037bf4afec0fb1
|
[
"MIT"
] | null | null | null |
changeDetection.py
|
jials/CS4243-project
|
100d7ed1cbd379de3b2e65c16e037bf4afec0fb1
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import imageMarker
# Parameters for cv2.calcOpticalFlowPyrLK (pyramidal Lucas-Kanade flow).
lucas_kanade_params = dict(
    winSize= (4, 4),  # search window at each pyramid level
    maxLevel= 3,  # level of pyramids used
    # Stop after 10 iterations or when the point moves less than 0.03 px.
    criteria= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
)
def mark_features_on_all_images(images, features_coordinates):
    """Track feature points across a frame sequence with Lucas-Kanade flow.

    Each point in ``features_coordinates`` (seeded on ``images[0]``) is
    followed frame-to-frame via ``cv2.calcOpticalFlowPyrLK`` and a marker
    is drawn at its tracked position on every subsequent frame.

    Parameters
    ----------
    images : sequence of BGR frames; ``images[0]`` is the reference frame.
    features_coordinates : list of ``[x, y]`` points to track.

    Returns
    -------
    tuple ``(marked_images, marked_frame_coordinates, status_arr)`` —
    for each frame after the first: the frame with markers drawn, the
    ``[x, y]`` of every tracked point, and the raw LK status vector.

    Raises
    ------
    ValueError
        If any frame after the first is ``None``. (The original code only
        printed a diagnostic and then crashed on ``None.copy()``.)
    """
    marked_images = []
    marked_frame_coordinates = []
    status_arr = []
    # LK flow operates on grayscale image pairs.
    last_gs_img = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
    # OpenCV expects the seed points as an (N, 1, 2) float32 array.
    p0 = np.float32([[coordinate] for coordinate in features_coordinates])
    # All-zero overlay; cv2.add(frame, mask) leaves the frame unchanged.
    mask = np.zeros_like(images[0])
    for fr in range(1, len(images)):
        if images[fr] is None:
            # Fail fast with a clear message instead of an AttributeError.
            raise ValueError(
                'change detection: frame %d is None (%d frames given)'
                % (fr, len(images)))
        frame = images[fr].copy()
        gs_img = cv2.cvtColor(images[fr], cv2.COLOR_BGR2GRAY)
        p1, st, _err = cv2.calcOpticalFlowPyrLK(
            last_gs_img, gs_img, p0, None, **lucas_kanade_params)
        status_arr.append(st)
        if p1 is None:
            # Tracking failed outright: keep the unmarked frame and carry the
            # most recent coordinates forward. Falling back to the seed points
            # fixes an IndexError when this happened on the first frame pair
            # (the original guard tested len(images) == 1, which can never be
            # true inside this loop).
            marked_images.append(frame)
            marked_frame_coordinates.append(
                marked_frame_coordinates[-1] if marked_frame_coordinates
                else features_coordinates)
            continue
        # Keep the new location for points tracked successfully (st == 1);
        # otherwise retain the previous location.
        new_points = np.array([
            p1[index] if st[index] == 1 else p0[index]
            for index in range(len(p1))])
        marked_coordinates = []
        for index, point in enumerate(new_points):
            x, y = point.ravel()
            marked_coordinates.append([x, y])
            imageMarker.mark_image_at_point(
                frame, int(y), int(x), 9, imageMarker.colors[index])
        marked_frame_coordinates.append(marked_coordinates)
        marked_images.append(cv2.add(frame, mask))
        # Next iteration compares against this frame and its tracked points.
        last_gs_img = gs_img.copy()
        p0 = new_points.reshape(-1, 1, 2)
    return marked_images, marked_frame_coordinates, status_arr
| 31.698413
| 119
| 0.632949
|
import numpy as np
import cv2
import imageMarker
lucas_kanade_params = dict(
winSize= (4, 4),
maxLevel= 3, criteria= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
)
def mark_features_on_all_images(images, features_coordinates):
marked_images = []
marked_frame_coordinates = []
last_gs_img = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
p0 = []
for coordinate in features_coordinates:
p0.append([coordinate,])
p0 = np.float32(p0)
mask = np.zeros_like(images[0])
status_arr = []
for fr in range(1, len(images)):
marked_coordinates = []
if images[fr] is None:
print('change detection problematic frame', fr)
print('len of given images', len(images))
frame = images[fr].copy()
gs_img = cv2.cvtColor(images[fr], cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(last_gs_img, gs_img, p0, None, **lucas_kanade_params)
status_arr.append(st)
if p1 is None:
marked_images.append(frame)
marked_frame_coordinates.append(features_coordinates if len(images) == 1 else marked_frame_coordinates[-1])
continue
new_points = []
for index in range(len(p1)):
if st[index] == 1:
new_points.append(p1[index])
else:
new_points.append(p0[index])
new_points = np.array(new_points)
for index, point in enumerate(new_points):
x, y = point.ravel()
marked_coordinates.append([x,y])
imageMarker.mark_image_at_point(frame, int(y), int(x), 9, imageMarker.colors[index])
marked_frame_coordinates.append(marked_coordinates)
img = cv2.add(frame,mask)
marked_images.append(img)
last_gs_img = gs_img.copy()
p0 = new_points.reshape(-1,1,2)
return marked_images, marked_frame_coordinates, status_arr
| true
| true
|
f708c79acb0b72bf6f596c6e15d29009ca1ee58b
| 67,151
|
py
|
Python
|
Scripts/ANN_AllAnalysis_ClimateModels_v4-RandomNoise-TestWarmthGFDL.py
|
zmlabe/ModelBiasesANN
|
df28842a8594870db3282682b1261af5058af832
|
[
"MIT"
] | 1
|
2022-02-12T11:56:54.000Z
|
2022-02-12T11:56:54.000Z
|
Scripts/ANN_AllAnalysis_ClimateModels_v4-RandomNoise-TestWarmthGFDL.py
|
zmlabe/ModelBiasesANN
|
df28842a8594870db3282682b1261af5058af832
|
[
"MIT"
] | null | null | null |
Scripts/ANN_AllAnalysis_ClimateModels_v4-RandomNoise-TestWarmthGFDL.py
|
zmlabe/ModelBiasesANN
|
df28842a8594870db3282682b1261af5058af832
|
[
"MIT"
] | null | null | null |
"""
ANN for evaluating model biases, differences, and other thresholds using
explainable AI (add warmth/cool GFDL-CM3 model only)
Reference : Barnes et al. [2020, JAMES]
Author : Zachary M. Labe
Date : 20 July 2021
Version : 4 - subsamples random weight class (#8) for mmmean
"""
### Import packages
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
from keras.layers import Dense, Activation
from keras import regularizers
from keras import metrics
from keras import optimizers
from keras.models import Sequential
import tensorflow.keras as keras
import tensorflow as tf
import pandas as pd
import random
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import calc_LRPclass as LRP
import innvestigate
from sklearn.metrics import accuracy_score
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
### Prevent tensorflow 2.+ deprecation warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
### LRP param
# Defaults for the backwards-optimization step of the LRP/XAI analysis
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = .001

### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
# Local input/output directories for the model and observational data
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directorydataENS = '/Users/zlabe/Data/SMILE/'
directorydataBB = '/Users/zlabe/Data/BEST/'
directorydataEE = '/Users/zlabe/Data/ERA5/'
directoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
###############################################################################
###############################################################################
# Climate models (classes) to discriminate between, plus run settings
modelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',
             'GFDL_CM3','GFDL_ESM2M','lens']
datasetsingle = ['SMILE']
dataset_obs = 'ERA5BE'      # observational dataset used for testing
seasons = ['annual']
variq = 'T2M'               # variable analyzed (2-m temperature)
reg_name = 'LowerArctic'    # region key understood by calc_Utilities.regions
timeper = 'historical'
###############################################################################
###############################################################################
# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',
#              'GFDL_ESM2M','lens']
# pickSMILE = ['CCCma_canesm2','MPI','lens']
pickSMILE = []              # empty list means: use every model in modelGCMs
if len(pickSMILE) >= 1:
    lenOfPicks = len(pickSMILE)
else:
    lenOfPicks = len(modelGCMs)
###############################################################################
###############################################################################
land_only = False
ocean_only = False
if land_only == True:
    maskNoiseClass = 'land'
elif ocean_only == True:
    maskNoiseClass = 'ocean'
else:
    maskNoiseClass = 'none'
###############################################################################
###############################################################################
rm_merid_mean = False       # remove meridional mean from the fields?
rm_annual_mean = False      # remove annual (global) mean from the fields?
###############################################################################
###############################################################################
rm_ensemble_mean = False    # remove each model's ensemble mean?
rm_observational_mean = False
###############################################################################
###############################################################################
calculate_anomalies = False
if calculate_anomalies == True:
    if timeper == 'historical':
        baseline = np.arange(1951,1980+1,1)
    elif timeper == 'future':
        baseline = np.arange(2021,2050+1,1)
    else:
        # NOTE(review): this only prints the ValueError, it does not raise,
        # so execution would continue with `baseline` undefined — confirm.
        print(ValueError('WRONG TIMEPER!'))
###############################################################################
###############################################################################
window = 0                  # rolling-window length (0 = no window)
ensTypeExperi = 'ENS'       # split train/test by ensemble member ('ENS') or year ('GCM')
# shuffletype = 'TIMEENS'
# shuffletype = 'ALLENSRAND'
# shuffletype = 'ALLENSRANDrmmean'
shuffletype = 'RANDGAUSS'   # how the random-noise class is generated
sizeOfTwin = 4 # name of experiment for adding noise class #8
if sizeOfTwin > 0:
    sizeOfTwinq = 1
else:
    sizeOfTwinq = sizeOfTwin
###############################################################################
###############################################################################
factorObs = 10 # factor to add to obs
###############################################################################
###############################################################################
# Year ranges and reshaping flags depend on the experiment type (ENS = split
# by ensemble member, GCM = split by year) and on the rolling-window length.
if ensTypeExperi == 'ENS':
    if window == 0:
        rm_standard_dev = False
        if timeper == 'historical':
            yearsall = np.arange(1950,2019+1,1)
        elif timeper == 'future':
            yearsall = np.arange(2020,2099+1,1)
        else:
            print(ValueError('WRONG TIMEPER!'))
            sys.exit()
        ravel_modelens = False
        ravelmodeltime = False
    else:
        # Windowed analysis: drop the first `window` years and work with
        # rolling standard deviations instead of raw values.
        rm_standard_dev = True
        if timeper == 'historical':
            yearsall = np.arange(1950+window,2019+1,1)
        elif timeper == 'future':
            yearsall = np.arange(2020+window,2099+1,1)
        else:
            print(ValueError('WRONG TIMEPER!'))
            sys.exit()
        ravelmodeltime = False
        ravel_modelens = True
elif ensTypeExperi == 'GCM':
    if window == 0:
        rm_standard_dev = False
        yearsall = np.arange(1950,2019+1,1)
        ravel_modelens = False
        ravelmodeltime = False
    else:
        rm_standard_dev = True
        if timeper == 'historical':
            yearsall = np.arange(1950,2019+1,1)
        elif timeper == 'future':
            yearsall = np.arange(2020,2099+1,1)
        else:
            print(ValueError('WRONG TIMEPER!'))
            sys.exit()
        ravelmodeltime = False
        ravel_modelens = True
###############################################################################
###############################################################################
numOfEns = 16               # ensemble members used per model
lensalso = True
# The extra 'RANDOM' class is only generated if it is the last model listed
if len(pickSMILE) == 0:
    if modelGCMs[-1] == 'RANDOM':
        randomalso = True
    else:
        randomalso = False
elif len(pickSMILE) != 0:
    if pickSMILE[-1] == 'RANDOM':
        randomalso = True
    else:
        randomalso = False
lentime = len(yearsall)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
num_of_class = lenOfPicks + sizeOfTwinq   # models + optional noise class
###############################################################################
###############################################################################
lrpRule = 'z'               # LRP propagation rule
normLRP = True              # normalize LRP relevance maps
###############################################################################
###############################################################################
###############################################################################
### Picking experiment to save
# Each "Experiment" is a unique combination of the preprocessing flags set
# above; the ladder below maps the current flag combination to its label.
# If no combination matches, the sentinel below triggers sys.exit further on.
typeOfAnalysis = 'issueWithExperiment'

# Experiment #1
if rm_ensemble_mean == True:
    if window > 1:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-1'
# Experiment #2
if rm_ensemble_mean == True:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-2'
# Experiment #3 (raw data)
# NOTE(review): `integer` (amplitude of the random noise added per grid
# point) is only defined for Experiments 3, 4 and 8 — confirm that the
# other experiments never reach code that reads it.
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-3'
                        if variq == 'T2M':
                            integer = 20 # random noise value to add/subtract from each grid point
                        elif variq == 'P':
                            integer = 20 # random noise value to add/subtract from each grid point
                        elif variq == 'SLP':
                            integer = 20 # random noise value to add/subtract from each grid point
# Experiment #4
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == True:
                        typeOfAnalysis = 'Experiment-4'
                        if variq == 'T2M':
                            integer = 25 # random noise value to add/subtract from each grid point
                        elif variq == 'P':
                            integer = 15 # random noise value to add/subtract from each grid point
                        elif variq == 'SLP':
                            integer = 5 # random noise value to add/subtract from each grid point
# Experiment #5
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == True:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-5'
# Experiment #6
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == True:
                    if rm_annual_mean == True:
                        typeOfAnalysis = 'Experiment-6'
# Experiment #7
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == True:
            if rm_merid_mean == False:
                if rm_observational_mean == True:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-7'
# Experiment #8
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == True:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-8'
                        if variq == 'T2M':
                            integer = 1 # random noise value to add/subtract from each grid point
                        elif variq == 'P':
                            integer = 1 # random noise value to add/subtract from each grid point
                        elif variq == 'SLP':
                            integer = 5 # random noise value to add/subtract from each grid point
# Experiment #9
if rm_ensemble_mean == False:
    if window > 1:
        if calculate_anomalies == True:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-9'

print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
if typeOfAnalysis == 'issueWithExperiment':
    sys.exit('Wrong parameters selected to analyze')
### Select how to save files
# The three land/ocean/none variants differ only by an optional mask tag,
# so build that tag once and assemble the filename stem a single time.
if land_only == True:
    maskTag = '_LAND'
elif ocean_only == True:
    maskTag = '_OCEAN'
else:
    maskTag = ''
saveData = timeper + '_' + seasons[0] + maskTag + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Create sample class labels for each model for my own testing
### Appends a twin set of classes for the random noise class
# NOTE(review): `seasons` is a list, so `seasons != 'none'` is always True
# here — confirm whether a string sentinel was intended.
if seasons != 'none':
    # classesl[i] is filled with the integer label i for model i:
    # shape (model, ensemble, year)
    classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))
    for i in range(lenOfPicks):
        classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i)

    if sizeOfTwin > 0:
        ### Add random noise models
        # Relies on `i` leaking from the loop above: the noise class gets
        # the next label after the last model (i+1 == lenOfPicks).
        randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)
        classesl = np.append(classesl,randomNoiseClass,axis=0)

    # For the ENS experiment the ensemble axis comes first
    if ensTypeExperi == 'ENS':
        classeslnew = np.swapaxes(classesl,0,1)
    elif ensTypeExperi == 'GCM':
        classeslnew = classesl
### Begin ANN and the entire script
for sis,singlesimulation in enumerate(datasetsingle):
lrpsns = []
for seas in range(len(seasons)):
###############################################################################
###############################################################################
###############################################################################
### ANN preliminaries
simuqq = datasetsingle[0]
monthlychoice = seasons[seas]
lat_bounds,lon_bounds = UT.regions(reg_name)
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',
'RMSE Train','RMSE Test',
'ridge penalty','zero mean',
'zero merid mean','land only?','ocean only?'])
### Define primary dataset to use
dataset = singlesimulation
modelType = dataset
### Whether to test and plot the results using obs data
if dataset_obs == '20CRv3':
year_obsall = np.arange(yearsall[sis].min(),2015+1,1)
elif dataset_obs == 'ERA5':
year_obsall = np.arange(1979+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1979,2019+1,1)
elif dataset_obs == 'ERA5BE':
year_obsall = np.arange(1950+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1950,2019+1,1)
if monthlychoice == 'DJF':
obsyearstart = year_obsall.min()+1
year_obs = year_obsall[1:]
else:
obsyearstart = year_obsall.min()
year_obs = year_obsall
### Remove the annual mean? True to subtract it from dataset ##########
if rm_annual_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
### Rove the ensemble mean? True to subtract it from dataset ##########
if rm_ensemble_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
### Split the data into training and testing sets? value of 1 will use all
### data as training
segment_data_factor = .75
### Hiddens corresponds to the number of hidden layers the nnet will use - 0
### for linear model, or a list [10, 20, 5] for multiple layers of nodes
### (10 nodes in first layer, 20 in second, etc); The "loop" part
### allows you to loop through multiple architectures. For example,
### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the
### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,
### and the next would be 3 hidden layers of 1 node each.
### Set useGPU to True to use the GPU, but only if you selected the GPU
### Runtime in the menu at the top of this page
useGPU = False
### Set Cascade to True to utilize the nnet's cascade function
cascade = False
### Plot within the training loop - may want to set to False when testing out
### larget sets of parameters
plot_in_train = False
###############################################################################
###############################################################################
###############################################################################
### Read in model and observational/reanalysis data
def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Load one model dataset for `variq` and subset it to the region.

    Thin wrapper around df.readFiles / df.getRegion; `monthlychoice` and
    `timeper` come from the enclosing scope. Returns (data, lats, lons)
    restricted to lat_bounds/lon_bounds.
    """
    fullfield, latgrid, longrid = df.readFiles(variq,dataset,monthlychoice,
                                               numOfEns,lensalso,randomalso,
                                               ravelyearsbinary,ravelbinary,
                                               shuffletype,timeper)
    regionfield, latgrid, longrid = df.getRegion(fullfield,latgrid,longrid,
                                                 lat_bounds,lon_bounds)
    # The printed shape is that of the full (pre-subsetting) field.
    print('\nOur dataset: ',dataset,' is shaped',fullfield.shape)
    return regionfield, latgrid, longrid
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Load the observational/reanalysis field and subset it to the region.

    Mirrors read_primary_dataset but for `dataset_obs`; `monthlychoice`
    and `timeper` come from the enclosing scope. Returns
    (data_obs, lats_obs, lons_obs) restricted to lat_bounds/lon_bounds.
    """
    obsfield, obslat, obslon = df.readFiles(variq,dataset_obs,monthlychoice,
                                            numOfEns,lensalso,randomalso,
                                            ravelyearsbinary,ravelbinary,
                                            shuffletype,timeper)
    # Re-bind to the regional subset; the printed shape is the regional one.
    obsfield, obslat, obslon = df.getRegion(obsfield,obslat,obslon,
                                            lat_bounds,lon_bounds)
    print('our OBS dataset: ',dataset_obs,' is shaped',obsfield.shape)
    return obsfield, obslat, obslon
###############################################################################
###############################################################################
###############################################################################
### Select data to test, train on
def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):
    """Split data/labels into training and testing sets and flatten to X/Y.

    ENS experiment: random ensemble members go to train vs. test.
    GCM experiment: a contiguous run of years trains, the remainder tests.

    Mutates globals: random_segment_seed (created if None), trainIndices,
    testIndices. Returns (Xtrain, Ytrain, Xtest, Ytest, Xtest_shape,
    Xtrain_shape, data_train_shape, data_test_shape, testIndices,
    trainIndices, class_weight), with Y one-hot encoded.

    NOTE(review): if fac >= 1 nothing is segmented and the return would hit
    undefined names — callers use segment_data_factor = .75, so the fac < 1
    path is always taken here.
    """
    global random_segment_seed,trainIndices,testIndices
    if random_segment_seed == None:
        random_segment_seed = int(int(np.random.randint(1, 100000)))
    # Seed NumPy so the train/test split is reproducible for this seed.
    np.random.seed(random_segment_seed)

###############################################################################
###############################################################################
###############################################################################
    ###################################################################
    ### Large Ensemble experiment
    if ensTypeExperi == 'ENS':

        ### Flip GCM and ensemble member axes
        datanew = np.swapaxes(data,0,1)
        classeslnew = np.swapaxes(classesl,0,1)

        if fac < 1 :
            nrows = datanew.shape[0]
            segment_train = int(np.round(nrows * fac))
            segment_test = nrows - segment_train
            print('Training on',segment_train,'ensembles, testing on',segment_test)

            ### Picking out random ensembles
            # Rejection-sample distinct ensemble indices for training ...
            i = 0
            trainIndices = list()
            while i < segment_train:
                line = np.random.randint(0, nrows)
                if line not in trainIndices:
                    trainIndices.append(line)
                    i += 1
                else:
                    pass

            # ... and for testing (disjoint from the training indices).
            i = 0
            testIndices = list()
            while i < segment_test:
                line = np.random.randint(0, nrows)
                if line not in trainIndices:
                    if line not in testIndices:
                        testIndices.append(line)
                        i += 1
                else:
                    pass

            ### Training segment----------
            data_train = np.empty((len(trainIndices),datanew.shape[1],
                                   datanew.shape[2],datanew.shape[3],
                                   datanew.shape[4]))
            Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],
                               classeslnew.shape[2]))
            for index,ensemble in enumerate(trainIndices):
                data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
                Ytrain[index,:,:] = classeslnew[ensemble,:,:]

            ### Random ensembles are picked
            if debug:
                print('\nTraining on ensembles: ',trainIndices)
                print('Testing on ensembles: ',testIndices)
                print('\norg data - shape', datanew.shape)
                print('training data - shape', data_train.shape)

            ### Reshape into X and Y
            # Flatten (ens, model, year) into samples and (lat, lon) into features.
            Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))
            Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))
            Xtrain_shape = (data_train.shape[0])

            ### Testing segment----------
            data_test = np.empty((len(testIndices),datanew.shape[1],
                                  datanew.shape[2],datanew.shape[3],
                                  datanew.shape[4]))
            Ytest = np.empty((len(testIndices),classeslnew.shape[1],
                              classeslnew.shape[2]))
            for index,ensemble in enumerate(testIndices):
                data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
                Ytest[index,:,:] = classeslnew[ensemble,:,:]

            ### Random ensembles are picked
            if debug:
                print('Training on ensembles: %s' % len(trainIndices))
                print('Testing on ensembles: %s' % len(testIndices))
                print('\norg data - shape', datanew.shape)
                print('testing data - shape', data_test.shape)

            ### Reshape into X and Y
            Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))
            Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))
            Xtest_shape = (data_test.shape[0])

            # The first Xtest_shape assignment is immediately overwritten here.
            Xtest_shape = (data_test.shape[0], data_test.shape[1])
            data_train_shape = data_train.shape[0]
            data_test_shape = data_test.shape[0]

            ### 'unlock' the random seed
            np.random.seed(None)

            ### One-hot vectors
            Ytrain = keras.utils.to_categorical(Ytrain)
            Ytest = keras.utils.to_categorical(Ytest)

            ### Class weights
            class_weight = class_weight_creator(Ytrain)
###############################################################################
###############################################################################
###############################################################################
    ###################################################################
    ### GCM type experiments without ensembles
    elif ensTypeExperi == 'GCM':
        # Collapse (model, ensemble) into a single leading axis if present.
        if data.ndim == 5:
            datanew = np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))
            classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))
        else:
            datanew = data
            classeslnew = classesl

        if fac < 1 :
            nrows = datanew.shape[1]
            segment_train = int(np.floor(nrows * fac))
            segment_test = nrows - segment_train
            print('Training on',segment_train,'years, testing on',segment_test)

            ### Picking out random ensembles
            # Train on a contiguous middle run of years; test on the years
            # before and after it.
            firstyears = int(np.floor(segment_test/2))
            # NOTE(review): lastyears is computed but never used below.
            lastyears = -int(np.floor(segment_test/2))
            trainIndices = np.arange(firstyears,firstyears+segment_train,1)
            testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)

            ### Training segment----------
            data_train = np.empty((datanew.shape[0],len(trainIndices),
                                   datanew.shape[2],datanew.shape[3]))
            Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))
            for index,ensemble in enumerate(trainIndices):
                data_train[:,index,:,:] = datanew[:,ensemble,:,:]
                Ytrain[:,index] = classeslnew[:,ensemble]

            ### Random ensembles are picked
            if debug:
                print('\nTraining on years: ',trainIndices)
                print('Testing on years: ',testIndices)
                print('\norg data - shape', datanew.shape)
                print('training data - shape', data_train.shape)

            ### Reshape into X and Y
            Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))
            Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))
            Xtrain_shape = (data_train.shape[0])

            ### Testing segment----------
            data_test = np.empty((datanew.shape[0],len(testIndices),
                                  datanew.shape[2],datanew.shape[3]))
            Ytest = np.empty((classeslnew.shape[0],len(testIndices)))
            for index,ensemble in enumerate(testIndices):
                data_test[:,index,:,:] = datanew[:,ensemble,:,:]
                Ytest[:,index] = classeslnew[:,ensemble]

            ### Random ensembles are picked
            if debug:
                print('Training on years: %s' % len(trainIndices))
                print('Testing on years: %s' % len(testIndices))
                print('\norg data - shape', datanew.shape)
                print('testing data - shape', data_test.shape)

            ### Reshape into X and Y
            Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))
            Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))
            Xtest_shape = (data_test.shape[0])

            # The first Xtest_shape assignment is immediately overwritten here.
            Xtest_shape = (data_test.shape[0], data_test.shape[1])
            data_train_shape = data_train.shape[0]
            data_test_shape = data_test.shape[0]

            ### 'unlock' the random seed
            np.random.seed(None)

            ### One-hot vectors
            Ytrain = keras.utils.to_categorical(Ytrain)
            Ytest = keras.utils.to_categorical(Ytest)

            ### Class weights
            class_weight = class_weight_creator(Ytrain)
    else:
        # NOTE(review): prints instead of raising; the return below would
        # then fail on undefined names — confirm intent.
        print(ValueError('WRONG EXPERIMENT!'))
    return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight
###############################################################################
###############################################################################
###############################################################################
### Plotting functions
def adjust_spines(ax, spines):
    """Show only the requested matplotlib spines, offset outward by 5 pt.

    Any spine whose name is not in `spines` is hidden, and tick marks are
    removed from an axis whose spine ('left' for y, 'bottom' for x) is
    not requested.
    """
    for name, spine_obj in ax.spines.items():
        if name not in spines:
            # Hide spines that were not requested.
            spine_obj.set_color('none')
        else:
            spine_obj.set_position(('outward', 5))

    # y-axis ticks follow the 'left' spine; drop them if it is hidden.
    if 'left' not in spines:
        ax.yaxis.set_ticks([])
    else:
        ax.yaxis.set_ticks_position('left')

    # x-axis ticks follow the 'bottom' spine; drop them if it is hidden.
    if 'bottom' not in spines:
        ax.xaxis.set_ticks([])
    else:
        ax.xaxis.set_ticks_position('bottom')
###############################################################################
###############################################################################
###############################################################################
### Create a class weight dictionary to help if the classes are unbalanced
def class_weight_creator(Y):
    """Return {class_index: weight} inversely proportional to class count.

    Y is a one-hot array of shape (samples, classes). The most frequent
    class receives weight 1.0; rarer classes receive proportionally larger
    weights, for use as Keras `class_weight`.
    """
    counts = np.sum(Y, axis=0)
    scale = np.max(counts)
    return {idx: scale / counts[idx] for idx in range(Y.shape[-1])}
###############################################################################
###############################################################################
###############################################################################
### Neural Network Creation & Training
class TimeHistory(keras.callbacks.Callback):
    """Keras callback recording the wall-clock duration (seconds) of each
    training epoch in `self.times`."""
    # Note: `logs={}` mutable defaults are harmless here since logs is unused.
    def on_train_begin(self, logs={}):
        # Reset the per-epoch timing list at the start of every fit().
        self.times = []
    def on_epoch_begin(self, epoch, logs={}):
        self.epoch_time_start = time.time()
    def on_epoch_end(self, epoch, logs={}):
        self.times.append(time.time() - self.epoch_time_start)
def defineNN(hidden, input_shape, output_shape, ridgePenalty):
    """Build the classification ANN.

    hidden       : list of hidden-layer sizes, e.g. [10,10]
    input_shape  : number of input features (flattened grid)
    output_shape : number of classes
    ridgePenalty : L2 coefficient applied to the FIRST layer's kernel only

    Uses `actFun` and `random_network_seed` from the enclosing scope; every
    kernel/bias initializer is a seeded RandomNormal for reproducibility.
    The output layer is linear with softmax added as a separate Activation
    layer (presumably to ease the LRP analysis — confirm).
    """
    model = Sequential()
    ### Initialize first layer
    ### Model is a single node with activation function
    model.add(Dense(hidden[0],input_shape=(input_shape,),
                    activation=actFun, use_bias=True,
                    kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),
                    bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
                    kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))

    ### Initialize other layers (no regularization beyond the first layer)
    for layer in hidden[1:]:
        model.add(Dense(layer,activation=actFun,
                        use_bias=True,
                        kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),
                        bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
                        kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))

    print('\nTHIS IS AN ANN!\n')

    #### Initialize output layer (linear; softmax added separately below)
    model.add(Dense(output_shape,activation=None,use_bias=True,
                    kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),
                    bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
                    kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))

    ### Add softmax layer at the end
    model.add(Activation('softmax'))

    return model
def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):
    """Compile and fit the model; return (model, history).

    niter is the maximum number of epochs; early stopping on training loss
    (patience=2) may end training sooner. Sets the globals `lr_here` and
    `batch_size` as a side effect for use elsewhere in the script.

    NOTE(review): the `class_weight` argument is accepted but never passed
    to model.fit — confirm whether class weighting was meant to be applied.
    """
    global lr_here, batch_size
    lr_here = 0.001
    model.compile(optimizer=optimizers.SGD(lr=lr_here,
                                           momentum=0.9,nesterov=True),
                  loss = 'categorical_crossentropy',
                  metrics=[metrics.categorical_accuracy])
    # model.compile(optimizer=optimizers.Nadam(lr=lr_here),
    #               loss = 'categorical_crossentropy',
    #               metrics=[metrics.categorical_accuracy])

    ### Declare the relevant model parameters
    batch_size = 24

    print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----')

    ### Callbacks
    time_callback = TimeHistory()
    early_stopping = keras.callbacks.EarlyStopping(monitor='loss',
                                                   patience=2,
                                                   verbose=1,
                                                   mode='auto')

    history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,
                        shuffle=True,verbose=verbose,
                        callbacks=[time_callback,early_stopping],
                        validation_split=0.)
    print('******** done training ***********')

    return model, history
def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):
    """For loops to iterate through training iterations, ridge penalty,
    and hidden layer list.

    For every (niter, penalty, hidden) combination: seed the RNGs,
    standardize X, build and train a network, and record train/test RMSE
    in the global `experiment_result` DataFrame. Returns
    (experiment_result, model) where model is from the LAST combination.

    Mutates globals: random_network_seed (created if None) and
    experiment_result. Note that Xtrain/Xtest are re-bound to their
    standardized versions on each pass.
    """
    results = {}
    global nnet,random_network_seed
    for niter in iterations:
        for penalty in ridge_penalty:
            for hidden in hiddens:

                ### Check / use random seed
                if random_network_seed == None:
                    np.random.seed(None)
                    random_network_seed = int(np.random.randint(1, 100000))
                np.random.seed(random_network_seed)
                random.seed(random_network_seed)
                tf.set_random_seed(0)

                ### Standardize the data
                Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)
                Xmean,Xstd = stdVals

                ### Define the model
                model = defineNN(hidden,
                                 input_shape=np.shape(Xtrain)[1],
                                 output_shape=np.shape(Ytrain)[1],
                                 ridgePenalty=penalty)

                ### Train the net
                model, history = trainNN(model,Xtrain,
                                         Ytrain,niter,class_weight,verbose=1)

                ### After training, use the network with training data to
                ### check that we don't have any errors and output RMSE
                rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))
                if type(Ytest) != bool:
                    rmse_test = 0.
                    rmse_test = dSS.rmse(Ytest,model.predict(Xtest))
                else:
                    # Ytest=False signals "no test set available".
                    rmse_test = False

                this_result = {'iters': niter,
                               'hiddens' : hidden,
                               'RMSE Train' : rmse_train,
                               'RMSE Test' : rmse_test,
                               'ridge penalty': penalty,
                               'zero mean' : rm_annual_mean,
                               'zero merid mean' : rm_merid_mean,
                               'land only?' : land_only,
                               'ocean only?' : ocean_only,
                               'Segment Seed' : random_segment_seed,
                               'Network Seed' : random_network_seed }
                results.update(this_result)

                global experiment_result
                experiment_result = experiment_result.append(results,
                                                             ignore_index=True)

                #if True to plot each iter's graphs.
                if plot_in_train == True:
                    plt.figure()
                    plt.subplot(1,1,1)
                    plt.plot(history.history['loss'],label = 'training')
                    plt.title(history.history['loss'][-1])
                    plt.xlabel('epoch')
                    plt.xlim(2,len(history.history['loss'])-1)
                    plt.legend()
                    plt.grid(True)
                    plt.show()

                #'unlock' the random seed
                np.random.seed(None)
                random.seed(None)
                tf.set_random_seed(None)

    return experiment_result, model
###############################################################################
###############################################################################
###############################################################################
### Results
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
### Parameters
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[10,10]]
ridge_penalty = [0.1]
# hiddensList = [[8,8]]
# ridge_penalty = [0.2]
actFun = 'relu'
if any([maskNoiseClass=='land',maskNoiseClass=='ocean']):
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[8,8]]
ridge_penalty = [0.10]
actFun = 'relu'
expList = [(0)] # (0,1)
expN = np.size(expList)
iterations = [100]
random_segment = True
foldsN = 1
for avgHalfChunk in (0,):
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
for loop in ([0]):
### Get info about the region
lat_bounds,lon_bounds = UT.regions(reg_name)
data_all,lats,lons = read_primary_dataset(variq,dataset,
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,
dataset_obs,
numOfEns,
lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
###############################################################################
###############################################################################
###############################################################################
for exp in expList:
### Get the data together
data, data_obs, = data_all, data_obs_all,
###############################################################################
if len(pickSMILE) >= 1:
data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)
print('\n*Pick models to analysis from %s*\n' % pickSMILE)
###############################################################################
if calculate_anomalies == True:
data, data_obs = dSS.calculate_anomalies(data,data_obs,
lats,lons,baseline,yearsall)
print('\n*Calculate anomalies for %s-%s*\n' % (baseline.min(),baseline.max()))
###############################################################################
if rm_annual_mean == True:
data, data_obs = dSS.remove_annual_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed annual mean*\n')
###############################################################################
if rm_merid_mean == True:
data, data_obs = dSS.remove_merid_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed meridional mean*\n')
###############################################################################
if rm_ensemble_mean == True:
data = dSS.remove_ensemble_mean(data,ravel_modelens,
ravelmodeltime,
rm_standard_dev,
numOfEns)
print('\n*Removed ensemble mean*')
###############################################################################
if rm_standard_dev == True:
data = dSS.rm_standard_dev(data,window,ravelmodeltime,
numOfEns)
print('\n*Removed standard deviation*')
###############################################################################
if rm_observational_mean == True:
data = dSS.remove_observations_mean(data,data_obs,lats,lons)
print('\n*Removed observational data*')
###############################################################################
if land_only == True:
data, data_obs = dSS.remove_ocean(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed ocean data*')
###############################################################################
if ocean_only == True:
data, data_obs = dSS.remove_land(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed land data*')
###############################################################################
### Adding random data
if sizeOfTwin > 0:
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
data = dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Modify the GFDL-CM3 model for warmth and cooling that model only
print('\n <<< FACTOR FOR OBS IS %s! >>>\n' % factorObs)
if factorObs == 0:
data = data
elif factorObs == 1: # warm its mean state
GFDL = data[4,:,:,:,:]
GFDLwarmer = GFDL + 3
data[4,:,:,:,:] = GFDLwarmer
elif factorObs == 2: # cool its mean state
GFDL = data[4,:,:,:,:]
GFDLcooler = GFDL - 3
data[4,:,:,:,:] = GFDLcooler
elif factorObs == 3: # warm recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 4: # cool recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 5: # warm the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
warmerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) + 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + warmerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 6: # cool the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
coolerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) - 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + coolerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 7: # warm the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
warmerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) + 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + warmerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 8: # cool the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
coolerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) - 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + coolerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 9: # warm early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 10: # cool early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Loop over folds
for loop in np.arange(0,foldsN):
K.clear_session()
#---------------------------
# random_segment_seed = 34515
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
#---------------------------
Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)
YtrainClassMulti = Ytrain
YtestClassMulti = Ytest
# For use later
XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean, Xstd = stdVals
#---------------------------
random_network_seed = 87750
#---------------------------
# Create and train network
exp_result,model = test_train_loopClass(Xtrain,
YtrainClassMulti,
Xtest,
YtestClassMulti,
iterations=iterations,
ridge_penalty=ridge_penalty,
hiddens=hiddensList,class_weight=class_weight,
plot_in_train = True)
model.summary()
################################################################################################################################################
# save the model
dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'
savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)
if(reg_name=='Globe'):
regSave = ''
else:
regSave = '_' + reg_name
if(rm_annual_mean==True):
savename = savename + '_AnnualMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'
savename = savename + regSave
# model.save(dirname + savename + '.h5')
# np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)
print('saving ' + savename)
###############################################################
### Make final plot
### Get obs
dataOBSERVATIONS = data_obs
latsOBSERVATIONS = lats_obs
lonsOBSERVATIONS = lons_obs
Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])
annType = 'class'
if monthlychoice == 'DJF':
startYear = yearsall[sis].min()+1
endYear = yearsall[sis].max()
else:
startYear = yearsall[sis].min()
endYear = yearsall[sis].max()
years = np.arange(startYear,endYear+1,1)
Xmeanobs = np.nanmean(Xobs,axis=0)
Xstdobs = np.nanstd(Xobs,axis=0)
XobsS = (Xobs-Xmeanobs)/Xstdobs
XobsS[np.isnan(XobsS)] = 0
xtrainpred = (Xtrain-Xmean)/Xstd
xtrainpred[np.isnan(xtrainpred)] = 0
xtestpred = (Xtest-Xmean)/Xstd
xtestpred[np.isnan(xtestpred)] = 0
if(annType=='class'):
YpredObs = model.predict(XobsS)
YpredTrain = model.predict(xtrainpred)
YpredTest = model.predict(xtestpred)
#######################################################
#######################################################
#######################################################
### Check null hypothesis of random data!
randarray,latsra,lonsra = read_primary_dataset(variq,'RANDOM',
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])
randarraymean = np.nanmean(randarrayn,axis=0)
randarraystd = np.nanstd(randarrayn,axis=0)
randarrayS = (randarrayn-randarraymean)/randarraystd
### Prediction on random data
YpredRand = model.predict(randarrayS)
#######################################################
#######################################################
#######################################################
### Get output from model
trainingout = YpredTrain
testingout = YpredTest
if ensTypeExperi == 'ENS':
classesltrain = classeslnew[trainIndices,:,:].ravel()
classesltest = classeslnew[testIndices,:,:].ravel()
elif ensTypeExperi == 'GCM':
classesltrain = classeslnew[:,:,trainIndices].ravel()
classesltest = classeslnew[:,:,testIndices].ravel()
### Random data tests
randout = YpredRand
labelsrand = np.argmax(randout,axis=1)
uniquerand,countrand = np.unique(labelsrand,return_counts=True)
np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)
np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)
### Observations
obsout = YpredObs
labelsobs = np.argmax(obsout,axis=1)
uniqueobs,countobs = np.unique(labelsobs,return_counts=True)
print(labelsobs)
np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)
np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)
def truelabel(data):
    """Return the predicted class label for every sample.

    Picks, per row, the column index holding the maximum value —
    i.e. the most-confident class in a (samples x classes) confidence matrix.
    """
    labels = data[:,:].argmax(axis=1)
    return labels
def accuracyTotalTime(data_pred,data_true):
    """Return the fraction of predicted labels that match the true labels
    over the entire time series.

    Note the parameter order: predictions first, truth second; internally
    sklearn's accuracy_score(y_true, y_pred) is called with truth first.
    """
    return accuracy_score(data_true, data_pred)
##############################################################################
##############################################################################
##############################################################################
indextrain = truelabel(trainingout)
acctrain = accuracyTotalTime(indextrain,classesltrain)
indextest = truelabel(testingout)
acctest = accuracyTotalTime(indextest,classesltest)
print('\n\nAccuracy Training == ',acctrain)
print('Accuracy Testing == ',acctest)
## Save the output for plotting
np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)
np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)
np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)
np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)
np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)
np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)
### See more more details
model.layers[0].get_config()
## Define variable for analysis
print('\n\n------------------------')
print(variq,'= Variable!')
print(monthlychoice,'= Time!')
print(reg_name,'= Region!')
print(lat_bounds,lon_bounds)
print(dataset,'= Model!')
print(dataset_obs,'= Observations!\n')
print(rm_annual_mean,'= rm_annual_mean')
print(rm_merid_mean,'= rm_merid_mean')
print(rm_ensemble_mean,'= rm_ensemble_mean')
print(land_only,'= land_only')
print(ocean_only,'= ocean_only')
## Variables for plotting
lons2,lats2 = np.meshgrid(lons,lats)
observations = data_obs
modeldata = data
modeldatamean = np.nanmean(modeldata,axis=1)
spatialmean_obs = UT.calc_weightedAve(observations,lats2)
spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)
spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)
plt.figure()
plt.plot(yearsall,spatialmean_modmean.transpose())
plt.plot(yearsall,spatialmean_modmean.transpose()[:,4],linewidth=3,color='red',label=r'GFDL-CM3 - %s-Experiment' % factorObs)
plt.xlabel('Years')
plt.ylabel('Average Arctic Temperature')
plt.legend()
plt.ylim([-14.5,-1])
plt.savefig('/Users/zlabe/Desktop/factor-%s.png' % factorObs,dpi=300)
plt.figure()
plt.plot(spatialmean_obs)
##############################################################################
##############################################################################
##############################################################################
## Visualizing through LRP
numLats = lats.shape[0]
numLons = lons.shape[0]
numDim = 3
##############################################################################
##############################################################################
##############################################################################
lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),
np.append(Ytrain,Ytest,axis=0),
biasBool,annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
meanlrp = np.nanmean(lrpall,axis=0)
fig=plt.figure()
plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)
### For training data only
lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For training data only
lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For observations data only
lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
### For random data only
lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
##############################################################################
##############################################################################
##############################################################################
def netcdfLRP(lats,lons,var,directory,typemodel,saveData):
    """Write a (years x lat x lon) array of LRP relevance maps to netCDF4.

    Parameters
    ----------
    lats, lons : 1-D coordinate arrays matching var's trailing axes
    var : 3-D relevance array shaped (years, lat, lon)
    directory : output directory (must end with a path separator — the
        filename is built by plain string concatenation)
    typemodel : tag inserted in the filename (e.g. 'Training', 'Obs')
    saveData : experiment descriptor appended to the filename
    """
    print('\n>>> Using netcdfLRP function!')
    from netCDF4 import Dataset
    import numpy as np
    name = 'LRPMap' + typemodel + '_' + saveData + '.nc'
    filename = directory + name
    ncfile = Dataset(filename,'w',format='NETCDF4')
    ncfile.description = 'LRP maps for using selected seed'
    ### Dimensions
    ncfile.createDimension('years',var.shape[0])
    ncfile.createDimension('lat',var.shape[1])
    ncfile.createDimension('lon',var.shape[2])
    ### Variables
    years = ncfile.createVariable('years','f4',('years'))
    latitude = ncfile.createVariable('lat','f4',('lat'))
    longitude = ncfile.createVariable('lon','f4',('lon'))
    varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))
    ### Units and global metadata
    varns.units = 'unitless relevance'
    ncfile.title = 'LRP relevance'
    ncfile.institution = 'Colorado State University'  # fixed typo: was 'instituion'
    ncfile.references = 'Barnes et al. [2020]'
    ### Data
    years[:] = np.arange(var.shape[0])
    latitude[:] = lats
    longitude[:] = lons
    varns[:] = var
    ncfile.close()
    print('*Completed: Created netCDF4 File!')
### Save the LRP relevance maps (all data, training, testing, observations)
### to netCDF4 files in directoryoutput.
# NOTE(review): lrprandom is computed above but never written out — confirm
# whether the random-data relevance maps were meant to be saved as well.
netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)
netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)
netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)
netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)
| 51.496166
| 355
| 0.420024
|
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
from keras.layers import Dense, Activation
from keras import regularizers
from keras import metrics
from keras import optimizers
from keras.models import Sequential
import tensorflow.keras as keras
import tensorflow as tf
import pandas as pd
import random
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import calc_LRPclass as LRP
import innvestigate
from sklearn.metrics import accuracy_score
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = .001
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directorydataENS = '/Users/zlabe/Data/SMILE/'
directorydataBB = '/Users/zlabe/Data/BEST/'
directorydataEE = '/Users/zlabe/Data/ERA5/'
directoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
modelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',
'GFDL_CM3','GFDL_ESM2M','lens']
datasetsingle = ['SMILE']
dataset_obs = 'ERA5BE'
seasons = ['annual']
variq = 'T2M'
reg_name = 'LowerArctic'
timeper = 'historical'
pickSMILE = []
if len(pickSMILE) >= 1:
lenOfPicks = len(pickSMILE)
else:
lenOfPicks = len(modelGCMs)
land_only = False
ocean_only = False
if land_only == True:
maskNoiseClass = 'land'
elif ocean_only == True:
maskNoiseClass = 'ocean'
else:
maskNoiseClass = 'none'
rm_merid_mean = False
rm_annual_mean = False
rm_ensemble_mean = False
rm_observational_mean = False
calculate_anomalies = False
if calculate_anomalies == True:
if timeper == 'historical':
baseline = np.arange(1951,1980+1,1)
elif timeper == 'future':
baseline = np.arange(2021,2050+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
window = 0
ensTypeExperi = 'ENS'
shuffletype = 'RANDGAUSS'
### Size of the synthetic "twin" noise class appended to the GCM classes.
### (Comment-stripping fused the assignment and the if-header onto one line;
### split back into two statements.)
sizeOfTwin = 4
if sizeOfTwin > 0:
    # Any positive twin size contributes exactly one extra class label
    sizeOfTwinq = 1
else:
    sizeOfTwinq = sizeOfTwin
factorObs = 10 if ensTypeExperi == 'ENS':
if window == 0:
rm_standard_dev = False
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950+window,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020+window,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
elif ensTypeExperi == 'GCM':
if window == 0:
rm_standard_dev = False
yearsall = np.arange(1950,2019+1,1)
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
numOfEns = 16
lensalso = True
if len(pickSMILE) == 0:
if modelGCMs[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
elif len(pickSMILE) != 0:
if pickSMILE[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
lentime = len(yearsall)
ravelyearsbinary = False
ravelbinary = False
num_of_class = lenOfPicks + sizeOfTwinq
lrpRule = 'z'
normLRP = True
typeOfAnalysis = 'issueWithExperiment'
if rm_ensemble_mean == True:
if window > 1:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-1'
if rm_ensemble_mean == True:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-2'
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-3'
if variq == 'T2M':
integer = 20 elif variq == 'P':
integer = 20 elif variq == 'SLP':
integer = 20 if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-4'
if variq == 'T2M':
integer = 25 elif variq == 'P':
integer = 15 elif variq == 'SLP':
integer = 5 if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-5'
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-6'
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-7'
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-8'
if variq == 'T2M':
integer = 1 elif variq == 'P':
integer = 1 elif variq == 'SLP':
integer = 5 if rm_ensemble_mean == False:
if window > 1:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-9'
print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
if typeOfAnalysis == 'issueWithExperiment':
sys.exit('Wrong parameters selected to analyze')
if land_only == True:
saveData = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
elif ocean_only == True:
saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
else:
saveData = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
print('*Filename == < %s >' % saveData)
if seasons != 'none':
classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))
for i in range(lenOfPicks):
classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i)
if sizeOfTwin > 0:
randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)
classesl = np.append(classesl,randomNoiseClass,axis=0)
if ensTypeExperi == 'ENS':
classeslnew = np.swapaxes(classesl,0,1)
elif ensTypeExperi == 'GCM':
classeslnew = classesl
for sis,singlesimulation in enumerate(datasetsingle):
lrpsns = []
for seas in range(len(seasons)):
simuqq = datasetsingle[0]
monthlychoice = seasons[seas]
lat_bounds,lon_bounds = UT.regions(reg_name)
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',
'RMSE Train','RMSE Test',
'ridge penalty','zero mean',
'zero merid mean','land only?','ocean only?'])
dataset = singlesimulation
modelType = dataset
if dataset_obs == '20CRv3':
year_obsall = np.arange(yearsall[sis].min(),2015+1,1)
elif dataset_obs == 'ERA5':
year_obsall = np.arange(1979+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1979,2019+1,1)
elif dataset_obs == 'ERA5BE':
year_obsall = np.arange(1950+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1950,2019+1,1)
if monthlychoice == 'DJF':
obsyearstart = year_obsall.min()+1
year_obs = year_obsall[1:]
else:
obsyearstart = year_obsall.min()
year_obs = year_obsall
if rm_annual_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
if rm_ensemble_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
segment_data_factor = .75
useGPU = False
cascade = False
### Plot within the training loop - may want to set to False when testing out
### larget sets of parameters
plot_in_train = False
###############################################################################
###############################################################################
###############################################################################
### Read in model and observational/reanalysis data
def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Load the model ensemble for one variable and crop it to the region.

    monthlychoice and timeper are taken from module scope; the lat/lon
    bound defaults are captured from the module at definition time.
    Returns (regional data, lats, lons).
    """
    fullfield, fulllats, fulllons = df.readFiles(variq,dataset,monthlychoice,
                                                 numOfEns,lensalso,randomalso,
                                                 ravelyearsbinary,ravelbinary,
                                                 shuffletype,timeper)
    regional, lats, lons = df.getRegion(fullfield,fulllats,fulllons,
                                        lat_bounds,lon_bounds)
    # Reports the pre-crop (global) shape, as in the original
    print('\nOur dataset: ',dataset,' is shaped',fullfield.shape)
    return regional, lats, lons
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Load the observational/reanalysis field for one variable, cropped
    to the region. Returns (regional obs data, lats, lons)."""
    rawobs, rawlats, rawlons = df.readFiles(variq,dataset_obs,monthlychoice,
                                            numOfEns,lensalso,randomalso,
                                            ravelyearsbinary,ravelbinary,
                                            shuffletype,timeper)
    data_obs, lats_obs, lons_obs = df.getRegion(rawobs,rawlats,rawlons,
                                                lat_bounds,lon_bounds)
    # Reports the post-crop (regional) shape, as in the original
    print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)
    return data_obs,lats_obs,lons_obs
###############################################################################
###############################################################################
###############################################################################
### Select data to test, train on
def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):
    """Split model data and class labels into training and testing sets.

    ensTypeExperi == 'ENS': split across ensemble members (random draw,
    seeded by the module-level random_segment_seed).
    ensTypeExperi == 'GCM': split across years (contiguous middle block
    trains, the flanking years test).

    Returns Xtrain, Ytrain (one-hot), Xtest, Ytest (one-hot), shape
    bookkeeping values, the test/train index lists, and a class_weight dict.

    Side effects: mutates the module globals random_segment_seed,
    trainIndices, testIndices; reseeds and then unseeds numpy's RNG —
    the exact sequence of RNG calls determines the split, so do not
    reorder statements here.
    """
    global random_segment_seed,trainIndices,testIndices
    if random_segment_seed == None:
        random_segment_seed = int(int(np.random.randint(1, 100000)))
    np.random.seed(random_segment_seed)
    ### Large-ensemble experiment: hold out whole ensemble members
    if ensTypeExperi == 'ENS':
        # Flip GCM and ensemble-member axes so members lead
        datanew = np.swapaxes(data,0,1)
        classeslnew = np.swapaxes(classesl,0,1)
        if fac < 1 :
            nrows = datanew.shape[0]
            segment_train = int(np.round(nrows * fac))
            segment_test = nrows - segment_train
            print('Training on',segment_train,'ensembles, testing on',segment_test)
            # Rejection-sample distinct member indices for training
            i = 0
            trainIndices = list()
            while i < segment_train:
                line = np.random.randint(0, nrows)
                if line not in trainIndices:
                    trainIndices.append(line)
                    i += 1
                else:
                    pass
            # Then distinct members for testing, disjoint from training
            i = 0
            testIndices = list()
            while i < segment_test:
                line = np.random.randint(0, nrows)
                if line not in trainIndices:
                    if line not in testIndices:
                        testIndices.append(line)
                        i += 1
                else:
                    pass
            ### Training segment----------
            data_train = np.empty((len(trainIndices),datanew.shape[1],
                                   datanew.shape[2],datanew.shape[3],
                                   datanew.shape[4]))
            Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],
                               classeslnew.shape[2]))
            for index,ensemble in enumerate(trainIndices):
                data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
                Ytrain[index,:,:] = classeslnew[ensemble,:,:]
            if debug:
                print('\nTraining on ensembles: ',trainIndices)
                print('Testing on ensembles: ',testIndices)
                print('\norg data - shape', datanew.shape)
                print('training data - shape', data_train.shape)
            # Flatten (member, GCM, year) into samples; (lat, lon) into features
            Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))
            Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))
            Xtrain_shape = (data_train.shape[0])
            ### Testing segment----------
            data_test = np.empty((len(testIndices),datanew.shape[1],
                                  datanew.shape[2],datanew.shape[3],
                                  datanew.shape[4]))
            Ytest = np.empty((len(testIndices),classeslnew.shape[1],
                              classeslnew.shape[2]))
            for index,ensemble in enumerate(testIndices):
                data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
                Ytest[index,:,:] = classeslnew[ensemble,:,:]
            if debug:
                print('Training on ensembles: %s' % len(trainIndices))
                print('Testing on ensembles: %s' % len(testIndices))
                print('\norg data - shape', datanew.shape)
                print('testing data - shape', data_test.shape)
            Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))
            Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))
            # NOTE(review): first assignment is immediately overwritten below
            Xtest_shape = (data_test.shape[0])
            Xtest_shape = (data_test.shape[0], data_test.shape[1])
            data_train_shape = data_train.shape[0]
            data_test_shape = data_test.shape[0]
            ### 'unlock' the random seed
            np.random.seed(None)
            ### One-hot vectors
            Ytrain = keras.utils.to_categorical(Ytrain)
            Ytest = keras.utils.to_categorical(Ytest)
            ### Class weights
            class_weight = class_weight_creator(Ytrain)
    ### GCM-type experiments without ensembles: hold out years instead
    elif ensTypeExperi == 'GCM':
        if data.ndim == 5:
            # Collapse (GCM, member) into a single leading axis
            datanew = np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))
            classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))
        else:
            datanew = data
            classeslnew = classesl
        if fac < 1 :
            nrows = datanew.shape[1]
            segment_train = int(np.floor(nrows * fac))
            segment_test = nrows - segment_train
            print('Training on',segment_train,'years, testing on',segment_test)
            # Train on a contiguous middle block of years; test on both flanks
            firstyears = int(np.floor(segment_test/2))
            lastyears = -int(np.floor(segment_test/2))
            trainIndices = np.arange(firstyears,firstyears+segment_train,1)
            testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)
            ### Training segment----------
            data_train = np.empty((datanew.shape[0],len(trainIndices),
                                   datanew.shape[2],datanew.shape[3]))
            Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))
            for index,ensemble in enumerate(trainIndices):
                data_train[:,index,:,:] = datanew[:,ensemble,:,:]
                Ytrain[:,index] = classeslnew[:,ensemble]
            if debug:
                print('\nTraining on years: ',trainIndices)
                print('Testing on years: ',testIndices)
                print('\norg data - shape', datanew.shape)
                print('training data - shape', data_train.shape)
            Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))
            Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))
            Xtrain_shape = (data_train.shape[0])
            ### Testing segment----------
            data_test = np.empty((datanew.shape[0],len(testIndices),
                                  datanew.shape[2],datanew.shape[3]))
            Ytest = np.empty((classeslnew.shape[0],len(testIndices)))
            for index,ensemble in enumerate(testIndices):
                data_test[:,index,:,:] = datanew[:,ensemble,:,:]
                Ytest[:,index] = classeslnew[:,ensemble]
            if debug:
                print('Training on years: %s' % len(trainIndices))
                print('Testing on years: %s' % len(testIndices))
                print('\norg data - shape', datanew.shape)
                print('testing data - shape', data_test.shape)
            Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))
            Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))
            # NOTE(review): first assignment is immediately overwritten below
            Xtest_shape = (data_test.shape[0])
            Xtest_shape = (data_test.shape[0], data_test.shape[1])
            data_train_shape = data_train.shape[0]
            data_test_shape = data_test.shape[0]
            ### 'unlock' the random seed
            np.random.seed(None)
            ### One-hot vectors
            Ytrain = keras.utils.to_categorical(Ytrain)
            Ytest = keras.utils.to_categorical(Ytest)
            ### Class weights
            class_weight = class_weight_creator(Ytrain)
    else:
        # NOTE(review): this only prints; execution then fails on the return
        # below with a NameError — confirm whether a raise was intended.
        print(ValueError('WRONG EXPERIMENT!'))
    return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight
###############################################################################
###############################################################################
###############################################################################
### Plotting functions
def adjust_spines(ax, spines):
    """Show only the requested matplotlib spines, offset 5 points outward.

    Spines whose name appears in *spines* are moved outward by 5 points;
    all other spines are hidden by painting them 'none'. Tick marks are
    kept only on the axis whose spine ('left' / 'bottom') was requested.
    """
    for name, spine in ax.spines.items():
        if name not in spines:
            spine.set_color('none')  # hide spines that were not requested
        else:
            spine.set_position(('outward', 5))

    # y-axis ticks only when the left spine is visible
    if 'left' not in spines:
        ax.yaxis.set_ticks([])
    else:
        ax.yaxis.set_ticks_position('left')

    # x-axis ticks only when the bottom spine is visible
    if 'bottom' not in spines:
        ax.xaxis.set_ticks([])
    else:
        ax.xaxis.set_ticks_position('bottom')
###############################################################################
###############################################################################
###############################################################################
### Create a class weight dictionary to help if the classes are unbalanced
def class_weight_creator(Y):
    """Build a Keras-style class-weight dict from one-hot labels.

    Each class is weighted by (largest class count) / (its own count),
    so the most common class gets weight 1.0 and rarer classes get
    proportionally larger weights.

    Parameters
    ----------
    Y : 2-D array of one-hot labels (samples x classes)

    Returns
    -------
    dict mapping class index -> weight
    """
    counts = np.sum(Y, axis=0)        # samples observed per class
    scale = np.max(counts) / counts   # inverse-frequency weighting
    return {label: scale[label] for label in range(Y.shape[-1])}
###############################################################################
###############################################################################
###############################################################################
### Neural Network Creation & Training
class TimeHistory(keras.callbacks.Callback):
    # Keras callback that records the wall-clock duration (seconds) of
    # each training epoch in self.times.
    # (logs={} mirrors the Keras callback signature convention.)
    def on_train_begin(self, logs={}):
        # Reset the per-epoch timing list at the start of model.fit().
        self.times = []
    def on_epoch_begin(self, epoch, logs={}):
        # Stamp the start time of the epoch about to run.
        self.epoch_time_start = time.time()
    def on_epoch_end(self, epoch, logs={}):
        # Append elapsed seconds for the epoch that just finished.
        self.times.append(time.time() - self.epoch_time_start)
def defineNN(hidden, input_shape, output_shape, ridgePenalty):
    """Construct the fully connected (dense) Keras classifier.

    Parameters
    ----------
    hidden : list of int
        Nodes per hidden layer; hidden[0] is attached to the input.
    input_shape : int
        Number of input features (flattened map size).
    output_shape : int
        Number of output classes.
    ridgePenalty : float
        L2 penalty applied to the first layer's weights only.

    Returns
    -------
    keras Sequential model ending in a softmax activation. Weights and
    biases are seeded with the global random_network_seed so that the
    initialization is reproducible; the hidden activation is the global
    actFun.
    """
    model = Sequential()

    ### First hidden layer -- the only one carrying the L2 (ridge) penalty
    model.add(Dense(hidden[0], input_shape=(input_shape,),
                    activation=actFun, use_bias=True,
                    kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=ridgePenalty),
                    bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
                    kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))

    ### Remaining hidden layers are unregularized
    for nodes in hidden[1:]:
        model.add(Dense(nodes, activation=actFun,
                        use_bias=True,
                        kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),
                        bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
                        kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
    print('\nTHIS IS AN ANN!\n')

    ### Output layer emits linear logits; softmax is added as its own layer
    model.add(Dense(output_shape, activation=None, use_bias=True,
                    kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),
                    bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
                    kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
    model.add(Activation('softmax'))

    return model
def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):
    """Compile and fit the ANN.

    Uses SGD with Nesterov momentum and categorical cross-entropy, with
    early stopping on the training loss and a per-epoch timing callback.

    Parameters
    ----------
    model : keras Sequential model from defineNN()
    Xtrain, Ytrain : training predictors and one-hot labels
    niter : int
        Maximum number of epochs.
    class_weight : dict
        Per-class weights from class_weight_creator().
    verbose : Keras fit() verbosity level

    Returns
    -------
    (model, history) : trained model and its Keras History object.

    Side effects: sets the globals lr_here and batch_size, which are
    used later when composing the saved-model file name.
    """
    global lr_here, batch_size
    lr_here = 0.001
    model.compile(optimizer=optimizers.SGD(lr=lr_here,
                                           momentum=0.9, nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=[metrics.categorical_accuracy])

    ### Declare the relevant model parameters
    batch_size = 24

    print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----')

    ### Callbacks
    time_callback = TimeHistory()
    early_stopping = keras.callbacks.EarlyStopping(monitor='loss',
                                                   patience=2,
                                                   verbose=1,
                                                   mode='auto')

    ### BUG FIX: class_weight was previously accepted but never passed to
    ### fit(), so the class-imbalance weighting was silently ignored.
    history = model.fit(Xtrain, Ytrain, batch_size=batch_size, epochs=niter,
                        shuffle=True, verbose=verbose,
                        class_weight=class_weight,
                        callbacks=[time_callback, early_stopping],
                        validation_split=0.)
    print('******** done training ***********')

    return model, history
def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):
    """Grid-search driver: trains one ANN per combination of epoch count,
    ridge penalty, and hidden-layer architecture, logging each result.

    Parameters
    ----------
    Xtrain, Ytrain, Xtest, Ytest : predictors and one-hot labels
        (Ytest may be a bool to skip the test-set RMSE).
    iterations : list of int -- epoch counts to try
    ridge_penalty : list of float -- first-layer L2 penalties to try
    hiddens : list of list of int -- hidden-layer architectures to try
    class_weight : dict -- per-class weights forwarded to trainNN
    plot_in_train : bool -- show the training-loss curve after each fit

    Returns
    -------
    (experiment_result, model)
        The global results DataFrame and the most recently trained model.

    NOTE(review): `results` is a single flat dict updated in place, so
    with multi-element grids each appended row reflects only the latest
    combination; fine for the usual one-element lists.
    """
    results = {}
    global nnet,random_network_seed

    for niter in iterations:
        for penalty in ridge_penalty:
            for hidden in hiddens:
                ### Check / use random seed; draw one if not provided so the
                ### run is still reproducible from the stored seed value
                if random_network_seed == None:
                    np.random.seed(None)
                    random_network_seed = int(np.random.randint(1, 100000))
                np.random.seed(random_network_seed)
                random.seed(random_network_seed)
                tf.set_random_seed(0)

                ### Standardize the data
                ### NOTE(review): rebinds Xtrain/Xtest, so with multi-element
                ### grids later passes re-standardize already-standardized data
                Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)
                Xmean,Xstd = stdVals

                ### Define the model
                model = defineNN(hidden,
                                 input_shape=np.shape(Xtrain)[1],
                                 output_shape=np.shape(Ytrain)[1],
                                 ridgePenalty=penalty)

                ### Train the net
                model, history = trainNN(model,Xtrain,
                                         Ytrain,niter,class_weight,verbose=1)

                ### After training, use the network with training data to
                ### check that we don't have any errors and output RMSE
                rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))
                if type(Ytest) != bool:
                    rmse_test = 0.  # NOTE(review): dead assignment, overwritten next line
                    rmse_test = dSS.rmse(Ytest,model.predict(Xtest))
                else:
                    rmse_test = False

                ### Record this run's settings and metrics
                this_result = {'iters': niter,
                               'hiddens' : hidden,
                               'RMSE Train' : rmse_train,
                               'RMSE Test' : rmse_test,
                               'ridge penalty': penalty,
                               'zero mean' : rm_annual_mean,
                               'zero merid mean' : rm_merid_mean,
                               'land only?' : land_only,
                               'ocean only?' : ocean_only,
                               'Segment Seed' : random_segment_seed,
                               'Network Seed' : random_network_seed }
                results.update(this_result)

                ### Append to the global experiment log (pandas DataFrame)
                global experiment_result
                experiment_result = experiment_result.append(results,
                                                             ignore_index=True)

                if plot_in_train == True:
                    ### Quick convergence check: training-loss curve
                    plt.figure()
                    plt.subplot(1,1,1)
                    plt.plot(history.history['loss'],label = 'training')
                    plt.title(history.history['loss'][-1])
                    plt.xlabel('epoch')
                    plt.xlim(2,len(history.history['loss'])-1)
                    plt.legend()
                    plt.grid(True)
                    plt.show()

                #'unlock' the random seed
                np.random.seed(None)
                random.seed(None)
                tf.set_random_seed(None)

    return experiment_result, model
###############################################################################
###############################################################################
###############################################################################
### Results
# Run TensorFlow single-threaded so the seeded experiments are reproducible.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()

### Parameters
debug = True                 # print extra shape/index diagnostics
NNType = 'ANN'               # architecture label used in output file names
avgHalfChunk = 0
option4 = True
biasBool = False             # forwarded to the LRP calculation below
hiddensList = [[10,10]]      # hidden-layer architectures to try
ridge_penalty = [0.1]        # first-layer L2 penalties to try
# hiddensList = [[8,8]]
# ridge_penalty = [0.2]
actFun = 'relu'

# Smaller network / different penalty when the added-noise class masks
# land or ocean (maskNoiseClass is defined earlier in the script).
if any([maskNoiseClass=='land',maskNoiseClass=='ocean']):
    debug = True
    NNType = 'ANN'
    avgHalfChunk = 0
    option4 = True
    biasBool = False
    hiddensList = [[8,8]]
    ridge_penalty = [0.10]
    actFun = 'relu'

expList = [(0)] # (0,1)
expN = np.size(expList)
iterations = [100]           # maximum training epochs
random_segment = True
foldsN = 1
for avgHalfChunk in (0,):
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
for loop in ([0]):
### Get info about the region
lat_bounds,lon_bounds = UT.regions(reg_name)
data_all,lats,lons = read_primary_dataset(variq,dataset,
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,
dataset_obs,
numOfEns,
lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
###############################################################################
###############################################################################
###############################################################################
for exp in expList:
### Get the data together
data, data_obs, = data_all, data_obs_all,
###############################################################################
if len(pickSMILE) >= 1:
data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)
print('\n*Pick models to analysis from %s*\n' % pickSMILE)
###############################################################################
if calculate_anomalies == True:
data, data_obs = dSS.calculate_anomalies(data,data_obs,
lats,lons,baseline,yearsall)
print('\n*Calculate anomalies for %s-%s*\n' % (baseline.min(),baseline.max()))
###############################################################################
if rm_annual_mean == True:
data, data_obs = dSS.remove_annual_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed annual mean*\n')
###############################################################################
if rm_merid_mean == True:
data, data_obs = dSS.remove_merid_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed meridional mean*\n')
###############################################################################
if rm_ensemble_mean == True:
data = dSS.remove_ensemble_mean(data,ravel_modelens,
ravelmodeltime,
rm_standard_dev,
numOfEns)
print('\n*Removed ensemble mean*')
###############################################################################
if rm_standard_dev == True:
data = dSS.rm_standard_dev(data,window,ravelmodeltime,
numOfEns)
print('\n*Removed standard deviation*')
###############################################################################
if rm_observational_mean == True:
data = dSS.remove_observations_mean(data,data_obs,lats,lons)
print('\n*Removed observational data*')
###############################################################################
if land_only == True:
data, data_obs = dSS.remove_ocean(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed ocean data*')
###############################################################################
if ocean_only == True:
data, data_obs = dSS.remove_land(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed land data*')
###############################################################################
### Adding random data
if sizeOfTwin > 0:
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
data = dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Modify the GFDL-CM3 model for warmth and cooling that model only
print('\n <<< FACTOR FOR OBS IS %s! >>>\n' % factorObs)
if factorObs == 0:
data = data
elif factorObs == 1: # warm its mean state
GFDL = data[4,:,:,:,:]
GFDLwarmer = GFDL + 3
data[4,:,:,:,:] = GFDLwarmer
elif factorObs == 2: # cool its mean state
GFDL = data[4,:,:,:,:]
GFDLcooler = GFDL - 3
data[4,:,:,:,:] = GFDLcooler
elif factorObs == 3: # warm recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 4: # cool recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 5: # warm the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
warmerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) + 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + warmerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 6: # cool the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
coolerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) - 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + coolerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 7: # warm the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
warmerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) + 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + warmerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 8: # cool the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
coolerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) - 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + coolerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 9: # warm early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 10: # cool early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Loop over folds
for loop in np.arange(0,foldsN):
K.clear_session()
#---------------------------
# random_segment_seed = 34515
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
#---------------------------
Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)
YtrainClassMulti = Ytrain
YtestClassMulti = Ytest
# For use later
XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean, Xstd = stdVals
#---------------------------
random_network_seed = 87750
#---------------------------
# Create and train network
exp_result,model = test_train_loopClass(Xtrain,
YtrainClassMulti,
Xtest,
YtestClassMulti,
iterations=iterations,
ridge_penalty=ridge_penalty,
hiddens=hiddensList,class_weight=class_weight,
plot_in_train = True)
model.summary()
################################################################################################################################################
# save the model
dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'
savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)
if(reg_name=='Globe'):
regSave = ''
else:
regSave = '_' + reg_name
if(rm_annual_mean==True):
savename = savename + '_AnnualMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'
savename = savename + regSave
# model.save(dirname + savename + '.h5')
# np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)
print('saving ' + savename)
###############################################################
### Make final plot
### Get obs
dataOBSERVATIONS = data_obs
latsOBSERVATIONS = lats_obs
lonsOBSERVATIONS = lons_obs
Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])
annType = 'class'
if monthlychoice == 'DJF':
startYear = yearsall[sis].min()+1
endYear = yearsall[sis].max()
else:
startYear = yearsall[sis].min()
endYear = yearsall[sis].max()
years = np.arange(startYear,endYear+1,1)
Xmeanobs = np.nanmean(Xobs,axis=0)
Xstdobs = np.nanstd(Xobs,axis=0)
XobsS = (Xobs-Xmeanobs)/Xstdobs
XobsS[np.isnan(XobsS)] = 0
xtrainpred = (Xtrain-Xmean)/Xstd
xtrainpred[np.isnan(xtrainpred)] = 0
xtestpred = (Xtest-Xmean)/Xstd
xtestpred[np.isnan(xtestpred)] = 0
if(annType=='class'):
YpredObs = model.predict(XobsS)
YpredTrain = model.predict(xtrainpred)
YpredTest = model.predict(xtestpred)
#######################################################
#######################################################
#######################################################
### Check null hypothesis of random data!
randarray,latsra,lonsra = read_primary_dataset(variq,'RANDOM',
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])
randarraymean = np.nanmean(randarrayn,axis=0)
randarraystd = np.nanstd(randarrayn,axis=0)
randarrayS = (randarrayn-randarraymean)/randarraystd
### Prediction on random data
YpredRand = model.predict(randarrayS)
#######################################################
#######################################################
#######################################################
### Get output from model
trainingout = YpredTrain
testingout = YpredTest
if ensTypeExperi == 'ENS':
classesltrain = classeslnew[trainIndices,:,:].ravel()
classesltest = classeslnew[testIndices,:,:].ravel()
elif ensTypeExperi == 'GCM':
classesltrain = classeslnew[:,:,trainIndices].ravel()
classesltest = classeslnew[:,:,testIndices].ravel()
### Random data tests
randout = YpredRand
labelsrand = np.argmax(randout,axis=1)
uniquerand,countrand = np.unique(labelsrand,return_counts=True)
np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)
np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)
### Observations
obsout = YpredObs
labelsobs = np.argmax(obsout,axis=1)
uniqueobs,countobs = np.unique(labelsobs,return_counts=True)
print(labelsobs)
np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)
np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)
def truelabel(data):
    """Collapse one-hot / confidence rows to integer class labels.

    Parameters
    ----------
    data : 2-D array (samples x classes) of class confidences

    Returns
    -------
    1-D array with the argmax class index of each sample.
    """
    return np.argmax(data, axis=1)
def accuracyTotalTime(data_pred, data_true):
    """Overall categorical accuracy across all samples/years.

    Thin wrapper around sklearn's accuracy_score, preserving the
    original call (true labels first, predictions second).
    """
    return accuracy_score(data_true, data_pred)
##############################################################################
##############################################################################
##############################################################################
indextrain = truelabel(trainingout)
acctrain = accuracyTotalTime(indextrain,classesltrain)
indextest = truelabel(testingout)
acctest = accuracyTotalTime(indextest,classesltest)
print('\n\nAccuracy Training == ',acctrain)
print('Accuracy Testing == ',acctest)
## Save the output for plotting
np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)
np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)
np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)
np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)
np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)
np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)
### See more more details
model.layers[0].get_config()
## Define variable for analysis
print('\n\n------------------------')
print(variq,'= Variable!')
print(monthlychoice,'= Time!')
print(reg_name,'= Region!')
print(lat_bounds,lon_bounds)
print(dataset,'= Model!')
print(dataset_obs,'= Observations!\n')
print(rm_annual_mean,'= rm_annual_mean')
print(rm_merid_mean,'= rm_merid_mean')
print(rm_ensemble_mean,'= rm_ensemble_mean')
print(land_only,'= land_only')
print(ocean_only,'= ocean_only')
## Variables for plotting
lons2,lats2 = np.meshgrid(lons,lats)
observations = data_obs
modeldata = data
modeldatamean = np.nanmean(modeldata,axis=1)
spatialmean_obs = UT.calc_weightedAve(observations,lats2)
spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)
spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)
plt.figure()
plt.plot(yearsall,spatialmean_modmean.transpose())
plt.plot(yearsall,spatialmean_modmean.transpose()[:,4],linewidth=3,color='red',label=r'GFDL-CM3 - %s-Experiment' % factorObs)
plt.xlabel('Years')
plt.ylabel('Average Arctic Temperature')
plt.legend()
plt.ylim([-14.5,-1])
plt.savefig('/Users/zlabe/Desktop/factor-%s.png' % factorObs,dpi=300)
plt.figure()
plt.plot(spatialmean_obs)
##############################################################################
##############################################################################
##############################################################################
## Visualizing through LRP
numLats = lats.shape[0]
numLons = lons.shape[0]
numDim = 3
##############################################################################
##############################################################################
##############################################################################
lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),
np.append(Ytrain,Ytest,axis=0),
biasBool,annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
meanlrp = np.nanmean(lrpall,axis=0)
fig=plt.figure()
plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)
### For training data only
lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For training data only
lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For observations data only
lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
### For random data only
lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
##############################################################################
##############################################################################
##############################################################################
def netcdfLRP(lats,lons,var,directory,typemodel,saveData):
    """Write a (years x lat x lon) LRP relevance array to a netCDF4 file.

    Parameters
    ----------
    lats, lons : 1-D coordinate arrays
    var : 3-D array of relevance maps ordered (years, lat, lon)
    directory : str, output directory (must end with a path separator)
    typemodel : str, label in the file name ('AllData', 'Training', ...)
    saveData : str, experiment descriptor appended to the file name
    """
    print('\n>>> Using netcdfLRP function!')

    from netCDF4 import Dataset
    import numpy as np

    name = 'LRPMap' + typemodel + '_' + saveData + '.nc'
    filename = directory + name
    ncfile = Dataset(filename,'w',format='NETCDF4')
    ncfile.description = 'LRP maps for using selected seed'

    ### Dimensions
    ncfile.createDimension('years',var.shape[0])
    ncfile.createDimension('lat',var.shape[1])
    ncfile.createDimension('lon',var.shape[2])

    ### Variables
    years = ncfile.createVariable('years','f4',('years'))
    latitude = ncfile.createVariable('lat','f4',('lat'))
    longitude = ncfile.createVariable('lon','f4',('lon'))
    varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))

    ### Units / global attributes
    varns.units = 'unitless relevance'
    ncfile.title = 'LRP relevance'
    ### BUG FIX: global attribute was previously misspelled 'instituion'
    ncfile.institution = 'Colorado State University'
    ncfile.references = 'Barnes et al. [2020]'

    ### Data
    years[:] = np.arange(var.shape[0])
    latitude[:] = lats
    longitude[:] = lons
    varns[:] = var

    ncfile.close()
    print('*Completed: Created netCDF4 File!')
# Save LRP relevance maps for each data subset to separate netCDF4 files.
netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)
netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)
netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)
netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)
| true
| true
|
f708c7fb5daa5795a1afe4d156b806022b4a3826
| 19,116
|
py
|
Python
|
ansible/modules/cloud/rackspace/rax_files_objects.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/cloud/rackspace/rax_files_objects.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/cloud/rackspace/rax_files_objects.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-13T14:24:57.000Z
|
2020-02-13T14:24:57.000Z
|
#!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for file object operations.
required: true
default: null
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
default: null
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
default: null
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
default: null
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
choices:
- yes
- "no"
default: "yes"
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects:
container: testcont
dest: ~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects:
container: testcont
src: file1
dest: ~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects:
container: testcont
src: file1,file2,file3
dest: ~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects:
container: testcont
method: delete
dest: file1
- name: "Delete several objects in test container"
rax_files_objects:
container: testcont
method: delete
dest: file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects:
container: testcont
method: delete
- name: "Upload all files to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: someuser@example.com
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file3
expires: 60
- name: "Attempt to get remote object that does not exist"
rax_files_objects:
container: testcont
method: get
src: FileThatDoesNotExist.jpg
dest: ~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects:
container: testcont
method: delete
dest: FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects:
container: testcont
type: meta
dest: file2
- name: "Get metadata on several objects"
rax_files_objects:
container: testcont
type: meta
src: file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects:
container: testcont
type: meta
src: file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects:
container: testcont
type: meta
'''
# pyrax is optional at import time; HAS_PYRAX records availability
# (presumably checked before use elsewhere in the module -- the check
# itself is outside this chunk).
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False

# Shared result payload handed to module.exit_json(); mutated in place
# by the operation functions below.
EXIT_DICT = dict(success=False)

# Header prefix Rackspace Cloud Files uses for object metadata keys.
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
    """Return the named Cloud Files container, or fail the Ansible
    module (via module.fail_json) if it does not exist."""
    try:
        return cf.get_container(container)
    except pyrax.exc.NoSuchContainer as e:
        module.fail_json(msg=e.message)
def _upload_folder(cf, folder, container, ttl=None, headers=None):
    """Upload every regular file beneath *folder* to *container*.

    Object names mirror each file's path relative to *folder*. Returns
    the total number of bytes uploaded (sizes read from the local
    filesystem before each upload).
    """
    uploaded_bytes = 0
    for dirpath, _subdirs, filenames in os.walk(folder):
        for filename in filenames:
            local_path = os.path.join(dirpath, filename)
            remote_name = os.path.relpath(local_path, folder)
            size = os.path.getsize(local_path)
            cf.upload_file(container, local_path,
                           obj_name=remote_name, return_none=True,
                           ttl=ttl, headers=headers)
            uploaded_bytes += size
    return uploaded_bytes
def upload(module, cf, container, src, dest, meta, expires):
    """ Uploads a single object or a folder to Cloud Files. Optionally sets
    metadata headers and/or a TTL value (expires) on the uploaded object(s).
    Exits the Ansible module via module.exit_json / fail_json.
    """
    if not src:
        module.fail_json(msg='src must be specified when uploading')

    c = _get_container(module, cf, container)
    src = os.path.abspath(os.path.expanduser(src))
    is_dir = os.path.isdir(src)

    # NOTE(review): precedence makes this
    # (not is_dir and not isfile(src)) or (not exists(src)) --
    # presumably intentional ("neither file nor dir, or missing"); confirm.
    if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
        module.fail_json(msg='src must be a file or a directory')
    if dest and is_dir:
        module.fail_json(msg='dest cannot be set when whole '
                             'directories are uploaded')

    cont_obj = None
    total_bytes = 0
    if dest and not is_dir:
        # Single file, renamed to 'dest' in the container
        try:
            cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
        except Exception as e:
            module.fail_json(msg=e.message)
    elif is_dir:
        # Whole-directory upload via the recursive helper
        try:
            total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
        except Exception as e:
            module.fail_json(msg=e.message)
    else:
        # Single file, keeping its own name
        try:
            cont_obj = c.upload_file(src, ttl=expires, headers=meta)
        except Exception as e:
            module.fail_json(msg=e.message)

    EXIT_DICT['success'] = True
    EXIT_DICT['container'] = c.name
    EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)

    # Report change and byte counts depending on which branch ran
    if cont_obj or total_bytes > 0:
        EXIT_DICT['changed'] = True
        if meta:
            EXIT_DICT['meta'] = dict(updated=True)

        if cont_obj:
            EXIT_DICT['bytes'] = cont_obj.total_bytes
            EXIT_DICT['etag'] = cont_obj.etag
        else:
            EXIT_DICT['bytes'] = total_bytes

    module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
    """ Download objects from Cloud Files to a local path specified by "dest".
    Optionally disable maintaining a directory structure by passing a
    false value to "structure".
    """
    # Looking for an explicit destination
    if not dest:
        module.fail_json(msg='dest is a required argument when '
                             'downloading from Cloud Files')

    # Attempt to fetch the container by name
    c = _get_container(module, cf, container)

    # Accept a single object name or a comma-separated list of objs.
    # If not specified, get the entire container.
    # Materialize a real list: map() is lazy on Python 3 and len() below
    # would raise TypeError on a map object.
    if src:
        objs = [obj.strip() for obj in src.split(',')]
    else:
        objs = c.get_object_names()

    dest = os.path.abspath(os.path.expanduser(dest))
    is_dir = os.path.isdir(dest)

    if not is_dir:
        module.fail_json(msg='dest must be a directory')

    results = []
    for obj in objs:
        try:
            c.download_object(obj, dest, structure=structure)
        except Exception as e:
            # str(e): BaseException.message does not exist on Python 3
            module.fail_json(msg=str(e))
        else:
            results.append(obj)

    len_results = len(results)
    len_objs = len(objs)

    EXIT_DICT['container'] = c.name
    EXIT_DICT['requested_downloaded'] = results
    if results:
        EXIT_DICT['changed'] = True
    if len_results == len_objs:
        EXIT_DICT['success'] = True
        EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
    else:
        EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
                           "downloaded" % (len_results, len_objs)
    module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
    """ Delete specific objects by providing a single file name or a
    comma-separated list to src OR dest (but not both). Omitting file name(s)
    assumes the entire container is to be deleted.
    """
    objs = None
    if src and dest:
        module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
                             "have been specified on both src and dest args")
    elif dest:
        objs = dest
    else:
        objs = src

    c = _get_container(module, cf, container)

    if objs:
        # Materialize a real list: map() is lazy on Python 3 and len()
        # below would raise TypeError on a map object.
        objs = [obj.strip() for obj in objs.split(',')]
    else:
        objs = c.get_object_names()

    num_objs = len(objs)

    results = []
    for obj in objs:
        try:
            result = c.delete_object(obj)
        except Exception as e:
            # str(e): BaseException.message does not exist on Python 3
            module.fail_json(msg=str(e))
        else:
            results.append(result)

    # delete_object returns True on success; count only successful deletes
    num_deleted = results.count(True)

    EXIT_DICT['container'] = c.name
    EXIT_DICT['deleted'] = num_deleted
    EXIT_DICT['requested_deleted'] = objs

    if num_deleted:
        EXIT_DICT['changed'] = True

    if num_objs == num_deleted:
        EXIT_DICT['success'] = True
        EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
    else:
        EXIT_DICT['msg'] = ("Error: only %s of %s objects "
                            "deleted" % (num_deleted, num_objs))
    module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
    """ Get metadata for a single file, comma-separated list, or entire
    container
    """
    c = _get_container(module, cf, container)

    objs = None
    if src and dest:
        # Fixed copy-pasted message: this function reads metadata, it does
        # not delete files
        module.fail_json(msg="Error: ambiguous instructions; files to get "
                             "metadata for have been specified on both src "
                             "and dest args")
    elif dest:
        objs = dest
    else:
        objs = src

    if objs:
        # Real list instead of a lazy map object, for Python 3 safety
        objs = [obj.strip() for obj in objs.split(',')]
    else:
        objs = c.get_object_names()

    results = dict()
    for obj in objs:
        try:
            meta = c.get_object(obj).get_metadata()
        except Exception as e:
            # str(e): BaseException.message does not exist on Python 3
            module.fail_json(msg=str(e))
        else:
            results[obj] = dict()
            for k, v in meta.items():
                # Strip the Cloud Files metadata header prefix from the key
                meta_key = k.split(META_PREFIX)[-1]
                results[obj][meta_key] = v

    EXIT_DICT['container'] = c.name
    if results:
        EXIT_DICT['meta_results'] = results
        EXIT_DICT['success'] = True
    module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
    """ Set metadata on a container, single file, or comma-separated list.
    Passing a true value to clear_meta clears the metadata stored in Cloud
    Files before setting the new metadata to the value of "meta".
    """
    objs = None
    if src and dest:
        module.fail_json(msg="Error: ambiguous instructions; files to set meta"
                             " have been specified on both src and dest args")
    elif dest:
        objs = dest
    else:
        objs = src

    # Real list instead of a lazy map object, for Python 3 safety
    objs = [obj.strip() for obj in objs.split(',')]

    c = _get_container(module, cf, container)

    results = []
    for obj in objs:
        try:
            result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
        except Exception as e:
            # str(e): BaseException.message does not exist on Python 3
            module.fail_json(msg=str(e))
        else:
            results.append(result)

    EXIT_DICT['container'] = c.name
    EXIT_DICT['success'] = True
    if results:
        EXIT_DICT['changed'] = True
        # Report the number of objects changed (was the literal True,
        # which contradicted the key's name; cf. num_deleted below)
        EXIT_DICT['num_changed'] = len(results)
    module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
    """ Removes metadata keys and values specified in meta, if any. Deletes on
    all objects specified by src or dest (but not both), if any; otherwise it
    deletes keys on all objects in the container
    """
    objs = None
    if src and dest:
        module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
                             "deleted have been specified on both src and dest"
                             " args")
    elif dest:
        objs = dest
    else:
        objs = src

    # Real list instead of a lazy map object, for Python 3 safety
    objs = [obj.strip() for obj in objs.split(',')]

    c = _get_container(module, cf, container)

    results = []  # Num of metadata keys removed, not objects affected
    for obj in objs:
        if meta:
            # Remove only the keys the caller asked for
            for k in meta:
                try:
                    result = c.get_object(obj).remove_metadata_key(k)
                except Exception as e:
                    # str(e): BaseException.message does not exist on Python 3
                    module.fail_json(msg=str(e))
                else:
                    results.append(result)
        else:
            # No keys given: remove every metadata key on the object
            try:
                o = c.get_object(obj)
            except pyrax.exc.NoSuchObject as e:
                module.fail_json(msg=str(e))

            for k in o.get_metadata():
                try:
                    result = o.remove_metadata_key(k)
                except Exception as e:
                    module.fail_json(msg=str(e))
                results.append(result)

    EXIT_DICT['container'] = c.name
    EXIT_DICT['success'] = True
    if results:
        EXIT_DICT['changed'] = True
        EXIT_DICT['num_deleted'] = len(results)
    module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
               structure, expires):
    """ Dispatch from here to work with metadata or file objects """
    cf = pyrax.cloudfiles

    if cf is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    # Route to the handler matching (type, method); unknown methods fall
    # through and do nothing, exactly as the original if-chains did.
    if typ == "file":
        handlers = {
            'put': lambda: upload(module, cf, container, src, dest, meta, expires),
            'get': lambda: download(module, cf, container, src, dest, structure),
            'delete': lambda: delete(module, cf, container, src, dest),
        }
    else:
        handlers = {
            'get': lambda: get_meta(module, cf, container, src, dest),
            'put': lambda: put_meta(module, cf, container, src, dest, meta, clear_meta),
            'delete': lambda: delete_meta(module, cf, container, src, dest, meta),
        }

    action = handlers.get(method)
    if action is not None:
        action()
def main():
    """Ansible module entry point: build the argument spec, validate the
    parameters, and dispatch to cloudfiles()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            container=dict(required=True),
            src=dict(),
            dest=dict(),
            method=dict(default='get', choices=['put', 'get', 'delete']),
            type=dict(default='file', choices=['file', 'meta']),
            meta=dict(type='dict', default=dict()),
            clear_meta=dict(default=False, type='bool'),
            structure=dict(default=True, type='bool'),
            expires=dict(type='int'),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    container = module.params.get('container')
    src = module.params.get('src')
    dest = module.params.get('dest')
    method = module.params.get('method')
    typ = module.params.get('type')
    meta = module.params.get('meta')
    clear_meta = module.params.get('clear_meta')
    structure = module.params.get('structure')
    expires = module.params.get('expires')

    # clear_meta wipes existing metadata before writing new values, so it
    # is only meaningful together with type=meta
    if clear_meta and not typ == 'meta':
        module.fail_json(msg='clear_meta can only be used when setting metadata')

    setup_rax_module(module, pyrax)
    cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
if __name__ == '__main__':
main()
| 30.43949
| 99
| 0.615191
|
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for file object operations.
required: true
default: null
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
default: null
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
default: null
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
default: null
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
choices:
- yes
- "no"
default: "yes"
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects:
container: testcont
dest: ~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects:
container: testcont
src: file1
dest: ~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects:
container: testcont
src: file1,file2,file3
dest: ~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects:
container: testcont
method: delete
dest: file1
- name: "Delete several objects in test container"
rax_files_objects:
container: testcont
method: delete
dest: file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects:
container: testcont
method: delete
- name: "Upload all files to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: someuser@example.com
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file3
expires: 60
- name: "Attempt to get remote object that does not exist"
rax_files_objects:
container: testcont
method: get
src: FileThatDoesNotExist.jpg
dest: ~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects:
container: testcont
method: delete
dest: FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects:
container: testcont
type: meta
dest: file2
- name: "Get metadata on several objects"
rax_files_objects:
container: testcont
type: meta
src: file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects:
container: testcont
type: meta
src: file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects:
container: testcont
type: meta
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _upload_folder(cf, folder, container, ttl=None, headers=None):
total_bytes = 0
for root, dirs, files in os.walk(folder):
for fname in files:
full_path = os.path.join(root, fname)
obj_name = os.path.relpath(full_path, folder)
obj_size = os.path.getsize(full_path)
cf.upload_file(container, full_path,
obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
total_bytes += obj_size
return total_bytes
def upload(module, cf, container, src, dest, meta, expires):
if not src:
module.fail_json(msg='src must be specified when uploading')
c = _get_container(module, cf, container)
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
total_bytes = 0
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
elif is_dir:
try:
total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
try:
cont_obj = c.upload_file(src, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or total_bytes > 0:
EXIT_DICT['changed'] = True
if meta:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
c = _get_container(module, cf, container)
if src:
objs = src.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception as e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_changed'] = True
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = [] for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject as e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
if __name__ == '__main__':
main()
| true
| true
|
f708c8fa7db92a7a71d90a6a40b14f43250d1014
| 678
|
py
|
Python
|
ctypes_generation/extended_structs/_OBJECT_ATTRIBUTES.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 479
|
2016-01-08T00:53:34.000Z
|
2022-03-22T10:28:19.000Z
|
ctypes_generation/extended_structs/_OBJECT_ATTRIBUTES.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 38
|
2017-12-29T17:09:04.000Z
|
2022-01-31T08:27:47.000Z
|
ctypes_generation/extended_structs/_OBJECT_ATTRIBUTES.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 103
|
2016-01-10T01:32:17.000Z
|
2021-12-24T17:21:06.000Z
|
class _OBJECT_ATTRIBUTES(_OBJECT_ATTRIBUTES):
    # Extends the generated _OBJECT_ATTRIBUTES ctypes structure (the base
    # class of the same name) with convenience helpers.
    @classmethod
    def from_string(cls, path, attributes=OBJ_CASE_INSENSITIVE): # Directly on constructor ?
        """Build an OBJECT_ATTRIBUTES naming *path*, with no root directory,
        no security descriptor, and case-insensitive lookup by default."""
        self = cls()
        self.Length = ctypes.sizeof(self)
        self.RootDirectory = 0
        # path is wrapped in an LSA_UNICODE_STRING and referenced by pointer
        # NOTE(review): presumably ctypes keeps the pointed-to string alive
        # via the field's _objects bookkeeping -- confirm
        self.ObjectName = ctypes.pointer(LSA_UNICODE_STRING.from_string(path))
        self.Attributes = attributes
        self.SecurityDescriptor = 0
        self.SecurityQualityOfService = 0
        return self

    def __repr__(self):
        # Fall back to the default repr when ObjectName is a NULL pointer
        if not self.ObjectName:
            return super(_OBJECT_ATTRIBUTES, self).__repr__()
        return """<{0} ObjectName="{1}">""".format(type(self).__name__, self.ObjectName[0].str)
| 42.375
| 95
| 0.669617
|
class _OBJECT_ATTRIBUTES(_OBJECT_ATTRIBUTES):
@classmethod
def from_string(cls, path, attributes=OBJ_CASE_INSENSITIVE): self = cls()
self.Length = ctypes.sizeof(self)
self.RootDirectory = 0
self.ObjectName = ctypes.pointer(LSA_UNICODE_STRING.from_string(path))
self.Attributes = attributes
self.SecurityDescriptor = 0
self.SecurityQualityOfService = 0
return self
def __repr__(self):
if not self.ObjectName:
return super(_OBJECT_ATTRIBUTES, self).__repr__()
return """<{0} ObjectName="{1}">""".format(type(self).__name__, self.ObjectName[0].str)
| true
| true
|
f708cb03dff9c74d69c541a4405a739d71ae3c40
| 860
|
py
|
Python
|
tests/lib/test_script.py
|
lucasan123/BitgesellX-server
|
99e184f5e829dad7901d4ed4e4490ac8ddc6c538
|
[
"MIT"
] | null | null | null |
tests/lib/test_script.py
|
lucasan123/BitgesellX-server
|
99e184f5e829dad7901d4ed4e4490ac8ddc6c538
|
[
"MIT"
] | null | null | null |
tests/lib/test_script.py
|
lucasan123/BitgesellX-server
|
99e184f5e829dad7901d4ed4e4490ac8ddc6c538
|
[
"MIT"
] | null | null | null |
import pytest
from bitgesellx.lib.script import OpCodes, is_unspendable_legacy, is_unspendable_genesis
@pytest.mark.parametrize("script, iug", (
    (bytes([OpCodes.OP_RETURN]), False),
    (bytes([OpCodes.OP_RETURN]) + bytes([2, 28, 50]), False),
    (bytes([OpCodes.OP_0, OpCodes.OP_RETURN]), True),
    (bytes([OpCodes.OP_0, OpCodes.OP_RETURN]) + bytes([2, 28, 50]), True)
))
def test_op_return_legacy(script, iug):
    """Scripts starting with OP_RETURN (optionally prefixed by OP_0) are
    unspendable under legacy rules; *iug* says whether they are also
    unspendable under genesis rules (which require the OP_0 prefix)."""
    assert is_unspendable_legacy(script)
    assert is_unspendable_genesis(script) is iug
@pytest.mark.parametrize("script", (
    bytes([]),
    bytes([OpCodes.OP_1, OpCodes.OP_RETURN]) + bytes([2, 28, 50]),
    bytes([OpCodes.OP_0]),
    bytes([OpCodes.OP_0, OpCodes.OP_1]),
    bytes([OpCodes.OP_HASH160]),
))
def test_not_op_return(script):
    """Scripts that do not match either OP_RETURN pattern are spendable
    under both legacy and genesis rules."""
    assert not is_unspendable_legacy(script)
    assert not is_unspendable_genesis(script)
| 31.851852
| 88
| 0.70814
|
import pytest
from bitgesellx.lib.script import OpCodes, is_unspendable_legacy, is_unspendable_genesis
@pytest.mark.parametrize("script, iug", (
(bytes([OpCodes.OP_RETURN]), False),
(bytes([OpCodes.OP_RETURN]) + bytes([2, 28, 50]), False),
(bytes([OpCodes.OP_0, OpCodes.OP_RETURN]), True),
(bytes([OpCodes.OP_0, OpCodes.OP_RETURN]) + bytes([2, 28, 50]), True)
))
def test_op_return_legacy(script, iug):
assert is_unspendable_legacy(script)
assert is_unspendable_genesis(script) is iug
@pytest.mark.parametrize("script", (
bytes([]),
bytes([OpCodes.OP_1, OpCodes.OP_RETURN]) + bytes([2, 28, 50]),
bytes([OpCodes.OP_0]),
bytes([OpCodes.OP_0, OpCodes.OP_1]),
bytes([OpCodes.OP_HASH160]),
))
def test_not_op_return(script):
assert not is_unspendable_legacy(script)
assert not is_unspendable_genesis(script)
| true
| true
|
f708cbdb70c458a183de5c672f8e50b1773e3d2a
| 974
|
py
|
Python
|
tests/test_worker_aio.py
|
chainsquad/python-graphenelib
|
6df90dbc116d8333f2d3db830818d9f22934e33f
|
[
"MIT"
] | 83
|
2015-09-04T13:49:55.000Z
|
2022-03-30T21:13:54.000Z
|
tests/test_worker_aio.py
|
chainsquad/python-graphenelib
|
6df90dbc116d8333f2d3db830818d9f22934e33f
|
[
"MIT"
] | 146
|
2015-09-23T19:07:16.000Z
|
2021-07-01T01:39:15.000Z
|
tests/test_worker_aio.py
|
chainsquad/python-graphenelib
|
6df90dbc116d8333f2d3db830818d9f22934e33f
|
[
"MIT"
] | 70
|
2015-09-23T18:43:37.000Z
|
2021-11-12T14:58:29.000Z
|
# -*- coding: utf-8 -*-
import aiounittest
from datetime import datetime
from .fixtures_aio import fixture_data, Worker, Workers, Account
from graphenecommon import exceptions
class Testcases(aiounittest.AsyncTestCase):
    """Async tests for the Worker / Workers blockchain-object wrappers."""

    def setUp(self):
        # Load offline fixture data so the tests need no network connection
        fixture_data()

    async def test_worker(self):
        """A known worker resolves, parses dates/pay, and links its account."""
        w = await Worker("1.14.139")
        self.assertIsInstance(w["work_end_date"], datetime)
        self.assertIsInstance(w["work_begin_date"], datetime)
        self.assertIsInstance(w["work_begin_date"], datetime)
        self.assertIsInstance(w["daily_pay"], int)
        account = await w.account
        self.assertIsInstance(account, Account)
        self.assertEqual(account["id"], "1.2.100")

        # Constructing a Worker from an existing Worker instance must work too
        await Worker(w)

    async def test_nonexist(self):
        """An unknown worker name raises WorkerDoesNotExistsException."""
        with self.assertRaises(exceptions.WorkerDoesNotExistsException):
            await Worker("foobar")

    async def test_workers(self):
        """The fixture data contains exactly two workers."""
        ws = await Workers()
        self.assertEqual(len(ws), 2)
| 32.466667
| 72
| 0.678645
|
import aiounittest
from datetime import datetime
from .fixtures_aio import fixture_data, Worker, Workers, Account
from graphenecommon import exceptions
class Testcases(aiounittest.AsyncTestCase):
def setUp(self):
fixture_data()
async def test_worker(self):
w = await Worker("1.14.139")
self.assertIsInstance(w["work_end_date"], datetime)
self.assertIsInstance(w["work_begin_date"], datetime)
self.assertIsInstance(w["work_begin_date"], datetime)
self.assertIsInstance(w["daily_pay"], int)
account = await w.account
self.assertIsInstance(account, Account)
self.assertEqual(account["id"], "1.2.100")
await Worker(w)
async def test_nonexist(self):
with self.assertRaises(exceptions.WorkerDoesNotExistsException):
await Worker("foobar")
async def test_workers(self):
ws = await Workers()
self.assertEqual(len(ws), 2)
| true
| true
|
f708cdb5883a2e4ef8a0b863ae08e582d5757825
| 2,436
|
py
|
Python
|
integration_tests/test_suites/celery-k8s-integration-test-suite/conftest.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-01-31T19:16:29.000Z
|
2021-01-31T19:16:29.000Z
|
integration_tests/test_suites/celery-k8s-integration-test-suite/conftest.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
integration_tests/test_suites/celery-k8s-integration-test-suite/conftest.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2019-09-11T03:02:27.000Z
|
2019-09-11T03:02:27.000Z
|
# pylint: disable=unused-import
import os
import docker
import pytest
from dagster_celery_k8s.launcher import CeleryK8sRunLauncher
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import build_and_tag_test_image, get_test_project_docker_image
from dagster_k8s_test_infra.cluster import ( # isort:skip
dagster_instance,
dagster_instance_for_user_deployments_subchart_disabled,
dagster_instance_for_daemon,
define_cluster_provider_fixture,
helm_postgres_url,
helm_postgres_url_for_user_deployments_subchart_disabled,
helm_postgres_url_for_daemon,
)
pytest_plugins = ["dagster_k8s_test_infra.helm"]
cluster_provider = define_cluster_provider_fixture()
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
@pytest.fixture(scope="session")
def dagster_docker_image():
    """Session fixture returning the test-project Docker image tag.

    On Buildkite the image is assumed to exist already; locally it is built
    only if a matching tag is not found in the local Docker daemon.
    """
    docker_image = get_test_project_docker_image()

    if not IS_BUILDKITE:
        try:
            client = docker.from_env()
            client.images.get(docker_image)
            print(  # pylint: disable=print-call
                "Found existing image tagged {image}, skipping image build. To rebuild, first run: "
                "docker rmi {image}".format(image=docker_image)
            )
        except docker.errors.ImageNotFound:
            # No cached image: build and tag it now
            build_and_tag_test_image(docker_image)

    return docker_image
# See: https://stackoverflow.com/a/31526934/324449
def pytest_addoption(parser):
    """Register the cluster/test-infra command line options.

    Each registration is wrapped in try/except because pytest raises
    ValueError when an option name is added twice -- which happens when
    multiple test suites sharing these options are loaded together, e.g.
    in the VSCode test explorer
    ("ValueError: option names {'--cluster-provider'} already added").
    """
    option_table = [
        # Use kind or some other cluster provider?
        (("--cluster-provider",), {"action": "store", "default": "kind"}),
        # Specify an existing kind cluster name to use
        (("--kind-cluster",), {"action": "store"}),
        # Keep resources around after tests are done
        (("--no-cleanup",), {"action": "store_true", "default": False}),
        # Use existing Helm chart/namespace
        (("--existing-helm-namespace",), {"action": "store"}),
    ]
    for args, kwargs in option_table:
        try:
            parser.addoption(*args, **kwargs)
        except ValueError:
            pass
| 32.052632
| 100
| 0.721675
|
import os
import docker
import pytest
from dagster_celery_k8s.launcher import CeleryK8sRunLauncher
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import build_and_tag_test_image, get_test_project_docker_image
from dagster_k8s_test_infra.cluster import ( dagster_instance,
dagster_instance_for_user_deployments_subchart_disabled,
dagster_instance_for_daemon,
define_cluster_provider_fixture,
helm_postgres_url,
helm_postgres_url_for_user_deployments_subchart_disabled,
helm_postgres_url_for_daemon,
)
pytest_plugins = ["dagster_k8s_test_infra.helm"]
cluster_provider = define_cluster_provider_fixture()
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
@pytest.fixture(scope="session")
def dagster_docker_image():
docker_image = get_test_project_docker_image()
if not IS_BUILDKITE:
try:
client = docker.from_env()
client.images.get(docker_image)
print( "Found existing image tagged {image}, skipping image build. To rebuild, first run: "
"docker rmi {image}".format(image=docker_image)
)
except docker.errors.ImageNotFound:
build_and_tag_test_image(docker_image)
return docker_image
def pytest_addoption(parser):
try:
parser.addoption("--cluster-provider", action="store", default="kind")
except ValueError:
pass
try:
parser.addoption("--kind-cluster", action="store")
except ValueError:
pass
try:
parser.addoption("--no-cleanup", action="store_true", default=False)
except ValueError:
pass
try:
parser.addoption("--existing-helm-namespace", action="store")
except ValueError:
pass
| true
| true
|
f708cdc6ccb2bec72bbd500c4dbdbfa7b65f8e8d
| 1,633
|
py
|
Python
|
My_AutoML/_legacy/__init__.py
|
PanyiDong/AutoML
|
4d981b0287fa27d7a38f029e4b20b3a89e1de4f9
|
[
"MIT"
] | 2
|
2022-03-03T16:24:08.000Z
|
2022-03-03T17:17:28.000Z
|
My_AutoML/_legacy/__init__.py
|
PanyiDong/My_AutoML
|
510727bd797e4f6fa213939c62d1d7601952e491
|
[
"MIT"
] | null | null | null |
My_AutoML/_legacy/__init__.py
|
PanyiDong/My_AutoML
|
510727bd797e4f6fa213939c62d1d7601952e491
|
[
"MIT"
] | null | null | null |
"""
File: __init__.py
Author: Panyi Dong
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /My_AutoML/_legacy/__init__.py
File Created: Thursday, 7th April 2022 3:59:55 pm
Author: Panyi Dong (panyid2@illinois.edu)
-----
Last Modified: Friday, 8th April 2022 10:25:42 pm
Modified By: Panyi Dong (panyid2@illinois.edu)
-----
MIT License
Copyright (c) 2022 - 2022, Panyi Dong
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from My_AutoML._hpo._legacy import (
AutoTabular,
AutoTabularClassifier,
AutoTabularRegressor,
)
| 35.5
| 78
| 0.784446
|
from My_AutoML._hpo._legacy import (
AutoTabular,
AutoTabularClassifier,
AutoTabularRegressor,
)
| true
| true
|
f708ce1cc05e059b422d58631ec18ea706b397a7
| 2,612
|
py
|
Python
|
bokeh_plot.py
|
ulrica221/used_car_playground
|
ff99ebc6bf256bb4d90be979c90cbd54479a9959
|
[
"MIT"
] | null | null | null |
bokeh_plot.py
|
ulrica221/used_car_playground
|
ff99ebc6bf256bb4d90be979c90cbd54479a9959
|
[
"MIT"
] | null | null | null |
bokeh_plot.py
|
ulrica221/used_car_playground
|
ff99ebc6bf256bb4d90be979c90cbd54479a9959
|
[
"MIT"
] | null | null | null |
from bokeh.io import show, output_notebook
from bokeh.models import (CDSView, ColorBar, ColumnDataSource,
CustomJS, CustomJSFilter,
GeoJSONDataSource, HoverTool,
LinearColorMapper, Slider)
from bokeh.layouts import column, row, widgetbox
# pylint: disable=no-name-in-module
from bokeh.palettes import brewer
from bokeh.plotting import figure
def plot(ny):
# Input GeoJSON source that contains features for plotting
ny_source = GeoJSONDataSource(geojson = ny.to_json())
# Define color palettes
palette = brewer['OrRd'][8]
palette = palette[::-1] # reverse order of colors so higher values have darker colors
# Instantiate LinearColorMapper that linearly maps numbers in a range, into a sequence of colors.
color_mapper = LinearColorMapper(palette = palette, low = ny['Points'].min(), high = ny['Points'].max())
# Create color bar.
color_bar = ColorBar(color_mapper = color_mapper,
label_standoff = 8,
width = 500, height = 20,
border_line_color = None,
location = (0,0),
orientation = 'horizontal')
# Create figure object.
p = figure(title = 'Calculated Weighted Points',
plot_height = 650 ,
plot_width = 950,
toolbar_location = 'below',
tools = "pan, wheel_zoom, box_zoom, reset",
output_backend="webgl")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
# Add patch renderer to figure.
states = p.patches('xs','ys', source = ny_source,
fill_color = {'field' :'Points',
'transform' : color_mapper},
line_color = "gray",
line_width = 0.25,
fill_alpha = 1)
# Create hover tool
p.add_tools(HoverTool(renderers = [states],
tooltips = [('PO Name','@PO_NAME'),
('Points','@Points')
]))
color_bar = ColorBar(color_mapper = color_mapper,
label_standoff = 8,
width = 950, height = 20,
border_line_color = None,
location = (0,0),
orientation = 'horizontal')
p.add_layout(color_bar, 'below')
show(p)
| 45.034483
| 112
| 0.517228
|
from bokeh.io import show, output_notebook
from bokeh.models import (CDSView, ColorBar, ColumnDataSource,
CustomJS, CustomJSFilter,
GeoJSONDataSource, HoverTool,
LinearColorMapper, Slider)
from bokeh.layouts import column, row, widgetbox
from bokeh.palettes import brewer
from bokeh.plotting import figure
def plot(ny):
ny_source = GeoJSONDataSource(geojson = ny.to_json())
palette = brewer['OrRd'][8]
palette = palette[::-1] color_mapper = LinearColorMapper(palette = palette, low = ny['Points'].min(), high = ny['Points'].max())
color_bar = ColorBar(color_mapper = color_mapper,
label_standoff = 8,
width = 500, height = 20,
border_line_color = None,
location = (0,0),
orientation = 'horizontal')
p = figure(title = 'Calculated Weighted Points',
plot_height = 650 ,
plot_width = 950,
toolbar_location = 'below',
tools = "pan, wheel_zoom, box_zoom, reset",
output_backend="webgl")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
states = p.patches('xs','ys', source = ny_source,
fill_color = {'field' :'Points',
'transform' : color_mapper},
line_color = "gray",
line_width = 0.25,
fill_alpha = 1)
p.add_tools(HoverTool(renderers = [states],
tooltips = [('PO Name','@PO_NAME'),
('Points','@Points')
]))
color_bar = ColorBar(color_mapper = color_mapper,
label_standoff = 8,
width = 950, height = 20,
border_line_color = None,
location = (0,0),
orientation = 'horizontal')
p.add_layout(color_bar, 'below')
show(p)
| true
| true
|
f708ce5f44d667ef8e7f39018776b1547561c78b
| 31,248
|
py
|
Python
|
kscore/serialize.py
|
WeiZhixiong/ksc-sdk-python
|
a93237ce376e107eaae644678ef6b99819a9f8eb
|
[
"Apache-2.0"
] | 53
|
2016-09-21T15:52:14.000Z
|
2021-12-23T09:23:00.000Z
|
kscore/serialize.py
|
WeiZhixiong/ksc-sdk-python
|
a93237ce376e107eaae644678ef6b99819a9f8eb
|
[
"Apache-2.0"
] | 27
|
2016-09-21T15:24:43.000Z
|
2021-11-18T08:38:38.000Z
|
kscore/serialize.py
|
WeiZhixiong/ksc-sdk-python
|
a93237ce376e107eaae644678ef6b99819a9f8eb
|
[
"Apache-2.0"
] | 68
|
2016-09-06T10:33:09.000Z
|
2021-11-16T07:13:03.000Z
|
# Copyright 2014 ksyun.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various KSYUN protocol types.
These classes essentially take user input, a model object that
represents what the expected input should look like, and it returns
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* The return value for ``serialize_to_request`` (the main entry
point) returns a dictionary that represents a request. This
will have keys like ``url_path``, ``query_string``, etc. This
is done so that it's a) easy to test and b) not tied to a
particular HTTP library. See the ``serialize_to_request`` docstring
for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
from xml.etree import ElementTree
import calendar
from kscore.compat import six
from kscore.compat import json, formatdate
from kscore.utils import parse_to_aware_datetime
from kscore.utils import percent_encode
from kscore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
# TODO: Unknown protocols.
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
class Serializer(object):
DEFAULT_METHOD = 'POST'
# Clients can change this to a different MutableMapping
# (i.e OrderedDict) if they want. This is used in the
# compliance test to match the hash ordering used in the
# tests.
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
"""Serialize parameters into an HTTP request.
This method takes user provided parameters and a shape
model and serializes the parameters to an HTTP request.
More specifically, this method returns information about
parts of the HTTP request, it does not enforce a particular
interface or standard for an HTTP request. It instead returns
a dictionary of:
* 'url_path'
* 'query_string'
* 'headers'
* 'body'
* 'method'
It is then up to consumers to decide how to map this to a Request
object of their HTTP library of choice. Below is an example
return value::
{'body': {'Action': 'OperationName',
'Bar': 'val2',
'Foo': 'val1',
'Version': '2014-01-01'},
'headers': {},
'method': 'POST',
'query_string': '',
'url_path': '/'}
:param parameters: The dictionary input parameters for the
operation (i.e the user input).
:param operation_model: The OperationModel object that describes
the operation.
"""
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
# Creates a boilerplate default request dict that subclasses
# can use as a starting point.
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': self.headers,
# An empty body is represented as an empty byte string.
'body': b''
}
return serialized
def _serialize_not_shape(self, data, parameters):
pass
def _serialize_data(self, serialized, data):
serialized['body'] = data
return serialized
@property
def headers(self):
return {}
# Some extra utility methods subclasses can use.
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
return formatdate(value, usegmt=True)
def _convert_timestamp_to_str(self, value):
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % self.TIMESTAMP_FORMAT.lower())
final_value = converter(datetime_obj)
return final_value
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
# Returns the base64-encoded version of value, handling
# both strings and bytes. The returned value is a string
# via the default encoding.
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
class QuerySerializer(Serializer):
"""
BASE HTTP QUERY REQUEST
"""
TIMESTAMP_FORMAT = 'iso8601'
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
# The query serializer only deals with body params so
# that's what we hand off the _serialize_* methods.
serialized['headers'].update(
{
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion'],
}
)
if 'requestUri' in operation_model.http:
serialized['url_path'] = operation_model.http['requestUri']
body_params = self.MAP_TYPE()
body_params['Action'] = operation_model.name
body_params['Version'] = operation_model.metadata['apiVersion']
if shape is not None:
self._serialize(body_params, parameters, shape)
else:
self._serialize_not_shape(body_params, parameters)
return self._serialize_data(serialized, body_params)
def _serialize_not_shape(self, data, parameters):
pass
def _serialize_data(self, serialized, data):
serialized['body'] = data
return serialized
def _serialize(self, serialized, value, shape, prefix=''):
# serialized: The dict that is incrementally added to with the
# final serialized parameters.
# value: The current user input value.
# shape: The shape object that describes the structure of the
# input.
# prefix: The incrementally built up prefix for the serialized
# key (i.e Foo.bar.members.1).
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, prefix=prefix)
def _serialize_type_structure(self, serialized, value, shape, prefix=''):
members = shape.members
for key, value in value.items():
member_shape = members[key]
member_prefix = self._get_serialized_name(member_shape, key)
if prefix:
member_prefix = '%s.%s' % (prefix, member_prefix)
self._serialize(serialized, value, member_shape, member_prefix)
def _serialize_type_list(self, serialized, value, shape, prefix=''):
if not value:
# The query protocol serializes empty lists.
serialized[prefix] = ''
return
if self._is_shape_flattened(shape):
list_prefix = prefix
if shape.member.serialization.get('name'):
name = self._get_serialized_name(shape.member, default_name='')
# Replace '.Original' with '.{name}'.
list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
else:
list_name = shape.member.serialization.get('name', 'member')
list_prefix = '%s.%s' % (prefix, list_name)
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (list_prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
def _serialize_type_map(self, serialized, value, shape, prefix=''):
if self._is_shape_flattened(shape):
full_prefix = prefix
else:
full_prefix = '%s.entry' % prefix
template = full_prefix + '.{i}.{suffix}'
key_shape = shape.key
value_shape = shape.value
key_suffix = self._get_serialized_name(key_shape, default_name='key')
value_suffix = self._get_serialized_name(value_shape, 'value')
for i, key in enumerate(value, 1):
key_prefix = template.format(i=i, suffix=key_suffix)
value_prefix = template.format(i=i, suffix=value_suffix)
self._serialize(serialized, key, key_shape, key_prefix)
self._serialize(serialized, value[key], value_shape, value_prefix)
def _serialize_type_blob(self, serialized, value, shape, prefix=''):
# Blob args must be base64 encoded.
serialized[prefix] = self._get_base64(value)
def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
serialized[prefix] = self._convert_timestamp_to_str(value)
def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
if value:
serialized[prefix] = 'true'
else:
serialized[prefix] = 'false'
def _default_serialize(self, serialized, value, shape, prefix=''):
serialized[prefix] = value
def _is_shape_flattened(self, shape):
return shape.serialization.get('flattened')
class EC2Serializer(QuerySerializer):
"""EC2 specific customizations to the query protocol serializers.
The EC2 model is almost, but not exactly, similar to the query protocol
serializer. This class encapsulates those differences. The model
will have be marked with a ``protocol`` of ``ec2``, so you don't need
to worry about wiring this class up correctly.
"""
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if 'queryName' in shape.serialization:
return shape.serialization['queryName']
elif 'name' in shape.serialization:
# A locationName is always capitalized
# on input for the ec2 protocol.
name = shape.serialization['name']
return name[0].upper() + name[1:]
else:
return default_name
def _serialize_type_list(self, serialized, value, shape, prefix=''):
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
class QueryAcceptJsonSerializer(QuerySerializer):
@property
def headers(self):
return {"Accept": 'application/json'}
def _serialize_not_shape(self, data, parameters):
data.update(parameters)
def _serialize_data(self, serialized, data):
if serialized['method'].lower() == "get":
serialized['body'] = {}
serialized['query_string'] = data
else:
serialized['body'] = data
return serialized
class KCSSerializer(QueryAcceptJsonSerializer):
def _serialize_data(self, serialized, data):
serialized['body'] = {}
serialized['query_string'] = data
return serialized
class CustomBodySerializer(QueryAcceptJsonSerializer):
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
# The query serializer only deals with body params so
# that's what we hand off the _serialize_* methods.
serialized['headers'].update(
{
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion'],
}
)
if 'requestUri' in operation_model.http:
serialized['url_path'] = operation_model.http['requestUri']
body_params = self.MAP_TYPE()
custom_body = None
if 'Body' in parameters:
custom_body = parameters.pop('Body')
if shape is not None:
self._serialize(body_params, parameters, shape)
else:
self._serialize_not_shape(body_params, parameters)
return self._serialize_data(serialized, body_params, custom_body)
def _serialize_data(self, serialized, data, body=None):
if body is not None:
serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
serialized['query_string'] = data
return serialized
class JSONSerializer(Serializer):
"""
BASE JSON REQUEST all method with json body
"""
TIMESTAMP_FORMAT = 'unixtimestamp'
def serialize_to_request(self, parameters, operation_model):
target = '%s.%s' % (operation_model.metadata['targetPrefix'],
operation_model.name)
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
if 'requestUri' in operation_model.http:
serialized['url_path'] = operation_model.http['requestUri']
serialized['query_string'] = self.MAP_TYPE()
serialized['headers'] = {
'X-Amz-Target': target,
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion']
}
body = self.MAP_TYPE()
input_shape = operation_model.input_shape
if input_shape is not None:
self._serialize(body, parameters, input_shape)
else:
self._serialize_not_shape(body, parameters)
return self._serialize_data(serialized, body)
def _serialize_not_shape(self, data, parameters):
data.update(parameters)
def _serialize_data(self, serialized, data):
serialized['body'] = json.dumps(data).encode(self.DEFAULT_ENCODING)
return serialized
def _serialize(self, serialized, value, shape, key=None):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, key)
def _serialize_type_structure(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive
# call so we need to add a new child dict as the value
# of the passed in serialized dict. We'll then add
# all the structure members as key/vals in the new serialized
# dictionary we just created.
new_serialized = self.MAP_TYPE()
serialized[key] = new_serialized
serialized = new_serialized
members = shape.members
for member_key, member_value in value.items():
member_shape = members[member_key]
if 'name' in member_shape.serialization:
member_key = member_shape.serialization['name']
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_map(self, serialized, value, shape, key):
map_obj = self.MAP_TYPE()
serialized[key] = map_obj
for sub_key, sub_value in value.items():
self._serialize(map_obj, sub_value, shape.value, sub_key)
def _serialize_type_list(self, serialized, value, shape, key):
list_obj = []
serialized[key] = list_obj
for list_item in value:
wrapper = {}
# The JSON list serialization is the only case where we aren't
# setting a key on a dict. We handle this by using
# a __current__ key on a wrapper dict to serialize each
# list item before appending it to the serialized list.
self._serialize(wrapper, list_item, shape.member, "__current__")
list_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
serialized[key] = value
def _serialize_type_timestamp(self, serialized, value, shape, key):
serialized[key] = self._convert_timestamp_to_str(value)
def _serialize_type_blob(self, serialized, value, shape, key):
serialized[key] = self._get_base64(value)
class NotGetJsonSerializer(JSONSerializer):
def _serialize_data(self, serialized, data):
if serialized['method'].lower() == "get":
serialized['body'] = {}
serialized['query_string'].update(data)
else:
serialized['body'] = json.dumps(data).encode(self.DEFAULT_ENCODING)
return serialized
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
The only variance between the various rest protocols is the
way that the body is serialized. All other aspects (headers, uri, etc.)
are the same and logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
def serialize_to_request(self, parameters, operation_model):
serialized = self._create_default_request()
serialized['headers'] = {
'X-Action': operation_model.name,
'X-Version': operation_model.metadata['apiVersion']
}
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
shape = operation_model.input_shape
if shape is None:
serialized['url_path'] = operation_model.http['requestUri']
return serialized
shape_members = shape.members
# While the ``serialized`` key holds the final serialized request
# data, we need interim dicts for the various locations of the
# request. We need this for the uri_path_kwargs and the
# query_string_kwargs because they are templated, so we need
# to gather all the needed data for the string template,
# then we render the template. The body_kwargs is needed
# because once we've collected them all, we run them through
# _serialize_body_params, which for rest-json, creates JSON,
# and for rest-xml, will create XML. This is what the
# ``partitioned`` dict below is for.
partitioned = {
'uri_path_kwargs': self.MAP_TYPE(),
'query_string_kwargs': self.MAP_TYPE(),
'body_kwargs': self.MAP_TYPE(),
'headers': self.MAP_TYPE(),
}
for param_name, param_value in parameters.items():
if param_value is None:
# Don't serialize any parameter with a None value.
continue
self._partition_parameters(partitioned, param_name, param_value,
shape_members)
serialized['url_path'] = self._render_uri_template(
operation_model.http['requestUri'],
partitioned['uri_path_kwargs'])
# Note that we lean on the http implementation to handle the case
# where the requestUri path already has query parameters.
# The bundled http client, requests, already supports this.
serialized['query_string'] = partitioned['query_string_kwargs']
if partitioned['headers']:
serialized['headers'] = partitioned['headers']
self._serialize_payload(partitioned, parameters,
serialized, shape, shape_members)
return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases::
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
def _serialize_payload(self, partitioned, parameters,
serialized, shape, shape_members):
# partitioned - The user input params partitioned by location.
# parameters - The user input params.
# serialized - The final serialized request dict.
# shape - Describes the expected input shape
# shape_members - The members of the input struct shape
payload_member = shape.serialization.get('payload')
if payload_member is not None and \
shape_members[payload_member].type_name in ['blob', 'string']:
# If it's streaming, then the body is just the
# value of the payload.
body_payload = parameters.get(payload_member, b'')
body_payload = self._encode_payload(body_payload)
serialized['body'] = body_payload
elif payload_member is not None:
# If there's a payload member, we serialized that
# member to they body.
body_params = parameters.get(payload_member)
if body_params is not None:
serialized['body'] = self._serialize_body_params(
body_params,
shape_members[payload_member])
elif partitioned['body_kwargs']:
serialized['body'] = self._serialize_body_params(
partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
if isinstance(body, six.text_type):
return body.encode(self.DEFAULT_ENCODING)
return body
def _partition_parameters(self, partitioned, param_name,
param_value, shape_members):
# This takes the user provided input parameter (``param``)
# and figures out where they go in the request dict.
# Some params are HTTP headers, some are used in the URI, some
# are in the request body. This method deals with this.
member = shape_members[param_name]
location = member.serialization.get('location')
key_name = member.serialization.get('name', param_name)
if location == 'uri':
partitioned['uri_path_kwargs'][key_name] = param_value
elif location == 'querystring':
if isinstance(param_value, dict):
partitioned['query_string_kwargs'].update(param_value)
else:
partitioned['query_string_kwargs'][key_name] = param_value
elif location == 'header':
shape = shape_members[param_name]
value = self._convert_header_value(shape, param_value)
partitioned['headers'][key_name] = str(value)
elif location == 'headers':
# 'headers' is a bit of an oddball. The ``key_name``
# is actually really a prefix for the header names:
header_prefix = key_name
# The value provided by the user is a dict so we'll be
# creating multiple header key/val pairs. The key
# name to use for each header is the header_prefix (``key_name``)
# plus the key provided by the user.
self._do_serialize_header_map(header_prefix,
partitioned['headers'],
param_value)
else:
partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
def _serialize_body_params(self, params, shape):
raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
if shape.type_name == 'timestamp':
datetime_obj = parse_to_aware_datetime(value)
timestamp = calendar.timegm(datetime_obj.utctimetuple())
return self._timestamp_rfc822(timestamp)
else:
return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
def _serialize_body_params(self, params, shape):
serialized_body = self.MAP_TYPE()
self._serialize(serialized_body, params, shape)
return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
TIMESTAMP_FORMAT = 'iso8601'
def _serialize_body_params(self, params, shape):
root_name = shape.serialization['name']
pseudo_root = ElementTree.Element('')
self._serialize(shape, params, pseudo_root, root_name)
real_root = list(pseudo_root)[0]
return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)
def _serialize(self, shape, params, xmlnode, name):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(xmlnode, params, shape, name)
def _serialize_type_structure(self, xmlnode, params, shape, name):
structure_node = ElementTree.SubElement(xmlnode, name)
if 'xmlNamespace' in shape.serialization:
namespace_metadata = shape.serialization['xmlNamespace']
attribute_name = 'xmlns'
if namespace_metadata.get('prefix'):
attribute_name += ':%s' % namespace_metadata['prefix']
structure_node.attrib[attribute_name] = namespace_metadata['uri']
for key, value in params.items():
member_shape = shape.members[key]
member_name = member_shape.serialization.get('name', key)
# We need to special case member shapes that are marked as an
# xmlAttribute. Rather than serializing into an XML child node,
# we instead serialize the shape to an XML attribute of the
# *current* node.
if value is None:
# Don't serialize any param whose value is None.
return
if member_shape.serialization.get('xmlAttribute'):
# xmlAttributes must have a serialization name.
xml_attribute_name = member_shape.serialization['name']
structure_node.attrib[xml_attribute_name] = value
continue
self._serialize(member_shape, value, structure_node, member_name)
def _serialize_type_list(self, xmlnode, params, shape, name):
member_shape = shape.member
if shape.serialization.get('flattened'):
element_name = name
list_node = xmlnode
else:
element_name = member_shape.serialization.get('name', 'member')
list_node = ElementTree.SubElement(xmlnode, name)
for item in params:
self._serialize(member_shape, item, list_node, element_name)
def _serialize_type_map(self, xmlnode, params, shape, name):
# Given the ``name`` of MyMap, and input of {"key1": "val1"}
# we serialize this as:
# <MyMap>
# <entry>
# <key>key1</key>
# <value>val1</value>
# </entry>
# </MyMap>
node = ElementTree.SubElement(xmlnode, name)
# TODO: handle flattened maps.
for key, value in params.items():
entry_node = ElementTree.SubElement(node, 'entry')
key_name = self._get_serialized_name(shape.key, default_name='key')
val_name = self._get_serialized_name(shape.value,
default_name='value')
self._serialize(shape.key, key, entry_node, key_name)
self._serialize(shape.value, value, entry_node, val_name)
def _serialize_type_boolean(self, xmlnode, params, shape, name):
# For scalar types, the 'params' attr is actually just a scalar
# value representing the data we need to serialize as a boolean.
# It will either be 'true' or 'false'
node = ElementTree.SubElement(xmlnode, name)
if params:
str_value = 'true'
else:
str_value = 'false'
node.text = str_value
def _serialize_type_blob(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._get_base64(params)
def _serialize_type_timestamp(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._convert_timestamp_to_str(params)
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = str(params)
SERIALIZERS = {
'kcs': KCSSerializer,
'ec2': EC2Serializer,
'query': QuerySerializer,
'query-json': QueryAcceptJsonSerializer,
'json': JSONSerializer,
'json2': NotGetJsonSerializer,
'rest-json': RestJSONSerializer,
'rest-xml': RestXMLSerializer,
'custom-body': CustomBodySerializer,
}
| 40.476684
| 79
| 0.634633
|
import re
import base64
from xml.etree import ElementTree
import calendar
from kscore.compat import six
from kscore.compat import json, formatdate
from kscore.utils import parse_to_aware_datetime
from kscore.utils import percent_encode
from kscore import validate
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
class Serializer(object):
DEFAULT_METHOD = 'POST'
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': self.headers,
'body': b''
}
return serialized
def _serialize_not_shape(self, data, parameters):
pass
def _serialize_data(self, serialized, data):
serialized['body'] = data
return serialized
@property
def headers(self):
return {}
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
return formatdate(value, usegmt=True)
def _convert_timestamp_to_str(self, value):
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % self.TIMESTAMP_FORMAT.lower())
final_value = converter(datetime_obj)
return final_value
def _get_serialized_name(self, shape, default_name):
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
class QuerySerializer(Serializer):
    """Serializer for the ``query`` protocol.

    Input parameters are flattened into dotted ``key=value`` pairs
    (e.g. ``Foo.bar.1``) and sent in the request body together with
    the operation's ``Action`` name and API ``Version``.
    """

    TIMESTAMP_FORMAT = 'iso8601'

    def serialize_to_request(self, parameters, operation_model):
        """Serialize user ``parameters`` for ``operation_model``.

        Returns a request dict with ``url_path``, ``query_string``,
        ``method``, ``headers`` and ``body`` keys.
        """
        shape = operation_model.input_shape
        serialized = self._create_default_request()
        serialized['method'] = operation_model.http.get('method',
                                                        self.DEFAULT_METHOD)
        # Advertise the operation being invoked via custom headers.
        serialized['headers'].update(
            {
                'X-Action': operation_model.name,
                'X-Version': operation_model.metadata['apiVersion'],
            }
        )
        if 'requestUri' in operation_model.http:
            serialized['url_path'] = operation_model.http['requestUri']
        body_params = self.MAP_TYPE()
        body_params['Action'] = operation_model.name
        body_params['Version'] = operation_model.metadata['apiVersion']
        if shape is not None:
            # The operation has a modeled input shape; walk it.
            self._serialize(body_params, parameters, shape)
        else:
            # No modeled shape; hand the raw parameters to the hook.
            self._serialize_not_shape(body_params, parameters)
        return self._serialize_data(serialized, body_params)

    def _serialize_not_shape(self, data, parameters):
        # Hook for subclasses; the base query protocol drops
        # parameters that have no modeled shape.
        pass

    def _serialize_data(self, serialized, data):
        # Default: the flattened params become the request body.
        serialized['body'] = data
        return serialized

    def _serialize(self, serialized, value, shape, prefix=''):
        # serialized: The dict that is incrementally added to with the
        #     final serialized parameters.
        # value: The current user input value.
        # shape: The shape object that describes the structure of the
        #     input.
        # prefix: The incrementally built up prefix for the serialized
        #     key (i.e. Foo.bar.members.1).
        # Dispatch on shape type; unknown types are copied verbatim.
        method = getattr(self, '_serialize_type_%s' % shape.type_name,
                         self._default_serialize)
        method(serialized, value, shape, prefix=prefix)

    def _serialize_type_structure(self, serialized, value, shape, prefix=''):
        # Each member serializes under ``prefix.MemberWireName``.
        members = shape.members
        for key, value in value.items():
            member_shape = members[key]
            member_prefix = self._get_serialized_name(member_shape, key)
            if prefix:
                member_prefix = '%s.%s' % (prefix, member_prefix)
            self._serialize(serialized, value, member_shape, member_prefix)

    def _serialize_type_list(self, serialized, value, shape, prefix=''):
        if not value:
            # The query protocol serializes empty lists (as an empty
            # string under the bare prefix).
            serialized[prefix] = ''
            return
        if self._is_shape_flattened(shape):
            list_prefix = prefix
            if shape.member.serialization.get('name'):
                name = self._get_serialized_name(shape.member, default_name='')
                # Replace the trailing '.Original' with '.{name}'.
                list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
        else:
            # Non-flattened lists get an extra '.member' segment.
            list_name = shape.member.serialization.get('name', 'member')
            list_prefix = '%s.%s' % (prefix, list_name)
        # List elements are numbered starting at 1: Foo.1, Foo.2, ...
        for i, element in enumerate(value, 1):
            element_prefix = '%s.%s' % (list_prefix, i)
            element_shape = shape.member
            self._serialize(serialized, element, element_shape, element_prefix)

    def _serialize_type_map(self, serialized, value, shape, prefix=''):
        if self._is_shape_flattened(shape):
            full_prefix = prefix
        else:
            full_prefix = '%s.entry' % prefix
        # Entries serialize as Prefix.N.key / Prefix.N.value pairs
        # (wire names for 'key'/'value' come from the shape).
        template = full_prefix + '.{i}.{suffix}'
        key_shape = shape.key
        value_shape = shape.value
        key_suffix = self._get_serialized_name(key_shape, default_name='key')
        value_suffix = self._get_serialized_name(value_shape, 'value')
        for i, key in enumerate(value, 1):
            key_prefix = template.format(i=i, suffix=key_suffix)
            value_prefix = template.format(i=i, suffix=value_suffix)
            self._serialize(serialized, key, key_shape, key_prefix)
            self._serialize(serialized, value[key], value_shape, value_prefix)

    def _serialize_type_blob(self, serialized, value, shape, prefix=''):
        # Blob args must be base64 encoded.
        serialized[prefix] = self._get_base64(value)

    def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
        serialized[prefix] = self._convert_timestamp_to_str(value)

    def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
        # Booleans serialize as the literal strings 'true'/'false'.
        if value:
            serialized[prefix] = 'true'
        else:
            serialized[prefix] = 'false'

    def _default_serialize(self, serialized, value, shape, prefix=''):
        serialized[prefix] = value

    def _is_shape_flattened(self, shape):
        # 'flattened' collections omit the wrapping member/entry level.
        return shape.serialization.get('flattened')
class EC2Serializer(QuerySerializer):
    """Query-protocol variant implementing EC2-specific naming rules."""

    def _get_serialized_name(self, shape, default_name):
        # Precedence: an explicit ``queryName`` wins outright; otherwise
        # a ``name`` trait is used with its first letter capitalized
        # (locationName is always capitalized on input for the ec2
        # protocol); failing both, fall back to the supplied default.
        serialization = shape.serialization
        if 'queryName' in serialization:
            return serialization['queryName']
        if 'name' in serialization:
            name = serialization['name']
            return name[0].upper() + name[1:]
        return default_name

    def _serialize_type_list(self, serialized, value, shape, prefix=''):
        # EC2 lists are never wrapped in a '.member' segment: elements
        # serialize directly as Prefix.1, Prefix.2, ...
        member_shape = shape.member
        for index, element in enumerate(value, 1):
            self._serialize(serialized, element, member_shape,
                            '%s.%s' % (prefix, index))
class QueryAcceptJsonSerializer(QuerySerializer):
    """Query serializer that asks for JSON responses.

    GET requests carry the flattened params in the query string; any
    other method sends them in the request body.
    """

    @property
    def headers(self):
        return {"Accept": 'application/json'}

    def _serialize_not_shape(self, data, parameters):
        # No modeled shape: pass the user parameters straight through.
        data.update(parameters)

    def _serialize_data(self, serialized, data):
        if serialized['method'].lower() != "get":
            serialized['body'] = data
        else:
            # GET has no payload: move the params to the query string.
            serialized['query_string'] = data
            serialized['body'] = {}
        return serialized
class KCSSerializer(QueryAcceptJsonSerializer):
    """KCS protocol: parameters always travel in the query string,
    regardless of the HTTP method."""

    def _serialize_data(self, serialized, data):
        serialized.update(query_string=data, body={})
        return serialized
class CustomBodySerializer(QueryAcceptJsonSerializer):
    """Query-style serializer supporting an explicit JSON ``Body``.

    A ``Body`` entry in the parameters, when present, is removed from
    the flattened query parameters and JSON-encoded as the request
    body; all remaining parameters travel in the query string.
    """

    def serialize_to_request(self, parameters, operation_model):
        serialized = self._create_default_request()
        serialized['method'] = operation_model.http.get('method',
                                                        self.DEFAULT_METHOD)
        # Identify the invoked operation via the custom action headers.
        serialized['headers'].update(
            {
                'X-Action': operation_model.name,
                'X-Version': operation_model.metadata['apiVersion'],
            }
        )
        if 'requestUri' in operation_model.http:
            serialized['url_path'] = operation_model.http['requestUri']
        # Pull the user-supplied body out *before* walking the shape so
        # it is not also flattened into the query params.  NOTE: this
        # deliberately mutates the caller's ``parameters`` dict.
        custom_body = parameters.pop('Body') if 'Body' in parameters else None
        query_params = self.MAP_TYPE()
        input_shape = operation_model.input_shape
        if input_shape is None:
            self._serialize_not_shape(query_params, parameters)
        else:
            self._serialize(query_params, parameters, input_shape)
        return self._serialize_data(serialized, query_params, custom_body)

    def _serialize_data(self, serialized, data, body=None):
        if body is not None:
            # The explicit body wins; it is sent as UTF-8 encoded JSON.
            serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
        serialized['query_string'] = data
        return serialized
class JSONSerializer(Serializer):
    """Serializer for the ``json`` protocol.

    The entire input is rendered as one JSON document in the request
    body; timestamps are written as unix epoch integers.
    """

    TIMESTAMP_FORMAT = 'unixtimestamp'

    def serialize_to_request(self, parameters, operation_model):
        """Serialize ``parameters`` into a JSON-bodied request dict."""
        # The X-Amz-Target header identifies the service operation.
        target = '%s.%s' % (operation_model.metadata['targetPrefix'],
                            operation_model.name)
        serialized = self._create_default_request()
        serialized['method'] = operation_model.http.get('method',
                                                        self.DEFAULT_METHOD)
        if 'requestUri' in operation_model.http:
            serialized['url_path'] = operation_model.http['requestUri']
        serialized['query_string'] = self.MAP_TYPE()
        serialized['headers'] = {
            'X-Amz-Target': target,
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'X-Action': operation_model.name,
            'X-Version': operation_model.metadata['apiVersion']
        }
        body = self.MAP_TYPE()
        input_shape = operation_model.input_shape
        if input_shape is not None:
            # Walk the modeled shape to build the JSON document.
            self._serialize(body, parameters, input_shape)
        else:
            self._serialize_not_shape(body, parameters)
        return self._serialize_data(serialized, body)

    def _serialize_not_shape(self, data, parameters):
        # Without a modeled shape the parameters pass through as-is.
        data.update(parameters)

    def _serialize_data(self, serialized, data):
        # Encode the accumulated document as UTF-8 JSON for the body.
        serialized['body'] = json.dumps(data).encode(self.DEFAULT_ENCODING)
        return serialized

    def _serialize(self, serialized, value, shape, key=None):
        # Dispatch on shape type; unknown types are copied verbatim.
        method = getattr(self, '_serialize_type_%s' % shape.type_name,
                         self._default_serialize)
        method(serialized, value, shape, key)

    def _serialize_type_structure(self, serialized, value, shape, key):
        if key is not None:
            # This structure is itself a member: nest a new dict under
            # ``key`` and serialize
            # all the structure members as key/vals in the new serialized
            # dictionary we just created.
            new_serialized = self.MAP_TYPE()
            serialized[key] = new_serialized
            serialized = new_serialized
        members = shape.members
        for member_key, member_value in value.items():
            member_shape = members[member_key]
            if 'name' in member_shape.serialization:
                # Honor the member's wire name when it differs.
                member_key = member_shape.serialization['name']
            self._serialize(serialized, member_value, member_shape, member_key)

    def _serialize_type_map(self, serialized, value, shape, key):
        map_obj = self.MAP_TYPE()
        serialized[key] = map_obj
        for sub_key, sub_value in value.items():
            self._serialize(map_obj, sub_value, shape.value, sub_key)

    def _serialize_type_list(self, serialized, value, shape, key):
        list_obj = []
        serialized[key] = list_obj
        for list_item in value:
            wrapper = {}
            # The JSON list serialization is the only case where we aren't
            # given a dict to serialize into, so use a throwaway wrapper
            # dict and pull the serialized element back out of it.
            self._serialize(wrapper, list_item, shape.member, "__current__")
            list_obj.append(wrapper["__current__"])

    def _default_serialize(self, serialized, value, shape, key):
        serialized[key] = value

    def _serialize_type_timestamp(self, serialized, value, shape, key):
        serialized[key] = self._convert_timestamp_to_str(value)

    def _serialize_type_blob(self, serialized, value, shape, key):
        # Blob values are transported base64-encoded.
        serialized[key] = self._get_base64(value)
class NotGetJsonSerializer(JSONSerializer):
    """JSON serializer that moves params to the query string for GET.

    Non-GET methods still send a JSON body like the parent class.
    """

    def _serialize_data(self, serialized, data):
        method_is_get = serialized['method'].lower() == "get"
        if method_is_get:
            # GET carries no payload: merge the params into the
            # existing query_string mapping and blank the body.
            serialized['query_string'].update(data)
            serialized['body'] = {}
        else:
            serialized['body'] = json.dumps(data).encode(self.DEFAULT_ENCODING)
        return serialized
class BaseRestSerializer(Serializer):
    """Shared base for the rest-json and rest-xml serializers.

    Each parameter is routed to the URI path, the query string, a
    header, or the body depending on its member's ``location`` trait;
    rendering of the body itself is delegated to
    ``_serialize_body_params``, which subclasses implement.
    """

    KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']

    def serialize_to_request(self, parameters, operation_model):
        """Serialize ``parameters`` into a rest-style request dict."""
        serialized = self._create_default_request()
        serialized['headers'] = {
            'X-Action': operation_model.name,
            'X-Version': operation_model.metadata['apiVersion']
        }
        serialized['method'] = operation_model.http.get('method',
                                                        self.DEFAULT_METHOD)
        shape = operation_model.input_shape
        if shape is None:
            # No modeled input at all: the request is just the URI.
            serialized['url_path'] = operation_model.http['requestUri']
            return serialized
        shape_members = shape.members
        # Parameters are partitioned into buckets by location; body
        # params are later rendered by
        # _serialize_body_params, which for rest-json, creates JSON,
        # and for rest-xml, will create XML. This is what the
        # ``partitioned`` dict below is for.
        partitioned = {
            'uri_path_kwargs': self.MAP_TYPE(),
            'query_string_kwargs': self.MAP_TYPE(),
            'body_kwargs': self.MAP_TYPE(),
            'headers': self.MAP_TYPE(),
        }
        for param_name, param_value in parameters.items():
            if param_value is None:
                # Don't serialize any parameter with a None value.
                continue
            self._partition_parameters(partitioned, param_name, param_value,
                                       shape_members)
        serialized['url_path'] = self._render_uri_template(
            operation_model.http['requestUri'],
            partitioned['uri_path_kwargs'])
        serialized['query_string'] = partitioned['query_string_kwargs']
        # NOTE(review): when any header-located params exist this
        # *replaces* (does not merge with) the X-Action/X-Version
        # headers assigned above — confirm that is intended.
        if partitioned['headers']:
            serialized['headers'] = partitioned['headers']
        self._serialize_payload(partitioned, parameters,
                                serialized, shape, shape_members)
        return serialized

    def _render_uri_template(self, uri_template, params):
        """Fill a '/{Bucket}/{Key}'-style template with encoded params.

        A template name ending in '+' marks a greedy segment whose
        value keeps '/' characters unescaped.
        """
        encoded_params = {}
        for template_param in re.findall(r'{(.*?)}', uri_template):
            if template_param.endswith('+'):
                encoded_params[template_param] = percent_encode(
                    params[template_param[:-1]], safe='/~')
            else:
                encoded_params[template_param] = percent_encode(
                    params[template_param])
        return uri_template.format(**encoded_params)

    def _serialize_payload(self, partitioned, parameters,
                           serialized, shape, shape_members):
        # ``payload`` names the single member that forms the HTTP body,
        # if the shape declares one.
        payload_member = shape.serialization.get('payload')
        if payload_member is not None and \
                shape_members[payload_member].type_name in ['blob', 'string']:
            # Blob/string payloads: the raw (encoded) bytes are the
            # value of the payload.
            body_payload = parameters.get(payload_member, b'')
            body_payload = self._encode_payload(body_payload)
            serialized['body'] = body_payload
        elif payload_member is not None:
            # If there's a structured payload member, we serialize just
            # that member's value as the body.
            body_params = parameters.get(payload_member)
            if body_params is not None:
                serialized['body'] = self._serialize_body_params(
                    body_params,
                    shape_members[payload_member])
        elif partitioned['body_kwargs']:
            # No payload trait: render everything routed to the body.
            serialized['body'] = self._serialize_body_params(
                partitioned['body_kwargs'], shape)

    def _encode_payload(self, body):
        # Text payloads are encoded; bytes pass through untouched.
        if isinstance(body, six.text_type):
            return body.encode(self.DEFAULT_ENCODING)
        return body

    def _partition_parameters(self, partitioned, param_name,
                              param_value, shape_members):
        # Route one parameter into the right bucket based on its
        # member's ``location`` trait (see KNOWN_LOCATIONS).
        member = shape_members[param_name]
        location = member.serialization.get('location')
        key_name = member.serialization.get('name', param_name)
        if location == 'uri':
            partitioned['uri_path_kwargs'][key_name] = param_value
        elif location == 'querystring':
            if isinstance(param_value, dict):
                # A dict-valued querystring param contributes each of
                # its own key/value pairs directly.
                partitioned['query_string_kwargs'].update(param_value)
            else:
                partitioned['query_string_kwargs'][key_name] = param_value
        elif location == 'header':
            shape = shape_members[param_name]
            value = self._convert_header_value(shape, param_value)
            partitioned['headers'][key_name] = str(value)
        elif location == 'headers':
            header_prefix = key_name
            # 'headers' is a map of user keys to values, so we're
            # creating multiple header key/val pairs. The key
            # name to use for each header is the header_prefix (``key_name``)
            # plus the key provided by the user.
            self._do_serialize_header_map(header_prefix,
                                          partitioned['headers'],
                                          param_value)
        else:
            # No location trait: the param belongs in the body.
            partitioned['body_kwargs'][param_name] = param_value

    def _do_serialize_header_map(self, header_prefix, headers, user_input):
        # Expand a header map into prefixed individual headers.
        for key, val in user_input.items():
            full_key = header_prefix + key
            headers[full_key] = val

    def _serialize_body_params(self, params, shape):
        # Subclass hook: render ``params`` for ``shape`` into the body
        # (JSON for rest-json, XML for rest-xml).
        raise NotImplementedError('_serialize_body_params')

    def _convert_header_value(self, shape, value):
        if shape.type_name == 'timestamp':
            # Header timestamps always use RFC 822 format, regardless
            # of the class-level TIMESTAMP_FORMAT.
            datetime_obj = parse_to_aware_datetime(value)
            timestamp = calendar.timegm(datetime_obj.utctimetuple())
            return self._timestamp_rfc822(timestamp)
        else:
            return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
    """rest-json protocol: body params are rendered as a JSON document."""

    def _serialize_body_params(self, params, shape):
        document = self.MAP_TYPE()
        self._serialize(document, params, shape)
        return json.dumps(document).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
    """rest-xml protocol: body params are rendered as an XML document."""

    TIMESTAMP_FORMAT = 'iso8601'

    def _serialize_body_params(self, params, shape):
        """Render ``params`` for ``shape`` into an XML byte string."""
        root_name = shape.serialization['name']
        # Serialize into a throwaway pseudo root, then return its only
        # child so the output starts at the real root element.
        pseudo_root = ElementTree.Element('')
        self._serialize(shape, params, pseudo_root, root_name)
        real_root = list(pseudo_root)[0]
        return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)

    def _serialize(self, shape, params, xmlnode, name):
        # Dispatch on shape type; unknown types become plain text nodes.
        method = getattr(self, '_serialize_type_%s' % shape.type_name,
                         self._default_serialize)
        method(xmlnode, params, shape, name)

    def _serialize_type_structure(self, xmlnode, params, shape, name):
        structure_node = ElementTree.SubElement(xmlnode, name)
        if 'xmlNamespace' in shape.serialization:
            # Attach the declared namespace (optionally prefixed) to
            # this structure's element.
            namespace_metadata = shape.serialization['xmlNamespace']
            attribute_name = 'xmlns'
            if namespace_metadata.get('prefix'):
                attribute_name += ':%s' % namespace_metadata['prefix']
            structure_node.attrib[attribute_name] = namespace_metadata['uri']
        for key, value in params.items():
            member_shape = shape.members[key]
            member_name = member_shape.serialization.get('name', key)
            if value is None:
                # Don't serialize any param whose value is None.
                # BUG FIX: this previously ``return``-ed, which silently
                # dropped every member remaining in iteration order, not
                # just the None one; ``continue`` matches the None
                # handling in BaseRestSerializer.serialize_to_request.
                continue
            # Members marked as xmlAttribute are special cased: rather
            # than serializing into an XML child node, we serialize the
            # value to an XML attribute of the *current* node.
            if member_shape.serialization.get('xmlAttribute'):
                xml_attribute_name = member_shape.serialization['name']
                structure_node.attrib[xml_attribute_name] = value
                continue
            self._serialize(member_shape, value, structure_node, member_name)

    def _serialize_type_list(self, xmlnode, params, shape, name):
        member_shape = shape.member
        if shape.serialization.get('flattened'):
            # Flattened lists repeat the element directly at this level.
            element_name = name
            list_node = xmlnode
        else:
            # Otherwise elements nest under a wrapper node.
            element_name = member_shape.serialization.get('name', 'member')
            list_node = ElementTree.SubElement(xmlnode, name)
        for item in params:
            self._serialize(member_shape, item, list_node, element_name)

    def _serialize_type_map(self, xmlnode, params, shape, name):
        # Maps serialize as repeated <entry><key/><value/></entry> nodes.
        node = ElementTree.SubElement(xmlnode, name)
        for key, value in params.items():
            entry_node = ElementTree.SubElement(node, 'entry')
            key_name = self._get_serialized_name(shape.key, default_name='key')
            val_name = self._get_serialized_name(shape.value,
                                                 default_name='value')
            self._serialize(shape.key, key, entry_node, key_name)
            self._serialize(shape.value, value, entry_node, val_name)

    def _serialize_type_boolean(self, xmlnode, params, shape, name):
        # Booleans serialize as the literal strings 'true'/'false'.
        node = ElementTree.SubElement(xmlnode, name)
        if params:
            str_value = 'true'
        else:
            str_value = 'false'
        node.text = str_value

    def _serialize_type_blob(self, xmlnode, params, shape, name):
        # Blob values are transported base64-encoded.
        node = ElementTree.SubElement(xmlnode, name)
        node.text = self._get_base64(params)

    def _serialize_type_timestamp(self, xmlnode, params, shape, name):
        node = ElementTree.SubElement(xmlnode, name)
        node.text = self._convert_timestamp_to_str(params)

    def _default_serialize(self, xmlnode, params, shape, name):
        node = ElementTree.SubElement(xmlnode, name)
        node.text = str(params)
# Registry mapping wire-protocol names to serializer classes; consumed
# by ``create_serializer`` at the top of this module.
SERIALIZERS = {
    'kcs': KCSSerializer,
    'ec2': EC2Serializer,
    'query': QuerySerializer,
    'query-json': QueryAcceptJsonSerializer,
    'json': JSONSerializer,
    'json2': NotGetJsonSerializer,
    'rest-json': RestJSONSerializer,
    'rest-xml': RestXMLSerializer,
    'custom-body': CustomBodySerializer,
}
| true
| true
|
f708cecc49ed56d9057dcff2713d8f85cfda72a4
| 9,432
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/aio/_compute_management_client_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/aio/_compute_management_client_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/aio/_compute_management_client_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import ComputeManagementClientConfiguration
from .operations_async import Operations
from .operations_async import AvailabilitySetsOperations
from .operations_async import ProximityPlacementGroupsOperations
from .operations_async import VirtualMachineExtensionImagesOperations
from .operations_async import VirtualMachineExtensionsOperations
from .operations_async import VirtualMachineImagesOperations
from .operations_async import UsageOperations
from .operations_async import VirtualMachinesOperations
from .operations_async import VirtualMachineSizesOperations
from .operations_async import ImagesOperations
from .operations_async import VirtualMachineScaleSetsOperations
from .operations_async import VirtualMachineScaleSetExtensionsOperations
from .operations_async import VirtualMachineScaleSetRollingUpgradesOperations
from .operations_async import VirtualMachineScaleSetVMsOperations
from .operations_async import LogAnalyticsOperations
from .operations_async import VirtualMachineRunCommandsOperations
from .operations_async import DisksOperations
from .operations_async import SnapshotsOperations
from .. import models
class ComputeManagementClient(object):
    """Compute Client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.compute.v2018_04_01.aio.operations_async.Operations
    :ivar availability_sets: AvailabilitySetsOperations operations
    :vartype availability_sets: azure.mgmt.compute.v2018_04_01.aio.operations_async.AvailabilitySetsOperations
    :ivar proximity_placement_groups: ProximityPlacementGroupsOperations operations
    :vartype proximity_placement_groups: azure.mgmt.compute.v2018_04_01.aio.operations_async.ProximityPlacementGroupsOperations
    :ivar virtual_machine_extension_images: VirtualMachineExtensionImagesOperations operations
    :vartype virtual_machine_extension_images: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineExtensionImagesOperations
    :ivar virtual_machine_extensions: VirtualMachineExtensionsOperations operations
    :vartype virtual_machine_extensions: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineExtensionsOperations
    :ivar virtual_machine_images: VirtualMachineImagesOperations operations
    :vartype virtual_machine_images: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineImagesOperations
    :ivar usage: UsageOperations operations
    :vartype usage: azure.mgmt.compute.v2018_04_01.aio.operations_async.UsageOperations
    :ivar virtual_machines: VirtualMachinesOperations operations
    :vartype virtual_machines: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachinesOperations
    :ivar virtual_machine_sizes: VirtualMachineSizesOperations operations
    :vartype virtual_machine_sizes: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineSizesOperations
    :ivar images: ImagesOperations operations
    :vartype images: azure.mgmt.compute.v2018_04_01.aio.operations_async.ImagesOperations
    :ivar virtual_machine_scale_sets: VirtualMachineScaleSetsOperations operations
    :vartype virtual_machine_scale_sets: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetsOperations
    :ivar virtual_machine_scale_set_extensions: VirtualMachineScaleSetExtensionsOperations operations
    :vartype virtual_machine_scale_set_extensions: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetExtensionsOperations
    :ivar virtual_machine_scale_set_rolling_upgrades: VirtualMachineScaleSetRollingUpgradesOperations operations
    :vartype virtual_machine_scale_set_rolling_upgrades: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetRollingUpgradesOperations
    :ivar virtual_machine_scale_set_vms: VirtualMachineScaleSetVMsOperations operations
    :vartype virtual_machine_scale_set_vms: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineScaleSetVMsOperations
    :ivar log_analytics: LogAnalyticsOperations operations
    :vartype log_analytics: azure.mgmt.compute.v2018_04_01.aio.operations_async.LogAnalyticsOperations
    :ivar virtual_machine_run_commands: VirtualMachineRunCommandsOperations operations
    :vartype virtual_machine_run_commands: azure.mgmt.compute.v2018_04_01.aio.operations_async.VirtualMachineRunCommandsOperations
    :ivar disks: DisksOperations operations
    :vartype disks: azure.mgmt.compute.v2018_04_01.aio.operations_async.DisksOperations
    :ivar snapshots: SnapshotsOperations operations
    :vartype snapshots: azure.mgmt.compute.v2018_04_01.aio.operations_async.SnapshotsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    # NOTE: this class is generated by AutoRest; edits here will be
    # lost on regeneration.

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure Resource Manager endpoint when no
        # explicit base URL is supplied (e.g. for sovereign clouds).
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = ComputeManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Collect every model class from the shared ``models`` module so
        # the (de)serializers can resolve type names in REST payloads.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # One operation group per resource type; each shares the same
        # pipeline client, configuration and (de)serializers.
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.availability_sets = AvailabilitySetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.proximity_placement_groups = ProximityPlacementGroupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_extension_images = VirtualMachineExtensionImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_extensions = VirtualMachineExtensionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_images = VirtualMachineImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.usage = UsageOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machines = VirtualMachinesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_sizes = VirtualMachineSizesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.images = ImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_sets = VirtualMachineScaleSetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_set_extensions = VirtualMachineScaleSetExtensionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_set_rolling_upgrades = VirtualMachineScaleSetRollingUpgradesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_set_vms = VirtualMachineScaleSetVMsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.log_analytics = LogAnalyticsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_run_commands = VirtualMachineRunCommandsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.disks = DisksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.snapshots = SnapshotsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def close(self) -> None:
        """Close the underlying pipeline client and its transport."""
        await self._client.close()

    async def __aenter__(self) -> "ComputeManagementClient":
        """Enter the async context, opening the pipeline client."""
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        """Exit the async context, closing the pipeline client."""
        await self._client.__aexit__(*exc_details)
| 63.302013
| 172
| 0.791031
|
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import ComputeManagementClientConfiguration
from .operations_async import Operations
from .operations_async import AvailabilitySetsOperations
from .operations_async import ProximityPlacementGroupsOperations
from .operations_async import VirtualMachineExtensionImagesOperations
from .operations_async import VirtualMachineExtensionsOperations
from .operations_async import VirtualMachineImagesOperations
from .operations_async import UsageOperations
from .operations_async import VirtualMachinesOperations
from .operations_async import VirtualMachineSizesOperations
from .operations_async import ImagesOperations
from .operations_async import VirtualMachineScaleSetsOperations
from .operations_async import VirtualMachineScaleSetExtensionsOperations
from .operations_async import VirtualMachineScaleSetRollingUpgradesOperations
from .operations_async import VirtualMachineScaleSetVMsOperations
from .operations_async import LogAnalyticsOperations
from .operations_async import VirtualMachineRunCommandsOperations
from .operations_async import DisksOperations
from .operations_async import SnapshotsOperations
from .. import models
class ComputeManagementClient(object):
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ComputeManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.availability_sets = AvailabilitySetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.proximity_placement_groups = ProximityPlacementGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_extension_images = VirtualMachineExtensionImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_extensions = VirtualMachineExtensionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_images = VirtualMachineImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usage = UsageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machines = VirtualMachinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.images = ImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_sets = VirtualMachineScaleSetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_extensions = VirtualMachineScaleSetExtensionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_rolling_upgrades = VirtualMachineScaleSetRollingUpgradesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_vms = VirtualMachineScaleSetVMsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.log_analytics = LogAnalyticsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_machine_run_commands = VirtualMachineRunCommandsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.disks = DisksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.snapshots = SnapshotsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ComputeManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| true
| true
|
f708ceee9ccbcb860ce2b2f569a32b4453f38050
| 2,392
|
py
|
Python
|
custom/penn_state/models.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
custom/penn_state/models.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
custom/penn_state/models.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from dimagi.ext.couchdbkit import (Document, StringProperty,
ListProperty, DictProperty, DateProperty)
from corehq.apps.groups.models import Group
from .constants import *
class LegacyWeeklyReport(Document):
    """Aggregate weekly strategy/game results for a single site.

    Per-day values are stored as five-element lists where ``0`` means
    nothing was recorded and ``-1`` marks an off day.  ``individual``
    maps usernames to their own per-week lists plus a running
    ``weekly_totals`` history, and the top-level ``weekly_totals``
    holds the site-wide history as ``[label, count]`` pairs.
    """
    domain = StringProperty()
    site = StringProperty()
    week_end_date = DateProperty()
    site_strategy = ListProperty()
    site_game = ListProperty()
    individual = DictProperty()
    weekly_totals = ListProperty()

    @classmethod
    def by_site(cls, site, date=None):
        """Return the report for ``site`` for the week ending on ``date``.

        ``site`` may be a Group or a site name; ``date`` defaults to the
        most recent Saturday (inclusive of today).
        """
        if isinstance(site, Group):
            site = site.name
        if date is None:
            today = datetime.date.today()
            # Distance back to the latest Saturday: isoweekday() is 6 for
            # Saturday, so (isoweekday - 6) mod 7 gives 0 on Saturday,
            # 1 on Sunday, ... 6 on Friday.
            date = today - datetime.timedelta(
                days=(today.isoweekday() - 6) % 7
            )
        return cls.view(
            'penn_state/smiley_weekly_reports',
            key=[DOMAIN, site, str(date)],
            reduce=False,
            include_docs=True,
        ).first()

    @classmethod
    def by_user(cls, user, date=None):
        """Return the report for the user's (single) reporting group."""
        groups = Group.by_user(user).all()
        # Users should only have one group, and it should be a report group.
        if not groups or not groups[0].reporting:
            return None
        return cls.by_site(groups[0].name, date)
| 30.278481
| 75
| 0.539716
|
import datetime
from dimagi.ext.couchdbkit import (Document, StringProperty,
ListProperty, DictProperty, DateProperty)
from corehq.apps.groups.models import Group
from .constants import *
class LegacyWeeklyReport(Document):
    """Stores the aggregate weekly smiley results for one site.

    Week data appears as five-element lists (0 = nothing recorded,
    -1 presumably an off day — TODO confirm against the commented
    original); ``individual`` maps usernames to their per-week data and
    ``weekly_totals`` holds ``[label, count]`` history pairs.
    """
    domain = StringProperty()
    site = StringProperty()
    week_end_date = DateProperty()
    site_strategy = ListProperty()
    site_game = ListProperty()
    individual = DictProperty()
    weekly_totals = ListProperty()
    @classmethod
    def by_site(cls, site, date=None):
        """Fetch the report doc for ``site`` for the week ending ``date``.

        ``site`` may be a Group or a plain name; ``date`` defaults to
        the most recent Saturday.
        """
        if isinstance(site, Group):
            site = site.name
        if date is None:
            # Most recent Saturday: isoweekday 6 maps to offset 0,
            # Sunday (7) to 1, Monday (1) to 2, ... Friday (5) to 6.
            days = [6, 7, 1, 2, 3, 4, 5]
            today = datetime.date.today()
            date = today - datetime.timedelta(
                days=days.index(today.isoweekday())
            )
        report = cls.view(
            'penn_state/smiley_weekly_reports',
            key=[DOMAIN, site, str(date)],
            reduce=False,
            include_docs=True,
        ).first()
        return report
    @classmethod
    def by_user(cls, user, date=None):
        """Fetch the report for the user's reporting group, or None."""
        groups = Group.by_user(user).all()
        # Only the first group is consulted; bail out unless it is a
        # reporting group.
        if len(groups) == 0 or not groups[0].reporting:
            return
        site = groups[0].name
        return cls.by_site(site, date)
| true
| true
|
f708cef5f660495ed8e57399503b12749a716007
| 121,273
|
py
|
Python
|
tests/unit/gapic/gaming_v1beta/test_game_server_clusters_service.py
|
LaudateCorpus1/python-game-servers
|
9e22e6dd4e2543d694e33eb1ec2c4f9a05d8b940
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/gaming_v1beta/test_game_server_clusters_service.py
|
LaudateCorpus1/python-game-servers
|
9e22e6dd4e2543d694e33eb1ec2c4f9a05d8b940
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/gaming_v1beta/test_game_server_clusters_service.py
|
LaudateCorpus1/python-game-servers
|
9e22e6dd4e2543d694e33eb1ec2c4f9a05d8b940
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
GameServerClustersServiceAsyncClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
GameServerClustersServiceClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import pagers
from google.cloud.gaming_v1beta.services.game_server_clusters_service import transports
from google.cloud.gaming_v1beta.types import common
from google.cloud.gaming_v1beta.types import game_server_clusters
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-cert callback returning placeholder cert/key bytes."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a test endpoint for *client*.

    If the client's default endpoint is localhost, substitute a fake
    googleapis host so the derived mTLS endpoint differs from the
    default endpoint during endpoint-selection tests; otherwise return
    the default endpoint unchanged.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps googleapis.com (and sandbox) hosts to
    their mTLS variants, is idempotent on already-mTLS hosts, passes
    non-Google hosts through unchanged, and tolerates None."""
    get_mtls = GameServerClustersServiceClient._get_default_mtls_endpoint

    assert get_mtls(None) is None
    # Plain API host -> mTLS host.
    assert get_mtls("example.googleapis.com") == "example.mtls.googleapis.com"
    # Already-mTLS host is returned as-is.
    assert get_mtls("example.mtls.googleapis.com") == "example.mtls.googleapis.com"
    # Sandbox host -> sandbox mTLS host, likewise idempotent.
    assert (
        get_mtls("example.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    assert (
        get_mtls("example.mtls.sandbox.googleapis.com")
        == "example.mtls.sandbox.googleapis.com"
    )
    # Non-Google hosts are untouched.
    assert get_mtls("api.example.com") == "api.example.com"
@pytest.mark.parametrize(
    "client_class",
    [GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client whose transport carries the
    credentials produced by the service-account factory and the default host."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = fake_creds
        client = client_class.from_service_account_info({"valid": True})

        assert isinstance(client, client_class)
        assert client.transport._credentials == fake_creds
        assert client.transport._host == "gameservices.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.GameServerClustersServiceGrpcTransport, "grpc"),
        (transports.GameServerClustersServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_game_server_clusters_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport enables self-signed JWT on the credentials exactly when
    always_use_jwt_access=True is requested."""
    for flag in (True, False):
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            transport_class(credentials=creds, always_use_jwt_access=flag)
            if flag:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class",
    [GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_file(client_class):
    """Both from_service_account_file and its from_service_account_json alias
    build a client around the factory-supplied credentials."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = fake_creds
        for constructor in (
            client_class.from_service_account_file,
            client_class.from_service_account_json,
        ):
            client = constructor("dummy/file/path.json")
            assert isinstance(client, client_class)
            assert client.transport._credentials == fake_creds

        assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and
    when requested by name."""
    default_transport = GameServerClustersServiceClient.get_transport_class()
    assert default_transport in [
        transports.GameServerClustersServiceGrpcTransport,
    ]

    named_transport = GameServerClustersServiceClient.get_transport_class("grpc")
    assert named_transport == transports.GameServerClustersServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    GameServerClustersServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
    GameServerClustersServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honors client_options (api_endpoint,
    quota_project_id) and the GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE environment variables when
    selecting and configuring the transport."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(
        GameServerClustersServiceClient, "get_transport_class"
    ) as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(
        GameServerClustersServiceClient, "get_transport_class"
    ) as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    GameServerClustersServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
    GameServerClustersServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_game_server_clusters_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"
    and a client certificate (explicit or ADC) is available."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance from
                    # the previous block (generated-code quirk); the class
                    # attribute values are the same, so behavior is unchanged.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class",
    [GameServerClustersServiceClient, GameServerClustersServiceAsyncClient],
)
@mock.patch.object(
    GameServerClustersServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
    GameServerClustersServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_get_mtls_endpoint_and_cert_source(
    client_class,
):
    """get_mtls_endpoint_and_cert_source resolves (endpoint, cert_source)
    from client_options and the GOOGLE_API_USE_CLIENT_CERTIFICATE /
    GOOGLE_API_USE_MTLS_ENDPOINT environment variables."""
    mock_client_cert_source = mock.Mock()

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_game_server_clusters_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """OAuth scopes given via client_options are forwarded to the transport."""
    opts = client_options.ClientOptions(scopes=["1", "2"])
    with mock.patch.object(transport_class, "__init__") as init_mock:
        init_mock.return_value = None
        client = client_class(client_options=opts, transport=transport_name)
        init_mock.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_game_server_clusters_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials file named in client_options is handed to the transport."""
    opts = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as init_mock:
        init_mock.return_value = None
        client = client_class(client_options=opts, transport=transport_name)
        init_mock.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_game_server_clusters_service_client_client_options_from_dict():
    """client_options may be a plain dict; api_endpoint from it reaches the
    gRPC transport constructor."""
    target = (
        "google.cloud.gaming_v1beta.services.game_server_clusters_service."
        "transports.GameServerClustersServiceGrpcTransport.__init__"
    )
    with mock.patch(target) as grpc_transport:
        grpc_transport.return_value = None
        GameServerClustersServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.ListGameServerClustersRequest, dict,]
)
def test_list_game_server_clusters(request_type, transport: str = "grpc"):
    """list_game_server_clusters sends the request to the stub and wraps the
    response in a pager exposing next_page_token and unreachable."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.ListGameServerClustersResponse(
            next_page_token="next_page_token_value", unreachable=["unreachable_value"],
        )
        response = client.list_game_server_clusters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.ListGameServerClustersRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListGameServerClustersPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
def test_list_game_server_clusters_empty_call():
    """Coverage failsafe: calling with no request object and no flattened
    fields still sends a default request."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as rpc:
        client.list_game_server_clusters()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == game_server_clusters.ListGameServerClustersRequest()
@pytest.mark.asyncio
async def test_list_game_server_clusters_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.ListGameServerClustersRequest,
):
    """Async variant: the awaited call returns an async pager carrying the
    faked response fields."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.ListGameServerClustersResponse(
                next_page_token="next_page_token_value",
                unreachable=["unreachable_value"],
            )
        )
        response = await client.list_game_server_clusters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.ListGameServerClustersRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListGameServerClustersAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_from_dict():
    # The async call must also accept a plain dict request.
    await test_list_game_server_clusters_async(request_type=dict)
def test_list_game_server_clusters_field_headers():
    """The request's `parent` field is mirrored into the
    x-goog-request-params routing header."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.ListGameServerClustersRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        call.return_value = game_server_clusters.ListGameServerClustersResponse()
        client.list_game_server_clusters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_field_headers_async():
    """Async variant of the routing-header test: `parent` is propagated into
    x-goog-request-params."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.ListGameServerClustersRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.ListGameServerClustersResponse()
        )
        await client.list_game_server_clusters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_game_server_clusters_flattened():
    """Flattened keyword arguments populate the outgoing request object."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as rpc:
        # Fake the stub response.
        rpc.return_value = game_server_clusters.ListGameServerClustersResponse()

        # Invoke with the flattened `parent` keyword.
        client.list_game_server_clusters(parent="parent_value",)

        # The single underlying call must carry the value in its request.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == "parent_value"
def test_list_game_server_clusters_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        client.list_game_server_clusters(
            game_server_clusters.ListGameServerClustersRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_async():
    """Async variant: flattened keyword arguments populate the request.

    Fix: the generated code assigned a plain response to
    ``call.return_value`` and immediately overwrote it with the
    FakeUnaryUnaryCall wrapper; the dead first assignment is removed.
    """
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Designate an appropriate awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.ListGameServerClustersResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_game_server_clusters(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises
    ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.list_game_server_clusters(
            game_server_clusters.ListGameServerClustersRequest(),
            parent="parent_value",
        )
def test_list_game_server_clusters_pager(transport_name: str = "grpc"):
    """Iterating the pager walks all pages and yields every cluster.

    Fix: replaced the identity comprehension ``[i for i in pager]`` with
    ``list(pager)`` (same result, idiomatic).
    """
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_game_server_clusters(request={})

        # The routing metadata is attached to the pager.
        assert pager._metadata == metadata

        # All four pages flatten to six clusters.
        results = list(pager)
        assert len(results) == 6
        assert all(
            isinstance(i, game_server_clusters.GameServerCluster) for i in results
        )
def test_list_game_server_clusters_pages(transport_name: str = "grpc"):
    """Page-level iteration must expose each page's next_page_token in order."""
    client = GameServerClustersServiceClient(
        # Bug fix: instantiate the credentials -- the original passed the
        # AnonymousCredentials *class* (missing parentheses), unlike every
        # other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_game_server_clusters(request={}).pages)
        # The last page has no token, hence the trailing empty string.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pager():
    """Async pager must surface every cluster across the mocked pages."""
    client = GameServerClustersServiceAsyncClient(
        # Bug fix: instantiate the credentials -- the original passed the
        # AnonymousCredentials *class* (missing parentheses), unlike every
        # other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_game_server_clusters(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(
            isinstance(i, game_server_clusters.GameServerCluster) for i in responses
        )
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pages():
    """Async page-level iteration must expose each page's next_page_token."""
    client = GameServerClustersServiceAsyncClient(
        # Bug fix: instantiate the credentials -- the original passed the
        # AnonymousCredentials *class* (missing parentheses), unlike every
        # other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_game_server_clusters(request={})).pages:
            pages.append(page_)
        # The last page has no token, hence the trailing empty string.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.GetGameServerClusterRequest, dict,]
)
def test_get_game_server_cluster(request_type, transport: str = "grpc"):
    """get_game_server_cluster forwards the request and unpacks the response.

    Parametrized over the proto request class and a plain dict, the two
    request representations the generated client accepts.
    """
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.GameServerCluster(
            name="name_value", etag="etag_value", description="description_value",
        )
        response = client.get_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.GetGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, game_server_clusters.GameServerCluster)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
def test_get_game_server_cluster_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    rpc = client.transport.get_game_server_cluster
    with mock.patch.object(type(rpc), "__call__") as call:
        client.get_game_server_cluster()
        call.assert_called()
        # Even an empty invocation must send a default request message.
        request_sent = call.mock_calls[0][1][0]
        assert request_sent == game_server_clusters.GetGameServerClusterRequest()
@pytest.mark.asyncio
async def test_get_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.GetGameServerClusterRequest,
):
    """Async variant: get_game_server_cluster awaits and unpacks the reply."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.GameServerCluster(
                name="name_value", etag="etag_value", description="description_value",
            )
        )
        response = await client.get_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.GetGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, game_server_clusters.GameServerCluster)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_game_server_cluster_async_from_dict():
    """Re-run the async test with the request supplied as a plain dict."""
    await test_get_game_server_cluster_async(request_type=dict)
def test_get_game_server_cluster_field_headers():
    """The request's `name` must be echoed in x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.GetGameServerClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        call.return_value = game_server_clusters.GameServerCluster()
        client.get_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_game_server_cluster_field_headers_async():
    """Async variant: request `name` must appear in the routing header."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.GetGameServerClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.GameServerCluster()
        )
        await client.get_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_game_server_cluster_flattened():
    """The flattened `name=` keyword must populate request.name."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.GameServerCluster()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_game_server_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_game_server_cluster_flattened_error():
    """A request object combined with flattened arguments must be rejected."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing an explicit request message with flattened keyword arguments
    # is ambiguous, so the client raises ValueError.
    request = game_server_clusters.GetGameServerClusterRequest()
    with pytest.raises(ValueError):
        client.get_game_server_cluster(request, name="name_value")
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_async():
    """Async flattened call: the `name=` keyword must populate request.name."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A preceding
        # synchronous assignment was dead code -- it was immediately
        # overwritten by this one -- and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.GameServerCluster()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_game_server_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_error_async():
    """Async client rejects a request object mixed with flattened fields."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both an explicit request message and flattened keyword
    # arguments is ambiguous, so the call must raise ValueError.
    request = game_server_clusters.GetGameServerClusterRequest()
    with pytest.raises(ValueError):
        await client.get_game_server_cluster(request, name="name_value")
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.CreateGameServerClusterRequest, dict,]
)
def test_create_game_server_cluster(request_type, transport: str = "grpc"):
    """create_game_server_cluster returns a long-running-operation future."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_game_server_cluster_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    rpc = client.transport.create_game_server_cluster
    with mock.patch.object(type(rpc), "__call__") as call:
        client.create_game_server_cluster()
        call.assert_called()
        # Even an empty invocation must send a default request message.
        request_sent = call.mock_calls[0][1][0]
        assert request_sent == game_server_clusters.CreateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_create_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.CreateGameServerClusterRequest,
):
    """Async variant: create_game_server_cluster yields an operation future."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_game_server_cluster_async_from_dict():
    """Re-run the async test with the request supplied as a plain dict."""
    await test_create_game_server_cluster_async(request_type=dict)
def test_create_game_server_cluster_field_headers():
    """The request's `parent` must be echoed in x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.CreateGameServerClusterRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_game_server_cluster_field_headers_async():
    """Async variant: request `parent` must appear in the routing header."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.CreateGameServerClusterRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_game_server_cluster_flattened():
    """Each flattened keyword must populate its corresponding request field."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_game_server_cluster(
            parent="parent_value",
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            game_server_cluster_id="game_server_cluster_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].game_server_cluster_id
        mock_val = "game_server_cluster_id_value"
        assert arg == mock_val
def test_create_game_server_cluster_flattened_error():
    """A request object combined with flattened arguments must be rejected."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing an explicit request message with flattened keyword arguments
    # is ambiguous, so the client raises ValueError.
    request = game_server_clusters.CreateGameServerClusterRequest()
    cluster = game_server_clusters.GameServerCluster(name="name_value")
    with pytest.raises(ValueError):
        client.create_game_server_cluster(
            request,
            parent="parent_value",
            game_server_cluster=cluster,
            game_server_cluster_id="game_server_cluster_id_value",
        )
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_async():
    """Async flattened call: each keyword must populate its request field."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A preceding
        # synchronous assignment was dead code -- it was immediately
        # overwritten by this one -- and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_game_server_cluster(
            parent="parent_value",
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            game_server_cluster_id="game_server_cluster_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].game_server_cluster_id
        mock_val = "game_server_cluster_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_error_async():
    """Async client rejects a request object mixed with flattened fields."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both an explicit request message and flattened keyword
    # arguments is ambiguous, so the call must raise ValueError.
    request = game_server_clusters.CreateGameServerClusterRequest()
    cluster = game_server_clusters.GameServerCluster(name="name_value")
    with pytest.raises(ValueError):
        await client.create_game_server_cluster(
            request,
            parent="parent_value",
            game_server_cluster=cluster,
            game_server_cluster_id="game_server_cluster_id_value",
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.PreviewCreateGameServerClusterRequest, dict,]
)
def test_preview_create_game_server_cluster(request_type, transport: str = "grpc"):
    """preview_create returns a preview response carrying the expected etag."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.PreviewCreateGameServerClusterResponse(
            etag="etag_value",
        )
        response = client.preview_create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewCreateGameServerClusterResponse
    )
    assert response.etag == "etag_value"
def test_preview_create_game_server_cluster_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    rpc = client.transport.preview_create_game_server_cluster
    with mock.patch.object(type(rpc), "__call__") as call:
        client.preview_create_game_server_cluster()
        call.assert_called()
        # Even an empty invocation must send a default request message.
        request_sent = call.mock_calls[0][1][0]
        assert request_sent == (
            game_server_clusters.PreviewCreateGameServerClusterRequest()
        )
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.PreviewCreateGameServerClusterRequest,
):
    """Async variant: preview_create awaits and unpacks the preview response."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewCreateGameServerClusterResponse(
                etag="etag_value",
            )
        )
        response = await client.preview_create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewCreateGameServerClusterResponse
    )
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async_from_dict():
    """Re-run the async test with the request supplied as a plain dict."""
    await test_preview_create_game_server_cluster_async(request_type=dict)
def test_preview_create_game_server_cluster_field_headers():
    """The request's `parent` must be echoed in x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewCreateGameServerClusterRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = (
            game_server_clusters.PreviewCreateGameServerClusterResponse()
        )
        client.preview_create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_field_headers_async():
    """Async variant: request `parent` must appear in the routing header."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewCreateGameServerClusterRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewCreateGameServerClusterResponse()
        )
        await client.preview_create_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.DeleteGameServerClusterRequest, dict,]
)
def test_delete_game_server_cluster(request_type, transport: str = "grpc"):
    """delete_game_server_cluster returns a long-running-operation future."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_game_server_cluster_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    rpc = client.transport.delete_game_server_cluster
    with mock.patch.object(type(rpc), "__call__") as call:
        client.delete_game_server_cluster()
        call.assert_called()
        # Even an empty invocation must send a default request message.
        request_sent = call.mock_calls[0][1][0]
        assert request_sent == game_server_clusters.DeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.DeleteGameServerClusterRequest,
):
    """Async variant: delete_game_server_cluster yields an operation future."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async_from_dict():
    """Re-run the async test with the request supplied as a plain dict."""
    await test_delete_game_server_cluster_async(request_type=dict)
def test_delete_game_server_cluster_field_headers():
    """The request's `name` must be echoed in x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.DeleteGameServerClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_game_server_cluster_field_headers_async():
    """Async variant: request `name` must appear in the routing header."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.DeleteGameServerClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_game_server_cluster_flattened():
    """The flattened `name=` keyword must populate request.name."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_game_server_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_game_server_cluster_flattened_error():
    """A request object combined with flattened arguments must be rejected."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing an explicit request message with flattened keyword arguments
    # is ambiguous, so the client raises ValueError.
    request = game_server_clusters.DeleteGameServerClusterRequest()
    with pytest.raises(ValueError):
        client.delete_game_server_cluster(request, name="name_value")
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_async():
    """Async flattened call: the `name=` keyword must populate request.name."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A preceding
        # synchronous assignment was dead code -- it was immediately
        # overwritten by this one -- and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_game_server_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_game_server_cluster(
            game_server_clusters.DeleteGameServerClusterRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.PreviewDeleteGameServerClusterRequest, dict,]
)
def test_preview_delete_game_server_cluster(request_type, transport: str = "grpc"):
    """Smoke-test the sync preview-delete RPC with both proto and dict requests."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.PreviewDeleteGameServerClusterResponse(
            etag="etag_value",
        )
        response = client.preview_delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewDeleteGameServerClusterResponse
    )
    assert response.etag == "etag_value"
def test_preview_delete_game_server_cluster_empty_call():
    """Calling with no arguments must still send a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        client.preview_delete_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.PreviewDeleteGameServerClusterRequest,
):
    """Smoke-test the async preview-delete RPC end-to-end through a fake stub."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewDeleteGameServerClusterResponse(
                etag="etag_value",
            )
        )
        response = await client.preview_delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewDeleteGameServerClusterResponse
    )
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async_from_dict():
    """Re-run the async preview-delete test with a dict-typed request."""
    await test_preview_delete_game_server_cluster_async(request_type=dict)
def test_preview_delete_game_server_cluster_field_headers():
    """URI-bound fields must be mirrored in the x-goog-request-params header."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = (
            game_server_clusters.PreviewDeleteGameServerClusterResponse()
        )
        client.preview_delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_field_headers_async():
    """Async variant: URI-bound fields are mirrored in request-params metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewDeleteGameServerClusterResponse()
        )
        await client.preview_delete_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.UpdateGameServerClusterRequest, dict,]
)
def test_update_game_server_cluster(request_type, transport: str = "grpc"):
    """Smoke-test the sync update RPC; it should return an LRO future."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_game_server_cluster_empty_call():
    """Calling update with no arguments must still send a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        client.update_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_update_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.UpdateGameServerClusterRequest,
):
    """Smoke-test the async update RPC; it should return an LRO future."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_game_server_cluster_async_from_dict():
    """Re-run the async update test with a dict-typed request."""
    await test_update_game_server_cluster_async(request_type=dict)
def test_update_game_server_cluster_field_headers():
    """Nested URI-bound fields must appear in the x-goog-request-params header."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.UpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "game_server_cluster.name=game_server_cluster.name/value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_game_server_cluster_field_headers_async():
    """Async variant: nested URI-bound fields are mirrored in metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.UpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "game_server_cluster.name=game_server_cluster.name/value",
    ) in kw["metadata"]
def test_update_game_server_cluster_flattened():
    """Verify flattened kwargs populate the request for the sync update call."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_game_server_cluster(
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_game_server_cluster_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_game_server_cluster(
            game_server_clusters.UpdateGameServerClusterRequest(),
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_async():
    """Verify flattened kwargs populate the request for the async update call."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # NOTE: the async stub must yield an awaitable; a bare Operation
        # assignment here would be dead code, so only the fake call is set.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_game_server_cluster(
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_game_server_cluster(
            game_server_clusters.UpdateGameServerClusterRequest(),
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.PreviewUpdateGameServerClusterRequest, dict,]
)
def test_preview_update_game_server_cluster(request_type, transport: str = "grpc"):
    """Smoke-test the sync preview-update RPC with both proto and dict requests."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.PreviewUpdateGameServerClusterResponse(
            etag="etag_value",
        )
        response = client.preview_update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewUpdateGameServerClusterResponse
    )
    assert response.etag == "etag_value"
def test_preview_update_game_server_cluster_empty_call():
    """Calling with no arguments must still send a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        client.preview_update_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.PreviewUpdateGameServerClusterRequest,
):
    """Smoke-test the async preview-update RPC end-to-end through a fake stub."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewUpdateGameServerClusterResponse(
                etag="etag_value",
            )
        )
        response = await client.preview_update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewUpdateGameServerClusterResponse
    )
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async_from_dict():
    """Re-run the async preview-update test with a dict-typed request."""
    await test_preview_update_game_server_cluster_async(request_type=dict)
def test_preview_update_game_server_cluster_field_headers():
    """Nested URI-bound fields must appear in the x-goog-request-params header."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = (
            game_server_clusters.PreviewUpdateGameServerClusterResponse()
        )
        client.preview_update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "game_server_cluster.name=game_server_cluster.name/value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_field_headers_async():
    """Async variant: nested URI-bound fields are mirrored in metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewUpdateGameServerClusterResponse()
        )
        await client.preview_update_game_server_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "game_server_cluster.name=game_server_cluster.name/value",
    ) in kw["metadata"]
def test_credentials_transport_error():
    """Each invalid combination of credentials/options with an explicit
    transport instance must raise ValueError at client construction."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options=options, transport=transport,
        )
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client built from an explicit transport must expose that transport."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = GameServerClustersServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and asyncio transports must expose a usable gRPC channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel

    transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports built without credentials must fall back to ADC."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport specified, the client must default to gRPC."""
    # A client should use the gRPC transport by default.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport, transports.GameServerClustersServiceGrpcTransport,
    )
def test_game_server_clusters_service_base_transport_error():
    """Supplying both credentials and a credentials file must be rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.GameServerClustersServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_game_server_clusters_service_base_transport():
    """Every RPC method and the LRO client on the abstract base transport
    must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.GameServerClustersServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_game_server_clusters",
        "get_game_server_cluster",
        "create_game_server_cluster",
        "preview_create_game_server_cluster",
        "delete_game_server_cluster",
        "preview_delete_game_server_cluster",
        "update_game_server_cluster",
        "preview_update_game_server_cluster",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()

    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_game_server_clusters_service_base_transport_with_credentials_file():
    """A credentials file passed to the transport must be loaded with the
    expected scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.GameServerClustersServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_game_server_clusters_service_base_transport_with_adc():
    """With neither credentials nor a file, the transport must consult ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.GameServerClustersServiceTransport()
        adc.assert_called_once()
def test_game_server_clusters_service_auth_adc():
    """The client must request ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        GameServerClustersServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_transport_auth_adc(transport_class):
    """Transports must forward explicit scopes and quota project to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.GameServerClustersServiceGrpcTransport, grpc_helpers),
        (transports.GameServerClustersServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_game_server_clusters_service_transport_create_channel(
    transport_class, grpc_helpers
):
    """Channel creation must receive the expected host, credentials, scopes,
    and unlimited-message-size gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])

        create_channel.assert_called_with(
            "gameservices.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="gameservices.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """mTLS setup: explicit ssl_channel_credentials wins; otherwise the
    client_cert_source callback feeds grpc.ssl_channel_credentials."""
    cred = ga_credentials.AnonymousCredentials()

    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_game_server_clusters_service_host_no_port():
    """An endpoint without a port must default to :443."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="gameservices.googleapis.com"
        ),
    )
    assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_host_with_port():
    """An endpoint with an explicit port must be used verbatim."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="gameservices.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "gameservices.googleapis.com:8000"
def test_game_server_clusters_service_grpc_transport_channel():
    """A pre-built channel handed to the sync transport must be used as-is,
    leaving the transport's own SSL credentials unset."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.GameServerClustersServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` rather than `== None`: identity test for the None singleton (PEP 8).
    assert transport._ssl_channel_credentials is None
def test_game_server_clusters_service_grpc_asyncio_transport_channel():
    """A pre-built channel handed to the asyncio transport must be used as-is,
    leaving the transport's own SSL credentials unset."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` rather than `== None`: identity test for the None singleton (PEP 8).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint/client_cert_source args still build an mTLS
    channel from the provided cert callback, using ADC credentials."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated kwargs must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The (cert, key) pair from the callback must feed the SSL creds.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel must target the mTLS endpoint with those SSL creds.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint without a cert source falls back to the
    ADC-provided SSL credentials when building the mTLS channel."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Using the deprecated kwargs must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The channel must target the mTLS endpoint with ADC SSL creds.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_game_server_clusters_service_grpc_lro_client():
    """The sync gRPC transport exposes a cached api-core operations client."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    lro_transport = client.transport
    ops_client = lro_transport.operations_client
    # Must be an api-core operations client.
    assert isinstance(ops_client, operations_v1.OperationsClient)
    # Repeated property access returns the memoized instance.
    assert lro_transport.operations_client is ops_client
def test_game_server_clusters_service_grpc_lro_async_client():
    """The async gRPC transport exposes a cached api-core operations client."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    lro_transport = client.transport
    ops_client = lro_transport.operations_client
    # Must be the async api-core operations client.
    assert isinstance(ops_client, operations_v1.OperationsAsyncClient)
    # Repeated property access returns the memoized instance.
    assert lro_transport.operations_client is ops_client
def test_game_server_cluster_path():
    """game_server_cluster_path renders the full cluster resource name."""
    ids = ("squid", "clam", "whelk", "octopus")
    project, location, realm, cluster = ids
    want = (
        f"projects/{project}/locations/{location}"
        f"/realms/{realm}/gameServerClusters/{cluster}"
    )
    got = GameServerClustersServiceClient.game_server_cluster_path(*ids)
    assert got == want
def test_parse_game_server_cluster_path():
    """A rendered cluster path parses back into its component ids."""
    components = {
        "project": "oyster",
        "location": "nudibranch",
        "realm": "cuttlefish",
        "cluster": "mussel",
    }
    path = GameServerClustersServiceClient.game_server_cluster_path(**components)
    # Path construction must be reversible.
    parsed = GameServerClustersServiceClient.parse_game_server_cluster_path(path)
    assert parsed == components
def test_common_billing_account_path():
    """common_billing_account_path renders billingAccounts/{id}."""
    billing_account = "winkle"
    want = f"billingAccounts/{billing_account}"
    got = GameServerClustersServiceClient.common_billing_account_path(billing_account)
    assert got == want
def test_parse_common_billing_account_path():
    """A rendered billing-account path parses back into its components."""
    components = {"billing_account": "nautilus"}
    path = GameServerClustersServiceClient.common_billing_account_path(**components)
    # Path construction must be reversible.
    assert (
        GameServerClustersServiceClient.parse_common_billing_account_path(path)
        == components
    )
def test_common_folder_path():
    """common_folder_path renders folders/{folder}."""
    folder_id = "scallop"
    want = f"folders/{folder_id}"
    assert GameServerClustersServiceClient.common_folder_path(folder_id) == want
def test_parse_common_folder_path():
    """A rendered folder path parses back into its components."""
    components = {"folder": "abalone"}
    path = GameServerClustersServiceClient.common_folder_path(**components)
    # Path construction must be reversible.
    assert GameServerClustersServiceClient.parse_common_folder_path(path) == components
def test_common_organization_path():
    """common_organization_path renders organizations/{organization}."""
    org_id = "squid"
    want = f"organizations/{org_id}"
    assert GameServerClustersServiceClient.common_organization_path(org_id) == want
def test_parse_common_organization_path():
    """A rendered organization path parses back into its components."""
    components = {"organization": "clam"}
    path = GameServerClustersServiceClient.common_organization_path(**components)
    # Path construction must be reversible.
    parsed = GameServerClustersServiceClient.parse_common_organization_path(path)
    assert parsed == components
def test_common_project_path():
    """common_project_path renders projects/{project}."""
    project_id = "whelk"
    want = f"projects/{project_id}"
    assert GameServerClustersServiceClient.common_project_path(project_id) == want
def test_parse_common_project_path():
    """A rendered project path parses back into its components."""
    components = {"project": "octopus"}
    path = GameServerClustersServiceClient.common_project_path(**components)
    # Path construction must be reversible.
    parsed = GameServerClustersServiceClient.parse_common_project_path(path)
    assert parsed == components
def test_common_location_path():
    """common_location_path renders projects/{project}/locations/{location}."""
    project_id, location_id = "oyster", "nudibranch"
    want = f"projects/{project_id}/locations/{location_id}"
    got = GameServerClustersServiceClient.common_location_path(project_id, location_id)
    assert got == want
def test_parse_common_location_path():
    """A rendered location path parses back into its components."""
    components = {"project": "cuttlefish", "location": "mussel"}
    path = GameServerClustersServiceClient.common_location_path(**components)
    # Path construction must be reversible.
    parsed = GameServerClustersServiceClient.parse_common_location_path(path)
    assert parsed == components
def test_client_with_default_client_info():
    """client_info is forwarded to _prep_wrapped_messages for both construction
    paths: via the client surface and via a transport class directly."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Construction via the client surface.
    with mock.patch.object(
        transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Construction via the transport class directly.
    with mock.patch.object(
        transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = GameServerClustersServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Using the async client as a context manager closes the underlying gRPC
    channel exactly once, on exit."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            # Not closed while the context is still open.
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Using the sync client as a context manager closes the named channel
    attribute exactly once, on exit."""
    # NOTE(review): this local dict shadows the module-level `transports` import
    # for the remainder of the function. Maps transport name -> channel attr.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                # Not closed while the context is still open.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Entering/exiting the client context manager closes its transport."""
    # NOTE(review): this local list shadows the module-level `transports` import
    # for the remainder of the function.
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
        ),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key supplied via ClientOptions is exchanged for credentials
    (via google.auth._default.get_api_key_credentials) at construction time."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The API-key-derived credentials must be handed to the transport.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| 38.683573
| 143
| 0.698573
|
# Fix: two import lines were corrupted by having several statements merged onto
# one physical line (a syntax error); they are split out below. All original
# imports are retained, regrouped stdlib / third-party / google per PEP 8.
import math
import os

import mock
import pytest
import grpc
from grpc.experimental import aio
from proto.marshal.rules.dates import DurationRule, TimestampRule

import google.auth
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
    GameServerClustersServiceAsyncClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import (
    GameServerClustersServiceClient,
)
from google.cloud.gaming_v1beta.services.game_server_clusters_service import pagers
from google.cloud.gaming_v1beta.services.game_server_clusters_service import transports
from google.cloud.gaming_v1beta.types import common
from google.cloud.gaming_v1beta.types import game_server_clusters
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair used to fake mTLS client certs."""
    return (b"cert bytes", b"key bytes")
def modify_default_endpoint(client):
    """Return a sentinel endpoint when the client's default targets localhost.

    If DEFAULT_ENDPOINT contains "localhost" (tests running against an
    emulator), substitute "foo.googleapis.com" so endpoint-resolution logic
    can still be exercised; otherwise return the default unchanged.
    """
    default = client.DEFAULT_ENDPOINT
    if "localhost" in default:
        return "foo.googleapis.com"
    return default
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mTLS
    variants, is idempotent on already-mTLS hosts, and leaves non-Google
    hosts (and None) untouched."""
    convert = GameServerClustersServiceClient._get_default_mtls_endpoint
    assert convert(None) is None
    cases = {
        "example.googleapis.com": "example.mtls.googleapis.com",
        "example.mtls.googleapis.com": "example.mtls.googleapis.com",
        "example.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "example.mtls.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "api.example.com": "api.example.com",
    }
    for given, want in cases.items():
        assert convert(given) == want
@pytest.mark.parametrize(
    "client_class",
    [GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client carrying the credentials
    returned by the service_account factory, pointed at the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "gameservices.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.GameServerClustersServiceGrpcTransport, "grpc"),
        (transports.GameServerClustersServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_game_server_clusters_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """always_use_jwt_access=True triggers with_always_use_jwt_access(True) on
    service-account credentials; False must not touch them."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class",
    [GameServerClustersServiceClient, GameServerClustersServiceAsyncClient,],
)
def test_game_server_clusters_service_client_from_service_account_file(client_class):
    """Both from_service_account_file and its _json alias build clients with
    the factory-provided credentials and the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport by default and by name."""
    default_transport = GameServerClustersServiceClient.get_transport_class()
    known_transports = [
        transports.GameServerClustersServiceGrpcTransport,
    ]
    assert default_transport in known_transports
    named_transport = GameServerClustersServiceClient.get_transport_class("grpc")
    assert named_transport == transports.GameServerClustersServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    GameServerClustersServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
    GameServerClustersServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honors client_options and the GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE environment variables."""
    # Check that a provided transport instance is used as-is (no lookup).
    with mock.patch.object(
        GameServerClustersServiceClient, "get_transport_class"
    ) as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(
        GameServerClustersServiceClient, "get_transport_class"
    ) as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    GameServerClustersServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
    GameServerClustersServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_game_server_clusters_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the endpoint switches to the mTLS
    variant only when a client cert is available AND
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class",
    [GameServerClustersServiceClient, GameServerClustersServiceAsyncClient],
)
@mock.patch.object(
    GameServerClustersServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceClient),
)
@mock.patch.object(
    GameServerClustersServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(GameServerClustersServiceAsyncClient),
)
def test_game_server_clusters_service_client_get_mtls_endpoint_and_cert_source(
    client_class,
):
    """get_mtls_endpoint_and_cert_source resolves the endpoint and cert source
    from client options and the mTLS environment variables."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case a default client cert exists: the mTLS endpoint is chosen.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_game_server_clusters_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """client_options.scopes is forwarded unchanged to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
            "grpc",
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_game_server_clusters_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """client_options.credentials_file is forwarded unchanged to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_game_server_clusters_service_client_client_options_from_dict():
    """A plain dict is accepted for client_options and its api_endpoint is
    forwarded to the transport constructor."""
    with mock.patch(
        "google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = GameServerClustersServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.ListGameServerClustersRequest, dict,]
)
def test_list_game_server_clusters(request_type, transport: str = "grpc"):
    """list_game_server_clusters sends the RPC and wraps the response in a pager."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # The API is mocked out, so an empty request suffices.
    request = request_type()
    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.ListGameServerClustersResponse(
            next_page_token="next_page_token_value", unreachable=["unreachable_value"],
        )
        response = client.list_game_server_clusters(request)
        # Establish that the underlying gRPC stub method was called once.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.ListGameServerClustersRequest()
    # Establish that the response is the pager type we expect.
    assert isinstance(response, pagers.ListGameServerClustersPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
def test_list_game_server_clusters_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        client.list_game_server_clusters()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.ListGameServerClustersRequest()
@pytest.mark.asyncio
async def test_list_game_server_clusters_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.ListGameServerClustersRequest,
):
    """Async list_game_server_clusters sends the RPC and returns an async pager."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # The API is mocked out, so an empty request suffices.
    request = request_type()
    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Designate an awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.ListGameServerClustersResponse(
                next_page_token="next_page_token_value",
                unreachable=["unreachable_value"],
            )
        )
        response = await client.list_game_server_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.ListGameServerClustersRequest()
    # Establish that the response is the async pager type we expect.
    assert isinstance(response, pagers.ListGameServerClustersAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_from_dict():
    """A plain dict request must be accepted by the async list call."""
    await test_list_game_server_clusters_async(request_type=dict)
def test_list_game_server_clusters_field_headers():
    """The request's `parent` is propagated as an x-goog-request-params header."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URL should be sent as a field header;
    # set it to a non-empty value.
    request = game_server_clusters.ListGameServerClustersRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        call.return_value = game_server_clusters.ListGameServerClustersResponse()
        client.list_game_server_clusters(request)
        # Establish that the underlying gRPC stub method was called once.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_game_server_clusters_field_headers_async():
    """Async variant: `parent` propagates via x-goog-request-params metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URL should be sent as a field header;
    # set it to a non-empty value.
    request = game_server_clusters.ListGameServerClustersRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.ListGameServerClustersResponse()
        )
        await client.list_game_server_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_game_server_clusters_flattened():
    """Verify the flattened ``parent`` argument is copied into the request."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.ListGameServerClustersResponse()
        # Call the method with a flattened argument.
        client.list_game_server_clusters(parent="parent_value",)

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_game_server_clusters_flattened_error():
    """Passing a request object plus flattened fields must raise ValueError."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_game_server_clusters(
            game_server_clusters.ListGameServerClustersRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_async():
    """Async variant: the flattened ``parent`` argument reaches the request."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. Only the
        # awaitable wrapper is needed; the previous bare assignment of the
        # response object was dead code (immediately overwritten) and has
        # been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.ListGameServerClustersResponse()
        )
        # Call the method with a flattened argument.
        response = await client.list_game_server_clusters(parent="parent_value",)

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_game_server_clusters_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_game_server_clusters(
            game_server_clusters.ListGameServerClustersRequest(), parent="parent_value",
        )
def test_list_game_server_clusters_pager(transport_name: str = "grpc"):
    """Verify the sync pager iterates every item across all mocked pages."""
    client = GameServerClustersServiceClient(
        # Fix: instantiate the credentials; the original passed the
        # AnonymousCredentials *class* instead of an instance.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )

        # The pager must carry the routing metadata for ``parent``.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_game_server_clusters(request={})

        assert pager._metadata == metadata

        # Iterating the pager yields every item from every page
        # (3 + 0 + 1 + 2 = 6 items).
        results = list(pager)
        assert len(results) == 6
        assert all(
            isinstance(i, game_server_clusters.GameServerCluster) for i in results
        )
def test_list_game_server_clusters_pages(transport_name: str = "grpc"):
    """Verify page-level iteration exposes each raw page's next_page_token."""
    client = GameServerClustersServiceClient(
        # Fix: instantiate the credentials; the original passed the
        # AnonymousCredentials *class* instead of an instance.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_game_server_clusters(request={}).pages)
        # The final page carries an empty token, terminating pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pager():
    """Verify the async pager iterates every item across all mocked pages."""
    client = GameServerClustersServiceAsyncClient(
        # Fix: instantiate the credentials; the original passed the
        # AnonymousCredentials *class* instead of an instance.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_game_server_clusters(request={},)
        assert async_pager.next_page_token == "abc"
        # Collect every item across all pages (3 + 0 + 1 + 2 = 6).
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(
            isinstance(i, game_server_clusters.GameServerCluster) for i in responses
        )
@pytest.mark.asyncio
async def test_list_game_server_clusters_async_pages():
    """Verify async page-level iteration exposes each page's next_page_token."""
    client = GameServerClustersServiceAsyncClient(
        # Fix: instantiate the credentials; the original passed the
        # AnonymousCredentials *class* instead of an instance.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_game_server_clusters),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
                next_page_token="abc",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[], next_page_token="def",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[game_server_clusters.GameServerCluster(),],
                next_page_token="ghi",
            ),
            game_server_clusters.ListGameServerClustersResponse(
                game_server_clusters=[
                    game_server_clusters.GameServerCluster(),
                    game_server_clusters.GameServerCluster(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_game_server_clusters(request={})).pages:
            pages.append(page_)
        # The final page carries an empty token, terminating pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.GetGameServerClusterRequest, dict,]
)
def test_get_game_server_cluster(request_type, transport: str = "grpc"):
    """Exercise the unary get RPC and check the response fields round-trip."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.GameServerCluster(
            name="name_value", etag="etag_value", description="description_value",
        )
        response = client.get_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.GetGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, game_server_clusters.GameServerCluster)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
def test_get_game_server_cluster_empty_call():
    """Calling with no arguments should send the default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        client.get_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.GetGameServerClusterRequest()
@pytest.mark.asyncio
async def test_get_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.GetGameServerClusterRequest,
):
    """Async variant: exercise the get RPC and check the response fields."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.GameServerCluster(
                name="name_value", etag="etag_value", description="description_value",
            )
        )
        response = await client.get_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.GetGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, game_server_clusters.GameServerCluster)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_game_server_cluster_async_from_dict():
    """Re-run the async get test passing the request as a plain dict."""
    await test_get_game_server_cluster_async(request_type=dict)
def test_get_game_server_cluster_field_headers():
    """Verify ``name`` is propagated as x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.GetGameServerClusterRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        call.return_value = game_server_clusters.GameServerCluster()
        client.get_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_game_server_cluster_field_headers_async():
    """Async variant: verify ``name`` is sent as routing metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.GetGameServerClusterRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.GameServerCluster()
        )
        await client.get_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_game_server_cluster_flattened():
    """Verify the flattened ``name`` argument is copied into the request."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.GameServerCluster()
        # Call the method with a flattened argument.
        client.get_game_server_cluster(name="name_value",)

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_game_server_cluster_flattened_error():
    """Passing a request object plus flattened fields must raise ValueError."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_game_server_cluster(
            game_server_clusters.GetGameServerClusterRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_async():
    """Async variant: the flattened ``name`` argument reaches the request."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.get_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. Only the
        # awaitable wrapper is needed; the previous bare assignment of the
        # response object was dead code (immediately overwritten) and has
        # been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.GameServerCluster()
        )
        # Call the method with a flattened argument.
        response = await client.get_game_server_cluster(name="name_value",)

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_game_server_cluster_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_game_server_cluster(
            game_server_clusters.GetGameServerClusterRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.CreateGameServerClusterRequest, dict,]
)
def test_create_game_server_cluster(request_type, transport: str = "grpc"):
    """Exercise the create RPC; it is long-running and returns a Future."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.CreateGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_game_server_cluster_empty_call():
    """Calling with no arguments should send the default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        client.create_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.CreateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_create_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.CreateGameServerClusterRequest,
):
    """Async variant: exercise the long-running create RPC."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.CreateGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_game_server_cluster_async_from_dict():
    """Re-run the async create test passing the request as a plain dict."""
    await test_create_game_server_cluster_async(request_type=dict)
def test_create_game_server_cluster_field_headers():
    """Verify ``parent`` is propagated as x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.CreateGameServerClusterRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_game_server_cluster_field_headers_async():
    """Async variant: verify ``parent`` is sent as routing metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.CreateGameServerClusterRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_game_server_cluster_flattened():
    """Verify all three flattened arguments are copied into the request."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with flattened arguments.
        client.create_game_server_cluster(
            parent="parent_value",
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            game_server_cluster_id="game_server_cluster_id_value",
        )

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].game_server_cluster_id
        mock_val = "game_server_cluster_id_value"
        assert arg == mock_val
def test_create_game_server_cluster_flattened_error():
    """Passing a request object plus flattened fields must raise ValueError."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_game_server_cluster(
            game_server_clusters.CreateGameServerClusterRequest(),
            parent="parent_value",
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            game_server_cluster_id="game_server_cluster_id_value",
        )
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_async():
    """Async variant: all three flattened arguments reach the request."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. Only the
        # awaitable wrapper is needed; the previous bare assignment of an
        # Operation was dead code (immediately overwritten) and has been
        # removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with flattened arguments.
        response = await client.create_game_server_cluster(
            parent="parent_value",
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            game_server_cluster_id="game_server_cluster_id_value",
        )

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].game_server_cluster_id
        mock_val = "game_server_cluster_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_game_server_cluster_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_game_server_cluster(
            game_server_clusters.CreateGameServerClusterRequest(),
            parent="parent_value",
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            game_server_cluster_id="game_server_cluster_id_value",
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.PreviewCreateGameServerClusterRequest, dict,]
)
def test_preview_create_game_server_cluster(request_type, transport: str = "grpc"):
    """Exercise the preview-create RPC and check the etag round-trips."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.PreviewCreateGameServerClusterResponse(
            etag="etag_value",
        )
        response = client.preview_create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewCreateGameServerClusterResponse
    )
    assert response.etag == "etag_value"
def test_preview_create_game_server_cluster_empty_call():
    """Calling with no arguments should send the default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        client.preview_create_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.PreviewCreateGameServerClusterRequest,
):
    """Async variant: exercise the preview-create RPC."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewCreateGameServerClusterResponse(
                etag="etag_value",
            )
        )
        response = await client.preview_create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewCreateGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewCreateGameServerClusterResponse
    )
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_async_from_dict():
    """Re-run the async preview-create test with a plain dict request."""
    await test_preview_create_game_server_cluster_async(request_type=dict)
def test_preview_create_game_server_cluster_field_headers():
    """Verify ``parent`` is propagated as x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewCreateGameServerClusterRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = (
            game_server_clusters.PreviewCreateGameServerClusterResponse()
        )
        client.preview_create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_create_game_server_cluster_field_headers_async():
    """Async variant: verify ``parent`` is sent as routing metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewCreateGameServerClusterRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_create_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewCreateGameServerClusterResponse()
        )
        await client.preview_create_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.DeleteGameServerClusterRequest, dict,]
)
def test_delete_game_server_cluster(request_type, transport: str = "grpc"):
    """Exercise the delete RPC; it is long-running and returns a Future."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_game_server_cluster_empty_call():
    """Calling with no arguments should send the default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        client.delete_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.DeleteGameServerClusterRequest,
):
    """Async variant: exercise the long-running delete RPC."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.DeleteGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_game_server_cluster_async_from_dict():
    """Re-run the async delete test passing the request as a plain dict."""
    await test_delete_game_server_cluster_async(request_type=dict)
def test_delete_game_server_cluster_field_headers():
    """Verify ``name`` is propagated as x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.DeleteGameServerClusterRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_game_server_cluster_field_headers_async():
    """Async variant: verify ``name`` is sent as routing metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.DeleteGameServerClusterRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_game_server_cluster_flattened():
    """Verify the flattened ``name`` argument is copied into the request."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a flattened argument.
        client.delete_game_server_cluster(name="name_value",)

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_game_server_cluster_flattened_error():
    """Passing a request object plus flattened fields must raise ValueError."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_game_server_cluster(
            game_server_clusters.DeleteGameServerClusterRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_async():
    """Async variant: the flattened ``name`` argument reaches the request."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub.
    with mock.patch.object(
        type(client.transport.delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. Only the
        # awaitable wrapper is needed; the previous bare assignment of an
        # Operation was dead code (immediately overwritten) and has been
        # removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a flattened argument.
        response = await client.delete_game_server_cluster(name="name_value",)

        # Establish that the underlying call was made with the expected request.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_game_server_cluster_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_game_server_cluster(
            game_server_clusters.DeleteGameServerClusterRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.PreviewDeleteGameServerClusterRequest, dict,]
)
def test_preview_delete_game_server_cluster(request_type, transport: str = "grpc"):
    """Exercise the preview-delete RPC and check the etag round-trips."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = game_server_clusters.PreviewDeleteGameServerClusterResponse(
            etag="etag_value",
        )
        response = client.preview_delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewDeleteGameServerClusterResponse
    )
    assert response.etag == "etag_value"
def test_preview_delete_game_server_cluster_empty_call():
    """Calling with no arguments should send the default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        client.preview_delete_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.PreviewDeleteGameServerClusterRequest,
):
    """Async variant: exercise the preview-delete RPC."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewDeleteGameServerClusterResponse(
                etag="etag_value",
            )
        )
        response = await client.preview_delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewDeleteGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(
        response, game_server_clusters.PreviewDeleteGameServerClusterResponse
    )
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_async_from_dict():
    """Re-run the async preview-delete test with a plain dict request."""
    await test_preview_delete_game_server_cluster_async(request_type=dict)
def test_preview_delete_game_server_cluster_field_headers():
    """Verify ``name`` is propagated as x-goog-request-params metadata."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = (
            game_server_clusters.PreviewDeleteGameServerClusterResponse()
        )
        client.preview_delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_delete_game_server_cluster_field_headers_async():
    """Async variant: verify ``name`` is sent as routing metadata."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URL should be sent as
    # a field header. Set these to a non-empty value.
    request = game_server_clusters.PreviewDeleteGameServerClusterRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.preview_delete_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewDeleteGameServerClusterResponse()
        )
        await client.preview_delete_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.UpdateGameServerClusterRequest, dict,]
)
def test_update_game_server_cluster(request_type, transport: str = "grpc"):
    """Exercise the update RPC; it is long-running and returns a Future."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3, and we are mocking out the actual
    # API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_game_server_cluster(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_game_server_cluster_empty_call():
    """Calling with no arguments still sends the default request message."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        client.update_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_update_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.UpdateGameServerClusterRequest,
):
    """Async UpdateGameServerCluster returns an LRO future."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    request = request_type()
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        # The async stub must return an awaitable, hence the fake call wrapper.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_game_server_cluster(request)
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.UpdateGameServerClusterRequest()
        assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_game_server_cluster_async_from_dict():
    """Plain dict requests are accepted by the async update path."""
    await test_update_game_server_cluster_async(request_type=dict)
def test_update_game_server_cluster_field_headers():
    """Routing header carries ``game_server_cluster.name`` on update."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = game_server_clusters.UpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_game_server_cluster(request)
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "game_server_cluster.name=game_server_cluster.name/value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_game_server_cluster_field_headers_async():
    """Async variant: routing header carries ``game_server_cluster.name``."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = game_server_clusters.UpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_game_server_cluster(request)
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "game_server_cluster.name=game_server_cluster.name/value",
        ) in kw["metadata"]
def test_update_game_server_cluster_flattened():
    """Flattened arguments are assembled into the request message."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call with flattened fields instead of a request object.
        client.update_game_server_cluster(
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # The flattened fields must land on the assembled request.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_game_server_cluster_flattened_error():
    """Passing a request object together with flattened args raises ValueError."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.update_game_server_cluster(
            game_server_clusters.UpdateGameServerClusterRequest(),
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_async():
    """Async variant: flattened arguments are assembled into the request.

    Fix: the generated test assigned a plain Operation to ``call.return_value``
    and then immediately overwrote it with the awaitable fake call — the first
    assignment was dead code and has been removed.
    """
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.update_game_server_cluster), "__call__"
    ) as call:
        # The async stub must return an awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_game_server_cluster(
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # The flattened fields must land on the assembled request.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].game_server_cluster
        mock_val = game_server_clusters.GameServerCluster(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_game_server_cluster_flattened_error_async():
    """Async variant: request object plus flattened args raises ValueError."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.update_game_server_cluster(
            game_server_clusters.UpdateGameServerClusterRequest(),
            game_server_cluster=game_server_clusters.GameServerCluster(
                name="name_value"
            ),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type", [game_server_clusters.PreviewUpdateGameServerClusterRequest, dict,]
)
def test_preview_update_game_server_cluster(request_type, transport: str = "grpc"):
    """PreviewUpdateGameServerCluster returns the response with its etag set."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    request = request_type()
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = game_server_clusters.PreviewUpdateGameServerClusterResponse(
            etag="etag_value",
        )
        response = client.preview_update_game_server_cluster(request)
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(
            response, game_server_clusters.PreviewUpdateGameServerClusterResponse
        )
        assert response.etag == "etag_value"
def test_preview_update_game_server_cluster_empty_call():
    """Calling with no arguments still sends the default request message."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        client.preview_update_game_server_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=game_server_clusters.PreviewUpdateGameServerClusterRequest,
):
    """Async PreviewUpdateGameServerCluster returns the etag-bearing response."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    request = request_type()
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewUpdateGameServerClusterResponse(
                etag="etag_value",
            )
        )
        response = await client.preview_update_game_server_cluster(request)
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == game_server_clusters.PreviewUpdateGameServerClusterRequest()
        assert isinstance(
            response, game_server_clusters.PreviewUpdateGameServerClusterResponse
        )
        assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_async_from_dict():
    """Plain dict requests are accepted by the async preview-update path."""
    await test_preview_update_game_server_cluster_async(request_type=dict)
def test_preview_update_game_server_cluster_field_headers():
    """Routing header carries ``game_server_cluster.name`` on preview-update."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = (
            game_server_clusters.PreviewUpdateGameServerClusterResponse()
        )
        client.preview_update_game_server_cluster(request)
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "game_server_cluster.name=game_server_cluster.name/value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_preview_update_game_server_cluster_field_headers_async():
    """Async variant: routing header carries ``game_server_cluster.name``."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = game_server_clusters.PreviewUpdateGameServerClusterRequest()
    request.game_server_cluster.name = "game_server_cluster.name/value"
    with mock.patch.object(
        type(client.transport.preview_update_game_server_cluster), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            game_server_clusters.PreviewUpdateGameServerClusterResponse()
        )
        await client.preview_update_game_server_cluster(request)
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "game_server_cluster.name=game_server_cluster.name/value",
        ) in kw["metadata"]
def test_credentials_transport_error():
    """Conflicting credential/transport/option combinations raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options=options, transport=transport,
        )
    # It is an error to provide an api_key and credentials together.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = GameServerClustersServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client built around an explicit transport exposes that same instance."""
    grpc_transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    wrapped_client = GameServerClustersServiceClient(transport=grpc_transport)
    assert wrapped_client.transport is grpc_transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    transport = transports.GameServerClustersServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials (ADC)."""
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """gRPC is the default transport when none is specified."""
    default_client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    transport = default_client.transport
    assert isinstance(transport, transports.GameServerClustersServiceGrpcTransport)
def test_game_server_clusters_service_base_transport_error():
    """Supplying both credentials and a credentials file is rejected."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.GameServerClustersServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_game_server_clusters_service_base_transport():
    """Base transport declares all RPC methods but leaves them unimplemented."""
    with mock.patch(
        "google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.GameServerClustersServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every declared RPC method should raise NotImplementedError on the base.
    methods = (
        "list_game_server_clusters",
        "get_game_server_cluster",
        "create_game_server_cluster",
        "preview_create_game_server_cluster",
        "delete_game_server_cluster",
        "preview_delete_game_server_cluster",
        "update_game_server_cluster",
        "preview_update_game_server_cluster",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # The LRO operations client is abstract on the base transport too.
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_game_server_clusters_service_base_transport_with_credentials_file():
    """A credentials file is loaded with the cloud-platform default scope."""
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.GameServerClustersServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_game_server_clusters_service_base_transport_with_adc():
    """Base transport falls back to ADC when no credentials are given."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.gaming_v1beta.services.game_server_clusters_service.transports.GameServerClustersServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.GameServerClustersServiceTransport()
        adc.assert_called_once()
def test_game_server_clusters_service_auth_adc():
    """Client construction requests ADC with the cloud-platform default scope."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        GameServerClustersServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_transport_auth_adc(transport_class):
    """Transports pass user scopes and quota project through to ADC."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.GameServerClustersServiceGrpcTransport, grpc_helpers),
        (transports.GameServerClustersServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_game_server_clusters_service_transport_create_channel(
    transport_class, grpc_helpers
):
    """create_channel gets the service host, scopes and message-size options."""
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "gameservices.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="gameservices.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """mTLS: explicit SSL channel creds and client cert callbacks are honored."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check that if channel is provided via a callback, grpc.ssl_channel_credentials
    # is built with the cert/key pair the callback yields.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_game_server_clusters_service_host_no_port():
    """Default port 443 is appended when the endpoint carries no port."""
    options = client_options.ClientOptions(
        api_endpoint="gameservices.googleapis.com"
    )
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "gameservices.googleapis.com:443"
def test_game_server_clusters_service_host_with_port():
    """An explicit port in the endpoint is preserved verbatim."""
    options = client_options.ClientOptions(
        api_endpoint="gameservices.googleapis.com:8000"
    )
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "gameservices.googleapis.com:8000"
def test_game_server_clusters_service_grpc_transport_channel():
    """A pre-built channel passed to the sync transport is used verbatim.

    Fix: ``== None`` replaced with ``is None`` (PEP 8 — comparisons to the
    None singleton must use identity).
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.GameServerClustersServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_game_server_clusters_service_grpc_asyncio_transport_channel():
    """A pre-built channel passed to the asyncio transport is used verbatim.

    Fix: ``== None`` replaced with ``is None`` (PEP 8 — comparisons to the
    None singleton must use identity).
    """
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.GameServerClustersServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source builds an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The legacy mTLS kwargs emit a DeprecationWarning by design.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.GameServerClustersServiceGrpcTransport,
        transports.GameServerClustersServiceGrpcAsyncIOTransport,
    ],
)
def test_game_server_clusters_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint without a cert source uses ADC SSL creds."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The legacy mTLS kwargs emit a DeprecationWarning by design.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_game_server_clusters_service_grpc_lro_client():
    """gRPC transport lazily builds and caches a sync OperationsClient."""
    client = GameServerClustersServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Repeated property access must return the exact same cached object.
    assert transport.operations_client is transport.operations_client
def test_game_server_clusters_service_grpc_lro_async_client():
    """Asyncio transport lazily builds and caches an OperationsAsyncClient."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Repeated property access must return the exact same cached object.
    assert transport.operations_client is transport.operations_client
def test_game_server_cluster_path():
    """The path helper renders the canonical game-server-cluster resource name."""
    parts = ("squid", "clam", "whelk", "octopus")
    want = "projects/{}/locations/{}/realms/{}/gameServerClusters/{}".format(*parts)
    got = GameServerClustersServiceClient.game_server_cluster_path(*parts)
    assert want == got
def test_parse_game_server_cluster_path():
    """Round-trip: a rendered resource path parses back into its components."""
    components = {
        "project": "oyster",
        "location": "nudibranch",
        "realm": "cuttlefish",
        "cluster": "mussel",
    }
    rendered = GameServerClustersServiceClient.game_server_cluster_path(**components)
    parsed = GameServerClustersServiceClient.parse_game_server_cluster_path(rendered)
    assert components == parsed
def test_common_billing_account_path():
    """The helper renders the canonical billing-account resource name."""
    account = "winkle"
    want = "billingAccounts/{}".format(account)
    got = GameServerClustersServiceClient.common_billing_account_path(account)
    assert want == got
def test_parse_common_billing_account_path():
    """Round-trip: a billing-account path parses back into its components."""
    components = {"billing_account": "nautilus"}
    rendered = GameServerClustersServiceClient.common_billing_account_path(**components)
    parsed = GameServerClustersServiceClient.parse_common_billing_account_path(rendered)
    assert components == parsed
def test_common_folder_path():
    """The helper renders the canonical folder resource name."""
    folder = "scallop"
    want = "folders/{}".format(folder)
    got = GameServerClustersServiceClient.common_folder_path(folder)
    assert want == got
def test_parse_common_folder_path():
    """Round-trip: a folder path parses back into its components."""
    components = {"folder": "abalone"}
    rendered = GameServerClustersServiceClient.common_folder_path(**components)
    parsed = GameServerClustersServiceClient.parse_common_folder_path(rendered)
    assert components == parsed
def test_common_organization_path():
    """The helper renders the canonical organization resource name."""
    organization = "squid"
    want = "organizations/{}".format(organization)
    got = GameServerClustersServiceClient.common_organization_path(organization)
    assert want == got
def test_parse_common_organization_path():
    """Round-trip: an organization path parses back into its components."""
    components = {"organization": "clam"}
    rendered = GameServerClustersServiceClient.common_organization_path(**components)
    parsed = GameServerClustersServiceClient.parse_common_organization_path(rendered)
    assert components == parsed
def test_common_project_path():
    """The helper renders the canonical project resource name."""
    project = "whelk"
    want = "projects/{}".format(project)
    got = GameServerClustersServiceClient.common_project_path(project)
    assert want == got
def test_parse_common_project_path():
    """Round-trip: a project path parses back into its components."""
    components = {"project": "octopus"}
    rendered = GameServerClustersServiceClient.common_project_path(**components)
    parsed = GameServerClustersServiceClient.parse_common_project_path(rendered)
    assert components == parsed
def test_common_location_path():
    """The helper renders the canonical location resource name."""
    parts = ("oyster", "nudibranch")
    want = "projects/{}/locations/{}".format(*parts)
    got = GameServerClustersServiceClient.common_location_path(*parts)
    assert want == got
def test_parse_common_location_path():
    """Round-trip: a location path parses back into its components."""
    components = {
        "project": "cuttlefish",
        "location": "mussel",
    }
    rendered = GameServerClustersServiceClient.common_location_path(**components)
    parsed = GameServerClustersServiceClient.parse_common_location_path(rendered)
    assert components == parsed
def test_client_with_default_client_info():
    """Custom client_info is threaded through to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Via the client constructor...
    with mock.patch.object(
        transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # ...and via a directly-constructed transport.
    with mock.patch.object(
        transports.GameServerClustersServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = GameServerClustersServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Async context-manager exit closes the underlying gRPC channel."""
    client = GameServerClustersServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Sync context-manager exit closes the transport's channel attribute."""
    # NOTE: this local dict shadows the imported ``transports`` module inside
    # the loop; it maps transport name -> private channel attribute to patch.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager closes its transport on exit."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = GameServerClustersServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (
            GameServerClustersServiceClient,
            transports.GameServerClustersServiceGrpcTransport,
        ),
        (
            GameServerClustersServiceAsyncClient,
            transports.GameServerClustersServiceGrpcAsyncIOTransport,
        ),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be constructed with the exchanged credentials.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| true
| true
|
f708cf65433e09e25b9e03b6708df6923be84eac
| 168
|
py
|
Python
|
src/pydantic_vault/__init__.py
|
nymous/pydantic-vault
|
1d35885a9bb588d8f4d788d0a259a4894c207e8d
|
[
"MIT"
] | 26
|
2020-03-13T10:13:15.000Z
|
2022-02-05T17:58:06.000Z
|
src/pydantic_vault/__init__.py
|
nymous/pydantic-vault
|
1d35885a9bb588d8f4d788d0a259a4894c207e8d
|
[
"MIT"
] | 7
|
2020-03-21T14:24:57.000Z
|
2021-09-02T14:03:11.000Z
|
src/pydantic_vault/__init__.py
|
nymous/pydantic-vault
|
1d35885a9bb588d8f4d788d0a259a4894c207e8d
|
[
"MIT"
] | 1
|
2021-06-06T20:53:02.000Z
|
2021-06-06T20:53:02.000Z
|
__version__ = "0.7.1"
from .vault_settings import VaultParameterError, vault_config_settings_source
__all__ = ["vault_config_settings_source", "VaultParameterError"]
| 28
| 77
| 0.827381
|
__version__ = "0.7.1"
from .vault_settings import VaultParameterError, vault_config_settings_source
__all__ = ["vault_config_settings_source", "VaultParameterError"]
| true
| true
|
f708cf955db7b4498d4f38ba6a115c6fb17aea00
| 7,505
|
py
|
Python
|
accelbyte_py_sdk/api/lobby/operations/notification/free_form_notification.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/lobby/operations/notification/free_form_notification.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/lobby/operations/notification/free_form_notification.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
# Auto-generated at 2021-09-27T17:01:26.691956+08:00
# from: Justice Lobby Service (1.33.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelFreeFormNotificationRequest
from ...models import RestapiErrorResponseBody
class FreeFormNotification(Operation):
"""send freeform notification to a user (freeFormNotification)
Properties:
url: /notification/namespaces/{namespace}/freeform
method: POST
tags: notification
consumes: ["application/json"]
produces: ["application/json"]
security: bearer
body: (body) REQUIRED ModelFreeFormNotificationRequest in body
namespace: (namespace) REQUIRED str in path
Responses:
202: Accepted - (Accepted)
400: Bad Request - RestapiErrorResponseBody (Bad Request)
401: Unauthorized - RestapiErrorResponseBody (Unauthorized)
403: Forbidden - RestapiErrorResponseBody (Forbidden)
404: Not Found - RestapiErrorResponseBody (Not Found)
"""
# region fields
_url: str = "/notification/namespaces/{namespace}/freeform"
_method: str = "POST"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
body: ModelFreeFormNotificationRequest # REQUIRED in [body]
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
    @property
    def url(self) -> str:
        """Endpoint path template for this operation."""
        return self._url
    @property
    def method(self) -> str:
        """HTTP method used by this operation (POST)."""
        return self._method
    @property
    def consumes(self) -> List[str]:
        """Accepted request content types."""
        return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
# path params
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
# noinspection PyMethodMayBeStatic
def get_all_required_fields(self) -> List[str]:
return [
"body",
"namespace",
]
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
def is_valid(self) -> bool:
if not hasattr(self, "body") or self.body is None:
return False
if not hasattr(self, "namespace") or self.namespace is None:
return False
return True
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelFreeFormNotificationRequest) -> FreeFormNotification:
self.body = value
return self
def with_namespace(self, value: str) -> FreeFormNotification:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelFreeFormNotificationRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, RestapiErrorResponseBody]]:
"""Parse the given response.
202: Accepted - (Accepted)
400: Bad Request - RestapiErrorResponseBody (Bad Request)
401: Unauthorized - RestapiErrorResponseBody (Unauthorized)
403: Forbidden - RestapiErrorResponseBody (Forbidden)
404: Not Found - RestapiErrorResponseBody (Not Found)
"""
if code == 202:
return HttpResponse.create(code, "Accepted"), None
if code == 400:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 401:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 403:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 404:
return None, RestapiErrorResponseBody.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
# endregion response methods
# region static methods
@classmethod
def create(
cls,
body: ModelFreeFormNotificationRequest,
namespace: str,
) -> FreeFormNotification:
instance = cls()
instance.body = body
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> FreeFormNotification:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelFreeFormNotificationRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelFreeFormNotificationRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
}
# endregion static methods
| 29.664032
| 148
| 0.64024
|
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelFreeFormNotificationRequest
from ...models import RestapiErrorResponseBody
class FreeFormNotification(Operation):
_url: str = "/notification/namespaces/{namespace}/freeform"
_method: str = "POST"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
body: ModelFreeFormNotificationRequest namespace: str
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
def get_all_required_fields(self) -> List[str]:
return [
"body",
"namespace",
]
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
def is_valid(self) -> bool:
if not hasattr(self, "body") or self.body is None:
return False
if not hasattr(self, "namespace") or self.namespace is None:
return False
return True
def with_body(self, value: ModelFreeFormNotificationRequest) -> FreeFormNotification:
self.body = value
return self
def with_namespace(self, value: str) -> FreeFormNotification:
self.namespace = value
return self
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelFreeFormNotificationRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
return result
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, RestapiErrorResponseBody]]:
if code == 202:
return HttpResponse.create(code, "Accepted"), None
if code == 400:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 401:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 403:
return None, RestapiErrorResponseBody.create_from_dict(content)
if code == 404:
return None, RestapiErrorResponseBody.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
@classmethod
def create(
cls,
body: ModelFreeFormNotificationRequest,
namespace: str,
) -> FreeFormNotification:
instance = cls()
instance.body = body
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> FreeFormNotification:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelFreeFormNotificationRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelFreeFormNotificationRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
}
| true
| true
|
f708cfc7280e646718c5d5f20ecb8f4fc890797b
| 2,719
|
py
|
Python
|
Scripts/extract_cds.py
|
sivico26/Bioinfo_errands
|
5cea098f422e1134639e4d6d8aa76098a0c70551
|
[
"MIT"
] | 1
|
2021-01-13T22:07:00.000Z
|
2021-01-13T22:07:00.000Z
|
Scripts/extract_cds.py
|
sivico26/Bioinfo_errands
|
5cea098f422e1134639e4d6d8aa76098a0c70551
|
[
"MIT"
] | null | null | null |
Scripts/extract_cds.py
|
sivico26/Bioinfo_errands
|
5cea098f422e1134639e4d6d8aa76098a0c70551
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from Bio import SeqIO
import argparse
import pathlib
def get_arguments():
    """Parse and return the command-line arguments for the CDS extractor."""
    parser = argparse.ArgumentParser(
        description='Extract CDS from a genbank to output a fasta',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input', type=str, help='Path to input genbank file')
    parser.add_argument('output', type=str, help='Path to put file/folder output')
    parser.add_argument(
        '-i', '--ignore', type=str, metavar='KEY', default=None,
        help="if 'key' matches a CDS name it won't be included in the output")
    parser.add_argument(
        '-m', '--multi', action='store_true',
        help="Specify if the input file is a multigenbank, in which case the CDS of each entry would be extracted in a different fasta file in an output directory in the specified output path")
    return parser.parse_args()
def get_features(record, key=None):
    """Collect the CDS features of *record*, keyed by gene name.

    Args:
        record: a SeqRecord-like object with ``.features``; each feature needs
            ``.type``, ``.qualifiers`` and ``.extract(record)``.
        key: optional substring; genes whose name CONTAINS it are skipped
            (``None`` keeps everything). Defaults to None for backward
            compatibility with callers that pass it positionally.

    Returns:
        dict mapping gene name -> extracted sub-record.
    """
    cds = {}
    for feature in record.features:
        if feature.type != "CDS":
            continue
        # Fixes: the original duplicated this whole loop for the key/no-key
        # cases, used `== None`, and would IndexError on an empty "gene" list.
        genes = feature.qualifiers.get("gene")
        if not genes:
            continue
        gene = genes[0]
        if key is not None and key in gene:
            continue  # caller asked to ignore genes matching this substring
        cds[gene] = feature.extract(record)
    return cds
def reformat(cds):
    """Rename each record in place: id becomes the gene name, description is
    cleared. Returns the same dict for chaining."""
    for gene_name in cds:
        entry = cds[gene_name]
        entry.id = gene_name
        entry.description = ""
    return cds
def main():
    """Entry point: read the genbank input and write the extracted CDS fasta(s)."""
    args = get_arguments()
    if args.multi:
        # One fasta per genbank record, under <output>/extract_cds_output/.
        taxa = {}
        for rec in SeqIO.parse(args.input, "gb"):
            specie = rec.annotations["organism"].replace(" ", "_")
            taxa[specie] = reformat(get_features(rec, args.ignore))

        ## Create directory
        out_dir = pathlib.Path(args.output) / "extract_cds_output"
        out_dir.mkdir(parents=True, exist_ok=True)

        ## Write fastas
        for specie, genes in taxa.items():
            SeqIO.write(genes.values(), str(out_dir / (specie + ".fasta")), "fasta")
    else:
        rec = SeqIO.read(args.input, "gb")
        cds = reformat(get_features(rec, args.ignore))
        # Bug fix: the original used args.output.strip("/"), which removed a
        # LEADING slash too and therefore broke absolute output paths.
        filename = args.output.rstrip("/")
        SeqIO.write(cds.values(), filename, "fasta")
if __name__ == '__main__':
main()
| 34.417722
| 250
| 0.598014
|
from Bio import SeqIO
import argparse
import pathlib
def get_arguments():
parser = argparse.ArgumentParser(description='Extract CDS from a genbank to output a fasta',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input', type=str,
help='Path to input genbank file')
parser.add_argument('output', type=str,help='Path to put file/folder output')
parser.add_argument('-i', '--ignore', type=str, metavar = 'KEY', default=None, help="if 'key' matches a CDS name it won't be included in the output")
parser.add_argument('-m', '--multi', action ='store_true', help = "Specify if the input file is a multigenbank, in which case the CDS of each entry would be extracted in a different fasta file in an output directory in the specified output path")
args = parser.parse_args()
return args
def get_features(record, key):
cds = {}
if key == None:
for i,ft in enumerate(record.features):
if ft.type == "CDS":
if "gene" in ft.qualifiers.keys():
gene = ft.qualifiers["gene"][0]
cds[gene] = ft.extract(record)
else:
for i,ft in enumerate(record.features):
if ft.type == "CDS":
if "gene" in ft.qualifiers.keys():
if key not in ft.qualifiers["gene"][0]:
gene = ft.qualifiers["gene"][0]
cds[gene] = ft.extract(record)
return cds
def reformat(cds):
for gene, record in cds.items():
record.id = gene
record.description = ""
return cds
def main():
args = get_arguments()
#if args.ignore == None:
# args.ignore == ""
if args.multi is True:
recs = SeqIO.parse(args.input,"gb")
taxa = {}
for rec in recs:
specie = rec.annotations["organism"].replace(" ","_")
taxa[specie] = reformat(get_features(rec, args.ignore))
## Create directory
pathlib.Path(args.output.rstrip("/")+'/extract_cds_output').mkdir(parents=True, exist_ok=True)
## Write fastas
for specie, genes in taxa.items():
filepath = args.output.rstrip("/")+'/extract_cds_output'+"/"+specie+".fasta"
SeqIO.write(genes.values(),filepath,"fasta")
else:
rec = SeqIO.read(args.input, "gb")
aux = get_features(rec, args.ignore)
cds = reformat(aux)
## Write filenames
filename = args.output.strip("/")
# filename = args.output.strip("/") + "/" + rec.annotations["organism"].replace(" ","_") + ".fasta"
SeqIO.write(cds.values(), filename, "fasta")
if __name__ == '__main__':
main()
| true
| true
|
f708d0b021de0c328c38f3e3891f14b4ae0db94c
| 1,479
|
py
|
Python
|
backend/stock/workers/get_valuation_ratio.py
|
fengxia41103/stock
|
1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1
|
[
"MIT"
] | 1
|
2021-09-30T05:25:08.000Z
|
2021-09-30T05:25:08.000Z
|
backend/stock/workers/get_valuation_ratio.py
|
fengxia41103/stock
|
1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1
|
[
"MIT"
] | 8
|
2021-09-30T05:27:09.000Z
|
2021-12-03T23:02:24.000Z
|
backend/stock/workers/get_valuation_ratio.py
|
fengxia41103/stock
|
1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1
|
[
"MIT"
] | 3
|
2021-09-29T05:11:45.000Z
|
2021-10-31T07:26:31.000Z
|
import logging
import pandas as pd
from stock.models import MyStock
from stock.models import ValuationRatio
from yahooquery import Ticker
logger = logging.getLogger("stock")
class MyValuationRatio:
    """Fetch Yahoo Finance valuation measures for one stock symbol and persist
    them as ValuationRatio rows keyed by (stock, date)."""

    def __init__(self, symbol):
        # Raises MyStock.DoesNotExist when the symbol is not in the DB.
        self.stock = MyStock.objects.get(symbol=symbol)

    def get(self):
        """Download valuation measures and upsert one ValuationRatio per date.

        Rows whose ratios are all zero are deleted afterwards.
        """
        s = Ticker(self.stock.symbol, timeout=15)

        # all numbers convert to million
        df = s.valuation_measures
        # NOTE(review): these substring checks assume yahooquery returns a
        # dict/str containing "unavailable"/"error" on failure rather than a
        # DataFrame — confirm against the yahooquery version in use.
        if "unavailable" in df or "error" in df:
            logger.error("{}: {}".format(self.stock.symbol, df))
            return

        # DB doesn't like NaN
        df = df.where(pd.notnull(df), 0)

        # model field name -> yahooquery column name
        mapping = {
            "forward_pe": "ForwardPeRatio",
            "pb": "PbRatio",
            "pe": "PeRatio",
            "peg": "PegRatio",
            "ps": "PsRatio",
        }

        # enumerate data frame
        for row in df.itertuples(index=False):
            # upsert by (stock, date); `created` is unused but returned by the API
            i, created = ValuationRatio.objects.get_or_create(
                stock=self.stock, on=row.asOfDate.date()
            )
            for key, val in mapping.items():
                try:
                    tmp = float(getattr(row, val))
                except AttributeError:
                    # column absent for this period: store 0 as a sentinel
                    tmp = 0

                # set value
                setattr(i, key, tmp)
            i.save()

        # if all values are 0, discard the record
        ValuationRatio.objects.filter(
            forward_pe=0, pb=0, pe=0, peg=0, ps=0
        ).delete()
| 26.410714
| 64
| 0.536173
|
import logging
import pandas as pd
from stock.models import MyStock
from stock.models import ValuationRatio
from yahooquery import Ticker
logger = logging.getLogger("stock")
class MyValuationRatio:
def __init__(self, symbol):
self.stock = MyStock.objects.get(symbol=symbol)
def get(self):
s = Ticker(self.stock.symbol, timeout=15)
df = s.valuation_measures
if "unavailable" in df or "error" in df:
logger.error("{}: {}".format(self.stock.symbol, df))
return
df = df.where(pd.notnull(df), 0)
mapping = {
"forward_pe": "ForwardPeRatio",
"pb": "PbRatio",
"pe": "PeRatio",
"peg": "PegRatio",
"ps": "PsRatio",
}
# enumerate data frame
for row in df.itertuples(index=False):
i, created = ValuationRatio.objects.get_or_create(
stock=self.stock, on=row.asOfDate.date()
)
for key, val in mapping.items():
try:
tmp = float(getattr(row, val))
except AttributeError:
tmp = 0
# set value
setattr(i, key, tmp)
i.save()
# if all values are 0, discard the record
ValuationRatio.objects.filter(
forward_pe=0, pb=0, pe=0, peg=0, ps=0
).delete()
| true
| true
|
f708d15462b81866e50f7f7ce288da9790a439e4
| 13,342
|
py
|
Python
|
slybot/slybot/utils.py
|
1583582847/-
|
11e10d5ffc0bf7f25534f5f444f59e9b792b42b8
|
[
"BSD-3-Clause"
] | 1
|
2019-01-03T02:16:01.000Z
|
2019-01-03T02:16:01.000Z
|
slybot/slybot/utils.py
|
1583582847/-
|
11e10d5ffc0bf7f25534f5f444f59e9b792b42b8
|
[
"BSD-3-Clause"
] | null | null | null |
slybot/slybot/utils.py
|
1583582847/-
|
11e10d5ffc0bf7f25534f5f444f59e9b792b42b8
|
[
"BSD-3-Clause"
] | null | null | null |
from six.moves.urllib_parse import urlparse
import chardet
import itertools
import json
import os
import re
import six
from collections import OrderedDict, namedtuple
from itertools import chain
from scrapely.htmlpage import HtmlPage, HtmlTagType
from scrapy.utils.misc import load_object
from w3lib.encoding import html_body_declared_encoding
TAGID = u"data-tagid"
GENERATEDTAGID = u"data-genid"
OPEN_TAG = HtmlTagType.OPEN_TAG
CLOSE_TAG = HtmlTagType.CLOSE_TAG
UNPAIRED_TAG = HtmlTagType.UNPAIRED_TAG
# Encodings: https://w3techs.com/technologies/overview/character_encoding/all
ENCODINGS = ['UTF-8', 'ISO-8859-1', 'Windows-1251', 'Shift JIS',
'Windows-1252', 'GB2312', 'EUC-KR', 'EUC-JP', 'GBK', 'ISO-8859-2',
'Windows-1250', 'ISO-8859-15', 'Windows-1256', 'ISO-8859-9',
'Big5', 'Windows-1254', 'Windows-874']
MimeType = namedtuple('MimeType', ['type', 'maintype', 'subtype', 'params'])
def content_type(response):
    """Parse the response's Content-Type header into a MimeType named tuple.

    The params component is not parsed yet and is always returned as [].
    """
    full_content_type = decode(response.headers.get('Content-Type') or u'')
    type_ = full_content_type.split(';', 1)
    split = type_[0].split('/', 1)
    if len(split) < 2:
        # Bug fix: the original assigned the whole `type_` LIST to maintype
        # when the header had no "/" (e.g. "text"); use the string instead.
        maintype = split[0]
        subtype = ''
    else:
        maintype, subtype = split
    # Parse params if needed
    return MimeType(full_content_type, maintype, subtype, [])
def encode(html, default=None):
    """Return *html* as bytes; text is encoded via the codec-fallback helper."""
    if not isinstance(html, six.binary_type):
        html = _encode_or_decode_string(html, type(html).encode, default)
    return html
def decode(html, default=None):
    """Return *html* as text; bytes are decoded via the codec-fallback helper."""
    if not isinstance(html, six.text_type):
        html = _encode_or_decode_string(html, type(html).decode, default)
    return html
def _encode_or_decode_string(html, method, default):
    """Apply *method* (an unbound str.encode / bytes.decode) to *html*, trying
    candidate codecs in order.

    Candidate order: the caller-supplied *default* (a codec name or a list of
    them), else any encoding declared in the HTML body, then the common-web
    ENCODINGS list, and finally whatever chardet detects.
    """
    if not default:
        # no explicit default: fall back to the encoding declared in the page
        encoding = html_body_declared_encoding(html)
        if encoding:
            default = [encoding]
        else:
            default = []
    elif isinstance(default, six.string_types):
        # normalize a single codec name to a one-element list
        default = [default]
    for encoding in itertools.chain(default, ENCODINGS):
        try:
            return method(html, encoding)
        except (UnicodeDecodeError, UnicodeEncodeError, LookupError):
            # wrong codec or unknown codec name: try the next candidate
            pass
        except AttributeError:
            # *html* presumably is already the target type — return unchanged
            return html
    # every candidate failed: let chardet guess (may still raise)
    encoding = chardet.detect(html).get('encoding')
    return method(html, encoding)
def iter_unique_scheme_hostname(urls):
    """Return the distinct (scheme, hostname) pairs found in *urls* as a list."""
    pairs = {(parsed.scheme, parsed.hostname) for parsed in map(urlparse, urls)}
    return list(pairs)
def open_project_from_dir(project_dir):
    """Load a slybot project layout rooted at *project_dir*.

    Returns a dict with a lazy SpiderLoader under "spiders" plus the parsed
    project.json / items.json / extractors.json (an empty dict when a file is
    missing).
    """
    storage = Storage(project_dir)
    specs = {"spiders": SpiderLoader(storage)}
    for name in ['project', 'items', 'extractors']:
        try:
            specs[name] = storage.open('{}.json'.format(name))
        except IOError:
            # these spec files are optional: a missing one means an empty spec
            specs[name] = {}
    return specs
def read(fp, encoding='utf-8'):
    """Read all of *fp* and return text.

    Bytes content is decoded with *encoding*; text content is returned as-is.
    Bug fix: the original accepted an ``encoding`` parameter but always decoded
    with hard-coded 'utf-8'.
    """
    content = fp.read()
    if hasattr(content, 'decode'):
        content = content.decode(encoding)
    return content
def _build_sample(sample, legacy=False):
    """Run the annotations builder over *sample* in place and mark it built.

    The import is local to avoid a circular import with the plugin package.
    """
    from slybot.plugins.scrapely_annotations.builder import Annotations
    Annotations(sample, legacy=legacy).build()
    sample['annotated'] = True
    return sample
def htmlpage_from_response(response, _add_tagids=False):
    """Build a scrapely HtmlPage from a scrapy response.

    When *_add_tagids* is true, every open/unpaired tag is first stamped with a
    data-tagid attribute (see add_tagids).
    """
    body = response.body_as_unicode()
    if _add_tagids:
        body = add_tagids(body)
    return HtmlPage(response.url, response.headers, body,
                    encoding=response.encoding)
def load_plugins(settings):
    """Return the plugin classes: pre-loaded ones when cached in the settings,
    otherwise the configured PLUGINS paths (resolved), else the default
    Annotations plugin."""
    loaded = settings.get('LOADED_PLUGINS', None)
    if loaded:
        return loaded
    plugin_paths = settings['PLUGINS']
    if not plugin_paths:
        from slybot.plugins.scrapely_annotations import Annotations
        return [Annotations]
    return [load_object(p) if isinstance(p, str) else p for p in plugin_paths]
def load_plugin_names(settings):
    """
    Generate a unique name for a plugin based on the class name module name
    and path

    >>> settings = {'PLUGINS': ['a', 'b.c', 'a.c']}
    >>> load_plugin_names(settings)
    ['a', 'c', 'a.c']
    """
    seen = set()  # names already handed out; drives de-duplication

    def generate_name(path, maxsplit=0, splits=None):
        # Start from the last dotted component of *path*; on a name collision,
        # recurse keeping one more trailing component until the name is unique
        # (or the whole path is used).
        if splits is None:
            splits = len(path.split('.')) - 1
        name = '.'.join(path.split('.', splits - maxsplit)[-1].rsplit('.',
                        maxsplit))
        if name not in seen or maxsplit >= splits:
            seen.add(name)
            return name
        return generate_name(path, maxsplit + 1, splits)
    if settings['PLUGINS']:
        return [generate_name(path) for path in settings['PLUGINS']]
    else:
        # mirrors load_plugins(): the default plugin set is just Annotations
        return ['Annotations']
def include_exclude_filter(include_patterns, exclude_patterns):
    """Build a URL predicate from include/exclude regex pattern lists.

    Returns a callable that is truthy for strings matching any include pattern
    (when given) and no exclude pattern (when given). With neither list it
    returns ``bool``, i.e. any non-empty string passes.

    Fixes: the original duplicated the pattern-compilation logic for both
    lists and assigned lambdas to names (PEP 8 E731).
    """
    def _searcher(patterns):
        # Compile one alternation over all patterns; a single pattern is used
        # verbatim to preserve its own grouping behavior.
        if len(patterns) == 1:
            pattern = patterns[0]
        else:
            pattern = "(?:%s)" % '|'.join(patterns)
        return re.compile(pattern).search

    includef = _searcher(include_patterns) if include_patterns else None
    excludef = _searcher(exclude_patterns) if exclude_patterns else None

    if includef and excludef:
        return lambda x: includef(x) and not excludef(x)
    if excludef:
        return lambda x: not excludef(x)
    if includef:
        return includef
    return bool
class IndexedDict(OrderedDict):
    """
    Ordered dictionary where values can also be obtained by their index as if
    they were in a list

    >>> idd = IndexedDict([('spam', 1), ('eggs', 2), ('bacon', 3)])
    >>> idd['spam']
    1
    >>> idd[0]
    1
    >>> idd['bacon']
    3
    >>> idd[2]
    3
    >>> idd[2] = 'ham'
    Traceback (most recent call last):
        ...
    TypeError: keys must not be an integers
    >>> idd[3]
    Traceback (most recent call last):
        ...
    IndexError: index out of range
    """
    def __setitem__(self, key, value):
        # Integer keys are reserved for positional reads, so writing one is an
        # error by design.
        if isinstance(key, int):
            raise TypeError("keys must not be an integers")
        super(IndexedDict, self).__setitem__(key, value)

    def __getitem__(self, key):
        if not isinstance(key, int):
            return super(IndexedDict, self).__getitem__(key)
        if key >= len(self):
            raise IndexError('index out of range')
        # Walk the insertion order until the requested position is reached.
        for position, name in enumerate(self):
            if position == key:
                return super(IndexedDict, self).__getitem__(name)
        # Negative indices fall through and behave like ordinary missing keys.
        return super(IndexedDict, self).__getitem__(key)
def _quotify(mystr):
"""
quotifies an html tag attribute value.
Assumes then, that any ocurrence of ' or " in the
string is escaped if original string was quoted
with it.
So this function does not altere the original string
except for quotation at both ends, and is limited just
to guess if string must be quoted with '"' or "'"
"""
quote = '"'
l = len(mystr)
for i in range(l):
if mystr[i] == "\\" and i + 1 < l and mystr[i + 1] == "'":
quote = "'"
break
elif mystr[i] == "\\" and i + 1 < l and mystr[i + 1] == '"':
quote = '"'
break
elif mystr[i] == "'":
quote = '"'
break
elif mystr[i] == '"':
quote = "'"
break
return quote + mystr + quote
def serialize_tag(tag):
    """
    Converts a tag into a string when a slice [tag.start:tag.end]
    over the source can't be used because tag has been modified
    """
    out = "<"
    if tag.tag_type == HtmlTagType.CLOSE_TAG:
        out += "/"
    out += tag.tag

    attributes = []
    for key, val in tag.attributes.items():
        aout = key
        # attributes with a None value are emitted bare (e.g. ``disabled``)
        if val is not None:
            aout += "=" + _quotify(val)
        attributes.append(aout)
    if attributes:
        out += " " + " ".join(attributes)

    if tag.tag_type == HtmlTagType.UNPAIRED_TAG:
        # self-closing form, e.g. <br/>
        out += "/"
    return out + ">"
def _must_add_tagid(element):
    """True for tag elements that should receive a data-tagid attribute:
    opening/unpaired tags, excluding <ins>."""
    is_tag = hasattr(element, 'tag_type') and hasattr(element, 'tag')
    return bool(is_tag and element.tag_type != CLOSE_TAG and element.tag != 'ins')
def _modify_tagids(source, add=True):
    """Add or remove tags ids to/from HTML document"""
    output = []
    tagcount = 0  # sequential id assigned to each eligible tag
    if not isinstance(source, HtmlPage):
        # accept a raw string body as well as an HtmlPage
        source = HtmlPage(body=source)
    for element in source.parsed_body:
        if _must_add_tagid(element):
            if add:
                element.attributes[TAGID] = str(tagcount)
                tagcount += 1
            else:  # Remove previously added tagid
                element.attributes.pop(TAGID, None)
            # the element was mutated, so it must be re-serialized by hand
            output.append(serialize_tag(element))
        else:
            # untouched elements are copied verbatim from the original body
            output.append(source.body[element.start:element.end])
    return u''.join(output)
def add_tagids(source):
    """Return *source* with a unique data-tagid attribute stamped onto every
    eligible tag, so elements can be identified later during annotation."""
    return _modify_tagids(source, add=True)
def remove_tagids(source):
    """Return *source* stripped of any data-tagid attributes previously added
    by add_tagids()."""
    return _modify_tagids(source, add=False)
class Storage(object):
    """Minimal filesystem accessor rooted at an absolute *base_path*."""

    def __init__(self, base_path):
        self.base_path = os.path.abspath(base_path)

    def rel_path(self, *args):
        """Join path components with the platform separator (no base prefix)."""
        return os.sep.join(args)

    def _path(self, *args):
        """Absolute path of the given components under the storage root."""
        return os.path.join(self.base_path, self.rel_path(*args))

    def isdir(self, *args, **kwargs):
        """True when the target under the root is a directory."""
        return os.path.isdir(self._path(*args), **kwargs)

    def listdir(self, *args, **kwargs):
        """Directory listing of the target under the root."""
        return os.listdir(self._path(*args), **kwargs)

    def open(self, *args, **kwargs):
        """Open files from filesystem."""
        raw = kwargs.pop('raw', False)
        with open(self._path(*args), encoding='utf-8') as handle:
            if raw:
                return decode(handle.read())
            return json.load(handle)
class SpiderLoader(object):
    """Lazy, dict-like loader for spider specs stored as JSON under spiders/.

    *storage* is either a base-directory path (wrapped in a ``Storage``) or any
    object providing ``isdir``, ``listdir``, ``open`` and ``rel_path``.
    Specs are parsed on first access and cached.
    """

    def __init__(self, storage):
        if isinstance(storage, six.string_types):
            self.storage = Storage(storage)
        else:
            fsattrs = ['isdir', 'listdir', 'open', 'rel_path']
            if any(not hasattr(storage, attr) for attr in fsattrs):
                raise TypeError('Storage class must have "{}" methods'.format(
                    '", "'.join(fsattrs)))
            self.storage = storage
        self.spider_dir = self.storage.rel_path('spiders')
        # every "<name>.json" directly under spiders/ names a spider
        self.spider_names = {
            s[:-len('.json')] for s in self.storage.listdir(self.spider_dir)
            if s.endswith('.json')
        }
        self._spiders = {}  # cache: spider name -> parsed spec

    def __getitem__(self, key):
        """Return the (cached) spec for spider *key*; KeyError when unknown."""
        if key not in self.spider_names:
            raise KeyError('The spider "{}" does not exist'.format(key))
        if key not in self._spiders:
            self._spiders[key] = self.load_spider(key)
        return self._spiders[key]

    def load_spider(self, spider_name):
        """Parse one spider spec and build its samples (templates).

        Raises ValueError tagged with the spider name when the spec is invalid.
        """
        spec = self.storage.open(self.spider_dir,
                                 '{}.json'.format(spider_name))
        try:
            if spec.get('templates'):
                templates = []
                for template in spec.get('templates', []):
                    # samples from slybot < 0.13.0 are kept as-is; newer ones
                    # need their annotations built before use
                    if template.get('version', '') < '0.13.0':
                        templates.append(template)
                    else:
                        templates.append(_build_sample(template))
                spec['templates'] = templates
            else:
                templates = self.load_external_templates(self.spider_dir,
                                                         spider_name)
                spec.setdefault("templates", []).extend(templates)
            return spec
        except ValueError as e:
            raise ValueError(
                "Error parsing spider (invalid JSON): %s: %s" %
                (spider_name, e)
            )

    def keys(self):
        """Iterate over the names of all known spiders."""
        for spider_name in self.spider_names:
            yield spider_name

    def items(self):
        """Iterate over (name, spec) pairs, already-cached specs first."""
        # snapshot the cache keys so lazy loading below cannot mutate a dict
        # while it is being iterated
        spiders = chain(list(self._spiders),
                        self.spider_names - set(self._spiders))
        for spider_name in spiders:
            yield spider_name, self[spider_name]

    def values(self):
        """Iterate over all spider specs."""
        for _, spider in self.items():
            yield spider

    def load_external_templates(self, spec_base, spider_name):
        """A generator yielding the content of all external sample files for
        `spider_name`. (*spec_base* is unused; kept for interface
        compatibility.)
        """
        spider_dir = self.storage.rel_path('spiders', spider_name)
        if not self.storage.isdir(spider_dir):
            # Bug fix (PEP 479): `raise StopIteration` inside a generator
            # becomes a RuntimeError on Python 3.7+; a bare return ends the
            # generator safely.
            return
        for name in self.storage.listdir(spider_dir):
            if not name.endswith('.json'):
                continue
            path = self.storage.rel_path(spider_dir, name)
            sample = self.storage.open(path)
            if not sample:
                continue
            # a sibling directory "<sample>/" holds large html bodies split
            # out of the JSON; fold them back into the sample dict
            sample_dir = path[:-len('.json')]
            if self.storage.isdir(sample_dir):
                for fname in self.storage.listdir(sample_dir):
                    if fname.endswith('.html'):
                        attr = fname[:-len('.html')]
                        html = self.storage.open(sample_dir, fname, raw=1)
                        sample[attr] = html
            if 'original_body' not in sample:
                sample['original_body'] = u'<html></html>'
            version = sample.get('version', '')
            yield _build_sample(sample, legacy=version < '0.13.0')
| 32.227053
| 79
| 0.590766
|
from six.moves.urllib_parse import urlparse
import chardet
import itertools
import json
import os
import re
import six
from collections import OrderedDict, namedtuple
from itertools import chain
from scrapely.htmlpage import HtmlPage, HtmlTagType
from scrapy.utils.misc import load_object
from w3lib.encoding import html_body_declared_encoding
TAGID = u"data-tagid"
GENERATEDTAGID = u"data-genid"
OPEN_TAG = HtmlTagType.OPEN_TAG
CLOSE_TAG = HtmlTagType.CLOSE_TAG
UNPAIRED_TAG = HtmlTagType.UNPAIRED_TAG
ENCODINGS = ['UTF-8', 'ISO-8859-1', 'Windows-1251', 'Shift JIS',
'Windows-1252', 'GB2312', 'EUC-KR', 'EUC-JP', 'GBK', 'ISO-8859-2',
'Windows-1250', 'ISO-8859-15', 'Windows-1256', 'ISO-8859-9',
'Big5', 'Windows-1254', 'Windows-874']
MimeType = namedtuple('MimeType', ['type', 'maintype', 'subtype', 'params'])
def content_type(response):
full_content_type = decode(response.headers.get('Content-Type') or u'')
type_ = full_content_type.split(';', 1)
split = type_[0].split('/', 1)
if len(split) < 2:
maintype = type_
subtype = ''
else:
maintype, subtype = split
return MimeType(full_content_type, maintype, subtype, [])
def encode(html, default=None):
if isinstance(html, six.binary_type):
return html
return _encode_or_decode_string(html, type(html).encode, default)
def decode(html, default=None):
if isinstance(html, six.text_type):
return html
return _encode_or_decode_string(html, type(html).decode, default)
def _encode_or_decode_string(html, method, default):
if not default:
encoding = html_body_declared_encoding(html)
if encoding:
default = [encoding]
else:
default = []
elif isinstance(default, six.string_types):
default = [default]
for encoding in itertools.chain(default, ENCODINGS):
try:
return method(html, encoding)
except (UnicodeDecodeError, UnicodeEncodeError, LookupError):
pass
except AttributeError:
return html
encoding = chardet.detect(html).get('encoding')
return method(html, encoding)
def iter_unique_scheme_hostname(urls):
scheme_hostname = set()
for x in urls:
p = urlparse(x)
scheme_hostname.add((p.scheme, p.hostname))
return list(scheme_hostname)
def open_project_from_dir(project_dir):
storage = Storage(project_dir)
specs = {"spiders": SpiderLoader(storage)}
for name in ['project', 'items', 'extractors']:
try:
specs[name] = storage.open('{}.json'.format(name))
except IOError:
specs[name] = {}
return specs
def read(fp, encoding='utf-8'):
content = fp.read()
if hasattr(content, 'decode'):
content = content.decode('utf-8')
return content
def _build_sample(sample, legacy=False):
from slybot.plugins.scrapely_annotations.builder import Annotations
Annotations(sample, legacy=legacy).build()
sample['annotated'] = True
return sample
def htmlpage_from_response(response, _add_tagids=False):
body = response.body_as_unicode()
if _add_tagids:
body = add_tagids(body)
return HtmlPage(response.url, response.headers, body,
encoding=response.encoding)
def load_plugins(settings):
if settings.get('LOADED_PLUGINS', None):
return settings.get('LOADED_PLUGINS', None)
plugins = settings['PLUGINS']
if plugins:
return [load_object(p) if isinstance(p, str) else p for p in plugins]
else:
from slybot.plugins.scrapely_annotations import Annotations
return [Annotations]
def load_plugin_names(settings):
seen = set()
def generate_name(path, maxsplit=0, splits=None):
if splits is None:
splits = len(path.split('.')) - 1
name = '.'.join(path.split('.', splits - maxsplit)[-1].rsplit('.',
maxsplit))
if name not in seen or maxsplit >= splits:
seen.add(name)
return name
return generate_name(path, maxsplit + 1, splits)
if settings['PLUGINS']:
return [generate_name(path) for path in settings['PLUGINS']]
else:
return ['Annotations']
def include_exclude_filter(include_patterns, exclude_patterns):
filterf = None
includef = None
if include_patterns:
pattern = include_patterns[0] if len(include_patterns) == 1 else \
"(?:%s)" % '|'.join(include_patterns)
includef = re.compile(pattern).search
filterf = includef
if exclude_patterns:
pattern = exclude_patterns[0] if len(exclude_patterns) == 1 else \
"(?:%s)" % '|'.join(exclude_patterns)
excludef = re.compile(pattern).search
if not includef:
filterf = lambda x: not excludef(x)
else:
filterf = lambda x: includef(x) and not excludef(x)
return filterf if filterf else bool
class IndexedDict(OrderedDict):
def __setitem__(self, key, value):
if isinstance(key, int):
raise TypeError("keys must not be an integers")
super(IndexedDict, self).__setitem__(key, value)
def __getitem__(self, key):
if isinstance(key, int):
if key >= len(self):
raise IndexError('index out of range')
for i, k in enumerate(self):
if i == key:
key = k
break
return super(IndexedDict, self).__getitem__(key)
def _quotify(mystr):
quote = '"'
l = len(mystr)
for i in range(l):
if mystr[i] == "\\" and i + 1 < l and mystr[i + 1] == "'":
quote = "'"
break
elif mystr[i] == "\\" and i + 1 < l and mystr[i + 1] == '"':
quote = '"'
break
elif mystr[i] == "'":
quote = '"'
break
elif mystr[i] == '"':
quote = "'"
break
return quote + mystr + quote
def serialize_tag(tag):
    """Render a parsed HTML tag object back into markup text.

    Close tags get a leading slash, unpaired tags a trailing one; valueless
    attributes are emitted bare, all others as ``key=<quoted value>`` via
    ``_quotify``.
    """
    parts = ["<"]
    if tag.tag_type == HtmlTagType.CLOSE_TAG:
        parts.append("/")
    parts.append(tag.tag)
    rendered_attrs = [
        key if value is None else key + "=" + _quotify(value)
        for key, value in tag.attributes.items()
    ]
    if rendered_attrs:
        parts.append(" " + " ".join(rendered_attrs))
    if tag.tag_type == HtmlTagType.UNPAIRED_TAG:
        parts.append("/")
    parts.append(">")
    return "".join(parts)
def _must_add_tagid(element):
return (hasattr(element, 'tag_type') and
hasattr(element, 'tag') and
element.tag_type != CLOSE_TAG and
element.tag != 'ins')
def _modify_tagids(source, add=True):
    """Rewrite *source* HTML, adding (``add=True``) or stripping sequential
    ``tagid`` attributes on every qualifying open tag.

    Accepts raw markup or an ``HtmlPage``; returns the rewritten markup as
    a unicode string.
    """
    if not isinstance(source, HtmlPage):
        source = HtmlPage(body=source)
    pieces = []
    next_id = 0
    for element in source.parsed_body:
        if not _must_add_tagid(element):
            # Text nodes, close tags and <ins> are copied through verbatim.
            pieces.append(source.body[element.start:element.end])
            continue
        if add:
            element.attributes[TAGID] = str(next_id)
            next_id += 1
        else:  # strip a previously added tagid, if any
            element.attributes.pop(TAGID, None)
        pieces.append(serialize_tag(element))
    return u''.join(pieces)
def add_tagids(source):
    """Return *source* HTML with a sequential tagid attribute on every open tag."""
    return _modify_tagids(source, add=True)
def remove_tagids(source):
    """Return *source* HTML with all previously added tagid attributes stripped."""
    return _modify_tagids(source, add=False)
class Storage(object):
    """Thin filesystem wrapper rooted at a base directory.

    Path arguments are always given as separate segments relative to the
    root; ``open`` parses JSON by default and returns decoded raw text when
    called with ``raw=True``.
    """

    def __init__(self, base_path):
        self.base_path = os.path.abspath(base_path)

    def rel_path(self, *args):
        """Join *args* with the platform separator (no base prefix)."""
        return os.sep.join(args)

    def _path(self, *args):
        """Absolute path of *args* under the storage root."""
        return os.path.join(self.base_path, self.rel_path(*args))

    def isdir(self, *args, **kwargs):
        """True when *args* names a directory under the root."""
        return os.path.isdir(self._path(*args), **kwargs)

    def listdir(self, *args, **kwargs):
        """Directory listing of *args* under the root."""
        return os.listdir(self._path(*args), **kwargs)

    def open(self, *args, **kwargs):
        """Read the UTF-8 file at *args*: decoded text if ``raw`` is truthy,
        parsed JSON otherwise."""
        raw = kwargs.pop('raw', False)
        with open(self._path(*args), encoding='utf-8') as stream:
            if raw:
                return decode(stream.read())
            return json.load(stream)
class SpiderLoader(object):
    """Lazily load spider specifications stored as JSON under <root>/spiders/.

    ``storage`` is either a base-directory path (wrapped in ``Storage``) or
    any object providing ``isdir``/``listdir``/``open``/``rel_path``.
    Parsed specs are cached per spider name.
    """

    def __init__(self, storage):
        if isinstance(storage, six.string_types):
            self.storage = Storage(storage)
        else:
            # Duck-typed storage: require the minimal filesystem interface.
            fsattrs = ['isdir', 'listdir', 'open', 'rel_path']
            if any(not hasattr(storage, attr) for attr in fsattrs):
                raise TypeError('Storage class must have "{}" methods'.format(
                    '", "'.join(fsattrs)))
            self.storage = storage
        self.spider_dir = self.storage.rel_path('spiders')
        # A spider name is the basename of each *.json file in spiders/.
        self.spider_names = {
            s[:-len('.json')] for s in self.storage.listdir(self.spider_dir)
            if s.endswith('.json')
        }
        self._spiders = {}  # cache: spider name -> parsed spec

    def __getitem__(self, key):
        """Return the (cached) spec for spider *key*; KeyError if unknown."""
        if key not in self.spider_names:
            raise KeyError('The spider "{}" does not exist'.format(key))
        if key not in self._spiders:
            self._spiders[key] = self.load_spider(key)
        return self._spiders[key]

    def load_spider(self, spider_name):
        """Parse spiders/<name>.json and normalise its templates.

        Raises ValueError, annotated with the spider name, when the stored
        JSON is invalid.
        """
        spec = self.storage.open(self.spider_dir,
                                 '{}.json'.format(spider_name))
        try:
            if spec.get('templates'):
                templates = []
                for template in spec.get('templates', []):
                    # NOTE: lexicographic version comparison, kept as-is for
                    # compatibility (e.g. '0.9.0' compares greater than
                    # '0.13.0').
                    if template.get('version', '') < '0.13.0':
                        templates.append(template)
                    else:
                        templates.append(_build_sample(template))
                spec['templates'] = templates
            else:
                # No inline templates: pick up samples stored as separate
                # files next to the spider.
                templates = self.load_external_templates(self.spider_dir,
                                                         spider_name)
                spec.setdefault("templates", []).extend(templates)
            return spec
        except ValueError as e:
            raise ValueError(
                "Error parsing spider (invalid JSON): %s: %s" %
                (spider_name, e)
            )

    def keys(self):
        """Iterate over all known spider names."""
        for spider_name in self.spider_names:
            yield spider_name

    def items(self):
        """Iterate over (name, spec) pairs; already-cached specs come first."""
        spiders = chain(self._spiders, self.spider_names - set(self._spiders))
        for spider_name in spiders:
            yield spider_name, self[spider_name]

    def values(self):
        """Iterate over all spider specs."""
        for _, spider in self.items():
            yield spider

    def load_external_templates(self, spec_base, spider_name):
        """Yield samples stored under spiders/<spider_name>/ as *.json files.

        ``spec_base`` is unused but kept for interface compatibility.
        """
        spider_dir = self.storage.rel_path('spiders', spider_name)
        if not self.storage.isdir(spider_dir):
            # Bug fix: this generator used ``raise StopIteration`` here,
            # which PEP 479 (Python 3.7+) converts into a RuntimeError at
            # the call site; a bare ``return`` ends the generator cleanly.
            return
        for name in self.storage.listdir(spider_dir):
            if not name.endswith('.json'):
                continue
            path = self.storage.rel_path(spider_dir, name)
            sample = self.storage.open(path)
            if not sample:
                continue
            # Large page bodies live as sibling <sample>/<attr>.html files.
            sample_dir = path[:-len('.json')]
            if self.storage.isdir(sample_dir):
                for fname in self.storage.listdir(sample_dir):
                    if fname.endswith('.html'):
                        attr = fname[:-len('.html')]
                        html = self.storage.open(sample_dir, fname, raw=1)
                        sample[attr] = html
            if 'original_body' not in sample:
                sample['original_body'] = u'<html></html>'
            version = sample.get('version', '')
            yield _build_sample(sample, legacy=version < '0.13.0')
| true
| true
|
f708d1cd931d1d693bf81474cd5bdb17ae3406f6
| 834
|
py
|
Python
|
shapenet/networks/utils.py
|
ss18/shapenet
|
5a605bee6b2750f3a586ca9a740165e66b5dd7d8
|
[
"BSD-2-Clause"
] | null | null | null |
shapenet/networks/utils.py
|
ss18/shapenet
|
5a605bee6b2750f3a586ca9a740165e66b5dd7d8
|
[
"BSD-2-Clause"
] | null | null | null |
shapenet/networks/utils.py
|
ss18/shapenet
|
5a605bee6b2750f3a586ca9a740165e66b5dd7d8
|
[
"BSD-2-Clause"
] | 1
|
2020-09-25T08:55:12.000Z
|
2020-09-25T08:55:12.000Z
|
# author: Justus Schock (justus.schock@rwth-aachen.de)
import torch
class CustomGroupNorm(torch.nn.Module):
    """Group normalization with a default of two groups.

    Thin wrapper around :class:`torch.nn.GroupNorm` that merely fixes
    ``n_groups=2`` as the default.
    """

    def __init__(self, n_features, n_groups=2):
        """
        Parameters
        ----------
        n_features : int
            number of input features (channels)
        n_groups : int
            number of normalization groups (must divide ``n_features``)
        """
        super().__init__()
        self.norm = torch.nn.GroupNorm(n_groups, n_features)

    def forward(self, x):
        """Apply group normalization to *x*.

        Parameters
        ----------
        x : :class:`torch.Tensor`
            batch to normalize

        Returns
        -------
        :class:`torch.Tensor`
            normalized result, same shape as the input
        """
        return self.norm(x)
| 20.85
| 64
| 0.533573
|
import torch
class CustomGroupNorm(torch.nn.Module):
    # NOTE(review): comment-stripped duplicate of the CustomGroupNorm
    # definition earlier in this dump (content_no_comment column).
    def __init__(self, n_features, n_groups=2):
        """Wrap torch.nn.GroupNorm with n_groups (default 2) over n_features channels."""
        super().__init__()
        self.norm = torch.nn.GroupNorm(n_groups, n_features)
    def forward(self, x):
        """Apply group normalization to x and return the normalized tensor."""
        return self.norm(x)
| true
| true
|
f708d1eeb45291c951a49f1eee98109cbc0c9f78
| 2,499
|
py
|
Python
|
hooks/charmhelpers/fetch/giturl.py
|
andreibacos/nova-compute-charm
|
09a27bf91b8b4ee9c1226f8bebc489a192549873
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
hooks/charmhelpers/fetch/giturl.py
|
andreibacos/nova-compute-charm
|
09a27bf91b8b4ee9c1226f8bebc489a192549873
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
hooks/charmhelpers/fetch/giturl.py
|
andreibacos/nova-compute-charm
|
09a27bf91b8b4ee9c1226f8bebc489a192549873
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subprocess import check_call, CalledProcessError
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
apt_install,
)
# Bootstrap at import time: git must be present for this fetch handler to
# work, so install it via apt and fail hard if installation did not stick.
if filter_installed_packages(['git']) != []:
    apt_install(['git'])
    if filter_installed_packages(['git']) != []:
        raise NotImplementedError('Unable to install git')
class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs."""

    def can_handle(self, source):
        """Return True for http(s)/git URLs, or a local path containing .git."""
        url_parts = self.parse_url(source)
        # TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git', ''):
            return False
        elif not url_parts.scheme:
            # No scheme: treat as a local path and require a git checkout.
            return os.path.exists(os.path.join(source, '.git'))
        else:
            return True

    def clone(self, source, dest, branch="master", depth=None):
        """Clone *source* into *dest*, or pull if *dest* already exists."""
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        if os.path.exists(dest):
            cmd = ['git', '-C', dest, 'pull', source, branch]
        else:
            cmd = ['git', 'clone', source, dest, '--branch', branch]
            if depth:
                # Bug fix: subprocess arguments must be strings; callers
                # commonly pass depth as an int, which made check_call raise
                # TypeError.
                cmd.extend(['--depth', str(depth)])
        check_call(cmd)

    def install(self, source, branch="master", dest=None, depth=None):
        """Fetch *source* and return the directory the branch was cloned into.

        Clone/pull failures surface as UnhandledSource.
        """
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            # NOTE(review): assumes CHARM_DIR is set in the environment —
            # os.path.join raises TypeError when it is missing.
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        try:
            self.clone(source, dest_dir, branch, depth)
        except CalledProcessError as e:
            raise UnhandledSource(e)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
| 36.217391
| 75
| 0.635054
|
import os
from subprocess import check_call, CalledProcessError
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
apt_install,
)
# Bootstrap guard (duplicate of the commented copy in this dump): install
# git via apt on import, failing hard if it cannot be installed.
if filter_installed_packages(['git']) != []:
    apt_install(['git'])
    if filter_installed_packages(['git']) != []:
        raise NotImplementedError('Unable to install git')
class GitUrlFetchHandler(BaseFetchHandler):
    # NOTE(review): comment-stripped duplicate of the GitUrlFetchHandler
    # definition earlier in this dump.
    def can_handle(self, source):
        """Return True for http(s)/git URLs, or a local path containing .git."""
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'git', ''):
            return False
        elif not url_parts.scheme:
            # No scheme: treat as a local path and require a git checkout.
            return os.path.exists(os.path.join(source, '.git'))
        else:
            return True
    def clone(self, source, dest, branch="master", depth=None):
        """Clone *source* into *dest*, or pull if *dest* already exists."""
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        if os.path.exists(dest):
            cmd = ['git', '-C', dest, 'pull', source, branch]
        else:
            cmd = ['git', 'clone', source, dest, '--branch', branch]
            if depth:
                # NOTE(review): depth is handed to subprocess unconverted;
                # an int here raises TypeError — should be str(depth).
                cmd.extend(['--depth', depth])
        check_call(cmd)
    def install(self, source, branch="master", dest=None, depth=None):
        """Fetch *source* and return the directory the branch was cloned into."""
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            # NOTE(review): assumes CHARM_DIR is set — verify at call sites.
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        try:
            self.clone(source, dest_dir, branch, depth)
        except CalledProcessError as e:
            raise UnhandledSource(e)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
| true
| true
|
f708d2da1b61965d0523fb2dd82a2fff52ab6f66
| 2,098
|
py
|
Python
|
enb/tcall.py
|
AlysH/experiment-notebook
|
c6a40b1dd518814ccac50f83b3a09d59202b138e
|
[
"MIT"
] | null | null | null |
enb/tcall.py
|
AlysH/experiment-notebook
|
c6a40b1dd518814ccac50f83b3a09d59202b138e
|
[
"MIT"
] | null | null | null |
enb/tcall.py
|
AlysH/experiment-notebook
|
c6a40b1dd518814ccac50f83b3a09d59202b138e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Timed calls to subprocess, so that real execution times can be obtained.
"""
__author__ = "Miguel Hernández Cabronero <miguel.hernandez@uab.cat>"
__date__ = "23/05/2020"
import os
import subprocess
import re
import time
import platform
import shutil
class InvocationError(Exception):
    """Raised when a timed shell invocation fails: unexpected exit status,
    or output that lacks a parseable time signature."""
def get_status_output_time(invocation, expected_status_value=0, wall=False):
    """Run *invocation* in a shell and return ``(status, output, seconds)``.

    :param invocation: shell command line to execute.
    :param expected_status_value: if not None, the exit status must equal
      this value or an InvocationError is raised.
    :param wall: if True, wall-clock time is returned; otherwise user+system
      CPU time as reported by GNU time (both in seconds). When no ``time``
      binary is found, the function silently falls back to wall-clock time.
    :return: status, output, time
    """
    # GNU time lives in different places: Homebrew's gtime on macOS,
    # /usr/bin/time elsewhere.
    if "Darwin" in platform.system():
        time_command = "/usr/local/bin/gtime"
    else:
        time_command = "/usr/bin/time"
    if os.path.isfile(time_command):
        # Ask GNU time to append a parseable 'u<user>@s<system>' line.
        invocation = f"{time_command} -f 'u%U@s%S' {invocation}"
    else:
        wall = True  # cannot measure CPU time without the time binary

    wall_time_before = time.time()
    status, output = subprocess.getstatusoutput(invocation)
    wall_time_after = time.time()

    output_lines = output.splitlines()
    # The timing line (if present) is the last one; strip it unless the
    # caller asked for wall time, in which case the raw output is kept.
    output = "\n".join(output_lines[:-1] if not wall else output_lines)
    if expected_status_value is not None and status != expected_status_value:
        # Bug fix: the original chained ``.format(...)`` onto an already
        # evaluated f-string, which mangles or raises whenever the command
        # output contains brace characters.
        raise InvocationError(
            f"status={status} != {expected_status_value}.\n"
            f"Input=[{invocation}].\nOutput=[{output}]")
    if wall:
        measured_time = wall_time_after - wall_time_before
    else:
        m = re.fullmatch(r"u(\d+\.\d+)@s(\d+\.\d+)", output_lines[-1])
        if m is not None:
            measured_time = float(m.group(1)) + float(m.group(2))
        else:
            raise InvocationError(
                f"Output {output_lines} did not contain a valid time signature")
    return status, output, measured_time
| 31.787879
| 108
| 0.661106
|
__author__ = "Miguel Hernández Cabronero <miguel.hernandez@uab.cat>"
__date__ = "23/05/2020"
import os
import subprocess
import re
import time
import platform
import shutil
class InvocationError(Exception):
    """Raised when a timed shell invocation fails."""
    pass
def get_status_output_time(invocation, expected_status_value=0, wall=False):
    """Run *invocation* in a shell; return (status, output, time-in-seconds).

    NOTE(review): comment-stripped duplicate of the documented copy earlier
    in this dump.
    """
    # GNU time location differs per platform (Homebrew gtime on macOS).
    if "Darwin" in platform.system():
        time_command = "/usr/local/bin/gtime"
    else:
        time_command = "/usr/bin/time"
    if os.path.isfile(time_command):
        # Wrap the command so GNU time appends a 'u<user>@s<system>' line.
        invocation = f"{time_command} -f 'u%U@s%S' {invocation}"
    else:
        invocation = f"{invocation}"
        # No time binary: silently fall back to wall-clock measurement.
        wall = True
    wall_time_before = time.time()
    status, output = subprocess.getstatusoutput(invocation)
    wall_time_after = time.time()
    output_lines = output.splitlines()
    # Drop the trailing timing line unless wall time was requested.
    output = "\n".join(output_lines[:-1] if not wall else output_lines)
    if expected_status_value is not None and status != expected_status_value:
        # NOTE(review): bug — ``.format(...)`` is applied to an already
        # evaluated f-string; it mangles or raises when the command output
        # contains brace characters.
        raise InvocationError(
            f"status={status} != {expected_status_value}.\nInput=[{invocation}].\nOutput=[{output}]".format(
                status, invocation, output))
    if wall:
        measured_time = wall_time_after - wall_time_before
    else:
        # Parse the user/system seconds emitted by GNU time.
        m = re.fullmatch(r"u(\d+\.\d+)@s(\d+\.\d+)", output_lines[-1])
        if m is not None:
            measured_time = float(m.group(1)) + float(m.group(2))
        else:
            raise InvocationError(f"Output {output_lines} did not contain a valid time signature")
    return status, output, measured_time
| true
| true
|
f708d313d84dcab7bba4a1676f1e38302853d437
| 1,000
|
py
|
Python
|
pyf/_close_.py
|
snoopyjc/pythonizer
|
6b3683084f41f0aa06b1b4e652a0f00b19cceac1
|
[
"Artistic-2.0"
] | 1
|
2022-03-13T22:08:25.000Z
|
2022-03-13T22:08:25.000Z
|
pyf/_close_.py
|
snoopyjc/pythonizer
|
6b3683084f41f0aa06b1b4e652a0f00b19cceac1
|
[
"Artistic-2.0"
] | 21
|
2022-03-17T16:53:04.000Z
|
2022-03-31T23:55:24.000Z
|
pyf/_close_.py
|
snoopyjc/pythonizer
|
6b3683084f41f0aa06b1b4e652a0f00b19cceac1
|
[
"Artistic-2.0"
] | null | null | null |
def _close_(fh):
"""Implementation of perl close"""
global AUTODIE, TRACEBACK, OS_ERROR, TRACE_RUN
try:
if hasattr(fh, '_sp'): # issue 72: subprocess
fh.flush()
fh._sp.communicate()
if TRACE_RUN:
sp = subprocess.CompletedProcess(f"open({fh._file})", fh._sp.returncode)
_carp(f'trace close({fh._file}): {repr(sp)}', skip=2)
fh.close()
if fh._sp.returncode:
raise IOError(f"close({fh._file}): failed with {fh._sp.returncode}")
return 1
if fh is None:
raise TypeError(f"close(None): failed")
#if WARNING and fh.closed:
#_carp(f"close failed: Filehandle is already closed", skip=2)
fh.close()
return 1
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
_cluck(OS_ERROR,skip=2)
if AUTODIE:
raise
return 0
| 32.258065
| 89
| 0.518
|
def _close_(fh):
global AUTODIE, TRACEBACK, OS_ERROR, TRACE_RUN
try:
if hasattr(fh, '_sp'): fh.flush()
fh._sp.communicate()
if TRACE_RUN:
sp = subprocess.CompletedProcess(f"open({fh._file})", fh._sp.returncode)
_carp(f'trace close({fh._file}): {repr(sp)}', skip=2)
fh.close()
if fh._sp.returncode:
raise IOError(f"close({fh._file}): failed with {fh._sp.returncode}")
return 1
if fh is None:
raise TypeError(f"close(None): failed")
fh.close()
return 1
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
_cluck(OS_ERROR,skip=2)
if AUTODIE:
raise
return 0
| true
| true
|
f708d3202e725236311e670212f40c058faa6dcf
| 3,055
|
py
|
Python
|
SiebelCOM/sa.py
|
komarov-sergey/py-siebel-com
|
3253c1380ba292234deac5def2c340bbb478f593
|
[
"MIT"
] | 3
|
2018-04-04T17:29:42.000Z
|
2022-02-09T16:48:34.000Z
|
SiebelCOM/sa.py
|
KomarovSergei/py-siebel-com
|
3253c1380ba292234deac5def2c340bbb478f593
|
[
"MIT"
] | null | null | null |
SiebelCOM/sa.py
|
KomarovSergei/py-siebel-com
|
3253c1380ba292234deac5def2c340bbb478f593
|
[
"MIT"
] | 1
|
2021-05-06T06:03:34.000Z
|
2021-05-06T06:03:34.000Z
|
import win32com.client as wc
from utils import vstr
from utils import vshort
from utils import vstrarr
from utils import check_error
from bc import SiebelBusObject
from ps import SiebelPropertySet
from bs import SiebelService
PROGID = 'SiebelDataServer.ApplicationObject'
class SiebelApplication(object):
    """COM wrapper around the Siebel Data Server application object.

    Every method delegates to the dispatched COM object ``self._sa``;
    ``vstr``/``vshort`` (from utils) convert arguments before dispatch, and
    the trailing ``vshort(0)`` argument appears on every Siebel COM call.
    Methods decorated with ``check_error`` are wrapped by that decorator
    (see utils) around the raw COM invocation.
    """
    def __init__(self, conf):
        # Dispatch the Siebel data server and load the given configuration.
        self._sa = wc.Dispatch(PROGID)
        self._sa.LoadObjects(vstr(conf), vshort(0))
    def getLastErrText(self):
        # Text of the last error reported by the COM object.
        return self._sa.GetLastErrText
    @check_error
    def getBusObject(self, name):
        # Wrap the returned COM business object in the Python helper class.
        return SiebelBusObject(self._sa.GetBusObject(vstr(name), vshort(0)),
                               self._sa)
    @check_error
    def getProfileAttr(self, name):
        return self._sa.GetProfileAttr(vstr(name), vshort(0))
    @check_error
    def getService(self, name):
        return SiebelService(self._sa.GetService(vstr(name), vshort(0)),
                             self._sa)
    @check_error
    def getSharedGlobal(self, name):
        return self._sa.GetSharedGlobal(vstr(name), vshort(0))
    @check_error
    def invokeMethod(self, methodName, *methodArgs):
        # Arbitrary method invocation; args go across as a string array.
        return self._sa.InvokeMethod(vstr(methodName),
                                     vstrarr(list(methodArgs)),
                                     vshort(0))
    @check_error
    def currencyCode(self):
        return self._sa.CurrencyCode(vshort(0))
    @check_error
    def login(self, login, password):
        self._sa.Login(vstr(login), vstr(password), vshort(0))
    @check_error
    def loginId(self):
        return self._sa.LoginId(vshort(0))
    @check_error
    def loginName(self):
        return self._sa.LoginName(vshort(0))
    @check_error
    def newPropertySet(self):
        return SiebelPropertySet(self._sa.NewPropertySet(vshort(0)), self._sa)
    @check_error
    def positionId(self):
        return self._sa.PositionId(vshort(0))
    @check_error
    def positionName(self):
        return self._sa.PositionName(vshort(0))
    @check_error
    def setPositionId(self, value):
        self._sa.SetPositionId(vstr(value), vshort(0))
    @check_error
    def setPositionName(self, value):
        self._sa.SetPositionName(vstr(value), vshort(0))
    @check_error
    def setProfileAttr(self, name, value):
        self._sa.SetProfileAttr(vstr(name), vstr(value), vshort(0))
    @check_error
    def setSharedGlobal(self, name, value):
        self._sa.SetSharedGlobal(vstr(name), vstr(value), vshort(0))
    @check_error
    def trace(self, msg):
        self._sa.Trace(vstr(msg), vshort(0))
    @check_error
    def traceOff(self):
        self._sa.TraceOff(vshort(0))
    @check_error
    def traceOn(self, file_name, category, source):
        self._sa.TraceOn(vstr(file_name), vstr(
            category), vstr(source), vshort(0))
    def evalExpr(self, expr):
        # Evaluate a Siebel expression by invoking EvalExpr on an Employee
        # business component (used only as a host for the call).
        bo = self.getBusObject('Employee')
        bc = bo.getBusComp('Employee')
        return bc.invokeMethod('EvalExpr', expr)
    def repositoryId(self):
        # Id of the active repository, obtained via the RepositoryId() expr.
        return self.evalExpr("RepositoryId()")
| 27.522523
| 78
| 0.646154
|
import win32com.client as wc
from utils import vstr
from utils import vshort
from utils import vstrarr
from utils import check_error
from bc import SiebelBusObject
from ps import SiebelPropertySet
from bs import SiebelService
PROGID = 'SiebelDataServer.ApplicationObject'
class SiebelApplication(object):
    # NOTE(review): duplicate of the SiebelApplication definition earlier in
    # this dump (content_no_comment column). COM wrapper: each method
    # delegates to the dispatched Siebel application object ``self._sa``.
    def __init__(self, conf):
        self._sa = wc.Dispatch(PROGID)
        self._sa.LoadObjects(vstr(conf), vshort(0))
    def getLastErrText(self):
        return self._sa.GetLastErrText
    @check_error
    def getBusObject(self, name):
        return SiebelBusObject(self._sa.GetBusObject(vstr(name), vshort(0)),
                               self._sa)
    @check_error
    def getProfileAttr(self, name):
        return self._sa.GetProfileAttr(vstr(name), vshort(0))
    @check_error
    def getService(self, name):
        return SiebelService(self._sa.GetService(vstr(name), vshort(0)),
                             self._sa)
    @check_error
    def getSharedGlobal(self, name):
        return self._sa.GetSharedGlobal(vstr(name), vshort(0))
    @check_error
    def invokeMethod(self, methodName, *methodArgs):
        return self._sa.InvokeMethod(vstr(methodName),
                                     vstrarr(list(methodArgs)),
                                     vshort(0))
    @check_error
    def currencyCode(self):
        return self._sa.CurrencyCode(vshort(0))
    @check_error
    def login(self, login, password):
        self._sa.Login(vstr(login), vstr(password), vshort(0))
    @check_error
    def loginId(self):
        return self._sa.LoginId(vshort(0))
    @check_error
    def loginName(self):
        return self._sa.LoginName(vshort(0))
    @check_error
    def newPropertySet(self):
        return SiebelPropertySet(self._sa.NewPropertySet(vshort(0)), self._sa)
    @check_error
    def positionId(self):
        return self._sa.PositionId(vshort(0))
    @check_error
    def positionName(self):
        return self._sa.PositionName(vshort(0))
    @check_error
    def setPositionId(self, value):
        self._sa.SetPositionId(vstr(value), vshort(0))
    @check_error
    def setPositionName(self, value):
        self._sa.SetPositionName(vstr(value), vshort(0))
    @check_error
    def setProfileAttr(self, name, value):
        self._sa.SetProfileAttr(vstr(name), vstr(value), vshort(0))
    @check_error
    def setSharedGlobal(self, name, value):
        self._sa.SetSharedGlobal(vstr(name), vstr(value), vshort(0))
    @check_error
    def trace(self, msg):
        self._sa.Trace(vstr(msg), vshort(0))
    @check_error
    def traceOff(self):
        self._sa.TraceOff(vshort(0))
    @check_error
    def traceOn(self, file_name, category, source):
        self._sa.TraceOn(vstr(file_name), vstr(
            category), vstr(source), vshort(0))
    def evalExpr(self, expr):
        # Evaluate a Siebel expression via the Employee business component.
        bo = self.getBusObject('Employee')
        bc = bo.getBusComp('Employee')
        return bc.invokeMethod('EvalExpr', expr)
    def repositoryId(self):
        return self.evalExpr("RepositoryId()")
| true
| true
|
f708d38e4ada2c2472030b04a3af3bd0cb4f2dc2
| 413
|
py
|
Python
|
transfermarket.py
|
DaniilGumin/SiteParser
|
97aca6393141f5a3e8fba03745b01e8d4a918d2f
|
[
"MIT"
] | null | null | null |
transfermarket.py
|
DaniilGumin/SiteParser
|
97aca6393141f5a3e8fba03745b01e8d4a918d2f
|
[
"MIT"
] | null | null | null |
transfermarket.py
|
DaniilGumin/SiteParser
|
97aca6393141f5a3e8fba03745b01e8d4a918d2f
|
[
"MIT"
] | null | null | null |
from lxml import html
import requests
# Transfer listing scraped: AC Milan, season 2017, on transfermarkt.com.
url = 'https://www.transfermarkt.com/ac-mailand/transfers/verein/5/saison_id/2017'
# Desktop-browser User-Agent so the site serves the normal HTML page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
page = requests.get(url, headers=headers)
tree = html.fromstring(page.content)
# Player names are the anchor texts carrying the tooltip class.
players = tree.xpath('//a[@class="spielprofil_tooltip"]/text()')
print('Players: ', players)
| 27.533333
| 101
| 0.72155
|
# NOTE(review): comment-free duplicate of the transfermarkt scraping script
# earlier in this dump. Fetches AC Milan's 2017 transfer page and prints
# the player names extracted via XPath.
from lxml import html
import requests
url = 'https://www.transfermarkt.com/ac-mailand/transfers/verein/5/saison_id/2017'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
page = requests.get(url, headers=headers)
tree = html.fromstring(page.content)
players = tree.xpath('//a[@class="spielprofil_tooltip"]/text()')
print('Players: ', players)
| true
| true
|
f708d555666b85db4bf67ef80495a4f682e089a3
| 753
|
py
|
Python
|
package/alphabets/capital_alphabets/O.py
|
venkateshvsn/patterns
|
7e1d1926b40695a65e04c370655c5d79dd63bf6e
|
[
"MIT"
] | null | null | null |
package/alphabets/capital_alphabets/O.py
|
venkateshvsn/patterns
|
7e1d1926b40695a65e04c370655c5d79dd63bf6e
|
[
"MIT"
] | null | null | null |
package/alphabets/capital_alphabets/O.py
|
venkateshvsn/patterns
|
7e1d1926b40695a65e04c370655c5d79dd63bf6e
|
[
"MIT"
] | null | null | null |
def for_O():
    """Print capital 'O' on a 5x5 grid using for loops."""
    for row in range(5):
        cells = []
        for col in range(5):
            # Stars on the left/right edges (rows 1-3) and on the top/bottom
            # rim (columns 1-3); corners stay blank.
            on_side = col in (0, 4) and row not in (0, 4)
            on_rim = row in (0, 4) and col in (1, 2, 3)
            cells.append("*" if on_side or on_rim else " ")
        print("".join(cell + " " for cell in cells))
def while_O():
    """Print capital 'O' on a 5x5 grid using while loops."""
    i = 0
    while i < 5:
        line = ""
        j = 0
        while j < 5:
            # Stars on the side columns (middle rows) and on the top/bottom
            # rows (middle columns); corners stay blank.
            edge_col = j in (0, 4) and i not in (0, 4)
            edge_row = i in (0, 4) and j not in (0, 4)
            line += ("*" if edge_col or edge_row else " ") + " "
            j += 1
        print(line)
        i += 1
| 28.961538
| 130
| 0.409031
|
def for_O():
    """Print capital 'O' on a 5x5 grid using for loops (duplicate of the
    documented copy earlier in this dump)."""
    for row in range(5):
        for col in range(5):
            if col==0 and row not in(0,4) or col==4 and row not in(0,4) or row==0 and col in(1,2,3) or row==4 and col in(1,2,3) :
                print("*",end=" ")
            else:
                print(" ",end=" ")
        print()
def while_O():
    """Print capital 'O' on a 5x5 grid using while loops (duplicate of the
    documented copy earlier in this dump)."""
    i=0
    while i<5:
        j=0
        while j<5:
            if j==0 and i not in(0,4) or i==0 and j not in(0,4)or i==4 and j not in(0,4)or j==4 and i not in(0,4):
                print("*",end=" ")
            else:
                print(" ",end=" ")
            j+=1
        i+=1
        print()
| true
| true
|
f708d6c9b4f981534604870130c87e1774584567
| 2,074
|
py
|
Python
|
plugins/hw_wallet/plugin.py
|
qupengcheng/btcnano-1.0
|
777733284a103c619ac15933cc0b8106642b9dca
|
[
"MIT"
] | 3
|
2018-01-16T09:45:41.000Z
|
2018-01-27T04:07:10.000Z
|
plugins/hw_wallet/plugin.py
|
qupengcheng/btcnano-1.0
|
777733284a103c619ac15933cc0b8106642b9dca
|
[
"MIT"
] | null | null | null |
plugins/hw_wallet/plugin.py
|
qupengcheng/btcnano-1.0
|
777733284a103c619ac15933cc0b8106642b9dca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bitcoinnano.plugins import BasePlugin, hook
from bitcoinnano.i18n import _
class HW_PluginBase(BasePlugin):
    # Derived classes provide class-static variables: client_class,
    # firmware_URL, handler_class, libraries_available, libraries_URL,
    # minimum_firmware, wallet_class, ckd_public, types, HidTransport
    # (including keystore_class, referenced below).
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        # Link the keystore class back to this plugin so keystores can
        # reach their owning plugin.
        self.device = self.keystore_class.device
        self.keystore_class.plugin = self
    def is_enabled(self):
        """Hardware-wallet plugins are always enabled."""
        return True
    def device_manager(self):
        """Return the parent's device manager."""
        return self.parent.device_manager
    @hook
    def close_wallet(self, wallet):
        """On wallet close, unpair devices for keystores this plugin owns."""
        for keystore in wallet.get_keystores():
            if isinstance(keystore, self.keystore_class):
                self.device_manager().unpair_xpub(keystore.xpub)
| 38.407407
| 73
| 0.737223
|
from bitcoinnano.plugins import BasePlugin, hook
from bitcoinnano.i18n import _
class HW_PluginBase(BasePlugin):
    # NOTE(review): comment-stripped duplicate of the HW_PluginBase
    # definition earlier in this dump. Base class for hardware-wallet
    # plugins; derived classes supply keystore_class and related statics.
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.device = self.keystore_class.device
        self.keystore_class.plugin = self
    def is_enabled(self):
        """Hardware-wallet plugins are always enabled."""
        return True
    def device_manager(self):
        """Return the parent's device manager."""
        return self.parent.device_manager
    @hook
    def close_wallet(self, wallet):
        """On wallet close, unpair devices for keystores this plugin owns."""
        for keystore in wallet.get_keystores():
            if isinstance(keystore, self.keystore_class):
                self.device_manager().unpair_xpub(keystore.xpub)
| true
| true
|
f708d6ec719c9d7fc6f27e40ee97ebb008dbadeb
| 9,417
|
py
|
Python
|
test_widerface.py
|
DevD1092/Retinaface_DLIB
|
455e393f1bd688cf2d1cc41960105af9ea8a26c6
|
[
"Apache-2.0"
] | 3
|
2021-09-23T23:56:46.000Z
|
2022-03-25T16:15:33.000Z
|
test_widerface.py
|
DevD1092/Retinaface_DLIB
|
455e393f1bd688cf2d1cc41960105af9ea8a26c6
|
[
"Apache-2.0"
] | null | null | null |
test_widerface.py
|
DevD1092/Retinaface_DLIB
|
455e393f1bd688cf2d1cc41960105af9ea8a26c6
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
import sys
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
# Command-line interface for the RetinaFace WIDER FACE evaluation script.
parser = argparse.ArgumentParser(description='Retinaface')
parser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')
# NOTE(review): type=str with a bool default — any command-line value
# (even "False") is a non-empty string and therefore truthy; confirm intent.
parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
parser.add_argument('--dataset_folder', default='./data/widerface/widerface/val/images/', type=str, help='dataset path')
parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('-s', '--save_image', action="store_true", default=False, help='show detection results')
parser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
    """Report key overlap between *model* and a checkpoint state dict.

    Prints the counts of missing, unused and shared keys, asserts that at
    least one checkpoint key matches the model, and returns True.
    """
    checkpoint_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    shared = model_keys & checkpoint_keys
    extra = checkpoint_keys - model_keys
    absent = model_keys - checkpoint_keys
    print('Missing keys:{}'.format(len(absent)))
    print('Unused checkpoint keys:{}'.format(len(extra)))
    print('Used keys:{}'.format(len(shared)))
    assert len(shared) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    """Strip *prefix* from every key of *state_dict*.

    Old-style checkpoints (e.g. saved from DataParallel) store parameter
    names with a shared 'module.' prefix; keys without the prefix are kept
    unchanged.
    """
    print("remove prefix '{}'".format(prefix))

    def strip(name):
        return name.split(prefix, 1)[-1] if name.startswith(prefix) else name

    return {strip(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
    """Load checkpoint weights from *pretrained_path* into *model*.

    Tensors are mapped to CPU when ``load_to_cpu`` is truthy, otherwise to
    the current CUDA device. Handles checkpoints that nest the weights
    under a 'state_dict' key and strips the 'module.' prefix before a
    non-strict ``load_state_dict``. Returns the model.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        # Materialise tensors on CPU regardless of where they were saved.
        checkpoint = torch.load(
            pretrained_path, map_location=lambda storage, loc: storage)
    else:
        gpu = torch.cuda.current_device()
        checkpoint = torch.load(
            pretrained_path,
            map_location=lambda storage, loc: storage.cuda(gpu))
    state = checkpoint['state_dict'] if "state_dict" in checkpoint.keys() else checkpoint
    state = remove_prefix(state, 'module.')
    check_keys(model, state)
    model.load_state_dict(state, strict=False)
    return model
if __name__ == '__main__':
    torch.set_grad_enabled(False)  # pure inference: no autograd bookkeeping
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50
    # net and model
    net = RetinaFace(cfg=cfg, phase = 'test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    cudnn.benchmark = True
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)
    # testing dataset: test_list.txt holds one relative image path per line
    testset_folder = args.dataset_folder
    print (testset_folder)
    testset_list = args.dataset_folder + "test_list.txt"
    test_dataset = []
    #print (testset_list)
    with open(testset_list, 'r') as fr:
        content = fr.readlines()
        test_dataset = [line.strip() for line in content]
    num_images = len(test_dataset)
    print (num_images)
    _t = {'forward_pass': Timer(), 'misc': Timer()}
    # testing begin
    for i, img_name in enumerate(test_dataset):
        image_path = testset_folder + img_name
        print (image_path)
        img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
        img = np.float32(img_raw)
        # testing scale: short side ~target_size, long side capped at max_size
        target_size = 1600
        max_size = 2150
        im_shape = img.shape
        im_size_min = np.min(im_shape[0:2])
        im_size_max = np.max(im_shape[0:2])
        resize = float(target_size) / float(im_size_min)
        # prevent bigger axis from being more than max_size:
        if np.round(resize * im_size_max) > max_size:
            resize = float(max_size) / float(im_size_max)
        if args.origin_size:
            resize = 1
        if resize != 1:
            img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
        im_height, im_width, _ = img.shape
        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        # subtract per-channel BGR mean, then HWC -> CHW and add batch dim
        img -= (104, 117, 123)
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.to(device)
        scale = scale.to(device)
        _t['forward_pass'].tic()
        loc, conf, landms = net(img)  # forward pass
        _t['forward_pass'].toc()
        _t['misc'].tic()
        priorbox = PriorBox(cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(device)
        prior_data = priors.data
        # decode box/landmark offsets relative to priors, then map back to
        # original-image pixel coordinates (divide by resize factor)
        boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
        scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2]])
        scale1 = scale1.to(device)
        landms = landms * scale1 / resize
        landms = landms.cpu().numpy()
        # ignore low scores
        inds = np.where(scores > args.confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]
        # keep top-K before NMS
        order = scores.argsort()[::-1]
        # order = scores.argsort()[::-1][:args.top_k]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]
        # do NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(dets, args.nms_threshold)
        # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
        dets = dets[keep, :]
        landms = landms[keep]
        # keep top-K faster NMS
        # dets = dets[:args.keep_top_k, :]
        # landms = landms[:args.keep_top_k, :]
        dets = np.concatenate((dets, landms), axis=1)
        _t['misc'].toc()
        # --------------------------------------------------------------------
        # write detections in WIDER-style format: filename, count, then
        # "x y w h confidence" per detection
        save_name = args.save_folder + img_name[:-4] + ".txt"
        dirname = os.path.dirname(save_name)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(save_name, "w") as fd:
            bboxs = dets
            file_name = os.path.basename(save_name)[:-4] + "\n"
            bboxs_num = str(len(bboxs)) + "\n"
            fd.write(file_name)
            fd.write(bboxs_num)
            for box in bboxs:
                x = int(box[0])
                y = int(box[1])
                w = int(box[2]) - int(box[0])
                h = int(box[3]) - int(box[1])
                confidence = str(box[4])
                line = str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " + confidence + " \n"
                fd.write(line)
        print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))
        # save image with boxes and the five landmark points drawn
        if args.save_image:
            for b in dets:
                if b[4] < args.vis_thres:
                    continue
                text = "{:.4f}".format(b[4])
                b = list(map(int, b))
                cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
                cx = b[0]
                cy = b[1] + 12
                cv2.putText(img_raw, text, (cx, cy),
                            cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
                # landms
                cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
                cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
                cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
                cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
                cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
            # save image
            if not os.path.exists("./results_handtask/"):
                os.makedirs("./results_handtask/")
            name = "./results_handtask/%05d.jpg" % i
            cv2.imwrite(name, img_raw)
| 41.484581
| 162
| 0.606456
|
from __future__ import print_function
import os
import sys
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
parser = argparse.ArgumentParser(description='Retinaface')
parser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
parser.add_argument('--dataset_folder', default='./data/widerface/widerface/val/images/', type=str, help='dataset path')
parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('-s', '--save_image', action="store_true", default=False, help='show detection results')
parser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
    """Report the overlap between checkpoint keys and the model's state dict.

    Prints the counts of missing, unused, and shared keys, asserts that at
    least one key is shared, and returns True.
    """
    ckpt_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    shared_keys = model_keys & ckpt_keys
    print('Missing keys:{}'.format(len(model_keys - ckpt_keys)))
    print('Unused checkpoint keys:{}'.format(len(ckpt_keys - model_keys)))
    print('Used keys:{}'.format(len(shared_keys)))
    assert len(shared_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    """Strip a shared leading `prefix` (e.g. DataParallel's 'module.') from
    every key of `state_dict`; keys without the prefix pass through unchanged."""
    print('remove prefix \'{}\''.format(prefix))
    # split(..., 1) removes only the leading occurrence of the prefix
    f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
    return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
    """Load checkpoint weights into `model` (non-strict) and return it.

    Handles both raw state dicts and {'state_dict': ...} wrappers, and
    removes any 'module.' prefix left by DataParallel before loading.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        # Remap all saved tensors onto the CPU.
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        # Remap saved tensors onto the current CUDA device.
        device = torch.cuda.current_device()
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model
if __name__ == '__main__':
    torch.set_grad_enabled(False)  # pure inference: no autograd bookkeeping
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50
    # Build the network and load pretrained weights.
    net = RetinaFace(cfg=cfg, phase = 'test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    cudnn.benchmark = True
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)
    # Read the list of test images: one relative path per line.
    testset_folder = args.dataset_folder
    print (testset_folder)
    testset_list = args.dataset_folder + "test_list.txt"
    test_dataset = []
    with open(testset_list, 'r') as fr:
        content = fr.readlines()
        test_dataset = [line.strip() for line in content]
    num_images = len(test_dataset)
    print (num_images)
    _t = {'forward_pass': Timer(), 'misc': Timer()}
    for i, img_name in enumerate(test_dataset):
        image_path = testset_folder + img_name
        print (image_path)
        img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
        img = np.float32(img_raw)
        # Choose a resize factor so the short side is ~target_size while the
        # long side stays under max_size (skipped when --origin_size is set).
        target_size = 1600
        max_size = 2150
        im_shape = img.shape
        im_size_min = np.min(im_shape[0:2])
        im_size_max = np.max(im_shape[0:2])
        resize = float(target_size) / float(im_size_min)
        if np.round(resize * im_size_max) > max_size:
            resize = float(max_size) / float(im_size_max)
        if args.origin_size:
            resize = 1
        if resize != 1:
            img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
        im_height, im_width, _ = img.shape
        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        # Subtract per-channel BGR mean, then HWC -> CHW and add batch dim.
        img -= (104, 117, 123)
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.to(device)
        scale = scale.to(device)
        _t['forward_pass'].tic()
        # BUG FIX: the forward pass and the timer stop had been fused onto one
        # line ("... = net(img) _t['forward_pass'].toc()"), which is a
        # SyntaxError; they are two separate statements.
        loc, conf, landms = net(img)
        _t['forward_pass'].toc()
        _t['misc'].tic()
        priorbox = PriorBox(cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(device)
        prior_data = priors.data
        # Decode box/landmark offsets relative to priors, then map back to
        # original-image pixel coordinates (divide by resize factor).
        boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
        scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2]])
        scale1 = scale1.to(device)
        landms = landms * scale1 / resize
        landms = landms.cpu().numpy()
        # Drop low-confidence detections, sort by descending score, run NMS.
        inds = np.where(scores > args.confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]
        order = scores.argsort()[::-1]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(dets, args.nms_threshold)
        dets = dets[keep, :]
        landms = landms[keep]
        dets = np.concatenate((dets, landms), axis=1)
        _t['misc'].toc()
        # Write detections in WIDER-style format: filename, count, then
        # "x y w h confidence" per detection.
        save_name = args.save_folder + img_name[:-4] + ".txt"
        dirname = os.path.dirname(save_name)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(save_name, "w") as fd:
            bboxs = dets
            file_name = os.path.basename(save_name)[:-4] + "\n"
            bboxs_num = str(len(bboxs)) + "\n"
            fd.write(file_name)
            fd.write(bboxs_num)
            for box in bboxs:
                x = int(box[0])
                y = int(box[1])
                w = int(box[2]) - int(box[0])
                h = int(box[3]) - int(box[1])
                confidence = str(box[4])
                line = str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " + confidence + " \n"
                fd.write(line)
        print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))
        # Optionally render boxes plus the five landmark points and save.
        if args.save_image:
            for b in dets:
                if b[4] < args.vis_thres:
                    continue
                text = "{:.4f}".format(b[4])
                b = list(map(int, b))
                cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
                cx = b[0]
                cy = b[1] + 12
                cv2.putText(img_raw, text, (cx, cy),
                            cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
                cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
                cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
                cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
                cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
                cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
            if not os.path.exists("./results_handtask/"):
                os.makedirs("./results_handtask/")
            name = "./results_handtask/%05d.jpg" % i
            cv2.imwrite(name, img_raw)
| true
| true
|
f708d97eef5629065ace28e265a871b9fdfb1637
| 3,058
|
py
|
Python
|
projectlaika/addWindow.py
|
TheSgtPepper23/LaikaIA
|
fc73aa17f74462b211c4a4159b663ed7c3cdb1bd
|
[
"MIT"
] | null | null | null |
projectlaika/addWindow.py
|
TheSgtPepper23/LaikaIA
|
fc73aa17f74462b211c4a4159b663ed7c3cdb1bd
|
[
"MIT"
] | null | null | null |
projectlaika/addWindow.py
|
TheSgtPepper23/LaikaIA
|
fc73aa17f74462b211c4a4159b663ed7c3cdb1bd
|
[
"MIT"
] | null | null | null |
import os
import hashlib
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QMessageBox
from internationalization import LANGUAGE
from logic import Hash
from windows.message import Message
from databaseAccess import DbMethods
class AddWindow(QMainWindow):
    """Window that lets an administrator register a new player account."""
    def __init__(self, lang):
        QMainWindow.__init__(self)
        uic.loadUi("windows/AddUser.ui", self)
        self.lang = lang  # language key used to look up localized UI strings
        self.reload_text()
        self.back_button.clicked.connect(self.go_to_back)
        self.add_button.clicked.connect(self.add_user)
    def reload_text(self):
        """Change the language of the window according to the chosen previously"""
        self.language = LANGUAGE.get(self.lang)
        self.setWindowTitle(self.language["add_user"])
        self.user_name_label.setText(self.language["username"])
        self.pass_label.setText(self.language["password"])
        self.confirm_pass_label.setText(self.language["confirm_pass"])
        self.add_button.setText(self.language["add_user"])
        self.back_button.setText(self.language["back"])
    def add_user(self):
        """Add a new user to the game"""
        # Validation: username must be at least 4 chars, password at least 8.
        if len(self.user_name_text.text()) < 4:
            message = Message(self.language["inv_username"], self.language["user_not_long"])
            warning_message = message.create_iw_message(self.language["ok"], "warning")
            warning_message.exec()
        elif len(self.password_text.text()) < 8:
            message = Message(self.language["inv_pass"], self.language["pass_not_long"])
            warning_message = message.create_iw_message(self.language["ok"], "warning")
            warning_message.exec()
        else:
            if self.password_text.text() == self.confirm_pass_text.text():
                data_acces = DbMethods()
                # Password is hashed before storage; add_player returns True on
                # success and False when the username already exists (per the
                # messages shown below).
                response = data_acces.add_player(self.user_name_text.text(), Hash.encrypt(self.password_text.text()))
                if response == True:
                    message = Message(self.language["registered"], self.language["welcome"])
                    information_message = message.create_iw_message(self.language["ok"], "information")
                    information_message.exec()
                elif response == False:
                    message = Message(self.language["other_name"], self.language["existing_user"])
                    warning_message = message.create_iw_message(self.language["ok"], "warning")
                    warning_message.exec()
                # Clear the form whether or not the insert succeeded.
                self.user_name_text.clear()
                self.password_text.clear()
                self.confirm_pass_text.clear()
            else:
                message = Message(self.language["pass_problem"], self.language["pass_dont_match"])
                warning_message = message.create_iw_message(self.language["ok"], "warning")
                warning_message.exec()
    def go_to_back(self):
        """Return to administration window"""
        # Imported here to avoid a circular import with adminWindow.
        from adminWindow import AdminWindow
        self.admin = AdminWindow(self.lang)
        self.admin.show()
        self.close()
| 46.333333
| 117
| 0.64225
|
import os
import hashlib
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QMessageBox
from internationalization import LANGUAGE
from logic import Hash
from windows.message import Message
from databaseAccess import DbMethods
class AddWindow(QMainWindow):
    """Window that lets an administrator register a new player account."""
    def __init__(self, lang):
        QMainWindow.__init__(self)
        uic.loadUi("windows/AddUser.ui", self)
        self.lang = lang  # language key used to look up localized UI strings
        self.reload_text()
        self.back_button.clicked.connect(self.go_to_back)
        self.add_button.clicked.connect(self.add_user)
    def reload_text(self):
        """Apply the localized strings for the selected language to the UI."""
        self.language = LANGUAGE.get(self.lang)
        self.setWindowTitle(self.language["add_user"])
        self.user_name_label.setText(self.language["username"])
        self.pass_label.setText(self.language["password"])
        self.confirm_pass_label.setText(self.language["confirm_pass"])
        self.add_button.setText(self.language["add_user"])
        self.back_button.setText(self.language["back"])
    def add_user(self):
        """Validate the form and register a new user."""
        # Username must be at least 4 chars, password at least 8.
        if len(self.user_name_text.text()) < 4:
            message = Message(self.language["inv_username"], self.language["user_not_long"])
            warning_message = message.create_iw_message(self.language["ok"], "warning")
            warning_message.exec()
        elif len(self.password_text.text()) < 8:
            message = Message(self.language["inv_pass"], self.language["pass_not_long"])
            warning_message = message.create_iw_message(self.language["ok"], "warning")
            warning_message.exec()
        else:
            if self.password_text.text() == self.confirm_pass_text.text():
                data_acces = DbMethods()
                # Password is hashed before storage; add_player returns True on
                # success, False when the username already exists.
                response = data_acces.add_player(self.user_name_text.text(), Hash.encrypt(self.password_text.text()))
                if response == True:
                    message = Message(self.language["registered"], self.language["welcome"])
                    information_message = message.create_iw_message(self.language["ok"], "information")
                    information_message.exec()
                elif response == False:
                    message = Message(self.language["other_name"], self.language["existing_user"])
                    warning_message = message.create_iw_message(self.language["ok"], "warning")
                    warning_message.exec()
                # Clear the form whether or not the insert succeeded.
                self.user_name_text.clear()
                self.password_text.clear()
                self.confirm_pass_text.clear()
            else:
                message = Message(self.language["pass_problem"], self.language["pass_dont_match"])
                warning_message = message.create_iw_message(self.language["ok"], "warning")
                warning_message.exec()
    def go_to_back(self):
        """Return to the administration window."""
        # Imported here to avoid a circular import with adminWindow.
        from adminWindow import AdminWindow
        self.admin = AdminWindow(self.lang)
        self.admin.show()
        self.close()
| true
| true
|
f708d9f68089c5a0379aa93fd7e35b1682b87353
| 736
|
py
|
Python
|
h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oparse_raw.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 6,098
|
2015-05-22T02:46:12.000Z
|
2022-03-31T16:54:51.000Z
|
h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oparse_raw.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,517
|
2015-05-23T02:10:54.000Z
|
2022-03-30T17:03:39.000Z
|
h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2oparse_raw.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,199
|
2015-05-22T04:09:55.000Z
|
2022-03-28T22:20:45.000Z
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2oparse_raw():
    """
    Python API test: h2o.parse_raw(setup, id=None, first_line_is_header=0)
    copied from pyunit_hexdev_29_parse_false.py
    """
    # Import without parsing: returns raw-file setup info (a list), not a frame.
    fraw = h2o.import_file(pyunit_utils.locate("smalldata/jira/hexdev_29.csv"), parse=False)
    assert isinstance(fraw, list)
    # parse_raw turns the raw import into an H2OFrame using the derived setup.
    fhex = h2o.parse_raw(h2o.parse_setup(fraw), id='hexdev_29.hex', first_line_is_header=0)
    fhex.summary()
    assert_is_type(fhex, H2OFrame)
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2oparse_raw)
else:
    # NOTE: also runs on import — the h2o pyunit harness convention.
    h2oparse_raw()
| 28.307692
| 92
| 0.743207
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2oparse_raw():
    """Exercise h2o.parse_raw(): import a CSV unparsed, then parse it to a frame."""
    # Import without parsing: returns raw-file setup info (a list), not a frame.
    fraw = h2o.import_file(pyunit_utils.locate("smalldata/jira/hexdev_29.csv"), parse=False)
    assert isinstance(fraw, list)
    fhex = h2o.parse_raw(h2o.parse_setup(fraw), id='hexdev_29.hex', first_line_is_header=0)
    fhex.summary()
    assert_is_type(fhex, H2OFrame)
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2oparse_raw)
else:
    # NOTE: also runs on import — the h2o pyunit harness convention.
    h2oparse_raw()
| true
| true
|
f708dc3f498afa726fef95d36981fa1fcc1f7c69
| 712
|
py
|
Python
|
backend/lambda-developer-endpoints/index.py
|
UBC-CIC/people-counting-with-aws-rekognition-Admin-Website
|
a635cfcf8acd7f66da761a2e03c99479b74d0b82
|
[
"Apache-1.1"
] | null | null | null |
backend/lambda-developer-endpoints/index.py
|
UBC-CIC/people-counting-with-aws-rekognition-Admin-Website
|
a635cfcf8acd7f66da761a2e03c99479b74d0b82
|
[
"Apache-1.1"
] | null | null | null |
backend/lambda-developer-endpoints/index.py
|
UBC-CIC/people-counting-with-aws-rekognition-Admin-Website
|
a635cfcf8acd7f66da761a2e03c99479b74d0b82
|
[
"Apache-1.1"
] | 1
|
2021-06-04T00:17:51.000Z
|
2021-06-04T00:17:51.000Z
|
import json
import boto3
import os
client = boto3.client('dynamodb')
CURRENT_COUNTS_TABLE_NAME = os.environ['CURRENT_COUNTS_TABLE_NAME']
AVERAGE_COUNTS_TABLE_NAME = os.environ['AVERAGE_COUNTS_TABLE_NAME']
def lambda_handler(event, context):
    """Developer endpoint: return a full scan of the requested counts table.

    The event selects the table via a "getCurrentCounts" or
    "getAverageCounts" key; with neither present, an empty body is returned.
    Always responds with status 200.
    """
    if "getCurrentCounts" in event:
        return {
            'statusCode': 200,
            'body': client.scan(TableName=CURRENT_COUNTS_TABLE_NAME)
        }
    if "getAverageCounts" in event:
        return {
            'statusCode': 200,
            'body': client.scan(TableName=AVERAGE_COUNTS_TABLE_NAME)
        }
    # No recognized flag: empty payload.
    return {
        'statusCode': 200,
        'body': {}
    }
| 28.48
| 67
| 0.640449
|
import json
import boto3
import os
client = boto3.client('dynamodb')
CURRENT_COUNTS_TABLE_NAME = os.environ['CURRENT_COUNTS_TABLE_NAME']
AVERAGE_COUNTS_TABLE_NAME = os.environ['AVERAGE_COUNTS_TABLE_NAME']
def lambda_handler(event, context):
    """Return a full scan of the counts table selected by the event flags.

    "getCurrentCounts" -> scan the current-counts table;
    "getAverageCounts" -> scan the average-counts table;
    otherwise an empty body. Always status 200.
    """
    if "getCurrentCounts" in event:
        # Full table scan (paginates at 1 MB; fine for small tables).
        response = client.scan(TableName=CURRENT_COUNTS_TABLE_NAME)
        return {
            'statusCode': 200,
            'body': response
        }
    if "getAverageCounts" in event:
        response = client.scan(TableName=AVERAGE_COUNTS_TABLE_NAME)
        return {
            'statusCode': 200,
            'body': response
        }
    response = {}
    return {
        'statusCode': 200,
        'body': response
    }
| true
| true
|
f708dc7d41fdf5af19cb3cc6f93b7b6e31a65bbc
| 2,096
|
py
|
Python
|
archai/datasets/providers/svhn_provider.py
|
bluetyson/archai
|
b370a7397cb8703a052d82297ae748a35c6a49c7
|
[
"MIT"
] | 344
|
2020-06-12T22:12:56.000Z
|
2022-03-29T06:48:20.000Z
|
archai/datasets/providers/svhn_provider.py
|
QPC-database/archai
|
50f70ccccf536466cc0370c8a63401e05dec33fd
|
[
"MIT"
] | 29
|
2020-06-13T19:56:49.000Z
|
2022-03-30T20:26:48.000Z
|
archai/datasets/providers/svhn_provider.py
|
QPC-database/archai
|
50f70ccccf536466cc0370c8a63401e05dec33fd
|
[
"MIT"
] | 68
|
2020-06-12T19:32:43.000Z
|
2022-03-05T06:58:40.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Tuple, Union, Optional
from overrides import overrides, EnforceOverrides
from torch.utils.data.dataset import Dataset
import torchvision
from torchvision.transforms import transforms
from torch.utils.data import ConcatDataset
from archai.datasets.dataset_provider import DatasetProvider, register_dataset_provider, TrainTestDatasets
from archai.common.config import Config
from archai.common import utils
class SvhnProvider(DatasetProvider):
    """Dataset provider for SVHN; training data merges the 'train' and 'extra' splits."""
    def __init__(self, conf_dataset:Config):
        super().__init__(conf_dataset)
        # Resolve the configured dataset root to an absolute path.
        self._dataroot = utils.full_path(conf_dataset['dataroot'])
    @overrides
    def get_datasets(self, load_train:bool, load_test:bool,
                     transform_train, transform_test)->TrainTestDatasets:
        """Return (trainset, testset); either may be None if not requested."""
        trainset, testset = None, None
        if load_train:
            trainset = torchvision.datasets.SVHN(root=self._dataroot, split='train',
                                                 download=True, transform=transform_train)
            extraset = torchvision.datasets.SVHN(root=self._dataroot, split='extra',
                                                 download=True, transform=transform_train)
            # Combine the core train split with the larger 'extra' split.
            trainset = ConcatDataset([trainset, extraset])
        if load_test:
            testset = torchvision.datasets.SVHN(root=self._dataroot, split='test',
                                                download=True, transform=transform_test)
        return trainset, testset
    @overrides
    def get_transforms(self)->tuple:
        """Return (train_transform, test_transform) pipelines."""
        # NOTE(review): these are the standard CIFAR-10 normalization stats,
        # not SVHN-specific ones — confirm this is intended.
        MEAN = [0.4914, 0.4822, 0.4465]
        STD = [0.2023, 0.1994, 0.20100]
        transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
        normalize = [
            transforms.ToTensor(),
            transforms.Normalize(MEAN, STD)
        ]
        # Augmentation only on the training pipeline.
        train_transform = transforms.Compose(transf + normalize)
        test_transform = transforms.Compose(normalize)
        return train_transform, test_transform
register_dataset_provider('svhn', SvhnProvider)
| 35.525424
| 107
| 0.670324
|
from typing import List, Tuple, Union, Optional
from overrides import overrides, EnforceOverrides
from torch.utils.data.dataset import Dataset
import torchvision
from torchvision.transforms import transforms
from torch.utils.data import ConcatDataset
from archai.datasets.dataset_provider import DatasetProvider, register_dataset_provider, TrainTestDatasets
from archai.common.config import Config
from archai.common import utils
class SvhnProvider(DatasetProvider):
    """Dataset provider for SVHN; training data merges the 'train' and 'extra' splits."""
    def __init__(self, conf_dataset:Config):
        super().__init__(conf_dataset)
        # Resolve the configured dataset root to an absolute path.
        self._dataroot = utils.full_path(conf_dataset['dataroot'])
    @overrides
    def get_datasets(self, load_train:bool, load_test:bool,
                     transform_train, transform_test)->TrainTestDatasets:
        """Return (trainset, testset); either may be None if not requested."""
        trainset, testset = None, None
        if load_train:
            trainset = torchvision.datasets.SVHN(root=self._dataroot, split='train',
                                                 download=True, transform=transform_train)
            extraset = torchvision.datasets.SVHN(root=self._dataroot, split='extra',
                                                 download=True, transform=transform_train)
            trainset = ConcatDataset([trainset, extraset])
        if load_test:
            testset = torchvision.datasets.SVHN(root=self._dataroot, split='test',
                                                download=True, transform=transform_test)
        return trainset, testset
    @overrides
    def get_transforms(self)->tuple:
        """Return (train_transform, test_transform) pipelines."""
        # NOTE(review): standard CIFAR-10 normalization stats — confirm intended.
        MEAN = [0.4914, 0.4822, 0.4465]
        STD = [0.2023, 0.1994, 0.20100]
        transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
        normalize = [
            transforms.ToTensor(),
            transforms.Normalize(MEAN, STD)
        ]
        train_transform = transforms.Compose(transf + normalize)
        test_transform = transforms.Compose(normalize)
        return train_transform, test_transform
register_dataset_provider('svhn', SvhnProvider)
| true
| true
|
f708df00d9857cd302b7ec9775cbf7aa234f50ba
| 49,006
|
py
|
Python
|
test/test_resources_site_shared_credential.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
test/test_resources_site_shared_credential.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
test/test_resources_site_shared_credential.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. 
All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. 
| `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. 
In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. 
The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. 
| | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... 
``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. 
Allowed operations are returned and may be performed on the resource. | | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. 
| Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. 
The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. 
Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` 
| `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` 
` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | 
`cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | ----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. 
Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. 
The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. 
For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.resources_site_shared_credential import ResourcesSiteSharedCredential # noqa: E501
from swagger_client.rest import ApiException
class TestResourcesSiteSharedCredential(unittest.TestCase):
    """Unit test stubs for the ResourcesSiteSharedCredential model.

    Generated by swagger-codegen; the stubs are placeholders until real
    assertions are written against the model.
    """

    def setUp(self):
        """No fixtures are required by the current stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testResourcesSiteSharedCredential(self):
        """Stub test for ResourcesSiteSharedCredential construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.resources_site_shared_credential.ResourcesSiteSharedCredential()  # noqa: E501
        pass
# Allow the stubs to be run directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 1,195.268293
| 48,043
| 0.491593
|
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.resources_site_shared_credential import ResourcesSiteSharedCredential from swagger_client.rest import ApiException
class TestResourcesSiteSharedCredential(unittest.TestCase):
    # Unit test stubs for the ResourcesSiteSharedCredential model
    # (swagger-codegen output with its original comments stripped).

    def setUp(self):
        # No fixtures are needed by the current stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testResourcesSiteSharedCredential(self):
        # Stub: model construction is not exercised yet.
        pass


# Allow the stubs to be run directly from the command line.
if __name__ == '__main__':
    unittest.main()
| true
| true
|
f708df1e87b7c6f449fe4da943695469a8a37f7f
| 1,800
|
py
|
Python
|
setup.py
|
coderfi/rets
|
6d1a23d0356e41fbeaf5edeb5d40b516a9946a07
|
[
"MIT"
] | null | null | null |
setup.py
|
coderfi/rets
|
6d1a23d0356e41fbeaf5edeb5d40b516a9946a07
|
[
"MIT"
] | null | null | null |
setup.py
|
coderfi/rets
|
6d1a23d0356e41fbeaf5edeb5d40b516a9946a07
|
[
"MIT"
] | null | null | null |
"""Packaging script for the rets-python distribution."""
import sys
from setuptools import setup

# The client relies on Python 3.5+ syntax and stdlib; refuse older runtimes.
if sys.version_info < (3, 5):
    print('rets requires Python 3.5 or later')
    sys.exit(1)

LONG_DESC = 'Python 3 client for the Real Estate Transaction Standard (RETS) Version 1.7.2'

# Runtime dependencies.
INSTALL_REQUIRES = [
    'requests>=2.12.3',
    'requests-toolbelt>=0.7.0,!=0.9.0',
    'udatetime==0.0.16',
    'docopts',
    'lxml>=4.3.0',
]

# Needed while running setup.py itself (wires `setup.py test` to pytest).
SETUP_REQUIRES = [
    'pytest-runner',
]

# Used only by the test suite.
TESTS_REQUIRES = [
    'flake8',
    'pytest',
]

# Python packages shipped in the distribution.
PACKAGES = [
    'rets',
    'rets.client',
    'rets.http',
    'rets.http.parsers',
]

setup(
    name='rets-python',
    version='0.4.2',
    description='rets-python',
    long_description=LONG_DESC,
    author='Martin Liu',
    author_email='martin@opendoor.com',
    url='https://github.com/opendoor-labs/rets',
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Financial and Insurance Industry',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Other Audience',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
    ],
    license='MIT License',
    install_requires=INSTALL_REQUIRES,
    setup_requires=SETUP_REQUIRES,
    tests_require=TESTS_REQUIRES,
    packages=PACKAGES,
)
| 26.470588
| 91
| 0.618889
|
# Packaging script for the rets-python distribution
# (comment-stripped duplicate of the setup.py above in this dump).
import sys
from setuptools import setup

# The client relies on Python 3.5+ syntax and stdlib; refuse older runtimes.
if sys.version_info < (3, 5):
    print('rets requires Python 3.5 or later')
    sys.exit(1)

long_desc = 'Python 3 client for the Real Estate Transaction Standard (RETS) Version 1.7.2'

# Runtime dependencies.
install_requires = [
    'requests>=2.12.3',
    'requests-toolbelt>=0.7.0,!=0.9.0',
    'udatetime==0.0.16',
    'docopts',
    'lxml>=4.3.0',
]

# Needed while running setup.py itself (wires `setup.py test` to pytest).
setup_requires = [
    'pytest-runner',
]

# Used only by the test suite.
tests_requires = [
    'flake8',
    'pytest',
]

# Python packages shipped in the distribution.
packages = [
    'rets',
    'rets.client',
    'rets.http',
    'rets.http.parsers',
]

setup(
    name='rets-python',
    version='0.4.2',
    description='rets-python',
    long_description=long_desc,
    author='Martin Liu',
    author_email='martin@opendoor.com',
    url='https://github.com/opendoor-labs/rets',
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Financial and Insurance Industry',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Other Audience',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
    ],
    license='MIT License',
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_requires,
    packages=packages,
)
| true
| true
|
f708e019c4c06af383eb73f2545dc68cbdeaf8c3
| 6,534
|
py
|
Python
|
src/services/stream/crunchyroll.py
|
flipstables/holo
|
4e86ce74172318ab179fede29d849e34e92c7b0b
|
[
"MIT"
] | 102
|
2016-01-07T21:54:42.000Z
|
2022-01-17T02:03:05.000Z
|
src/services/stream/crunchyroll.py
|
flipstables/holo
|
4e86ce74172318ab179fede29d849e34e92c7b0b
|
[
"MIT"
] | 25
|
2016-01-10T11:46:40.000Z
|
2018-06-19T14:47:04.000Z
|
src/services/stream/crunchyroll.py
|
flipstables/holo
|
4e86ce74172318ab179fede29d849e34e92c7b0b
|
[
"MIT"
] | 36
|
2016-02-18T17:37:17.000Z
|
2019-02-24T02:01:40.000Z
|
from logging import debug, info, warning, error, exception
import re
from datetime import datetime, timedelta
from .. import AbstractServiceHandler
from data.models import Episode, UnprocessedStream
class ServiceHandler(AbstractServiceHandler):
	"""Crunchyroll stream service handler.

	Finds new episodes via per-show RSS feeds and scrapes the lineup page
	for seasonal show listings.
	"""

	_show_url = "http://crunchyroll.com/{id}"
	# Raw string and escaped dot: only match the literal crunchyroll.com domain.
	_show_re = re.compile(r"crunchyroll\.com/([\w-]+)", re.I)
	_episode_rss = "http://crunchyroll.com/{id}.rss"
	_backup_rss = "http://crunchyroll.com/rss/anime"
	_season_url = "http://crunchyroll.com/lineup"

	def __init__(self):
		super().__init__("crunchyroll", "Crunchyroll", False)

	# Episode finding

	def get_all_episodes(self, stream, **kwargs):
		"""Return the list of valid, recent Episodes for the given stream."""
		info("Getting live episodes for Crunchyroll/{}".format(stream.show_key))
		episode_datas = self._get_feed_episodes(stream.show_key, **kwargs)

		# Check data validity and digest; a single bad entry must not
		# abort processing of the remaining entries.
		episodes = []
		for episode_data in episode_datas:
			if _is_valid_episode(episode_data, stream.show_key):
				try:
					episodes.append(_digest_episode(episode_data))
				except Exception:
					# Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
					exception("Problem digesting episode for Crunchyroll/{}".format(stream.show_key))

		if len(episode_datas) > 0:
			debug("  {} episodes found, {} valid".format(len(episode_datas), len(episodes)))
		else:
			debug("  No episodes found")
		return episodes

	def _get_feed_episodes(self, show_key, **kwargs):
		"""Fetch and parse the RSS feed for show_key.

		Always returns a list (empty on request failure).
		"""
		info("Getting episodes for Crunchyroll/{}".format(show_key))
		url = self._get_feed_url(show_key)

		# Send request
		response = self.request(url, rss=True, **kwargs)
		if response is None:
			error("Cannot get latest show for Crunchyroll/{}".format(show_key))
			return list()

		# Parse RSS feed; a failed verification is best-effort, not fatal.
		if not _verify_feed(response):
			warning("Parsed feed could not be verified, may have unexpected results")
		return response.get("entries", list())

	@classmethod
	def _get_feed_url(cls, show_key):
		"""Return the per-show RSS URL, or the global backup feed.

		Some shows don't have their own RSS feed; those use the backup feed.
		"""
		if show_key is not None:
			return cls._episode_rss.format(id=show_key)
		else:
			debug("  Using backup feed")
			return cls._backup_rss

	# Remote info getting

	_title_fix = re.compile("(.*) Episodes", re.I)

	def get_stream_info(self, stream, **kwargs):
		"""Populate stream.name from the feed title; return the stream, or None on failure."""
		info("Getting stream info for Crunchyroll/{}".format(stream.show_key))
		url = self._get_feed_url(stream.show_key)
		response = self.request(url, rss=True, **kwargs)
		if response is None:
			error("Cannot get feed")
			return None
		if not _verify_feed(response):
			warning("Parsed feed could not be verified, may have unexpected results")

		stream.name = response.feed.title
		# Feed titles look like "<Show Name> Episodes"; strip the suffix.
		match = self._title_fix.match(stream.name)
		if match:
			stream.name = match.group(1)
		return stream

	def get_seasonal_streams(self, **kwargs):
		"""Scrape the lineup page and return UnprocessedStreams for listed shows."""
		debug("Getting season shows")
		# Request page
		response = self.request(self._season_url, html=True, **kwargs)
		if response is None:
			error("Failed to get seasonal streams page")
			return list()

		# Find sections (continuing simulcast, new simulcast, new catalog)
		lists = response.find_all(class_="lineup-grid")
		if len(lists) < 2:
			error("Unsupported structure of lineup page")
			return list()
		elif len(lists) > 3:
			# BUG FIX: the old condition re-tested len(lists) < 2, which is
			# unreachable after the guard above; only the upper bound matters.
			warning("Unexpected number of lineup grids")

		# Parse individual shows
		# WARNING: Some may be dramas and there's nothing distinguishing them from anime
		show_elements = lists[1].find_all(class_="element-lineup-anime")
		raw_streams = list()
		for show in show_elements:
			title = show["title"]
			if "to be announced" not in title.lower():
				debug("  Show: {}".format(title))
				url = show["href"]
				debug("    URL: {}".format(url))
				url_match = self._show_re.search(url)
				if not url_match:
					error("Failed to parse show URL: {}".format(url))
					continue
				key = url_match.group(1)
				debug("    Key: {}".format(key))
				remote_offset, display_offset = self._get_stream_info(key)

				raw_stream = UnprocessedStream(self.key, key, None, title, remote_offset, display_offset)
				raw_streams.append(raw_stream)

		return raw_streams

	def _get_stream_info(self, show_key):
		#TODO: load show page and figure out offsets based on contents
		return 0, 0

	# Local info formatting

	def get_stream_link(self, stream):
		# Just going to assume it's the correct service
		return self._show_url.format(id=stream.show_key)

	def extract_show_key(self, url):
		"""Return the show key parsed from a Crunchyroll URL, or None."""
		match = self._show_re.search(url)
		if match:
			return match.group(1)
		return None
# Episode feeds
def _verify_feed(feed):
	"""Sanity-check a parsed Crunchyroll RSS feed; True when it looks usable."""
	debug("Verifying feed")
	# feedparser sets bozo when the XML was not well-formed.
	if feed.bozo:
		debug("  Feed was malformed")
		return False
	# The Crunchyroll namespace must be declared with the expected URI.
	cr_namespace = feed.namespaces.get("crunchyroll")
	if cr_namespace != "http://www.crunchyroll.com/rss":
		debug("  Crunchyroll namespace not found or invalid")
		return False
	# Only the US-English feed layout is supported.
	if feed.feed.language != "en-us":
		debug("  Language not en-us")
		return False
	debug("  Feed verified")
	return True
def _is_valid_episode(feed_episode, show_id):
	"""Filter feed entries down to fresh, real episodes of the given show."""
	# Clips/PVs/interviews either carry the isclip flag or lack an episode number.
	is_clip = feed_episode.get("crunchyroll_isclip", False)
	numbered = hasattr(feed_episode, "crunchyroll_episodenumber")
	if is_clip or not numbered:
		debug("Is PV, ignoring")
		return False
	# Sanity check: the entry's URL must belong to the expected show.
	if show_id != _get_slug(feed_episode.link):
		debug("Wrong ID")
		return False
	# Skip entries published two or more days ago.
	published = datetime(*feed_episode.published_parsed[:6])
	if datetime.utcnow() - published >= timedelta(days=2):
		debug("  Episode too old")
		return False
	return True
_episode_name_correct = re.compile("Episode \d+ - (.*)")
_episode_count_fix = re.compile("([0-9]+)[abc]?", re.I)

def _digest_episode(feed_episode):
	"""Convert one Crunchyroll feed entry into an Episode model."""
	debug("Digesting episode")

	# Episode number: keep the leading digits; "7a"/"7b"-style parts collapse to 7.
	raw_number = feed_episode.crunchyroll_episodenumber
	number_match = _episode_count_fix.match(raw_number)
	if number_match is None:
		warning("Unknown episode number format \"{}\"".format(raw_number))
		number = 0
	else:
		number = int(number_match.group(1))
	debug("  num={}".format(number))

	# Title: strip a redundant "Episode N - " prefix when present.
	title = feed_episode.title
	title_match = _episode_name_correct.match(title)
	if title_match:
		debug("  Corrected title from \"{}\"".format(title))
		title = title_match.group(1)
	debug("  name={}".format(title))

	link = feed_episode.link
	debug("  link={}".format(link))
	date = feed_episode.published_parsed
	debug("  date={}".format(date))

	return Episode(number, title, link, date)
_slug_regex = re.compile("crunchyroll.com/([a-z0-9-]+)/", re.I)

def _get_slug(episode_link):
	"""Extract the show slug from an episode URL, or None when absent."""
	found = _slug_regex.search(episode_link)
	return found.group(1) if found else None
# Season page
| 30.25
| 111
| 0.712274
|
from logging import debug, info, warning, error, exception
import re
from datetime import datetime, timedelta
from .. import AbstractServiceHandler
from data.models import Episode, UnprocessedStream
class ServiceHandler(AbstractServiceHandler):
_show_url = "http://crunchyroll.com/{id}"
_show_re = re.compile("crunchyroll.com/([\w-]+)", re.I)
_episode_rss = "http://crunchyroll.com/{id}.rss"
_backup_rss = "http://crunchyroll.com/rss/anime"
_season_url = "http://crunchyroll.com/lineup"
def __init__(self):
super().__init__("crunchyroll", "Crunchyroll", False)
def get_all_episodes(self, stream, **kwargs):
info("Getting live episodes for Crunchyroll/{}".format(stream.show_key))
episode_datas = self._get_feed_episodes(stream.show_key, **kwargs)
episodes = []
for episode_data in episode_datas:
if _is_valid_episode(episode_data, stream.show_key):
try:
episodes.append(_digest_episode(episode_data))
except:
exception("Problem digesting episode for Crunchyroll/{}".format(stream.show_key))
if len(episode_datas) > 0:
debug(" {} episodes found, {} valid".format(len(episode_datas), len(episodes)))
else:
debug(" No episodes found")
return episodes
def _get_feed_episodes(self, show_key, **kwargs):
info("Getting episodes for Crunchyroll/{}".format(show_key))
url = self._get_feed_url(show_key)
response = self.request(url, rss=True, **kwargs)
if response is None:
error("Cannot get latest show for Crunchyroll/{}".format(show_key))
return list()
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
return response.get("entries", list())
@classmethod
def _get_feed_url(cls, show_key):
# Use the backup global feed when it doesn't
if show_key is not None:
return cls._episode_rss.format(id=show_key)
else:
debug(" Using backup feed")
return cls._backup_rss
_title_fix = re.compile("(.*) Episodes", re.I)
def get_stream_info(self, stream, **kwargs):
info("Getting stream info for Crunchyroll/{}".format(stream.show_key))
url = self._get_feed_url(stream.show_key)
response = self.request(url, rss=True, **kwargs)
if response is None:
error("Cannot get feed")
return None
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
stream.name = response.feed.title
match = self._title_fix.match(stream.name)
if match:
stream.name = match.group(1)
return stream
def get_seasonal_streams(self, **kwargs):
debug("Getting season shows")
response = self.request(self._season_url, html=True, **kwargs)
if response is None:
error("Failed to get seasonal streams page")
return list()
lists = response.find_all(class_="lineup-grid")
if len(lists) < 2:
error("Unsupported structure of lineup page")
return list()
elif len(lists) < 2 or len(lists) > 3:
warning("Unexpected number of lineup grids")
show_elements = lists[1].find_all(class_="element-lineup-anime")
raw_streams = list()
for show in show_elements:
title = show["title"]
if "to be announced" not in title.lower():
debug(" Show: {}".format(title))
url = show["href"]
debug(" URL: {}".format(url))
url_match = self._show_re.search(url)
if not url_match:
error("Failed to parse show URL: {}".format(url))
continue
key = url_match.group(1)
debug(" Key: {}".format(key))
remote_offset, display_offset = self._get_stream_info(key)
raw_stream = UnprocessedStream(self.key, key, None, title, remote_offset, display_offset)
raw_streams.append(raw_stream)
return raw_streams
def _get_stream_info(self, show_key):
#TODO: load show page and figure out offsets based on contents
return 0, 0
# Local info formatting
def get_stream_link(self, stream):
# Just going to assume it's the correct service
return self._show_url.format(id=stream.show_key)
def extract_show_key(self, url):
match = self._show_re.search(url)
if match:
return match.group(1)
return None
def _verify_feed(feed):
debug("Verifying feed")
if feed.bozo:
debug(" Feed was malformed")
return False
if "crunchyroll" not in feed.namespaces or feed.namespaces["crunchyroll"] != "http://www.crunchyroll.com/rss":
debug(" Crunchyroll namespace not found or invalid")
return False
if feed.feed.language != "en-us":
debug(" Language not en-us")
return False
debug(" Feed verified")
return True
def _is_valid_episode(feed_episode, show_id):
if feed_episode.get("crunchyroll_isclip", False) or not hasattr(feed_episode, "crunchyroll_episodenumber"):
debug("Is PV, ignoring")
return False
# Sanity check
if _get_slug(feed_episode.link) != show_id:
debug("Wrong ID")
return False
# Don't check really old episodes
episode_date = datetime(*feed_episode.published_parsed[:6])
date_diff = datetime.utcnow() - episode_date
if date_diff >= timedelta(days=2):
debug(" Episode too old")
return False
return True
_episode_name_correct = re.compile("Episode \d+ - (.*)")
_episode_count_fix = re.compile("([0-9]+)[abc]?", re.I)
def _digest_episode(feed_episode):
debug("Digesting episode")
num_match = _episode_count_fix.match(feed_episode.crunchyroll_episodenumber)
if num_match:
num = int(num_match.group(1))
else:
warning("Unknown episode number format \"{}\"".format(feed_episode.crunchyroll_episodenumber))
num = 0
debug(" num={}".format(num))
name = feed_episode.title
match = _episode_name_correct.match(name)
if match:
debug(" Corrected title from \"{}\"".format(name))
name = match.group(1)
debug(" name={}".format(name))
link = feed_episode.link
debug(" link={}".format(link))
date = feed_episode.published_parsed
debug(" date={}".format(date))
return Episode(num, name, link, date)
_slug_regex = re.compile("crunchyroll.com/([a-z0-9-]+)/", re.I)
def _get_slug(episode_link):
match = _slug_regex.search(episode_link)
if match:
return match.group(1)
return None
| true
| true
|
f708e062dee09bdd223cf03577105bdf406b13fd
| 1,386
|
py
|
Python
|
var/spack/repos/builtin/packages/r-multcomp/package.py
|
kehw/spack
|
4f49b1a9301447a8cf880c99820cad65e5c2d7e3
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-09-10T22:50:08.000Z
|
2021-01-12T22:18:54.000Z
|
var/spack/repos/builtin/packages/r-multcomp/package.py
|
kehw/spack
|
4f49b1a9301447a8cf880c99820cad65e5c2d7e3
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2021-01-08T22:23:53.000Z
|
2022-03-30T11:08:17.000Z
|
var/spack/repos/builtin/packages/r-multcomp/package.py
|
kehw/spack
|
4f49b1a9301447a8cf880c99820cad65e5c2d7e3
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMultcomp(RPackage):
    """Simultaneous tests and confidence intervals for general linear
    hypotheses in parametric models, including linear, generalized linear,
    linear mixed effects, and survival models. The package includes demos
    reproducing analyzes presented in the book "Multiple Comparisons Using R"
    (Bretz, Hothorn, Westfall, 2010, CRC Press)."""

    # CRAN project page, source tarball, and archive of older releases.
    homepage = "http://multcomp.r-forge.r-project.org/"
    url      = "https://cloud.r-project.org/src/contrib/multcomp_1.4-6.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/multcomp"

    # Known versions, newest first, each pinned by its tarball checksum.
    version('1.4-10', sha256='29bcc635c0262e304551b139cd9ee655ab25a908d9693e1cacabfc2a936df5cf')
    version('1.4-8', sha256='a20876619312310e9523d67e9090af501383ce49dc6113c6b4ca30f9c943a73a')
    version('1.4-6', sha256='fe9efbe671416a49819cbdb9137cc218faebcd76e0f170fd1c8d3c84c42eeda2')

    # R package dependencies (minimum versions), needed at build and run time.
    depends_on('r-mvtnorm@1.0-10:', type=('build', 'run'))
    depends_on('r-survival@2.39-4:', type=('build', 'run'))
    depends_on('r-th-data@1.0-2:', type=('build', 'run'))
    depends_on('r-sandwich@2.3-0:', type=('build', 'run'))
    depends_on('r-codetools', type=('build', 'run'))
| 47.793103
| 96
| 0.727273
|
from spack import *
class RMultcomp(RPackage):
homepage = "http://multcomp.r-forge.r-project.org/"
url = "https://cloud.r-project.org/src/contrib/multcomp_1.4-6.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/multcomp"
version('1.4-10', sha256='29bcc635c0262e304551b139cd9ee655ab25a908d9693e1cacabfc2a936df5cf')
version('1.4-8', sha256='a20876619312310e9523d67e9090af501383ce49dc6113c6b4ca30f9c943a73a')
version('1.4-6', sha256='fe9efbe671416a49819cbdb9137cc218faebcd76e0f170fd1c8d3c84c42eeda2')
depends_on('r-mvtnorm@1.0-10:', type=('build', 'run'))
depends_on('r-survival@2.39-4:', type=('build', 'run'))
depends_on('r-th-data@1.0-2:', type=('build', 'run'))
depends_on('r-sandwich@2.3-0:', type=('build', 'run'))
depends_on('r-codetools', type=('build', 'run'))
| true
| true
|
f708e0a2f1d6f3fbaa539ae288ad1af3bf9feb80
| 16,474
|
py
|
Python
|
model-optimizer/extensions/middle/Reduce_test.py
|
shinh/dldt
|
693ab4e79a428e0801f17f4511b129a3fa8f4a62
|
[
"Apache-2.0"
] | 1
|
2021-02-20T21:48:36.000Z
|
2021-02-20T21:48:36.000Z
|
model-optimizer/extensions/middle/Reduce_test.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/middle/Reduce_test.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | 1
|
2018-12-14T07:52:51.000Z
|
2018-12-14T07:52:51.000Z
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.middle.Reduce import ReduceReplacer
from mo.middle.passes.eliminate_test import build_graph
from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the
# dictionary with node attributes.
nodes_attributes = {
    # Placeholder layers
    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    # Reduce layer (the node under test)
    'reduce_1': {'type': 'Reduce', 'kind': 'op', 'op': 'Reduce'},
    'reduce_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Reshape layers
    'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
    'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
    'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Pooling
    'pooling': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling'},
    'pooling_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Power
    'power': {'type': 'Power', 'kind': 'op', 'op': 'Power'},
    'power_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Concat
    'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
}
class ReduceReplacerTest(unittest.TestCase):
    """Tests for ReduceReplacer.

    Each case checks that a Reduce node is rewritten into an equivalent
    Reshape -> Pooling -> Reshape chain (plus a Power scale node for Sum
    reductions). The build/apply/compare scaffold that was duplicated in
    every test is factored into two private helpers.
    """

    # Input graph topology shared by every case: data -> Reduce -> data -> Concat.
    _REDUCE_EDGES = [
        ('placeholder_1_data', 'reduce_1'),
        ('reduce_1', 'reduce_1_data'),
        ('reduce_1_data', 'concat'),
    ]

    # Expected topology after replacement: Reshape -> Pooling -> Reshape -> Concat.
    _REF_EDGES = [
        ('placeholder_1_data', 'reshape_1'),
        ('reshape_1', 'reshape_1_data'),
        ('reshape_1_data', 'pooling'),
        ('pooling', 'pooling_data'),
        ('pooling_data', 'reshape_2'),
        ('reshape_2', 'reshape_2_data'),
        ('reshape_2_data', 'concat'),
    ]

    # Sum reductions additionally get a Power node before the Concat.
    _REF_EDGES_WITH_POWER = _REF_EDGES[:-1] + [
        ('reshape_2_data', 'power'),
        ('power', 'power_data'),
        ('power_data', 'concat'),
    ]

    def _reduce_graph(self, in_shape, axis, keep_dims, reduce_type, out_shape):
        """Build the NCHW input graph holding a single Reduce node."""
        graph = build_graph(nodes_attributes,
                            self._REDUCE_EDGES,
                            {'placeholder_1_data': {'shape': np.array(in_shape)},
                             'reduce_1': {'axis': np.array(axis), 'keep_dims': keep_dims,
                                          'reduce_type': reduce_type},
                             'reduce_1_data': {'shape': np.array(out_shape)},
                             }, nodes_with_edges_only=True)
        graph.graph['layout'] = 'NCHW'
        return graph

    def _assert_replaced(self, graph, ref_edges, ref_attrs):
        """Apply ReduceReplacer to graph and compare it against the reference graph."""
        graph_ref = build_graph(nodes_attributes, ref_edges, ref_attrs,
                                nodes_with_edges_only=True)
        ReduceReplacer().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
        self.assertTrue(flag, resp)

    def test1(self):
        # Mean over axis 1, keep_dims: (1,64,1) -> (1,1,1)
        # via Reshape(1,1,64,1) -> Pool -> Reshape(1,1,1).
        graph = self._reduce_graph([1, 64, 1], [1], True, 'Mean', [1, 1, 1])
        self._assert_replaced(graph, self._REF_EDGES,
                              {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
                               'reshape_1': {'dim': np.array([1, 1, 64, 1])},
                               'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
                               'pooling': {'window': np.array([1, 1, 64, 1])},
                               'pooling_data': {'shape': np.array([1, 1, 1, 1])},
                               'reshape_2': {'dim': np.array([1, 1, 1])},
                               'reshape_2_data': {'shape': np.array([1, 1, 1])},
                               })

    def test2(self):
        # Mean over axis 2, keep_dims: (1,3,64,64) -> (1,3,1,64).
        graph = self._reduce_graph([1, 3, 64, 64], [2], True, 'Mean', [1, 3, 1, 64])
        self._assert_replaced(graph, self._REF_EDGES,
                              {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
                               'reshape_1': {'dim': np.array([1, 3, 64, 64])},
                               'reshape_1_data': {'shape': np.array([1, 3, 64, 64])},
                               'pooling': {'window': np.array([1, 1, 64, 1])},
                               'pooling_data': {'shape': np.array([1, 3, 1, 64])},
                               'reshape_2': {'dim': np.array([1, 3, 1, 64])},
                               'reshape_2_data': {'shape': np.array([1, 3, 1, 64])},
                               })

    def test3(self):
        # Mean over axes [2,3], keep_dims: (1,3,64,64) -> (1,3,1,1);
        # the two reduced axes are flattened into one pooled dimension.
        graph = self._reduce_graph([1, 3, 64, 64], [2, 3], True, 'Mean', [1, 3, 1, 1])
        self._assert_replaced(graph, self._REF_EDGES,
                              {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
                               'reshape_1': {'dim': np.array([1, 3, 64 * 64, 1])},
                               'reshape_1_data': {'shape': np.array([1, 3, 64 * 64, 1])},
                               'pooling': {'window': np.array([1, 1, 64 * 64, 1])},
                               'pooling_data': {'shape': np.array([1, 3, 1, 1])},
                               'reshape_2': {'dim': np.array([1, 3, 1, 1])},
                               'reshape_2_data': {'shape': np.array([1, 3, 1, 1])},
                               })

    def test4(self):
        # Mean over axes [1,2,3] with keep_dims=False: (2,3,64,64) -> (2,).
        graph = self._reduce_graph([2, 3, 64, 64], [1, 2, 3], False, 'Mean', [2])
        self._assert_replaced(graph, self._REF_EDGES,
                              {'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},
                               'reshape_1': {'dim': np.array([2, 1, 3 * 64 * 64, 1])},
                               'reshape_1_data': {'shape': np.array([2, 1, 3 * 64 * 64, 1])},
                               'pooling': {'window': np.array([1, 1, 3 * 64 * 64, 1])},
                               'pooling_data': {'shape': np.array([2, 1, 1, 1])},
                               'reshape_2': {'dim': np.array([2])},
                               'reshape_2_data': {'shape': np.array([2])},
                               })

    def test5(self):
        # Max over the last axis of a 6D input with keep_dims=False:
        # (1,16,64,64,64,4) -> (1,16,64,64,64).
        graph = self._reduce_graph([1, 16, 64, 64, 64, 4], [5], False, 'max',
                                   [1, 16, 64, 64, 64])
        self._assert_replaced(graph, self._REF_EDGES,
                              {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},
                               'reshape_1': {'dim': np.array([65536, 64, 4, 1])},
                               'reshape_1_data': {'shape': np.array([65536, 64, 4, 1])},
                               'pooling': {'window': np.array([1, 1, 4, 1])},
                               'pooling_data': {'shape': np.array([65536, 64, 1, 1])},
                               'reshape_2': {'dim': np.array([1, 16, 64, 64, 64])},
                               'reshape_2_data': {'shape': np.array([1, 16, 64, 64, 64])},
                               })

    def test6(self):
        # Sum over axis -2, keep_dims: like test1 but the mean pooling is
        # followed by Power(scale=64) to recover the sum.
        graph = self._reduce_graph([1, 64, 1], [-2], True, 'Sum', [1, 1, 1])
        self._assert_replaced(graph, self._REF_EDGES_WITH_POWER,
                              {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
                               'reshape_1': {'dim': np.array([1, 1, 64, 1])},
                               'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
                               'pooling': {'window': np.array([1, 1, 64, 1])},
                               'pooling_data': {'shape': np.array([1, 1, 1, 1])},
                               'reshape_2': {'dim': np.array([1, 1, 1])},
                               'reshape_2_data': {'shape': np.array([1, 1, 1])},
                               'power': {'scale': 64.0},
                               'power_data': {'shape': np.array([1, 1, 1])},
                               })
| 51.320872
| 119
| 0.440452
|
import unittest
import numpy as np
from extensions.middle.Reduce import ReduceReplacer
from mo.middle.passes.eliminate_test import build_graph
from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
nodes_attributes = {
'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'reduce_1': {'type': 'Reduce', 'kind': 'op', 'op': 'Reduce'},
'reduce_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'pooling': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling'},
'pooling_data': {'value': None, 'shape': None, 'kind': 'data'},
'power': {'type': 'Power', 'kind': 'op', 'op': 'Power'},
'power_data': {'value': None, 'shape': None, 'kind': 'data'},
'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
}
class ReduceReplacerTest(unittest.TestCase):
def test1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 64, 1])},
'reduce_1': {'axis': np.array([1]), 'keep_dims': True, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([1, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 64, 1])},
'reshape_1': {'dim': np.array([1, 1, 64, 1])},
'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
'pooling': {'window': np.array([1, 1, 64, 1])},
'pooling_data': {'shape': np.array([1, 1, 1, 1])},
'reshape_2': {'dim': np.array([1, 1, 1])},
'reshape_2_data': {'shape': np.array([1, 1, 1])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reduce_1': {'axis': np.array([2]), 'keep_dims': True, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([1, 3, 1, 64])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reshape_1': {'dim': np.array([1, 3, 64, 64])},
'reshape_1_data': {'shape': np.array([1, 3, 64, 64])},
'pooling': {'window': np.array([1, 1, 64, 1])},
'pooling_data': {'shape': np.array([1, 3, 1, 64])},
'reshape_2': {'dim': np.array([1, 3, 1, 64])},
'reshape_2_data': {'shape': np.array([1, 3, 1, 64])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test3(self):
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reduce_1': {'axis': np.array([2, 3]), 'keep_dims': True, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([1, 3, 1, 1])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
'reshape_1': {'dim': np.array([1, 3, 64 * 64, 1])},
'reshape_1_data': {'shape': np.array([1, 3, 64 * 64, 1])},
'pooling': {'window': np.array([1, 1, 64 * 64, 1])},
'pooling_data': {'shape': np.array([1, 3, 1, 1])},
'reshape_2': {'dim': np.array([1, 3, 1, 1])},
'reshape_2_data': {'shape': np.array([1, 3, 1, 1])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
def test4(self):
graph = build_graph(nodes_attributes,
[('placeholder_1_data', 'reduce_1'),
('reduce_1', 'reduce_1_data'),
('reduce_1_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},
'reduce_1': {'axis': np.array([1, 2, 3]), 'keep_dims': False, 'reduce_type': 'Mean'},
'reduce_1_data': {'shape': np.array([2])},
}, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'pooling'),
('pooling', 'pooling_data'),
('pooling_data', 'reshape_2'),
('reshape_2', 'reshape_2_data'),
('reshape_2_data', 'concat'),
],
{'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},
'reshape_1': {'dim': np.array([2, 1, 3 * 64 * 64, 1])},
'reshape_1_data': {'shape': np.array([2, 1, 3 * 64 * 64, 1])},
'pooling': {'window': np.array([1, 1, 3 * 64 * 64, 1])},
'pooling_data': {'shape': np.array([2, 1, 1, 1])},
'reshape_2': {'dim': np.array([2])},
'reshape_2_data': {'shape': np.array([2])},
}, nodes_with_edges_only=True)
pattern = ReduceReplacer()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
self.assertTrue(flag, resp)
    def test5(self):
        """Reduce(max) over the last axis of a 6-D input must be replaced by a
        reshape that merges the leading dims ([65536, 64, 4, 1]) -> pooling
        over the reduced axis -> reshape to the squeezed 5-D output."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1_data', 'reduce_1'),
                             ('reduce_1', 'reduce_1_data'),
                             ('reduce_1_data', 'concat'),
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},
                             'reduce_1': {'axis': np.array([5]), 'keep_dims': False, 'reduce_type': 'max'},
                             'reduce_1_data': {'shape': np.array([1, 16, 64, 64, 64])},
                             }, nodes_with_edges_only=True)
        graph.graph['layout'] = 'NCHW'
        # Reference graph: 1*16*64*64 = 65536 leading elements are folded into
        # the first reshape dim so pooling can run on a 4-D tensor.
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1_data', 'reshape_1'),
                                 ('reshape_1', 'reshape_1_data'),
                                 ('reshape_1_data', 'pooling'),
                                 ('pooling', 'pooling_data'),
                                 ('pooling_data', 'reshape_2'),
                                 ('reshape_2', 'reshape_2_data'),
                                 ('reshape_2_data', 'concat'),
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},
                                 'reshape_1': {'dim': np.array([65536, 64, 4, 1])},
                                 'reshape_1_data': {'shape': np.array([65536, 64, 4, 1])},
                                 'pooling': {'window': np.array([1, 1, 4, 1])},
                                 'pooling_data': {'shape': np.array([65536, 64, 1, 1])},
                                 'reshape_2': {'dim': np.array([1, 16, 64, 64, 64])},
                                 'reshape_2_data': {'shape': np.array([1, 16, 64, 64, 64])},
                                 }, nodes_with_edges_only=True)
        pattern = ReduceReplacer()
        pattern.find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
        self.assertTrue(flag, resp)
    def test6(self):
        """Reduce(Sum) with a negative axis and keep_dims=True must become
        reshape -> pooling -> reshape -> Power(scale=64). The trailing Power
        rescales the pooled value by the window size 64 — presumably because
        the pooling is an average, so Sum = avg * N (TODO confirm pool type)."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1_data', 'reduce_1'),
                             ('reduce_1', 'reduce_1_data'),
                             ('reduce_1_data', 'concat'),
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
                             'reduce_1': {'axis': np.array([-2]), 'keep_dims': True, 'reduce_type': 'Sum'},
                             'reduce_1_data': {'shape': np.array([1, 1, 1])},
                             }, nodes_with_edges_only=True)
        graph.graph['layout'] = 'NCHW'
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1_data', 'reshape_1'),
                                 ('reshape_1', 'reshape_1_data'),
                                 ('reshape_1_data', 'pooling'),
                                 ('pooling', 'pooling_data'),
                                 ('pooling_data', 'reshape_2'),
                                 ('reshape_2', 'reshape_2_data'),
                                 ('reshape_2_data', 'power'),
                                 ('power', 'power_data'),
                                 ('power_data', 'concat'),
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
                                 'reshape_1': {'dim': np.array([1, 1, 64, 1])},
                                 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
                                 'pooling': {'window': np.array([1, 1, 64, 1])},
                                 'pooling_data': {'shape': np.array([1, 1, 1, 1])},
                                 'reshape_2': {'dim': np.array([1, 1, 1])},
                                 'reshape_2_data': {'shape': np.array([1, 1, 1])},
                                 'power': {'scale': 64.0},
                                 'power_data': {'shape': np.array([1, 1, 1])},
                                 }, nodes_with_edges_only=True)
        pattern = ReduceReplacer()
        pattern.find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
        self.assertTrue(flag, resp)
| true
| true
|
f708e2172ca4f1c9b05cfd9786cb97914a8c95b9
| 110
|
py
|
Python
|
app/services/storage_service.py
|
jubbp/maker-hub
|
93bde3bcde7869c8454613061c50c0dcb3d2f573
|
[
"MIT"
] | 4
|
2021-09-28T04:55:16.000Z
|
2021-12-11T03:33:01.000Z
|
app/services/storage_service.py
|
jubbp/maker-hub
|
93bde3bcde7869c8454613061c50c0dcb3d2f573
|
[
"MIT"
] | 92
|
2021-03-18T07:26:43.000Z
|
2022-03-29T21:25:38.000Z
|
app/services/storage_service.py
|
jubbp/maker-hub
|
93bde3bcde7869c8454613061c50c0dcb3d2f573
|
[
"MIT"
] | 4
|
2021-02-27T16:31:41.000Z
|
2021-07-25T02:20:09.000Z
|
async def get_location_count() -> int:
    """Return the total number of storage locations (stub: fixed demo value)."""
    total = 234
    return total


async def get_locations_used() -> int:
    """Return the number of storage locations in use (stub: fixed demo value)."""
    used = 230
    return used
| 15.714286
| 38
| 0.690909
|
async def get_location_count() -> int:
return 234
async def get_locations_used() -> int:
return 230
| true
| true
|
f708e25637b6e36499a5b132a0d4cc72da1b4e4b
| 6,297
|
py
|
Python
|
discordSuperUtils/ban.py
|
Heapy1337/discord-super-utils
|
be9d65fbc957d017df534ac502457f387594a9c8
|
[
"MIT"
] | 91
|
2021-07-14T13:01:31.000Z
|
2022-03-25T10:28:49.000Z
|
discordSuperUtils/ban.py
|
KortaPo/discord-super-utils
|
b8c1cd1a986bc5c78eaf472bb5caf44dd7b605e4
|
[
"MIT"
] | 14
|
2021-08-13T14:23:54.000Z
|
2022-03-25T09:57:12.000Z
|
discordSuperUtils/ban.py
|
KortaPo/discord-super-utils
|
b8c1cd1a986bc5c78eaf472bb5caf44dd7b605e4
|
[
"MIT"
] | 42
|
2021-08-02T00:27:24.000Z
|
2022-03-31T15:47:37.000Z
|
from __future__ import annotations
import asyncio
from datetime import datetime
from typing import TYPE_CHECKING, Union, Optional, List, Dict, Any
import discord
from .base import DatabaseChecker
from .punishments import Punisher
if TYPE_CHECKING:
from .punishments import Punishment
from discord.ext import commands
__all__ = ("UnbanFailure", "BanManager")
class UnbanFailure(Exception):
    """Raised when an unban is attempted on a ``discord.User`` without a guild
    being supplied to resolve the ban against."""
class BanManager(DatabaseChecker, Punisher):
    """
    A BanManager that manages guild bans.

    Temporary bans are persisted to a "bans" database table and a background
    task (started on database connect) unbans members whose time is up.
    """
    __slots__ = ("bot",)

    def __init__(self, bot: commands.Bot):
        # One "bans" table; "timestamp" stores the unix time at which the
        # member should be unbanned (written by ban()).
        super().__init__(
            [
                {
                    "guild": "snowflake",
                    "member": "snowflake",
                    "reason": "string",
                    "timestamp": "snowflake",
                }
            ],
            ["bans"],
        )
        self.bot = bot
        self.add_event(self._on_database_connect, "on_database_connect")

    async def _on_database_connect(self):
        # Start the background unban loop once the database is available.
        self.bot.loop.create_task(self.__check_bans())

    @DatabaseChecker.uses_database
    async def get_banned_members(self) -> List[Dict[str, Any]]:
        """
        |coro|
        This function returns all the members that are supposed to be unbanned but are banned.
        :return: The list of unbanned members.
        :rtype: List[Dict[str, Any]]
        """
        # Rows whose scheduled unban timestamp has already passed.
        return [
            x
            for x in await self.database.select(self.tables["bans"], [], fetchall=True)
            if x["timestamp"] <= datetime.utcnow().timestamp()
        ]

    async def __check_bans(self) -> None:
        """
        |coro|
        A loop that ensures that members are unbanned when they need to.
        :return: None
        :rtype: None
        """
        await self.bot.wait_until_ready()
        while not self.bot.is_closed():
            for banned_member in await self.get_banned_members():
                guild = self.bot.get_guild(banned_member["guild"])
                if guild is None:
                    # Guild not visible to this bot anymore; skip its rows.
                    continue
                user = await self.bot.fetch_user(banned_member["member"])
                if await self.unban(user, guild):
                    await self.call_event("on_unban", user, banned_member["reason"])
            # Poll every 5 minutes.
            await asyncio.sleep(300)

    async def punish(
        self, ctx: commands.Context, member: discord.Member, punishment: Punishment
    ) -> None:
        # Punisher hook: schedule a (possibly timed) ban and emit
        # "on_punishment" when scheduling succeeded.
        try:
            self.bot.loop.create_task(
                self.ban(
                    member,
                    punishment.punishment_reason,
                    punishment.punishment_time.total_seconds(),
                )
            )
        except discord.errors.Forbidden as e:
            raise e
        else:
            await self.call_event("on_punishment", ctx, member, punishment)

    @staticmethod
    async def get_ban(
        member: Union[discord.Member, discord.User], guild: discord.Guild
    ) -> Optional[discord.User]:
        """
        |coro|
        This function returns the user object of the member if he is banned from the guild.
        :param member: The banned member.
        :type member: discord.Member
        :param guild: The guild.
        :type guild: discord.Guild
        :return: The user object if found.
        :rtype: Optional[discord.User]
        """
        banned = await guild.bans()
        for x in banned:
            if x.user.id == member.id:
                return x.user
        # Falls through to an implicit None when the member is not banned.

    @DatabaseChecker.uses_database
    async def unban(
        self, member: Union[discord.Member, discord.User], guild: discord.Guild = None
    ) -> bool:
        """
        |coro|
        Unbans the member from the guild.
        :param Union[discord.Member, discord.User] member: The member or user to unban.
        :param discord.Guild guild: The guild to unban the member from.
        :return: A bool representing if the unban was successful.
        :rtype: bool
        :raises: UnbanFailure: Cannot unban a discord.User without a guild.
        """
        if isinstance(member, discord.User) and not guild:
            raise UnbanFailure("Cannot unban a discord.User without a guild.")
        guild = guild if guild is not None else member.guild
        # Remove the scheduled-unban row regardless of the live ban state.
        await self.database.delete(
            self.tables["bans"], {"guild": guild.id, "member": member.id}
        )
        if user := await self.get_ban(member, guild):
            await guild.unban(user)
            return True
        # NOTE(review): implicitly returns None (falsy) when no live ban was
        # found, which callers treat as an unsuccessful unban.

    async def __handle_unban(
        self, time_of_ban: Union[int, float], member: discord.Member, reason: str
    ) -> None:
        """
        |coro|
        A function that handles the member's unban that runs separately from the ban method so it wont be blocked.
        :param Union[int, float] time_of_ban: The time until the member's unban timestamp.
        :param discord.Member member: The member to unban.
        :param str reason: The reason of the mute.
        :return: None
        :rtype: None
        """
        await asyncio.sleep(time_of_ban)
        if await self.unban(member):
            await self.call_event("on_unban", member, reason)

    @DatabaseChecker.uses_database
    async def ban(
        self,
        member: discord.Member,
        reason: str = "No reason provided.",
        time_of_ban: Union[int, float] = 0,
    ) -> None:
        """
        |coro|
        Bans the member from the guild.
        :param member: The member to ban.
        :type member: discord.Member
        :param reason: The reason of the ban.
        :type reason: str
        :param time_of_ban: The time of ban.
        :type time_of_ban: Union[int, float]
        :return: None
        :rtype: None
        """
        await member.ban(reason=reason)
        if time_of_ban <= 0:
            # Permanent ban: nothing to schedule.
            return
        # Persist the scheduled unban so it survives restarts, then also
        # schedule an in-process timer for the current session.
        await self.database.insert(
            self.tables["bans"],
            {
                "guild": member.guild.id,
                "member": member.id,
                "reason": reason,
                "timestamp": datetime.utcnow().timestamp() + time_of_ban,
            },
        )
        self.bot.loop.create_task(self.__handle_unban(time_of_ban, member, reason))
| 29.018433
| 114
| 0.576147
|
from __future__ import annotations
import asyncio
from datetime import datetime
from typing import TYPE_CHECKING, Union, Optional, List, Dict, Any
import discord
from .base import DatabaseChecker
from .punishments import Punisher
if TYPE_CHECKING:
from .punishments import Punishment
from discord.ext import commands
__all__ = ("UnbanFailure", "BanManager")
class UnbanFailure(Exception):
class BanManager(DatabaseChecker, Punisher):
__slots__ = ("bot",)
def __init__(self, bot: commands.Bot):
super().__init__(
[
{
"guild": "snowflake",
"member": "snowflake",
"reason": "string",
"timestamp": "snowflake",
}
],
["bans"],
)
self.bot = bot
self.add_event(self._on_database_connect, "on_database_connect")
async def _on_database_connect(self):
self.bot.loop.create_task(self.__check_bans())
@DatabaseChecker.uses_database
async def get_banned_members(self) -> List[Dict[str, Any]]:
return [
x
for x in await self.database.select(self.tables["bans"], [], fetchall=True)
if x["timestamp"] <= datetime.utcnow().timestamp()
]
async def __check_bans(self) -> None:
await self.bot.wait_until_ready()
while not self.bot.is_closed():
for banned_member in await self.get_banned_members():
guild = self.bot.get_guild(banned_member["guild"])
if guild is None:
continue
user = await self.bot.fetch_user(banned_member["member"])
if await self.unban(user, guild):
await self.call_event("on_unban", user, banned_member["reason"])
await asyncio.sleep(300)
async def punish(
self, ctx: commands.Context, member: discord.Member, punishment: Punishment
) -> None:
try:
self.bot.loop.create_task(
self.ban(
member,
punishment.punishment_reason,
punishment.punishment_time.total_seconds(),
)
)
except discord.errors.Forbidden as e:
raise e
else:
await self.call_event("on_punishment", ctx, member, punishment)
@staticmethod
async def get_ban(
member: Union[discord.Member, discord.User], guild: discord.Guild
) -> Optional[discord.User]:
banned = await guild.bans()
for x in banned:
if x.user.id == member.id:
return x.user
@DatabaseChecker.uses_database
async def unban(
self, member: Union[discord.Member, discord.User], guild: discord.Guild = None
) -> bool:
if isinstance(member, discord.User) and not guild:
raise UnbanFailure("Cannot unban a discord.User without a guild.")
guild = guild if guild is not None else member.guild
await self.database.delete(
self.tables["bans"], {"guild": guild.id, "member": member.id}
)
if user := await self.get_ban(member, guild):
await guild.unban(user)
return True
async def __handle_unban(
self, time_of_ban: Union[int, float], member: discord.Member, reason: str
) -> None:
await asyncio.sleep(time_of_ban)
if await self.unban(member):
await self.call_event("on_unban", member, reason)
@DatabaseChecker.uses_database
async def ban(
self,
member: discord.Member,
reason: str = "No reason provided.",
time_of_ban: Union[int, float] = 0,
) -> None:
await member.ban(reason=reason)
if time_of_ban <= 0:
return
await self.database.insert(
self.tables["bans"],
{
"guild": member.guild.id,
"member": member.id,
"reason": reason,
"timestamp": datetime.utcnow().timestamp() + time_of_ban,
},
)
self.bot.loop.create_task(self.__handle_unban(time_of_ban, member, reason))
| true
| true
|
f708e2e806370b31b4b855475a4664b8918bfc13
| 4,694
|
py
|
Python
|
qa/rpc-tests/mempool_reorg.py
|
mirzaei-ce/core-javabit
|
bfc1f145268455ca788c8a0b70fb3f054e4287f9
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/mempool_reorg.py
|
mirzaei-ce/core-javabit
|
bfc1f145268455ca788c8a0b70fb3f054e4287f9
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/mempool_reorg.py
|
mirzaei-ce/core-javabit
|
bfc1f145268455ca788c8a0b70fb3f054e4287f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import JavabitTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(JavabitTestFramework):
    """Checks that the mempool evicts transactions whose coinbase inputs
    become immature again after a re-org (via invalidateblock)."""
    alert_filename = None  # Set by setup_network

    def setup_network(self):
        # Two connected nodes with mempool consistency checks and debug logging.
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.nodes.append(start_node(1, self.options.tmpdir, args))
        connect_nodes(self.nodes[1], 0)
        self.is_network_split = False
        self.sync_all()

    def create_tx(self, from_txid, to_address, amount):
        """Create and sign a one-input, one-output, no-fee transaction that
        spends output 0 of ``from_txid``; returns the raw hex string."""
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signresult = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(signresult["complete"], True)
        return signresult["hex"]

    def run_test(self):
        start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, the coinbases of blocks 101-104 on
        # nodes[0] are spend-able. (NOTE: a previous comment said "three".)
        new_blocks = self.nodes[1].generate(4)
        self.sync_all()
        node0_address = self.nodes[0].getnewaddress()
        node1_address = self.nodes[1].getnewaddress()
        # Three scenarios for re-orging coinbase spends in the memory pool:
        # 1. Direct coinbase spend  :  spend_101
        # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
        # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
        # and make sure the mempool code behaves correctly.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spend_101_raw = self.create_tx(coinbase_txids[1], node1_address, 50)
        spend_102_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
        spend_103_raw = self.create_tx(coinbase_txids[3], node0_address, 50)
        # Create a block-height-locked transaction which will be invalid after reorg
        timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 50})
        # Set the time lock: make the input sequence non-final (0x11111111)
        # and patch the last 4 bytes (nLockTime) to current height + 2.
        timelock_tx = timelock_tx.replace("ffffffff", "11111111", 1)
        timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
        timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
        # Not yet final -> broadcast must be rejected.
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
        # Broadcast and mine spend_102 and 103:
        spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
        spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
        self.nodes[0].generate(1)
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
        # Create 102_1 and 103_1:
        spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
        spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
        # Broadcast and mine 103_1:
        spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
        last_block = self.nodes[0].generate(1)
        timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
        # ... now put spend_101 and spend_102_1 in memory pools:
        spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
        spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
        self.sync_all()
        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
        for node in self.nodes:
            node.invalidateblock(last_block[0])
        # After the 1-block re-org 103_1 returns to the mempool, while the
        # timelock tx becomes non-final again and is evicted.
        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
        # Use invalidateblock to re-org back and make all those coinbase spends
        # immature/invalid:
        for node in self.nodes:
            node.invalidateblock(new_blocks[0])
        self.sync_all()
        # mempool should be empty.
        assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
    # Allow running this test directly, outside the rpc-test runner.
    MempoolCoinbaseTest().main()
| 44.283019
| 119
| 0.684278
|
from test_framework.test_framework import JavabitTestFramework
from test_framework.util import *
class MempoolCoinbaseTest(JavabitTestFramework):
alert_filename = None
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[1], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[3], node0_address, 50)
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 50})
timelock_tx = timelock_tx.replace("ffffffff", "11111111", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| true
| true
|
f708e3a9dcf1d33165f200c2aec241d065dd8605
| 2,906
|
py
|
Python
|
Homography/hw2-2/homography.py
|
Yfyangd/Computer_Vision_CS665
|
59dca3ce42f43b4aea446497a578f4a0eb93995d
|
[
"Apache-2.0"
] | 2
|
2019-11-06T03:40:08.000Z
|
2019-11-06T03:40:19.000Z
|
Homography/hw2-2/homography.py
|
Yfyangd/Computer_Vision_CS665
|
59dca3ce42f43b4aea446497a578f4a0eb93995d
|
[
"Apache-2.0"
] | null | null | null |
Homography/hw2-2/homography.py
|
Yfyangd/Computer_Vision_CS665
|
59dca3ce42f43b4aea446497a578f4a0eb93995d
|
[
"Apache-2.0"
] | 2
|
2022-02-14T05:02:36.000Z
|
2022-02-21T16:02:23.000Z
|
# coding: utf-8
# In[1]:
import numpy as np
def get_homograph(u, v):
    """Solve for the 3x3 homography H mapping four source points to four
    destination points.

    Parameters
    ----------
    u, v : array-like of shape (4, 2)
        Source and destination point pairs ``[[x0, y0], ..., [x3, y3]]``.

    Returns
    -------
    numpy.ndarray of shape (3, 3)
        H with ``H[2, 2] == 1`` such that ``[x', y', w']^T ~ H @ [x, y, 1]^T``.

    Raises
    ------
    numpy.linalg.LinAlgError
        If the point configuration is degenerate (singular system).
    """
    # Each correspondence contributes two rows of the standard DLT system
    # A h = b in the 8 unknown entries of H (H[2][2] is fixed to 1).
    # Building the rows in a loop replaces the original hand-unrolled
    # 8x8 literal, which was error-prone to maintain.
    A = np.zeros((8, 8))
    b = np.zeros(8)
    for i in range(4):
        x, y = u[i][0], u[i][1]
        xp, yp = v[i][0], v[i][1]
        A[2 * i] = [x, y, 1, 0, 0, 0, -x * xp, -y * xp]
        A[2 * i + 1] = [0, 0, 0, x, y, 1, -x * yp, -y * yp]
        b[2 * i] = xp
        b[2 * i + 1] = yp
    # np.linalg.solve is numerically preferable to forming the explicit
    # inverse (the original computed inv(A) @ b).
    h = np.linalg.solve(A, b)
    H = np.array([[h[0], h[1], h[2]],
                  [h[3], h[4], h[5]],
                  [h[6], h[7], 1]])
    return H
def interpolation(img, new_x, new_y):
    """Bilinearly interpolate ``img`` (H x W x channels) at the real-valued
    coordinate ``(new_x, new_y)`` and return the blended pixel vector."""
    x0, y0 = int(new_x), int(new_y)
    # Fractional offsets, rounded to 2 decimals to match the original weights.
    fx = round(new_x - x0, 2)
    fy = round(new_y - y0, 2)
    w00 = (1 - fx) * (1 - fy)
    w01 = (1 - fx) * fy
    w10 = fx * (1 - fy)
    w11 = fx * fy
    return (w00 * img[y0, x0]
            + w01 * img[y0 + 1, x0]
            + w10 * img[y0, x0 + 1]
            + w11 * img[y0 + 1, x0 + 1])
def forward_warping(u, v, input_image, canvas):
    """Warp the quad ``u`` of ``input_image`` onto ``canvas`` by pushing each
    source pixel through the homography u -> v (nearest pixel; may leave
    holes in the destination). Returns the mutated ``canvas``."""
    H = get_homograph(u, v)
    xs, ys = u[0:4, 0:1], u[0:4, 1:2]
    x_min, x_max = xs.min(), xs.max()
    y_min, y_max = ys.min(), ys.max()
    for dy in range(y_max - y_min):
        for dx in range(x_max - x_min):
            src_x, src_y = dx + x_min, dy + y_min
            dst = np.dot(H, np.array([[src_x, src_y, 1]]).T)
            # Dehomogenize and truncate to integer canvas coordinates.
            tx = int(dst[0][0] / dst[2][0])
            ty = int(dst[1][0] / dst[2][0])
            canvas[ty][tx] = input_image[src_y][src_x]
    return canvas
def backward_warping(u, v, input_image, canvas):
    """Fill the quad ``u`` of ``canvas`` by pulling bilinearly interpolated
    samples from ``input_image`` through the homography u -> v
    (v: output coordinates, u: input coordinates). Returns ``canvas``."""
    H = get_homograph(u, v)
    xs, ys = u[0:4, 0:1], u[0:4, 1:2]
    x_min, x_max = xs.min(), xs.max()
    y_min, y_max = ys.min(), ys.max()
    for row in range(y_max - y_min):
        for col in range(x_max - x_min):
            out_x, out_y = col + x_min, row + y_min
            src = np.dot(H, np.array([[out_x, out_y, 1]]).T)
            # Dehomogenize into fractional source coordinates.
            src_x = src[0][0] / src[2][0]
            src_y = src[1][0] / src[2][0]
            canvas[out_y][out_x] = interpolation(input_image, src_x, src_y)
    return canvas
| 38.746667
| 97
| 0.419133
|
import numpy as np
def get_homograph(u,v):
A = np.array([[u[0][0], u[0][1], 1, 0, 0, 0, -1 * u[0][0] * v[0][0], -1 * u[0][1] * v[0][0]],
[0, 0, 0, u[0][0], u[0][1], 1, -1 * u[0][0] * v[0][1], -1 * u[0][1] * v[0][1]],
[u[1][0], u[1][1], 1, 0, 0, 0, -1 * u[1][0] * v[1][0], -1 * u[1][1] * v[1][0]],
[0, 0, 0, u[1][0], u[1][1], 1, -1 * u[1][0] * v[1][1], -1 * u[1][1] * v[1][1]],
[u[2][0], u[2][1], 1, 0, 0, 0, -1 * u[2][0] * v[2][0], -1 * u[2][1] * v[2][0]],
[0, 0, 0, u[2][0], u[2][1], 1, -1 * u[2][0] * v[2][1], -1 * u[2][1] * v[2][1]],
[u[3][0], u[3][1], 1, 0, 0, 0, -1 * u[3][0] * v[3][0], -1 * u[3][1] * v[3][0]],
[0, 0, 0, u[3][0], u[3][1], 1, -1 * u[3][0] * v[3][1], -1 * u[3][1] * v[3][1]]
])
b = np.array([[v[0][0]],
[v[0][1]],
[v[1][0]],
[v[1][1]],
[v[2][0]],
[v[2][1]],
[v[3][0]],
[v[3][1]]
])
tmp = np.dot(np.linalg.inv(A), b)
H = np.array([[tmp[0][0], tmp[1][0], tmp[2][0]],
[tmp[3][0], tmp[4][0], tmp[5][0]],
[tmp[6][0], tmp[7][0], 1]
])
return H
def interpolation(img, new_x, new_y):
fx = round(new_x - int(new_x), 2)
fy = round(new_y - int(new_y), 2)
p = np.zeros((3,))
p += (1 - fx) * (1 - fy) * img[int(new_y), int(new_x)]
p += (1 - fx) * fy * img[int(new_y) + 1, int(new_x)]
p += fx * (1 - fy) * img[int(new_y), int(new_x) + 1]
p += fx * fy * img[int(new_y) + 1, int(new_x) + 1]
return p
def forward_warping(u,v,input_image,canvas):
matrix = get_homograph(u,v)
i0_max = u[0:4,0:1].max()
i0_min = u[0:4,0:1].min()
i1_max = u[0:4,1:2].max()
i1_min = u[0:4,1:2].min()
i0_range = i0_max-i0_min
i1_range = i1_max-i1_min
for i in range(i1_range):
for j in range(i0_range):
tmp2 = np.dot(matrix, np.array([[j+i0_min, i+i1_min, 1]]).T)
x, y = int(tmp2[0][0] / tmp2[2][0]), int(tmp2[1][0] / tmp2[2][0])
canvas[y][x] = input_image[i+i1_min][j+i0_min]
return canvas
def backward_warping(u,v,input_image,canvas):
matrix = get_homograph(u,v) i0_max = u[0:4,0:1].max()
i0_min = u[0:4,0:1].min()
i1_max = u[0:4,1:2].max()
i1_min = u[0:4,1:2].min()
i0_range = i0_max-i0_min
i1_range = i1_max-i1_min
for j in range(i1_range):
for i in range(i0_range):
new_pos = np.dot(matrix, np.array([[i+i0_min, j+i1_min, 1]]).T)
new_x, new_y = new_pos[0][0] / new_pos[2][0], new_pos[1][0] / new_pos[2][0]
res = interpolation(input_image, new_x, new_y)
canvas[j+i1_min][i+i0_min] = res
return canvas
| true
| true
|
f708e4a18c9e2a5f4a165c78f3009567fbd27a2d
| 10,365
|
py
|
Python
|
autotest/ogr/ogr_flatgeobuf.py
|
landam/gdal
|
0232dcf743829e23268a2ae0c4fd10aaaeb14b3c
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_flatgeobuf.py
|
landam/gdal
|
0232dcf743829e23268a2ae0c4fd10aaaeb14b3c
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_flatgeobuf.py
|
landam/gdal
|
0232dcf743829e23268a2ae0c4fd10aaaeb14b3c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: FlatGeobuf driver test suite.
# Author: Björn Harrtell <bjorn@wololo.org>
#
###############################################################################
# Copyright (c) 2018-2019, Björn Harrtell <bjorn@wololo.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
from osgeo import ogr
from osgeo import gdal
import gdaltest
import ogrtest
import pytest
### utils
def verify_flatgeobuf_copy(name, fids, names):
    """Open tmp/<name>.fgb and verify its 'FID'/'NAME' attributes against
    ``fids``/``names`` and its geometries against the source features that
    copy_shape_to_flatgeobuf stashed in ``gdaltest.features``.

    Returns True on success, False otherwise; always clears
    ``gdaltest.features`` before a successful return."""
    if gdaltest.features is None:
        print('Missing features collection')
        return False
    fname = os.path.join('tmp', name + '.fgb')
    ds = ogr.Open(fname)
    if ds is None:
        print('Can not open \'' + fname + '\'')
        return False
    lyr = ds.GetLayer(0)
    if lyr is None:
        print('Missing layer')
        return False
    ######################################################
    # Test attributes
    ret = ogrtest.check_features_against_list(lyr, 'FID', fids)
    if ret != 1:
        print('Wrong values in \'FID\' field')
        return False
    lyr.ResetReading()
    ret = ogrtest.check_features_against_list(lyr, 'NAME', names)
    if ret != 1:
        print('Wrong values in \'NAME\' field')
        return False
    ######################################################
    # Test geometries: compare feature-by-feature with a small tolerance.
    lyr.ResetReading()
    for i in range(len(gdaltest.features)):
        orig_feat = gdaltest.features[i]
        feat = lyr.GetNextFeature()
        if feat is None:
            print('Failed trying to read feature')
            return False
        if ogrtest.check_feature_geometry(feat, orig_feat.GetGeometryRef(),
                                          max_error=0.001) != 0:
            print('Geometry test failed')
            gdaltest.features = None
            return False
    gdaltest.features = None
    lyr = None
    return True
def copy_shape_to_flatgeobuf(name, wkbType, compress=None, options=None):
    """Copy data/<name>.shp into tmp/<name>.fgb, optionally behind a
    /vsigzip/, /vsizip/ or /vsistdout/ wrapper.

    The source features are remembered in ``gdaltest.features`` so
    verify_flatgeobuf_copy can compare against them later.

    :param name: basename of the shapefile (without extension).
    :param wkbType: OGR geometry type for the new layer.
    :param compress: None, a '/vsig...'/'/vsiz...' prefix, or '/vsistdout/'.
    :param options: layer creation options (default: none).
    :return: True on success, False otherwise.
    """
    if gdaltest.flatgeobuf_drv is None:
        return False
    # Fix for the mutable-default-argument pitfall: materialize the default
    # per call instead of sharing one list across all calls.
    if options is None:
        options = []
    if compress is not None:
        if compress[0:5] == '/vsig':
            dst_name = os.path.join('/vsigzip/', 'tmp', name + '.fgb' + '.gz')
        elif compress[0:4] == '/vsiz':
            dst_name = os.path.join('/vsizip/', 'tmp', name + '.fgb' + '.zip')
        elif compress == '/vsistdout/':
            dst_name = compress
        else:
            return False
    else:
        dst_name = os.path.join('tmp', name + '.fgb')
    ds = gdaltest.flatgeobuf_drv.CreateDataSource(dst_name)
    if ds is None:
        return False
    ######################################################
    # Create layer
    lyr = ds.CreateLayer(name, None, wkbType, options)
    if lyr is None:
        return False
    ######################################################
    # Setup schema (all test shapefiles use common schema)
    ogrtest.quick_create_layer_def(lyr,
                                   [('FID', ogr.OFTReal),
                                    ('NAME', ogr.OFTString)])
    ######################################################
    # Copy in shp
    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    src_name = os.path.join('data', name + '.shp')
    shp_ds = ogr.Open(src_name)
    shp_lyr = shp_ds.GetLayer(0)
    feat = shp_lyr.GetNextFeature()
    gdaltest.features = []
    while feat is not None:
        gdaltest.features.append(feat)
        dst_feat.SetFrom(feat)
        lyr.CreateFeature(dst_feat)
        feat = shp_lyr.GetNextFeature()
    shp_lyr = None
    lyr = None
    ds = None
    return True
### tests
def test_ogr_flatgeobuf_1():
    """Verify that the FlatGeobuf driver is registered; later tests skip when
    ``gdaltest.flatgeobuf_drv`` is None."""
    gdaltest.flatgeobuf_drv = ogr.GetDriverByName('FlatGeobuf')
    if gdaltest.flatgeobuf_drv is None:
        pytest.fail()
def test_ogr_flatgeobuf_2():
    """Check spatial-filter feature counts on poly.fgb, including that
    ResetReading does not disturb subsequent enumeration or filtering."""
    fgb_ds = ogr.Open('data/testfgb/poly.fgb')
    fgb_lyr = fgb_ds.GetLayer(0)
    # test expected spatial filter feature count consistency
    c = fgb_lyr.GetFeatureCount()
    assert c == 10
    c = fgb_lyr.SetSpatialFilterRect(478315.531250, 4762880.500000, 481645.312500, 4765610.500000)
    c = fgb_lyr.GetFeatureCount()
    assert c == 10
    c = fgb_lyr.SetSpatialFilterRect(878315.531250, 4762880.500000, 881645.312500, 4765610.500000)
    c = fgb_lyr.GetFeatureCount()
    assert c == 0
    c = fgb_lyr.SetSpatialFilterRect(479586.0,4764618.6,479808.2,4764797.8)
    c = fgb_lyr.GetFeatureCount()
    # Without GEOS one extra feature passes the filter — presumably because
    # only the bounding-box test applies then (TODO confirm).
    if ogrtest.have_geos():
        assert c == 4
    else:
        assert c == 5
    # check that ResetReading does not affect subsequent enumeration or filtering
    num = len(list([x for x in fgb_lyr]))
    if ogrtest.have_geos():
        assert num == 4
    else:
        assert num == 5
    fgb_lyr.ResetReading()
    c = fgb_lyr.GetFeatureCount()
    if ogrtest.have_geos():
        assert c == 4
    else:
        assert c == 5
    fgb_lyr.ResetReading()
    num = len(list([x for x in fgb_lyr]))
    if ogrtest.have_geos():
        assert num == 4
    else:
        assert num == 5
def wktRoundtrip(expected):
    """Write the WKT geometry ``expected`` to a temporary in-memory
    FlatGeobuf dataset, read it back, and assert that the exported WKT is
    byte-identical; the temporary dataset is deleted afterwards."""
    ds = ogr.GetDriverByName('FlatGeobuf').CreateDataSource('/vsimem/test.fgb')
    g = ogr.CreateGeometryFromWkt(expected)
    lyr = ds.CreateLayer('test', None, g.GetGeometryType(), [])
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(g)
    lyr.CreateFeature(f)
    ds = None  # flush/close before reopening
    fgb_ds = ogr.Open('/vsimem/test.fgb')
    fgb_lyr = fgb_ds.GetLayer(0)
    f = fgb_lyr.GetNextFeature()
    g = f.GetGeometryRef()
    actual = g.ExportToWkt()
    fgb_ds = None
    ogr.GetDriverByName('FlatGeobuf').DeleteDataSource('/vsimem/test.fgb')
    assert not gdal.VSIStatL('/vsimem/test.fgb')
    assert actual == expected
def test_ogr_flatgeobuf_3():
    """Round-trip every basic geometry type through FlatGeobuf via WKT."""
    if gdaltest.flatgeobuf_drv is None:
        pytest.skip()
    wktRoundtrip('POINT (1 1)')
    wktRoundtrip('POINT (1.1234 1.4321)')
    wktRoundtrip('POINT (1.12345678901234 1.4321)') # max precision 15 decimals
    #wktRoundtrip('POINT (1.123456789012341 1.4321)') # 16 decimals, will not pass
    wktRoundtrip('POINT (1.2 -2.1)')
    wktRoundtrip('MULTIPOINT (10 40,40 30,20 20,30 10)')
    wktRoundtrip('LINESTRING (1.2 -2.1,2.4 -4.8)')
    wktRoundtrip('MULTILINESTRING ((10 10,20 20,10 40),(40 40,30 30,40 20,30 10),(50 50,60 60,50 90))')
    wktRoundtrip('MULTILINESTRING ((1.2 -2.1,2.4 -4.8))')
    wktRoundtrip('POLYGON ((30 10,40 40,20 40,10 20,30 10))')
    wktRoundtrip('POLYGON ((35 10,45 45,15 40,10 20,35 10),(20 30,35 35,30 20,20 30))')
    wktRoundtrip('MULTIPOLYGON (((30 20,45 40,10 40,30 20)),((15 5,40 10,10 20,5 10,15 5)))')
    wktRoundtrip('MULTIPOLYGON (((40 40,20 45,45 30,40 40)),((20 35,10 30,10 10,30 5,45 20,20 35),(30 20,20 15,20 25,30 20)))')
    wktRoundtrip('MULTIPOLYGON (((30 20,45 40,10 40,30 20)))')
    wktRoundtrip('MULTIPOLYGON (((35 10,45 45,15 40,10 20,35 10),(20 30,35 35,30 20,20 30)))')
    #wktRoundtrip('POINT ZM (1 2 3 4)')
# Run test_ogrsf
def test_ogr_flatgeobuf_8():
    """Run the external test_ogrsf utility read-only against poly.fgb;
    its output must contain INFO lines and no ERROR lines."""
    import test_cli_utilities
    ogrsf_path = test_cli_utilities.get_test_ogrsf_path()
    if ogrsf_path is None:
        pytest.skip()

    output = gdaltest.runexternal(ogrsf_path + ' -ro data/testfgb/poly.fgb')
    assert 'INFO' in output and 'ERROR' not in output
def test_ogr_flatgeobuf_9():
    """Copy shapefiles of every tested geometry type to FlatGeobuf and
    verify the copies, first with default options, then with the spatial
    index disabled.

    Idiom fix: iterate the test table directly with tuple unpacking
    instead of indexing via range(len(...)).
    """
    if gdaltest.flatgeobuf_drv is None:
        pytest.skip()

    # [source shapefile basename, expected FIDs, expected NAMEs, geometry type]
    gdaltest.tests = [
        ['gjpoint', [1], ['Point 1'], ogr.wkbPoint],
        ['gjline', [1], ['Line 1'], ogr.wkbLineString],
        ['gjpoly', [1], ['Polygon 1'], ogr.wkbPolygon],
        ['gjmultipoint', [1], ['MultiPoint 1'], ogr.wkbMultiPoint],
        ['gjmultiline', [2], ['MultiLine 1'], ogr.wkbMultiLineString],
        ['gjmultipoly', [2], ['MultiPoly 1'], ogr.wkbMultiPolygon]
    ]

    # Pass 1: default creation options (spatial index enabled).
    for name, fids, names, wkb_type in gdaltest.tests:
        rc = copy_shape_to_flatgeobuf(name, wkb_type)
        assert rc, ('Failed making copy of ' + name + '.shp')

        rc = verify_flatgeobuf_copy(name, fids, names)
        assert rc, ('Verification of copy of ' + name + '.shp failed')

    # Pass 2: explicitly disable the spatial index.
    for name, fids, names, wkb_type in gdaltest.tests:
        rc = copy_shape_to_flatgeobuf(name, wkb_type, None, ['SPATIAL_INDEX=NO'])
        assert rc, ('Failed making copy of ' + name + '.shp')

        rc = verify_flatgeobuf_copy(name, fids, names)
        assert rc, ('Verification of copy of ' + name + '.shp failed')
# Test support for multiple layers in a directory
def test_ogr_flatgeobuf_directory():
    """Multiple layers are stored as individual .fgb files in a directory."""
    if gdaltest.flatgeobuf_drv is None:
        pytest.skip()

    out_ds = ogr.GetDriverByName('FlatGeobuf').CreateDataSource('/vsimem/multi_layer')
    # 'foo<' is not a valid file name, so the layer name gets laundered and
    # a warning is emitted, which the error handler swallows.
    with gdaltest.error_handler():
        out_ds.CreateLayer('foo<', geom_type=ogr.wkbPoint)
    out_ds.CreateLayer('bar', geom_type=ogr.wkbPoint)
    out_ds = None

    in_ds = gdal.OpenEx('/vsimem/multi_layer')
    expected_files = {'/vsimem/multi_layer/bar.fgb', '/vsimem/multi_layer/foo_.fgb'}
    assert set(in_ds.GetFileList()) == expected_files
    # Lookup by the original (unlaundered) layer name must still succeed.
    assert in_ds.GetLayer('foo<')
    assert in_ds.GetLayer('bar')
    in_ds = None

    ogr.GetDriverByName('FlatGeobuf').DeleteDataSource('/vsimem/multi_layer')
    assert not gdal.VSIStatL('/vsimem/multi_layer')
| 32.904762
| 127
| 0.608394
|
import os
from osgeo import ogr
from osgeo import gdal
import gdaltest
import ogrtest
import pytest
def verify_flatgeobuf_copy(name, fids, names):
    """Verify tmp/<name>.fgb against the features cached in gdaltest.features.

    Checks the 'FID' and 'NAME' attribute columns against *fids* / *names*
    and each geometry against the cached source features (tolerance 0.001).
    Returns True on success, False (after printing a reason) otherwise.
    Side effect: clears gdaltest.features once the geometry loop completes
    or a geometry mismatch is found.
    """
    if gdaltest.features is None:
        print('Missing features collection')
        return False
    fname = os.path.join('tmp', name + '.fgb')
    ds = ogr.Open(fname)
    if ds is None:
        print('Can not open \'' + fname + '\'')
        return False
    lyr = ds.GetLayer(0)
    if lyr is None:
        print('Missing layer')
        return False
    # Attribute checks: the FID column first, then the NAME column.
    ret = ogrtest.check_features_against_list(lyr, 'FID', fids)
    if ret != 1:
        print('Wrong values in \'FID\' field')
        return False
    lyr.ResetReading()
    ret = ogrtest.check_features_against_list(lyr, 'NAME', names)
    if ret != 1:
        print('Wrong values in \'NAME\' field')
        return False
    lyr.ResetReading()
    # Geometry check: compare each copied feature with the cached original,
    # in the same order they were written.
    for i in range(len(gdaltest.features)):
        orig_feat = gdaltest.features[i]
        feat = lyr.GetNextFeature()
        if feat is None:
            print('Failed trying to read feature')
            return False
        if ogrtest.check_feature_geometry(feat, orig_feat.GetGeometryRef(),
                                          max_error=0.001) != 0:
            print('Geometry test failed')
            gdaltest.features = None
            return False
    gdaltest.features = None
    lyr = None
    return True
def copy_shape_to_flatgeobuf(name, wkbType, compress=None, options=None):
    """Copy data/<name>.shp into a FlatGeobuf datasource.

    name:     basename of the source shapefile (without extension).
    wkbType:  OGR geometry type for the destination layer.
    compress: optional '/vsigzip/', '/vsizip/' or '/vsistdout/' selector
              for a compressed or streamed destination.
    options:  optional list of layer creation options.

    Side effect: caches the source features in gdaltest.features so that
    verify_flatgeobuf_copy() can compare against them.
    Returns True on success, False otherwise.
    """
    if gdaltest.flatgeobuf_drv is None:
        return False
    if options is None:
        # Fix: the original used a mutable default argument (options=[]),
        # which is shared between calls and can leak state if ever mutated.
        options = []
    if compress is not None:
        if compress[0:5] == '/vsig':
            dst_name = os.path.join('/vsigzip/', 'tmp', name + '.fgb' + '.gz')
        elif compress[0:4] == '/vsiz':
            dst_name = os.path.join('/vsizip/', 'tmp', name + '.fgb' + '.zip')
        elif compress == '/vsistdout/':
            dst_name = compress
        else:
            # Unknown compression prefix.
            return False
    else:
        dst_name = os.path.join('tmp', name + '.fgb')
    ds = gdaltest.flatgeobuf_drv.CreateDataSource(dst_name)
    if ds is None:
        return False
    lyr = ds.CreateLayer(name, None, wkbType, options)
    if lyr is None:
        return False
    # Destination schema: FID (real) and NAME (string).
    ogrtest.quick_create_layer_def(lyr,
                                   [('FID', ogr.OFTReal),
                                    ('NAME', ogr.OFTString)])
    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    src_name = os.path.join('data', name + '.shp')
    shp_ds = ogr.Open(src_name)
    shp_lyr = shp_ds.GetLayer(0)
    feat = shp_lyr.GetNextFeature()
    gdaltest.features = []
    while feat is not None:
        gdaltest.features.append(feat)
        dst_feat.SetFrom(feat)
        lyr.CreateFeature(dst_feat)
        feat = shp_lyr.GetNextFeature()
    shp_lyr = None
    lyr = None
    ds = None
    return True
def test_ogr_flatgeobuf_1():
    """Cache the FlatGeobuf driver for the other tests; fail if missing."""
    gdaltest.flatgeobuf_drv = ogr.GetDriverByName('FlatGeobuf')
    if gdaltest.flatgeobuf_drv is None:
        pytest.fail()
def test_ogr_flatgeobuf_2():
    """Read data/testfgb/poly.fgb and exercise spatial filtering."""
    fgb_ds = ogr.Open('data/testfgb/poly.fgb')
    fgb_lyr = fgb_ds.GetLayer(0)
    # No filter: the file holds 10 features.
    c = fgb_lyr.GetFeatureCount()
    assert c == 10
    # A filter covering the full extent still returns all 10 features.
    c = fgb_lyr.SetSpatialFilterRect(478315.531250, 4762880.500000, 481645.312500, 4765610.500000)
    c = fgb_lyr.GetFeatureCount()
    assert c == 10
    # A filter far away from the data returns nothing.
    c = fgb_lyr.SetSpatialFilterRect(878315.531250, 4762880.500000, 881645.312500, 4765610.500000)
    c = fgb_lyr.GetFeatureCount()
    assert c == 0
    # Partially overlapping filter: 4 features with GEOS, 5 without
    # (presumably envelope-only intersection without GEOS — confirm).
    c = fgb_lyr.SetSpatialFilterRect(479586.0,4764618.6,479808.2,4764797.8)
    c = fgb_lyr.GetFeatureCount()
    if ogrtest.have_geos():
        assert c == 4
    else:
        assert c == 5
    # Iterating must agree with GetFeatureCount().
    num = len(list([x for x in fgb_lyr]))
    if ogrtest.have_geos():
        assert num == 4
    else:
        assert num == 5
    # Counting and iterating must stay consistent after ResetReading().
    fgb_lyr.ResetReading()
    c = fgb_lyr.GetFeatureCount()
    if ogrtest.have_geos():
        assert c == 4
    else:
        assert c == 5
    fgb_lyr.ResetReading()
    num = len(list([x for x in fgb_lyr]))
    if ogrtest.have_geos():
        assert num == 4
    else:
        assert num == 5
def wktRoundtrip(expected):
    """Write one feature with geometry *expected* (WKT) to /vsimem, read it
    back, delete the file and assert the exported WKT equals the input."""
    ds = ogr.GetDriverByName('FlatGeobuf').CreateDataSource('/vsimem/test.fgb')
    g = ogr.CreateGeometryFromWkt(expected)
    lyr = ds.CreateLayer('test', None, g.GetGeometryType(), [])
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(g)
    lyr.CreateFeature(f)
    ds = None  # drop the reference so the datasource is closed/flushed
    fgb_ds = ogr.Open('/vsimem/test.fgb')
    fgb_lyr = fgb_ds.GetLayer(0)
    f = fgb_lyr.GetNextFeature()
    g = f.GetGeometryRef()
    actual = g.ExportToWkt()
    fgb_ds = None
    # Clean up and make sure the in-memory file is really gone.
    ogr.GetDriverByName('FlatGeobuf').DeleteDataSource('/vsimem/test.fgb')
    assert not gdal.VSIStatL('/vsimem/test.fgb')
    assert actual == expected
def test_ogr_flatgeobuf_3():
    """Roundtrip a representative set of WKT geometries through FlatGeobuf.

    Fix: the two wktRoundtrip('POINT ...') calls for the 15-decimal point
    and 'POINT (1.2 -2.1)' were fused onto a single line with no statement
    separator, which is a SyntaxError; they are split back into two lines.
    """
    if gdaltest.flatgeobuf_drv is None:
        pytest.skip()
    wktRoundtrip('POINT (1 1)')
    wktRoundtrip('POINT (1.1234 1.4321)')
    wktRoundtrip('POINT (1.12345678901234 1.4321)')  # max precision 15 decimals
    wktRoundtrip('POINT (1.2 -2.1)')
    wktRoundtrip('MULTIPOINT (10 40,40 30,20 20,30 10)')
    wktRoundtrip('LINESTRING (1.2 -2.1,2.4 -4.8)')
    wktRoundtrip('MULTILINESTRING ((10 10,20 20,10 40),(40 40,30 30,40 20,30 10),(50 50,60 60,50 90))')
    wktRoundtrip('MULTILINESTRING ((1.2 -2.1,2.4 -4.8))')
    wktRoundtrip('POLYGON ((30 10,40 40,20 40,10 20,30 10))')
    wktRoundtrip('POLYGON ((35 10,45 45,15 40,10 20,35 10),(20 30,35 35,30 20,20 30))')
    wktRoundtrip('MULTIPOLYGON (((30 20,45 40,10 40,30 20)),((15 5,40 10,10 20,5 10,15 5)))')
    wktRoundtrip('MULTIPOLYGON (((40 40,20 45,45 30,40 40)),((20 35,10 30,10 10,30 5,45 20,20 35),(30 20,20 15,20 25,30 20)))')
    wktRoundtrip('MULTIPOLYGON (((30 20,45 40,10 40,30 20)))')
    wktRoundtrip('MULTIPOLYGON (((35 10,45 45,15 40,10 20,35 10),(20 30,35 35,30 20,20 30)))')
def test_ogr_flatgeobuf_8():
    """Run the external test_ogrsf utility read-only against poly.fgb;
    the output must contain INFO lines and no ERROR lines."""
    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is None:
        pytest.skip()
    ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro data/testfgb/poly.fgb')
    assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
def test_ogr_flatgeobuf_9():
    """Copy shapefiles of each tested geometry type to FlatGeobuf and verify
    them, first with default options, then with SPATIAL_INDEX=NO."""
    if gdaltest.flatgeobuf_drv is None:
        pytest.skip()
    # [source shapefile basename, expected FIDs, expected NAMEs, geometry type]
    gdaltest.tests = [
        ['gjpoint', [1], ['Point 1'], ogr.wkbPoint],
        ['gjline', [1], ['Line 1'], ogr.wkbLineString],
        ['gjpoly', [1], ['Polygon 1'], ogr.wkbPolygon],
        ['gjmultipoint', [1], ['MultiPoint 1'], ogr.wkbMultiPoint],
        ['gjmultiline', [2], ['MultiLine 1'], ogr.wkbMultiLineString],
        ['gjmultipoly', [2], ['MultiPoly 1'], ogr.wkbMultiPolygon]
    ]
    # Pass 1: default creation options (spatial index enabled).
    for i in range(len(gdaltest.tests)):
        test = gdaltest.tests[i]
        rc = copy_shape_to_flatgeobuf(test[0], test[3])
        assert rc, ('Failed making copy of ' + test[0] + '.shp')
        rc = verify_flatgeobuf_copy(test[0], test[1], test[2])
        assert rc, ('Verification of copy of ' + test[0] + '.shp failed')
    # Pass 2: the spatial index explicitly disabled.
    for i in range(len(gdaltest.tests)):
        test = gdaltest.tests[i]
        rc = copy_shape_to_flatgeobuf(test[0], test[3], None, ['SPATIAL_INDEX=NO'])
        assert rc, ('Failed making copy of ' + test[0] + '.shp')
        rc = verify_flatgeobuf_copy(test[0], test[1], test[2])
        assert rc, ('Verification of copy of ' + test[0] + '.shp failed')
def test_ogr_flatgeobuf_directory():
    """Multiple layers are stored as individual .fgb files in a directory."""
    if gdaltest.flatgeobuf_drv is None:
        pytest.skip()
    ds = ogr.GetDriverByName('FlatGeobuf').CreateDataSource('/vsimem/multi_layer')
    # 'foo<' is not a valid file name; it is laundered to 'foo_' and the
    # resulting warning is swallowed by the error handler.
    with gdaltest.error_handler(): ds.CreateLayer('foo<', geom_type = ogr.wkbPoint)
    ds.CreateLayer('bar', geom_type = ogr.wkbPoint)
    ds = None
    ds = gdal.OpenEx('/vsimem/multi_layer')
    assert set(ds.GetFileList()) == set(['/vsimem/multi_layer/bar.fgb', '/vsimem/multi_layer/foo_.fgb'])
    # Lookup by the original (unlaundered) layer name must still succeed.
    assert ds.GetLayer('foo<')
    assert ds.GetLayer('bar')
    ds = None
    ogr.GetDriverByName('FlatGeobuf').DeleteDataSource('/vsimem/multi_layer')
    assert not gdal.VSIStatL('/vsimem/multi_layer')
| true
| true
|
f708e4e7c4120d37ff34dd3959cff2c61ac27f10
| 524
|
py
|
Python
|
venv/lib/python3.6/site-packages/Sastrawi/Morphology/Disambiguator/DisambiguatorPrefixRule7.py
|
purwnt/customer-service-chatbot-app
|
519caacc8557de04e1557456b852e66fea641ff4
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/Sastrawi/Morphology/Disambiguator/DisambiguatorPrefixRule7.py
|
purwnt/customer-service-chatbot-app
|
519caacc8557de04e1557456b852e66fea641ff4
|
[
"MIT"
] | 1
|
2021-05-14T23:07:45.000Z
|
2021-05-14T23:07:45.000Z
|
venv/lib/python3.6/site-packages/Sastrawi/Morphology/Disambiguator/DisambiguatorPrefixRule7.py
|
purwnt/customer-service-chatbot-app
|
519caacc8557de04e1557456b852e66fea641ff4
|
[
"MIT"
] | null | null | null |
import re
class DisambiguatorPrefixRule7(object):
    """Disambiguate Prefix Rule 7.

    Rule 7 : terCerV -> ter-CerV where C != 'r'
    """

    def disambiguate(self, word):
        """Strip the 'ter' prefix from a word of shape terCerV (C a
        consonant other than 'r', V a vowel). Returns the stem, or None
        when the rule does not apply."""
        match = re.match(r'^ter([bcdfghjklmnpqrstvwxyz])er([aiueo].*)$', word)
        if match is None:
            return None
        consonant, remainder = match.groups()
        # The consonant class above includes 'r', so it must be rejected
        # explicitly here.
        if consonant == 'r':
            return None
        return consonant + 'er' + remainder
| 30.823529
| 81
| 0.532443
|
import re
class DisambiguatorPrefixRule7(object):
    """Disambiguate Prefix Rule 7: terCerV -> ter-CerV where C != 'r'."""
    def disambiguate(self, word):
        """Strip the 'ter' prefix from a word of shape terCerV (C a consonant
        other than 'r', V a vowel); return None when the rule does not apply."""
        matches = re.match(r'^ter([bcdfghjklmnpqrstvwxyz])er([aiueo].*)$', word)
        if matches:
            # The consonant class includes 'r', so reject it explicitly.
            if matches.group(1) == 'r':
                return
            return matches.group(1) + 'er' + matches.group(2)
| true
| true
|
f708e50db1f4c3784cacecdfda9d99df22227d9f
| 2,340
|
py
|
Python
|
sp.py
|
The-SocialLion/Speech-Emotion-Recognition-using-MLP-Classifier
|
5c4101ebbe2b43db28dbb97f94dc3001bdf56ff8
|
[
"Apache-2.0"
] | null | null | null |
sp.py
|
The-SocialLion/Speech-Emotion-Recognition-using-MLP-Classifier
|
5c4101ebbe2b43db28dbb97f94dc3001bdf56ff8
|
[
"Apache-2.0"
] | null | null | null |
sp.py
|
The-SocialLion/Speech-Emotion-Recognition-using-MLP-Classifier
|
5c4101ebbe2b43db28dbb97f94dc3001bdf56ff8
|
[
"Apache-2.0"
] | null | null | null |
import librosa
import soundfile
import os, glob, pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
def extract_feature(file_name, mfcc, chroma, mel):
    """Extract a 1-D feature vector from a sound file.

    file_name: path to a file readable by soundfile.
    mfcc, chroma, mel: booleans selecting which features to include.

    Returns a numpy array with the mean MFCC, chroma and/or mel-spectrogram
    features concatenated in that order.
    """
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
        if chroma:
            # Chroma features are computed from the short-time Fourier transform.
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))
        if chroma:
            chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma))
        if mel:
            # Fix: pass the audio as y= explicitly; the positional audio
            # argument was removed in librosa 0.10 (keyword works in older
            # versions too, so this stays backward-compatible).
            mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel))
        return result
# Maps the numeric emotion code embedded in each file name (third
# '-'-separated field, see load_data below; presumably the RAVDESS
# coding — verify) to a human-readable label.
emotions={
  '01':'neutral',
  '02':'calm',
  '03':'happy',
  '04':'sad',
  '05':'angry',
  '06':'fearful',
  '07':'disgust',
  '08':'surprised'
}
#DataFlair - Emotions to observe
# Only these four labels are kept by load_data(); other files are skipped.
observed_emotions=['calm', 'happy', 'fearful', 'disgust']
def load_data(ts):
    """Load features and labels for the observed emotions and split them.

    ts: fraction of samples to hold out for the test split.
    Returns (x_train, x_test, y_train, y_test) via train_test_split.
    """
    tr=abs(1-ts)  # train fraction is the complement of the test fraction
    x,y=[],[]
    # NOTE(review): hard-coded, machine-specific dataset location.
    for file in glob.glob("D:\\python\\dl programs\\SP\\DATA\\Actor_*\\*.wav"):
        file_name=os.path.basename(file)
        # The third '-'-separated field of the file name encodes the emotion.
        emotion=emotions[file_name.split("-")[2]]
        print(emotion)
        if emotion not in observed_emotions:
            continue
        feature=extract_feature(file, mfcc=True, chroma=True, mel=True)
        x.append(feature)
        y.append(emotion)
    return train_test_split(np.array(x), y, test_size=ts, train_size=tr ,random_state=9)
ts = 0.25  # hold out 25% of the samples for testing

# Fix: the original called load_data(ts) twice — once discarding the result —
# which read and featurized the entire dataset a second time for nothing.
x_train, x_test, y_train, y_test = load_data(ts)
print((x_train.shape[0], x_test.shape[0]))
print(f'Features extracted: {x_train.shape[1]}')

#DataFlair - Initialize the Multi Layer Perceptron Classifier
model = MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)
model.fit(x_train, y_train)

# Evaluate on the held-out split.
y_pred = model.predict(x_test)
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
#DataFlair - Print the accuracy
print("Accuracy: {:.2f}%".format(accuracy*100))
| 36
| 130
| 0.658974
|
import librosa
import soundfile
import os, glob, pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
def extract_feature(file_name, mfcc, chroma, mel):
    """Return a 1-D feature vector for the given sound file.

    The mean MFCC, chroma and/or mel-spectrogram features are concatenated
    in that order, depending on which boolean flags are set.
    """
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate=sound_file.samplerate
        if chroma:
            # Chroma features are derived from the short-time Fourier transform.
            stft=np.abs(librosa.stft(X))
        result=np.array([])
        if mfcc:
            mfccs=np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result=np.hstack((result, mfccs))
        if chroma:
            chroma=np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
            result=np.hstack((result, chroma))
        if mel:
            # NOTE(review): the positional audio argument was removed in
            # librosa >= 0.10 — pass y=X explicitly when upgrading.
            mel=np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)
            result=np.hstack((result, mel))
        return result
# Maps the numeric emotion code embedded in each file name (third
# '-'-separated field, see load_data below; presumably the RAVDESS
# coding — verify) to a human-readable label.
emotions={
  '01':'neutral',
  '02':'calm',
  '03':'happy',
  '04':'sad',
  '05':'angry',
  '06':'fearful',
  '07':'disgust',
  '08':'surprised'
}
# Only these four labels are kept by load_data(); other files are skipped.
observed_emotions=['calm', 'happy', 'fearful', 'disgust']
def load_data(ts):
    """Load features and labels for the observed emotions and split them.

    ts: fraction of samples to hold out for the test split.
    Returns (x_train, x_test, y_train, y_test) via train_test_split.
    """
    tr=abs(1-ts)  # train fraction is the complement of the test fraction
    x,y=[],[]
    # NOTE(review): hard-coded, machine-specific dataset location.
    for file in glob.glob("D:\\python\\dl programs\\SP\\DATA\\Actor_*\\*.wav"):
        file_name=os.path.basename(file)
        # The third '-'-separated field of the file name encodes the emotion.
        emotion=emotions[file_name.split("-")[2]]
        print(emotion)
        if emotion not in observed_emotions:
            continue
        feature=extract_feature(file, mfcc=True, chroma=True, mel=True)
        x.append(feature)
        y.append(emotion)
    return train_test_split(np.array(x), y, test_size=ts, train_size=tr ,random_state=9)
ts = 0.25  # hold out 25% of the samples for testing

# Fix: the original called load_data(ts) twice — once discarding the result —
# which read and featurized the entire dataset a second time for nothing.
x_train, x_test, y_train, y_test = load_data(ts)
print((x_train.shape[0], x_test.shape[0]))
print(f'Features extracted: {x_train.shape[1]}')

# Train a multi-layer perceptron classifier on the extracted features.
model = MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)
model.fit(x_train, y_train)

# Evaluate on the held-out split.
y_pred = model.predict(x_test)
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
print("Accuracy: {:.2f}%".format(accuracy*100))
| true
| true
|
f708e557288c87be71ea0404a8bb00dc3767cf97
| 10,040
|
py
|
Python
|
src/transformers/tokenization_auto.py
|
mariamabarham/transformers
|
d490b5d5003654f104af3abd0556e598335b5650
|
[
"Apache-2.0"
] | 6
|
2020-06-22T01:42:20.000Z
|
2021-12-24T02:55:51.000Z
|
src/transformers/tokenization_auto.py
|
mariamabarham/transformers
|
d490b5d5003654f104af3abd0556e598335b5650
|
[
"Apache-2.0"
] | 3
|
2020-11-29T18:11:03.000Z
|
2021-06-11T10:04:30.000Z
|
src/transformers/tokenization_auto.py
|
mariamabarham/transformers
|
d490b5d5003654f104af3abd0556e598335b5650
|
[
"Apache-2.0"
] | 1
|
2020-11-29T16:37:16.000Z
|
2020-11-29T16:37:16.000Z
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import logging
from collections import OrderedDict
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
FlaubertConfig,
GPT2Config,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
)
from .configuration_utils import PretrainedConfig
from .tokenization_albert import AlbertTokenizer
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer
logger = logging.getLogger(__name__)
# Maps a config class to (Python tokenizer, fast tokenizer or None).
# Entries are checked in order with isinstance(), which also matches
# subclasses — so the ordering of this OrderedDict is significant.
TOKENIZER_MAPPING = OrderedDict(
    [
        (T5Config, (T5Tokenizer, None)),
        (DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
        (AlbertConfig, (AlbertTokenizer, None)),
        (CamembertConfig, (CamembertTokenizer, None)),
        (XLMRobertaConfig, (XLMRobertaTokenizer, None)),
        (RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
        (BertConfig, (BertTokenizer, BertTokenizerFast)),
        (OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
        (GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
        (TransfoXLConfig, (TransfoXLTokenizer, TransfoXLTokenizerFast)),
        (XLNetConfig, (XLNetTokenizer, None)),
        (FlaubertConfig, (FlaubertTokenizer, None)),
        (XLMConfig, (XLMTokenizer, None)),
        (CTRLConfig, (CTRLTokenizer, None)),
    ]
)
class AutoTokenizer:
    r""":class:`~transformers.AutoTokenizer` is a generic tokenizer class
    that will be instantiated as one of the tokenizer classes of the library
    when created with the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)`
    class method.

    The `from_pretrained()` method returns the tokenizer class matching the
    loaded config's class via ``TOKENIZER_MAPPING``, with one string-based
    special case: names containing `bert-base-japanese` get
    ``BertJapaneseTokenizer``.

    This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        # Deliberately non-constructible: the factory only works through the
        # `from_pretrained` class method.
        raise EnvironmentError(
            "AutoTokenizer is designed to be instantiated "
            "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        r""" Instantiate one of the tokenizer classes of the library
        from a pre-trained model vocabulary.

        The tokenizer class is selected from the class of the model config
        (loaded with ``AutoConfig`` when not supplied) via ``TOKENIZER_MAPPING``.

        Params:
            pretrained_model_name_or_path: either:

                - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
                - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.

            config: (`optional`) an explicit :class:`~transformers.PretrainedConfig`; when absent, one is loaded from `pretrained_model_name_or_path`.

            use_fast: (`optional`) boolean, default True:
                Indicate if transformers should try to load the fast version of the tokenizer (True) or use the Python one (False).

            inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.

            kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.

        Raises:
            ValueError: when the resolved config class has no entry in ``TOKENIZER_MAPPING``.

        Examples::

            # Download vocabulary from S3 and cache.
            tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

            # Download vocabulary from S3 (user-uploaded) and cache.
            tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')

            # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')

        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        # Japanese BERT needs its dedicated tokenizer; it cannot be told apart
        # through the config-class mapping below, hence this name check.
        if "bert-base-japanese" in pretrained_model_name_or_path:
            return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)

        use_fast = kwargs.pop("use_fast", True)
        # First matching entry wins; isinstance() also matches subclasses, so
        # the order of TOKENIZER_MAPPING matters.
        for config_class, (tokenizer_class_py, tokenizer_class_fast) in TOKENIZER_MAPPING.items():
            if isinstance(config, config_class):
                if tokenizer_class_fast and use_fast:
                    return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
                else:
                    return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)

        raise ValueError(
            "Unrecognized configuration class {} to build an AutoTokenizer.\n"
            "Model type should be one of {}.".format(
                config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys())
            )
        )
| 50.964467
| 372
| 0.697211
|
import logging
from collections import OrderedDict
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
FlaubertConfig,
GPT2Config,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
)
from .configuration_utils import PretrainedConfig
from .tokenization_albert import AlbertTokenizer
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer
logger = logging.getLogger(__name__)
# Maps a config class to (Python tokenizer, fast tokenizer or None).
# Entries are checked in order with isinstance(), which also matches
# subclasses — so the ordering of this OrderedDict is significant.
TOKENIZER_MAPPING = OrderedDict(
    [
        (T5Config, (T5Tokenizer, None)),
        (DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
        (AlbertConfig, (AlbertTokenizer, None)),
        (CamembertConfig, (CamembertTokenizer, None)),
        (XLMRobertaConfig, (XLMRobertaTokenizer, None)),
        (RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
        (BertConfig, (BertTokenizer, BertTokenizerFast)),
        (OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
        (GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
        (TransfoXLConfig, (TransfoXLTokenizer, TransfoXLTokenizerFast)),
        (XLNetConfig, (XLNetTokenizer, None)),
        (FlaubertConfig, (FlaubertTokenizer, None)),
        (XLMConfig, (XLMTokenizer, None)),
        (CTRLConfig, (CTRLTokenizer, None)),
    ]
)
class AutoTokenizer:
    """Factory that instantiates the tokenizer class matching a model config.

    Not directly constructible; use ``AutoTokenizer.from_pretrained(...)``,
    which selects the concrete tokenizer class from the loaded config's
    class via ``TOKENIZER_MAPPING``.
    """
    def __init__(self):
        # Deliberately non-constructible: the factory only works through the
        # `from_pretrained` class method.
        raise EnvironmentError(
            "AutoTokenizer is designed to be instantiated "
            "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """Instantiate the tokenizer matching the model's configuration.

        An explicit ``config`` kwarg is honored; otherwise the config is
        loaded from *pretrained_model_name_or_path*. ``use_fast`` (default
        True) prefers the fast tokenizer when one exists. Remaining *inputs*
        and **kwargs** are passed through to the chosen tokenizer's
        ``from_pretrained``. Raises ValueError for unmapped config classes.
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        # Japanese BERT needs its dedicated tokenizer; it cannot be told apart
        # through the config-class mapping below, hence this name check.
        if "bert-base-japanese" in pretrained_model_name_or_path:
            return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        use_fast = kwargs.pop("use_fast", True)
        # First matching entry wins; isinstance() also matches subclasses, so
        # the order of TOKENIZER_MAPPING matters.
        for config_class, (tokenizer_class_py, tokenizer_class_fast) in TOKENIZER_MAPPING.items():
            if isinstance(config, config_class):
                if tokenizer_class_fast and use_fast:
                    return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
                else:
                    return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} to build an AutoTokenizer.\n"
            "Model type should be one of {}.".format(
                config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys())
            )
        )
| true
| true
|
f708e5c068545992b18a1ec51e23cff6cfb0e647
| 1,422
|
py
|
Python
|
ifollow/wsgi.py
|
moe-szyslak/TheCondor
|
8066202cfe2d972ad643e4b7c179be5089dbcc65
|
[
"MIT"
] | 1
|
2015-10-27T04:02:41.000Z
|
2015-10-27T04:02:41.000Z
|
ifollow/wsgi.py
|
moe-szyslak/TheCondor
|
8066202cfe2d972ad643e4b7c179be5089dbcc65
|
[
"MIT"
] | null | null | null |
ifollow/wsgi.py
|
moe-szyslak/TheCondor
|
8066202cfe2d972ad643e4b7c179be5089dbcc65
|
[
"MIT"
] | null | null | null |
"""
WSGI config for ifollow project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "ifollow.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ifollow.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 43.090909
| 79
| 0.803094
|
import os

# Defer to a DJANGO_SETTINGS_MODULE already present in the environment;
# fall back to this project's settings module otherwise.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ifollow.settings")

# This application object is used by any WSGI server configured to use this
# file, including Django's development server when the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true
| true
|
f708e5f4e21d0ec582297a904c5c3b950283833a
| 5,230
|
py
|
Python
|
examples/inverse/plot_lcmv_beamformer_volume.py
|
ragatti/mne-python
|
c6825a49c3452db616fc980d62d33f6dddf4cd65
|
[
"BSD-3-Clause"
] | 1
|
2020-04-25T05:01:54.000Z
|
2020-04-25T05:01:54.000Z
|
examples/inverse/plot_lcmv_beamformer_volume.py
|
ragatti/mne-python
|
c6825a49c3452db616fc980d62d33f6dddf4cd65
|
[
"BSD-3-Clause"
] | null | null | null |
examples/inverse/plot_lcmv_beamformer_volume.py
|
ragatti/mne-python
|
c6825a49c3452db616fc980d62d33f6dddf4cd65
|
[
"BSD-3-Clause"
] | null | null | null |
"""
====================================================
Compute LCMV inverse solution in volume source space
====================================================
Compute LCMV beamformers on an auditory evoked dataset in a volume source
space, and show activation on ``fsaverage``.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.beamformer import make_lcmv, apply_lcmv
print(__doc__)
###############################################################################
# Data preprocessing:
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
fetch_fsaverage(subjects_dir) # ensure fsaverage src exists
fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
# Get epochs
event_id, tmin, tmax = [1, 2], -0.2, 0.5
# Read forward model
forward = mne.read_forward_solution(fname_fwd)
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
events = mne.find_events(raw)
# Pick the channels of interest
raw.pick(['meg', 'eog'])
# Read epochs
proj = False # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=proj,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
# Visualize sensor space data
evoked.plot_joint()
###############################################################################
# Compute covariance matrices
# ---------------------------
#
# These matrices need to be inverted at some point, but since they are rank
# deficient, some regularization needs to be done for them to be invertable.
# Regularization can be added either by the :func:`mne.compute_covariance`
# function or later by the :func:`mne.beamformer.make_lcmv` function. In this
# example, we'll go with the latter option, so we specify ``method='empirical``
# here.
# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
method='empirical')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='empirical')
###############################################################################
# Compute beamformer filters
# --------------------------
#
# Compute weights of free orientation (vector) beamformer with weight
# normalization (neural activity index, NAI). Providing a noise covariance
# matrix enables whitening of the data and forward solution. Source orientation
# is optimized by setting pick_ori to 'max-power'.
# weight_norm can also be set to 'unit-noise-gain'. Source orientation can also
# be 'normal' (but only when using a surface-based source space) or None,
# which computes a vector beamfomer. Note, however, that not all combinations
# of orientation selection and weight normalization are implemented yet.
filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='max-power',
weight_norm='nai', rank=None)
print(filters)
# You can save these with:
# filters.save('filters-lcmv.h5')
# Apply this spatial filter to the evoked data.
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
###############################################################################
# Plot source space activity
# --------------------------
# You can save result in stc files with:
# stc.save('lcmv-vol')
lims = [0.3, 0.6, 0.9]
stc.plot(
src=forward['src'], subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), mode='stat_map',
initial_time=0.1, verbose=True)
###############################################################################
# Now let's plot this on a glass brain, which will automatically transform the
# data to MNI Talairach space:
# sphinx_gallery_thumbnail_number = 4
stc.plot(
src=forward['src'], subject='sample', subjects_dir=subjects_dir,
mode='glass_brain', clim=dict(kind='value', lims=lims),
initial_time=0.1, verbose=True)
###############################################################################
# Finally let's get another view, this time plotting again a ``'stat_map'``
# style but using volumetric morphing to get data to fsaverage space,
# which we can get by passing a :class:`mne.SourceMorph` as the ``src``
# argument to `mne.VolSourceEstimate.plot`. To save a bit of speed when
# applying the morph, we will crop the STC:
src_fs = mne.read_source_spaces(fname_fs_src)
morph = mne.compute_source_morph(
forward['src'], subject_from='sample', src_to=src_fs,
subjects_dir=subjects_dir,
niter_sdr=[10, 10, 5], niter_affine=[10, 10, 5], # just for speed
verbose=True)
stc_fs = morph.apply(stc.copy().crop(0.05, 0.18))
stc_fs.plot(
src=src_fs, mode='stat_map', initial_time=0.1, subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), verbose=True)
| 39.323308
| 79
| 0.630784
|
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.beamformer import make_lcmv, apply_lcmv
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
fetch_fsaverage(subjects_dir) fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
event_id, tmin, tmax = [1, 2], -0.2, 0.5
forward = mne.read_forward_solution(fname_fwd)
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] events = mne.find_events(raw)
raw.pick(['meg', 'eog'])
proj = False epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=proj,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
evoked.plot_joint()
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
method='empirical')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='empirical')
filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='max-power',
weight_norm='nai', rank=None)
print(filters)
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
lims = [0.3, 0.6, 0.9]
stc.plot(
src=forward['src'], subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), mode='stat_map',
initial_time=0.1, verbose=True)
# data to MNI Talairach space:
# sphinx_gallery_thumbnail_number = 4
stc.plot(
src=forward['src'], subject='sample', subjects_dir=subjects_dir,
mode='glass_brain', clim=dict(kind='value', lims=lims),
initial_time=0.1, verbose=True)
###############################################################################
# Finally let's get another view, this time plotting again a ``'stat_map'``
src_fs = mne.read_source_spaces(fname_fs_src)
morph = mne.compute_source_morph(
forward['src'], subject_from='sample', src_to=src_fs,
subjects_dir=subjects_dir,
niter_sdr=[10, 10, 5], niter_affine=[10, 10, 5], verbose=True)
stc_fs = morph.apply(stc.copy().crop(0.05, 0.18))
stc_fs.plot(
src=src_fs, mode='stat_map', initial_time=0.1, subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), verbose=True)
| true
| true
|
f708e6377e37447588e0d9fb8fda40d91bd7ea72
| 2,306
|
py
|
Python
|
Contents/scripts/animmemo/_lib.py
|
mochio326/AnimMemo
|
41cc0cd16056231a336d5e33fe7a6128fc11d50b
|
[
"MIT"
] | 8
|
2018-01-08T02:38:13.000Z
|
2020-12-22T05:15:47.000Z
|
Contents/scripts/animmemo/_lib.py
|
mochio326/AnimMemo
|
41cc0cd16056231a336d5e33fe7a6128fc11d50b
|
[
"MIT"
] | null | null | null |
Contents/scripts/animmemo/_lib.py
|
mochio326/AnimMemo
|
41cc0cd16056231a336d5e33fe7a6128fc11d50b
|
[
"MIT"
] | null | null | null |
## -*- coding: utf-8 -*-
from .vendor.Qt import QtCore, QtGui, QtWidgets
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as OpenMayaUI
import maya.OpenMaya as OpenMaya
import json
import os
def maya_version():
    """Return the running Maya version's leading year digits as an int."""
    version_string = cmds.about(v=True)
    return int(version_string[:4])
def maya_api_version():
    """Return Maya's API version number as an int."""
    api_version = cmds.about(api=True)
    return int(api_version)
# Maya 2017+ ships PySide2, whose bindings are wrapped by shiboken2;
# earlier versions use the original shiboken.  Alias both to the same
# name so the rest of the module is version-agnostic.
if 2017 <= maya_version():
    import shiboken2 as shiboken
else:
    import shiboken
def get_anim_curve_editor():
    """Return the control backing the graph editor's anim curve view."""
    return cmds.animCurveEditor(
        'graphEditor1GraphEd', q=True, control=True)
def get_play_back_slider():
    """Return the name of Maya's global playback slider control."""
    slider_name = mel.eval("$_=$gPlayBackSlider")
    return slider_name
def get_timeline_wiget():
    """Return the QWidget wrapping Maya's playback slider (timeline).

    The original public (misspelled) name is kept for compatibility.
    """
    pbs = get_play_back_slider()
    ptr = OpenMayaUI.MQtUtil.findControl(pbs)
    # ``long`` does not exist on Python 3 (Maya 2022+); ``int`` handles
    # arbitrarily large pointer values on both Python 2 and 3.
    w = shiboken.wrapInstance(int(ptr), QtWidgets.QWidget)
    return w
def get_anim_curve_editor_wiget():
    """Return the second child widget of the anim curve editor control.

    Returns None when the graph editor UI does not exist.  The original
    public (misspelled) name is kept for compatibility.
    """
    ace = get_anim_curve_editor()
    ptr = OpenMayaUI.MQtUtil.findControl(ace)
    if ptr is None:
        return None
    # ``long`` is Python 2 only; ``int`` works for pointer values on
    # both Python 2 and Python 3 (Maya 2022+).
    w = shiboken.wrapInstance(int(ptr), QtWidgets.QWidget)
    return w.children()[1]
def get_timeline_highlight_range():
    """Return (start, end) of the highlighted range on the timeline."""
    slider = get_play_back_slider()
    highlight = cmds.timeControl(slider, q=True, ra=True)
    return highlight[0], highlight[1]
def get_timeline_renge():
    """Return the selected timeline range as ``[start, end]`` ints.

    NOTE(review): the upper value is decremented by one frame —
    presumably because ``timeControl -ra`` reports an exclusive upper
    bound; confirm against Maya's docs.  Original (misspelled) name kept.
    """
    values = cmds.timeControl(get_play_back_slider(), query=True, ra=True)
    return [int(values[0]), int(values[1]) - 1]
def draw_data_to_multi_line_data(draw_data):
    """Pack draw entries into rows so frame ranges never overlap.

    Each entry is a dict with an ``'fr'`` key holding an inclusive
    ``(start, end)`` frame range.  Entries are placed greedily on the
    first existing row whose entries are all disjoint from the new
    range; a new row is started when none fits.

    :param draw_data: list of dicts, each carrying an 'fr' range pair
    :return: list of rows, each a list of the original dicts
    """
    rows = []
    for entry in draw_data:
        start, end = entry['fr'][0], entry['fr'][1]
        placed = False
        for row in rows:
            # Two inclusive ranges are disjoint iff one ends strictly
            # before the other starts.
            fits = all(end < item['fr'][0] or item['fr'][1] < start
                       for item in row)
            if fits:
                row.append(entry)
                placed = True
                break
        if not placed:
            # No existing row can take this entry; open a new one.
            rows.append([entry])
    return rows
#-----------------------------------------------------------------------------
# EOF
#-----------------------------------------------------------------------------
| 26.204545
| 83
| 0.561145
|
from .vendor.Qt import QtCore, QtGui, QtWidgets
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as OpenMayaUI
import maya.OpenMaya as OpenMaya
import json
import os
def maya_version():
return int(cmds.about(v=True)[:4])
def maya_api_version():
return int(cmds.about(api=True))
if 2017 <= maya_version():
import shiboken2 as shiboken
else:
import shiboken
def get_anim_curve_editor():
return cmds.animCurveEditor('graphEditor1GraphEd', q=True, control=True)
def get_play_back_slider():
return mel.eval("$_=$gPlayBackSlider")
def get_timeline_wiget():
_pbs = get_play_back_slider()
_c = OpenMayaUI.MQtUtil.findControl(_pbs)
w = shiboken.wrapInstance(long(_c), QtWidgets.QWidget)
return w
def get_anim_curve_editor_wiget():
_pbs = get_anim_curve_editor()
_c = OpenMayaUI.MQtUtil.findControl(_pbs)
if _c is None:
return None
w = shiboken.wrapInstance(long(_c), QtWidgets.QWidget)
return w.children()[1]
def get_timeline_highlight_range():
_pbs = get_play_back_slider()
_r = cmds.timeControl(_pbs, q=True, ra=True)
return _r[0], _r[1]
def get_timeline_renge():
r = cmds.timeControl(get_play_back_slider(), query=True, ra=True)
return [int(r[0]), int(r[1]) - 1]
def draw_data_to_multi_line_data(draw_data):
lines = []
for d in draw_data:
_dfr = d['fr']
_append = False
for line in lines:
_overlap = False
for l in line:
_lfr = l['fr']
if _lfr[0] <= _dfr[0] <= _lfr[1] or _lfr[0] <= _dfr[1] <= _lfr[1]:
_overlap = True
break
if _dfr[0] <= _lfr[0] <= _dfr[1] and _dfr[0] <= _lfr[1] <= _dfr[1]:
_overlap = True
break
if not _overlap:
line.append(d)
_append = True
break
if not _append:
lines.append([d])
return lines
| true
| true
|
f708e7b5f388dd6c976262da9d55674a512e7986
| 897
|
py
|
Python
|
thumt/utils/distribute.py
|
THUNLP-MT/Copy4APE
|
2e341f1bb31d0e25d7ff46cc31521ac3632eb746
|
[
"BSD-3-Clause"
] | 12
|
2019-09-06T14:36:55.000Z
|
2021-11-18T02:11:04.000Z
|
thumt/utils/distribute.py
|
THUNLP-MT/Copy4APE
|
2e341f1bb31d0e25d7ff46cc31521ac3632eb746
|
[
"BSD-3-Clause"
] | 4
|
2019-10-10T15:49:12.000Z
|
2021-05-06T01:11:58.000Z
|
thumt/utils/distribute.py
|
THUNLP-MT/Copy4APE
|
2e341f1bb31d0e25d7ff46cc31521ac3632eb746
|
[
"BSD-3-Clause"
] | 3
|
2020-01-03T07:53:02.000Z
|
2020-03-26T04:19:15.000Z
|
# coding=utf-8
# Copyright 2017-2019 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# Module-level handle to the distributed-training engine (horovod);
# stays None until enable_distributed_training() is called.
_ENGINE = None
def enable_distributed_training():
    """Import and initialize horovod as the module-level engine.

    Exits the process with a non-zero status when horovod is missing:
    the original ``exit()`` raised ``SystemExit(None)``, which maps to
    exit status 0 and wrongly signalled success to the caller.
    """
    global _ENGINE
    try:
        import horovod.tensorflow as hvd
    except ImportError:
        sys.stderr.write("Error: You must install horovod first in order to"
                         " enable distributed training.\n")
        sys.exit(1)
    _ENGINE = hvd
    hvd.init()
def is_distributed_training_mode():
    """Return True once distributed training has been enabled."""
    return _ENGINE is not None
def rank():
    """Return this process's global rank (requires an enabled engine)."""
    return _ENGINE.rank()
def local_rank():
    """Return this process's rank within its host (requires an enabled engine)."""
    return _ENGINE.local_rank()
def size():
    """Return the total number of workers (requires an enabled engine)."""
    return _ENGINE.size()
def all_reduce(tensor):
    """All-reduce ``tensor`` across workers, with fp16 compression to
    reduce network traffic during the exchange."""
    return _ENGINE.allreduce(tensor, compression=_ENGINE.Compression.fp16)
def get_broadcast_hook():
    """Return a hook broadcasting initial global variables from rank 0."""
    return _ENGINE.BroadcastGlobalVariablesHook(0)
| 19.085106
| 76
| 0.703456
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
_ENGINE = None
def enable_distributed_training():
global _ENGINE
try:
import horovod.tensorflow as hvd
_ENGINE = hvd
hvd.init()
except ImportError:
sys.stderr.write("Error: You must install horovod first in order to"
" enable distributed training.\n")
exit()
def is_distributed_training_mode():
return _ENGINE is not None
def rank():
return _ENGINE.rank()
def local_rank():
return _ENGINE.local_rank()
def size():
return _ENGINE.size()
def all_reduce(tensor):
return _ENGINE.allreduce(tensor, compression=_ENGINE.Compression.fp16)
def get_broadcast_hook():
return _ENGINE.BroadcastGlobalVariablesHook(0)
| true
| true
|
f708e804af34c7cabb6dba3ee78730930ad65f23
| 827
|
py
|
Python
|
tests/functional/dashboard/test_partner.py
|
iicc/django-oscar
|
67ebe6bc21c242e9b0750b9f306b2f46a2758199
|
[
"BSD-3-Clause"
] | 2
|
2019-07-27T23:00:28.000Z
|
2021-09-08T14:25:30.000Z
|
tests/functional/dashboard/test_partner.py
|
iicc/django-oscar
|
67ebe6bc21c242e9b0750b9f306b2f46a2758199
|
[
"BSD-3-Clause"
] | 11
|
2019-12-21T06:06:48.000Z
|
2022-01-13T01:41:33.000Z
|
tests/functional/dashboard/test_partner.py
|
iicc/django-oscar
|
67ebe6bc21c242e9b0750b9f306b2f46a2758199
|
[
"BSD-3-Clause"
] | 3
|
2019-03-20T16:17:58.000Z
|
2022-02-25T09:38:38.000Z
|
from django.urls import reverse
from oscar.test.testcases import WebTestCase
from oscar.apps.partner import models
class TestPartnerDashboard(WebTestCase):
    """Functional tests for the dashboard's partner management pages."""
    # Presumably makes WebTestCase log the client in as a staff user —
    # see oscar.test.testcases for the exact semantics.
    is_staff = True

    def test_allows_a_partner_user_to_be_created(self):
        """Submitting the link-user form attaches a user to the partner."""
        new_partner = models.Partner.objects.create(name="Acme Ltd")
        partner_list = self.get(reverse('dashboard:partner-list'))
        partner_detail = partner_list.click("Manage partner and users")
        link_user_page = partner_detail.click("Link a new user")
        user_form = link_user_page.form
        user_form['first_name'] = "Maik"
        user_form['last_name'] = "Hoepfel"
        user_form['email'] = "maik@gmail.com"
        user_form['password1'] = "helloworld"
        user_form['password2'] = "helloworld"
        user_form.submit()
        self.assertEqual(1, new_partner.users.all().count())
| 30.62963
| 65
| 0.649335
|
from django.urls import reverse
from oscar.test.testcases import WebTestCase
from oscar.apps.partner import models
class TestPartnerDashboard(WebTestCase):
is_staff = True
def test_allows_a_partner_user_to_be_created(self):
partner = models.Partner.objects.create(
name="Acme Ltd")
url = reverse('dashboard:partner-list')
list_page = self.get(url)
detail_page = list_page.click("Manage partner and users")
user_page = detail_page.click("Link a new user")
form = user_page.form
form['first_name'] = "Maik"
form['last_name'] = "Hoepfel"
form['email'] = "maik@gmail.com"
form['password1'] = "helloworld"
form['password2'] = "helloworld"
form.submit()
self.assertEqual(1, partner.users.all().count())
| true
| true
|
f708e8920634dfa425f8e6c30f8e45d04837f031
| 65
|
py
|
Python
|
homedisplay/repeating_tasks/__init__.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 1
|
2016-11-28T04:35:06.000Z
|
2016-11-28T04:35:06.000Z
|
homedisplay/repeating_tasks/__init__.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 160
|
2015-01-01T20:59:29.000Z
|
2016-04-25T13:36:52.000Z
|
homedisplay/repeating_tasks/__init__.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 1
|
2015-02-25T21:24:01.000Z
|
2015-02-25T21:24:01.000Z
|
# Tell Django which AppConfig class to use for this app.
# NOTE(review): ``default_app_config`` is deprecated since Django 3.2
# (apps are auto-discovered); harmless to keep for older versions.
default_app_config = 'repeating_tasks.apps.RepeatingTasksConfig'
| 32.5
| 64
| 0.876923
|
default_app_config = 'repeating_tasks.apps.RepeatingTasksConfig'
| true
| true
|
f708e90b495e842e0c6fdb21da7bd73edf90dfca
| 4,586
|
py
|
Python
|
cottonwood/core/layers/dense.py
|
brohrer/nn_methods
|
acf3d1369e240971e5ee05696610c59c4c993a30
|
[
"MIT"
] | 73
|
2019-10-15T22:02:52.000Z
|
2022-03-18T20:33:58.000Z
|
cottonwood/core/layers/dense.py
|
brohrer/nn_methods
|
acf3d1369e240971e5ee05696610c59c4c993a30
|
[
"MIT"
] | 7
|
2019-11-23T00:10:55.000Z
|
2021-05-29T03:50:42.000Z
|
cottonwood/core/layers/dense.py
|
brohrer/nn_methods
|
acf3d1369e240971e5ee05696610c59c4c993a30
|
[
"MIT"
] | 14
|
2019-10-16T02:39:42.000Z
|
2019-12-08T07:02:07.000Z
|
import numpy as np
from cottonwood.core.activation import Tanh
from cottonwood.core.initializers import LSUV
from cottonwood.core.layers.generic_layer import GenericLayer
from cottonwood.core.optimizers import SGD
import cottonwood.core.toolbox as tb
class Dense(GenericLayer):
    """Fully connected layer: ``y = f([x, 1] @ W)``, with optional dropout.

    The weight matrix has shape ``(m_inputs + 1, n_outputs)``; the extra
    input row holds the bias weights.  The activation function, weight
    initializer and optimizer are pluggable components.
    """
    def __init__(
        self,
        n_outputs,
        m_inputs=None,
        activation_function=None,
        dropout_rate=0,
        initializer=None,
        previous_layer=None,
        optimizer=None,
    ):
        """Build the layer.

        :param n_outputs: number of output nodes.
        :param m_inputs: number of inputs; inferred from
            ``previous_layer.y.size`` when None.
        :param activation_function: defaults to ``Tanh()``.
        :param dropout_rate: fraction of inputs zeroed during training.
        :param initializer: weight initializer; defaults to ``LSUV()``.
        :param previous_layer: upstream layer, used for size inference
            and for back-propagating gradients.
        :param optimizer: weight-update rule; defaults to ``SGD()``.
        """
        self.previous_layer = previous_layer
        if m_inputs is not None:
            self.m_inputs = m_inputs
        else:
            self.m_inputs = self.previous_layer.y.size
        self.n_outputs = int(n_outputs)
        self.dropout_rate = dropout_rate
        # Fill in default components where none were supplied.  (The
        # original code also assigned activation_function unconditionally
        # right before this check; that dead store has been removed.)
        if activation_function is None:
            self.activation_function = Tanh()
        else:
            self.activation_function = activation_function
        if initializer is None:
            self.initializer = LSUV()
        else:
            self.initializer = initializer
        if optimizer is None:
            self.optimizer = SGD()
        else:
            self.optimizer = optimizer
        # Choose random weights.
        # Inputs match to rows. Outputs match to columns.
        # Add one to m_inputs to account for the bias term.
        self.weights = self.initializer.initialize(
            self.m_inputs + 1, self.n_outputs)
        self.reset()
        self.regularizers = []

    def __str__(self):
        """Make a descriptive, human-readable string for this layer."""
        str_parts = [
            "fully connected",
            f"number of inputs: {self.m_inputs}",
            f"number of outputs: {self.n_outputs}",
            "activation function:" + tb.indent(
                self.activation_function.__str__()),
            "initialization:" + tb.indent(self.initializer.__str__()),
            "optimizer:" + tb.indent(self.optimizer.__str__()),
        ]
        for regularizer in self.regularizers:
            str_parts.append(
                "regularizer:" + tb.indent(regularizer.__str__()))
        return "\n".join(str_parts)

    def add_regularizer(self, new_regularizer):
        """Attach a regularizer applied around each optimizer update."""
        self.regularizers.append(new_regularizer)

    def reset(self):
        """Zero the forward activations and backward gradients."""
        self.x = np.zeros((1, self.m_inputs))
        self.y = np.zeros((1, self.n_outputs))
        self.de_dx = np.zeros((1, self.m_inputs))
        self.de_dy = np.zeros((1, self.n_outputs))

    def forward_pass(self, evaluating=False, **kwargs):
        """Propagate the inputs forward through the network.

        evaluating: boolean
            Is this part of a training run or an evaluation run?
        """
        if self.previous_layer is not None:
            self.x += self.previous_layer.y
        # Apply dropout only during training runs.
        if evaluating:
            dropout_rate = 0
        else:
            dropout_rate = self.dropout_rate
        if dropout_rate > 0:
            # Zero a random subset of inputs and rescale the survivors so
            # the expected input magnitude is unchanged.
            self.i_dropout = np.zeros(self.x.size, dtype=bool)
            self.i_dropout[np.where(
                np.random.uniform(size=self.x.size) < dropout_rate)] = True
            self.x[:, self.i_dropout] = 0
            self.x[:, np.logical_not(self.i_dropout)] *= 1 / (1 - dropout_rate)
        else:
            self.i_dropout = None
        # Append the constant bias input before applying the weights.
        bias = np.ones((1, 1))
        x_w_bias = np.concatenate((self.x, bias), axis=1)
        v = x_w_bias @ self.weights
        self.y = self.activation_function.calc(v)

    def backward_pass(self):
        """Propagate the error gradients back through the layer and
        apply one optimizer update to the weights."""
        bias = np.ones((1, 1))
        x_w_bias = np.concatenate((self.x, bias), axis=1)
        dy_dv = self.activation_function.calc_d(self.y)
        # v = x_w_bias @ self.weights
        dv_dw = x_w_bias.transpose()
        dv_dx = self.weights.transpose()
        dy_dw = dv_dw @ dy_dv
        self.de_dw = self.de_dy * dy_dw
        # Regularizers may adjust weights/gradients around the update.
        for regularizer in self.regularizers:
            regularizer.pre_optim_update(self)
        self.optimizer.update(self)
        for regularizer in self.regularizers:
            regularizer.post_optim_update(self)
        self.de_dx = (self.de_dy * dy_dv) @ dv_dx
        # Drop the bias column from the gradient, and zero the gradients
        # of inputs that were dropped out on this run.
        de_dx_no_bias = self.de_dx[:, :-1]
        if self.i_dropout is not None:
            de_dx_no_bias[:, self.i_dropout] = 0
        self.previous_layer.de_dy += de_dx_no_bias
| 32.295775
| 79
| 0.597907
|
import numpy as np
from cottonwood.core.activation import Tanh
from cottonwood.core.initializers import LSUV
from cottonwood.core.layers.generic_layer import GenericLayer
from cottonwood.core.optimizers import SGD
import cottonwood.core.toolbox as tb
class Dense(GenericLayer):
def __init__(
self,
n_outputs,
m_inputs=None,
activation_function=None,
dropout_rate=0,
initializer=None,
previous_layer=None,
optimizer=None,
):
self.previous_layer = previous_layer
if m_inputs is not None:
self.m_inputs = m_inputs
else:
self.m_inputs = self.previous_layer.y.size
self.n_outputs = int(n_outputs)
self.activation_function = activation_function
self.dropout_rate = dropout_rate
if activation_function is None:
self.activation_function = Tanh()
else:
self.activation_function = activation_function
if initializer is None:
self.initializer = LSUV()
else:
self.initializer = initializer
if optimizer is None:
self.optimizer = SGD()
else:
self.optimizer = optimizer
self.weights = self.initializer.initialize(
self.m_inputs + 1, self.n_outputs)
self.reset()
self.regularizers = []
def __str__(self):
str_parts = [
"fully connected",
f"number of inputs: {self.m_inputs}",
f"number of outputs: {self.n_outputs}",
"activation function:" + tb.indent(
self.activation_function.__str__()),
"initialization:" + tb.indent(self.initializer.__str__()),
"optimizer:" + tb.indent(self.optimizer.__str__()),
]
for regularizer in self.regularizers:
str_parts.append(
"regularizer:" + tb.indent(regularizer.__str__()))
return "\n".join(str_parts)
def add_regularizer(self, new_regularizer):
self.regularizers.append(new_regularizer)
def reset(self):
self.x = np.zeros((1, self.m_inputs))
self.y = np.zeros((1, self.n_outputs))
self.de_dx = np.zeros((1, self.m_inputs))
self.de_dy = np.zeros((1, self.n_outputs))
def forward_pass(self, evaluating=False, **kwargs):
if self.previous_layer is not None:
self.x += self.previous_layer.y
if evaluating:
dropout_rate = 0
else:
dropout_rate = self.dropout_rate
if dropout_rate > 0:
self.i_dropout = np.zeros(self.x.size, dtype=bool)
self.i_dropout[np.where(
np.random.uniform(size=self.x.size) < dropout_rate)] = True
self.x[:, self.i_dropout] = 0
self.x[:, np.logical_not(self.i_dropout)] *= 1 / (1 - dropout_rate)
else:
self.i_dropout = None
bias = np.ones((1, 1))
x_w_bias = np.concatenate((self.x, bias), axis=1)
v = x_w_bias @ self.weights
self.y = self.activation_function.calc(v)
def backward_pass(self):
bias = np.ones((1, 1))
x_w_bias = np.concatenate((self.x, bias), axis=1)
dy_dv = self.activation_function.calc_d(self.y)
dv_dw = x_w_bias.transpose()
dv_dx = self.weights.transpose()
dy_dw = dv_dw @ dy_dv
self.de_dw = self.de_dy * dy_dw
for regularizer in self.regularizers:
regularizer.pre_optim_update(self)
self.optimizer.update(self)
for regularizer in self.regularizers:
regularizer.post_optim_update(self)
self.de_dx = (self.de_dy * dy_dv) @ dv_dx
de_dx_no_bias = self.de_dx[:, :-1]
if self.i_dropout is not None:
de_dx_no_bias[:, self.i_dropout] = 0
self.previous_layer.de_dy += de_dx_no_bias
| true
| true
|
f708e95a3c98b6a895028e81a6372e1ef21c132a
| 11,535
|
py
|
Python
|
pyActLearn/sensors/sensor2vec.py
|
TinghuiWang/pyActLearn
|
d858136e86324fac51b0943765ef60bd405e31d1
|
[
"BSD-3-Clause"
] | 3
|
2017-03-15T03:42:57.000Z
|
2020-01-19T15:47:12.000Z
|
pyActLearn/sensors/sensor2vec.py
|
TinghuiWang/pyActLearn
|
d858136e86324fac51b0943765ef60bd405e31d1
|
[
"BSD-3-Clause"
] | 2
|
2019-02-04T15:31:49.000Z
|
2020-01-26T17:49:22.000Z
|
pyActLearn/sensors/sensor2vec.py
|
TinghuiWang/pyActLearn
|
d858136e86324fac51b0943765ef60bd405e31d1
|
[
"BSD-3-Clause"
] | 3
|
2019-02-02T19:36:17.000Z
|
2021-01-02T15:42:43.000Z
|
import math
import numpy as np
import tensorflow as tf
from ..learning.nn.injectors import SkipGramInjector
def sensor2vec(num_sensors, sensor_event_list, embedding_size=20,
               batch_size=128, num_skips=8, skip_window=5,
               num_neg_samples=64, learning_rate=1.0):
    """Embed sensors in a vector space via skip-gram negative sampling.

    Args:
        num_sensors (int): Total number of distinct sensors.
        sensor_event_list (list of int): Sequence of sensor indices, one
            per event, in temporal order.
        embedding_size (int): Dimension of the embedding vectors.
        batch_size (int): Training mini-batch size.
        num_skips (int): How many times to re-use an input to generate a
            label in the skip-gram model.
        skip_window (int): How many items to consider left/right.
        num_neg_samples (int): Negative samples per batch (clamped to
            ``num_sensors``).
        learning_rate (float): Gradient-descent learning rate.

    Returns:
        tuple: ``(final_embeddings, distance_matrix)`` where
        ``final_embeddings`` are the L2-normalized embeddings and
        ``distance_matrix`` is ``1 - cosine_similarity``, scaled so
        each row's maximum is 1.
    """
    # Negative sampling cannot draw more samples than there are sensors.
    if num_neg_samples > num_sensors:
        num_neg_samples = num_sensors
    # Initialize a SkipGram Injector that yields (input, label) batches.
    injector = SkipGramInjector(sensor_event_list, batch_size, num_skips, skip_window)
    # Build Training Model
    # NOTE(review): this is TF1-style graph code; tf.initialize_all_variables
    # and the keep_dims argument are deprecated in later TF releases.
    graph = tf.Graph()
    with graph.as_default():
        # Input Place Holder
        train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        # As we normally do not have too many sensors - it is OK to use all of them
        valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)
        # Only CPU supports NCE loss
        with tf.device('/cpu:0'):
            # Look up embeddings for inputs.
            embeddings = tf.Variable(
                tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, train_inputs)
            # Construct the variables for the NCE loss
            nce_weights = tf.Variable(
                tf.truncated_normal([num_sensors, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))
            nce_biases = tf.Variable(tf.zeros([num_sensors]))
        # Compute the average NCE loss for the batch.
        # tf.nce_loss automatically draws a new sample of the negative labels each
        # time we evaluate the loss.
        loss = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_weights,
                           biases=nce_biases,
                           labels=train_labels,
                           inputs=embed,
                           num_sampled=num_neg_samples,
                           num_classes=num_sensors))
        # Construct the Optimizer
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        # Compute the cosine similarity between minibatch examples and all embeddings.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(
            normalized_embeddings, valid_dataset)
        similarity = tf.matmul(
            valid_embeddings, normalized_embeddings, transpose_b=True)
        # Add variable initializer.
        init = tf.initialize_all_variables()
    # Begin training.
    num_steps = 100001
    with tf.Session(graph=graph) as session:
        # We must initialize all variables before we use them.
        init.run()
        print("Initialized")
        average_loss = 0
        for step in range(num_steps):
            batch_inputs, batch_labels = injector.next_batch()
            feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
            # We perform one update step by evaluating the optimizer op (including it
            # in the list of returned values for session.run()
            _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
            average_loss += loss_val
            if step % 2000 == 0:
                if step > 0:
                    average_loss /= 2000
                # The average loss is an estimate of the loss over the last 2000 batches.
                print("Average loss at step ", step, ": ", average_loss)
                average_loss = 0
        final_embeddings = normalized_embeddings.eval()
        final_similarity = 1 - similarity.eval()
        # Turn similarity into a distance, scaled so each row peaks at 1.
        distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:, None]
    return final_embeddings, distance_matrix
def sensor2vec_data(sensor_list, event_list, embedding_size=20,
                    batch_size=128, num_skips=8, skip_window=5,
                    num_neg_samples=64, learning_rate=1.0, ignore_off=True):
    """Transform sensors into a high-dimensional embedding space.

    Similar to word embedding used in natural language processing, we
    represent sensors in a synthesized vector space instead of using
    arbitrary labels carrying no useful information.  The methods used to
    find word embeddings fall into two categories: count-based methods
    (Latent Semantic Analysis) and predictive models.  This implementation
    uses the skip-gram negative-sampling model.

    Args:
        sensor_list (:obj:`list` of :obj:`dict`): List of dictionaries
            containing sensor information (each with a ``'name'`` key).
        event_list (:obj:`list` of :obj:`dict`): List of events (each with
            ``'sensor_id'`` and ``'sensor_status'`` keys).
        embedding_size (:obj:`int`): The size of the embedding vector.
        batch_size (:obj:`int`): Mini-batch size used in training.
        num_skips (:obj:`int`): How many times to re-use an input to
            generate a label in the skip-gram model.
        skip_window (:obj:`int`): How many items to consider left or right
            in the skip-gram model.
        num_neg_samples (:obj:`int`): Number of negative samples to draw
            from the vocabulary.
        ignore_off (:obj:`bool`): Skip events whose status is ``OFF``.

    Returns:
        tuple: ``(final_embeddings, distance_matrix)`` — the normalized
        embeddings and a per-row max-scaled ``1 - cosine_similarity``
        matrix.
    """
    # Put sensor in hash table for fast fetch of index
    num_sensors = len(sensor_list)
    # Negative samples cannot exceed sensor numbers
    if num_neg_samples > num_sensors:
        num_neg_samples = num_sensors
    # Store sensor ID in hash table for faster access
    sensor_dict = {}
    for i in range(num_sensors):
        sensor_dict[sensor_list[i]['name']] = i
    # Generate the event sensor index list (optionally dropping OFF events)
    event_sensor_list = []
    for event_entry in event_list:
        if ignore_off and event_entry['sensor_status'].upper() == "OFF":
            continue
        event_sensor_list.append(sensor_dict[event_entry['sensor_id']])
    # Initialize a SkipGram Injector
    injector = SkipGramInjector(event_sensor_list, batch_size, num_skips, skip_window)
    # Build Training Model
    # NOTE(review): TF1-style graph code; tf.initialize_all_variables and
    # keep_dims are deprecated in later TensorFlow releases.
    graph = tf.Graph()
    with graph.as_default():
        # Input Place Holder
        train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        # As we normally do not have too many sensors - it is OK to use all of them
        valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)
        # Only CPU supports NCE loss
        with tf.device('/cpu:0'):
            # Look up embeddings for inputs.
            embeddings = tf.Variable(
                tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, train_inputs)
            # Construct the variables for the NCE loss
            nce_weights = tf.Variable(
                tf.truncated_normal([num_sensors, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))
            nce_biases = tf.Variable(tf.zeros([num_sensors]))
        # Compute the average NCE loss for the batch.
        # tf.nce_loss automatically draws a new sample of the negative labels each
        # time we evaluate the loss.
        loss = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_weights,
                           biases=nce_biases,
                           labels=train_labels,
                           inputs=embed,
                           num_sampled=num_neg_samples,
                           num_classes=num_sensors))
        # Construct the Optimizer
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        # Compute the cosine similarity between minibatch examples and all embeddings.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(
            normalized_embeddings, valid_dataset)
        similarity = tf.matmul(
            valid_embeddings, normalized_embeddings, transpose_b=True)
        # Add variable initializer.
        init = tf.initialize_all_variables()
    # Begin training.
    num_steps = 100001
    with tf.Session(graph=graph) as session:
        # We must initialize all variables before we use them.
        init.run()
        print("Initialized")
        average_loss = 0
        for step in range(num_steps):
            batch_inputs, batch_labels = injector.next_batch()
            feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
            # We perform one update step by evaluating the optimizer op (including it
            # in the list of returned values for session.run()
            _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
            average_loss += loss_val
            if step % 2000 == 0:
                if step > 0:
                    average_loss /= 2000
                # The average loss is an estimate of the loss over the last 2000 batches.
                print("Average loss at step ", step, ": ", average_loss)
                average_loss = 0
            # Note that this is expensive (~20% slowdown if computed every 500 steps)
            if step % 10000 == 0:
                sim = similarity.eval()
                for i in range(num_sensors):
                    valid_sensor = sensor_list[i]['name']
                    top_k = 8  # number of nearest neighbors
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log_str = "Nearest to %s:" % valid_sensor
                    for k in range(top_k):
                        close_sensor = sensor_list[nearest[k]]['name']
                        log_str = "%s %s," % (log_str, close_sensor)
                    print(log_str)
        final_embeddings = normalized_embeddings.eval()
        final_similarity = 1 - similarity.eval()
        # Turn similarity into a distance, scaled so each row peaks at 1.
        distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:,None]
    # (A commented-out t-SNE/matplotlib visualization of the embeddings
    # previously lived here; removed as dead code.)
    return final_embeddings, distance_matrix
| 45.77381
| 93
| 0.604335
|
import math
import numpy as np
import tensorflow as tf
from ..learning.nn.injectors import SkipGramInjector
def sensor2vec(num_sensors, sensor_event_list, embedding_size=20,
               batch_size=128, num_skips=8, skip_window=5,
               num_neg_samples=64, learning_rate=1.0):
    """Learn skip-gram (word2vec-style) embeddings for sensors.

    Treats ``sensor_event_list`` — a time-ordered sequence of integer sensor
    ids — like a text corpus and trains embeddings with NCE loss, so sensors
    that fire in similar temporal contexts end up close together.

    Args:
        num_sensors: Number of distinct sensor ids (the "vocabulary" size).
        sensor_event_list: Ordered list of integer sensor ids (the corpus).
        embedding_size: Dimensionality of each sensor embedding vector.
        batch_size: Number of (input, label) pairs per training step.
        num_skips: Labels drawn per input within a context window.
        skip_window: Context half-width on each side of the input id.
        num_neg_samples: Negative samples for NCE; capped at num_sensors.
        learning_rate: Gradient-descent step size.

    Returns:
        Tuple ``(final_embeddings, distance_matrix)``: L2-normalized
        ``(num_sensors, embedding_size)`` embeddings, and a
        ``(num_sensors, num_sensors)`` cosine-distance matrix rescaled so
        each row's maximum is 1.
    """
    # NCE cannot sample more negatives than there are classes.
    if num_neg_samples > num_sensors:
        num_neg_samples = num_sensors
    # Batch generator yielding skip-gram (inputs, labels) pairs.
    injector = SkipGramInjector(sensor_event_list, batch_size, num_skips, skip_window)
    graph = tf.Graph()
    with graph.as_default():
        train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        # Compare every sensor against every other in the similarity matrix.
        valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)
        # Embedding table and NCE parameters pinned to CPU (TF1-era
        # embedding ops lacked GPU kernels).
        with tf.device('/cpu:0'):
            embeddings = tf.Variable(
                tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, train_inputs)
            nce_weights = tf.Variable(
                tf.truncated_normal([num_sensors, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))
            nce_biases = tf.Variable(tf.zeros([num_sensors]))
        # Noise-contrastive estimation: discriminate the true label from
        # sampled negatives instead of computing a full softmax.
        loss = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_weights,
                           biases=nce_biases,
                           labels=train_labels,
                           inputs=embed,
                           num_sampled=num_neg_samples,
                           num_classes=num_sensors))
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        # L2-normalize so the matmul below yields cosine similarities.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(
            normalized_embeddings, valid_dataset)
        similarity = tf.matmul(
            valid_embeddings, normalized_embeddings, transpose_b=True)
        init = tf.initialize_all_variables()
    num_steps = 100001
    with tf.Session(graph=graph) as session:
        init.run()
        print("Initialized")
        average_loss = 0
        for step in range(num_steps):
            # assumes next_batch() returns arrays shaped (batch_size,) and
            # (batch_size, 1) — TODO confirm in SkipGramInjector.
            batch_inputs, batch_labels = injector.next_batch()
            feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
            _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
            average_loss += loss_val
            if step % 2000 == 0:
                if step > 0:
                    # Running mean of the last 2000 minibatch losses.
                    average_loss /= 2000
                print("Average loss at step ", step, ": ", average_loss)
                average_loss = 0
        final_embeddings = normalized_embeddings.eval()
        # Convert cosine similarity into a distance, then rescale each row
        # by its maximum so rows are comparable.
        final_similarity = 1 - similarity.eval()
        distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:, None]
    return final_embeddings, distance_matrix
def sensor2vec_data(sensor_list, event_list, embedding_size=20,
                    batch_size=128, num_skips=8, skip_window=5,
                    num_neg_samples=64, learning_rate=1.0, ignore_off=True):
    """Train skip-gram sensor embeddings directly from raw event records.

    Converts ``event_list`` into a time-ordered sequence of integer sensor
    ids (optionally dropping OFF events), then trains word2vec-style
    embeddings with NCE loss so sensors firing in similar temporal contexts
    embed close together.  Every 10000 steps the current nearest neighbors
    of each sensor are printed as a progress report.

    Args:
        sensor_list: List of sensor dicts; each must contain a 'name' key.
        event_list: List of event dicts with 'sensor_id' and 'sensor_status'.
        embedding_size: Dimensionality of each embedding vector.
        batch_size: Number of (input, label) pairs per training step.
        num_skips: Labels drawn per input within a context window.
        skip_window: Context half-width around each event.
        num_neg_samples: Negative samples for NCE; capped at the sensor count.
        learning_rate: Gradient-descent step size.
        ignore_off: When True, skip events whose status is "OFF".

    Returns:
        Tuple ``(final_embeddings, distance_matrix)``: L2-normalized
        ``(num_sensors, embedding_size)`` embeddings and a row-rescaled
        cosine-distance matrix.
    """
    num_sensors = len(sensor_list)
    # NCE cannot sample more negatives than there are classes.
    if num_neg_samples > num_sensors:
        num_neg_samples = num_sensors
    # Map sensor name -> integer id (the embedding row index).
    sensor_dict = {}
    for i in range(num_sensors):
        sensor_dict[sensor_list[i]['name']] = i
    # Flatten events into the id "corpus"; assumes each event's 'sensor_id'
    # matches a sensor's 'name' entry — TODO confirm against the data loader.
    event_sensor_list = []
    for event_entry in event_list:
        if ignore_off and event_entry['sensor_status'].upper() == "OFF":
            continue
        event_sensor_list.append(sensor_dict[event_entry['sensor_id']])
    injector = SkipGramInjector(event_sensor_list, batch_size, num_skips, skip_window)
    graph = tf.Graph()
    with graph.as_default():
        train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)
        # Embedding table and NCE parameters pinned to CPU (TF1-era
        # embedding ops lacked GPU kernels).
        with tf.device('/cpu:0'):
            embeddings = tf.Variable(
                tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, train_inputs)
            nce_weights = tf.Variable(
                tf.truncated_normal([num_sensors, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))
            nce_biases = tf.Variable(tf.zeros([num_sensors]))
        # Noise-contrastive estimation loss over sampled negatives.
        loss = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_weights,
                           biases=nce_biases,
                           labels=train_labels,
                           inputs=embed,
                           num_sampled=num_neg_samples,
                           num_classes=num_sensors))
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        # L2-normalize so the matmul below yields cosine similarities.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(
            normalized_embeddings, valid_dataset)
        similarity = tf.matmul(
            valid_embeddings, normalized_embeddings, transpose_b=True)
        init = tf.initialize_all_variables()
    num_steps = 100001
    with tf.Session(graph=graph) as session:
        init.run()
        print("Initialized")
        average_loss = 0
        for step in range(num_steps):
            batch_inputs, batch_labels = injector.next_batch()
            feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
            _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
            average_loss += loss_val
            if step % 2000 == 0:
                if step > 0:
                    # Running mean of the last 2000 minibatch losses.
                    average_loss /= 2000
                print("Average loss at step ", step, ": ", average_loss)
                average_loss = 0
            # Periodic nearest-neighbor report (expensive: full similarity eval).
            if step % 10000 == 0:
                sim = similarity.eval()
                for i in range(num_sensors):
                    valid_sensor = sensor_list[i]['name']
                    # BUG FIX: 'top_k = 8' and the argsort expression were
                    # fused onto one line (a syntax error); split into two
                    # statements.
                    top_k = 8  # number of nearest neighbors to report
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log_str = "Nearest to %s:" % valid_sensor
                    for k in range(top_k):
                        close_sensor = sensor_list[nearest[k]]['name']
                        log_str = "%s %s," % (log_str, close_sensor)
                    print(log_str)
        final_embeddings = normalized_embeddings.eval()
        final_similarity = 1 - similarity.eval()
        distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:, None]
    return final_embeddings, distance_matrix
| true
| true
|
f708e99111649f36ac940b6ac45ab557741cb43e
| 694
|
py
|
Python
|
week4_divide_and_conquer/2_majority_element/majority_element.py
|
sebas119/algorithmic-toolbox
|
8b7e4d66b04f95f9aa159544e96bbe8765abfa56
|
[
"MIT"
] | null | null | null |
week4_divide_and_conquer/2_majority_element/majority_element.py
|
sebas119/algorithmic-toolbox
|
8b7e4d66b04f95f9aa159544e96bbe8765abfa56
|
[
"MIT"
] | null | null | null |
week4_divide_and_conquer/2_majority_element/majority_element.py
|
sebas119/algorithmic-toolbox
|
8b7e4d66b04f95f9aa159544e96bbe8765abfa56
|
[
"MIT"
] | null | null | null |
# Uses python3
import sys
""" def get_majority_element(a, left, right):
if left == right:
return -1
if left + 1 == right:
return a[left]
#write your code here
return -1 """
def get_majority_element_hash_approach(a, n):
    """Return 1 if some element occurs strictly more than n/2 times in a, else 0.

    Counts occurrences in a hash map (O(n) time, O(n) space) and exits as
    soon as any element's count crosses the majority threshold, instead of
    building the full counts and scanning them afterwards.

    Args:
        a: Sequence of (hashable) elements, typically ints.
        n: Declared length of the sequence; the threshold is n / 2.

    Returns:
        1 when a majority element exists, 0 otherwise (including empty input).
    """
    counts = {}
    threshold = n / 2
    for element in a:
        counts[element] = counts.get(element, 0) + 1
        # Counts only grow, so the first crossing decides the answer.
        if counts[element] > threshold:
            return 1
    return 0
if __name__ == '__main__':
    # Read the element count, then the space-separated sequence, from stdin.
    n = int(input())
    a = list(map(int, input().split()))
    # if get_majority_element(a, 0, n) != -1:
    # Print 1 when a majority element (count > n/2) exists, else 0.
    if get_majority_element_hash_approach(a, n):
        print(1)
    else:
        print(0)
| 20.411765
| 52
| 0.520173
|
import sys
def get_majority_element_hash_approach(a, n):
    """Return 1 if any element appears more than n/2 times in a, else 0.

    Single hash-map counting pass followed by a threshold scan.
    """
    frequency = {}
    for value in a:
        frequency[value] = frequency.get(value, 0) + 1
    threshold = n / 2
    return 1 if any(count > threshold for count in frequency.values()) else 0
if __name__ == '__main__':
    # Read the element count, then the space-separated sequence, from stdin.
    n = int(input())
    a = list(map(int, input().split()))
    # Print 1 when a majority element (count > n/2) exists, else 0.
    if get_majority_element_hash_approach(a, n):
        print(1)
    else:
        print(0)
| true
| true
|
f708e9a4590e61d74102e1c7483f7e1fb43ae436
| 1,710
|
py
|
Python
|
button.py
|
qodzero/ukivy
|
d7179a83c2e6e357cf50113f53d24c780bf29789
|
[
"MIT"
] | null | null | null |
button.py
|
qodzero/ukivy
|
d7179a83c2e6e357cf50113f53d24c780bf29789
|
[
"MIT"
] | null | null | null |
button.py
|
qodzero/ukivy
|
d7179a83c2e6e357cf50113f53d24c780bf29789
|
[
"MIT"
] | null | null | null |
from kivy.uix.button import Button
from kivy.properties import StringProperty, BooleanProperty, NumericProperty, ObjectProperty
from kivy.graphics import Color, Rectangle, RoundedRectangle, Ellipse
from kivy.lang import Builder
Builder.load_string('''
<FlatButton>:
background_normal: ''
background_color: [0,0,0,0]
text_size: self.size
valign: 'middle'
halign: 'center'
markup: True
''')
class RoundedButton(FlatButton):
    """A FlatButton rendered over a rounded-rectangle background."""

    # Corner radius of the background rectangle.
    radius = NumericProperty(10)

    def update_back(self):
        """(Re)build the rounded background on ``canvas.before``."""
        with self.canvas.before:
            self.color = Color(rgba=self.background_color)
            self.rect = RoundedRectangle(pos=self.pos, size=self.size,
                                         radius=self.radius)

    def on_radius(self, _, value):
        """Kivy property observer: keep the canvas rectangle's radius in
        sync whenever ``radius`` changes."""
        # NOTE(review): assumes update_back() has run so self.rect exists —
        # confirm call order in callers.
        self.rect.radius = value
class FlatButton(Button):
    """A :class:`kivy.uix.button.Button` with the default visual styling
    removed by the ``<FlatButton>`` kv rule above: it looks like a label
    but remains clickable, and supports every standard ``Button`` property.

    Usage
    ---------
        from ukivy.button import FlatButton

        btn = FlatButton(text='myButton')
        some_widget.add_widget(btn)
    """
    pass
| 24.428571
| 92
| 0.623392
|
from kivy.uix.button import Button
from kivy.properties import StringProperty, BooleanProperty, NumericProperty, ObjectProperty
from kivy.graphics import Color, Rectangle, RoundedRectangle, Ellipse
from kivy.lang import Builder
Builder.load_string('''
<FlatButton>:
background_normal: ''
background_color: [0,0,0,0]
text_size: self.size
valign: 'middle'
halign: 'center'
markup: True
''')
class RoundedButton(FlatButton):
    """FlatButton variant drawn with a rounded-rectangle background."""

    # Corner radius, in pixels, of the background rectangle.
    radius = NumericProperty(10)

    def update_back(self):
        """Draw (or redraw) the rounded background behind the button."""
        with self.canvas.before:
            self.color = Color(rgba=self.background_color)
            self.rect = RoundedRectangle(
                pos=self.pos, size=self.size, radius=self.radius)

    def on_radius(self, _, value):
        """Observer fired by Kivy when ``radius`` changes: propagate the
        new value to the canvas rectangle."""
        self.rect.radius = value
class FlatButton(Button):
    """Button whose default visuals are stripped by the ``<FlatButton>``
    kv rule above, leaving a clickable, label-like widget."""
    pass
| true
| true
|
f708ea3cdf88f21e1a5d732ac490535d1e427158
| 411
|
py
|
Python
|
reportsmanagement/asgi.py
|
saadhaxxan/Reports-Management-Django
|
9acbcaa89fa174b1bf7876eb40ccf5193eb9f653
|
[
"MIT"
] | null | null | null |
reportsmanagement/asgi.py
|
saadhaxxan/Reports-Management-Django
|
9acbcaa89fa174b1bf7876eb40ccf5193eb9f653
|
[
"MIT"
] | null | null | null |
reportsmanagement/asgi.py
|
saadhaxxan/Reports-Management-Django
|
9acbcaa89fa174b1bf7876eb40ccf5193eb9f653
|
[
"MIT"
] | 1
|
2021-05-02T20:27:44.000Z
|
2021-05-02T20:27:44.000Z
|
"""
ASGI config for reportsmanagement project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportsmanagement.settings')
application = get_asgi_application()
| 24.176471
| 78
| 0.79562
|
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportsmanagement.settings')

# ASGI entry point consumed by the ASGI server (e.g. Daphne, Uvicorn).
application = get_asgi_application()
| true
| true
|
f708ea3dd9f5758a8498ae276155cf1d472e275f
| 3,230
|
py
|
Python
|
app/app/settings.py
|
elmaraliyevdev/recipe-api
|
c5b5e8ae1454e1b568971b71a308e3cec930c353
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
elmaraliyevdev/recipe-api
|
c5b5e8ae1454e1b568971b71a308e3cec930c353
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
elmaraliyevdev/recipe-api
|
c5b5e8ae1454e1b568971b71a308e3cec930c353
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-nao&q&bu0i4@-&!nep#b%6x=-_f@-4hu)tb!09w8nujq5nwma*'  # NOTE(review): committed secret — rotate and load from the environment before production.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty: populate with the served hostnames before running with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.634921
| 91
| 0.69969
|
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
# NOTE(review): this secret key is committed to source control — rotate it
# and load it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-nao&q&bu0i4@-&!nep#b%6x=-_f@-4hu)tb!09w8nujq5nwma*'
# Debug must be disabled in production.
DEBUG = True
ALLOWED_HOSTS = []  # populate with served hostnames before going live
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true
| true
|
f708ea805e770aa48410e49e530f2410137eb6dd
| 11,174
|
py
|
Python
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/cluster_node_drive_d_config.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/cluster_node_drive_d_config.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/cluster_node_drive_d_config.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_1.models.node_driveconfig_node_alert import NodeDriveconfigNodeAlert # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_allow import NodeDriveconfigNodeAllow # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_automatic_replacement_recognition import NodeDriveconfigNodeAutomaticReplacementRecognition # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_instant_secure_erase import NodeDriveconfigNodeInstantSecureErase # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_log import NodeDriveconfigNodeLog # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_reboot import NodeDriveconfigNodeReboot # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_spin_wait import NodeDriveconfigNodeSpinWait # noqa: F401,E501
from isi_sdk_8_2_1.models.node_driveconfig_node_stall import NodeDriveconfigNodeStall # noqa: F401,E501
class ClusterNodeDriveDConfig(object):
    """Drive daemon ('drive d') configuration for a cluster node.

    Auto-generated Swagger model: each attribute is an optional nested
    configuration section exposed as a property pair backed by a private
    ``_name`` slot; unset sections stay ``None``.
    """

    # Attribute name -> Swagger model type of the nested section.
    swagger_types = {
        'alert': 'NodeDriveconfigNodeAlert',
        'allow': 'NodeDriveconfigNodeAllow',
        'automatic_replacement_recognition': 'NodeDriveconfigNodeAutomaticReplacementRecognition',
        'instant_secure_erase': 'NodeDriveconfigNodeInstantSecureErase',
        'log': 'NodeDriveconfigNodeLog',
        'reboot': 'NodeDriveconfigNodeReboot',
        'spin_wait': 'NodeDriveconfigNodeSpinWait',
        'stall': 'NodeDriveconfigNodeStall'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'alert': 'alert',
        'allow': 'allow',
        'automatic_replacement_recognition': 'automatic_replacement_recognition',
        'instant_secure_erase': 'instant_secure_erase',
        'log': 'log',
        'reboot': 'reboot',
        'spin_wait': 'spin_wait',
        'stall': 'stall'
    }

    def __init__(self, alert=None, allow=None,
                 automatic_replacement_recognition=None,
                 instant_secure_erase=None, log=None, reboot=None,
                 spin_wait=None, stall=None):
        """Build the model; any section left as None remains unset."""
        self.discriminator = None
        # Initialise every private slot to None, then route the arguments
        # that were actually supplied through the public setters.
        supplied = (('alert', alert), ('allow', allow),
                    ('automatic_replacement_recognition',
                     automatic_replacement_recognition),
                    ('instant_secure_erase', instant_secure_erase),
                    ('log', log), ('reboot', reboot),
                    ('spin_wait', spin_wait), ('stall', stall))
        for name, value in supplied:
            setattr(self, '_' + name, None)
            if value is not None:
                setattr(self, name, value)

    @property
    def alert(self):
        """Configuration setting for drive alerts (NodeDriveconfigNodeAlert)."""
        return self._alert

    @alert.setter
    def alert(self, value):
        self._alert = value

    @property
    def allow(self):
        """Configuration settings for drive formatting (NodeDriveconfigNodeAllow)."""
        return self._allow

    @allow.setter
    def allow(self, value):
        self._allow = value

    @property
    def automatic_replacement_recognition(self):
        """Configuration settings for Automatic Replacement Recognition (ARR)."""
        return self._automatic_replacement_recognition

    @automatic_replacement_recognition.setter
    def automatic_replacement_recognition(self, value):
        self._automatic_replacement_recognition = value

    @property
    def instant_secure_erase(self):
        """Configuration settings for instant secure erase (ISE)."""
        return self._instant_secure_erase

    @instant_secure_erase.setter
    def instant_secure_erase(self, value):
        self._instant_secure_erase = value

    @property
    def log(self):
        """Configuration settings for drive statistics logs (NodeDriveconfigNodeLog)."""
        return self._log

    @log.setter
    def log(self, value):
        self._log = value

    @property
    def reboot(self):
        """Configuration settings for a node reboot due to a drive error."""
        return self._reboot

    @reboot.setter
    def reboot(self, value):
        self._reboot = value

    @property
    def spin_wait(self):
        """Configuration settings for sleeping the drive daemon before the
        node is rescanned."""
        return self._spin_wait

    @spin_wait.setter
    def spin_wait(self, value):
        self._spin_wait = value

    @property
    def stall(self):
        """Configuration settings to evaluate a drive stall."""
        return self._stall

    @stall.setter
    def stall(self, value):
        self._stall = value

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models, lists, and dicts."""
        def convert(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, 'to_dict') else item
                        for item in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                        for key, val in value.items()}
            return value

        return {attr: convert(getattr(self, attr)) for attr in self.swagger_types}

    def to_str(self):
        """Pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal iff they are the same type with equal state."""
        return (isinstance(other, ClusterNodeDriveDConfig)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 34.91875
| 179
| 0.665205
|
import pprint
import re

import six

from isi_sdk_8_2_1.models.node_driveconfig_node_alert import NodeDriveconfigNodeAlert
from isi_sdk_8_2_1.models.node_driveconfig_node_allow import NodeDriveconfigNodeAllow
from isi_sdk_8_2_1.models.node_driveconfig_node_automatic_replacement_recognition import NodeDriveconfigNodeAutomaticReplacementRecognition
from isi_sdk_8_2_1.models.node_driveconfig_node_instant_secure_erase import NodeDriveconfigNodeInstantSecureErase
from isi_sdk_8_2_1.models.node_driveconfig_node_log import NodeDriveconfigNodeLog
from isi_sdk_8_2_1.models.node_driveconfig_node_reboot import NodeDriveconfigNodeReboot
from isi_sdk_8_2_1.models.node_driveconfig_node_spin_wait import NodeDriveconfigNodeSpinWait
from isi_sdk_8_2_1.models.node_driveconfig_node_stall import NodeDriveconfigNodeStall
class ClusterNodeDriveDConfig(object):
    """Swagger model: drive daemon configuration sections for a cluster node.

    Each attribute is an optional nested section stored in a private
    ``_name`` slot and exposed through a property pair; sections that are
    never set remain ``None``.
    """

    # Attribute name -> Swagger model type of the nested section.
    swagger_types = {
        'alert': 'NodeDriveconfigNodeAlert',
        'allow': 'NodeDriveconfigNodeAllow',
        'automatic_replacement_recognition': 'NodeDriveconfigNodeAutomaticReplacementRecognition',
        'instant_secure_erase': 'NodeDriveconfigNodeInstantSecureErase',
        'log': 'NodeDriveconfigNodeLog',
        'reboot': 'NodeDriveconfigNodeReboot',
        'spin_wait': 'NodeDriveconfigNodeSpinWait',
        'stall': 'NodeDriveconfigNodeStall'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'alert': 'alert',
        'allow': 'allow',
        'automatic_replacement_recognition': 'automatic_replacement_recognition',
        'instant_secure_erase': 'instant_secure_erase',
        'log': 'log',
        'reboot': 'reboot',
        'spin_wait': 'spin_wait',
        'stall': 'stall'
    }

    def __init__(self, alert=None, allow=None,
                 automatic_replacement_recognition=None,
                 instant_secure_erase=None, log=None, reboot=None,
                 spin_wait=None, stall=None):
        """Construct the model; omitted sections stay unset (None)."""
        self.discriminator = None
        # Seed each backing slot with None, then apply only the non-None
        # constructor arguments via the public setters.
        for name, value in (('alert', alert), ('allow', allow),
                            ('automatic_replacement_recognition',
                             automatic_replacement_recognition),
                            ('instant_secure_erase', instant_secure_erase),
                            ('log', log), ('reboot', reboot),
                            ('spin_wait', spin_wait), ('stall', stall)):
            setattr(self, '_' + name, None)
            if value is not None:
                setattr(self, name, value)

    @property
    def alert(self):
        """Drive-alert configuration section."""
        return self._alert

    @alert.setter
    def alert(self, value):
        self._alert = value

    @property
    def allow(self):
        """Drive-formatting configuration section."""
        return self._allow

    @allow.setter
    def allow(self, value):
        self._allow = value

    @property
    def automatic_replacement_recognition(self):
        """Automatic Replacement Recognition (ARR) configuration section."""
        return self._automatic_replacement_recognition

    @automatic_replacement_recognition.setter
    def automatic_replacement_recognition(self, value):
        self._automatic_replacement_recognition = value

    @property
    def instant_secure_erase(self):
        """Instant secure erase (ISE) configuration section."""
        return self._instant_secure_erase

    @instant_secure_erase.setter
    def instant_secure_erase(self, value):
        self._instant_secure_erase = value

    @property
    def log(self):
        """Drive statistics log configuration section."""
        return self._log

    @log.setter
    def log(self, value):
        self._log = value

    @property
    def reboot(self):
        """Configuration for rebooting the node on a drive error."""
        return self._reboot

    @reboot.setter
    def reboot(self, value):
        self._reboot = value

    @property
    def spin_wait(self):
        """Configuration for sleeping the drive daemon before a rescan."""
        return self._spin_wait

    @spin_wait.setter
    def spin_wait(self, value):
        self._spin_wait = value

    @property
    def stall(self):
        """Drive-stall evaluation configuration section."""
        return self._stall

    @stall.setter
    def stall(self, value):
        self._stall = value

    def to_dict(self):
        """Return the model as a plain dict, recursing into nested models."""
        def _coerce(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v
                        for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                        for k, v in value.items()}
            return value

        return {attr: _coerce(getattr(self, attr)) for attr in self.swagger_types}

    def to_str(self):
        """Pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff other is the same model type with identical state."""
        return (isinstance(other, ClusterNodeDriveDConfig)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| true
| true
|
f708eaec40c92aa56ad6fdfbd0430f325b24f5c8
| 10,137
|
py
|
Python
|
FlaskApp/blog.py
|
j2B237/FlaskJoblogueur
|
144b0ed8343e93cab715b034b6d477142ce9681a
|
[
"Apache-2.0"
] | null | null | null |
FlaskApp/blog.py
|
j2B237/FlaskJoblogueur
|
144b0ed8343e93cab715b034b6d477142ce9681a
|
[
"Apache-2.0"
] | null | null | null |
FlaskApp/blog.py
|
j2B237/FlaskJoblogueur
|
144b0ed8343e93cab715b034b6d477142ce9681a
|
[
"Apache-2.0"
] | null | null | null |
# ******************* BLOG MODULE ****************************** #
# ** Created by Yossep
# ** github: https://github.com/j2B237/
# ** Project : Joblogueur
# ** Description:
#
# Within this module we have many functions designed to help display posts
# Methods such as :
# display all posts
# display posts per category
# display individual post
# register email user for the newsletter
# ************************************************************************ #
# Third party import
from flask import Blueprint, render_template, flash, request, redirect, url_for
from flask_mail import Message
# Local import
from FlaskApp.models import Post, Category, Moderator, Comment
from FlaskApp.forms import CommentForm
from . import db, ext, mail
bp = Blueprint('blog', __name__)
# Fake data to seed the website view
# NOTE(review): these fixtures are only referenced by the (no-op) ``global``
# statement inside ``index`` below; they appear to be development seed data.
fake_Category = [
    {
        'id': 1,
        'category_name': "10 bonnes raisons",
        'color': 'primary'
    },
    {
        'id': 2,
        'category_name': "Comment réussir ?",
        'color': 'success',
    },
    {
        'id': 3,
        'category_name': "Offres et formations",
        'color': 'warning'
    }
]
# Sample moderator/admin account.
fake_moderators = [
    { 'id': 1,
      'username': 'admin',
      'email': 'admin@exemple.com',
      'password': 'admin237',
      'address1': 'address1',
      'address2': 'address2',
      'city': 'city',
      'state': 'state',
      'country': 'country',
      'zipcode': 'zipcode',
      'is_admin': True,
      'image_file': 'default.jpg',
      'created_on': '21/02/2021',
      'posts': []
    }
]
# Sample post carrying the section fields (h1..h5 plus paragraph bodies).
fake_posts = [
    {
        'id': 1,
        'title': 'Comment réussir à gagner de l\'argent sur internet',
        'introduction': 'Qu\’ils soient aujourd\’hui milliardaires ou non, reconnus à l\’international ou en France.',
        'p_intro': 'Ils ont tous commencer simplement. Pour toi modeste citoyen qui voudrait gagner de l\'argent pour arrondir tes fins du mois, nous avons sélectionner une liste de sites et bonnes astuces à essayer',
        'h1': "",
        'p_h1': "",
        'h2': "",
        'p_h2': "",
        'h3': "",
        'p_h3': "",
        'h4': "",
        'p_h4': "",
        'h5': "",
        'p_h5': "",
        'conclusion': "",
        'p_conclusion': "",
        'date_posted': '10/02/2021',
        'display_or_not': True,
        'moderator_id': 1,
        'category_id': 1,
        'comments': [],
    }
]
# Sample comment attached to the fake post above.
fake_comments = [
    {
        'id': 1,
        'author_name': 'admin',
        'email_author': 'admin@exemple.com',
        'content': 'C\'est bon tout ca.',
        'date_posted': '12/02/2021',
        'approved_or_not': True,
        'post_id': 1
    }
]
# Create a sitemap
@ext.register_generator
def sitemap_index():
    """Yield the endpoints flask-sitemap should include in the sitemap."""
    # Renamed from ``index``: the old name was immediately shadowed by the
    # ``index`` view defined below (flake8 F811).  The generator is captured
    # by the decorator at definition time, and the yielded value refers to
    # the *endpoint* name, so behavior is unchanged.
    yield 'index', {}
# Home blog view
@bp.route('/')
def index():
    """Render the blog home page.

    Collects the latest post of each category plus the four most recent
    visible posts (cards) and hands everything to the blog.html template.
    """
    categories = Category.query.all()
    moderators = Moderator.query.all()
    posts_to_display = Post.query.all()
    # Latest visible posts, shown as cards on the home page.
    posts_for_cards = Post.query.filter_by(display_or_not=True).order_by(Post.date_posted.desc())[:4]
    # Most recent post of each category.
    post_business = Post.query.join(Category).filter(Category.category_name == "BUSINESS"). \
        order_by(Post.date_posted.desc()).first()
    post_formation = Post.query.join(Category).filter(Category.category_name == "FORMATIONS"). \
        order_by(Post.date_posted.desc()).first()
    post_tutoriel = Post.query.join(Category).filter(Category.category_name == "TUTORIELS"). \
        order_by(Post.date_posted.desc()).first()
    post_ressource = Post.query.join(Category).filter(Category.category_name == "RESSOURCES"). \
        order_by(Post.date_posted.desc()).first()
    # The banner repeats the latest BUSINESS post and the "last post" slot the
    # latest TUTORIELS post; reuse the results instead of re-running the same
    # queries (the original issued each of them twice).
    post_banner = post_business
    last_post = post_tutoriel
    image_posts = [card.img_title for card in posts_for_cards]
    return render_template('blog/blog.html', title="Accueil - Joblogueur",
                           categories=categories, last_post=last_post, moderators=moderators,
                           images=image_posts, posts_to_display=posts_to_display,
                           post_banner=post_banner, post_business=post_business,
                           post_formation=post_formation, post_tutoriel=post_tutoriel,
                           post_ressource=post_ressource)
# Display individual post
@bp.route('/publication/<post_title>', methods=['POST', 'GET'])
def post(post_title):
    """Display a single post and handle submission of new comments.

    ``post_title`` arrives with dashes in place of spaces; it is mapped back
    to the stored title before lookup.  A valid comment submission stores the
    comment unapproved; one comment per e-mail address and post.
    """
    form = CommentForm()
    titre = post_title.replace('-', ' ')
    # Look the publication up by its title.
    post = Post.query.filter_by(title=titre).first()
    moderators = Moderator.query.all()
    # Fetch every comment attached to this publication, newest first.
    comments_to_display = Comment.query.join(Post).filter(Comment.post_id == post.id).\
        order_by(Comment.date_posted.desc()).all()
    # List all categories.
    categories = Category.query.all()
    nbr_comments = 0
    # Count the approved comments of this publication.
    for comment in post.comments:
        if comment.approved_or_not:
            nbr_comments += 1
    if form.validate_on_submit():
        # One comment per e-mail address and post: collect the post ids this
        # address has already commented on.
        search_comments = Comment.query.filter_by(email_author=form.author_email.data).all()
        ids = []
        for comment in search_comments:
            ids.append(comment.post_id)
        if post.id in ids:
            flash("Vous avez deja commenté cet article", "info")
        # Create the comment (kept unapproved until a moderator validates it).
        else:
            new_comment = Comment(name_author=form.author.data, email_author=form.author_email.data,
                                  content=form.content.data, post_id=post.id, approved_or_not=False)
            db.session.add(new_comment)
            db.session.commit()
            form.author.data = ""
            form.author_email.data = ""
            form.content.data = ""
            flash("Votre commentaire est en cours de validation", "success")
        # NOTE(review): unlike the GET branch below, this render call passes
        # neither ``image`` nor ``moderators`` to the template -- confirm the
        # template tolerates their absence.
        return render_template('blog/blog_post.html', title=titre + " | Joblogueur", post=post, form=form,
                               nbr_comments=int(nbr_comments), categories=categories, comments=comments_to_display,
                               titre=post_title)
    form.author.data = ""
    form.author_email.data = ""
    form.content.data = ""
    image_file = url_for('static', filename='upload/'+str(post.img_title))
    return render_template("blog/blog_post.html", title=titre + " | Joblogueur", post=post, form=form,
                           nbr_comments=int(nbr_comments), categories=categories,
                           comments=comments_to_display, image=image_file, moderators=moderators,
                           titre=post_title)
# Display post per category
@bp.route('/publications/<category_name>')
def post_per_category(category_name):
    """Render a paginated listing (7 per page) of one category's posts."""
    page = request.args.get('page', 1, type=int)
    # URLs carry dashes where the stored category name has spaces.
    search_category = category_name.replace('-', ' ')
    categories = Category.query.all()
    posts = (Post.query.join(Category)
             .filter(Category.category_name == search_category)
             .order_by(Post.date_posted.desc())
             .paginate(per_page=7, page=page))
    image_posts = [entry.img_title for entry in posts.items]
    return render_template("blog/posts_per_category.html", title=search_category + " | Joblogueur", posts=posts,
                           categories=categories, search_category=search_category, images=image_posts)
# Register user for daily news
@bp.route('/newsletter-invitation', methods=['POST','GET'])
def newsletter_invitation():
    """Send the newsletter e-mail to the address submitted by the visitor.

    On POST, reads ``usermail`` from the form, mails the (currently
    hard-coded) digest of recent publications to it and redirects back to
    the blog home page.  A plain GET just redirects home.
    """
    if request.method == 'POST':
        usermail = request.form['usermail']
        # NOTE(review): the digest body and its links are hard-coded (ngrok
        # tunnel URLs) -- they should be generated from the latest posts.
        content = """
        Salut très cher(e),
        Comment vas-tu ?
        Il y'a du nouveau sur ton blog préféré www.digitalschools.sn/blog
        Ci-dessous une liste des publications que tu as surement manqués:
        1- https://3df5e7df0cdb.ngrok.io/blog/publication/10-raisons-pourquoi-toute-entreprise-doit-cr%C3%A9er-ou-avoir-un-site-Web
        2- https://3df5e7df0cdb.ngrok.io/blog/publication/10-bonnes-raisons-d%27apprendre-%C3%A0-son-enfant-%C3%A0-coder
        3- https://3df5e7df0cdb.ngrok.io/blog/publication/FLASK-1.0.0
        Merci pour ton temps et ta perséverance dans la lecture quotidienne.
        Youssouf BINYOUM (digitalschools.sn)
        """
        msg = Message("Nouvelle publication sur digitalschools.sn/blog", recipients=[usermail],
                      sender='contact@digitalschools.sn')
        msg.body = content
        mail.send(msg)
    # Removed: an unused per-category query loop and a leftover debug
    # ``print(request.args)``.
    return redirect(url_for('blog.index'))
| 38.397727
| 229
| 0.555687
|
from flask import Blueprint, render_template, flash, request, redirect, url_for
from flask_mail import Message
from FlaskApp.models import Post, Category, Moderator, Comment
from FlaskApp.forms import CommentForm
from . import db, ext, mail
bp = Blueprint('blog', __name__)
# NOTE(review): these fixtures are only referenced by the (no-op) ``global``
# statement inside ``index`` below; they appear to be development seed data.
fake_Category = [
    {
        'id': 1,
        'category_name': "10 bonnes raisons",
        'color': 'primary'
    },
    {
        'id': 2,
        'category_name': "Comment réussir ?",
        'color': 'success',
    },
    {
        'id': 3,
        'category_name': "Offres et formations",
        'color': 'warning'
    }
]
# Sample moderator/admin account.
fake_moderators = [
    { 'id': 1,
      'username': 'admin',
      'email': 'admin@exemple.com',
      'password': 'admin237',
      'address1': 'address1',
      'address2': 'address2',
      'city': 'city',
      'state': 'state',
      'country': 'country',
      'zipcode': 'zipcode',
      'is_admin': True,
      'image_file': 'default.jpg',
      'created_on': '21/02/2021',
      'posts': []
    }
]
# Sample post carrying the section fields (h1..h5 plus paragraph bodies).
fake_posts = [
    {
        'id': 1,
        'title': 'Comment réussir à gagner de l\'argent sur internet',
        'introduction': 'Qu\’ils soient aujourd\’hui milliardaires ou non, reconnus à l\’international ou en France.',
        'p_intro': 'Ils ont tous commencer simplement. Pour toi modeste citoyen qui voudrait gagner de l\'argent pour arrondir tes fins du mois, nous avons sélectionner une liste de sites et bonnes astuces à essayer',
        'h1': "",
        'p_h1': "",
        'h2': "",
        'p_h2': "",
        'h3': "",
        'p_h3': "",
        'h4': "",
        'p_h4': "",
        'h5': "",
        'p_h5': "",
        'conclusion': "",
        'p_conclusion': "",
        'date_posted': '10/02/2021',
        'display_or_not': True,
        'moderator_id': 1,
        'category_id': 1,
        'comments': [],
    }
]
# Sample comment attached to the fake post above.
fake_comments = [
    {
        'id': 1,
        'author_name': 'admin',
        'email_author': 'admin@exemple.com',
        'content': 'C\'est bon tout ca.',
        'date_posted': '12/02/2021',
        'approved_or_not': True,
        'post_id': 1
    }
]
# Create a sitemap
@ext.register_generator
def sitemap_index():
    """Yield the endpoints flask-sitemap should include in the sitemap."""
    # Renamed from ``index``: the old name was immediately shadowed by the
    # ``index`` view defined below (flake8 F811).  The generator is captured
    # by the decorator at definition time, and the yielded value refers to
    # the *endpoint* name, so behavior is unchanged.
    yield 'index', {}
# Home blog view
@bp.route('/')
def index():
    """Render the blog home page.

    Collects the latest post of each category plus the four most recent
    visible posts (cards) and hands everything to the blog.html template.
    """
    categories = Category.query.all()
    moderators = Moderator.query.all()
    posts_to_display = Post.query.all()
    # Latest visible posts, shown as cards on the home page.
    posts_for_cards = Post.query.filter_by(display_or_not=True).order_by(Post.date_posted.desc())[:4]
    # Most recent post of each category.
    post_business = Post.query.join(Category).filter(Category.category_name == "BUSINESS"). \
        order_by(Post.date_posted.desc()).first()
    post_formation = Post.query.join(Category).filter(Category.category_name == "FORMATIONS"). \
        order_by(Post.date_posted.desc()).first()
    post_tutoriel = Post.query.join(Category).filter(Category.category_name == "TUTORIELS"). \
        order_by(Post.date_posted.desc()).first()
    post_ressource = Post.query.join(Category).filter(Category.category_name == "RESSOURCES"). \
        order_by(Post.date_posted.desc()).first()
    # The banner repeats the latest BUSINESS post and the "last post" slot the
    # latest TUTORIELS post; reuse the results instead of re-running the same
    # queries (the original issued each of them twice).
    post_banner = post_business
    last_post = post_tutoriel
    image_posts = [card.img_title for card in posts_for_cards]
    return render_template('blog/blog.html', title="Accueil - Joblogueur",
                           categories=categories, last_post=last_post, moderators=moderators,
                           images=image_posts, posts_to_display=posts_to_display,
                           post_banner=post_banner, post_business=post_business,
                           post_formation=post_formation, post_tutoriel=post_tutoriel,
                           post_ressource=post_ressource)
# Display individual post
@bp.route('/publication/<post_title>', methods=['POST', 'GET'])
def post(post_title):
    """Display a single post and handle submission of new comments.

    ``post_title`` arrives with dashes in place of spaces; it is mapped back
    to the stored title before lookup.  A valid comment submission stores the
    comment unapproved; one comment per e-mail address and post.
    """
    form = CommentForm()
    titre = post_title.replace('-', ' ')
    # Look the publication up by its title.
    post = Post.query.filter_by(title=titre).first()
    moderators = Moderator.query.all()
    # Fetch every comment attached to this publication, newest first.
    comments_to_display = Comment.query.join(Post).filter(Comment.post_id == post.id).\
        order_by(Comment.date_posted.desc()).all()
    # List all categories.
    categories = Category.query.all()
    nbr_comments = 0
    # Count the approved comments of this publication.
    for comment in post.comments:
        if comment.approved_or_not:
            nbr_comments += 1
    if form.validate_on_submit():
        # One comment per e-mail address and post: collect the post ids this
        # address has already commented on.
        search_comments = Comment.query.filter_by(email_author=form.author_email.data).all()
        ids = []
        for comment in search_comments:
            ids.append(comment.post_id)
        if post.id in ids:
            flash("Vous avez deja commenté cet article", "info")
        # Create the comment (kept unapproved until a moderator validates it).
        else:
            new_comment = Comment(name_author=form.author.data, email_author=form.author_email.data,
                                  content=form.content.data, post_id=post.id, approved_or_not=False)
            db.session.add(new_comment)
            db.session.commit()
            form.author.data = ""
            form.author_email.data = ""
            form.content.data = ""
            flash("Votre commentaire est en cours de validation", "success")
        # NOTE(review): unlike the GET branch below, this render call passes
        # neither ``image`` nor ``moderators`` to the template -- confirm the
        # template tolerates their absence.
        return render_template('blog/blog_post.html', title=titre + " | Joblogueur", post=post, form=form,
                               nbr_comments=int(nbr_comments), categories=categories, comments=comments_to_display,
                               titre=post_title)
    form.author.data = ""
    form.author_email.data = ""
    form.content.data = ""
    image_file = url_for('static', filename='upload/'+str(post.img_title))
    return render_template("blog/blog_post.html", title=titre + " | Joblogueur", post=post, form=form,
                           nbr_comments=int(nbr_comments), categories=categories,
                           comments=comments_to_display, image=image_file, moderators=moderators,
                           titre=post_title)
@bp.route('/publications/<category_name>')
def post_per_category(category_name):
    """Render a paginated listing (7 per page) of one category's posts."""
    page = request.args.get('page', 1, type=int)
    # URLs carry dashes where the stored category name has spaces.
    search_category = category_name.replace('-', ' ')
    categories = Category.query.all()
    posts = (Post.query.join(Category)
             .filter(Category.category_name == search_category)
             .order_by(Post.date_posted.desc())
             .paginate(per_page=7, page=page))
    image_posts = [entry.img_title for entry in posts.items]
    return render_template("blog/posts_per_category.html", title=search_category + " | Joblogueur", posts=posts,
                           categories=categories, search_category=search_category, images=image_posts)
@bp.route('/newsletter-invitation', methods=['POST','GET'])
def newsletter_invitation():
    """Send the newsletter e-mail to the address submitted by the visitor.

    On POST, reads ``usermail`` from the form, mails the (currently
    hard-coded) digest of recent publications to it and redirects back to
    the blog home page.  A plain GET just redirects home.
    """
    if request.method == 'POST':
        usermail = request.form['usermail']
        # NOTE(review): the digest body and its links are hard-coded (ngrok
        # tunnel URLs) -- they should be generated from the latest posts.
        content = """
        Salut très cher(e),
        Comment vas-tu ?
        Il y'a du nouveau sur ton blog préféré www.digitalschools.sn/blog
        Ci-dessous une liste des publications que tu as surement manqués:
        1- https://3df5e7df0cdb.ngrok.io/blog/publication/10-raisons-pourquoi-toute-entreprise-doit-cr%C3%A9er-ou-avoir-un-site-Web
        2- https://3df5e7df0cdb.ngrok.io/blog/publication/10-bonnes-raisons-d%27apprendre-%C3%A0-son-enfant-%C3%A0-coder
        3- https://3df5e7df0cdb.ngrok.io/blog/publication/FLASK-1.0.0
        Merci pour ton temps et ta perséverance dans la lecture quotidienne.
        Youssouf BINYOUM (digitalschools.sn)
        """
        msg = Message("Nouvelle publication sur digitalschools.sn/blog", recipients=[usermail],
                      sender='contact@digitalschools.sn')
        msg.body = content
        mail.send(msg)
    # Removed: an unused per-category query loop and a leftover debug
    # ``print(request.args)``.
    return redirect(url_for('blog.index'))
| true
| true
|
f708eb4171dc8694530ffc022c1cc83d3407d688
| 14,078
|
py
|
Python
|
comic_dl/honcho.py
|
PauuloG/comic-dl
|
6d8b70751b5ae3388f28264d5c1dd9d7fbfeda4b
|
[
"MIT"
] | null | null | null |
comic_dl/honcho.py
|
PauuloG/comic-dl
|
6d8b70751b5ae3388f28264d5c1dd9d7fbfeda4b
|
[
"MIT"
] | null | null | null |
comic_dl/honcho.py
|
PauuloG/comic-dl
|
6d8b70751b5ae3388f28264d5c1dd9d7fbfeda4b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
from sites import foolSlide
from sites import readcomicOnlineto
from sites import comicNaver
from sites import mangaHere
from sites import rawSenManga
from sites import mangaFox
from sites import omgBeauPeep
from sites import mangaReader
from sites import mangaEden
from sites import acQQ
from sites import stripUtopia
from sites import readComicBooksOnline
from sites import readComicsWebsite
from sites import batoto
from sites import hqbr
from sites import comicextra
from sites import readComicsIO
from sites import japscan
from sites import manganelo
import globalFunctions
class Honcho(object):
    """Routes a comic/manga URL to the downloader class for its website."""

    def comic_language_resolver(self, language_code):
        """Return the language name for a numeric language code (as a string).

        :param language_code: CLI language code, e.g. ``'0'`` for English.
        :raises KeyError: if the code is not a known language.
        """
        language_dict = {
            '0': 'English',
            '1': 'Italian',
            '2': 'Spanish',
            '3': 'French',
            '4': 'German',
            '5': 'Portuguese',
            '6': 'Turkish',
            '7': 'Indonesian',
            '8': 'Greek',
            '9': 'Filipino',
            '10': 'Polish',
            '11': 'Thai',
            '12': 'Malay',
            # Bug fix: the key used to be '13 ' (trailing space), so code
            # '13' could never be resolved.
            '13': 'Hungarian',
            '14': 'Romanian',
            # Bug fix: the value used to carry a stray leading space.
            '15': 'Arabic',
            '16': 'Hebrew',
            '17': 'Russian',
            '18': 'Vietnamese',
            '19': 'Dutch',
            '20': 'Bengali',
            '21': 'Persian',
            '22': 'Czech',
            '23': 'Brazilian',
            '24': 'Bulgarian',
            '25': 'Danish',
            '26': 'Esperanto',
            '27': 'Swedish',
            '28': 'Lithuanian',
            '29': 'Other'
        }
        return language_dict[language_code]

    def checker(self, comic_url, download_directory, chapter_range, **kwargs):
        """Pick the downloader matching the URL's domain and run it.

        Returns 0 when a downloader ran (or the site is "under development"),
        -1 for the mangaeden ``--print-index`` case, and None for unsupported
        domains.
        """
        user_name = kwargs.get("username")
        password = kwargs.get("password")
        current_directory = kwargs.get("current_directory")
        log_flag = kwargs.get("logger")
        sorting = kwargs.get("sorting_order")
        comic_language = kwargs.get("comic_language")
        print_index = kwargs.get("print_index")

        if log_flag is True:
            logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
        logging.debug("Comic Url : %s" % comic_url)
        domain = urlparse(comic_url).netloc
        logging.debug("Selected Domain : %s" % domain)

        # Strip a trailing "/" so checking the URL for Full Series vs Single
        # Chapter behaves consistently.
        if comic_url[-1] == "/":
            comic_url = comic_url[:-1]

        # Keyword arguments shared by every downloader, plus the optional
        # file-handling and index-printing extras most of them accept.
        common = dict(manga_url=comic_url, logger=logging, current_directory=current_directory,
                      sorting_order=sorting, log_flag=log_flag,
                      download_directory=download_directory, chapter_range=chapter_range)
        files = dict(conversion=kwargs.get("conversion"), keep_files=kwargs.get("keep_files"))
        standard = dict(common, print_index=print_index, **files)

        # Sites that take the "standard" constructor arguments, keyed by the
        # domains they are served from.
        standard_sites = (
            (("www.comic.naver.com", "comic.naver.com"), comicNaver.ComicNaver),
            (("www.mangahere.co", "mangahere.co", "www.mangahere.cc", "mangahere.cc"), mangaHere.MangaHere),
            (("www.raw.senmanga.com", "raw.senmanga.com"), rawSenManga.RawSenaManga),
            (("www.mangafox.me", "mangafox.me", "www.mangafox.la", "mangafox.la",
              "www.fanfox.net", "fanfox.net"), mangaFox.MangaFox),
            (("www.omgbeaupeep.com", "omgbeaupeep.com", "www.otakusmash.com", "otakusmash.com"),
             omgBeauPeep.OmgBeauPeep),
            (("www.mangareader.net", "mangareader.net"), mangaReader.MangaReader),
            (("www.readcomicbooksonline.net", "readcomicbooksonline.net",
              "www.readcomicbooksonline.org", "readcomicbooksonline.org"),
             readComicBooksOnline.ReadComicBooksOnline),
            (("www.readcomics.website", "readcomics.website"), readComicsWebsite.ReadComicsWebsite),
            (("www.japscan.to",), japscan.Japscan),
            (("www.hqbr.com.br", "hqbr.com.br"), hqbr.Hqbr),
            (("www.comicextra.com", "comicextra.com"), comicextra.ComicExtra),
            (("www.readcomics.io", "readcomics.io"), readComicsIO.ReadComicsIO),
            (("manganelo.com", "mangakakalot.com"), manganelo.Manganelo),
        )
        for domains, site_class in standard_sites:
            if domain in domains:
                site_class(**standard)
                return 0

        # Sites whose constructors take a non-standard argument set.
        if domain in ("yomanga.co", "gomanga.co"):
            foolSlide.FoolSlide(**dict(common, **files))
            return 0
        if domain in ("www.readcomiconline.to", "readcomiconline.to"):
            readcomicOnlineto.ReadComicOnlineTo(image_quality=kwargs.get("image_quality"), **standard)
            return 0
        if domain in ("www.ac.qq.com", "ac.qq.com"):
            # TODO KO --print-index broken for ac.qq.com? (carried over note)
            acQQ.AcQq(print_index=print_index, **common)
            return 0
        if domain in ("www.striputopija.blogspot.in", "striputopija.blogspot.in",
                      "www.striputopija.blogspot.com", "striputopija.blogspot.com"):
            stripUtopia.StripUtopia(print_index=print_index, **common)
            return 0
        if domain in ("www.kissmanga.com", "kissmanga.com"):
            # kissManga support is not implemented yet.
            print("Under Development!")
            return 0
        if domain in ("www.bato.to", "bato.to"):
            batoto.Batoto(username=user_name, password=password,
                          comic_language=self.comic_language_resolver(comic_language),
                          **standard)
            return 0
        if domain in ("www.mangaeden.com",):
            if print_index:
                print("please use -find and -cid instead!")
                return -1
            mangaEden.MangaEden(**dict(common, **files))
            return 0

        print("%s is not supported at the moment. You can request it on the Github repository." % domain)
| 58.903766
| 126
| 0.573022
|
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
from sites import foolSlide
from sites import readcomicOnlineto
from sites import comicNaver
from sites import mangaHere
from sites import rawSenManga
from sites import mangaFox
from sites import omgBeauPeep
from sites import mangaReader
from sites import mangaEden
from sites import acQQ
from sites import stripUtopia
from sites import readComicBooksOnline
from sites import readComicsWebsite
from sites import batoto
from sites import hqbr
from sites import comicextra
from sites import readComicsIO
from sites import japscan
from sites import manganelo
import globalFunctions
class Honcho(object):
    """Routes a comic/manga URL to the downloader class for its website."""

    def comic_language_resolver(self, language_code):
        """Return the language name for a numeric language code (as a string).

        :param language_code: CLI language code, e.g. ``'0'`` for English.
        :raises KeyError: if the code is not a known language.
        """
        language_dict = {
            '0': 'English',
            '1': 'Italian',
            '2': 'Spanish',
            '3': 'French',
            '4': 'German',
            '5': 'Portuguese',
            '6': 'Turkish',
            '7': 'Indonesian',
            '8': 'Greek',
            '9': 'Filipino',
            '10': 'Polish',
            '11': 'Thai',
            '12': 'Malay',
            # Bug fix: the key used to be '13 ' (trailing space), so code
            # '13' could never be resolved.
            '13': 'Hungarian',
            '14': 'Romanian',
            # Bug fix: the value used to carry a stray leading space.
            '15': 'Arabic',
            '16': 'Hebrew',
            '17': 'Russian',
            '18': 'Vietnamese',
            '19': 'Dutch',
            '20': 'Bengali',
            '21': 'Persian',
            '22': 'Czech',
            '23': 'Brazilian',
            '24': 'Bulgarian',
            '25': 'Danish',
            '26': 'Esperanto',
            '27': 'Swedish',
            '28': 'Lithuanian',
            '29': 'Other'
        }
        return language_dict[language_code]

    def checker(self, comic_url, download_directory, chapter_range, **kwargs):
        """Pick the downloader matching the URL's domain and run it.

        Returns 0 when a downloader ran (or the site is "under development"),
        -1 for the mangaeden ``--print-index`` case, and None for unsupported
        domains.
        """
        user_name = kwargs.get("username")
        password = kwargs.get("password")
        current_directory = kwargs.get("current_directory")
        log_flag = kwargs.get("logger")
        sorting = kwargs.get("sorting_order")
        comic_language = kwargs.get("comic_language")
        print_index = kwargs.get("print_index")

        if log_flag is True:
            logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
        logging.debug("Comic Url : %s" % comic_url)
        domain = urlparse(comic_url).netloc
        logging.debug("Selected Domain : %s" % domain)

        # Strip a trailing "/" so checking the URL for Full Series vs Single
        # Chapter behaves consistently.
        if comic_url[-1] == "/":
            comic_url = comic_url[:-1]

        # Keyword arguments shared by every downloader, plus the optional
        # file-handling and index-printing extras most of them accept.
        common = dict(manga_url=comic_url, logger=logging, current_directory=current_directory,
                      sorting_order=sorting, log_flag=log_flag,
                      download_directory=download_directory, chapter_range=chapter_range)
        files = dict(conversion=kwargs.get("conversion"), keep_files=kwargs.get("keep_files"))
        standard = dict(common, print_index=print_index, **files)

        # Sites that take the "standard" constructor arguments, keyed by the
        # domains they are served from.
        standard_sites = (
            (("www.comic.naver.com", "comic.naver.com"), comicNaver.ComicNaver),
            (("www.mangahere.co", "mangahere.co", "www.mangahere.cc", "mangahere.cc"), mangaHere.MangaHere),
            (("www.raw.senmanga.com", "raw.senmanga.com"), rawSenManga.RawSenaManga),
            (("www.mangafox.me", "mangafox.me", "www.mangafox.la", "mangafox.la",
              "www.fanfox.net", "fanfox.net"), mangaFox.MangaFox),
            (("www.omgbeaupeep.com", "omgbeaupeep.com", "www.otakusmash.com", "otakusmash.com"),
             omgBeauPeep.OmgBeauPeep),
            (("www.mangareader.net", "mangareader.net"), mangaReader.MangaReader),
            (("www.readcomicbooksonline.net", "readcomicbooksonline.net",
              "www.readcomicbooksonline.org", "readcomicbooksonline.org"),
             readComicBooksOnline.ReadComicBooksOnline),
            (("www.readcomics.website", "readcomics.website"), readComicsWebsite.ReadComicsWebsite),
            (("www.japscan.to",), japscan.Japscan),
            (("www.hqbr.com.br", "hqbr.com.br"), hqbr.Hqbr),
            (("www.comicextra.com", "comicextra.com"), comicextra.ComicExtra),
            (("www.readcomics.io", "readcomics.io"), readComicsIO.ReadComicsIO),
            (("manganelo.com", "mangakakalot.com"), manganelo.Manganelo),
        )
        for domains, site_class in standard_sites:
            if domain in domains:
                site_class(**standard)
                return 0

        # Sites whose constructors take a non-standard argument set.
        if domain in ("yomanga.co", "gomanga.co"):
            foolSlide.FoolSlide(**dict(common, **files))
            return 0
        if domain in ("www.readcomiconline.to", "readcomiconline.to"):
            readcomicOnlineto.ReadComicOnlineTo(image_quality=kwargs.get("image_quality"), **standard)
            return 0
        if domain in ("www.ac.qq.com", "ac.qq.com"):
            acQQ.AcQq(print_index=print_index, **common)
            return 0
        if domain in ("www.striputopija.blogspot.in", "striputopija.blogspot.in",
                      "www.striputopija.blogspot.com", "striputopija.blogspot.com"):
            stripUtopia.StripUtopia(print_index=print_index, **common)
            return 0
        if domain in ("www.kissmanga.com", "kissmanga.com"):
            # kissManga support is not implemented yet.
            print("Under Development!")
            return 0
        if domain in ("www.bato.to", "bato.to"):
            batoto.Batoto(username=user_name, password=password,
                          comic_language=self.comic_language_resolver(comic_language),
                          **standard)
            return 0
        if domain in ("www.mangaeden.com",):
            if print_index:
                print("please use -find and -cid instead!")
                return -1
            mangaEden.MangaEden(**dict(common, **files))
            return 0

        print("%s is not supported at the moment. You can request it on the Github repository." % domain)
| true
| true
|
f708ebb2ee0deed992fca9ca8fb815c1304e74c9
| 1,065
|
py
|
Python
|
colossus/apps/subscribers/urls.py
|
Beracah-Group/colossus
|
7bce25039a223da7197cc8a969ec72ee26aeffa8
|
[
"MIT"
] | 2
|
2018-08-14T14:06:54.000Z
|
2018-09-10T16:57:18.000Z
|
colossus/apps/subscribers/urls.py
|
Beracah-Group/colossus
|
7bce25039a223da7197cc8a969ec72ee26aeffa8
|
[
"MIT"
] | null | null | null |
colossus/apps/subscribers/urls.py
|
Beracah-Group/colossus
|
7bce25039a223da7197cc8a969ec72ee26aeffa8
|
[
"MIT"
] | null | null | null |
"""URL routes for the public subscriber-facing pages: subscribe/confirm,
unsubscribe, and open/click tracking endpoints for campaign analytics."""
from django.urls import path
from . import views
app_name = 'subscribers'
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('manage/', views.manage, name='manage'),
    path('goodbye/<uuid:mailing_list_uuid>/', views.goodbye, name='goodbye'),
    # Double opt-in flow: subscribe -> confirmation page -> tokenised confirm link.
    path('subscribe/<uuid:mailing_list_uuid>/', views.subscribe, name='subscribe'),
    path('subscribe/<uuid:mailing_list_uuid>/confirm/', views.confirm_subscription, name='confirm_subscription'),
    path('subscribe/<uuid:mailing_list_uuid>/confirm/<str:token>/', views.confirm_double_optin_token, name='confirm_double_optin_token'),  # noqa
    path('unsubscribe/<uuid:mailing_list_uuid>/', views.unsubscribe_manual, name='unsubscribe_manual'),
    path('unsubscribe/<uuid:mailing_list_uuid>/<uuid:subscriber_uuid>/<uuid:campaign_uuid>/', views.unsubscribe, name='unsubscribe'),  # noqa
    # Tracking endpoints keyed by email/link and subscriber UUIDs.
    path('track/open/<uuid:email_uuid>/<uuid:subscriber_uuid>/', views.track_open, name='open'),
    path('track/click/<uuid:link_uuid>/<uuid:subscriber_uuid>/', views.track_click, name='click'),
]
| 53.25
| 145
| 0.734272
|
from django.urls import path
from . import views
app_name = 'subscribers'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('manage/', views.manage, name='manage'),
path('goodbye/<uuid:mailing_list_uuid>/', views.goodbye, name='goodbye'),
path('subscribe/<uuid:mailing_list_uuid>/', views.subscribe, name='subscribe'),
path('subscribe/<uuid:mailing_list_uuid>/confirm/', views.confirm_subscription, name='confirm_subscription'),
path('subscribe/<uuid:mailing_list_uuid>/confirm/<str:token>/', views.confirm_double_optin_token, name='confirm_double_optin_token'), path('unsubscribe/<uuid:mailing_list_uuid>/', views.unsubscribe_manual, name='unsubscribe_manual'),
path('unsubscribe/<uuid:mailing_list_uuid>/<uuid:subscriber_uuid>/<uuid:campaign_uuid>/', views.unsubscribe, name='unsubscribe'), path('track/open/<uuid:email_uuid>/<uuid:subscriber_uuid>/', views.track_open, name='open'),
path('track/click/<uuid:link_uuid>/<uuid:subscriber_uuid>/', views.track_click, name='click'),
]
| true
| true
|
f708ec0d4bcf6e7a2b03661fc934f58647505014
| 7,295
|
py
|
Python
|
Chapter03/03_atari_gan.py
|
Yelloooowww/Deep-Reinforcement-Learning-Hands-On
|
d1a3a1272d7ceff8796fe412deb4e4d5bd6665a5
|
[
"MIT"
] | null | null | null |
Chapter03/03_atari_gan.py
|
Yelloooowww/Deep-Reinforcement-Learning-Hands-On
|
d1a3a1272d7ceff8796fe412deb4e4d5bd6665a5
|
[
"MIT"
] | null | null | null |
Chapter03/03_atari_gan.py
|
Yelloooowww/Deep-Reinforcement-Learning-Hands-On
|
d1a3a1272d7ceff8796fe412deb4e4d5bd6665a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import random
import argparse
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import gym
import gym.spaces
import numpy as np
log = gym.logger
log.set_level(gym.logger.INFO)
# Size of the generator's latent noise vector.
LATENT_VECTOR_SIZE = 100
# Base channel counts for the discriminator / generator conv stacks.
DISCR_FILTERS = 64
GENER_FILTERS = 64
BATCH_SIZE = 16
# dimension input image will be rescaled
IMAGE_SIZE = 64
LEARNING_RATE = 0.0001
# How often (in iterations) to log losses and save image grids.
REPORT_EVERY_ITER = 25
SAVE_IMAGE_EVERY_ITER = 1000
class InputWrapper(gym.ObservationWrapper):
    """Observation preprocessor for the GAN's training frames.

    Resizes each frame to IMAGE_SIZE x IMAGE_SIZE and moves the colour
    channel to the front, producing float32 arrays.
    """

    def __init__(self, *args):
        super(InputWrapper, self).__init__(*args)
        assert isinstance(self.observation_space, gym.spaces.Box)
        box = self.observation_space
        # Transform the bounds the same way observations are transformed.
        low, high = self.observation(box.low), self.observation(box.high)
        self.observation_space = gym.spaces.Box(low, high, dtype=np.float32)

    def observation(self, observation):
        """Resize one frame and make it channel-first float32."""
        resized = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))
        # (H, W, C) -> (C, H, W)
        channel_first = np.moveaxis(resized, 2, 0)
        return channel_first.astype(np.float32)
class Discriminator(nn.Module):
    """DCGAN-style discriminator mapping an image to one realness score."""
    def __init__(self, input_shape):
        super(Discriminator, self).__init__()
        # this pipe converges image into the single number
        # Each stride-2 conv halves the spatial size; for 64x64 input the
        # final valid 4x4 conv collapses the remaining 4x4 map to 1x1.
        self.conv_pipe = nn.Sequential(
            nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,
                      kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(DISCR_FILTERS*2),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(DISCR_FILTERS * 4),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(DISCR_FILTERS * 8),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,
                      kernel_size=4, stride=1, padding=0),
            nn.Sigmoid()
        )
    def forward(self, x):
        conv_out = self.conv_pipe(x)
        # Flatten (batch, 1, 1, 1) into a 1-D batch of probabilities.
        return conv_out.view(-1, 1).squeeze(dim=1)
class Generator(nn.Module):
    """DCGAN-style generator: latent vector -> image of output_shape."""
    def __init__(self, output_shape):
        super(Generator, self).__init__()
        # pipe deconvolves input vector into (3, 64, 64) image
        # Each stride-2 transposed conv doubles the spatial size
        # (1 -> 4 -> 8 -> 16 -> 32 -> 64); Tanh bounds pixels to [-1, 1].
        self.pipe = nn.Sequential(
            nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,
                               kernel_size=4, stride=1, padding=0),
            nn.BatchNorm2d(GENER_FILTERS * 8),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(GENER_FILTERS * 4),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(GENER_FILTERS * 2),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(GENER_FILTERS),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],
                               kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )
    def forward(self, x):
        return self.pipe(x)
def iterate_batches(envs, batch_size=BATCH_SIZE):
    """Yield endless batches of observations gathered with random play.

    A random environment is chosen at each step and a random action taken;
    frames that are almost entirely black (mean <= 0.01) are discarded.
    Pixel values are rescaled from [0, 255] to [-1, 1] before yielding.
    """
    batch = [env.reset() for env in envs]
    while True:
        env = random.choice(envs)
        obs, _reward, is_done, _info = env.step(env.action_space.sample())
        if np.mean(obs) > 0.01:
            batch.append(obs)
        if len(batch) == batch_size:
            # Normalise input from [0, 255] into [-1, 1].
            scaled = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0
            yield torch.tensor(scaled)
            batch.clear()
        if is_done:
            env.reset()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # parser.add_argument("--cuda", default=False, action='store_true', help="Enable cuda computation")
    # NOTE(review): default=True combined with action='store_true' means the
    # flag is effectively always on; passing --cuda changes nothing. Confirm intent.
    parser.add_argument("--cuda", default=True, action='store_true', help="Enable cuda computation")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")
    # Sample frames from several Atari games so the GAN sees varied imagery.
    envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]
    input_shape = envs[0].observation_space.shape
    net_discr = Discriminator(input_shape=input_shape).to(device)
    net_gener = Generator(output_shape=input_shape).to(device)
    objective = nn.BCELoss()
    gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
    dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
    writer = SummaryWriter()
    gen_losses = []
    dis_losses = []
    iter_no = 0
    # Target labels: real frames -> 1, generated frames -> 0.
    true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)
    fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)
    for batch_v in iterate_batches(envs):
        # generate extra fake samples, input is 4D: batch, filters, x, y
        gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)
        batch_v = batch_v.to(device)
        gen_output_v = net_gener(gen_input_v)
        # train discriminator
        dis_optimizer.zero_grad()
        dis_output_true_v = net_discr(batch_v)
        # detach() keeps discriminator updates from backpropagating into the generator.
        dis_output_fake_v = net_discr(gen_output_v.detach())
        dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
        dis_loss.backward()
        dis_optimizer.step()
        dis_losses.append(dis_loss.item())
        # train generator
        gen_optimizer.zero_grad()
        dis_output_v = net_discr(gen_output_v)
        # Generator improves when its fakes are classified as real (label 1).
        gen_loss_v = objective(dis_output_v, true_labels_v)
        gen_loss_v.backward()
        gen_optimizer.step()
        gen_losses.append(gen_loss_v.item())
        iter_no += 1
        if iter_no % REPORT_EVERY_ITER == 0:
            log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
            writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
            writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
            gen_losses = []
            dis_losses = []
        if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
            writer.add_image("fake", vutils.make_grid(gen_output_v.data[:64], normalize=True), iter_no)
            writer.add_image("real", vutils.make_grid(batch_v.data[:64], normalize=True), iter_no)
| 38.193717
| 114
| 0.632625
|
import random
import argparse
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import gym
import gym.spaces
import numpy as np
log = gym.logger
log.set_level(gym.logger.INFO)
LATENT_VECTOR_SIZE = 100
DISCR_FILTERS = 64
GENER_FILTERS = 64
BATCH_SIZE = 16
IMAGE_SIZE = 64
LEARNING_RATE = 0.0001
REPORT_EVERY_ITER = 25
SAVE_IMAGE_EVERY_ITER = 1000
class InputWrapper(gym.ObservationWrapper):
def __init__(self, *args):
super(InputWrapper, self).__init__(*args)
assert isinstance(self.observation_space, gym.spaces.Box)
old_space = self.observation_space
self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high),
dtype=np.float32)
def observation(self, observation):
new_obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))
new_obs = np.moveaxis(new_obs, 2, 0)
return new_obs.astype(np.float32)
class Discriminator(nn.Module):
def __init__(self, input_shape):
super(Discriminator, self).__init__()
self.conv_pipe = nn.Sequential(
nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,
kernel_size=4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS*2),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS * 4),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS * 8),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,
kernel_size=4, stride=1, padding=0),
nn.Sigmoid()
)
def forward(self, x):
conv_out = self.conv_pipe(x)
return conv_out.view(-1, 1).squeeze(dim=1)
class Generator(nn.Module):
def __init__(self, output_shape):
super(Generator, self).__init__()
self.pipe = nn.Sequential(
nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,
kernel_size=4, stride=1, padding=0),
nn.BatchNorm2d(GENER_FILTERS * 8),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS * 4),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS * 2),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],
kernel_size=4, stride=2, padding=1),
nn.Tanh()
)
def forward(self, x):
return self.pipe(x)
def iterate_batches(envs, batch_size=BATCH_SIZE):
batch = [e.reset() for e in envs]
env_gen = iter(lambda: random.choice(envs), None)
while True:
e = next(env_gen)
obs, reward, is_done, _ = e.step(e.action_space.sample())
if np.mean(obs) > 0.01:
batch.append(obs)
if len(batch) == batch_size:
batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0
yield torch.tensor(batch_np)
batch.clear()
if is_done:
e.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=True, action='store_true', help="Enable cuda computation")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]
input_shape = envs[0].observation_space.shape
net_discr = Discriminator(input_shape=input_shape).to(device)
net_gener = Generator(output_shape=input_shape).to(device)
objective = nn.BCELoss()
gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
writer = SummaryWriter()
gen_losses = []
dis_losses = []
iter_no = 0
true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)
fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)
for batch_v in iterate_batches(envs):
gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)
batch_v = batch_v.to(device)
gen_output_v = net_gener(gen_input_v)
dis_optimizer.zero_grad()
dis_output_true_v = net_discr(batch_v)
dis_output_fake_v = net_discr(gen_output_v.detach())
dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
dis_loss.backward()
dis_optimizer.step()
dis_losses.append(dis_loss.item())
gen_optimizer.zero_grad()
dis_output_v = net_discr(gen_output_v)
gen_loss_v = objective(dis_output_v, true_labels_v)
gen_loss_v.backward()
gen_optimizer.step()
gen_losses.append(gen_loss_v.item())
iter_no += 1
if iter_no % REPORT_EVERY_ITER == 0:
log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
gen_losses = []
dis_losses = []
if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
writer.add_image("fake", vutils.make_grid(gen_output_v.data[:64], normalize=True), iter_no)
writer.add_image("real", vutils.make_grid(batch_v.data[:64], normalize=True), iter_no)
| true
| true
|
f708ec7e3e680cdaf80ef8e90b01a7804d08b581
| 191
|
py
|
Python
|
configs/gfl/gfl_r50_fpn_2x_coco.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
configs/gfl/gfl_r50_fpn_2x_coco.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
configs/gfl/gfl_r50_fpn_2x_coco.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
_base_ = './gfl_r50_fpn_1x_coco.py'
# learning policy: 2x schedule -- step-decay the LR at epochs 16 and 22,
# training for 24 epochs in total (the 1x base config covers 12).
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
work_dir = 'work_dirs/coco/gfl/gfl_r50_fpn_2x_coco'
| 38.2
| 53
| 0.769634
|
_base_ = './gfl_r50_fpn_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
work_dir = 'work_dirs/coco/gfl/gfl_r50_fpn_2x_coco'
| true
| true
|
f708ecc2659e7dad9f641b77c908e306e5f808bc
| 3,029
|
py
|
Python
|
watcher_dashboard/utils/utils.py
|
openstack/watcher-dashboard
|
146e547da934c2464ec5f49326eabed0eecfda96
|
[
"Apache-2.0"
] | 15
|
2016-02-12T07:33:42.000Z
|
2019-01-28T22:13:27.000Z
|
watcher_dashboard/utils/utils.py
|
openstack/watcher-dashboard
|
146e547da934c2464ec5f49326eabed0eecfda96
|
[
"Apache-2.0"
] | null | null | null |
watcher_dashboard/utils/utils.py
|
openstack/watcher-dashboard
|
146e547da934c2464ec5f49326eabed0eecfda96
|
[
"Apache-2.0"
] | 2
|
2017-08-11T02:25:37.000Z
|
2017-10-10T09:59:40.000Z
|
# -*- coding: utf8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
CAMEL_RE = re.compile(r'([A-Z][a-z]+|[A-Z]+(?=[A-Z\s]|$))')


def de_camel_case(text):
    """Convert CamelCase names to human-readable format."""
    # Splitting on a capturing group keeps the matched words in the result;
    # empty fragments between adjacent matches are dropped.
    pieces = (fragment.strip() for fragment in CAMEL_RE.split(text))
    return ' '.join(fragment for fragment in pieces if fragment)
def list_to_dict(object_list, key_attribute='id'):
    """Index a list of objects by one of their attributes.

    :param object_list: objects to be put into a dict
    :type object_list: list
    :param key_attribute: attribute name whose value becomes the dict key
    :type key_attribute: str
    :return: mapping of attribute value to object
    :rtype: dict
    """
    return {getattr(obj, key_attribute): obj for obj in object_list}
def length(iterator):
    """Count the items in *iterator*.

    Note that this consumes the iterator in the process.
    """
    total = 0
    for _ in iterator:
        total += 1
    return total
def check_image_type(image, image_type):
    """Check whether the image's 'type' property matches *image_type*.

    Images without a 'type' property match any type, since we cannot
    be sure what kind of image they are.
    """
    actual_type = image.properties.get('type', image_type)
    return actual_type == image_type
def filter_items(items, **kwargs):
    """Lazily yield the items whose attributes satisfy all given filters.

    A plain keyword matches by equality; ``<name>__in`` keeps items whose
    attribute is contained in the given collection, and ``<name>__not_in``
    keeps those whose attribute is not.

    Example usage:

        >>> class Item(object):
        ...     def __init__(self, index):
        ...         self.index = index
        ...     def __repr__(self):
        ...         return '<Item index=%d>' % self.index
        >>> items = [Item(i) for i in range(7)]
        >>> list(filter_items(items, index=1))
        [<Item index=1>]
        >>> list(filter_items(items, index__in=(1, 2, 3)))
        [<Item index=1>, <Item index=2>, <Item index=3>]
        >>> list(filter_items(items, index__not_in=(1, 2, 3)))
        [<Item index=0>, <Item index=4>, <Item index=5>, <Item index=6>]
    """
    def _matches(item):
        for name, value in kwargs.items():
            if name.endswith('__in'):
                if getattr(item, name[:-len('__in')]) not in value:
                    return False
            elif name.endswith('__not_in'):
                if getattr(item, name[:-len('__not_in')]) in value:
                    return False
            elif getattr(item, name) != value:
                return False
        return True

    for item in items:
        if _matches(item):
            yield item
def safe_int_cast(value):
    """Convert *value* to an int, returning 0 when conversion fails."""
    try:
        result = int(value)
    except (TypeError, ValueError):
        result = 0
    return result
| 31.884211
| 78
| 0.621657
|
import re
CAMEL_RE = re.compile(r'([A-Z][a-z]+|[A-Z]+(?=[A-Z\s]|$))')
def de_camel_case(text):
return ' '.join(w.strip() for w in CAMEL_RE.split(text) if w.strip())
def list_to_dict(object_list, key_attribute='id'):
return dict((getattr(o, key_attribute), o) for o in object_list)
def length(iterator):
return sum(1 for _item in iterator)
def check_image_type(image, image_type):
return (image.properties.get('type', image_type) == image_type)
def filter_items(items, **kwargs):
for item in items:
for name, value in kwargs.items():
if name.endswith('__in'):
if getattr(item, name[:-len('__in')]) not in value:
break
elif name.endswith('__not_in'):
if getattr(item, name[:-len('__not_in')]) in value:
break
else:
if getattr(item, name) != value:
break
else:
yield item
def safe_int_cast(value):
try:
return int(value)
except (TypeError, ValueError):
return 0
| true
| true
|
f708ecc7d0e54f2284d531ef1dd0316ffeddf396
| 6,204
|
py
|
Python
|
CAGG-NAS/tools/nn/nn_visualise.py
|
csjtx1021/CAGG
|
67fde2f1488ee6e2ff137e87860b5243c5b5fe7c
|
[
"MIT"
] | 7
|
2020-09-05T01:50:06.000Z
|
2021-09-29T13:33:35.000Z
|
CAGG-NAS/tools/nn/nn_visualise.py
|
csjtx1021/CAND
|
67fde2f1488ee6e2ff137e87860b5243c5b5fe7c
|
[
"MIT"
] | null | null | null |
CAGG-NAS/tools/nn/nn_visualise.py
|
csjtx1021/CAND
|
67fde2f1488ee6e2ff137e87860b5243c5b5fe7c
|
[
"MIT"
] | 1
|
2021-12-07T03:16:24.000Z
|
2021-12-07T03:16:24.000Z
|
"""
Harness for visualising a neural network.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
import functools
import graphviz as gv
import os
import networkx as nx
import numpy as np
# Parameters for plotting
_SAVE_FORMAT = 'eps'
# _SAVE_FORMAT = 'png'
# Shapes and fonts for layer boxes vs. the small input/output circles.
_LAYER_SHAPE = 'rectangle'
_IPOP_SHAPE = 'circle'
_LAYER_FONT = 'DejaVuSans'
_IPOP_FONT = 'Helvetica'
_LAYER_FONTSIZE = '16'
_FILLCOLOR = 'transparent'
_IPOP_FONTSIZE = '12'
_IPOP_FILLCOLOR = '#ffc0cb'
_DECISION_FILLCOLOR = '#98fb98'
# Default graphviz attribute sets; 'label' is filled in per figure.
_GRAPH_STYLES = {
    'graph': {
        'fontsize': _LAYER_FONTSIZE,
        'rankdir': 'TB',
        'label': None,
    },
    'nodes': {
    },
    'edges': {
        'arrowhead': 'open',
        'fontsize': '12',
    }
}
# Graph constructors pre-bound to the chosen output format.
GV_GRAPH = functools.partial(gv.Graph, format=_SAVE_FORMAT)
GV_DIGRAPH = functools.partial(gv.Digraph, format=_SAVE_FORMAT)
# Utilities for adding nodes, edges and styles -------------------------------------------
def add_nodes(graph, nodes):
    """Add each node to *graph*; a node is a name or (name, attrs) pair."""
    for node in nodes:
        if isinstance(node, tuple):
            name, attrs = node
            graph.node(name, **attrs)
        else:
            graph.node(node)
    return graph
def add_edges(graph, edges):
    """Add each edge to *graph*; an edge is (u, v) or ((u, v), attrs)."""
    for edge in edges:
        if isinstance(edge[0], tuple):
            endpoints, attrs = edge
        else:
            endpoints, attrs = edge, {}
        graph.edge(*endpoints, **attrs)
    return graph
def apply_styles(graph, styles):
    """Merge the 'graph'/'nodes'/'edges' style dicts into *graph*'s attrs."""
    for section, attr_dict in (('graph', graph.graph_attr),
                               ('nodes', graph.node_attr),
                               ('edges', graph.edge_attr)):
        attr_dict.update(styles.get(section) or {})
    return graph
# Wrappers for tedious routines ----------------------------------------------------------
def _get_ip_layer(layer_idx):
    """ Returns a tuple representing the input layer. """
    # (node_id, graphviz attributes) for a filled circular 'i/p' node.
    return (str(layer_idx), {'label': 'i/p', 'shape': 'circle', 'style': 'filled',
                             'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
                             'fontname': _IPOP_FONT})
def _get_op_layer(layer_idx):
    """ Returns a tuple representing the output layer. """
    # (node_id, graphviz attributes) for a filled circular 'o/p' node.
    return (str(layer_idx), {'label': 'o/p', 'shape': 'circle', 'style': 'filled',
                             'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
                             'fontname': _IPOP_FONT})
def _get_layer(layer_idx, nn, for_pres):
    """ Returns a pair for one layer: the graphviz node tuple
    (node_id, attrs) and a raw (layer_idx, label, num_units) triple
    used later by tonxgraph.
    """
    # Input/output, decision (softmax/linear) and ordinary layers get
    # distinct fill colours.
    if nn.layer_labels[layer_idx] in ['ip', 'op']:
        fill_colour = _IPOP_FILLCOLOR
    elif nn.layer_labels[layer_idx] in ['softmax', 'linear']:
        fill_colour = _DECISION_FILLCOLOR
    else:
        fill_colour = _FILLCOLOR
    label = nn.get_layer_descr(layer_idx, for_pres)
    return (str(layer_idx), {'label': label, 'shape': 'rectangle', 'fillcolor': fill_colour,
                             'style': 'filled', 'fontname': _LAYER_FONT}),((layer_idx), nn.layer_labels[layer_idx],(nn.num_units_in_each_layer[layer_idx]))
def _get_edge(layer_idx_start, layer_idx_end):
""" Returns a tuple which is an edge. """
return (str(layer_idx_start), str(layer_idx_end))
def _get_edges(conn_mat):
""" Returns all edges. """
starts, ends = conn_mat.nonzero()
return [_get_edge(starts[i], ends[i]) for i in range(len(starts))]
# Main API ------------------------------------------------------------------------------
def visualise_nn(nn, save_file_prefix, fig_label=None, for_pres=True):
    """ The main API which will be used to visualise the network.

    Renders the network with graphviz to save_file_prefix.<format> and
    returns the graph dict built by tonxgraph().
    """
    # First create nodes in the order
    nodes = [_get_layer(i, nn, for_pres)[0] for i in range(nn.num_layers)]
    nodes_my = [_get_layer(i, nn, for_pres)[1] for i in range(nn.num_layers)]
    #print("nodes_my=",nodes_my)
    edges = _get_edges(nn.conn_mat)
    edges_my = [(int(s),int(t)) for s,t in edges]
    #print("edges_my=",edges_my)
    nn_graph = GV_DIGRAPH()
    add_nodes(nn_graph, nodes)
    add_edges(nn_graph, edges)
    # NOTE(review): this mutates the shared module-level _GRAPH_STYLES dict,
    # so the previous figure's label persists until overwritten here.
    graph_styles = _GRAPH_STYLES
    graph_styles['graph']['label'] = fig_label
    apply_styles(nn_graph, graph_styles)
    nn_graph.render(save_file_prefix)
    if os.path.exists(save_file_prefix):
        # graphviz also creates another file in the name of the prefix. delete it.
        os.remove(save_file_prefix)
    return tonxgraph(nodes_my,edges_my)
# Vocabulary of node types: the three special labels, then one entry per
# (activation, hidden-width) pair -- widths in the outer loop, activations
# in the inner loop, matching the one-hot encoding used by tonxgraph.
NODE_TYPES = ['ip', 'op', 'linear']
hidden_list = [8, 16, 32, 64, 128, 256, 512, 1024]
_ACTIVATIONS = ('relu', 'crelu', 'leaky-relu', 'softplus', 'elu', 'logistic',
                'tanh')
NODE_TYPES.extend('%s-%s' % (act, width)
                  for width in hidden_list for act in _ACTIVATIONS)
def tonxgraph(nodes_my, edges_my):
    """Convert visualise_nn's node/edge lists into a graph dict.

    :param nodes_my: list of (layer_idx, label, num_units) triples
    :param edges_my: list of (src, dst) integer pairs
    :return: dict with one-hot node features 'x' (numpy array), COO-style
        'edge_index' ([rows, cols]) and all-ones 'edge_attr' (numpy array)
    """
    g = {"x": [], "edge_index": [], "edge_attr": []}
    # (renamed the loop variable from `type`, which shadowed the builtin,
    # and dropped the unused int() conversion of the node index)
    for _n_idx, node_type, num_hidden in nodes_my:
        if node_type == 'ip' or node_type == 'op' or node_type == 'linear':
            g["x"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index(node_type)])
        else:
            # NOTE(review): the recorded unit count is discarded and a random
            # width is sampled instead -- presumably because num_hidden may be
            # unset here; confirm against visualise_nn's callers.
            num_hidden = np.random.choice(hidden_list)
            g["x"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index("%s-%s" % (node_type, num_hidden))])
    row = []
    col = []
    for s, t in edges_my:
        row.append(s)
        col.append(t)
        g["edge_attr"].append(np.ones(1))
    g["edge_index"].append(row)
    g["edge_index"].append(col)
    g["x"] = np.array(g["x"])
    g["edge_attr"] = np.array(g["edge_attr"])
    print("+", g["x"].shape)
    # Downstream models only support graphs of at most 20 nodes.
    assert g["x"].shape[0] <= 20
    return g
#g_nx = nx.nx_agraph.from_agraph(nn_graph)
#A = nx.nx_agraph.to_agraph(g_nx) # convert to a graphviz graph
#A.layout() # neato layout
#A.draw("a.ps")
def visualise_list_of_nns(list_of_nns, save_dir, fig_labels=None, fig_file_names=None,
                          for_pres=False):
    """ Visualises every network in the list and returns their graph dicts. """
    num_nns = len(list_of_nns)
    labels = fig_labels if fig_labels is not None else [None] * num_nns
    if fig_file_names is None:
        file_names = [str(idx) for idx in range(num_nns)]
    else:
        file_names = fig_file_names
    g_list = []
    for idx, nn in enumerate(list_of_nns):
        prefix = os.path.join(save_dir, file_names[idx])
        g_list.append(visualise_nn(nn, prefix, labels[idx], for_pres))
    return g_list
| 31.175879
| 153
| 0.629433
|
import functools
import graphviz as gv
import os
import networkx as nx
import numpy as np
_SAVE_FORMAT = 'eps'
_LAYER_SHAPE = 'rectangle'
_IPOP_SHAPE = 'circle'
_LAYER_FONT = 'DejaVuSans'
_IPOP_FONT = 'Helvetica'
_LAYER_FONTSIZE = '16'
_FILLCOLOR = 'transparent'
_IPOP_FONTSIZE = '12'
_IPOP_FILLCOLOR = '#ffc0cb'
_DECISION_FILLCOLOR = '#98fb98'
_GRAPH_STYLES = {
'graph': {
'fontsize': _LAYER_FONTSIZE,
'rankdir': 'TB',
'label': None,
},
'nodes': {
},
'edges': {
'arrowhead': 'open',
'fontsize': '12',
}
}
GV_GRAPH = functools.partial(gv.Graph, format=_SAVE_FORMAT)
GV_DIGRAPH = functools.partial(gv.Digraph, format=_SAVE_FORMAT)
def add_nodes(graph, nodes):
for n in nodes:
if isinstance(n, tuple):
graph.node(n[0], **n[1])
else:
graph.node(n)
return graph
def add_edges(graph, edges):
for e in edges:
if isinstance(e[0], tuple):
graph.edge(*e[0], **e[1])
else:
graph.edge(*e)
return graph
def apply_styles(graph, styles):
graph.graph_attr.update(
('graph' in styles and styles['graph']) or {}
)
graph.node_attr.update(
('nodes' in styles and styles['nodes']) or {}
)
graph.edge_attr.update(
('edges' in styles and styles['edges']) or {}
)
return graph
def _get_ip_layer(layer_idx):
return (str(layer_idx), {'label': 'i/p', 'shape': 'circle', 'style': 'filled',
'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
'fontname': _IPOP_FONT})
def _get_op_layer(layer_idx):
return (str(layer_idx), {'label': 'o/p', 'shape': 'circle', 'style': 'filled',
'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,
'fontname': _IPOP_FONT})
def _get_layer(layer_idx, nn, for_pres):
if nn.layer_labels[layer_idx] in ['ip', 'op']:
fill_colour = _IPOP_FILLCOLOR
elif nn.layer_labels[layer_idx] in ['softmax', 'linear']:
fill_colour = _DECISION_FILLCOLOR
else:
fill_colour = _FILLCOLOR
label = nn.get_layer_descr(layer_idx, for_pres)
return (str(layer_idx), {'label': label, 'shape': 'rectangle', 'fillcolor': fill_colour,
'style': 'filled', 'fontname': _LAYER_FONT}),((layer_idx), nn.layer_labels[layer_idx],(nn.num_units_in_each_layer[layer_idx]))
def _get_edge(layer_idx_start, layer_idx_end):
return (str(layer_idx_start), str(layer_idx_end))
def _get_edges(conn_mat):
starts, ends = conn_mat.nonzero()
return [_get_edge(starts[i], ends[i]) for i in range(len(starts))]
def visualise_nn(nn, save_file_prefix, fig_label=None, for_pres=True):
nodes = [_get_layer(i, nn, for_pres)[0] for i in range(nn.num_layers)]
nodes_my = [_get_layer(i, nn, for_pres)[1] for i in range(nn.num_layers)]
edges = _get_edges(nn.conn_mat)
edges_my = [(int(s),int(t)) for s,t in edges]
nn_graph = GV_DIGRAPH()
add_nodes(nn_graph, nodes)
add_edges(nn_graph, edges)
graph_styles = _GRAPH_STYLES
graph_styles['graph']['label'] = fig_label
apply_styles(nn_graph, graph_styles)
nn_graph.render(save_file_prefix)
if os.path.exists(save_file_prefix):
os.remove(save_file_prefix)
return tonxgraph(nodes_my,edges_my)
NODE_TYPES = ['ip', 'op', 'linear']
hidden_list = [8,16,32,64,128,256,512,1024]
for i in hidden_list:
NODE_TYPES.append("relu-%s"%i)
NODE_TYPES.append("crelu-%s"%i)
NODE_TYPES.append("leaky-relu-%s"%i)
NODE_TYPES.append("softplus-%s"%i)
NODE_TYPES.append("elu-%s"%i)
NODE_TYPES.append("logistic-%s"%i)
NODE_TYPES.append("tanh-%s"%i)
def tonxgraph(nodes_my,edges_my):
g = {"x":[],"edge_index":[],"edge_attr":[]}
for n_idx, type, num_hidden in nodes_my:
n_idx = int(n_idx)
if type=='ip' or type=='op' or type=='linear':
g["x"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index(type)])
else:
num_hidden = np.random.choice(hidden_list)
g["x"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index("%s-%s"%(type,num_hidden))])
row = []
col = []
for s, t in edges_my:
row.append(s)
col.append(t)
g["edge_attr"].append(np.ones(1))
g["edge_index"].append(row)
g["edge_index"].append(col)
g["x"]=np.array(g["x"])
g["edge_attr"]=np.array(g["edge_attr"])
print("+",g["x"].shape)
assert g["x"].shape[0] <= 20
return g
def visualise_list_of_nns(list_of_nns, save_dir, fig_labels=None, fig_file_names=None,
for_pres=False):
g_list = []
if fig_labels is None:
fig_labels = [None] * len(list_of_nns)
if fig_file_names is None:
fig_file_names = [str(idx) for idx in range(len(list_of_nns))]
for idx, nn in enumerate(list_of_nns):
save_file_prefix = os.path.join(save_dir, fig_file_names[idx])
g = visualise_nn(nn, save_file_prefix, fig_labels[idx], for_pres)
g_list.append(g)
return g_list
| true
| true
|
f708ed2fb34b2811477e4d2bb6b9fda638b2306e
| 814
|
py
|
Python
|
django_api/serializers.py
|
KrishnaChandrapati/django_api1
|
3ce95318301c8d1b885041a3de1fae3b1fe52a73
|
[
"MIT"
] | null | null | null |
django_api/serializers.py
|
KrishnaChandrapati/django_api1
|
3ce95318301c8d1b885041a3de1fae3b1fe52a73
|
[
"MIT"
] | null | null | null |
django_api/serializers.py
|
KrishnaChandrapati/django_api1
|
3ce95318301c8d1b885041a3de1fae3b1fe52a73
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of Django auth users."""
    class Meta:
        model = User
        fields = ['url', 'username', 'email', 'groups']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of Django auth groups."""
    class Meta:
        model = Group
        fields = ['url', 'name']
class ItemListSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for ItemList records exposed through the API."""
    class Meta:
        model = ItemList
        fields = ['id', 'status', 'type', 'name', 'city']
class ExampleModelLessSerializer(serializers.Serializer):
    """Model-less serializer demonstrating explicit field declarations."""
    project_name = serializers.CharField()
    total_head_count = serializers.IntegerField()
    start_date = serializers.DateTimeField()
    location = serializers.CharField()
| 23.941176
| 65
| 0.708845
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email', 'groups']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ['url', 'name']
class ItemListSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ItemList
fields = ['id', 'status', 'type', 'name', 'city']
class ExampleModelLessSerializer(serializers.Serializer):
project_name = serializers.CharField()
total_head_count = serializers.IntegerField()
start_date = serializers.DateTimeField()
location = serializers.CharField()
| true
| true
|
f708edcc42449148f4a71856e7c68b84c7f934c7
| 2,583
|
py
|
Python
|
bfg9000/path.py
|
Mattlk13/bfg9000
|
2d897db09ea81a0ffef0a52f2e06cb9cb4a70a02
|
[
"BSD-3-Clause"
] | 72
|
2015-06-23T02:35:13.000Z
|
2021-12-08T01:47:40.000Z
|
bfg9000/path.py
|
jimporter/bfg9000
|
c206646ecfed0d1a510e993b93e6a15677f45a14
|
[
"BSD-3-Clause"
] | 139
|
2015-03-01T18:48:17.000Z
|
2021-06-18T15:45:14.000Z
|
bfg9000/path.py
|
Mattlk13/bfg9000
|
2d897db09ea81a0ffef0a52f2e06cb9cb4a70a02
|
[
"BSD-3-Clause"
] | 19
|
2015-12-23T21:24:33.000Z
|
2022-01-06T04:04:41.000Z
|
import functools
import os
from contextlib import contextmanager
from .platforms.basepath import BasePath, Root, InstallRoot, DestDir # noqa
from .platforms.host import platform_info
# Concrete Path class selected for the current host platform.
Path = platform_info().Path
def abspath(path, type=Path, **kwargs):
    """Build an absolute path object of class *type* (default: host Path)
    from the string *path*; extra kwargs are forwarded to ``type.abspath``."""
    return type.abspath(path, **kwargs)
def commonprefix(paths):
    """Return the longest common prefix of *paths* as a new path object.

    Returns None when *paths* is empty or the paths do not all share the
    same root. The result is flagged as a directory whenever it is a
    strict prefix of at least one input.
    """
    if not paths:
        return None
    root = paths[0].root
    if any(p.root != root for p in paths):
        return None

    cls = type(paths[0])
    pieces = [p.split() for p in paths]
    # Lexicographic min/max bracket every other split: any component shared
    # by these two is shared by all of them.
    lowest, highest = min(pieces), max(pieces)

    shared = 0
    for a, b in zip(lowest, highest):
        if a != b:
            return cls(cls.sep.join(lowest[:shared]), root, directory=True)
        shared += 1
    return cls(cls.sep.join(lowest), root, directory=(lowest != highest))
def uniquetrees(paths):
    """Drop every path that lies inside the subtree of another path.

    Returns the surviving paths sorted by (root, components); an empty
    input yields an empty list.
    """
    if not paths:
        return []

    def is_prefix(prefix, bits):
        # zip() stops at the shorter sequence, so this is True when every
        # component of *prefix* matches the corresponding one in *bits*.
        return all(x == y for x, y in zip(prefix, bits))

    keyed = sorted(((p, [p.root.value] + p.split()) for p in paths),
                   key=lambda pair: pair[1])

    first, last_bits = keyed[0]
    roots = [first]
    for path, bits in keyed[1:]:
        # Sorted order guarantees any descendant follows its ancestor, so a
        # single look-back at the last kept entry suffices.
        if not is_prefix(last_bits, bits):
            last_bits = bits
            roots.append(path)
    return roots
def _wrap_ospath(fn):
@functools.wraps(fn)
def wrapper(path, variables=None):
return fn(path.string(variables))
return wrapper
# Path-object flavored wrappers around the corresponding os.path predicates.
exists = _wrap_ospath(os.path.exists)
isdir = _wrap_ospath(os.path.isdir)
isfile = _wrap_ospath(os.path.isfile)
islink = _wrap_ospath(os.path.islink)
def samefile(path1, path2, variables=None):
    """True when the two path objects refer to the same filesystem entry
    (delegates to os.path.samefile after rendering both to strings)."""
    first = path1.string(variables)
    second = path2.string(variables)
    return os.path.samefile(first, second)
def listdir(path, variables=None):
    """Split the entries of *path* into (directories, non-directories).

    Directory entries are returned via ``as_directory()``. If the
    directory cannot be read, the lists collected so far (possibly
    empty) are returned instead of raising.
    """
    subdirs, others = [], []
    try:
        for name in os.listdir(path.string(variables)):
            entry = path.append(name)
            if isdir(entry, variables):
                subdirs.append(entry.as_directory())
            else:
                others.append(entry)
    except OSError:
        # Unreadable/missing directory: fall through with what we have.
        pass
    return subdirs, others
def walk(top, variables=None):
    """Yield (path, dirs, nondirs) for *top* and every directory below it,
    without descending into symlinked directories."""
    if not exists(top, variables):
        return
    subdirs, others = listdir(top, variables)
    yield top, subdirs, others
    for sub in subdirs:
        # Skip recursion through symlinks to avoid cycles.
        if islink(sub, variables):
            continue
        yield from walk(sub, variables)
@contextmanager
def pushd(dirname, makedirs=False, mode=0o777, exist_ok=False):
    """Temporarily change the working directory to *dirname*.

    When *makedirs* is true the directory is created first (with *mode*,
    honoring *exist_ok*). The previous working directory is always
    restored on exit, even if the body raises.
    """
    previous = os.getcwd()
    if makedirs:
        os.makedirs(dirname, mode, exist_ok)
    os.chdir(dirname)
    try:
        yield
    finally:
        os.chdir(previous)
| 23.916667
| 76
| 0.603562
|
import functools
import os
from contextlib import contextmanager
from .platforms.basepath import BasePath, Root, InstallRoot, DestDir from .platforms.host import platform_info
Path = platform_info().Path
def abspath(path, type=Path, **kwargs):
return type.abspath(path, **kwargs)
def commonprefix(paths):
if not paths or any(i.root != paths[0].root for i in paths):
return None
cls = type(paths[0])
split = [i.split() for i in paths]
lo, hi = min(split), max(split)
for i, bit in enumerate(lo):
if bit != hi[i]:
return cls(cls.sep.join(lo[:i]), paths[0].root, directory=True)
return cls(cls.sep.join(lo), paths[0].root, directory=(lo != hi))
def uniquetrees(paths):
def ischild(a, b):
for i, j in zip(a, b):
if i != j:
return False
return True
if not paths:
return []
paths = [(i, [i.root.value] + i.split()) for i in paths]
paths.sort(key=lambda i: i[1])
piter = iter(paths)
p, last = next(piter)
uniques = [p]
for p, bits in piter:
if not ischild(last, bits):
last = bits
uniques.append(p)
return uniques
def _wrap_ospath(fn):
@functools.wraps(fn)
def wrapper(path, variables=None):
return fn(path.string(variables))
return wrapper
exists = _wrap_ospath(os.path.exists)
isdir = _wrap_ospath(os.path.isdir)
isfile = _wrap_ospath(os.path.isfile)
islink = _wrap_ospath(os.path.islink)
def samefile(path1, path2, variables=None):
return os.path.samefile(path1.string(variables),
path2.string(variables))
def listdir(path, variables=None):
dirs, nondirs = [], []
try:
names = os.listdir(path.string(variables))
for name in names:
curpath = path.append(name)
if isdir(curpath, variables):
dirs.append(curpath.as_directory())
else:
nondirs.append(curpath)
except OSError:
pass
return dirs, nondirs
def walk(top, variables=None):
if not exists(top, variables):
return
dirs, nondirs = listdir(top, variables)
yield top, dirs, nondirs
for d in dirs:
if not islink(d, variables):
for i in walk(d, variables):
yield i
@contextmanager
def pushd(dirname, makedirs=False, mode=0o777, exist_ok=False):
old = os.getcwd()
if makedirs:
os.makedirs(dirname, mode, exist_ok)
os.chdir(dirname)
try:
yield
finally:
os.chdir(old)
| true
| true
|
f708ede6397fe9c30873a9c8fdff9588cddf90dd
| 1,429
|
py
|
Python
|
tests/shell/test_basic_commands.py
|
hn04147/pytorch-project-template
|
4bbe17a61af8b2f47f7afa1c96e4ff347123bfb8
|
[
"MIT",
"Unlicense"
] | 2
|
2020-11-05T18:56:32.000Z
|
2020-11-12T22:38:32.000Z
|
tests/shell/test_basic_commands.py
|
hn04147/pytorch-project-template
|
4bbe17a61af8b2f47f7afa1c96e4ff347123bfb8
|
[
"MIT",
"Unlicense"
] | 19
|
2020-11-12T20:42:21.000Z
|
2020-11-29T15:14:04.000Z
|
tests/shell/test_basic_commands.py
|
hn04147/pytorch-project-template
|
4bbe17a61af8b2f47f7afa1c96e4ff347123bfb8
|
[
"MIT",
"Unlicense"
] | 1
|
2020-11-12T20:19:51.000Z
|
2020-11-12T20:19:51.000Z
|
import pytest
from tests.helpers.run_command import run_command
from tests.helpers.runif import RunIf
"""
A couple of sanity checks to make sure the model doesn't crash with different running options.
"""
def test_fast_dev_run():
    """Smoke test: run a single train/val/test batch via fast_dev_run."""
    overrides = ["++trainer.fast_dev_run=true"]
    run_command(["train.py"] + overrides)
@pytest.mark.slow
def test_cpu():
    """Train for one full epoch with GPUs disabled."""
    overrides = ["++trainer.max_epochs=1", "++trainer.gpus=0"]
    run_command(["train.py"] + overrides)
# use RunIf to skip execution of some tests, e.g. when no gpus are available
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_gpu():
    """Train for one epoch on a single GPU (skipped without a GPU)."""
    run_command(["train.py", "++trainer.max_epochs=1", "++trainer.gpus=1"])
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_mixed_precision():
    """Train one epoch with native automatic mixed precision (AMP) on GPU."""
    overrides = [
        "++trainer.max_epochs=1",
        "++trainer.gpus=1",
        "++trainer.precision=16",
    ]
    run_command(["train.py"] + overrides)
@pytest.mark.slow
def test_double_validation_loop():
    """Train one epoch, running the validation loop twice (every half epoch)."""
    overrides = [
        "++trainer.max_epochs=1",
        "++trainer.val_check_interval=0.5",
    ]
    run_command(["train.py"] + overrides)
| 24.220339
| 94
| 0.647306
|
import pytest
from tests.helpers.run_command import run_command
from tests.helpers.runif import RunIf
def test_fast_dev_run():
command = ["train.py", "++trainer.fast_dev_run=true"]
run_command(command)
@pytest.mark.slow
def test_cpu():
command = ["train.py", "++trainer.max_epochs=1", "++trainer.gpus=0"]
run_command(command)
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_gpu():
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.gpus=1",
]
run_command(command)
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_mixed_precision():
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.gpus=1",
"++trainer.precision=16",
]
run_command(command)
@pytest.mark.slow
def test_double_validation_loop():
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.val_check_interval=0.5",
]
run_command(command)
| true
| true
|
f708ee95a7c0d97611564ac57312b30829517a80
| 4,323
|
py
|
Python
|
inkfish/cmds.py
|
alanefl/vdf-competition
|
84efc3aec180c43582c9421c6fb7fb2e22000635
|
[
"Apache-2.0"
] | 97
|
2018-10-04T18:10:42.000Z
|
2021-08-23T10:37:06.000Z
|
inkfish/cmds.py
|
alanefl/vdf-competition
|
84efc3aec180c43582c9421c6fb7fb2e22000635
|
[
"Apache-2.0"
] | 4
|
2018-10-04T18:20:49.000Z
|
2021-05-03T07:13:14.000Z
|
inkfish/cmds.py
|
alanefl/vdf-competition
|
84efc3aec180c43582c9421c6fb7fb2e22000635
|
[
"Apache-2.0"
] | 17
|
2018-10-08T18:08:21.000Z
|
2022-01-12T00:54:32.000Z
|
import argparse
import binascii
import sys
import time
from inkfish.proof_of_time import (create_proof_of_time_wesolowski,
create_proof_of_time_nwesolowski,
create_proof_of_time_pietrzak,
check_proof_of_time_wesolowski,
check_proof_of_time_nwesolowski,
check_proof_of_time_pietrzak)
from .classgroup import ClassGroup
from .create_discriminant import create_discriminant
def create_pot_parser():
    """Build the command-line argument parser for the proof-of-time tool.

    Options select the proof scheme, discriminant size, and n-wesolowski
    depth; positionals give the discriminant challenge (hex), iteration
    count, and an optional hex proof to verify.
    """
    parser = argparse.ArgumentParser(
        description='Generate or verify a proof of time using the Chia '
                    'Verfiable Delay Function (VDF)',
    )
    # Optional flags.
    parser.add_argument("-t", "--type", default="wesolowski",
                        choices=["wesolowski", "n-wesolowski", "pietrzak"],
                        help="the type of proof, wesolowski, n-wesolowski, or pietrzak")
    parser.add_argument("-l", "--length", type=int, default=2048,
                        help="the number of bits of the discriminant")
    parser.add_argument("-d", "--depth", type=int, default=2,
                        help="depth of n-wesolowski (n) default is 2")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="print a bunch of extra stuff about the proof")
    # Positional arguments; hex inputs are decoded to bytes on parse.
    parser.add_argument("discriminant_challenge", type=binascii.unhexlify,
                        help="a hex-encoded challenge used to derive the discriminant")
    parser.add_argument("iterations", type=int,
                        help="number of iterations")
    parser.add_argument("proof", type=binascii.unhexlify,
                        help="the hex-encoded proof", nargs="?")
    return parser
def pot(args=sys.argv):
    """CLI entry point: create or verify a proof of time.

    With a proof argument, verifies it and returns -1 (after printing
    "** INVALID PROOF") on failure; without one, computes a proof and
    prints the hex-encoded result concatenated with the proof.
    """
    parser = create_pot_parser()
    opts = parser.parse_args(args=args[1:])
    discriminant = create_discriminant(opts.discriminant_challenge, opts.length)
    if opts.verbose:
        print("proof type: %s" % opts.type)
        print("discriminant: %s" % discriminant)
        print("discriminant size: %s" % opts.length)

    # Generator element is created as a=2, b=1.
    x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
    if opts.verbose:
        print("x: %s" % str(x))

    if opts.proof:
        # Dispatch the verifier matching the selected scheme; argparse
        # 'choices' guarantees the key is present.
        check = {
            "wesolowski": check_proof_of_time_wesolowski,
            "n-wesolowski": check_proof_of_time_nwesolowski,
            "pietrzak": check_proof_of_time_pietrzak,
        }[opts.type]
        ok = check(discriminant, x, opts.proof, opts.iterations, opts.length)
        if ok:
            print("Proof is valid")
        else:
            print("** INVALID PROOF")
            return -1
    else:
        start_t = time.time() * 1000
        if opts.type == "n-wesolowski":
            result, proof = create_proof_of_time_nwesolowski(
                discriminant, x, opts.iterations, opts.length, opts.depth, 0)
        elif opts.type == "pietrzak":
            result, proof = create_proof_of_time_pietrzak(
                discriminant, x, opts.iterations, opts.length)
        else:
            # argparse restricts types, so this branch is "wesolowski".
            result, proof = create_proof_of_time_wesolowski(
                discriminant, x, opts.iterations, opts.length)
        if opts.verbose:
            elapsed = round((time.time() * 1000) - start_t, 2)
            print("Finished in ", elapsed, "ms")
        hex_result = binascii.hexlify(result).decode("utf8")
        hex_proof = binascii.hexlify(proof).decode("utf8")
        print(hex_result + hex_proof)
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| 41.970874
| 88
| 0.623178
|
import argparse
import binascii
import sys
import time
from inkfish.proof_of_time import (create_proof_of_time_wesolowski,
create_proof_of_time_nwesolowski,
create_proof_of_time_pietrzak,
check_proof_of_time_wesolowski,
check_proof_of_time_nwesolowski,
check_proof_of_time_pietrzak)
from .classgroup import ClassGroup
from .create_discriminant import create_discriminant
def create_pot_parser():
parser = argparse.ArgumentParser(
description='Generate or verify a proof of time using the Chia ' +
'Verfiable Delay Function (VDF)',
)
parser.add_argument("-t", "--type", default="wesolowski",
choices=["wesolowski", "n-wesolowski", "pietrzak"],
help="the type of proof, wesolowski, n-wesolowski, or pietrzak")
parser.add_argument("-l", "--length", type=int, default=2048,
help="the number of bits of the discriminant")
parser.add_argument("-d", "--depth", type=int, default=2,
help="depth of n-wesolowski (n) default is 2")
parser.add_argument("-v", "--verbose", action="store_true",
help="print a bunch of extra stuff about the proof")
parser.add_argument("discriminant_challenge", type=binascii.unhexlify,
help="a hex-encoded challenge used to derive the discriminant")
parser.add_argument("iterations", type=int,
help="number of iterations")
parser.add_argument("proof", type=binascii.unhexlify,
help="the hex-encoded proof", nargs="?")
return parser
def pot(args=sys.argv):
parser = create_pot_parser()
args = parser.parse_args(args=args[1:])
discriminant = create_discriminant(args.discriminant_challenge, args.length)
if args.verbose:
print("proof type: %s" % args.type)
print("discriminant: %s" % discriminant)
print("discriminant size: %s" % args.length)
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
if args.verbose:
print("x: %s" % str(x))
if args.proof:
if args.type == "wesolowski":
ok = check_proof_of_time_wesolowski(
discriminant, x, args.proof, args.iterations, args.length)
elif args.type == "n-wesolowski":
ok = check_proof_of_time_nwesolowski(
discriminant, x, args.proof, args.iterations, args.length)
elif args.type == "pietrzak":
ok = check_proof_of_time_pietrzak(
discriminant, x, args.proof, args.iterations, args.length)
if ok:
print("Proof is valid")
else:
print("** INVALID PROOF")
return -1
else:
start_t = time.time() * 1000
if args.type == "wesolowski":
result, proof = create_proof_of_time_wesolowski(
discriminant, x, args.iterations, args.length)
elif args.type == "n-wesolowski":
result, proof = create_proof_of_time_nwesolowski(
discriminant, x, args.iterations, args.length, args.depth, 0)
elif args.type == "pietrzak":
result, proof = create_proof_of_time_pietrzak(
discriminant, x, args.iterations, args.length)
if args.verbose:
print("Finished in ", round(((time.time() * 1000) - start_t), 2), "ms")
hex_result = binascii.hexlify(result).decode("utf8")
hex_proof = binascii.hexlify(proof).decode("utf8")
print(hex_result + hex_proof)
| true
| true
|
f708eeacfbf6c4ebf00516f1ac9d10f8e5349ebe
| 1,546
|
py
|
Python
|
qhub/cli/validate.py
|
pierrotsmnrd/qhub
|
399684c79f331923444b4fe46fae38ee02bfa2ac
|
[
"BSD-3-Clause"
] | 100
|
2020-05-06T14:36:51.000Z
|
2022-03-31T20:09:29.000Z
|
qhub/cli/validate.py
|
pierrotsmnrd/qhub
|
399684c79f331923444b4fe46fae38ee02bfa2ac
|
[
"BSD-3-Clause"
] | 778
|
2020-04-08T06:28:29.000Z
|
2022-03-31T21:32:08.000Z
|
qhub/cli/validate.py
|
pierrotsmnrd/qhub
|
399684c79f331923444b4fe46fae38ee02bfa2ac
|
[
"BSD-3-Clause"
] | 36
|
2020-08-19T21:03:32.000Z
|
2022-03-18T17:04:50.000Z
|
import pathlib
from ruamel import yaml
from qhub.schema import verify
from qhub.provider.cicd.linter import comment_on_pr
def create_validate_subcommand(subparser):
    """Register the `validate` subcommand and its arguments on *subparser*,
    wiring it to run handle_validate."""
    validate_parser = subparser.add_parser("validate")
    validate_parser.add_argument(
        "configdeprecated",
        help="qhub configuration yaml file (deprecated - please pass in as -c/--config flag)",
        nargs="?",
    )
    validate_parser.add_argument(
        "-c", "--config", help="qhub configuration yaml file", required=False
    )
    validate_parser.add_argument(
        "--enable-commenting", help="Turn on PR commenting", action="store_true"
    )
    validate_parser.set_defaults(func=handle_validate)
def handle_validate(args):
    """Validate a qhub-config.yaml file.

    The filename comes from -c/--config or the deprecated positional
    argument (exactly one must be supplied). With --enable-commenting
    the results are posted as a PR comment; otherwise the config is
    verified directly.

    Raises ValueError when both config sources are given, when neither
    is given, or when the named file does not exist.
    """
    if args.configdeprecated and args.config:
        raise ValueError(
            "Please pass in -c/--config flag specifying your qhub-config.yaml file, and do NOT pass it as a standalone argument"
        )

    config_filename = args.config or args.configdeprecated
    if not config_filename:
        raise ValueError(
            "Please pass in a qhub-config.yaml filename using the -c/--config argument"
        )

    # Resolve to a Path once; previously the `config or configdeprecated`
    # expression was redundantly evaluated a second time here.
    config_filename = pathlib.Path(config_filename)
    if not config_filename.is_file():
        raise ValueError(
            f"passed in configuration filename={config_filename} must exist"
        )

    with config_filename.open() as f:
        config = yaml.safe_load(f.read())

    if args.enable_commenting:
        # for PR's only
        comment_on_pr(config)
    else:
        verify(config)
| 30.92
| 128
| 0.674644
|
import pathlib
from ruamel import yaml
from qhub.schema import verify
from qhub.provider.cicd.linter import comment_on_pr
def create_validate_subcommand(subparser):
subparser = subparser.add_parser("validate")
subparser.add_argument(
"configdeprecated",
help="qhub configuration yaml file (deprecated - please pass in as -c/--config flag)",
nargs="?",
)
subparser.add_argument(
"-c", "--config", help="qhub configuration yaml file", required=False
)
subparser.add_argument(
"--enable-commenting", help="Turn on PR commenting", action="store_true"
)
subparser.set_defaults(func=handle_validate)
def handle_validate(args):
if args.configdeprecated and args.config:
raise ValueError(
"Please pass in -c/--config flag specifying your qhub-config.yaml file, and do NOT pass it as a standalone argument"
)
config_filename = args.config or args.configdeprecated
if not config_filename:
raise ValueError(
"Please pass in a qhub-config.yaml filename using the -c/--config argument"
)
config_filename = pathlib.Path(args.config or args.configdeprecated)
if not config_filename.is_file():
raise ValueError(
f"passed in configuration filename={config_filename} must exist"
)
with config_filename.open() as f:
config = yaml.safe_load(f.read())
if args.enable_commenting:
comment_on_pr(config)
else:
verify(config)
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.