Dataset columns (one row per source file):
hexsha: string (length 40)
size: int64 (1 to 1.03M)
ext: string (10 distinct values)
lang: string (1 distinct value)
max_stars_repo_path: string (length 3 to 239)
max_stars_repo_name: string (length 5 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 239)
max_issues_repo_name: string (length 5 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 239)
max_forks_repo_name: string (length 5 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 1 to 1.03M)
avg_line_length: float64 (1 to 958k)
max_line_length: int64 (1 to 1.03M)
alphanum_fraction: float64 (0 to 1)
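A minimal sketch of inspecting rows with this schema, assuming a local Parquet shard of the data is available (the file name below is a placeholder, not a real path):

```python
import pandas as pd

df = pd.read_parquet("shard-00000.parquet")  # placeholder shard name
print(df[["max_stars_repo_name", "max_stars_repo_path", "size", "max_stars_count"]].head())
print(df.loc[0, "content"][:200])  # first 200 characters of the first file's source
```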
794bb84bac8a6b82aaa034636e975968bd63ad5a | size: 754 | ext: py | lang: Python
| max_stars: pm4pyws/handlers/parquet/statistics/case_duration.py | ehbasouri/pm4py-ws | 9bf5f88848a4aa2873bae86af95d37f64ae1dde1 | ["Apache-2.0"] | null | null | null
| max_issues: pm4pyws/handlers/parquet/statistics/case_duration.py | ehbasouri/pm4py-ws | 9bf5f88848a4aa2873bae86af95d37f64ae1dde1 | ["Apache-2.0"] | null | null | null
| max_forks: pm4pyws/handlers/parquet/statistics/case_duration.py | ehbasouri/pm4py-ws | 9bf5f88848a4aa2873bae86af95d37f64ae1dde1 | ["Apache-2.0"] | null | null | null
from pm4py.statistics.traces.pandas import case_statistics
from pm4py.visualization.common.utils import get_base64_from_file
from pm4py.visualization.graphs import factory as graphs_factory
def get_case_duration_svg(dataframe, parameters=None):
"""
Gets the SVG of the case duration graph
Parameters
-------------
dataframe
Dataframe
parameters
Possible parameters of the algorithm
Returns
-------------
graph
Case duration graph
"""
if parameters is None:
parameters = {}
x, y = case_statistics.get_kde_caseduration(dataframe, parameters)
gviz = graphs_factory.apply_plot(x, y, variant="cases", parameters={"format": "svg"})
return get_base64_from_file(gviz)
| avg_line_length: 26 | max_line_length: 89 | alphanum_fraction: 0.690981 |
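A minimal usage sketch for get_case_duration_svg above; it assumes pm4py's default event-log column names ('case:concept:name', 'time:timestamp'), that the module is importable from the pm4py-ws project, and a tiny made-up event log:

```python
import pandas as pd

# Illustrative import path, mirroring the file's location in the repo.
from pm4pyws.handlers.parquet.statistics.case_duration import get_case_duration_svg

# Made-up event log with pm4py's default column names.
log = pd.DataFrame({
    "case:concept:name": ["c1", "c1", "c2", "c2"],
    "concept:name": ["register", "ship", "register", "ship"],
    "time:timestamp": pd.to_datetime([
        "2020-01-01 08:00", "2020-01-01 09:30",
        "2020-01-02 08:00", "2020-01-02 12:00",
    ]),
})
svg_b64 = get_case_duration_svg(log)  # base64-encoded SVG of the case-duration KDE
```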
794bb8756a2767062ef55c0d8c1e48e4940bfaf2 | size: 3,538 | ext: py | lang: Python
| max_stars: bindings/python/ensmallen/datasets/string/firmicutesbacteriumcag1295924.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z
| max_issues: bindings/python/ensmallen/datasets/string/firmicutesbacteriumcag1295924.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z
| max_forks: bindings/python/ensmallen/datasets/string/firmicutesbacteriumcag1295924.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z
"""
This file offers the methods to automatically retrieve the graph Firmicutes bacterium CAG:129_59_24.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def FirmicutesBacteriumCag1295924(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Firmicutes bacterium CAG:129_59_24 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Firmicutes bacterium CAG:129_59_24 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="FirmicutesBacteriumCag1295924",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| avg_line_length: 33.695238 | max_line_length: 223 | alphanum_fraction: 0.682024 |
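A minimal usage sketch for the retrieval function above, assuming the ensmallen package is installed and the STRING files can be downloaded (or are already cached):

```python
from ensmallen.datasets.string import FirmicutesBacteriumCag1295924

# Download (or load from cache) the undirected graph and print its report.
graph = FirmicutesBacteriumCag1295924(
    directed=False,
    version="links.v11.5",       # one of the versions listed in the docstring
    cache_path="graphs/string",
)
print(graph)
```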
794bb89f6a43d741d1f3dc68f51a4505b0915254 | size: 7,568 | ext: py | lang: Python
| max_stars: python/Utility/Metric.py | billy000400/MLTracking | e5bd3e1f51919a093bb05d78ec9c3fa7877c3744 | ["MIT"] | null | null | null
| max_issues: python/Utility/Metric.py | billy000400/MLTracking | e5bd3e1f51919a093bb05d78ec9c3fa7877c3744 | ["MIT"] | 1 | 2021-01-03T08:57:34.000Z | 2021-01-03T23:41:22.000Z
| max_forks: python/Utility/Metric.py | billy000400/MLTracking | e5bd3e1f51919a093bb05d78ec9c3fa7877c3744 | ["MIT"] | null | null | null
import sys
from pathlib import Path
import tensorflow as tf
from tensorflow.math import exp
from tensorflow.keras.metrics import (
binary_accuracy,
categorical_accuracy
)
from tensorflow.keras.backend import print_tensor
def union(rec_a, rec_b, intersection):
area_a = (rec_a[1]-rec_a[0])*(rec_a[3]-rec_a[2])
area_b = (rec_b[1]-rec_b[0])*(rec_b[3]-rec_b[2])
return area_a+area_b-intersection
def intersection(rec_a, rec_b):
# rec_a(b) should be (xmin, xmax, ymin, ymax)
w = tf.math.reduce_min([rec_a[1], rec_b[1]]) - tf.math.reduce_max([rec_a[0], rec_b[0]])
h = tf.math.reduce_min([rec_a[3], rec_b[3]]) - tf.math.reduce_max([rec_a[2], rec_b[2]])
def f1(): return 0.0
def f2(): return w*h
result = tf.cond( tf.math.logical_or( tf.less(w,0.0), tf.less(h,0.0) ), f1, f2)
return result
def iou(rec_a, rec_b):
overlap = intersection(rec_a, rec_b)
sum = union(rec_a, rec_b, overlap)
return overlap/sum
def unmasked_binary_accuracy(p_r, p_p):
mask = ~tf.math.is_nan(p_r)
#mask.set_shape([None,32,32,18])
mp_r = tf.boolean_mask(p_r, mask=mask)
mp_p = tf.boolean_mask(p_p, mask=mask)
score_ew = binary_accuracy(mp_r, mp_p)
score = tf.math.reduce_sum(score_ew)
N_cls = tf.size(score_ew)
N_cls = tf.cast(N_cls, tf.float32)
return score/N_cls*100.0
def unmasked_categorical_accuracy(p_r, p_p):
# p_real_sl = p_r[:,:,:,0]
# mask = ~tf.math.is_nan(p_real_sl)
mask = ~tf.math.is_nan(p_r)
mp_r = tf.boolean_mask(p_r, mask=mask)
mp_p = tf.boolean_mask(p_p, mask=mask)
score_ew = categorical_accuracy(mp_r, mp_p)
score = tf.math.reduce_sum(score_ew)
N_cls = tf.size(score_ew)
N_cls = tf.cast(N_cls, tf.float32)
return score/N_cls*100.0
def top2_categorical_accuracy(y_real, y_predict):
major_mask = y_real[:,:,:,2]==1
bg_mask = y_real[:,:,:,1]==1
y_real_major = tf.boolean_mask(y_real, major_mask)
y_predict_major = tf.boolean_mask(y_predict, major_mask)
y_real_bg = tf.boolean_mask(y_real, bg_mask)
y_predict_bg = tf.boolean_mask(y_predict, bg_mask)
score_major_ew = categorical_accuracy(y_real_major, y_predict_major)
score_bg_ew = categorical_accuracy(y_real_bg, y_predict_bg)
score_major = tf.math.reduce_sum(score_major_ew)
score_bg = tf.math.reduce_sum(score_bg_ew)
N_major = tf.size(score_major_ew)
N_major = tf.cast(N_major, tf.float32)
N_bg = tf.size(score_bg_ew)
N_bg = tf.cast(N_bg, tf.float32)
sum = N_major + N_bg
return (score_major+score_bg)/sum*100
def weighted_unmasked_binary_accuracy(y_real, y_predict):
major_mask = (y_real==1)
bg_mask = (y_real==0)
y_real_major = tf.boolean_mask(y_real, major_mask)
y_predict_major = tf.boolean_mask(y_predict, major_mask)
y_real_bg = tf.boolean_mask(y_real, bg_mask)
y_predict_bg = tf.boolean_mask(y_predict, bg_mask)
score_major_avg = binary_accuracy(y_real_major, y_predict_major)
score_bg_avg = binary_accuracy(y_real_bg, y_predict_bg)
N_major = tf.size(y_real_major)
N_major = tf.cast(N_major, tf.float32)
N_bg = tf.size(y_real_bg)
N_bg = tf.cast(N_bg, tf.float32)
sum = N_major + N_bg
return (score_major_avg*N_major+score_bg_avg*N_bg)/sum*100
def unmasked_IoU(t_r, t_p):
mask = ~tf.math.is_nan(t_r)
#mask.set_shape([None,32,32,72])
mt_r = tf.boolean_mask(t_r, mask=mask)
mt_p = tf.boolean_mask(t_p, mask=mask)
    mt_r_4 = tf.reshape(mt_r, [tf.size(mt_r) // 4, 4])  # integer division keeps the shape integral
    mt_p_4 = tf.reshape(mt_p, [tf.size(mt_p) // 4, 4])
rx = tf.gather(mt_r_4, 0, axis=1)
ry = tf.gather(mt_r_4, 1, axis=1)
log_rw = tf.gather(mt_r_4, 2, axis=1)
log_rh = tf.gather(mt_r_4, 3, axis=1)
rw = exp(log_rw)
rh = exp(log_rh)
rx1 = rx-rw/2
rx2 = rx+rw/2
ry1 = ry-rh/2
ry2 = ry+rh/2
rec_r = tf.stack([rx1, rx2, ry1, ry2], axis=1)
px = tf.gather(mt_p_4, 0, axis=1)
py = tf.gather(mt_p_4, 1, axis=1)
log_pw = tf.gather(mt_p_4, 2, axis=1)
log_ph = tf.gather(mt_p_4, 3, axis=1)
pw = exp(log_pw)
ph = exp(log_ph)
px1 = px-pw/2
px2 = px+pw/2
py1 = py-ph/2
py2 = py+ph/2
rec_p = tf.stack([px1, px2, py1, py2], axis=1)
rowNum = tf.shape(rec_r)[0]
i = 0
iou_tot = 0.0
def add_i(i, rowNum, iou_tot):
return [tf.add(i,1), rowNum, tf.add(iou_tot, iou(rec_r[i], rec_p[i])) ]
def c(i, rowNum, iou_tot):
return tf.less(i,rowNum)
i, rowNum, iou_tot = tf.while_loop(c, add_i, [i, rowNum, iou_tot])
rowNum = tf.cast(rowNum, tf.float32)
return iou_tot/rowNum
def unmasked_IoUV2(t_r, t_p):
# IoU for Fast R-CNN
mask = ~tf.math.is_nan(t_r)
mt_r = tf.boolean_mask(t_r, mask=mask)
mt_p = tf.boolean_mask(t_p, mask=mask)
    mt_r_4 = tf.reshape(mt_r, [tf.size(mt_r) // 4, 4])  # integer division, as above
    mt_p_4 = tf.reshape(mt_p, [tf.size(mt_p) // 4, 4])
rx = tf.gather(mt_r_4, 0, axis=1)
ry = tf.gather(mt_r_4, 1, axis=1)
rw = tf.gather(mt_r_4, 2, axis=1)
rh = tf.gather(mt_r_4, 3, axis=1)
rx1 = rx
rx2 = rx+rw
ry1 = ry-rh
ry2 = ry
rec_r = tf.stack([rx1, rx2, ry1, ry2], axis=1)
px = tf.gather(mt_p_4, 0, axis=1)
py = tf.gather(mt_p_4, 1, axis=1)
pw = tf.gather(mt_p_4, 2, axis=1)
ph = tf.gather(mt_p_4, 3, axis=1)
px1 = px
px2 = px+pw
py1 = py-ph
py2 = py
rec_p = tf.stack([px1, px2, py1, py2], axis=1)
rowNum = tf.shape(rec_r)[0]
i = 0
iou_tot = 0.0
def add_i(i, rowNum, iou_tot):
iou_val = iou(rec_r[i], rec_p[i])
return [tf.add(i,1), rowNum, tf.add(iou_tot, iou_val) ]
def c(i, rowNum, iou_tot):
return tf.less(i,rowNum)
i, rowNum, iou_tot = tf.while_loop(c, add_i, [i, rowNum, iou_tot])
rowNum = tf.cast(rowNum, tf.float32)
return iou_tot/rowNum
# hit purity: real major/predicted major
def hit_purity(y_r, y_p):
predict_major_mask = tf.argmax(y_p, axis=3, output_type=tf.int32)==2
y_predict_major = tf.boolean_mask(y_p, predict_major_mask)
y_real_selected = tf.boolean_mask(y_r, predict_major_mask)
binary_purity = categorical_accuracy(y_real_selected, y_predict_major)
binary_sum = tf.math.reduce_sum(binary_purity)
N = tf.cast(tf.size(binary_purity), tf.float32)
N = tf.math.maximum(N,1.0)
purity = binary_sum/N*100
return purity
# hit efficiency: real major/all major
def hit_efficiency(y_r, y_p):
real_major_mask = y_r[:,:,:,2]==1
y_real_major = tf.boolean_mask(y_r, real_major_mask)
y_predict_selected = tf.boolean_mask(y_p, real_major_mask)
binary_purity = categorical_accuracy(y_real_major, y_predict_selected)
binary_sum = tf.math.reduce_sum(binary_purity)
N = tf.cast(tf.size(binary_purity), tf.float32)
N = tf.math.maximum(N,1.0)
efficiency = binary_sum/N*100
return efficiency
def positive_number(p_r, p_p):
positive_truth = tf.math.greater(p_p, 0.5)
pos_num = tf.reduce_sum(tf.cast(positive_truth, tf.float32))
batch_size = tf.shape(p_r)[0]
batch_size = tf.cast(batch_size, tf.float32)
mask = ~tf.math.is_nan(p_r)
mp_r = tf.boolean_mask(p_r, mask=mask)
# print_tensor(tf.reduce_sum(tf.cast(tf.math.greater(p_r,0.5), tf.float32)), message='pos')
# print_tensor(tf.reduce_sum(tf.cast(tf.math.less(p_r,0.5), tf.float32)), message='neg')
# print_tensor(batch_size, 'batch_size: ')
# print_tensor(tf.reduce_sum(mp_r)/batch_size, 'sampled positive: ')
# print_tensor(pos_num, 'total positive number: ')
return pos_num/batch_size # denominator is batch size
| avg_line_length: 29.107692 | max_line_length: 95 | alphanum_fraction: 0.661734 |
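A small check of the iou helper above (the import path is illustrative; adjust it to wherever Metric.py sits on your path). Rectangles are (xmin, xmax, ymin, ymax):

```python
import tensorflow as tf

from Metric import iou  # illustrative import of the module above

rec_a = tf.constant([0.0, 2.0, 0.0, 2.0])  # 2x2 box anchored at the origin
rec_b = tf.constant([1.0, 3.0, 1.0, 3.0])  # 2x2 box shifted by (1, 1)

# Overlap area is 1 and the union is 4 + 4 - 1 = 7, so IoU should be about 0.1429.
print(float(iou(rec_a, rec_b)))
```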
794bb8e30c46ec5c32f0a9463b9253fb2abf73d2 | size: 1,144 | ext: py | lang: Python
| max_stars: aliyun-python-sdk-kms/aliyunsdkkms/request/v20160120/DescribeKeyRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | ["Apache-2.0"] | null | null | null
| max_issues: aliyun-python-sdk-kms/aliyunsdkkms/request/v20160120/DescribeKeyRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | ["Apache-2.0"] | null | null | null
| max_forks: aliyun-python-sdk-kms/aliyunsdkkms/request/v20160120/DescribeKeyRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | ["Apache-2.0"] | null | null | null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeKeyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Kms', '2016-01-20', 'DescribeKey','kms')
self.set_protocol_type('https');
def get_KeyId(self):
return self.get_query_params().get('KeyId')
def set_KeyId(self,KeyId):
self.add_query_param('KeyId',KeyId)
| avg_line_length: 36.903226 | max_line_length: 70 | alphanum_fraction: 0.758741 |
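A minimal usage sketch for the request class above, assuming aliyun-python-sdk-core and aliyun-python-sdk-kms are installed; the credentials, region, and key ID are placeholders:

```python
from aliyunsdkcore.client import AcsClient
from aliyunsdkkms.request.v20160120.DescribeKeyRequest import DescribeKeyRequest

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = DescribeKeyRequest()
request.set_KeyId("<your-kms-key-id>")

# Returns the raw JSON response body as bytes.
response = client.do_action_with_exception(request)
print(response)
```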
794bbbc8832857f6ce2a94dcd41cacc777edef6a | size: 2,219 | ext: py | lang: Python
| max_stars: Assignment4/src/lr-mnist.py | haniamatera/cds-visual_new | 91c52cb607c839e6b23881aca047bda7c9888b29 | ["MIT"] | null | null | null
| max_issues: Assignment4/src/lr-mnist.py | haniamatera/cds-visual_new | 91c52cb607c839e6b23881aca047bda7c9888b29 | ["MIT"] | null | null | null
| max_forks: Assignment4/src/lr-mnist.py | haniamatera/cds-visual_new | 91c52cb607c839e6b23881aca047bda7c9888b29 | ["MIT"] | null | null | null
#____Assignment4_____#
#building logistic regression classifier
#importing necessary packages
import os
import sys
import argparse
sys.path.append(os.path.join(".."))
# Import teaching utils
import numpy as np
import utils.classifier_utils as clf_util
# Import sklearn metrics
from sklearn import metrics
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
#defining main function
def main(args):
    #assigning test_size to the test size specified in the command line
test_size = args["test_size"]
#fetching data
X, y = fetch_openml('mnist_784', version=1, return_X_y = True)
#x is a list of pixels and y is a label
#changing x and y to arrays
X = np.array(X)
y = np.array(y)
#predefining classes
classes = sorted(set(y))
nclasses = len(classes)
#creating a training data set
X_train, X_test, y_train, y_test = train_test_split(X,
y,
random_state = 9,
test_size = test_size)
#scaling the features
X_train_scaled = X_train/255.0
X_test_scaled = X_test/255.0
#training the model
clf = LogisticRegression(penalty='none',
tol=0.1,
solver='saga',
multi_class='multinomial').fit(X_train_scaled, y_train)
y_pred = clf.predict(X_test_scaled)
#predicting the accuracy
accuracy = accuracy_score(y_test, y_pred)
    #classification report (precision, recall and F1 per class)
    cm = metrics.classification_report(y_test, y_pred)
    print(cm) #printing the classification report in the terminal
if __name__=="__main__":
#creating a parser
ap = argparse.ArgumentParser(description = "Training a model")
    #argument -t for specifying the test size
ap.add_argument("-t","--test_size", help ="please provide a desired data test size", default= 0.25, type = float)
args = vars(ap.parse_args())
main(args)
| avg_line_length: 26.105882 | max_line_length: 117 | alphanum_fraction: 0.623704 |
794bbc0db73a1729cb2327a988be13bb335c218c | size: 209 | ext: py | lang: Python
| max_stars: giico/giico/doctype/downhole_test/test_downhole_test.py | thispl/giico | 14c5631639ab56a586a7962be9871d722c20e205 | ["MIT"] | null | null | null
| max_issues: giico/giico/doctype/downhole_test/test_downhole_test.py | thispl/giico | 14c5631639ab56a586a7962be9871d722c20e205 | ["MIT"] | null | null | null
| max_forks: giico/giico/doctype/downhole_test/test_downhole_test.py | thispl/giico | 14c5631639ab56a586a7962be9871d722c20e205 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestDownholeTest(unittest.TestCase):
pass
| avg_line_length: 19 | max_line_length: 43 | alphanum_fraction: 0.760766 |
794bbc44ceab57ac9e99dfb2d8c734893ace0212 | size: 10,355 | ext: py | lang: Python
| max_stars: mpunet/models/unet.py | sandeepsinghsengar/MPUNet2Plus | fd97800cd349ee47d2c9cce1851a332dcbcb047c | ["MIT"] | null | null | null
| max_issues: mpunet/models/unet.py | sandeepsinghsengar/MPUNet2Plus | fd97800cd349ee47d2c9cce1851a332dcbcb047c | ["MIT"] | null | null | null
| max_forks: mpunet/models/unet.py | sandeepsinghsengar/MPUNet2Plus | fd97800cd349ee47d2c9cce1851a332dcbcb047c | ["MIT"] | null | null | null
"""
Mathias Perslev
MSc Bioinformatics
University of Copenhagen
November 2017
"""
from mpunet.logging import ScreenLogger
from mpunet.utils.conv_arithmetics import compute_receptive_fields
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input, BatchNormalization, Cropping2D, \
Concatenate, Conv2D, MaxPooling2D, \
UpSampling2D, Reshape
import numpy as np
class UNet(Model):
"""
2D UNet implementation with batch normalization and complexity factor adj.
See original paper at http://arxiv.org/abs/1505.04597
"""
def __init__(self,
n_classes,
img_rows=None,
img_cols=None,
dim=None,
n_channels=1,
depth=4,
out_activation="softmax",
activation="relu",
kernel_size=3,
padding="same",
complexity_factor=1,
flatten_output=False,
l2_reg=None,
logger=None,
**kwargs):
"""
n_classes (int):
The number of classes to model, gives the number of filters in the
final 1x1 conv layer.
img_rows, img_cols (int, int):
Image dimensions. Note that depending on image dims cropping may
be necessary. To avoid this, use image dimensions DxD for which
D * (1/2)^n is an integer, where n is the number of (2x2)
max-pooling layers; in this implementation 4.
For n=4, D \in {..., 192, 208, 224, 240, 256, ...} etc.
dim (int):
img_rows and img_cols will both be set to 'dim'
n_channels (int):
Number of channels in the input image.
depth (int):
Number of conv blocks in encoding layer (number of 2x2 max pools)
Note: each block doubles the filter count while halving the spatial
dimensions of the features.
out_activation (string):
Activation function of output 1x1 conv layer. Usually one of
'softmax', 'sigmoid' or 'linear'.
activation (string):
Activation function for convolution layers
kernel_size (int):
Kernel size for convolution layers
padding (string):
Padding type ('same' or 'valid')
complexity_factor (int/float):
Use int(N * sqrt(complexity_factor)) number of filters in each
2D convolution layer instead of default N.
flatten_output (bool):
Flatten the output to array of shape [batch_size, -1, n_classes]
l2_reg (float in [0, 1])
L2 regularization on Conv2D weights
logger (mpunet.logging.Logger | ScreenLogger):
MutliViewUNet.Logger object, logging to files or screen.
"""
super(UNet, self).__init__()
# Set logger or standard print wrapper
self.logger = logger or ScreenLogger()
if not ((img_rows and img_cols) or dim):
raise ValueError("Must specify either img_rows and img_col or dim")
if dim:
img_rows, img_cols = dim, dim
# Set various attributes
self.img_shape = (img_rows, img_cols, n_channels)
self.n_classes = n_classes
self.cf = np.sqrt(complexity_factor)
self.kernel_size = kernel_size
self.activation = activation
self.out_activation = out_activation
self.l2_reg = l2_reg
self.padding = padding
self.depth = depth
self.flatten_output = flatten_output
# Shows the number of pixels cropped of the input image to the output
self.label_crop = np.array([[0, 0], [0, 0]])
# Build model and init base keras Model class
super().__init__(*self.init_model())
# Compute receptive field
names = [x.__class__.__name__ for x in self.layers]
index = names.index("UpSampling2D")
self.receptive_field = compute_receptive_fields(self.layers[:index])[-1][-1]
# Log the model definition
self.log()
def _create_encoder(self, in_, init_filters, kernel_reg=None,
name="encoder"):
filters = init_filters
residual_connections = []
for i in range(self.depth):
l_name = name + "_L%i" % i
conv = Conv2D(int(filters * self.cf), self.kernel_size,
activation=self.activation, padding=self.padding,
kernel_regularizer=kernel_reg,
name=l_name + "_conv1")(in_)
conv = Conv2D(int(filters * self.cf), self.kernel_size,
activation=self.activation, padding=self.padding,
kernel_regularizer=kernel_reg,
name=l_name + "_conv2")(conv)
bn = BatchNormalization(name=l_name + "_BN")(conv)
in_ = MaxPooling2D(pool_size=(2, 2), name=l_name + "_pool")(bn)
# Update filter count and add bn layer to list for residual conn.
filters *= 2
residual_connections.append(bn)
return in_, residual_connections, filters
def _create_bottom(self, in_, filters, kernel_reg=None, name="bottom"):
conv = Conv2D(int(filters * self.cf), self.kernel_size,
activation=self.activation, padding=self.padding,
kernel_regularizer=kernel_reg,
name=name + "_conv1")(in_)
conv = Conv2D(int(filters * self.cf), self.kernel_size,
activation=self.activation, padding=self.padding,
kernel_regularizer=kernel_reg,
name=name + "_conv2")(conv)
bn = BatchNormalization(name=name + "_BN")(conv)
return bn
def _create_upsample(self, in_, res_conns, filters, kernel_reg=None,
name="upsample"):
residual_connections = res_conns[::-1]
for i in range(self.depth):
l_name = name + "_L%i" % i
# Reduce filter count
filters /= 2
# Up-sampling block
# Note: 2x2 filters used for backward comp, but you probably
# want to use 3x3 here instead.
up = UpSampling2D(size=(2, 2), name=l_name + "_up")(in_)
conv = Conv2D(int(filters * self.cf), 2,
activation=self.activation,
padding=self.padding, kernel_regularizer=kernel_reg,
name=l_name + "_conv1")(up)
bn = BatchNormalization(name=l_name + "_BN1")(conv)
# Crop and concatenate
cropped_res = self.crop_nodes_to_match(residual_connections[i], bn)
merge = Concatenate(axis=-1,
name=l_name + "_concat")([cropped_res, bn])
conv = Conv2D(int(filters * self.cf), self.kernel_size,
activation=self.activation, padding=self.padding,
kernel_regularizer=kernel_reg,
name=l_name + "_conv2")(merge)
conv = Conv2D(int(filters * self.cf), self.kernel_size,
activation=self.activation, padding=self.padding,
kernel_regularizer=kernel_reg,
name=l_name + "_conv3")(conv)
in_ = BatchNormalization(name=l_name + "_BN2")(conv)
return in_
def init_model(self):
"""
Build the UNet model with the specified input image shape.
"""
inputs = Input(shape=self.img_shape)
# Apply regularization if not None or 0
kr = regularizers.l2(self.l2_reg) if self.l2_reg else None
"""
Encoding path
"""
in_, residual_cons, filters = self._create_encoder(in_=inputs,
init_filters=64,
kernel_reg=kr)
"""
Bottom (no max-pool)
"""
bn = self._create_bottom(in_, filters, kr)
"""
Up-sampling
"""
bn = self._create_upsample(bn, residual_cons, filters, kr)
"""
Output modeling layer
"""
out = Conv2D(self.n_classes, 1, activation=self.out_activation)(bn)
if self.flatten_output:
out = Reshape([self.img_shape[0]*self.img_shape[1],
self.n_classes], name='flatten_output')(out)
print('shapes of input and out are', inputs.shape, out.shape)
return [inputs], [out]
def crop_nodes_to_match(self, node1, node2):
"""
If necessary, applies Cropping2D layer to node1 to match shape of node2
"""
s1 = np.array(node1.get_shape().as_list())[1:-1]
s2 = np.array(node2.get_shape().as_list())[1:-1]
if np.any(s1 != s2):
            c = (s1 - s2).astype(int)  # np.int was removed in newer NumPy releases
cr = np.array([c//2, c//2]).T
cr[:, 1] += c % 2
cropped_node1 = Cropping2D(cr)(node1)
self.label_crop += cr
else:
cropped_node1 = node1
return cropped_node1
def log(self):
self.logger("UNet Model Summary\n------------------")
self.logger("Image rows: %i" % self.img_shape[0])
self.logger("Image cols: %i" % self.img_shape[1])
self.logger("Image channels: %i" % self.img_shape[2])
self.logger("N classes: %i" % self.n_classes)
self.logger("CF factor: %.3f" % self.cf**2)
self.logger("Depth: %i" % self.depth)
self.logger("l2 reg: %s" % self.l2_reg)
self.logger("Padding: %s" % self.padding)
self.logger("Conv activation: %s" % self.activation)
self.logger("Out activation: %s" % self.out_activation)
self.logger("Receptive field: %s" % self.receptive_field)
self.logger("N params: %i" % self.count_params())
self.logger("Output: %s" % self.output)
self.logger("Crop: %s" % (self.label_crop if np.sum(self.label_crop) != 0 else "None"))
| avg_line_length: 41.25498 | max_line_length: 108 | alphanum_fraction: 0.555577 |
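A minimal construction sketch for the UNet class above, assuming the mpunet package and its TensorFlow/Keras dependencies are installed; the hyperparameters are arbitrary:

```python
from mpunet.models.unet import UNet

# A 2-class model for 256x256 single-channel inputs (arbitrary choices).
model = UNet(n_classes=2, dim=256, n_channels=1, depth=4, complexity_factor=1)
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()
```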
794bbcb1eaaf6bc339ecb8d76aa7af0261265622 | size: 3,461 | ext: py | lang: Python
| max_stars: statick_tool/plugins/reporting/json_reporting_plugin.py | xydesa/statick | 22d863306bfd6fe411758e5807ae036679a309c5 | ["CC0-1.0"] | 1 | 2022-01-25T16:08:45.000Z | 2022-01-25T16:08:45.000Z
| max_issues: statick_tool/plugins/reporting/json_reporting_plugin.py | xydesa/statick | 22d863306bfd6fe411758e5807ae036679a309c5 | ["CC0-1.0"] | 1 | 2020-05-06T01:41:35.000Z | 2020-05-06T01:41:35.000Z
| max_forks: statick_tool/plugins/reporting/json_reporting_plugin.py | jhdcs/statick | 121ab511f206967c587d70d88217ae23db84726d | ["CC0-1.0"] | null | null | null
"""Prints the Statick reports out to the terminal or file in JSON format."""
import json
import logging
import os
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple
from statick_tool.issue import Issue
from statick_tool.package import Package
from statick_tool.reporting_plugin import ReportingPlugin
class JsonReportingPlugin(ReportingPlugin):
"""Prints the Statick reports out to the terminal or file in JSON format."""
def get_name(self) -> str:
"""Return the plugin name."""
return "json"
def report(
self, package: Package, issues: Dict[str, List[Issue]], level: str
) -> Tuple[Optional[None], bool]:
"""Go through the issues list and print them in JSON format.
Args:
package (:obj:`Package`): The Package object that was analyzed.
issues (:obj:`dict` of :obj:`str` to :obj:`Issue`): The issues
found by the Statick analysis, keyed by the tool that found
them.
level: (:obj:`str`): Name of the level used in the scan.
"""
if not self.plugin_context or not self.plugin_context.config:
return None, False
file_output = False
terminal_output = False
file_output_str = self.plugin_context.config.get_reporting_config(
self.get_name(), level, "files"
)
if file_output_str and file_output_str.lower() == "true":
file_output = True
terminal_output_str = self.plugin_context.config.get_reporting_config(
self.get_name(), level, "terminal"
)
if terminal_output_str and terminal_output_str.lower() == "true":
terminal_output = True
all_issues = []
for _, value in issues.items():
for issue in value:
issue_dict = OrderedDict()
issue_dict["fileName"] = issue.filename
issue_dict["lineNumber"] = issue.line_number
issue_dict["tool"] = issue.tool
issue_dict["type"] = issue.issue_type
issue_dict["severity"] = issue.severity
issue_dict["message"] = issue.message
issue_dict["certReference"] = ""
if issue.cert_reference:
issue_dict["certReference"] = issue.cert_reference
all_issues.append(issue_dict)
report_json = {"issues": all_issues}
line = json.dumps(report_json)
if file_output:
if not self.write_output(package, level, line):
return None, False
if terminal_output:
print(line)
return None, True
def write_output(self, package: Package, level: str, line: str) -> bool:
"""Write JSON output to a file."""
if not self.plugin_context:
return False
output_dir = os.path.join(
self.plugin_context.args.output_directory, package.name + "-" + level
)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.isdir(output_dir):
logging.error("Unable to create output directory at %s!", output_dir)
return False
output_file = os.path.join(output_dir, package.name + "-" + level + ".json")
logging.info("Writing output to %s", output_file)
with open(output_file, "w") as out:
out.write(line)
return True
| avg_line_length: 37.215054 | max_line_length: 84 | alphanum_fraction: 0.605894 |
794bbd750be049a9bf66d2174d199b24017ef01f | size: 1,580 | ext: py | lang: Python
| max_stars: new_representation.py | dsoumis/NeuralNetworks_Intro | 9f11600faf867dc6b137848e66f391a9564e137d | ["MIT"] | null | null | null
| max_issues: new_representation.py | dsoumis/NeuralNetworks_Intro | 9f11600faf867dc6b137848e66f391a9564e137d | ["MIT"] | null | null | null
| max_forks: new_representation.py | dsoumis/NeuralNetworks_Intro | 9f11600faf867dc6b137848e66f391a9564e137d | ["MIT"] | null | null | null
import sys
import keras
import numpy
import pandas
from keras import layers
from keras.models import load_model
def assign_values_from_arguments():
inp = ""
mod = ""
if len(sys.argv) != 5:
print("Please re-run with correct arguments.")
sys.exit()
for i in range(len(sys.argv)):
if sys.argv[i] == "-i":
inp = sys.argv[i+1]
elif sys.argv[i] == "-m":
mod = sys.argv[i + 1]
return inp, mod
if __name__ == '__main__':
"""
    The main function called when new_representation.py is run from the command line
>Execute: python3.7 new_representation.py -i path_to:nn_representations.csv -m path_to:WindDenseNN.h5
"""
inputFile, inputModel = assign_values_from_arguments()
weights = load_model(inputModel).layers[0].get_weights()
# If we uncomment the following line, we notice that relu is used as activation to this layer
# print(load_model(inputModel).layers[0].output)
model = keras.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(128,)))
model.set_weights(weights)
data = pandas.read_csv(inputFile, header=None)
timestamps = data.loc[:, 0]
for index, each in enumerate(timestamps):
timestamps[index] = each.replace(" ", ",")
data = data.drop(data.columns[0], axis=1)
# model.summary()
result = numpy.c_[timestamps.to_numpy(), model.predict(data, batch_size=32)]
beginning_of_file = 'vectors'
numpy.savetxt("new_representation.csv", result, delimiter="\t", fmt='%s', header=beginning_of_file, comments='')
| avg_line_length: 30.980392 | max_line_length: 116 | alphanum_fraction: 0.662658 |
794bbd99359b99748bf7d022cc0145eb5cb1d88b | size: 1,087 | ext: py | lang: Python
| max_stars: websites/__init__.py | briangmaddox/NewsArticleExtractor | 2225392cfb5500477a3dfd2602100c8d41d83432 | ["MIT"] | 1 | 2021-07-29T16:04:40.000Z | 2021-07-29T16:04:40.000Z
| max_issues: websites/__init__.py | briangmaddox/NewsArticleExtractor | 2225392cfb5500477a3dfd2602100c8d41d83432 | ["MIT"] | null | null | null
| max_forks: websites/__init__.py | briangmaddox/NewsArticleExtractor | 2225392cfb5500477a3dfd2602100c8d41d83432 | ["MIT"] | null | null | null
from importlib import import_module
from .ExtractedArticle import ExtractedArticle
from .WebsiteBase import WebsiteBase
import multiprocessing
import configparser
# **********************************************************************************************************************
def WebsiteFactory(inURL: tuple, inQueue: multiprocessing.Queue, inConfigObject: configparser.ConfigParser) -> WebsiteBase:
"""
Factory class method to load and return a module that handles the passed-in string
"""
if not inURL or not inQueue or not inConfigObject:
return None
# Instance object to return
instance = None
try:
# Pull the URL and class name from the tuple
url, classname = inURL
# Load the module
classmodule = import_module("." + classname, package="websites")
tempclass = getattr(classmodule, classname)
instance = tempclass(inQueue, inConfigObject, url)
return instance
except Exception as e:
raise ImportError("The URL {} cannot be handled at this time.".format(inURL))
| avg_line_length: 31.970588 | max_line_length: 123 | alphanum_fraction: 0.628335 |
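A minimal usage sketch for the factory above; the URL, the handler class name ExampleSite, and the websites/ExampleSite.py module it would load are all hypothetical:

```python
import configparser
import multiprocessing

from websites import WebsiteFactory

config = configparser.ConfigParser()
queue = multiprocessing.Queue()

# The tuple pairs the article URL with the name of the handler class/module.
handler = WebsiteFactory(("https://example.com/news/story", "ExampleSite"), queue, config)
```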
794bbf0b07fe0f89418ac2847dcffe40886e4c09 | size: 3,693 | ext: py | lang: Python
| max_stars: utils/generate_test_splits.py | bertop89/fnc-1-baseline | c15343b1563a137a293e3b37c57088d09e370365 | ["Apache-2.0"] | 1 | 2021-03-06T05:18:27.000Z | 2021-03-06T05:18:27.000Z
| max_issues: utils/generate_test_splits.py | bertop89/fnc-1-baseline | c15343b1563a137a293e3b37c57088d09e370365 | ["Apache-2.0"] | null | null | null
| max_forks: utils/generate_test_splits.py | bertop89/fnc-1-baseline | c15343b1563a137a293e3b37c57088d09e370365 | ["Apache-2.0"] | null | null | null
import random
import os
from collections import defaultdict
import numpy as np
from random import randint
def generate_hold_out_split (dataset, training = 0.8, base_dir="splits"):
r = random.Random()
r.seed(1489215)
article_ids = list(dataset.articles.keys()) # get a list of article ids
r.shuffle(article_ids) # and shuffle that list
amount = len(article_ids)
training_ids = article_ids[:int(training * amount)]
hold_out_ids = article_ids[int(training * amount):]
# write the split body ids out to files for future use
with open(base_dir+ "/"+ "training_ids.txt", "w+") as f:
f.write("\n".join([str(id) for id in training_ids]))
with open(base_dir+ "/"+ "hold_out_ids.txt", "w+") as f:
f.write("\n".join([str(id) for id in hold_out_ids]))
def read_ids(file,base):
ids = []
with open(base+"/"+file,"r") as f:
for line in f:
ids.append(int(line))
return ids
def kfold_split(dataset, training = 0.8, n_folds = 10, base_dir="splits"):
if not (os.path.exists(base_dir+ "/"+ "training_ids.txt")
and os.path.exists(base_dir+ "/"+ "hold_out_ids.txt")):
generate_hold_out_split(dataset,training,base_dir)
training_ids = read_ids("training_ids.txt", base_dir)
hold_out_ids = read_ids("hold_out_ids.txt", base_dir)
folds = []
for k in range(n_folds):
folds.append(training_ids[int(k*len(training_ids)/n_folds):int((k+1)*len(training_ids)/n_folds)])
return folds,hold_out_ids
def get_stances_for_folds(dataset,folds,hold_out):
stances_folds = defaultdict(list)
stances_hold_out = []
for stance in dataset.stances:
if stance['Body ID'] in hold_out:
stances_hold_out.append(stance)
else:
fold_id = 0
for fold in folds:
if stance['Body ID'] in fold:
stances_folds[fold_id].append(stance)
fold_id += 1
return stances_folds,stances_hold_out
def load_train_nn(Xnn,ynn,train_i):
if not os.path.isfile('features/nn/headlines.npy'):
train_headlines = np.vstack([np.expand_dims(x, 0) for x in Xnn[:train_i,0]])
train_bodies = np.vstack([np.expand_dims(x, 0) for x in Xnn[:train_i,1]])
train_labels = np.vstack([np.expand_dims(x, 0) for x in ynn[:train_i]])
agree_idx = []
while len(agree_idx) < 3321:
idx = randint(0,train_headlines.shape[0]-1)
if (train_labels[idx] == [1,0,0]).all():
agree_idx.append(idx)
disagree_idx = []
while len(disagree_idx) < 5181:
idx = randint(0,train_headlines.shape[0]-1)
if (train_labels[idx] == [0,1,0]).all():
disagree_idx.append(idx)
for i in agree_idx:
train_headlines = np.append(train_headlines, train_headlines[i].reshape(1,20), axis=0)
train_bodies = np.append(train_bodies, train_bodies[i].reshape(1,200), axis=0)
train_labels = np.append(train_labels, train_labels[i].reshape(1,3), axis=0)
for i in disagree_idx:
train_headlines = np.append(train_headlines, train_headlines[i].reshape(1,20), axis=0)
train_bodies = np.append(train_bodies, train_bodies[i].reshape(1,200), axis=0)
train_labels = np.append(train_labels, train_labels[i].reshape(1,3), axis=0)
np.save('features/nn/headlines.npy', train_headlines)
np.save('features/nn/bodies.npy', train_bodies)
np.save('features/nn/labels.npy', train_labels)
return np.load('features/nn/headlines.npy'), np.load('features/nn/bodies.npy'), np.load('features/nn/labels.npy')
| avg_line_length: 37.30303 | max_line_length: 117 | alphanum_fraction: 0.636068 |
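A minimal usage sketch for the split helpers above, using a stand-in dataset object; the real code expects the FNC-1 DataSet class, whose articles dict and stances list are mimicked here, and the import path mirrors the file's location:

```python
import os

from utils.generate_test_splits import kfold_split, get_stances_for_folds

class FakeDataset:
    """Stand-in exposing the two attributes the split helpers rely on."""
    def __init__(self):
        self.articles = {i: "article body %d" % i for i in range(100)}
        self.stances = [{"Body ID": i % 100, "Stance": "unrelated"} for i in range(500)]

os.makedirs("splits", exist_ok=True)  # the helpers write the id files here
dataset = FakeDataset()

folds, hold_out_ids = kfold_split(dataset, training=0.8, n_folds=10, base_dir="splits")
stances_folds, stances_hold_out = get_stances_for_folds(dataset, folds, hold_out_ids)
print(len(folds), len(stances_hold_out))
```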
794bbf8c1d30033d6e60c0b339503ab5ad0c3622 | size: 2,241 | ext: py | lang: Python
| max_stars: listings/choises.py | MalYou/btre | 0959dabe4f6790c7f6274662e3886cb149b85941 | ["MIT"] | null | null | null
| max_issues: listings/choises.py | MalYou/btre | 0959dabe4f6790c7f6274662e3886cb149b85941 | ["MIT"] | null | null | null
| max_forks: listings/choises.py | MalYou/btre | 0959dabe4f6790c7f6274662e3886cb149b85941 | ["MIT"] | null | null | null
bedroom_choices = {
'1':1,
'2':2,
'3':3,
'4':4,
'5':5,
'6':6,
'7':7,
'8':8,
'9':9,
'10':10
}
price_choices = {
'100000':'$100,000',
'200000':'$200,000',
'300000':'$300,000',
'400000':'$400,000',
'500000':'$500,000',
'600000':'$600,000',
'700000':'$700,000',
'800000':'$800,000',
'900000':'$900,000',
'1000000':'$1M+',
}
state_choices = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
| avg_line_length: 26.364706 | max_line_length: 45 | alphanum_fraction: 0.354306 |
794bc2060be7f738b456c5638e741ea22e6fc7d5 | size: 9,376 | ext: py | lang: Python
| max_stars: test/box-py/iproto.test.py | ocelot-inc/tarantool | 47fdd0edf29d79b8a0d33a8d0716146e31bac644 | ["BSD-2-Clause"] | 1 | 2018-10-01T04:54:39.000Z | 2018-10-01T04:54:39.000Z
| max_issues: test/box-py/iproto.test.py | ocelot-inc/tarantool | 47fdd0edf29d79b8a0d33a8d0716146e31bac644 | ["BSD-2-Clause"] | null | null | null
| max_forks: test/box-py/iproto.test.py | ocelot-inc/tarantool | 47fdd0edf29d79b8a0d33a8d0716146e31bac644 | ["BSD-2-Clause"] | null | null | null
import os
import sys
import struct
import socket
import msgpack
from tarantool.const import *
from tarantool import Connection
from tarantool.request import Request, RequestInsert, RequestSelect
from tarantool.response import Response
from lib.tarantool_connection import TarantoolConnection
admin("box.schema.user.grant('guest', 'read,write,execute', 'universe')")
print """
#
# iproto packages test
#
"""
# opening a new connection to tarantool/box
conn = TarantoolConnection(server.iproto.host, server.iproto.port)
conn.connect()
s = conn.socket
print """
# Test bug #899343 (server assertion failure on incorrect packet)
"""
print "# send the package with invalid length"
invalid_request = struct.pack('<LLL', 1, 4294967290, 1)
print s.send(invalid_request)
print "# check that is server alive"
print iproto.py_con.ping() > 0
# closing connection
s.close()
key_names = {}
for (k,v) in globals().items():
if type(k) == str and k.startswith('IPROTO_') and type(v) == int:
key_names[v] = k
def repr_dict(todump):
d = {}
for (k, v) in todump.items():
k_name = key_names.get(k, k)
d[k_name] = v
return repr(d)
def test(header, body):
# Connect and authenticate
c = Connection('localhost', server.iproto.port)
c.connect()
print 'query', repr_dict(header), repr_dict(body)
header = msgpack.dumps(header)
body = msgpack.dumps(body)
query = msgpack.dumps(len(header) + len(body)) + header + body
    # Send a raw request using the connected socket
s = c._socket
try:
s.send(query)
except OSError as e:
print ' => ', 'Failed to send request'
c.close()
print iproto.py_con.ping() > 0
print """
# Test gh-206 "Segfault if sending IPROTO package without `KEY` field"
"""
print "IPROTO_SELECT"
test({ IPROTO_CODE : REQUEST_TYPE_SELECT }, { IPROTO_SPACE_ID: 280 })
print "\n"
print "IPROTO_DELETE"
test({ IPROTO_CODE : REQUEST_TYPE_DELETE }, { IPROTO_SPACE_ID: 280 })
print "\n"
print "IPROTO_UPDATE"
test({ IPROTO_CODE : REQUEST_TYPE_UPDATE }, { IPROTO_SPACE_ID: 280 })
test({ IPROTO_CODE : REQUEST_TYPE_UPDATE },
{ IPROTO_SPACE_ID: 280, IPROTO_KEY: (1, )})
print "\n"
print "IPROTO_REPLACE"
test({ IPROTO_CODE : REQUEST_TYPE_REPLACE }, { IPROTO_SPACE_ID: 280 })
print "\n"
print "IPROTO_CALL"
test({ IPROTO_CODE : REQUEST_TYPE_CALL }, {})
test({ IPROTO_CODE : REQUEST_TYPE_CALL }, { IPROTO_KEY: ('procname', )})
print "\n"
# gh-434 Tarantool crashes on multiple iproto requests with WAL enabled
admin("box.cfg.wal_mode")
admin("space = box.schema.space.create('test', { id = 567 })")
admin("index = space:create_index('primary', { type = 'hash' })")
admin("box.schema.user.grant('guest', 'read,write,execute', 'space', 'test')")
c = Connection('localhost', server.iproto.port)
c.connect()
request1 = RequestInsert(c, 567, [1, "baobab"])
request2 = RequestInsert(c, 567, [2, "obbaba"])
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print ' => ', 'Failed to send request'
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print response1.__str__()
print response2.__str__()
request1 = RequestInsert(c, 567, [3, "occama"])
request2 = RequestSelect(c, 567, 0, [1], 0, 1, 0)
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print ' => ', 'Failed to send request'
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print response1.__str__()
print response2.__str__()
request1 = RequestSelect(c, 567, 0, [2], 0, 1, 0)
request2 = RequestInsert(c, 567, [4, "ockham"])
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print ' => ', 'Failed to send request'
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print response1.__str__()
print response2.__str__()
request1 = RequestSelect(c, 567, 0, [1], 0, 1, 0)
request2 = RequestSelect(c, 567, 0, [2], 0, 1, 0)
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print ' => ', 'Failed to send request'
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print response1.__str__()
print response2.__str__()
c.close()
admin("space:drop()")
#
# gh-522: Broken compatibility with msgpack-python for strings of size 33..255
#
admin("space = box.schema.space.create('test')")
admin("index = space:create_index('primary', { type = 'hash', parts = {1, 'str'}})")
class RawInsert(Request):
request_type = REQUEST_TYPE_INSERT
def __init__(self, conn, space_no, blob):
super(RawInsert, self).__init__(conn)
request_body = "\x82" + msgpack.dumps(IPROTO_SPACE_ID) + \
msgpack.dumps(space_id) + msgpack.dumps(IPROTO_TUPLE) + blob
self._bytes = self.header(len(request_body)) + request_body
class RawSelect(Request):
request_type = REQUEST_TYPE_SELECT
def __init__(self, conn, space_no, blob):
super(RawSelect, self).__init__(conn)
request_body = "\x83" + msgpack.dumps(IPROTO_SPACE_ID) + \
msgpack.dumps(space_id) + msgpack.dumps(IPROTO_KEY) + blob + \
msgpack.dumps(IPROTO_LIMIT) + msgpack.dumps(100);
self._bytes = self.header(len(request_body)) + request_body
c = iproto.py_con
space = c.space('test')
space_id = space.space_no
TESTS = [
(1, "\xa1", "\xd9\x01", "\xda\x00\x01", "\xdb\x00\x00\x00\x01"),
(31, "\xbf", "\xd9\x1f", "\xda\x00\x1f", "\xdb\x00\x00\x00\x1f"),
(32, "\xd9\x20", "\xda\x00\x20", "\xdb\x00\x00\x00\x20"),
(255, "\xd9\xff", "\xda\x00\xff", "\xdb\x00\x00\x00\xff"),
(256, "\xda\x01\x00", "\xdb\x00\x00\x01\x00"),
(65535, "\xda\xff\xff", "\xdb\x00\x00\xff\xff"),
(65536, "\xdb\x00\x01\x00\x00"),
]
for test in TESTS:
it = iter(test)
size = next(it)
print 'STR', size
print '--'
for fmt in it:
print '0x' + fmt.encode('hex'), '=>',
field = '*' * size
c._send_request(RawInsert(c, space_id, "\x91" + fmt + field))
tuple = space.select(field)[0]
print len(tuple[0])== size and 'ok' or 'fail',
it2 = iter(test)
next(it2)
for fmt2 in it2:
tuple = c._send_request(RawSelect(c, space_id,
"\x91" + fmt2 + field))[0]
print len(tuple[0]) == size and 'ok' or 'fail',
tuple = space.delete(field)[0]
print len(tuple[0]) == size and 'ok' or 'fail',
print
print
print 'Test of schema_id in iproto.'
c = Connection('localhost', server.iproto.port)
c.connect()
s = c._socket
def test_request(req_header, req_body):
query_header = msgpack.dumps(req_header)
query_body = msgpack.dumps(req_body)
packet_len = len(query_header) + len(query_body)
query = msgpack.dumps(packet_len) + query_header + query_body
try:
s.send(query)
except OSError as e:
print ' => ', 'Failed to send request'
resp_len = ''
resp_headerbody = ''
resp_header = {}
resp_body = {}
try:
resp_len = s.recv(5)
resp_len = msgpack.loads(resp_len)
resp_headerbody = s.recv(resp_len)
unpacker = msgpack.Unpacker(use_list = True)
unpacker.feed(resp_headerbody)
resp_header = unpacker.unpack()
resp_body = unpacker.unpack()
except OSError as e:
print ' => ', 'Failed to recv response'
res = {}
res['header'] = resp_header
res['body'] = resp_body
return res
header = { IPROTO_CODE : REQUEST_TYPE_SELECT}
body = { IPROTO_SPACE_ID: space_id,
IPROTO_INDEX_ID: 0,
IPROTO_KEY: [],
IPROTO_ITERATOR: 2,
IPROTO_OFFSET: 0,
IPROTO_LIMIT: 1 }
resp = test_request(header, body)
print 'Normal connect done w/o errors:', resp['header'][0] == 0
print 'Got schema_id:', resp['header'][5] > 0
schema_id = resp['header'][5]
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : 0 }
resp = test_request(header, body)
print 'Zero-schema_id connect done w/o errors:', resp['header'][0] == 0
print 'Same schema_id:', resp['header'][5] == schema_id
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id }
resp = test_request(header, body)
print 'Normal connect done w/o errors:', resp['header'][0] == 0
print 'Same schema_id:', resp['header'][5] == schema_id
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id + 1 }
resp = test_request(header, body)
print 'Wrong schema_id leads to error:', resp['header'][0] != 0
print 'Same schema_id:', resp['header'][5] == schema_id
admin("space2 = box.schema.create_space('test2')")
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id }
resp = test_request(header, body)
print 'Schema changed -> error:', resp['header'][0] != 0
print 'Got another schema_id:', resp['header'][5] != schema_id
c.close()
admin("space:drop()")
admin("space2:drop()")
#
# gh-1280 Segmentation fault on space.select(tuple()) or space.select([2])
#
admin("space = box.schema.create_space('gh1280', { engine = 'sophia' })")
admin("index = space:create_index('primary')")
admin("space:insert({1})")
admin("space:insert({2, 'Music'})")
admin("space:insert({3, 'Length', 93})")
iproto.py_con.space('gh1280').select([])
iproto.py_con.space('gh1280').select(list())
admin("space:drop()")
admin("box.schema.user.revoke('guest', 'read,write,execute', 'universe')")
| avg_line_length: 31.149502 | max_line_length: 84 | alphanum_fraction: 0.658276 |
794bc249aaee81b00150cbcab3011e6c2a0f63f5 | size: 200 | ext: py | lang: Python
| max_stars: routing_only/no1_base_app.py | thinkAmi-sandbox/wsgi_framework-sample | ce9c3ef19aa179e429bb6c4facc27387fd12b1f7 | ["Unlicense"] | 1 | 2018-05-14T15:35:39.000Z | 2018-05-14T15:35:39.000Z
| max_issues: routing_only/no1_base_app.py | thinkAmi-sandbox/wsgi_framework-sample | ce9c3ef19aa179e429bb6c4facc27387fd12b1f7 | ["Unlicense"] | null | null | null
| max_forks: routing_only/no1_base_app.py | thinkAmi-sandbox/wsgi_framework-sample | ce9c3ef19aa179e429bb6c4facc27387fd12b1f7 | ["Unlicense"] | 1 | 2018-12-07T13:07:47.000Z | 2018-12-07T13:07:47.000Z
# python server.py no1_base_app:application
# Returns the same value regardless of the URL
def application(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return [b"Hello, world."]
| avg_line_length: 40 | max_line_length: 63 | alphanum_fraction: 0.71 |
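A minimal sketch of serving the WSGI app above with the standard library instead of the project's server.py, assuming the file is importable as no1_base_app:

```python
from wsgiref.simple_server import make_server

from no1_base_app import application  # the WSGI callable defined above

with make_server("", 8000, application) as httpd:
    print("Serving on http://localhost:8000 ...")
    httpd.serve_forever()
```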
794bc2dd54f1564d83f3b87cbf9d8830e471eea6 | size: 4,016 | ext: py | lang: Python
| max_stars: tensorflow/lite/testing/op_tests/batch_to_space_nd.py | chuanqi129/tensorflow | 84eb083bb5328912dde064b8b0f61d28c6edbe43 | ["Apache-2.0"] | 57 | 2017-09-03T07:08:31.000Z | 2022-02-28T04:33:42.000Z
| max_issues: tensorflow/lite/testing/op_tests/batch_to_space_nd.py | chuanqi129/tensorflow | 84eb083bb5328912dde064b8b0f61d28c6edbe43 | ["Apache-2.0"] | 8 | 2019-03-13T23:13:47.000Z | 2020-01-31T21:08:21.000Z
| max_forks: tensorflow/lite/testing/op_tests/batch_to_space_nd.py | chuanqi129/tensorflow | 84eb083bb5328912dde064b8b0f61d28c6edbe43 | ["Apache-2.0"] | 28 | 2017-03-25T13:48:09.000Z | 2021-10-14T00:10:50.000Z
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for batch_to_space_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_batch_to_space_nd_tests(options):
"""Make a set of tests to do batch_to_space_nd."""
test_parameters = [
{
"dtype": [tf.float32, tf.int64, tf.int32],
"input_shape": [[12, 3, 3, 1]],
"block_shape": [[1, 4], [2, 2], [3, 4]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
# Single batch (no-op)
{
"dtype": [tf.float32],
"input_shape": [[1, 3, 3, 1]],
"block_shape": [[1, 1]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True],
"constant_crops": [True],
},
# Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[8, 2, 2, 2, 1, 1]],
"block_shape": [[2, 2, 2]],
"crops": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
# 3D use case.
{
"dtype": [tf.float32],
"input_shape": [[1, 3, 3]],
"block_shape": [[1]],
"crops": [[[0, 0]], [[1, 1]]],
"constant_block_shape": [True],
"constant_crops": [True],
},
]
def build_graph(parameters):
"""Build a batch_to_space graph given `parameters`."""
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.compat.v1.placeholder(
dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get crops either as a const or as a placeholder (tensor).
if parameters["constant_crops"]:
crops = parameters["crops"]
else:
shape = [len(parameters["crops"]), 2]
crops = tf.compat.v1.placeholder(
dtype=tf.int32, name="crops", shape=shape)
input_tensors.append(crops)
out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_crops"]:
values.append(np.array(parameters["crops"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| avg_line_length: 36.844037 | max_line_length: 80 | alphanum_fraction: 0.620518 |
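A small sketch of what the op under test does, using the same compat import as the file above and run eagerly under TF 2.x: four 1x1 batch entries are rearranged into a single 2x2 image.

```python
import numpy as np
import tensorflow.compat.v1 as tf

x = np.arange(4, dtype=np.float32).reshape([4, 1, 1, 1])
out = tf.batch_to_space_nd(x, block_shape=[2, 2], crops=[[0, 0], [0, 0]])

# The four batch entries tile a single 2x2 spatial grid: [[0. 1.] [2. 3.]]
print(np.array(out).reshape(2, 2))
```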
794bc55c7f842d06068dac7343db0e49bf8b26fa | size: 61,705 | ext: py | lang: Python
| max_stars: sdk/storage/azure-storage-blob/azure/storage/blob/aio/_container_client_async.py | omziv/azure-sdk-for-python | 3fbb0f9e1f86acc4e0a8cb13b790dccf32e3860f | ["MIT"] | 1 | 2021-09-16T02:33:52.000Z | 2021-09-16T02:33:52.000Z
| max_issues: sdk/storage/azure-storage-blob/azure/storage/blob/aio/_container_client_async.py | omziv/azure-sdk-for-python | 3fbb0f9e1f86acc4e0a8cb13b790dccf32e3860f | ["MIT"] | 2 | 2021-08-24T15:32:30.000Z | 2021-08-24T23:21:34.000Z
| max_forks: sdk/storage/azure-storage-blob/azure/storage/blob/aio/_container_client_async.py | omziv/azure-sdk-for-python | 3fbb0f9e1f86acc4e0a8cb13b790dccf32e3860f | ["MIT"] | 1 | 2016-04-19T22:15:47.000Z | 2016-04-19T22:15:47.000Z
# pylint: disable=too-many-lines
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=invalid-overridden-method
import functools
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator,
TYPE_CHECKING
)
from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.async_paging import AsyncItemPaged
from azure.core.pipeline import AsyncPipeline
from azure.core.pipeline.transport import AsyncHttpResponse
from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
from .._shared.policies_async import ExponentialRetry
from .._shared.request_handlers import add_metadata_headers, serialize_iso
from .._shared.response_handlers import (
process_storage_error,
return_response_headers,
return_headers_and_deserialized)
from .._generated.aio import AzureBlobStorage
from .._generated.models import SignedIdentifier
from .._deserialize import deserialize_container_properties
from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions
from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name
from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import
from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix
from ._lease_async import BlobLeaseClient
from ._blob_client_async import BlobClient
if TYPE_CHECKING:
from .._models import PublicAccess
from ._download_async import StorageStreamDownloader
from datetime import datetime
from .._models import ( # pylint: disable=unused-import
AccessPolicy,
StandardBlobTier,
PremiumPageBlobTier)
class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase):
"""A client to interact with a specific container, although that container
may not yet exist.
For operations relating to a specific blob within this container, a blob client can be
retrieved using the :func:`~get_blob_client` function.
:param str account_url:
The URI to the storage account. In order to create a client given the full URI to the container,
use the :func:`from_container_url` classmethod.
:param container_name:
The name of the container for the blob.
:type container_name: str
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
an instance of a AzureSasCredential from azure.core.credentials, an account
shared access key, or an instance of a TokenCredentials class from azure.identity.
If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
- except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
:keyword str api_version:
The Storage API version to use for requests. Default value is '2019-07-07'.
Setting to an older version may result in reduced feature compatibility.
.. versionadded:: 12.2.0
:keyword str secondary_hostname:
The hostname of the secondary endpoint.
:keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
Defaults to 4*1024*1024, or 4MB.
:keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
:keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
:keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
:keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
:keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
:keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
or 4MB.
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START create_container_client_from_service]
:end-before: [END create_container_client_from_service]
:language: python
:dedent: 8
:caption: Get a ContainerClient from an existing BlobServiceClient.
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START create_container_client_sasurl]
:end-before: [END create_container_client_sasurl]
:language: python
:dedent: 12
:caption: Creating the container client directly.
"""
def __init__(
self, account_url, # type: str
container_name, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
super(ContainerClient, self).__init__(
account_url,
container_name=container_name,
credential=credential,
**kwargs)
self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
default_api_version = self._client._config.version # pylint: disable=protected-access
self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access
@distributed_trace_async
async def create_container(self, metadata=None, public_access=None, **kwargs):
# type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
"""
Creates a new container under the specified account. If the container
with the same name already exists, the operation fails.
:param metadata:
A dict with name_value pairs to associate with the
container as metadata. Example:{'Category':'test'}
:type metadata: dict[str, str]
:param ~azure.storage.blob.PublicAccess public_access:
Possible values include: 'container', 'blob'.
:keyword container_encryption_scope:
Specifies the default encryption scope to set on the container and use for
all future writes.
.. versionadded:: 12.2.0
:paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START create_container]
:end-before: [END create_container]
:language: python
:dedent: 16
:caption: Creating a container to store blobs.
"""
headers = kwargs.pop('headers', {})
headers.update(add_metadata_headers(metadata)) # type: ignore
timeout = kwargs.pop('timeout', None)
container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
try:
return await self._client.container.create( # type: ignore
timeout=timeout,
access=public_access,
container_cpk_scope_info=container_cpk_scope_info,
cls=return_response_headers,
headers=headers,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
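# Illustrative usage sketch (not part of the SDK source; the account URL, container
# name and credential below are placeholders):
#
#     from azure.storage.blob.aio import ContainerClient
#
#     async def make_container():
#         client = ContainerClient(
#             "https://myaccount.blob.core.windows.net",
#             container_name="mycontainer",
#             credential="<sas-token>")
#         async with client:
#             await client.create_container(metadata={"category": "test"})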
@distributed_trace_async
async def _rename_container(self, new_name, **kwargs):
# type: (str, **Any) -> ContainerClient
"""Renames a container.
Operation is successful only if the source container exists.
:param str new_name:
The new container name the user wants to rename to.
:keyword lease:
Specify this to perform only if the lease ID given
matches the active lease ID of the source container.
:paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.blob.ContainerClient
"""
lease = kwargs.pop('lease', None)
try:
kwargs['source_lease_id'] = lease.id # type: str
except AttributeError:
kwargs['source_lease_id'] = lease
try:
renamed_container = ContainerClient(
"{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name,
credential=self.credential, api_version=self.api_version, _configuration=self._config,
_pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function)
await renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access
return renamed_container
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def delete_container(
self, **kwargs):
# type: (Any) -> None
"""
Marks the specified container for deletion. The container and any blobs
contained within it are later deleted during garbage collection.
:keyword lease:
If specified, delete_container only succeeds if the
container's lease is active and matches this ID.
Required if the container has an active lease.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START delete_container]
:end-before: [END delete_container]
:language: python
:dedent: 16
:caption: Delete a container.
"""
lease = kwargs.pop('lease', None)
access_conditions = get_access_conditions(lease)
mod_conditions = get_modify_conditions(kwargs)
timeout = kwargs.pop('timeout', None)
try:
await self._client.container.delete(
timeout=timeout,
lease_access_conditions=access_conditions,
modified_access_conditions=mod_conditions,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def acquire_lease(
self, lease_duration=-1, # type: int
lease_id=None, # type: Optional[str]
**kwargs):
# type: (...) -> BlobLeaseClient
"""
Requests a new lease. If the container does not have an active lease,
the Blob service creates a lease on the container and returns a new
lease ID.
:param int lease_duration:
Specifies the duration of the lease, in seconds, or negative one
(-1) for a lease that never expires. A non-infinite lease can be
between 15 and 60 seconds. A lease duration cannot be changed
using renew or change. Default is -1 (infinite lease).
:param str lease_id:
Proposed lease ID, in a GUID string format. The Blob service returns
400 (Invalid request) if the proposed lease ID is not in the correct format.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: A BlobLeaseClient object, that can be run in a context manager.
:rtype: ~azure.storage.blob.aio.BlobLeaseClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START acquire_lease_on_container]
:end-before: [END acquire_lease_on_container]
:language: python
:dedent: 12
:caption: Acquiring a lease on the container.
"""
lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
kwargs.setdefault('merge_span', True)
timeout = kwargs.pop('timeout', None)
await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
return lease
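# Illustrative usage sketch (container_client is an assumed, already-constructed
# ContainerClient): acquire a 30-second lease and delete the container under it.
#
#     lease = await container_client.acquire_lease(lease_duration=30)
#     await container_client.delete_container(lease=lease)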
@distributed_trace_async
async def get_account_information(self, **kwargs):
# type: (**Any) -> Dict[str, str]
"""Gets information related to the storage account.
The information can also be retrieved if the user has a SAS to a container or blob.
The keys in the returned dictionary include 'sku_name' and 'account_kind'.
:returns: A dict of account information (SKU and account type).
:rtype: dict(str, str)
"""
try:
return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def get_container_properties(self, **kwargs):
# type: (**Any) -> ContainerProperties
"""Returns all user-defined metadata and system properties for the specified
container. The data returned does not include the container's list of blobs.
:keyword lease:
If specified, get_container_properties only succeeds if the
container's lease is active and matches this ID.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: Properties for the specified container within a container object.
:rtype: ~azure.storage.blob.ContainerProperties
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START get_container_properties]
:end-before: [END get_container_properties]
:language: python
:dedent: 16
:caption: Getting properties on the container.
"""
lease = kwargs.pop('lease', None)
access_conditions = get_access_conditions(lease)
timeout = kwargs.pop('timeout', None)
try:
response = await self._client.container.get_properties(
timeout=timeout,
lease_access_conditions=access_conditions,
cls=deserialize_container_properties,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
response.name = self.container_name
return response # type: ignore
@distributed_trace_async
async def exists(self, **kwargs):
# type: (**Any) -> bool
"""
Returns True if a container exists and returns False otherwise.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: boolean
"""
try:
await self._client.container.get_properties(**kwargs)
return True
except HttpResponseError as error:
try:
process_storage_error(error)
except ResourceNotFoundError:
return False
@distributed_trace_async
async def set_container_metadata( # type: ignore
self, metadata=None, # type: Optional[Dict[str, str]]
**kwargs
):
# type: (...) -> Dict[str, Union[str, datetime]]
"""Sets one or more user-defined name-value pairs for the specified
container. Each call to this operation replaces all existing metadata
attached to the container. To remove all metadata from the container,
call this operation with no metadata dict.
:param metadata:
A dict containing name-value pairs to associate with the container as
metadata. Example: {'category':'test'}
:type metadata: dict[str, str]
:keyword lease:
If specified, set_container_metadata only succeeds if the
container's lease is active and matches this ID.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Container-updated property dict (Etag and last modified).
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START set_container_metadata]
:end-before: [END set_container_metadata]
:language: python
:dedent: 16
:caption: Setting metadata on the container.
"""
headers = kwargs.pop('headers', {})
headers.update(add_metadata_headers(metadata))
lease = kwargs.pop('lease', None)
access_conditions = get_access_conditions(lease)
mod_conditions = get_modify_conditions(kwargs)
timeout = kwargs.pop('timeout', None)
try:
return await self._client.container.set_metadata( # type: ignore
timeout=timeout,
lease_access_conditions=access_conditions,
modified_access_conditions=mod_conditions,
cls=return_response_headers,
headers=headers,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace
def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs
# type: (...) -> BlobServiceClient
"""Get a client to interact with the container's parent service account.
Defaults to current container's credentials.
:returns: A BlobServiceClient.
:rtype: ~azure.storage.blob.BlobServiceClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START get_blob_service_client_from_container_client]
:end-before: [END get_blob_service_client_from_container_client]
:language: python
:dedent: 8
:caption: Get blob service client from container object.
"""
from ._blob_service_client_async import BlobServiceClient
if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access
_pipeline = AsyncPipeline(
transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
policies=self._pipeline._impl_policies # pylint: disable = protected-access
)
else:
_pipeline = self._pipeline # pylint: disable = protected-access
return BlobServiceClient(
"{}://{}".format(self.scheme, self.primary_hostname),
credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
_location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function,
_pipeline=_pipeline)
@distributed_trace_async
async def get_container_access_policy(self, **kwargs):
# type: (Any) -> Dict[str, Any]
"""Gets the permissions for the specified container.
The permissions indicate whether container data may be accessed publicly.
:keyword lease:
If specified, get_container_access_policy only succeeds if the
container's lease is active and matches this ID.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Access policy information in a dict.
:rtype: dict[str, Any]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START get_container_access_policy]
:end-before: [END get_container_access_policy]
:language: python
:dedent: 16
:caption: Getting the access policy on the container.
"""
lease = kwargs.pop('lease', None)
access_conditions = get_access_conditions(lease)
timeout = kwargs.pop('timeout', None)
try:
response, identifiers = await self._client.container.get_access_policy(
timeout=timeout,
lease_access_conditions=access_conditions,
cls=return_headers_and_deserialized,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
return {
'public_access': response.get('blob_public_access'),
'signed_identifiers': identifiers or []
}
@distributed_trace_async
async def set_container_access_policy(
self, signed_identifiers, # type: Dict[str, AccessPolicy]
public_access=None, # type: Optional[Union[str, PublicAccess]]
**kwargs # type: Any
): # type: (...) -> Dict[str, Union[str, datetime]]
"""Sets the permissions for the specified container or stored access
policies that may be used with Shared Access Signatures. The permissions
indicate whether blobs in a container may be accessed publicly.
:param signed_identifiers:
A dictionary of access policies to associate with the container. The
dictionary may contain up to 5 elements. An empty dictionary
will clear the access policies set on the service.
:type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
:param ~azure.storage.blob.PublicAccess public_access:
Possible values include: 'container', 'blob'.
:keyword lease:
Required if the container has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A datetime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified date/time.
:keyword ~datetime.datetime if_unmodified_since:
A datetime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Container-updated property dict (Etag and last modified).
:rtype: dict[str, str or ~datetime.datetime]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START set_container_access_policy]
:end-before: [END set_container_access_policy]
:language: python
:dedent: 16
:caption: Setting access policy on the container.
"""
timeout = kwargs.pop('timeout', None)
lease = kwargs.pop('lease', None)
if len(signed_identifiers) > 5:
raise ValueError(
'Too many access policies provided. The server does not support setting '
'more than 5 access policies on a single resource.')
identifiers = []
for key, value in signed_identifiers.items():
if value:
value.start = serialize_iso(value.start)
value.expiry = serialize_iso(value.expiry)
identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
signed_identifiers = identifiers # type: ignore
mod_conditions = get_modify_conditions(kwargs)
access_conditions = get_access_conditions(lease)
try:
return await self._client.container.set_access_policy(
container_acl=signed_identifiers or None,
timeout=timeout,
access=public_access,
lease_access_conditions=access_conditions,
modified_access_conditions=mod_conditions,
cls=return_response_headers,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
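# Illustrative usage sketch (assumed container_client; the identifier name is a
# placeholder): store one access policy granting read/list permission for a day.
#
#     from datetime import datetime, timedelta
#     from azure.storage.blob import AccessPolicy, ContainerSasPermissions
#
#     policy = AccessPolicy(
#         permission=ContainerSasPermissions(read=True, list=True),
#         start=datetime.utcnow(),
#         expiry=datetime.utcnow() + timedelta(days=1))
#     await container_client.set_container_access_policy(
#         signed_identifiers={"read-only": policy})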
@distributed_trace
def list_blobs(self, name_starts_with=None, include=None, **kwargs):
# type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties]
"""Returns a generator to list the blobs under the specified container.
The generator will lazily follow the continuation tokens returned by
the service.
:param str name_starts_with:
Filters the results to return only blobs whose names
begin with the specified prefix.
:param list[str] or str include:
Specifies one or more additional datasets to include in the response.
Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions',
'tags', 'versions'.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) response of BlobProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START list_blobs_in_container]
:end-before: [END list_blobs_in_container]
:language: python
:dedent: 12
:caption: List the blobs in the container.
"""
if include and not isinstance(include, list):
include = [include]
results_per_page = kwargs.pop('results_per_page', None)
timeout = kwargs.pop('timeout', None)
command = functools.partial(
self._client.container.list_blob_flat_segment,
include=include,
timeout=timeout,
**kwargs)
return AsyncItemPaged(
command,
prefix=name_starts_with,
results_per_page=results_per_page,
page_iterator_class=BlobPropertiesPaged
)
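# Illustrative usage sketch (assumed container_client; the "logs/" prefix is a
# placeholder): iterate lazily over matching blobs.
#
#     async for blob in container_client.list_blobs(name_starts_with="logs/"):
#         print(blob.name, blob.size)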
@distributed_trace
def walk_blobs(
self, name_starts_with=None, # type: Optional[str]
include=None, # type: Optional[Any]
delimiter="/", # type: str
**kwargs # type: Optional[Any]
):
# type: (...) -> AsyncItemPaged[BlobProperties]
"""Returns a generator to list the blobs under the specified container.
The generator will lazily follow the continuation tokens returned by
the service. This operation will list blobs in accordance with a hierarchy,
as delimited by the specified delimiter character.
:param str name_starts_with:
Filters the results to return only blobs whose names
begin with the specified prefix.
:param list[str] include:
Specifies one or more additional datasets to include in the response.
Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'.
:param str delimiter:
When the request includes this parameter, the operation returns a BlobPrefix
element in the response body that acts as a placeholder for all blobs whose
names begin with the same substring up to the appearance of the delimiter
character. The delimiter may be a single character or a string.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) response of BlobProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
"""
if include and not isinstance(include, list):
include = [include]
results_per_page = kwargs.pop('results_per_page', None)
timeout = kwargs.pop('timeout', None)
command = functools.partial(
self._client.container.list_blob_hierarchy_segment,
delimiter=delimiter,
include=include,
timeout=timeout,
**kwargs)
return BlobPrefix(
command,
prefix=name_starts_with,
results_per_page=results_per_page,
delimiter=delimiter)
@distributed_trace_async
async def upload_blob(
self, name, # type: Union[str, BlobProperties]
data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
blob_type=BlobType.BlockBlob, # type: Union[str, BlobType]
length=None, # type: Optional[int]
metadata=None, # type: Optional[Dict[str, str]]
**kwargs
):
# type: (...) -> BlobClient
"""Creates a new blob from a data source with automatic chunking.
:param name: The blob with which to interact. If specified, this value will override
a blob value specified in the blob URL.
:type name: str or ~azure.storage.blob.BlobProperties
:param data: The blob data to upload.
:param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
:param int length:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
If True, upload_blob will overwrite the existing data. If set to False, the
operation will fail with ResourceExistsError. The exception to the above is with Append
blob types: if set to False and the data already exists, an error will not be raised
and the data will be appended to the existing blob. If set overwrite=True, then the existing
append blob will be deleted, and a new one created. Defaults to False.
:keyword ~azure.storage.blob.ContentSettings content_settings:
ContentSettings object used to set blob properties. Used to set content type, encoding,
language, disposition, md5, and cache control.
:keyword bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https, as https (the default), will
already validate. Note that this MD5 hash is not stored with the
blob. Also note that if enabled, the memory-efficient upload algorithm
will not be used, because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:keyword lease:
Required if the container has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
A standard blob tier value to set the blob to. For this version of the library,
this is only applicable to block blobs on standard storage accounts.
:keyword int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:keyword int max_concurrency:
Maximum number of parallel connections to use when the blob size exceeds
64MB.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword str encoding:
Defaults to UTF-8.
:returns: A BlobClient to interact with the newly uploaded blob.
:rtype: ~azure.storage.blob.aio.BlobClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START upload_blob_to_container]
:end-before: [END upload_blob_to_container]
:language: python
:dedent: 12
:caption: Upload blob to the container.
"""
blob = self.get_blob_client(name)
kwargs.setdefault('merge_span', True)
timeout = kwargs.pop('timeout', None)
encoding = kwargs.pop('encoding', 'UTF-8')
await blob.upload_blob(
data,
blob_type=blob_type,
length=length,
metadata=metadata,
timeout=timeout,
encoding=encoding,
**kwargs
)
return blob
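# Illustrative usage sketch (assumed container_client and blob name): upload a
# small block blob, overwriting any existing blob of the same name.
#
#     blob_client = await container_client.upload_blob(
#         name="data/report.txt", data=b"hello world", overwrite=True)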
@distributed_trace_async
async def delete_blob(
self, blob, # type: Union[str, BlobProperties]
delete_snapshots=None, # type: Optional[str]
**kwargs
):
# type: (...) -> None
"""Marks the specified blob or snapshot for deletion.
The blob is later deleted during garbage collection.
Note that in order to delete a blob, you must delete all of its
snapshots. You can delete both at the same time with the delete_blob
operation.
If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
and retains the blob or snapshot for the specified number of days.
After the specified number of days, the blob's data is removed from the service during garbage collection.
A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` by specifying the `include=["deleted"]`
option. A soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()`.
:param blob: The blob with which to interact. If specified, this value will override
a blob value specified in the blob URL.
:type blob: str or ~azure.storage.blob.BlobProperties
:param str delete_snapshots:
Required if the blob has associated snapshots. Values include:
- "only": Deletes only the blobs snapshots.
- "include": Deletes the blob along with all snapshots.
:keyword str version_id:
The version id parameter is an opaque DateTime
value that, when present, specifies the version of the blob to delete.
.. versionadded:: 12.4.0
This keyword argument was introduced in API version '2019-12-12'.
:keyword lease:
Required if the blob has an active lease. Value can be a Lease object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
"""
blob = self.get_blob_client(blob) # type: ignore
kwargs.setdefault('merge_span', True)
timeout = kwargs.pop('timeout', None)
await blob.delete_blob( # type: ignore
delete_snapshots=delete_snapshots,
timeout=timeout,
**kwargs)
@distributed_trace_async
async def download_blob(self, blob, offset=None, length=None, **kwargs):
# type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader
"""Downloads a blob to the StorageStreamDownloader. The readall() method must
be used to read all the content or readinto() must be used to download the blob into
a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
:param blob: The blob with which to interact. If specified, this value will override
a blob value specified in the blob URL.
:type blob: str or ~azure.storage.blob.BlobProperties
:param int offset:
Start of byte range to use for downloading a section of the blob.
Must be set if length is provided.
:param int length:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:keyword bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https, as https (the default), will
already validate. Note that this MD5 hash is not stored with the
blob. Also note that if enabled, the memory-efficient upload algorithm
will not be used because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:keyword lease:
Required if the blob has an active lease. If specified, download_blob only
succeeds if the blob's lease is active and matches this ID. Value can be a
BlobLeaseClient object or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword int max_concurrency:
The number of parallel connections with which to download.
:keyword str encoding:
Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:returns: A streaming object. (StorageStreamDownloader)
:rtype: ~azure.storage.blob.aio.StorageStreamDownloader
"""
blob_client = self.get_blob_client(blob) # type: ignore
kwargs.setdefault('merge_span', True)
return await blob_client.download_blob(
offset=offset,
length=length,
**kwargs)
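# Illustrative usage sketch (assumed container_client and blob name): download
# a blob and read its full content into memory.
#
#     stream = await container_client.download_blob("data/report.txt")
#     content = await stream.readall()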
@distributed_trace_async
async def delete_blobs( # pylint: disable=arguments-differ
self, *blobs: List[Union[str, BlobProperties, dict]],
**kwargs
) -> AsyncIterator[AsyncHttpResponse]:
"""Marks the specified blobs or snapshots for deletion.
The blobs are later deleted during garbage collection.
Note that in order to delete blobs, you must delete all of their
snapshots. You can delete both at the same time with the delete_blobs operation.
If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
and retains the blobs or snapshots for the specified number of days.
After the specified number of days, the blobs' data is removed from the service during garbage collection.
Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` by specifying `include=["deleted"]`.
Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()`.
:param blobs:
The blobs to delete. This can be a single blob, or multiple values can
be supplied, where each value is either the name of the blob (str) or BlobProperties.
.. note::
When the blob type is dict, the following keys and value rules apply.
blob name:
key: 'name', value type: str
snapshot you want to delete:
key: 'snapshot', value type: str
whether to delete snapshots when deleting the blob:
key: 'delete_snapshots', value: 'include' or 'only'
if the blob modified or not:
key: 'if_modified_since', 'if_unmodified_since', value type: datetime
etag:
key: 'etag', value type: str
match the etag or not:
key: 'match_condition', value type: MatchConditions
tags match condition:
key: 'if_tags_match_condition', value type: str
lease:
key: 'lease_id', value type: Union[str, LeaseClient]
timeout for subrequest:
key: 'timeout', value type: int
:type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
:keyword str delete_snapshots:
Required if a blob has associated snapshots. Values include:
- "only": Deletes only the blobs snapshots.
- "include": Deletes the blob along with all snapshots.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword bool raise_on_any_failure:
This is a boolean param which defaults to True. When this is set, an exception
is raised even if there is a single operation failure. For optimal performance,
this should be set to False.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: An async iterator of responses, one for each blob in order
:rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_common_async.py
:start-after: [START delete_multiple_blobs]
:end-before: [END delete_multiple_blobs]
:language: python
:dedent: 12
:caption: Deleting multiple blobs.
"""
if len(blobs) == 0:
return iter(list())
reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs)
return await self._batch_send(*reqs, **options)
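# Illustrative usage sketch (assumed container_client and blob names): delete
# two blobs in a single batch request and inspect the per-blob responses.
#
#     responses = await container_client.delete_blobs("a.txt", "b.txt")
#     async for response in responses:
#         print(response.status_code)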
@distributed_trace
async def set_standard_blob_tier_blobs(
self,
standard_blob_tier: Union[str, 'StandardBlobTier'],
*blobs: List[Union[str, BlobProperties, dict]],
**kwargs
) -> AsyncIterator[AsyncHttpResponse]:
"""This operation sets the tier on block blobs.
A block blob's tier determines Hot/Cool/Archive storage type.
This operation does not update the blob's ETag.
:param standard_blob_tier:
Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
'Archive'. The hot tier is optimized for storing data that is accessed
frequently. The cool storage tier is optimized for storing data that
is infrequently accessed and stored for at least a month. The archive
tier is optimized for storing data that is rarely accessed and stored
for at least six months with flexible latency requirements.
.. note::
If you want to set a different tier on different blobs, set this positional parameter to None.
Then the blob tier on every BlobProperties will be taken.
:type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
:param blobs:
The blobs with which to interact. This can be a single blob, or multiple values can
be supplied, where each value is either the name of the blob (str) or BlobProperties.
.. note::
When the blob type is dict, the following keys and value rules apply.
blob name:
key: 'name', value type: str
standard blob tier:
key: 'blob_tier', value type: StandardBlobTier
rehydrate priority:
key: 'rehydrate_priority', value type: RehydratePriority
lease:
key: 'lease_id', value type: Union[str, LeaseClient]
tags match condition:
key: 'if_tags_match_condition', value type: str
timeout for subrequest:
key: 'timeout', value type: int
:type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
:keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
Indicates the priority with which to rehydrate an archived blob
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword bool raise_on_any_failure:
This is a boolean param which defaults to True. When this is set, an exception
is raised even if there is a single operation failure. For optimal performance,
this should be set to False.
:return: An async iterator of responses, one for each blob in order
:rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
"""
reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
return await self._batch_send(*reqs, **options)
@distributed_trace
async def set_premium_page_blob_tier_blobs(
self,
premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'],
*blobs: List[Union[str, BlobProperties, dict]],
**kwargs
) -> AsyncIterator[AsyncHttpResponse]:
"""Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
:param premium_page_blob_tier:
A page blob tier value to set on all the blobs. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
.. note::
If you want to set a different tier on different blobs, set this positional parameter to None.
Then the blob tier on every BlobProperties will be taken.
:type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
:param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
be supplied, where each value is either the name of the blob (str) or BlobProperties.
.. note::
When the blob type is dict, the following keys and value rules apply.
blob name:
key: 'name', value type: str
premium blob tier:
key: 'blob_tier', value type: PremiumPageBlobTier
lease:
key: 'lease_id', value type: Union[str, LeaseClient]
timeout for subrequest:
key: 'timeout', value type: int
:type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:keyword bool raise_on_any_failure:
This is a boolean param which defaults to True. When this is set, an exception
is raised even if there is a single operation failure. For optimal performance,
this should be set to False.
:return: An async iterator of responses, one for each blob in order
:rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
"""
reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs)
return await self._batch_send(*reqs, **options)
def get_blob_client(
self, blob, # type: Union[BlobProperties, str]
snapshot=None # type: str
):
# type: (...) -> BlobClient
"""Get a client to interact with the specified blob.
The blob need not already exist.
:param blob:
The blob with which to interact.
:type blob: str or ~azure.storage.blob.BlobProperties
:param str snapshot:
The optional blob snapshot on which to operate. This can be the snapshot ID string
or the response returned from :func:`~BlobClient.create_snapshot()`.
:returns: A BlobClient.
:rtype: ~azure.storage.blob.aio.BlobClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START get_blob_client]
:end-before: [END get_blob_client]
:language: python
:dedent: 12
:caption: Get the blob client.
"""
blob_name = _get_blob_name(blob)
_pipeline = AsyncPipeline(
transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
policies=self._pipeline._impl_policies # pylint: disable = protected-access
)
return BlobClient(
self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
credential=self.credential, api_version=self.api_version, _configuration=self._config,
_pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function)
| 50.953757 | 130 | 0.650304 |
794bc5b542a5c31360ed970052c6f0a46f96b251 | 20,691 | py | Python
| gammapy/data/observations.py | isu-veritas/gammapy | 715b041d7d3925bd51109dc9534634263a2f2d12 | ["BSD-3-Clause"] | null | null | null
| gammapy/data/observations.py | isu-veritas/gammapy | 715b041d7d3925bd51109dc9534634263a2f2d12 | ["BSD-3-Clause"] | null | null | null
| gammapy/data/observations.py | isu-veritas/gammapy | 715b041d7d3925bd51109dc9534634263a2f2d12 | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections.abc
import copy
import logging
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy.units import Quantity
import astropy.units as u
from gammapy.utils.fits import LazyFitsData, earth_location_to_dict
from gammapy.utils.testing import Checker
from gammapy.utils.time import time_ref_to_dict, time_relative_to_ref
from astropy.utils import lazyproperty
from .event_list import EventList, EventListChecker
from .filters import ObservationFilter
from .gti import GTI
from .pointing import FixedPointingInfo
__all__ = ["Observation", "Observations"]
log = logging.getLogger(__name__)
class Observation:
"""In-memory observation.
Parameters
----------
obs_id : int
Observation id
obs_info : dict
Observation info dict
aeff : `~gammapy.irf.EffectiveAreaTable2D`
Effective area
edisp : `~gammapy.irf.EnergyDispersion2D`
Energy dispersion
psf : `~gammapy.irf.PSF3D`
Point spread function
bkg : `~gammapy.irf.Background3D`
Background rate model
rad_max: `~gammapy.irf.RadMax2D`
Only for point-like IRFs: RAD_MAX table (energy dependent RAD_MAX)
For a fixed RAD_MAX, create a RadMax2D with a single bin.
gti : `~gammapy.data.GTI`
Table with GTI start and stop time
events : `~gammapy.data.EventList`
Event list
obs_filter : `ObservationFilter`
Observation filter.
"""
aeff = LazyFitsData(cache=False)
edisp = LazyFitsData(cache=False)
psf = LazyFitsData(cache=False)
bkg = LazyFitsData(cache=False)
_rad_max = LazyFitsData(cache=False)
_events = LazyFitsData(cache=False)
_gti = LazyFitsData(cache=False)
def __init__(
self,
obs_id=None,
obs_info=None,
gti=None,
aeff=None,
edisp=None,
psf=None,
bkg=None,
rad_max=None,
events=None,
obs_filter=None,
):
self.obs_id = obs_id
self.obs_info = obs_info
self.aeff = aeff
self.edisp = edisp
self.psf = psf
self.bkg = bkg
self._rad_max = rad_max
self._gti = gti
self._events = events
self.obs_filter = obs_filter or ObservationFilter()
@property
def rad_max(self):
# prevent circular import
from gammapy.irf import RadMax2D
if self._rad_max is not None:
return self._rad_max
# load once to avoid triggering lazy loading three times
aeff = self.aeff
if aeff is not None and aeff.is_pointlike:
self._rad_max = RadMax2D.from_irf(aeff)
return self._rad_max
edisp = self.edisp
if edisp is not None and edisp.is_pointlike:
self._rad_max = RadMax2D.from_irf(self.edisp)
return self._rad_max
@property
def available_irfs(self):
"""Which irfs are available"""
available_irf = []
for irf in ["aeff", "edisp", "psf", "bkg"]:
available = self.__dict__.get(irf, False)
available_hdu = self.__dict__.get(f"_{irf}_hdu", False)
if available or available_hdu:
available_irf.append(irf)
return available_irf
@property
def events(self):
events = self.obs_filter.filter_events(self._events)
return events
@property
def gti(self):
gti = self.obs_filter.filter_gti(self._gti)
return gti
@staticmethod
def _get_obs_info(pointing, deadtime_fraction, time_start, time_stop, reference_time, location):
"""Create obs info dict from in memory data"""
obs_info = {
"RA_PNT": pointing.icrs.ra.deg,
"DEC_PNT": pointing.icrs.dec.deg,
"DEADC": 1 - deadtime_fraction,
}
obs_info.update(time_ref_to_dict(reference_time))
obs_info['TSTART'] = time_relative_to_ref(time_start, obs_info).to_value(u.s)
obs_info['TSTOP'] = time_relative_to_ref(time_stop, obs_info).to_value(u.s)
if location is not None:
obs_info.update(earth_location_to_dict(location))
return obs_info
@classmethod
def create(
cls,
pointing,
location=None,
obs_id=0,
livetime=None,
tstart=None,
tstop=None,
irfs=None,
deadtime_fraction=0.0,
reference_time=Time("2000-01-01 00:00:00"),
):
"""Create an observation.
User must either provide the livetime, or the start and stop times.
Parameters
----------
pointing : `~astropy.coordinates.SkyCoord`
Pointing position
obs_id : int
Observation ID as identifier
livetime : `~astropy.units.Quantity`
Livetime exposure of the simulated observation
tstart: `~astropy.time.Time` or `~astropy.units.Quantity`
Start time of observation as `~astropy.time.Time` or duration
relative to `reference_time`
tstop: `~astropy.time.Time` or `~astropy.units.Quantity`
Stop time of observation as `~astropy.time.Time` or duration
relative to `reference_time`
irfs: dict
IRFs used for simulating the observation: `bkg`, `aeff`, `psf`, `edisp`
deadtime_fraction : float, optional
Deadtime fraction, defaults to 0
reference_time : `~astropy.time.Time`
the reference time to use in GTI definition
Returns
-------
obs : `~gammapy.data.Observation`
"""
if tstart is None:
tstart = reference_time.copy()
if tstop is None:
tstop = tstart + Quantity(livetime)
gti = GTI.create(tstart, tstop, reference_time=reference_time)
obs_info = cls._get_obs_info(
pointing=pointing,
deadtime_fraction=deadtime_fraction,
time_start=gti.time_start[0],
time_stop=gti.time_stop[0],
reference_time=reference_time,
location=location,
)
return cls(
obs_id=obs_id,
obs_info=obs_info,
gti=gti,
aeff=irfs.get("aeff"),
bkg=irfs.get("bkg"),
edisp=irfs.get("edisp"),
psf=irfs.get("psf"),
)
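# Illustrative usage sketch (the file name and pointing are placeholders): build an
# in-memory observation from a dict of IRFs, loaded with the same helper this module
# uses in `read` below.
#
#     from astropy.coordinates import SkyCoord
#     import astropy.units as u
#     from gammapy.irf.io import load_irf_dict_from_file
#
#     irfs = load_irf_dict_from_file("prod5_irfs.fits.gz")
#     obs = Observation.create(
#         pointing=SkyCoord(83.63, 22.01, unit="deg", frame="icrs"),
#         livetime=30 * u.min,
#         irfs=irfs)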
@property
def tstart(self):
"""Observation start time (`~astropy.time.Time`)."""
return self.gti.time_start[0]
@property
def tstop(self):
"""Observation stop time (`~astropy.time.Time`)."""
return self.gti.time_stop[0]
@property
def observation_time_duration(self):
"""Observation time duration in seconds (`~astropy.units.Quantity`).
The wall time, including dead-time.
"""
return self.gti.time_sum
@property
def observation_live_time_duration(self):
"""Live-time duration in seconds (`~astropy.units.Quantity`).
The dead-time-corrected observation time.
Computed as ``t_live = t_observation * (1 - f_dead)``
where ``f_dead`` is the dead-time fraction.
"""
return self.observation_time_duration * (
1 - self.observation_dead_time_fraction
)
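# Worked example: a 30 min observation (1800 s) with DEADC = 0.95, i.e. a 5%
# dead-time fraction, has t_live = 1800 s * (1 - 0.05) = 1710 s.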
@property
def observation_dead_time_fraction(self):
"""Dead-time fraction (float).
Defined as dead-time over observation time.
Dead-time is defined as the time during the observation
where the detector didn't record events:
https://en.wikipedia.org/wiki/Dead_time
https://ui.adsabs.harvard.edu/abs/2004APh....22..285F
The dead-time fraction is used in the live-time computation,
which in turn is used in the exposure and flux computation.
"""
return 1 - self.obs_info["DEADC"]
@lazyproperty
def fixed_pointing_info(self):
"""Fixed pointing info for this observation (`FixedPointingInfo`)."""
meta = self.obs_info.copy() if self.obs_info is not None else {}
if self.events is not None:
meta.update(self.events.table.meta)
return FixedPointingInfo(meta)
@property
def pointing_radec(self):
"""Pointing RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`)."""
return self.fixed_pointing_info.radec
@property
def pointing_altaz(self):
return self.fixed_pointing_info.altaz
@property
def pointing_zen(self):
"""Pointing zenith angle sky (`~astropy.units.Quantity`)."""
return self.fixed_pointing_info.altaz.zen
@property
def observatory_earth_location(self):
"""Observatory location (`~astropy.coordinates.EarthLocation`)."""
return self.fixed_pointing_info.location
@lazyproperty
def target_radec(self):
"""Target RA / DEC sky coordinates (`~astropy.coordinates.SkyCoord`)."""
lon, lat = (
self.obs_info.get("RA_OBJ", np.nan),
self.obs_info.get("DEC_OBJ", np.nan),
)
return SkyCoord(lon, lat, unit="deg", frame="icrs")
@property
def muoneff(self):
"""Observation muon efficiency."""
return self.obs_info.get("MUONEFF", 1)
def __str__(self):
ra = self.pointing_radec.ra.deg
dec = self.pointing_radec.dec.deg
pointing = f"{ra:.1f} deg, {dec:.1f} deg\n"
# TODO: Which target was observed?
# TODO: print info about available HDUs for this observation ...
return (
f"{self.__class__.__name__}\n\n"
f"\tobs id : {self.obs_id} \n "
f"\ttstart : {self.tstart.mjd:.2f}\n"
f"\ttstop : {self.tstop.mjd:.2f}\n"
f"\tduration : {self.observation_time_duration:.2f}\n"
f"\tpointing (icrs) : {pointing}\n"
f"\tdeadtime fraction : {self.observation_dead_time_fraction:.1%}\n"
)
def check(self, checks="all"):
"""Run checks.
This is a generator that yields a list of dicts.
"""
checker = ObservationChecker(self)
return checker.run(checks=checks)
def peek(self, figsize=(12, 10)):
"""Quick-look plots in a few panels.
Parameters
----------
figsize : tuple
Figure size
"""
import matplotlib.pyplot as plt
n_irfs = len(self.available_irfs)
fig, axes = plt.subplots(
nrows=n_irfs // 2,
ncols=2 + n_irfs % 2,
figsize=figsize,
gridspec_kw={"wspace": 0.25, "hspace": 0.25},
)
axes_dict = dict(zip(self.available_irfs, axes.flatten()))
if "aeff" in self.available_irfs:
self.aeff.plot(ax=axes_dict["aeff"])
axes_dict["aeff"].set_title("Effective area")
if "bkg" in self.available_irfs:
bkg = self.bkg
if not bkg.has_offset_axis:
bkg = bkg.to_2d()
bkg.plot(ax=axes_dict["bkg"])
axes_dict["bkg"].set_title("Background rate")
else:
logging.warning(f"No background model found for obs {self.obs_id}.")
if "psf" in self.available_irfs:
self.psf.plot_containment_radius_vs_energy(ax=axes_dict["psf"])
axes_dict["psf"].set_title("Point spread function")
else:
logging.warning(f"No PSF found for obs {self.obs_id}.")
if "edisp" in self.available_irfs:
self.edisp.plot_bias(ax=axes_dict["edisp"], add_cbar=True)
axes_dict["edisp"].set_title("Energy dispersion")
else:
logging.warning(f"No energy dispersion found for obs {self.obs_id}.")
def select_time(self, time_interval):
"""Select a time interval of the observation.
Parameters
----------
time_interval : `astropy.time.Time`
Start and stop time of the selected time interval.
For now we only support a single time interval.
Returns
-------
new_obs : `~gammapy.data.Observation`
A new observation instance of the specified time interval
"""
new_obs_filter = self.obs_filter.copy()
new_obs_filter.time_filter = time_interval
obs = copy.deepcopy(self)
obs.obs_filter = new_obs_filter
return obs
@classmethod
def read(cls, event_file, irf_file=None):
"""Create an Observation from a Event List and an (optional) IRF file.
Parameters
----------
event_file : str, Path
path to the .fits file containing the event list and the GTI
irf_file : str, Path
            (optional) path to the .fits file containing the IRF components;
            if not provided, the IRFs are read from the event file
Returns
-------
observation : `~gammapy.data.Observation`
observation with the events and the irf read from the file
"""
from gammapy.irf.io import load_irf_dict_from_file
events = EventList.read(event_file)
gti = GTI.read(event_file)
irf_file = irf_file if irf_file is not None else event_file
irf_dict = load_irf_dict_from_file(irf_file)
obs_info = events.table.meta
return cls(
events=events,
gti=gti,
obs_info=obs_info,
obs_id=obs_info.get("OBS_ID"),
**irf_dict,
)
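# Hedged usage sketch (not part of gammapy): reading an observation from an
# event file and restricting it to a sub-interval with ``select_time``. The
# file name and interval are placeholder assumptions.
def _example_read_and_select_time():
    from astropy.time import Time

    obs = Observation.read("events.fits")  # hypothetical event list + GTI file
    interval = Time(["2020-01-01T00:00:00", "2020-01-01T00:30:00"])
    return obs.select_time(interval)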
class Observations(collections.abc.MutableSequence):
"""Container class that holds a list of observations.
Parameters
----------
observations : list
A list of `~gammapy.data.Observation`
"""
def __init__(self, observations=None):
self._observations = observations or []
def __getitem__(self, key):
return self._observations[self.index(key)]
def __delitem__(self, key):
del self._observations[self.index(key)]
def __setitem__(self, key, obs):
if isinstance(obs, Observation):
self._observations[self.index(key)] = obs
else:
raise TypeError(f"Invalid type: {type(obs)!r}")
def insert(self, idx, obs):
if isinstance(obs, Observation):
self._observations.insert(idx, obs)
else:
raise TypeError(f"Invalid type: {type(obs)!r}")
def __len__(self):
return len(self._observations)
def __str__(self):
s = self.__class__.__name__ + "\n"
s += "Number of observations: {}\n".format(len(self))
for obs in self:
s += str(obs)
return s
def index(self, key):
if isinstance(key, (int, slice)):
return key
elif isinstance(key, str):
return self.ids.index(key)
elif isinstance(key, Observation):
return self._observations.index(key)
else:
raise TypeError(f"Invalid type: {type(key)!r}")
@property
def ids(self):
"""List of obs IDs (`list`)"""
return [str(obs.obs_id) for obs in self]
def select_time(self, time_intervals):
"""Select a time interval of the observations.
Parameters
----------
time_intervals : `astropy.time.Time` or list of `astropy.time.Time`
            start and stop times of the selected intervals, given either as a single Time interval or as a list of them
Returns
-------
new_observations : `~gammapy.data.Observations`
A new Observations instance of the specified time intervals
"""
new_obs_list = []
if isinstance(time_intervals, Time):
time_intervals = [time_intervals]
for time_interval in time_intervals:
for obs in self:
if (obs.tstart < time_interval[1]) & (obs.tstop > time_interval[0]):
new_obs = obs.select_time(time_interval)
new_obs_list.append(new_obs)
return self.__class__(new_obs_list)
def _ipython_key_completions_(self):
return self.ids
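# Hedged usage sketch (not part of gammapy): collecting observations in the
# container above and selecting them by id or by time. The observations and
# the time interval are assumed to exist already.
def _example_observations_container(obs_a, obs_b, time_interval):
    observations = Observations([obs_a, obs_b])
    first_by_id = observations[observations.ids[0]]  # lookup by string obs id
    in_window = observations.select_time(time_interval)
    return first_by_id, in_window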
class ObservationChecker(Checker):
"""Check an observation.
Checks data format and a bit about the content.
"""
CHECKS = {
"events": "check_events",
"gti": "check_gti",
"aeff": "check_aeff",
"edisp": "check_edisp",
"psf": "check_psf",
}
def __init__(self, observation):
self.observation = observation
def _record(self, level="info", msg=None):
return {"level": level, "obs_id": self.observation.obs_id, "msg": msg}
def check_events(self):
yield self._record(level="debug", msg="Starting events check")
try:
events = self.observation.events
except Exception:
yield self._record(level="warning", msg="Loading events failed")
return
yield from EventListChecker(events).run()
# TODO: split this out into a GTIChecker
def check_gti(self):
yield self._record(level="debug", msg="Starting gti check")
try:
gti = self.observation.gti
except Exception:
yield self._record(level="warning", msg="Loading GTI failed")
return
if len(gti.table) == 0:
yield self._record(level="error", msg="GTI table has zero rows")
columns_required = ["START", "STOP"]
for name in columns_required:
if name not in gti.table.colnames:
yield self._record(level="error", msg=f"Missing table column: {name!r}")
# TODO: Check that header keywords agree with table entries
# TSTART, TSTOP, MJDREFI, MJDREFF
# Check that START and STOP times are consecutive
# times = np.ravel(self.table['START'], self.table['STOP'])
# # TODO: not sure this is correct ... add test with a multi-gti table from Fermi.
# if not np.all(np.diff(times) >= 0):
# yield 'GTIs are not consecutive or sorted.'
# TODO: add reference times for all instruments and check for this
# Use TELESCOP header key to check which instrument it is.
def _check_times(self):
"""Check if various times are consistent.
The headers and tables of the FITS EVENTS and GTI extension
contain various observation and event time information.
"""
# http://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Data/Time_in_ScienceTools.html
# https://hess-confluence.desy.de/confluence/display/HESS/HESS+FITS+data+-+References+and+checks#HESSFITSdata-Referencesandchecks-Time
telescope_met_refs = {
"FERMI": Time("2001-01-01T00:00:00"),
"HESS": Time("2001-01-01T00:00:00"),
}
meta = self.dset.event_list.table.meta
telescope = meta["TELESCOP"]
if telescope in telescope_met_refs.keys():
dt = self.time_ref - telescope_met_refs[telescope]
if dt > self.accuracy["time"]:
yield self._record(
level="error", msg="Reference time incorrect for telescope"
)
def check_aeff(self):
yield self._record(level="debug", msg="Starting aeff check")
try:
aeff = self.observation.aeff
except Exception:
yield self._record(level="warning", msg="Loading aeff failed")
return
# Check that thresholds are meaningful for aeff
if (
"LO_THRES" in aeff.meta
and "HI_THRES" in aeff.meta
and aeff.meta["LO_THRES"] >= aeff.meta["HI_THRES"]
):
yield self._record(
level="error", msg="LO_THRES >= HI_THRES in effective area meta data"
)
# Check that data isn't all null
if np.max(aeff.data.data) <= 0:
yield self._record(
level="error", msg="maximum entry of effective area is <= 0"
)
def check_edisp(self):
yield self._record(level="debug", msg="Starting edisp check")
try:
edisp = self.observation.edisp
except Exception:
yield self._record(level="warning", msg="Loading edisp failed")
return
# Check that data isn't all null
if np.max(edisp.data.data) <= 0:
yield self._record(level="error", msg="maximum entry of edisp is <= 0")
def check_psf(self):
yield self._record(level="debug", msg="Starting psf check")
try:
self.observation.psf
except Exception:
yield self._record(level="warning", msg="Loading psf failed")
return
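# Hedged usage sketch (not part of gammapy): running the checks defined above
# on an existing observation and printing every non-debug record.
def _example_run_checks(observation):
    for record in ObservationChecker(observation).run(checks="all"):
        if record.get("level") != "debug":
            print(record)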
| 32.07907
| 142
| 0.601421
|
794bc66a361240d621510a3afb17354df6802cc9
| 1,267
|
py
|
Python
|
setup.py
|
Abstract-Horizon/pyros-client
|
f59512708eb84a05f4657b20479200366c0072b6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Abstract-Horizon/pyros-client
|
f59512708eb84a05f4657b20479200366c0072b6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Abstract-Horizon/pyros-client
|
f59512708eb84a05f4657b20479200366c0072b6
|
[
"Apache-2.0"
] | null | null | null |
################################################################################
# Copyright (C) 2016-2020 Abstract Horizon
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License v2.0
# which accompanies this distribution, and is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# Daniel Sendula - initial API and implementation
#
#################################################################################
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="pyros-client",
version="1.0.1",
author="Daniel Sendula",
description="Support library for PyROS",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Abstract-Horizon/pyros-client",
zip_safe=False, # Doesn't create an egg - easier to debug and hack on
packages=['pyros', 'pyroslib'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
install_requires=[
'paho-mqtt',
],
python_requires='>=3.6',
)
| 31.675
| 81
| 0.596685
|
794bc7db4df35273ce8e654d7513dd9d93f62fc8
| 1,400
|
py
|
Python
|
config/asgi.py
|
gp-shubham/django-graphql-boilerplate
|
0d19aa1bd0bac23d993d91942685f75d8bf0a238
|
[
"MIT"
] | null | null | null |
config/asgi.py
|
gp-shubham/django-graphql-boilerplate
|
0d19aa1bd0bac23d993d91942685f75d8bf0a238
|
[
"MIT"
] | null | null | null |
config/asgi.py
|
gp-shubham/django-graphql-boilerplate
|
0d19aa1bd0bac23d993d91942685f75d8bf0a238
|
[
"MIT"
] | 1
|
2021-07-03T05:36:11.000Z
|
2021-07-03T05:36:11.000Z
|
"""
ASGI config for Graphql Project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/asgi/
"""
import os
import sys
from pathlib import Path
from django.core.asgi import get_asgi_application
# This allows easy placement of apps within the interior
# graphql_project directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "graphql_project"))
# If DJANGO_SETTINGS_MODULE is unset, default to the local settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# This application object is used by any ASGI server configured to use this file.
django_application = get_asgi_application()
# Apply ASGI middleware here.
# from helloworld.asgi import HelloWorldApplication
# application = HelloWorldApplication(application)
# Import websocket application here, so apps from django_application are loaded first
from config.websocket import websocket_application # noqa isort:skip
async def application(scope, receive, send):
if scope["type"] == "http":
await django_application(scope, receive, send)
elif scope["type"] == "websocket":
await websocket_application(scope, receive, send)
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
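# Hedged sketch (an assumption, not the project's actual config/websocket.py):
# a minimal `websocket_application` compatible with the dispatcher above. It
# accepts the connection and echoes text frames until the client disconnects.
async def _example_websocket_application(scope, receive, send):
    while True:
        event = await receive()
        if event["type"] == "websocket.connect":
            await send({"type": "websocket.accept"})
        elif event["type"] == "websocket.receive":
            await send({"type": "websocket.send", "text": event.get("text", "")})
        elif event["type"] == "websocket.disconnect":
            break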
| 34.146341
| 85
| 0.774286
|
794bc8c30978fd25bc135bcdf14c330fec9bfa97
| 801
|
py
|
Python
|
tests/providers/local/conftest.py
|
inmanta/terraform
|
d60855493e75d4eb570c4a357e79584153e51833
|
[
"Apache-2.0"
] | 1
|
2021-12-23T13:51:39.000Z
|
2021-12-23T13:51:39.000Z
|
tests/providers/local/conftest.py
|
inmanta/terraform
|
d60855493e75d4eb570c4a357e79584153e51833
|
[
"Apache-2.0"
] | 11
|
2022-01-04T14:03:10.000Z
|
2022-03-16T14:50:11.000Z
|
tests/providers/local/conftest.py
|
inmanta/terraform
|
d60855493e75d4eb570c4a357e79584153e51833
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2021 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import pytest
from providers.local.helpers.local_provider import LocalProvider
@pytest.fixture(scope="package")
def provider() -> LocalProvider:
return LocalProvider()
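# Hedged sketch (hypothetical, not part of this conftest): a test module in the
# same package receives the fixture above simply by naming the argument, and the
# package scope means all such tests share a single LocalProvider instance.
#
# def test_provider_is_local(provider: LocalProvider) -> None:
#     assert isinstance(provider, LocalProvider)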
| 32.04
| 76
| 0.742821
|
794bc8efbba73ea85c13a1a718b28ddd3837ab90
| 5,832
|
py
|
Python
|
dask_image/ndfilters/_utils.py
|
martinschorb/dask-image
|
03242d151db30b4adce3d6c7f43c05d1e7580bb5
|
[
"BSD-3-Clause"
] | null | null | null |
dask_image/ndfilters/_utils.py
|
martinschorb/dask-image
|
03242d151db30b4adce3d6c7f43c05d1e7580bb5
|
[
"BSD-3-Clause"
] | null | null | null |
dask_image/ndfilters/_utils.py
|
martinschorb/dask-image
|
03242d151db30b4adce3d6c7f43c05d1e7580bb5
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division
import collections
import inspect
import numbers
import re
import numpy
def _get_docstring(func):
# Drop the output parameter from the docstring.
split_doc_params = lambda s: re.subn( # noqa: E731
"( [A-Za-z]+ : )", "\0\\1", s)[0].split("\0")
drop_doc_param = lambda s: not s.startswith(" output : ") # noqa: E731
func_doc = "" if func.__doc__ is None else func.__doc__
cleaned_docstring = "".join([
l for l in split_doc_params(func_doc) if drop_doc_param(l) # noqa: E741, E501
])
cleaned_docstring = cleaned_docstring.replace('input', 'image')
cleaned_docstring = cleaned_docstring.replace('labels', 'label_image')
cleaned_docstring = cleaned_docstring.split('Examples')[0].strip()
docstring = """
Wrapped copy of "{mod_name}.{func_name}"
Excludes the output parameter as it would not work with Dask arrays.
Original docstring:
{doc}
""".format(
mod_name=inspect.getmodule(func).__name__,
func_name=func.__name__,
doc=cleaned_docstring,
)
return docstring
def _update_wrapper(func):
def _updater(wrapper):
wrapper.__name__ = func.__name__
wrapper.__doc__ = _get_docstring(func)
return wrapper
return _updater
def _get_depth_boundary(ndim, depth, boundary=None):
strlike = (bytes, str)
if not isinstance(ndim, numbers.Integral):
raise TypeError("Expected integer value for `ndim`.")
if ndim <= 0:
raise ValueError("Expected positive value for `ndim`.")
if isinstance(depth, numbers.Number):
depth = ndim * (depth,)
if not isinstance(depth, collections.Sized):
raise TypeError("Unexpected type for `depth`.")
if len(depth) != ndim:
raise ValueError("Expected `depth` to have a length equal to `ndim`.")
if isinstance(depth, collections.Sequence):
depth = dict(zip(range(ndim), depth))
if not isinstance(depth, collections.Mapping):
raise TypeError("Unexpected type for `depth`.")
if not all(map(lambda d: isinstance(d, numbers.Integral), depth.values())):
raise TypeError("Expected integer values for `depth`.")
if not all(map(lambda d: d >= 0, depth.values())):
raise ValueError("Expected positive semidefinite values for `depth`.")
depth = dict([(a, int(d)) for a, d in depth.items()])
if (boundary is None) or isinstance(boundary, strlike):
boundary = ndim * (boundary,)
if not isinstance(boundary, collections.Sized):
raise TypeError("Unexpected type for `boundary`.")
if len(boundary) != ndim:
raise ValueError(
"Expected `boundary` to have a length equal to `ndim`."
)
if isinstance(boundary, collections.Sequence):
boundary = dict(zip(range(ndim), boundary))
if not isinstance(boundary, collections.Mapping):
raise TypeError("Unexpected type for `boundary`.")
type_check = lambda b: (b is None) or isinstance(b, strlike) # noqa: E731
if not all(map(type_check, boundary.values())):
raise TypeError("Expected string-like values for `boundary`.")
return depth, boundary
def _get_size(ndim, size):
if not isinstance(ndim, numbers.Integral):
raise TypeError("The ndim must be of integral type.")
if isinstance(size, numbers.Number):
size = ndim * (size,)
size = numpy.array(size)
if size.ndim != 1:
raise RuntimeError("The size must have only one dimension.")
if len(size) != ndim:
raise RuntimeError(
"The size must have a length equal to the number of dimensions."
)
if not issubclass(size.dtype.type, numbers.Integral):
raise TypeError("The size must be of integral type.")
size = tuple(size)
return size
def _get_origin(size, origin=0):
size = numpy.array(size)
ndim = len(size)
if isinstance(origin, numbers.Number):
origin = ndim * (origin,)
origin = numpy.array(origin)
if not issubclass(origin.dtype.type, numbers.Integral):
raise TypeError("The origin must be of integral type.")
# Validate dimensions.
if origin.ndim != 1:
raise RuntimeError("The origin must have only one dimension.")
if len(origin) != ndim:
raise RuntimeError(
"The origin must have the same length as the number of dimensions"
" as the array being filtered."
)
# Validate origin is bounded.
if not (origin < ((size + 1) // 2)).all():
raise ValueError("The origin must be within the footprint.")
origin = tuple(origin)
return origin
def _get_depth(size, origin=0):
origin = numpy.array(_get_origin(size, origin))
size = numpy.array(size)
half_size = size // 2
depth = half_size + abs(origin)
depth = tuple(depth)
return depth
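# Illustrative sketch (assumed inputs, not part of dask-image): for a 5x3
# footprint the helpers above give
#   _get_origin((5, 3))        -> (0, 0)
#   _get_depth((5, 3))         -> (2, 1)   # half the size along each axis
#   _get_depth((5, 3), (1, 0)) -> (3, 1)   # |origin| widens the halo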
def _get_footprint(ndim, size=None, footprint=None):
# Verify that we only got size or footprint.
if size is None and footprint is None:
raise RuntimeError("Must provide either size or footprint.")
if size is not None and footprint is not None:
raise RuntimeError("Provide either size or footprint, but not both.")
# Get a footprint based on the size.
if size is not None:
size = _get_size(ndim, size)
footprint = numpy.ones(size, dtype=bool)
# Validate the footprint.
if footprint.ndim != ndim:
raise RuntimeError(
"The footprint must have the same number of dimensions as"
" the array being filtered."
)
if footprint.size == 0:
raise RuntimeError("The footprint must have only non-zero dimensions.")
# Convert to Boolean.
footprint = (footprint != 0)
return footprint
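# Illustrative sketch (assumed inputs, not part of dask-image): passing only a
# size builds a boolean footprint of ones, e.g. _get_footprint(2, size=3) is a
# 3x3 array of True, while passing both size and footprint raises RuntimeError.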
| 30.857143
| 86
| 0.646433
|
794bc9d02263144a81e2a4e55a587bdfe901de99
| 465
|
py
|
Python
|
configy/helpers.py
|
grigi/configy
|
86f6bdd3164f39e83e82e3527f5863032c0ed1e7
|
[
"MIT"
] | 3
|
2015-09-18T13:06:04.000Z
|
2021-08-10T16:37:21.000Z
|
configy/helpers.py
|
grigi/configy
|
86f6bdd3164f39e83e82e3527f5863032c0ed1e7
|
[
"MIT"
] | null | null | null |
configy/helpers.py
|
grigi/configy
|
86f6bdd3164f39e83e82e3527f5863032c0ed1e7
|
[
"MIT"
] | null | null | null |
'''
Contains helper tools for handling configuration
'''
def to_bool(val, default=False):
'''
Converts string to bool
True
'y', 'yes', '1', 't','true'
False
'n', 'no', '0', 'f', 'false'
else
defaults to default (False, by default)
'''
val = str(val).lower()
if val in ['y', 'yes', '1', 't', 'true']:
return True
if val in ['n', 'no', '0', 'f', 'false']:
return False
return default
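# Illustrative examples (not part of the module):
#   to_bool('Yes') -> True
#   to_bool('0') -> False
#   to_bool('maybe', default=True) -> True  # unrecognised values use the default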
| 20.217391
| 48
| 0.503226
|
794bca0043370333f5235f241ddb0a49ee77c0d9
| 588
|
py
|
Python
|
crawler/pipelines.py
|
lisathung/Manga-Crawler
|
feb606ad785bfb5c75979c16511a54c94dd710aa
|
[
"MIT"
] | null | null | null |
crawler/pipelines.py
|
lisathung/Manga-Crawler
|
feb606ad785bfb5c75979c16511a54c94dd710aa
|
[
"MIT"
] | null | null | null |
crawler/pipelines.py
|
lisathung/Manga-Crawler
|
feb606ad785bfb5c75979c16511a54c94dd710aa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
from scrapy.pipelines.images import ImagesPipeline
class CustomImageNamePipeline(ImagesPipeline):
def get_media_requests(self, item, info):
        return [scrapy.Request(x, meta={'image_name': item["image_name"]})
for x in item.get('image_urls', [])]
def file_path(self, request, response=None, info=None):
return '%s.jpg' % request.meta['image_name']
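# Hedged configuration sketch (assumed values, not part of this module): to use
# the pipeline above, settings.py needs roughly the following, and each yielded
# item must carry both `image_urls` and an `image_name` field.
#
# ITEM_PIPELINES = {"crawler.pipelines.CustomImageNamePipeline": 1}
# IMAGES_STORE = "images"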
| 34.588235
| 67
| 0.70068
|
794bcaa01121b38f463ad9728cf423b8422f636a
| 633
|
py
|
Python
|
examples/ch03/exercises_ch03/ex3_9.py
|
edson-gomes/Intro-to-Python
|
00a2f549916616b0f2036401573e35d66317f998
|
[
"MIT"
] | null | null | null |
examples/ch03/exercises_ch03/ex3_9.py
|
edson-gomes/Intro-to-Python
|
00a2f549916616b0f2036401573e35d66317f998
|
[
"MIT"
] | null | null | null |
examples/ch03/exercises_ch03/ex3_9.py
|
edson-gomes/Intro-to-Python
|
00a2f549916616b0f2036401573e35d66317f998
|
[
"MIT"
] | null | null | null |
"""(Separando os dígitos em um inteiro) No exercício 2.11, você escreveu um script para separar um inteiro de 5 dígitos em seus dígitos individuais e mostrá-los. Reimplemente seu script utilizando um laço que, em cada iteração,'pegue' um dígito (da esquerda para a direita) utilizando os operadores // e % para, então, mostrar esse dígito."""
numero = int(input('Enter a number between 10000 and 99999: '))
divisor = 0
if (10000 <= numero <= 99999):
for i in range(5):
divisor = int(10000 / 10 ** i)
print(numero // divisor, end=' ')
numero %= divisor
else:
    print('The number entered is invalid.')
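# Illustrative trace (not part of the exercise): for the input 12345 the loop
# uses the divisors 10000, 1000, 100, 10, 1 and prints "1 2 3 4 5 ".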
| 48.692308
| 342
| 0.685624
|
794bcb106b0790928259187f7300017d86bc5ba9
| 93,695
|
py
|
Python
|
kowalski/api.py
|
dmitryduev/kowalski-dev
|
0d568dff8e3f25ed522127584a22dfcef08420d8
|
[
"MIT"
] | null | null | null |
kowalski/api.py
|
dmitryduev/kowalski-dev
|
0d568dff8e3f25ed522127584a22dfcef08420d8
|
[
"MIT"
] | 5
|
2020-05-12T07:29:40.000Z
|
2020-09-28T19:17:23.000Z
|
kowalski/api.py
|
dmitryduev/kowalski-dev
|
0d568dff8e3f25ed522127584a22dfcef08420d8
|
[
"MIT"
] | null | null | null |
from abc import ABC
from aiohttp import web, ClientSession
from aiohttp_swagger3 import SwaggerDocs, ReDocUiSettings
from astropy.io import fits
from astropy.visualization import (
AsymmetricPercentileInterval,
MinMaxInterval,
ZScaleInterval,
LinearStretch,
LogStretch,
AsinhStretch,
SqrtStretch,
ImageNormalize,
)
from ast import literal_eval
from bson.json_util import dumps, loads
import datetime
import gzip
import io
import jwt
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
from middlewares import auth_middleware, error_middleware, auth_required, admin_required
from motor.motor_asyncio import AsyncIOMotorClient
from multidict import MultiDict
import numpy as np
from odmantic import AIOEngine, EmbeddedModel, Field, Model
import pathlib
from pydantic import root_validator
from sshtunnel import SSHTunnelForwarder
import traceback
from typing import List, Mapping, Optional, Sequence, Union
from utils import (
add_admin,
check_password_hash,
generate_password_hash,
init_db,
load_config,
log,
radec_str2geojson,
uid,
)
import uvloop
config = load_config(config_file="config.yaml")["kowalski"]
class Handler:
@staticmethod
def success(message: str = "", data: Optional[Mapping] = None):
response = {"status": "success", "message": message}
if data is not None:
response["data"] = data
return web.json_response(response, status=200, dumps=dumps)
@staticmethod
def error(message: str = "", status: int = 400):
return web.json_response({"status": "error", "message": message}, status=status)
""" authentication and authorization """
def is_admin(username: str):
"""Check if user is admin
note: may want to change the logic to allow multiple users to be admins
:param username:
"""
return username == config["server"]["admin_username"]
# @routes.post('/api/auth')
async def auth_post(request: web.Request) -> web.Response:
"""
Authentication
---
summary: Get access token
tags:
- auth
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- username
- password
properties:
username:
type: string
password:
type: string
example:
username: user
password: PwD
responses:
'200':
description: access token
content:
application/json:
schema:
type: object
required:
- status
- token
properties:
status:
type: string
token:
type: string
example:
status: success
token: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjoiYWRtaW4iLCJleHAiOjE1OTE1NjE5MTl9.2emEp9EKf154WLJQwulofvXhTX7L0s9Y2-6_xI0Gx8w
'400':
description: username or password missing in requestBody
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
message:
type: string
examples:
missing username:
value:
status: error
message: missing username
missing password:
value:
status: error
message: missing password
'401':
description: bad credentials
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
message:
type: string
example:
status: error
message: wrong credentials
'500':
description: internal/unknown cause of failure
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
message:
type: string
example:
status: error
message: auth failed
"""
try:
try:
post_data = await request.json()
except AttributeError:
post_data = await request.post()
# must contain 'username' and 'password'
if ("username" not in post_data) or (len(post_data["username"]) == 0):
return web.json_response(
{"status": "error", "message": "missing username"}, status=400
)
if ("password" not in post_data) or (len(post_data["password"]) == 0):
return web.json_response(
{"status": "error", "message": "missing password"}, status=400
)
# connecting from penquins: check penquins version
if "penquins.__version__" in post_data:
penquins_version = post_data["penquins.__version__"]
if penquins_version not in config["misc"]["supported_penquins_versions"]:
return web.json_response(
{
"status": "error",
"message": "unsupported version of penquins: "
f'{post_data["penquins.__version__"]}',
},
status=400,
)
username = str(post_data["username"])
password = str(post_data["password"])
try:
# user exists and passwords match?
select = await request.app["mongo"].users.find_one({"_id": username})
if select is not None and check_password_hash(select["password"], password):
payload = {
"user_id": username,
"created_at": datetime.datetime.utcnow().strftime(
"%Y-%m-%dT%H:%M:%S.%f+00:00"
),
}
# optionally set expiration date
if request.app["JWT"]["JWT_EXP_DELTA_SECONDS"] is not None:
payload["exp"] = (
datetime.datetime.utcnow()
+ datetime.timedelta(
seconds=request.app["JWT"]["JWT_EXP_DELTA_SECONDS"]
)
).strftime("%Y-%m-%dT%H:%M:%S.%f+00:00")
jwt_token = jwt.encode(
payload=payload,
key=request.app["JWT"]["JWT_SECRET"],
algorithm=request.app["JWT"]["JWT_ALGORITHM"],
)
return web.json_response({"status": "success", "token": jwt_token})
else:
return web.json_response(
{"status": "error", "message": "wrong credentials"}, status=401
)
except Exception as _e:
log(_e)
_err = traceback.format_exc()
log(_err)
return web.json_response(
{"status": "error", "message": "wrong credentials"}, status=401
)
except Exception as _e:
log(_e)
_err = traceback.format_exc()
log(_err)
return web.json_response(
{"status": "error", "message": "auth failed"}, status=500
)
# @routes.get('/', name='ping', allow_head=False)
@auth_required
async def ping(request: web.Request) -> web.Response:
"""
ping/pong
:param request:
:return:
---
summary: ping/pong
tags:
- root
responses:
'200':
description: greetings to an authorized user
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
message:
type: string
example:
status: success
message: greetings from Kowalski!
"""
return web.json_response(
{"status": "success", "message": "greetings from Kowalski!"}, status=200
)
""" users """
# @routes.post('/api/users')
@admin_required
async def users_post(request: web.Request) -> web.Response:
"""
Add new user
:return:
---
summary: Add new user
tags:
- users
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- username
- password
properties:
username:
type: string
password:
type: string
email:
type: string
permissions:
type: string
example:
username: noone
password: nopas!
email: user@caltech.edu
responses:
'200':
description: added user
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [success]
message:
type: string
example:
status: success
message: added user noone
'400':
description: username or password missing in requestBody
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: username and password must be set
'500':
description: internal/unknown cause of failure
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: "failed to add user: <error message>"
"""
try:
_data = await request.json()
username = _data.get("username", "")
password = _data.get("password", "")
email = _data.get("email", None)
permissions = _data.get("permissions", dict())
if len(username) == 0 or len(password) == 0:
return web.json_response(
{"status": "error", "message": "username and password must be set"},
status=400,
)
# add user to coll_usr collection:
await request.app["mongo"].users.insert_one(
{
"_id": username,
"email": email,
"password": generate_password_hash(password),
"permissions": permissions,
"last_modified": datetime.datetime.now(),
}
)
return web.json_response(
{"status": "success", "message": f"added user {username}"}, status=200
)
except Exception as _e:
return web.json_response(
{"status": "error", "message": f"failed to add user: {_e}"}, status=500
)
# @routes.delete('/api/users/{username}')
@admin_required
async def users_delete(request: web.Request) -> web.Response:
"""
Remove user
:return:
---
summary: Remove user
tags:
- users
responses:
'200':
description: removed user
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [success]
message:
type: string
example:
status: success
message: removed user noone
'400':
description: username not found or is superuser
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
examples:
attempting superuser removal:
value:
status: error
message: cannot remove the superuser!
username not found:
value:
status: error
message: user noone not found
'500':
description: internal/unknown cause of failure
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: "failed to remove user: <error message>"
"""
try:
# get query params
username = request.match_info["username"]
if username == config["server"]["admin_username"]:
return web.json_response(
{"status": "error", "message": "cannot remove the superuser!"},
status=400,
)
# try to remove the user:
r = await request.app["mongo"].users.delete_one({"_id": username})
if r.deleted_count != 0:
return web.json_response(
{"status": "success", "message": f"removed user {username}"}, status=200
)
else:
return web.json_response(
{"status": "error", "message": f"user {username} not found"}, status=400
)
except Exception as _e:
return web.json_response(
{"status": "error", "message": f"failed to remove user: {_e}"}, status=500
)
# @routes.put('/api/users/{username}')
@admin_required
async def users_put(request: web.Request) -> web.Response:
"""
Edit user data
:return:
---
summary: Edit user data
tags:
- users
requestBody:
required: true
content:
application/json:
schema:
type: object
properties:
username:
type: string
password:
type: string
example:
username: noone
responses:
'200':
description: edited user
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [success]
message:
type: string
example:
status: success
message: edited user noone
'400':
description: cannot rename superuser
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
examples:
attempting superuser renaming:
value:
status: error
message: cannot rename the superuser!
'500':
description: internal/unknown cause of failure
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: "failed to remove user: <error message>"
"""
try:
_data = await request.json()
_id = request.match_info["username"]
username = _data.get("username", "")
password = _data.get("password", "")
if (
_id == config["server"]["admin_username"]
and username != config["server"]["admin_username"]
):
return web.json_response(
{"status": "error", "message": "cannot rename the superuser!"},
status=400,
)
# change username:
if (_id != username) and (len(username) > 0):
select = await request.app["mongo"].users.find_one({"_id": _id})
select["_id"] = username
await request.app["mongo"].users.insert_one(select)
await request.app["mongo"].users.delete_one({"_id": _id})
# change password:
if len(password) != 0:
await request.app["mongo"].users.update_one(
{"_id": username},
{
"$set": {"password": generate_password_hash(password)},
"$currentDate": {"last_modified": True},
},
)
return web.json_response(
{"status": "success", "message": f"edited user {_id}"}, status=200
)
except Exception as _e:
return web.json_response(
{"status": "error", "message": f"failed to edit user: {_e}"}, status=500
)
""" queries """
SYSTEM_COLLECTIONS = ("users", "filters", "queries")
QUERY_TYPES = (
"cone_search",
"count_documents",
"estimated_document_count",
"find",
"find_one",
"aggregate",
"info",
"near",
)
INFO_COMMANDS = (
"catalog_names",
"catalog_info",
"index_info",
"db_info",
)
FORBIDDEN_STAGES_QUERIES = {"$unionWith", "$out", "$merge"}
FORBIDDEN_STAGES_FILTERS = {"$lookup", "$unionWith", "$out", "$merge"}
ANGULAR_UNITS = ("arcsec", "arcmin", "deg", "rad")
class Query(Model, ABC):
"""Data model for queries for streamlined validation"""
query_type: str
query: Mapping
kwargs: dict = dict()
user: str
@staticmethod
def construct_filter(query: Mapping):
"""Check validity of query filter specs and preprocess if necessary
:param query: Mapping containing filter specification either as a Mapping or a literal_eval'uable str
:return:
"""
catalog_filter = query.get("filter")
if not isinstance(catalog_filter, (str, Mapping)):
raise ValueError("Unsupported filter specification")
if isinstance(catalog_filter, str):
# passed string? evaluate:
catalog_filter = literal_eval(catalog_filter.strip())
return catalog_filter
@staticmethod
def construct_projection(query):
"""Check validity of query projection specs and preprocess if necessary
:param query: Mapping containing projection specification either as a Mapping or a literal_eval'uable str
:return:
"""
catalog_projection = query.get("projection")
if catalog_projection is not None:
if not isinstance(catalog_projection, (str, Mapping)):
raise ValueError("Unsupported projection specification")
if isinstance(catalog_projection, str):
# passed string? evaluate:
catalog_projection = literal_eval(catalog_projection.strip())
else:
catalog_projection = dict()
return catalog_projection
@staticmethod
def angle_to_rad(angle: Union[float, int], units: str) -> float:
"""Convert angle to rad
        :param angle: angle value, expressed in `units`
        :param units: str, one of ["arcsec", "arcmin", "deg", "rad"]
:return:
"""
angle_rad = float(angle)
if units not in ANGULAR_UNITS:
raise Exception(f"Angular units not in {ANGULAR_UNITS}")
if units == "arcsec":
angle_rad *= np.pi / 180 / 3600
elif units == "arcmin":
angle_rad *= np.pi / 180 / 60
elif units == "deg":
angle_rad *= np.pi / 180
return angle_rad
@staticmethod
def parse_object_coordinates(coordinates: Union[str, Sequence, Mapping]):
"""
Parse object coordinates in degrees/HMS_DMS
:param coordinates: object coordinates in decimal degrees
or strings "HH:MM:SS.SSS..." / "DD:MM:SS.SSS..."
or strings "HHhMMmSS.SSS...s" / "DDdMMmSS.SSS...s"
Options:
- str that is parsed either as ra dec for a single source or stringified Sequence, as below
- Sequence, such as [(ra1, dec1), (ra2, dec2), ..]
- Mapping, such as {'object_name': (ra1, dec1), ...}
:return:
"""
if isinstance(coordinates, str):
coordinates = coordinates.strip()
# comb coordinates for a single source:
if not coordinates.startswith(("[", "(", "{")):
a, b = coordinates.split()
if ("s" in coordinates) or (":" in coordinates):
coordinates = f"[('{a}', '{b}')]"
else:
coordinates = f"[({a}, {b})]"
coordinates = literal_eval(coordinates)
if isinstance(coordinates, Sequence):
object_coordinates = coordinates
# use coords as source ids replacing dots to keep Mongo happy:
object_names = [
str(obj_crd).replace(".", "_") for obj_crd in object_coordinates
]
elif isinstance(coordinates, Mapping):
object_names, object_coordinates = zip(*coordinates.items())
object_names = list(map(str, object_names))
object_names = [
object_name.replace(".", "_") for object_name in object_names
]
else:
raise ValueError("Unsupported object coordinate specs")
return object_names, object_coordinates
@staticmethod
def validate_kwargs(kwargs: Mapping, known_kwargs: Sequence) -> dict:
"""Allow only known kwargs:
check that kwargs.keys() are in known_kwargs and ditch those that are not
:param kwargs:
:param known_kwargs:
:return:
"""
return {
kk: vv
for kk, vv in kwargs.items()
if kk in [*known_kwargs, "max_time_ms", "comment"]
}
@root_validator
def check_query(cls, values):
"""Validate query and preprocess it if necessary"""
query_type = values.get("query_type")
query = values.get("query")
kwargs = values.get("kwargs")
user = values.get("user")
if query_type not in QUERY_TYPES:
raise KeyError(f"query_type {query_type} not in {str(QUERY_TYPES)}")
# this way, username will be propagated into mongodb's logs
kwargs["comment"] = user
if query.get("catalog") is not None:
catalog = query.get("catalog").strip()
if catalog in SYSTEM_COLLECTIONS and not is_admin(user):
raise ValueError("Protected collection")
if query.get("catalogs") is not None:
catalogs = query.get("catalogs")
for catalog in catalogs:
if catalog in SYSTEM_COLLECTIONS and not is_admin(user):
raise ValueError("Protected collection")
if query_type == "aggregate":
pipeline = query.get("pipeline")
if (not isinstance(pipeline, str)) and (not isinstance(pipeline, Sequence)):
raise ValueError("Unsupported pipeline specification")
if isinstance(pipeline, str):
# passed string? evaluate:
pipeline = literal_eval(pipeline.strip())
if len(pipeline) == 0:
raise ValueError("Pipeline must contain at least one stage")
stages = set([list(stage.keys())[0] for stage in pipeline])
if len(stages.intersection(FORBIDDEN_STAGES_QUERIES)):
raise ValueError(
f"Pipeline uses forbidden stages: {str(stages.intersection(FORBIDDEN_STAGES_QUERIES))}"
)
kwargs = cls.validate_kwargs(
kwargs=kwargs, known_kwargs=("allowDiskUse", "batchSize")
)
values["kwargs"] = kwargs
values["query"]["pipeline"] = pipeline
elif query_type == "cone_search":
# apply filter before positional query?
filter_first = kwargs.get("filter_first", False)
# cone search radius:
cone_search_radius = cls.angle_to_rad(
angle=query["object_coordinates"]["cone_search_radius"],
units=query["object_coordinates"]["cone_search_unit"],
)
object_names, object_coordinates = cls.parse_object_coordinates(
query["object_coordinates"]["radec"]
)
# reshuffle query to ease execution on Mongo side
query_preprocessed = dict()
for catalog in query["catalogs"]:
catalog = catalog.strip()
query_preprocessed[catalog] = dict()
# specifying filter is optional in this case
if "filter" in query["catalogs"][catalog]:
catalog_filter = cls.construct_filter(query["catalogs"][catalog])
else:
catalog_filter = dict()
# construct projection, which is always optional
catalog_projection = cls.construct_projection(
query["catalogs"][catalog]
)
# parse coordinate list
for oi, obj_crd in enumerate(object_coordinates):
# convert ra/dec into GeoJSON-friendly format
_ra, _dec = radec_str2geojson(*obj_crd)
object_position_query = dict()
object_position_query["coordinates.radec_geojson"] = {
"$geoWithin": {
"$centerSphere": [[_ra, _dec], cone_search_radius]
}
}
# use stringified object coordinates as dict keys and merge dicts with cat/obj queries:
if not filter_first:
query_preprocessed[catalog][object_names[oi]] = (
{**object_position_query, **catalog_filter},
{**catalog_projection},
)
else:
# place the filter expression in front of the positional query?
# this may be useful if an index exists to speed up the query
query_preprocessed[catalog][object_names[oi]] = (
{**catalog_filter, **object_position_query},
{**catalog_projection},
)
kwargs = cls.validate_kwargs(
kwargs=kwargs, known_kwargs=("skip", "hint", "limit", "sort")
)
values["kwargs"] = kwargs
values["query"] = query_preprocessed
elif query_type == "count_documents":
values["query"]["filter"] = cls.construct_filter(query)
kwargs = cls.validate_kwargs(
kwargs=kwargs, known_kwargs=("skip", "hint", "limit")
)
values["kwargs"] = kwargs
elif query_type == "find":
# construct filter
values["query"]["filter"] = cls.construct_filter(query)
# construct projection
values["query"]["projection"] = cls.construct_projection(query)
kwargs = cls.validate_kwargs(
kwargs=kwargs, known_kwargs=("skip", "hint", "limit", "sort")
)
values["kwargs"] = kwargs
elif query_type == "find_one":
values["query"]["filter"] = cls.construct_filter(query)
kwargs = cls.validate_kwargs(
kwargs=kwargs, known_kwargs=("skip", "hint", "limit", "sort")
)
values["kwargs"] = kwargs
elif query_type == "info":
command = query.get("command")
if command not in INFO_COMMANDS:
raise KeyError(f"command {command} not in {str(INFO_COMMANDS)}")
elif query_type == "near":
# apply filter before positional query?
filter_first = kwargs.get("filter_first", False)
min_distance = cls.angle_to_rad(
angle=query.get("min_distance", 0),
units=query.get("distance_units", "rad"),
)
max_distance = cls.angle_to_rad(
angle=query.get("max_distance", np.pi / 180 / 60), # default to 1'
units=query.get("distance_units", "rad"),
)
object_names, object_coordinates = cls.parse_object_coordinates(
query["radec"]
)
# reshuffle query to ease execution on Mongo side
query_preprocessed = dict()
for catalog in query["catalogs"]:
catalog = catalog.strip()
query_preprocessed[catalog] = dict()
# specifying filter is optional in this case
if "filter" in query["catalogs"][catalog]:
catalog_filter = cls.construct_filter(query["catalogs"][catalog])
else:
catalog_filter = dict()
# construct projection, which is always optional
catalog_projection = cls.construct_projection(
query["catalogs"][catalog]
)
# parse coordinate list
for oi, obj_crd in enumerate(object_coordinates):
# convert ra/dec into GeoJSON-friendly format
_ra, _dec = radec_str2geojson(*obj_crd)
object_position_query = dict()
object_position_query["coordinates.radec_geojson"] = {
"$nearSphere": [_ra, _dec],
"$minDistance": min_distance,
"$maxDistance": max_distance,
}
# use stringified object coordinates as dict keys and merge dicts with cat/obj queries:
if not filter_first:
query_preprocessed[catalog][object_names[oi]] = (
{**object_position_query, **catalog_filter},
{**catalog_projection},
)
else:
# place the filter expression in front of the positional query?
# this may be useful if an index exists to speed up the query
query_preprocessed[catalog][object_names[oi]] = (
{**catalog_filter, **object_position_query},
{**catalog_projection},
)
kwargs = cls.validate_kwargs(
kwargs=kwargs, known_kwargs=("skip", "hint", "limit", "sort")
)
values["kwargs"] = kwargs
values["query"] = query_preprocessed
return values
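# Hedged usage sketch (not part of Kowalski): validating a request body by
# instantiating the Query model directly, as the handler below does with
# Query(**query_spec). The catalog name and filter are placeholder assumptions;
# invalid specifications surface as pydantic validation errors.
def _example_validate_find_query(username: str) -> Query:
    return Query(
        query_type="find",
        query={
            "catalog": "ZTF_alerts",
            "filter": {"candidate.drb": {"$gt": 0.9}},
            "projection": {"_id": 0, "candid": 1},
        },
        kwargs={"limit": 2},
        user=username,
    )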
class QueryHandler(Handler):
"""Handlers to work with user queries"""
@auth_required
async def post(self, request: web.Request) -> web.Response:
"""Query Kowalski
---
summary: Query Kowalski
tags:
- queries
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- query_type
- query
properties:
query_type:
type: string
enum:
- aggregate
- cone_search
- count_documents
- estimated_document_count
- find
- find_one
- info
- near
query:
type: object
description: query. depends on query_type, see examples
oneOf:
- $ref: "#/components/schemas/aggregate"
- $ref: "#/components/schemas/cone_search"
- $ref: "#/components/schemas/count_documents"
- $ref: "#/components/schemas/estimated_document_count"
- $ref: "#/components/schemas/find"
- $ref: "#/components/schemas/find_one"
- $ref: "#/components/schemas/info"
- $ref: "#/components/schemas/near"
kwargs:
type: object
description: additional parameters. depends on query_type, see examples
oneOf:
- $ref: "#/components/schemas/aggregate_kwargs"
- $ref: "#/components/schemas/cone_search_kwargs"
- $ref: "#/components/schemas/count_documents_kwargs"
- $ref: "#/components/schemas/estimated_document_count_kwargs"
- $ref: "#/components/schemas/find_kwargs"
- $ref: "#/components/schemas/find_one_kwargs"
- $ref: "#/components/schemas/info_kwargs"
- $ref: "#/components/schemas/near_kwargs"
examples:
aggregate:
value:
"query_type": "aggregate"
"query": {
"catalog": "ZTF_alerts",
"pipeline": [
{'$match': {'candid': 1105522281015015000}},
{"$project": {"_id": 0, "candid": 1, "candidate.drb": 1}}
],
}
"kwargs": {
"max_time_ms": 2000
}
cone_search:
value:
"query_type": "cone_search"
"query": {
"object_coordinates": {
"cone_search_radius": 2,
"cone_search_unit": "arcsec",
"radec": {"object1": [71.6577756, -10.2263957]}
},
"catalogs": {
"ZTF_alerts": {
"filter": {},
"projection": {"_id": 0, "candid": 1, "objectId": 1}
}
}
}
"kwargs": {
"filter_first": False
}
find:
value:
"query_type": "find"
"query": {
"catalog": "ZTF_alerts",
"filter": {'candidate.drb': {"$gt": 0.9}},
"projection": {"_id": 0, "candid": 1, "candidate.drb": 1},
}
"kwargs": {
"sort": [["$natural", -1]],
"limit": 2
}
find_one:
value:
"query_type": "find_one"
"query": {
"catalog": "ZTF_alerts",
"filter": {}
}
info:
value:
"query_type": "info"
"query": {
"command": "catalog_names"
}
count_documents:
value:
"query_type": "count_documents"
"query": {
"catalog": "ZTF_alerts",
"filter": {"objectId": "ZTF20aakyoez"}
}
estimated_document_count:
value:
"query_type": "estimated_document_count"
"query": {
"catalog": "ZTF_alerts"
}
near:
value:
"query_type": "near"
"query": {
"max_distance": 30,
"distance_units": "arcsec",
"radec": {"object1": [71.6577756, -10.2263957]},
"catalogs": {
"ZTF_alerts": {
"filter": {},
"projection": {"_id": 0, "candid": 1, "objectId": 1}
}
}
}
"kwargs": {
"limit": 1
}
responses:
'200':
description: query result
content:
application/json:
schema:
type: object
required:
- user
- message
- status
properties:
status:
type: string
enum: [success]
message:
type: string
user:
type: string
kwargs:
type: object
data:
oneOf:
- type: number
- type: array
- type: object
examples:
aggregate:
value:
"status": "success"
"message": "Successfully executed query"
"data": [
{
"candid": 1105522281015015000,
"candidate": {
"drb": 0.999999463558197
}
}
]
cone_search:
value:
"status": "success"
"message": "Successfully executed query"
"data": {
"ZTF_alerts": {
"object1": [
{"objectId": "ZTF20aaelulu",
"candid": 1105522281015015000}
]
}
}
find:
value:
"status": "success"
"message": "Successfully executed query"
"data": [
{
"candid": 1127561444715015009,
"candidate": {
"drb": 0.9999618530273438
}
},
{
"candid": 1127107111615015007,
"candidate": {
"drb": 0.9986417293548584
}
}
]
info:
value:
"status": "success"
"message": "Successfully executed query"
"data": [
"ZTF_alerts_aux",
"ZTF_alerts"
]
count_documents:
value:
"status": "success"
"message": "Successfully executed query"
"data": 1
estimated_document_count:
value:
"status": "success"
"message": "Successfully executed query"
"data": 11
near:
value:
"status": "success"
"message": "Successfully executed query"
"data": {
"ZTF_alerts": {
"object1": [
{"objectId": "ZTF20aaelulu",
"candid": 1105522281015015000}
]
}
}
'400':
description: query parsing/execution error
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
examples:
unknown query type:
value:
status: error
message: "query_type not in ('cone_search', 'count_documents', 'estimated_document_count', 'find', 'find_one', 'aggregate', 'info')"
random error:
value:
status: error
message: "failure: <error message>"
"""
# allow both .json() and .post():
try:
query_spec = await request.json()
except AttributeError:
query_spec = await request.post()
# this is set by auth_middleware
query_spec["user"] = request.user
# validate and preprocess
query = Query(**query_spec)
# by default, long-running queries will be killed after config['misc']['max_time_ms'] ms
max_time_ms = int(
query.kwargs.get("max_time_ms", config["misc"]["max_time_ms"])
)
if max_time_ms < 1:
raise ValueError("max_time_ms must be int >= 1")
query.kwargs.pop("max_time_ms", None)
# execute query, depending on query.query_type
data = dict()
if query.query_type in ("cone_search", "near"):
# iterate over catalogs
for catalog in query.query:
data[catalog] = dict()
# iterate over objects:
for obj in query.query[catalog]:
# project?
if len(query.query[catalog][obj][1]) > 0:
cursor = request.app["mongo"][catalog].find(
query.query[catalog][obj][0],
query.query[catalog][obj][1],
max_time_ms=max_time_ms,
**query.kwargs,
)
# return the whole documents by default
else:
cursor = request.app["mongo"][catalog].find(
query.query[catalog][obj][0],
max_time_ms=max_time_ms,
**query.kwargs,
)
data[catalog][obj] = await cursor.to_list(length=None)
if query.query_type == "find":
catalog = query.query["catalog"]
catalog_filter = query.query["filter"]
catalog_projection = query.query["projection"]
# project?
if len(catalog_projection) > 0:
cursor = request.app["mongo"][catalog].find(
catalog_filter,
catalog_projection,
max_time_ms=max_time_ms,
**query.kwargs,
)
# return the whole documents by default
else:
cursor = request.app["mongo"][catalog].find(
catalog_filter,
max_time_ms=max_time_ms,
**query.kwargs,
)
if isinstance(cursor, (int, float, Sequence, Mapping)) or (cursor is None):
data = cursor
else:
data = await cursor.to_list(length=None)
if query.query_type == "find_one":
catalog = query.query["catalog"]
cursor = request.app["mongo"][catalog].find_one(
query.query["filter"],
max_time_ms=max_time_ms,
)
data = await cursor
if query.query_type == "count_documents":
catalog = query.query["catalog"]
cursor = request.app["mongo"][catalog].count_documents(
query.query["filter"],
maxTimeMS=max_time_ms,
)
data = await cursor
if query.query_type == "estimated_document_count":
catalog = query.query["catalog"]
cursor = request.app["mongo"][catalog].estimated_document_count(
maxTimeMS=max_time_ms,
)
data = await cursor
if query.query_type == "aggregate":
catalog = query.query["catalog"]
pipeline = query.query["pipeline"]
cursor = request.app["mongo"][catalog].aggregate(
pipeline,
allowDiskUse=query.kwargs.get("allowDiskUse", True),
maxTimeMS=max_time_ms,
)
data = await cursor.to_list(length=None)
if query.query_type == "info":
if query.query["command"] == "catalog_names":
# get available catalog names
catalog_names = await request.app["mongo"].list_collection_names()
data = [
catalog_name
for catalog_name in sorted(catalog_names)[::-1]
if catalog_name not in SYSTEM_COLLECTIONS
]
elif query.query["command"] == "catalog_info":
catalog = query.query["catalog"]
data = await request.app["mongo"].command("collstats", catalog)
elif query.query["command"] == "index_info":
catalog = query.query["catalog"]
data = await request.app["mongo"][catalog].index_information()
elif query.query["command"] == "db_info":
data = await request.app["mongo"].command("dbstats")
return self.success(message="Successfully executed query", data=data)
""" filters """
class FilterVersion(EmbeddedModel, ABC):
"""Data model for Filter versions"""
fid: str = Field(default_factory=uid)
pipeline: str
created_at: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
@root_validator
def check_min_stages(cls, values):
pipeline = values.get("pipeline")
if len(loads(pipeline)) == 0: # it is stored as a string
raise ValueError("pipeline must contain at least one stage")
return values
@root_validator
def check_forbidden_stages(cls, values):
pipeline = values.get("pipeline")
# check that only allowed stages are used in the pipeline
stages = set([list(stage.keys())[0] for stage in loads(pipeline)])
if len(stages.intersection(FORBIDDEN_STAGES_FILTERS)):
raise ValueError(
f"pipeline uses forbidden stages: {str(stages.intersection(FORBIDDEN_STAGES_FILTERS))}"
)
return values
class Config:
json_dumps = dumps
json_loads = loads
parse_doc_with_default_factories = True
class Filter(Model, ABC):
"""Data model for Filters"""
filter_id: int = Field(ge=1)
group_id: int = Field(ge=1)
catalog: str
permissions: List
autosave: bool = False
active: bool = True
update_annotations: bool = False
active_fid: Optional[str] = Field(min_length=6, max_length=6)
fv: List[FilterVersion] = list()
created_at: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
last_modified: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
class Config:
# collection name in MongoDB
collection = "filters"
json_dumps = dumps
json_loads = loads
parse_doc_with_default_factories = True
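# Hedged construction sketch (not part of Kowalski): building a Filter with a
# single FilterVersion, mirroring what the POST handler below does. The ids,
# catalog and pipeline contents are placeholder assumptions; the pipeline is
# stored as a serialized extended-JSON string.
def _example_build_filter() -> Filter:
    version = FilterVersion(
        pipeline=dumps([{"$match": {"candidate.drb": {"$gt": 0.99}}}])
    )
    return Filter(
        filter_id=1,
        group_id=1,
        catalog="ZTF_alerts",
        permissions=[1, 2],
        fv=[version],
        active_fid=version.fid,
    )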
class FilterHandler(Handler):
"""Handlers to work with user-defined alert filters"""
@admin_required
async def get(self, request: web.Request) -> web.Response:
"""Retrieve filter by filter_id
:param request:
:return:
---
summary: Retrieve user-defined filters
tags:
- filters
parameters:
- in: query
name: filter_id
description: filter id
required: true
schema:
type: integer
minimum: 1
responses:
'200':
description: retrieved filter data
content:
application/json:
schema:
type: object
required:
- status
- message
- data
properties:
status:
type: string
enum: [success]
message:
type: string
data:
type: object
example:
"status": "success"
"message": "Retrieved filter id 1"
"data": {
"group_id": 1,
"filter_id": 1,
"catalog": "ZTF_alerts",
"permissions": [1, 2],
"autosave": false,
"update_annotations": false,
"active": true,
"active_fid": "nnsun9",
"fv": [
"fid": "nnsun9",
"pipeline": "<serialized extended json string>",
"created": {
"$date": 1584403506877
}
]
}
'400':
description: retrieval failed or internal/unknown cause of failure
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: "failure: <error message>"
"""
filter_id = int(request.match_info["filter_id"])
filtr = await request.app["mongo_odm"].find_one(
Filter, Filter.filter_id == filter_id
)
if filtr is not None:
return self.success(
message=f"Retrieved filter id {filter_id}", data=filtr.doc()
)
return self.error(message=f"Filter id {filter_id} not found")
@admin_required
async def post(self, request: web.Request) -> web.Response:
"""Post user-defined alert filter, or a new version thereof
- store pipeline as serialized extended json string,
to be used with literal_eval to convert to dict at execution
- run a simple sanity check before saving
---
summary: Post user-defined alert filter, or a new version thereof
tags:
- filters
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- group_id
- filter_id
- catalog
- permissions
- pipeline
properties:
group_id:
type: integer
description: "[fritz] user group (science program) id"
minimum: 1
filter_id:
type: integer
description: "[fritz] science program filter id for this user group id"
minimum: 1
catalog:
type: string
description: "alert stream to filter"
enum: [ZTF_alerts, ZUDS_alerts]
permissions:
type: array
items:
type: integer
description: "permissions to access streams"
minItems: 1
autosave:
type: boolean
description: "automatically save passing alerts to group <group_id>"
default: false
update_annotations:
type: boolean
description: "update existing annotations for newly passing alerts"
default: false
pipeline:
type: array
items:
type: object
description: "user-defined aggregation pipeline stages in MQL"
minItems: 1
examples:
filter_1:
value:
"group_id": 1
"filter_id": 1
"catalog": ZTF_alerts
"permissions": [1, 2]
"pipeline": [
{
"$match": {
"candidate.drb": {
"$gt": 0.9999
},
"cross_matches.CLU_20190625.0": {
"$exists": False
}
}
},
{
"$addFields": {
"annotations.author": "dd",
"annotations.mean_rb": {"$avg": "$prv_candidates.rb"}
}
},
{
"$project": {
"_id": 0,
"candid": 1,
"objectId": 1,
"annotations": 1
}
}
]
filter_2:
value:
"group_id": 2
"filter_id": 1
"catalog": ZTF_alerts
"permissions": [1, 2, 3]
"autosave": true
"update_annotations": false
"pipeline": [
{
"$match": {
"candidate.drb": {
"$gt": 0.9999
},
"cross_matches.CLU_20190625.0": {
"$exists": True
}
}
},
{
"$addFields": {
"annotations.author": "dd",
"annotations.mean_rb": {"$avg": "$prv_candidates.rb"}
}
},
{
"$project": {
"_id": 0,
"candid": 1,
"objectId": 1,
"annotations": 1
}
}
]
responses:
'200':
description: filter successfully saved
content:
application/json:
schema:
type: object
required:
- status
- message
- data
properties:
status:
type: string
enum: [success]
message:
type: string
user:
type: string
data:
description: "contains unique filter identifier"
type: object
additionalProperties:
type: object
properties:
fid:
type: string
description: "generated unique filter identifier"
minLength: 6
maxLength: 6
example:
"status": "success"
"message": "saved filter: c3ig1t"
"data": {
"fid": "c3ig1t"
}
'400':
description: filter parsing/testing/saving error
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: "failure: <error message>"
"""
# allow both .json() and .post():
try:
filter_spec = await request.json()
except AttributeError:
filter_spec = await request.post()
filter_new = Filter(**filter_spec)
# check if a filter for these (group_id, filter_id) already exists:
filter_existing = await request.app["mongo_odm"].find_one(
Filter,
Filter.filter_id == filter_new.filter_id,
Filter.group_id == filter_new.group_id,
)
# new filter version:
pipeline = filter_spec.get("pipeline")
if not isinstance(pipeline, str):
pipeline = dumps(pipeline)
filter_version = FilterVersion(pipeline=pipeline)
try:
# try on most recently ingested alert to check correctness
n_docs = await request.app["mongo"][
filter_new.catalog
].estimated_document_count()
log(f"Found {n_docs} documents in {filter_new.catalog} collection")
if n_docs > 0:
# get latest candid:
select = (
request.app["mongo"][filter_new.catalog]
.find({}, {"_id": 0, "candid": 1})
.sort([("$natural", -1)])
.limit(1)
)
alert = await select.to_list(length=1)
alert = alert[0]
# filter pipeline upstream: select current alert, ditch cutouts, and merge with aux data
# including archival photometry and cross-matches:
filter_pipeline_upstream = config["database"]["filters"][
filter_new.catalog
]
filter_template = filter_pipeline_upstream + loads(
filter_version.pipeline
)
# match candid
filter_template[0]["$match"]["candid"] = alert["candid"]
# match permissions for ZTF
if filter_new.catalog.startswith("ZTF"):
filter_template[0]["$match"]["candidate.programid"][
"$in"
] = filter_new.permissions
filter_template[3]["$project"]["prv_candidates"]["$filter"]["cond"][
"$and"
][0]["$in"][1] = filter_new.permissions
cursor = request.app["mongo"][filter_new.catalog].aggregate(
filter_template, allowDiskUse=False, maxTimeMS=3000
)
await cursor.to_list(length=None)
test_successful, test_message = (
True,
f"pipeline test for filter id {filter_new.filter_id} successful",
)
log(test_message)
else:
test_successful, test_message = (
True,
f"WARNING: No documents in {filter_new.catalog} collection, "
f"cannot properly test pipeline for filter id {filter_new.filter_id}",
)
log(test_message)
except Exception as e:
log(e)
test_successful, test_message = False, str(e)
if not test_successful:
return self.error(message=test_message)
# if a filter does not exist for (filter_id, group_id), create one:
if filter_existing is None:
filter_new.fv.append(filter_version)
filter_new.active_fid = filter_version.fid
filter_new.last_modified = datetime.datetime.now()
await request.app["mongo_odm"].save(filter_new)
else:
# already exists? push new filter version and reset active_fid:
filter_existing.fv.append(filter_version)
filter_existing.active_fid = filter_version.fid
filter_existing.last_modified = datetime.datetime.now()
# note: filters are defined on streams on SkyPortal,
# with non-modifiable catalog and permissions parameters, so it should not be possible to modify such here
await request.app["mongo_odm"].save(filter_existing)
return self.success(
message=test_message + f"\nsaved new filter version: {filter_version.fid}",
data=filter_version.doc(),
)
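    # note: post() above dry-runs the submitted pipeline against the most recently ingested
    # alert (allowDiskUse=False, maxTimeMS=3000) before persisting the new filter version,
    # so obviously broken pipelines are rejected up front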
@admin_required
async def patch(self, request: web.Request) -> web.Response:
"""Update user-defined filter
:param request:
:return:
---
summary: "Modify existing filters: activate/deactivate, set active_fid, autosave, or update_annotations"
tags:
- filters
requestBody:
required: true
content:
application/json:
schema:
type: object
required:
- filter_id
properties:
filter_id:
type: integer
description: "[fritz] filter id for this group id"
minimum: 1
active:
type: boolean
description: "activate or deactivate filter"
active_fid:
description: "set fid as active version"
type: string
minLength: 6
maxLength: 6
autosave:
type: boolean
description: "autosave candidates that pass filter to corresponding group?"
update_annotations:
type: boolean
description: "update annotations for new candidates that previously passed filter?"
examples:
filter_1:
value:
"filter_id": 1
"active": false
filter_2:
value:
"filter_id": 5
"active_fid": "r7qiti"
filter_3:
value:
"filter_id": 1
"autosave": true
filter_4:
value:
"filter_id": 1
"update_annotations": true
responses:
'200':
description: filter updated
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [success]
message:
type: string
example:
status: success
message: "updated filter id 1"
data:
active: false
'400':
            description: filter not found or update failed
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
examples:
filter not found:
value:
status: error
message: Filter id 1 not found
"""
# allow both .json() and .post():
try:
filter_spec = await request.json()
except AttributeError:
filter_spec = await request.post()
filter_id = filter_spec.get("filter_id")
# check if a filter for these (group_id, filter_id) already exists:
filter_existing = await request.app["mongo_odm"].find_one(
Filter, Filter.filter_id == filter_id
)
if filter_existing is None:
return self.error(message=f"Filter id {filter_id} not found")
filter_doc = filter_existing.doc()
# note: partial model loading is not (yet?) available in odmantic + need a custom check on active_fid
for modifiable_field in (
"active",
"active_fid",
"autosave",
"update_annotations",
):
value = filter_spec.get(modifiable_field)
if value is not None:
if modifiable_field == "active_fid" and value not in [
filter_version["fid"] for filter_version in filter_doc["fv"]
]:
raise ValueError(
f"Cannot set active_fid to {value}: filter version fid not in filter.fv"
)
filter_doc[modifiable_field] = value
filter_existing = Filter.parse_doc(filter_doc)
await request.app["mongo_odm"].save(filter_existing)
return self.success(
message=f"Updated filter id {filter_id}", data=filter_existing.doc()
)
@admin_required
async def delete(self, request: web.Request) -> web.Response:
"""Delete user-defined filter for (group_id, filter_id) altogether
:param request:
:return:
---
summary: Delete user-defined filter by filter_id
tags:
- filters
parameters:
- in: query
name: filter_id
description: filter id
required: true
schema:
type: integer
minimum: 1
responses:
'200':
description: filter removed
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [success]
message:
type: string
example:
status: success
message: "Removed filter for group_id=1, filter_id=1"
'400':
description: filter not found or removal failed
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
examples:
filter not found:
value:
status: error
message: Filter id 1 not found
"""
filter_id = int(request.match_info["filter_id"])
r = await request.app["mongo"].filters.delete_one({"filter_id": filter_id})
if r.deleted_count != 0:
return self.success(message=f"Removed filter id {filter_id}")
return self.error(message=f"Filter id {filter_id} not found")
class ZTFTrigger(Model, ABC):
"""Data model for ZTF trigger for streamlined validation"""
queue_name: str
validity_window_mjd: List[float]
targets: List[dict]
queue_type: str
user: str
class ZTFDelete(Model, ABC):
"""Data model for ZTF queue deletion for streamlined validation"""
queue_name: str
user: str
class ZTFTriggerHandler(Handler):
"""Handlers to work with ZTF triggers"""
def __init__(self, test: bool = False):
"""Constructor for ZTF trigger class
:param test: is this a test trigger?
:return:
"""
self.test = test
@admin_required
async def get(self, request: web.Request) -> web.Response:
"""Retrieve ZTF queue
:param request:
:return:
---
summary: Get ZTF queue
tags:
- triggers
requestBody:
required: true
content:
application/json:
schema:
type: object
responses:
'200':
description: queue retrieved
content:
application/json:
schema:
type: object
'400':
description: query parsing/execution error
content:
application/json:
schema:
type: object
"""
if self.test:
return self.success(message="submitted")
server = SSHTunnelForwarder(
(config["ztf"]["mountain_ip"], config["ztf"]["mountain_port"]),
ssh_username=config["ztf"]["mountain_username"],
ssh_password=config["ztf"]["mountain_password"],
remote_bind_address=(
config["ztf"]["mountain_bind_ip"],
config["ztf"]["mountain_bind_port"],
),
)
server.start()
url = f"http://{server.local_bind_address[0]}:{server.local_bind_address[1]}/queues"
async with ClientSession() as client_session:
async with client_session.get(url, json={}, timeout=10) as response:
response_json = await response.json()
server.stop()
if response.status == 200:
return self.success(message="retrieved", data=response_json)
return self.error(message=f"ZTF queue query attempt rejected: {response.text}")
@admin_required
async def put(self, request: web.Request) -> web.Response:
"""Trigger ZTF
:param request:
:return:
---
summary: Trigger ZTF
tags:
- triggers
requestBody:
required: true
content:
application/json:
schema:
type: object
responses:
'200':
description: queue submitted
content:
application/json:
schema:
type: object
'400':
description: query parsing/execution error
content:
application/json:
schema:
type: object
"""
_data = await request.json()
# validate
ZTFTrigger(**_data)
if self.test:
return self.success(message="submitted")
server = SSHTunnelForwarder(
(config["ztf"]["mountain_ip"], config["ztf"]["mountain_port"]),
ssh_username=config["ztf"]["mountain_username"],
ssh_password=config["ztf"]["mountain_password"],
remote_bind_address=(
config["ztf"]["mountain_bind_ip"],
config["ztf"]["mountain_bind_port"],
),
)
server.start()
url = f"http://{server.local_bind_address[0]}:{server.local_bind_address[1]}/queues"
async with ClientSession() as client_session:
response = await client_session.put(url, json=_data, timeout=10)
server.stop()
if response.status == 201:
return self.success(message="submitted", data=dict(response.headers))
elif response.status == 200:
data = dict(response.headers)
return self.error(
message=f"Submitted queue {data['queue_name']} already exists",
status=409,
)
return self.error(message=f"ZTF trigger attempt rejected: {response.text}")
@admin_required
async def delete(self, request: web.Request) -> web.Response:
"""Delete ZTF request
:param request:
:return:
---
summary: Delete ZTF request
tags:
- triggers
requestBody:
required: true
content:
application/json:
schema:
type: object
responses:
'200':
description: queue removed
content:
application/json:
schema:
type: object
'400':
description: query parsing/execution error
content:
application/json:
schema:
type: object
"""
_data = await request.json()
# validate and preprocess
ZTFDelete(**_data)
if self.test:
return self.success(message="deleted")
server = SSHTunnelForwarder(
(config["ztf"]["mountain_ip"], config["ztf"]["mountain_port"]),
ssh_username=config["ztf"]["mountain_username"],
ssh_password=config["ztf"]["mountain_password"],
remote_bind_address=(
config["ztf"]["mountain_bind_ip"],
config["ztf"]["mountain_bind_port"],
),
)
server.start()
url = f"http://{server.local_bind_address[0]}:{server.local_bind_address[1]}/queues"
async with ClientSession() as client_session:
response = await client_session.delete(url, json=_data, timeout=10)
server.stop()
if response.status == 200:
return self.success(message="deleted", data=dict(response.headers))
return self.error(message=f"ZTF delete attempt rejected: {response.text}")
""" lab """
# @routes.get('/lab/ztf-alerts/{candid}/cutout/{cutout}/{file_format}', allow_head=False)
@auth_required
async def ztf_alert_get_cutout(request):
"""
Serve ZTF alert cutouts as fits or png
:param request:
:return:
---
summary: Serve ZTF alert cutout as fits or png
tags:
- lab
parameters:
- in: path
name: candid
description: "ZTF alert candid"
required: true
schema:
type: integer
- in: path
name: cutout
description: "retrieve science, template, or difference cutout image?"
required: true
schema:
type: string
enum: [science, template, difference]
- in: path
name: file_format
description: "response file format: original lossless FITS or rendered png"
required: true
schema:
type: string
enum: [fits, png]
- in: query
name: interval
description: "Interval to use when rendering png"
required: false
schema:
type: string
enum: [min_max, zscale]
- in: query
name: stretch
description: "Stretch to use when rendering png"
required: false
schema:
type: string
enum: [linear, log, asinh, sqrt]
- in: query
name: cmap
description: "Color map to use when rendering png"
required: false
schema:
type: string
enum: [bone, gray, cividis, viridis, magma]
responses:
'200':
description: retrieved cutout
content:
image/fits:
schema:
type: string
format: binary
image/png:
schema:
type: string
format: binary
'400':
description: retrieval failed
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: "failure: <error message>"
"""
try:
candid = int(request.match_info["candid"])
cutout = request.match_info["cutout"].capitalize()
file_format = request.match_info["file_format"]
interval = request.query.get("interval")
stretch = request.query.get("stretch")
cmap = request.query.get("cmap", None)
known_cutouts = ["Science", "Template", "Difference"]
if cutout not in known_cutouts:
return web.json_response(
{
"status": "error",
"message": f"cutout {cutout} not in {str(known_cutouts)}",
},
status=400,
)
known_file_formats = ["fits", "png"]
if file_format not in known_file_formats:
return web.json_response(
{
"status": "error",
"message": f"file format {file_format} not in {str(known_file_formats)}",
},
status=400,
)
normalization_methods = {
"asymmetric_percentile": AsymmetricPercentileInterval(
lower_percentile=1, upper_percentile=100
),
"min_max": MinMaxInterval(),
"zscale": ZScaleInterval(nsamples=600, contrast=0.045, krej=2.5),
}
if interval is None:
interval = "asymmetric_percentile"
normalizer = normalization_methods.get(
interval.lower(),
AsymmetricPercentileInterval(lower_percentile=1, upper_percentile=100),
)
stretching_methods = {
"linear": LinearStretch,
"log": LogStretch,
"asinh": AsinhStretch,
"sqrt": SqrtStretch,
}
if stretch is None:
stretch = "log" if cutout != "Difference" else "linear"
stretcher = stretching_methods.get(stretch.lower(), LogStretch)()
if (cmap is None) or (
cmap.lower() not in ["bone", "gray", "cividis", "viridis", "magma"]
):
cmap = "bone"
else:
cmap = cmap.lower()
alert = await request.app["mongo"]["ZTF_alerts"].find_one(
{"candid": candid}, {f"cutout{cutout}": 1}, max_time_ms=10000
)
cutout_data = loads(dumps([alert[f"cutout{cutout}"]["stampData"]]))[0]
# unzipped fits name
fits_name = pathlib.Path(alert[f"cutout{cutout}"]["fileName"]).with_suffix("")
# unzip and flip about y axis on the server side
with gzip.open(io.BytesIO(cutout_data), "rb") as f:
with fits.open(io.BytesIO(f.read()), ignore_missing_simple=True) as hdu:
header = hdu[0].header
data_flipped_y = np.flipud(hdu[0].data)
if file_format == "fits":
hdu = fits.PrimaryHDU(data_flipped_y, header=header)
# hdu = fits.PrimaryHDU(data_flipped_y)
hdul = fits.HDUList([hdu])
stamp_fits = io.BytesIO()
hdul.writeto(fileobj=stamp_fits)
return web.Response(
body=stamp_fits.getvalue(),
content_type="image/fits",
headers=MultiDict(
{"Content-Disposition": f"Attachment;filename={fits_name}"}
),
)
if file_format == "png":
buff = io.BytesIO()
plt.close("all")
fig = plt.figure()
fig.set_size_inches(4, 4, forward=False)
ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
ax.set_axis_off()
fig.add_axes(ax)
            # replace NaNs with the image mean:
img = np.array(data_flipped_y)
# replace dubiously large values
xl = np.greater(np.abs(img), 1e20, where=~np.isnan(img))
if img[xl].any():
img[xl] = np.nan
if np.isnan(img).any():
                mean = float(np.nanmean(img.flatten()))
                img = np.nan_to_num(img, nan=mean)
norm = ImageNormalize(img, stretch=stretcher)
img_norm = norm(img)
vmin, vmax = normalizer.get_limits(img_norm)
ax.imshow(img_norm, cmap=cmap, origin="lower", vmin=vmin, vmax=vmax)
plt.savefig(buff, dpi=42)
buff.seek(0)
plt.close("all")
return web.Response(body=buff, content_type="image/png")
except Exception as _e:
log(f"Got error: {str(_e)}")
_err = traceback.format_exc()
log(_err)
return web.json_response(
{"status": "error", "message": f"failure: {_err}"}, status=400
)
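# Example request (placeholder candid; the rendering options map to the query parameters
# documented in the docstring above):
#   GET /lab/ztf-alerts/<candid>/cutout/science/png?interval=zscale&stretch=asinh&cmap=viridis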
# @routes.get('/lab/zuds-alerts/{candid}/cutout/{cutout}/{file_format}', allow_head=False)
@auth_required
async def zuds_alert_get_cutout(request):
"""
Serve cutouts as fits or png
:param request:
:return:
---
summary: Serve ZUDS alert cutout as fits or png
tags:
- lab
parameters:
- in: path
name: candid
description: "ZUDS alert candid"
required: true
schema:
type: integer
- in: path
name: cutout
description: "retrieve science, template, or difference cutout image?"
required: true
schema:
type: string
enum: [science, template, difference]
- in: path
name: file_format
description: "response file format: original lossless FITS or rendered png"
required: true
schema:
type: string
enum: [fits, png]
- in: query
name: scaling
description: "Scaling to use when rendering png"
required: false
schema:
type: string
enum: [linear, log, arcsinh, zscale]
- in: query
name: cmap
description: "Color map to use when rendering png"
required: false
schema:
type: string
enum: [bone, gray, cividis, viridis, magma]
responses:
'200':
description: retrieved cutout
content:
image/fits:
schema:
type: string
format: binary
image/png:
schema:
type: string
format: binary
'400':
description: retrieval failed
content:
application/json:
schema:
type: object
required:
- status
- message
properties:
status:
type: string
enum: [error]
message:
type: string
example:
status: error
message: "failure: <error message>"
"""
try:
candid = int(request.match_info["candid"])
cutout = request.match_info["cutout"].capitalize()
file_format = request.match_info["file_format"]
scaling = request.query.get("scaling", None)
cmap = request.query.get("cmap", None)
known_cutouts = ["Science", "Template", "Difference"]
if cutout not in known_cutouts:
return web.json_response(
{
"status": "error",
"message": f"cutout {cutout} not in {str(known_cutouts)}",
},
status=400,
)
known_file_formats = ["fits", "png"]
if file_format not in known_file_formats:
return web.json_response(
{
"status": "error",
"message": f"file format {file_format} not in {str(known_file_formats)}",
},
status=400,
)
default_scaling = {"Science": "log", "Template": "log", "Difference": "linear"}
if (scaling is None) or (
scaling.lower() not in ("log", "linear", "zscale", "arcsinh")
):
scaling = default_scaling[cutout]
else:
scaling = scaling.lower()
if (cmap is None) or (
cmap.lower() not in ["bone", "gray", "cividis", "viridis", "magma"]
):
cmap = "bone"
else:
cmap = cmap.lower()
alert = await request.app["mongo"]["ZUDS_alerts"].find_one(
{"candid": candid}, {f"cutout{cutout}": 1}, max_time_ms=60000
)
cutout_data = loads(dumps([alert[f"cutout{cutout}"]]))[0]
# unzipped fits name
fits_name = f"{candid}.cutout{cutout}.fits"
# unzip and flip about y axis on the server side
with gzip.open(io.BytesIO(cutout_data), "rb") as f:
with fits.open(io.BytesIO(f.read()), ignore_missing_simple=True) as hdu:
header = hdu[0].header
# no need to flip it since Danny does that on his end
# data_flipped_y = np.flipud(hdu[0].data)
data_flipped_y = hdu[0].data
if file_format == "fits":
hdu = fits.PrimaryHDU(data_flipped_y, header=header)
# hdu = fits.PrimaryHDU(data_flipped_y)
hdul = fits.HDUList([hdu])
stamp_fits = io.BytesIO()
hdul.writeto(fileobj=stamp_fits)
return web.Response(
body=stamp_fits.getvalue(),
content_type="image/fits",
headers=MultiDict(
{"Content-Disposition": f"Attachment;filename={fits_name}"}
),
)
if file_format == "png":
buff = io.BytesIO()
plt.close("all")
fig = plt.figure()
fig.set_size_inches(4, 4, forward=False)
ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
ax.set_axis_off()
fig.add_axes(ax)
            # replace NaNs with the image mean:
img = np.array(data_flipped_y)
# replace dubiously large values
xl = np.greater(np.abs(img), 1e20, where=~np.isnan(img))
if img[xl].any():
img[xl] = np.nan
if np.isnan(img).any():
                mean = float(np.nanmean(img.flatten()))
                img = np.nan_to_num(img, nan=mean)
if scaling == "log":
img[img <= 0] = np.median(img)
ax.imshow(img, cmap=cmap, norm=LogNorm(), origin="lower")
elif scaling == "linear":
ax.imshow(img, cmap=cmap, origin="lower")
elif scaling == "zscale":
interval = ZScaleInterval(
nsamples=img.shape[0] * img.shape[1], contrast=0.045, krej=2.5
)
limits = interval.get_limits(img)
ax.imshow(
img, origin="lower", cmap=cmap, vmin=limits[0], vmax=limits[1]
)
elif scaling == "arcsinh":
ax.imshow(np.arcsinh(img - np.median(img)), cmap=cmap, origin="lower")
plt.savefig(buff, dpi=42)
buff.seek(0)
plt.close("all")
return web.Response(body=buff, content_type="image/png")
except Exception as _e:
log(f"Got error: {str(_e)}")
_err = traceback.format_exc()
log(_err)
return web.json_response(
{"status": "error", "message": f"failure: {_err}"}, status=400
)
async def app_factory():
"""
App Factory
:return:
"""
# init db if necessary
await init_db(config=config)
# Database connection
mongodb_connection_string = (
f"mongodb://{config['database']['admin_username']}:{config['database']['admin_password']}@"
+ f"{config['database']['host']}:{config['database']['port']}"
)
if config["database"]["replica_set"] is not None:
mongodb_connection_string += f"/?replicaSet={config['database']['replica_set']}"
client = AsyncIOMotorClient(
mongodb_connection_string,
maxPoolSize=config["database"]["max_pool_size"],
)
mongo = client[config["database"]["db"]]
# admin to connect to this instance from outside using API
await add_admin(mongo, config=config)
# init app with auth and error handling middlewares
app = web.Application(middlewares=[auth_middleware, error_middleware])
# store mongo connection
app["mongo"] = mongo
# mark all enqueued tasks failed on startup
await app["mongo"].queries.update_many(
{"status": "enqueued"},
{"$set": {"status": "error", "last_modified": datetime.datetime.utcnow()}},
)
    # gracefully close the mongo client on shutdown
async def close_mongo(_app):
_app["mongo"].client.close()
app.on_cleanup.append(close_mongo)
# use ODMantic to work with structured data such as Filters
engine = AIOEngine(motor_client=client, database=config["database"]["db"])
# ODM = Object Document Mapper
app["mongo_odm"] = engine
# set up JWT for user authentication/authorization
app["JWT"] = {
"JWT_SECRET": config["server"]["JWT_SECRET_KEY"],
"JWT_ALGORITHM": config["server"]["JWT_ALGORITHM"],
"JWT_EXP_DELTA_SECONDS": config["server"]["JWT_EXP_DELTA_SECONDS"],
}
# OpenAPI docs:
s = SwaggerDocs(
app,
redoc_ui_settings=ReDocUiSettings(path="/docs/api/"),
# swagger_ui_settings=SwaggerUiSettings(path="/docs/api/"),
validate=config["misc"]["openapi_validate"],
title=config["server"]["name"],
version=config["server"]["version"],
description=config["server"]["description"],
components="components_api.yaml",
)
# instantiate handler classes:
query_handler = QueryHandler()
filter_handler = FilterHandler()
ztf_trigger_handler = ZTFTriggerHandler()
ztf_trigger_handler_test = ZTFTriggerHandler(test=True)
# add routes manually
s.add_routes(
[
web.get("/", ping, name="root", allow_head=False),
# auth:
web.post("/api/auth", auth_post, name="auth"),
# users:
web.post("/api/users", users_post),
web.delete("/api/users/{username}", users_delete),
web.put("/api/users/{username}", users_put),
# queries:
web.post("/api/queries", query_handler.post),
# filters:
web.get(
r"/api/filters/{filter_id:[0-9]+}", filter_handler.get, allow_head=False
),
web.post("/api/filters", filter_handler.post),
web.patch("/api/filters", filter_handler.patch),
web.delete("/api/filters/{filter_id:[0-9]+}", filter_handler.delete),
# triggers
web.get("/api/triggers/ztf", ztf_trigger_handler.get),
web.put("/api/triggers/ztf", ztf_trigger_handler.put),
web.delete("/api/triggers/ztf", ztf_trigger_handler.delete),
web.put("/api/triggers/ztf.test", ztf_trigger_handler_test.put),
web.delete("/api/triggers/ztf.test", ztf_trigger_handler_test.delete),
# lab:
web.get(
"/lab/ztf-alerts/{candid}/cutout/{cutout}/{file_format}",
ztf_alert_get_cutout,
allow_head=False,
),
web.get(
"/lab/zuds-alerts/{candid}/cutout/{cutout}/{file_format}",
zuds_alert_get_cutout,
allow_head=False,
),
]
)
return app
uvloop.install()
if __name__ == "__main__":
web.run_app(app_factory(), port=config["server"]["port"])
| 32.991197
| 154
| 0.481936
|
794bcb4ad0f0f5831aa87b1b69f4dae995050d2f
| 18,227
|
py
|
Python
|
pycket/values_string.py
|
krono/pycket
|
5eff3401ce5cf34b16863b669ac9e274edabbe00
|
[
"MIT"
] | null | null | null |
pycket/values_string.py
|
krono/pycket
|
5eff3401ce5cf34b16863b669ac9e274edabbe00
|
[
"MIT"
] | null | null | null |
pycket/values_string.py
|
krono/pycket
|
5eff3401ce5cf34b16863b669ac9e274edabbe00
|
[
"MIT"
] | null | null | null |
from pycket.base import W_Object, SingletonMeta
from pycket.error import SchemeException
from pycket import config
from rpython.rlib import rerased, jit
from rpython.rlib.objectmodel import compute_hash, we_are_translated
from rpython.rlib.unicodedata import unicodedb_6_2_0 as unicodedb
from rpython.rlib.rstring import StringBuilder, UnicodeBuilder
@jit.elidable
def _is_ascii(s):
for c in s:
if ord(c) >= 128:
return False
return True
class W_String(W_Object):
errorname = "string"
_attrs_ = []
_settled_ = True
# factory methods
@staticmethod
def fromstr_utf8(s, immutable=False):
# try to see whether it's ascii first
if config.strategies:
if _is_ascii(s):
return W_String.fromascii(s, immutable)
u = s.decode("utf-8")
return W_String.fromunicode(u, immutable)
@staticmethod
def fromascii(s, immutable=False):
if not config.strategies:
u = s.decode("utf-8")
return W_String.fromunicode(u, immutable)
if not we_are_translated():
assert s.decode("ascii") == s
strategy = AsciiStringStrategy.singleton
storage = strategy.erase(s)
if immutable:
cls = W_AsciiImmutableString
else:
cls = W_MutableString
return cls(strategy, storage)
@staticmethod
def fromunicode(u, immutable=False):
strategy = UnicodeStringStrategy.singleton
storage = strategy.erase(u)
if immutable:
cls = W_UnicodeImmutableString
else:
cls = W_MutableString
return cls(strategy, storage)
cache = {}
@staticmethod
def make(val):
lup = W_String.cache.get(val, None)
if lup is None:
lup = W_String.fromstr_utf8(val, immutable=True)
W_String.cache[val] = lup
return lup
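    # make() effectively interns strings: repeated calls with the same utf-8 value return
    # the same immutable W_String instance from the class-level cache above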
def make_immutable(self):
raise NotImplementedError("abstract base class")
def get_strategy(self):
raise NotImplementedError("abstract base class")
def get_storage(self):
raise NotImplementedError("abstract base class")
# methods that defer to the strategies
def as_str_ascii(self):
return self.get_strategy().as_str_ascii(self)
def as_str_utf8(self):
return self.get_strategy().as_str_utf8(self)
def as_unicode(self):
return self.get_strategy().as_unicode(self)
def as_charlist_ascii(self):
return self.get_strategy().as_charlist_ascii(self)
def as_charlist_utf8(self):
return self.get_strategy().as_charlist_utf8(self)
def as_unicharlist(self):
return self.get_strategy().as_unicharlist(self)
# string operations
def length(self):
return self.get_strategy().length(self)
def getitem(self, index):
return self.get_strategy().getitem(self, index)
def getslice(self, start, stop):
return self.get_strategy().getslice(self, start, stop)
def hash_equal(self):
return self.get_strategy().hash(self)
def equal(self, other):
if not isinstance(other, W_String):
return False
return self.get_strategy().eq(self, other)
def cmp(self, other):
"""
returns
- a negative number if self < other
- a positive number if self > other
- 0 if self == other
only the sign of the result is relevant, not the value
"""
return self.get_strategy().cmp(self, other)
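    # e.g. (sketch of the contract): W_String.make("abc").cmp(W_String.make("abd")) < 0,
    # and W_String.make("ab").cmp(W_String.make("abc")) < 0 via the length tie-break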
def cmp_case_insensitive(self, other):
return self.get_strategy().cmp_case_insensitive(self, other)
def upper(self):
return self.get_strategy().upper(self)
def lower(self):
return self.get_strategy().lower(self)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.get_strategy(), self.get_storage())
def tostring(self):
from pypy.objspace.std.bytesobject import string_escape_encode
#return string_escape_encode(self.value, '"')
result = self.as_str_utf8()
assert result is not None
return result
def setitem(self, index, unichar):
raise SchemeException("can't mutate string")
def setslice(self, index, w_from, fromstart, fromend):
raise SchemeException("can't mutate string")
class W_MutableString(W_String):
def __init__(self, strategy, storage):
self.change_strategy(strategy, storage)
def get_strategy(self):
return self.strategy
def get_storage(self):
return self.storage
def change_strategy(self, strategy, storage):
self.strategy = strategy
self.storage = storage
def make_immutable(self):
try:
s = self.as_str_ascii()
except ValueError:
strategy = UnicodeStringStrategy.singleton
storage = strategy.erase(self.as_unicode())
return W_UnicodeImmutableString(strategy, storage)
else:
strategy = AsciiStringStrategy.singleton
storage = strategy.erase(s)
return W_AsciiImmutableString(strategy, storage)
def immutable(self):
return False
# mutation operations
def setitem(self, index, unichar):
return self.get_strategy().setitem(self, index, unichar)
def setslice(self, index, w_from, fromstart, fromend):
return self.get_strategy().setslice(self, index, w_from, fromstart, fromend)
class W_ImmutableString(W_String):
# abstract base class of immutable strings
# there are concrete subclasses for every immutable strategy
_immutable_fields_ = ['storage']
def __init__(self, strategy, storage):
self.storage = storage
assert strategy is self.get_strategy()
def make_immutable(self):
return self
def immutable(self):
return True
def get_storage(self):
return self.storage
class W_AsciiImmutableString(W_ImmutableString):
def get_strategy(self):
return AsciiStringStrategy.singleton
class W_UnicodeImmutableString(W_ImmutableString):
def get_strategy(self):
return UnicodeStringStrategy.singleton
class StringStrategy(object):
__metaclass__ = SingletonMeta
# strategy manipulation
def make_mutable(self, w_str):
raise NotImplementedError("abstract base class")
def as_str_ascii(self, w_str):
raise ValueError("can't convert")
def as_str_utf8(self, w_str):
raise NotImplementedError("abstract base class")
def as_unicode(self, w_str):
raise NotImplementedError("abstract base class")
def as_charlist_ascii(self, w_str):
raise ValueError("can't convert")
def as_charlist_utf8(self, w_str):
raise NotImplementedError("abstract base class")
def as_unicharlist(self, w_str):
raise NotImplementedError("abstract base class")
# string operations
def length(self, w_str):
raise NotImplementedError("abstract base class")
def getitem(self, w_str, index):
""" returns a unichar """
raise NotImplementedError("abstract base class")
def getslice(self, w_str, start, stop):
""" returns a W_String """
raise NotImplementedError("abstract base class")
def eq(self, w_str, w_other):
# base implementations, subclasses should do better ones
length = self.length(w_str)
if length != w_other.length():
return False
otherstrategy = w_other.get_strategy()
for i in range(length):
if self.getitem(w_str, i) != otherstrategy.getitem(w_other, i):
return False
return True
def cmp(self, w_str, w_other):
# base implementations, subclasses should do better ones
len1 = self.length(w_str)
len2 = w_other.length()
if len1 < len2:
cmplen = len1
else:
cmplen = len2
otherstrategy = w_other.get_strategy()
for i in range(cmplen):
diff = ord(self.getitem(w_str, i)) - ord(otherstrategy.getitem(w_other, i))
if diff:
return diff
return len1 - len2
def cmp_case_insensitive(self, w_str, w_other):
# base implementations, subclasses should do better ones
len1 = self.length(w_str)
len2 = w_other.length()
if len1 < len2:
cmplen = len1
else:
cmplen = len2
otherstrategy = w_other.get_strategy()
for i in range(cmplen):
ch1 = unicodedb.tolower(ord(self.getitem(w_str, i)))
ch2 = unicodedb.tolower(ord(otherstrategy.getitem(w_other, i)))
diff = ch1 - ch2
if diff:
return diff
return len1 - len2
def hash(self, w_str):
# potentially inefficient default
return compute_hash(w_str.as_unicode())
def upper(self, w_str):
raise NotImplementedError("abstract base class")
def lower(self, w_str):
raise NotImplementedError("abstract base class")
# mutation operations
def setitem(self, w_str, index, unichar):
raise NotImplementedError("abstract base class")
def setslice(self, w_str, index, w_from, fromstart, fromend):
raise NotImplementedError("abstract base class")
class ImmutableStringStrategy(StringStrategy):
def as_charlist_ascii(self, w_str):
return list(self.as_str_ascii(w_str))
def as_charlist_utf8(self, w_str):
return list(self.as_str_utf8(w_str))
def as_unicharlist(self, w_str):
return list(self.as_unicode(w_str))
def setitem(self, w_str, index, unichar):
self.make_mutable(w_str)
return w_str.setitem(index, unichar)
def setslice(self, w_str, index, w_from, fromstart, fromend):
self.make_mutable(w_str)
return w_str.setslice(index, w_from, fromstart, fromend)
class MutableStringStrategy(StringStrategy):
def as_str_ascii(self, w_str):
return "".join(self.as_charlist_ascii(w_str))
def as_str_utf8(self, w_str):
return "".join(self.as_charlist_utf8(w_str))
def as_unicode(self, w_str):
return u"".join(self.as_unicharlist(w_str))
def make_mutable(self, w_str):
pass
# mutation operations
def setitem(self, w_str, index, unichar):
raise NotImplementedError
def setslice(self, w_str, index, w_from, fromstart, fromend):
raise NotImplementedError("abstract base class")
class AsciiStringStrategy(ImmutableStringStrategy):
erase, unerase = rerased.new_static_erasing_pair("ascii-string-strategy")
def make_mutable(self, w_str):
strategy = AsciiMutableStringStrategy.singleton
storage = strategy.erase(self.as_charlist_ascii(w_str))
w_str.change_strategy(strategy, storage)
def as_str_utf8(self, w_str):
return self.unerase(w_str.get_storage())
as_str_ascii = as_str_utf8
def as_unicode(self, w_str):
return unicode(self.unerase(w_str.get_storage())) # change strategy?
# string operations
def length(self, w_str):
return len(self.unerase(w_str.get_storage()))
def getitem(self, w_str, index):
return unichr(ord(self.unerase(w_str.get_storage())[index]))
def getslice(self, w_str, start, stop):
v = self.unerase(w_str.get_storage())[start:stop]
return W_MutableString(self, self.erase(v))
def eq(self, w_str, w_other):
if w_other.get_strategy() is self:
return self.unerase(w_str.get_storage()) == self.unerase(w_other.get_storage())
return ImmutableStringStrategy.eq(self, w_str, w_other)
def hash(self, w_str):
return compute_hash(w_str.as_str_ascii())
def upper(self, w_str):
return W_String.fromascii(w_str.as_str_ascii().upper())
def lower(self, w_str):
return W_String.fromascii(w_str.as_str_ascii().lower())
class AsciiMutableStringStrategy(MutableStringStrategy):
erase, unerase = rerased.new_static_erasing_pair("ascii-mutable-string-strategy")
def make_unicode(self, w_str):
strategy = UnicodeMutableStringStrategy.singleton
storage = strategy.erase(self.as_unicharlist(w_str))
w_str.change_strategy(strategy, storage)
def as_charlist_utf8(self, w_str):
return self.unerase(w_str.get_storage())[:]
def as_unicharlist(self, w_str):
return [unichr(ord(c)) for c in self.unerase(w_str.get_storage())]
def as_str_utf8(self, w_str):
return "".join(self.unerase(w_str.get_storage()))
as_str_ascii = as_str_utf8
# string operations
def length(self, w_str):
return len(self.unerase(w_str.get_storage()))
def getitem(self, w_str, index):
return unichr(ord(self.unerase(w_str.get_storage())[index]))
def getslice(self, w_str, start, stop):
v = self.unerase(w_str.get_storage())[start:stop]
return W_MutableString(self, self.erase(v))
def eq(self, w_str, w_other):
if w_other.get_strategy() is self:
return self.unerase(w_str.get_storage()) == self.unerase(w_other.get_storage())
return MutableStringStrategy.eq(self, w_str, w_other)
# mutation operations
def setitem(self, w_str, index, unichar):
val = ord(unichar.value)
if val < 128:
self.unerase(w_str.get_storage())[index] = chr(val)
else:
self.make_unicode(w_str)
return w_str.setitem(index, unichar)
def setslice(self, w_str, index, w_from, fromstart, fromend):
target = self.unerase(w_str.get_storage())
# XXX inefficient
for sourceindex in range(fromstart, fromend):
char = ord(w_from.getitem(sourceindex))
assert char < 128 # XXX
target[index] = chr(char)
index += 1
def upper(self, w_str):
# XXX inefficient
return W_String.fromascii(w_str.as_str_ascii().upper())
def lower(self, w_str):
# XXX inefficient
return W_String.fromascii(w_str.as_str_ascii().lower())
class UnicodeStringStrategy(ImmutableStringStrategy):
erase, unerase = rerased.new_static_erasing_pair("unicode-string-strategy")
def make_mutable(self, w_str):
strategy = UnicodeMutableStringStrategy.singleton
storage = strategy.erase(self.as_unicharlist(w_str))
w_str.change_strategy(strategy, storage)
def as_str_ascii(self, w_str):
raise ValueError # XXX or check?
def as_str_utf8(self, w_str):
return self.unerase(w_str.get_storage()).encode("utf-8")
def as_unicode(self, w_str):
return self.unerase(w_str.get_storage())
# string operations
def length(self, w_str):
return len(self.unerase(w_str.get_storage()))
def getitem(self, w_str, index):
return self.unerase(w_str.get_storage())[index]
def getslice(self, w_str, start, stop):
v = self.unerase(w_str.get_storage())[start:stop]
return W_MutableString(self, self.erase(v))
def eq(self, w_str, w_other):
if w_other.get_strategy() is self:
return self.unerase(w_str.get_storage()) == self.unerase(w_other.get_storage())
return ImmutableStringStrategy.eq(self, w_str, w_other)
def upper(self, w_str):
value = self.unerase(w_str.get_storage())
builder = UnicodeBuilder(len(value))
for i, ch in enumerate(value):
builder.append(unichr(unicodedb.toupper(ord(ch))))
return W_MutableString(self, self.erase(builder.build()))
def lower(self, w_str):
value = self.unerase(w_str.get_storage())
builder = UnicodeBuilder(len(value))
for i, ch in enumerate(value):
builder.append(unichr(unicodedb.tolower(ord(ch))))
return W_MutableString(self, self.erase(builder.build()))
class UnicodeMutableStringStrategy(MutableStringStrategy):
erase, unerase = rerased.new_static_erasing_pair("unicode-mutable-string-strategy")
def as_charlist_ascii(self, w_str):
raise ValueError("can't convert")
def as_charlist_utf8(self, w_str):
return list(self.as_str_utf8(w_str))
def as_str_utf8(self, w_str):
return u''.join(self.unerase(w_str.get_storage())).encode('utf-8')
def as_unicharlist(self, w_str):
return self.unerase(w_str.get_storage())[:]
def as_unicode(self, w_str):
return u"".join(self.unerase(w_str.get_storage()))
# string operations
def length(self, w_str):
return len(self.unerase(w_str.get_storage()))
def getitem(self, w_str, index):
return self.unerase(w_str.get_storage())[index]
def getslice(self, w_str, start, stop):
v = self.unerase(w_str.get_storage())[start:stop]
return W_MutableString(self, self.erase(v))
def eq(self, w_str, w_other):
if w_other.get_strategy() is self:
return self.unerase(w_str.get_storage()) == self.unerase(w_other.get_storage())
return MutableStringStrategy.eq(self, w_str, w_other)
# mutation operations
def setitem(self, w_str, index, unichar):
self.unerase(w_str.get_storage())[index] = unichar.value
def setslice(self, w_str, index, w_from, fromstart, fromend):
target = self.unerase(w_str.get_storage())
# XXX inefficient
for sourceindex in range(fromstart, fromend):
target[index] = w_from.getitem(sourceindex)
index += 1
def upper(self, w_str):
# copy paste from above, but the types are different
value = self.unerase(w_str.get_storage())
builder = UnicodeBuilder(len(value))
for i, ch in enumerate(value):
builder.append(unichr(unicodedb.toupper(ord(ch))))
return W_MutableString(self, self.erase(list(builder.build())))
def lower(self, w_str):
value = self.unerase(w_str.get_storage())
builder = UnicodeBuilder(len(value))
for i, ch in enumerate(value):
builder.append(unichr(unicodedb.tolower(ord(ch))))
return W_MutableString(self, self.erase(list(builder.build())))
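# Rough usage sketch (inferred from the classes above; "unichar" stands for whatever
# character wrapper the rest of pycket passes in -- it only needs a .value attribute):
#
#   w = W_String.fromascii("abc")      # W_MutableString backed by the ascii strategy
#   w.setitem(0, ascii_unichar)        # first mutation copies to the ascii *mutable* strategy
#   w.setitem(0, non_ascii_unichar)    # ord(value) >= 128: transparently switches to the
#                                      # unicode mutable strategy via make_unicode()
#   frozen = w.make_immutable()        # freezes into an ascii or unicode immutable string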
| 30.89322
| 96
| 0.654249
|
794bcb659c3b95c512e3489b131be74998ac04e6
| 8,673
|
py
|
Python
|
basic2/practice_3/cliff_td_comparison.py
|
linklab/e_learning_rl
|
16c11c17dfb304959cb80912e29d0540e6ed6cd5
|
[
"MIT"
] | 1
|
2020-11-22T06:30:45.000Z
|
2020-11-22T06:30:45.000Z
|
basic2/practice_3/cliff_td_comparison.py
|
linklab/e_learning_rl
|
16c11c17dfb304959cb80912e29d0540e6ed6cd5
|
[
"MIT"
] | null | null | null |
basic2/practice_3/cliff_td_comparison.py
|
linklab/e_learning_rl
|
16c11c17dfb304959cb80912e29d0540e6ed6cd5
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import matplotlib.pyplot as plt
import random
from basic.practice_1.cliff import CliffGridWorld
# grid world height and width
GRID_HEIGHT = 4
GRID_WIDTH = 12
NUM_ACTIONS = 4
# exploration probability
EPSILON = 0.1
# step size
ALPHA = 0.5
# discount factor
GAMMA = 1.0
# start state and terminal states
START_STATE = (3, 0)
TERMINAL_STATES = [(3, 11)]
CLIFF_STATES = [(3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (3, 10)]
# maximum number of episodes
MAX_EPISODES = 500
# total number of runs (used to average performance)
TOTAL_RUNS = 25
# create an empty action-value table initialized with random values between 0 and 1
def generate_initial_q_value(env):
q_value = np.zeros((GRID_HEIGHT, GRID_WIDTH, env.NUM_ACTIONS))
for i in range(GRID_HEIGHT):
for j in range(GRID_WIDTH):
if (i, j) not in TERMINAL_STATES:
for action in env.ACTIONS:
q_value[i, j, action] = random.random()
return q_value
# generate a random policy over the actions available in every state
# initially every action has the same selection probability
def generate_initial_random_policy(env):
policy = dict()
for i in range(GRID_HEIGHT):
for j in range(GRID_WIDTH):
if (i, j) not in TERMINAL_STATES:
actions = []
action_probs = []
for action in env.ACTIONS:
actions.append(action)
action_probs.append(0.25)
policy[(i, j)] = (actions, action_probs)
return policy
# epsilon-greedy policy update
def update_epsilon_greedy_policy(env, state, q_value, policy):
max_prob_actions = [action_ for action_, value_ in enumerate(q_value[state[0], state[1], :]) if
value_ == np.max(q_value[state[0], state[1], :])]
actions = []
action_probs = []
for action in env.ACTIONS:
actions.append(action)
if action in max_prob_actions:
action_probs.append(
(1 - EPSILON) / len(max_prob_actions) + EPSILON / env.NUM_ACTIONS
)
else:
action_probs.append(
EPSILON / env.NUM_ACTIONS
)
policy[state] = (actions, action_probs)
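# e.g. with EPSILON = 0.1 and 4 actions, a single greedy action gets probability
# (1 - 0.1) / 1 + 0.1 / 4 = 0.925 and each of the other three actions gets 0.1 / 4 = 0.025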
def sarsa(env, q_value, policy, step_size=ALPHA):
episode_reward = 0.0
state = env.reset()
actions, prob = policy[state]
action = np.random.choice(actions, size=1, p=prob)[0]
done = False
while not done:
next_state, reward, done, _ = env.step(action)
episode_reward += reward
        # update the Q-table
if done:
q_value[state[0], state[1], action] += step_size * (reward - q_value[state[0], state[1], action])
update_epsilon_greedy_policy(env, state, q_value, policy)
else:
next_actions, prob = policy[next_state]
next_action = np.random.choice(next_actions, size=1, p=prob)[0]
next_q = q_value[next_state[0], next_state[1], next_action]
q_value[state[0], state[1], action] += step_size * (reward + GAMMA * next_q - q_value[state[0], state[1], action])
update_epsilon_greedy_policy(env, state, q_value, policy)
state = next_state
action = next_action
return episode_reward
def q_learning(env, q_value, policy, step_size=ALPHA):
episode_reward = 0.0
state = env.reset()
done = False
while not done:
actions, prob = policy[state]
action = np.random.choice(actions, size=1, p=prob)[0]
next_state, reward, done, _ = env.step(action)
#print(state, actions, prob, action, next_state, reward)
episode_reward += reward
        # update the Q-table
if done:
q_value[state[0], state[1], action] += step_size * (reward - q_value[state[0], state[1], action])
update_epsilon_greedy_policy(env, state, q_value, policy)
else:
            # compute the greedy (max) value of the next state
max_next_q = np.max(q_value[next_state[0], next_state[1], :])
q_value[state[0], state[1], action] += step_size * (reward + GAMMA * max_next_q - q_value[state[0], state[1], action])
update_epsilon_greedy_policy(env, state, q_value, policy)
state = next_state
return episode_reward
def expected_sarsa(env, q_value, policy, step_size=ALPHA):
episode_reward = 0.0
state = env.reset()
done = False
while not done:
actions, prob = policy[state]
action = np.random.choice(actions, size=1, p=prob)[0]
next_state, reward, done, _ = env.step(action)
episode_reward += reward
        # update the Q-table
if done:
q_value[state[0], state[1], action] += step_size * (reward - q_value[state[0], state[1], action])
update_epsilon_greedy_policy(env, state, q_value, policy)
else:
            # compute the expected value of the next state under the current policy
expected_next_q = 0.0
for action_ in env.ACTIONS:
action_prob = policy[next_state][1]
expected_next_q += action_prob[action_] * q_value[next_state[0], next_state[1], action_]
q_value[state[0], state[1], action] += step_size * (reward + GAMMA * expected_next_q - q_value[state[0], state[1], action])
update_epsilon_greedy_policy(env, state, q_value, policy)
state = next_state
return episode_reward
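# The three agents above differ only in their bootstrap target:
#   SARSA:          reward + GAMMA * Q(s', a')            (a' sampled from the policy)
#   Q-Learning:     reward + GAMMA * max_a Q(s', a)
#   Expected SARSA: reward + GAMMA * sum_a pi(a|s') Q(s', a)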
# print optimal policy
def print_optimal_policy(env, q_value):
optimal_policy = []
for i in range(0, GRID_HEIGHT):
optimal_policy.append([])
for j in range(0, GRID_WIDTH):
if (i, j) in TERMINAL_STATES:
optimal_policy[-1].append('G')
continue
if (i, j) in CLIFF_STATES:
optimal_policy[-1].append('-')
continue
best_action = np.argmax(q_value[i, j, :])
if best_action == env.ACTION_UP:
optimal_policy[-1].append('U')
elif best_action == env.ACTION_DOWN:
optimal_policy[-1].append('D')
elif best_action == env.ACTION_LEFT:
optimal_policy[-1].append('L')
elif best_action == env.ACTION_RIGHT:
optimal_policy[-1].append('R')
for row in optimal_policy:
print(row)
print()
def td_comparison(env):
rewards_expected_sarsa = np.zeros(MAX_EPISODES)
rewards_sarsa = np.zeros(MAX_EPISODES)
rewards_q_learning = np.zeros(MAX_EPISODES)
    # declare the Q-table variables
q_table_sarsa = None
q_table_q_learning = None
q_table_expected_sarsa = None
for run in range(TOTAL_RUNS):
print("runs: {0}".format(run))
        # create the initial Q-tables
q_table_sarsa = generate_initial_q_value(env)
q_table_q_learning = generate_initial_q_value(env)
q_table_expected_sarsa = generate_initial_q_value(env)
        # create the initial random policies
policy_sarsa = generate_initial_random_policy(env)
policy_q_learning = generate_initial_random_policy(env)
policy_expected_sarsa = generate_initial_random_policy(env)
for episode in range(MAX_EPISODES):
rewards_sarsa[episode] += sarsa(env, q_table_sarsa, policy_sarsa)
rewards_q_learning[episode] += q_learning(env, q_table_q_learning, policy_q_learning)
rewards_expected_sarsa[episode] += expected_sarsa(env, q_table_expected_sarsa, policy_expected_sarsa)
    # average over the TOTAL_RUNS (25) independent runs
rewards_sarsa /= TOTAL_RUNS
rewards_q_learning /= TOTAL_RUNS
rewards_expected_sarsa /= TOTAL_RUNS
    # plot the results
plt.plot(rewards_sarsa, linestyle='-', color='darkorange', label='SARSA')
plt.plot(rewards_q_learning, linestyle=':', color='green', label='Q-Learning')
plt.plot(rewards_expected_sarsa, linestyle='-.', color='dodgerblue', label='Expected SARSA')
plt.xlabel('Episodes')
plt.ylabel('Episode rewards')
plt.ylim([-100, 0])
plt.legend()
plt.savefig('images/cliff_td_comparison.png')
plt.close()
# display optimal policy
print()
    print('[SARSA: greedy policy from the learned Q-Table]')
print_optimal_policy(env, q_table_sarsa)
    print('[Q-Learning: greedy policy from the converged Q-Table]')
print_optimal_policy(env, q_table_q_learning)
    print('[Expected SARSA: greedy policy from the converged Q-Table]')
print_optimal_policy(env, q_table_expected_sarsa)
def cliff_td_comparison_main():
    # check for and create the image output directory
if not os.path.exists('images/'):
os.makedirs('images/')
env = CliffGridWorld(
height=GRID_HEIGHT,
width=GRID_WIDTH,
start_state=START_STATE,
terminal_states=TERMINAL_STATES,
transition_reward=-1.0,
terminal_reward=-1.0,
outward_reward=-1.0,
cliff_states=[(s, START_STATE, -100.0) for s in CLIFF_STATES]
)
td_comparison(env)
if __name__ == '__main__':
cliff_td_comparison_main()
| 30.975
| 135
| 0.617318
|
794bcb6a28ecd69992c718122e60bb205f3053fc
| 6,456
|
py
|
Python
|
api/tests/test_user_actions.py
|
jcuna/room-mgt
|
84c071b5f3a2e6276f0064fd3f5a8ea3d87b58f7
|
[
"MIT"
] | null | null | null |
api/tests/test_user_actions.py
|
jcuna/room-mgt
|
84c071b5f3a2e6276f0064fd3f5a8ea3d87b58f7
|
[
"MIT"
] | 21
|
2019-07-04T21:31:37.000Z
|
2022-02-26T09:50:57.000Z
|
api/tests/test_user_actions.py
|
jcuna/room-mgt
|
84c071b5f3a2e6276f0064fd3f5a8ea3d87b58f7
|
[
"MIT"
] | null | null | null |
from base64 import b64encode
from flask.testing import FlaskClient
from tests import endpoint, secret_key
from tests.injectors import resources
from tests.seeders import seed_project
class LocalUser(object):
id = None
token = None
pass
user = LocalUser()
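# module-level holder for state shared across the sequentially ordered tests below
# (the created user's id and the one-time verification token)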
def test_admin_create_user(client: FlaskClient, admin_login: dict):
project_resp = seed_project(client, admin_login)
resp = client.post(
endpoint('/users'),
json={
'first_name': 'John',
'last_name': 'Smith',
'email': 'jondmith@school.edu',
'roles': [1], # admin
'attributes': {'access': {'projects': [project_resp.json['id']]}}
},
headers=admin_login
)
assert 'id' in resp.json
assert resp.status_code == 200
assert len(resources.mails) == 1
user.id = resp.json['id']
def test_user_verifies_account(client: FlaskClient):
from dal.models import UserToken
verification_token = UserToken.query.first()
assert verification_token is not None
verification = client.get(endpoint('/user-tokens/badtoken'))
assert verification.status_code == 400
assert 'isValid' in verification.json
assert verification.json['isValid'] == False
verification = client.get(endpoint('/user-tokens/%s' % verification_token.token))
assert verification.status_code == 200
assert 'isValid' in verification.json
assert verification.json['isValid'] == True
user.token = verification_token.token
def test_user_activates_account(client: FlaskClient):
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': 'bad-token',
'pw': 'badpassword',
'pw2': 'badpassword',
}
)
assert resp.status_code == 400
assert 'Invalid token' in resp.json['error']
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': user.token,
'pw': 'badpassword',
'pw2': 'badpassword',
}
)
assert resp.status_code == 400
assert 'Invalid password' in resp.json['error']
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': user.token,
'pw': 'mY$P@ssw0rd',
'pw2': 'mY$P@ssw0rd',
}
)
assert resp.status_code == 200
def test_token_cannot_be_reused(client: FlaskClient):
verification = client.get(endpoint('/user-tokens/%s' % user.token))
assert verification.status_code == 400
assert 'isValid' in verification.json
assert verification.json['isValid'] == False
def test_new_user_can_login(client: FlaskClient):
auth = {
'Authorization': 'Basic ' + b64encode(b'jondmith@school.edu:mY$P@ssw0rd').decode()
}
login_resp = client.post(endpoint('/login'), headers=auth)
assert 'token' in login_resp.json, 'token expected'
assert login_resp.status_code == 200
def test_user_changes_password(client: FlaskClient):
from dal.models import UserToken
resp = client.put(endpoint('/users/reset-password'))
assert resp.status_code == 400
assert 'error' in resp.json
assert 'Missing email' in resp.json['error']
resp = client.put(endpoint('/users/reset-password'), json={'email': 'jondmith2@school.edu'})
assert resp.status_code == 200
assert len(resources.mails) == 1, 'no email has been sent because user does not exist'
resp = client.put(endpoint('/users/reset-password'), json={'email': 'jondmith@school.edu'})
assert resp.status_code == 200
assert UserToken.query.count() == 2
assert len(resources.mails) == 2, 'an email should have been sent'
token = UserToken.query.offset(1).first()
assert token is not None
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': token.token,
'pw': '@noth3rp@22w04d',
'pw2': '@noth3rp@22w04d',
}
)
assert resp.status_code == 200
auth = {
'Authorization': 'Basic ' + b64encode(b'jondmith@school.edu:@noth3rp@22w04d').decode()
}
login_resp = client.post(endpoint('/login'), headers=auth)
assert 'token' in login_resp.json, 'token expected'
assert login_resp.status_code == 200
def test_sending_messages(client: FlaskClient):
from dal.models import UserMessage
resp = client.post(
endpoint('/messages'),
json={
'user_id': user.id,
'subject': 'testing a subject',
'body': '<h1>Hello test</h1><p>This is the body</p>'
},
headers={'X-System-Token': 'secret_key'}
)
assert resp.status_code == 401
resp = client.post(
endpoint('/messages'),
json={
'user_id': user.id,
'subject': 'testing a subject',
'body': '<h1>Hello test</h1><p>This is the body</p>'
},
headers={'X-System-Token': secret_key}
)
assert resp.status_code == 200
messages = UserMessage.query.all()
assert len(messages) == 1
assert messages[0].user_id == user.id
assert messages[0].read == False
assert messages[0].subject == 'testing a subject'
assert messages[0].message == '<h1>Hello test</h1><p>This is the body</p>'
def test_get_user_messages(client: FlaskClient, admin_login):
from core.messages import send_message
from dal.models import User
admin = User.query.filter_by(email='testuser@testing.org').first()
send_message(admin.id, 'testing a subject', '<h1>Hello test</h1><p>This is the body</p>')
resp = client.get(endpoint('/messages'), headers=admin_login)
assert resp.status_code == 200
assert 'list' in resp.json
assert len(resp.json['list']) == 1
assert 'subject' in resp.json['list'][0]
assert 'id' in resp.json['list'][0]
assert 'message' in resp.json['list'][0]
assert 'read' in resp.json['list'][0]
assert 'date' in resp.json['list'][0]
assert resp.json['list'][0]['read'] == False
def test_mark_notification_read(client: FlaskClient, admin_login):
resp = client.get(endpoint('/messages'), headers=admin_login)
_id = resp.json['list'][0]['id']
resp = client.put(endpoint('/messages/%s' % _id))
assert resp.status_code == 200
resp = client.get(endpoint('/messages'), headers=admin_login)
assert 'read' in resp.json['list'][0]
assert resp.json['list'][0]['read'] == True
| 28.821429
| 96
| 0.630266
|
794bcb917398546e6f499649f0fa5435f49aa790
| 24
|
py
|
Python
|
dora/__init__.py
|
caiertl/dora
|
eac67a0af6f943dfe93c6ddef7bb2192bd2516fe
|
[
"CC0-1.0"
] | 4
|
2017-11-15T23:09:52.000Z
|
2018-08-21T11:28:46.000Z
|
dora/__init__.py
|
caiertl/dora
|
eac67a0af6f943dfe93c6ddef7bb2192bd2516fe
|
[
"CC0-1.0"
] | 1
|
2019-10-18T15:05:15.000Z
|
2019-11-02T03:22:24.000Z
|
dora/__init__.py
|
caiertl/dora
|
eac67a0af6f943dfe93c6ddef7bb2192bd2516fe
|
[
"CC0-1.0"
] | 1
|
2020-04-30T17:24:52.000Z
|
2020-04-30T17:24:52.000Z
|
__version__ = '0.2.1b0'
| 12
| 23
| 0.666667
|
794bcbf828a7c1ebada818d16dbda2838bcb8a10
| 1,405
|
py
|
Python
|
python/datawrangling/datawrangling.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | 1
|
2022-03-30T12:59:44.000Z
|
2022-03-30T12:59:44.000Z
|
python/datawrangling/datawrangling.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | null | null | null |
python/datawrangling/datawrangling.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | 3
|
2019-08-13T11:33:36.000Z
|
2022-03-08T22:00:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import pandas as pd
import numpy as np
# Get File Directory
WORK_DIR = os.path.dirname((os.path.realpath(__file__)))
# Load the Titanic CSV into a pandas DataFrame
df = pd.read_csv(WORK_DIR + "/data/titanic.csv")
head = df.head()
dropna = df.dropna()
over30 = df[df['Age'] > 30]
female = df[df['Sex'] == 'female']
over30female = df[(df['Age'] > 30) & (df['Sex'] == 'female')]
print(over30female)
bysex = df.groupby('Sex').Survived.value_counts()
print(bysex)
# Create a DatetimeIndex of 150 timestamps at one-second intervals
data = pd.date_range('1/1/2016', periods=150, freq='s')
# Create a time series by assigning a random integer value to each timestamp in data
time_series = pd.Series(np.random.randint(0, 500, len(data)), index=data)
print(time_series.head())
print("\n")
# Resample: bin the 1-second rows into minutes, summing the corresponding values
time_series = time_series.resample('1Min').sum()
print(time_series.head())
print("\n")
# Time zone conversion: Let's assume original timeseries was
# in UTC and we want to convert to US/Eastern
time_series_utc = time_series.tz_localize('UTC')
time_series_eastern = time_series_utc.tz_convert('US/Eastern')
result = df[(df['Age'] > 30) & (df['Sex'] == 'female')]
result.to_excel('result.xlsx')
| 27.019231
| 74
| 0.701779
|
794bcc194cbb321bf1fb61dcb7446cea90bbee34
| 3,312
|
py
|
Python
|
tests/eth2/beacon/state_machines/test_state_transition.py
|
mhchia/trinity
|
e40e475064ca4605887706e9b0e4f8e2349b10cd
|
[
"MIT"
] | null | null | null |
tests/eth2/beacon/state_machines/test_state_transition.py
|
mhchia/trinity
|
e40e475064ca4605887706e9b0e4f8e2349b10cd
|
[
"MIT"
] | null | null | null |
tests/eth2/beacon/state_machines/test_state_transition.py
|
mhchia/trinity
|
e40e475064ca4605887706e9b0e4f8e2349b10cd
|
[
"MIT"
] | null | null | null |
import pytest
from eth2.beacon.db.chain import BeaconChainDB
from eth2.beacon.state_machines.forks.serenity.blocks import (
SerenityBeaconBlock,
)
from eth2.beacon.tools.builder.proposer import (
create_mock_block,
)
from eth2.beacon.types.historical_batch import HistoricalBatch
@pytest.mark.parametrize(
(
'genesis_slot,'
),
[
(0),
]
)
@pytest.mark.parametrize(
(
'num_validators,'
'slots_per_epoch,'
'min_attestation_inclusion_delay,'
'target_committee_size,'
'shard_count,'
'state_slot,'
'slots_per_historical_root'
),
[
(10, 10, 1, 2, 2, 2, 8192),
# state.slot == SLOTS_PER_HISTORICAL_ROOT
(6, 6, 1, 2, 2, 8, 8),
# state.slot > SLOTS_PER_HISTORICAL_ROOT
(7, 7, 1, 2, 2, 9, 8),
# state.slot < SLOTS_PER_HISTORICAL_ROOT
(7, 7, 1, 2, 2, 7, 8),
# state.slot % SLOTS_PER_HISTORICAL_ROOT = 0
(11, 4, 1, 2, 2, 16, 8),
(16, 4, 1, 2, 2, 32, 8),
# updated_state.slot == SLOTS_PER_HISTORICAL_ROOT
(6, 4, 1, 2, 2, 7, 8),
# updated_state.slot % SLOTS_PER_HISTORICAL_ROOT = 0
(11, 4, 1, 2, 2, 15, 8),
(16, 4, 1, 2, 2, 31, 8),
]
)
def test_per_slot_transition(base_db,
genesis_block,
genesis_state,
fixture_sm_class,
config,
state_slot,
keymap):
chaindb = BeaconChainDB(base_db)
chaindb.persist_block(genesis_block, SerenityBeaconBlock)
chaindb.persist_state(genesis_state)
state = genesis_state
# Create a block
block = create_mock_block(
state=state,
config=config,
state_machine=fixture_sm_class(
chaindb,
genesis_block,
),
block_class=SerenityBeaconBlock,
parent_block=genesis_block,
keymap=keymap,
slot=state_slot,
)
# Store in chaindb
chaindb.persist_block(block, SerenityBeaconBlock)
# Get state machine instance
sm = fixture_sm_class(
chaindb,
block,
)
# Get state transition instance
st = sm.state_transition_class(sm.config)
# NOTE: we want to run both functions, however they are run independently
# so we have two function calls
updated_state = st.cache_state(state)
updated_state = st.per_slot_transition(updated_state)
# Ensure that slot gets increased by 1
assert updated_state.slot == state.slot + 1
# latest_block_roots
latest_block_roots_index = (updated_state.slot - 1) % st.config.SLOTS_PER_HISTORICAL_ROOT
assert updated_state.latest_block_roots[latest_block_roots_index] == block.previous_block_root
# historical_roots
if updated_state.slot % st.config.SLOTS_PER_HISTORICAL_ROOT == 0:
historical_batch = HistoricalBatch(
block_roots=state.latest_block_roots,
state_roots=state.latest_state_roots,
slots_per_historical_root=config.SLOTS_PER_HISTORICAL_ROOT,
)
assert updated_state.historical_roots[-1] == historical_batch.hash_tree_root
else:
assert updated_state.historical_roots == state.historical_roots
| 30.109091
| 98
| 0.619867
|
794bcc424dddeadbf5a66442604903c26090a77c
| 1,342
|
py
|
Python
|
build/lib/MyHeartCounts/download_minsetdata.py
|
AshleyLab/MyHeartCounts2.0
|
14b939cdb0f760eb891c91bf28e018d73362d03a
|
[
"MIT"
] | null | null | null |
build/lib/MyHeartCounts/download_minsetdata.py
|
AshleyLab/MyHeartCounts2.0
|
14b939cdb0f760eb891c91bf28e018d73362d03a
|
[
"MIT"
] | null | null | null |
build/lib/MyHeartCounts/download_minsetdata.py
|
AshleyLab/MyHeartCounts2.0
|
14b939cdb0f760eb891c91bf28e018d73362d03a
|
[
"MIT"
] | null | null | null |
# import libraries
from MyHeartCounts import MyHeartCounts
##############################
print('Start')
# Initialize a MyHeartCounts object
# MHC = MyHeartCounts(user_password_file_path = 'synapseAccess.txt',synapseCachePath ='/oak/stanford/groups/euan/projects/mhc/code/ali_code/data/synapseCache')
MHC = MyHeartCounts(user_password_file_path='../synapseAccess.txt',
synapseCachePath='/Users/ajaved/Three/MHC_DataBase/code/synapseCache')
# Rev up your engine!! -- sets up the cache and other administrative scripts
MHC.start()
print('MHC ML Infrastructure up and running...')
# load a study
MHC.loadStudy(studyName='HealthKitDataCollector', studyTable='syn3560085', limit = 1000)
#MHC.loadStudy(studyName='mindset_adequacy', studyTable=' syn18143711')
#MHC.loadStudy(studyName='AB_TestResults', studyTable='syn7188351')
# unique users in our analysis; start with the smallest study, mindset
users = MHC.Studies[0].studyUsers
# get users of all studies
# we are down to 1044 users now. Let us see how much data they have in healthkit data collector. Let's start with 10 users just to check
# download all data
users = sorted(list(users))
for i in range(1043, 0, -5):
MHC.Studies[0].retrieve_blobs(blob_names=['data.csv'], healthCodes=users[i:i + 50], silent=False)
print(str(1044 - i) + ' of 1044 users downloaded.')
| 47.928571
| 159
| 0.742176
|
794bcd343047a914e811c1739e95dff14dc37a41
| 10,470
|
py
|
Python
|
refitt/apps/refitt/api.py
|
refitt/ref
|
3ccc398e7b95f77549ab77884b87f40abdd3effb
|
[
"Apache-2.0"
] | 4
|
2020-09-11T01:15:11.000Z
|
2021-05-12T16:46:48.000Z
|
refitt/apps/refitt/api.py
|
refitt/ref
|
3ccc398e7b95f77549ab77884b87f40abdd3effb
|
[
"Apache-2.0"
] | 12
|
2021-03-20T03:24:53.000Z
|
2022-02-19T03:20:43.000Z
|
refitt/apps/refitt/api.py
|
refitt/ref
|
3ccc398e7b95f77549ab77884b87f40abdd3effb
|
[
"Apache-2.0"
] | 2
|
2021-02-01T23:49:39.000Z
|
2021-12-11T19:01:23.000Z
|
# SPDX-FileCopyrightText: 2019-2021 REFITT Team
# SPDX-License-Identifier: Apache-2.0
"""Make authenticated requests to the API."""
# type annotations
from __future__ import annotations
from typing import List, Dict, Callable, Optional, IO, Any, Union
# standard libs
import os
import sys
import json
import functools
import logging
from io import BytesIO
from functools import cached_property
# external libs
from requests.exceptions import ConnectionError
from cmdkit.app import Application, exit_status
from cmdkit.cli import Interface, ArgumentError
from rich.console import Console
from rich.syntax import Syntax
# internal libs
from ...web import request
from ...web.api.response import STATUS_CODE
from ...core.exceptions import log_exception
from ...core import typing, ansi
# public interface
__all__ = ['APIClientApp', ]
PROGRAM = 'refitt api'
PADDING = ' ' * len(PROGRAM)
USAGE = f"""\
usage: {PROGRAM} [-h] <method> <route> [<options>...] [[-d DATA | @FILE] | [-f FILE]] [-r] [-x NODE]
{PADDING} [--download] [--no-headers]
{__doc__}\
"""
HELP = f"""\
{USAGE}
For POST requests, include JSON payloads with -d/--data either inline or as @ followed by
a local file path. To upload a raw file as an attachment use -f/--file.
Downloaded files are not dumped to a live TTY but are written to stdout otherwise; use
--download to save them to the local filesystem.
Headers are displayed along with syntax highlighting if a TTY is detected.
Extract a member element from JSON responses with -x/--extract (e.g., '-x .Response.object').
Strip quotations for extracted string literals with -r/--raw.
URL parameters can be encoded inline, e.g.,
> refitt api get recommendation limit==1 join==true
arguments:
method HTTP method (e.g., GET/PUT/POST/DELETE).
route URL path (e.g., /object/1).
options... URL parameters (e.g., 'limit==1').
options:
-d, --data DATA | @FILE Raw inline content or file path.
-f, --file FILE Path to file for attachment. ('-' for stdin).
-x, --extract NODE JSON path for element (e.g., '.Response.user').
-r, --raw Strip quotes on single extracted string literal.
    --no-headers               Do not show headers for TTY.
--download Save file attachment.
-h, --help Show this message and exit.\
"""
# application logger
log = logging.getLogger('refitt')
class APIClientApp(Application):
"""Application class for requests module."""
interface = Interface(PROGRAM, USAGE, HELP)
method: str = None
interface.add_argument('method')
route: str = None
interface.add_argument('route')
options: List[str] = None
interface.add_argument('options', nargs='*', default=[])
show_headers: bool = True
interface.add_argument('--no-headers', action='store_false', dest='show_headers')
    download: bool = False
interface.add_argument('--download', action='store_true')
data_source: Optional[str] = None
file_source: Optional[str] = None
post_interface = interface.add_mutually_exclusive_group()
    post_interface.add_argument('-d', '--data', default=data_source, dest='data_source')
post_interface.add_argument('-f', '--file', default=file_source, dest='file_source')
extraction_path: Optional[str] = None
interface.add_argument('-x', '--extract', default=None, dest='extraction_path')
display_raw: bool = False
interface.add_argument('-r', '--raw', action='store_true', dest='display_raw')
exceptions = {
ConnectionError: functools.partial(log_exception, logger=log.error, status=exit_status.runtime_error),
**Application.exceptions,
}
def run(self) -> None:
"""Make web request."""
self.check_args()
self.apply_settings()
try:
self.format_output(**self.make_request())
except request.APIError as error:
response, = error.args
self.format_output(**{
'status': response.status_code,
'headers': {'Protocol': request.get_protocol(response),
'Version': request.get_protocol_version(response),
**response.headers},
'content': response.json()
})
def check_args(self):
"""Validate method, position arguments, etc."""
if self.file_source is not None and self.method.lower() != 'post':
raise ArgumentError(f'Cannot use -f/--file option for {self.method.upper()} request')
elif self.data_source is not None and self.method.lower() != 'post':
raise ArgumentError(f'Cannot use -d/--data option for {self.method.upper()} request')
for option in self.options:
if '==' not in option:
raise ArgumentError(f'Positional arguments should have equality syntax, \'{option}\'')
@property
def request_method(self) -> Callable[..., dict]:
"""Bound method of `request` module by accessing named `method`."""
method = self.method.lower()
try:
return getattr(request, method)
except AttributeError:
raise ArgumentError(f'Method not supported \'{method}\'')
@property
def endpoint(self) -> Callable[..., dict]:
"""Bound method from `refitt.web.request` called with the `route`."""
return functools.partial(self.request_method, self.route)
@cached_property
def files(self) -> Optional[Dict[str, IO]]:
"""Prepared file stream."""
if self.file_source is None:
return None
elif self.file_source == '-':
return {'<stdin>': BytesIO(sys.stdin.buffer.read())}
else:
with open(self.file_source, mode='rb') as stream:
return {os.path.basename(self.file_source): BytesIO(stream.read())}
@cached_property
def data(self) -> Optional[dict]:
"""Prepared JSON data."""
if self.data_source is None:
return None
elif not self.data_source.startswith('@'):
return json.loads(self.data_source)
else:
with open(self.data_source[1:], mode='r') as stream:
return json.load(stream)
@cached_property
def payload(self) -> Dict[str, Union[Dict[str, Any], Dict[str, IO]]]:
"""Mapping of request parameter and data/stream for request payload."""
if self.file_source:
return {'files': self.files}
elif self.data_source:
return {'json': self.data}
else:
return {}
def make_request(self) -> dict:
"""Issue web request."""
return self.endpoint(extract_response=False, raise_on_error=True,
**self.payload, **self.structured_options)
@property
def structured_options(self) -> dict:
"""Parse `{option}=={value}` positional arguments into dictionary."""
return {
option: typing.coerce(value) for option, value in [
arg.split('==') for arg in self.options
]
}
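    # Illustrative example (not from the original module; the actual conversion
    # is whatever `typing.coerce` does): positional arguments such as
    #   ['limit==1', 'join==true']
    # would be parsed into a mapping like {'limit': 1, 'join': True}.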
def format_output(self, status: int, headers: dict, content: dict) -> None:
"""Format and print response data from request."""
if sys.stdout.isatty() and self.show_headers:
self.format_headers(status, headers)
if headers['Content-Type'] == 'application/octet-stream':
self.format_octet_stream(content)
else:
self.format_json(content)
@staticmethod
def format_headers(status: int, headers: dict) -> None:
"""Display request info and headers."""
headers.pop('Connection', None)
protocol = headers.pop('Protocol')
version = headers.pop('Version')
print(f'{ansi.blue(protocol)}/{ansi.blue(version)} {ansi.cyan(str(status))} '
f'{ansi.cyan(STATUS_CODE[status])}')
for field, value in headers.items():
print(f'{ansi.cyan(field)}: {value}')
def format_octet_stream(self, content: dict) -> None:
"""Format output and save file to local disk if needed."""
if not self.download:
if sys.stdout.isatty():
print('---')
print(f'{ansi.red("Content-Disabled")}: use --download to save file')
else:
(filename, data), = content.items()
sys.stdout.buffer.write(data)
else:
(filename, data), = content.items()
self.save_local(filename, data)
def format_json(self, content: dict) -> None:
"""Format output for JSON content."""
if self.extraction_path is not None:
content = self.extract_partial(content, self.extraction_path)
if isinstance(content, (dict, list)):
content = json.dumps(content, indent=4)
if sys.stdout.isatty():
Console().print(Syntax(content, 'json',
word_wrap=True, theme='solarized-dark',
background_color='default'))
else:
print(content)
else:
content = json.dumps(content, indent=4) # formats special types
if self.display_raw:
content = content.strip('"')
print(content)
@staticmethod
def extract_partial(content: dict, path: str) -> Any:
"""Pull sections or values out of nested `content`."""
result = dict(content)
for section in path.strip('.').split('.'):
try:
result = result[section]
except KeyError as error:
raise RuntimeError(f'Element not found \'{path}\'') from error
return result
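    # Illustrative example (not part of the original module): calling
    #   extract_partial({'Response': {'user': {'id': 1}}}, '.Response.user')
    # returns {'id': 1}; a missing key raises RuntimeError naming the path.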
@staticmethod
def save_local(filename: str, data: bytes) -> None:
"""Attempt to save `data` as local file to `filename` path."""
name = filename.strip('./') # NOTE: safe path (e.g., no ../)
path = name
suffix = 1
while os.path.exists(path):
path = f'{name}.{suffix}'
suffix += 1
print()
print(f'Writing {len(data)} B to "{path}"')
with open(path, mode='wb') as stream:
stream.write(data)
print('Done.')
@staticmethod
def apply_settings() -> None:
"""Additional setup requirements before making web request."""
request.PERSIST_TOKEN = True
| 36.736842
| 110
| 0.605349
|
794bcd4d616b5c1d83e26dd54aa9239463dd3572
| 6,827
|
py
|
Python
|
mmdet/models/losses/ctdet_loss.py
|
lizhe960118/CenterNet
|
d1a0d13974e2316c6d127ca7860866cdd93bcfa7
|
[
"Apache-2.0"
] | 92
|
2019-08-12T09:31:38.000Z
|
2022-03-17T06:22:41.000Z
|
mmdet/models/losses/ctdet_loss.py
|
lizhe960118/CenterNet
|
d1a0d13974e2316c6d127ca7860866cdd93bcfa7
|
[
"Apache-2.0"
] | 4
|
2019-08-15T09:06:01.000Z
|
2020-12-25T06:46:36.000Z
|
mmdet/models/losses/ctdet_loss.py
|
lizhe960118/CenterNet
|
d1a0d13974e2316c6d127ca7860866cdd93bcfa7
|
[
"Apache-2.0"
] | 18
|
2019-09-05T01:29:14.000Z
|
2021-06-29T13:10:11.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from ..registry import LOSSES
# def gaussian_radius(det_size, min_overlap=0.7):
# height, width = det_size
# a1 = 1
# b1 = (height + width)
# c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
# sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
# r1 = (b1 + sq1) / 2
# a2 = 4
# b2 = 2 * (height + width)
# c2 = (1 - min_overlap) * width * height
# sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
# r2 = (b2 + sq2) / 2
# a3 = 4 * min_overlap
# b3 = -2 * min_overlap * (height + width)
# c3 = (min_overlap - 1) * width * height
# sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
# r3 = (b3 + sq3) / 2
# return min(r1, r2, r3)
# def gaussian2D(shape, sigma=1):
# m, n = [(ss - 1.) / 2. for ss in shape]
# y, x = np.ogrid[-m:m+1,-n:n+1]
# h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
# h[h < np.finfo(h.dtype).eps * h.max()] = 0
# return h
# def draw_umich_gaussian(heatmap, center, radius, k=1):
# diameter = 2 * radius + 1
# gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
# x, y = int(center[0]), int(center[1])
# height, width = heatmap.shape[0:2]
# left, right = min(x, radius), min(width - x, radius + 1)
# top, bottom = min(y, radius), min(height - y, radius + 1)
# masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
# masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
# if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
# np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
# return heatmap
def _neg_loss(pred, gt):
''' Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
# print(pred) # 几乎全部是0
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
# print("num_pos:", num_pos)
# print("pos_loss:", pos_loss)
# print("neg_loss:", neg_loss)
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
class FocalLoss(nn.Module):
'''nn.Module warpper for focal loss'''
def __init__(self):
super(FocalLoss, self).__init__()
self.neg_loss = _neg_loss
def forward(self, out, target):
return self.neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _tranpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
class RegL1Loss(nn.Module):
def __init__(self):
super(RegL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _tranpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
# print(target)
# import pdb; pdb.set_trace()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
# loss = F.l1_loss(pred * mask, target * mask, size_average=False)
loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
loss = loss / (mask.sum() + 1e-4)
return loss
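# Reading of the forward pass above (an added summary, not original commentary):
# predictions are gathered at the ground-truth object indices `ind`, masked to
# valid objects, and the L1 loss is summed then normalised by the number of
# valid mask entries (plus 1e-4 to avoid division by zero).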
@LOSSES.register_module
class CtdetLoss(torch.nn.Module):
def __init__(self):
super(CtdetLoss, self).__init__()
# self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
# self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
# RegLoss() if opt.reg_loss == 'sl1' else None
# self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
# NormRegL1Loss() if opt.norm_wh else \
# RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
self.crit = FocalLoss()
self.crit_reg = RegL1Loss()
# self.crit_wh = self.crit_reg
# self.opt = opt
# opts
self.num_stacks = 1
self.wh_weight = 0.1
self.off_weight = 1
self.hm_weight = 1
def forward(self, outputs, **kwargs):
batch = kwargs
hm_loss, wh_loss, off_loss = 0, 0, 0
for s in range(self.num_stacks):
output = outputs[s]
# for key, value in output.items():
# print(key, value.shape)
# if not opt.mse_loss:
output['hm'] = torch.clamp(output['hm'].sigmoid_(), min=1e-4, max=1-1e-4)
# output['hm'] = output['hm'].sigmoid_()
# if opt.eval_oracle_hm:
# output['hm'] = batch['hm']
# if opt.eval_oracle_wh:
# output['wh'] = torch.from_numpy(gen_oracle_map(
# batch['wh'].detach().cpu().numpy(),
# batch['ind'].detach().cpu().numpy(),
# output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
# if opt.eval_oracle_offset:
# output['reg'] = torch.from_numpy(gen_oracle_map(
# batch['reg'].detach().cpu().numpy(),
# batch['ind'].detach().cpu().numpy(),
# output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)
hm_loss += self.crit(output['hm'], batch['hm']) / self.num_stacks
if self.wh_weight > 0:
wh_loss += self.crit_reg(
output['wh'], batch['reg_mask'],
batch['ind'], batch['wh']) / self.num_stacks
if self.off_weight > 0:
off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
batch['ind'], batch['reg']) / self.num_stacks
# loss = self.hm_weight * hm_loss + self.wh_weight * wh_loss + \
# self.off_weight * off_loss
losses = {'hm_loss': self.hm_weight * hm_loss,
'wh_loss': self.wh_weight * wh_loss, 'off_loss': self.off_weight * off_loss}
# loss_stats = {'loss': loss, 'hm_loss': hm_loss,
# 'wh_loss': wh_loss, 'off_loss': off_loss}
# return loss, loss_stats
return losses
| 36.508021
| 96
| 0.560422
|
794bcdd393a0352ab4dea5022f0f48ae278f182a
| 78
|
py
|
Python
|
snowplow_tracker/redis/__init__.py
|
DeyanDeyanov/snowplow-python-tracker
|
8fc7f1e5ed9c0e1b1faf8b477be8412b4e2630c2
|
[
"Apache-2.0"
] | 33
|
2015-02-07T12:25:32.000Z
|
2022-03-08T14:54:51.000Z
|
snowplow_tracker/redis/__init__.py
|
DeyanDeyanov/snowplow-python-tracker
|
8fc7f1e5ed9c0e1b1faf8b477be8412b4e2630c2
|
[
"Apache-2.0"
] | 145
|
2015-01-02T19:15:40.000Z
|
2022-03-04T16:12:21.000Z
|
snowplow_tracker/redis/__init__.py
|
kolonialno/snowplow-python-tracker
|
f4d4b9da49fe1584e82ee8f6230a54c789680097
|
[
"Apache-2.0"
] | 56
|
2015-01-02T19:04:20.000Z
|
2021-11-29T14:29:54.000Z
|
from .redis_emitter import RedisEmitter
from .redis_worker import RedisWorker
| 26
| 39
| 0.871795
|
794bcee81bd15f8a5dfdf6b51d57320ee64c233b
| 912
|
py
|
Python
|
packages/grid/apps/domain/src/main/routes/search/routes.py
|
exityan/PySyft
|
35166c487a5be57f9ad28929ed88a8ba6bdd5aeb
|
[
"Apache-2.0"
] | 425
|
2019-09-22T06:14:53.000Z
|
2022-03-30T02:17:34.000Z
|
packages/grid/apps/domain/src/main/routes/search/routes.py
|
Metrix1010/PySyft
|
6477f64b63dc285059c3766deab3993653cead2e
|
[
"Apache-2.0"
] | 352
|
2019-09-17T15:32:51.000Z
|
2022-03-12T01:07:35.000Z
|
packages/grid/apps/domain/src/main/routes/search/routes.py
|
Metrix1010/PySyft
|
6477f64b63dc285059c3766deab3993653cead2e
|
[
"Apache-2.0"
] | 208
|
2019-09-18T18:32:10.000Z
|
2022-03-24T01:10:11.000Z
|
# stdlib
import json
# third party
from flask import Response
from flask import request
from syft.grid.messages.network_search_message import NetworkSearchMessage
# grid relative
from ...core.task_handler import route_logic
from ..auth import error_handler
from ..auth import optional_token
from ..auth import token_required
from .blueprint import search_blueprint as search_route
@search_route.route("/", methods=["GET"])
@optional_token
def broadcast_search(current_user):
# Get request body
content = request.get_json()
if not content:
content = {}
status_code, response_msg = error_handler(
route_logic, 200, NetworkSearchMessage, current_user, content
)
response = response_msg if isinstance(response_msg, dict) else response_msg.content
return Response(
json.dumps(response),
status=status_code,
mimetype="application/json",
)
| 25.333333
| 87
| 0.743421
|
794bcf34089f3da539f3c9b3f22a3af137d1ddd6
| 866
|
py
|
Python
|
LeetCode/1.TwoSum.py
|
njnur/CP-Practice
|
5c0ba7455262ae5704748c397bee729c82887a9a
|
[
"MIT"
] | null | null | null |
LeetCode/1.TwoSum.py
|
njnur/CP-Practice
|
5c0ba7455262ae5704748c397bee729c82887a9a
|
[
"MIT"
] | null | null | null |
LeetCode/1.TwoSum.py
|
njnur/CP-Practice
|
5c0ba7455262ae5704748c397bee729c82887a9a
|
[
"MIT"
] | null | null | null |
from typing import List, Any
class Solution:
@staticmethod
def two_sum_bruteforce(nums: List[int], target: int) -> Any:
for index_1 in range(0, len(nums)):
num_to_find = target - nums[index_1]
for index_2 in range(index_1+1, len(nums)):
if num_to_find == nums[index_2]:
return [index_1, index_2]
return None
@staticmethod
def two_sum_hash_table(nums: List[int], target: int) -> Any:
hash_table = {}
for i in range(len(nums)):
complement = target - nums[i]
if complement in hash_table:
return i, hash_table[complement]
else:
hash_table[nums[i]] = i
return None
if __name__ == '__main__':
print(Solution().two_sum_bruteforce(
nums=[3, 2, 3],
target=6
))
| 27.0625
| 64
| 0.554273
|
794bd017b8ac5f1ae9b1a4000c49341261f79430
| 719
|
py
|
Python
|
abi.py
|
sonicskye/btcrelay-ui
|
56abff6382a7546d3340f9d0f6eda5093b6b531b
|
[
"MIT"
] | null | null | null |
abi.py
|
sonicskye/btcrelay-ui
|
56abff6382a7546d3340f9d0f6eda5093b6b531b
|
[
"MIT"
] | null | null | null |
abi.py
|
sonicskye/btcrelay-ui
|
56abff6382a7546d3340f9d0f6eda5093b6b531b
|
[
"MIT"
] | null | null | null |
'''
sonicskye@2020
Load JSON ABI from files
'''
import os
import json
import fire
#_PATH = os.path.dirname(os.path.realpath(__file__)) + "/db/" + dbname
ABI_PATH = os.path.dirname(os.path.realpath(__file__)) + "/abi/"
btcrelayabifilename = 'BTCRelay.json'
# read ABI from file
# https://stackabuse.com/reading-and-writing-json-to-a-file-in-python/
def getabi(filename):
with open(ABI_PATH + filename) as json_file:
data = json.load(json_file)
return data['abi']
def getabibtcrelay():
return getabi(btcrelayabifilename)
###################################### MAIN ########################################################
def main():
fire.Fire()
if __name__ == "__main__":
main()
| 19.432432
| 100
| 0.598053
|
794bd064809d0b7fa15aafac041a018de303a0f4
| 15,287
|
py
|
Python
|
notebooks/__code/kropff.py
|
neutronimaging/BraggEdgeFitting
|
233407fc000425ee79897e514964ef196ca27a08
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/kropff.py
|
neutronimaging/BraggEdgeFitting
|
233407fc000425ee79897e514964ef196ca27a08
|
[
"BSD-3-Clause"
] | 2
|
2020-10-06T13:48:24.000Z
|
2020-10-07T16:21:46.000Z
|
notebooks/__code/kropff.py
|
neutronimaging/BraggEdgeFitting
|
233407fc000425ee79897e514964ef196ca27a08
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from qtpy import QtGui
from qtpy.QtWidgets import QFileDialog
from pathlib import Path
import pyqtgraph as pg
from __code.table_handler import TableHandler
from __code.bragg_edge_peak_fitting_gui_utility import GuiUtility
from __code.kropff_fitting_job_handler import KropffFittingJobHandler
from __code.file_handler import make_ascii_file_from_2dim_array
from __code.get import Get
from __code._utilities.dictionary import key_path_exists_in_dictionary
from __code.utilities import find_nearest_index
class Kropff:
def __init__(self, parent=None):
self.parent = parent
self.table_ui = {'high_lda': self.parent.ui.high_lda_tableWidget,
'low_lda' : self.parent.ui.low_lda_tableWidget,
'bragg_peak' : self.parent.ui.bragg_edge_tableWidget}
def reset_all_table(self):
self.reset_high_lambda_table()
self.reset_low_lambda_table()
self.reset_bragg_peak_table()
def reset_high_lambda_table(self):
self.clear_table(table_name='high_lda')
self.fill_table_with_minimum_contain(table_ui=self.table_ui['high_lda'])
def reset_low_lambda_table(self):
self.clear_table(table_name='low_lda')
self.fill_table_with_minimum_contain(table_ui=self.table_ui['low_lda'])
def reset_bragg_peak_table(self):
self.clear_table(table_name='bragg_peak')
self.fill_table_with_minimum_contain(table_ui=self.parent.ui.bragg_edge_tableWidget)
    def clear_table(self, table_name='high_lda', is_all=False):
"""remove all the rows of the table name specified, or all if is_all is True"""
if is_all:
for _key in self.table_ui.keys():
self.clear_table(table_name=_key)
else:
o_table = TableHandler(table_ui=self.table_ui[table_name])
o_table.remove_all_rows()
def fill_table_with_minimum_contain(self, table_ui=None):
fitting_input_dictionary = self.parent.fitting_input_dictionary
rois = fitting_input_dictionary['rois']
o_table = TableHandler(table_ui=table_ui)
nbr_column = o_table.table_ui.columnCount()
other_column_name = ["N/A" for _ in np.arange(nbr_column)]
for _row, _roi in enumerate(rois.keys()):
_roi_key = rois[_roi]
list_col_name = "{}; {}; {}; {}".format(_roi_key['x0'],
_roi_key['y0'],
_roi_key['width'],
_roi_key['height'])
col_name = [list_col_name] + other_column_name
o_table.insert_row(_row, col_name)
def fill_table_with_fitting_information(self):
fitting_input_dictionary = self.parent.fitting_input_dictionary
o_table = TableHandler(table_ui=self.table_ui['high_lda'])
_col = 1
for _row in fitting_input_dictionary['rois'].keys():
_entry = fitting_input_dictionary['rois'][_row]['fitting']['kropff']['high']
o_table.set_item_with_float(_row, _col, _entry['a0'])
o_table.set_item_with_float(_row, _col+1, _entry['b0'])
o_table.set_item_with_float(_row, _col+2, _entry['a0_error'])
o_table.set_item_with_float(_row, _col+3, _entry['b0_error'])
o_table = TableHandler(table_ui=self.table_ui['low_lda'])
_col = 1
for _row in fitting_input_dictionary['rois'].keys():
_entry = fitting_input_dictionary['rois'][_row]['fitting']['kropff']['low']
o_table.set_item_with_float(_row, _col, _entry['ahkl'])
o_table.set_item_with_float(_row, _col+1, _entry['bhkl'])
o_table.set_item_with_float(_row, _col+2, _entry['ahkl_error'])
o_table.set_item_with_float(_row, _col+3, _entry['bhkl_error'])
o_table = TableHandler(table_ui=self.table_ui['bragg_peak'])
_col = 1
for _row in fitting_input_dictionary['rois'].keys():
_entry = fitting_input_dictionary['rois'][_row]['fitting']['kropff']['bragg_peak']
o_table.set_item_with_float(_row, _col, _entry['ldahkl'])
o_table.set_item_with_float(_row, _col+1, _entry['tau'])
o_table.set_item_with_float(_row, _col+2, _entry['sigma'])
o_table.set_item_with_float(_row, _col+3, _entry['ldahkl_error'])
o_table.set_item_with_float(_row, _col+4, _entry['tau_error'])
o_table.set_item_with_float(_row, _col+5, _entry['sigma_error'])
def bragg_peak_right_click(self, position=None):
menu = QtGui.QMenu(self.parent)
selected_rows_submenu = QtGui.QMenu("Selected Rows")
menu.addMenu(selected_rows_submenu)
_fit = selected_rows_submenu.addAction("Fit")
_export = selected_rows_submenu.addAction("Export ...")
advanced_selection_submenu = QtGui.QMenu("Advanced Rows Selection")
menu.addMenu(advanced_selection_submenu)
_negative_thkl = advanced_selection_submenu.addAction("Where t_hkl < 0")
action = menu.exec_(QtGui.QCursor.pos())
if action == _fit:
self.fit_bragg_peak_selected_rows()
elif action == _export:
self.export_bragg_peak_profile()
elif action == _negative_thkl:
self.select_all_rows_with_negative_thkl()
QtGui.QGuiApplication.processEvents() # to close QFileDialog
def fit_bragg_peak_selected_rows(self):
o_gui = GuiUtility(parent=self.parent)
list_rows_selected = o_gui.get_rows_of_table_selected(table_ui=self.parent.ui.bragg_edge_tableWidget)
self.parent.kropff_fit_bragg_peak_region_of_selected_rows(list_row_to_fit=list_rows_selected)
def export_bragg_peak_profile(self):
working_dir = str(Path(self.parent.working_dir).parent)
_export_folder = QFileDialog.getExistingDirectory(self.parent,
directory=working_dir,
caption="Select Output Folder")
QtGui.QGuiApplication.processEvents() # to close QFileDialog
if _export_folder:
o_gui = GuiUtility(parent=self.parent)
list_row_selected = o_gui.get_rows_of_table_selected(table_ui=self.parent.ui.bragg_edge_tableWidget)
for row_selected in list_row_selected:
# make up output file name
name_of_row = o_gui.get_table_str_item(table_ui=self.parent.ui.bragg_edge_tableWidget,
row=row_selected,
column=0)
[x0, y0, width, height] = name_of_row.split("; ")
name_of_row_formatted = "x0{}_y0{}_width{}_height{}".format(x0,y0, width, height)
file_name = "kropff_bragg_peak_profile_{}.txt".format(name_of_row_formatted)
full_file_name = str(Path(_export_folder) / Path(file_name))
o_fit = KropffFittingJobHandler(parent=self.parent)
o_fit.prepare(kropff_tooldbox='bragg_peak')
x_axis = o_fit.xaxis_to_fit
y_axis = o_fit.list_yaxis_to_fit[row_selected]
a0 = self.parent.fitting_input_dictionary['rois'][row_selected]['fitting']['kropff']['high']['a0']
b0 = self.parent.fitting_input_dictionary['rois'][row_selected]['fitting']['kropff']['high']['b0']
ahkl = self.parent.fitting_input_dictionary['rois'][row_selected]['fitting']['kropff']['low']['ahkl']
bhkl = self.parent.fitting_input_dictionary['rois'][row_selected]['fitting']['kropff']['low']['bhkl']
metadata = ["# Bragg peak fitting of row {}".format(row_selected+1)]
metadata.append("# x0: {}".format(x0))
metadata.append("# y0: {}".format(y0))
metadata.append("# width: {}".format(width))
metadata.append("# height: {}".format(height))
metadata.append("# a0: {}".format(a0))
metadata.append("# b0: {}".format(b0))
metadata.append("# ahkl: {}".format(ahkl))
metadata.append("# bhkl: {}".format(bhkl))
metadata.append("#")
metadata.append("# lambda (Angstroms), average transmission")
make_ascii_file_from_2dim_array(metadata=metadata,
col1=x_axis,
col2=y_axis,
output_file_name=full_file_name)
message = "Exported {} file(s) in {}".format(len(list_row_selected), _export_folder)
self.parent.ui.statusbar.showMessage(message, 15000) # 15s
self.parent.ui.statusbar.setStyleSheet("color: green")
def select_all_rows_with_negative_thkl(self):
# activate table
self.parent.ui.bragg_edge_tableWidget.setFocus()
# switch to multi selection mode
self.parent.ui.kropff_bragg_peak_multi_selection.setChecked(True)
self.parent.ui.bragg_edge_tableWidget.setSelectionMode(2)
list_of_rows_to_select = []
fitting_input_dictionary_rois = self.parent.fitting_input_dictionary['rois']
for _row in fitting_input_dictionary_rois.keys():
            _thkl = float(fitting_input_dictionary_rois[_row]['fitting']['kropff']['bragg_peak']['ldahkl'])
if _thkl < 0:
list_of_rows_to_select.append(_row)
o_gui = GuiUtility(parent=self.parent)
o_gui.select_rows_of_table(table_ui=self.parent.ui.bragg_edge_tableWidget,
list_of_rows=list_of_rows_to_select)
def update_fitting_plot(self):
self.parent.ui.fitting.clear()
o_get = Get(parent=self.parent)
part_of_fitting_dict = o_get.part_of_fitting_selected()
name_of_page = part_of_fitting_dict['name_of_page']
table_ui = part_of_fitting_dict['table_ui']
o_table = TableHandler(table_ui=table_ui)
list_row_selected = o_table.get_rows_of_table_selected()
x_axis_selected = o_get.x_axis_checked()
if list_row_selected is None:
# first fitting tab where we only display the full data with bragg peak selection
if self.parent.fitting_peak_ui:
self.parent.ui.fitting.removeItem(self.parent.fitting_peak_ui)
xaxis_dict = self.parent.fitting_input_dictionary['xaxis']
xaxis_index, xaxis_label = xaxis_dict[x_axis_selected]
[left_xaxis_index, right_xaxis_index] = self.parent.bragg_edge_range
xaxis = xaxis_index[left_xaxis_index: right_xaxis_index]
selected_roi = self.parent.fitting_input_dictionary['rois'][0]
yaxis = selected_roi['profile']
yaxis = yaxis[left_xaxis_index: right_xaxis_index]
yaxis = -np.log(yaxis)
self.parent.ui.fitting.plot(xaxis, yaxis,
pen=(self.parent.selection_roi_rgb[0],
self.parent.selection_roi_rgb[1],
self.parent.selection_roi_rgb[2]),
symbol='o')
self.parent.ui.fitting.setLabel("bottom", xaxis_label)
peak_range_index = self.parent.kropff_fitting_range['bragg_peak']
if peak_range_index[0] is None:
peak_range = self.parent.bragg_edge_range
else:
peak_range = [xaxis[peak_range_index[0]], xaxis[peak_range_index[1]]]
if self.parent.fitting_peak_ui:
self.parent.ui.fitting.removeItem(self.parent.fitting_peak_ui)
self.parent.fitting_peak_ui = pg.LinearRegionItem(values=peak_range,
orientation=None,
brush=None,
movable=True,
bounds=None)
self.parent.fitting_peak_ui.sigRegionChanged.connect(self.parent.fitting_range_changed)
self.parent.fitting_peak_ui.setZValue(-10)
self.parent.ui.fitting.addItem(self.parent.fitting_peak_ui)
else:
for row_selected in list_row_selected:
selected_roi = self.parent.fitting_input_dictionary['rois'][row_selected]
xaxis_dict = self.parent.fitting_input_dictionary['xaxis']
[left_xaxis_index, right_xaxis_index] = self.parent.bragg_edge_range
yaxis = selected_roi['profile']
xaxis_index, xaxis_label = xaxis_dict[x_axis_selected]
xaxis = xaxis_index[left_xaxis_index: right_xaxis_index]
yaxis = yaxis[left_xaxis_index: right_xaxis_index]
self.parent.ui.fitting.setLabel("bottom", xaxis_label)
self.parent.ui.fitting.setLabel("left", 'Cross Section (arbitrary units)')
yaxis = -np.log(yaxis)
self.parent.ui.fitting.plot(xaxis, yaxis,
pen=(self.parent.selection_roi_rgb[0],
self.parent.selection_roi_rgb[1],
self.parent.selection_roi_rgb[2]),
symbol='o')
peak_range_index = self.parent.kropff_fitting_range[name_of_page]
if peak_range_index[0] is None:
peak_range = self.parent.bragg_edge_range
else:
peak_range = [xaxis[peak_range_index[0]], xaxis[peak_range_index[1]]]
if self.parent.fitting_peak_ui:
self.parent.ui.fitting.removeItem(self.parent.fitting_peak_ui)
self.parent.fitting_peak_ui = pg.LinearRegionItem(values=peak_range,
orientation=None,
brush=None,
movable=False,
bounds=None)
self.parent.fitting_peak_ui.sigRegionChanged.connect(self.parent.fitting_range_changed)
self.parent.fitting_peak_ui.setZValue(-10)
self.parent.ui.fitting.addItem(self.parent.fitting_peak_ui)
o_gui = GuiUtility(parent=self.parent)
algo_name = o_gui.get_tab_selected(self.parent.ui.tab_algorithm).lower()
if key_path_exists_in_dictionary(dictionary=self.parent.fitting_input_dictionary,
tree_key=['rois', row_selected, 'fitting', algo_name,
name_of_page, 'xaxis_to_fit']):
# show fit only if tof scale selected
if x_axis_selected == 'lambda':
_entry = self.parent.fitting_input_dictionary['rois'][row_selected]['fitting'][algo_name][name_of_page]
xaxis = _entry['xaxis_to_fit']
yaxis = _entry['yaxis_fitted']
yaxis = -np.log(yaxis)
self.parent.ui.fitting.plot(xaxis, yaxis,
pen=(self.parent.fit_rgb[0],
self.parent.fit_rgb[1],
self.parent.fit_rgb[2]))
if peak_range_index[0] is None:
self.parent.fitting_range_changed()
self.parent.ui.fitting.setLabel("left", "Cross Section (arbitrary Units)")
def update_roi_labels(self):
[global_left_range, global_right_range] = self.parent.bragg_edge_range
[left_range, right_range] = list(self.parent.fitting_peak_ui.getRegion())
o_get = Get(parent=self.parent)
x_axis_selected = o_get.x_axis_checked()
xaxis_dict = self.parent.fitting_input_dictionary['xaxis']
xaxis_index, _ = xaxis_dict[x_axis_selected]
[left_xaxis_index, right_xaxis_index] = [global_left_range, global_right_range]
xaxis = xaxis_index[left_xaxis_index: right_xaxis_index]
left_index = find_nearest_index(array=xaxis, value=left_range)
right_index = find_nearest_index(array=xaxis, value=right_range)
xaxis_in_selected_axis = self.parent.fitting_input_dictionary['xaxis'][x_axis_selected][0][
global_left_range: global_right_range]
real_left_value = xaxis_in_selected_axis[left_index]
real_right_value = xaxis_in_selected_axis[right_index]
if x_axis_selected == 'lambda':
            str_format = "{:.2f}"
elif x_axis_selected == 'tof':
str_format = "{:04.2f}"
else:
str_format = "{}"
real_left_value = str_format.format(real_left_value)
real_right_value = str_format.format(real_right_value)
units = Get.units(name=x_axis_selected)
self.parent.ui.bragg_peak_range_from_value.setText(str(real_left_value))
self.parent.ui.bragg_peak_range_to_value.setText(str(real_right_value))
self.parent.ui.from_bragg_peak_range_units.setText(units)
self.parent.ui.to_bragg_peak_range_units.setText(units)
| 44.438953
| 109
| 0.69896
|
794bd254ebeec9f59c1cfaf9e015c31913a7212d
| 441
|
py
|
Python
|
student_portal/celery.py
|
danielmicaletti/music-academy-student-portal
|
ca779f71e69d6676ee77a6cd68a13c2225c886f9
|
[
"MIT"
] | null | null | null |
student_portal/celery.py
|
danielmicaletti/music-academy-student-portal
|
ca779f71e69d6676ee77a6cd68a13c2225c886f9
|
[
"MIT"
] | null | null | null |
student_portal/celery.py
|
danielmicaletti/music-academy-student-portal
|
ca779f71e69d6676ee77a6cd68a13c2225c886f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'student_portal.settings')
from django.conf import settings
app = Celery('student_portal')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| 22.05
| 74
| 0.770975
|
794bd30900a85f67092f9d4617cad02888df11bc
| 310
|
py
|
Python
|
pgx/__init__.py
|
tantiem/pgxtended
|
4f5c05ea0eebb3a6bebac1a35a74f0d161951772
|
[
"MIT"
] | null | null | null |
pgx/__init__.py
|
tantiem/pgxtended
|
4f5c05ea0eebb3a6bebac1a35a74f0d161951772
|
[
"MIT"
] | null | null | null |
pgx/__init__.py
|
tantiem/pgxtended
|
4f5c05ea0eebb3a6bebac1a35a74f0d161951772
|
[
"MIT"
] | null | null | null |
"""A wrapper module for pygame that makes all aspects of drawing and viewing much, much easier by compiling the boilerplate you would have used into a library."""
__all__ = ["pgxObject","Drawable","Undrawable","Transformable","Camera","AudioSource","Dynamic","Static","UI","Physical","PhysicsBody","Particle"]
| 155
| 163
| 0.754839
|
794bd373588ee2f71094e0290c4658add7d85c61
| 6,496
|
py
|
Python
|
dl85/supervised/regressors/quantile_regressor.py
|
valentinlemaire/pydl8.5
|
a846f3c36bacbbe01ff87c31413342069b0cf61b
|
[
"MIT"
] | null | null | null |
dl85/supervised/regressors/quantile_regressor.py
|
valentinlemaire/pydl8.5
|
a846f3c36bacbbe01ff87c31413342069b0cf61b
|
[
"MIT"
] | null | null | null |
dl85/supervised/regressors/quantile_regressor.py
|
valentinlemaire/pydl8.5
|
a846f3c36bacbbe01ff87c31413342069b0cf61b
|
[
"MIT"
] | null | null | null |
from sklearn.base import RegressorMixin
from ...predictors.quantile_predictor import DL85QuantilePredictor
from sklearn.neighbors import KernelDensity
import numpy as np
from math import floor, ceil
import json
class DL85QuantileRegressor(DL85QuantilePredictor, RegressorMixin):
"""An optimal binary decision tree regressor.
Parameters
----------
max_depth : int, default=1
Maximum depth of the tree to be found
min_sup : int, default=1
Minimum number of examples per leaf
max_error : int, default=0
Maximum allowed error. Default value stands for no bound. If no tree can be found that is strictly better, the model remains empty.
stop_after_better : bool, default=False
A parameter used to indicate if the search will stop after finding a tree better than max_error
time_limit : int, default=0
Allocated time in second(s) for the search. Default value stands for no limit. The best tree found within the time limit is stored, if this tree is better than max_error.
quantiles: array-like of floats, default = [0.5]
Quantile values to optimize.
quantile_estimation: str, default = "linear"
Quantile estimation method. Can be one of {"linear", "optimal"}
verbose : bool, default=False
A parameter used to switch on/off the print of what happens during the search
desc : bool, default=False
A parameter used to indicate if the sorting of the items is done in descending order of information gain
asc : bool, default=False
A parameter used to indicate if the sorting of the items is done in ascending order of information gain
repeat_sort : bool, default=False
A parameter used to indicate whether the sorting of items is done at each level of the lattice or only before the search
print_output : bool, default=False
A parameter used to indicate if the search output will be printed or not
Attributes
----------
tree_ : str
Outputted tree in serialized form; remains empty as long as no model is learned.
size_ : int
The size of the outputted tree
depth_ : int
Depth of the found tree
error_ : float
Error of the found tree
accuracy_ : float
Accuracy of the found tree on training set
lattice_size_ : int
The number of nodes explored before found the optimal tree
runtime_ : float
Time of the optimal decision tree search
timeout_ : bool
Whether the search reached timeout or not
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
"""
def __init__(
self,
max_depth=1,
min_sup=1,
max_errors=None,
stop_after_better=None,
time_limit=0,
quantiles=[0.5],
quantile_estimation = "linear",
verbose=False,
desc=False,
asc=False,
repeat_sort=False,
leaf_value_function=None,
print_output=False,
):
DL85QuantilePredictor.__init__(
self,
max_depth=max_depth,
min_sup=min_sup,
max_errors=max_errors,
stop_after_better=stop_after_better,
time_limit=time_limit,
quantiles=quantiles,
quantile_estimation=quantile_estimation,
verbose=verbose,
desc=desc,
asc=asc,
repeat_sort=repeat_sort,
leaf_value_function=leaf_value_function,
print_output=print_output,
)
self.to_redefine = self.leaf_value_function is None
self.backup_error = "quantile"
@staticmethod
def quantile_linear_estimation(tids, y, q):
return np.quantile(y[list(tids)], q)
@staticmethod
def quantile_optimal_estimation(tids, y, q):
N = len(tids)
h = (N-1)*q
y_sorted = sorted(y[list(tids)])
if q < 0.5:
return y_sorted[ceil(h)]
elif q == 0.5:
return (y_sorted[floor(h)] + y_sorted[ceil(h)])/2
elif q > 0.5:
return y_sorted[floor(h)]
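    # Worked example for the "optimal" estimation above (added illustration):
    # with N = 5 sorted targets and q = 0.25, h = (5 - 1) * 0.25 = 1.0, and
    # since q < 0.5 the estimate is y_sorted[ceil(1.0)] = y_sorted[1]; for
    # q = 0.5, h = 2.0 and the estimate is the middle element y_sorted[2].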
def fit(self, X, y):
"""Implements the standard fitting function for a DL8.5 regressor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples, n_predictions)
The training output samples.
Returns
-------
self : object
Returns self.
"""
idx = np.argsort(y)
X = X[idx]
y = y[idx]
if self.to_redefine:
if self.quantile_estimation == "linear":
self.leaf_value_function = lambda tids, q: self.quantile_linear_estimation(tids, y, q)
elif self.quantile_estimation == "optimal":
self.leaf_value_function = lambda tids, q: self.quantile_optimal_estimation(tids, y, q)
# call fit method of the predictor
DL85QuantilePredictor.fit(self, X, y)
# Return the regressor
return self
def predict(self, X):
"""Implements the predict function for multiple quantile regressor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples, n_quantiles)
The predicted value for each sample is a vector of values corresponding to the quantiles.
"""
return DL85QuantilePredictor.predict(self, X)
def save(self, filename: str):
"""Saves the model in a file.
Parameters
----------
filename : str
The name of the file where the model will be saved.
"""
with open(filename, "w") as f:
attr_dict = self.__dict__
del attr_dict['leaf_value_function']
json.dump(self.__dict__, f)
@classmethod
def load(cls, filename: str):
"""Loads a model from a file.
Parameters
----------
filename : str
The name of the file where the model is saved.
Returns
-------
model : DL85QuantileRegressor
The loaded model.
"""
with open(filename, "r") as f:
attrs = json.load(f)
model = cls()
for attr, value in attrs.items():
setattr(model, attr, value)
return model
| 33.833333
| 178
| 0.611145
|
794bd3cf3df7b53ef1f2932a75c64eca33e1e219
| 405
|
py
|
Python
|
nexus/__init__.py
|
Xarrow/PyCharmWorkSpace
|
daf0c3c194d0439caf96f260e23319b2ad8c7e0d
|
[
"Apache-2.0"
] | 14
|
2016-02-26T12:15:43.000Z
|
2019-08-28T15:28:23.000Z
|
nexus/__init__.py
|
Xarrow/PyCharmWorkSpace
|
daf0c3c194d0439caf96f260e23319b2ad8c7e0d
|
[
"Apache-2.0"
] | null | null | null |
nexus/__init__.py
|
Xarrow/PyCharmWorkSpace
|
daf0c3c194d0439caf96f260e23319b2ad8c7e0d
|
[
"Apache-2.0"
] | 11
|
2016-02-21T13:26:16.000Z
|
2019-08-28T15:28:25.000Z
|
# -*- coding:utf-8 -*-
"""
Version: 1.0
Author: helix
Site: https://iliangqunru.bitcron.com/
File: __init__.py
Time: 3/7/18
"""
import logging
import sys
import os
import requests
level = logging.DEBUG
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
datefmt = '%Y-%m-%d %H:%M'
logging.basicConfig(level=level, format=format, datefmt=datefmt)
logger = logging.getLogger(__name__)
| 19.285714
| 64
| 0.683951
|
794bd3e146f390ad5579832b25dec69fba29831c
| 918
|
py
|
Python
|
tests/kallisticore/utils/test_sanitizer.py
|
jpmorganchase/kallisti-core
|
d9dfcaa2ec3c9cd26dd37b5f2c39c3788a3d05aa
|
[
"Apache-2.0"
] | 1
|
2022-03-03T14:27:25.000Z
|
2022-03-03T14:27:25.000Z
|
tests/kallisticore/utils/test_sanitizer.py
|
jpmorganchase/kallisti-core
|
d9dfcaa2ec3c9cd26dd37b5f2c39c3788a3d05aa
|
[
"Apache-2.0"
] | null | null | null |
tests/kallisticore/utils/test_sanitizer.py
|
jpmorganchase/kallisti-core
|
d9dfcaa2ec3c9cd26dd37b5f2c39c3788a3d05aa
|
[
"Apache-2.0"
] | 1
|
2022-03-09T05:57:55.000Z
|
2022-03-09T05:57:55.000Z
|
from kallisticore.utils.sanitizer import Sanitizer
from unittest import TestCase
from tests.kallisticore.utils.fixture.trial_result_data import \
sanitizer_real_example_test, sanitizer_real_example_test_expected, \
sanitizer_theoretical_test, sanitizer_theoretical_test_expected
class TestSanitizer(TestCase):
def test_clean_sensitive_data_string(self):
test_string = 'test-string'
self.assertEqual(test_string,
Sanitizer.clean_sensitive_data(test_string))
def test_clean_sensitive_data_theoretical(self):
self.assertEqual(
sanitizer_theoretical_test_expected,
Sanitizer.clean_sensitive_data(sanitizer_theoretical_test))
def test_clean_sensitive_data_real_example(self):
self.assertEqual(
sanitizer_real_example_test_expected,
Sanitizer.clean_sensitive_data(sanitizer_real_example_test))
| 38.25
| 72
| 0.766885
|
794bd3e41ab752eba2517cd40aa8f18abd53f679
| 3,644
|
py
|
Python
|
src/64.py
|
cloudzfy/euler
|
b82efad753ee98375fd40ec4e3989be57828e82c
|
[
"MIT"
] | 12
|
2016-10-19T09:03:20.000Z
|
2021-01-10T10:53:23.000Z
|
src/64.py
|
cloudzfy/euler
|
b82efad753ee98375fd40ec4e3989be57828e82c
|
[
"MIT"
] | null | null | null |
src/64.py
|
cloudzfy/euler
|
b82efad753ee98375fd40ec4e3989be57828e82c
|
[
"MIT"
] | 6
|
2018-09-12T03:13:58.000Z
|
2021-07-07T00:29:43.000Z
|
# All square roots are periodic when written as continued fractions and can
# be written in the form:
# sqrt(N) = a0 + 1
# ------------------
# a1 + 1
# -------------
# a2 + 1
# --------
# a3 + ...
# For example, let us consider sqrt(23):
# sqrt(23) = 4 + sqrt(23) - 4 = 4 + 1 = 4 + 1
# ------------ ----------------
# 1 sqrt(23) - 3
# ------------ 1 + ------------
# sqrt(23) - 4 7
# If we continue we would get the following expansion:
# sqrt(23) = 4 + 1
# -------------------
# 1 + 1
# ---------------
# 3 + 1
# -----------
# 1 + 1
# -------
# 8 + ...
# The process can be summarised as follows:
# 1 sqrt(23) + 4 sqrt(23) - 3
# a0 = 4, ------------ = ------------ = 1 + ------------
# sqrt(23) - 4 7 7
# 7 7(sqrt(23) + 3) sqrt(23) - 3
# a1 = 1, ------------ = --------------- = 3 + ------------
# sqrt(23) - 3 14 2
# 2 2(sqrt(23) + 3) sqrt(23) - 4
# a2 = 3, ------------ = --------------- = 1 + ------------
# sqrt(23) - 3 14 7
# 7 7(sqrt(23) + 4)
# a3 = 1, ------------ = --------------- = 8 + sqrt(23) - 4
# sqrt(23) - 4 7
# 1 sqrt(23) + 4 sqrt(23) - 3
# a4 = 8, ------------ = ------------ = 1 + ------------
# sqrt(23) - 4 7 7
# 7 7(sqrt(23) + 3) sqrt(23) - 3
# a5 = 1, ------------ = --------------- = 3 + ------------
# sqrt(23) - 3 14 2
# 2 2(sqrt(23) + 3) sqrt(23) - 4
# a6 = 3, ------------ = --------------- = 1 + ------------
# sqrt(23) - 3 14 7
# 7 7(sqrt(23) + 4)
# a7 = 1, ------------ = --------------- = 8 + sqrt(23) - 4
# sqrt(23) - 4 7
# It can be seen that the sequence is repeating. For conciseness, we use the
# notation sqrt(23) = [4;(1,3,1,8)], to indicate that the block (1,3,1,8)
# repeats indefinitely.
# The first ten continued fraction representations of (irrational) square
# roots are:
# sqrt(2) = [1;(2)], period = 1
# sqrt(3) = [1;(1,2)], period = 2
# sqrt(5) = [2;(4)], period = 1
# sqrt(6) = [2;(2,4)], period = 2
# sqrt(7) = [2;(1,1,1,4)], period = 4
# sqrt(8) = [2;(1,4)], period = 2
# sqrt(10) = [3;(6)], period = 1
# sqrt(11) = [3;(3,6)], period = 2
# sqrt(12) = [3;(2,6)], period = 2
# sqrt(13) = [3;(1,1,1,1,6)], period = 5
# Exactly four continued fractions, for N <= 13, have an odd period.
# How many continued fractions for N <= 10000 have an odd period?
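# Sketch of the recurrence implemented below (added illustration): starting
# from m0 = 0, d0 = 1, a0 = floor(sqrt(N)), each step computes
#   m_{k+1} = d_k * a_k - m_k
#   d_{k+1} = (N - m_{k+1}^2) / d_k
#   a_{k+1} = floor((a0 + m_{k+1}) / d_{k+1})
# and the period ends when an (a, m, d) triple repeats. For N = 23 this gives
# m1 = 4, d1 = (23 - 16) / 1 = 7, a1 = floor((4 + 4) / 7) = 1, matching the
# expansion shown above.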
from math import sqrt
def is_square(num):
return int(sqrt(num)) == sqrt(num)
def get_period_length(num):
m = 0
d = 1
a0 = int(sqrt(num))
a = a0
period = []
while True:
m = d * a - m
        d = (num - m * m) // d
        a = (a0 + m) // d
if (a, m, d) in period:
return len(period) - period.index((a, m, d))
period.append((a, m, d))
ans = 0
for N in range(2, 10001):
if not is_square(N):
if get_period_length(N) % 2 == 1:
ans += 1
print(ans)
| 31.964912
| 76
| 0.325741
|
794bd3f8c1e88b95d9238d25687ce2a3333550b4
| 987
|
py
|
Python
|
cride/circles/serializers.py
|
CamiloYate09/crider_django
|
850e2657fdc1107cbe63274f18264ba0867c43dd
|
[
"MIT"
] | null | null | null |
cride/circles/serializers.py
|
CamiloYate09/crider_django
|
850e2657fdc1107cbe63274f18264ba0867c43dd
|
[
"MIT"
] | null | null | null |
cride/circles/serializers.py
|
CamiloYate09/crider_django
|
850e2657fdc1107cbe63274f18264ba0867c43dd
|
[
"MIT"
] | null | null | null |
"""Circle serializers."""
# Django REST Framework
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
# Models
from cride.circles.models import Circle
class CircleSerializer(serializers.Serializer):
"""Circle serializer."""
name = serializers.CharField()
slug_name = serializers.SlugField()
rides_taken = serializers.IntegerField()
rides_offered = serializers.IntegerField()
members_limit = serializers.IntegerField()
class CreateCircleSerializer(serializers.Serializer):
"""Create circle serializer."""
name = serializers.CharField(max_length=140)
slug_name = serializers.CharField(max_length=40,
validators=[UniqueValidator(queryset=Circle.objects.all())]
)
about = serializers.CharField(max_length=255, required=True)
def create(self, data):
"""Create circle."""
return Circle.objects.create(**data)
| 30.84375
| 97
| 0.690983
|
794bd449cd2ea946a7fb468e80e1263540c06524
| 3,540
|
py
|
Python
|
test/e2e/batcher/test_raw_batcher.py
|
Iamlovingit/kserve
|
5091f0f87dfc0ecd91aff4722b603686ca53349b
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/batcher/test_raw_batcher.py
|
Iamlovingit/kserve
|
5091f0f87dfc0ecd91aff4722b603686ca53349b
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/batcher/test_raw_batcher.py
|
Iamlovingit/kserve
|
5091f0f87dfc0ecd91aff4722b603686ca53349b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from kubernetes import client
from kserve import KServeClient
from kserve import constants
from kserve import V1beta1PredictorSpec
from kserve import V1beta1Batcher
from kserve import V1beta1SKLearnSpec
from kserve import V1beta1InferenceServiceSpec
from kserve import V1beta1InferenceService
from kubernetes.client import V1ResourceRequirements
from ..common.utils import predict_str
from ..common.utils import KSERVE_TEST_NAMESPACE
from concurrent import futures
kserve_client = KServeClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
input_file = open('./data/iris_batch_input.json')
json_array = json.load(input_file)
def test_batcher_raw():
service_name = 'isvc-raw-sklearn-batcher'
annotations = dict()
annotations['serving.kserve.io/deploymentMode'] = 'RawDeployment'
predictor = V1beta1PredictorSpec(
batcher=V1beta1Batcher(
max_batch_size=32,
max_latency=5000,
),
min_replicas=1,
sklearn=V1beta1SKLearnSpec(
storage_uri="gs://kfserving-examples/models/sklearn/1.0/model",
resources=V1ResourceRequirements(
requests={"cpu": "100m", "memory": "256Mi"},
limits={"cpu": "100m", "memory": "256Mi"},
),
),
)
isvc = V1beta1InferenceService(api_version=constants.KSERVE_V1BETA1,
kind=constants.KSERVE_KIND,
metadata=client.V1ObjectMeta(
name=service_name, namespace=KSERVE_TEST_NAMESPACE,
annotations=annotations,
), spec=V1beta1InferenceServiceSpec(predictor=predictor),
)
kserve_client.create(isvc)
try:
kserve_client.wait_isvc_ready(service_name, namespace=KSERVE_TEST_NAMESPACE)
except RuntimeError as e:
print(kserve_client.api_instance.get_namespaced_custom_object("serving.knative.dev", "v1",
KSERVE_TEST_NAMESPACE,
"services", service_name + "-predictor-default"))
pods = kserve_client.core_api.list_namespaced_pod(KSERVE_TEST_NAMESPACE,
label_selector='serving.kserve.io/inferenceservice={}'.
format(service_name))
for pod in pods.items:
print(pod)
raise e
with futures.ThreadPoolExecutor(max_workers=4) as executor:
future_res = [
            executor.submit(predict_str, service_name, json.dumps(item)) for item in json_array
]
results = [
f.result()["batchId"] for f in future_res
]
assert (all(x == results[0] for x in results))
kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
| 40.689655
| 119
| 0.633333
|
794bd5842ea9b299252633eb28f71510ecbd7a36
| 37,651
|
py
|
Python
|
qcodes/dataset/measurements.py
|
cgranade/Qcodes
|
2d8fd0b8e0fa12d7921a96003318598ad347dd05
|
[
"MIT"
] | null | null | null |
qcodes/dataset/measurements.py
|
cgranade/Qcodes
|
2d8fd0b8e0fa12d7921a96003318598ad347dd05
|
[
"MIT"
] | null | null | null |
qcodes/dataset/measurements.py
|
cgranade/Qcodes
|
2d8fd0b8e0fa12d7921a96003318598ad347dd05
|
[
"MIT"
] | null | null | null |
import json
import logging
from time import monotonic
from collections import OrderedDict
from typing import (Callable, Union, Dict, Tuple, List, Sequence, cast,
MutableMapping, MutableSequence, Optional, Any)
from inspect import signature
from numbers import Number
import numpy as np
import qcodes as qc
from qcodes import Station
from qcodes.instrument.parameter import ArrayParameter, _BaseParameter, \
Parameter, MultiParameter
from qcodes.dataset.experiment_container import Experiment
from qcodes.dataset.param_spec import ParamSpec
from qcodes.dataset.data_set import DataSet
from qcodes.utils.helpers import NumpyJSONEncoder
log = logging.getLogger(__name__)
array_like_types = (tuple, list, np.ndarray)
res_type = Tuple[Union[_BaseParameter, str],
Union[str, int, float, np.dtype, np.ndarray]]
setpoints_type = Sequence[Union[str, _BaseParameter]]
numeric_types = Union[float, int]
class ParameterTypeError(Exception):
pass
def is_number(thing: Any) -> bool:
"""
Test if an object can be converted to a number UNLESS it is a string
"""
if isinstance(thing, str):
return False
try:
float(thing)
return True
except (ValueError, TypeError):
return False
class DataSaver:
"""
The class used by the Runner context manager to handle the datasaving to
the database.
"""
default_callback: Optional[dict] = None
def __init__(self, dataset: DataSet, write_period: numeric_types,
parameters: Dict[str, ParamSpec]) -> None:
self._dataset = dataset
if DataSaver.default_callback is not None \
and 'run_tables_subscription_callback' \
in DataSaver.default_callback:
callback = DataSaver.default_callback[
'run_tables_subscription_callback']
min_wait = DataSaver.default_callback[
'run_tables_subscription_min_wait']
min_count = DataSaver.default_callback[
'run_tables_subscription_min_count']
snapshot = dataset.get_metadata('snapshot')
self._dataset.subscribe(callback,
min_wait=min_wait,
min_count=min_count,
state={},
callback_kwargs={'run_id':
self._dataset.run_id,
'snapshot': snapshot})
self.write_period = float(write_period)
self.parameters = parameters
self._known_parameters = list(parameters.keys())
self._results: List[dict] = [] # will be filled by addResult
self._last_save_time = monotonic()
self._known_dependencies: Dict[str, List[str]] = {}
for param, parspec in parameters.items():
if parspec.depends_on != '':
self._known_dependencies.update(
{str(param): parspec.depends_on.split(', ')})
def add_result(self, *res_tuple: res_type) -> None:
"""
Add a result to the measurement results. Represents a measurement
point in the space of measurement parameters, e.g. in an experiment
varying two voltages and measuring two currents, a measurement point
is the four dimensional (v1, v2, c1, c2). The corresponding call
to this function would be (e.g.)
>> datasaver.add_result((v1, 0.1), (v2, 0.2), (c1, 5), (c2, -2.1))
For better performance, this function does not immediately write to
the database, but keeps the results in memory. Writing happens every
        `write_period` seconds and during the __exit__ method of this class.
Regarding arrays: since arrays as binary blobs are (almost) worthless
in a relational database, this function "unravels" arrays passed to it.
That, in turn, forces us to impose rules on what can be saved in one
go. Any number of scalars and any number of arrays OF THE SAME LENGTH
can be passed to add_result. The scalars are duplicated to match the
arrays.
However, if the parameter is registered as array type the numpy arrays
are not unraveled but stored directly for improved performance.
Args:
res_tuple: a tuple with the first element being the parameter name
and the second element is the corresponding value(s) at this
measurement point. The function takes as many tuples as there
are results.
Raises:
ValueError: if a parameter name not registered in the parent
Measurement object is encountered.
ParameterTypeError: if a parameter is given a value not matching
its type.
"""
res: List[res_type] = []
# we iterate through the input twice. First we find any array and
# multiparameters that needs to be unbundled and collect the names
# of all parameters. This also allows users to call
# add_result with the arguments in any particular order, i.e. NOT
# enforcing that setpoints come before dependent variables.
input_size = 1
found_parameters: List[str] = []
inserting_as_arrays = False
inserting_unrolled_array = False
for partial_result in res_tuple:
parameter = partial_result[0]
if isinstance(parameter, MultiParameter):
# unpack parameters and potential setpoints from MultiParameter
# unlike regular Parameters and ArrayParameters we don't want
                # to add the parameter itself, only its components.
data = partial_result[1]
self._unbundle_multiparameter(parameter,
data,
res,
found_parameters)
else:
res.append(partial_result)
paramstr = str(parameter)
found_parameters.append(paramstr)
# unpack setpoints from array parameters and add them
# to the res list
if isinstance(parameter, ArrayParameter):
self._unbundle_arrayparameter(parameter,
res,
found_parameters)
for partial_result in res:
parameter = partial_result[0]
paramstr = str(parameter)
value = partial_result[1]
found_parameters.append(paramstr)
inserting_this_as_array = False
if paramstr not in self._known_parameters:
raise ValueError(f'Can not add a result for {paramstr}, no '
'such parameter registered in this '
'measurement.')
param_spec = self.parameters[paramstr]
if param_spec.type == 'array':
inserting_as_arrays = True
inserting_this_as_array = True
if any(isinstance(value, typ) for typ in array_like_types):
value = cast(np.ndarray, partial_result[1])
value = np.atleast_1d(value)
array_size = len(value.ravel())
if param_spec.type != 'array' and array_size > 1:
inserting_unrolled_array = True
if input_size > 1 and input_size != array_size:
raise ValueError('Incompatible array dimensions. Trying to'
f' add arrays of dimension {input_size} '
f'and {array_size}')
else:
input_size = array_size
elif is_number(value):
if inserting_this_as_array:
raise ValueError("Trying to insert into an ArrayType with "
"a scalar value")
if param_spec.type == 'text':
raise ValueError(f"It is not possible to save a numeric "
f"value for parameter {paramstr!r} "
f"because its type class is "
f"'text', not 'numeric'.")
elif isinstance(value, str):
if param_spec.type != 'text':
raise ValueError(f"It is not possible to save a string "
f"value for parameter {paramstr!r} "
f"because its type class is "
f"{param_spec.type!r}, not 'text'.")
else:
raise ValueError('Wrong value type received. '
f'Got {type(value)}, but only int, float, '
'str, tuple, list, and np.ndarray is '
'allowed.')
# Now check for missing setpoints
if paramstr in self._known_dependencies.keys():
stuffweneed = set(self._known_dependencies[paramstr])
stuffwehave = set(found_parameters)
if not stuffweneed.issubset(stuffwehave):
raise ValueError('Can not add this result; missing '
f'setpoint values for {paramstr}:'
f' {stuffweneed}.'
f' Values only given for'
f' {found_parameters}.')
if inserting_unrolled_array and inserting_as_arrays:
raise RuntimeError("Trying to insert multiple data values both "
"in array from and as numeric. This is not "
"possible.")
elif inserting_as_arrays:
input_size = 1
self._append_results(res, input_size)
if monotonic() - self._last_save_time > self.write_period:
self.flush_data_to_database()
self._last_save_time = monotonic()
def _append_results(self, res: Sequence[res_type],
input_size: int) -> None:
"""
A private method to add the data to actual queue of data to be written.
Args:
res: A sequence of the data to be added
            input_size: The length of the data to be added. 1 if it is
to be inserted as arrays.
"""
for index in range(input_size):
res_dict = {}
for partial_result in res:
param = str(partial_result[0])
value = partial_result[1]
param_spec = self.parameters[param]
if param_spec.type == 'array' and index == 0:
res_dict[param] = value
elif param_spec.type != 'array':
# For compatibility with the old Loop, setpoints are
# tuples of numbers (usually tuple(np.linspace(...))
if hasattr(value, '__len__') and not isinstance(value, str):
value = cast(Union[Sequence, np.ndarray], value)
if isinstance(value, np.ndarray):
# this is significantly faster than atleast_1d
                            # especially for non 0D arrays
# because we already know that this is a numpy
# array and just one numpy array. atleast_1d
# performs additional checks.
if value.ndim == 0:
value = value.reshape(1)
value = value.ravel()
res_dict[param] = value[index]
else:
res_dict[param] = value
if len(res_dict) > 0:
self._results.append(res_dict)
def _unbundle_arrayparameter(self,
parameter: ArrayParameter,
res: List[res_type],
found_parameters: List[str]) -> None:
"""
Extract the setpoints from an ArrayParameter and add them to results
as a regular parameter tuple.
Args:
parameter: The ArrayParameter to extract setpoints from.
res: The result list to add to. Note that this is modified inplace
found_parameters: The list of all parameters that we know of by now
Note that this is modified in place.
"""
sp_names = parameter.setpoint_full_names
fallback_sp_name = f"{parameter.full_name}_setpoint"
self._unbundle_setpoints_from_param(parameter, sp_names,
fallback_sp_name,
parameter.setpoints,
res, found_parameters)
def _unbundle_setpoints_from_param(self, parameter: _BaseParameter,
sp_names: Sequence[str],
fallback_sp_name: str,
setpoints: Sequence,
res: List[res_type],
found_parameters: List[str]):
"""
Private function to unbundle setpoints from an ArrayParameter or
a subset of a MultiParameter.
Args:
parameter:
sp_names: Names of the setpoint axes
fallback_sp_name: Fallback name for setpoints in case sp_names
is None. The axis num is appended to this name to ensure all
setpoint axes names are unique.
setpoints: The actual setpoints i.e. `parameter.setpoints` for an
ArrayParameter and `parameter.setpoints[i]` for a MultiParameter
            res: The result list the unpacked setpoints are added to.
Note that this will be modified in place.
found_parameters: The list of all parameters that we know of by now
This is modified in place with new parameters found here.
"""
setpoint_axes = []
setpoint_meta = []
if setpoints is None:
raise RuntimeError(f"{parameter.full_name} is an {type(parameter)} "
f"without setpoints. Cannot handle this.")
for i, sps in enumerate(setpoints):
if sp_names is not None:
spname = sp_names[i]
else:
spname = f'{fallback_sp_name}_{i}'
if spname not in self.parameters.keys():
raise RuntimeError('No setpoints registered for '
f'{type(parameter)} {parameter.full_name}!')
sps = np.array(sps)
while sps.ndim > 1:
                # The outermost setpoint axis of an nD param is nD
# but the innermost is 1D. In all cases we just need
# the axis along one dim, the innermost one.
sps = sps[0]
setpoint_meta.append(spname)
found_parameters.append(spname)
setpoint_axes.append(sps)
output_grids = np.meshgrid(*setpoint_axes, indexing='ij')
for grid, meta in zip(output_grids, setpoint_meta):
res.append((meta, grid))
def _unbundle_multiparameter(self,
parameter: MultiParameter,
data: Union[tuple, list, np.ndarray],
res: List[res_type],
found_parameters: List[str]) -> None:
"""
Extract the subarrays and setpoints from an MultiParameter and
add them to res as a regular parameter tuple.
Args:
parameter: The MultiParameter to extract from
data: The acquired data for this parameter
res: The result list that the unpacked data and setpoints
                are added to. Note that this will be modified in place.
found_parameters: The list of all parameters that we know of by now
This is modified in place with new parameters found here.
"""
for i in range(len(parameter.shapes)):
shape = parameter.shapes[i]
res.append((parameter.names[i], data[i]))
if shape != ():
# array parameter like part of the multiparameter
# need to find setpoints too
fallback_sp_name = f'{parameter.full_names[i]}_setpoint'
if parameter.setpoint_full_names[i] is not None:
sp_names = parameter.setpoint_full_names[i]
else:
sp_names = None
self._unbundle_setpoints_from_param(parameter, sp_names,
fallback_sp_name,
parameter.setpoints[i],
res, found_parameters)
def flush_data_to_database(self) -> None:
"""
Write the in-memory results to the database.
"""
log.debug('Flushing to database')
if self._results != []:
try:
write_point = self._dataset.add_results(self._results)
log.debug(f'Successfully wrote from index {write_point}')
self._results = []
except Exception as e:
log.warning(f'Could not commit to database; {e}')
else:
log.debug('No results to flush')
@property
def run_id(self) -> int:
return self._dataset.run_id
@property
def points_written(self) -> int:
return self._dataset.number_of_results
@property
def dataset(self):
return self._dataset
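# Illustrative sketch (not part of qcodes): ``add_result`` accepts any mix of
# scalars and equal-length arrays in a single call; scalars are duplicated to
# match the arrays when the data is unraveled. ``datasaver`` is assumed to be
# an active DataSaver whose parent Measurement registered ``freq``, ``mag``
# and a custom 'repetition' parameter.
def _example_add_result(datasaver: DataSaver, freq, mag, repetition: int) -> None:
    freqs = np.linspace(1e6, 2e6, 101)
    mags = np.random.rand(101)
    # one array per parameter plus a scalar; the scalar is repeated 101 times
    datasaver.add_result((freq, freqs), (mag, mags), ('repetition', repetition))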
class Runner:
"""
Context manager for the measurement.
Lives inside a Measurement and should never be instantiated
outside a Measurement.
This context manager handles all the dirty business of writing data
to the database. Additionally, it may perform experiment bootstrapping
and clean-up after the measurement.
"""
def __init__(
self, enteractions: List, exitactions: List,
experiment: Experiment = None, station: Station = None,
write_period: numeric_types = None,
parameters: Dict[str, ParamSpec] = None,
name: str = '',
subscribers: Sequence[Tuple[Callable,
Union[MutableSequence,
MutableMapping]]] = None) -> None:
self.enteractions = enteractions
self.exitactions = exitactions
self.subscribers: Sequence[Tuple[Callable,
Union[MutableSequence,
MutableMapping]]]
if subscribers is None:
self.subscribers = []
else:
self.subscribers = subscribers
self.experiment = experiment
self.station = station
self.parameters = parameters
# here we use 5 s as a sane default, but that value should perhaps
# be read from some config file
self.write_period = float(write_period) \
if write_period is not None else 5.0
self.name = name if name else 'results'
def __enter__(self) -> DataSaver:
# TODO: should user actions really precede the dataset?
# first do whatever bootstrapping the user specified
for func, args in self.enteractions:
func(*args)
# next set up the "datasaver"
if self.experiment is not None:
self.ds = qc.new_data_set(
self.name, self.experiment.exp_id, conn=self.experiment.conn
)
else:
self.ds = qc.new_data_set(self.name)
# .. and give the dataset a snapshot as metadata
if self.station is None:
station = qc.Station.default
else:
station = self.station
if station:
self.ds.add_metadata('snapshot',
json.dumps({'station': station.snapshot()},
cls=NumpyJSONEncoder)
)
if self.parameters is not None:
for paramspec in self.parameters.values():
self.ds.add_parameter(paramspec)
else:
raise RuntimeError("No parameters supplied")
# register all subscribers
for (callble, state) in self.subscribers:
# We register with minimal waiting time.
# That should make all subscribers be called when data is flushed
# to the database
log.debug(f'Subscribing callable {callble} with state {state}')
self.ds.subscribe(callble, min_wait=0, min_count=1, state=state)
print(f'Starting experimental run with id: {self.ds.run_id}')
self.datasaver = DataSaver(dataset=self.ds,
write_period=self.write_period,
parameters=self.parameters)
return self.datasaver
def __exit__(self, exception_type, exception_value, traceback) -> None:
self.datasaver.flush_data_to_database()
# perform the "teardown" events
for func, args in self.exitactions:
func(*args)
# and finally mark the dataset as closed, thus
# finishing the measurement
self.ds.mark_complete()
self.ds.unsubscribe_all()
class Measurement:
"""
Measurement procedure container
Args:
exp: Specify the experiment to use. If not given
the default one is used.
station: The QCoDeS station to snapshot. If not given, the
default one is used.
"""
def __init__(self, exp: Optional[Experiment] = None,
station: Optional[qc.Station] = None) -> None:
self.exitactions: List[Tuple[Callable, Sequence]] = []
self.enteractions: List[Tuple[Callable, Sequence]] = []
self.subscribers: List[Tuple[Callable, Union[MutableSequence,
MutableMapping]]] = []
self.experiment = exp
self.station = station
self.parameters: Dict[str, ParamSpec] = OrderedDict()
self._write_period: Optional[float] = None
self.name = ''
@property
def write_period(self) -> float:
return self._write_period
@write_period.setter
def write_period(self, wp: numeric_types) -> None:
if not isinstance(wp, Number):
raise ValueError('The write period must be a number (of seconds).')
wp_float = float(wp)
if wp_float < 1e-3:
raise ValueError('The write period must be at least 1 ms.')
self._write_period = wp_float
def _registration_validation(
self, name: str, setpoints: Sequence[str] = None,
basis: Sequence[str] = None) -> Tuple[List[str], List[str]]:
"""
Helper function to do all the validation in terms of dependencies
when adding parameters, e.g. that no setpoints have setpoints etc.
Called by register_parameter and register_custom_parameter
Args:
name: Name of the parameter to register
setpoints: name(s) of the setpoint parameter(s)
basis: name(s) of the parameter(s) that this parameter is
inferred from
"""
# now handle setpoints
depends_on = []
if setpoints:
for sp in setpoints:
if sp not in list(self.parameters.keys()):
raise ValueError(f'Unknown setpoint: {sp}.'
' Please register that parameter first.')
elif sp == name:
raise ValueError('A parameter can not have itself as '
'setpoint.')
elif self.parameters[sp].depends_on != '':
raise ValueError("A parameter's setpoints can not have "
f"setpoints themselves. {sp} depends on"
f" {self.parameters[sp].depends_on}")
else:
depends_on.append(sp)
# now handle inferred parameters
inf_from = []
if basis:
for inff in basis:
if inff not in list(self.parameters.keys()):
raise ValueError(f'Unknown basis parameter: {inff}.'
' Please register that parameter first.')
elif inff == name:
raise ValueError('A parameter can not be inferred from'
'itself.')
else:
inf_from.append(inff)
return depends_on, inf_from
def register_parameter(
self, parameter: _BaseParameter,
setpoints: setpoints_type = None,
basis: setpoints_type = None,
paramtype: str = 'numeric') -> None:
"""
Add QCoDeS Parameter to the dataset produced by running this
measurement.
Args:
parameter: The parameter to add
setpoints: The Parameter representing the setpoints for this
parameter. If this parameter is a setpoint,
it should be left blank
basis: The parameters that this parameter is inferred from. If
this parameter is not inferred from any other parameters,
this should be left blank.
paramtype: type of the parameter, i.e. the SQL storage class
"""
# input validation
if paramtype not in ParamSpec.allowed_types:
raise RuntimeError("Trying to register a parameter with type "
f"{paramtype}. However, only "
f"{ParamSpec.allowed_types} are supported.")
if not isinstance(parameter, _BaseParameter):
raise ValueError('Can not register object of type {}. Can only '
'register a QCoDeS Parameter.'
''.format(type(parameter)))
# perhaps users will want a different name? But the name must be unique
# on a per-run basis
        # we also use the name below, but perhaps it is better to have
# a more robust Parameter2String function?
name = str(parameter)
if isinstance(parameter, ArrayParameter):
self._register_arrayparameter(parameter,
setpoints,
basis,
paramtype)
elif isinstance(parameter, MultiParameter):
self._register_multiparameter(parameter,
setpoints,
basis,
paramtype,
)
elif isinstance(parameter, Parameter):
self._register_parameter(name,
parameter.label,
parameter.unit,
setpoints,
basis, paramtype)
else:
raise RuntimeError("Does not know how to register a parameter"
f"of type {type(parameter)}")
def _register_parameter(self, name: str,
label: str,
unit: str,
setpoints: setpoints_type,
basis: setpoints_type,
paramtype: str) -> None:
"""
Generate ParamSpecs and register them for an individual parameter
"""
if setpoints is not None:
sp_strings = [str(sp) for sp in setpoints]
else:
sp_strings = []
if basis is not None:
bs_strings = [str(bs) for bs in basis]
else:
bs_strings = []
# validate all dependencies
depends_on, inf_from = self._registration_validation(name, sp_strings,
bs_strings)
paramspec = ParamSpec(name=name,
paramtype=paramtype,
label=label,
unit=unit,
inferred_from=inf_from,
depends_on=depends_on)
# ensure the correct order
if name in self.parameters.keys():
self.parameters.pop(name)
self.parameters[name] = paramspec
log.info(f'Registered {name} in the Measurement.')
def _register_arrayparameter(self,
parameter: ArrayParameter,
setpoints: setpoints_type,
basis: setpoints_type,
paramtype: str, ) -> None:
"""
        Register an ArrayParameter and the setpoints belonging to the
ArrayParameter
"""
name = str(parameter)
my_setpoints = list(setpoints) if setpoints else []
for i in range(len(parameter.shape)):
if parameter.setpoint_full_names is not None and \
parameter.setpoint_full_names[i] is not None:
spname = parameter.setpoint_full_names[i]
else:
spname = f'{name}_setpoint_{i}'
if parameter.setpoint_labels:
splabel = parameter.setpoint_labels[i]
else:
splabel = ''
if parameter.setpoint_units:
spunit = parameter.setpoint_units[i]
else:
spunit = ''
sp = ParamSpec(name=spname, paramtype=paramtype,
label=splabel, unit=spunit)
self.parameters[spname] = sp
my_setpoints += [spname]
self._register_parameter(name,
parameter.label,
parameter.unit,
my_setpoints,
basis,
paramtype)
def _register_multiparameter(self,
multiparameter: MultiParameter,
setpoints: setpoints_type,
basis: setpoints_type,
paramtype: str) -> None:
"""
Find the individual multiparameter components and their setpoints
and register these
"""
setpoints_lists = []
for i in range(len(multiparameter.shapes)):
shape = multiparameter.shapes[i]
name = multiparameter.full_names[i]
            if shape == ():
my_setpoints = setpoints
else:
my_setpoints = list(setpoints) if setpoints else []
for j in range(len(shape)):
if multiparameter.setpoint_full_names is not None and \
multiparameter.setpoint_full_names[i] is not None:
spname = multiparameter.setpoint_full_names[i][j]
else:
spname = f'{name}_setpoint_{j}'
if multiparameter.setpoint_labels is not None and \
multiparameter.setpoint_labels[i] is not None:
splabel = multiparameter.setpoint_labels[i][j]
else:
splabel = ''
if multiparameter.setpoint_units is not None and \
multiparameter.setpoint_units[i] is not None:
spunit = multiparameter.setpoint_units[i][j]
else:
spunit = ''
sp = ParamSpec(name=spname, paramtype=paramtype,
label=splabel, unit=spunit)
self.parameters[spname] = sp
my_setpoints += [spname]
setpoints_lists.append(my_setpoints)
for i, setpoints in enumerate(setpoints_lists):
self._register_parameter(multiparameter.names[i],
multiparameter.labels[i],
multiparameter.units[i],
setpoints,
basis,
paramtype)
def register_custom_parameter(
self, name: str,
label: str = None, unit: str = None,
basis: setpoints_type = None,
setpoints: setpoints_type = None,
paramtype: str = 'numeric') -> None:
"""
Register a custom parameter with this measurement
Args:
name: The name that this parameter will have in the dataset. Must
be unique (will overwrite an existing parameter with the same
name!)
label: The label
unit: The unit
basis: A list of either QCoDeS Parameters or the names
of parameters already registered in the measurement that
this parameter is inferred from
setpoints: A list of either QCoDeS Parameters or the names of
                parameters already registered in the measurement that
are the setpoints of this parameter
paramtype: type of the parameter, i.e. the SQL storage class
"""
self._register_parameter(name,
label,
unit,
setpoints,
basis,
paramtype)
def unregister_parameter(self,
parameter: setpoints_type) -> None:
"""
Remove a custom/QCoDeS parameter from the dataset produced by
running this measurement
"""
if isinstance(parameter, _BaseParameter):
param = str(parameter)
elif isinstance(parameter, str):
param = parameter
else:
raise ValueError('Wrong input type. Must be a QCoDeS parameter or'
' the name (a string) of a parameter.')
if param not in self.parameters:
            log.info(f'Tried to unregister {param}, but it was not '
'registered.')
return
for name, paramspec in self.parameters.items():
if param in paramspec.depends_on:
raise ValueError(f'Can not unregister {param}, it is a '
f'setpoint for {name}')
if param in paramspec.inferred_from:
raise ValueError(f'Can not unregister {param}, it is a '
f'basis for {name}')
self.parameters.pop(param)
log.info(f'Removed {param} from Measurement.')
def add_before_run(self, func: Callable, args: tuple) -> None:
"""
Add an action to be performed before the measurement.
Args:
func: Function to be performed
args: The arguments to said function
"""
# some tentative cheap checking
nargs = len(signature(func).parameters)
if len(args) != nargs:
raise ValueError('Mismatch between function call signature and '
'the provided arguments.')
self.enteractions.append((func, args))
def add_after_run(self, func: Callable, args: tuple) -> None:
"""
Add an action to be performed after the measurement.
Args:
func: Function to be performed
args: The arguments to said function
"""
# some tentative cheap checking
nargs = len(signature(func).parameters)
if len(args) != nargs:
raise ValueError('Mismatch between function call signature and '
'the provided arguments.')
self.exitactions.append((func, args))
def add_subscriber(self,
func: Callable,
state: Union[MutableSequence, MutableMapping]) -> None:
"""
Add a subscriber to the dataset of the measurement.
Args:
func: A function taking three positional arguments: a list of
tuples of parameter values, an integer, a mutable variable
(list or dict) to hold state/writes updates to.
state: The variable to hold the state.
"""
self.subscribers.append((func, state))
def run(self) -> Runner:
"""
Returns the context manager for the experimental run
"""
return Runner(self.enteractions, self.exitactions,
self.experiment, station=self.station,
write_period=self._write_period,
parameters=self.parameters,
name=self.name,
subscribers=self.subscribers)
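# ----------------------------------------------------------------------------
# Minimal end-to-end usage sketch (illustrative only, not part of qcodes).
# ``dac_ch1`` and ``dmm_v1`` are assumed to be a settable and a readable
# qcodes Parameter, e.g. taken from instruments in the default Station.
def _example_measurement(dac_ch1, dmm_v1):
    meas = Measurement()
    meas.register_parameter(dac_ch1)                       # independent parameter
    meas.register_parameter(dmm_v1, setpoints=(dac_ch1,))  # depends on dac_ch1
    meas.write_period = 2  # flush results to the database every 2 seconds
    # a subscriber following the documented (results, length, state) signature
    def count_points(results, length, state):
        state['n'] = state.get('n', 0) + length
    meas.add_subscriber(count_points, state={})
    meas.add_before_run(lambda: print('starting sweep'), ())
    with meas.run() as datasaver:
        for set_v in np.linspace(0, 1, 11):
            dac_ch1.set(set_v)
            datasaver.add_result((dac_ch1, set_v), (dmm_v1, dmm_v1.get()))
    return datasaver.run_id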
| 42.543503
| 80
| 0.534674
|
794bd5d642c57feb7a077644ad05fa574bf6fca7
| 10,665
|
py
|
Python
|
python/ray/tune/analysis/experiment_analysis.py
|
sunho/ray
|
0ac8138b26cc66978df150c89ef291263f23c9a1
|
[
"Apache-2.0"
] | 2
|
2019-06-17T12:38:24.000Z
|
2020-11-11T07:52:26.000Z
|
python/ray/tune/analysis/experiment_analysis.py
|
sunho/ray
|
0ac8138b26cc66978df150c89ef291263f23c9a1
|
[
"Apache-2.0"
] | 3
|
2018-08-15T19:19:25.000Z
|
2021-06-30T01:54:46.000Z
|
python/ray/tune/analysis/experiment_analysis.py
|
sunho/ray
|
0ac8138b26cc66978df150c89ef291263f23c9a1
|
[
"Apache-2.0"
] | 2
|
2017-10-31T23:20:07.000Z
|
2019-11-13T20:16:03.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
try:
import pandas as pd
except ImportError:
pd = None
from ray.tune.error import TuneError
from ray.tune.result import EXPR_PROGRESS_FILE, EXPR_PARAM_FILE, CONFIG_PREFIX
logger = logging.getLogger(__name__)
class Analysis:
"""Analyze all results from a directory of experiments."""
def __init__(self, experiment_dir):
experiment_dir = os.path.expanduser(experiment_dir)
if not os.path.isdir(experiment_dir):
raise ValueError(
"{} is not a valid directory.".format(experiment_dir))
self._experiment_dir = experiment_dir
self._configs = {}
self._trial_dataframes = {}
if not pd:
logger.warning(
"pandas not installed. Run `pip install pandas` for "
"Analysis utilities.")
else:
self.fetch_trial_dataframes()
def dataframe(self, metric=None, mode=None):
"""Returns a pandas.DataFrame object constructed from the trials.
Args:
metric (str): Key for trial info to order on.
If None, uses last result.
mode (str): One of [min, max].
"""
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs(prefix=True)
for path, config in all_configs.items():
if path in rows:
rows[path].update(config)
rows[path].update(logdir=path)
return pd.DataFrame(list(rows.values()))
def get_best_config(self, metric, mode="max"):
"""Retrieve the best config corresponding to the trial.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
"""
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs()
compare_op = max if mode == "max" else min
best_path = compare_op(rows, key=lambda k: rows[k][metric])
return all_configs[best_path]
def get_best_logdir(self, metric, mode="max"):
"""Retrieve the logdir corresponding to the best trial.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
"""
df = self.dataframe(metric=metric, mode=mode)
if mode == "max":
return df.iloc[df[metric].idxmax()].logdir
elif mode == "min":
return df.iloc[df[metric].idxmin()].logdir
def fetch_trial_dataframes(self):
fail_count = 0
for path in self._get_trial_paths():
try:
self.trial_dataframes[path] = pd.read_csv(
os.path.join(path, EXPR_PROGRESS_FILE))
except Exception:
fail_count += 1
if fail_count:
logger.debug(
"Couldn't read results from {} paths".format(fail_count))
return self.trial_dataframes
def get_all_configs(self, prefix=False):
"""Returns a list of all configurations.
Parameters:
prefix (bool): If True, flattens the config dict
and prepends `config/`.
"""
fail_count = 0
for path in self._get_trial_paths():
try:
with open(os.path.join(path, EXPR_PARAM_FILE)) as f:
config = json.load(f)
if prefix:
for k in list(config):
config[CONFIG_PREFIX + k] = config.pop(k)
self._configs[path] = config
except Exception:
fail_count += 1
if fail_count:
logger.warning(
"Couldn't read config from {} paths".format(fail_count))
return self._configs
def _retrieve_rows(self, metric=None, mode=None):
assert mode is None or mode in ["max", "min"]
rows = {}
for path, df in self.trial_dataframes.items():
if mode == "max":
idx = df[metric].idxmax()
elif mode == "min":
idx = df[metric].idxmin()
else:
idx = -1
rows[path] = df.iloc[idx].to_dict()
return rows
def _get_trial_paths(self):
_trial_paths = []
for trial_path, _, files in os.walk(self._experiment_dir):
if EXPR_PROGRESS_FILE in files:
_trial_paths += [trial_path]
if not _trial_paths:
raise TuneError("No trials found in {}.".format(
self._experiment_dir))
return _trial_paths
@property
def trial_dataframes(self):
"""List of all dataframes of the trials."""
return self._trial_dataframes
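# Illustrative sketch (not part of the Ray Tune API): summarize all trials
# found under an experiment directory. The directory path and the metric name
# ("mean_accuracy") are assumptions made for the example.
def _example_analysis(experiment_dir="~/ray_results/my_experiment"):
    analysis = Analysis(experiment_dir)
    # one row per trial, taken at each trial's best "mean_accuracy" step
    df = analysis.dataframe(metric="mean_accuracy", mode="max")
    best_config = analysis.get_best_config(metric="mean_accuracy", mode="max")
    best_logdir = analysis.get_best_logdir(metric="mean_accuracy", mode="max")
    return df, best_config, best_logdir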
class ExperimentAnalysis(Analysis):
"""Analyze results from a Tune experiment.
Parameters:
experiment_checkpoint_path (str): Path to a json file
representing an experiment state. Corresponds to
Experiment.local_dir/Experiment.name/experiment_state.json
Example:
>>> tune.run(my_trainable, name="my_exp", local_dir="~/tune_results")
>>> analysis = ExperimentAnalysis(
>>> experiment_checkpoint_path="~/tune_results/my_exp/state.json")
"""
def __init__(self, experiment_checkpoint_path, trials=None):
"""Initializer.
Args:
            experiment_checkpoint_path (str): Path to the experiment state json file.
trials (list|None): List of trials that can be accessed via
`analysis.trials`.
"""
with open(experiment_checkpoint_path) as f:
_experiment_state = json.load(f)
self._experiment_state = _experiment_state
if "checkpoints" not in _experiment_state:
raise TuneError("Experiment state invalid; no checkpoints found.")
self._checkpoints = _experiment_state["checkpoints"]
self.trials = trials
super(ExperimentAnalysis, self).__init__(
os.path.dirname(experiment_checkpoint_path))
def get_best_trial(self, metric, mode="max", scope="all"):
"""Retrieve the best trial object.
Compares all trials' scores on `metric`.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
scope (str): One of [all, last]. If `scope=last`, only look at
each trial's final step for `metric`, and compare across
trials based on `mode=[min,max]`. If `scope=all`, find each
trial's min/max score for `metric` based on `mode`, and
compare trials based on `mode=[min,max]`.
"""
if mode not in ["max", "min"]:
raise ValueError(
"ExperimentAnalysis: attempting to get best trial for "
"metric {} for mode {} not in [\"max\", \"min\"]".format(
metric, mode))
if scope not in ["all", "last"]:
raise ValueError(
"ExperimentAnalysis: attempting to get best trial for "
"metric {} for scope {} not in [\"all\", \"last\"]".format(
metric, scope))
best_trial = None
best_metric_score = None
for trial in self.trials:
if metric not in trial.metric_analysis:
continue
if scope == "last":
metric_score = trial.metric_analysis[metric]["last"]
else:
metric_score = trial.metric_analysis[metric][mode]
if best_metric_score is None:
best_metric_score = metric_score
best_trial = trial
continue
if (mode == "max") and (best_metric_score < metric_score):
best_metric_score = metric_score
best_trial = trial
elif (mode == "min") and (best_metric_score > metric_score):
best_metric_score = metric_score
best_trial = trial
return best_trial
def get_best_config(self, metric, mode="max", scope="all"):
"""Retrieve the best config corresponding to the trial.
Compares all trials' scores on `metric`.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
scope (str): One of [all, last]. If `scope=last`, only look at
each trial's final step for `metric`, and compare across
trials based on `mode=[min,max]`. If `scope=all`, find each
trial's min/max score for `metric` based on `mode`, and
compare trials based on `mode=[min,max]`.
"""
best_trial = self.get_best_trial(metric, mode, scope)
return best_trial.config if best_trial else None
def get_best_logdir(self, metric, mode="max", scope="all"):
"""Retrieve the logdir corresponding to the best trial.
Compares all trials' scores on `metric`.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
scope (str): One of [all, last]. If `scope=last`, only look at
each trial's final step for `metric`, and compare across
trials based on `mode=[min,max]`. If `scope=all`, find each
trial's min/max score for `metric` based on `mode`, and
compare trials based on `mode=[min,max]`.
"""
best_trial = self.get_best_trial(metric, mode, scope)
return best_trial.logdir if best_trial else None
def stats(self):
"""Returns a dictionary of the statistics of the experiment."""
return self._experiment_state.get("stats")
def runner_data(self):
"""Returns a dictionary of the TrialRunner data."""
return self._experiment_state.get("runner_data")
def _get_trial_paths(self):
"""Overwrites Analysis to only have trials of one experiment."""
if self.trials:
_trial_paths = [t.logdir for t in self.trials]
else:
logger.warning("No `self.trials`. Drawing logdirs from checkpoint "
"file. This may result in some information that is "
"out of sync, as checkpointing is periodic.")
_trial_paths = [
checkpoint["logdir"] for checkpoint in self._checkpoints
]
if not _trial_paths:
raise TuneError("No trials found.")
return _trial_paths
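# Illustrative sketch (not part of the Ray Tune API): load a finished
# experiment from its state file. The path and the metric name are assumptions;
# per-trial results are still reached through the inherited Analysis methods.
def _example_experiment_analysis(state_path="~/tune_results/my_exp/experiment_state.json"):
    analysis = ExperimentAnalysis(os.path.expanduser(state_path))
    print(analysis.stats())        # experiment-level statistics
    print(analysis.runner_data())  # TrialRunner bookkeeping
    return analysis.dataframe(metric="episode_reward_mean", mode="max")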
| 36.649485
| 79
| 0.577215
|
794bd63a16a079b5c573121bb8641b7374c7a094
| 763
|
py
|
Python
|
input-generator.py
|
rfalke/hamming-distance-squared-benchmark
|
302d82ce3602b619c006a6d2e5dbef1a4bad9337
|
[
"MIT"
] | 1
|
2021-04-10T05:42:51.000Z
|
2021-04-10T05:42:51.000Z
|
input-generator.py
|
rfalke/hamming-distance-squared-benchmark
|
302d82ce3602b619c006a6d2e5dbef1a4bad9337
|
[
"MIT"
] | null | null | null |
input-generator.py
|
rfalke/hamming-distance-squared-benchmark
|
302d82ce3602b619c006a6d2e5dbef1a4bad9337
|
[
"MIT"
] | null | null | null |
import hashlib
def generate(name, num_entries, size):
entries = []
hashobj = hashlib.sha256()
hashobj.update("%d %d" % (num_entries, size))
for i in range(num_entries):
        hashobj.update(str(i).encode())
        entries.append(hashobj.hexdigest()[:size // 4])  # 4 bits per hex digit
assert len(set(entries)) == num_entries
entries.sort()
f = open("sample-data/%d_%s.input" % (size, name), "w")
f.write("%d %d\n" % (size, num_entries))
for i in range(num_entries):
f.write("%s %d\n" % (entries[i], i))
f.close()
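# Illustrative reader sketch (an assumption, not part of the original
# benchmark): the first line of a generated file holds "<bits> <num_entries>"
# and every following line holds "<hex_string> <index>".
def read_input_file(path):
    with open(path) as f:
        bits, num_entries = map(int, f.readline().split())
        entries = [line.split()[0] for line in f]
    assert len(entries) == num_entries
    return bits, entries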
def main():
for size in [64, 256]:
generate("1k", 1000, size)
generate("10k", 10000, size)
generate("100k", 100000, size)
generate("1m", 1000000, size)
if __name__ == "__main__":
main()
| 25.433333
| 59
| 0.576671
|
794bd653cb236fd0d527096dd4ff7caf60549f70
| 2,952
|
py
|
Python
|
jupyter/utils.py
|
dmfigol/devnet-3627
|
172b956cb893d36e25f3c120c1b68a12b634090e
|
[
"MIT"
] | 1
|
2018-06-16T12:48:54.000Z
|
2018-06-16T12:48:54.000Z
|
jupyter/utils.py
|
dmfigol/devnet-3627
|
172b956cb893d36e25f3c120c1b68a12b634090e
|
[
"MIT"
] | null | null | null |
jupyter/utils.py
|
dmfigol/devnet-3627
|
172b956cb893d36e25f3c120c1b68a12b634090e
|
[
"MIT"
] | 2
|
2018-06-14T14:30:23.000Z
|
2018-06-14T15:51:40.000Z
|
from typing import Dict
from ruamel.yaml import YAML
from constants import (
NORMALIZED_INTERFACES,
INTERFACE_NAME_RE,
NEIGHBOR_SPLIT_RE,
CDP_NEIGHBOR_RE,
HOSTS_FILE,
DEVICE_USERNAME,
DEVICE_PASSWORD,
DEVICE_TYPE,
CONNECTION_TIMEOUT,
)
def normalize_interface_type(interface_type: str) -> str:
"""Normalizes interface type
For example, G is converted to GigabitEthernet, Te is converted to TenGigabitEthernet
"""
int_type = interface_type.strip().lower()
for norm_int_type in NORMALIZED_INTERFACES:
if norm_int_type.lower().startswith(int_type):
return norm_int_type
return int_type
def normalize_interface_name(interface_name: str) -> str:
"""Normalizes interface name
    For example, Gi0/1 is converted to GigabitEthernet0/1,
Te1/1 is converted to TenGigabitEthernet1/1
"""
match = INTERFACE_NAME_RE.search(interface_name)
if match:
int_type = match.group("interface_type")
normalized_int_type = normalize_interface_type(int_type)
int_num = match.group("interface_num")
return normalized_int_type + int_num
raise ValueError(f"Does not recognize {interface_name} as an interface name")
def extract_hostname_from_fqdn(fqdn: str) -> str:
"""Extracts hostname from fqdn-like string
For example, R1.cisco.com -> R1, sw1 -> sw1"
"""
return fqdn.split(".")[0]
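# Illustrative checks mirroring the docstring examples above (assumes the
# NORMALIZED_INTERFACES constant contains the standard IOS names such as
# "GigabitEthernet" and "TenGigabitEthernet"):
def _example_normalization():
    assert normalize_interface_type("Gi") == "GigabitEthernet"
    assert normalize_interface_name("Te1/1") == "TenGigabitEthernet1/1"
    assert extract_hostname_from_fqdn("R1.cisco.com") == "R1"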
def parse_show_cdp_neighbors(cli_output: str) -> Dict[str, Dict[str, str]]:
"""Parses `show cdp neighbors` and returns a dictionary of neighbors and connected interfaces"""
result: Dict[str, Dict[str, str]] = {}
for neighbor_output in NEIGHBOR_SPLIT_RE.split(cli_output):
match = CDP_NEIGHBOR_RE.search(neighbor_output)
if match:
remote_fqdn = match.group("remote_fqdn")
local_interface = normalize_interface_name(match.group("local_interface"))
remote_interface = normalize_interface_name(match.group("remote_interface"))
remote_hostname = extract_hostname_from_fqdn(remote_fqdn)
result[local_interface] = {
"connected_device": {
"name": remote_hostname,
"port": remote_interface,
}
}
return dict(sorted(result.items()))
def get_devices_conn_params() -> Dict[str, Dict[str, str]]:
"""Creates a dictionary of connection parameters for SSH"""
result: Dict[str, Dict[str, str]] = {}
yaml = YAML()
with open(HOSTS_FILE, 'r') as f:
hosts = yaml.load(f)
for device, device_details in hosts["devices"]["routers"].items():
device_params = {
"host": device_details["host"],
"username": DEVICE_USERNAME,
"password": DEVICE_PASSWORD,
"device_type": DEVICE_TYPE,
"timeout": CONNECTION_TIMEOUT,
}
result[device] = device_params
return result
| 33.545455
| 100
| 0.661585
|
794bd70a542bd5c84469fd23eabe260691fb9554
| 4,668
|
py
|
Python
|
reid/engine/trainer.py
|
NothingToSay99/HOB-net
|
77d52fbd6cab5d24f6a724f146dc71e80759c1f9
|
[
"MIT"
] | 2
|
2021-08-02T01:03:11.000Z
|
2021-09-07T02:50:35.000Z
|
reid/engine/trainer.py
|
NothingToSay99/HOB-net
|
77d52fbd6cab5d24f6a724f146dc71e80759c1f9
|
[
"MIT"
] | null | null | null |
reid/engine/trainer.py
|
NothingToSay99/HOB-net
|
77d52fbd6cab5d24f6a724f146dc71e80759c1f9
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import logging
import torch
import torch.nn as nn
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Timer
from ignite.metrics import RunningAverage
from utils.reid_metric import R1_mAP
global ITER
ITER = 0
def create_supervised_trainer(model, optimizer, loss_fn,
device=None):
"""
Factory function for creating a trainer for supervised models
Args:
model (`torch.nn.Module`): the model to train
optimizer (`torch.optim.Optimizer`): the optimizer to use
loss_fn (torch.nn loss function): the loss function to use
device (str, optional): device type specification (default: None).
Applies to both model and batches.
Returns:
Engine: a trainer engine with supervised update function
"""
if device:
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.to(device)
def _update(engine, batch):
model.train()
optimizer.zero_grad()
img, target = batch
img = img.to(device) if torch.cuda.device_count() >= 1 else img
target = target.to(device) if torch.cuda.device_count() >= 1 else target
        score, feat, x_, cls_score_ = model(img)
        loss = loss_fn(score, feat, x_, cls_score_, target)
loss.backward()
optimizer.step()
# compute acc
acc = (score.max(1)[1] == target).float().mean()
return loss.item(), acc.item()
return Engine(_update)
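# Illustrative sketch (not part of the original pipeline): build the trainer
# for an arbitrary model/optimizer/loss and run a short training session.
# ``model``, ``optimizer``, ``loss_fn`` and ``train_loader`` are assumed to
# follow the interfaces used by ``_update`` above (the model returns
# score, feat, x_, cls_score_ and the loss takes those plus the target).
def _example_run_trainer(model, optimizer, loss_fn, train_loader, device="cuda"):
    trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
    RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')
    RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')
    trainer.run(train_loader, max_epochs=2)
    return trainer.state.metrics['avg_loss'], trainer.state.metrics['avg_acc']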
def do_train(
cfg,
model,
train_loader,
val_loader,
optimizer,
scheduler,
loss_fn,
num_query,
start_epoch
):
log_period = cfg.SOLVER.LOG_PERIOD
checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
eval_period = cfg.SOLVER.EVAL_PERIOD
output_dir = cfg.OUTPUT_DIR
device = cfg.MODEL.DEVICE
epochs = cfg.SOLVER.MAX_EPOCHS
logger = logging.getLogger("reid_baseline.train")
logger.info("Start training")
trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)
checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=10, require_empty=False)
timer = Timer(average=True)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model,
'optimizer': optimizer})
timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
# average metric to attach on trainer
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')
RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')
@trainer.on(Events.STARTED)
def start_training(engine):
engine.state.epoch = start_epoch
@trainer.on(Events.EPOCH_STARTED)
def adjust_learning_rate(engine):
scheduler.step()
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
global ITER
ITER += 1
if ITER % log_period == 0:
logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
.format(engine.state.epoch, ITER, len(train_loader),
engine.state.metrics['avg_loss'], engine.state.metrics['avg_acc'],
scheduler.get_lr()[0]))
if len(train_loader) == ITER:
ITER = 0
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def print_times(engine):
logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'
.format(engine.state.epoch, timer.value() * timer.step_count,
train_loader.batch_size / timer.value()))
logger.info('-' * 10)
timer.reset()
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
if engine.state.epoch % eval_period == 0:
evaluator.run(val_loader)
cmc, mAP = evaluator.state.metrics['r1_mAP']
logger.info("Validation Results - Epoch: {}".format(engine.state.epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
trainer.run(train_loader, max_epochs=epochs)
| 35.633588
| 147
| 0.627035
|
794bd7ed69e1e978e635f64a2d7412ba530be4c8
| 4,375
|
py
|
Python
|
setup.py
|
PRITI1999/klever
|
ac80edf4301c15f6b63e35837f4ffbf7e3e68809
|
[
"Apache-2.0"
] | 1
|
2021-01-09T08:44:37.000Z
|
2021-01-09T08:44:37.000Z
|
setup.py
|
Abhik1998/klever
|
827bbd31b29e213bf74cb1d1b158153e62a2933e
|
[
"Apache-2.0"
] | 3
|
2021-03-19T09:15:16.000Z
|
2021-09-22T19:24:40.000Z
|
setup.py
|
Abhik1998/klever
|
827bbd31b29e213bf74cb1d1b158153e62a2933e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
VERSION = '3.0'
def get_fallback_version():
if os.path.isfile('version'):
with open('version') as fp:
return fp.read()
return VERSION
def package_files(package_directory):
paths = []
for (root, _, filenames) in os.walk(package_directory):
for filename in filenames:
path = os.path.relpath(
os.path.join(root, filename), start=package_directory
)
paths.append(path)
return paths
setuptools.setup(
name="klever",
use_scm_version={'fallback_version': get_fallback_version()},
author="ISP RAS",
author_email="ldv-project@linuxtesting.org",
url="http://forge.ispras.ru/projects/klever",
license="LICENSE",
description="Klever is a software verification framework",
long_description=open("README.md", encoding="utf8").read(),
python_requires=">=3.7",
packages=["klever"],
package_data={"klever": package_files("klever")},
entry_points={
"console_scripts": [
"klever-core=klever.core.__main__:main",
"klever-client-controller=klever.scheduler.main:client_controller",
"klever-debug-scheduler=klever.scheduler.main:debug_scheduler",
"klever-native-scheduler=klever.scheduler.main:native_scheduler",
"klever-scheduler-client=klever.scheduler.main:scheduler_client",
"klever-verifiercloud-scheduler=klever.scheduler.main:verifiercloud_scheduler",
"klever-node-check=klever.scheduler.controller.checks.node:main",
"klever-resources-check=klever.scheduler.controller.checks.resources:main",
"klever-schedulers-check=klever.scheduler.controller.checks.schedulers:main",
"klever-build=klever.cli.klever_build:klever_build",
"klever-download-job=klever.cli.cli:download_job",
"klever-download-marks=klever.cli.cli:download_marks",
"klever-download-progress=klever.cli.cli:download_progress",
"klever-download-results=klever.cli.cli:download_results",
"klever-start-preset-solution=klever.cli.cli:start_preset_solution",
"klever-start-solution=klever.cli.cli:start_solution",
"klever-update-preset-mark=klever.cli.cli:update_preset_mark",
"klever-update-job=klever.cli.cli:upload_job",
"klever-deploy-local=klever.deploys.local:main",
"klever-deploy-openstack=klever.deploys.openstack:main",
]
},
install_requires=[
"Django==3.0.6",
"BenchExec==1.18",
"clade==3.2.7",
"psycopg2",
"graphviz",
"celery",
"django_celery_results",
"djangorestframework",
"django-compressor",
"django-mptt",
"gunicorn",
"pika",
"python-slugify",
"pytz",
"jinja2",
"ply",
"pygments",
"requests",
"setuptools_scm",
"sortedcontainers",
"consulate"
],
extras_require={
"strict": open("requirements.txt", encoding="utf8").read().splitlines(),
"docs": ["sphinx", "sphinx_rtd_theme"],
"openstack": [
"python-novaclient",
"python-neutronclient",
"python-glanceclient",
"python-cinderclient",
"keystoneauth1",
"paramiko",
"pycryptodome"
]
},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
],
)
| 35.282258
| 91
| 0.636343
|
794bd8a38379e263664980615695cfce15eb9c0d
| 6,837
|
py
|
Python
|
config/settings/production.py
|
studebacon/Ghostwriter
|
1cefcaa4859707ee11b2c3617bc03f8b3b74f57d
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/production.py
|
studebacon/Ghostwriter
|
1cefcaa4859707ee11b2c3617bc03f8b3b74f57d
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/production.py
|
studebacon/Ghostwriter
|
1cefcaa4859707ee11b2c3617bc03f8b3b74f57d
|
[
"BSD-3-Clause"
] | null | null | null |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list(
"DJANGO_ALLOWED_HOSTS", default=["ghostwriter.local", "localhost"]
)
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
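# Illustrative sketch (not part of the settings): with the Redis-backed cache
# configured above, application code uses the default cache like this. The
# import is done inside the function so the settings module stays import-safe.
def _example_cache_roundtrip():
    from django.core.cache import cache
    cache.set("greeting", "hello", timeout=30)
    return cache.get("greeting")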
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# MEDIA
# ------------------------------------------------------------------------------
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Ghostwriter <noreply@specterops.io>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[Ghostwriter]")
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool("COMPRESS_ENABLED", default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
COLLECTFAST_STRATEGY = "collectfast.strategies.filesystem.FileSystemStrategy"
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
| 41.436364
| 100
| 0.592658
|
794bd938918a29d1f907d62493c0d878c0541c2c
| 5,332
|
py
|
Python
|
argparsetree/cmd.py
|
wildfish/argparsetree
|
424fb13847c4788cad7084233d008d14e17b901c
|
[
"MIT"
] | 1
|
2019-02-22T08:42:33.000Z
|
2019-02-22T08:42:33.000Z
|
argparsetree/cmd.py
|
wildfish/argparsetree
|
424fb13847c4788cad7084233d008d14e17b901c
|
[
"MIT"
] | null | null | null |
argparsetree/cmd.py
|
wildfish/argparsetree
|
424fb13847c4788cad7084233d008d14e17b901c
|
[
"MIT"
] | 2
|
2018-11-14T20:17:46.000Z
|
2020-11-11T14:47:59.000Z
|
from __future__ import print_function
from argparse import ArgumentParser, HelpFormatter
import sys
class BaseCommand(object):
"""
The base command object
    :var description: The brief description of the command. If not set the first sentence
        of the docstring is used.
    :var sub_commands: A dictionary mapping names to sub commands. Each value should be a class
        inheriting from ``BaseCommand``.
:var help: The full help text to display. If not set the docstring is used.
    :var arg_parse_class: The class to use as the root argument parser (should extend or implement ``argparse.ArgumentParser``)
:var formatter_class: The class to use for formatting help text
"""
description = None
help = None
sub_commands = {}
arg_parse_class = ArgumentParser
formatter_class = HelpFormatter
def __init__(self, name=None, argv=None):
"""
Creates the command
:param name: The name the command is registered to
:param argv: List of argument values. If ``None``, ``sys.argv[1:]`` is used.
"""
self.name = name
self._arg_parser = None
self.argv = argv if argv is not None else sys.argv[1:]
@property
def sub_parser_dest_name(self):
"""
The name of the argument the name of the sub command will be stored in
"""
if self.name:
return u'{0}__sub_command'.format(self.name)
return 'sub_command'
@property
def arg_parser(self):
if not self._arg_parser:
self._arg_parser = self.get_root_argparser()
self.add_args(self._arg_parser)
self.register_sub_commands(self._arg_parser)
return self._arg_parser
def parse_args(self):
"""
Parses the command line arguments
:return: The arguments taken from the command line
"""
return self.arg_parser.parse_args(self.argv)
def add_args(self, parser):
"""
Adds arguments to the argument parser. This is used to modify which arguments are processed by the command.
For a full description of the argument parser see https://docs.python.org/3/library/argparse.html.
:param parser: The argument parser object
"""
pass
def register_sub_commands(self, parser):
"""
Add any sub commands to the argument parser.
:param parser: The argument parser object
"""
sub_commands = self.get_sub_commands()
if sub_commands:
sub_parsers = parser.add_subparsers(dest=self.sub_parser_dest_name)
for name, cls in sub_commands.items():
cmd = cls(name)
sub_parser = sub_parsers.add_parser(name, help=name, description=cmd.get_help(), formatter_class=cmd.get_formatter_class())
cmd.add_args(sub_parser)
cmd.register_sub_commands(sub_parser)
def get_root_argparser(self):
"""
Gets the root argument parser object.
"""
return self.arg_parse_class(description=self.get_help(), formatter_class=self.get_formatter_class())
def get_sub_commands(self):
"""
Gets a dictionary mapping names to sub commands. Values should be classes inheriting from Base.
:return: The list of sub commands.
"""
return self.sub_commands
def get_description(self):
"""
        Gets the description of the command. If it's not supplied the first sentence of the doc string is used.
"""
if self.description:
return self.description
elif self.__doc__ and self.__doc__.strip():
return self.__doc__.strip().split('.')[0] + '.'
else:
return ''
def get_help(self):
"""
        Gets the help text for the command. If it's not supplied the doc string is used.
"""
if self.help:
return self.help
elif self.__doc__ and self.__doc__.strip():
return self.__doc__.strip()
else:
return ''
def get_formatter_class(self):
"""
Gets the formatter class for formatting help text
"""
return self.formatter_class
def action(self, args):
"""
Performs the action of the command.
This should be implemented by sub classes.
:param args: The arguments parsed from parse_args
:return: The status code of the action (0 on success)
"""
self.arg_parser.print_help()
return 1
def run(self, args=None):
"""
Runs the command passing in the parsed arguments.
:param args: The arguments to run the command with. If ``None`` the arguments
are gathered from the argument parser. This is automatically set when calling
sub commands and in most cases should not be set for the root command.
:return: The status code of the action (0 on success)
"""
args = args or self.parse_args()
sub_command_name = getattr(args, self.sub_parser_dest_name, None)
if sub_command_name:
sub_commands = self.get_sub_commands()
cmd_cls = sub_commands[sub_command_name]
return cmd_cls(sub_command_name).run(args)
return self.action(args) or 0
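# Hedged usage sketch (not part of the original argparsetree module): a root
# command exposing one sub command. The class and argument names below are
# hypothetical and only illustrate the BaseCommand API shown above.
class HelloCommand(BaseCommand):
    """Prints a greeting."""
    def add_args(self, parser):
        parser.add_argument('name')
    def action(self, args):
        print('hello {0}'.format(args.name))
        return 0
class RootCommand(BaseCommand):
    """Example root command."""
    sub_commands = {'hello': HelloCommand}
# Typical entry point (kept commented out so this sketch has no import-time side effects):
# sys.exit(RootCommand(argv=['hello', 'world']).run())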
| 32.91358
| 139
| 0.631095
|
794bd95504035e63e91b678d036eab419c6fab07
| 9,930
|
py
|
Python
|
utils/build/configure.py
|
espe-org/hermes
|
e2e00c6177e1b1feb7519e9b140f3b46ca111f88
|
[
"MIT"
] | null | null | null |
utils/build/configure.py
|
espe-org/hermes
|
e2e00c6177e1b1feb7519e9b140f3b46ca111f88
|
[
"MIT"
] | null | null | null |
utils/build/configure.py
|
espe-org/hermes
|
e2e00c6177e1b1feb7519e9b140f3b46ca111f88
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import platform
import shutil
import subprocess
import sys
import time
import warnings
def build_dir_suffix(args):
suffices = []
if args.enable_asan:
suffices += ["asan"]
if args.enable_ubsan:
suffices += ["ubsan"]
if args.enable_tsan:
suffices += ["tsan"]
if args.distribute:
suffices += ["release"]
if args.is_32_bit:
suffices += ["32"]
if args.wasm:
suffices += ["wasm", args.emscripten_platform]
return ("_" + "_".join(suffices)) if suffices else ""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("hermes_build_dir", type=str, nargs="?", default=None)
parser.add_argument(
"--build-system",
type=str,
dest="build_system",
default="Ninja",
help="Generator to pass into CMake",
)
parser.add_argument(
"--cmake-flags",
type=str,
dest="cmake_flags",
default="",
help="Additional flags to pass to CMake",
)
parser.add_argument(
"--build-type",
type=str,
dest="build_type",
choices=["MinSizeRel", "Debug"],
default=None,
help="Optimization level of build",
)
parser.add_argument(
"--http-proxy",
type=str,
dest="http_proxy",
default=os.environ.get("HTTP_PROXY", ""),
)
parser.add_argument("--distribute", action="store_true")
parser.add_argument("--32-bit", dest="is_32_bit", action="store_true")
parser.add_argument("--enable-asan", dest="enable_asan", action="store_true")
parser.add_argument("--enable-ubsan", dest="enable_ubsan", action="store_true")
parser.add_argument("--enable-tsan", dest="enable_tsan", action="store_true")
parser.add_argument(
"--enable-trace-pc-guard", dest="enable_trace_pc_guard", action="store_true"
)
parser.add_argument("--icu", type=str, dest="icu_root", default="")
parser.add_argument("--fbsource", type=str, dest="fbsource_dir", default="")
parser.add_argument("--opcode-stats", dest="opcode_stats", action="store_true")
parser.add_argument(
"--basic-block-profiler", dest="basic_block_profiler", action="store_true"
)
parser.add_argument(
"--warnings-as-errors", dest="warnings_as_errors", action="store_true"
)
parser.add_argument("--static-link", dest="static_link", action="store_true")
parser.add_argument(
"--wasm",
action="store_true",
help="Build Hermes as WebAssembly instead of a native binary",
)
parser.add_argument(
"--emscripten-root",
dest="emscripten_root",
help="Path to the root of emscripten. Use emsdk to download",
)
parser.add_argument(
"--emscripten-platform",
dest="emscripten_platform",
choices=("upstream", "fastcomp"),
default="fastcomp",
help="Use either the upstream emscripten backend based on LLVM or the "
"fastcomp backend",
)
args = parser.parse_args()
if args.icu_root:
args.icu_root = os.path.realpath(args.icu_root)
if args.fbsource_dir:
args.fbsource_dir = os.path.realpath(args.fbsource_dir)
if args.emscripten_root:
args.emscripten_root = os.path.realpath(args.emscripten_root)
if args.wasm:
# Check that if wasm is specified, that emscripten_root is also specified.
if not args.emscripten_root:
raise ValueError("WASM build requested, but emscripten-root not given")
if not os.path.exists(args.emscripten_root):
raise ValueError(
"WASM build requested, but emscripten-root doesn't exist: "
+ args.emscripten_root
)
if not args.build_type:
if args.distribute:
# WASM doesn't need to be built to be small.
args.build_type = "Release" if args.wasm else "MinSizeRel"
else:
args.build_type = "Debug"
if not args.hermes_build_dir:
args.hermes_build_dir = "build" + build_dir_suffix(args)
args.hermes_build_dir = os.path.realpath(args.hermes_build_dir)
# Guess the ICU directory based on platform.
if not args.icu_root and platform.system() == "Linux":
icu_prefs = [
"/mnt/gvfs/third-party2/icu/4e8f3e00e1c7d7315fd006903a9ff7f073dfc02b/53.1/gcc-5-glibc-2.23/9bc6787",
"/mnt/gvfs/third-party2/icu/4e8f3e00e1c7d7315fd006903a9ff7f073dfc02b/53.1/gcc-4.8.1-glibc-2.17/c3f970a/",
]
for pref in icu_prefs:
if os.path.exists(pref):
args.icu_root = pref
break
return args
def run_command(cmd, **kwargs):
print("+ " + " ".join(cmd))
retries = kwargs.pop("retries", 0)
seconds_between_retries = kwargs.pop("seconds_between_retries", 10)
while True:
try:
return subprocess.check_call(
cmd, stdout=sys.stdout, stderr=sys.stderr, **kwargs
)
except subprocess.CalledProcessError as e:
if retries == 0:
raise
retries -= 1
print("Command failed, retrying soon: " + str(e))
time.sleep(seconds_between_retries)
print("Retrying...")
def which(cmd):
if sys.version_info >= (3, 3):
# On Python 3.3 and above, use shutil.which for a quick error message.
resolved = shutil.which(cmd)
if not resolved:
raise Exception("{} not found on PATH".format(cmd))
return resolved
else:
# Manually check PATH
for p in os.environ["PATH"].split(os.path.pathsep):
p = os.path.join(p, cmd)
if "PATHEXT" in os.environ:
# try out adding each extension to the PATH as well
for ext in os.environ["PATHEXT"].split(os.path.pathsep):
# Add the extension.
p_and_extension = p + ext
if os.path.exists(p_and_extension) and os.access(
p_and_extension, os.X_OK
):
return p_and_extension
else:
if os.path.isfile(p) and os.access(p, os.X_OK):
return p
raise Exception("{} not found on PATH".format(cmd))
def python_executable_flag():
if sys.executable and sys.version_info.major < 3:
warnings.warn(
"Configuring CMake with Python2. "
"Python3 is recommended for the configuration of the Hermes build"
)
return ["-DPYTHON_EXECUTABLE={}".format(sys.executable or which("python"))]
def main():
args = parse_args()
print(
"Building hermes using {} into {}".format(
args.build_system, args.hermes_build_dir + os.path.sep
)
)
try:
os.mkdir(args.hermes_build_dir)
except OSError:
# It's alright if the file already exists.
pass
cmake_flags = (
args.cmake_flags.split()
+ python_executable_flag()
+ ["-DCMAKE_BUILD_TYPE=" + args.build_type]
)
if (
platform.system() == "Windows"
and platform.machine().endswith("64")
and "Visual Studio" in args.build_system
):
cmake_flags += ["-Ax64"]
if args.opcode_stats:
cmake_flags += ["-DHERMESVM_PROFILER_OPCODE=ON"]
if args.basic_block_profiler:
cmake_flags += ["-DHERMESVM_PROFILER_BB=ON"]
if args.warnings_as_errors:
cmake_flags += ["-DHERMES_ENABLE_WERROR=ON"]
if args.static_link:
cmake_flags += ["-DHERMES_STATIC_LINK=ON"]
if args.enable_asan:
cmake_flags += ["-DHERMES_ENABLE_ADDRESS_SANITIZER=ON"]
if args.enable_ubsan:
cmake_flags += ["-DHERMES_ENABLE_UNDEFINED_BEHAVIOR_SANITIZER=ON"]
if args.enable_tsan:
cmake_flags += ["-DHERMES_ENABLE_THREAD_SANITIZER=ON"]
if args.enable_trace_pc_guard:
cmake_flags += ["-DHERMES_ENABLE_TRACE_PC_GUARD=ON"]
if args.fbsource_dir:
cmake_flags += ["-DFBSOURCE_DIR=" + args.fbsource_dir]
if args.wasm:
cmake_flags += [
"-DCMAKE_TOOLCHAIN_FILE={}".format(
os.path.join(
args.emscripten_root,
"cmake",
"Modules",
"Platform",
"Emscripten.cmake",
)
),
"-DCMAKE_EXE_LINKER_FLAGS="
"-s NODERAWFS=1 -s WASM=1 -s ALLOW_MEMORY_GROWTH=1",
"-DEMSCRIPTEN_FASTCOMP=" + str(int(args.emscripten_platform == "fastcomp")),
]
if args.icu_root:
cmake_flags += ["-DICU_ROOT=" + args.icu_root]
elif os.environ.get("SANDCASTLE") and platform.system() not in (
"macos",
"Darwin",
"Windows",
):
raise Exception("No ICU path provided on sandcastle")
print("CMake flags: {}".format(" ".join(cmake_flags)))
hermes_src_dir = os.path.realpath(__file__)
# The hermes directory is three directories up from this file.
# If this file is moved, make sure to update this.
for _ in range(3):
hermes_src_dir = os.path.dirname(hermes_src_dir)
cmake = which("cmake")
# Print the CMake version to assist in diagnosing issues.
print(
"CMake version:\n{}".format(
subprocess.check_output([cmake, "--version"], stderr=subprocess.STDOUT)
)
)
run_command(
[cmake, hermes_src_dir, "-G", args.build_system] + cmake_flags,
env=os.environ,
cwd=args.hermes_build_dir,
)
if __name__ == "__main__":
main()
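# Illustrative invocations (not part of the original script); the flags shown are
# the ones defined in parse_args above, and the paths are hypothetical:
#   python utils/build/configure.py --build-system Ninja --distribute build_release
#   python utils/build/configure.py --wasm --emscripten-root ~/emsdk/fastcomp/emscripten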
| 34.241379
| 117
| 0.606244
|
794bd984de5b6ee786040c91c92b83dc732bb341
| 17,822
|
py
|
Python
|
include/ClientGUIDialogsManage.py
|
sorashi/hydrus
|
0544a75d2117904b42e935d264ae35ded5cbf36a
|
[
"WTFPL"
] | null | null | null |
include/ClientGUIDialogsManage.py
|
sorashi/hydrus
|
0544a75d2117904b42e935d264ae35ded5cbf36a
|
[
"WTFPL"
] | null | null | null |
include/ClientGUIDialogsManage.py
|
sorashi/hydrus
|
0544a75d2117904b42e935d264ae35ded5cbf36a
|
[
"WTFPL"
] | null | null | null |
from . import ClientConstants as CC
from . import ClientGUICommon
from . import ClientGUIListCtrl
from . import ClientGUIDialogs
from . import ClientGUIShortcuts
from . import ClientRatings
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusGlobals as HG
from . import HydrusNATPunch
from . import HydrusNetwork
import itertools
import os
import traceback
from . import QtPorting as QP
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from . import QtPorting as QP
# Option Enums
class DialogManageRatings( ClientGUIDialogs.Dialog ):
def __init__( self, parent, media ):
self._hashes = set()
for m in media:
self._hashes.update( m.GetHashes() )
ClientGUIDialogs.Dialog.__init__( self, parent, 'manage ratings for ' + HydrusData.ToHumanInt( len( self._hashes ) ) + ' files', position = 'topleft' )
#
like_services = HG.client_controller.services_manager.GetServices( ( HC.LOCAL_RATING_LIKE, ) )
numerical_services = HG.client_controller.services_manager.GetServices( ( HC.LOCAL_RATING_NUMERICAL, ) )
self._panels = []
if len( like_services ) > 0:
self._panels.append( self._LikePanel( self, like_services, media ) )
if len( numerical_services ) > 0:
self._panels.append( self._NumericalPanel( self, numerical_services, media ) )
self._apply = QW.QPushButton( 'apply', self )
self._apply.clicked.connect( self.EventOK )
self._apply.setObjectName( 'HydrusAccept' )
self._cancel = QW.QPushButton( 'cancel', self )
self._cancel.clicked.connect( self.reject )
self._cancel.setObjectName( 'HydrusCancel' )
#
buttonbox = QP.HBoxLayout()
QP.AddToLayout( buttonbox, self._apply, CC.FLAGS_VCENTER )
QP.AddToLayout( buttonbox, self._cancel, CC.FLAGS_VCENTER )
vbox = QP.VBoxLayout()
for panel in self._panels:
QP.AddToLayout( vbox, panel, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, buttonbox, CC.FLAGS_BUTTON_SIZER )
self.setLayout( vbox )
size_hint = self.sizeHint()
QP.SetInitialSize( self, size_hint )
#
self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'global', 'media' ] )
def EventOK( self ):
try:
service_keys_to_content_updates = {}
for panel in self._panels:
sub_service_keys_to_content_updates = panel.GetContentUpdates()
service_keys_to_content_updates.update( sub_service_keys_to_content_updates )
if len( service_keys_to_content_updates ) > 0:
HG.client_controller.Write( 'content_updates', service_keys_to_content_updates )
finally:
self.done( QW.QDialog.Accepted )
def ProcessApplicationCommand( self, command ):
command_processed = True
command_type = command.GetCommandType()
data = command.GetData()
if command_type == CC.APPLICATION_COMMAND_TYPE_SIMPLE:
action = data
if action == 'manage_file_ratings':
self.EventOK()
else:
command_processed = False
else:
command_processed = False
return command_processed
class _LikePanel( QW.QWidget ):
def __init__( self, parent, services, media ):
QW.QWidget.__init__( self, parent )
self._services = services
self._media = media
self._service_keys_to_controls = {}
self._service_keys_to_original_ratings_states = {}
rows = []
for service in self._services:
name = service.GetName()
service_key = service.GetServiceKey()
rating_state = ClientRatings.GetLikeStateFromMedia( self._media, service_key )
control = ClientGUICommon.RatingLikeDialog( self, service_key )
control.SetRatingState( rating_state )
self._service_keys_to_controls[ service_key ] = control
self._service_keys_to_original_ratings_states[ service_key ] = rating_state
rows.append( ( name + ': ', control ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows, expand_text = True )
self.setLayout( gridbox )
def GetContentUpdates( self ):
service_keys_to_content_updates = {}
hashes = { hash for hash in itertools.chain.from_iterable( ( media.GetHashes() for media in self._media ) ) }
for ( service_key, control ) in list(self._service_keys_to_controls.items()):
original_rating_state = self._service_keys_to_original_ratings_states[ service_key ]
rating_state = control.GetRatingState()
if rating_state != original_rating_state:
if rating_state == ClientRatings.LIKE: rating = 1
elif rating_state == ClientRatings.DISLIKE: rating = 0
else: rating = None
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_RATINGS, HC.CONTENT_UPDATE_ADD, ( rating, hashes ) )
service_keys_to_content_updates[ service_key ] = ( content_update, )
return service_keys_to_content_updates
class _NumericalPanel( QW.QWidget ):
def __init__( self, parent, services, media ):
QW.QWidget.__init__( self, parent )
self._services = services
self._media = media
self._service_keys_to_controls = {}
self._service_keys_to_original_ratings_states = {}
rows = []
for service in self._services:
name = service.GetName()
service_key = service.GetServiceKey()
( rating_state, rating ) = ClientRatings.GetNumericalStateFromMedia( self._media, service_key )
control = ClientGUICommon.RatingNumericalDialog( self, service_key )
if rating_state != ClientRatings.SET:
control.SetRatingState( rating_state )
else:
control.SetRating( rating )
self._service_keys_to_controls[ service_key ] = control
self._service_keys_to_original_ratings_states[ service_key ] = ( rating_state, rating )
rows.append( ( name + ': ', control ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows, expand_text = True )
self.setLayout( gridbox )
def GetContentUpdates( self ):
service_keys_to_content_updates = {}
hashes = { hash for hash in itertools.chain.from_iterable( ( media.GetHashes() for media in self._media ) ) }
for ( service_key, control ) in list(self._service_keys_to_controls.items()):
( original_rating_state, original_rating ) = self._service_keys_to_original_ratings_states[ service_key ]
rating_state = control.GetRatingState()
if rating_state == ClientRatings.NULL:
rating = None
else:
rating = control.GetRating()
if rating != original_rating:
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_RATINGS, HC.CONTENT_UPDATE_ADD, ( rating, hashes ) )
service_keys_to_content_updates[ service_key ] = ( content_update, )
return service_keys_to_content_updates
class DialogManageUPnP( ClientGUIDialogs.Dialog ):
def __init__( self, parent ):
title = 'manage local upnp'
ClientGUIDialogs.Dialog.__init__( self, parent, title )
self._status_st = ClientGUICommon.BetterStaticText( self )
listctrl_panel = ClientGUIListCtrl.BetterListCtrlPanel( self )
        columns = [ ( 'description', -1 ), ( 'internal ip', 17 ), ( 'internal port', 7 ), ( 'external port', 7 ), ( 'protocol', 5 ), ( 'lease', 12 ) ]
self._mappings_list_ctrl = ClientGUIListCtrl.BetterListCtrl( listctrl_panel, 'manage_upnp_mappings', 12, 36, columns, self._ConvertDataToListCtrlTuples, delete_key_callback = self._Remove, activation_callback = self._Edit )
listctrl_panel.SetListCtrl( self._mappings_list_ctrl )
listctrl_panel.AddButton( 'add custom mapping', self._Add )
listctrl_panel.AddButton( 'edit mapping', self._Edit, enabled_only_on_selection = True )
listctrl_panel.AddButton( 'remove mapping', self._Remove, enabled_only_on_selection = True )
self._ok = QW.QPushButton( 'ok', self )
self._ok.clicked.connect( self.EventOK )
self._ok.setObjectName( 'HydrusAccept' )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._status_st, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, listctrl_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( vbox, self._ok, CC.FLAGS_LONE_BUTTON )
self.setLayout( vbox )
size_hint = self.sizeHint()
size_hint.setWidth( max( size_hint.width(), 760 ) )
QP.SetInitialSize( self, size_hint )
#
self._mappings = []
self._mappings_list_ctrl.Sort( 0 )
self._started_external_ip_fetch = False
self._RefreshMappings()
def _Add( self ):
do_refresh = False
external_port = HC.DEFAULT_SERVICE_PORT
protocol = 'TCP'
internal_port = HC.DEFAULT_SERVICE_PORT
description = 'hydrus service'
duration = 0
with ClientGUIDialogs.DialogInputUPnPMapping( self, external_port, protocol, internal_port, description, duration ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
( external_port, protocol, internal_port, description, duration ) = dlg.GetInfo()
for ( existing_description, existing_internal_ip, existing_internal_port, existing_external_port, existing_protocol, existing_lease ) in self._mappings:
if external_port == existing_external_port and protocol == existing_protocol:
QW.QMessageBox.critical( self, 'Error', 'That external port already exists!' )
return
internal_client = HydrusNATPunch.GetLocalIP()
HydrusNATPunch.AddUPnPMapping( internal_client, internal_port, external_port, protocol, description, duration = duration )
do_refresh = True
if do_refresh:
self._RefreshMappings()
def _ConvertDataToListCtrlTuples( self, mapping ):
( description, internal_ip, internal_port, external_port, protocol, duration ) = mapping
if duration == 0:
pretty_duration = 'indefinite'
else:
pretty_duration = HydrusData.TimeDeltaToPrettyTimeDelta( duration )
display_tuple = ( description, internal_ip, str( internal_port ), str( external_port ), protocol, pretty_duration )
sort_tuple = mapping
return ( display_tuple, sort_tuple )
def _Edit( self ):
do_refresh = False
selected_mappings = self._mappings_list_ctrl.GetData( only_selected = True )
for selected_mapping in selected_mappings:
( description, internal_ip, internal_port, external_port, protocol, duration ) = selected_mapping
with ClientGUIDialogs.DialogInputUPnPMapping( self, external_port, protocol, internal_port, description, duration ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
( external_port, protocol, internal_port, description, duration ) = dlg.GetInfo()
HydrusNATPunch.RemoveUPnPMapping( external_port, protocol )
internal_client = HydrusNATPunch.GetLocalIP()
HydrusNATPunch.AddUPnPMapping( internal_client, internal_port, external_port, protocol, description, duration = duration )
do_refresh = True
else:
break
if do_refresh:
self._RefreshMappings()
def _RefreshExternalIP( self ):
def qt_code( external_ip_text ):
if not self or not QP.isValid( self ):
return
self._status_st.setText( external_ip_text )
def THREADdo_it():
try:
external_ip = HydrusNATPunch.GetExternalIP()
external_ip_text = 'External IP: {}'.format( external_ip )
except Exception as e:
external_ip_text = 'Error finding external IP: ' + str( e )
QP.CallAfter( qt_code, external_ip_text )
self._status_st.setText( 'Loading external IP\u2026' )
HG.client_controller.CallToThread( THREADdo_it )
def _RefreshMappings( self ):
def qt_code( mappings ):
if not self or not QP.isValid( self ):
return
self._mappings = mappings
self._mappings_list_ctrl.SetData( self._mappings )
self._status_st.setText( '' )
if not self._started_external_ip_fetch:
self._started_external_ip_fetch = True
self._RefreshExternalIP()
def THREADdo_it():
try:
mappings = HydrusNATPunch.GetUPnPMappings()
except Exception as e:
HydrusData.ShowException( e )
QP.CallAfter( QW.QMessageBox.critical, self, 'Error', 'Could not load mappings:'+os.linesep*2+str(e) )
return
QP.CallAfter( qt_code, mappings )
self._status_st.setText( 'Refreshing mappings--please wait\u2026' )
self._mappings_list_ctrl.SetData( [] )
HG.client_controller.CallToThread( THREADdo_it )
def _Remove( self ):
do_refresh = False
selected_mappings = self._mappings_list_ctrl.GetData( only_selected = True )
for selected_mapping in selected_mappings:
( description, internal_ip, internal_port, external_port, protocol, duration ) = selected_mapping
HydrusNATPunch.RemoveUPnPMapping( external_port, protocol )
do_refresh = True
if do_refresh:
self._RefreshMappings()
def EventOK( self ):
self.done( QW.QDialog.Accepted )
| 32.942699
| 231
| 0.512681
|
794bda3b9cb64f08cad298202c85e44ef7dc70a6
| 415
|
py
|
Python
|
5KYU/flatten/test.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
5KYU/flatten/test.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
5KYU/flatten/test.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
import unittest
import main
class TestFlatten(unittest.TestCase):
def test_one(self):
""" Should return flattens arguments into a single array """
self.assertEqual([1, 2, 3, 4, 5, 6, 7], main.flatten(1, [2, 3], 4, 5, [6, [7]]))
self.assertEqual(['a', 'b', 2, 3, None, 4, 'c'], main.flatten('a', ['b', 2], 3, None, [[4], ['c']]))
if __name__ == '__main__':
unittest.main()
| 31.923077
| 108
| 0.559036
|
794bda9c5b9c485f61a52d97d6da461e4f46c26c
| 113
|
py
|
Python
|
jupyterblog/attention/attention.py
|
vimarshc/jupyterblog
|
433fb170cfbc73a3e98b22b25bc7177af04f32f9
|
[
"Apache-2.0"
] | null | null | null |
jupyterblog/attention/attention.py
|
vimarshc/jupyterblog
|
433fb170cfbc73a3e98b22b25bc7177af04f32f9
|
[
"Apache-2.0"
] | 2
|
2021-05-20T21:55:08.000Z
|
2021-09-28T05:36:06.000Z
|
jupyterblog/attention/attention.py
|
vimarshc/jupyterblog
|
433fb170cfbc73a3e98b22b25bc7177af04f32f9
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: attention.attention.ipynb (unless otherwise specified).
__all__ = []
| 37.666667
| 99
| 0.761062
|
794bda9d18c9b1443b443c3d307a7b5fa028dc25
| 3,958
|
py
|
Python
|
sppas/sppas/src/dependencies/grako/rendering.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/dependencies/grako/rendering.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/dependencies/grako/rendering.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
The Renderer class provides the infrastructure for generating template-based
code. It's used by the .grammars module for parser generation.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import string
from sppas.src.dependencies.grako.util import indent, isiter, strtype, trim, ustr
def render(item, join='', **fields):
"""Render the given item
"""
if item is None:
return ''
elif isinstance(item, strtype):
return item
elif isinstance(item, Renderer):
return item.render(join=join, **fields)
elif isiter(item):
return join.join(render(e, **fields) for e in iter(item) if e is not None)
elif isinstance(item, (int, float)):
return item
else:
return ustr(item)
class RenderingFormatter(string.Formatter):
def render(self, item, join='', **fields):
return render(item, join=join, **fields)
def format_field(self, value, spec):
if ':' not in spec:
return super(RenderingFormatter, self).format_field(
self.render(value),
spec
)
ind, sep, fmt = spec.split(':')
if sep == '\\n':
sep = '\n'
if not ind:
ind = 0
mult = 0
elif '*' in ind:
ind, mult = ind.split('*')
else:
mult = 4
ind = int(ind)
mult = int(mult)
if not fmt:
fmt = '%s'
if isiter(value):
return indent(sep.join(fmt % self.render(v) for v in value), ind, mult)
else:
return indent(fmt % self.render(value), ind, mult)
class Renderer(object):
    """Renders the fields in the current object using a template
provided statically, on the constructor, or as a parameter
to render().
Fields with a leading underscore are not made available to
the template. Additional fields may be made available by
overriding render_fields().
"""
template = '{__class__}'
_counter = itertools.count()
_formatter = RenderingFormatter()
def __init__(self, template=None):
if template is not None:
self.template = template
@classmethod
def counter(cls):
return next(cls._counter)
@classmethod
def reset_counter(cls):
Renderer._counter = itertools.count()
@property
def formatter(self):
return self._formatter
@formatter.setter
def formatter(self, value):
self._formatter = value
def rend(self, item, join='', **fields):
"""A shortcut for self._formatter.render()
"""
return self._formatter.render(item, join=join, **fields)
def indent(self, item, ind=1, multiplier=4):
        return indent(self.rend(item), indent=ind, multiplier=multiplier)
def trim(self, item, tabwidth=4):
return trim(self.rend(item), tabwidth=tabwidth)
def render_fields(self, fields):
"""Pre-render fields before rendering the template.
"""
pass
def render(self, template=None, **fields):
fields.update(__class__=self.__class__.__name__)
fields.update({k: v for k, v in vars(self).items() if not k.startswith('_')})
override = self.render_fields(fields)
if override is not None:
template = override
elif template is None:
template = self.template
try:
return self._formatter.format(trim(template), **fields)
except KeyError:
# find the missing key
keys = (p[1] for p in self._formatter.parse(template))
for key in keys:
if key and key not in fields:
raise KeyError(key, type(self))
raise
def __str__(self):
return self.render()
def __repr__(self):
return str(self)
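# Hedged usage sketch (not part of the original module): a minimal Renderer
# subclass whose template is filled from the instance's public attributes.
class GreetingExample(Renderer):
    template = 'hello {name}'
    def __init__(self, name):
        super(GreetingExample, self).__init__()
        self.name = name
# str(GreetingExample('world')) would render to 'hello world'.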
| 28.271429
| 85
| 0.588934
|
794bdb9caf14f16511fe6db9c60da260a0668c1e
| 2,349
|
py
|
Python
|
cms/tests/test_streamforms.py
|
DBonbon/pythoneatstail
|
3f9e8ddd07f891653c82aa396a96709914113f3a
|
[
"MIT"
] | 4
|
2021-06-18T10:26:24.000Z
|
2021-11-06T03:50:04.000Z
|
cms/tests/test_streamforms.py
|
DBonbon/pythoneatstail
|
3f9e8ddd07f891653c82aa396a96709914113f3a
|
[
"MIT"
] | 10
|
2020-06-03T01:48:21.000Z
|
2021-09-22T19:02:14.000Z
|
cms/tests/test_streamforms.py
|
DBonbon/pythoneatstail
|
3f9e8ddd07f891653c82aa396a96709914113f3a
|
[
"MIT"
] | 2
|
2020-12-06T08:49:36.000Z
|
2021-02-28T23:19:20.000Z
|
from django.test import TestCase
from ..wagtailstreamforms_fields import ReCaptchaField
from wagtailstreamforms.models import Form
import json
from ..models import AdvancedFormSetting
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import QueryDict
from ..wagtailstreamforms_hooks import email_submission
from django.core import mail
class TestWagtailStreamforms(TestCase):
def test_recaptcha_field(self):
field = ReCaptchaField().get_formfield({})
self.assertTrue(field.required)
def test_form(self):
form = Form.objects.create(
title="Form",
template_name="streamforms/form_block.html",
slug="form",
fields=json.dumps(
[
{
"type": "singleline",
"value": {"label": "singleline", "required": True},
"id": "9c46e208-e53a-4562-81f6-3fb3f34520f2",
},
{
"type": "multifile",
"value": {"label": "multifile", "required": True},
"id": "91bac05f-754b-41a3-b038-ac7850e6f951",
},
]
),
)
return form
def test_send_email(self):
instance = self.test_form()
advancedformsetting = AdvancedFormSetting.objects.create(form=instance, to_address="mail@example.com")
data_dict = {
"singleline": 'text',
"form_id": instance.pk,
"form_reference": "some-ref",
}
uploadedfile = SimpleUploadedFile("file.mp4", b"file_content", content_type="video/mp4")
files_dict = QueryDict(mutable=True)
files_dict.update({"multifile": uploadedfile})
files_dict.update({"multifile": uploadedfile})
form_class = instance.get_form(data=data_dict, files=files_dict)
assert form_class.is_valid()
# Send message.
email_submission(instance, form_class)
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
# Verify that the subject of the first message is correct.
expected_subject = 'New Form Submission : %s' % instance.title
self.assertEqual(mail.outbox[0].subject, expected_subject)
| 35.059701
| 110
| 0.59685
|
794bdd5493e56574a7c73df526cf88841cd102bb
| 300
|
py
|
Python
|
RecoLuminosity/LumiProducer/python/lumiProducer_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoLuminosity/LumiProducer/python/lumiProducer_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoLuminosity/LumiProducer/python/lumiProducer_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
lumiProducer=cms.EDProducer("LumiProducer",
connect=cms.string(''),
lumiversion=cms.untracked.string(''),
ncacheEntries=cms.untracked.uint32(5)
)
| 37.5
| 65
| 0.496667
|
794bdd948a28fae4d7a03f992fdf18775dc790e7
| 4,549
|
py
|
Python
|
imperative/python/megengine/optimizer/adam.py
|
kxz18/MegEngine
|
88c1eedbd716805244b35bdda57c3cea5efe734d
|
[
"Apache-2.0"
] | 5,168
|
2020-03-19T06:10:04.000Z
|
2022-03-31T11:11:54.000Z
|
imperative/python/megengine/optimizer/adam.py
|
kxz18/MegEngine
|
88c1eedbd716805244b35bdda57c3cea5efe734d
|
[
"Apache-2.0"
] | 286
|
2020-03-25T01:36:23.000Z
|
2022-03-31T10:26:33.000Z
|
imperative/python/megengine/optimizer/adam.py
|
kxz18/MegEngine
|
88c1eedbd716805244b35bdda57c3cea5efe734d
|
[
"Apache-2.0"
] | 515
|
2020-03-19T06:10:05.000Z
|
2022-03-30T09:15:59.000Z
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
from typing import Iterable, Tuple, Union
from ..functional.inplace import _inplace_add_
from ..tensor import Parameter, tensor
from .optimizer import Optimizer
class Adam(Optimizer):
r"""Implements Adam algorithm proposed in `"Adam: A Method for Stochastic Optimization" <https://arxiv.org/abs/1412.6980>`_.
Args:
params: iterable of parameters to optimize or dicts defining
parameter groups.
lr: learning rate.
betas: coefficients used for computing running averages of gradient
and its square. Default: (0.9, 0.999)
eps: term added to the denominator to improve numerical stability. Default: 1e-8
weight_decay: weight decay (L2 penalty). Default: 0
"""
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.0,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, weight_decay=weight_decay, betas=betas, eps=eps)
super().__init__(params, defaults)
self._disable_type_convert = True
def _create_state(self, param_group):
for param in param_group["params"]:
self._add_state(param, "exp_avg")
self._add_state(param, "exp_avg_sq")
self._add_state(param, "step", initializer=0.0)
def _updates(self, param_group):
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
eps = param_group["eps"]
beta0, beta1 = param_group["betas"]
def make_scalar(val):
return tensor(val)
        # since `convert_inputs` is disabled for param updates,
        # scalars should be explicitly transformed to tensors
_lr, _neg_lr = map(make_scalar, (lr, -lr))
_weight_decay = make_scalar(weight_decay)
_eps = make_scalar(eps)
_beta0, _beta1 = map(make_scalar, (beta0, beta1))
c1, c05 = map(make_scalar, (1.0, 0.5))
inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
if inplace_mode:
# reduce device sync
c1_sub_beta0, c1_sub_beta1 = map(make_scalar, (1 - beta0, 1 - beta1))
for param in param_group["params"]:
if param.grad is None:
continue
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
states = self._state[param]
step, exp_avg, exp_avg_sq = (
states["step"],
states["exp_avg"],
states["exp_avg_sq"],
)
if inplace_mode:
_inplace_add_(step, c1, alpha=c1, beta=c1)
_inplace_add_(exp_avg, grad, alpha=_beta0, beta=c1_sub_beta0)
_inplace_add_(
exp_avg_sq, grad * grad, alpha=_beta1, beta=c1_sub_beta1,
)
delta = (exp_avg / (c1 - _beta0 ** step)) / (
(exp_avg_sq / (c1 - _beta1 ** step)) ** c05 + _eps
)
_inplace_add_(param, delta, alpha=c1, beta=_neg_lr)
continue
# step = step + c1
step += c1
# exp_avg = _beta0 * exp_avg + grad * (c1 - _beta0)
exp_avg *= _beta0
exp_avg += grad * (c1 - _beta0)
# exp_avg_sq = _beta1 * exp_avg_sq + (c1 - _beta1) * (grad * grad)
exp_avg_sq *= _beta1
exp_avg_sq += (c1 - _beta1) * (grad * grad)
delta = (exp_avg / (c1 - _beta0 ** step)) / (
(exp_avg_sq / (c1 - _beta1 ** step)) ** c05 + _eps
)
param -= _lr * delta
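# Hedged usage sketch (not part of this file), assuming a typical MegEngine
# training step where a GradManager has already populated the gradients;
# `net`, `data` and the loss are hypothetical:
#   from megengine.autodiff import GradManager
#   gm = GradManager().attach(net.parameters())
#   opt = Adam(net.parameters(), lr=1e-3, betas=(0.9, 0.999))
#   with gm:
#       loss = net(data).sum()
#       gm.backward(loss)
#   opt.step()
#   opt.clear_grad()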
| 36.103175
| 128
| 0.575511
|
794bdf5fca17d80cfb7507f3c55c9dc077c6b56a
| 4,829
|
py
|
Python
|
core/platform/email/dev_mode_email_services.py
|
aasiffaizal/oppia
|
1a8634a435bec10f407e9f3c95f62bd467c5b5f7
|
[
"Apache-2.0"
] | null | null | null |
core/platform/email/dev_mode_email_services.py
|
aasiffaizal/oppia
|
1a8634a435bec10f407e9f3c95f62bd467c5b5f7
|
[
"Apache-2.0"
] | 1
|
2021-09-24T20:37:12.000Z
|
2021-09-24T21:27:54.000Z
|
core/platform/email/dev_mode_email_services.py
|
aasiffaizal/oppia
|
1a8634a435bec10f407e9f3c95f62bd467c5b5f7
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides email services api to log emails in DEV_MODE."""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import textwrap
import python_utils
from typing import Dict, List, Optional, Union # isort:skip
def send_email_to_recipients(
sender_email: str,
recipient_emails: List[str],
subject: str,
plaintext_body: str,
html_body: str,
bcc: Optional[List[str]] = None,
reply_to: Optional[str] = None,
recipient_variables: Optional[
Dict[str, Dict[str, Union[str, float]]]] = None
) -> bool:
"""Prints information about sent emails to the terminal console, in order
to model sending an email in development mode.
Args:
sender_email: str. The email address of the sender. This should be in
the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
            'SENDER_EMAIL_ADDRESS'. Format must be utf-8.
recipient_emails: list(str). The email addresses of the recipients.
Format must be utf-8.
subject: str. The subject line of the email. Format must be utf-8.
plaintext_body: str. The plaintext body of the email. Format must
be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Format must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Format must
be utf-8.
reply_to: str|None. Optional argument. Reply address formatted like
            'reply+<reply_id>@<incoming_email_domain_name>' where
reply_id is the unique id of the sender. Format must be utf-8.
recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables =
{"bob@example.com": {"first":"Bob", "id":1},
"alice@example.com": {"first":"Alice", "id":2}}
            subject = 'Hey, %recipient.first%'
More info about this format at:
https://documentation.mailgun.com/en/
latest/user_manual.html#batch-sending
Returns:
bool. Whether the emails are "sent" successfully.
"""
# Show the first 3 emails in the recipient list.
recipient_email_list_str = ' '.join(
['%s' %
(recipient_email,) for recipient_email in recipient_emails[:3]])
if len(recipient_emails) > 3:
recipient_email_list_str += (
'... Total: %s emails.' % (
python_utils.UNICODE(len(recipient_emails))))
# Show the first 3 emails in bcc email list.
if bcc:
bcc_email_list_str = ' '.join(
['%s' %
(bcc_email,) for bcc_email in bcc[:3]])
if len(bcc) > 3:
bcc_email_list_str += (
'... Total: %s emails.' % python_utils.UNICODE(len(bcc)))
msg = (
"""
EmailService.SendMail
From: %s
To: %s
Subject: %s
Body:
Content-type: text/plain
Data length: %d
Body:
Content-type: text/html
Data length: %d
""" % (
sender_email, recipient_email_list_str, subject,
len(plaintext_body), len(html_body)))
optional_msg_description = (
"""
Bcc: %s
Reply_to: %s
Recipient Variables:
Length: %d
""" % (
bcc_email_list_str if bcc else 'None',
reply_to if reply_to else 'None',
len(recipient_variables) if recipient_variables else 0))
logging.info(
textwrap.dedent(msg) + textwrap.dedent(optional_msg_description))
logging.info(
'You are not currently sending out real emails since this is a' +
' dev environment. Emails are sent out in the production' +
' environment.')
# Returns True signifying that the "send_email_to_recipients" action was
# successful.
return True
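# Hedged usage sketch (not part of the original module); in DEV_MODE this only
# logs the message details instead of sending anything. Addresses are hypothetical.
#   send_email_to_recipients(
#       sender_email='Oppia <noreply@example.com>',
#       recipient_emails=['bob@example.com', 'alice@example.com'],
#       subject='Hey, %recipient.first%',
#       plaintext_body='Hello!',
#       html_body='<p>Hello!</p>',
#       recipient_variables={
#           'bob@example.com': {'first': 'Bob', 'id': 1},
#           'alice@example.com': {'first': 'Alice', 'id': 2}})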
| 37.726563
| 79
| 0.623317
|
794bdf657e0bb377bda76f31d641af3de1c04f1f
| 6,418
|
py
|
Python
|
build/PureCloudPlatformClientV2/models/scheduling_testing_options_request.py
|
cjohnson-ctl/platform-client-sdk-python
|
38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100
|
[
"MIT"
] | 1
|
2021-10-08T20:46:45.000Z
|
2021-10-08T20:46:45.000Z
|
libs/PureCloudPlatformClientV2/models/scheduling_testing_options_request.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | null | null | null |
libs/PureCloudPlatformClientV2/models/scheduling_testing_options_request.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class SchedulingTestingOptionsRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SchedulingTestingOptionsRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'fast_scheduling': 'bool',
'delay_scheduling': 'bool',
'fail_scheduling': 'bool',
'populate_warnings': 'bool'
}
self.attribute_map = {
'fast_scheduling': 'fastScheduling',
'delay_scheduling': 'delayScheduling',
'fail_scheduling': 'failScheduling',
'populate_warnings': 'populateWarnings'
}
self._fast_scheduling = None
self._delay_scheduling = None
self._fail_scheduling = None
self._populate_warnings = None
@property
def fast_scheduling(self):
"""
Gets the fast_scheduling of this SchedulingTestingOptionsRequest.
Whether to enable fast scheduling
:return: The fast_scheduling of this SchedulingTestingOptionsRequest.
:rtype: bool
"""
return self._fast_scheduling
@fast_scheduling.setter
def fast_scheduling(self, fast_scheduling):
"""
Sets the fast_scheduling of this SchedulingTestingOptionsRequest.
Whether to enable fast scheduling
:param fast_scheduling: The fast_scheduling of this SchedulingTestingOptionsRequest.
:type: bool
"""
self._fast_scheduling = fast_scheduling
@property
def delay_scheduling(self):
"""
Gets the delay_scheduling of this SchedulingTestingOptionsRequest.
Whether to force delayed scheduling
:return: The delay_scheduling of this SchedulingTestingOptionsRequest.
:rtype: bool
"""
return self._delay_scheduling
@delay_scheduling.setter
def delay_scheduling(self, delay_scheduling):
"""
Sets the delay_scheduling of this SchedulingTestingOptionsRequest.
Whether to force delayed scheduling
:param delay_scheduling: The delay_scheduling of this SchedulingTestingOptionsRequest.
:type: bool
"""
self._delay_scheduling = delay_scheduling
@property
def fail_scheduling(self):
"""
Gets the fail_scheduling of this SchedulingTestingOptionsRequest.
Whether to force scheduling to fail
:return: The fail_scheduling of this SchedulingTestingOptionsRequest.
:rtype: bool
"""
return self._fail_scheduling
@fail_scheduling.setter
def fail_scheduling(self, fail_scheduling):
"""
Sets the fail_scheduling of this SchedulingTestingOptionsRequest.
Whether to force scheduling to fail
:param fail_scheduling: The fail_scheduling of this SchedulingTestingOptionsRequest.
:type: bool
"""
self._fail_scheduling = fail_scheduling
@property
def populate_warnings(self):
"""
Gets the populate_warnings of this SchedulingTestingOptionsRequest.
Whether to populate warnings in the generated schedule
:return: The populate_warnings of this SchedulingTestingOptionsRequest.
:rtype: bool
"""
return self._populate_warnings
@populate_warnings.setter
def populate_warnings(self, populate_warnings):
"""
Sets the populate_warnings of this SchedulingTestingOptionsRequest.
Whether to populate warnings in the generated schedule
:param populate_warnings: The populate_warnings of this SchedulingTestingOptionsRequest.
:type: bool
"""
self._populate_warnings = populate_warnings
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
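# Hedged usage sketch (not part of the generated model): populate the testing
# options and serialise them for a request body.
#   options = SchedulingTestingOptionsRequest()
#   options.fast_scheduling = True
#   options.populate_warnings = False
#   body = options.to_json()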
| 30.708134
| 96
| 0.623091
|
794be096b7309a18f9fe225642bcaafb5058df78
| 37,590
|
py
|
Python
|
tensorflow/python/kernel_tests/segment_reduction_ops_test.py
|
Jibanprakash/tensorflow
|
a8ae26ae1aa7a33b48cca8bf12c42ab7503a45cf
|
[
"Apache-2.0"
] | 54
|
2018-05-29T19:52:44.000Z
|
2021-11-30T10:41:12.000Z
|
tensorflow/python/kernel_tests/segment_reduction_ops_test.py
|
caelean/tensorflow
|
dcb10b1d557168646204239bea6ca5bf1abc40a3
|
[
"Apache-2.0"
] | 20
|
2017-12-06T18:20:54.000Z
|
2021-11-10T09:54:23.000Z
|
tensorflow/python/kernel_tests/segment_reduction_ops_test.py
|
caelean/tensorflow
|
dcb10b1d557168646204239bea6ca5bf1abc40a3
|
[
"Apache-2.0"
] | 31
|
2018-09-11T02:17:17.000Z
|
2021-12-15T10:33:35.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SegmentReductionHelper(test.TestCase):
def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
# Add a non-zero imaginary component to complex types.
if dtype.is_complex:
np_values -= 1j * np_values
return constant_op.constant(
np_values, shape=input_shape, dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None,
initial_value=0):
if not x.size:
return np.array([])
indices = np.asarray(indices)
if num_segments is None:
num_segments = indices[-1] + 1
output = [None] * num_segments
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if (output[index] is not None) and op1 == np.max:
for j in range(0, output[index].shape[0]):
output[index][j] = op1([output[index][j], x_flat[i][j]])
elif output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
    # zero initialize values that are still uncalculated.
initial_value_slice = np.ones(slice_shape) * initial_value
output = [o if o is not None else initial_value_slice for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
def _sqrt_n_reduce_op(self, x):
return x[0] / np.sqrt(x[1]) if isinstance(x, tuple) else x
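# Illustrative note (not part of the original test file): the ops under test group
# consecutive rows that share a segment id and reduce them, e.g. for segment_sum
#   data        = [[1, 2], [3, 4], [5, 6]]
#   segment_ids = [0, 0, 1]
#   result      = [[4, 6], [5, 6]]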
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(np.minimum, None, math_ops.segment_min),
(np.maximum, None, math_ops.segment_max)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, math_ops.segment_sum),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
            # `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
math_ops.segment_sum(data=tf_x, segment_ids=indices)
def testSegmentIdsSize(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape)
indices = [0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
s.eval()
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, 1]
result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsGreaterThanZero(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [1, 1, 2, 2]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsHole(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 3, 3]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted."):
s.eval()
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing"):
s.eval()
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly "
"because 'segment_ids' input is not sorted."):
s.eval()
def testSegmentIdsInvalid4(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentIdsInvalid5(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -2]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [
math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
math_ops.segment_max
]:
with self.test_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
class UnsortedSegmentTest(SegmentReductionHelper):
def __init__(self, methodName='runTest'):
# Each item is np_op1, np_op2, tf_op, initial_value functor
self.ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.unsorted_segment_mean, lambda t: 0),
(self._mean_cum_op, self._sqrt_n_reduce_op,
math_ops.unsorted_segment_sqrt_n, lambda t: 0),
(np.ndarray.__mul__, None,
math_ops.unsorted_segment_prod, lambda t: 1),
(np.minimum, None,
math_ops.unsorted_segment_min, lambda t: t.max),
(np.maximum, None,
math_ops.unsorted_segment_max, lambda t: t.min)]
# A subset of ops has been enabled for complex numbers
self.complex_ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0)]
self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32,
dtypes_lib.float64]
self.all_dtypes = (self.differentiable_dtypes +
[dtypes_lib.bfloat16,
dtypes_lib.int64, dtypes_lib.int32,
dtypes_lib.complex64, dtypes_lib.complex128])
super(UnsortedSegmentTest, self).__init__(methodName=methodName)
def testValues(self):
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
tf_x, np_x = self._input(shape, dtype=dtype)
for use_gpu in [True, False]:
with self.test_session(use_gpu=True):
for np_op1, np_op2, tf_op, init_op in ops_list:
# sqrt_n doesn't support integers
if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
continue
# todo(philjd): enable this test once real_div supports bfloat16
if (np_op2 in [self._sqrt_n_reduce_op, self._mean_reduce_op] and
dtype == dtypes_lib.bfloat16):
continue
np_ans = self._segmentReduce(
indices, np_x, np_op1, np_op2, num_segments=num_segments,
initial_value=init_op(dtype))
s = tf_op(tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = s.eval()
if dtype is dtypes_lib.bfloat16:
tf_ans = tf_ans.astype(np.float32)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testNumSegmentsTypes(self):
dtypes = [dtypes_lib.int32, dtypes_lib.int64]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.test_session(use_gpu=True):
tf_x, np_x = self._input(shape)
num_segments_constant = constant_op.constant(
num_segments, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
s = math_ops.unsorted_segment_sum(
data=tf_x,
segment_ids=indices,
num_segments=num_segments_constant)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testGradients(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
# test CPU and GPU as tf.gather behaves differently on each device
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
for _, _, tf_op, _ in ops_list:
tf_x, np_x = self._input(shape, dtype=dtype)
s = tf_op(tf_x, indices, num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testProdGrad(self):
# additional test for the prod gradient to ensure correct handling of zeros
values = np.array([0, 0, 1, 0, 2, 2, 3, 3, 3], dtype=np.float32)
indices = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
indices_neg = np.array([-1, 0, 0, -1, 1, 1, -1, 2, 2], dtype=np.int32)
values_tf = constant_op.constant(values)
# ground truth partial derivatives
gradients_indices = np.zeros((9, 3), dtype=np.float32)
gradients_indices_neg = np.zeros((9, 3), dtype=np.float32)
# the derivative w.r.t. to the other segments is zero, so here we only
# explicitly set the grad values for the corresponding segment
gradients_indices[range(9), indices] = [0, 0, 0, 4, 0, 0, 9, 9, 9]
gradients_indices_neg[range(9), indices_neg] = [0, 1, 0, 0, 2, 2, 0, 3, 3]
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
for ind, grad_gt in [(indices, gradients_indices),
(indices_neg, gradients_indices_neg)]:
s = math_ops.unsorted_segment_prod(values_tf,
constant_op.constant(ind), 3)
jacob_t, jacob_n = gradient_checker.compute_gradient(
values_tf, (9,), s, (3,), x_init_value=values, delta=1)
self.assertAllClose(jacob_t, jacob_n)
self.assertAllClose(jacob_t, grad_gt)
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
# NB: for this test to work, indices must be valid for SegmentSum, namely
# it must be sorted, the indices must be contiguous, and num_segments
# must be max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
for dtype in self.differentiable_dtypes:
with self.test_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
unsorted_jacob_t, unsorted_jacob_n = (
gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
[num_segments, num_cols],
x_init_value=np_x, delta=1))
# Results from SegmentSum
sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
sorted_s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
# Note: With PR #13055 a negative index will be ignored silently.
with self.test_session(use_gpu=False):
for bad in [[2]], [[7]]:
unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
unsorted.eval()
def testEmptySecondDimension(self):
dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
np.complex64, np.complex128]
with self.test_session(use_gpu=True):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
def testDropNegatives(self):
    # Note: the test replaces segment id 8 with -1 in the indices and zeroes
    # out the corresponding rows of the numpy reference result.
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
with self.test_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
# Replace np_ans[8] with 0 for the value
np_ans[8:] = 0
# Replace 8 with -1 in indices
np.place(indices, indices == 8, [-1])
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
return (constant_op.constant(
indices, dtype=dtypes_lib.int32), indices, a, b)
def _sparseSegmentReduce(self,
x,
indices,
segment_indices,
op1,
op2=None,
num_segments=None):
return self._segmentReduce(
segment_indices, x[indices], op1, op2, num_segments=num_segments)
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32
]
mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean)]
n = 400
shape = [n, 2]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
with self.test_session(use_gpu=False):
tf_indices, np_indices, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if tf_op == math_ops.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
          # `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsHole(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testWithNumSegments(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum_with_num_segments),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean_with_num_segments)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
num_segments = 5
with self.test_session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(
np_x,
tf_indices,
segment_indices,
np_op1,
np_op2,
num_segments=num_segments)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testWithEmptySegments(self):
tf_x = constant_op.constant([], shape=[0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments
]
segment_indices = []
tf_indices = []
num_segments = 5
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = s.eval()
self.assertAllClose(np.zeros([5, 4]), tf_ans)
def testSegmentIdsGreaterThanZero(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testValid(self):
# Baseline for the test*Invalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
s.eval()
def testIndicesInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[1\] == -1 out of range \[0, 10\)"):
s.eval()
def testIndicesInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[3\] == 10 out of range \[0, 10\)"):
s.eval()
def testSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing"):
s.eval()
def testSegmentsInvalid3(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted"):
s.eval()
def testSegmentsInvalid4(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 2\), possibly because "
"'segment_ids' input is not sorted"):
s.eval()
def testSegmentsInvalid6(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentsInvalid7(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentWithNumSegmentsValid(self):
# Baseline for the test*WithNumSegmentsInvalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
s.eval()
def testSegmentWithNumSegmentsInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 5]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
with self.assertRaisesOpError("segment ids must be < num_segments"):
s.eval()
def testSegmentWithNumSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = -2
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
with self.assertRaisesRegexp(
ValueError, "Cannot specify a negative value for num_segments"):
tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
with self.test_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testGradientWithEmptySegmentsAtEnd(self):
shape = [10, 4]
num_segments = 5
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]:
with self.test_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [5, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
s.eval()
def testGradientIndicesInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
s.eval()
def testGradientIndicesInvalid2(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
s.eval()
def testGradientSegmentsInvalid1(self):
tf_x, _ = self._input(
[3, 4], dtype=dtypes_lib.float32) # expecting 3 segments
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 1, 4] # 5 segments
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError("Invalid number of segments"):
s.eval()
def testGradientSegmentsInvalid2(self):
tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
s.eval()
def testGradientSegmentsInvalid3(self):
tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
s.eval()
def testGradientSegmentsInvalid4(self):
tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
s.eval()
class SegmentReductionOpBenchmark(test.Benchmark):
outer_dim_options = [2**x for x in range(9, 14, 2)]
ratio_options = [2**x for x in range(1, 6, 2)]
inner_dim_options = [2**x for x in range(9, 14, 2)]
# randomly generated sizes with less alignments
inner_dim_options += [
1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584
]
dtype_options = [np.float32, np.float64]
options = (outer_dim_options, ratio_options, inner_dim_options, dtype_options)
# pylint: disable=g-long-lambda
op_functors = [lambda vc, vs, seg_ids:
("sorted", math_ops.segment_sum(vc, vs)),
lambda vc, vs, seg_ids:
("unsorted",
math_ops.unsorted_segment_sum(vc, vs, seg_ids[-1]+1))]
# pylint: enable=g-long-lambda
repeat = 10
def _npTypeToStr(self, t):
if t == np.float32:
return "fp32"
if t == np.float64:
return "fp64"
def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):
output_outer_dim = int(outer_dim / ratio)
const = np.random.randint(5, size=(outer_dim, inner_dim))
seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))
vs = variables.Variable(seg_ids.astype(np.int32))
with ops.device("/gpu:0"):
vc = variables.Variable(const.astype(dtype))
name, op = op_functor(vc, vs, seg_ids)
with session.Session() as sess:
variables.global_variables_initializer().run()
r = self.run_op_benchmark(
sess,
op,
min_iters=self.repeat,
name="_".join(
map(str,
[name, outer_dim, ratio, inner_dim,
self._npTypeToStr(dtype)])))
return name, r["wall_time"]
def benchmarkSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[0]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
def benchmarkUnsortedSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[1]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
if __name__ == "__main__":
test.main()
| 40.37594
| 80
| 0.635089
|
794be115f18e678b4cce708fed1325cc0bcf8279
| 4,244
|
py
|
Python
|
utils/model.py
|
Shenmue-Mods/ShenmueDKPy
|
bab62b86ed1c5b32703c39cc8109cf4f24a7152e
|
[
"MIT"
] | null | null | null |
utils/model.py
|
Shenmue-Mods/ShenmueDKPy
|
bab62b86ed1c5b32703c39cc8109cf4f24a7152e
|
[
"MIT"
] | null | null | null |
utils/model.py
|
Shenmue-Mods/ShenmueDKPy
|
bab62b86ed1c5b32703c39cc8109cf4f24a7152e
|
[
"MIT"
] | null | null | null |
import copy
from enum import Enum
from utils.vector import *
from utils.matrix import *
from utils.math import *
class BoneID(Enum):
Root = 0
Spine = 1
Hip = 14
UpperLeg_R = 16
LowerLeg_R = 17
Foot_R = 18
FootToes_R = 19
UpperLeg_L = 21
LowerLeg_L = 22
Foot_L = 23
FootToes_L = 24
Shoulder_R = 4
UpperArm_R = 5
LowerArm_R = 6
Wrist_R = 7
RiggedHand_R = 8
Hand_R = 191
HandIndexUpper_R = 28
HandIndexLower_R = 29
HandFingerUpper_R = 31
HandFingerLower_R = 32
HandThumb_R = 25
Shoulder_L = 9
UpperArm_L = 10
LowerArm_L = 11
Wrist_L = 12
RiggedHand_L = 13
Hand_L = 190
HandIndexUpper_L = 43
HandIndexLower_L = 44
HandFingerUpper_L = 46
HandFingerLower_L = 47
HandThumb_L = 40
Head = 189
Jaw = 188
Unknown63 = 63
Null = 0xFF
class IKBoneID(Enum):
Root = 0
Hip = 1
Unknown4 = 4
UpperLeg_R = 5
FootIKTarget_R = 8
Foot_R = 9
Unknown10 = 10
Unknown11 = 11
UpperLeg_L = 12
Unknown14 = 14
FootIKTarget_L = 15
Foot_L = 16
Unknown17 = 17
Torso = 18
Unknown19 = 19
UpperTorsoIKTarget = 20
Unknown21 = 21
Unknown22 = 22
HeadLookAtTarget = 23
Unknown24 = 24
Shoulder_R = 25
Arm_R = 26
Unknown27 = 27
Unknown28 = 28
HandIKTarget_R = 29
Hand_R = 30
Shoulder_L = 31
Arm_L = 32
Unknown34 = 34
HandIKTarget_L = 33
Unknown35 = 35
Hand_L = 36
Unknown37 = 37
Unknown38 = 38
Unknown39 = 39
Unknown40 = 40
Unknown41 = 41
Unknown42 = 42
Null = 0xFF
class ModelNode:
def __init__(self, model):
self.model = model
self.index = 0
self.id = 0
self.position = Vector3()
self.rotation = Vector3()
self.scale = Vector3()
self.center = Vector3()
self.radius = 0.0
self.child = None
self.next_sibling = None
self.parent = None
self.name = ''
def get_bone_id(self):
return BoneID(self.id & 0xFF)
def get_all_nodes(self, include_siblings=True, include_children=True):
result = [self]
if self.child is not None and include_children:
result.extend(self.child.get_all_nodes())
if self.next_sibling is not None and include_siblings:
result.extend(self.next_sibling.get_all_nodes())
return result
def get_global_position(self) -> Vector3:
matrix = self.get_transform_matrix()
pos = self.center.transformed(matrix)
return pos
def get_transform_matrix_self(self) -> Matrix4:
rot_x = Matrix4.create_from_axis_angle(Vector3.unit_x(), degree_to_radian(self.rotation.x))
rot_y = Matrix4.create_from_axis_angle(Vector3.unit_y(), degree_to_radian(self.rotation.y))
rot_z = Matrix4.create_from_axis_angle(Vector3.unit_z(), degree_to_radian(self.rotation.z))
scale = Matrix4.create_scale(self.scale)
translate = Matrix4.create_translation(self.position)
m = scale * rot_x * rot_y * rot_z * translate
return copy.deepcopy(m)
def get_transform_matrix(self) -> Matrix4:
matrix = Matrix4.identity()
if self.parent is not None:
matrix = self.parent.get_transform_matrix()
return self.get_transform_matrix_self() * matrix
def get_centered_transform_matrix_self(self) -> Matrix4:
rot_x = Matrix4.create_from_axis_angle(Vector3.unit_x(), self.rotation.x)
rot_y = Matrix4.create_from_axis_angle(Vector3.unit_y(), self.rotation.y)
rot_z = Matrix4.create_from_axis_angle(Vector3.unit_z(), self.rotation.z)
scale = Matrix4.create_scale(self.scale)
translate = Matrix4.create_translation(self.position)
center = Matrix4.create_translation(self.center)
return scale * rot_x * rot_y * rot_z * translate * center
def get_centered_transform_matrix(self) -> Matrix4:
matrix = Matrix4.identity()
if self.parent is not None:
matrix = self.parent.get_centered_transform_matrix()
return self.get_centered_transform_matrix_self() * matrix
class Model:
def __init__(self):
self.root_node = None
| 26.525
| 99
| 0.64114
|
794be14f343573f684e9a703309a329dd2d8c7ef
| 622
|
py
|
Python
|
qa/rpc-tests/python-supcoinrpc/setup.py
|
Voyacoin/Voyacoin
|
4030c52983749f0e0ff3a20c0d67ced3f5b35b14
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/python-supcoinrpc/setup.py
|
Voyacoin/Voyacoin
|
4030c52983749f0e0ff3a20c0d67ced3f5b35b14
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/python-supcoinrpc/setup.py
|
Voyacoin/Voyacoin
|
4030c52983749f0e0ff3a20c0d67ced3f5b35b14
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
from distutils.core import setup
setup(name='python-voyacoinrpc',
version='0.1',
description='Enhanced version of python-jsonrpc for use with Voyacoin',
long_description=open('README').read(),
author='Jeff Garzik',
author_email='<jgarzik@exmulti.com>',
maintainer='Jeff Garzik',
maintainer_email='<jgarzik@exmulti.com>',
url='http://www.github.com/jgarzik/python-voyacoinrpc',
packages=['voyacoinrpc'],
classifiers=['License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', 'Operating System :: OS Independent'])
| 38.875
| 139
| 0.686495
|
794be22f2fb6962fcac1ecae1265566bd3fe1c3f
| 7,229
|
py
|
Python
|
scripts/jenkins/build.py
|
farrepa/cla_frontend
|
a789ad96cf91daf755784e3a5ed11350a85bddf6
|
[
"MIT"
] | null | null | null |
scripts/jenkins/build.py
|
farrepa/cla_frontend
|
a789ad96cf91daf755784e3a5ed11350a85bddf6
|
[
"MIT"
] | null | null | null |
scripts/jenkins/build.py
|
farrepa/cla_frontend
|
a789ad96cf91daf755784e3a5ed11350a85bddf6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import logging
import os
import random
import signal
import subprocess
import sys
from Queue import Queue
logging.basicConfig(level="INFO")
log = logging.getLogger(__name__)
PROJECT_NAME = "cla_frontend"
background_processes = Queue()
def parse_args():
parser = argparse.ArgumentParser(description="Build project ready for testing by Jenkins.")
parser.add_argument("envname", help="e.g. integration, production, etc.")
parser.add_argument(
"--backend-hash",
default="",
help="cla_backend *commit hash* to run tests against; " "defaults to latest develop branch commit",
)
parser.add_argument("--skip-tests", nargs="*", choices=("django", "karma"), help="skip tests: django, karma")
return parser.parse_args()
def run(command, background=False, **kwargs):
if "shell" not in kwargs:
kwargs["shell"] = True
log.info("Running {command}".format(command=command))
if background:
process = subprocess.Popen(command, **kwargs)
background_processes.put(process)
return process
return_code = subprocess.call(command, **kwargs)
if return_code:
sys.exit(return_code)
def make_virtualenv(env):
venv_path = "/tmp/jenkins/envs/{project}-{env}".format(project=PROJECT_NAME, env=env)
if not os.path.isdir(venv_path):
run("/usr/local/bin/virtualenv {path}".format(path=venv_path))
return venv_path
def install_dependencies(venv_path):
run("{venv}/bin/pip install -U setuptools pip wheel".format(venv=venv_path))
run("{venv}/bin/pip install -r requirements/jenkins.txt".format(venv=venv_path))
def clean_pyc():
run("find . -name '*.pyc' -delete")
def wait_until_available(url):
wget = run(
("wget {url} -O/dev/null -t 20 --retry-connrefused --waitretry=2 " "-T 60").format(url=url), background=True
)
wget.wait()
def remove_old_static_assets():
run("rm -rf cla_frontend/assets-src/vendor")
# run('rm -rf cla_frontend/assets')
def update_static_assets(venv_path):
run("%s/bin/python manage.py builddata constants_json" % venv_path)
bundle = run("bundle install", background=True)
npm_prune = run("npm prune", background=True)
bower_prune = run("bower prune", background=True)
npm_prune.wait()
npm = run("npm install", background=True)
bower_prune.wait()
bower = run("bower install", background=True)
npm.wait()
npm_update = run("npm update", background=True)
npm_update.wait()
bower.wait()
bundle.wait()
gulp = run("gulp build", background=True)
gulp.wait()
def run_python_tests(venv_path):
return run(
("%s/bin/python manage.py jenkins --coverage-rcfile=.coveragerc " "--settings=cla_frontend.settings.jenkins")
% venv_path,
background=True,
)
def _port(start_from=8100, up_to=8299):
port = random.randint(start_from, up_to)
while True:
yield port
port += 1
gen_port = _port()
def run_server(env, backend_hash, jenkins_build_path):
venv = "/tmp/jenkins/envs/cla_backend-%s" % env
project_dir = "/srv/jenkins/shared-backend/%s-%s" % (PROJECT_NAME, env)
if not os.path.isdir(project_dir):
os.makedirs(project_dir)
if not os.path.isdir(os.path.join(project_dir, ".git")):
run(
"cd {project_dir} && git clone https://github.com/ministryofjustice/cla_backend.git .".format(
project_dir=project_dir
)
)
if backend_hash:
run(
"cd {project_dir} && git fetch --prune && git checkout -f {backend_hash}".format(
project_dir=project_dir, backend_hash=backend_hash
)
)
else:
run(
"cd {project_dir} && git fetch --prune && git checkout develop && git pull".format(project_dir=project_dir)
)
backend_port = next(gen_port)
os.environ["CLA_BACKEND_PORT"] = str(backend_port)
os.environ["BACKEND_TEST_DB_SUFFIX"] = "4%s" % PROJECT_NAME
fixtures = (
"initial_groups.json",
"kb_from_knowledgebase.json",
"initial_category.json",
"test_provider.json",
"initial_mattertype.json",
"test_auth_clients.json",
"initial_media_codes.json",
"test_rotas.json",
"test_casearchived.json",
"test_providercases.json",
"test_provider_allocations.json",
"initial_complaint_categories",
)
log_stdout = os.path.join(jenkins_build_path, "cla_backend.stdout.log")
log_stderr = os.path.join(jenkins_build_path, "cla_backend.stderr.log")
run(
(
"cd {project_dir} && "
"{venv}/bin/python manage.py testserver {fixtures} "
"--addrport {port} --noinput "
"--settings=cla_backend.settings.jenkins "
"1> {log_stdout} "
"2> {log_stderr}"
).format(
project_dir=project_dir,
venv=venv,
fixtures=" ".join(fixtures),
port=backend_port,
log_stdout=log_stdout,
log_stderr=log_stderr,
),
background=True,
)
def run_integration_tests(venv_path, jenkins_build_path, skip_tests):
run_karma = "karma" not in skip_tests
wait_until_available("http://localhost:{port}/admin/".format(port=os.environ.get("CLA_BACKEND_PORT")))
frontend_port = next(gen_port)
os.environ["CLA_FRONTEND_PORT"] = str(frontend_port)
os.path.join(jenkins_build_path, "cla_frontend.stdout.log")
os.path.join(jenkins_build_path, "cla_frontend.stderr.log")
if run_karma:
run("npm run test-single-run", background=True)
def kill_child_processes(pid, sig=signal.SIGTERM):
ps_cmd = subprocess.Popen("ps -o pid --ppid {0} --noheaders".format(pid), shell=True, stdout=subprocess.PIPE)
ps_out = ps_cmd.stdout.read()
ps_cmd.wait()
for pid_str in ps_out.split("\n")[:-1]:
os.kill(int(pid_str), sig)
def kill_all_background_processes():
while not background_processes.empty():
process = background_processes.get()
try:
kill_child_processes(process.pid)
process.kill()
except OSError:
pass
def main():
try:
jenkins_workspace_path = os.environ["WORKSPACE"]
jenkins_build_path = os.path.join(jenkins_workspace_path, "..", "builds", os.environ["BUILD_NUMBER"])
jenkins_build_path = os.path.abspath(jenkins_build_path)
args = parse_args()
        skip_tests = set(args.skip_tests or [])
venv_path = make_virtualenv(args.envname)
install_dependencies(venv_path)
remove_old_static_assets()
update_static_assets(venv_path)
clean_pyc()
python_tests = None
if "django" not in skip_tests:
python_tests = run_python_tests(venv_path)
if {"karma"} - skip_tests:
run_server(args.envname, args.backend_hash, jenkins_build_path)
if python_tests:
python_tests.wait()
run_integration_tests(venv_path, jenkins_build_path, skip_tests)
finally:
kill_all_background_processes()
if __name__ == "__main__":
main()
| 29.627049
| 119
| 0.647392
|
794be2646df69c79b9f0f417383ba1f2d981a69e
| 2,684
|
py
|
Python
|
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0102.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0102.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0102.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name   : Use gs_guc reload to set unix_socket_group to an empty value
Description : 1. Check the default value of unix_socket_group;
                source /opt/opengauss810/env;
                gs_guc check -D /opt/opengauss810/cluster/dn1
                -c unix_socket_group
              2. Use gs_guc reload to set unix_socket_group to an empty value;
                gs_guc reload -D /opt/opengauss810/cluster/dn1
                -c "unix_socket_group=' '"
              3. Restart the cluster so that the change takes effect.
Expect      : 1. The default value is displayed;
              2. The parameter change fails;
              3. The restart fails.
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
COMMONSH = CommonSH('PrimaryDbUser')
class Deletaduit(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.constant = Constant()
self.log.info('==Connectionauthentication_Case0102.py start==')
self.rootNode = Node()
self.dbUserNode1 = Node(node='PrimaryDbUser')
status = COMMONSH.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
def test_startdb(self):
self.log.info("查询该参数默认值")
result = COMMONSH.execute_gsguc('check', ' ', 'unix_socket_group')
self.assertTrue(result)
self.log.info("设置unix_socket_group为空值,重启使其生效")
result = COMMONSH.execute_gsguc('reload',
self.constant.GSGUC_SUCCESS_MSG,
f'unix_socket_group=\' \'')
self.assertTrue(result)
self.log.info("重启使其生效,并校验预期结果")
result = COMMONSH.restart_db_cluster()
self.assertFalse(result)
def tearDown(self):
self.log.info("恢复默认值")
result = COMMONSH.execute_gsguc('set', self.constant.GSGUC_SUCCESS_MSG,
f'unix_socket_group=\'\'')
self.assertTrue(result)
COMMONSH.restart_db_cluster()
status = COMMONSH.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
self.log.info('==Connectionauthentication_Case0102.py finish==')
| 36.27027
| 84
| 0.649404
|
794be2e0d369670cd783faf88664fcc7b8566548
| 3,824
|
py
|
Python
|
integration_tests/test_suites/daemon-test-suite/test_memory.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
integration_tests/test_suites/daemon-test-suite/test_memory.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
integration_tests/test_suites/daemon-test-suite/test_memory.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import os
import time
from contextlib import contextmanager
import objgraph
from dagster import RunRequest, pipeline, repository, schedule, sensor, solid
from dagster.core.test_utils import instance_for_test
from dagster.core.workspace.load_target import PythonFileTarget
from dagster.daemon.controller import daemon_controller_from_instance
@solid()
def foo_solid(_):
pass
@pipeline
def foo_pipeline():
foo_solid()
@pipeline
def other_foo_pipeline():
foo_solid()
@schedule(
pipeline_name="foo_pipeline",
cron_schedule="*/1 * * * *",
)
def always_run_schedule(_context):
return {}
@sensor(pipeline_name="foo_pipeline", minimum_interval_seconds=10)
def always_on_sensor(_context):
return RunRequest(run_key=None, run_config={}, tags={})
@repository
def example_repo():
return [foo_pipeline, always_run_schedule, always_on_sensor]
@contextmanager
def get_example_repository_location(instance):
load_target = workspace_load_target()
origin = load_target.create_origins()[0]
with origin.create_single_location(instance) as location:
yield location
def workspace_load_target():
return PythonFileTarget(
python_file=__file__,
attribute=None,
working_directory=os.path.dirname(__file__),
location_name=None,
)
@contextmanager
def get_example_repo(instance):
with get_example_repository_location(instance) as location:
yield location.get_repository("example_repo")
def test_no_memory_leaks():
with instance_for_test(
overrides={
"run_coordinator": {
"module": "dagster.core.run_coordinator",
"class": "QueuedRunCoordinator",
},
"run_launcher": {
"class": "DefaultRunLauncher",
"module": "dagster.core.launcher.default_run_launcher",
"config": {
"wait_for_processes": False,
},
},
}
) as instance:
with get_example_repo(instance) as repo:
external_schedule = repo.get_external_schedule("always_run_schedule")
external_sensor = repo.get_external_sensor("always_on_sensor")
instance.start_schedule(external_schedule)
instance.start_sensor(external_sensor)
with daemon_controller_from_instance(
instance,
workspace_load_target=workspace_load_target(),
wait_for_processes_on_exit=True,
) as controller:
start_time = time.time()
growth = objgraph.growth(
limit=10,
filter=lambda obj: inspect.getmodule(obj)
and "dagster" in inspect.getmodule(obj).__name__,
)
while True:
time.sleep(30)
controller.check_daemon_threads()
controller.check_daemon_heartbeats()
growth = objgraph.growth(
limit=10,
filter=lambda obj: inspect.getmodule(obj)
and "dagster" in inspect.getmodule(obj).__name__,
)
if not growth:
print( # pylint: disable=print-call
f"Memory stopped growing after {int(time.time() - start_time)} seconds"
)
break
if (time.time() - start_time) > 300:
raise Exception(
"Memory still growing after 5 minutes. Most recent growth: "
+ str(growth)
)
print("Growth: " + str(growth)) # pylint: disable=print-call
| 29.19084
| 99
| 0.592573
|
794be52fff63e35939e5a316fc4167e0ee9f142e
| 769
|
py
|
Python
|
notifications/sms.py
|
btsimon97/ble-surveillance
|
a76d8f9f0e40572dd2e638279dd10635c4d8b702
|
[
"MIT"
] | 1
|
2021-05-19T18:39:53.000Z
|
2021-05-19T18:39:53.000Z
|
notifications/sms.py
|
btsimon97/ble-surveillance
|
a76d8f9f0e40572dd2e638279dd10635c4d8b702
|
[
"MIT"
] | null | null | null |
notifications/sms.py
|
btsimon97/ble-surveillance
|
a76d8f9f0e40572dd2e638279dd10635c4d8b702
|
[
"MIT"
] | null | null | null |
import os
import configparser
from twilio.rest import Client
# this function sends the final sms
def send_sms_message(sms_config, recipient, msg):
account_sid = sms_config.get('sms', 'twilio_account_sid')
auth_token = sms_config.get('sms', 'twilio_auth_token')
sender = sms_config.get('sms', 'sender_phone_number')
client = Client(account_sid, auth_token)
return client.messages.create(
body=msg,
from_=sender,
to=recipient
)
# External function called by main notification server code.
def send_sms(sms_config, sms_type, channel_data, bluetooth_devices=None):
if sms_type == "detection":
for recipient in channel_data['recipients']:
send_sms_message(sms_config, recipient, bluetooth_devices)
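# --- Usage sketch (not part of the original module; names below are assumptions) ---
# A minimal illustration of how the notification server mentioned above might call
# send_sms() for a "detection" event. The [sms] keys mirror what send_sms_message()
# reads; the credentials, sender and recipient numbers are placeholders pulled from
# environment variables, so nothing is sent unless they are actually set.
if __name__ == "__main__":
    demo_config = configparser.ConfigParser()
    demo_config["sms"] = {
        "twilio_account_sid": os.environ.get("TWILIO_ACCOUNT_SID", ""),
        "twilio_auth_token": os.environ.get("TWILIO_AUTH_TOKEN", ""),
        "sender_phone_number": os.environ.get("TWILIO_SENDER", ""),
    }
    demo_channel = {"recipients": ["+15555550100"]}  # hypothetical recipient
    if all(demo_config["sms"].values()):
        send_sms(demo_config, "detection", demo_channel,
                 bluetooth_devices="Unknown BLE device detected")
    else:
        print("Set TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN and TWILIO_SENDER to send a real SMS.")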
| 32.041667
| 73
| 0.724317
|
794be731800c6052e7592714300f0b8850b56d2f
| 674
|
py
|
Python
|
examples/copy_existing_event.py
|
ftnext/connpass-ops-playbook
|
6f57380c92f37f998092499dab2974de45a4e6b9
|
[
"MIT"
] | null | null | null |
examples/copy_existing_event.py
|
ftnext/connpass-ops-playbook
|
6f57380c92f37f998092499dab2974de45a4e6b9
|
[
"MIT"
] | 7
|
2021-08-21T14:01:10.000Z
|
2021-11-08T14:48:36.000Z
|
examples/copy_existing_event.py
|
ftnext/connpass-ops-playbook
|
6f57380c92f37f998092499dab2974de45a4e6b9
|
[
"MIT"
] | null | null | null |
"""Firefoxでconnpassにある既存のイベントをコピーするまでを自動化"""
import argparse
from connpass_ops_playbook.decorators import logged_in, using_firefox
from connpass_ops_playbook.plays import copy_existing_event
from helium import kill_browser
@using_firefox
@logged_in
def show_copy_popup(url):
copy_existing_event(url, human_confirms=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("url")
args = parser.parse_args()
show_copy_popup(args.url)
while True:
user_input = input("qで終了: ")
cleaned_input = user_input.rstrip().lower()
if cleaned_input == "q":
kill_browser()
break
| 23.241379
| 69
| 0.719585
|
794be89d9018b51e1d65a462a261d06ce6689792
| 7,315
|
py
|
Python
|
MAIN.py
|
Mrprogrammernobrainz/informaticscalc
|
5dbdd29128cb45f59efac7cc407f2e474b454ccd
|
[
"Unlicense"
] | null | null | null |
MAIN.py
|
Mrprogrammernobrainz/informaticscalc
|
5dbdd29128cb45f59efac7cc407f2e474b454ccd
|
[
"Unlicense"
] | null | null | null |
MAIN.py
|
Mrprogrammernobrainz/informaticscalc
|
5dbdd29128cb45f59efac7cc407f2e474b454ccd
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import random as rand
import time as tm
def plus():
print("Вы выбрали 'Сложение', ответ будет дан в десятичной\nУкажите первое число")
firstnum = int(input())
print("Укажите систему счисления первого числа")
firstnumsys = int(input())
print("Укажите второе число")
secnum = int(input())
print("Укажите систему счисления второго числа")
secnumsys = int(input())
print(f"Вы хотите сложить число {firstnum} в {firstnumsys} сс с {secnum} в {secnumsys} сс")
actualans = 0
    # convert firstnum to base 10
ans = 0
save1 = firstnum
save2 = secnum
for i in range(len(str(firstnum))):
currentnum = firstnum % 10
ans = ans + currentnum * firstnumsys ** i
firstnum = firstnum // 10
    # ans is firstnum in base 10
    actualans = actualans + ans
    # reset ans and convert secnum to base 10 as well
    ans = 0
    for i in range(len(str(secnum))):
        currentnum = secnum % 10
        ans = ans + currentnum * secnumsys ** i
        secnum = secnum // 10
    actualans = actualans + ans
tm.sleep(1.5)
print(f"Сумма {save1} в {firstnumsys} сс и {save2} в {secnumsys} сс равна {actualans} в 10 сс\n-------------------------")
tm.sleep(3)
invite()
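# --- Illustration (not part of the menu flow above) -------------------------
# A compact restatement of the digit-by-digit base conversion that plus() and
# translate() perform, written as a standalone helper so the arithmetic is
# explicit. Worked example: 101 in base 2 is 5 and 11 in base 2 is 3, so their
# decimal sum is 8. The helper is defined here only for reference and is not
# called by the program.
def _to_decimal_sketch(number, base):
    # interpret the decimal digits of `number` as digits written in `base`
    result = 0
    for power, digit in enumerate(reversed(str(number))):
        result += int(digit) * base ** power
    return result
assert _to_decimal_sketch(101, 2) + _to_decimal_sketch(11, 2) == 8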
def translate():
wanttoexit4 = ""
print("Вы выбрали 'Перевод'\nУкажите систему счисления, в которой находится ваше число изначально")
beginsys = int(input())
print("Укажите систему, в которую вы хотите перевести")
endsys = int(input())
print("Укажите само число")
translatenum = int(input())
print(f"Вы собираетесь перевести число {translatenum} из {beginsys} сс в {endsys}")
save = translatenum
if endsys == 10:
ans = 0
for i in range(len(str(translatenum))):
currentnum = translatenum % 10
ans = ans + currentnum * beginsys ** i
translatenum = translatenum // 10
print(f"------------------------- \nЧисло {save} в {beginsys} cc равно числу {ans} в {endsys} сс")
elif beginsys == 10:
ansmassive = []
while translatenum > endsys:
if translatenum <= endsys:
break
ansmassive.append(translatenum % endsys)
translatenum = translatenum // endsys
ansmassive.append(translatenum)
ansmassive.reverse()
answercycle = ""
for i in range(len(ansmassive)):
answercycle = answercycle + str(ansmassive[i])
print(f"------------------------- \nЧисло {save} в {beginsys} cc равно числу {answercycle} в {endsys} сс\n-------------------------")
elif beginsys != 10 and endsys != 10:
ans = 0
for i in range(len(str(translatenum))):
currentnum = translatenum % 10
ans = ans + currentnum * beginsys ** i
translatenum = translatenum // 10
        # the number in base 10 equals ans
ansmassive = []
while ans > endsys:
if ans <= endsys:
break
ansmassive.append(ans % endsys)
ans = ans // endsys
ansmassive.append(ans)
ansmassive.reverse()
answercycle = ""
for i in range(len(ansmassive)):
answercycle = answercycle + str(ansmassive[i])
print(
f"------------------------- \nЧисло {save} в {beginsys} cc равно числу {answercycle} в {endsys} сс\n-------------------------")
tm.sleep(3.5)
invite()
def navigationone(x):
if x == 1:
translate()
if x == 2:
plus()
else:
print("Введенного вами вариантиа не существует\n")
tm.sleep(0.8)
systemnums()
def systemnums():
print("Вы выбрали 'Действие со системами счисления'\nЧто именно вы хотите сделать? Укажите соответствующий номер: \n 1.Перевод \n 2.Сложение\nВнимание! Программа корректно работает только с системами ниже одиннадцатиричной! ")
chooseslideone = int(input())
navigationone(chooseslideone)
def builddiagramm():
slidezerochoise = 4
wanttoexit4 = ""
while wanttoexit4 != "Да" or wanttoexit4 != "ДА" or wanttoexit4 != 'да':
if wanttoexit4 == "ДА" or wanttoexit4 == "Да" or wanttoexit4 == 'да':
break
diagrammorgraph = 1
if diagrammorgraph == 1:
print("Вы выбрали построить круговую диаграмму")
print("Введите название диаграммы")
namediagr = input()
print("Введите в одну строчку наименования секторов в одну строчку")
diagrammnames = list(map(str, input().split()))
print("Введите в одну строчку значения секторов\nВводить требуется в том же порядке, что и наименования")
diagrammvalues = list(map(int, input().split()))
while len(diagrammnames) != len(diagrammvalues):
print("Один из ваших наборов данных длиннее другого, введите сначала наименования, а заетм и числа заново")
diagrammnames = list(map(str, input().split()))
diagrammvalues = list(map(int, input().split()))
plt.figure(num=1, figsize=(6, 6))
plt.axes(aspect=1)
plt.title(namediagr, size=14)
plt.pie(diagrammvalues, labels=diagrammnames, shadow=True, autopct='%1.1f%%')
plt.show()
tm.sleep(2)
print("-------------------------\nВы хотите выйти в меню? Для ответа напишите 'Да' или 'Нет' ")
wanttoexit4 = input()
invite()
def randomnum():
slidezerochoise = 4
wanttoexit4 = ""
while wanttoexit4 != "Да" or wanttoexit4 != "ДА" or wanttoexit4 != 'да':
if wanttoexit4 == "ДА" or wanttoexit4 == "Да" or wanttoexit4 == 'да':
break
print("Вы выбрали 'Сгенерировать случайное число в диапозоне' \nНапишите в одну строчку два числа: начало диапозона и его конец")
startrandom, endrandom = map(int, input().split())
while endrandom < startrandom:
print("Конец диапозона меньше чем начало, введите числа заново")
startrandom, endrandom = map(int, input().split())
print(f"Сгенерированное число в диапозоне от {startrandom} до {endrandom} равно {rand.randint(startrandom, endrandom)}")
tm.sleep(1)
print("-------------------------\nВы хотите выйти в меню? Для ответа напишите 'Да' или 'Нет' ")
wanttoexit4 = input()
invite()
def navigation(chosenslide):
if chosenslide == 3:
randomnum()
elif chosenslide == 2:
builddiagramm()
elif chosenslide == 1:
systemnums()
else:
print("Введенного вами варианта не существует\n")
tm.sleep(0.8)
invite()
def invite():
global currentslide
currentslide = 0
    # create a variable that tracks the current slide
print(
"Выберите одну из доступных функций: \n1. Действия со системами счисления \n2. Посторить круговую диаграмму \n3. Сгенерировать случайное число в диапозоне")
print("------------------------- \nУкажите цифру желаемого варианта")
slidezerochoise = int(input())
navigation(slidezerochoise)
# done for now; will just trim content (30.20.2021)
invite()
| 41.327684
| 233
| 0.582502
|
794be8d67d22ab5de43e7db395b9bb4d2db4d70b
| 842
|
py
|
Python
|
testfixtures/__init__.py
|
foobacca/testfixtures
|
c4f893ec0ac40ffa998d97152639536f54c8436d
|
[
"MIT"
] | null | null | null |
testfixtures/__init__.py
|
foobacca/testfixtures
|
c4f893ec0ac40ffa998d97152639536f54c8436d
|
[
"MIT"
] | null | null | null |
testfixtures/__init__.py
|
foobacca/testfixtures
|
c4f893ec0ac40ffa998d97152639536f54c8436d
|
[
"MIT"
] | null | null | null |
class singleton(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return '<%s>' % self.name
__str__ = __repr__
not_there = singleton('not_there')
from testfixtures.comparison import (
Comparison, StringComparison, RoundComparison, compare, diff, RangeComparison
)
from testfixtures.tdatetime import test_datetime, test_date, test_time
from testfixtures.logcapture import LogCapture, log_capture
from testfixtures.outputcapture import OutputCapture
from testfixtures.resolve import resolve
from testfixtures.replace import Replacer, Replace, replace
from testfixtures.shouldraise import ShouldRaise, should_raise
from testfixtures.shouldwarn import ShouldWarn, ShouldNotWarn
from testfixtures.tempdirectory import TempDirectory, tempdir
from testfixtures.utils import wrap, generator
| 33.68
| 81
| 0.805226
|
794be912ca6c06a9f862dcbd698fbc353326eaec
| 16,573
|
py
|
Python
|
dask/dataframe/io/tests/test_sql.py
|
McToel/dask
|
24c31a2f2a7f390c21f1aa2dc4d8a407cb3cb850
|
[
"BSD-3-Clause"
] | null | null | null |
dask/dataframe/io/tests/test_sql.py
|
McToel/dask
|
24c31a2f2a7f390c21f1aa2dc4d8a407cb3cb850
|
[
"BSD-3-Clause"
] | null | null | null |
dask/dataframe/io/tests/test_sql.py
|
McToel/dask
|
24c31a2f2a7f390c21f1aa2dc4d8a407cb3cb850
|
[
"BSD-3-Clause"
] | null | null | null |
import io
from contextlib import contextmanager
import pytest
# import dask
from dask.dataframe.io.sql import read_sql_query, read_sql_table, read_sql
from dask.dataframe.utils import assert_eq
from dask.utils import tmpfile
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip("sqlalchemy")
pytest.importorskip("sqlite3")
np = pytest.importorskip("numpy")
data = """
name,number,age,negish
Alice,0,33,-5
Bob,1,40,-3
Chris,2,22,3
Dora,3,16,5
Edith,4,53,0
Francis,5,30,0
Garreth,6,20,0
"""
df = pd.read_csv(io.StringIO(data), index_col="number")
@pytest.fixture
def db():
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=True, if_exists="replace")
yield uri
def test_empty(db):
from sqlalchemy import Column, Integer, MetaData, Table, create_engine
with tmpfile() as f:
uri = "sqlite:///%s" % f
metadata = MetaData()
engine = create_engine(uri)
table = Table(
"empty_table",
metadata,
Column("id", Integer, primary_key=True),
Column("col2", Integer),
)
metadata.create_all(engine)
dask_df = read_sql_table(table.name, uri, index_col="id", npartitions=1)
assert dask_df.index.name == "id"
# The dtype of the empty result might no longer be as expected
# assert dask_df.col2.dtype == np.dtype("int64")
pd_dataframe = dask_df.compute()
assert pd_dataframe.empty is True
@pytest.mark.filterwarnings(
"ignore:The default dtype for empty Series " "will be 'object' instead of 'float64'"
)
@pytest.mark.parametrize("use_head", [True, False])
def test_single_column(db, use_head):
from sqlalchemy import Column, Integer, MetaData, Table, create_engine
with tmpfile() as f:
uri = "sqlite:///%s" % f
metadata = MetaData()
engine = create_engine(uri)
table = Table(
"single_column",
metadata,
Column("id", Integer, primary_key=True),
)
metadata.create_all(engine)
test_data = pd.DataFrame({"id": list(range(50))}).set_index("id")
test_data.to_sql(table.name, uri, index=True, if_exists="replace")
if use_head:
dask_df = read_sql_table(table.name, uri, index_col="id", npartitions=2)
else:
dask_df = read_sql_table(
table.name,
uri,
head_rows=0,
npartitions=2,
meta=test_data.iloc[:0],
index_col="id",
)
assert dask_df.index.name == "id"
assert dask_df.npartitions == 2
pd_dataframe = dask_df.compute()
assert_eq(test_data, pd_dataframe)
def test_passing_engine_as_uri_raises_helpful_error(db):
# https://github.com/dask/dask/issues/6473
from sqlalchemy import create_engine
df = pd.DataFrame([{"i": i, "s": str(i) * 2} for i in range(4)])
ddf = dd.from_pandas(df, npartitions=2)
with tmpfile() as f:
db = "sqlite:///%s" % f
engine = create_engine(db)
with pytest.raises(ValueError, match="Expected URI to be a string"):
ddf.to_sql("test", engine, if_exists="replace")
@pytest.mark.skip(
reason="Requires a postgres server. Sqlite does not support multiple schemas."
)
def test_empty_other_schema():
from sqlalchemy import DDL, Column, Integer, MetaData, Table, create_engine, event
# Database configurations.
pg_host = "localhost"
pg_port = "5432"
pg_user = "user"
pg_pass = "pass"
pg_db = "db"
db_url = "postgresql://%s:%s@%s:%s/%s" % (pg_user, pg_pass, pg_host, pg_port, pg_db)
# Create an empty table in a different schema.
table_name = "empty_table"
schema_name = "other_schema"
engine = create_engine(db_url)
metadata = MetaData()
table = Table(
table_name,
metadata,
Column("id", Integer, primary_key=True),
Column("col2", Integer),
schema=schema_name,
)
# Create the schema and the table.
event.listen(
metadata, "before_create", DDL("CREATE SCHEMA IF NOT EXISTS %s" % schema_name)
)
metadata.create_all(engine)
# Read the empty table from the other schema.
dask_df = read_sql_table(
table.name, db_url, index_col="id", schema=table.schema, npartitions=1
)
# Validate that the retrieved table is empty.
assert dask_df.index.name == "id"
assert dask_df.col2.dtype == np.dtype("int64")
pd_dataframe = dask_df.compute()
assert pd_dataframe.empty is True
# Drop the schema and the table.
engine.execute("DROP SCHEMA IF EXISTS %s CASCADE" % schema_name)
def test_needs_rational(db):
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame(
{
"a": list("ghjkl"),
"b": [now + i * d for i in range(5)],
"c": [True, True, False, True, True],
}
)
df = df.append(
[
{"a": "x", "b": now + d * 1000, "c": None},
{"a": None, "b": now + d * 1001, "c": None},
]
)
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=False, if_exists="replace")
# one partition contains NULL
data = read_sql_table("test", uri, npartitions=2, index_col="b")
df2 = df.set_index("b")
assert_eq(data, df2.astype({"c": bool})) # bools are coerced
# one partition contains NULL, but big enough head
data = read_sql_table("test", uri, npartitions=2, index_col="b", head_rows=12)
df2 = df.set_index("b")
assert_eq(data, df2)
# empty partitions
data = read_sql_table("test", uri, npartitions=20, index_col="b")
part = data.get_partition(12).compute()
assert part.dtypes.tolist() == ["O", bool]
assert part.empty
df2 = df.set_index("b")
assert_eq(data, df2.astype({"c": bool}))
# explicit meta
data = read_sql_table("test", uri, npartitions=2, index_col="b", meta=df2[:0])
part = data.get_partition(1).compute()
assert part.dtypes.tolist() == ["O", "O"]
df2 = df.set_index("b")
assert_eq(data, df2)
def test_simple(db):
# single chunk
data = read_sql_table("test", db, npartitions=2, index_col="number").compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
def test_npartitions(db):
data = read_sql_table(
"test", db, columns=list(df.columns), npartitions=2, index_col="number"
)
assert len(data.divisions) == 3
assert (data.name.compute() == df.name).all()
data = read_sql_table(
"test", db, columns=["name"], npartitions=6, index_col="number"
)
assert_eq(data, df[["name"]])
data = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk="2 GiB",
index_col="number",
)
assert data.npartitions == 1
assert (data.name.compute() == df.name).all()
data_1 = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk=2 ** 30,
index_col="number",
head_rows=1,
)
assert data_1.npartitions == 1
assert (data_1.name.compute() == df.name).all()
data = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk=250,
index_col="number",
head_rows=1,
)
assert data.npartitions == 2
def test_divisions(db):
data = read_sql_table(
"test", db, columns=["name"], divisions=[0, 2, 4], index_col="number"
)
assert data.divisions == (0, 2, 4)
assert data.index.max().compute() == 4
assert_eq(data, df[["name"]][df.index <= 4])
def test_division_or_partition(db):
with pytest.raises(TypeError):
read_sql_table(
"test",
db,
columns=["name"],
index_col="number",
divisions=[0, 2, 4],
npartitions=3,
)
out = read_sql_table("test", db, index_col="number", bytes_per_chunk=100)
m = out.map_partitions(
lambda d: d.memory_usage(deep=True, index=True).sum()
).compute()
assert (50 < m).all() and (m < 200).all()
assert_eq(out, df)
def test_meta(db):
data = read_sql_table(
"test", db, index_col="number", meta=dd.from_pandas(df, npartitions=1)
).compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
def test_meta_no_head_rows(db):
data = read_sql_table(
"test",
db,
index_col="number",
meta=dd.from_pandas(df, npartitions=1),
npartitions=2,
head_rows=0,
)
assert len(data.divisions) == 3
data = data.compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
data = read_sql_table(
"test",
db,
index_col="number",
meta=dd.from_pandas(df, npartitions=1),
divisions=[0, 3, 6],
head_rows=0,
)
assert len(data.divisions) == 3
data = data.compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
def test_no_meta_no_head_rows(db):
with pytest.raises(ValueError):
read_sql_table("test", db, index_col="number", head_rows=0, npartitions=1)
def test_limits(db):
data = read_sql_table("test", db, npartitions=2, index_col="number", limits=[1, 4])
assert data.index.min().compute() == 1
assert data.index.max().compute() == 4
def test_datetimes():
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame(
{"a": list("ghjkl"), "b": [now + i * d for i in range(2, -3, -1)]}
)
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=False, if_exists="replace")
data = read_sql_table("test", uri, npartitions=2, index_col="b")
assert data.index.dtype.kind == "M"
assert data.divisions[0] == df.b.min()
df2 = df.set_index("b")
assert_eq(data.map_partitions(lambda x: x.sort_index()), df2.sort_index())
def test_extra_connection_engine_keywords(capsys, db):
data = read_sql_table(
"test", db, npartitions=2, index_col="number", engine_kwargs={"echo": False}
).compute()
# no captured message from the stdout with the echo=False parameter (this is the default)
out, err = capsys.readouterr()
assert "SELECT" not in out
assert_eq(data, df)
# with the echo=True sqlalchemy parameter, you should get all SQL queries in the stdout
data = read_sql_table(
"test", db, npartitions=2, index_col="number", engine_kwargs={"echo": True}
).compute()
out, err = capsys.readouterr()
assert "WHERE" in out
assert "FROM" in out
assert "SELECT" in out
assert "AND" in out
assert ">= ?" in out
assert "< ?" in out
assert "<= ?" in out
assert_eq(data, df)
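# A minimal sketch of what engine_kwargs amounts to, assuming (as the assertions above imply)
# that the mapping is forwarded to SQLAlchemy's engine factory; the helper below is purely
# illustrative and not part of dask's API.
def _echo_engine_sketch(uri):
    import sqlalchemy as sa

    # echo=True makes SQLAlchemy log every emitted statement, which is what the
    # captured-stdout assertions above rely on.
    return sa.create_engine(uri, echo=True)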
def test_query(db):
import sqlalchemy as sa
from sqlalchemy import sql
s1 = sql.select([sql.column("number"), sql.column("name")]).select_from(
sql.table("test")
)
out = read_sql_query(s1, db, npartitions=2, index_col="number")
assert_eq(out, df[["name"]])
s2 = sql.select(
[
sa.cast(sql.column("number"), sa.types.BigInteger).label("number"),
sql.column("name"),
]
).where(sql.column("number") >= 5).select_from(sql.table("test"))
out = read_sql_query(s2, db, npartitions=2, index_col="number")
assert_eq(out, df.loc[5:, ["name"]])
def test_query_index_from_query(db):
from sqlalchemy import sql
number = sql.column("number")
name = sql.column("name")
    s1 = sql.select(
        [number, name, sql.func.length(name).label("lenname")]
    ).select_from(sql.table("test"))
out = read_sql_query(
s1, db, npartitions=2, index_col="lenname"
)
lenname_df = df.copy()
lenname_df["lenname"] = lenname_df['name'].str.len()
lenname_df = lenname_df.reset_index().set_index("lenname")
assert_eq(out, lenname_df.loc[:, ["number", "name"]])
def test_query_with_meta(db):
from sqlalchemy import sql
data = {
'name': pd.Series([], name='name', dtype='str'),
'age': pd.Series([], name='age', dtype='int'),
}
index = pd.Index([], name='number', dtype='int')
meta = pd.DataFrame(data, index=index)
s1 = sql.select([sql.column("number"), sql.column("name"), sql.column("age")]).select_from(
sql.table("test")
)
out = read_sql_query(s1, db, npartitions=2, index_col="number", meta=meta)
assert_eq(out, df[["name", "age"]])
def test_no_character_index_without_divisions(db):
# attempt to read the sql table with a character index and no divisions
with pytest.raises(TypeError):
read_sql_table("test", db, npartitions=2, index_col="name", divisions=None)
def test_read_sql(db):
from sqlalchemy import sql
s = sql.select([sql.column("number"), sql.column("name")]).select_from(
sql.table("test")
)
out = read_sql(s, db, npartitions=2, index_col="number")
assert_eq(out, df[["name"]])
data = read_sql_table("test", db, npartitions=2, index_col="number").compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
@contextmanager
def tmp_db_uri():
with tmpfile() as f:
yield "sqlite:///%s" % f
@pytest.mark.parametrize("npartitions", (1, 2))
@pytest.mark.parametrize("parallel", (False, True))
def test_to_sql(npartitions, parallel):
df_by_age = df.set_index("age")
df_appended = pd.concat(
[
df,
df,
]
)
ddf = dd.from_pandas(df, npartitions)
ddf_by_age = ddf.set_index("age")
# Simple round trip test: use existing "number" index_col
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, parallel=parallel)
result = read_sql_table("test", uri, "number")
assert_eq(df, result)
# Test writing no index, and reading back in with one of the other columns as index (`read_sql_table` requires
# an index_col)
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, parallel=parallel, index=False)
result = read_sql_table("test", uri, "negish")
assert_eq(df.set_index("negish"), result)
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Index by "age" instead
with tmp_db_uri() as uri:
ddf_by_age.to_sql("test", uri, parallel=parallel)
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Index column can't have "object" dtype if no partitions are provided
with tmp_db_uri() as uri:
ddf.set_index("name").to_sql("test", uri)
with pytest.raises(
TypeError,
match='Provided index column is of type "object". If divisions is not provided the index column type must be numeric or datetime.', # noqa: E501
):
read_sql_table("test", uri, "name")
# Test various "if_exists" values
with tmp_db_uri() as uri:
ddf.to_sql("test", uri)
# Writing a table that already exists fails
with pytest.raises(ValueError, match="Table 'test' already exists"):
ddf.to_sql("test", uri)
ddf.to_sql("test", uri, parallel=parallel, if_exists="append")
result = read_sql_table("test", uri, "number")
assert_eq(df_appended, result)
ddf_by_age.to_sql("test", uri, parallel=parallel, if_exists="replace")
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Verify number of partitions returned, when compute=False
with tmp_db_uri() as uri:
result = ddf.to_sql("test", uri, parallel=parallel, compute=False)
# the first result is from the "meta" insert
actual = len(result.compute())
assert actual == npartitions
def test_to_sql_kwargs():
ddf = dd.from_pandas(df, 2)
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, method="multi")
with pytest.raises(
TypeError, match="to_sql\\(\\) got an unexpected keyword argument 'unknown'"
):
ddf.to_sql("test", uri, unknown=None)
| 30.465074
| 158
| 0.608942
|
794be9f75abbbe1b7e373a378ed892a467ed00c3
| 936
|
py
|
Python
|
evamap_web/views.py
|
benjimor/EvaMap-Web
|
6b9ad8445b2621c18872521861cfe0ceed2cce20
|
[
"MIT"
] | 1
|
2020-01-13T11:51:25.000Z
|
2020-01-13T11:51:25.000Z
|
evamap_web/views.py
|
benj-moreau/EvaMap-Web
|
6b9ad8445b2621c18872521861cfe0ceed2cce20
|
[
"MIT"
] | 5
|
2021-03-18T23:39:08.000Z
|
2022-02-10T12:41:02.000Z
|
evamap_web/views.py
|
benjimor/EvaMap-Web
|
6b9ad8445b2621c18872521861cfe0ceed2cce20
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from EvaMap.EvaMap import EvaMap
import json
@require_http_methods(['GET'])
def home(request):
return render(request, 'home.html')
@require_http_methods(['GET'])
def form(request):
return render(request, 'form.html')
@csrf_exempt
@require_http_methods(['POST'])
def result(request):
ontology = request.POST.get('ontology')
mapping = request.POST.get('mapping')
dataset = request.POST.get('dataset')
evamap = EvaMap(ontology, mapping, dataset)
evamap.evaluate_mapping()
result = evamap.get_complet_result()
total_score = evamap.get_total_score()
sorted_result = sorted(result, key=lambda res: (res['score'], res['name']))
return render(request, 'result.html', {'total_score': total_score, 'detailed_result': json.dumps(sorted_result)})
| 31.2
| 117
| 0.740385
|
794be9fcc5ea16c77e3227ab2672790f9d48688b
| 6,033
|
py
|
Python
|
tests/fixtures.py
|
scorphus/holmes-api
|
6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
scorphus/holmes-api
|
6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
scorphus/holmes-api
|
6b3c76d4299fecf2d8799d7b5c3c6a6442cacd59
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import datetime
import factory
import factory.alchemy
import hashlib
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from holmes.models import (
Domain, Page, Review, Violation, Fact, Key, KeysCategory, Request,
User, Limiter, DomainsViolationsPrefs, UsersViolationsPrefs
)
from uuid import uuid4
sqlalchemy_echo = logging.getLogger('nose').getEffectiveLevel() < logging.INFO
engine = create_engine(
"mysql+mysqldb://root@localhost:3306/test_holmes",
convert_unicode=True,
pool_size=1,
max_overflow=0,
echo=sqlalchemy_echo
)
maker = sessionmaker(bind=engine, autoflush=True)
db = scoped_session(maker)
class BaseFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = super(BaseFactory, cls)._create(target_class, *args, **kwargs)
if (hasattr(cls, '_meta')
and cls._meta is not None
and hasattr(cls._meta, 'sqlalchemy_session')
and cls._meta.sqlalchemy_session is not None):
cls._meta.sqlalchemy_session.flush()
return instance
class DomainFactory(BaseFactory):
class Meta:
model = Domain
name = factory.Sequence(lambda n: 'domain-{0}.com'.format(n))
url = factory.Sequence(lambda n: 'http://my-site-{0}.com/'.format(n))
is_active = True
class PageFactory(BaseFactory):
class Meta:
model = Page
url = factory.Sequence(lambda n: 'http://my-site.com/{0}/'.format(n))
url_hash = None
uuid = factory.LazyAttribute(lambda a: uuid4())
created_date = None
last_review_date = None
last_modified = None
expires = None
domain = factory.SubFactory(DomainFactory)
last_review = None
violations_count = 0
last_review_uuid = None
score = 0.0
@classmethod
def _adjust_kwargs(cls, **kwargs):
kwargs['url_hash'] = hashlib.sha512(kwargs['url']).hexdigest()
return kwargs
class ReviewFactory(BaseFactory):
class Meta:
model = Review
facts = factory.LazyAttribute(lambda a: [])
violations = factory.LazyAttribute(lambda a: [])
is_complete = False
is_active = False
created_date = None
completed_date = None
uuid = factory.LazyAttribute(lambda a: uuid4())
domain = factory.SubFactory(DomainFactory)
page = factory.SubFactory(PageFactory)
@classmethod
def _adjust_kwargs(cls, **kwargs):
if 'page' in kwargs:
kwargs['domain'] = kwargs['page'].domain
if 'page' in kwargs and 'uuid' in kwargs:
kwargs['page'].last_review_uuid = kwargs['uuid']
if 'number_of_violations' in kwargs:
number_of_violations = kwargs['number_of_violations']
del kwargs['number_of_violations']
if 'page' in kwargs:
kwargs['page'].violations_count = number_of_violations
violations = []
for i in range(number_of_violations):
key = Key.get_or_create(db, 'key.%d' % i, 'category.%d' % (i % 3))
violations.append(
Violation(
key=key,
value="value %d" % i,
points=i,
domain=kwargs['page'].domain,
review_is_active=kwargs['is_active']
)
)
kwargs['violations'] = violations
if 'number_of_facts' in kwargs:
number_of_facts = kwargs['number_of_facts']
del kwargs['number_of_facts']
facts = []
for i in range(number_of_facts):
key = Key.get_or_create(db, 'key.%d' % i, 'category.%d' % (i % 3))
facts.append(Fact(key=key, value="value %d" % i))
kwargs['facts'] = facts
return kwargs
class KeysCategoryFactory(BaseFactory):
class Meta:
model = KeysCategory
name = factory.Sequence(lambda n: 'category-{0}'.format(n))
class KeyFactory(BaseFactory):
class Meta:
model = Key
name = factory.Sequence(lambda n: 'key-{0}'.format(n))
category = factory.SubFactory(KeysCategoryFactory)
class FactFactory(BaseFactory):
class Meta:
model = Fact
key = factory.SubFactory(KeyFactory)
value = None
class ViolationFactory(BaseFactory):
class Meta:
model = Violation
key = factory.SubFactory(KeyFactory)
value = None
points = 0
domain = factory.SubFactory(DomainFactory)
review_is_active = True
class RequestFactory(BaseFactory):
class Meta:
model = Request
domain_name = 'g1.globo.com'
url = 'http://g1.globo.com'
effective_url = 'http://g1.globo.com/'
status_code = 301
response_time = 0.23
    completed_date = datetime.date(2013, 2, 12)
review_url = 'http://globo.com/'
class UserFactory(BaseFactory):
class Meta:
model = User
fullname = 'Marcelo Jorge Vieira'
email = 'marcelo.vieira@corp.globo.com'
is_superuser = True
last_login = datetime.datetime(2013, 12, 11, 10, 9, 8)
locale = 'en_US'
class LimiterFactory(BaseFactory):
class Meta:
model = Limiter
url = factory.Sequence(lambda n: 'http://my-site-{0}.com/'.format(n))
url_hash = None
value = 10
@classmethod
def _adjust_kwargs(cls, **kwargs):
kwargs['url_hash'] = hashlib.sha512(kwargs['url']).hexdigest()
return kwargs
class DomainsViolationsPrefsFactory(BaseFactory):
class Meta:
model = DomainsViolationsPrefs
domain = factory.SubFactory(DomainFactory)
key = factory.SubFactory(KeyFactory)
value = 'whatever'
class UsersViolationsPrefsFactory(BaseFactory):
class Meta:
model = UsersViolationsPrefs
user = factory.SubFactory(UserFactory)
key = factory.SubFactory(KeyFactory)
is_active = True
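# Hedged usage sketch (assumes the scoped session above is bound to a test schema):
#
#     page = PageFactory.create()
#     review = ReviewFactory.create(page=page, is_active=True, number_of_violations=3)
#     assert review.domain is page.domain and len(review.violations) == 3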
| 25.892704
| 82
| 0.633516
|
794bea22dd32ece55fe3bf73d3febac21a3b4d1e
| 4,472
|
py
|
Python
|
parl/algorithms/paddle/sac.py
|
wangzelong0663/PARL
|
9954a18827fd720585b346ec83cdaf9683feb52c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-29T15:49:21.000Z
|
2021-09-29T15:49:21.000Z
|
parl/algorithms/paddle/sac.py
|
wangzelong0663/PARL
|
9954a18827fd720585b346ec83cdaf9683feb52c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
parl/algorithms/paddle/sac.py
|
wangzelong0663/PARL
|
9954a18827fd720585b346ec83cdaf9683feb52c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-04T07:20:51.000Z
|
2022-03-04T07:20:51.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import parl
import paddle
from paddle.distribution import Normal
import paddle.nn.functional as F
from copy import deepcopy
__all__ = ['SAC']
class SAC(parl.Algorithm):
def __init__(self,
model,
gamma=None,
tau=None,
alpha=None,
actor_lr=None,
critic_lr=None):
""" SAC algorithm
Args:
model(parl.Model): forward network of actor and critic.
gamma(float): discounted factor for reward computation
tau (float): decay coefficient when updating the weights of self.target_model with self.model
alpha (float): temperature parameter determines the relative importance of the entropy against the reward
actor_lr (float): learning rate of the actor model
critic_lr (float): learning rate of the critic model
"""
assert isinstance(gamma, float)
assert isinstance(tau, float)
assert isinstance(alpha, float)
assert isinstance(actor_lr, float)
assert isinstance(critic_lr, float)
self.gamma = gamma
self.tau = tau
self.alpha = alpha
self.actor_lr = actor_lr
self.critic_lr = critic_lr
self.model = model
self.target_model = deepcopy(self.model)
self.actor_optimizer = paddle.optimizer.Adam(
learning_rate=actor_lr, parameters=self.model.get_actor_params())
self.critic_optimizer = paddle.optimizer.Adam(
learning_rate=critic_lr, parameters=self.model.get_critic_params())
def predict(self, obs):
act_mean, _ = self.model.policy(obs)
action = paddle.tanh(act_mean)
return action
def sample(self, obs):
act_mean, act_log_std = self.model.policy(obs)
normal = Normal(act_mean, act_log_std.exp())
# for reparameterization trick (mean + std*N(0,1))
x_t = normal.sample([1])
action = paddle.tanh(x_t)
log_prob = normal.log_prob(x_t)
# Enforcing Action Bound
log_prob -= paddle.log((1 - action.pow(2)) + 1e-6)
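        # The subtraction above is the change-of-variables correction for the tanh squash:
        # with a = tanh(x) and x ~ Normal(mu, sigma), log p(a) = log p(x) - log(1 - tanh(x)^2);
        # the 1e-6 term only guards against log(0) at the action bounds.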
log_prob = paddle.sum(log_prob, axis=-1, keepdim=True)
return action[0], log_prob[0]
def learn(self, obs, action, reward, next_obs, terminal):
critic_loss = self._critic_learn(obs, action, reward, next_obs,
terminal)
actor_loss = self._actor_learn(obs)
self.sync_target()
return critic_loss, actor_loss
def _critic_learn(self, obs, action, reward, next_obs, terminal):
with paddle.no_grad():
next_action, next_log_pro = self.sample(next_obs)
q1_next, q2_next = self.target_model.value(next_obs, next_action)
target_Q = paddle.minimum(q1_next,
q2_next) - self.alpha * next_log_pro
terminal = paddle.cast(terminal, dtype='float32')
target_Q = reward + self.gamma * (1. - terminal) * target_Q
cur_q1, cur_q2 = self.model.value(obs, action)
critic_loss = F.mse_loss(cur_q1, target_Q) + F.mse_loss(
cur_q2, target_Q)
self.critic_optimizer.clear_grad()
critic_loss.backward()
self.critic_optimizer.step()
return critic_loss
def _actor_learn(self, obs):
act, log_pi = self.sample(obs)
q1_pi, q2_pi = self.model.value(obs, act)
min_q_pi = paddle.minimum(q1_pi, q2_pi)
actor_loss = ((self.alpha * log_pi) - min_q_pi).mean()
self.actor_optimizer.clear_grad()
actor_loss.backward()
self.actor_optimizer.step()
return actor_loss
def sync_target(self, decay=None):
if decay is None:
decay = 1.0 - self.tau
self.model.sync_weights_to(self.target_model, decay=decay)
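# Hedged usage sketch (the model must expose policy/value and the get_*_params helpers used
# above; the hyperparameter values here are common SAC defaults, not values from this repo):
#
#     alg = SAC(model, gamma=0.99, tau=0.005, alpha=0.2, actor_lr=3e-4, critic_lr=3e-4)
#     critic_loss, actor_loss = alg.learn(obs, act, reward, next_obs, terminal)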
| 37.266667
| 117
| 0.635063
|
794bea4c28fcbae0bddb5a57263b501254274d6f
| 192
|
py
|
Python
|
job_board/users/middleware.py
|
lokesh1729/job_board
|
e9d266e7bbda041cdf30b3e7a926ac95c80def9f
|
[
"MIT"
] | null | null | null |
job_board/users/middleware.py
|
lokesh1729/job_board
|
e9d266e7bbda041cdf30b3e7a926ac95c80def9f
|
[
"MIT"
] | null | null | null |
job_board/users/middleware.py
|
lokesh1729/job_board
|
e9d266e7bbda041cdf30b3e7a926ac95c80def9f
|
[
"MIT"
] | null | null | null |
from django.utils.deprecation import MiddlewareMixin
from . import utils
class RoleMiddleware(MiddlewareMixin):
def process_request(self, request):
utils.set_user_role(request)
| 21.333333
| 52
| 0.78125
|
794beafc562750cf21ecced2a6c21c5d3b73450c
| 4,409
|
py
|
Python
|
mcconf/UpdateConf.py
|
OmniTroid/mcconf
|
b248f0fce911b02e4cc8213c1b0e0ddc0105b6f8
|
[
"MIT"
] | 1
|
2022-03-03T12:17:40.000Z
|
2022-03-03T12:17:40.000Z
|
mcconf/UpdateConf.py
|
OmniTroid/mcconf
|
b248f0fce911b02e4cc8213c1b0e0ddc0105b6f8
|
[
"MIT"
] | null | null | null |
mcconf/UpdateConf.py
|
OmniTroid/mcconf
|
b248f0fce911b02e4cc8213c1b0e0ddc0105b6f8
|
[
"MIT"
] | null | null | null |
import io
import yaml
import json
import configparser
import pprint
from pathlib import Path
from typing import Callable
import dictcombiner.dictcombiner as dc
import fs2conf as fc
class UpdateConf:
def __init__(self, args):
self.args = args
self.serverconf = Path(args['serverconf'])
self.rolesdir = Path(args['rolesdir'])
self.serverdir = Path(args['serverdir'])
self.conf = {}
self.metaconf = {}
        if not self.serverconf.is_file():
            print('ERROR: ' + str(self.serverconf) + ' does not exist')
            raise FileNotFoundError
        if not self.rolesdir.is_dir():
            print('ERROR: ' + str(self.rolesdir) + ' does not exist')
            raise NotADirectoryError
        if not self.serverdir.exists():
            print('ERROR: ' + str(self.serverdir) + ' does not exist')
            raise NotADirectoryError
roledirs = [
Path(self.rolesdir, role) for role in
json.loads(open(self.serverconf).read())['roles']
]
self.combined_conf = fc.combine_dirs(roledirs)
pprint.pprint(self.combined_conf)
self.metaconf = self.combined_conf['metaconf.json']
self.conf = self.combined_conf['conf']
def update_all(self):
self.update_server_properties()
self.update_bukkit_yml()
self.update_spigot_yml()
self.update_paper_yml()
#self.update_plugin_confs()
def update_server_properties(self):
baseconf_path = Path(self.serverdir, 'server.properties')
subconf_dir = Path(self.confdir, 'server.properties')
self.update_conf(
baseconf_path, subconf_dir,
self.read_properties_file, self.write_properties_file)
def update_bukkit_yml(self):
self.update_core_yml('bukkit')
def update_spigot_yml(self):
self.update_core_yml('spigot')
def update_paper_yml(self):
self.update_core_yml('paper')
def update_plugin_confs(self):
plugins_conf_dir = Path(self.confdir, 'plugins')
if not plugins_conf_dir.exists():
print('INFO: ' + str(plugins_conf_dir) + ' not found, skipping')
return
for dir_ in plugins_conf_dir.iterdir():
conf_dir = Path(dir_)
            # This is where we will add exceptions for plugins that don't have config.yml
conf_name = 'config.yml'
baseconf_path = Path(self.serverdir, 'plugins', conf_dir.name, 'config.yml')
# Wrapper for updating core yml files (bukkit.yml, spigot.yml etc.)
def update_core_yml(self, name : str):
core_yml_path = Path(self.serverdir, name + '.yml')
sub_yml_dir = Path(self.confdir, name + '.yml')
self.update_conf(
core_yml_path, sub_yml_dir,
self.read_yml, self.write_yml)
def update_conf(self,
baseconf_path : Path, subconf_dir : Path,
read_func : Callable, write_func : Callable):
if not baseconf_path.exists():
print('ERROR: ' + str(baseconf_path) + ' not found.')
return
# This means the file has no config, which is perfectly valid
if not subconf_dir.exists():
print('INFO: ' + str(subconf_dir) + ' not found, skipping.')
return
original_path = Path(baseconf_path.parent, 'original_' + baseconf_path.name)
# Use the original file if we have it
# This means the script has been run before
# This ensures idempotency
if original_path.exists():
baseconf = read_func(original_path)
else:
baseconf = read_func(baseconf_path)
subconf_files = sorted(
[Path(filename) for filename in subconf_dir.iterdir()],
key=lambda f: f.name)
confs = [baseconf]
for subconf_file in subconf_files:
confs.append(read_func(subconf_file))
result_conf = dc.combine_dicts(confs)
if not original_path.exists():
baseconf_path.rename(original_path)
write_func(baseconf_path, result_conf)
### Read and write helpers
def read_properties_file(self, path : Path) -> dict:
## HACK: so server.properties actually lacks the section header.
## configparse doesn't like this so we have to hack around that.
conf_str = '[default]\n'
with open(path) as file:
data = file.read()
conf_str = conf_str + data
tmp_conf = configparser.ConfigParser()
tmp_conf.read_string(conf_str)
conf = dict(tmp_conf.items('default'))
return conf
def write_properties_file(self, path : Path, data : dict):
with open(path, 'w') as file:
tmp_dict = {'default': data}
tmp_conf = configparser.ConfigParser()
tmp_conf.read_dict(tmp_dict)
tmp_conf.write(file)
def read_yml(self, path : Path) -> dict:
with open(path) as file:
data = yaml.safe_load(file)
return data
def write_yml(self, path : Path, data : dict):
with open(path, 'w') as file:
file.write(yaml.dump(data))
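# Hedged usage sketch (the argument keys are the ones __init__ reads above; note that several
# update_* methods also reference self.confdir, which __init__ never sets):
#
#     conf = UpdateConf({'serverconf': 'server.json', 'rolesdir': 'roles', 'serverdir': 'server'})
#     conf.update_all()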
| 27.216049
| 79
| 0.718303
|
794beb73c2dbfc21e4a3f91c3042b4b65b64f061
| 6,502
|
py
|
Python
|
ferminet/tests/train_test.py
|
AllanChain/ferminet
|
75dc8475e2aa479484d601cc6819bc7e9f577058
|
[
"Apache-2.0"
] | null | null | null |
ferminet/tests/train_test.py
|
AllanChain/ferminet
|
75dc8475e2aa479484d601cc6819bc7e9f577058
|
[
"Apache-2.0"
] | null | null | null |
ferminet/tests/train_test.py
|
AllanChain/ferminet
|
75dc8475e2aa479484d601cc6819bc7e9f577058
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.train."""
import itertools
import os
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import chex
from ferminet import base_config
from ferminet import train
from ferminet.configs import atom
from ferminet.configs import diatomic
from ferminet.utils import units
from jax import numpy as jnp
from jax import test_util as jtu
import pyscf
FLAGS = flags.FLAGS
# Default flags are sufficient so mark FLAGS as parsed so we can run the tests
# with py.test, which imports this file rather than runs it.
FLAGS.mark_as_parsed()
def setUpModule():
# Allow chex_n_cpu_devices to be set via an environment variable as well as
# --chex_n_cpu_devices to play nicely with pytest.
fake_devices = os.environ.get('FERMINET_CHEX_N_CPU_DEVICES')
if fake_devices is not None:
fake_devices = int(fake_devices)
try:
chex.set_n_cpu_devices(n=fake_devices)
except RuntimeError:
# jax has already been initialised (e.g. because this is being run with
# other tests via a test runner such as pytest)
logging.info('JAX already initialised so cannot set number of CPU devices. '
'Using a single device in train_test.')
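# For example, running the suite as
#   FERMINET_CHEX_N_CPU_DEVICES=4 pytest ferminet/tests/train_test.py
# fakes four CPU devices without passing --chex_n_cpu_devices.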
def _config_params():
for params in itertools.product(('Li', 'LiH'), ('kfac', 'adam')):
yield params
for optimizer in ('kfac', 'adam'):
yield ('H', optimizer)
yield ('Li', 'lamb')
class QmcTest(jtu.JaxTestCase):
def setUp(self):
super(QmcTest, self).setUp()
# disable use of temp directory in pyscf.
# Test calculations are small enough to fit in RAM and we don't need
# checkpoint files.
pyscf.lib.param.TMPDIR = None
@parameterized.parameters(_config_params())
def test_training_step(self, system, optimizer):
if system in ('H', 'Li'):
cfg = atom.get_config()
cfg.system.atom = system
else:
cfg = diatomic.get_config()
cfg.system.molecule_name = system
cfg.network.detnet.hidden_dims = ((16, 4),) * 2
cfg.network.detnet.determinants = 2
cfg.batch_size = 32
cfg.pretrain.iterations = 10
cfg.mcmc.burn_in = 10
cfg.optim.optimizer = optimizer
cfg.optim.iterations = 3
cfg.debug.check_nan = True
cfg.log.save_path = self.create_tempdir().full_path
cfg = base_config.resolve(cfg)
# Calculation is too small to test the results for accuracy. Test just to
# ensure they actually run without a top-level error.
train.train(cfg)
MOL_STRINGS = [
'H 0 0 -1; H 0 0 1', 'O 0 0 0; H 0 1 0; H 0 0 1', 'H 0 0 0; Cl 0 0 1.1',
]
class QmcPyscfMolTest(jtu.JaxTestCase):
def setUp(self):
super(QmcPyscfMolTest, self).setUp()
# disable use of temp directory in pyscf.
# Test calculations are small enough to fit in RAM and we don't need
# checkpoint files.
pyscf.lib.param.TMPDIR = None
@parameterized.parameters(
(mol_string, optimizer)
for mol_string, optimizer in zip(MOL_STRINGS[:2], ('adam', 'kfac')))
def test_training_step_pyscf(self, mol_string, optimizer):
mol = pyscf.gto.Mole()
mol.build(
atom=mol_string,
basis='sto-3g', unit='bohr')
cfg = base_config.default()
cfg.system.pyscf_mol = mol
cfg.network.detnet.hidden_dims = ((16, 4),) * 2
cfg.network.detnet.determinants = 2
cfg.batch_size = 32
cfg.pretrain.iterations = 10
cfg.mcmc.burn_in = 10
cfg.optim.optimizer = optimizer
cfg.optim.iterations = 3
cfg.debug.check_nan = True
cfg.log.save_path = self.create_tempdir().full_path
cfg = base_config.resolve(cfg)
# Calculation is too small to test the results for accuracy. Test just to
# ensure they actually run without a top-level error.
train.train(cfg)
@parameterized.parameters(MOL_STRINGS)
def test_conversion_pyscf(self, mol_string):
mol = pyscf.gto.Mole()
mol.build(
atom=mol_string,
basis='sto-3g', unit='bohr')
cfg = base_config.default()
cfg.system.pyscf_mol = mol
cfg = train.pyscf_to_molecule(cfg)
# Assert that the alpha and beta electrons are the same
self.assertEqual(mol.nelec, cfg.system.electrons)
# Assert that the basis are the same
self.assertEqual(mol.basis, cfg.pretrain.basis)
# Assert that atom symbols are the same
self.assertEqual([mol.atom_symbol(i) for i in range(mol.natm)],
[atom.symbol for atom in cfg.system.molecule])
# Assert that atom coordinates are the same
pyscf_coords = [mol.atom_coords()[i] for i in range(mol.natm)]
internal_coords = [jnp.array(atom.coords) for atom in cfg.system.molecule]
self.assertAllClose(pyscf_coords, internal_coords)
def test_conversion_pyscf_ang(self):
mol = pyscf.gto.Mole()
mol.build(
atom='H 0 0 -1; H 0 0 1',
basis='sto-3g', unit='ang')
cfg = base_config.default()
cfg.system.pyscf_mol = mol
cfg = train.pyscf_to_molecule(cfg)
# Assert that the coordinates are now in bohr internally
bohr_coords = [[0, 0, -units.BOHR_ANGSTROM], [0, 0, units.BOHR_ANGSTROM]]
self.assertAllClose([atom.coords for atom in cfg.system.molecule],
bohr_coords,
check_dtypes=False)
# Assert that the alpha and beta electrons are the same
self.assertEqual(mol.nelec, cfg.system.electrons)
# Assert that the basis are the same
self.assertEqual(mol.basis, cfg.pretrain.basis)
# Assert that atom symbols are the same
self.assertEqual([mol.atom_symbol(i) for i in range(mol.natm)],
[atom.symbol for atom in cfg.system.molecule])
# Assert that atom coordinates are the same
pyscf_coords = [mol.atom_coords()[i] for i in range(mol.natm)]
internal_coords = [jnp.array(atom.coords) for atom in cfg.system.molecule]
self.assertAllClose(pyscf_coords, internal_coords)
if __name__ == '__main__':
absltest.main()
| 35.922652
| 80
| 0.699016
|
794bebd3503c8ca083d3b90833046da36ccf01e3
| 8,917
|
py
|
Python
|
training/tf_training/v2_write_training.py
|
godmoves/PhoenixGo
|
8c813d10315660626f18e3985bbcb87ea5c684a5
|
[
"Apache-2.0"
] | 11
|
2018-07-18T07:47:35.000Z
|
2021-04-20T23:11:36.000Z
|
training/tf_training/v2_write_training.py
|
godmoves/PhoenixGo
|
8c813d10315660626f18e3985bbcb87ea5c684a5
|
[
"Apache-2.0"
] | 2
|
2018-08-18T06:40:42.000Z
|
2019-02-03T14:18:50.000Z
|
training/tf_training/v2_write_training.py
|
godmoves/PhoenixGo
|
8c813d10315660626f18e3985bbcb87ea5c684a5
|
[
"Apache-2.0"
] | 2
|
2018-10-20T12:05:20.000Z
|
2018-12-13T02:56:45.000Z
|
#!/usr/bin/env python3
#
# Used to dump training games in V2 format from MongoDB or V1 chunk files.
#
# Usage: v2_write_training [chunk_prefix]
# If run without a chunk_prefix it reads from MongoDB.
# With a chunk prefix, it uses all chunk files with that prefix
# as input.
#
# Sets up a dataflow pipeline that:
# 1. Reads from input (MongoDB or v1 chunk files)
# 2. Split into a test set and a training set.
# 3. Converts from v1 format to v2 format.
# 4. Shuffle V2 records.
# 5. Write out to compressed v2 chunk files.
#
from chunkparser import ChunkParser
import glob
import gzip
import itertools
import multiprocessing as mp
import numpy as np
import pymongo
import sys
def mongo_fetch_games(q_out, num_games):
"""
Read V1 format games from MongoDB and put them
in the output queue (q_out)
    Reads the network list from MongoDB, most recent first,
    and then reads games produced by those networks until
    'num_games' games have been read.
"""
client = pymongo.MongoClient()
db = client.test
# MongoDB closes idle cursors after 10 minutes unless specific
# options are given. That means this query will time out before
# we finish. Rather than keeping it alive, increase the default
# batch size so we're sure to get all networks in the first fetch.
networks = db.networks.find(None, {"_id": False, "hash": True}).\
sort("_id", pymongo.DESCENDING).batch_size(5000)
game_count = 0
for net in networks:
print("Searching for {}".format(net['hash']))
games = db.games.\
find({"networkhash": net['hash']},
{"_id": False, "data": True})
for game in games:
game_data = game['data']
q_out.put(game_data.encode("ascii"))
game_count += 1
if game_count >= num_games:
q_out.put('STOP')
return
if game_count % 1000 == 0:
print("{} games".format(game_count))
def disk_fetch_games(q_out, prefix):
"""
Fetch chunk files off disk.
Chunk files can be either v1 or v2 format.
"""
files = glob.glob(prefix + "*.gz")
for f in files:
with gzip.open(f, 'rb') as chunk_file:
v = chunk_file.read()
q_out.put(v)
print("In {}".format(f))
q_out.put('STOP')
def fake_fetch_games(q_out, num_games):
"""
Generate V1 format fake games. Used for testing and benchmarking
"""
for _ in range(num_games):
# Generate a 200 move 'game'
# Generate a random game move.
# 1. 18 binary planes of length 361
planes = [np.random.randint(2, size=361).tolist()
for plane in range(16)]
stm = float(np.random.randint(2))
planes.append([stm] * 361)
planes.append([1. - stm] * 361)
# 2. 362 probs
probs = np.random.randint(3, size=362).tolist()
# 3. And a winner: 1 or -1
winner = [2 * float(np.random.randint(2)) - 1]
# Convert that to a v1 text record.
items = []
for p in range(16):
# generate first 360 bits
h = np.packbits([int(x) for x in planes[p][0:360]]).tobytes().hex()
# then add the stray single bit
h += str(planes[p][360]) + "\n"
items.append(h)
# then side to move
items.append(str(int(planes[17][0])) + "\n")
# then probabilities
items.append(' '.join([str(x) for x in probs]) + "\n")
# and finally if the side to move is a winner
items.append(str(int(winner[0])) + "\n")
game = ''.join(items)
game = game * 200
game = game.encode('ascii')
q_out.put(game)
q_out.put('STOP')
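# For reference, each move in the V1 text record assembled above is:
#   16 lines of hex-packed 361-bit input planes (360 bits packed, plus one stray bit),
#   one line with the side to move (0 or 1),
#   one line with 362 space-separated move probabilities (361 points plus pass),
#   and one line with +1/-1 for whether the side to move won.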
def queue_gen(q, out_qs):
"""
Turn a queue into a generator
Yields items pulled from 'q' until a 'STOP' item is seen.
    The STOP item will be propagated to all the queues in
the list 'out_qs' (if any).
"""
while True:
try:
item = q.get()
except Exception:
break
if item == 'STOP':
break
yield item
# There might be multiple workers reading from this queue,
# and they all need to be stopped, so put the STOP token
# back in the queue.
q.put('STOP')
# Stop any listed output queues as well
for x in out_qs:
x.put('STOP')
def split_train_test(q_in, q_train, q_test):
"""
Stream a stream of chunks into separate train and test
pools. 10% of the chunks are assigned to test.
Uses hash sharding, so multiple runs will split chunks
in the same way.
"""
for item in queue_gen(q_in, [q_train, q_test]):
# Use the hash of the game to determine the split. This means
# that test games will never be used for training.
h = hash(item) & 0xfff
if h < 0.1 * 0xfff:
# a test game.
q_test.put(item)
else:
q_train.put(item)
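# Roughly: hash(item) & 0xfff yields a bucket in [0, 4095]; buckets below 0.1 * 0xfff
# (about 409) go to the test pool, so close to 10% of games land in the test set.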
class QueueChunkSrc:
def __init__(self, q):
self.q = q
self.gen = None
def next(self):
print("Queue next")
if self.gen is None:
self.gen = queue_gen(self.q, [])
try:
return next(self.gen)
except Exception:
return None
def chunk_parser(q_in, q_out, shuffle_size, chunk_size):
"""
Parse input chunks from 'q_in', shuffle, and put
chunks of moves in v2 format into 'q_out'
Each output chunk contains 'chunk_size' moves.
Moves are shuffled in a buffer of 'shuffle_size' moves.
(A 2^20 items shuffle buffer is ~ 2.2GB of RAM).
"""
workers = max(1, mp.cpu_count() - 2)
parse = ChunkParser(QueueChunkSrc(q_in),
shuffle_size=shuffle_size,
workers=workers)
gen = parse.v2_gen()
while True:
s = list(itertools.islice(gen, chunk_size))
if not len(s):
break
s = b''.join(s)
q_out.put(s)
q_out.put('STOP')
def chunk_writer(q_in, namesrc):
"""
Write a batch of moves out to disk as a compressed file.
    Filenames are taken from the name source 'namesrc'.
"""
for chunk in queue_gen(q_in, []):
filename = namesrc.next()
chunk_file = gzip.open(filename, 'w', 1)
chunk_file.write(chunk)
chunk_file.close()
print("chunk_writer completed")
class NameSrc:
"""
    Generates a sequence of names, starting with 'prefix'.
"""
def __init__(self, prefix):
self.prefix = prefix
self.n = 0
def next(self):
print("Name next")
self.n += 1
return self.prefix + "{:0>8d}.gz".format(self.n)
def main(args):
# Build the pipeline.
procs = []
# Read from input.
q_games = mp.SimpleQueue()
if args:
prefix = args.pop(0)
print("Reading from chunkfiles {}".format(prefix))
procs.append(mp.Process(
target=disk_fetch_games, args=(q_games, prefix)))
else:
print("Reading from MongoDB")
# procs.append(mp.Process(target=fake_fetch_games, args=(q_games, 20)))
procs.append(mp.Process(
target=mongo_fetch_games, args=(q_games, 275000)))
# Split into train/test
q_test = mp.SimpleQueue()
q_train = mp.SimpleQueue()
procs.append(mp.Process(target=split_train_test,
args=(q_games, q_train, q_test)))
# Convert v1 to v2 format and shuffle, writing 8192 moves per chunk.
q_write_train = mp.SimpleQueue()
q_write_test = mp.SimpleQueue()
# Shuffle buffer is ~ 2.2GB of RAM with 2^20 (~1e6) entries. A game is ~500 moves, so
# there's ~2000 games in the shuffle buffer. Selecting 8k moves gives an expected
# number of ~4 moves from the same game in a given chunk file.
#
# The output files are in parse.py via another 1e6 sized shuffle buffer. At 8192 moves
# per chunk, there's ~ 128 chunks in the shuffle buffer. With a batch size of 4096,
# the expected max number of moves from the same game in the batch is < 1.14
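    # Back-of-the-envelope check of those figures:
    #   1 << 20 positions / ~500 moves per game  -> ~2100 games in the shuffle buffer
    #   8192 moves per chunk / ~2100 games       -> ~3.9 expected moves per game per chunk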
procs.append(mp.Process(target=chunk_parser, args=(
q_train, q_write_train, 1 << 20, 8192)))
procs.append(mp.Process(target=chunk_parser, args=(
q_test, q_write_test, 1 << 16, 8192)))
# Write to output files
procs.append(mp.Process(target=chunk_writer,
args=(q_write_train, NameSrc('train_'))))
procs.append(mp.Process(target=chunk_writer,
args=(q_write_test, NameSrc('test_'))))
# Start all the child processes running.
for p in procs:
p.start()
# Wait for everything to finish.
for p in procs:
p.join()
# All done!
if __name__ == "__main__":
mp.set_start_method('spawn')
main(sys.argv[1:])
| 31.397887
| 90
| 0.592688
|
794bec2aa1fccaf679bbe8ed61194a234159bee0
| 885
|
py
|
Python
|
test_threads.py
|
jtessier-cit/soft8023-darts
|
51fb480b4c21a0cf49ffe1ef6e8e30855aee744e
|
[
"MIT"
] | null | null | null |
test_threads.py
|
jtessier-cit/soft8023-darts
|
51fb480b4c21a0cf49ffe1ef6e8e30855aee744e
|
[
"MIT"
] | null | null | null |
test_threads.py
|
jtessier-cit/soft8023-darts
|
51fb480b4c21a0cf49ffe1ef6e8e30855aee744e
|
[
"MIT"
] | null | null | null |
from dao import darts_match_dao_thread_safe_singleton, darts_match_dao
from domain import darts_match
import threading
class AddThread(threading.Thread):
def __init__(self, thread_id, name, match):
threading.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.match = match
self.dao = darts_match_dao_thread_safe_singleton.DartsMatchDao.get_instance()
# self.dao = darts_match_dao.DartsMatchDao()
def run(self):
print("Starting " + self.name)
self.dao.add(self.match)
print("Exiting " + self.name)
dart_match1 = darts_match.DartsMatch('501', 'Dupe', 'Dup2')
thread1 = AddThread(1, "Thread-1", dart_match1)
dart_match2 = darts_match.DartsMatch('501', 'Dupe', 'Dup2')
thread2 = AddThread(2, "Thread-2", dart_match2)
thread1.start()
thread2.start()
print("Exiting main thread.")
| 27.65625
| 85
| 0.699435
|
794bec8e6a98e039ea7252cb2b235f866bdc0874
| 3,566
|
py
|
Python
|
ultracart/models/item_enrollment123.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | 1
|
2018-03-15T16:56:23.000Z
|
2018-03-15T16:56:23.000Z
|
ultracart/models/item_enrollment123.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | null | null | null |
ultracart/models/item_enrollment123.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ItemEnrollment123(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'enrollment123_product_code': 'str'
}
attribute_map = {
'enrollment123_product_code': 'enrollment123_product_code'
}
def __init__(self, enrollment123_product_code=None): # noqa: E501
"""ItemEnrollment123 - a model defined in Swagger""" # noqa: E501
self._enrollment123_product_code = None
self.discriminator = None
if enrollment123_product_code is not None:
self.enrollment123_product_code = enrollment123_product_code
@property
def enrollment123_product_code(self):
"""Gets the enrollment123_product_code of this ItemEnrollment123. # noqa: E501
Enrolment 123 product code # noqa: E501
:return: The enrollment123_product_code of this ItemEnrollment123. # noqa: E501
:rtype: str
"""
return self._enrollment123_product_code
@enrollment123_product_code.setter
def enrollment123_product_code(self, enrollment123_product_code):
"""Sets the enrollment123_product_code of this ItemEnrollment123.
Enrolment 123 product code # noqa: E501
:param enrollment123_product_code: The enrollment123_product_code of this ItemEnrollment123. # noqa: E501
:type: str
"""
self._enrollment123_product_code = enrollment123_product_code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ItemEnrollment123, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ItemEnrollment123):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.220339
| 114
| 0.606282
|
794bed8686d76852ee47f464690d9a920b98e8a1
| 7,680
|
py
|
Python
|
SVHN/SVHN_recognizer_single_digit.py
|
leoagneau/Bib_Racer
|
83c90bb3177ca13a78bee3ff0e800fbf0dd8484e
|
[
"MIT"
] | 2
|
2020-04-26T23:13:51.000Z
|
2020-04-27T14:40:13.000Z
|
SVHN/SVHN_recognizer_single_digit.py
|
leoagneau/Bib_Racer
|
83c90bb3177ca13a78bee3ff0e800fbf0dd8484e
|
[
"MIT"
] | null | null | null |
SVHN/SVHN_recognizer_single_digit.py
|
leoagneau/Bib_Racer
|
83c90bb3177ca13a78bee3ff0e800fbf0dd8484e
|
[
"MIT"
] | null | null | null |
############################
### Prepare SVHN dataset ###
############################
import os
import numpy as np
import h5py
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision
from torchvision import datasets, models, transforms as T
import time
import copy
BS=16
path = os.getcwd()
### To read digitStruct.mat and image files separately
# img_path = os.path.join(path, 'SVHN', 'images')
# digi_file = os.path.join(img_path, 'digitStruct.mat')
# f = h5py.File(digi_file, 'r')
# names = f['digitStruct/name']
# bboxs = f['digitStruct/bbox']
### Get filename from index
# https://stackoverflow.com/questions/41176258/h5py-access-data-in-datasets-in-svhn
# https://stackoverflow.com/a/56388672/3243870
def get_img_name(f, idx=0):
img_name = ''.join(map(chr, f[names[idx][0]][()].flatten()))
return(img_name)
### Get bounding box from index
# elements in bbox struct: height, width, top, left, label
bbox_prop = ['height', 'left', 'top', 'width', 'label']
def get_img_boxes(f, idx=0):
"""
get the 'height', 'left', 'top', 'width', 'label' of bounding boxes of an image
:param f: h5py.File
:param idx: index of the image
:return: dictionary
"""
meta = { key : [] for key in bbox_prop}
box = f[bboxs[idx][0]]
for key in box.keys():
if box[key].shape[0] == 1:
meta[key].append(int(box[key][0][0]))
else:
for i in range(box[key].shape[0]):
meta[key].append(int(f[box[key][i][0]][()].item()))
return meta
import matplotlib.pyplot as plt
def imshow(inp, title=None):
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001)
plt.show()
def save_check_pt(epoch, best_model_wts, optimizer, best_acc, PATH):
# https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training
torch.save({
'epoch': epoch,
'model_state_dict': best_model_wts,
'optimizer_state_dict': optimizer.state_dict(),
'best_acc': best_acc
}, PATH)
def train_model(dataloaders, dataset_sizes, device, model, criterion, optimizer, scheduler, num_epochs=25, test=False):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'test']:
if (phase == 'train'):
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
# Iterate over data
iter = 0
itertotal = dataset_sizes[phase] //BS + 1
epoch_start_T = time.time()
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward pass
# Track history only in training phase
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# Backward + optimize only if in training phase
if (phase == 'train'):
loss.backward()
optimizer.step()
# Statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
iter += 1
if ((iter - 1) % 100 == 0):
epoch_elapsed = time.time() - epoch_start_T
print('{}/{}, time elapsed: {:0f}m {:0f}s'.format(iter, itertotal, epoch_elapsed // 60,
epoch_elapsed % 60))
if (test and iter == 3):
print(iter)
break
if (phase == 'train'):
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# Deep copy the model
if (phase == 'test' and epoch_acc > best_acc):
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if (((epoch + 1) % 10) == 0):
save_check_pt(epoch, best_model_wts, optimizer, best_acc, os.path.join(path, 'densenet_weights_{}.pth'.format(epoch+1)))
print()
time_elapsed = time.time() - since
print('Training completes in {:0f}m {:0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# Load best model weights
model.load_state_dict(best_model_wts)
return(model)
def main():
# To deal with num_workers>1 in DataLoader, which is needed only in Windows environment
# https://github.com/pytorch/pytorch/issues/11139
# https://stackoverflow.com/questions/33038209/python-multiprocessing-fails-for-example-code
import multiprocessing
multiprocessing.freeze_support()
data_transforms = {
'train': T.Compose([
T.RandomResizedCrop(224),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': T.Compose([
T.RandomResizedCrop(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
}
SVHN_path = os.path.join(path, 'mat')
SVHN = {x:datasets.SVHN(SVHN_path, split=x, transform=data_transforms[x]) for x in ['train', 'test']}
dataloaders = {x: DataLoader(SVHN[x], batch_size=BS, shuffle=True, num_workers=4) for x in ['train', 'test']}
dataset_sizes = {x: len(SVHN[x]) for x in ['train', 'test']}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
print('Device:', device)
# # Get a batch of training data
# inputs, classes = next(iter(dataloaders['train']))
# # Make a grid from batch
# out = torchvision.utils.make_grid(inputs)
# imshow(out, title=[x for x in classes])
########################
### Prepare DenseNet ###
########################
densenet = models.densenet161(pretrained=True)
num_ftrs = densenet.classifier.in_features
densenet.classifier = nn.Linear(num_ftrs, 10)
densenet = densenet.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(densenet.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
densenet = train_model(dataloaders, dataset_sizes, device, densenet, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=1, test=True)
torch.save(densenet.state_dict(), os.path.join(path, 'densenet_weights_final.pth'))
if __name__ == "__main__":
main()
| 34.909091
| 142
| 0.584635
|
794bee2bad59e798cd3ccc978182aee0bedfcd3c
| 1,955
|
py
|
Python
|
tests/test_card.py
|
shyam81295/UnoGame
|
1938408b4fce791048d283b898f2a46b763a5449
|
[
"Apache-2.0"
] | null | null | null |
tests/test_card.py
|
shyam81295/UnoGame
|
1938408b4fce791048d283b898f2a46b763a5449
|
[
"Apache-2.0"
] | 13
|
2020-06-26T21:03:52.000Z
|
2020-07-23T22:44:30.000Z
|
tests/test_card.py
|
shyam81295/UnoGame
|
1938408b4fce791048d283b898f2a46b763a5449
|
[
"Apache-2.0"
] | null | null | null |
# Filename: tests/test_card.py
# Description: Tests class 'Card'
#
# Copyright (C) 2020 Shyam Singh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
from Card import Card
from Exceptions import InvalidInstantiationError
import constant
if sys.platform == "win32":
pwd = os.getenv("PYTHONPATH")
else:
pwd = os.getenv("PWD")
sys.path.append(pwd)
class TestCard(unittest.TestCase):
def setUp(self):
pass
def test_card_initialise(self):
card1 = Card(
color=constant.COLOR_WILD, cardtype=constant.WILD_PLUS4_CARD
)
self.assertEqual(card1.cardtype, constant.WILD_PLUS4_CARD)
self.assertEqual(card1.color, constant.COLOR_WILD)
card2 = Card(color=constant.COLOR4, cardtype=constant.SKIP_CARD)
self.assertEqual(card2.cardtype, constant.SKIP_CARD)
self.assertEqual(card2.color, constant.COLOR4)
def test_card_initialise_failed(self):
self.assertRaises(InvalidInstantiationError, Card)
self.assertRaises(
InvalidInstantiationError,
Card,
cardtype=constant.WILD_PLUS4_CARD,
color=constant.COLOR4,
)
self.assertRaises(
InvalidInstantiationError,
Card,
cardtype=constant.SKIP_CARD,
color=constant.COLOR_WILD,
)
if __name__ == "__main__":
unittest.main()
| 28.75
| 76
| 0.681841
|
794bee6e16cd788033b20f63a79ea4d26f335db9
| 2,929
|
py
|
Python
|
src/engine/SCons/Tool/gnulink.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | 1
|
2019-09-18T06:37:02.000Z
|
2019-09-18T06:37:02.000Z
|
src/engine/SCons/Tool/gnulink.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
src/engine/SCons/Tool/gnulink.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
"""SCons.Tool.gnulink
Tool-specific initialization for the gnu linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gnulink.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import SCons.Util
import SCons.Tool
import os
import sys
import re
import link
def generate(env):
"""Add Builders and construction variables for gnulink to an Environment."""
link.generate(env)
if env['PLATFORM'] == 'hpux':
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared -fPIC')
# __RPATH is set to $_RPATH in the platform specification if that
# platform supports it.
env['RPATHPREFIX'] = '-Wl,-rpath='
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
# OpenBSD doesn't usually use SONAME for libraries
use_soname = not sys.platform.startswith('openbsd')
link._setup_versioned_lib_variables(env, tool = 'gnulink', use_soname = use_soname)
env['LINKCALLBACKS'] = link._versioned_lib_callbacks()
# For backward-compatibility with older SCons versions
env['SHLIBVERSIONFLAGS'] = SCons.Util.CLVar('-Wl,-Bsymbolic')
def exists(env):
# TODO: sync with link.smart_link() to choose a linker
linkers = { 'CXX': ['g++'], 'CC': ['gcc'] }
alltools = []
for langvar, linktools in linkers.items():
if langvar in env: # use CC over CXX when user specified CC but not CXX
return SCons.Tool.FindTool(linktools, env)
alltools.extend(linktools)
return SCons.Tool.FindTool(alltools, env) # find CXX or CC
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.160494
| 106
| 0.726869
|
794bee899ab9e4ca0b4c4e862b8b74480a72e404
| 155
|
py
|
Python
|
src/app/schemas/token.py
|
FelisCatusKR/DBMS-term-project
|
bdcf3671b7189df934552df6cfd45464e3b858f3
|
[
"MIT"
] | null | null | null |
src/app/schemas/token.py
|
FelisCatusKR/DBMS-term-project
|
bdcf3671b7189df934552df6cfd45464e3b858f3
|
[
"MIT"
] | null | null | null |
src/app/schemas/token.py
|
FelisCatusKR/DBMS-term-project
|
bdcf3671b7189df934552df6cfd45464e3b858f3
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
class Token(BaseModel):
access_token: str
token_type: str
class TokenPayload(BaseModel):
user_id: int = None
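# Hedged usage sketch:
#     Token(access_token="eyJ...", token_type="bearer")
#     TokenPayload(user_id=42)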
| 15.5
| 30
| 0.735484
|
794beeec3742ad891974929129f524a5afd5b2ae
| 3,708
|
py
|
Python
|
convs-v4.py
|
latot/SubsWorker
|
1733298ab584a85171e5365480b1dcb3656791fb
|
[
"MIT"
] | null | null | null |
convs-v4.py
|
latot/SubsWorker
|
1733298ab584a85171e5365480b1dcb3656791fb
|
[
"MIT"
] | null | null | null |
convs-v4.py
|
latot/SubsWorker
|
1733298ab584a85171e5365480b1dcb3656791fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import magic
import enzyme
from subprocess import Popen, PIPE
import tempfile
import os.path
import pysubs2
import json
def check_file(file_):
if not os.path.isfile(file_):
raise NameError("the file don't exist")
def read_mkv(file_):
with open(file_, 'rb') as f:
return enzyme.MKV(f)
def check_sub(data):
if len(data.subtitle_tracks) == 0:
raise NameError("this file don't have any subtitle")
def execute(command):
p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
def read_file(file_):
    t = tempfile.NamedTemporaryFile()
    c, o, e = execute('LANG="en_US.utf8" mkvmerge -J "{}" "{}"'.format(t.name, file_))
    # mkvmerge -J emits the container description as JSON on stdout
    return json.loads(o)
def get_mime(file_):
tmp = magic.from_file(file_, mime=True).decode('utf-8')
if tmp == "text/plain":
code, out, error = execute('mkvmerge -i "{}"'.format(file_))
tmp = str(out).split("(")[-1].split(")")[0]
return tmp
def get_subs(file_):
ret = []
mime = get_mime(file_)
if mime == "video/x-matroska":
mkv = read_mkv(file_)
check_sub(mkv)
for k in mkv.subtitle_tracks:
t = tempfile.NamedTemporaryFile()
code, out, error = execute('mkvextract tracks "{}" {}:"{}"'.format(file_, k.number - 1, t.name))
if code != 0: raise NameError("error extracting data from {}:\n{}".format(file_, error))
ret.append([k.number - 1, t])
elif mime in ("S_TEXT/ASS", "S_TEXT/SSA", "S_TEXT/SRT", "SubRip/SRT"): #"S_HDMV/PGS"
ret.append([0, file_])
else:
raise NameError("error, format not supported")
return ret
def close_subs(list):
for i in list:
i[1].close()
class one_sub:
def __init__(self, id_, file_, codec_):
self.id = id_
self.file = file_
self.codec_id = codec_
class super_subs:
def __setitem__(self, key, value):
self.master[key] = value
def __getitem__(self, key):
return self.master[key]
def __init__(self, file_):
check_file(file_)
self.dom = file_
self.mime = get_mime(file_)
self.master = []
if self.mime == "video/x-matroska":
mkv = read_mkv(file_)
check_sub(mkv)
for k in mkv.subtitle_tracks:
t = tempfile.NamedTemporaryFile()
code, out, error = execute('mkvextract tracks "{}" {}:"{}"'.format(file_, k.number - 1, t.name))
if code != 0: raise NameError("error extracting data from {}:\n{}".format(file_, error))
self.master.append(one_sub(k.number - 1, t, k.codec_id))
elif self.mime in ("S_TEXT/ASS", "S_TEXT/SSA", "S_TEXT/SRT", "SubRip/SRT"): #"S_HDMV/PGS"
self.master.append(one_sub(0, file_, self.mime))
else:
raise NameError('error, "{}" format not supported'.format(self.mime))
import argparse
parser = argparse.ArgumentParser(
description='Sync subs with others',
)
parser.add_argument('os', help='subtitle to be synced')
parser.add_argument('ns', help='subtitle with right timing')
parser.add_argument('o', help='output file, without extension')
parser.add_argument('-v', '--video', dest='v', default=False, help="file with high video")
args = parser.parse_args()
check_file(args.os)
check_file(args.ns)
mos = get_subs(args.os)
mns = get_subs(args.ns)
##for now use the first
sos = pysubs2.load(mos[0][1].name)
sns = pysubs2.load(mns[0][1].name)
diff = sns[0].start - sos[0].start
if abs(diff) > 2000:
print("First File:")
for i in range(0, 20):
print("{} - {}".format(i, sos[i].text.encode("utf-8")))
print("\nSecond File:")
for i in range(0, 20):
print("{} - {}".format(i, sns[i].text.encode("utf-8")))
    i1 = int(input('Line for first file:'))
    i2 = int(input('Line for second file:'))
diff = sns[i2].start - sos[i1].start
sos.shift(ms=diff)
sos.save("{}.{}".format(args.o, sos.format))
close_subs(mos)
close_subs(mns)
| 28.090909
| 100
| 0.6726
|
794befeabf488764c28a26fafbe24b342df53989
| 324
|
py
|
Python
|
jupyterworkflow/tests/test_data.py
|
vedraiyani/MyJupyterWorkflow
|
cc97c3d6166e7d917099fdcd9e227371c11c5521
|
[
"MIT"
] | null | null | null |
jupyterworkflow/tests/test_data.py
|
vedraiyani/MyJupyterWorkflow
|
cc97c3d6166e7d917099fdcd9e227371c11c5521
|
[
"MIT"
] | null | null | null |
jupyterworkflow/tests/test_data.py
|
vedraiyani/MyJupyterWorkflow
|
cc97c3d6166e7d917099fdcd9e227371c11c5521
|
[
"MIT"
] | null | null | null |
# python -m pytest jupyterworkflow
from jupyterworkflow.data import get_fremont_data
import pandas as pd
import numpy as np
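# Sanity checks on the Fremont bridge dataset: expected columns, a DatetimeIndex,
# and hourly samples (24 distinct times of day).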
def test_fremont_data():
    df = get_fremont_data()
    assert all(df.columns == ['East', 'West', 'Total'])
    assert isinstance(df.index, pd.DatetimeIndex)
    assert len(np.unique(df.index.time)) == 24
| 29.454545
| 53
| 0.743827
|
794bf0ede7b16152b7d5828d2bbab0ad7b68951d
| 867
|
py
|
Python
|
integration/dev/users/management/commands/createsuperuser_dev.py
|
AlexandraAlter/django-cornucopia
|
1681ccbc5e98736e61f6afb1b78931dda9547486
|
[
"MIT"
] | null | null | null |
integration/dev/users/management/commands/createsuperuser_dev.py
|
AlexandraAlter/django-cornucopia
|
1681ccbc5e98736e61f6afb1b78931dda9547486
|
[
"MIT"
] | null | null | null |
integration/dev/users/management/commands/createsuperuser_dev.py
|
AlexandraAlter/django-cornucopia
|
1681ccbc5e98736e61f6afb1b78931dda9547486
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.contrib.auth.hashers import make_password
from users.models import User
class Command(BaseCommand):
help = 'Sets up a default superuser for development'
def add_arguments(self, parser):
pass
    def handle(self, *args, **options):
        user = settings.SUPERUSER_USERNAME
        if not User.objects.filter(username=user).exists():
            password = make_password(settings.SUPERUSER_PASSWORD)
            admin = User(username=user, password=password, is_staff=True, is_superuser=True)
            admin.save()
            self.stdout.write(self.style.SUCCESS('Successfully set up user "%s"' % user))
        else:
            self.stdout.write(self.style.SUCCESS('Already set up user "%s"' % user))
| 36.125
| 92
| 0.693195
|
794bf18c4d832d03e9150719799fe6567d20db2d
| 1,315
|
py
|
Python
|
src/Filtering/ImageIntensity/ConvertRGBImageToGrayscaleImage/Code.py
|
aaron-bray/ITKExamples
|
7ad0d445bb0139cf010e0e1cc906dccce97dda7c
|
[
"Apache-2.0"
] | null | null | null |
src/Filtering/ImageIntensity/ConvertRGBImageToGrayscaleImage/Code.py
|
aaron-bray/ITKExamples
|
7ad0d445bb0139cf010e0e1cc906dccce97dda7c
|
[
"Apache-2.0"
] | null | null | null |
src/Filtering/ImageIntensity/ConvertRGBImageToGrayscaleImage/Code.py
|
aaron-bray/ITKExamples
|
7ad0d445bb0139cf010e0e1cc906dccce97dda7c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
if len(sys.argv) != 3:
print('Usage: ' + sys.argv[0] + ' <InputFileName> <OutputFileName>')
sys.exit(1)
inputFileName = sys.argv[1]
outputFileName = sys.argv[2]
Dimension = 2
ComponentType = itk.UC
InputPixelType = itk.RGBPixel[ComponentType]
InputImageType = itk.Image[InputPixelType, Dimension]
OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, Dimension]
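# Read the RGB image, convert it to a single-channel luminance image, and write the result.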
reader = itk.ImageFileReader[InputImageType].New()
reader.SetFileName(inputFileName)
rgbFilter = itk.RGBToLuminanceImageFilter.New(reader)
writer = itk.ImageFileWriter[OutputImageType].New()
writer.SetFileName(outputFileName)
writer.SetInput(rgbFilter.GetOutput())
writer.Update()
| 28.586957
| 74
| 0.765779
|
794bf2132d476804c7448e1098e5b79069fedd59
| 8,605
|
py
|
Python
|
domainbed/scripts/sweep.py
|
alexrame/domainbedresearch
|
6255da9aedf4584115324f8cf3a45be6e9004602
|
[
"MIT"
] | 1
|
2022-03-15T16:30:14.000Z
|
2022-03-15T16:30:14.000Z
|
domainbed/scripts/sweep.py
|
alexrame/domainbedresearch
|
6255da9aedf4584115324f8cf3a45be6e9004602
|
[
"MIT"
] | null | null | null |
domainbed/scripts/sweep.py
|
alexrame/domainbedresearch
|
6255da9aedf4584115324f8cf3a45be6e9004602
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Run sweeps
"""
import argparse
import copy
import getpass
import hashlib
import json
import os
import random
import shutil
import time
import uuid
import numpy as np
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed.lib import misc
from domainbed import command_launchers
import tqdm
import shlex
class Job:
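    """A single training run: its arguments are hashed into an output directory and its status is tracked on disk."""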
NOT_LAUNCHED = 'Not launched'
INCOMPLETE = 'Incomplete'
DONE = 'Done'
def __init__(self, train_args, sweep_output_dir):
args_str = json.dumps(train_args, sort_keys=True)
args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
self.output_dir = os.path.join(sweep_output_dir, args_hash)
self.train_args = copy.deepcopy(train_args)
self.train_args['output_dir'] = self.output_dir
command = ['python', '-m', 'domainbed.scripts.train']
for k, v in sorted(self.train_args.items()):
if k == "hp":
for (name, value) in v:
command.append(f"--hp {name} {value}")
else:
if isinstance(v, list):
v = ' '.join([str(v_) for v_ in v])
elif isinstance(v, str):
v = shlex.quote(v)
command.append(f'--{k} {v}')
self.command_str = ' '.join(command)
print(self.command_str)
if os.path.exists(os.path.join(self.output_dir, 'done')):
self.state = Job.DONE
elif os.path.exists(self.output_dir):
self.state = Job.INCOMPLETE
else:
self.state = Job.NOT_LAUNCHED
def __str__(self):
job_info = (self.train_args['dataset'],
self.train_args['algorithm'],
self.train_args['test_envs'],
self.train_args['hparams_seed'])
return '{}: {} {}'.format(
self.state,
self.output_dir,
job_info)
@staticmethod
def launch(jobs, launcher_fn):
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
@staticmethod
def delete(jobs):
print('Deleting...')
for job in jobs:
shutil.rmtree(job.output_dir)
print(f'Deleted {len(jobs)} jobs!')
def all_test_env_combinations(n):
"""
For a dataset with n >= 3 envs, return all combinations of 1 and 2 test
envs.
"""
assert(n >= 3)
for i in range(n):
yield [i]
for j in range(i+1, n):
yield [i, j]
def make_args_list(
n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps, data_dir, task,
holdout_fraction, single_test_envs, hparams, test_envs, hp, sweep_id, seed
):
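    """Expand trials x datasets x algorithms x test envs x hparam seeds into per-run train-script argument dicts."""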
args_list = []
for trial_seed in range(n_trials):
trial_seed += seed
for dataset in dataset_names:
for algorithm in algorithms:
if test_envs is not None:
if not single_test_envs:
all_test_envs = [test_envs]
else:
all_test_envs = test_envs
elif single_test_envs:
all_test_envs = [
[i] for i in range(datasets.num_environments(dataset))]
else:
all_test_envs = all_test_env_combinations(
datasets.num_environments(dataset))
print(all_test_envs)
for _test_envs in all_test_envs:
for hparams_seed in range(n_hparams_from, n_hparams):
train_args = {}
train_args['dataset'] = dataset
train_args['algorithm'] = algorithm
train_args['test_envs'] = _test_envs
train_args['holdout_fraction'] = holdout_fraction
train_args['hparams_seed'] = hparams_seed
train_args['data_dir'] = data_dir
train_args['task'] = task
train_args["sweep_id"] = sweep_id
train_args['trial_seed'] = trial_seed
train_args['seed'] = misc.seed_hash(
dataset, algorithm, _test_envs, hparams_seed, trial_seed
)
if steps is not None:
train_args['steps'] = steps
if hparams is not None:
train_args['hparams'] = hparams
if hp is not None:
train_args["hp"] = hp
args_list.append(train_args)
return args_list
def ask_for_confirmation():
response = input('Are you sure? (y/n) ')
if not response.lower().strip()[:1] == "y":
print('Nevermind!')
exit(0)
DATASETS = [d for d in datasets.DATASETS if "Debug" not in d]
if __name__ == "__main__":
# --n_hparams 20 --n_trials 3
parser = argparse.ArgumentParser(description='Run a sweep')
parser.add_argument('command', choices=['launch', 'delete_incomplete'])
parser.add_argument('--datasets', nargs='+', type=str, default=DATASETS)
parser.add_argument('--algorithms', nargs='+', type=str, default=algorithms.ALGORITHMS)
parser.add_argument('--task', type=str, default="domain_generalization")
parser.add_argument('--n_hparams_from', type=int, default=0)
parser.add_argument('--n_hparams', type=int, default=20)
parser.add_argument('--output_dir', type=str, required=True)
parser.add_argument('--data_dir', type=str, default="default")
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n_trials', type=int, default=3)
parser.add_argument('--command_launcher', type=str,
default="local")
parser.add_argument('--steps', type=int, default=None)
parser.add_argument('--hparams', type=str, default=None)
parser.add_argument("--hp", nargs=2, action="append")
parser.add_argument('--holdout_fraction', type=float, default=0.2)
parser.add_argument('--single_test_envs', action='store_true')
parser.add_argument('--skip_confirmation', action='store_true')
parser.add_argument("--test_envs", default=None, nargs="+")
args = parser.parse_args()
if args.data_dir == "default":
if os.environ.get("DATAQUICK", "none") not in [None, "none"]:
args.data_dir = os.path.join(os.environ.get("DATAQUICK"), "dataplace/domainbed/")
else:
args.data_dir = os.path.join(
os.environ.get("DATA", ""),
"data/domainbed/")
args_list = make_args_list(
n_trials=args.n_trials,
dataset_names=args.datasets,
algorithms=args.algorithms,
n_hparams_from=args.n_hparams_from,
n_hparams=args.n_hparams,
steps=args.steps,
data_dir=args.data_dir,
task=args.task,
holdout_fraction=args.holdout_fraction,
single_test_envs=args.single_test_envs,
hparams=args.hparams,
test_envs=args.test_envs,
hp=args.hp,
sweep_id=os.path.split(args.output_dir)[-1],
seed=args.seed
)
jobs = [Job(train_args, args.output_dir) for train_args in args_list]
for job in jobs:
print(job)
print("{} jobs: {} done, {} incomplete, {} not launched.".format(
len(jobs),
len([j for j in jobs if j.state == Job.DONE]),
len([j for j in jobs if j.state == Job.INCOMPLETE]),
len([j for j in jobs if j.state == Job.NOT_LAUNCHED]))
)
if args.command == 'launch':
to_launch = [j for j in jobs if j.state in [Job.NOT_LAUNCHED, Job.INCOMPLETE]]
print(f'About to launch {len(to_launch)} jobs.')
if False and not args.skip_confirmation:
ask_for_confirmation()
launcher_fn = command_launchers.REGISTRY[args.command_launcher]
Job.launch(to_launch, launcher_fn)
elif args.command == 'delete_incomplete':
to_delete = [j for j in jobs if j.state == Job.INCOMPLETE]
print(f'About to delete {len(to_delete)} jobs.')
if not args.skip_confirmation:
ask_for_confirmation()
Job.delete(to_delete)
| 37.576419
| 94
| 0.587798
|
794bf29aacfee77d7e9541fcef74ccc27f85e0b4
| 2,771
|
py
|
Python
|
hello_python_source_py3/chapter 06/4/player.py
|
AnthonyBriggs/Python-101
|
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
|
[
"MIT"
] | 3
|
2017-08-02T23:40:55.000Z
|
2018-07-02T14:59:07.000Z
|
hello_python_source_py3/chapter 06/4/player.py
|
AnthonyBriggs/Python-101
|
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
|
[
"MIT"
] | null | null | null |
hello_python_source_py3/chapter 06/4/player.py
|
AnthonyBriggs/Python-101
|
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
|
[
"MIT"
] | null | null | null |
import shlex
class Player(object):
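    """The player: holds a location and inventory, reads commands, and dispatches verbs to handlers."""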
def __init__(self, location):
self.location = location
self.location.here.append(self)
self.playing = True
self.inventory = []
def get_input(self):
return input(">")
def inv(self, player, noun):
result = ["You have:"]
if self.inventory:
result += [x.name for x in self.inventory]
else:
result += ["nothing!"]
return result
def quit(self, player, noun):
self.playing = False
return ["bye bye!"]
actions = ['quit', 'inv', 'get', 'drop']
def get(self, player, noun):
return [noun + "? I can't see that here."]
def drop(self, player, noun):
return [noun + "? I don't have that!"]
def find_handler(self, verb, noun):
# Try and find the object
if noun != "":
object = [x for x in self.location.here + self.inventory
if x is not self and
x.name == noun and
verb in x.actions]
if len(object) > 0:
return getattr(object[0], verb)
# if that fails, look in location and self
if verb.lower() in self.actions:
return getattr(self, verb)
elif verb.lower() in self.location.actions:
return getattr(self.location, verb)
def process_input(self, input):
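        # Split the raw command into a verb and noun, then look up and call the matching handler.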
parts = shlex.split(input)
if len(parts) == 0:
return []
if len(parts) == 1:
parts.append("") # blank noun
verb = parts[0]
noun = ' '.join(parts[1:])
handler = self.find_handler(verb, noun)
if handler is None:
return [input+"? I don't know how to do that!"]
if '__call__' not in dir(handler):
return handler
return handler(self, noun)
if __name__ == '__main__':
import cave
empty_cave = cave.Cave(
"Empty Cave",
"A desolate, empty cave, waiting for someone to fill it.")
import item
sword = item.Item("sword", "A pointy sword.", empty_cave)
coin = item.Item("coin", "A shiny gold coin. "
"Your first piece of treasure!", empty_cave)
player = Player(empty_cave)
print(player.location.name)
print(player.location.description)
while player.playing:
input = player.get_input()
result = player.process_input(input)
print("\n".join(result))
"""
look self.look
inv self.inv
go north self.location.go
north self.location.go
look sword sword.look
get sword sword.get
kill orc orc.kill
"""
| 28.56701
| 68
| 0.528329
|
794bf3ddd5b3626d276f027aa805bffe4cd1a5c9
| 1,959
|
py
|
Python
|
community/cloud-foundation/templates/logsink/logsink.py
|
BedrockSolutions/deploymentmanager-samples
|
9042cc471817a4152645bc51a0e22badba129aff
|
[
"Apache-2.0"
] | null | null | null |
community/cloud-foundation/templates/logsink/logsink.py
|
BedrockSolutions/deploymentmanager-samples
|
9042cc471817a4152645bc51a0e22badba129aff
|
[
"Apache-2.0"
] | null | null | null |
community/cloud-foundation/templates/logsink/logsink.py
|
BedrockSolutions/deploymentmanager-samples
|
9042cc471817a4152645bc51a0e22badba129aff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This template creates a logsink (logging sink)."""
def generate_config(context):
""" Entry point for the deployment resources. """
project_id = context.env['project']
name = context.properties.get('name', context.env['name'])
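    # Build the sink destination URI from the destinationType and destinationName properties.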
if context.properties['destinationType'] == 'pubsub':
destination = 'pubsub.googleapis.com/projects/{}/topics/{}'.format(
project_id,
context.properties['destinationName']
)
elif context.properties['destinationType'] == 'storage':
destination = 'storage.googleapis.com/{}'.format(
context.properties['destinationName']
)
elif context.properties['destinationType'] == 'bigquery':
destination = 'bigquery.googleapis.com/projects/{}/datasets/{}'.format(
project_id,
context.properties['destinationName']
)
properties = {
'name': name,
'sink': name,
'destination': destination,
'uniqueWriterIdentity': context.properties['uniqueWriterIdentity']
}
sink_filter = context.properties.get('filter')
if sink_filter:
properties['filter'] = sink_filter
resources = [
{
'name': context.env['name'],
'type': 'logging.v2.sink',
'properties': properties
}
]
return {'resources': resources}
| 33.775862
| 79
| 0.651353
|
794bf4224fe090049f23c4520266dfda05c88510
| 6,347
|
py
|
Python
|
gob14_varios.py
|
dlunna/gob.selenium
|
9c663e78b880b3363731f6e6f5b6899a972ef7c0
|
[
"MIT"
] | null | null | null |
gob14_varios.py
|
dlunna/gob.selenium
|
9c663e78b880b3363731f6e6f5b6899a972ef7c0
|
[
"MIT"
] | null | null | null |
gob14_varios.py
|
dlunna/gob.selenium
|
9c663e78b880b3363731f6e6f5b6899a972ef7c0
|
[
"MIT"
] | null | null | null |
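# Bulk-load inventory records from a MySQL database into the SIRIT web
# application by driving Chrome with Selenium, filling one form per record.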
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# for the database connection
import mysql.connector
tiempo = 0.2
mydb = mysql.connector.connect(
host="sisinv.upp.edu.mx",
user="inventario2019",
passwd="1a2s3d4f.",
database="inventario2019",
)
# initializing the database cursor
mycursor = mydb.cursor()
# querying the database for all inventory records
mycursor.execute("SELECT * FROM inventario_variosmodel")
mycpu = mycursor.fetchall()
# Chrome driver
driver = webdriver.Chrome(executable_path='/var/www/pyfy/gob.selenium/chromedriver')
# Open the login page
driver.get('http://sirit.hidalgo.gob.mx/')
time.sleep(tiempo)
usuario = driver.find_element_by_id("email")
usuario.send_keys("mi_usuario_de_SIRIT")
usuario.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("password")
clave.send_keys("mi_contraseña_de_SIRIT")
clave.send_keys(Keys.ENTER)
clave = driver.find_element_by_id("login")
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
for row in mycpu:
print(row)
    ## Opening another window
driver.execute_script("window.open('');")
driver.switch_to.window(driver.window_handles[1])
driver.get("http://sirit.hidalgo.gob.mx/Inicio/HARDWARE/EQUIPO%20DE%20COMPUTO%20PERSONAL")
time.sleep(tiempo)
    ## Entering the record's information
clave = driver.find_element_by_id("personal_inventario")
clave.send_keys(row[1])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_tipo")
clave.send_keys(row[2])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_marca")
clave.send_keys(row[3])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_modelo")
clave.send_keys(row[4])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_procesador")
clave.send_keys(row[5])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("Velocidad_Procesador")
clave.send_keys(row[6])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_Ram")
clave.send_keys(row[7])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_SO")
clave.send_keys(row[8])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("Version_SO")
clave.send_keys(row[9])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_Almacenamiento")
clave.send_keys(row[10])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("puertos_USB")
clave.send_keys(row[11])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_arquitectura")
clave.send_keys(row[12])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("personal_licencia")
clave.send_keys("Si")
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
mensaje5 = row[14]
mensaje5a = mensaje5.find("Alambrica")
print("Alambrica :: ", mensaje5a)
if mensaje5a >= 0:
clave = driver.find_element_by_id("personal_conexion0")
clave.click()
time.sleep(tiempo)
#mensaje5 = row[14]
mensaje5a = mensaje5.find("Inalambrica")
print("Inalambrica :: ", mensaje5a)
if mensaje5a >= 0:
clave = driver.find_element_by_id("personal_conexion1")
clave.click()
time.sleep(tiempo)
    ## End of CPU section
    # -------------------------------------------------------------
    ## Start of monitor section
if row[15] != "NULL":
clave = driver.find_element_by_id("AddMonitor")
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("monitor_inventario")
clave.send_keys(row[15])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("marca_monitor")
clave.send_keys(row[16])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("modelo")
clave.send_keys(row[17])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("tamanio")
clave.send_keys(row[18])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("tipo_pantalla")
clave.send_keys(row[19])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
clave = driver.find_element_by_id("resolucion")
clave.send_keys(row[20])
clave.send_keys(Keys.ENTER)
time.sleep(tiempo)
mensaje5 = row[21]
mensaje5a = mensaje5.find("Display Port")
print("B/N :: ", mensaje5a)
if mensaje5a >= 0:
clave = driver.find_element_by_xpath("/html/body/div/div/div[3]/form/div[6]/div/div[16]/div/div[7]/label/div/label[1]/input")
clave.click()
time.sleep(tiempo)
mensaje5 = row[21]
mensaje5a = mensaje5.find("VGA")
print("Color :: ", mensaje5a)
if mensaje5a >= 0:
clave = driver.find_element_by_xpath("/html/body/div/div/div[3]/form/div[6]/div/div[16]/div/div[7]/label/div/label[2]/input")
clave.click()
time.sleep(tiempo)
mensaje5 = row[21]
mensaje5a = mensaje5.find("HDMI")
print("Color :: ", mensaje5a)
if mensaje5a >= 0:
clave = driver.find_element_by_xpath("/html/body/div/div/div[3]/form/div[6]/div/div[16]/div/div[7]/label/div/label[3]/input")
clave.click()
time.sleep(tiempo)
    ## End of monitor section
    # ------------------------- SAVING ----------------- #
buscar_por_xpath = driver.find_element_by_xpath("/html/body/div/div/div[3]/form/button")
#time.sleep(tiempo)
buscar_por_xpath.send_keys(Keys.ENTER)
time.sleep(5)
    # ------------------------- SAVING ----------------- #
    ## Closing the tab
driver.close()
driver.switch_to.window(driver.window_handles[0])
# End of the FOR loop
# Closing the browser window
driver.switch_to.window(driver.window_handles[0])
driver.close()
| 29.798122
| 137
| 0.661257
|
794bf5775d552c07d51b64d53d160c9d18cd0246
| 1,758
|
py
|
Python
|
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ReleaseETLJobRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ReleaseETLJobRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ReleaseETLJobRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
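# Request wrapper for the EMR "ReleaseETLJob" RPC (product Emr, API version 2016-04-08).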
class ReleaseETLJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ReleaseETLJob','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ReleaseId(self):
return self.get_query_params().get('ReleaseId')
def set_ReleaseId(self,ReleaseId):
self.add_query_param('ReleaseId',ReleaseId)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
| 35.877551
| 75
| 0.750853
|
794bf671abc67b55eecb3c7c3d67d805c6df8bfd
| 17,566
|
py
|
Python
|
homeassistant/components/homekit/type_media_players.py
|
brapp0111/home-assistant
|
a59484bc8eadc59bc8f1413e7ab268571d8f835e
|
[
"Apache-2.0"
] | 1
|
2021-03-05T17:29:45.000Z
|
2021-03-05T17:29:45.000Z
|
homeassistant/components/homekit/type_media_players.py
|
brapp0111/home-assistant
|
a59484bc8eadc59bc8f1413e7ab268571d8f835e
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/homekit/type_media_players.py
|
brapp0111/home-assistant
|
a59484bc8eadc59bc8f1413e7ab268571d8f835e
|
[
"Apache-2.0"
] | 1
|
2020-06-01T12:43:03.000Z
|
2020-06-01T12:43:03.000Z
|
"""Class to hold all media player accessories."""
import logging
from pyhap.const import CATEGORY_SWITCH, CATEGORY_TELEVISION
from homeassistant.components.media_player import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
SERVICE_SELECT_SOURCE,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNKNOWN,
)
from . import TYPES
from .accessories import HomeAccessory
from .const import (
CHAR_ACTIVE,
CHAR_ACTIVE_IDENTIFIER,
CHAR_CONFIGURED_NAME,
CHAR_CURRENT_VISIBILITY_STATE,
CHAR_IDENTIFIER,
CHAR_INPUT_SOURCE_TYPE,
CHAR_IS_CONFIGURED,
CHAR_MUTE,
CHAR_NAME,
CHAR_ON,
CHAR_REMOTE_KEY,
CHAR_SLEEP_DISCOVER_MODE,
CHAR_VOLUME,
CHAR_VOLUME_CONTROL_TYPE,
CHAR_VOLUME_SELECTOR,
CONF_FEATURE_LIST,
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
SERV_INPUT_SOURCE,
SERV_SWITCH,
SERV_TELEVISION,
SERV_TELEVISION_SPEAKER,
)
_LOGGER = logging.getLogger(__name__)
MEDIA_PLAYER_KEYS = {
# 0: "Rewind",
# 1: "FastForward",
# 2: "NextTrack",
# 3: "PreviousTrack",
# 4: "ArrowUp",
# 5: "ArrowDown",
# 6: "ArrowLeft",
# 7: "ArrowRight",
# 8: "Select",
# 9: "Back",
# 10: "Exit",
11: SERVICE_MEDIA_PLAY_PAUSE,
# 15: "Information",
}
MODE_FRIENDLY_NAME = {
FEATURE_ON_OFF: "Power",
FEATURE_PLAY_PAUSE: "Play/Pause",
FEATURE_PLAY_STOP: "Play/Stop",
FEATURE_TOGGLE_MUTE: "Mute",
}
@TYPES.register("MediaPlayer")
class MediaPlayer(HomeAccessory):
"""Generate a Media Player accessory."""
def __init__(self, *args):
"""Initialize a Switch accessory object."""
super().__init__(*args, category=CATEGORY_SWITCH)
state = self.hass.states.get(self.entity_id)
self._flag = {
FEATURE_ON_OFF: False,
FEATURE_PLAY_PAUSE: False,
FEATURE_PLAY_STOP: False,
FEATURE_TOGGLE_MUTE: False,
}
self.chars = {
FEATURE_ON_OFF: None,
FEATURE_PLAY_PAUSE: None,
FEATURE_PLAY_STOP: None,
FEATURE_TOGGLE_MUTE: None,
}
feature_list = self.config[CONF_FEATURE_LIST]
if FEATURE_ON_OFF in feature_list:
name = self.generate_service_name(FEATURE_ON_OFF)
serv_on_off = self.add_preload_service(SERV_SWITCH, CHAR_NAME)
serv_on_off.configure_char(CHAR_NAME, value=name)
self.chars[FEATURE_ON_OFF] = serv_on_off.configure_char(
CHAR_ON, value=False, setter_callback=self.set_on_off
)
if FEATURE_PLAY_PAUSE in feature_list:
name = self.generate_service_name(FEATURE_PLAY_PAUSE)
serv_play_pause = self.add_preload_service(SERV_SWITCH, CHAR_NAME)
serv_play_pause.configure_char(CHAR_NAME, value=name)
self.chars[FEATURE_PLAY_PAUSE] = serv_play_pause.configure_char(
CHAR_ON, value=False, setter_callback=self.set_play_pause
)
if FEATURE_PLAY_STOP in feature_list:
name = self.generate_service_name(FEATURE_PLAY_STOP)
serv_play_stop = self.add_preload_service(SERV_SWITCH, CHAR_NAME)
serv_play_stop.configure_char(CHAR_NAME, value=name)
self.chars[FEATURE_PLAY_STOP] = serv_play_stop.configure_char(
CHAR_ON, value=False, setter_callback=self.set_play_stop
)
if FEATURE_TOGGLE_MUTE in feature_list:
name = self.generate_service_name(FEATURE_TOGGLE_MUTE)
serv_toggle_mute = self.add_preload_service(SERV_SWITCH, CHAR_NAME)
serv_toggle_mute.configure_char(CHAR_NAME, value=name)
self.chars[FEATURE_TOGGLE_MUTE] = serv_toggle_mute.configure_char(
CHAR_ON, value=False, setter_callback=self.set_toggle_mute
)
self.update_state(state)
def generate_service_name(self, mode):
"""Generate name for individual service."""
return f"{self.display_name} {MODE_FRIENDLY_NAME[mode]}"
def set_on_off(self, value):
"""Move switch state to value if call came from HomeKit."""
_LOGGER.debug('%s: Set switch state for "on_off" to %s', self.entity_id, value)
self._flag[FEATURE_ON_OFF] = True
service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
params = {ATTR_ENTITY_ID: self.entity_id}
self.call_service(DOMAIN, service, params)
def set_play_pause(self, value):
"""Move switch state to value if call came from HomeKit."""
_LOGGER.debug(
'%s: Set switch state for "play_pause" to %s', self.entity_id, value
)
self._flag[FEATURE_PLAY_PAUSE] = True
service = SERVICE_MEDIA_PLAY if value else SERVICE_MEDIA_PAUSE
params = {ATTR_ENTITY_ID: self.entity_id}
self.call_service(DOMAIN, service, params)
def set_play_stop(self, value):
"""Move switch state to value if call came from HomeKit."""
_LOGGER.debug(
'%s: Set switch state for "play_stop" to %s', self.entity_id, value
)
self._flag[FEATURE_PLAY_STOP] = True
service = SERVICE_MEDIA_PLAY if value else SERVICE_MEDIA_STOP
params = {ATTR_ENTITY_ID: self.entity_id}
self.call_service(DOMAIN, service, params)
def set_toggle_mute(self, value):
"""Move switch state to value if call came from HomeKit."""
_LOGGER.debug(
'%s: Set switch state for "toggle_mute" to %s', self.entity_id, value
)
self._flag[FEATURE_TOGGLE_MUTE] = True
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_MEDIA_VOLUME_MUTED: value}
self.call_service(DOMAIN, SERVICE_VOLUME_MUTE, params)
def update_state(self, new_state):
"""Update switch state after state changed."""
current_state = new_state.state
if self.chars[FEATURE_ON_OFF]:
hk_state = current_state not in (
STATE_OFF,
STATE_UNKNOWN,
STATE_STANDBY,
"None",
)
if not self._flag[FEATURE_ON_OFF]:
_LOGGER.debug(
'%s: Set current state for "on_off" to %s', self.entity_id, hk_state
)
if self.chars[FEATURE_ON_OFF].value != hk_state:
self.chars[FEATURE_ON_OFF].set_value(hk_state)
self._flag[FEATURE_ON_OFF] = False
if self.chars[FEATURE_PLAY_PAUSE]:
hk_state = current_state == STATE_PLAYING
if not self._flag[FEATURE_PLAY_PAUSE]:
_LOGGER.debug(
'%s: Set current state for "play_pause" to %s',
self.entity_id,
hk_state,
)
if self.chars[FEATURE_PLAY_PAUSE].value != hk_state:
self.chars[FEATURE_PLAY_PAUSE].set_value(hk_state)
self._flag[FEATURE_PLAY_PAUSE] = False
if self.chars[FEATURE_PLAY_STOP]:
hk_state = current_state == STATE_PLAYING
if not self._flag[FEATURE_PLAY_STOP]:
_LOGGER.debug(
'%s: Set current state for "play_stop" to %s',
self.entity_id,
hk_state,
)
if self.chars[FEATURE_PLAY_STOP].value != hk_state:
self.chars[FEATURE_PLAY_STOP].set_value(hk_state)
self._flag[FEATURE_PLAY_STOP] = False
if self.chars[FEATURE_TOGGLE_MUTE]:
current_state = new_state.attributes.get(ATTR_MEDIA_VOLUME_MUTED)
if not self._flag[FEATURE_TOGGLE_MUTE]:
_LOGGER.debug(
'%s: Set current state for "toggle_mute" to %s',
self.entity_id,
current_state,
)
if self.chars[FEATURE_TOGGLE_MUTE].value != current_state:
self.chars[FEATURE_TOGGLE_MUTE].set_value(current_state)
self._flag[FEATURE_TOGGLE_MUTE] = False
@TYPES.register("TelevisionMediaPlayer")
class TelevisionMediaPlayer(HomeAccessory):
"""Generate a Television Media Player accessory."""
def __init__(self, *args):
"""Initialize a Switch accessory object."""
super().__init__(*args, category=CATEGORY_TELEVISION)
state = self.hass.states.get(self.entity_id)
self._flag = {
CHAR_ACTIVE: False,
CHAR_ACTIVE_IDENTIFIER: False,
CHAR_MUTE: False,
}
self.support_select_source = False
self.sources = []
# Add additional characteristics if volume or input selection supported
self.chars_tv = []
self.chars_speaker = []
features = self.hass.states.get(self.entity_id).attributes.get(
ATTR_SUPPORTED_FEATURES, 0
)
if features & (SUPPORT_PLAY | SUPPORT_PAUSE):
self.chars_tv.append(CHAR_REMOTE_KEY)
if features & SUPPORT_VOLUME_MUTE or features & SUPPORT_VOLUME_STEP:
self.chars_speaker.extend(
(CHAR_NAME, CHAR_ACTIVE, CHAR_VOLUME_CONTROL_TYPE, CHAR_VOLUME_SELECTOR)
)
if features & SUPPORT_VOLUME_SET:
self.chars_speaker.append(CHAR_VOLUME)
if features & SUPPORT_SELECT_SOURCE:
self.support_select_source = True
serv_tv = self.add_preload_service(SERV_TELEVISION, self.chars_tv)
self.set_primary_service(serv_tv)
serv_tv.configure_char(CHAR_CONFIGURED_NAME, value=self.display_name)
serv_tv.configure_char(CHAR_SLEEP_DISCOVER_MODE, value=True)
self.char_active = serv_tv.configure_char(
CHAR_ACTIVE, setter_callback=self.set_on_off
)
if CHAR_REMOTE_KEY in self.chars_tv:
self.char_remote_key = serv_tv.configure_char(
CHAR_REMOTE_KEY, setter_callback=self.set_remote_key
)
if CHAR_VOLUME_SELECTOR in self.chars_speaker:
serv_speaker = self.add_preload_service(
SERV_TELEVISION_SPEAKER, self.chars_speaker
)
serv_tv.add_linked_service(serv_speaker)
name = f"{self.display_name} Volume"
serv_speaker.configure_char(CHAR_NAME, value=name)
serv_speaker.configure_char(CHAR_ACTIVE, value=1)
self.char_mute = serv_speaker.configure_char(
CHAR_MUTE, value=False, setter_callback=self.set_mute
)
volume_control_type = 1 if CHAR_VOLUME in self.chars_speaker else 2
serv_speaker.configure_char(
CHAR_VOLUME_CONTROL_TYPE, value=volume_control_type
)
self.char_volume_selector = serv_speaker.configure_char(
CHAR_VOLUME_SELECTOR, setter_callback=self.set_volume_step
)
if CHAR_VOLUME in self.chars_speaker:
self.char_volume = serv_speaker.configure_char(
CHAR_VOLUME, setter_callback=self.set_volume
)
if self.support_select_source:
self.sources = self.hass.states.get(self.entity_id).attributes.get(
ATTR_INPUT_SOURCE_LIST, []
)
self.char_input_source = serv_tv.configure_char(
CHAR_ACTIVE_IDENTIFIER, setter_callback=self.set_input_source
)
for index, source in enumerate(self.sources):
serv_input = self.add_preload_service(
SERV_INPUT_SOURCE, [CHAR_IDENTIFIER, CHAR_NAME]
)
serv_tv.add_linked_service(serv_input)
serv_input.configure_char(CHAR_CONFIGURED_NAME, value=source)
serv_input.configure_char(CHAR_NAME, value=source)
serv_input.configure_char(CHAR_IDENTIFIER, value=index)
serv_input.configure_char(CHAR_IS_CONFIGURED, value=True)
input_type = 3 if "hdmi" in source.lower() else 0
serv_input.configure_char(CHAR_INPUT_SOURCE_TYPE, value=input_type)
serv_input.configure_char(CHAR_CURRENT_VISIBILITY_STATE, value=False)
_LOGGER.debug("%s: Added source %s.", self.entity_id, source)
self.update_state(state)
def set_on_off(self, value):
"""Move switch state to value if call came from HomeKit."""
_LOGGER.debug('%s: Set switch state for "on_off" to %s', self.entity_id, value)
self._flag[CHAR_ACTIVE] = True
service = SERVICE_TURN_ON if value else SERVICE_TURN_OFF
params = {ATTR_ENTITY_ID: self.entity_id}
self.call_service(DOMAIN, service, params)
def set_mute(self, value):
"""Move switch state to value if call came from HomeKit."""
_LOGGER.debug(
'%s: Set switch state for "toggle_mute" to %s', self.entity_id, value
)
self._flag[CHAR_MUTE] = True
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_MEDIA_VOLUME_MUTED: value}
self.call_service(DOMAIN, SERVICE_VOLUME_MUTE, params)
def set_volume(self, value):
"""Send volume step value if call came from HomeKit."""
_LOGGER.debug("%s: Set volume to %s", self.entity_id, value)
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_MEDIA_VOLUME_LEVEL: value}
self.call_service(DOMAIN, SERVICE_VOLUME_SET, params)
def set_volume_step(self, value):
"""Send volume step value if call came from HomeKit."""
_LOGGER.debug("%s: Step volume by %s", self.entity_id, value)
service = SERVICE_VOLUME_DOWN if value else SERVICE_VOLUME_UP
params = {ATTR_ENTITY_ID: self.entity_id}
self.call_service(DOMAIN, service, params)
def set_input_source(self, value):
"""Send input set value if call came from HomeKit."""
_LOGGER.debug("%s: Set current input to %s", self.entity_id, value)
source = self.sources[value]
self._flag[CHAR_ACTIVE_IDENTIFIER] = True
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_INPUT_SOURCE: source}
self.call_service(DOMAIN, SERVICE_SELECT_SOURCE, params)
def set_remote_key(self, value):
"""Send remote key value if call came from HomeKit."""
_LOGGER.debug("%s: Set remote key to %s", self.entity_id, value)
service = MEDIA_PLAYER_KEYS.get(value)
if service:
# Handle Play Pause
if service == SERVICE_MEDIA_PLAY_PAUSE:
state = self.hass.states.get(self.entity_id).state
if state in (STATE_PLAYING, STATE_PAUSED):
service = (
SERVICE_MEDIA_PLAY
if state == STATE_PAUSED
else SERVICE_MEDIA_PAUSE
)
params = {ATTR_ENTITY_ID: self.entity_id}
self.call_service(DOMAIN, service, params)
def update_state(self, new_state):
"""Update Television state after state changed."""
current_state = new_state.state
# Power state television
hk_state = 0
if current_state not in ("None", STATE_OFF, STATE_UNKNOWN):
hk_state = 1
if not self._flag[CHAR_ACTIVE]:
_LOGGER.debug(
"%s: Set current active state to %s", self.entity_id, hk_state
)
if self.char_active.value != hk_state:
self.char_active.set_value(hk_state)
self._flag[CHAR_ACTIVE] = False
# Set mute state
if CHAR_VOLUME_SELECTOR in self.chars_speaker:
current_mute_state = new_state.attributes.get(ATTR_MEDIA_VOLUME_MUTED)
if not self._flag[CHAR_MUTE]:
_LOGGER.debug(
"%s: Set current mute state to %s",
self.entity_id,
current_mute_state,
)
if self.char_mute.value != current_mute_state:
self.char_mute.set_value(current_mute_state)
self._flag[CHAR_MUTE] = False
# Set active input
if self.support_select_source:
source_name = new_state.attributes.get(ATTR_INPUT_SOURCE)
if self.sources and not self._flag[CHAR_ACTIVE_IDENTIFIER]:
_LOGGER.debug(
"%s: Set current input to %s", self.entity_id, source_name
)
if source_name in self.sources:
index = self.sources.index(source_name)
if self.char_input_source.value != index:
self.char_input_source.set_value(index)
else:
_LOGGER.warning(
"%s: Sources out of sync. Restart Home Assistant",
self.entity_id,
)
if self.char_input_source.value != 0:
self.char_input_source.set_value(0)
self._flag[CHAR_ACTIVE_IDENTIFIER] = False
| 38.862832
| 88
| 0.626267
|
794bf74f0b7d6a369fe3afa6bf5a3edd10b3887d
| 817
|
py
|
Python
|
app/core/tests/test_commands.py
|
lyx999000/jobfindingrecord-app-api
|
c74419164747c04bf28ba1358381854105555c10
|
[
"MIT"
] | null | null | null |
app/core/tests/test_commands.py
|
lyx999000/jobfindingrecord-app-api
|
c74419164747c04bf28ba1358381854105555c10
|
[
"MIT"
] | null | null | null |
app/core/tests/test_commands.py
|
lyx999000/jobfindingrecord-app-api
|
c74419164747c04bf28ba1358381854105555c10
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count,1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
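            # Simulate the database raising OperationalError five times before succeeding.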
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count,6)
| 35.521739
| 74
| 0.680539
|