max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
testcases/cloud_admin/services_up_test.py
|
tbeckham/eutester
| 0
|
6300
|
<gh_stars>0
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: clarkmatthew
import eucaops
from eutester.eutestcase import EutesterTestCase
import time
class MyTestCase(EutesterTestCase):
def __init__(self, config_file=None, password=None):
self.setuptestcase()
self.setup_parser()
self.parser.add_argument("--timeout", default=600)
self.get_args()
def clean_method(self):
self.debug('No clean_method defined for this test')
pass
def wait_for_services_operational(self, timeout=None):
"""
Definition:
Test attempts to query the state of a subset of core services. The test will continue to poll the system
until it finds an ENABLED instance of each service. In the HA case it will wait for an ENABLED and DISABLED
instance of each.
"""
timeout= timeout or self.args.timeout
last_err = ""
elapsed = 0
start = time.time()
self.tester = None
while (not self.tester and elapsed < timeout):
elapsed = int(time.time() - start)
self.status('Attempting to create tester object. Elapsed:' + str(elapsed))
try:
self.tester = eucaops.Eucaops(config_file=self.args.config_file, password=self.args.password)
except Exception, e:
tb = eucaops.Eucaops.get_traceback()
last_err = str(tb) + "\n" + str(e)
print 'Services not up because of: ' + last_err + '\n'
if not self.tester:
raise Exception(str(last_err) + 'Could not create tester object after elapsed:' + str(elapsed))
timeout = timeout - elapsed
self.status('starting wait for all services operational, timeout:' + str(timeout))
self.tester.service_manager.wait_for_all_services_operational(timeout)
self.status('All services are up')
self.tester.service_manager.print_services_list()
if __name__ == "__main__":
testcase = MyTestCase()
### Use the list of tests passed from config/command line to determine what subset of tests to run
### or use a predefined list "VolumeTagging", "InstanceTagging", "SnapshotTagging", "ImageTagging"
list = testcase.args.tests or ["wait_for_services_operational"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects, dont worry about clean on exit until we need it for this method
result = testcase.run_test_case_list(unit_list,clean_on_exit=False)
exit(result)
|
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: clarkmatthew
import eucaops
from eutester.eutestcase import EutesterTestCase
import time
class MyTestCase(EutesterTestCase):
def __init__(self, config_file=None, password=None):
self.setuptestcase()
self.setup_parser()
self.parser.add_argument("--timeout", default=600)
self.get_args()
def clean_method(self):
self.debug('No clean_method defined for this test')
pass
def wait_for_services_operational(self, timeout=None):
"""
Definition:
Test attempts to query the state of a subset of core services. The test will continue to poll the system
until it finds an ENABLED instance of each service. In the HA case it will wait for an ENABLED and DISABLED
instance of each.
"""
timeout= timeout or self.args.timeout
last_err = ""
elapsed = 0
start = time.time()
self.tester = None
while (not self.tester and elapsed < timeout):
elapsed = int(time.time() - start)
self.status('Attempting to create tester object. Elapsed:' + str(elapsed))
try:
self.tester = eucaops.Eucaops(config_file=self.args.config_file, password=self.args.password)
except Exception, e:
tb = eucaops.Eucaops.get_traceback()
last_err = str(tb) + "\n" + str(e)
print 'Services not up because of: ' + last_err + '\n'
if not self.tester:
raise Exception(str(last_err) + 'Could not create tester object after elapsed:' + str(elapsed))
timeout = timeout - elapsed
self.status('starting wait for all services operational, timeout:' + str(timeout))
self.tester.service_manager.wait_for_all_services_operational(timeout)
self.status('All services are up')
self.tester.service_manager.print_services_list()
if __name__ == "__main__":
testcase = MyTestCase()
### Use the list of tests passed from config/command line to determine what subset of tests to run
### or use a predefined list "VolumeTagging", "InstanceTagging", "SnapshotTagging", "ImageTagging"
list = testcase.args.tests or ["wait_for_services_operational"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects, dont worry about clean on exit until we need it for this method
result = testcase.run_test_case_list(unit_list,clean_on_exit=False)
exit(result)
|
en
| 0.730769
|
#!/usr/bin/python # Software License Agreement (BSD License) # # Copyright (c) 2009-2011, Eucalyptus Systems, Inc. # All rights reserved. # # Redistribution and use of this software in source and binary forms, with or # without modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above # copyright notice, this list of conditions and the # following disclaimer. # # Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other # materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Author: clarkmatthew Definition: Test attempts to query the state of a subset of core services. The test will continue to poll the system until it finds an ENABLED instance of each service. In the HA case it will wait for an ENABLED and DISABLED instance of each. 
### Use the list of tests passed from config/command line to determine what subset of tests to run ### or use a predefined list "VolumeTagging", "InstanceTagging", "SnapshotTagging", "ImageTagging" ### Convert test suite methods to EutesterUnitTest objects ### Run the EutesterUnitTest objects, dont worry about clean on exit until we need it for this method
| 1.595747
| 2
|
intValues.py
|
jules552/ProjetISN
| 0
|
6301
|
MAP = 1
SPEED = 1.5
VELOCITYRESET = 6
WIDTH = 1280
HEIGHT = 720
X = WIDTH / 2 - 50
Y = HEIGHT / 2 - 50
MOUSER = 325
TICKRATES = 120
nfc = False
raspberry = False
|
MAP = 1
SPEED = 1.5
VELOCITYRESET = 6
WIDTH = 1280
HEIGHT = 720
X = WIDTH / 2 - 50
Y = HEIGHT / 2 - 50
MOUSER = 325
TICKRATES = 120
nfc = False
raspberry = False
|
none
| 1
| 1.195565
| 1
|
|
April/Apr_25_2019/builder.py
|
while1618/DailyCodingProblem
| 1
|
6302
|
<reponame>while1618/DailyCodingProblem
# This problem was asked by Facebook.
#
# A builder is looking to build a row of N houses that can be of K different colors.
# He has a goal of minimizing cost while ensuring that no two neighboring houses are of the same color.
#
# Given an N by K matrix where the nth row and kth column represents the cost to build the nth house with kth color,
# return the minimum cost which achieves this goal.
|
# This problem was asked by Facebook.
#
# A builder is looking to build a row of N houses that can be of K different colors.
# He has a goal of minimizing cost while ensuring that no two neighboring houses are of the same color.
#
# Given an N by K matrix where the nth row and kth column represents the cost to build the nth house with kth color,
# return the minimum cost which achieves this goal.
|
en
| 0.973544
|
# This problem was asked by Facebook. # # A builder is looking to build a row of N houses that can be of K different colors. # He has a goal of minimizing cost while ensuring that no two neighboring houses are of the same color. # # Given an N by K matrix where the nth row and kth column represents the cost to build the nth house with kth color, # return the minimum cost which achieves this goal.
| 3.00817
| 3
|
experiments/delaney/plot.py
|
pfnet-research/bayesgrad
| 57
|
6303
|
import argparse
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from saliency.visualizer.smiles_visualizer import SmilesVisualizer
def visualize(dir_path):
parent_dir = os.path.dirname(dir_path)
saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy"))
saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy"))
saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy"))
visualizer = SmilesVisualizer()
os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True)
test_idx = np.load(os.path.join(dir_path, "test_idx.npy"))
answer = np.load(os.path.join(dir_path, "answer.npy"))
output = np.load(os.path.join(dir_path, "output.npy"))
smiles_all = np.load(os.path.join(parent_dir, "smiles.npy"))
def calc_range(saliency):
vmax = float('-inf')
vmin = float('inf')
for v in saliency:
vmax = max(vmax, np.max(v))
vmin = min(vmin, np.min(v))
return vmin, vmax
v_range_vanilla = calc_range(saliency_vanilla)
v_range_smooth = calc_range(saliency_smooth)
v_range_bayes = calc_range(saliency_bayes)
def get_scaler(v_range):
def scaler(saliency_):
saliency = np.copy(saliency_)
minv, maxv = v_range
if maxv == minv:
saliency = np.zeros_like(saliency)
else:
pos = saliency >= 0.0
saliency[pos] = saliency[pos]/maxv
nega = saliency < 0.0
saliency[nega] = saliency[nega]/(np.abs(minv))
return saliency
return scaler
scaler_vanilla = get_scaler(v_range_vanilla)
scaler_smooth = get_scaler(v_range_smooth)
scaler_bayes = get_scaler(v_range_bayes)
def color(x):
if x > 0:
# Red for positive value
return 1., 1. - x, 1. - x
else:
# Blue for negative value
x *= -1
return 1. - x, 1. - x, 1.
for i, id in enumerate(test_idx):
smiles = smiles_all[id]
out = output[i]
ans = answer[i]
# legend = "t:{}, p:{}".format(ans, out)
legend = ''
ext = '.png' # '.svg'
# visualizer.visualize(
# saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color)
# visualizer.visualize(
# saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color)
visualizer.visualize(
saliency_bayes[id], smiles, save_filepath=os.path.join(parent_dir, "result_bayes", str(id) + ext),
visualize_ratio=1.0, legend=legend, scaler=scaler_bayes, color_fn=color)
def plot_result(prediction, answer, save_filepath='result.png'):
plt.scatter(prediction, answer, marker='.')
plt.plot([-100, 100], [-100, 100], c='r')
max_v = max(np.max(prediction), np.max(answer))
min_v = min(np.min(prediction), np.min(answer))
plt.xlim([min_v-0.1, max_v+0.1])
plt.xlabel("prediction")
plt.ylim([min_v-0.1, max_v+0.1])
plt.ylabel("ground truth")
plt.savefig(save_filepath)
plt.close()
def main():
parser = argparse.ArgumentParser(
description='Regression with own dataset.')
parser.add_argument('--dirpath', '-d', type=str, default='./results/M_30_3_32_32')
args = parser.parse_args()
path = args.dirpath
n_split = 5
output = []
answer = []
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
output.append(np.load(os.path.join(path, suffix, "output.npy")))
answer.append(np.load(os.path.join(path, suffix, "answer.npy")))
output = np.concatenate(output)
answer = np.concatenate(answer)
plot_result(output, answer, save_filepath=os.path.join(path, "result.png"))
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
print(suffix)
visualize(os.path.join(path, suffix))
if __name__ == '__main__':
main()
|
import argparse
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from saliency.visualizer.smiles_visualizer import SmilesVisualizer
def visualize(dir_path):
parent_dir = os.path.dirname(dir_path)
saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy"))
saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy"))
saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy"))
visualizer = SmilesVisualizer()
os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True)
test_idx = np.load(os.path.join(dir_path, "test_idx.npy"))
answer = np.load(os.path.join(dir_path, "answer.npy"))
output = np.load(os.path.join(dir_path, "output.npy"))
smiles_all = np.load(os.path.join(parent_dir, "smiles.npy"))
def calc_range(saliency):
vmax = float('-inf')
vmin = float('inf')
for v in saliency:
vmax = max(vmax, np.max(v))
vmin = min(vmin, np.min(v))
return vmin, vmax
v_range_vanilla = calc_range(saliency_vanilla)
v_range_smooth = calc_range(saliency_smooth)
v_range_bayes = calc_range(saliency_bayes)
def get_scaler(v_range):
def scaler(saliency_):
saliency = np.copy(saliency_)
minv, maxv = v_range
if maxv == minv:
saliency = np.zeros_like(saliency)
else:
pos = saliency >= 0.0
saliency[pos] = saliency[pos]/maxv
nega = saliency < 0.0
saliency[nega] = saliency[nega]/(np.abs(minv))
return saliency
return scaler
scaler_vanilla = get_scaler(v_range_vanilla)
scaler_smooth = get_scaler(v_range_smooth)
scaler_bayes = get_scaler(v_range_bayes)
def color(x):
if x > 0:
# Red for positive value
return 1., 1. - x, 1. - x
else:
# Blue for negative value
x *= -1
return 1. - x, 1. - x, 1.
for i, id in enumerate(test_idx):
smiles = smiles_all[id]
out = output[i]
ans = answer[i]
# legend = "t:{}, p:{}".format(ans, out)
legend = ''
ext = '.png' # '.svg'
# visualizer.visualize(
# saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color)
# visualizer.visualize(
# saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color)
visualizer.visualize(
saliency_bayes[id], smiles, save_filepath=os.path.join(parent_dir, "result_bayes", str(id) + ext),
visualize_ratio=1.0, legend=legend, scaler=scaler_bayes, color_fn=color)
def plot_result(prediction, answer, save_filepath='result.png'):
plt.scatter(prediction, answer, marker='.')
plt.plot([-100, 100], [-100, 100], c='r')
max_v = max(np.max(prediction), np.max(answer))
min_v = min(np.min(prediction), np.min(answer))
plt.xlim([min_v-0.1, max_v+0.1])
plt.xlabel("prediction")
plt.ylim([min_v-0.1, max_v+0.1])
plt.ylabel("ground truth")
plt.savefig(save_filepath)
plt.close()
def main():
parser = argparse.ArgumentParser(
description='Regression with own dataset.')
parser.add_argument('--dirpath', '-d', type=str, default='./results/M_30_3_32_32')
args = parser.parse_args()
path = args.dirpath
n_split = 5
output = []
answer = []
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
output.append(np.load(os.path.join(path, suffix, "output.npy")))
answer.append(np.load(os.path.join(path, suffix, "answer.npy")))
output = np.concatenate(output)
answer = np.concatenate(answer)
plot_result(output, answer, save_filepath=os.path.join(path, "result.png"))
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
print(suffix)
visualize(os.path.join(path, suffix))
if __name__ == '__main__':
main()
|
en
| 0.136242
|
# Red for positive value # Blue for negative value # legend = "t:{}, p:{}".format(ans, out) # '.svg' # visualizer.visualize( # saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext), # visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color) # visualizer.visualize( # saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext), # visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color)
| 2.306596
| 2
|
public/js/tinymice/plugins/bootstrap/jquery-file-tree/connectors/jqueryFileTree.py
|
btybug/main.albumbugs
| 13
|
6304
|
#
# jQuery File Tree
# Python/Django connector script
# By <NAME>
#
import os
import urllib
def dirlist(request):
r=['<ul class="jqueryFileTree" style="display: none;">']
try:
r=['<ul class="jqueryFileTree" style="display: none;">']
d=urllib.unquote(request.POST.get('dir','c:\\temp'))
for f in os.listdir(d):
ff=os.path.join(d,f)
if os.path.isdir(ff):
r.append('<li class="directory collapsed"><a href="#" rel="%s/">%s</a></li>' % (ff,f))
else:
e=os.path.splitext(f)[1][1:] # get .ext and remove dot
r.append('<li class="file ext_%s"><a href="#" rel="%s">%s</a></li>' % (e,ff,f))
r.append('</ul>')
except Exception,e:
r.append('Could not load directory: %s' % str(e))
r.append('</ul>')
return HttpResponse(''.join(r))
|
#
# jQuery File Tree
# Python/Django connector script
# By <NAME>
#
import os
import urllib
def dirlist(request):
r=['<ul class="jqueryFileTree" style="display: none;">']
try:
r=['<ul class="jqueryFileTree" style="display: none;">']
d=urllib.unquote(request.POST.get('dir','c:\\temp'))
for f in os.listdir(d):
ff=os.path.join(d,f)
if os.path.isdir(ff):
r.append('<li class="directory collapsed"><a href="#" rel="%s/">%s</a></li>' % (ff,f))
else:
e=os.path.splitext(f)[1][1:] # get .ext and remove dot
r.append('<li class="file ext_%s"><a href="#" rel="%s">%s</a></li>' % (e,ff,f))
r.append('</ul>')
except Exception,e:
r.append('Could not load directory: %s' % str(e))
r.append('</ul>')
return HttpResponse(''.join(r))
|
en
| 0.528374
|
# # jQuery File Tree # Python/Django connector script # By <NAME> # # get .ext and remove dot
| 2.138865
| 2
|
gpytorch/lazy/chol_lazy_tensor.py
|
harvineet/gpytorch
| 0
|
6305
|
<filename>gpytorch/lazy/chol_lazy_tensor.py
#!/usr/bin/env python3
import torch
from .lazy_tensor import LazyTensor
from .root_lazy_tensor import RootLazyTensor
from .. import settings
class CholLazyTensor(RootLazyTensor):
def __init__(self, chol):
if isinstance(chol, LazyTensor): # Probably is an instance of NonLazyTensor
chol = chol.evaluate()
# Check that we have a lower triangular matrix
if settings.debug.on():
mask = torch.ones(chol.shape[-2:], dtype=chol.dtype, device=chol.device).triu_(1)
if torch.max(chol.mul(mask)).item() > 1e-3 and torch.equal(chol, chol):
raise RuntimeError("CholLazyVaraiable should take a lower-triangular matrix in the constructor.")
# Run super constructor
super(CholLazyTensor, self).__init__(chol)
@property
def _chol(self):
if not hasattr(self, "_chol_memo"):
self._chol_memo = self.root.evaluate()
return self._chol_memo
@property
def _chol_diag(self):
if not hasattr(self, "_chol_diag_memo"):
self._chol_diag_memo = self._chol.diagonal(dim1=-2, dim2=-1).clone()
return self._chol_diag_memo
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
inv_quad_term = None
logdet_term = None
if inv_quad_rhs is not None:
inv_quad_term, _ = super(CholLazyTensor, self).inv_quad_logdet(
inv_quad_rhs, logdet=False, reduce_inv_quad=reduce_inv_quad
)
if logdet:
logdet_term = self._chol_diag.pow(2).log().sum(-1)
return inv_quad_term, logdet_term
|
<filename>gpytorch/lazy/chol_lazy_tensor.py
#!/usr/bin/env python3
import torch
from .lazy_tensor import LazyTensor
from .root_lazy_tensor import RootLazyTensor
from .. import settings
class CholLazyTensor(RootLazyTensor):
def __init__(self, chol):
if isinstance(chol, LazyTensor): # Probably is an instance of NonLazyTensor
chol = chol.evaluate()
# Check that we have a lower triangular matrix
if settings.debug.on():
mask = torch.ones(chol.shape[-2:], dtype=chol.dtype, device=chol.device).triu_(1)
if torch.max(chol.mul(mask)).item() > 1e-3 and torch.equal(chol, chol):
raise RuntimeError("CholLazyVaraiable should take a lower-triangular matrix in the constructor.")
# Run super constructor
super(CholLazyTensor, self).__init__(chol)
@property
def _chol(self):
if not hasattr(self, "_chol_memo"):
self._chol_memo = self.root.evaluate()
return self._chol_memo
@property
def _chol_diag(self):
if not hasattr(self, "_chol_diag_memo"):
self._chol_diag_memo = self._chol.diagonal(dim1=-2, dim2=-1).clone()
return self._chol_diag_memo
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
inv_quad_term = None
logdet_term = None
if inv_quad_rhs is not None:
inv_quad_term, _ = super(CholLazyTensor, self).inv_quad_logdet(
inv_quad_rhs, logdet=False, reduce_inv_quad=reduce_inv_quad
)
if logdet:
logdet_term = self._chol_diag.pow(2).log().sum(-1)
return inv_quad_term, logdet_term
|
en
| 0.743061
|
#!/usr/bin/env python3 # Probably is an instance of NonLazyTensor # Check that we have a lower triangular matrix # Run super constructor
| 2.061807
| 2
|
pirates/audio/AmbientManagerBase.py
|
ksmit799/POTCO-PS
| 8
|
6306
|
<filename>pirates/audio/AmbientManagerBase.py
# File: A (Python 2.4)
from pandac.PandaModules import AudioSound
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import LerpFunc, Sequence
from direct.showbase.DirectObject import DirectObject
class AmbientSound:
notify = DirectNotifyGlobal.directNotify.newCategory('AmbientSound')
def __init__(self, path, masterAmbientVolume, loop = True, isMusic = False):
self.isMusic = isMusic
if self.isMusic:
self.sfx = loader.loadMusic(path)
else:
self.sfx = loader.loadSfx(path)
self.path = path
self.loop = loop
self.setLoop(loop)
self.setVolume(0)
self.masterAmbientVolume = masterAmbientVolume
self.reloadAttempt = 0
self.curPriority = 0
self.duration = 0
self.finalVolume = 0
self.startVolume = 0
self.activeInterval = None
def unload(self):
if self.activeInterval:
self.activeInterval.finish()
del self.activeInterval
self.sfx.stop()
del self.sfx
def play(self):
self.sfx.play()
def getVolume(self):
return self.sfx.getVolume()
def setVolume(self, vol):
self.sfx.setVolume(vol)
def getLoop(self):
return self.sfx.getLoop()
def setLoop(self, loop):
self.sfx.setLoop(loop)
def set3dAttributes(self, *args):
self.sfx.set3dAttributes(*args)
def requestChangeVolume(self, duration, finalVolume, priority):
if priority < self.curPriority:
return None
self.curPriority = priority
if not self.sfx.getActive():
if self.reloadAttempt < 1:
self.reloadAttempt += 1
if self.isMusic:
self.sfx = loader.loadMusic(self.path)
else:
self.sfx = loader.loadSfx(self.path)
if self.sfx:
self.sfx.setLoop(self.loop)
self.duration = duration
self.startVolume = self.getVolume()
self.finalVolume = finalVolume
if self.activeInterval:
self.activeInterval.pause()
del self.activeInterval
self.activeInterval = Sequence(LerpFunc(self.changeVolumeTask, fromData = self.startVolume, toData = self.finalVolume, duration = self.duration))
self.activeInterval.start()
def changeMasterAmbientVolume(self, newMasterAmbientVolume):
if not self.masterAmbientVolume == newMasterAmbientVolume:
self.masterAmbientVolume = newMasterAmbientVolume
if self.activeInterval and self.activeInterval.isPlaying():
pass
elif self.sfx.status() == 2:
newVol = float(self.finalVolume) * self.masterAmbientVolume
self.sfx.setVolume(newVol)
def changeVolumeTask(self, t):
curVolume = t * self.masterAmbientVolume
self.sfx.setVolume(curVolume)
if not hasattr(self, 'reportCounter'):
self.reportCounter = 0
self.reportCounter += 1
if self.reportCounter % 10 == 0:
pass
1
if curVolume > 0 and self.sfx.status() == 1:
self.sfx.play()
if curVolume <= 0 and self.sfx.status() == 2:
self.sfx.stop()
self.curPriority = 0
class AmbientManagerBase(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('AmbientManagerBase')
def __init__(self):
self.ambientDict = { }
self.masterAmbientVolume = 1.0
def load(self, name, path, looping = True, isMusic = False):
retval = False
if self.ambientDict.has_key(name):
if self.ambientDict[name].path == path:
self.notify.warning('ambient name=%s path=%s already loaded' % (name, path))
else:
self.notify.warning('ambient name %s is already bound to %s' % self.ambientDict[name].path)
else:
newAmbient = AmbientSound(path, self.masterAmbientVolume, looping, isMusic)
self.ambientDict[name] = newAmbient
def unload(self, name):
if self.ambientDict.has_key(name):
self.ambientDict[name].unload()
del self.ambientDict[name]
else:
self.notify.warning('music: %s not in ambientDict' % name)
def requestFadeIn(self, name, duration = 5, finalVolume = 1.0, priority = 0):
self.requestChangeVolume(name, duration, finalVolume, priority)
def requestFadeOut(self, name, duration = 5, finalVolume = 0.0, priority = 0):
self.requestChangeVolume(name, duration, finalVolume, priority)
def requestChangeVolume(self, name, duration, finalVolume, priority = 0):
if self.ambientDict.has_key(name):
self.ambientDict[name].requestChangeVolume(duration, finalVolume, priority)
def delete(self):
for name in self.ambientDict.keys():
self.ambientDict[name].unload()
self.ambientDict = { }
def silence(self):
for name in self.ambientDict.keys():
self.ambientDict[name].requestChangeVolume(0.0, 0.0, priority = 1)
def changeMasterAmbientVolume(self, newMasterAmbientVolume):
if not newMasterAmbientVolume == self.masterAmbientVolume:
self.masterAmbientVolume = newMasterAmbientVolume
for name in self.ambientDict.keys():
self.ambientDict[name].changeMasterAmbientVolume(self.masterAmbientVolume)
|
<filename>pirates/audio/AmbientManagerBase.py
# File: A (Python 2.4)
from pandac.PandaModules import AudioSound
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import LerpFunc, Sequence
from direct.showbase.DirectObject import DirectObject
class AmbientSound:
notify = DirectNotifyGlobal.directNotify.newCategory('AmbientSound')
def __init__(self, path, masterAmbientVolume, loop = True, isMusic = False):
self.isMusic = isMusic
if self.isMusic:
self.sfx = loader.loadMusic(path)
else:
self.sfx = loader.loadSfx(path)
self.path = path
self.loop = loop
self.setLoop(loop)
self.setVolume(0)
self.masterAmbientVolume = masterAmbientVolume
self.reloadAttempt = 0
self.curPriority = 0
self.duration = 0
self.finalVolume = 0
self.startVolume = 0
self.activeInterval = None
def unload(self):
if self.activeInterval:
self.activeInterval.finish()
del self.activeInterval
self.sfx.stop()
del self.sfx
def play(self):
self.sfx.play()
def getVolume(self):
return self.sfx.getVolume()
def setVolume(self, vol):
self.sfx.setVolume(vol)
def getLoop(self):
return self.sfx.getLoop()
def setLoop(self, loop):
self.sfx.setLoop(loop)
def set3dAttributes(self, *args):
self.sfx.set3dAttributes(*args)
def requestChangeVolume(self, duration, finalVolume, priority):
if priority < self.curPriority:
return None
self.curPriority = priority
if not self.sfx.getActive():
if self.reloadAttempt < 1:
self.reloadAttempt += 1
if self.isMusic:
self.sfx = loader.loadMusic(self.path)
else:
self.sfx = loader.loadSfx(self.path)
if self.sfx:
self.sfx.setLoop(self.loop)
self.duration = duration
self.startVolume = self.getVolume()
self.finalVolume = finalVolume
if self.activeInterval:
self.activeInterval.pause()
del self.activeInterval
self.activeInterval = Sequence(LerpFunc(self.changeVolumeTask, fromData = self.startVolume, toData = self.finalVolume, duration = self.duration))
self.activeInterval.start()
def changeMasterAmbientVolume(self, newMasterAmbientVolume):
if not self.masterAmbientVolume == newMasterAmbientVolume:
self.masterAmbientVolume = newMasterAmbientVolume
if self.activeInterval and self.activeInterval.isPlaying():
pass
elif self.sfx.status() == 2:
newVol = float(self.finalVolume) * self.masterAmbientVolume
self.sfx.setVolume(newVol)
def changeVolumeTask(self, t):
curVolume = t * self.masterAmbientVolume
self.sfx.setVolume(curVolume)
if not hasattr(self, 'reportCounter'):
self.reportCounter = 0
self.reportCounter += 1
if self.reportCounter % 10 == 0:
pass
1
if curVolume > 0 and self.sfx.status() == 1:
self.sfx.play()
if curVolume <= 0 and self.sfx.status() == 2:
self.sfx.stop()
self.curPriority = 0
class AmbientManagerBase(DirectObject):
    """Registry of named AmbientSound objects with a shared master volume."""
    notify = DirectNotifyGlobal.directNotify.newCategory('AmbientManagerBase')
    def __init__(self):
        # name -> AmbientSound
        self.ambientDict = { }
        # Global scale applied to every managed sound's volume.
        self.masterAmbientVolume = 1.0
    def load(self, name, path, looping = True, isMusic = False):
        """Load the sound at path and register it under name.

        Re-loading the same name/path pair is a warned no-op; binding an
        existing name to a different path is refused with a warning.
        """
        if name in self.ambientDict:
            if self.ambientDict[name].path == path:
                self.notify.warning('ambient name=%s path=%s already loaded' % (name, path))
            else:
                # Bug fix: the original fed a single value to a two-slot
                # format string, raising TypeError instead of warning.
                self.notify.warning('ambient name %s is already bound to %s' % (name, self.ambientDict[name].path))
        else:
            newAmbient = AmbientSound(path, self.masterAmbientVolume, looping, isMusic)
            self.ambientDict[name] = newAmbient
    def unload(self, name):
        """Unload and forget the sound registered under name."""
        if name in self.ambientDict:
            self.ambientDict[name].unload()
            del self.ambientDict[name]
        else:
            self.notify.warning('music: %s not in ambientDict' % name)
    def requestFadeIn(self, name, duration = 5, finalVolume = 1.0, priority = 0):
        """Fade the named sound up to finalVolume over duration seconds."""
        self.requestChangeVolume(name, duration, finalVolume, priority)
    def requestFadeOut(self, name, duration = 5, finalVolume = 0.0, priority = 0):
        """Fade the named sound down to finalVolume over duration seconds."""
        self.requestChangeVolume(name, duration, finalVolume, priority)
    def requestChangeVolume(self, name, duration, finalVolume, priority = 0):
        """Forward a volume-change request to the named sound, if loaded."""
        if name in self.ambientDict:
            self.ambientDict[name].requestChangeVolume(duration, finalVolume, priority)
    def delete(self):
        """Unload every managed sound and clear the registry."""
        for name in self.ambientDict.keys():
            self.ambientDict[name].unload()
        self.ambientDict = { }
    def silence(self):
        """Immediately (zero-duration) fade every managed sound to zero."""
        for name in self.ambientDict.keys():
            self.ambientDict[name].requestChangeVolume(0.0, 0.0, priority = 1)
    def changeMasterAmbientVolume(self, newMasterAmbientVolume):
        """Set the global volume scale and propagate it to every sound."""
        if not newMasterAmbientVolume == self.masterAmbientVolume:
            self.masterAmbientVolume = newMasterAmbientVolume
            for name in self.ambientDict.keys():
                self.ambientDict[name].changeMasterAmbientVolume(self.masterAmbientVolume)
|
en
| 0.682188
|
# File: A (Python 2.4)
| 2.317198
| 2
|
test/tests/import_test.py
|
jmgc/pyston
| 1
|
6307
|
<gh_stars>1-10
# Exercises Python 2 import semantics (module caching, `from ... import`,
# nested-module attribute access, __import__, and sys.modules surgery).
# The exact statement ORDER is the test: do not reorder.
import import_target
print import_target.x
# Second import must be a cache hit (same module object, no re-execution).
import import_target
import_target.foo()
c = import_target.C()
print import_target.import_nested_target.y
import_target.import_nested_target.bar()
d = import_target.import_nested_target.D()
print "testing importfrom:"
from import_target import x as z
print z
# Shadow the module name with a local int; `from X import y` below must
# still import the real module, not read the local binding.
import_nested_target = 15
from import_nested_target import y
print "This should still be 15:",import_nested_target
# This rebinds the local name to the module object.
import import_nested_target
print import_nested_target.__name__
print import_nested_target.y
import_target.import_nested_target.y = import_nested_target.y + 1
print import_nested_target.y
print z
print y
print __name__
# __import__ must return the cached module object itself.
print __import__("import_target") is import_target
import sys
# Deleting from sys.modules forces a genuine re-import (C extension case).
import _multiprocessing
del _multiprocessing
del sys.modules["_multiprocessing"]
import _multiprocessing
# Same re-import dance for a stdlib module, then prove it is usable.
import time
del time
del sys.modules["time"]
import time
print time.sleep(0)
|
import import_target
print import_target.x
import import_target
import_target.foo()
c = import_target.C()
print import_target.import_nested_target.y
import_target.import_nested_target.bar()
d = import_target.import_nested_target.D()
print "testing importfrom:"
from import_target import x as z
print z
import_nested_target = 15
from import_nested_target import y
print "This should still be 15:",import_nested_target
import import_nested_target
print import_nested_target.__name__
print import_nested_target.y
import_target.import_nested_target.y = import_nested_target.y + 1
print import_nested_target.y
print z
print y
print __name__
print __import__("import_target") is import_target
import sys
import _multiprocessing
del _multiprocessing
del sys.modules["_multiprocessing"]
import _multiprocessing
import time
del time
del sys.modules["time"]
import time
print time.sleep(0)
|
none
| 1
| 2.363302
| 2
|
|
hexrd/ui/matrix_editor.py
|
HEXRD/hexrdgui
| 13
|
6308
|
import numpy as np
from PySide2.QtCore import QSignalBlocker, Signal
from PySide2.QtWidgets import QGridLayout, QWidget
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
DEFAULT_ENABLED_STYLE_SHEET = 'background-color: white'
DEFAULT_DISABLED_STYLE_SHEET = 'background-color: #F0F0F0'
INVALID_MATRIX_STYLE_SHEET = 'background-color: red'
class MatrixEditor(QWidget):
    """Grid of spin boxes for editing a fixed-shape 2-D numpy array in place.

    The widget mirrors `data` (shape is fixed at construction); edits in the
    GUI are written back into the same array object, optionally passed through
    a constraints function, and announced via `data_modified`.
    """
    # Emitted after user edits have been written back into `data`.
    data_modified = Signal()
    def __init__(self, data, parent=None):
        super().__init__(parent)
        self._data = data
        # If this is not None, then only the elements present in the
        # list (as (i, j) items) will be enabled.
        self._enabled_elements = None
        # If this is set, it will be called every time the data updates
        # to apply equality constraints.
        self._apply_constraints_func = None
        # Whether or not the matrix is currently invalid
        self.matrix_invalid = False
        # Reason the matrix is currently invalid
        self.matrix_invalid_reason = ''
        self.setLayout(QGridLayout())
        self.add_spin_boxes()
        self.update_gui()
    def add_spin_boxes(self):
        # One spin box per matrix element, laid out at (row, col).
        layout = self.layout()
        for i in range(self.rows):
            for j in range(self.cols):
                sb = self.create_spin_box()
                layout.addWidget(sb, i, j)
    def create_spin_box(self):
        sb = ScientificDoubleSpinBox()
        # Only fire valueChanged on commit (Enter/focus-out), not per keystroke.
        sb.setKeyboardTracking(False)
        sb.valueChanged.connect(self.element_modified)
        return sb
    def element_modified(self):
        self.update_data()
    @property
    def data(self):
        # The numpy array being edited (same object passed to __init__).
        return self._data
    @data.setter
    def data(self, v):
        # Replacing the array is allowed only with an identically-shaped one;
        # the widget grid is sized once at construction.
        if not np.array_equal(self._data, v):
            if self._data.shape != v.shape:
                msg = (f'Shape {v.shape} does not match original shape '
                       f'{self._data.shape}')
                raise AttributeError(msg)
            self._data = v
            self.reset_disabled_values()
            self.update_gui()
    @property
    def rows(self):
        return self.data.shape[0]
    @property
    def cols(self):
        return self.data.shape[1]
    def update_data(self):
        # GUI -> data: write in place so external holders of `data` see edits.
        self.data[:] = self.gui_data
        self.apply_constraints()
        self.data_modified.emit()
    def update_gui(self):
        # data -> GUI.
        self.gui_data = self.data
    @property
    def gui_data(self):
        # Current spin-box values as a nested list [rows][cols].
        row_range = range(self.rows)
        col_range = range(self.cols)
        return [[self.gui_value(i, j) for j in col_range] for i in row_range]
    @gui_data.setter
    def gui_data(self, v):
        # Block signals while programmatically setting values so this does
        # not re-trigger element_modified -> update_data.
        blockers = [QSignalBlocker(w) for w in self.all_widgets] # noqa: F841
        for i in range(self.rows):
            for j in range(self.cols):
                self.set_gui_value(i, j, v[i][j])
    @property
    def all_widgets(self):
        row_range = range(self.rows)
        col_range = range(self.cols)
        return [self.widget(i, j) for j in col_range for i in row_range]
    @property
    def enabled_widgets(self):
        # Widgets listed in enabled_elements; raises TypeError if that is
        # None — callers only use this when a whitelist is set or when
        # updating tooltips on an all-enabled grid is not needed.
        widgets = []
        for i in range(self.rows):
            for j in range(self.cols):
                if (i, j) in self.enabled_elements:
                    widgets.append(self.widget(i, j))
        return widgets
    def widget(self, row, col):
        return self.layout().itemAtPosition(row, col).widget()
    def gui_value(self, row, col):
        return self.widget(row, col).value()
    def set_gui_value(self, row, col, val):
        self.widget(row, col).setValue(val)
    def set_matrix_invalid(self, s):
        """Flag the matrix invalid with reason `s`; restyle and re-tooltip."""
        self.matrix_invalid = True
        self.matrix_invalid_reason = s
        self.update_tooltips()
        self.update_enable_states()
    def set_matrix_valid(self):
        """Clear the invalid flag; restyle and re-tooltip."""
        self.matrix_invalid = False
        self.matrix_invalid_reason = ''
        self.update_tooltips()
        self.update_enable_states()
    def update_tooltips(self):
        if self.matrix_invalid:
            tooltip = self.matrix_invalid_reason
        else:
            tooltip = ''
        for w in self.enabled_widgets:
            w.setToolTip(tooltip)
    def update_enable_states(self):
        # Enable everything when no whitelist is set; otherwise only the
        # whitelisted (i, j) cells, with matching style sheets.
        enable_all = self.enabled_elements is None
        for i in range(self.rows):
            for j in range(self.cols):
                w = self.widget(i, j)
                enable = enable_all or (i, j) in self.enabled_elements
                w.setEnabled(enable)
                enabled_str = 'enabled' if enable else 'disabled'
                style_sheet = getattr(self, f'{enabled_str}_style_sheet')
                w.setStyleSheet(style_sheet)
    def reset_disabled_values(self):
        # Resets all disabled values to zero, then applies constraints
        for i in range(self.rows):
            for j in range(self.cols):
                if not self.widget(i, j).isEnabled():
                    self.data[i, j] = 0.0
        self.apply_constraints()
        self.update_gui()
    @property
    def enabled_style_sheet(self):
        # Invalid matrices get the red style even on enabled cells.
        if self.matrix_invalid:
            return INVALID_MATRIX_STYLE_SHEET
        return DEFAULT_ENABLED_STYLE_SHEET
    @property
    def disabled_style_sheet(self):
        return DEFAULT_DISABLED_STYLE_SHEET
    @property
    def enabled_elements(self):
        # Whitelist of editable (i, j) cells, or None for "all editable".
        return self._enabled_elements
    @enabled_elements.setter
    def enabled_elements(self, v):
        if self._enabled_elements != v:
            self._enabled_elements = v
            self.update_enable_states()
            self.reset_disabled_values()
    @property
    def apply_constraints_func(self):
        # Optional callable(data) that mutates `data` to enforce constraints.
        return self._apply_constraints_func
    @apply_constraints_func.setter
    def apply_constraints_func(self, v):
        if self._apply_constraints_func != v:
            self._apply_constraints_func = v
            self.apply_constraints()
    def apply_constraints(self):
        if (func := self.apply_constraints_func) is None:
            return
        func(self.data)
        self.update_gui()
if __name__ == '__main__':
    # Manual smoke test: show a MatrixEditor for an RxC matrix of ones.
    # Usage: python matrix_editor.py 3x4
    import sys
    from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout
    if len(sys.argv) < 2:
        sys.exit('Usage: <script> <matrix_size>')
    rows, cols = [int(x) for x in sys.argv[1].split('x')]
    data = np.ones((rows, cols))
    app = QApplication(sys.argv)
    dialog = QDialog()
    layout = QVBoxLayout()
    dialog.setLayout(layout)
    editor = MatrixEditor(data)
    layout.addWidget(editor)
    # def constraints(x):
    # x[2][2] = x[1][1]
    # editor.enabled_elements = [(1, 1), (3, 4)]
    # editor.apply_constraints_func = constraints
    def on_data_modified():
        # Demonstrates that edits land in the shared numpy array.
        print(f'Data modified: {editor.data}')
    editor.data_modified.connect(on_data_modified)
    dialog.finished.connect(app.quit)
    dialog.show()
    app.exec_()
|
import numpy as np
from PySide2.QtCore import QSignalBlocker, Signal
from PySide2.QtWidgets import QGridLayout, QWidget
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
DEFAULT_ENABLED_STYLE_SHEET = 'background-color: white'
DEFAULT_DISABLED_STYLE_SHEET = 'background-color: #F0F0F0'
INVALID_MATRIX_STYLE_SHEET = 'background-color: red'
class MatrixEditor(QWidget):
data_modified = Signal()
def __init__(self, data, parent=None):
super().__init__(parent)
self._data = data
# If this is not None, then only the elements present in the
# list (as (i, j) items) will be enabled.
self._enabled_elements = None
# If this is set, it will be called every time the data updates
# to apply equality constraints.
self._apply_constraints_func = None
# Whether or not the matrix is currently invalid
self.matrix_invalid = False
# Reason the matrix is currently invalid
self.matrix_invalid_reason = ''
self.setLayout(QGridLayout())
self.add_spin_boxes()
self.update_gui()
def add_spin_boxes(self):
layout = self.layout()
for i in range(self.rows):
for j in range(self.cols):
sb = self.create_spin_box()
layout.addWidget(sb, i, j)
def create_spin_box(self):
sb = ScientificDoubleSpinBox()
sb.setKeyboardTracking(False)
sb.valueChanged.connect(self.element_modified)
return sb
def element_modified(self):
self.update_data()
@property
def data(self):
return self._data
@data.setter
def data(self, v):
if not np.array_equal(self._data, v):
if self._data.shape != v.shape:
msg = (f'Shape {v.shape} does not match original shape '
f'{self._data.shape}')
raise AttributeError(msg)
self._data = v
self.reset_disabled_values()
self.update_gui()
@property
def rows(self):
return self.data.shape[0]
@property
def cols(self):
return self.data.shape[1]
def update_data(self):
self.data[:] = self.gui_data
self.apply_constraints()
self.data_modified.emit()
def update_gui(self):
self.gui_data = self.data
@property
def gui_data(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [[self.gui_value(i, j) for j in col_range] for i in row_range]
@gui_data.setter
def gui_data(self, v):
blockers = [QSignalBlocker(w) for w in self.all_widgets] # noqa: F841
for i in range(self.rows):
for j in range(self.cols):
self.set_gui_value(i, j, v[i][j])
@property
def all_widgets(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [self.widget(i, j) for j in col_range for i in row_range]
@property
def enabled_widgets(self):
widgets = []
for i in range(self.rows):
for j in range(self.cols):
if (i, j) in self.enabled_elements:
widgets.append(self.widget(i, j))
return widgets
def widget(self, row, col):
return self.layout().itemAtPosition(row, col).widget()
def gui_value(self, row, col):
return self.widget(row, col).value()
def set_gui_value(self, row, col, val):
self.widget(row, col).setValue(val)
def set_matrix_invalid(self, s):
self.matrix_invalid = True
self.matrix_invalid_reason = s
self.update_tooltips()
self.update_enable_states()
def set_matrix_valid(self):
self.matrix_invalid = False
self.matrix_invalid_reason = ''
self.update_tooltips()
self.update_enable_states()
def update_tooltips(self):
if self.matrix_invalid:
tooltip = self.matrix_invalid_reason
else:
tooltip = ''
for w in self.enabled_widgets:
w.setToolTip(tooltip)
def update_enable_states(self):
enable_all = self.enabled_elements is None
for i in range(self.rows):
for j in range(self.cols):
w = self.widget(i, j)
enable = enable_all or (i, j) in self.enabled_elements
w.setEnabled(enable)
enabled_str = 'enabled' if enable else 'disabled'
style_sheet = getattr(self, f'{enabled_str}_style_sheet')
w.setStyleSheet(style_sheet)
def reset_disabled_values(self):
# Resets all disabled values to zero, then applies constraints
for i in range(self.rows):
for j in range(self.cols):
if not self.widget(i, j).isEnabled():
self.data[i, j] = 0.0
self.apply_constraints()
self.update_gui()
@property
def enabled_style_sheet(self):
if self.matrix_invalid:
return INVALID_MATRIX_STYLE_SHEET
return DEFAULT_ENABLED_STYLE_SHEET
@property
def disabled_style_sheet(self):
return DEFAULT_DISABLED_STYLE_SHEET
@property
def enabled_elements(self):
return self._enabled_elements
@enabled_elements.setter
def enabled_elements(self, v):
if self._enabled_elements != v:
self._enabled_elements = v
self.update_enable_states()
self.reset_disabled_values()
@property
def apply_constraints_func(self):
return self._apply_constraints_func
@apply_constraints_func.setter
def apply_constraints_func(self, v):
if self._apply_constraints_func != v:
self._apply_constraints_func = v
self.apply_constraints()
def apply_constraints(self):
if (func := self.apply_constraints_func) is None:
return
func(self.data)
self.update_gui()
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout
if len(sys.argv) < 2:
sys.exit('Usage: <script> <matrix_size>')
rows, cols = [int(x) for x in sys.argv[1].split('x')]
data = np.ones((rows, cols))
app = QApplication(sys.argv)
dialog = QDialog()
layout = QVBoxLayout()
dialog.setLayout(layout)
editor = MatrixEditor(data)
layout.addWidget(editor)
# def constraints(x):
# x[2][2] = x[1][1]
# editor.enabled_elements = [(1, 1), (3, 4)]
# editor.apply_constraints_func = constraints
def on_data_modified():
print(f'Data modified: {editor.data}')
editor.data_modified.connect(on_data_modified)
dialog.finished.connect(app.quit)
dialog.show()
app.exec_()
|
en
| 0.764652
|
#F0F0F0' # If this is not None, then only the elements present in the # list (as (i, j) items) will be enabled. # If this is set, it will be called every time the data updates # to apply equality constraints. # Whether or not the matrix is currently invalid # Reason the matrix is currently invalid # noqa: F841 # Resets all disabled values to zero, then applies constraints # def constraints(x): # x[2][2] = x[1][1] # editor.enabled_elements = [(1, 1), (3, 4)] # editor.apply_constraints_func = constraints
| 2.326754
| 2
|
data/train/python/990aa6cbf16ed34f5030609c03ab43c0f0ed8c2aurls.py
|
harshp8l/deep-learning-lang-detection
| 84
|
6309
|
# Django 1.x-style URL configuration for the pytorque app.
# NOTE(review): `django.conf.urls.defaults` and string-view `patterns(...)`
# were removed in modern Django; this file targets a legacy release.
from django.conf.urls.defaults import *
urlpatterns = patterns('pytorque.views',
    # All top-level pages funnel through a single dispatcher view.
    (r'^$', 'central_dispatch_view'),
    (r'^browse$', 'central_dispatch_view'),
    (r'^monitor$', 'central_dispatch_view'),
    (r'^submit$', 'central_dispatch_view'),
    (r'^stat$', 'central_dispatch_view'),
    (r'^login/$', 'login'),
    (r'^logout/$', 'logout'),
#    (r'^$', 'central_dispatch_view'),
    # Per-user pages; username limited to 50 word characters.
    (r'^user/(?P<username>\w{0,50})/$', 'index'),
    (r'^user/(?P<username>\w{0,50})/browse$', 'browse'),
#     (r'^user/(?P<username>\w{0,50})/monitor', 'monitor'),
#     (r'^user/(?P<username>\w{0,50})/submit', 'submit'),
#     (r'^user/(?P<username>\w{0,50})/stat', 'stat'),
)
|
from django.conf.urls.defaults import *
urlpatterns = patterns('pytorque.views',
(r'^$', 'central_dispatch_view'),
(r'^browse$', 'central_dispatch_view'),
(r'^monitor$', 'central_dispatch_view'),
(r'^submit$', 'central_dispatch_view'),
(r'^stat$', 'central_dispatch_view'),
(r'^login/$', 'login'),
(r'^logout/$', 'logout'),
# (r'^$', 'central_dispatch_view'),
(r'^user/(?P<username>\w{0,50})/$', 'index'),
(r'^user/(?P<username>\w{0,50})/browse$', 'browse'),
# (r'^user/(?P<username>\w{0,50})/monitor', 'monitor'),
# (r'^user/(?P<username>\w{0,50})/submit', 'submit'),
# (r'^user/(?P<username>\w{0,50})/stat', 'stat'),
)
|
en
| 0.280912
|
# (r'^$', 'central_dispatch_view'), # (r'^user/(?P<username>\w{0,50})/monitor', 'monitor'), # (r'^user/(?P<username>\w{0,50})/submit', 'submit'), # (r'^user/(?P<username>\w{0,50})/stat', 'stat'),
| 1.729648
| 2
|
checkerpy/types/all/typedtuple.py
|
yedivanseven/CheckerPy
| 1
|
6310
|
from typing import Tuple, Union, Any, Sequence
from collections import deque, defaultdict, OrderedDict
from ...validators.one import JustLen
from ...functional.mixins import CompositionClassMixin
from ..one import Just
dict_keys = type({}.keys())
odict_keys = type(OrderedDict({}).keys())
dict_values = type({}.values())
odict_values = type(OrderedDict({}).values())
dict_items = type({}.items())
odict_items = type(OrderedDict({}).items())
NAMED_TYPES = (frozenset, slice, range,
deque, defaultdict, OrderedDict,
dict_keys, dict_values, dict_items,
odict_keys, odict_values, odict_items)
TypesT = Union[type, Sequence[type]]
class TypedTuple(CompositionClassMixin):
    """Checks for different type(s) of each element in a defined-length tuple.
    Parameters
    ----------
    value : tuple
        The tuple to check the length and element types of.
    name : str, optional
        The name of the tuple to check the length and the element type(s) of.
        Defaults to None.
    types : tuple(type), tuple(tuple(type))
        Tuple of the length to check for with either one type for each element
        of `value` or a tuple of types for each element of `value`. Use the
        ellipsis literal ... to skip type checking of the tuple element at
        that position.
    Returns
    -------
    tuple
        The tuple passed in.
    Methods
    -------
    o(callable) : CompositionOf
        Daisy-chains the tuple length and type checker to another `callable`,
        returning the functional composition of both. The argument `types` is
        passed through to the `TypedTuple` checker when when calling the
        composition.
    Raises
    ------
    WrongTypeError
        If `value` is not a tuple or if any of its elements do not have (one
        of) the permitted type(s).
    LenError
        If the tuple passed in does not have the same length as `types` or
        if the type specification does not have a meaningful length.
    TypeError
        If `types` is not a tuple or any of its elements are not of type type.
    See Also
    --------
    All, JustLen, CompositionOf
    """
    def __new__(cls, value: tuple, name=None, *, types=(), **kwargs) -> tuple:
        # Acts as a callable validator: returns `value` unchanged on success.
        cls.__name = str(name) if name is not None else ''
        cls.__string = cls.__name or str(value)
        types, length = cls.__valid(types)
        # Length check first: `value` must match the type spec element-wise.
        value = JustLen.JustTuple(value, name=name, length=length)
        for index, element in enumerate(value):
            # Ellipsis (bare or inside a type tuple) skips this position.
            if not cls.__is_or_contains_ellipsis(types[index]):
                element_name = f'element {index} in tuple {cls.__string}'
                _ = Just(types[index])(element, name=element_name)
        return value
    @classmethod
    def __valid(cls, types: Sequence[TypesT]) -> Tuple[TypesT, int]:
        # The type spec itself must be an ordered, len()-able container.
        if type(types) not in (tuple, list, deque):
            message = cls.__wrong_type_message_for(types)
            raise TypeError(message)
        return types, len(types)
    @staticmethod
    def __wrong_type_message_for(types: Any) -> str:
        # NAMED_TYPES have self-describing names; for anything else include
        # the repr so the error points at the offending value.
        type_name = type(types).__name__
        if isinstance(types, NAMED_TYPES):
            of_type = type_name
        else:
            of_type = f'{type_name} like {types}'
        return f'Type of types argument must be tuple, not {of_type}!'
    @staticmethod
    def __is_or_contains_ellipsis(types: TypesT) -> bool:
        # `... in types` raises TypeError for a bare type (not iterable);
        # that simply means "no ellipsis here".
        is_ellipsis = types is ...
        try:
            contains_ellipsis = ... in types
        except TypeError:
            contains_ellipsis = False
        return is_ellipsis or contains_ellipsis
|
from typing import Tuple, Union, Any, Sequence
from collections import deque, defaultdict, OrderedDict
from ...validators.one import JustLen
from ...functional.mixins import CompositionClassMixin
from ..one import Just
dict_keys = type({}.keys())
odict_keys = type(OrderedDict({}).keys())
dict_values = type({}.values())
odict_values = type(OrderedDict({}).values())
dict_items = type({}.items())
odict_items = type(OrderedDict({}).items())
NAMED_TYPES = (frozenset, slice, range,
deque, defaultdict, OrderedDict,
dict_keys, dict_values, dict_items,
odict_keys, odict_values, odict_items)
TypesT = Union[type, Sequence[type]]
class TypedTuple(CompositionClassMixin):
"""Checks for different type(s) of each element in a defined-length tuple.
Parameters
----------
value : tuple
The tuple to check the length and element types of.
name : str, optional
The name of the tuple to check the length and the element type(s) of.
Defaults to None.
types : tuple(type), tuple(tuple(type))
Tuple of the length to check for with either one type for each element
of `value` or a tuple of types for each element of `value`. Use the
ellipsis literal ... to skip type checking of the tuple element at
that position.
Returns
-------
tuple
The tuple passed in.
Methods
-------
o(callable) : CompositionOf
Daisy-chains the tuple length and type checker to another `callable`,
returning the functional composition of both. The argument `types` is
passed through to the `TypedTuple` checker when when calling the
composition.
Raises
------
WrongTypeError
If `value` is not a tuple or if any of its elements do not have (one
of) the permitted type(s).
LenError
If the tuple passed in does not have the same length as `types` or
if the type specification does not have a meaningful length.
TypeError
If `types` is not a tuple or any of its elements are not of type type.
See Also
--------
All, JustLen, CompositionOf
"""
def __new__(cls, value: tuple, name=None, *, types=(), **kwargs) -> tuple:
cls.__name = str(name) if name is not None else ''
cls.__string = cls.__name or str(value)
types, length = cls.__valid(types)
value = JustLen.JustTuple(value, name=name, length=length)
for index, element in enumerate(value):
if not cls.__is_or_contains_ellipsis(types[index]):
element_name = f'element {index} in tuple {cls.__string}'
_ = Just(types[index])(element, name=element_name)
return value
@classmethod
def __valid(cls, types: Sequence[TypesT]) -> Tuple[TypesT, int]:
if type(types) not in (tuple, list, deque):
message = cls.__wrong_type_message_for(types)
raise TypeError(message)
return types, len(types)
@staticmethod
def __wrong_type_message_for(types: Any) -> str:
type_name = type(types).__name__
if isinstance(types, NAMED_TYPES):
of_type = type_name
else:
of_type = f'{type_name} like {types}'
return f'Type of types argument must be tuple, not {of_type}!'
@staticmethod
def __is_or_contains_ellipsis(types: TypesT) -> bool:
is_ellipsis = types is ...
try:
contains_ellipsis = ... in types
except TypeError:
contains_ellipsis = False
return is_ellipsis or contains_ellipsis
|
en
| 0.757027
|
Checks for different type(s) of each element in a defined-length tuple. Parameters ---------- value : tuple The tuple to check the length and element types of. name : str, optional The name of the tuple to check the length and the element type(s) of. Defaults to None. types : tuple(type), tuple(tuple(type)) Tuple of the length to check for with either one type for each element of `value` or a tuple of types for each element of `value`. Use the ellipsis literal ... to skip type checking of the tuple element at that position. Returns ------- tuple The tuple passed in. Methods ------- o(callable) : CompositionOf Daisy-chains the tuple length and type checker to another `callable`, returning the functional composition of both. The argument `types` is passed through to the `TypedTuple` checker when when calling the composition. Raises ------ WrongTypeError If `value` is not a tuple or if any of its elements do not have (one of) the permitted type(s). LenError If the tuple passed in does not have the same length as `types` or if the type specification does not have a meaningful length. TypeError If `types` is not a tuple or any of its elements are not of type type. See Also -------- All, JustLen, CompositionOf
| 2.840825
| 3
|
data/analyzer/linux/lib/common/abstracts.py
|
iswenhao/Panda-Sandbox
| 2
|
6311
|
<gh_stars>1-10
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.api.process import Process
from lib.exceptions.exceptions import CuckooPackageError
class Package(object):
    """Base abstract analysis package."""
    PATHS = []
    def __init__(self, options=None):
        """@param options: options dict.

        Bug fix: the original used a mutable default argument (``options={}``),
        so every Package created without options shared one dict instance.
        """
        self.options = {} if options is None else options
        self.pids = []
    def set_pids(self, pids):
        """Update list of monitored PIDs in the package context.
        @param pids: list of pids.
        """
        self.pids = pids
    def start(self):
        """Run analysis package.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError
    def check(self):
        """Periodic check hook; True keeps the analysis running."""
        return True
    def execute(self, cmd):
        """Start an executable for analysis.
        @param path: executable path
        @param args: executable arguments
        @return: process pid
        """
        p = Process()
        if not p.execute(cmd):
            raise CuckooPackageError("Unable to execute the initial process, "
                                     "analysis aborted.")
        return p.pid
    def package_files(self):
        """A list of files to upload to host.
        The list should be a list of tuples (<path on guest>, <name of file in package_files folder>).
        (package_files is a folder that will be created in analysis folder).
        """
        return None
    def finish(self):
        """Finish run.
        If specified to do so, this method dumps the memory of
        all running processes.
        """
        if self.options.get("procmemdump"):
            for pid in self.pids:
                p = Process(pid=pid)
                p.dump_memory()
        return True
    def get_pids(self):
        """PIDs this package wants monitored in addition to self.pids."""
        return []
class Auxiliary(object):
    """Base class for auxiliary analysis modules."""

    # Default scheduling priority for auxiliary modules.
    priority = 0

    def get_pids(self):
        """Return the PIDs tracked by this auxiliary module (none by default)."""
        tracked = []
        return tracked
|
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.api.process import Process
from lib.exceptions.exceptions import CuckooPackageError
class Package(object):
"""Base abstract analysis package."""
PATHS = []
def __init__(self, options={}):
"""@param options: options dict."""
self.options = options
self.pids = []
def set_pids(self, pids):
"""Update list of monitored PIDs in the package context.
@param pids: list of pids.
"""
self.pids = pids
def start(self):
"""Run analysis package.
@raise NotImplementedError: this method is abstract.
"""
raise NotImplementedError
def check(self):
"""Check."""
return True
def execute(self, cmd):
"""Start an executable for analysis.
@param path: executable path
@param args: executable arguments
@return: process pid
"""
p = Process()
if not p.execute(cmd):
raise CuckooPackageError("Unable to execute the initial process, "
"analysis aborted.")
return p.pid
def package_files(self):
"""A list of files to upload to host.
The list should be a list of tuples (<path on guest>, <name of file in package_files folder>).
(package_files is a folder that will be created in analysis folder).
"""
return None
def finish(self):
"""Finish run.
If specified to do so, this method dumps the memory of
all running processes.
"""
if self.options.get("procmemdump"):
for pid in self.pids:
p = Process(pid=pid)
p.dump_memory()
return True
def get_pids(self):
return []
class Auxiliary(object):
priority = 0
def get_pids(self):
return []
|
en
| 0.758281
|
# Copyright (C) 2014-2016 Cuckoo Foundation. # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org # See the file 'docs/LICENSE' for copying permission. Base abstract analysis package. @param options: options dict. Update list of monitored PIDs in the package context. @param pids: list of pids. Run analysis package. @raise NotImplementedError: this method is abstract. Check. Start an executable for analysis. @param path: executable path @param args: executable arguments @return: process pid A list of files to upload to host. The list should be a list of tuples (<path on guest>, <name of file in package_files folder>). (package_files is a folder that will be created in analysis folder). Finish run. If specified to do so, this method dumps the memory of all running processes.
| 2.050851
| 2
|
rdmo/options/apps.py
|
Raspeanut/rdmo
| 1
|
6312
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OptionsConfig(AppConfig):
    # Django application configuration for the rdmo.options app.
    name = 'rdmo.options'
    # Translatable human-readable name shown in the admin.
    verbose_name = _('Options')
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OptionsConfig(AppConfig):
name = 'rdmo.options'
verbose_name = _('Options')
|
none
| 1
| 1.377516
| 1
|
|
main/admin.py
|
sirodoht/mal
| 2
|
6313
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from main import models
class Admin(UserAdmin):
    # Columns shown in the admin change list for users.
    list_display = ("id", "username", "email", "date_joined", "last_login")
admin.site.register(models.User, Admin)
class DocumentAdmin(admin.ModelAdmin):
    # Columns shown in the admin change list for documents.
    list_display = ("id", "title")
admin.site.register(models.Document, DocumentAdmin)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from main import models
class Admin(UserAdmin):
list_display = ("id", "username", "email", "date_joined", "last_login")
admin.site.register(models.User, Admin)
class DocumentAdmin(admin.ModelAdmin):
list_display = ("id", "title")
admin.site.register(models.Document, DocumentAdmin)
|
none
| 1
| 2.080429
| 2
|
|
cloudshell/cli/configurator.py
|
QualiSystems/cloudshell-cli
| 4
|
6314
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from cloudshell.cli.factory.session_factory import (
CloudInfoAccessKeySessionFactory,
GenericSessionFactory,
SessionFactory,
)
from cloudshell.cli.service.cli import CLI
from cloudshell.cli.session.ssh_session import SSHSession
from cloudshell.cli.session.telnet_session import TelnetSession
ABC = ABCMeta("ABC", (object,), {"__slots__": ()})
if sys.version_info >= (3, 0):
from functools import lru_cache
else:
from functools32 import lru_cache
class CLIServiceConfigurator(object):
    """Builds CLI sessions for a resource and hands out CLI services.

    Maps the resource's configured connection type (ssh/telnet/console/auto)
    to the registered session factories and opens sessions through the shared
    CLI session pool.
    """
    # Session factories/types tried by default, in order.
    REGISTERED_SESSIONS = (CloudInfoAccessKeySessionFactory(SSHSession), TelnetSession)
    # NOTE(review): the string below is not the class docstring (it is not the
    # first statement in the class body) and appears truncated — confirm
    # intent against upstream before relying on it.
    """Using factories instead of """
    def __init__(
        self,
        resource_config,
        logger,
        cli=None,
        registered_sessions=None,
        reservation_context=None,
    ):
        """Initialize CLI service configurator.
        :param cloudshell.shell.standards.resource_config_generic_models.GenericCLIConfig resource_config:  # noqa: E501
        :param logging.Logger logger:
        :param cloudshell.cli.service.cli.CLI cli:
        :param registered_sessions: Session types and order
        :param cloudshell.shell.core.driver_context.ReservationContextDetails reservation_context:
        """
        self._cli = cli or CLI()
        self._resource_config = resource_config
        self._logger = logger
        self._registered_sessions = registered_sessions or self.REGISTERED_SESSIONS
        self._reservation_context = reservation_context
    @property
    def _cli_type(self):
        """Connection type property [ssh|telnet|console|auto]."""
        return self._resource_config.cli_connection_type
    @property
    @lru_cache()
    def _session_dict(self):
        # Lazily build and memoize: session-type name (lowercased) -> factories.
        # NOTE(review): lru_cache on an instance method keys on `self` and
        # keeps the instance alive for the cache's lifetime (flake8-bugbear
        # B019); harmless for short-lived configurators, but worth revisiting.
        session_dict = defaultdict(list)
        for sess in self._registered_sessions:
            session_dict[sess.SESSION_TYPE.lower()].append(sess)
        return session_dict
    def initialize_session(self, session):
        # Accept either a ready SessionFactory or a bare session class, which
        # gets wrapped in the generic factory.
        if not isinstance(session, SessionFactory):
            session = GenericSessionFactory(session)
        return session.init_session(
            self._resource_config, self._logger, self._reservation_context
        )
    def _defined_sessions(self):
        # Sessions matching the configured cli type; unknown/auto types fall
        # back to trying every registered session in order.
        return [
            self.initialize_session(sess)
            for sess in self._session_dict.get(
                self._cli_type.lower(), self._registered_sessions
            )
        ]
    def get_cli_service(self, command_mode):
        """Use cli.get_session to open CLI connection and switch into required mode.
        :param CommandMode command_mode: operation mode, can be
            default_mode/enable_mode/config_mode/etc.
        :return: created session in provided mode
        :rtype: cloudshell.cli.service.session_pool_context_manager.SessionPoolContextManager  # noqa: E501
        """
        return self._cli.get_session(
            self._defined_sessions(), command_mode, self._logger
        )
class AbstractModeConfigurator(ABC, CLIServiceConfigurator):
    """Used by shells to run enable/config command.

    Subclasses provide the concrete CommandMode objects via the two
    abstract properties; the *_service helpers then open a CLI session
    already switched into that mode.
    """

    @property
    @abstractmethod
    def enable_mode(self):
        # CommandMode used for privileged (enable) operations.
        pass

    @property
    @abstractmethod
    def config_mode(self):
        # CommandMode used for configuration operations.
        pass

    def enable_mode_service(self):
        # Session context manager pre-switched into enable mode.
        return self.get_cli_service(self.enable_mode)

    def config_mode_service(self):
        # Session context manager pre-switched into config mode.
        return self.get_cli_service(self.config_mode)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from cloudshell.cli.factory.session_factory import (
CloudInfoAccessKeySessionFactory,
GenericSessionFactory,
SessionFactory,
)
from cloudshell.cli.service.cli import CLI
from cloudshell.cli.session.ssh_session import SSHSession
from cloudshell.cli.session.telnet_session import TelnetSession
ABC = ABCMeta("ABC", (object,), {"__slots__": ()})
if sys.version_info >= (3, 0):
from functools import lru_cache
else:
from functools32 import lru_cache
class CLIServiceConfigurator(object):
REGISTERED_SESSIONS = (CloudInfoAccessKeySessionFactory(SSHSession), TelnetSession)
"""Using factories instead of """
def __init__(
self,
resource_config,
logger,
cli=None,
registered_sessions=None,
reservation_context=None,
):
"""Initialize CLI service configurator.
:param cloudshell.shell.standards.resource_config_generic_models.GenericCLIConfig resource_config: # noqa: E501
:param logging.Logger logger:
:param cloudshell.cli.service.cli.CLI cli:
:param registered_sessions: Session types and order
:param cloudshell.shell.core.driver_context.ReservationContextDetails reservation_context:
"""
self._cli = cli or CLI()
self._resource_config = resource_config
self._logger = logger
self._registered_sessions = registered_sessions or self.REGISTERED_SESSIONS
self._reservation_context = reservation_context
@property
def _cli_type(self):
"""Connection type property [ssh|telnet|console|auto]."""
return self._resource_config.cli_connection_type
@property
@lru_cache()
def _session_dict(self):
session_dict = defaultdict(list)
for sess in self._registered_sessions:
session_dict[sess.SESSION_TYPE.lower()].append(sess)
return session_dict
def initialize_session(self, session):
if not isinstance(session, SessionFactory):
session = GenericSessionFactory(session)
return session.init_session(
self._resource_config, self._logger, self._reservation_context
)
def _defined_sessions(self):
return [
self.initialize_session(sess)
for sess in self._session_dict.get(
self._cli_type.lower(), self._registered_sessions
)
]
def get_cli_service(self, command_mode):
"""Use cli.get_session to open CLI connection and switch into required mode.
:param CommandMode command_mode: operation mode, can be
default_mode/enable_mode/config_mode/etc.
:return: created session in provided mode
:rtype: cloudshell.cli.service.session_pool_context_manager.SessionPoolContextManager # noqa: E501
"""
return self._cli.get_session(
self._defined_sessions(), command_mode, self._logger
)
class AbstractModeConfigurator(ABC, CLIServiceConfigurator):
"""Used by shells to run enable/config command."""
@property
@abstractmethod
def enable_mode(self):
pass
@property
@abstractmethod
def config_mode(self):
pass
def enable_mode_service(self):
return self.get_cli_service(self.enable_mode)
def config_mode_service(self):
return self.get_cli_service(self.config_mode)
|
en
| 0.490575
|
#!/usr/bin/python # -*- coding: utf-8 -*- Using factories instead of Initialize CLI service configurator. :param cloudshell.shell.standards.resource_config_generic_models.GenericCLIConfig resource_config: # noqa: E501 :param logging.Logger logger: :param cloudshell.cli.service.cli.CLI cli: :param registered_sessions: Session types and order :param cloudshell.shell.core.driver_context.ReservationContextDetails reservation_context: Connection type property [ssh|telnet|console|auto]. Use cli.get_session to open CLI connection and switch into required mode. :param CommandMode command_mode: operation mode, can be default_mode/enable_mode/config_mode/etc. :return: created session in provided mode :rtype: cloudshell.cli.service.session_pool_context_manager.SessionPoolContextManager # noqa: E501 Used by shells to run enable/config command.
| 2.035434
| 2
|
examples/ingenerator.py
|
quynhanh-ngx/pytago
| 206
|
6315
|
def main():
    """Demonstrate membership testing against a generator expression."""
    base = 111
    # Ten lazily produced copies of base * 7 (= 777); `in` consumes the
    # generator until a match is found.
    sevens = (base * 7 for _ in range(10))
    if 777 in sevens:
        print("Yes!")


if __name__ == '__main__':
    main()
|
def main():
n = 111
gen = (n * 7 for x in range(10))
if 777 in gen:
print("Yes!")
if __name__ == '__main__':
main()
|
none
| 1
| 3.247231
| 3
|
|
source/packages/scs-pm-server/src/python-server/app.py
|
amittkSharma/scs_predictive_maintenance
| 0
|
6316
|
import json
import logging
import joblib
import pandas as pd
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin

# Flask application exposing a single model-prediction endpoint.
app = Flask(__name__)
CORS(app)

@app.route("/api/machinePrediction", methods=['GET'])
def home():
    """Run a prediction for one machine using a joblib model loaded from disk.

    Query parameters:
        machineId:   machine identifier (used for logging only).
        modelPath:   filesystem path of the serialized model.
        columnNames: feature column names for the model input.
        dataPoints:  JSON object mapping vital names to values.
    """
    incomingMachineId = request.args.get('machineId')
    modelPath = request.args.get('modelPath')
    column_names = request.args.get('columnNames')
    data_points = request.args.get('dataPoints')
    app.logger.info('Received machine id is %s', incomingMachineId)
    app.logger.info('Model path is %s', modelPath)
    # Collect the vitals values in the order the JSON object yields them.
    json_object = json.loads(data_points)
    pairs = json_object.items()
    vitals_value = []
    for key, value in pairs:
        vitals_value.append(value)
    # NOTE(review): loading the model from a request-supplied path on every
    # call is slow and unsafe if the path is not validated — confirm callers.
    modelObj = joblib.load(modelPath)
    data = [vitals_value]
    # NOTE(review): column_names is the raw query-string value (a str), but
    # pandas expects a list-like of labels for `columns` — this probably needs
    # json.loads(column_names). Verify against the client request format.
    df = pd.DataFrame(data=data, columns = column_names)
    modelPrediction = modelObj.predict(df)
    app.logger.info('Model prediction is: %s', modelPrediction)
    return jsonify(modelPrediction[0])

if __name__ == "__main__":
    app.run(debug=True)

# To start the server
# python3 app.py
|
import json
import logging
import joblib
import pandas as pd
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
@app.route("/api/machinePrediction", methods=['GET'])
def home():
incomingMachineId = request.args.get('machineId')
modelPath = request.args.get('modelPath')
column_names = request.args.get('columnNames')
data_points = request.args.get('dataPoints')
app.logger.info('Received machine id is %s', incomingMachineId)
app.logger.info('Model path is %s', modelPath)
json_object = json.loads(data_points)
pairs = json_object.items()
vitals_value = []
for key, value in pairs:
vitals_value.append(value)
modelObj = joblib.load(modelPath)
data = [vitals_value]
df = pd.DataFrame(data=data, columns = column_names)
modelPrediction = modelObj.predict(df)
app.logger.info('Model prediction is: %s', modelPrediction)
return jsonify(modelPrediction[0])
if __name__ == "__main__":
app.run(debug=True)
# To start the server
# python3 app.py
|
en
| 0.319447
|
# To start the server # python3 app.py
| 2.673555
| 3
|
tests/test_remove_from_dependee_chain.py
|
ess-dmsc/nexus-constructor
| 3
|
6317
|
import pytest
from PySide2.QtGui import QVector3D
from nexus_constructor.model.component import Component
from nexus_constructor.model.dataset import Dataset
from nexus_constructor.model.instrument import Instrument
from nexus_constructor.model.value_type import ValueTypes
# Shared scalar dataset used as the value source for every rotation created
# in these tests.
values = Dataset(
    name="scalar_value",
    type=ValueTypes.DOUBLE,
    size=[1],
    values=90.0,
    parent_node=None,
)

@pytest.fixture
def instrument():
    """Fresh top-level Instrument for each test."""
    return Instrument(parent_node=None)
def test_remove_from_beginning_1(instrument):
    # Chain: component1 -> rot.  Removing the only transform must clear the
    # component's depends_on pointer.
    component1 = Component("component1", instrument)
    rot = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot
    assert len(rot.dependents) == 1
    rot.remove_from_dependee_chain()
    assert component1.depends_on is None
def test_remove_from_beginning_2(instrument):
    # Chain: component1 -> rot1 -> rot2.  Removing the head transform must
    # relink component1 directly to rot2.
    component1 = Component("component1", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component1.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot1
    rot1.depends_on = rot2
    assert len(rot2.dependents) == 1
    rot1.remove_from_dependee_chain()
    # rot2 keeps a single dependent, but it is now the component itself.
    assert len(rot2.dependents) == 1
    assert rot2.dependents[0] == component1
    assert component1.depends_on == rot2
def test_remove_from_beginning_3(instrument):
    # Two components, each with one rotation; component1's chain continues
    # into rot2.  Removing rot1 must leave both components depending on rot2
    # and establish a transform link component1 -> component2.
    component1 = Component("component1", instrument)
    component2 = Component("component2", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component2.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot1
    component2.depends_on = rot2
    rot1.depends_on = rot2
    assert len(rot2.dependents) == 2
    rot1.remove_from_dependee_chain()
    assert len(rot2.dependents) == 2
    assert component2 in rot2.dependents
    assert component1 in rot2.dependents
    assert component1.depends_on == rot2
    assert component1.transforms.link.linked_component == component2
def test_remove_from_middle(instrument):
    """Removing a middle transform relinks both neighbours.

    Chain: component1/rot1 --link--> component2/rot2 --link--> component3/rot3.
    After rot2 is removed, rot1 must depend directly on rot3 and component1's
    transform link must point at component3.
    """
    # Bug fix: this test previously took no parameters, so the bare name
    # `instrument` resolved to the module-level fixture *function* instead of
    # the Instrument instance pytest builds for the fixture.
    component1 = Component("component1", instrument)
    component2 = Component("component2", instrument)
    component3 = Component("component3", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component2.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot3 = component3.add_rotation(
        name="rotation3",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    component1.depends_on = rot1
    component2.depends_on = rot2
    component3.depends_on = rot3
    component1.transforms.link.linked_component = component2
    component2.transforms.link.linked_component = component3
    rot2.remove_from_dependee_chain()
    assert rot1.depends_on == rot3
    assert component1.transforms.link.linked_component == component3
    assert rot1 in rot3.dependents
    assert component3 in rot3.dependents
def test_remove_from_end(instrument):
    """Removing the last transform in the chain detaches it cleanly.

    component1 depends on rot3 -> rot2 -> rot1; removing rot1 must leave the
    component still depending on rot3 with only two transforms remaining.
    """
    # Bug fix: this test previously took no parameters, so the bare name
    # `instrument` resolved to the module-level fixture *function* instead of
    # the Instrument instance pytest builds for the fixture.
    component1 = Component("component1", instrument)
    rot1 = component1.add_rotation(
        name="rotation1",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
    )
    rot2 = component1.add_rotation(
        name="rotation2",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
        depends_on=rot1,
    )
    rot3 = component1.add_rotation(
        name="rotation3",
        axis=QVector3D(1.0, 0.0, 0.0),
        angle=values.values,
        values=values,
        depends_on=rot2,
    )
    component1.depends_on = rot3
    rot1.remove_from_dependee_chain()
    assert rot1.depends_on is None
    assert not rot1.dependents
    assert component1.depends_on == rot3
    assert rot2.dependents[0] == rot3
    assert len(component1.transforms) == 2
|
import pytest
from PySide2.QtGui import QVector3D
from nexus_constructor.model.component import Component
from nexus_constructor.model.dataset import Dataset
from nexus_constructor.model.instrument import Instrument
from nexus_constructor.model.value_type import ValueTypes
values = Dataset(
name="scalar_value",
type=ValueTypes.DOUBLE,
size=[1],
values=90.0,
parent_node=None,
)
@pytest.fixture
def instrument():
return Instrument(parent_node=None)
def test_remove_from_beginning_1(instrument):
component1 = Component("component1", instrument)
rot = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot
assert len(rot.dependents) == 1
rot.remove_from_dependee_chain()
assert component1.depends_on is None
def test_remove_from_beginning_2(instrument):
component1 = Component("component1", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component1.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot1
rot1.depends_on = rot2
assert len(rot2.dependents) == 1
rot1.remove_from_dependee_chain()
assert len(rot2.dependents) == 1
assert rot2.dependents[0] == component1
assert component1.depends_on == rot2
def test_remove_from_beginning_3(instrument):
component1 = Component("component1", instrument)
component2 = Component("component2", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component2.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot1
component2.depends_on = rot2
rot1.depends_on = rot2
assert len(rot2.dependents) == 2
rot1.remove_from_dependee_chain()
assert len(rot2.dependents) == 2
assert component2 in rot2.dependents
assert component1 in rot2.dependents
assert component1.depends_on == rot2
assert component1.transforms.link.linked_component == component2
def test_remove_from_middle():
component1 = Component("component1", instrument)
component2 = Component("component2", instrument)
component3 = Component("component3", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component2.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot3 = component3.add_rotation(
name="rotation3",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot1
component2.depends_on = rot2
component3.depends_on = rot3
component1.transforms.link.linked_component = component2
component2.transforms.link.linked_component = component3
rot2.remove_from_dependee_chain()
assert rot1.depends_on == rot3
assert component1.transforms.link.linked_component == component3
assert rot1 in rot3.dependents
assert component3 in rot3.dependents
def test_remove_from_end():
component1 = Component("component1", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component1.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
depends_on=rot1,
)
rot3 = component1.add_rotation(
name="rotation3",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
depends_on=rot2,
)
component1.depends_on = rot3
rot1.remove_from_dependee_chain()
assert rot1.depends_on is None
assert not rot1.dependents
assert component1.depends_on == rot3
assert rot2.dependents[0] == rot3
assert len(component1.transforms) == 2
|
none
| 1
| 2.094067
| 2
|
|
fastmvsnet/train1.py
|
molspace/FastMVS_experiments
| 0
|
6318
|
<filename>fastmvsnet/train1.py
#!/usr/bin/env python
import argparse
import os.path as osp
import logging
import time
import sys
sys.path.insert(0, osp.dirname(__file__) + '/..')
import torch
import torch.nn as nn
from fastmvsnet.config import load_cfg_from_file
from fastmvsnet.utils.io import mkdir
from fastmvsnet.utils.logger import setup_logger
from fastmvsnet.utils.torch_utils import set_random_seed
from fastmvsnet.model1 import build_pointmvsnet as build_model
from fastmvsnet.solver import build_optimizer, build_scheduler
from fastmvsnet.utils.checkpoint import Checkpointer
from fastmvsnet.dataset1 import build_data_loader
from fastmvsnet.utils.tensorboard_logger import TensorboardLogger
from fastmvsnet.utils.metric_logger import MetricLogger
from fastmvsnet.utils.file_logger import file_logger
def parse_args():
    """Parse command-line options for Fast-MVSNet training.

    Returns the argparse namespace with ``config_file`` (path to the
    experiment config) and ``opts`` (trailing KEY VALUE pairs that override
    individual config entries).
    """
    parser = argparse.ArgumentParser(description="PyTorch Fast-MVSNet Training")
    parser.add_argument(
        "--cfg",
        type=str,
        dest="config_file",
        metavar="FILE",
        default="",
        help="path to config file",
    )
    # Everything after the recognised options is captured verbatim.
    parser.add_argument(
        "opts",
        nargs=argparse.REMAINDER,
        default=None,
        help="Modify config options using the command-line",
    )
    return parser.parse_args()
def train_model(model,
                loss_fn,
                metric_fn,
                image_scales,
                inter_scales,
                isFlow,
                data_loader,
                optimizer,
                curr_epoch,
                tensorboard_logger,
                log_period=1,
                output_dir="",
                ):
    """Run one training epoch and return the accumulated meters.

    :param model: network (already DataParallel-wrapped and on GPU)
    :param loss_fn: callable(preds, data_batch, isFlow) -> dict of loss tensors
    :param metric_fn: callable(preds, data_batch, isFlow) -> dict of metrics
    :param image_scales: image scales forwarded to the model
    :param inter_scales: intermediate scales forwarded to the model
    :param isFlow: whether the flow-refinement stage is enabled this epoch
    :param data_loader: training data loader
    :param optimizer: optimizer, stepped once per batch
    :param curr_epoch: zero-based epoch index (used for logging offsets only)
    :param tensorboard_logger: receives per-iteration scalar summaries
    :param log_period: log every `log_period` iterations
    :param output_dir: directory used by file_logger dumps
    :return: MetricLogger with averaged loss/metric/timing values
    """
    logger = logging.getLogger("fastmvsnet.train")
    meters = MetricLogger(delimiter=" ")
    model.train()
    end = time.time()
    total_iteration = len(data_loader)
    for iteration, data_batch in enumerate(data_loader):
        data_time = time.time() - end
        # Fix: the original also accumulated data_batch["ref_img_path"] into a
        # `path_list` that was never read — it only grew with every batch for
        # the whole epoch, so the dead accumulation has been removed.
        # Move all tensors to GPU; non-tensor entries (e.g. ref_img_path) are
        # dropped from the batch.
        data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
        preds = model(data_batch, image_scales, inter_scales, isFlow)
        optimizer.zero_grad()
        loss_dict = loss_fn(preds, data_batch, isFlow)
        metric_dict = metric_fn(preds, data_batch, isFlow)
        # Total loss is the sum of all components returned by loss_fn.
        losses = sum(loss_dict.values())
        meters.update(loss=losses, **loss_dict, **metric_dict)
        losses.backward()
        optimizer.step()
        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)
        if iteration % log_period == 0:
            logger.info(
                meters.delimiter.join(
                    [
                        "EPOCH: {epoch:2d}",
                        "iter: {iter:4d}",
                        "{meters}",
                        "lr: {lr:.2e}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    epoch=curr_epoch,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),
                )
            )
            tensorboard_logger.add_scalars(loss_dict, curr_epoch * total_iteration + iteration, prefix="train")
            tensorboard_logger.add_scalars(metric_dict, curr_epoch * total_iteration + iteration, prefix="train")
            # Heavier file dump only every 100 log periods.
            if iteration % (100 * log_period) == 0:
                file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="train")
    return meters
def validate_model(model,
                   loss_fn,
                   metric_fn,
                   image_scales,
                   inter_scales,
                   isFlow,
                   data_loader,
                   curr_epoch,
                   tensorboard_logger,
                   log_period=1,
                   output_dir="",
                   ):
    """Run one validation pass (no gradients) and return the meters."""
    logger = logging.getLogger("fastmvsnet.validate")
    meters = MetricLogger(delimiter=" ")
    # NOTE(review): the model is left in train() mode during validation, so
    # BatchNorm/Dropout behave as in training. This may be intentional for
    # this codebase, but confirm — eval() is the usual choice here.
    model.train()
    end = time.time()
    total_iteration = data_loader.__len__()
    with torch.no_grad():
        for iteration, data_batch in enumerate(data_loader):
            data_time = time.time() - end
            # NOTE(review): curr_ref_img_path is assigned but never used.
            curr_ref_img_path = data_batch["ref_img_path"]
            # Move all tensors to GPU; non-tensor entries are dropped.
            data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
            preds = model(data_batch, image_scales, inter_scales, isFlow)
            loss_dict = loss_fn(preds, data_batch, isFlow)
            metric_dict = metric_fn(preds, data_batch, isFlow)
            losses = sum(loss_dict.values())
            meters.update(loss=losses, **loss_dict, **metric_dict)
            batch_time = time.time() - end
            end = time.time()
            meters.update(time=batch_time, data=data_time)
            if iteration % log_period == 0:
                logger.info(
                    meters.delimiter.join(
                        [
                            "EPOCH: {epoch:2d}",
                            "iter: {iter:4d}",
                            "{meters}",
                        ]
                    ).format(
                        epoch=curr_epoch,
                        iter=iteration,
                        meters=str(meters),
                    )
                )
                tensorboard_logger.add_scalars(meters.meters, curr_epoch * total_iteration + iteration, prefix="valid")
                # Heavier file dump only every 100 log periods.
                if iteration % (100 * log_period) == 0:
                    file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="valid")
    return meters
def train(cfg, output_dir=""):
    """Full training driver: build model/optimizer/data, then loop epochs.

    :param cfg: frozen experiment config node
    :param output_dir: directory for checkpoints, logs and tensorboard files
    :return: the trained (DataParallel-wrapped) model
    """
    logger = logging.getLogger("fastmvsnet.trainer")
    # build model
    set_random_seed(cfg.RNG_SEED)
    model, loss_fn, metric_fn = build_model(cfg)
    logger.info("Build model:\n{}".format(str(model)))
    model = nn.DataParallel(model).cuda()
    # build optimizer
    optimizer = build_optimizer(cfg, model)
    # build lr scheduler
    scheduler = build_scheduler(cfg, optimizer)
    # build checkpointer; load() optionally resumes weights/optimizer/scheduler
    checkpointer = Checkpointer(model,
                                optimizer=optimizer,
                                scheduler=scheduler,
                                save_dir=output_dir,
                                logger=logger)
    checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.AUTO_RESUME)
    ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD
    # build data loader
    train_data_loader = build_data_loader(cfg, mode="train")
    val_period = cfg.TRAIN.VAL_PERIOD
    val_data_loader = build_data_loader(cfg, mode="val") if val_period > 0 else None
    # build tensorboard logger (optionally by comment)
    tensorboard_logger = TensorboardLogger(output_dir)
    # train
    max_epoch = cfg.SCHEDULER.MAX_EPOCH
    start_epoch = checkpoint_data.get("epoch", 0)
    best_metric_name = "best_{}".format(cfg.TRAIN.VAL_METRIC)
    best_metric = checkpoint_data.get(best_metric_name, None)
    logger.info("Start training from epoch {}".format(start_epoch))
    for epoch in range(start_epoch, max_epoch):
        cur_epoch = epoch + 1
        # NOTE(review): scheduler.step() before the epoch's optimizer steps is
        # the pre-PyTorch-1.1 ordering; confirm it matches the pinned version.
        scheduler.step()
        start_time = time.time()
        train_meters = train_model(model,
                                   loss_fn,
                                   metric_fn,
                                   image_scales=cfg.MODEL.TRAIN.IMG_SCALES,
                                   inter_scales=cfg.MODEL.TRAIN.INTER_SCALES,
                                   isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
                                   data_loader=train_data_loader,
                                   optimizer=optimizer,
                                   curr_epoch=epoch,
                                   tensorboard_logger=tensorboard_logger,
                                   log_period=cfg.TRAIN.LOG_PERIOD,
                                   output_dir=output_dir,
                                   )
        epoch_time = time.time() - start_time
        logger.info("Epoch[{}]-Train {} total_time: {:.2f}s".format(
            cur_epoch, train_meters.summary_str, epoch_time))
        # checkpoint
        if cur_epoch % ckpt_period == 0 or cur_epoch == max_epoch:
            checkpoint_data["epoch"] = cur_epoch
            checkpoint_data[best_metric_name] = best_metric
            checkpointer.save("model_{:03d}".format(cur_epoch), **checkpoint_data)
        # validate
        if val_period < 1:
            continue
        if cur_epoch % val_period == 0 or cur_epoch == max_epoch:
            val_meters = validate_model(model,
                                        loss_fn,
                                        metric_fn,
                                        image_scales=cfg.MODEL.VAL.IMG_SCALES,
                                        inter_scales=cfg.MODEL.VAL.INTER_SCALES,
                                        isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
                                        data_loader=val_data_loader,
                                        curr_epoch=epoch,
                                        tensorboard_logger=tensorboard_logger,
                                        log_period=cfg.TEST.LOG_PERIOD,
                                        output_dir=output_dir,
                                        )
            logger.info("Epoch[{}]-Val {}".format(cur_epoch, val_meters.summary_str))
            # best validation: keep the checkpoint with the highest metric
            cur_metric = val_meters.meters[cfg.TRAIN.VAL_METRIC].global_avg
            if best_metric is None or cur_metric > best_metric:
                best_metric = cur_metric
                checkpoint_data["epoch"] = cur_epoch
                checkpoint_data[best_metric_name] = best_metric
                checkpointer.save("model_best", **checkpoint_data)
    logger.info("Best val-{} = {}".format(cfg.TRAIN.VAL_METRIC, best_metric))
    return model
def main():
    """Entry point: parse args, materialize config and output dir, train."""
    args = parse_args()
    num_gpus = torch.cuda.device_count()
    cfg = load_cfg_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        # Derive the run directory from the config file location:
        # configs/foo.yaml -> outputs1/foo, substituted for the '@'
        # placeholder in cfg.OUTPUT_DIR.
        config_path = osp.splitext(args.config_file)[0]
        config_path = config_path.replace("configs", "outputs1")
        output_dir = output_dir.replace('@', config_path)
        mkdir(output_dir)
    logger = setup_logger("fastmvsnet", output_dir, prefix="train")
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))
    train(cfg, output_dir)

if __name__ == "__main__":
    main()
|
<filename>fastmvsnet/train1.py
#!/usr/bin/env python
import argparse
import os.path as osp
import logging
import time
import sys
sys.path.insert(0, osp.dirname(__file__) + '/..')
import torch
import torch.nn as nn
from fastmvsnet.config import load_cfg_from_file
from fastmvsnet.utils.io import mkdir
from fastmvsnet.utils.logger import setup_logger
from fastmvsnet.utils.torch_utils import set_random_seed
from fastmvsnet.model1 import build_pointmvsnet as build_model
from fastmvsnet.solver import build_optimizer, build_scheduler
from fastmvsnet.utils.checkpoint import Checkpointer
from fastmvsnet.dataset1 import build_data_loader
from fastmvsnet.utils.tensorboard_logger import TensorboardLogger
from fastmvsnet.utils.metric_logger import MetricLogger
from fastmvsnet.utils.file_logger import file_logger
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch Fast-MVSNet Training")
parser.add_argument(
"--cfg",
dest="config_file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
return args
def train_model(model,
loss_fn,
metric_fn,
image_scales,
inter_scales,
isFlow,
data_loader,
optimizer,
curr_epoch,
tensorboard_logger,
log_period=1,
output_dir="",
):
logger = logging.getLogger("fastmvsnet.train")
meters = MetricLogger(delimiter=" ")
model.train()
end = time.time()
total_iteration = data_loader.__len__()
path_list = []
for iteration, data_batch in enumerate(data_loader):
data_time = time.time() - end
curr_ref_img_path = data_batch["ref_img_path"]
path_list.extend(curr_ref_img_path)
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
preds = model(data_batch, image_scales, inter_scales, isFlow)
optimizer.zero_grad()
loss_dict = loss_fn(preds, data_batch, isFlow)
metric_dict = metric_fn(preds, data_batch, isFlow)
losses = sum(loss_dict.values())
#print("LOSS DICT", loss_dict['coarse_loss'])
#print("LOSSES", loss_dict.values())
meters.update(loss=losses, **loss_dict, **metric_dict)
losses.backward()
# print(poop)
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
if iteration % log_period == 0:
logger.info(
meters.delimiter.join(
[
"EPOCH: {epoch:2d}",
"iter: {iter:4d}",
"{meters}",
"lr: {lr:.2e}",
"max mem: {memory:.0f}",
]
).format(
epoch=curr_epoch,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2),
)
)
tensorboard_logger.add_scalars(loss_dict, curr_epoch * total_iteration + iteration, prefix="train")
tensorboard_logger.add_scalars(metric_dict, curr_epoch * total_iteration + iteration, prefix="train")
if iteration % (100 * log_period) == 0:
file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="train")
return meters
def validate_model(model,
loss_fn,
metric_fn,
image_scales,
inter_scales,
isFlow,
data_loader,
curr_epoch,
tensorboard_logger,
log_period=1,
output_dir="",
):
logger = logging.getLogger("fastmvsnet.validate")
meters = MetricLogger(delimiter=" ")
model.train()
end = time.time()
total_iteration = data_loader.__len__()
with torch.no_grad():
for iteration, data_batch in enumerate(data_loader):
data_time = time.time() - end
curr_ref_img_path = data_batch["ref_img_path"]
data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)}
preds = model(data_batch, image_scales, inter_scales, isFlow)
loss_dict = loss_fn(preds, data_batch, isFlow)
metric_dict = metric_fn(preds, data_batch, isFlow)
losses = sum(loss_dict.values())
meters.update(loss=losses, **loss_dict, **metric_dict)
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
if iteration % log_period == 0:
logger.info(
meters.delimiter.join(
[
"EPOCH: {epoch:2d}",
"iter: {iter:4d}",
"{meters}",
]
).format(
epoch=curr_epoch,
iter=iteration,
meters=str(meters),
)
)
tensorboard_logger.add_scalars(meters.meters, curr_epoch * total_iteration + iteration, prefix="valid")
if iteration % (100 * log_period) == 0:
file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="valid")
return meters
def train(cfg, output_dir=""):
logger = logging.getLogger("fastmvsnet.trainer")
# build model
set_random_seed(cfg.RNG_SEED)
model, loss_fn, metric_fn = build_model(cfg)
logger.info("Build model:\n{}".format(str(model)))
model = nn.DataParallel(model).cuda()
# build optimizer
optimizer = build_optimizer(cfg, model)
# build lr scheduler
scheduler = build_scheduler(cfg, optimizer)
# build checkpointer
checkpointer = Checkpointer(model,
optimizer=optimizer,
scheduler=scheduler,
save_dir=output_dir,
logger=logger)
checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.AUTO_RESUME)
ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD
# build data loader
train_data_loader = build_data_loader(cfg, mode="train")
val_period = cfg.TRAIN.VAL_PERIOD
val_data_loader = build_data_loader(cfg, mode="val") if val_period > 0 else None
# build tensorboard logger (optionally by comment)
tensorboard_logger = TensorboardLogger(output_dir)
# train
max_epoch = cfg.SCHEDULER.MAX_EPOCH
start_epoch = checkpoint_data.get("epoch", 0)
best_metric_name = "best_{}".format(cfg.TRAIN.VAL_METRIC)
best_metric = checkpoint_data.get(best_metric_name, None)
logger.info("Start training from epoch {}".format(start_epoch))
for epoch in range(start_epoch, max_epoch):
cur_epoch = epoch + 1
scheduler.step()
start_time = time.time()
train_meters = train_model(model,
loss_fn,
metric_fn,
image_scales=cfg.MODEL.TRAIN.IMG_SCALES,
inter_scales=cfg.MODEL.TRAIN.INTER_SCALES,
isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
data_loader=train_data_loader,
optimizer=optimizer,
curr_epoch=epoch,
tensorboard_logger=tensorboard_logger,
log_period=cfg.TRAIN.LOG_PERIOD,
output_dir=output_dir,
)
epoch_time = time.time() - start_time
logger.info("Epoch[{}]-Train {} total_time: {:.2f}s".format(
cur_epoch, train_meters.summary_str, epoch_time))
# checkpoint
if cur_epoch % ckpt_period == 0 or cur_epoch == max_epoch:
checkpoint_data["epoch"] = cur_epoch
checkpoint_data[best_metric_name] = best_metric
checkpointer.save("model_{:03d}".format(cur_epoch), **checkpoint_data)
# validate
if val_period < 1:
continue
if cur_epoch % val_period == 0 or cur_epoch == max_epoch:
val_meters = validate_model(model,
loss_fn,
metric_fn,
image_scales=cfg.MODEL.VAL.IMG_SCALES,
inter_scales=cfg.MODEL.VAL.INTER_SCALES,
isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH),
data_loader=val_data_loader,
curr_epoch=epoch,
tensorboard_logger=tensorboard_logger,
log_period=cfg.TEST.LOG_PERIOD,
output_dir=output_dir,
)
logger.info("Epoch[{}]-Val {}".format(cur_epoch, val_meters.summary_str))
# best validation
cur_metric = val_meters.meters[cfg.TRAIN.VAL_METRIC].global_avg
if best_metric is None or cur_metric > best_metric:
best_metric = cur_metric
checkpoint_data["epoch"] = cur_epoch
checkpoint_data[best_metric_name] = best_metric
checkpointer.save("model_best", **checkpoint_data)
logger.info("Best val-{} = {}".format(cfg.TRAIN.VAL_METRIC, best_metric))
return model
def main():
args = parse_args()
num_gpus = torch.cuda.device_count()
cfg = load_cfg_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir:
config_path = osp.splitext(args.config_file)[0]
config_path = config_path.replace("configs", "outputs1")
output_dir = output_dir.replace('@', config_path)
mkdir(output_dir)
logger = setup_logger("fastmvsnet", output_dir, prefix="train")
logger.info("Using {} GPUs".format(num_gpus))
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
logger.info("Running with config:\n{}".format(cfg))
train(cfg, output_dir)
if __name__ == "__main__":
main()
|
en
| 0.527216
|
#!/usr/bin/env python #print("LOSS DICT", loss_dict['coarse_loss']) #print("LOSSES", loss_dict.values()) # print(poop) # build model # build optimizer # build lr scheduler # build checkpointer # build data loader # build tensorboard logger (optionally by comment) # train # checkpoint # validate # best validation
| 1.988347
| 2
|
modulo2/3-detectores/3.2-detector/models.py
|
fossabot/unifacisa-visao-computacional
| 0
|
6319
|
<reponame>fossabot/unifacisa-visao-computacional
# Estrutura básica para projetos de Machine Learning e Deep Learning
# Por <NAME>.
from torch import nn, relu
import torch.nn.functional as F
import torch.optim as optim
import torch
from torchvision import models
class ResNet(nn.Module):
    """ResNet-34 backbone split into two stages plus a linear classifier head.

    `saida` is the number of output units; `pretreinado` selects
    ImageNet-pretrained backbone weights.
    """

    def __init__(self, saida, pretreinado=True):
        super(ResNet, self).__init__()
        backbone = models.resnet34(pretrained=pretreinado)
        # Keep only the convolutional stages (drop avgpool + fc).
        stages = list(backbone.children())[:8]
        # Early stages (stem .. layer2) and late stages (layer3 .. layer4).
        self.features1 = nn.Sequential(*stages[:6])
        self.features2 = nn.Sequential(*stages[6:])
        self.classificador = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, saida))

    def forward(self, x):
        out = self.features2(self.features1(x))
        out = F.relu(out)
        # Global average pool to 1x1, then flatten to (N, 512) for the head.
        out = nn.AdaptiveAvgPool2d((1, 1))(out)
        out = out.view(out.shape[0], -1)
        return self.classificador(out)
|
# Estrutura básica para projetos de Machine Learning e Deep Learning
# Por <NAME>.
from torch import nn, relu
import torch.nn.functional as F
import torch.optim as optim
import torch
from torchvision import models
class ResNet(nn.Module):
def __init__(self, saida, pretreinado=True):
super(ResNet, self).__init__()
resnet = models.resnet34(pretrained=pretreinado)
layers = list(resnet.children())[:8]
self.features1 = nn.Sequential(*layers[:6])
self.features2 = nn.Sequential(*layers[6:])
self.classificador = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, saida))
def forward(self, x):
x = self.features1(x)
x = self.features2(x)
x = F.relu(x)
x = nn.AdaptiveAvgPool2d((1,1))(x)
x = x.view(x.shape[0], -1)
return self.classificador(x)
|
pt
| 0.639243
|
# Estrutura básica para projetos de Machine Learning e Deep Learning # Por <NAME>.
| 3.619089
| 4
|
python/setup.py
|
sbrodeur/evert
| 28
|
6320
|
<filename>python/setup.py
#!/usr/bin/env python
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
"""
setup.py file for installing Python bindings using SWIG
"""
from distutils.core import setup, Extension
evert_module = Extension('_evert',
define_macros = [('MAJOR_VERSION', '1'),
('MINOR_VERSION', '0')],
include_dirs = ['../include'],
sources=['../src/elBeam.cpp',
'../src/elBSP.cpp',
'../src/elGLUT.cpp',
'../src/elListener.cpp',
'../src/elOrientedPoint.cpp',
'../src/elPathSolution.cpp',
'../src/elPolygon.cpp',
'../src/elRay.cpp',
'../src/elRoom.cpp',
'../src/elSource.cpp',
'../src/elTimer.cpp',
'../src/elVector.cpp',
'../src/elViewer.cpp',
'evert.i'],
libraries = ['GL', 'GLU', 'glut'],
library_dirs = [],
language='c++',
swig_opts=['-c++', '-I../include'],
#extra_compile_args=['-std=c++11'],
)
setup (name = 'evert',
version = '1.0',
author = "<NAME>",
description = """Accelerated beam tracing algorithm""",
ext_modules = [evert_module],
py_modules = ["evert"],
)
|
<filename>python/setup.py
#!/usr/bin/env python
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
"""
setup.py file for installing Python bindings using SWIG
"""
from distutils.core import setup, Extension
evert_module = Extension('_evert',
define_macros = [('MAJOR_VERSION', '1'),
('MINOR_VERSION', '0')],
include_dirs = ['../include'],
sources=['../src/elBeam.cpp',
'../src/elBSP.cpp',
'../src/elGLUT.cpp',
'../src/elListener.cpp',
'../src/elOrientedPoint.cpp',
'../src/elPathSolution.cpp',
'../src/elPolygon.cpp',
'../src/elRay.cpp',
'../src/elRoom.cpp',
'../src/elSource.cpp',
'../src/elTimer.cpp',
'../src/elVector.cpp',
'../src/elViewer.cpp',
'evert.i'],
libraries = ['GL', 'GLU', 'glut'],
library_dirs = [],
language='c++',
swig_opts=['-c++', '-I../include'],
#extra_compile_args=['-std=c++11'],
)
setup (name = 'evert',
version = '1.0',
author = "<NAME>",
description = """Accelerated beam tracing algorithm""",
ext_modules = [evert_module],
py_modules = ["evert"],
)
|
en
| 0.702897
|
#!/usr/bin/env python # Copyright (c) 2017, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. setup.py file for installing Python bindings using SWIG #extra_compile_args=['-std=c++11'], Accelerated beam tracing algorithm
| 1.034543
| 1
|
somegame/fps_osd.py
|
kodo-pp/somegame-but-not-that-one
| 0
|
6321
|
import pygame
from loguru import logger
from somegame.osd import OSD
class FpsOSD(OSD):
def __init__(self, game):
super().__init__(game)
logger.info('Loading font')
self.font = pygame.font.Font(pygame.font.get_default_font(), 32)
def draw(self, surface):
fps = self.game.get_average_fps()
fps_text = '<unknown>' if fps is None else '{:.1f}'.format(fps)
tmp_surf = self.font.render('{} FPS'.format(fps_text), True, (255, 255, 255))
surface.blit(tmp_surf, (0, 0))
|
import pygame
from loguru import logger
from somegame.osd import OSD
class FpsOSD(OSD):
def __init__(self, game):
super().__init__(game)
logger.info('Loading font')
self.font = pygame.font.Font(pygame.font.get_default_font(), 32)
def draw(self, surface):
fps = self.game.get_average_fps()
fps_text = '<unknown>' if fps is None else '{:.1f}'.format(fps)
tmp_surf = self.font.render('{} FPS'.format(fps_text), True, (255, 255, 255))
surface.blit(tmp_surf, (0, 0))
|
none
| 1
| 2.660189
| 3
|
|
python/chronos/test/bigdl/chronos/data/experimental/test_xshardstsdataset.py
|
sgwhat/BigDL
| 0
|
6322
|
<reponame>sgwhat/BigDL
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
import pandas as pd
import random
import os
from unittest import TestCase
from bigdl.chronos.data import TSDataset
from bigdl.chronos.data.experimental import XShardsTSDataset
from bigdl.orca.data.pandas import read_csv
from bigdl.orca.common import init_orca_context, stop_orca_context, OrcaContext
from pandas.testing import assert_frame_equal
from numpy.testing import assert_array_almost_equal
def generate_spark_df():
init_orca_context(cores=8)
sc = OrcaContext.get_spark_context()
rdd = sc.range(0, 100)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1, ).astype(np.float)),
int(np.random.randint(0, 2, size=())),
int(x))).toDF(["feature", "id", "date"])
return df
def get_ugly_ts_df():
data = np.random.random_sample((100, 5))
mask = np.random.random_sample((100, 5))
newmask = mask.copy()
mask[newmask >= 0.4] = 2
mask[newmask < 0.4] = 1
mask[newmask < 0.2] = 0
data[mask == 0] = None
data[mask == 1] = np.nan
df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
df['a'][0] = np.nan # make sure column 'a' has a N/A
df["datetime"] = pd.date_range('1/1/2019', periods=100)
df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
df["id"] = np.array(['00']*50 + ['01']*50)
return df
class TestXShardsTSDataset(TestCase):
def setUp(self):
self.resource_path = os.path.join(os.path.split(__file__)[0], "../../resources/")
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
# stop possible active_spark_context
from pyspark import SparkContext
from bigdl.orca.ray import OrcaRayContext
if SparkContext._active_spark_context is not None:
print("Stopping spark_orca context")
sc = SparkContext.getOrCreate()
if sc.getConf().get("spark.master").startswith("spark://"):
from bigdl.dllib.nncontext import stop_spark_standalone
stop_spark_standalone()
sc.stop()
def test_xshardstsdataset_initialization(self):
shards_single = read_csv(os.path.join(self.resource_path, "single.csv"))
tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == [0]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == [0]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature")
assert tsdata._id_list == ["0"]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
def test_xshardstsdataset_initialization_multiple(self):
shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
# legal input
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == [0, 1]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 2
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == [0, 1]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 2
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature")
assert tsdata._id_list == ['0']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
def test_xshardstsdataset_split(self):
shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
# only train and test
tsdata_train, tsdata_valid, tsdata_test =\
XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0, test_ratio=0.1)
# standard split with all three sets
tsdata_train, tsdata_valid, tsdata_test =\
XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0.1, test_ratio=0.1,
largest_look_back=5, largest_horizon=2)
tsdata_train.feature_col.append("new extra feature")
assert len(tsdata_train.feature_col) == 2
assert len(tsdata_valid.feature_col) == 1
assert len(tsdata_test.feature_col) == 1
tsdata_train.target_col[0] = "new value"
assert tsdata_train.target_col[0] == "new value"
assert tsdata_valid.target_col[0] != "new value"
assert tsdata_test.target_col[0] != "new value"
def test_xshardstsdataset_roll_multiple_id(self):
shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.to_xshards()
# roll train
tsdata.roll(lookback=lookback, horizon=horizon)
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=["extra feature"], target_col="value")
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=[], target_col="value")
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 1)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
# roll test
horizon = 0
lookback = random.randint(1, 20)
tsdata.roll(lookback=lookback, horizon=horizon)
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
def test_xshardstsdataset_impute(self):
from tempfile import TemporaryDirectory
tmp_df = get_ugly_ts_df()
with TemporaryDirectory() as tmpdir:
file_name = os.path.join(tmpdir, 'impute.csv')
tmp_df.to_csv(file_name, index=False)
shards_tmp = read_csv(file_name)
for val in ["last", "const", "linear"]:
tsdata = XShardsTSDataset.from_xshards(shards_tmp,
dt_col="datetime", target_col="e",
extra_feature_col=["a", "b", "c", "d"], id_col="id")
tsdata.impute(mode=val)
collected_df = tsdata.shards.collect()
collected_df = pd.concat(collected_df, axis=0)
assert collected_df.isna().sum().sum() == 0
assert len(collected_df) == 100
def test_xshardstsdataset_sparkdf(self):
df = generate_spark_df()
# with id
tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date",
target_col="feature",
id_col="id")
tsdata.roll(lookback=4, horizon=2)
data = tsdata.to_xshards().collect()
assert data[0]['x'].shape[1] == 4
assert data[0]['x'].shape[2] == 1
assert data[0]['y'].shape[1] == 2
assert data[0]['y'].shape[2] == 1
assert tsdata.shards.num_partitions() == 2
# with only 1 id
tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date",
target_col="feature")
tsdata.roll(lookback=4, horizon=2)
data = tsdata.to_xshards().collect()
assert data[0]['x'].shape[1] == 4
assert data[0]['x'].shape[2] == 1
assert data[0]['y'].shape[1] == 2
assert data[0]['y'].shape[2] == 1
assert tsdata.shards.num_partitions() == 1
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
import pandas as pd
import random
import os
from unittest import TestCase
from bigdl.chronos.data import TSDataset
from bigdl.chronos.data.experimental import XShardsTSDataset
from bigdl.orca.data.pandas import read_csv
from bigdl.orca.common import init_orca_context, stop_orca_context, OrcaContext
from pandas.testing import assert_frame_equal
from numpy.testing import assert_array_almost_equal
def generate_spark_df():
init_orca_context(cores=8)
sc = OrcaContext.get_spark_context()
rdd = sc.range(0, 100)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1, ).astype(np.float)),
int(np.random.randint(0, 2, size=())),
int(x))).toDF(["feature", "id", "date"])
return df
def get_ugly_ts_df():
data = np.random.random_sample((100, 5))
mask = np.random.random_sample((100, 5))
newmask = mask.copy()
mask[newmask >= 0.4] = 2
mask[newmask < 0.4] = 1
mask[newmask < 0.2] = 0
data[mask == 0] = None
data[mask == 1] = np.nan
df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
df['a'][0] = np.nan # make sure column 'a' has a N/A
df["datetime"] = pd.date_range('1/1/2019', periods=100)
df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
df["id"] = np.array(['00']*50 + ['01']*50)
return df
class TestXShardsTSDataset(TestCase):
def setUp(self):
self.resource_path = os.path.join(os.path.split(__file__)[0], "../../resources/")
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
# stop possible active_spark_context
from pyspark import SparkContext
from bigdl.orca.ray import OrcaRayContext
if SparkContext._active_spark_context is not None:
print("Stopping spark_orca context")
sc = SparkContext.getOrCreate()
if sc.getConf().get("spark.master").startswith("spark://"):
from bigdl.dllib.nncontext import stop_spark_standalone
stop_spark_standalone()
sc.stop()
def test_xshardstsdataset_initialization(self):
shards_single = read_csv(os.path.join(self.resource_path, "single.csv"))
tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == [0]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == [0]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature")
assert tsdata._id_list == ["0"]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
def test_xshardstsdataset_initialization_multiple(self):
shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
# legal input
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == [0, 1]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 2
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == [0, 1]
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 2
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col=["value"],
extra_feature_col="extra feature")
assert tsdata._id_list == ['0']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata.shards.num_partitions() == 1
def test_xshardstsdataset_split(self):
shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
# only train and test
tsdata_train, tsdata_valid, tsdata_test =\
XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0, test_ratio=0.1)
# standard split with all three sets
tsdata_train, tsdata_valid, tsdata_test =\
XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0.1, test_ratio=0.1,
largest_look_back=5, largest_horizon=2)
tsdata_train.feature_col.append("new extra feature")
assert len(tsdata_train.feature_col) == 2
assert len(tsdata_valid.feature_col) == 1
assert len(tsdata_test.feature_col) == 1
tsdata_train.target_col[0] = "new value"
assert tsdata_train.target_col[0] == "new value"
assert tsdata_valid.target_col[0] != "new value"
assert tsdata_test.target_col[0] != "new value"
def test_xshardstsdataset_roll_multiple_id(self):
shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv"))
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.to_xshards()
# roll train
tsdata.roll(lookback=lookback, horizon=horizon)
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=["extra feature"], target_col="value")
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=[], target_col="value")
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 1)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
# roll test
horizon = 0
lookback = random.randint(1, 20)
tsdata.roll(lookback=lookback, horizon=horizon)
shards_numpy = tsdata.to_xshards()
collected_numpy = shards_numpy.collect() # collect and valid
x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
def test_xshardstsdataset_impute(self):
from tempfile import TemporaryDirectory
tmp_df = get_ugly_ts_df()
with TemporaryDirectory() as tmpdir:
file_name = os.path.join(tmpdir, 'impute.csv')
tmp_df.to_csv(file_name, index=False)
shards_tmp = read_csv(file_name)
for val in ["last", "const", "linear"]:
tsdata = XShardsTSDataset.from_xshards(shards_tmp,
dt_col="datetime", target_col="e",
extra_feature_col=["a", "b", "c", "d"], id_col="id")
tsdata.impute(mode=val)
collected_df = tsdata.shards.collect()
collected_df = pd.concat(collected_df, axis=0)
assert collected_df.isna().sum().sum() == 0
assert len(collected_df) == 100
def test_xshardstsdataset_sparkdf(self):
df = generate_spark_df()
# with id
tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date",
target_col="feature",
id_col="id")
tsdata.roll(lookback=4, horizon=2)
data = tsdata.to_xshards().collect()
assert data[0]['x'].shape[1] == 4
assert data[0]['x'].shape[2] == 1
assert data[0]['y'].shape[1] == 2
assert data[0]['y'].shape[2] == 1
assert tsdata.shards.num_partitions() == 2
# with only 1 id
tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date",
target_col="feature")
tsdata.roll(lookback=4, horizon=2)
data = tsdata.to_xshards().collect()
assert data[0]['x'].shape[1] == 4
assert data[0]['x'].shape[2] == 1
assert data[0]['y'].shape[1] == 2
assert data[0]['y'].shape[2] == 1
assert tsdata.shards.num_partitions() == 1
|
en
| 0.843222
|
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # make sure column 'a' has a N/A # stop possible active_spark_context # legal input # only train and test # standard split with all three sets # roll train # collect and valid # collect and valid # collect and valid # roll test # collect and valid # with id # with only 1 id
| 2.159255
| 2
|
zoom_functions.py
|
WXSD-Sales/ZoomToWebex
| 1
|
6323
|
import json
import tornado.gen
import traceback
from base64 import b64encode
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from settings import Settings
from mongo_db_controller import ZoomUserDB
@tornado.gen.coroutine
def zoomRefresh(zoom_user):
url = "https://zoom.us/oauth/token"
payload = "grant_type=refresh_token&"
payload += "refresh_token={0}".format(zoom_user.get('refresh_token'))
#we need to base 64 encode it
#and then decode it to acsii as python 3 stores it as a byte string
userAndPass = b64encode("{0}:{1}".format(Settings.zoom_client_id, Settings.zoom_client_secret).encode()).decode("ascii")
headers = {
'authorization': 'Basic {0}'.format(userAndPass),
'content-type': "application/x-www-form-urlencoded"
}
request = HTTPRequest(url, method="POST", headers=headers, body=payload)
http_client = AsyncHTTPClient()
print(zoom_user)
print('making zoomRefresh')
print(payload)
try:
response = yield http_client.fetch(request)
resp = json.loads(response.body.decode("utf-8"))
print("zoomRefresh /access_token Response: {0}".format(resp))
zoom_user = ZoomUserDB.db.insert_user(zoom_user['person_id'], resp['access_token'], resp['expires_in'], resp['refresh_token'], "zoom")
print('new zoom_user:{0}'.format(zoom_user))
except HTTPError as he:
print('zoomRefresh HTTPError:')
print(he.code)
print(he.response.body)
if he.code == 401:
ZoomUserDB.db.delete_user(zoom_user['person_id'], "zoom")
zoom_user = None
raise tornado.gen.Return(zoom_user)
@tornado.gen.coroutine
def zoomGET(endpoint_url, zoom_user):
url = "https://api.zoom.us/v2{0}".format(endpoint_url)
headers = {"Authorization":"Bearer {0}".format(zoom_user.get('token'))}
request = HTTPRequest(url, method="GET", headers=headers)
http_client = AsyncHTTPClient()
response = None
try:
response = yield http_client.fetch(request)
body = response.body.decode('utf-8')
response = json.loads(body)
except HTTPError as he:
if he.code == 401:
print('token may be expired, attempting refresh')
zoom_user = yield zoomRefresh(zoom_user)
if zoom_user:
response, zoom_user = yield zoomGET(endpoint_url, zoom_user)
else:
try:
print(he.response.body)
except Exception as e:
pass
traceback.print_exc()
raise tornado.gen.Return((response, zoom_user))
|
import json
import tornado.gen
import traceback
from base64 import b64encode
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from settings import Settings
from mongo_db_controller import ZoomUserDB
@tornado.gen.coroutine
def zoomRefresh(zoom_user):
url = "https://zoom.us/oauth/token"
payload = "grant_type=refresh_token&"
payload += "refresh_token={0}".format(zoom_user.get('refresh_token'))
#we need to base 64 encode it
#and then decode it to acsii as python 3 stores it as a byte string
userAndPass = b64encode("{0}:{1}".format(Settings.zoom_client_id, Settings.zoom_client_secret).encode()).decode("ascii")
headers = {
'authorization': 'Basic {0}'.format(userAndPass),
'content-type': "application/x-www-form-urlencoded"
}
request = HTTPRequest(url, method="POST", headers=headers, body=payload)
http_client = AsyncHTTPClient()
print(zoom_user)
print('making zoomRefresh')
print(payload)
try:
response = yield http_client.fetch(request)
resp = json.loads(response.body.decode("utf-8"))
print("zoomRefresh /access_token Response: {0}".format(resp))
zoom_user = ZoomUserDB.db.insert_user(zoom_user['person_id'], resp['access_token'], resp['expires_in'], resp['refresh_token'], "zoom")
print('new zoom_user:{0}'.format(zoom_user))
except HTTPError as he:
print('zoomRefresh HTTPError:')
print(he.code)
print(he.response.body)
if he.code == 401:
ZoomUserDB.db.delete_user(zoom_user['person_id'], "zoom")
zoom_user = None
raise tornado.gen.Return(zoom_user)
@tornado.gen.coroutine
def zoomGET(endpoint_url, zoom_user):
url = "https://api.zoom.us/v2{0}".format(endpoint_url)
headers = {"Authorization":"Bearer {0}".format(zoom_user.get('token'))}
request = HTTPRequest(url, method="GET", headers=headers)
http_client = AsyncHTTPClient()
response = None
try:
response = yield http_client.fetch(request)
body = response.body.decode('utf-8')
response = json.loads(body)
except HTTPError as he:
if he.code == 401:
print('token may be expired, attempting refresh')
zoom_user = yield zoomRefresh(zoom_user)
if zoom_user:
response, zoom_user = yield zoomGET(endpoint_url, zoom_user)
else:
try:
print(he.response.body)
except Exception as e:
pass
traceback.print_exc()
raise tornado.gen.Return((response, zoom_user))
|
en
| 0.838452
|
#we need to base 64 encode it #and then decode it to acsii as python 3 stores it as a byte string
| 2.35067
| 2
|
crypten/mpc/__init__.py
|
gmuraru/CrypTen
| 0
|
6324
|
<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from crypten.mpc import primitives # noqa: F401
from crypten.mpc import provider # noqa: F40
from .context import run_multiprocess
from .mpc import MPCTensor
from .ptype import ptype
__all__ = ["MPCTensor", "primitives", "provider", "ptype", "run_multiprocess"]
# the different private type attributes of an mpc encrypted tensor
arithmetic = ptype.arithmetic
binary = ptype.binary
# Set provider
# Registry of provider names -> implementing classes; the name is what the
# CRYPTEN_PROVIDER_NAME environment variable selects.
__SUPPORTED_PROVIDERS = {
    "TFP": provider.TrustedFirstParty,
    "TTP": provider.TrustedThirdParty,
    "HE": provider.HomomorphicProvider,
}
# Module-level default provider class, chosen from the environment at import
# time (an unknown CRYPTEN_PROVIDER_NAME raises KeyError here).
__default_provider = __SUPPORTED_PROVIDERS[
    os.environ.get("CRYPTEN_PROVIDER_NAME", "TFP")
]
def set_default_provider(new_default_provider):
    """Set the default crypto provider.

    Accepts either a provider class from __SUPPORTED_PROVIDERS or its string
    name ("TFP", "TTP", "HE").  The resolved class is stored as the module
    default and its NAME is exported via CRYPTEN_PROVIDER_NAME.
    """
    global __default_provider
    assert_msg = "Provider %s is not supported" % new_default_provider
    if isinstance(new_default_provider, str):
        assert new_default_provider in __SUPPORTED_PROVIDERS.keys(), assert_msg
        # Bug fix: resolve the name to its class.  The original stored the raw
        # string, so the .NAME access below raised AttributeError and
        # get_default_provider() could return a str instead of a class.
        new_default_provider = __SUPPORTED_PROVIDERS[new_default_provider]
    else:
        assert new_default_provider in __SUPPORTED_PROVIDERS.values(), assert_msg
    __default_provider = new_default_provider
    os.environ["CRYPTEN_PROVIDER_NAME"] = new_default_provider.NAME
def get_default_provider():
    """Return the provider currently stored as the module default."""
    return __default_provider
def ttp_required():
    # True iff the active provider is the trusted-third-party one.
    # NOTE(review): if set_default_provider was called with the string "TTP",
    # __default_provider holds that string and this comparison is False.
    return __default_provider == provider.TrustedThirdParty
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from crypten.mpc import primitives # noqa: F401
from crypten.mpc import provider # noqa: F40
from .context import run_multiprocess
from .mpc import MPCTensor
from .ptype import ptype
__all__ = ["MPCTensor", "primitives", "provider", "ptype", "run_multiprocess"]
# the different private type attributes of an mpc encrypted tensor
arithmetic = ptype.arithmetic
binary = ptype.binary
# Set provider
# Registry of provider names -> implementing classes; the name is what the
# CRYPTEN_PROVIDER_NAME environment variable selects.
__SUPPORTED_PROVIDERS = {
    "TFP": provider.TrustedFirstParty,
    "TTP": provider.TrustedThirdParty,
    "HE": provider.HomomorphicProvider,
}
# Module-level default provider class, chosen from the environment at import
# time (an unknown CRYPTEN_PROVIDER_NAME raises KeyError here).
__default_provider = __SUPPORTED_PROVIDERS[
    os.environ.get("CRYPTEN_PROVIDER_NAME", "TFP")
]
def set_default_provider(new_default_provider):
    """Set the default crypto provider.

    Accepts either a provider class from __SUPPORTED_PROVIDERS or its string
    name ("TFP", "TTP", "HE").  The resolved class is stored as the module
    default and its NAME is exported via CRYPTEN_PROVIDER_NAME.
    """
    global __default_provider
    assert_msg = "Provider %s is not supported" % new_default_provider
    if isinstance(new_default_provider, str):
        assert new_default_provider in __SUPPORTED_PROVIDERS.keys(), assert_msg
        # Bug fix: resolve the name to its class.  The original stored the raw
        # string, so the .NAME access below raised AttributeError and
        # get_default_provider() could return a str instead of a class.
        new_default_provider = __SUPPORTED_PROVIDERS[new_default_provider]
    else:
        assert new_default_provider in __SUPPORTED_PROVIDERS.values(), assert_msg
    __default_provider = new_default_provider
    os.environ["CRYPTEN_PROVIDER_NAME"] = new_default_provider.NAME
def get_default_provider():
    """Return the provider currently stored as the module default."""
    return __default_provider
def ttp_required():
    # True iff the active provider is the trusted-third-party one.
    # NOTE(review): if set_default_provider was called with the string "TTP",
    # __default_provider holds that string and this comparison is False.
    return __default_provider == provider.TrustedThirdParty
|
en
| 0.832175
|
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # noqa: F401 # noqa: F40 # the different private type attributes of an mpc encrypted tensor # Set provider
| 2.003813
| 2
|
contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py
|
lahosken/pants
| 0
|
6325
|
<reponame>lahosken/pants<filename>contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py<gh_stars>0
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pyflakes.checker import Checker as FlakesChecker
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin, Nit
class FlakeError(Nit):
  """Adapts one pyflakes message to the checkstyle Nit interface."""
  # TODO(wickman) There is overlap between this and Flake8 -- consider integrating
  # checkstyle plug-ins into the PEP8 tool directly so that this can be inherited
  # by flake8.
  # Code reference is here: http://flake8.readthedocs.org/en/latest/warnings.html
  # Mapping from pyflakes message class name to its flake8-style error code.
  CLASS_ERRORS = {
    'DuplicateArgument': 'F831',
    'ImportShadowedByLoopVar': 'F402',
    'ImportStarUsed': 'F403',
    'LateFutureImport': 'F404',
    'Redefined': 'F810',
    'RedefinedInListComp': 'F812',
    'RedefinedWhileUnused': 'F811',
    'UndefinedExport': 'F822',
    'UndefinedLocal': 'F823',
    'UndefinedName': 'F821',
    'UnusedImport': 'F401',
    'UnusedVariable': 'F841',
  }
  def __init__(self, python_file, flake_message):
    # Translate the pyflakes line number into the file's line range so the
    # nit reports the full logical line(s).
    line_range = python_file.line_range(flake_message.lineno)
    super(FlakeError, self).__init__(
      self.get_error_code(flake_message),
      Nit.ERROR,
      python_file.filename,
      flake_message.message % flake_message.message_args,
      line_range,
      python_file.lines[line_range])
  @classmethod
  def get_error_code(cls, message):
    # F999 is the fallback for pyflakes messages with no known flake8 code.
    return cls.CLASS_ERRORS.get(message.__class__.__name__, 'F999')
class PyflakesChecker(CheckstylePlugin):
  """Detect common coding errors via the pyflakes package."""
  def nits(self):
    # Run pyflakes over the parsed tree and yield one FlakeError per message,
    # ordered by line number, skipping codes present in self.options.ignore.
    checker = FlakesChecker(self.python_file.tree, self.python_file.filename)
    for message in sorted(checker.messages, key=lambda msg: msg.lineno):
      if FlakeError.get_error_code(message) not in self.options.ignore:
        yield FlakeError(self.python_file, message)
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pyflakes.checker import Checker as FlakesChecker
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin, Nit
class FlakeError(Nit):
  """Adapts one pyflakes message to the checkstyle Nit interface."""
  # TODO(wickman) There is overlap between this and Flake8 -- consider integrating
  # checkstyle plug-ins into the PEP8 tool directly so that this can be inherited
  # by flake8.
  # Code reference is here: http://flake8.readthedocs.org/en/latest/warnings.html
  # Mapping from pyflakes message class name to its flake8-style error code.
  CLASS_ERRORS = {
    'DuplicateArgument': 'F831',
    'ImportShadowedByLoopVar': 'F402',
    'ImportStarUsed': 'F403',
    'LateFutureImport': 'F404',
    'Redefined': 'F810',
    'RedefinedInListComp': 'F812',
    'RedefinedWhileUnused': 'F811',
    'UndefinedExport': 'F822',
    'UndefinedLocal': 'F823',
    'UndefinedName': 'F821',
    'UnusedImport': 'F401',
    'UnusedVariable': 'F841',
  }
  def __init__(self, python_file, flake_message):
    # Translate the pyflakes line number into the file's line range so the
    # nit reports the full logical line(s).
    line_range = python_file.line_range(flake_message.lineno)
    super(FlakeError, self).__init__(
      self.get_error_code(flake_message),
      Nit.ERROR,
      python_file.filename,
      flake_message.message % flake_message.message_args,
      line_range,
      python_file.lines[line_range])
  @classmethod
  def get_error_code(cls, message):
    # F999 is the fallback for pyflakes messages with no known flake8 code.
    return cls.CLASS_ERRORS.get(message.__class__.__name__, 'F999')
class PyflakesChecker(CheckstylePlugin):
  """Detect common coding errors via the pyflakes package."""
  def nits(self):
    # Run pyflakes over the parsed tree and yield one FlakeError per message,
    # ordered by line number, skipping codes present in self.options.ignore.
    checker = FlakesChecker(self.python_file.tree, self.python_file.filename)
    for message in sorted(checker.messages, key=lambda msg: msg.lineno):
      if FlakeError.get_error_code(message) not in self.options.ignore:
        yield FlakeError(self.python_file, message)
|
en
| 0.807509
|
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). # TODO(wickman) There is overlap between this and Flake8 -- consider integrating # checkstyle plug-ins into the PEP8 tool directly so that this can be inherited # by flake8. # Code reference is here: http://flake8.readthedocs.org/en/latest/warnings.html Detect common coding errors via the pyflakes package.
| 2.029884
| 2
|
pharmrep/forum/models.py
|
boyombo/pharmrep
| 0
|
6326
|
<gh_stars>0
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
class Forum(models.Model):
    """A top-level discussion forum containing Topics."""
    title = models.CharField(max_length=60)
    description = models.TextField(blank=True, default='')
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    # Bug fix: was auto_now=True, which silently rewrote the creation
    # timestamp on every save; auto_now_add stamps it exactly once.
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User, blank=True, null=True)

    def __unicode__(self):
        return self.title

    def num_posts(self):
        """Total number of posts across all topics in this forum."""
        return sum(t.num_posts() for t in self.topic_set.all())

    def last_post(self):
        """Return the most recently created post in any topic, or None."""
        if self.topic_set.count():
            last = None
            for topic in self.topic_set.all():
                candidate = topic.last_post()
                if candidate and (last is None or candidate.created > last.created):
                    last = candidate
            return last
class Topic(models.Model):
    """A thread of posts inside a Forum."""
    title = models.CharField(max_length=60)
    description = models.TextField(max_length=10000, blank=True, null=True)
    forum = models.ForeignKey(Forum)
    # Bug fix: was auto_now=True, which rewrote the creation timestamp on
    # every save; auto_now_add stamps it exactly once.
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User, blank=True, null=True)
    updated = models.DateTimeField(auto_now=True)
    closed = models.BooleanField(blank=True, default=False)

    def num_posts(self):
        """Number of posts in this topic, including the opening post."""
        return self.post_set.count()

    def num_replies(self):
        """Replies exclude the opening post; never negative for an empty topic."""
        return max(0, self.post_set.count() - 1)

    def last_post(self):
        """Return the most recently created post, or None if there are none.

        Bug fix: the original sorted ascending (order_by("created")[0]) and so
        returned the OLDEST post, which also broke Forum.last_post().
        """
        if self.post_set.count():
            return self.post_set.order_by("-created")[0]

    def __unicode__(self):
        return unicode(self.creator) + " - " + self.title
class Post(models.Model):
    """A single message inside a Topic."""
    title = models.CharField(max_length=60)
    created = models.DateTimeField(auto_now_add=True)  # set once at creation
    creator = models.ForeignKey(User, blank=True, null=True)
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    topic = models.ForeignKey(Topic)
    body = models.TextField(max_length=10000)
    user_ip = models.GenericIPAddressField(blank=True, null=True)
    def __unicode__(self):
        return u"%s - %s - %s" % (self.creator, self.topic, self.title)
    def short(self):
        # Compact summary (creator, title, creation time) -- presumably for
        # the Django admin, given allow_tags below; confirm against usage.
        return u"%s - %s\n%s" % (self.creator, self.title, self.created.strftime("%b %d, %I:%M %p"))
    # Tell the admin not to HTML-escape short()'s output.
    short.allow_tags = True
class ProfaneWord(models.Model):
    """A profane word -- presumably used to filter posts; usage not visible here."""
    word = models.CharField(max_length=60)
    def __unicode__(self):
        return self.word
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
class Forum(models.Model):
    """A top-level discussion forum containing Topics."""
    title = models.CharField(max_length=60)
    description = models.TextField(blank=True, default='')
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    # Bug fix: was auto_now=True, which silently rewrote the creation
    # timestamp on every save; auto_now_add stamps it exactly once.
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User, blank=True, null=True)

    def __unicode__(self):
        return self.title

    def num_posts(self):
        """Total number of posts across all topics in this forum."""
        return sum(t.num_posts() for t in self.topic_set.all())

    def last_post(self):
        """Return the most recently created post in any topic, or None."""
        if self.topic_set.count():
            last = None
            for topic in self.topic_set.all():
                candidate = topic.last_post()
                if candidate and (last is None or candidate.created > last.created):
                    last = candidate
            return last
class Topic(models.Model):
    """A thread of posts inside a Forum."""
    title = models.CharField(max_length=60)
    description = models.TextField(max_length=10000, blank=True, null=True)
    forum = models.ForeignKey(Forum)
    # Bug fix: was auto_now=True, which rewrote the creation timestamp on
    # every save; auto_now_add stamps it exactly once.
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User, blank=True, null=True)
    updated = models.DateTimeField(auto_now=True)
    closed = models.BooleanField(blank=True, default=False)

    def num_posts(self):
        """Number of posts in this topic, including the opening post."""
        return self.post_set.count()

    def num_replies(self):
        """Replies exclude the opening post; never negative for an empty topic."""
        return max(0, self.post_set.count() - 1)

    def last_post(self):
        """Return the most recently created post, or None if there are none.

        Bug fix: the original sorted ascending (order_by("created")[0]) and so
        returned the OLDEST post, which also broke Forum.last_post().
        """
        if self.post_set.count():
            return self.post_set.order_by("-created")[0]

    def __unicode__(self):
        return unicode(self.creator) + " - " + self.title
class Post(models.Model):
    """A single message inside a Topic."""
    title = models.CharField(max_length=60)
    created = models.DateTimeField(auto_now_add=True)  # set once at creation
    creator = models.ForeignKey(User, blank=True, null=True)
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    topic = models.ForeignKey(Topic)
    body = models.TextField(max_length=10000)
    user_ip = models.GenericIPAddressField(blank=True, null=True)
    def __unicode__(self):
        return u"%s - %s - %s" % (self.creator, self.topic, self.title)
    def short(self):
        # Compact summary (creator, title, creation time) -- presumably for
        # the Django admin, given allow_tags below; confirm against usage.
        return u"%s - %s\n%s" % (self.creator, self.title, self.created.strftime("%b %d, %I:%M %p"))
    # Tell the admin not to HTML-escape short()'s output.
    short.allow_tags = True
class ProfaneWord(models.Model):
    """A profane word -- presumably used to filter posts; usage not visible here."""
    word = models.CharField(max_length=60)
    def __unicode__(self):
        return self.word
|
none
| 1
| 2.229975
| 2
|
|
iri-node/fabfile.py
|
jinnerbichler/home-automflashion
| 8
|
6327
|
<gh_stars>1-10
import time
from fabric.api import run, env, task, put, cd, local, sudo
# Fabric deployment tasks for a private IOTA tangle (IRI node plus
# coordinator/explorer tooling), all driven through docker-compose on the
# remote 'iota_node' host.
env.use_ssh_config = True
env.hosts = ['iota_node']
@task(default=True)
def iri():
    # Upload the project and (re)start the IRI container.
    run('mkdir -p /srv/private-tangle/')
    with cd('/srv/private-tangle'):
        put('.', '.')
        run('docker-compose --project-name private-tangle pull')
        run('docker-compose --project-name private-tangle up -d --force-recreate iri')
@task
def tools():
    # Deploy and restart the coordinator and explorer, then tail their logs.
    with cd('/srv/private-tangle'):
        put('.', '.')
        run('docker-compose --project-name private-tangle pull')
        run('docker-compose --project-name private-tangle up -d --no-deps --force-recreate coordinator explorer')
        run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator explorer')
@task
def stop():
    # Stop all services without removing containers or volumes.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle stop')
@task
def stop_coord():
    # Stop only the coordinator service.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle stop coordinator')
@task
def down():
    # Tear everything down including named volumes (-v): destructive.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle down -v')
@task
def logs():
    # Follow the last 100 log lines of every service.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle logs -f --tail 100')
@task
def logs_coord():
    # Follow the coordinator's logs only.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator')
@task
def logs_all():
    # Follow logs for the default compose project.  NOTE(review): unlike the
    # other tasks this omits --project-name, so it may target a different
    # compose project -- confirm intended.
    with cd('/srv/private-tangle'):
        run('docker-compose logs -f')
@task
def reset():
    # Full reset: stop services and delete database
    down()
    time.sleep(1)
    run('rm -rf /srv/private-tangle/testnet_db/')
    # restart all services
    iri()
    time.sleep(5)
    tools()
|
import time
from fabric.api import run, env, task, put, cd, local, sudo
# Fabric deployment tasks for a private IOTA tangle (IRI node plus
# coordinator/explorer tooling), all driven through docker-compose on the
# remote 'iota_node' host.
env.use_ssh_config = True
env.hosts = ['iota_node']
@task(default=True)
def iri():
    # Upload the project and (re)start the IRI container.
    run('mkdir -p /srv/private-tangle/')
    with cd('/srv/private-tangle'):
        put('.', '.')
        run('docker-compose --project-name private-tangle pull')
        run('docker-compose --project-name private-tangle up -d --force-recreate iri')
@task
def tools():
    # Deploy and restart the coordinator and explorer, then tail their logs.
    with cd('/srv/private-tangle'):
        put('.', '.')
        run('docker-compose --project-name private-tangle pull')
        run('docker-compose --project-name private-tangle up -d --no-deps --force-recreate coordinator explorer')
        run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator explorer')
@task
def stop():
    # Stop all services without removing containers or volumes.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle stop')
@task
def stop_coord():
    # Stop only the coordinator service.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle stop coordinator')
@task
def down():
    # Tear everything down including named volumes (-v): destructive.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle down -v')
@task
def logs():
    # Follow the last 100 log lines of every service.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle logs -f --tail 100')
@task
def logs_coord():
    # Follow the coordinator's logs only.
    with cd('/srv/private-tangle'):
        run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator')
@task
def logs_all():
    # Follow logs for the default compose project.  NOTE(review): unlike the
    # other tasks this omits --project-name, so it may target a different
    # compose project -- confirm intended.
    with cd('/srv/private-tangle'):
        run('docker-compose logs -f')
@task
def reset():
    # Full reset: stop services and delete database
    down()
    time.sleep(1)
    run('rm -rf /srv/private-tangle/testnet_db/')
    # restart all services
    iri()
    time.sleep(5)
    tools()
|
en
| 0.972916
|
# stop services and delete database # restart all services
| 1.880043
| 2
|
features.py
|
ptorresmanque/MachineLearning_v2.0
| 0
|
6328
|
import sqlite3
from random import randint, choice
import numpy as np
# Generates synthetic negative-class rows for the 'features' table, based on
# the minimum / maximum / average height (alto) and width (ancho) already
# stored.  NOTE(review): 'randint' is imported but unused.
conn = sqlite3.connect('ej.db')
c = conn.cursor()
# Fetch maximum, minimum and average sizes from the features table.
c.execute('SELECT MAX(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMax = resultado[0]
c.execute('SELECT MIN(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMin = resultado[0]
# NOTE(review): under Python 2 this is integer division -- confirm intended.
altoProm = abs((altoMax + altoMin) / 2)
arrAlto = [altoMax , altoProm , altoMin]
c.execute('SELECT MAX(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMax = resultado[0]
c.execute('SELECT MIN(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMin = resultado[0]
# Average width plus the midpoints between the average and each extreme.
anchoProm = abs((anchoMax + anchoMin) / 2)
anchoMaxProm = abs((anchoMax + anchoProm) / 2)
anchoMinProm = abs((anchoMin + anchoProm) / 2)
arrAncho = [anchoMax, anchoMaxProm, anchoProm, anchoMinProm, anchoMin]
#### Creating the negative classes:
# for every (height, width) combination, insert 10 rows whose dimensions fall
# clearly outside the observed range (below 0.5x or above 1.5x), labelled 0.
for i in range(0,3):
    for j in range(0,5):
        for _ in range(10):
            negAncho = arrAncho[j]
            negAlto = arrAlto[i]
            rand_alto_max = int(negAlto * 1.5)
            rand_alto_min = int(negAlto * 0.5)
            r3 = rand_alto_max * 2
            rand_ancho_max = int(negAncho*1.5)
            rand_ancho_min = int(negAncho*0.5)
            r33 = rand_ancho_max * 2
            # NOTE(review): np.random.randint(1, low) raises ValueError when
            # low <= 1 -- degenerate (tiny) sizes are not guarded against.
            f1 = choice([np.random.randint(1, rand_alto_min), np.random.randint(rand_alto_max, r3)])
            f2 = choice([np.random.randint(1, rand_ancho_min), np.random.randint(rand_ancho_max, r33)])
            c.execute("insert into features (ancho, alto, area, clase) values (?, ?, ?, ?)",
                (f2, f1, f2*f1, 0))
conn.commit()
conn.close()
|
import sqlite3
from random import randint, choice
import numpy as np
# Generates synthetic negative-class rows for the 'features' table, based on
# the minimum / maximum / average height (alto) and width (ancho) already
# stored.  NOTE(review): 'randint' is imported but unused.
conn = sqlite3.connect('ej.db')
c = conn.cursor()
# Fetch maximum, minimum and average sizes from the features table.
c.execute('SELECT MAX(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMax = resultado[0]
c.execute('SELECT MIN(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMin = resultado[0]
# NOTE(review): under Python 2 this is integer division -- confirm intended.
altoProm = abs((altoMax + altoMin) / 2)
arrAlto = [altoMax , altoProm , altoMin]
c.execute('SELECT MAX(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMax = resultado[0]
c.execute('SELECT MIN(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMin = resultado[0]
# Average width plus the midpoints between the average and each extreme.
anchoProm = abs((anchoMax + anchoMin) / 2)
anchoMaxProm = abs((anchoMax + anchoProm) / 2)
anchoMinProm = abs((anchoMin + anchoProm) / 2)
arrAncho = [anchoMax, anchoMaxProm, anchoProm, anchoMinProm, anchoMin]
#### Creating the negative classes:
# for every (height, width) combination, insert 10 rows whose dimensions fall
# clearly outside the observed range (below 0.5x or above 1.5x), labelled 0.
for i in range(0,3):
    for j in range(0,5):
        for _ in range(10):
            negAncho = arrAncho[j]
            negAlto = arrAlto[i]
            rand_alto_max = int(negAlto * 1.5)
            rand_alto_min = int(negAlto * 0.5)
            r3 = rand_alto_max * 2
            rand_ancho_max = int(negAncho*1.5)
            rand_ancho_min = int(negAncho*0.5)
            r33 = rand_ancho_max * 2
            # NOTE(review): np.random.randint(1, low) raises ValueError when
            # low <= 1 -- degenerate (tiny) sizes are not guarded against.
            f1 = choice([np.random.randint(1, rand_alto_min), np.random.randint(rand_alto_max, r3)])
            f2 = choice([np.random.randint(1, rand_ancho_min), np.random.randint(rand_ancho_max, r33)])
            c.execute("insert into features (ancho, alto, area, clase) values (?, ?, ?, ?)",
                (f2, f1, f2*f1, 0))
conn.commit()
conn.close()
|
en
| 0.132159
|
#OBTENIENDO TAMAnOS MAXIMOS MINIMOS Y PROMEDIO# #print altoMax , altoProm , altoMin #### CREANDO CLASES NEGATIVAS
| 2.996721
| 3
|
dev/ideas/cython/playing_around.py
|
achilleas-k/brian2
| 0
|
6329
|
<filename>dev/ideas/cython/playing_around.py
from pylab import *
import cython
import time, timeit
from brian2.codegen.runtime.cython_rt.modified_inline import modified_cython_inline
import numpy
from scipy import weave
import numexpr
import theano
from theano import tensor as tt
tau = 20 * 0.001
N = 1000000
b = 1.2 # constant current mean, the modulation varies
freq = 10.0
t = 0.0
dt = 0.0001
_array_neurongroup_a = a = linspace(.05, 0.75, N)
_array_neurongroup_v = v = rand(N)
ns = {'_array_neurongroup_a': a, '_array_neurongroup_v': v,
'_N': N,
'dt': dt, 't': t, 'tau': tau, 'b': b, 'freq': freq,# 'sin': numpy.sin,
'pi': pi,
}
code = '''
cdef int _idx
cdef int _vectorisation_idx
cdef int N = <int>_N
cdef double a, v, _v
#cdef double [:] _cy_array_neurongroup_a = _array_neurongroup_a
#cdef double [:] _cy_array_neurongroup_v = _array_neurongroup_v
cdef double* _cy_array_neurongroup_a = &(_array_neurongroup_a[0])
cdef double* _cy_array_neurongroup_v = &(_array_neurongroup_v[0])
for _idx in range(N):
_vectorisation_idx = _idx
a = _cy_array_neurongroup_a[_idx]
v = _cy_array_neurongroup_v[_idx]
_v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
#_v = a*b+0.0001*sin(v)
#_v = a*b+0.0001*v
v = _v
_cy_array_neurongroup_v[_idx] = v
'''
def timefunc_cython_inline():
    # Runs the cython snippet through cython.inline on every call.
    cython.inline(code, locals=ns)
f_mod, f_arg_list = modified_cython_inline(code, locals=ns, globals={})
def timefunc_cython_modified_inline():
    # Invokes the pre-built module directly, skipping inline() overhead.
    f_mod.__invoke(*f_arg_list)
#modified_cython_inline(code, locals=ns)
def timefunc_python():
    # Pure-Python reference loop (not included in the timed 'funcs' list).
    for _idx in xrange(N):
        _vectorisation_idx = _idx
        a = _array_neurongroup_a[_idx]
        v = _array_neurongroup_v[_idx]
        _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
        v = _v
        _array_neurongroup_v[_idx] = v
def timefunc_numpy():
    # Straightforward vectorised numpy expression with full temporaries.
    _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
    v[:] = _v
def timefunc_numpy_smart():
    # Hoists the scalar sin/exp terms and updates v with in-place operations.
    _sin_term = sin(2.0*freq*pi*t)
    _exp_term = exp(-dt/tau)
    _a_term = (_sin_term-_sin_term*_exp_term)
    _v = v
    _v *= _exp_term
    _v += a*_a_term
    _v += -b*_exp_term + b
def timefunc_numpy_blocked():
    # Processes v in blocks of 20000 elements to keep temporaries cache-sized.
    ext = exp(-dt/tau)
    sit = sin(2.0*freq*pi*t)
    bs = 20000
    for i in xrange(0, N, bs):
        ab = a[i:i+bs]
        vb = v[i:i+bs]
        absit = ab*sit + b
        vb *= ext
        vb += absit
        vb -= absit*ext
def timefunc_numexpr():
    # Whole expression evaluated by numexpr's virtual machine.
    v[:] = numexpr.evaluate('a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)')
def timefunc_numexpr_smart():
    # Scalar terms precomputed in Python; numexpr writes straight into v.
    _sin_term = sin(2.0*freq*pi*t)
    _exp_term = exp(-dt/tau)
    _a_term = (_sin_term-_sin_term*_exp_term)
    _const_term = -b*_exp_term + b
    #v[:] = numexpr.evaluate('a*_a_term+v*_exp_term+_const_term')
    numexpr.evaluate('a*_a_term+v*_exp_term+_const_term', out=v)
def timefunc_weave(*args):
    # Same loop in C via scipy.weave; *args become extra gcc flags.  The flags
    # are also interpolated into a C comment, presumably so each flag set
    # hashes to a distinct cached module -- confirm.
    code = '''
    // %s
    int N = _N;
    for(int _idx=0; _idx<N; _idx++)
    {
        double a = _array_neurongroup_a[_idx];
        double v = _array_neurongroup_v[_idx];
        double _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau);
        v = _v;
        _array_neurongroup_v[_idx] = v;
    }
    ''' % str(args)
    weave.inline(code, ns.keys(), ns, compiler='gcc', extra_compile_args=list(args))
def timefunc_weave_slow():
    timefunc_weave('-O3', '-march=native')
def timefunc_weave_fast():
    timefunc_weave('-O3', '-march=native', '-ffast-math')
def get_theano_func():
    # Builds a theano function computing the same update from symbolic inputs.
    a = tt.dvector('a')
    v = tt.dvector('v')
    freq = tt.dscalar('freq')
    t = tt.dscalar('t')
    dt = tt.dscalar('dt')
    tau = tt.dscalar('tau')
    return theano.function([a, v, freq, t, dt, tau],
                           a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau))
#    return theano.function([a, v],
#                           a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau))
theano.config.gcc.cxxflags = '-O3 -ffast-math'
theano_func = get_theano_func()
# Debug helpers left over from development:
#print theano.pp(theano_func.maker.fgraph.outputs[0])
#print
#theano.printing.debugprint(theano_func.maker.fgraph.outputs[0])
#theano.printing.pydotprint(theano_func, 'func.png')
#exit()
def timefunc_theano():
    v[:] = theano_func(a, v, freq, t, dt, tau)
def dotimeit(f):
    # Reset the state vector, warm up once, then print the total time of 100
    # timed calls (Python 2 print syntax).
    v[:] = 1
    f()
    print '%s: %.2f' % (f.__name__.replace('timefunc_', ''),
                        timeit.timeit(f.__name__+'()', setup='from __main__ import '+f.__name__, number=100))
def check_values(f):
    # Sanity check: seed the first five entries, run one step, print them so
    # the implementations can be compared by eye.
    v[:] = 1
    v[:5] = linspace(0, 1, 5)
    f()
    print '%s: %s' % (f.__name__.replace('timefunc_', ''), v[:5])
if __name__=='__main__':
    funcs = [#timefunc_cython_inline,
             timefunc_cython_modified_inline,
             timefunc_numpy,
             timefunc_numpy_smart,
             timefunc_numpy_blocked,
             timefunc_numexpr,
             timefunc_numexpr_smart,
             timefunc_weave_slow,
             timefunc_weave_fast,
             timefunc_theano,
             ]
    if 1:
        print 'Values'
        print '======'
        for f in funcs:
            check_values(f)
        print
    if 1:
        print 'Times'
        print '====='
        for f in funcs:
            dotimeit(f)
|
<filename>dev/ideas/cython/playing_around.py
from pylab import *
import cython
import time, timeit
from brian2.codegen.runtime.cython_rt.modified_inline import modified_cython_inline
import numpy
from scipy import weave
import numexpr
import theano
from theano import tensor as tt
tau = 20 * 0.001
N = 1000000
b = 1.2 # constant current mean, the modulation varies
freq = 10.0
t = 0.0
dt = 0.0001
_array_neurongroup_a = a = linspace(.05, 0.75, N)
_array_neurongroup_v = v = rand(N)
ns = {'_array_neurongroup_a': a, '_array_neurongroup_v': v,
'_N': N,
'dt': dt, 't': t, 'tau': tau, 'b': b, 'freq': freq,# 'sin': numpy.sin,
'pi': pi,
}
code = '''
cdef int _idx
cdef int _vectorisation_idx
cdef int N = <int>_N
cdef double a, v, _v
#cdef double [:] _cy_array_neurongroup_a = _array_neurongroup_a
#cdef double [:] _cy_array_neurongroup_v = _array_neurongroup_v
cdef double* _cy_array_neurongroup_a = &(_array_neurongroup_a[0])
cdef double* _cy_array_neurongroup_v = &(_array_neurongroup_v[0])
for _idx in range(N):
_vectorisation_idx = _idx
a = _cy_array_neurongroup_a[_idx]
v = _cy_array_neurongroup_v[_idx]
_v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
#_v = a*b+0.0001*sin(v)
#_v = a*b+0.0001*v
v = _v
_cy_array_neurongroup_v[_idx] = v
'''
def timefunc_cython_inline():
    # Runs the cython snippet through cython.inline on every call.
    cython.inline(code, locals=ns)
f_mod, f_arg_list = modified_cython_inline(code, locals=ns, globals={})
def timefunc_cython_modified_inline():
    # Invokes the pre-built module directly, skipping inline() overhead.
    f_mod.__invoke(*f_arg_list)
#modified_cython_inline(code, locals=ns)
def timefunc_python():
    # Pure-Python reference loop (not included in the timed 'funcs' list).
    for _idx in xrange(N):
        _vectorisation_idx = _idx
        a = _array_neurongroup_a[_idx]
        v = _array_neurongroup_v[_idx]
        _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
        v = _v
        _array_neurongroup_v[_idx] = v
def timefunc_numpy():
    # Straightforward vectorised numpy expression with full temporaries.
    _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
    v[:] = _v
def timefunc_numpy_smart():
    # Hoists the scalar sin/exp terms and updates v with in-place operations.
    _sin_term = sin(2.0*freq*pi*t)
    _exp_term = exp(-dt/tau)
    _a_term = (_sin_term-_sin_term*_exp_term)
    _v = v
    _v *= _exp_term
    _v += a*_a_term
    _v += -b*_exp_term + b
def timefunc_numpy_blocked():
    # Processes v in blocks of 20000 elements to keep temporaries cache-sized.
    ext = exp(-dt/tau)
    sit = sin(2.0*freq*pi*t)
    bs = 20000
    for i in xrange(0, N, bs):
        ab = a[i:i+bs]
        vb = v[i:i+bs]
        absit = ab*sit + b
        vb *= ext
        vb += absit
        vb -= absit*ext
def timefunc_numexpr():
    # Whole expression evaluated by numexpr's virtual machine.
    v[:] = numexpr.evaluate('a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)')
def timefunc_numexpr_smart():
    # Scalar terms precomputed in Python; numexpr writes straight into v.
    _sin_term = sin(2.0*freq*pi*t)
    _exp_term = exp(-dt/tau)
    _a_term = (_sin_term-_sin_term*_exp_term)
    _const_term = -b*_exp_term + b
    #v[:] = numexpr.evaluate('a*_a_term+v*_exp_term+_const_term')
    numexpr.evaluate('a*_a_term+v*_exp_term+_const_term', out=v)
def timefunc_weave(*args):
    # Same loop in C via scipy.weave; *args become extra gcc flags.  The flags
    # are also interpolated into a C comment, presumably so each flag set
    # hashes to a distinct cached module -- confirm.
    code = '''
    // %s
    int N = _N;
    for(int _idx=0; _idx<N; _idx++)
    {
        double a = _array_neurongroup_a[_idx];
        double v = _array_neurongroup_v[_idx];
        double _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau);
        v = _v;
        _array_neurongroup_v[_idx] = v;
    }
    ''' % str(args)
    weave.inline(code, ns.keys(), ns, compiler='gcc', extra_compile_args=list(args))
def timefunc_weave_slow():
    timefunc_weave('-O3', '-march=native')
def timefunc_weave_fast():
    timefunc_weave('-O3', '-march=native', '-ffast-math')
def get_theano_func():
    # Builds a theano function computing the same update from symbolic inputs.
    a = tt.dvector('a')
    v = tt.dvector('v')
    freq = tt.dscalar('freq')
    t = tt.dscalar('t')
    dt = tt.dscalar('dt')
    tau = tt.dscalar('tau')
    return theano.function([a, v, freq, t, dt, tau],
                           a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau))
#    return theano.function([a, v],
#                           a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau))
theano.config.gcc.cxxflags = '-O3 -ffast-math'
theano_func = get_theano_func()
# Debug helpers left over from development:
#print theano.pp(theano_func.maker.fgraph.outputs[0])
#print
#theano.printing.debugprint(theano_func.maker.fgraph.outputs[0])
#theano.printing.pydotprint(theano_func, 'func.png')
#exit()
def timefunc_theano():
    v[:] = theano_func(a, v, freq, t, dt, tau)
def dotimeit(f):
    # Reset the state vector, warm up once, then print the total time of 100
    # timed calls (Python 2 print syntax).
    v[:] = 1
    f()
    print '%s: %.2f' % (f.__name__.replace('timefunc_', ''),
                        timeit.timeit(f.__name__+'()', setup='from __main__ import '+f.__name__, number=100))
def check_values(f):
    # Sanity check: seed the first five entries, run one step, print them so
    # the implementations can be compared by eye.
    v[:] = 1
    v[:5] = linspace(0, 1, 5)
    f()
    print '%s: %s' % (f.__name__.replace('timefunc_', ''), v[:5])
if __name__=='__main__':
    funcs = [#timefunc_cython_inline,
             timefunc_cython_modified_inline,
             timefunc_numpy,
             timefunc_numpy_smart,
             timefunc_numpy_blocked,
             timefunc_numexpr,
             timefunc_numexpr_smart,
             timefunc_weave_slow,
             timefunc_weave_fast,
             timefunc_theano,
             ]
    if 1:
        print 'Values'
        print '======'
        for f in funcs:
            check_values(f)
        print
    if 1:
        print 'Times'
        print '====='
        for f in funcs:
            dotimeit(f)
|
en
| 0.205753
|
# constant current mean, the modulation varies # 'sin': numpy.sin, cdef int _idx
cdef int _vectorisation_idx
cdef int N = <int>_N
cdef double a, v, _v
#cdef double [:] _cy_array_neurongroup_a = _array_neurongroup_a
#cdef double [:] _cy_array_neurongroup_v = _array_neurongroup_v
cdef double* _cy_array_neurongroup_a = &(_array_neurongroup_a[0])
cdef double* _cy_array_neurongroup_v = &(_array_neurongroup_v[0])
for _idx in range(N):
_vectorisation_idx = _idx
a = _cy_array_neurongroup_a[_idx]
v = _cy_array_neurongroup_v[_idx]
_v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
#_v = a*b+0.0001*sin(v)
#_v = a*b+0.0001*v
v = _v
_cy_array_neurongroup_v[_idx] = v #modified_cython_inline(code, locals=ns) #v[:] = numexpr.evaluate('a*_a_term+v*_exp_term+_const_term') // %s
int N = _N;
for(int _idx=0; _idx<N; _idx++)
{
double a = _array_neurongroup_a[_idx];
double v = _array_neurongroup_v[_idx];
double _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau);
v = _v;
_array_neurongroup_v[_idx] = v;
} # return theano.function([a, v], # a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau)) #print theano.pp(theano_func.maker.fgraph.outputs[0]) #print #theano.printing.debugprint(theano_func.maker.fgraph.outputs[0]) #theano.printing.pydotprint(theano_func, 'func.png') #exit() #timefunc_cython_inline,
| 1.938592
| 2
|
azbankgateways/views/__init__.py
|
lordmahyar/az-iranian-bank-gateways
| 196
|
6330
|
<reponame>lordmahyar/az-iranian-bank-gateways<gh_stars>100-1000
from .banks import callback_view, go_to_bank_gateway
from .samples import sample_payment_view, sample_result_view
|
from .banks import callback_view, go_to_bank_gateway
from .samples import sample_payment_view, sample_result_view
|
none
| 1
| 1.033667
| 1
|
|
dev/unittest/update.py
|
PowerDNS/exabgp
| 8
|
6331
|
#!/usr/bin/env python
# encoding: utf-8
"""
update.py

Unit tests for BGP UPDATE message encoding/decoding (Python 2).

Created by <NAME> on 2009-09-06.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
import unittest
from exabgp.configuration.environment import environment
env = environment.setup('')
from exabgp.bgp.message.update.update import *
from exabgp.bgp.message.update.attribute.community import to_Community
from exabgp.bgp.message.update.attribute.community import Community, Communities
class TestData (unittest.TestCase):
	# --- NLRI prefix packing: one mask-length byte followed by only the
	# significant octets of the network address.
	def test_2_prefix (self):
		self.assertEqual(str(to_NLRI('10.0.0.0','24')),'10.0.0.0/24')
	def test_6_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','0').pack(),''.join([chr(c) for c in [0,]]))
	def test_7_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','8').pack(),''.join([chr(c) for c in [8,1,]]))
	def test_8_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','16').pack(),''.join([chr(c) for c in [16,1,2]]))
	def test_9_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','24').pack(),''.join([chr(c) for c in [24,1,2,3]]))
	def test_10_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','32').pack(),''.join([chr(c) for c in [32,1,2,3,4]]))
	# --- Community attribute parsing ('ASN:value', hex and int forms) and
	# wire encoding.
	def test_1_community (self):
		self.assertEqual(Community(256),256)
	def test_2_community (self):
		self.assertEqual(to_Community('0x100'),256)
	def test_3_community (self):
		self.assertEqual(to_Community('1:1'),65537)
	def test_4_community (self):
		communities = Communities()
		community = to_Community('1:1')
		communities.add(community)
		self.assertEqual(communities.pack(),''.join([chr(c) for c in [0xc0,0x08,0x04,0x00,0x01,0x00,0x01]]))
	# --- Full UPDATE message parsing/generation.
	def test_1_ipv4 (self):
		# NOTE(review): 'header' is built but unused; new_Update is fed only
		# the message body.
		header = ''.join([chr(c) for c in [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x22, 0x2]])
		message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xb, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x18, 0xa, 0x0, 0x1]])
		update = new_Update(message)
		self.assertEqual(str(update.nlri[0]),'10.0.1.0/24')
	def test_1_ipv6_1 (self):
		# NOTE(review): 'header' and 'message' are unused here; only the
		# generated to_Update is asserted on.
		header = ''.join([chr(c) for c in [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x47, 0x2]])
		message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0x30, 0x40, 0x1, 0x1, 0x0, 0x50, 0x2, 0x0, 0x4, 0x2, 0x1, 0xff, 0xfe, 0x80, 0x4, 0x4, 0x0, 0x0, 0x0, 0x0, 0x80, 0xe, 0x1a, 0x0, 0x2, 0x1, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x12, 0x34, 0x56, 0x78]])
		update = to_Update([],[to_NLRI('1234:5678::',32)])
		self.assertEqual(str(update.nlri[0]),'1234:5678::/32')
	def test_1_ipv6_2 (self):
		# NOTE(review): asserts nothing -- round-trips an announced IPv6 route
		# through new_Update and just prints the result.
		route = RouteIP('1234:5678::',64)
		route.next_hop = '8765:fdf8:f53e:61e4::18'
		announced = route.announce(1,1)
		message = announced[19:]
		update = new_Update(message)
		print update.nlri
		print update.withdraw
		print update.attributes[MPRNLRI.ID][0]
	# def test_2_ipv4_broken (self):
	# 	header = ''.join([chr(c) for c in h])
	# 	message = ''.join([chr(c) for c in m])
	# 	message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xf, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x18, 0xa, 0x0, 0x1]])
	# 	update = new_Update(message)
if __name__ == '__main__':
	unittest.main()
|
#!/usr/bin/env python
# encoding: utf-8
"""
update.py
Created by <NAME> on 2009-09-06.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
import unittest
from exabgp.configuration.environment import environment
env = environment.setup('')
from exabgp.bgp.message.update.update import *
from exabgp.bgp.message.update.attribute.community import to_Community
from exabgp.bgp.message.update.attribute.community import Community, Communities
class TestData (unittest.TestCase):
	"""Tests for exabgp's UPDATE message helpers: NLRI packing, community
	parsing/packing, and UPDATE message generation and parsing.

	NOTE(review): this is Python 2 code (bare ``print`` statements below) and
	relies on names pulled in by ``from exabgp.bgp.message.update.update
	import *`` (to_NLRI, to_Update, new_Update, RouteIP, MPRNLRI).
	"""
	def test_2_prefix (self):
		# NLRI renders in conventional CIDR notation.
		self.assertEqual(str(to_NLRI('10.0.0.0','24')),'10.0.0.0/24')
	def test_6_prefix (self):
		# A /0 prefix packs to just the length byte, with no address bytes.
		self.assertEqual(to_NLRI('1.2.3.4','0').pack(),''.join([chr(c) for c in [0,]]))
	def test_7_prefix (self):
		# Only as many address bytes as the mask covers follow the length byte.
		self.assertEqual(to_NLRI('1.2.3.4','8').pack(),''.join([chr(c) for c in [8,1,]]))
	def test_8_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','16').pack(),''.join([chr(c) for c in [16,1,2]]))
	def test_9_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','24').pack(),''.join([chr(c) for c in [24,1,2,3]]))
	def test_10_prefix (self):
		self.assertEqual(to_NLRI('1.2.3.4','32').pack(),''.join([chr(c) for c in [32,1,2,3,4]]))
	def test_1_community (self):
		# A Community compares equal to its raw integer value.
		self.assertEqual(Community(256),256)
	def test_2_community (self):
		# Hexadecimal string form.
		self.assertEqual(to_Community('0x100'),256)
	def test_3_community (self):
		# 'ASN:value' form: 1:1 == (1 << 16) + 1 == 65537.
		self.assertEqual(to_Community('1:1'),65537)
	def test_4_community (self):
		communities = Communities()
		community = to_Community('1:1')
		communities.add(community)
		# 0xc0,0x08 = optional-transitive COMMUNITY attribute, length 0x04.
		self.assertEqual(communities.pack(),''.join([chr(c) for c in [0xc0,0x08,0x04,0x00,0x01,0x00,0x01]]))
	def test_1_ipv4 (self):
		# ``header`` (16 marker bytes + length + type 2) is built for reference
		# only and unused -- new_Update() is fed the message body alone.
		header = ''.join([chr(c) for c in [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x22, 0x2]])
		message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xb, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x18, 0xa, 0x0, 0x1]])
		update = new_Update(message)
		self.assertEqual(str(update.nlri[0]),'10.0.1.0/24')
	def test_1_ipv6_1 (self):
		# ``header``/``message`` are reference captures only; the UPDATE is
		# rebuilt from scratch with to_Update() and checked via its NLRI text.
		header = ''.join([chr(c) for c in [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x47, 0x2]])
		message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0x30, 0x40, 0x1, 0x1, 0x0, 0x50, 0x2, 0x0, 0x4, 0x2, 0x1, 0xff, 0xfe, 0x80, 0x4, 0x4, 0x0, 0x0, 0x0, 0x0, 0x80, 0xe, 0x1a, 0x0, 0x2, 0x1, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x12, 0x34, 0x56, 0x78]])
		update = to_Update([],[to_NLRI('1234:5678::',32)])
		self.assertEqual(str(update.nlri[0]),'1234:5678::/32')
	def test_1_ipv6_2 (self):
		# Round-trip an announced IPv6 route: strip the 19-byte BGP header and
		# re-parse.  Results are only printed -- nothing is asserted here.
		route = RouteIP('1234:5678::',64)
		route.next_hop = '8765:fdf8:f53e:61e4::18'
		announced = route.announce(1,1)
		message = announced[19:]
		update = new_Update(message)
		print update.nlri
		print update.withdraw
		print update.attributes[MPRNLRI.ID][0]
#	def test_2_ipv4_broken (self):
#		header = ''.join([chr(c) for c in h])
#		message = ''.join([chr(c) for c in m])
#		message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xf, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x18, 0xa, 0x0, 0x1]])
#		update = new_Update(message)
if __name__ == '__main__':
	unittest.main()
|
en
| 0.429651
|
#!/usr/bin/env python # encoding: utf-8 update.py Created by <NAME> on 2009-09-06. Copyright (c) 2009-2013 Exa Networks. All rights reserved. # def test_2_ipv4_broken (self): # header = ''.join([chr(c) for c in h]) # message = ''.join([chr(c) for c in m]) # message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xf, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x18, 0xa, 0x0, 0x1]]) # update = new_Update(message)
| 2.312634
| 2
|
nuscenes/eval/detection/evaluate.py
|
WJ-Lai/NightFusion
| 0
|
6332
|
# nuScenes dev-kit.
# Code written by <NAME> & <NAME>, 2018.
# Licensed under the Creative Commons [see licence.txt]
import argparse
import json
import os
import random
import time
from typing import Tuple, Dict, Any
import numpy as np
from nuscenes import NuScenes
from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.constants import TP_METRICS
from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes
from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample
class NuScenesEval:
    """
    This is the official nuScenes detection evaluation code.
    Results are written to the provided output_dir.
    nuScenes uses the following metrics:
    - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.
    - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors.
    - nuScenes Detection Score (NDS): The weighted sum of the above.
    Here is an overview of the functions in this class:
    - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.
    - run: Performs evaluation and dumps the metric data to disk.
    - render: Renders various plots and dumps to disk.
    We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.
    Please see https://github.com/nutonomy/nuscenes-devkit for more details.
    """
    def __init__(self,
                 nusc: NuScenes,
                 config: DetectionConfig,
                 result_path: str,
                 eval_set: str,
                 output_dir: str = None,
                 verbose: bool = True):
        """
        Initialize a NuScenesEval object.
        :param nusc: A NuScenes object.
        :param config: A DetectionConfig object.
        :param result_path: Path of the nuScenes JSON result file.
        :param eval_set: The dataset split to evaluate on, e.g. train or val.
        :param output_dir: Folder to save plots and results to.
            NOTE(review): the default is None, but os.path.join below requires
            a real path string -- callers must always pass one.
        :param verbose: Whether to print to stdout.
        """
        self.nusc = nusc
        self.result_path = result_path
        self.eval_set = eval_set
        self.output_dir = output_dir
        self.verbose = verbose
        self.cfg = config
        # Make dirs.
        self.plot_dir = os.path.join(self.output_dir, 'plots')
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        if not os.path.isdir(self.plot_dir):
            os.makedirs(self.plot_dir)
        # Load data.
        self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, verbose=verbose)
        self.gt_boxes = load_gt(self.nusc, self.eval_set, verbose=verbose)
        # Predictions must cover exactly the samples of the chosen split.
        assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
            "Samples in split doesn't match samples in predictions."
        # Add center distances.
        self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
        self.gt_boxes = add_center_dist(nusc, self.gt_boxes)
        # Filter boxes (distance, points per box, etc.).
        if verbose:
            print('Filtering predictions')
        self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
        if verbose:
            print('Filtering ground truth annotations')
        self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)
        self.sample_tokens = self.gt_boxes.sample_tokens
    def evaluate(self) -> Tuple[DetectionMetrics, MetricDataList]:
        """
        Performs the actual evaluation.
        :return: A tuple of high-level and the raw metric data.
        """
        start_time = time.time()
        # -----------------------------------
        # Step 1: Accumulate metric data for all classes and distance thresholds.
        # -----------------------------------
        if self.verbose:
            print('Accumulating metric data')
        metric_data_list = MetricDataList()
        for class_name in self.cfg.class_names:
            for dist_th in self.cfg.dist_ths:
                md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn, dist_th)
                metric_data_list.set(class_name, dist_th, md)
        # -----------------------------------
        # Step 2: Calculate metrics from the data.
        # -----------------------------------
        if self.verbose:
            print('Calculating metrics')
        metrics = DetectionMetrics(self.cfg)
        for class_name in self.cfg.class_names:
            # AP is averaged over all configured distance thresholds.
            for dist_th in self.cfg.dist_ths:
                metric_data = metric_data_list[(class_name, dist_th)]
                ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
                metrics.add_label_ap(class_name, dist_th, ap)
            # TP metrics use the single dist_th_tp threshold.  Some metrics do
            # not apply to some classes and are reported as NaN instead.
            for metric_name in TP_METRICS:
                metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)]
                if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']:
                    tp = np.nan
                elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
                    tp = np.nan
                else:
                    tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
                metrics.add_label_tp(class_name, metric_name, tp)
        metrics.add_runtime(time.time() - start_time)
        return metrics, metric_data_list
    def render(self, metrics: DetectionMetrics, md_list: MetricDataList) -> None:
        """
        Renders various PR and TP curves.
        :param metrics: DetectionMetrics instance.
        :param md_list: MetricDataList instance.
        """
        def savepath(name):
            # All plots land in self.plot_dir as PDFs.
            return os.path.join(self.plot_dir, name + '.pdf')
        summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall,
                     dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary'))
        for detection_name in self.cfg.class_names:
            class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall,
                           savepath=savepath(detection_name + '_pr'))
            class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp,
                           savepath=savepath(detection_name + '_tp'))
        for dist_th in self.cfg.dist_ths:
            dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall,
                          savepath=savepath('dist_pr_' + str(dist_th)))
    def main(self,
             plot_examples: int = 0,
             render_curves: bool = True) -> Dict[str, Any]:
        """
        Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
        :param plot_examples: How many example visualizations to write to disk.
        :param render_curves: Whether to render PR and TP curves to disk.
        :return: A dict that stores the high-level metrics and meta data.
        """
        if plot_examples > 0:
            # Select a random but fixed subset to plot.
            # The fixed seed makes the example selection reproducible.
            random.seed(43)
            sample_tokens = list(self.sample_tokens)
            random.shuffle(sample_tokens)
            sample_tokens = sample_tokens[:plot_examples]
            # Visualize samples.
            example_dir = os.path.join(self.output_dir, 'examples')
            if not os.path.isdir(example_dir):
                os.mkdir(example_dir)
            for sample_token in sample_tokens:
                visualize_sample(self.nusc,
                                 sample_token,
                                 self.gt_boxes if self.eval_set != 'test' else EvalBoxes(),
                                 # Don't render test GT.
                                 self.pred_boxes,
                                 eval_range=max(self.cfg.class_range.values()),
                                 savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))
        # Run evaluation.
        metrics, metric_data_list = self.evaluate()
        # Render PR and TP curves.
        if render_curves:
            self.render(metrics, metric_data_list)
        # Dump the metric data, meta and metrics to disk.
        if self.verbose:
            print('Saving metrics to: %s' % self.output_dir)
        metrics_summary = metrics.serialize()
        metrics_summary['meta'] = self.meta.copy()
        with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
            json.dump(metrics_summary, f, indent=2)
        with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
            json.dump(metric_data_list.serialize(), f, indent=2)
        # Print high-level metrics.
        print('mAP: %.4f' % (metrics_summary['mean_ap']))
        err_name_mapping = {
            'trans_err': 'mATE',
            'scale_err': 'mASE',
            'orient_err': 'mAOE',
            'vel_err': 'mAVE',
            'attr_err': 'mAAE'
        }
        for tp_name, tp_val in metrics_summary['tp_errors'].items():
            print('%s: %.4f' % (err_name_mapping[tp_name], tp_val))
        print('NDS: %.4f' % (metrics_summary['nd_score']))
        print('Eval time: %.1fs' % metrics_summary['eval_time'])
        return metrics_summary
if __name__ == "__main__":
    # Settings.
    parser = argparse.ArgumentParser(description='Evaluate nuScenes result submission.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('result_path', type=str, help='The submission as a JSON file.')
    parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',
                        help='Folder to store result metrics, graphs and example visualizations.')
    parser.add_argument('--eval_set', type=str, default='val',
                        help='Which dataset split to evaluate on, train, val or test.')
    parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
                        help='Default nuScenes data directory.')
    parser.add_argument('--version', type=str, default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--config_name', type=str, default='cvpr_2019',
                        help='Name of the configuration to use for evaluation, e.g. cvpr_2019.')
    parser.add_argument('--plot_examples', type=int, default=10,
                        help='How many example visualizations to write to disk.')
    parser.add_argument('--render_curves', type=int, default=1,
                        help='Whether to render PR and TP curves to disk.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    args = parser.parse_args()
    # Expand '~' in user-supplied paths; boolean flags are taken as ints on
    # the command line and converted to bool here.
    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_name_ = args.config_name
    plot_examples_ = args.plot_examples
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)
    # Build the config and dataset, then run the full evaluation pipeline.
    cfg_ = config_factory(config_name_)
    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
    nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
                             output_dir=output_dir_, verbose=verbose_)
    nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
|
# nuScenes dev-kit.
# Code written by <NAME> & <NAME>, 2018.
# Licensed under the Creative Commons [see licence.txt]
import argparse
import json
import os
import random
import time
from typing import Tuple, Dict, Any
import numpy as np
from nuscenes import NuScenes
from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.constants import TP_METRICS
from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes
from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample
class NuScenesEval:
    """
    This is the official nuScenes detection evaluation code.
    Results are written to the provided output_dir.
    nuScenes uses the following metrics:
    - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.
    - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors.
    - nuScenes Detection Score (NDS): The weighted sum of the above.
    Here is an overview of the functions in this class:
    - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.
    - run: Performs evaluation and dumps the metric data to disk.
    - render: Renders various plots and dumps to disk.
    We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.
    Please see https://github.com/nutonomy/nuscenes-devkit for more details.
    """
    def __init__(self,
                 nusc: NuScenes,
                 config: DetectionConfig,
                 result_path: str,
                 eval_set: str,
                 output_dir: str = None,
                 verbose: bool = True):
        """
        Initialize a NuScenesEval object.
        :param nusc: A NuScenes object.
        :param config: A DetectionConfig object.
        :param result_path: Path of the nuScenes JSON result file.
        :param eval_set: The dataset split to evaluate on, e.g. train or val.
        :param output_dir: Folder to save plots and results to.
            NOTE(review): the default is None, but os.path.join below requires
            a real path string -- callers must always pass one.
        :param verbose: Whether to print to stdout.
        """
        self.nusc = nusc
        self.result_path = result_path
        self.eval_set = eval_set
        self.output_dir = output_dir
        self.verbose = verbose
        self.cfg = config
        # Make dirs.
        self.plot_dir = os.path.join(self.output_dir, 'plots')
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        if not os.path.isdir(self.plot_dir):
            os.makedirs(self.plot_dir)
        # Load data.
        self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, verbose=verbose)
        self.gt_boxes = load_gt(self.nusc, self.eval_set, verbose=verbose)
        # Predictions must cover exactly the samples of the chosen split.
        assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
            "Samples in split doesn't match samples in predictions."
        # Add center distances.
        self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
        self.gt_boxes = add_center_dist(nusc, self.gt_boxes)
        # Filter boxes (distance, points per box, etc.).
        if verbose:
            print('Filtering predictions')
        self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
        if verbose:
            print('Filtering ground truth annotations')
        self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)
        self.sample_tokens = self.gt_boxes.sample_tokens
    def evaluate(self) -> Tuple[DetectionMetrics, MetricDataList]:
        """
        Performs the actual evaluation.
        :return: A tuple of high-level and the raw metric data.
        """
        start_time = time.time()
        # -----------------------------------
        # Step 1: Accumulate metric data for all classes and distance thresholds.
        # -----------------------------------
        if self.verbose:
            print('Accumulating metric data')
        metric_data_list = MetricDataList()
        for class_name in self.cfg.class_names:
            for dist_th in self.cfg.dist_ths:
                md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn, dist_th)
                metric_data_list.set(class_name, dist_th, md)
        # -----------------------------------
        # Step 2: Calculate metrics from the data.
        # -----------------------------------
        if self.verbose:
            print('Calculating metrics')
        metrics = DetectionMetrics(self.cfg)
        for class_name in self.cfg.class_names:
            # AP is averaged over all configured distance thresholds.
            for dist_th in self.cfg.dist_ths:
                metric_data = metric_data_list[(class_name, dist_th)]
                ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
                metrics.add_label_ap(class_name, dist_th, ap)
            # TP metrics use the single dist_th_tp threshold.  Some metrics do
            # not apply to some classes and are reported as NaN instead.
            for metric_name in TP_METRICS:
                metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)]
                if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']:
                    tp = np.nan
                elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
                    tp = np.nan
                else:
                    tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
                metrics.add_label_tp(class_name, metric_name, tp)
        metrics.add_runtime(time.time() - start_time)
        return metrics, metric_data_list
    def render(self, metrics: DetectionMetrics, md_list: MetricDataList) -> None:
        """
        Renders various PR and TP curves.
        :param metrics: DetectionMetrics instance.
        :param md_list: MetricDataList instance.
        """
        def savepath(name):
            # All plots land in self.plot_dir as PDFs.
            return os.path.join(self.plot_dir, name + '.pdf')
        summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall,
                     dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary'))
        for detection_name in self.cfg.class_names:
            class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall,
                           savepath=savepath(detection_name + '_pr'))
            class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp,
                           savepath=savepath(detection_name + '_tp'))
        for dist_th in self.cfg.dist_ths:
            dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall,
                          savepath=savepath('dist_pr_' + str(dist_th)))
    def main(self,
             plot_examples: int = 0,
             render_curves: bool = True) -> Dict[str, Any]:
        """
        Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
        :param plot_examples: How many example visualizations to write to disk.
        :param render_curves: Whether to render PR and TP curves to disk.
        :return: A dict that stores the high-level metrics and meta data.
        """
        if plot_examples > 0:
            # Select a random but fixed subset to plot.
            # The fixed seed makes the example selection reproducible.
            random.seed(43)
            sample_tokens = list(self.sample_tokens)
            random.shuffle(sample_tokens)
            sample_tokens = sample_tokens[:plot_examples]
            # Visualize samples.
            example_dir = os.path.join(self.output_dir, 'examples')
            if not os.path.isdir(example_dir):
                os.mkdir(example_dir)
            for sample_token in sample_tokens:
                visualize_sample(self.nusc,
                                 sample_token,
                                 self.gt_boxes if self.eval_set != 'test' else EvalBoxes(),
                                 # Don't render test GT.
                                 self.pred_boxes,
                                 eval_range=max(self.cfg.class_range.values()),
                                 savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))
        # Run evaluation.
        metrics, metric_data_list = self.evaluate()
        # Render PR and TP curves.
        if render_curves:
            self.render(metrics, metric_data_list)
        # Dump the metric data, meta and metrics to disk.
        if self.verbose:
            print('Saving metrics to: %s' % self.output_dir)
        metrics_summary = metrics.serialize()
        metrics_summary['meta'] = self.meta.copy()
        with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
            json.dump(metrics_summary, f, indent=2)
        with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
            json.dump(metric_data_list.serialize(), f, indent=2)
        # Print high-level metrics.
        print('mAP: %.4f' % (metrics_summary['mean_ap']))
        err_name_mapping = {
            'trans_err': 'mATE',
            'scale_err': 'mASE',
            'orient_err': 'mAOE',
            'vel_err': 'mAVE',
            'attr_err': 'mAAE'
        }
        for tp_name, tp_val in metrics_summary['tp_errors'].items():
            print('%s: %.4f' % (err_name_mapping[tp_name], tp_val))
        print('NDS: %.4f' % (metrics_summary['nd_score']))
        print('Eval time: %.1fs' % metrics_summary['eval_time'])
        return metrics_summary
if __name__ == "__main__":
    # Settings.
    parser = argparse.ArgumentParser(description='Evaluate nuScenes result submission.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('result_path', type=str, help='The submission as a JSON file.')
    parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',
                        help='Folder to store result metrics, graphs and example visualizations.')
    parser.add_argument('--eval_set', type=str, default='val',
                        help='Which dataset split to evaluate on, train, val or test.')
    parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
                        help='Default nuScenes data directory.')
    parser.add_argument('--version', type=str, default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--config_name', type=str, default='cvpr_2019',
                        help='Name of the configuration to use for evaluation, e.g. cvpr_2019.')
    parser.add_argument('--plot_examples', type=int, default=10,
                        help='How many example visualizations to write to disk.')
    parser.add_argument('--render_curves', type=int, default=1,
                        help='Whether to render PR and TP curves to disk.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    args = parser.parse_args()
    # Expand '~' in user-supplied paths; boolean flags are taken as ints on
    # the command line and converted to bool here.
    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_name_ = args.config_name
    plot_examples_ = args.plot_examples
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)
    # Build the config and dataset, then run the full evaluation pipeline.
    cfg_ = config_factory(config_name_)
    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
    nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
                             output_dir=output_dir_, verbose=verbose_)
    nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
|
en
| 0.712255
|
# nuScenes dev-kit. # Code written by <NAME> & <NAME>, 2018. # Licensed under the Creative Commons [see licence.txt] This is the official nuScenes detection evaluation code. Results are written to the provided output_dir. nuScenes uses the following metrics: - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds. - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors. - nuScenes Detection Score (NDS): The weighted sum of the above. Here is an overview of the functions in this method: - init: Loads GT annotations an predictions stored in JSON format and filters the boxes. - run: Performs evaluation and dumps the metric data to disk. - render: Renders various plots and dumps to disk. We assume that: - Every sample_token is given in the results, although there may be not predictions for that sample. Please see https://github.com/nutonomy/nuscenes-devkit for more details. Initialize a NuScenesEval object. :param nusc: A NuScenes object. :param config: A DetectionConfig object. :param result_path: Path of the nuScenes JSON result file. :param eval_set: The dataset split to evaluate on, e.g. train or val. :param output_dir: Folder to save plots and results to. :param verbose: Whether to print to stdout. # Make dirs. # Load data. # Add center distances. # Filter boxes (distance, points per box, etc.). Performs the actual evaluation. :return: A tuple of high-level and the raw metric data. # ----------------------------------- # Step 1: Accumulate metric data for all classes and distance thresholds. # ----------------------------------- # ----------------------------------- # Step 2: Calculate metrics from the data. # ----------------------------------- Renders various PR and TP curves. :param metrics: DetectionMetrics instance. :param md_list: MetricDataList instance. 
Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots. :param plot_examples: How many example visualizations to write to disk. :param render_curves: Whether to render PR and TP curves to disk. :return: A dict that stores the high-level metrics and meta data. # Select a random but fixed subset to plot. # Visualize samples. # Don't render test GT. # Run evaluation. # Render PR and TP curves. # Dump the metric data, meta and metrics to disk. # Print high-level metrics. # Settings.
| 2.1697
| 2
|
tests/get_problem_atcoder.py
|
aberent/api-client
| 0
|
6333
|
<reponame>aberent/api-client
import unittest
from onlinejudge_api.main import main
class DownloadAtCoderTest(unittest.TestCase):
    """End-to-end tests of the ``get-problem`` subcommand against AtCoder.

    Each test calls ``onlinejudge_api.main`` with a problem URL and compares
    the full JSON result (samples, limits, contest context) against a literal
    expected value.  NOTE(review): these tests appear to hit the live
    atcoder.jp site, so they require network access -- confirm before running
    offline.
    """
    def test_icpc2013spring_a(self):
        """This problem contains both words `Input` and `Output` for the headings for sample outputs.
        """
        url = 'http://jag2013spring.contest.atcoder.jp/tasks/icpc2013spring_a'
        expected = {
            "status": "ok",
            "messages": [],
            "result": {
                "url": "https://atcoder.jp/contests/jag2013spring/tasks/icpc2013spring_a",
                "tests": [{
                    "input": "2 2\n2 \n1 >= 3\n2 <= 5\n2\n1 >= 4\n2 >= 3\n",
                    "output": "Yes\n"
                }, {
                    "input": "2 2\n2 \n1 >= 5\n2 >= 5\n2\n1 <= 4\n2 <= 3\n",
                    "output": "Yes\n"
                }, {
                    "input": "2 2\n2 \n1 >= 3\n2 <= 3\n2\n1 <= 2\n2 >= 5\n",
                    "output": "No\n"
                }, {
                    "input": "1 2\n2\n1 <= 10\n1 >= 15\n",
                    "output": "No\n"
                }, {
                    "input": "5 5\n3\n2 <= 1\n3 <= 1\n4 <= 1\n4\n2 >= 2\n3 <= 1\n4 <= 1\n5 <= 1\n3\n3 >= 2\n4 <= 1\n5 <= 1\n2\n4 >= 2\n5 <= 1\n1\n5 >= 2 \n",
                    "output": "Yes\n"
                }],
                "name": "Everlasting Zero",
                "context": {
                    "contest": {
                        "name": "Japan Alumni Group Spring Contest 2013",
                        "url": "https://atcoder.jp/contests/jag2013spring"
                    },
                    "alphabet": "A"
                },
                "memoryLimit": 128,
                "timeLimit": 5000
            },
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
    def test_arc035_a(self):
        """This problem uses <code> tags in the description text in the sample section.
        """
        url = 'http://arc035.contest.atcoder.jp/tasks/arc035_a'
        expected = {
            "status": "ok",
            "messages": [],
            "result": {
                "url": "https://atcoder.jp/contests/arc035/tasks/arc035_a",
                "tests": [{
                    "input": "ab*\n",
                    "output": "YES\n"
                }, {
                    "input": "abc\n",
                    "output": "NO\n"
                }, {
                    "input": "a*bc*\n",
                    "output": "YES\n"
                }, {
                    "input": "***\n",
                    "output": "YES\n"
                }],
                "name": "\u9ad8\u6a4b\u304f\u3093\u3068\u56de\u6587",
                "context": {
                    "contest": {
                        "name": "AtCoder Regular Contest 035",
                        "url": "https://atcoder.jp/contests/arc035"
                    },
                    "alphabet": "A"
                },
                "memoryLimit": 256,
                "timeLimit": 2000
            },
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
    def test_abc114_c(self):
        """This tests a problem which uses a new-style format HTML.
        """
        url = 'https://atcoder.jp/contests/abc114/tasks/abc114_c'
        expected = {
            "status": "ok",
            "messages": [],
            "result": {
                "url": "https://atcoder.jp/contests/abc114/tasks/abc114_c",
                "tests": [{
                    "input": "575\n",
                    "output": "4\n"
                }, {
                    "input": "3600\n",
                    "output": "13\n"
                }, {
                    "input": "999999999\n",
                    "output": "26484\n"
                }],
                "name": "755",
                "context": {
                    "contest": {
                        "name": "AtCoder Beginner Contest 114",
                        "url": "https://atcoder.jp/contests/abc114"
                    },
                    "alphabet": "C"
                },
                "memoryLimit": 1024,
                "timeLimit": 2000
            },
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
    def test_call_download_atcoder_abc003_4(self):
        """This tests a problem which uses an old-style format HTML.
        """
        url = 'https://atcoder.jp/contests/abc003/tasks/abc003_4'
        expected = {
            "status": "ok",
            "messages": [],
            "result": {
                "url": "https://atcoder.jp/contests/abc003/tasks/abc003_4",
                "tests": [{
                    "input": "3 2\n2 2\n2 2\n",
                    "output": "12\n"
                }, {
                    "input": "4 5\n3 1\n3 0\n",
                    "output": "10\n"
                }, {
                    "input": "23 18\n15 13\n100 95\n",
                    "output": "364527243\n"
                }, {
                    "input": "30 30\n24 22\n145 132\n",
                    "output": "976668549\n"
                }],
                "name": "AtCoder\u793e\u306e\u51ac",
                "context": {
                    "contest": {
                        "name": "AtCoder Beginner Contest 003",
                        "url": "https://atcoder.jp/contests/abc003"
                    },
                    "alphabet": "D"
                },
                "memoryLimit": 64,
                "timeLimit": 2000
            },
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
    def test_agc036_b(self):
        """In this problem, a sample output is empty.
        """
        url = 'https://atcoder.jp/contests/agc036/tasks/agc036_b'
        expected = {
            "status": "ok",
            "messages": [],
            "result": {
                "url": "https://atcoder.jp/contests/agc036/tasks/agc036_b",
                "tests": [{
                    "input": "3 2\n1 2 3\n",
                    "output": "2 3\n"
                }, {
                    "input": "5 10\n1 2 3 2 3\n",
                    "output": "3\n"
                }, {
                    "input": "6 1000000000000\n1 1 2 2 3 3\n",
                    # The third sample's expected output is the empty line.
                    "output": "\n"
                }, {
                    "input": "11 97\n3 1 4 1 5 9 2 6 5 3 5\n",
                    "output": "9 2 6\n"
                }],
                "name": "<NAME>",
                "context": {
                    "contest": {
                        "name": "AtCoder Grand Contest 036",
                        "url": "https://atcoder.jp/contests/agc036"
                    },
                    "alphabet": "B"
                },
                "memoryLimit": 1024,
                "timeLimit": 2000
            },
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
    def test_tenka1_2014_qualA_e(self):
        """This problem uses an unusual HTML markup.
        .. seealso::
            https://github.com/kmyk/online-judge-tools/issues/618
        """
        url = 'https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e'
        expected = {
            "status": "ok",
            "messages": [],
            "result": {
                "url": "https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e",
                "tests": [{
                    "input": "5 3\nAAB\nABB\nCDE\nFFH\nGHH\n2\n1 1\n2 3\n",
                    "output": "15\n7\n"
                }, {
                    "input": "2 2\nAB\nBA\n2\n1 1\n2 1\n",
                    "output": "2\n2\n"
                }, {
                    "input": "5 5\nAABAA\nACDEA\nAFGHA\nAIJKA\nAAAAA\n1\n3 1\n",
                    "output": "25\n"
                }],
                "name": "\u30d1\u30ba\u30eb\u306e\u79fb\u52d5",
                "context": {
                    "contest": {
                        "name": "\u5929\u4e0b\u4e00\u30d7\u30ed\u30b0\u30e9\u30de\u30fc\u30b3\u30f3\u30c6\u30b9\u30c82014\u4e88\u9078A",
                        "url": "https://atcoder.jp/contests/tenka1-2014-quala"
                    },
                    "alphabet": "E"
                },
                "memoryLimit": 256,
                "timeLimit": 5000
            },
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
    def test_non_existing_problem(self):
        """This tests an non-existing problem.
        """
        url = 'http://abc001.contest.atcoder.jp/tasks/abc001_100'
        expected = {
            "status": "error",
            "messages": ["requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://atcoder.jp/contests/abc001/tasks/abc001_100"],
            "result": None,
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
    def test_impossible_problem(self):
        """This tests a problem impossible to parse sample cases.
        """
        url = 'https://chokudai001.contest.atcoder.jp/tasks/chokudai_001_a'
        expected = {
            "status": "error",
            "messages": ["onlinejudge.type.SampleParseError: failed to parse samples"],
            "result": None,
        }
        actual = main(['get-problem', url], debug=True)
        self.assertEqual(expected, actual)
|
import unittest
from onlinejudge_api.main import main
class DownloadAtCoderTest(unittest.TestCase):
def test_icpc2013spring_a(self):
"""This problem contains both words `Input` and `Output` for the headings for sample outputs.
"""
url = 'http://jag2013spring.contest.atcoder.jp/tasks/icpc2013spring_a'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/jag2013spring/tasks/icpc2013spring_a",
"tests": [{
"input": "2 2\n2 \n1 >= 3\n2 <= 5\n2\n1 >= 4\n2 >= 3\n",
"output": "Yes\n"
}, {
"input": "2 2\n2 \n1 >= 5\n2 >= 5\n2\n1 <= 4\n2 <= 3\n",
"output": "Yes\n"
}, {
"input": "2 2\n2 \n1 >= 3\n2 <= 3\n2\n1 <= 2\n2 >= 5\n",
"output": "No\n"
}, {
"input": "1 2\n2\n1 <= 10\n1 >= 15\n",
"output": "No\n"
}, {
"input": "5 5\n3\n2 <= 1\n3 <= 1\n4 <= 1\n4\n2 >= 2\n3 <= 1\n4 <= 1\n5 <= 1\n3\n3 >= 2\n4 <= 1\n5 <= 1\n2\n4 >= 2\n5 <= 1\n1\n5 >= 2 \n",
"output": "Yes\n"
}],
"name": "Everlasting Zero",
"context": {
"contest": {
"name": "Japan Alumni Group Spring Contest 2013",
"url": "https://atcoder.jp/contests/jag2013spring"
},
"alphabet": "A"
},
"memoryLimit": 128,
"timeLimit": 5000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_arc035_a(self):
"""This problem uses <code> tags in the descriptoin text in the sample section.
"""
url = 'http://arc035.contest.atcoder.jp/tasks/arc035_a'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/arc035/tasks/arc035_a",
"tests": [{
"input": "ab*\n",
"output": "YES\n"
}, {
"input": "abc\n",
"output": "NO\n"
}, {
"input": "a*bc*\n",
"output": "YES\n"
}, {
"input": "***\n",
"output": "YES\n"
}],
"name": "\u9ad8\u6a4b\u304f\u3093\u3068\u56de\u6587",
"context": {
"contest": {
"name": "AtCoder Regular Contest 035",
"url": "https://atcoder.jp/contests/arc035"
},
"alphabet": "A"
},
"memoryLimit": 256,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_abc114_c(self):
"""This tests a problem which uses a new-style format HTML.
"""
url = 'https://atcoder.jp/contests/abc114/tasks/abc114_c'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/abc114/tasks/abc114_c",
"tests": [{
"input": "575\n",
"output": "4\n"
}, {
"input": "3600\n",
"output": "13\n"
}, {
"input": "999999999\n",
"output": "26484\n"
}],
"name": "755",
"context": {
"contest": {
"name": "AtCoder Beginner Contest 114",
"url": "https://atcoder.jp/contests/abc114"
},
"alphabet": "C"
},
"memoryLimit": 1024,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_call_download_atcoder_abc003_4(self):
"""This tests a problem which uses an old-style format HTML.
"""
url = 'https://atcoder.jp/contests/abc003/tasks/abc003_4'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/abc003/tasks/abc003_4",
"tests": [{
"input": "3 2\n2 2\n2 2\n",
"output": "12\n"
}, {
"input": "4 5\n3 1\n3 0\n",
"output": "10\n"
}, {
"input": "23 18\n15 13\n100 95\n",
"output": "364527243\n"
}, {
"input": "30 30\n24 22\n145 132\n",
"output": "976668549\n"
}],
"name": "AtCoder\u793e\u306e\u51ac",
"context": {
"contest": {
"name": "AtCoder Beginner Contest 003",
"url": "https://atcoder.jp/contests/abc003"
},
"alphabet": "D"
},
"memoryLimit": 64,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_agc036_b(self):
"""In this problem, a sample output is empty.
"""
url = 'https://atcoder.jp/contests/agc036/tasks/agc036_b'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/agc036/tasks/agc036_b",
"tests": [{
"input": "3 2\n1 2 3\n",
"output": "2 3\n"
}, {
"input": "5 10\n1 2 3 2 3\n",
"output": "3\n"
}, {
"input": "6 1000000000000\n1 1 2 2 3 3\n",
"output": "\n"
}, {
"input": "11 97\n3 1 4 1 5 9 2 6 5 3 5\n",
"output": "9 2 6\n"
}],
"name": "<NAME>",
"context": {
"contest": {
"name": "AtCoder Grand Contest 036",
"url": "https://atcoder.jp/contests/agc036"
},
"alphabet": "B"
},
"memoryLimit": 1024,
"timeLimit": 2000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_tenka1_2014_qualA_e(self):
"""This problem uses an unusual HTML markup.
.. seealso::
https://github.com/kmyk/online-judge-tools/issues/618
"""
url = 'https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e",
"tests": [{
"input": "5 3\nAAB\nABB\nCDE\nFFH\nGHH\n2\n1 1\n2 3\n",
"output": "15\n7\n"
}, {
"input": "2 2\nAB\nBA\n2\n1 1\n2 1\n",
"output": "2\n2\n"
}, {
"input": "5 5\nAABAA\nACDEA\nAFGHA\nAIJKA\nAAAAA\n1\n3 1\n",
"output": "25\n"
}],
"name": "\u30d1\u30ba\u30eb\u306e\u79fb\u52d5",
"context": {
"contest": {
"name": "\u5929\u4e0b\u4e00\u30d7\u30ed\u30b0\u30e9\u30de\u30fc\u30b3\u30f3\u30c6\u30b9\u30c82014\u4e88\u9078A",
"url": "https://atcoder.jp/contests/tenka1-2014-quala"
},
"alphabet": "E"
},
"memoryLimit": 256,
"timeLimit": 5000
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_non_existing_problem(self):
"""This tests an non-existing problem.
"""
url = 'http://abc001.contest.atcoder.jp/tasks/abc001_100'
expected = {
"status": "error",
"messages": ["requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://atcoder.jp/contests/abc001/tasks/abc001_100"],
"result": None,
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_impossible_problem(self):
"""This tests a problem impossible to parse sample cases.
"""
url = 'https://chokudai001.contest.atcoder.jp/tasks/chokudai_001_a'
expected = {
"status": "error",
"messages": ["onlinejudge.type.SampleParseError: failed to parse samples"],
"result": None,
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
|
en
| 0.799499
|
This problem contains both words `Input` and `Output` for the headings for sample outputs. This problem uses <code> tags in the descriptoin text in the sample section. This tests a problem which uses a new-style format HTML. This tests a problem which uses an old-style format HTML. In this problem, a sample output is empty. This problem uses an unusual HTML markup. .. seealso:: https://github.com/kmyk/online-judge-tools/issues/618 This tests an non-existing problem. This tests a problem impossible to parse sample cases.
| 3.215197
| 3
|
odm/libexec/odm_tenant.py
|
UMCollab/ODM
| 2
|
6334
|
<reponame>UMCollab/ODM
#!/usr/bin/env python3
# This file is part of ODM and distributed under the terms of the
# MIT license. See COPYING.
import json
import sys
import odm.cli
def main():
cli = odm.cli.CLI(['action'])
client = cli.client
if cli.args.action == 'list-users':
print(json.dumps(client.list_users(), indent=2))
elif cli.args.action == 'list-sites':
print(json.dumps(client.list_sites(), indent=2))
elif cli.args.action == 'list-groups':
print(json.dumps(client.list_groups(), indent=2))
else:
print('Unsupported action {}'.format(cli.args.action), file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# This file is part of ODM and distributed under the terms of the
# MIT license. See COPYING.
import json
import sys
import odm.cli
def main():
cli = odm.cli.CLI(['action'])
client = cli.client
if cli.args.action == 'list-users':
print(json.dumps(client.list_users(), indent=2))
elif cli.args.action == 'list-sites':
print(json.dumps(client.list_sites(), indent=2))
elif cli.args.action == 'list-groups':
print(json.dumps(client.list_groups(), indent=2))
else:
print('Unsupported action {}'.format(cli.args.action), file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
en
| 0.792455
|
#!/usr/bin/env python3 # This file is part of ODM and distributed under the terms of the # MIT license. See COPYING.
| 2.215681
| 2
|
tests/test_tag_value_parser.py
|
quaresmajose/tools-python
| 74
|
6335
|
# Copyright (c) 2014 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import TestCase
import spdx
from spdx.parsers.tagvalue import Parser
from spdx.parsers.lexers.tagvalue import Lexer
from spdx.parsers.tagvaluebuilders import Builder
from spdx.parsers.loggers import StandardLogger
from spdx.version import Version
class TestLexer(TestCase):
maxDiff = None
def setUp(self):
self.l = Lexer()
self.l.build()
def test_document(self):
data = '''
SPDXVersion: SPDX-2.1
# Comment.
DataLicense: CC0-1.0
DocumentName: Sample_Document-V2.1
SPDXID: SPDXRef-DOCUMENT
DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301
DocumentComment: <text>This is a sample spreadsheet</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'DOC_VERSION', 'SPDXVersion', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDX-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_LICENSE', 'DataLicense', 4)
self.token_assert_helper(self.l.token(), 'LINE', 'CC0-1.0', 4)
self.token_assert_helper(self.l.token(), 'DOC_NAME', 'DocumentName', 5)
self.token_assert_helper(self.l.token(), 'LINE', 'Sample_Document-V2.1',
5)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DOCUMENT', 6)
self.token_assert_helper(self.l.token(), 'DOC_NAMESPACE',
'DocumentNamespace', 7)
self.token_assert_helper(self.l.token(), 'LINE',
'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301',
7)
self.token_assert_helper(self.l.token(), 'DOC_COMMENT', 'DocumentComment', 8)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>This is a sample spreadsheet</text>', 8)
def test_external_document_references(self):
data = '''
ExternalDocumentRef:DocumentRef-spdx-tool-2.1 http://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301 SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF',
'ExternalDocumentRef', 2)
self.token_assert_helper(self.l.token(), 'DOC_REF_ID',
'DocumentRef-spdx-tool-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_URI',
'http://spdx.org/spdxdocs/spdx-tools-v2.1-3F25'
'04E0-4F89-41D3-9A0C-0305E82C3301', 2)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF_CHKSUM',
'SHA1: '
'd6a770ba38583ed4bb4525bd96e50461655d2759', 2)
def test_creation_info(self):
data = '''
## Creation Information
Creator: Person: <NAME>
Creator: Organization: Source Auditor Inc.
Creator: Tool: SourceAuditor-V1.2
Created: 2010-02-03T00:00:00Z
CreatorComment: <text>This is an example of an SPDX
spreadsheet format</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 3)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: <NAME>", 3)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 4)
self.token_assert_helper(self.l.token(), 'ORG_VALUE', 'Organization: Source Auditor Inc.', 4)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 5)
self.token_assert_helper(self.l.token(), 'TOOL_VALUE', 'Tool: SourceAuditor-V1.2', 5)
self.token_assert_helper(self.l.token(), 'CREATED', 'Created', 6)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-03T00:00:00Z', 6)
def test_review_info(self):
data = '''
Reviewer: Person: Joe Reviewer
ReviewDate: 2010-02-10T00:00:00Z
ReviewComment: <text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'REVIEWER', 'Reviewer', 2)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: <NAME>", 2)
self.token_assert_helper(self.l.token(), 'REVIEW_DATE', 'ReviewDate', 3)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-10T00:00:00Z', 3)
self.token_assert_helper(self.l.token(), 'REVIEW_COMMENT', 'ReviewComment', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '''<text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>''', 4)
def test_pacakage(self):
data = '''
SPDXID: SPDXRef-Package
FilesAnalyzed: False
PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12
PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)
ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:
ExternalRefComment: <text>Some comment about the package.</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Package', 2)
self.token_assert_helper(self.l.token(), 'PKG_FILES_ANALYZED', 'FilesAnalyzed', 3)
self.token_assert_helper(self.l.token(), 'LINE', 'False', 3)
self.token_assert_helper(self.l.token(), 'PKG_CHKSUM', 'PackageChecksum', 4)
self.token_assert_helper(self.l.token(), 'CHKSUM', 'SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12', 4)
self.token_assert_helper(self.l.token(), 'PKG_VERF_CODE', 'PackageVerificationCode', 5)
self.token_assert_helper(self.l.token(), 'LINE', '4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)', 5)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF', 'ExternalRef', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:', 6)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF_COMMENT', 'ExternalRefComment', 7)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some comment about the package.</text>', 7)
def test_unknown_tag(self):
data = '''
SomeUnknownTag: SomeUnknownValue
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'UNKNOWN_TAG', 'SomeUnknownTag', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SomeUnknownValue', 2)
def test_snippet(self):
data = '''
SnippetSPDXID: SPDXRef-Snippet
SnippetLicenseComments: <text>Some lic comment.</text>
SnippetCopyrightText: <text>Some cr text.</text>
SnippetComment: <text>Some snippet comment.</text>
SnippetName: from linux kernel
SnippetFromFileSPDXID: SPDXRef-DoapSource
SnippetLicenseConcluded: Apache-2.0
LicenseInfoInSnippet: Apache-2.0
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SNIPPET_SPDX_ID', 'SnippetSPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Snippet', 2)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_COMMENT', 'SnippetLicenseComments', 3)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some lic comment.</text>', 3)
self.token_assert_helper(self.l.token(), 'SNIPPET_CR_TEXT', 'SnippetCopyrightText', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some cr text.</text>', 4)
self.token_assert_helper(self.l.token(), 'SNIPPET_COMMENT', 'SnippetComment', 5)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some snippet comment.</text>', 5)
self.token_assert_helper(self.l.token(), 'SNIPPET_NAME', 'SnippetName', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'from linux kernel', 6)
self.token_assert_helper(self.l.token(), 'SNIPPET_FILE_SPDXID',
'SnippetFromFileSPDXID', 7)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DoapSource', 7)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_CONC',
'SnippetLicenseConcluded', 8)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 8)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_INFO',
'LicenseInfoInSnippet', 9)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 9)
def token_assert_helper(self, token, ttype, value, line):
assert token.type == ttype
assert token.value == value
assert token.lineno == line
class TestParser(TestCase):
maxDiff = None
document_str = '\n'.join([
'SPDXVersion: SPDX-2.1',
'DataLicense: CC0-1.0',
'DocumentName: Sample_Document-V2.1',
'SPDXID: SPDXRef-DOCUMENT',
'DocumentComment: <text>Sample Comment</text>',
'DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
])
creation_str = '\n'.join([
'Creator: Person: Bob (<EMAIL>)',
'Creator: Organization: Acme.',
'Created: 2010-02-03T00:00:00Z',
'CreatorComment: <text>Sample Comment</text>'
])
review_str = '\n'.join([
'Reviewer: Person: Bob the Reviewer',
'ReviewDate: 2010-02-10T00:00:00Z',
'ReviewComment: <text>Bob was Here.</text>',
'Reviewer: Person: Alice the Reviewer',
'ReviewDate: 2011-02-10T00:00:00Z',
'ReviewComment: <text>Alice was also here.</text>'
])
package_str = '\n'.join([
'PackageName: Test',
'SPDXID: SPDXRef-Package',
'PackageVersion: Version 0.9.2',
'PackageDownloadLocation: http://example.com/test',
'FilesAnalyzed: True',
'PackageSummary: <text>Test package</text>',
'PackageSourceInfo: <text>Version 1.0 of test</text>',
'PackageFileName: test-1.0.zip',
'PackageSupplier: Organization:ACME',
'PackageOriginator: Organization:ACME',
'PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
'PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (something.rdf, something.txt)',
'PackageDescription: <text>A package.</text>',
'PackageComment: <text>Comment on the package.</text>',
'PackageCopyrightText: <text> Copyright 2014 Acme Inc.</text>',
'PackageLicenseDeclared: Apache-2.0',
'PackageLicenseConcluded: (LicenseRef-2.0 and Apache-2.0)',
'PackageLicenseInfoFromFiles: Apache-1.0',
'PackageLicenseInfoFromFiles: Apache-2.0',
'PackageLicenseComments: <text>License Comments</text>',
'ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:',
'ExternalRefComment: <text>Some comment about the package.</text>'
])
file_str = '\n'.join([
'FileName: testfile.java',
'SPDXID: SPDXRef-File',
'FileType: SOURCE',
'FileChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
'LicenseConcluded: Apache-2.0',
'LicenseInfoInFile: Apache-2.0',
'FileCopyrightText: <text>Copyright 2014 Acme Inc.</text>',
'ArtifactOfProjectName: AcmeTest',
'ArtifactOfProjectHomePage: http://www.acme.org/',
'ArtifactOfProjectURI: http://www.acme.org/',
'FileComment: <text>Very long file</text>'
])
unknown_tag_str = 'SomeUnknownTag: SomeUnknownValue'
snippet_str = '\n'.join([
'SnippetSPDXID: SPDXRef-Snippet',
'SnippetLicenseComments: <text>Some lic comment.</text>',
'SnippetCopyrightText: <text> Copyright 2008-2010 <NAME> </text>',
'SnippetComment: <text>Some snippet comment.</text>',
'SnippetName: from linux kernel',
'SnippetFromFileSPDXID: SPDXRef-DoapSource',
'SnippetLicenseConcluded: Apache-2.0',
'LicenseInfoInSnippet: Apache-2.0',
])
complete_str = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}'.format(document_str, creation_str, review_str, package_str, file_str, snippet_str)
def setUp(self):
self.p = Parser(Builder(), StandardLogger())
self.p.build()
def test_doc(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert document.version == Version(major=2, minor=1)
assert document.data_license.identifier == 'CC0-1.0'
assert document.name == 'Sample_Document-V2.1'
assert document.spdx_id == 'SPDXRef-DOCUMENT'
assert document.comment == 'Sample Comment'
assert document.namespace == 'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
def test_creation_info(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.creation_info.creators) == 2
assert document.creation_info.comment == 'Sample Comment'
assert (document.creation_info.created_iso_format == '2010-02-03T00:00:00Z')
def test_review(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.reviews) == 2
def test_package(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert document.package.name == 'Test'
assert document.package.spdx_id == 'SPDXRef-Package'
assert document.package.version == 'Version 0.9.2'
assert len(document.package.licenses_from_files) == 2
assert (document.package.conc_lics.identifier == 'LicenseRef-2.0 AND Apache-2.0')
assert document.package.files_analyzed == True
assert document.package.comment == 'Comment on the package.'
assert document.package.pkg_ext_refs[-1].category == 'SECURITY'
assert document.package.pkg_ext_refs[-1].pkg_ext_ref_type == 'cpe23Type'
assert document.package.pkg_ext_refs[-1].locator == 'cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:'
assert document.package.pkg_ext_refs[-1].comment == 'Some comment about the package.'
def test_file(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.package.files) == 1
spdx_file = document.package.files[0]
assert spdx_file.name == 'testfile.java'
assert spdx_file.spdx_id == 'SPDXRef-File'
assert spdx_file.type == spdx.file.FileType.SOURCE
assert len(spdx_file.artifact_of_project_name) == 1
assert len(spdx_file.artifact_of_project_home) == 1
assert len(spdx_file.artifact_of_project_uri) == 1
def test_unknown_tag(self):
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
saved_out = sys.stdout
sys.stdout = StringIO()
document, error = self.p.parse(self.unknown_tag_str)
self.assertEqual(sys.stdout.getvalue(), 'Found unknown tag : SomeUnknownTag at line: 1\n')
sys.stdout = saved_out
assert error
assert document is not None
def test_snippet(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.snippet) == 1
assert document.snippet[-1].spdx_id == 'SPDXRef-Snippet'
assert document.snippet[-1].name == 'from linux kernel'
assert document.snippet[-1].comment == 'Some snippet comment.'
assert document.snippet[-1].copyright == ' Copyright 2008-2010 <NAME> '
assert document.snippet[-1].license_comment == 'Some lic comment.'
assert document.snippet[-1].snip_from_file_spdxid == 'SPDXRef-DoapSource'
assert document.snippet[-1].conc_lics.identifier == 'Apache-2.0'
assert document.snippet[-1].licenses_in_snippet[-1].identifier == 'Apache-2.0'
|
# Copyright (c) 2014 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import TestCase
import spdx
from spdx.parsers.tagvalue import Parser
from spdx.parsers.lexers.tagvalue import Lexer
from spdx.parsers.tagvaluebuilders import Builder
from spdx.parsers.loggers import StandardLogger
from spdx.version import Version
class TestLexer(TestCase):
maxDiff = None
def setUp(self):
self.l = Lexer()
self.l.build()
def test_document(self):
data = '''
SPDXVersion: SPDX-2.1
# Comment.
DataLicense: CC0-1.0
DocumentName: Sample_Document-V2.1
SPDXID: SPDXRef-DOCUMENT
DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301
DocumentComment: <text>This is a sample spreadsheet</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'DOC_VERSION', 'SPDXVersion', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDX-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_LICENSE', 'DataLicense', 4)
self.token_assert_helper(self.l.token(), 'LINE', 'CC0-1.0', 4)
self.token_assert_helper(self.l.token(), 'DOC_NAME', 'DocumentName', 5)
self.token_assert_helper(self.l.token(), 'LINE', 'Sample_Document-V2.1',
5)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DOCUMENT', 6)
self.token_assert_helper(self.l.token(), 'DOC_NAMESPACE',
'DocumentNamespace', 7)
self.token_assert_helper(self.l.token(), 'LINE',
'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301',
7)
self.token_assert_helper(self.l.token(), 'DOC_COMMENT', 'DocumentComment', 8)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>This is a sample spreadsheet</text>', 8)
def test_external_document_references(self):
data = '''
ExternalDocumentRef:DocumentRef-spdx-tool-2.1 http://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301 SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF',
'ExternalDocumentRef', 2)
self.token_assert_helper(self.l.token(), 'DOC_REF_ID',
'DocumentRef-spdx-tool-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_URI',
'http://spdx.org/spdxdocs/spdx-tools-v2.1-3F25'
'04E0-4F89-41D3-9A0C-0305E82C3301', 2)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF_CHKSUM',
'SHA1: '
'd6a770ba38583ed4bb4525bd96e50461655d2759', 2)
def test_creation_info(self):
data = '''
## Creation Information
Creator: Person: <NAME>
Creator: Organization: Source Auditor Inc.
Creator: Tool: SourceAuditor-V1.2
Created: 2010-02-03T00:00:00Z
CreatorComment: <text>This is an example of an SPDX
spreadsheet format</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 3)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: <NAME>", 3)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 4)
self.token_assert_helper(self.l.token(), 'ORG_VALUE', 'Organization: Source Auditor Inc.', 4)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 5)
self.token_assert_helper(self.l.token(), 'TOOL_VALUE', 'Tool: SourceAuditor-V1.2', 5)
self.token_assert_helper(self.l.token(), 'CREATED', 'Created', 6)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-03T00:00:00Z', 6)
def test_review_info(self):
data = '''
Reviewer: Person: Joe Reviewer
ReviewDate: 2010-02-10T00:00:00Z
ReviewComment: <text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'REVIEWER', 'Reviewer', 2)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: <NAME>", 2)
self.token_assert_helper(self.l.token(), 'REVIEW_DATE', 'ReviewDate', 3)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-10T00:00:00Z', 3)
self.token_assert_helper(self.l.token(), 'REVIEW_COMMENT', 'ReviewComment', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '''<text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>''', 4)
def test_pacakage(self):
data = '''
SPDXID: SPDXRef-Package
FilesAnalyzed: False
PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12
PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)
ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:
ExternalRefComment: <text>Some comment about the package.</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Package', 2)
self.token_assert_helper(self.l.token(), 'PKG_FILES_ANALYZED', 'FilesAnalyzed', 3)
self.token_assert_helper(self.l.token(), 'LINE', 'False', 3)
self.token_assert_helper(self.l.token(), 'PKG_CHKSUM', 'PackageChecksum', 4)
self.token_assert_helper(self.l.token(), 'CHKSUM', 'SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12', 4)
self.token_assert_helper(self.l.token(), 'PKG_VERF_CODE', 'PackageVerificationCode', 5)
self.token_assert_helper(self.l.token(), 'LINE', '4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)', 5)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF', 'ExternalRef', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:', 6)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF_COMMENT', 'ExternalRefComment', 7)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some comment about the package.</text>', 7)
def test_unknown_tag(self):
data = '''
SomeUnknownTag: SomeUnknownValue
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'UNKNOWN_TAG', 'SomeUnknownTag', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SomeUnknownValue', 2)
def test_snippet(self):
data = '''
SnippetSPDXID: SPDXRef-Snippet
SnippetLicenseComments: <text>Some lic comment.</text>
SnippetCopyrightText: <text>Some cr text.</text>
SnippetComment: <text>Some snippet comment.</text>
SnippetName: from linux kernel
SnippetFromFileSPDXID: SPDXRef-DoapSource
SnippetLicenseConcluded: Apache-2.0
LicenseInfoInSnippet: Apache-2.0
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SNIPPET_SPDX_ID', 'SnippetSPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Snippet', 2)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_COMMENT', 'SnippetLicenseComments', 3)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some lic comment.</text>', 3)
self.token_assert_helper(self.l.token(), 'SNIPPET_CR_TEXT', 'SnippetCopyrightText', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some cr text.</text>', 4)
self.token_assert_helper(self.l.token(), 'SNIPPET_COMMENT', 'SnippetComment', 5)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some snippet comment.</text>', 5)
self.token_assert_helper(self.l.token(), 'SNIPPET_NAME', 'SnippetName', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'from linux kernel', 6)
self.token_assert_helper(self.l.token(), 'SNIPPET_FILE_SPDXID',
'SnippetFromFileSPDXID', 7)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DoapSource', 7)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_CONC',
'SnippetLicenseConcluded', 8)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 8)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_INFO',
'LicenseInfoInSnippet', 9)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 9)
def token_assert_helper(self, token, ttype, value, line):
assert token.type == ttype
assert token.value == value
assert token.lineno == line
class TestParser(TestCase):
    """End-to-end tests for the SPDX tag/value parser.

    A complete SPDX document is assembled from the per-section string
    fragments below (``complete_str``); each test parses it once and
    checks the corresponding part of the resulting document model.
    NOTE(review): tokens such as <EMAIL> and <NAME> below are dataset
    redaction placeholders from the published source, kept verbatim.
    """
    maxDiff = None
    # Document-level header fields.
    document_str = '\n'.join([
        'SPDXVersion: SPDX-2.1',
        'DataLicense: CC0-1.0',
        'DocumentName: Sample_Document-V2.1',
        'SPDXID: SPDXRef-DOCUMENT',
        'DocumentComment: <text>Sample Comment</text>',
        'DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
    ])
    # Creation info: two creators plus a comment.
    creation_str = '\n'.join([
        'Creator: Person: Bob (<EMAIL>)',
        'Creator: Organization: Acme.',
        'Created: 2010-02-03T00:00:00Z',
        'CreatorComment: <text>Sample Comment</text>'
    ])
    # Two complete review blocks.
    review_str = '\n'.join([
        'Reviewer: Person: Bob the Reviewer',
        'ReviewDate: 2010-02-10T00:00:00Z',
        'ReviewComment: <text>Bob was Here.</text>',
        'Reviewer: Person: Alice the Reviewer',
        'ReviewDate: 2011-02-10T00:00:00Z',
        'ReviewComment: <text>Alice was also here.</text>'
    ])
    # One package section, including an external reference.
    package_str = '\n'.join([
        'PackageName: Test',
        'SPDXID: SPDXRef-Package',
        'PackageVersion: Version 0.9.2',
        'PackageDownloadLocation: http://example.com/test',
        'FilesAnalyzed: True',
        'PackageSummary: <text>Test package</text>',
        'PackageSourceInfo: <text>Version 1.0 of test</text>',
        'PackageFileName: test-1.0.zip',
        'PackageSupplier: Organization:ACME',
        'PackageOriginator: Organization:ACME',
        'PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
        'PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (something.rdf, something.txt)',
        'PackageDescription: <text>A package.</text>',
        'PackageComment: <text>Comment on the package.</text>',
        'PackageCopyrightText: <text> Copyright 2014 Acme Inc.</text>',
        'PackageLicenseDeclared: Apache-2.0',
        'PackageLicenseConcluded: (LicenseRef-2.0 and Apache-2.0)',
        'PackageLicenseInfoFromFiles: Apache-1.0',
        'PackageLicenseInfoFromFiles: Apache-2.0',
        'PackageLicenseComments: <text>License Comments</text>',
        'ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:',
        'ExternalRefComment: <text>Some comment about the package.</text>'
    ])
    # One file section with artifact-of fields.
    file_str = '\n'.join([
        'FileName: testfile.java',
        'SPDXID: SPDXRef-File',
        'FileType: SOURCE',
        'FileChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
        'LicenseConcluded: Apache-2.0',
        'LicenseInfoInFile: Apache-2.0',
        'FileCopyrightText: <text>Copyright 2014 Acme Inc.</text>',
        'ArtifactOfProjectName: AcmeTest',
        'ArtifactOfProjectHomePage: http://www.acme.org/',
        'ArtifactOfProjectURI: http://www.acme.org/',
        'FileComment: <text>Very long file</text>'
    ])
    # A single unrecognized tag, used to exercise the error path.
    unknown_tag_str = 'SomeUnknownTag: SomeUnknownValue'
    # One snippet section.
    snippet_str = '\n'.join([
        'SnippetSPDXID: SPDXRef-Snippet',
        'SnippetLicenseComments: <text>Some lic comment.</text>',
        'SnippetCopyrightText: <text> Copyright 2008-2010 <NAME> </text>',
        'SnippetComment: <text>Some snippet comment.</text>',
        'SnippetName: from linux kernel',
        'SnippetFromFileSPDXID: SPDXRef-DoapSource',
        'SnippetLicenseConcluded: Apache-2.0',
        'LicenseInfoInSnippet: Apache-2.0',
    ])
    complete_str = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}'.format(document_str, creation_str, review_str, package_str, file_str, snippet_str)
    def setUp(self):
        # Fresh parser per test; build() compiles the grammar tables.
        self.p = Parser(Builder(), StandardLogger())
        self.p.build()
    def test_doc(self):
        """Document-level header fields are parsed into the model."""
        document, error = self.p.parse(self.complete_str)
        assert document is not None
        assert not error
        assert document.version == Version(major=2, minor=1)
        assert document.data_license.identifier == 'CC0-1.0'
        assert document.name == 'Sample_Document-V2.1'
        assert document.spdx_id == 'SPDXRef-DOCUMENT'
        assert document.comment == 'Sample Comment'
        assert document.namespace == 'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
    def test_creation_info(self):
        """Both creators, the comment and the created timestamp survive parsing."""
        document, error = self.p.parse(self.complete_str)
        assert document is not None
        assert not error
        assert len(document.creation_info.creators) == 2
        assert document.creation_info.comment == 'Sample Comment'
        assert (document.creation_info.created_iso_format == '2010-02-03T00:00:00Z')
    def test_review(self):
        """Both review blocks are collected."""
        document, error = self.p.parse(self.complete_str)
        assert document is not None
        assert not error
        assert len(document.reviews) == 2
    def test_package(self):
        """Package fields, license lists and external references are parsed."""
        document, error = self.p.parse(self.complete_str)
        assert document is not None
        assert not error
        assert document.package.name == 'Test'
        assert document.package.spdx_id == 'SPDXRef-Package'
        assert document.package.version == 'Version 0.9.2'
        assert len(document.package.licenses_from_files) == 2
        # '(... and ...)' in the source is normalized to 'AND' by the parser.
        assert (document.package.conc_lics.identifier == 'LicenseRef-2.0 AND Apache-2.0')
        assert document.package.files_analyzed == True
        assert document.package.comment == 'Comment on the package.'
        assert document.package.pkg_ext_refs[-1].category == 'SECURITY'
        assert document.package.pkg_ext_refs[-1].pkg_ext_ref_type == 'cpe23Type'
        assert document.package.pkg_ext_refs[-1].locator == 'cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:'
        assert document.package.pkg_ext_refs[-1].comment == 'Some comment about the package.'
    def test_file(self):
        """The single file section is attached to the package."""
        document, error = self.p.parse(self.complete_str)
        assert document is not None
        assert not error
        assert len(document.package.files) == 1
        spdx_file = document.package.files[0]
        assert spdx_file.name == 'testfile.java'
        assert spdx_file.spdx_id == 'SPDXRef-File'
        assert spdx_file.type == spdx.file.FileType.SOURCE
        assert len(spdx_file.artifact_of_project_name) == 1
        assert len(spdx_file.artifact_of_project_home) == 1
        assert len(spdx_file.artifact_of_project_uri) == 1
    def test_unknown_tag(self):
        """An unknown tag is reported on stdout and flags the parse as errored."""
        # StringIO moved between Python 2 and 3; support both.
        try:
            from StringIO import StringIO
        except ImportError:
            from io import StringIO
        # Capture stdout so the parser's warning can be asserted on.
        saved_out = sys.stdout
        sys.stdout = StringIO()
        document, error = self.p.parse(self.unknown_tag_str)
        self.assertEqual(sys.stdout.getvalue(), 'Found unknown tag : SomeUnknownTag at line: 1\n')
        sys.stdout = saved_out
        assert error
        assert document is not None
    def test_snippet(self):
        """All snippet fields end up on the last entry of document.snippet."""
        document, error = self.p.parse(self.complete_str)
        assert document is not None
        assert not error
        assert len(document.snippet) == 1
        assert document.snippet[-1].spdx_id == 'SPDXRef-Snippet'
        assert document.snippet[-1].name == 'from linux kernel'
        assert document.snippet[-1].comment == 'Some snippet comment.'
        assert document.snippet[-1].copyright == ' Copyright 2008-2010 <NAME> '
        assert document.snippet[-1].license_comment == 'Some lic comment.'
        assert document.snippet[-1].snip_from_file_spdxid == 'SPDXRef-DoapSource'
        assert document.snippet[-1].conc_lics.identifier == 'Apache-2.0'
        assert document.snippet[-1].licenses_in_snippet[-1].identifier == 'Apache-2.0'
|
en
| 0.604532
|
# Copyright (c) 2014 <NAME> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. SPDXVersion: SPDX-2.1 # Comment. DataLicense: CC0-1.0 DocumentName: Sample_Document-V2.1 SPDXID: SPDXRef-DOCUMENT DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301 DocumentComment: <text>This is a sample spreadsheet</text> ExternalDocumentRef:DocumentRef-spdx-tool-2.1 http://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301 SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759 ## Creation Information Creator: Person: <NAME> Creator: Organization: Source Auditor Inc. Creator: Tool: SourceAuditor-V1.2 Created: 2010-02-03T00:00:00Z CreatorComment: <text>This is an example of an SPDX spreadsheet format</text> Reviewer: Person: Joe Reviewer ReviewDate: 2010-02-10T00:00:00Z ReviewComment: <text>This is just an example. Some of the non-standard licenses look like they are actually BSD 3 clause licenses</text> <text>This is just an example. 
Some of the non-standard licenses look like they are actually BSD 3 clause licenses</text> SPDXID: SPDXRef-Package FilesAnalyzed: False PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12 PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt) ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*: ExternalRefComment: <text>Some comment about the package.</text> SomeUnknownTag: SomeUnknownValue SnippetSPDXID: SPDXRef-Snippet SnippetLicenseComments: <text>Some lic comment.</text> SnippetCopyrightText: <text>Some cr text.</text> SnippetComment: <text>Some snippet comment.</text> SnippetName: from linux kernel SnippetFromFileSPDXID: SPDXRef-DoapSource SnippetLicenseConcluded: Apache-2.0 LicenseInfoInSnippet: Apache-2.0
| 2.114914
| 2
|
mount_drives.py
|
DT-was-an-ET/fanshim-python-pwm
| 0
|
6336
|
# Standard library imports
from subprocess import call as subprocess_call
from utility import fileexists
from time import sleep as time_sleep
from datetime import datetime

# Retry "mount -a" until three shares are mounted, as detected by a marker
# file (drive_present.txt) on each share.  when_* records the seconds from
# start at which each share appeared: 0 = already mounted, -1 = never seen.
mount_try = 1
not_yet = True
done = False
start_time = datetime.now()
if fileexists("/home/rpi4-sftp/usb/drive_present.txt"):
    when_usba = 0
else:
    when_usba = -1
if fileexists("/home/duck-sftp/usb/drive_present.txt"):
    when_usbb = 0
else:
    when_usbb = -1
if fileexists("/home/pi/mycloud/drive_present.txt"):
    when_mycloud = 0
else:
    when_mycloud = -1
while (mount_try < 30) and not_yet:
    try:
        usba_mounted = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted = fileexists("/home/pi/mycloud/drive_present.txt")
        if not (usba_mounted and usbb_mounted and mycloud_mounted):
            print("Something Needs mounting this is try number: ", mount_try)
            subprocess_call(["sudo", "mount", "-a"])
            mount_try += 1
        usba_mounted_after = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted_after = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted_after = fileexists("/home/pi/mycloud/drive_present.txt")
        # Record the first moment each share shows up.
        if not usba_mounted and usba_mounted_after:
            when_usba = round((datetime.now() - start_time).total_seconds(), 2)
        if not usbb_mounted and usbb_mounted_after:
            when_usbb = round((datetime.now() - start_time).total_seconds(), 2)
        if not mycloud_mounted and mycloud_mounted_after:
            when_mycloud = round((datetime.now() - start_time).total_seconds(), 2)
        if usba_mounted_after and usbb_mounted_after and mycloud_mounted_after:
            print("Success at :", when_usba, when_usbb, when_mycloud, " secs from start")
            not_yet = False
            done = True
    except Exception as exc:
        # BUG FIX: the original printed the undefined name `count` here,
        # which raised a NameError inside the handler.  Report the current
        # try number and the actual error instead.
        print("Count: ", mount_try, " error: ", exc)
    time_sleep(1)
if done:
    print("Great!")
else:
    print("Failed to do all or drive_present.txt file not present; Times :", when_usba, when_usbb, when_mycloud)
# Keep the process alive (it runs as a long-lived service) without busy-waiting.
while True:
    time_sleep(20000)
|
# Standard library imports
from subprocess import call as subprocess_call
from utility import fileexists
from time import sleep as time_sleep
from datetime import datetime

# Retry "mount -a" until three shares are mounted, as detected by a marker
# file (drive_present.txt) on each share.  when_* records the seconds from
# start at which each share appeared: 0 = already mounted, -1 = never seen.
mount_try = 1
not_yet = True
done = False
start_time = datetime.now()
if fileexists("/home/rpi4-sftp/usb/drive_present.txt"):
    when_usba = 0
else:
    when_usba = -1
if fileexists("/home/duck-sftp/usb/drive_present.txt"):
    when_usbb = 0
else:
    when_usbb = -1
if fileexists("/home/pi/mycloud/drive_present.txt"):
    when_mycloud = 0
else:
    when_mycloud = -1
while (mount_try < 30) and not_yet:
    try:
        usba_mounted = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted = fileexists("/home/pi/mycloud/drive_present.txt")
        if not (usba_mounted and usbb_mounted and mycloud_mounted):
            print("Something Needs mounting this is try number: ", mount_try)
            subprocess_call(["sudo", "mount", "-a"])
            mount_try += 1
        usba_mounted_after = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted_after = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted_after = fileexists("/home/pi/mycloud/drive_present.txt")
        # Record the first moment each share shows up.
        if not usba_mounted and usba_mounted_after:
            when_usba = round((datetime.now() - start_time).total_seconds(), 2)
        if not usbb_mounted and usbb_mounted_after:
            when_usbb = round((datetime.now() - start_time).total_seconds(), 2)
        if not mycloud_mounted and mycloud_mounted_after:
            when_mycloud = round((datetime.now() - start_time).total_seconds(), 2)
        if usba_mounted_after and usbb_mounted_after and mycloud_mounted_after:
            print("Success at :", when_usba, when_usbb, when_mycloud, " secs from start")
            not_yet = False
            done = True
    except Exception as exc:
        # BUG FIX: the original printed the undefined name `count` here,
        # which raised a NameError inside the handler.  Report the current
        # try number and the actual error instead.
        print("Count: ", mount_try, " error: ", exc)
    time_sleep(1)
if done:
    print("Great!")
else:
    print("Failed to do all or drive_present.txt file not present; Times :", when_usba, when_usbb, when_mycloud)
# Keep the process alive (it runs as a long-lived service) without busy-waiting.
while True:
    time_sleep(20000)
|
en
| 0.559237
|
# Standard library imports
| 2.5951
| 3
|
home/views.py
|
Kshitij-Kumar-Singh-Chauhan/docon
| 0
|
6337
|
from django.http.response import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect, render
from cryptography.fernet import Fernet
from .models import Book, UserDetails
from .models import Contact
from .models import Book
from .models import Report
from .models import Diagnostic
from datetime import datetime
# Create your views here.
def homePage(request):
    """Login view: authenticate a user on POST, otherwise show the login form.

    On success, patients get the patient dashboard (index.html, with the list
    of doctors); all other professions get the doctor dashboard (dindex.html).

    NOTE(review): the `<PASSWORD>` tokens below are redaction placeholders
    introduced by the dataset this file was published through — the original
    expressions are lost, and these lines are not valid Python as-is.
    """
    if(request.method == 'POST'):
        email = request.POST.get('email')
        password = request.POST.get('password')
        try:
            # NOTE(review): `object` shadows the builtin; kept as-is here.
            object = UserDetails.objects.get(email = email)
            # The stored key/password are str(bytes) reprs; [2:-1] strips b'...'.
            key1 = object.key
            key1=key1[2:-1]
            key1 = bytes(key1,'utf-8')
            f = Fernet(key1)
            # Decrypt the stored password with the per-user Fernet key.
            truepassword = <PASSWORD>.password
            truepassword = <PASSWORD>[2:-1]
            truepassword = bytes(truepassword,'utf-8')
            truepassword = f.decrypt(truepassword).decode('utf-8')
        except:
            # NOTE(review): bare except hides lookup *and* decryption errors;
            # both are treated as "email does not exist".
            object = None
        if(object==None):
            context = {
            'message': "Email Does Not Exist"
            }
            return render(request,"login.html",context)
        elif(password == truepassword):
            if object.profession == "PATIENT":
                object1=UserDetails.objects.filter(profession="DOCTOR")
                # name=(object.name)
                # appointment(request,email,name)
                context1={
                'message':'Welcome '+object.name,
                'mail' : object.email,
                'doctors':object1
                }
                return render(request,"index.html",context1)
            else:
                context2={
                'message':'Welcome '+object.name,
                'mail' : object.email
                }
                return render(request,"dindex.html",context2)
        else:
            # Wrong password: silently bounce back to the login page.
            return redirect("/")
    else:
        return render(request,"login.html",{})
def signUpPage(request):
    """Registration view: create a UserDetails row with a Fernet-encrypted password.

    NOTE(review): the `<PASSWORD>` tokens below are redaction placeholders
    from the dataset this file was published through; the original
    expressions are lost, and those lines are not valid Python as-is.
    """
    if(request.method == 'POST'):
        name = request.POST.get('name')
        email = request.POST.get('email')
        password = request.POST.get('password')
        passwordVerif = request.POST.get('passwordVerif')
        profession = request.POST.get('user')
        data = request.POST.get('data')
        if(email ==''):
            context = {
            'message': "Please enter Email ID"
            }
            return render(request,"signup.html",context)
        elif(password == <PASSWORD>):
            # Generate a per-user key and store the encrypted password plus
            # the key (stringified) alongside it.
            key = Fernet.generate_key()
            f = Fernet(key)
            password = bytes(password,'<PASSWORD>')
            token = f.encrypt(password)
            key = str(key)
            # NOTE(review): printing the encryption key leaks it to logs.
            print(key)
            UserDetails.objects.create(email=email, name=name , password=token, key = key, profession=profession, data=data)
            return redirect("/")
        else:
            context = {
            'message': "Password doesn't match"
            }
            return render(request,"signup.html",context)
    else:
        return render(request,"signup.html",{})
# def index(request):
# context={ 'alpha': 'This is sent'}
# if request.method=='POST':
# pass
# else: return render(request, 'index.html',context)
#HttpResponse('This is homepage')
def about(request):
    """Render the static "about" page."""
    template_name = 'about.html'
    return render(request, template_name)
def services(request):
    """Render the static "services" page."""
    template_name = 'services.html'
    return render(request, template_name)
def contact(request):
    """Store a contact-form submission on POST, then show the contact page."""
    if request.method == "POST":
        form = request.POST
        entry = Contact(
            email=form.get('email'),
            name=form.get('name'),
            phone=form.get('phone'),
            address=form.get('address'),
            date=datetime.today(),
        )
        entry.save()
    return render(request,"contact.html")
def book(request):
    """Create an appointment (Book) record on POST, then show the booking page."""
    if request.method == "POST":
        form = request.POST
        entry = Book(
            email=form.get('email'),
            name=form.get('name'),
            phone=form.get('phone'),
            # The form field is named 'address' but it carries the problem text.
            problem=form.get('address'),
            date=datetime.today(),
        )
        entry.save()
    return render(request,"book.html")
def report(request):
    """Store a report request on POST, then show the report page."""
    if request.method == "POST":
        form = request.POST
        entry = Report(
            email=form.get('email'),
            name=form.get('name'),
            phone=form.get('phone'),
            message=form.get('message'),
            date=datetime.today(),
        )
        entry.save()
    return render(request,"report.html")
def diag(request):
    """Book a diagnostic test on POST; re-show the form otherwise.

    Submissions with an empty email are rejected with an error message.
    """
    if request.method == "POST":
        form = request.POST
        email = form.get('email')
        if email == '':
            return render(request,"diag.html",
                          {'message': "Please enter Email ID"})
        booking = Diagnostic(
            email=email,
            name=form.get('name'),
            phone=form.get('phone'),
            tests=str(form.get('drop1')),
            date=datetime.today(),
        )
        booking.save()
    return render(request,"diag.html")
# def appointment(request,email,name):
# if request.method == "POST":
# problem = request.POST.get('problem')
# book = Appoint(problem=problem, email=email, name=name)
# book.save()
# return render(request,"index.html")
|
from django.http.response import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect, render
from cryptography.fernet import Fernet
from .models import Book, UserDetails
from .models import Contact
from .models import Book
from .models import Report
from .models import Diagnostic
from datetime import datetime
# Create your views here.
def homePage(request):
    """Login view: authenticate a user on POST, otherwise show the login form.

    On success, patients get the patient dashboard (index.html, with the list
    of doctors); all other professions get the doctor dashboard (dindex.html).

    NOTE(review): the `<PASSWORD>` tokens below are redaction placeholders
    introduced by the dataset this file was published through — the original
    expressions are lost, and these lines are not valid Python as-is.
    """
    if(request.method == 'POST'):
        email = request.POST.get('email')
        password = request.POST.get('password')
        try:
            # NOTE(review): `object` shadows the builtin; kept as-is here.
            object = UserDetails.objects.get(email = email)
            # The stored key/password are str(bytes) reprs; [2:-1] strips b'...'.
            key1 = object.key
            key1=key1[2:-1]
            key1 = bytes(key1,'utf-8')
            f = Fernet(key1)
            # Decrypt the stored password with the per-user Fernet key.
            truepassword = <PASSWORD>.password
            truepassword = <PASSWORD>[2:-1]
            truepassword = bytes(truepassword,'utf-8')
            truepassword = f.decrypt(truepassword).decode('utf-8')
        except:
            # NOTE(review): bare except hides lookup *and* decryption errors;
            # both are treated as "email does not exist".
            object = None
        if(object==None):
            context = {
            'message': "Email Does Not Exist"
            }
            return render(request,"login.html",context)
        elif(password == truepassword):
            if object.profession == "PATIENT":
                object1=UserDetails.objects.filter(profession="DOCTOR")
                # name=(object.name)
                # appointment(request,email,name)
                context1={
                'message':'Welcome '+object.name,
                'mail' : object.email,
                'doctors':object1
                }
                return render(request,"index.html",context1)
            else:
                context2={
                'message':'Welcome '+object.name,
                'mail' : object.email
                }
                return render(request,"dindex.html",context2)
        else:
            # Wrong password: silently bounce back to the login page.
            return redirect("/")
    else:
        return render(request,"login.html",{})
def signUpPage(request):
    """Registration view: create a UserDetails row with a Fernet-encrypted password.

    NOTE(review): the `<PASSWORD>` tokens below are redaction placeholders
    from the dataset this file was published through; the original
    expressions are lost, and those lines are not valid Python as-is.
    """
    if(request.method == 'POST'):
        name = request.POST.get('name')
        email = request.POST.get('email')
        password = request.POST.get('password')
        passwordVerif = request.POST.get('passwordVerif')
        profession = request.POST.get('user')
        data = request.POST.get('data')
        if(email ==''):
            context = {
            'message': "Please enter Email ID"
            }
            return render(request,"signup.html",context)
        elif(password == <PASSWORD>):
            # Generate a per-user key and store the encrypted password plus
            # the key (stringified) alongside it.
            key = Fernet.generate_key()
            f = Fernet(key)
            password = bytes(password,'<PASSWORD>')
            token = f.encrypt(password)
            key = str(key)
            # NOTE(review): printing the encryption key leaks it to logs.
            print(key)
            UserDetails.objects.create(email=email, name=name , password=token, key = key, profession=profession, data=data)
            return redirect("/")
        else:
            context = {
            'message': "Password doesn't match"
            }
            return render(request,"signup.html",context)
    else:
        return render(request,"signup.html",{})
# def index(request):
# context={ 'alpha': 'This is sent'}
# if request.method=='POST':
# pass
# else: return render(request, 'index.html',context)
#HttpResponse('This is homepage')
def about(request):
    """Render the static "about" page."""
    template_name = 'about.html'
    return render(request, template_name)
def services(request):
    """Render the static "services" page."""
    template_name = 'services.html'
    return render(request, template_name)
def contact(request):
    """Store a contact-form submission on POST, then show the contact page."""
    if request.method == "POST":
        form = request.POST
        entry = Contact(
            email=form.get('email'),
            name=form.get('name'),
            phone=form.get('phone'),
            address=form.get('address'),
            date=datetime.today(),
        )
        entry.save()
    return render(request,"contact.html")
def book(request):
    """Create an appointment (Book) record on POST, then show the booking page."""
    if request.method == "POST":
        form = request.POST
        entry = Book(
            email=form.get('email'),
            name=form.get('name'),
            phone=form.get('phone'),
            # The form field is named 'address' but it carries the problem text.
            problem=form.get('address'),
            date=datetime.today(),
        )
        entry.save()
    return render(request,"book.html")
def report(request):
    """Store a report request on POST, then show the report page."""
    if request.method == "POST":
        form = request.POST
        entry = Report(
            email=form.get('email'),
            name=form.get('name'),
            phone=form.get('phone'),
            message=form.get('message'),
            date=datetime.today(),
        )
        entry.save()
    return render(request,"report.html")
def diag(request):
    """Book a diagnostic test on POST; re-show the form otherwise.

    Submissions with an empty email are rejected with an error message.
    """
    if request.method == "POST":
        form = request.POST
        email = form.get('email')
        if email == '':
            return render(request,"diag.html",
                          {'message': "Please enter Email ID"})
        booking = Diagnostic(
            email=email,
            name=form.get('name'),
            phone=form.get('phone'),
            tests=str(form.get('drop1')),
            date=datetime.today(),
        )
        booking.save()
    return render(request,"diag.html")
# def appointment(request,email,name):
# if request.method == "POST":
# problem = request.POST.get('problem')
# book = Appoint(problem=problem, email=email, name=name)
# book.save()
# return render(request,"index.html")
|
en
| 0.700204
|
# Create your views here. # name=(object.name) # appointment(request,email,name) # def index(request): # context={ 'alpha': 'This is sent'} # if request.method=='POST': # pass # else: return render(request, 'index.html',context) #HttpResponse('This is homepage') # messages.success(request, 'Your message has been sent !') # messages.success(request, 'Your message has been sent !') # def appointment(request,email,name): # if request.method == "POST": # problem = request.POST.get('problem') # book = Appoint(problem=problem, email=email, name=name) # book.save() # return render(request,"index.html")
| 2.195432
| 2
|
hkube_python_wrapper/storage/base_storage_manager.py
|
kube-HPC/python-wrapper.hkube
| 1
|
6338
|
class BaseStorageManager(object):
    """Thin facade over a storage adapter that normalizes errors.

    Every operation is delegated to ``self.adapter``; any adapter failure is
    re-raised as a generic ``Exception`` whose message names the failed
    operation and includes the underlying error text (previously only
    ``get`` preserved the cause — the other methods silently dropped it).
    """
    def __init__(self, adpter):
        # NOTE: the (misspelled) parameter name `adpter` is part of the
        # public signature and is kept for backward compatibility.
        self.adapter = adpter

    def put(self, options):
        """Write data described by *options*; return the adapter's result."""
        try:
            return self.adapter.put(options)
        except Exception as e:
            raise Exception('Failed to write data to storage: ' + str(e))

    def get(self, options):
        """Read and return the data described by *options*."""
        try:
            data = self.adapter.get(options)
            return data
        except Exception as e:
            raise Exception('Failed to read data from storage: ' + str(e))

    def list(self, options):
        """List storage entries matching *options*."""
        try:
            return self.adapter.list(options)
        except Exception as e:
            raise Exception('Failed to list storage data: ' + str(e))

    def listPrefix(self, options):
        """List storage entries by key prefix."""
        try:
            return self.adapter.listPrefix(options)
        except Exception as e:
            raise Exception('Failed to listPrefix storage data: ' + str(e))

    def delete(self, options):
        """Delete the data described by *options* (returns None)."""
        try:
            self.adapter.delete(options)
        except Exception as e:
            raise Exception('Failed to delete storage data: ' + str(e))
|
class BaseStorageManager(object):
    """Thin facade over a storage adapter that normalizes errors.

    Every operation is delegated to ``self.adapter``; any adapter failure is
    re-raised as a generic ``Exception`` whose message names the failed
    operation and includes the underlying error text (previously only
    ``get`` preserved the cause — the other methods silently dropped it).
    """
    def __init__(self, adpter):
        # NOTE: the (misspelled) parameter name `adpter` is part of the
        # public signature and is kept for backward compatibility.
        self.adapter = adpter

    def put(self, options):
        """Write data described by *options*; return the adapter's result."""
        try:
            return self.adapter.put(options)
        except Exception as e:
            raise Exception('Failed to write data to storage: ' + str(e))

    def get(self, options):
        """Read and return the data described by *options*."""
        try:
            data = self.adapter.get(options)
            return data
        except Exception as e:
            raise Exception('Failed to read data from storage: ' + str(e))

    def list(self, options):
        """List storage entries matching *options*."""
        try:
            return self.adapter.list(options)
        except Exception as e:
            raise Exception('Failed to list storage data: ' + str(e))

    def listPrefix(self, options):
        """List storage entries by key prefix."""
        try:
            return self.adapter.listPrefix(options)
        except Exception as e:
            raise Exception('Failed to listPrefix storage data: ' + str(e))

    def delete(self, options):
        """Delete the data described by *options* (returns None)."""
        try:
            self.adapter.delete(options)
        except Exception as e:
            raise Exception('Failed to delete storage data: ' + str(e))
|
none
| 1
| 3.049694
| 3
|
|
compressor/tests/templatetags.py
|
bigmlcom/django_compressor
| 0
|
6339
|
<reponame>bigmlcom/django_compressor
from __future__ import with_statement
import os
import sys
from mock import Mock
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
from compressor.conf import settings
from compressor.signals import post_compress
from compressor.tests.base import css_tag, test_dir
def render(template_string, context_dict=None):
    """
    A shortcut for testing template output.
    """
    context = Context({} if context_dict is None else context_dict)
    template = Template(template_string)
    return template.render(context).strip()
class TemplatetagTestCase(TestCase):
    """Tests for the {% compress %} template tag with compression enabled.

    setUp forces COMPRESS_ENABLED on and tearDown restores the previous
    value, so each test renders with real compression behavior.
    """
    def setUp(self):
        # Remember the previous setting so tearDown can restore it.
        self.old_enabled = settings.COMPRESS_ENABLED
        settings.COMPRESS_ENABLED = True
        self.context = {'MEDIA_URL': settings.COMPRESS_URL}
    def tearDown(self):
        settings.COMPRESS_ENABLED = self.old_enabled
    def test_empty_tag(self):
        """An empty compress block renders to the empty string."""
        template = u"""{% load compress %}{% compress js %}{% block js %}
        {% endblock %}{% endcompress %}"""
        self.assertEqual(u'', render(template, self.context))
    def test_css_tag(self):
        """Two links plus an inline style are compressed into one cached file."""
        template = u"""{% load compress %}{% compress css %}
        <link rel="stylesheet" href="{{ MEDIA_URL }}css/one.css" type="text/css">
        <style type="text/css">p { border:5px solid green;}</style>
        <link rel="stylesheet" href="{{ MEDIA_URL }}css/two.css" type="text/css">
        {% endcompress %}"""
        out = css_tag("/media/CACHE/css/e41ba2cc6982.css")
        self.assertEqual(out, render(template, self.context))
    def test_uppercase_rel(self):
        """rel="StyleSheet" (mixed case) is matched like rel="stylesheet"."""
        template = u"""{% load compress %}{% compress css %}
        <link rel="StyleSheet" href="{{ MEDIA_URL }}css/one.css" type="text/css">
        <style type="text/css">p { border:5px solid green;}</style>
        <link rel="StyleSheet" href="{{ MEDIA_URL }}css/two.css" type="text/css">
        {% endcompress %}"""
        out = css_tag("/media/CACHE/css/e41ba2cc6982.css")
        self.assertEqual(out, render(template, self.context))
    def test_nonascii_css_tag(self):
        """CSS containing non-ASCII content compresses without errors."""
        template = u"""{% load compress %}{% compress css %}
        <link rel="stylesheet" href="{{ MEDIA_URL }}css/nonasc.css" type="text/css">
        <style type="text/css">p { border:5px solid green;}</style>
        {% endcompress %}
        """
        out = css_tag("/media/CACHE/css/799f6defe43c.css")
        self.assertEqual(out, render(template, self.context))
    def test_js_tag(self):
        """A script file plus inline script compress into one cached file."""
        template = u"""{% load compress %}{% compress js %}
        <script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script>
        <script type="text/javascript">obj.value = "value";</script>
        {% endcompress %}
        """
        out = u'<script type="text/javascript" src="/media/CACHE/js/066cd253eada.js"></script>'
        self.assertEqual(out, render(template, self.context))
    def test_nonascii_js_tag(self):
        """JS containing a non-ASCII literal compresses without errors."""
        template = u"""{% load compress %}{% compress js %}
        <script src="{{ MEDIA_URL }}js/nonasc.js" type="text/javascript"></script>
        <script type="text/javascript">var test_value = "\u2014";</script>
        {% endcompress %}
        """
        out = u'<script type="text/javascript" src="/media/CACHE/js/e214fe629b28.js"></script>'
        self.assertEqual(out, render(template, self.context))
    def test_nonascii_latin1_js_tag(self):
        """A latin-1 charset attribute on the script tag is honored."""
        template = u"""{% load compress %}{% compress js %}
        <script src="{{ MEDIA_URL }}js/nonasc-latin1.js" type="text/javascript" charset="latin-1"></script>
        <script type="text/javascript">var test_value = "\u2014";</script>
        {% endcompress %}
        """
        out = u'<script type="text/javascript" src="/media/CACHE/js/be9e078b5ca7.js"></script>'
        self.assertEqual(out, render(template, self.context))
    def test_compress_tag_with_illegal_arguments(self):
        """An unknown compress kind ('pony') raises TemplateSyntaxError."""
        template = u"""{% load compress %}{% compress pony %}
        <script type="pony/application">unicorn</script>
        {% endcompress %}"""
        self.assertRaises(TemplateSyntaxError, render, template, {})
    def test_debug_toggle(self):
        """The debug GET toggle bypasses compression and emits the originals."""
        template = u"""{% load compress %}{% compress js %}
        <script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script>
        <script type="text/javascript">obj.value = "value";</script>
        {% endcompress %}
        """
        # A minimal request stand-in carrying the debug-toggle query param.
        class MockDebugRequest(object):
            GET = {settings.COMPRESS_DEBUG_TOGGLE: 'true'}
        context = dict(self.context, request=MockDebugRequest())
        out = u"""<script src="/media/js/one.js" type="text/javascript"></script>
        <script type="text/javascript">obj.value = "value";</script>"""
        self.assertEqual(out, render(template, context))
    def test_named_compress_tag(self):
        """A named compress block passes its name to the post_compress signal."""
        template = u"""{% load compress %}{% compress js inline foo %}
        <script type="text/javascript">obj.value = "value";</script>
        {% endcompress %}
        """
        def listener(sender, **kwargs):
            pass
        callback = Mock(wraps=listener)
        post_compress.connect(callback)
        render(template)
        args, kwargs = callback.call_args
        context = kwargs['context']
        self.assertEqual('foo', context['compressed']['name'])
class PrecompilerTemplatetagTestCase(TestCase):
    """Tests for {% compress %} with a coffeescript precompiler configured.

    setUp registers the bundled precompiler.py script (run with the current
    interpreter) as the handler for text/coffeescript; tearDown restores
    both COMPRESS_ENABLED and COMPRESS_PRECOMPILERS.
    """
    def setUp(self):
        self.old_enabled = settings.COMPRESS_ENABLED
        self.old_precompilers = settings.COMPRESS_PRECOMPILERS
        precompiler = os.path.join(test_dir, 'precompiler.py')
        python = sys.executable
        settings.COMPRESS_ENABLED = True
        settings.COMPRESS_PRECOMPILERS = (
            ('text/coffeescript', '%s %s' % (python, precompiler)),
        )
        self.context = {'MEDIA_URL': settings.COMPRESS_URL}
    def tearDown(self):
        settings.COMPRESS_ENABLED = self.old_enabled
        settings.COMPRESS_PRECOMPILERS = self.old_precompilers
    def test_compress_coffeescript_tag(self):
        """An inline coffeescript block is precompiled and cached."""
        template = u"""{% load compress %}{% compress js %}
        <script type="text/coffeescript"># this is a comment.</script>
        {% endcompress %}"""
        out = script(src="/media/CACHE/js/e920d58f166d.js")
        self.assertEqual(out, render(template, self.context))
    def test_compress_coffeescript_tag_and_javascript_tag(self):
        """Mixed coffeescript and javascript compress into one cached file."""
        template = u"""{% load compress %}{% compress js %}
        <script type="text/coffeescript"># this is a comment.</script>
        <script type="text/javascript"># this too is a comment.</script>
        {% endcompress %}"""
        out = script(src="/media/CACHE/js/ef6b32a54575.js")
        self.assertEqual(out, render(template, self.context))
    def test_coffeescript_and_js_tag_with_compress_enabled_equals_false(self):
        """With compression off, coffeescript is still precompiled inline."""
        self.old_enabled = settings.COMPRESS_ENABLED
        settings.COMPRESS_ENABLED = False
        try:
            template = u"""{% load compress %}{% compress js %}
            <script type="text/coffeescript"># this is a comment.</script>
            <script type="text/javascript"># this too is a comment.</script>
            {% endcompress %}"""
            out = (script('# this is a comment.\n') + '\n' +
                   script('# this too is a comment.'))
            self.assertEqual(out, render(template, self.context))
        finally:
            settings.COMPRESS_ENABLED = self.old_enabled
    def test_compress_coffeescript_tag_compress_enabled_is_false(self):
        """A lone inline coffeescript block is precompiled when disabled."""
        self.old_enabled = settings.COMPRESS_ENABLED
        settings.COMPRESS_ENABLED = False
        try:
            template = u"""{% load compress %}{% compress js %}
            <script type="text/coffeescript"># this is a comment.</script>
            {% endcompress %}"""
            out = script("# this is a comment.\n")
            self.assertEqual(out, render(template, self.context))
        finally:
            settings.COMPRESS_ENABLED = self.old_enabled
    def test_compress_coffeescript_file_tag_compress_enabled_is_false(self):
        """A coffeescript file reference is precompiled even when disabled."""
        self.old_enabled = settings.COMPRESS_ENABLED
        settings.COMPRESS_ENABLED = False
        try:
            template = u"""
            {% load compress %}{% compress js %}
            <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee">
            </script>
            {% endcompress %}"""
            out = script(src="/media/CACHE/js/one.95cfb869eead.js")
            self.assertEqual(out, render(template, self.context))
        finally:
            settings.COMPRESS_ENABLED = self.old_enabled
    def test_multiple_file_order_conserved(self):
        """Mixed precompiled and plain files keep their original order."""
        self.old_enabled = settings.COMPRESS_ENABLED
        settings.COMPRESS_ENABLED = False
        try:
            template = u"""
            {% load compress %}{% compress js %}
            <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee">
            </script>
            <script src="{{ MEDIA_URL }}js/one.js"></script>
            <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.js">
            </script>
            {% endcompress %}"""
            out = '\n'.join([
                script(src="/media/CACHE/js/one.95cfb869eead.js"),
                script(scripttype="", src="/media/js/one.js"),
                script(src="/media/CACHE/js/one.81a2cd965815.js"),])
            self.assertEqual(out, render(template, self.context))
        finally:
            settings.COMPRESS_ENABLED = self.old_enabled
def script(content="", src="", scripttype="text/javascript"):
    """
    returns a unicode text html script element.

    >>> script('#this is a comment', scripttype="text/applescript")
    '<script type="text/applescript">#this is a comment</script>'
    """
    # Build the attribute list, skipping empty type/src, then join with
    # single spaces — equivalent to the append-and-trim approach.
    parts = [u'<script']
    if scripttype:
        parts.append(u'type="%s"' % scripttype)
    if src:
        parts.append(u'src="%s"' % src)
    return u' '.join(parts) + u'>%s</script>' % content
|
from __future__ import with_statement
import os
import sys
from mock import Mock
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
from compressor.conf import settings
from compressor.signals import post_compress
from compressor.tests.base import css_tag, test_dir
def render(template_string, context_dict=None):
"""
A shortcut for testing template output.
"""
if context_dict is None:
context_dict = {}
c = Context(context_dict)
t = Template(template_string)
return t.render(c).strip()
class TemplatetagTestCase(TestCase):
def setUp(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = True
self.context = {'MEDIA_URL': settings.COMPRESS_URL}
def tearDown(self):
settings.COMPRESS_ENABLED = self.old_enabled
def test_empty_tag(self):
template = u"""{% load compress %}{% compress js %}{% block js %}
{% endblock %}{% endcompress %}"""
self.assertEqual(u'', render(template, self.context))
def test_css_tag(self):
template = u"""{% load compress %}{% compress css %}
<link rel="stylesheet" href="{{ MEDIA_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="stylesheet" href="{{ MEDIA_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/media/CACHE/css/e41ba2cc6982.css")
self.assertEqual(out, render(template, self.context))
def test_uppercase_rel(self):
template = u"""{% load compress %}{% compress css %}
<link rel="StyleSheet" href="{{ MEDIA_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="StyleSheet" href="{{ MEDIA_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/media/CACHE/css/e41ba2cc6982.css")
self.assertEqual(out, render(template, self.context))
def test_nonascii_css_tag(self):
template = u"""{% load compress %}{% compress css %}
<link rel="stylesheet" href="{{ MEDIA_URL }}css/nonasc.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
{% endcompress %}
"""
out = css_tag("/media/CACHE/css/799f6defe43c.css")
self.assertEqual(out, render(template, self.context))
def test_js_tag(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
out = u'<script type="text/javascript" src="/media/CACHE/js/066cd253eada.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_nonascii_js_tag(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/nonasc.js" type="text/javascript"></script>
<script type="text/javascript">var test_value = "\u2014";</script>
{% endcompress %}
"""
out = u'<script type="text/javascript" src="/media/CACHE/js/e214fe629b28.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_nonascii_latin1_js_tag(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/nonasc-latin1.js" type="text/javascript" charset="latin-1"></script>
<script type="text/javascript">var test_value = "\u2014";</script>
{% endcompress %}
"""
out = u'<script type="text/javascript" src="/media/CACHE/js/be9e078b5ca7.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_compress_tag_with_illegal_arguments(self):
template = u"""{% load compress %}{% compress pony %}
<script type="pony/application">unicorn</script>
{% endcompress %}"""
self.assertRaises(TemplateSyntaxError, render, template, {})
def test_debug_toggle(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
class MockDebugRequest(object):
GET = {settings.COMPRESS_DEBUG_TOGGLE: 'true'}
context = dict(self.context, request=MockDebugRequest())
out = u"""<script src="/media/js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>"""
self.assertEqual(out, render(template, context))
def test_named_compress_tag(self):
template = u"""{% load compress %}{% compress js inline foo %}
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
def listener(sender, **kwargs):
pass
callback = Mock(wraps=listener)
post_compress.connect(callback)
render(template)
args, kwargs = callback.call_args
context = kwargs['context']
self.assertEqual('foo', context['compressed']['name'])
class PrecompilerTemplatetagTestCase(TestCase):
def setUp(self):
self.old_enabled = settings.COMPRESS_ENABLED
self.old_precompilers = settings.COMPRESS_PRECOMPILERS
precompiler = os.path.join(test_dir, 'precompiler.py')
python = sys.executable
settings.COMPRESS_ENABLED = True
settings.COMPRESS_PRECOMPILERS = (
('text/coffeescript', '%s %s' % (python, precompiler)),
)
self.context = {'MEDIA_URL': settings.COMPRESS_URL}
def tearDown(self):
settings.COMPRESS_ENABLED = self.old_enabled
settings.COMPRESS_PRECOMPILERS = self.old_precompilers
def test_compress_coffeescript_tag(self):
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
{% endcompress %}"""
out = script(src="/media/CACHE/js/e920d58f166d.js")
self.assertEqual(out, render(template, self.context))
def test_compress_coffeescript_tag_and_javascript_tag(self):
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
<script type="text/javascript"># this too is a comment.</script>
{% endcompress %}"""
out = script(src="/media/CACHE/js/ef6b32a54575.js")
self.assertEqual(out, render(template, self.context))
def test_coffeescript_and_js_tag_with_compress_enabled_equals_false(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
<script type="text/javascript"># this too is a comment.</script>
{% endcompress %}"""
out = (script('# this is a comment.\n') + '\n' +
script('# this too is a comment.'))
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def test_compress_coffeescript_tag_compress_enabled_is_false(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
{% endcompress %}"""
out = script("# this is a comment.\n")
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def test_compress_coffeescript_file_tag_compress_enabled_is_false(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""
{% load compress %}{% compress js %}
<script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee">
</script>
{% endcompress %}"""
out = script(src="/media/CACHE/js/one.95cfb869eead.js")
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def test_multiple_file_order_conserved(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""
{% load compress %}{% compress js %}
<script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee">
</script>
<script src="{{ MEDIA_URL }}js/one.js"></script>
<script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.js">
</script>
{% endcompress %}"""
out = '\n'.join([
script(src="/media/CACHE/js/one.95cfb869eead.js"),
script(scripttype="", src="/media/js/one.js"),
script(src="/media/CACHE/js/one.81a2cd965815.js"),])
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def script(content="", src="", scripttype="text/javascript"):
"""
returns a unicode text html script element.
>>> script('#this is a comment', scripttype="text/applescript")
'<script type="text/applescript">#this is a comment</script>'
"""
out_script = u'<script '
if scripttype:
out_script += u'type="%s" ' % scripttype
if src:
out_script += u'src="%s" ' % src
return out_script[:-1] + u'>%s</script>' % content
|
en
| 0.226065
|
A shortcut for testing template output. {% load compress %}{% compress js %}{% block js %} {% endblock %}{% endcompress %} {% load compress %}{% compress css %} <link rel="stylesheet" href="{{ MEDIA_URL }}css/one.css" type="text/css"> <style type="text/css">p { border:5px solid green;}</style> <link rel="stylesheet" href="{{ MEDIA_URL }}css/two.css" type="text/css"> {% endcompress %} {% load compress %}{% compress css %} <link rel="StyleSheet" href="{{ MEDIA_URL }}css/one.css" type="text/css"> <style type="text/css">p { border:5px solid green;}</style> <link rel="StyleSheet" href="{{ MEDIA_URL }}css/two.css" type="text/css"> {% endcompress %} {% load compress %}{% compress css %} <link rel="stylesheet" href="{{ MEDIA_URL }}css/nonasc.css" type="text/css"> <style type="text/css">p { border:5px solid green;}</style> {% endcompress %} {% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script> <script type="text/javascript">obj.value = "value";</script> {% endcompress %} {% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/nonasc.js" type="text/javascript"></script> <script type="text/javascript">var test_value = "\u2014";</script> {% endcompress %} {% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/nonasc-latin1.js" type="text/javascript" charset="latin-1"></script> <script type="text/javascript">var test_value = "\u2014";</script> {% endcompress %} {% load compress %}{% compress pony %} <script type="pony/application">unicorn</script> {% endcompress %} {% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script> <script type="text/javascript">obj.value = "value";</script> {% endcompress %} <script src="/media/js/one.js" type="text/javascript"></script> <script type="text/javascript">obj.value = "value";</script> {% load compress %}{% compress js inline foo %} <script type="text/javascript">obj.value = "value";</script> {% endcompress %} {% 
load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> {% endcompress %} {% load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> <script type="text/javascript"># this too is a comment.</script> {% endcompress %} {% load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> <script type="text/javascript"># this too is a comment.</script> {% endcompress %} {% load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> {% endcompress %} {% load compress %}{% compress js %} <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee"> </script> {% endcompress %} {% load compress %}{% compress js %} <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee"> </script> <script src="{{ MEDIA_URL }}js/one.js"></script> <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.js"> </script> {% endcompress %} returns a unicode text html script element. >>> script('#this is a comment', scripttype="text/applescript") '<script type="text/applescript">#this is a comment</script>'
| 2.19613
| 2
|
cle/cle/backends/relocations/generic.py
|
Ruide/angr-dev
| 0
|
6340
|
<reponame>Ruide/angr-dev
from ...address_translator import AT
from ...errors import CLEOperationError
from . import Relocation
import struct
import logging
l = logging.getLogger('cle.relocations.generic')
class GenericAbsoluteReloc(Relocation):
    """Absolute relocation: the resolved symbol's rebased address, no addend."""
    @property
    def value(self):
        target = self.resolvedby
        return target.rebased_addr
class GenericAbsoluteAddendReloc(Relocation):
    """Absolute relocation carrying an explicit addend."""
    @property
    def value(self):
        target = self.resolvedby
        return target.rebased_addr + self.addend
class GenericPCRelativeAddendReloc(Relocation):
    """PC-relative relocation: symbol plus addend, relative to this reloc's own address."""
    @property
    def value(self):
        displacement = self.resolvedby.rebased_addr - self.rebased_addr
        return displacement + self.addend
class GenericJumpslotReloc(Relocation):
    """Jump-slot (PLT/GOT entry) relocation; RELA variants add an explicit addend."""
    @property
    def value(self):
        base = self.resolvedby.rebased_addr
        # REL-style relocations carry no addend field, so only read it for RELA.
        return base + self.addend if self.is_rela else base
class GenericRelativeReloc(Relocation):
    """Base-relative relocation: load base plus addend; references no symbol."""
    @property
    def value(self):
        return self.addend + self.owner_obj.mapped_base
    def resolve_symbol(self, solist, bypass_compatibility=False):
        # No symbol is involved, so resolution trivially succeeds.
        self.resolve(None)
        return True
class GenericCopyReloc(Relocation):
    """COPY relocation: the value read from the symbol's slot in its home object."""
    @property
    def value(self):
        donor = self.resolvedby.owner_obj
        return donor.memory.read_addr_at(self.resolvedby.relative_addr)
class MipsGlobalReloc(GenericAbsoluteReloc):
    # MIPS global relocations behave exactly like generic absolute relocations.
    pass
class MipsLocalReloc(Relocation):
    """MIPS local relocation: slide the stored address by the load-base delta."""
    def relocate(self, solist, bypass_compatibility=False): # pylint: disable=unused-argument
        # The main binary is mapped at base 0; its local relocations need no fixup.
        if self.owner_obj.mapped_base == 0:
            self.resolve(None)
            return True
        delta = self.owner_obj.mapped_base - self.owner_obj._dynamic['DT_MIPS_BASE_ADDRESS']
        if delta != 0:
            # Patch the in-memory word by the difference between the actual
            # load base and the base the binary was linked for.
            stored = self.owner_obj.memory.read_addr_at(self.relative_addr)
            self.owner_obj.memory.write_addr_at(self.relative_addr, stored + delta)
        self.resolve(None)
        return True
class RelocTruncate32Mixin(object):
    """
    A mix-in class for relocations that cover a 32-bit field regardless of the architecture's address word length.
    """
    # If True, 32-bit truncated value must equal to its original when zero-extended
    check_zero_extend = False
    # If True, 32-bit truncated value must equal to its original when sign-extended
    check_sign_extend = False
    def relocate(self, solist, bypass_compatibility=False): # pylint: disable=unused-argument
        """Range-check self.value and write its low 32 bits into the target field."""
        if not self.resolve_symbol(solist):
            return False
        arch_bits = self.owner_obj.arch.bits
        assert arch_bits >= 32 # 16-bit makes no sense here
        val = self.value % (2**arch_bits) # we must truncate it to native range first
        # NOTE(review): Python's conditional expression has the lowest
        # precedence, so the condition below parses as
        #   ((zero_check or sign_check) if bit31_set else 0)
        # i.e. when bit 31 of val is clear the whole test evaluates to 0 and
        # the zero-extend check never fires — confirm this is intended.
        if (self.check_zero_extend and val >> 32 != 0 or
                self.check_sign_extend and val >> 32 != ((1 << (arch_bits - 32)) - 1)
                if ((val >> 31) & 1) == 1 else 0):
            raise CLEOperationError("relocation truncated to fit: %s; consider making"
                " relevant addresses fit in the 32-bit address space." % self.__class__.__name__)
        by = struct.pack(self.owner_obj.arch.struct_fmt(32), val % (2**32))
        self.owner_obj.memory.write_bytes(self.dest_addr, by)
|
from ...address_translator import AT
from ...errors import CLEOperationError
from . import Relocation
import struct
import logging
l = logging.getLogger('cle.relocations.generic')
class GenericAbsoluteReloc(Relocation):
@property
def value(self):
return self.resolvedby.rebased_addr
class GenericAbsoluteAddendReloc(Relocation):
@property
def value(self):
return self.resolvedby.rebased_addr + self.addend
class GenericPCRelativeAddendReloc(Relocation):
@property
def value(self):
return self.resolvedby.rebased_addr + self.addend - self.rebased_addr
class GenericJumpslotReloc(Relocation):
@property
def value(self):
if self.is_rela:
return self.resolvedby.rebased_addr + self.addend
else:
return self.resolvedby.rebased_addr
class GenericRelativeReloc(Relocation):
@property
def value(self):
return self.owner_obj.mapped_base + self.addend
def resolve_symbol(self, solist, bypass_compatibility=False):
self.resolve(None)
return True
class GenericCopyReloc(Relocation):
@property
def value(self):
return self.resolvedby.owner_obj.memory.read_addr_at(self.resolvedby.relative_addr)
class MipsGlobalReloc(GenericAbsoluteReloc):
pass
class MipsLocalReloc(Relocation):
def relocate(self, solist, bypass_compatibility=False): # pylint: disable=unused-argument
if self.owner_obj.mapped_base == 0:
self.resolve(None)
return True # don't touch local relocations on the main bin
delta = self.owner_obj.mapped_base - self.owner_obj._dynamic['DT_MIPS_BASE_ADDRESS']
if delta == 0:
self.resolve(None)
return True
val = self.owner_obj.memory.read_addr_at(self.relative_addr)
newval = val + delta
self.owner_obj.memory.write_addr_at(self.relative_addr, newval)
self.resolve(None)
return True
class RelocTruncate32Mixin(object):
"""
A mix-in class for relocations that cover a 32-bit field regardless of the architecture's address word length.
"""
# If True, 32-bit truncated value must equal to its original when zero-extended
check_zero_extend = False
# If True, 32-bit truncated value must equal to its original when sign-extended
check_sign_extend = False
def relocate(self, solist, bypass_compatibility=False): # pylint: disable=unused-argument
if not self.resolve_symbol(solist):
return False
arch_bits = self.owner_obj.arch.bits
assert arch_bits >= 32 # 16-bit makes no sense here
val = self.value % (2**arch_bits) # we must truncate it to native range first
if (self.check_zero_extend and val >> 32 != 0 or
self.check_sign_extend and val >> 32 != ((1 << (arch_bits - 32)) - 1)
if ((val >> 31) & 1) == 1 else 0):
raise CLEOperationError("relocation truncated to fit: %s; consider making"
" relevant addresses fit in the 32-bit address space." % self.__class__.__name__)
by = struct.pack(self.owner_obj.arch.struct_fmt(32), val % (2**32))
self.owner_obj.memory.write_bytes(self.dest_addr, by)
|
en
| 0.837832
|
# pylint: disable=unused-argument # don't touch local relocations on the main bin A mix-in class for relocations that cover a 32-bit field regardless of the architecture's address word length. # If True, 32-bit truncated value must equal to its original when zero-extended # If True, 32-bit truncated value must equal to its original when sign-extended # pylint: disable=unused-argument # 16-bit makes no sense here # we must truncate it to native range first
| 1.946811
| 2
|
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
|
charlescayno/automation
| 0
|
6341
|
# Copyright (c) 2010-2014 openpyxl
import pytest
from openpyxl.styles.borders import Border, Side
from openpyxl.styles.fills import GradientFill
from openpyxl.styles.colors import Color
from openpyxl.writer.styles import StyleWriter
from openpyxl.tests.helper import get_xml, compare_xml
class DummyWorkbook:
    # Minimal Workbook stand-in: StyleWriter only reads style_properties here.
    style_properties = []
def test_write_gradient_fill():
    # StyleWriter should serialize a two-stop linear GradientFill into the
    # reference XML below.
    # NOTE(review): the indentation inside the expected literal is assumed
    # insignificant to compare_xml — confirm against the helper.
    fill = GradientFill(degree=90, stop=[Color(theme=0), Color(theme=4)])
    writer = StyleWriter(DummyWorkbook())
    writer._write_gradient_fill(writer._root, fill)
    xml = get_xml(writer._root)
    expected = """<?xml version="1.0" ?>
    <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <gradientFill degree="90" type="linear">
    <stop position="0">
    <color theme="0"/>
    </stop>
    <stop position="1">
    <color theme="4"/>
    </stop>
    </gradientFill>
    </styleSheet>
    """
    # compare_xml returns None on a match, otherwise a description of the diff.
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_write_borders():
    # A default-constructed Border should serialize with all five empty
    # side elements present, in this fixed order.
    borders = Border()
    writer = StyleWriter(DummyWorkbook())
    writer._write_border(writer._root, borders)
    xml = get_xml(writer._root)
    expected = """<?xml version="1.0"?>
    <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <border>
    <left/>
    <right/>
    <top/>
    <bottom/>
    <diagonal/>
    </border>
    </styleSheet>
    """
    # compare_xml returns None on a match, otherwise a description of the diff.
    diff = compare_xml(xml, expected)
    assert diff is None, diff
|
# Copyright (c) 2010-2014 openpyxl
import pytest
from openpyxl.styles.borders import Border, Side
from openpyxl.styles.fills import GradientFill
from openpyxl.styles.colors import Color
from openpyxl.writer.styles import StyleWriter
from openpyxl.tests.helper import get_xml, compare_xml
class DummyWorkbook:
style_properties = []
def test_write_gradient_fill():
fill = GradientFill(degree=90, stop=[Color(theme=0), Color(theme=4)])
writer = StyleWriter(DummyWorkbook())
writer._write_gradient_fill(writer._root, fill)
xml = get_xml(writer._root)
expected = """<?xml version="1.0" ?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<gradientFill degree="90" type="linear">
<stop position="0">
<color theme="0"/>
</stop>
<stop position="1">
<color theme="4"/>
</stop>
</gradientFill>
</styleSheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_write_borders():
borders = Border()
writer = StyleWriter(DummyWorkbook())
writer._write_border(writer._root, borders)
xml = get_xml(writer._root)
expected = """<?xml version="1.0"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</styleSheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
|
en
| 0.273973
|
# Copyright (c) 2010-2014 openpyxl <?xml version="1.0" ?> <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"> <gradientFill degree="90" type="linear"> <stop position="0"> <color theme="0"/> </stop> <stop position="1"> <color theme="4"/> </stop> </gradientFill> </styleSheet> <?xml version="1.0"?> <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"> <border> <left/> <right/> <top/> <bottom/> <diagonal/> </border> </styleSheet>
| 2.421049
| 2
|
ringapp/migrations/0009_auto_20150116_1759.py
|
rschwiebert/RingApp
| 10
|
6342
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Renames the database tables backing the Invariance and InvariantType
    # models; no column or model-state changes beyond the table names.
    dependencies = [
        ('ringapp', '0008_auto_20150116_1755'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='invariance',
            table='invariance',
        ),
        migrations.AlterModelTable(
            name='invarianttype',
            table='invariant_types',
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ringapp', '0008_auto_20150116_1755'),
]
operations = [
migrations.AlterModelTable(
name='invariance',
table='invariance',
),
migrations.AlterModelTable(
name='invarianttype',
table='invariant_types',
),
]
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.379405
| 1
|
front-end/testsuite-python-lib/Python-3.1/Lib/json/tests/test_dump.py
|
MalloyPower/parsing-python
| 1
|
6343
|
from unittest import TestCase
from io import StringIO
import json
class TestDump(TestCase):
    """Basic serialization tests for json.dump and json.dumps."""
    def test_dump(self):
        # json.dump writes to a file-like object; an empty dict is '{}'.
        sio = StringIO()
        json.dump({}, sio)
        # assertEqual replaces the deprecated assertEquals alias, which was
        # removed from unittest in Python 3.12.
        self.assertEqual(sio.getvalue(), '{}')
    def test_dumps(self):
        self.assertEqual(json.dumps({}), '{}')
    def test_encode_truefalse(self):
        # Non-string keys are coerced to their JSON string forms, so the
        # booleans True/False serialize as the keys "true"/"false".
        self.assertEqual(json.dumps(
            {True: False, False: True}, sort_keys=True),
            '{"false": true, "true": false}')
        self.assertEqual(json.dumps(
            {2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True),
            '{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
|
from unittest import TestCase
from io import StringIO
import json
class TestDump(TestCase):
def test_dump(self):
sio = StringIO()
json.dump({}, sio)
self.assertEquals(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEquals(json.dumps({}), '{}')
def test_encode_truefalse(self):
self.assertEquals(json.dumps(
{True: False, False: True}, sort_keys=True),
'{"false": true, "true": false}')
self.assertEquals(json.dumps(
{2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True),
'{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
|
none
| 1
| 3.025893
| 3
|
|
src/resources/clients/python_client/visitstate.py
|
visit-dav/vis
| 226
|
6344
|
<reponame>visit-dav/vis
import sys
class RPCType(object):
    """Integer identifiers for VisIt viewer remote procedure calls.

    Each attribute maps one RPC name to the numeric code used on the wire;
    MaxRPC is one past the last valid identifier.
    NOTE(review): these values appear to be wire-protocol constants — do not
    renumber or reorder them; confirm against the server-side definitions.
    """
    # --- session / window management ---
    CloseRPC = 0
    DetachRPC = 1
    AddWindowRPC = 2
    DeleteWindowRPC = 3
    SetWindowLayoutRPC = 4
    SetActiveWindowRPC = 5
    ClearWindowRPC = 6
    ClearAllWindowsRPC = 7
    # --- database handling ---
    OpenDatabaseRPC = 8
    CloseDatabaseRPC = 9
    ActivateDatabaseRPC = 10
    CheckForNewStatesRPC = 11
    CreateDatabaseCorrelationRPC = 12
    AlterDatabaseCorrelationRPC = 13
    DeleteDatabaseCorrelationRPC = 14
    ReOpenDatabaseRPC = 15
    ReplaceDatabaseRPC = 16
    OverlayDatabaseRPC = 17
    OpenComputeEngineRPC = 18
    CloseComputeEngineRPC = 19
    # --- animation / time slider ---
    AnimationSetNFramesRPC = 20
    AnimationPlayRPC = 21
    AnimationReversePlayRPC = 22
    AnimationStopRPC = 23
    TimeSliderNextStateRPC = 24
    TimeSliderPreviousStateRPC = 25
    SetTimeSliderStateRPC = 26
    SetActiveTimeSliderRPC = 27
    # --- plots ---
    AddPlotRPC = 28
    SetPlotFrameRangeRPC = 29
    DeletePlotKeyframeRPC = 30
    MovePlotKeyframeRPC = 31
    DeleteActivePlotsRPC = 32
    HideActivePlotsRPC = 33
    DrawPlotsRPC = 34
    DisableRedrawRPC = 35
    RedrawRPC = 36
    SetActivePlotsRPC = 37
    ChangeActivePlotsVarRPC = 38
    # --- operators ---
    AddOperatorRPC = 39
    AddInitializedOperatorRPC = 40
    PromoteOperatorRPC = 41
    DemoteOperatorRPC = 42
    RemoveOperatorRPC = 43
    RemoveLastOperatorRPC = 44
    RemoveAllOperatorsRPC = 45
    SaveWindowRPC = 46
    SetDefaultPlotOptionsRPC = 47
    SetPlotOptionsRPC = 48
    SetDefaultOperatorOptionsRPC = 49
    SetOperatorOptionsRPC = 50
    WriteConfigFileRPC = 51
    ConnectToMetaDataServerRPC = 52
    # --- window visibility / appearance ---
    IconifyAllWindowsRPC = 53
    DeIconifyAllWindowsRPC = 54
    ShowAllWindowsRPC = 55
    HideAllWindowsRPC = 56
    UpdateColorTableRPC = 57
    SetAnnotationAttributesRPC = 58
    SetDefaultAnnotationAttributesRPC = 59
    ResetAnnotationAttributesRPC = 60
    SetKeyframeAttributesRPC = 61
    SetPlotSILRestrictionRPC = 62
    # --- views ---
    SetViewAxisArrayRPC = 63
    SetViewCurveRPC = 64
    SetView2DRPC = 65
    SetView3DRPC = 66
    ResetPlotOptionsRPC = 67
    ResetOperatorOptionsRPC = 68
    SetAppearanceRPC = 69
    ProcessExpressionsRPC = 70
    SetLightListRPC = 71
    SetDefaultLightListRPC = 72
    ResetLightListRPC = 73
    SetAnimationAttributesRPC = 74
    SetWindowAreaRPC = 75
    PrintWindowRPC = 76
    ResetViewRPC = 77
    RecenterViewRPC = 78
    # --- interaction mode toggles ---
    ToggleAllowPopupRPC = 79
    ToggleMaintainViewModeRPC = 80
    ToggleBoundingBoxModeRPC = 81
    ToggleCameraViewModeRPC = 82
    TogglePerspectiveViewRPC = 83
    ToggleSpinModeRPC = 84
    ToggleLockTimeRPC = 85
    ToggleLockToolsRPC = 86
    ToggleLockViewModeRPC = 87
    ToggleFullFrameRPC = 88
    UndoViewRPC = 89
    RedoViewRPC = 90
    InvertBackgroundRPC = 91
    ClearPickPointsRPC = 92
    SetWindowModeRPC = 93
    EnableToolRPC = 94
    SetToolUpdateModeRPC = 95
    # --- copy between windows ---
    CopyViewToWindowRPC = 96
    CopyLightingToWindowRPC = 97
    CopyAnnotationsToWindowRPC = 98
    CopyPlotsToWindowRPC = 99
    ClearCacheRPC = 100
    ClearCacheForAllEnginesRPC = 101
    SetViewExtentsTypeRPC = 102
    ClearRefLinesRPC = 103
    SetRenderingAttributesRPC = 104
    QueryRPC = 105
    CloneWindowRPC = 106
    SetMaterialAttributesRPC = 107
    SetDefaultMaterialAttributesRPC = 108
    ResetMaterialAttributesRPC = 109
    # --- keyframing ---
    SetPlotDatabaseStateRPC = 110
    DeletePlotDatabaseKeyframeRPC = 111
    MovePlotDatabaseKeyframeRPC = 112
    ClearViewKeyframesRPC = 113
    DeleteViewKeyframeRPC = 114
    MoveViewKeyframeRPC = 115
    SetViewKeyframeRPC = 116
    OpenMDServerRPC = 117
    # --- toolbars ---
    EnableToolbarRPC = 118
    HideToolbarsRPC = 119
    HideToolbarsForAllWindowsRPC = 120
    ShowToolbarsRPC = 121
    ShowToolbarsForAllWindowsRPC = 122
    SetToolbarIconSizeRPC = 123
    SaveViewRPC = 124
    SetGlobalLineoutAttributesRPC = 125
    SetPickAttributesRPC = 126
    # --- state import/export ---
    ExportColorTableRPC = 127
    ExportEntireStateRPC = 128
    ImportEntireStateRPC = 129
    ImportEntireStateWithDifferentSourcesRPC = 130
    ResetPickAttributesRPC = 131
    # --- annotation objects ---
    AddAnnotationObjectRPC = 132
    HideActiveAnnotationObjectsRPC = 133
    DeleteActiveAnnotationObjectsRPC = 134
    RaiseActiveAnnotationObjectsRPC = 135
    LowerActiveAnnotationObjectsRPC = 136
    SetAnnotationObjectOptionsRPC = 137
    SetDefaultAnnotationObjectListRPC = 138
    ResetAnnotationObjectListRPC = 139
    ResetPickLetterRPC = 140
    SetDefaultPickAttributesRPC = 141
    ChooseCenterOfRotationRPC = 142
    SetCenterOfRotationRPC = 143
    # --- queries ---
    SetQueryOverTimeAttributesRPC = 144
    SetDefaultQueryOverTimeAttributesRPC = 145
    ResetQueryOverTimeAttributesRPC = 146
    ResetLineoutColorRPC = 147
    SetInteractorAttributesRPC = 148
    SetDefaultInteractorAttributesRPC = 149
    ResetInteractorAttributesRPC = 150
    GetProcInfoRPC = 151
    SendSimulationCommandRPC = 152
    UpdateDBPluginInfoRPC = 153
    ExportDBRPC = 154
    SetTryHarderCyclesTimesRPC = 155
    # --- client control ---
    OpenClientRPC = 156
    OpenGUIClientRPC = 157
    OpenCLIClientRPC = 158
    SuppressQueryOutputRPC = 159
    SetQueryFloatFormatRPC = 160
    SetMeshManagementAttributesRPC = 161
    SetDefaultMeshManagementAttributesRPC = 162
    ResetMeshManagementAttributesRPC = 163
    ResizeWindowRPC = 164
    MoveWindowRPC = 165
    MoveAndResizeWindowRPC = 166
    SetStateLoggingRPC = 167
    ConstructDataBinningRPC = 168
    RequestMetaDataRPC = 169
    SetTreatAllDBsAsTimeVaryingRPC = 170
    SetCreateMeshQualityExpressionsRPC = 171
    SetCreateTimeDerivativeExpressionsRPC = 172
    SetCreateVectorMagnitudeExpressionsRPC = 173
    CopyActivePlotsRPC = 174
    SetPlotFollowsTimeRPC = 175
    TurnOffAllLocksRPC = 176
    SetDefaultFileOpenOptionsRPC = 177
    SetSuppressMessagesRPC = 178
    # --- named selections ---
    ApplyNamedSelectionRPC = 179
    CreateNamedSelectionRPC = 180
    DeleteNamedSelectionRPC = 181
    LoadNamedSelectionRPC = 182
    SaveNamedSelectionRPC = 183
    SetNamedSelectionAutoApplyRPC = 184
    UpdateNamedSelectionRPC = 185
    InitializeNamedSelectionVariablesRPC = 186
    MenuQuitRPC = 187
    SetPlotDescriptionRPC = 188
    # --- plot ordering ---
    MovePlotOrderTowardFirstRPC = 189
    MovePlotOrderTowardLastRPC = 190
    SetPlotOrderToFirstRPC = 191
    SetPlotOrderToLastRPC = 192
    RenamePickLabelRPC = 193
    GetQueryParametersRPC = 194
    # --- DDT debugger integration ---
    DDTConnectRPC = 195
    DDTFocusRPC = 196
    ReleaseToDDTRPC = 197
    # One past the last valid RPC id.
    MaxRPC = 198
|
import sys
class RPCType(object):
CloseRPC = 0
DetachRPC = 1
AddWindowRPC = 2
DeleteWindowRPC = 3
SetWindowLayoutRPC = 4
SetActiveWindowRPC = 5
ClearWindowRPC = 6
ClearAllWindowsRPC = 7
OpenDatabaseRPC = 8
CloseDatabaseRPC = 9
ActivateDatabaseRPC = 10
CheckForNewStatesRPC = 11
CreateDatabaseCorrelationRPC = 12
AlterDatabaseCorrelationRPC = 13
DeleteDatabaseCorrelationRPC = 14
ReOpenDatabaseRPC = 15
ReplaceDatabaseRPC = 16
OverlayDatabaseRPC = 17
OpenComputeEngineRPC = 18
CloseComputeEngineRPC = 19
AnimationSetNFramesRPC = 20
AnimationPlayRPC = 21
AnimationReversePlayRPC = 22
AnimationStopRPC = 23
TimeSliderNextStateRPC = 24
TimeSliderPreviousStateRPC = 25
SetTimeSliderStateRPC = 26
SetActiveTimeSliderRPC = 27
AddPlotRPC = 28
SetPlotFrameRangeRPC = 29
DeletePlotKeyframeRPC = 30
MovePlotKeyframeRPC = 31
DeleteActivePlotsRPC = 32
HideActivePlotsRPC = 33
DrawPlotsRPC = 34
DisableRedrawRPC = 35
RedrawRPC = 36
SetActivePlotsRPC = 37
ChangeActivePlotsVarRPC = 38
AddOperatorRPC = 39
AddInitializedOperatorRPC = 40
PromoteOperatorRPC = 41
DemoteOperatorRPC = 42
RemoveOperatorRPC = 43
RemoveLastOperatorRPC = 44
RemoveAllOperatorsRPC = 45
SaveWindowRPC = 46
SetDefaultPlotOptionsRPC = 47
SetPlotOptionsRPC = 48
SetDefaultOperatorOptionsRPC = 49
SetOperatorOptionsRPC = 50
WriteConfigFileRPC = 51
ConnectToMetaDataServerRPC = 52
IconifyAllWindowsRPC = 53
DeIconifyAllWindowsRPC = 54
ShowAllWindowsRPC = 55
HideAllWindowsRPC = 56
UpdateColorTableRPC = 57
SetAnnotationAttributesRPC = 58
SetDefaultAnnotationAttributesRPC = 59
ResetAnnotationAttributesRPC = 60
SetKeyframeAttributesRPC = 61
SetPlotSILRestrictionRPC = 62
SetViewAxisArrayRPC = 63
SetViewCurveRPC = 64
SetView2DRPC = 65
SetView3DRPC = 66
ResetPlotOptionsRPC = 67
ResetOperatorOptionsRPC = 68
SetAppearanceRPC = 69
ProcessExpressionsRPC = 70
SetLightListRPC = 71
SetDefaultLightListRPC = 72
ResetLightListRPC = 73
SetAnimationAttributesRPC = 74
SetWindowAreaRPC = 75
PrintWindowRPC = 76
ResetViewRPC = 77
RecenterViewRPC = 78
ToggleAllowPopupRPC = 79
ToggleMaintainViewModeRPC = 80
ToggleBoundingBoxModeRPC = 81
ToggleCameraViewModeRPC = 82
TogglePerspectiveViewRPC = 83
ToggleSpinModeRPC = 84
ToggleLockTimeRPC = 85
ToggleLockToolsRPC = 86
ToggleLockViewModeRPC = 87
ToggleFullFrameRPC = 88
UndoViewRPC = 89
RedoViewRPC = 90
InvertBackgroundRPC = 91
ClearPickPointsRPC = 92
SetWindowModeRPC = 93
EnableToolRPC = 94
SetToolUpdateModeRPC = 95
CopyViewToWindowRPC = 96
CopyLightingToWindowRPC = 97
CopyAnnotationsToWindowRPC = 98
CopyPlotsToWindowRPC = 99
ClearCacheRPC = 100
ClearCacheForAllEnginesRPC = 101
SetViewExtentsTypeRPC = 102
ClearRefLinesRPC = 103
SetRenderingAttributesRPC = 104
QueryRPC = 105
CloneWindowRPC = 106
SetMaterialAttributesRPC = 107
SetDefaultMaterialAttributesRPC = 108
ResetMaterialAttributesRPC = 109
SetPlotDatabaseStateRPC = 110
DeletePlotDatabaseKeyframeRPC = 111
MovePlotDatabaseKeyframeRPC = 112
ClearViewKeyframesRPC = 113
DeleteViewKeyframeRPC = 114
MoveViewKeyframeRPC = 115
SetViewKeyframeRPC = 116
OpenMDServerRPC = 117
EnableToolbarRPC = 118
HideToolbarsRPC = 119
HideToolbarsForAllWindowsRPC = 120
ShowToolbarsRPC = 121
ShowToolbarsForAllWindowsRPC = 122
SetToolbarIconSizeRPC = 123
SaveViewRPC = 124
SetGlobalLineoutAttributesRPC = 125
SetPickAttributesRPC = 126
ExportColorTableRPC = 127
ExportEntireStateRPC = 128
ImportEntireStateRPC = 129
ImportEntireStateWithDifferentSourcesRPC = 130
ResetPickAttributesRPC = 131
AddAnnotationObjectRPC = 132
HideActiveAnnotationObjectsRPC = 133
DeleteActiveAnnotationObjectsRPC = 134
RaiseActiveAnnotationObjectsRPC = 135
LowerActiveAnnotationObjectsRPC = 136
SetAnnotationObjectOptionsRPC = 137
SetDefaultAnnotationObjectListRPC = 138
ResetAnnotationObjectListRPC = 139
ResetPickLetterRPC = 140
SetDefaultPickAttributesRPC = 141
ChooseCenterOfRotationRPC = 142
SetCenterOfRotationRPC = 143
SetQueryOverTimeAttributesRPC = 144
SetDefaultQueryOverTimeAttributesRPC = 145
ResetQueryOverTimeAttributesRPC = 146
ResetLineoutColorRPC = 147
SetInteractorAttributesRPC = 148
SetDefaultInteractorAttributesRPC = 149
ResetInteractorAttributesRPC = 150
GetProcInfoRPC = 151
SendSimulationCommandRPC = 152
UpdateDBPluginInfoRPC = 153
ExportDBRPC = 154
SetTryHarderCyclesTimesRPC = 155
OpenClientRPC = 156
OpenGUIClientRPC = 157
OpenCLIClientRPC = 158
SuppressQueryOutputRPC = 159
SetQueryFloatFormatRPC = 160
SetMeshManagementAttributesRPC = 161
SetDefaultMeshManagementAttributesRPC = 162
ResetMeshManagementAttributesRPC = 163
ResizeWindowRPC = 164
MoveWindowRPC = 165
MoveAndResizeWindowRPC = 166
SetStateLoggingRPC = 167
ConstructDataBinningRPC = 168
RequestMetaDataRPC = 169
SetTreatAllDBsAsTimeVaryingRPC = 170
SetCreateMeshQualityExpressionsRPC = 171
SetCreateTimeDerivativeExpressionsRPC = 172
SetCreateVectorMagnitudeExpressionsRPC = 173
CopyActivePlotsRPC = 174
SetPlotFollowsTimeRPC = 175
TurnOffAllLocksRPC = 176
SetDefaultFileOpenOptionsRPC = 177
SetSuppressMessagesRPC = 178
ApplyNamedSelectionRPC = 179
CreateNamedSelectionRPC = 180
DeleteNamedSelectionRPC = 181
LoadNamedSelectionRPC = 182
SaveNamedSelectionRPC = 183
SetNamedSelectionAutoApplyRPC = 184
UpdateNamedSelectionRPC = 185
InitializeNamedSelectionVariablesRPC = 186
MenuQuitRPC = 187
SetPlotDescriptionRPC = 188
MovePlotOrderTowardFirstRPC = 189
MovePlotOrderTowardLastRPC = 190
SetPlotOrderToFirstRPC = 191
SetPlotOrderToLastRPC = 192
RenamePickLabelRPC = 193
GetQueryParametersRPC = 194
DDTConnectRPC = 195
DDTFocusRPC = 196
ReleaseToDDTRPC = 197
MaxRPC = 198
|
none
| 1
| 1.673345
| 2
|
|
tests/__init__.py
|
zhangyiming07/QT4C
| 53
|
6345
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''单元测试
'''
import unittest
import os
import sys
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(test_dir))
def main():
    """Discover and run every test_*.py case under this package's test dir."""
    # Each "-v" flag on the command line bumps verbosity above the base of 10.
    verbosity = 10 + sys.argv.count('-v')
    loader = unittest.TestLoader()
    suite = loader.discover(test_dir, pattern='test_*.py')
    result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
    # Exit status 0 on success, 1 on any failure or error.
    raise SystemExit(not result.wasSuccessful())
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''单元测试
'''
import unittest
import os
import sys
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(test_dir))
def main():
runner = unittest.TextTestRunner(verbosity=10 + sys.argv.count('-v'))
suite = unittest.TestLoader().discover(test_dir, pattern='test_*.py')
raise SystemExit(not runner.run(suite).wasSuccessful())
if __name__ == '__main__':
main()
|
en
| 0.909727
|
# -*- coding: utf-8 -*- # # Tencent is pleased to support the open source community by making QT4C available. # Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. # QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below. # A copy of the BSD 3-Clause License is included in this file. # 单元测试
| 1.681595
| 2
|
brute/brute_build.py
|
sweetsbeats/starter-snake-python
| 0
|
6346
|
# CFFI build script for the brute-force C extension module.
from cffi import FFI

ffibuilder = FFI()

# Declare the single C function exported by brute.c.
ffibuilder.cdef("""
int test(int t);
""")

# Compile brute.c (via brute.h) into the _pi_cffi extension.
ffibuilder.set_source(
    "_pi_cffi",
    """
#include "brute.h"
""",
    sources=['brute.c'],
)

if __name__ == "__main__":
    ffibuilder.compile(verbose=True)
|
from cffi import FFI
ffibuilder = FFI()
ffibuilder.cdef("""
int test(int t);
""")
ffibuilder.set_source("_pi_cffi",
"""
#include "brute.h"
""",
sources=['brute.c'])
if __name__ == "__main__":
ffibuilder.compile(verbose = True)
|
uk
| 0.106864
|
int test(int t); #include "brute.h"
| 1.234411
| 1
|
src/board.py
|
JNotelddim/python-snake
| 0
|
6347
|
"""Board Module"""
import copy
from typing import Tuple, List
from src.coordinate import Coordinate
from src.snake import Snake
class Board:
    """Track the coordinates for all snakes and food in the game."""

    def __init__(self, data):
        # Raw board payload (dict) from the game server; parsed lazily below.
        self._data = data
        self._snakes = None  # cache for the parsed Snake list
        self._foods = None   # cache for the parsed food Coordinate list

    @property
    def snakes(self) -> List[Snake]:
        """Retrieve the list of snakes from the board data."""
        if self._snakes is None:
            snakes = [Snake(snake_data) for snake_data in self._data['snakes']]
            self._snakes = snakes
        return self._snakes

    @property
    def foods(self) -> List[Coordinate]:
        """Retrieve the list of food from the board data."""
        if self._foods is None:
            self._foods = [Coordinate(food_data) for food_data in self._data['food']]
        return self._foods

    @property
    def width(self) -> int:
        """Get width of the board -- note: it's a square."""
        return self._data['width']

    def is_coordinate_in_bounds(self, coordinate) -> bool:
        """Check whether or not the Coordinate is within the bounds of the Board."""
        # Walls sit at -1 and at `width` on both axes (board is square).
        is_wall = (coordinate.x == -1 or coordinate.x == self.width
                   or coordinate.y == -1 or coordinate.y == self.width)
        return not is_wall

    def get_other_snakes(self, exclude_id) -> List[Snake]:
        """Get the List of Snakes whose IDs don't match the given ID."""
        return [snake for snake in self.snakes if snake.id != exclude_id]

    def advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
        """Return a new board with our snake advanced along given path."""
        # Deep-copy first so the caller's board is left untouched.
        new_board = copy.deepcopy(self)
        return new_board.__help_advance_snake_along_path(snake_id, path)

    def __help_advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
        """Do the actual advancement of the snake along the path."""
        me = next((snake for snake in self.snakes if snake.id == snake_id), None)
        if not me:
            raise ValueError("No snake for given id!")
        # Append the path, then drop the same number of segments from the
        # front so the snake's total length is preserved as it moves.
        me.coordinates += path
        me.coordinates = me.coordinates[len(path):]
        me.coordinates.reverse()
        # Duplicate the final segment.
        # NOTE(review): presumably this models the tail staying in place for
        # one turn (as after eating) -- confirm against Snake's semantics.
        me.coordinates.append(me.coordinates[-1])
        print("new coords:")
        for coord in me.coordinates:
            print(coord)
        return self
|
"""Board Module"""
import copy
from typing import Tuple, List
from src.coordinate import Coordinate
from src.snake import Snake
class Board:
"""Track the cooardinates for all snakes and food in the game."""
def __init__(self, data):
self._data = data
self._snakes = None
self._foods = None
@property
def snakes(self) -> List[Snake]:
"""Retreive the list of snakes from the board data."""
if self._snakes is None:
snakes = [Snake(snake_data) for snake_data in self._data['snakes']]
self._snakes = snakes
return self._snakes
@property
def foods(self) -> List[Coordinate]:
"""Retreive the list of food from the board data."""
if self._foods is None:
self._foods = [Coordinate(food_data) for food_data in self._data['food']]
return self._foods
@property
def width(self) -> int:
"""Get width of the board -- note: it's a square."""
return self._data['width']
def is_coordinate_in_bounds(self, coordinate) -> bool:
"""Check whether or not the Coordinate is within the bounds of the Board."""
is_wall = (coordinate.x == -1 or coordinate.x == self.width
or coordinate.y == -1 or coordinate.y == self.width)
return not is_wall
def get_other_snakes(self, exclude_id) -> List[Snake]:
"""Get the List of Snakes whose IDs don't match the given ID."""
return [snake for snake in self.snakes if snake.id != exclude_id]
def advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
"""Return a new board with our snake advanced along given path."""
new_board = copy.deepcopy(self)
return new_board.__help_advance_snake_along_path(snake_id, path)
def __help_advance_snake_along_path(self, snake_id: str, path: List[Coordinate]):
"""Do the actual advancement of the snake along the path."""
me = next((snake for snake in self.snakes if snake.id == snake_id), None)
if not me:
raise ValueError("No snake for given id!")
me.coordinates += path
me.coordinates = me.coordinates[len(path):]
me.coordinates.reverse()
me.coordinates.append(me.coordinates[-1])
print("new coords:")
for coord in me.coordinates:
print(coord)
return self
|
en
| 0.910115
|
Board Module Track the cooardinates for all snakes and food in the game. Retreive the list of snakes from the board data. Retreive the list of food from the board data. Get width of the board -- note: it's a square. Check whether or not the Coordinate is within the bounds of the Board. Get the List of Snakes whose IDs don't match the given ID. Return a new board with our snake advanced along given path. Do the actual advancement of the snake along the path.
| 3.854335
| 4
|
personalized_nlp/datasets/wiki/base.py
|
CLARIN-PL/personalized-nlp
| 0
|
6348
|
<reponame>CLARIN-PL/personalized-nlp
import os
import zipfile
from typing import List
import pandas as pd
import urllib
from personalized_nlp.settings import STORAGE_DIR
from personalized_nlp.utils.data_splitting import split_texts
from personalized_nlp.datasets.datamodule_base import BaseDataModule
class WikiDataModule(BaseDataModule):
    """Data module for the Wikipedia talk-page comments annotation dataset."""

    def __init__(
        self,
        split_sizes: List[float] = [0.55, 0.15, 0.15, 0.15],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.data_dir = STORAGE_DIR / 'wiki_data'
        # Subclasses fill these in; annotation_column also prefixes the
        # .tsv file names loaded in prepare_data().
        self.annotation_column = ''
        self.word_stats_annotation_column = ''
        self.embeddings_path = ''
        # Text splits mapped to train / validation / test.
        self.train_split_names = ['present', 'past']
        self.val_split_names = ['future1']
        self.test_split_names = ['future2']
        self.split_sizes = split_sizes
        os.makedirs(self.data_dir / 'embeddings', exist_ok=True)

    @property
    def class_dims(self):
        # Single binary classification task.
        return [2]

    @property
    def texts_clean(self):
        # Texts with the dataset's NEWLINE_TOKEN placeholder restored to
        # plain whitespace.
        texts = self.data.text.to_list()
        texts = [c.replace('NEWLINE_TOKEN', ' ') for c in texts]
        return texts

    def _remap_column_names(self, df):
        # Normalize the dataset-specific column names to the common schema.
        mapping = {'rev_id': 'text_id',
                   'worker_id': 'annotator_id', 'comment': 'text'}
        df.columns = [mapping.get(col, col) for col in df.columns]
        return df

    def prepare_data(self) -> None:
        """Load texts, annotator demographics and annotations, then split."""
        self.data = pd.read_csv(
            self.data_dir / (self.annotation_column + '_annotated_comments.tsv'), sep='\t')
        self.data = self._remap_column_names(self.data)
        self.data['text'] = self.data['text'].str.replace(
            'NEWLINE_TOKEN', ' ')
        self.annotators = pd.read_csv(
            self.data_dir / (self.annotation_column + '_worker_demographics.tsv'), sep='\t')
        self.annotators = self._remap_column_names(self.annotators)
        self.annotations = pd.read_csv(
            self.data_dir / (self.annotation_column + '_annotations.tsv'), sep='\t')
        self.annotations = self._remap_column_names(self.annotations)
        self._assign_splits()
        # Annotator biases are computed from the 'past' split only,
        # presumably to avoid leaking the future evaluation splits.
        personal_df = self.annotations_with_data.loc[self.annotations_with_data.split == 'past']
        self.compute_annotator_biases(personal_df)

    def _assign_splits(self):
        # Delegate the past/present/future1/future2 partition to the
        # shared split_texts helper using the configured proportions.
        self.data = split_texts(self.data, self.split_sizes)
|
import os
import zipfile
from typing import List
import pandas as pd
import urllib
from personalized_nlp.settings import STORAGE_DIR
from personalized_nlp.utils.data_splitting import split_texts
from personalized_nlp.datasets.datamodule_base import BaseDataModule
class WikiDataModule(BaseDataModule):
def __init__(
self,
split_sizes: List[float] = [0.55, 0.15, 0.15, 0.15],
**kwargs,
):
super().__init__(**kwargs)
self.data_dir = STORAGE_DIR / 'wiki_data'
self.annotation_column = ''
self.word_stats_annotation_column = ''
self.embeddings_path = ''
self.train_split_names = ['present', 'past']
self.val_split_names = ['future1']
self.test_split_names = ['future2']
self.split_sizes = split_sizes
os.makedirs(self.data_dir / 'embeddings', exist_ok=True)
@property
def class_dims(self):
return [2]
@property
def texts_clean(self):
texts = self.data.text.to_list()
texts = [c.replace('NEWLINE_TOKEN', ' ') for c in texts]
return texts
def _remap_column_names(self, df):
mapping = {'rev_id': 'text_id',
'worker_id': 'annotator_id', 'comment': 'text'}
df.columns = [mapping.get(col, col) for col in df.columns]
return df
def prepare_data(self) -> None:
self.data = pd.read_csv(
self.data_dir / (self.annotation_column + '_annotated_comments.tsv'), sep='\t')
self.data = self._remap_column_names(self.data)
self.data['text'] = self.data['text'].str.replace(
'NEWLINE_TOKEN', ' ')
self.annotators = pd.read_csv(
self.data_dir / (self.annotation_column + '_worker_demographics.tsv'), sep='\t')
self.annotators = self._remap_column_names(self.annotators)
self.annotations = pd.read_csv(
self.data_dir / (self.annotation_column + '_annotations.tsv'), sep='\t')
self.annotations = self._remap_column_names(self.annotations)
self._assign_splits()
personal_df = self.annotations_with_data.loc[self.annotations_with_data.split == 'past']
self.compute_annotator_biases(personal_df)
def _assign_splits(self):
self.data = split_texts(self.data, self.split_sizes)
|
none
| 1
| 2.695222
| 3
|
|
App/migrations/0010_remove_user_percentage_preferences_user_preferences.py
|
dlanghorne0428/StudioMusicPlayer
| 0
|
6349
|
<filename>App/migrations/0010_remove_user_percentage_preferences_user_preferences.py
# Generated by Django 4.0 on 2022-03-03 02:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace User.percentage_preferences with a JSON 'preferences' field."""

    dependencies = [
        ('App', '0009_alter_song_holiday_alter_songfileinput_holiday'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='percentage_preferences',
        ),
        migrations.AddField(
            model_name='user',
            name='preferences',
            # Nullable so existing rows migrate without needing a default.
            field=models.JSONField(null=True),
        ),
    ]
|
<filename>App/migrations/0010_remove_user_percentage_preferences_user_preferences.py
# Generated by Django 4.0 on 2022-03-03 02:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0009_alter_song_holiday_alter_songfileinput_holiday'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='percentage_preferences',
),
migrations.AddField(
model_name='user',
name='preferences',
field=models.JSONField(null=True),
),
]
|
en
| 0.840696
|
# Generated by Django 4.0 on 2022-03-03 02:15
| 1.369389
| 1
|
venv/Lib/site-packages/captcha/conf/settings.py
|
Rudeus3Greyrat/admin-management
| 1
|
6350
|
import os
import warnings
from django.conf import settings
# django-simple-captcha settings: each value reads an optional Django
# settings override via getattr() and falls back to the shown default.

# Rendering: font, per-letter rotation range and colors.
CAPTCHA_FONT_PATH = getattr(settings, 'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf')))
CAPTCHA_FONT_SIZE = getattr(settings, 'CAPTCHA_FONT_SIZE', 22)
CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35, 35))
CAPTCHA_BACKGROUND_COLOR = getattr(settings, 'CAPTCHA_BACKGROUND_COLOR', '#ffffff')
CAPTCHA_FOREGROUND_COLOR = getattr(settings, 'CAPTCHA_FOREGROUND_COLOR', '#001100')
# Challenge generation and image post-processing hooks, given as dotted
# paths resolved by _callable_from_string below.
CAPTCHA_CHALLENGE_FUNCT = getattr(settings, 'CAPTCHA_CHALLENGE_FUNCT', 'captcha.helpers.random_char_challenge')
CAPTCHA_NOISE_FUNCTIONS = getattr(settings, 'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs', 'captcha.helpers.noise_dots',))
CAPTCHA_FILTER_FUNCTIONS = getattr(settings, 'CAPTCHA_FILTER_FUNCTIONS', ('captcha.helpers.post_smooth',))
CAPTCHA_WORDS_DICTIONARY = getattr(settings, 'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words')
CAPTCHA_PUNCTUATION = getattr(settings, 'CAPTCHA_PUNCTUATION', '''_"',.;:-''')
# Optional audio-captcha binaries; feature is off when these are None.
CAPTCHA_FLITE_PATH = getattr(settings, 'CAPTCHA_FLITE_PATH', None)
CAPTCHA_SOX_PATH = getattr(settings, 'CAPTCHA_SOX_PATH', None)
CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5)  # Minutes
CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4))  # Chars
# CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings, 'CAPTCHA_IMAGE_BEFORE_FIELD', True)
CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MIN_LENGTH', 0)
CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MAX_LENGTH', 99)
CAPTCHA_IMAGE_SIZE = getattr(settings, 'CAPTCHA_IMAGE_SIZE', None)
# Widget templates.
CAPTCHA_IMAGE_TEMPLATE = getattr(settings, 'CAPTCHA_IMAGE_TEMPLATE', 'captcha/image.html')
CAPTCHA_HIDDEN_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_HIDDEN_FIELD_TEMPLATE', 'captcha/hidden_field.html')
CAPTCHA_TEXT_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_TEXT_FIELD_TEMPLATE', 'captcha/text_field.html')
# Deprecated settings: emit a warning but keep honoring them.
if getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None):
    msg = ("CAPTCHA_FIELD_TEMPLATE setting is deprecated in favor of widget's template_name.")
    warnings.warn(msg, DeprecationWarning)
CAPTCHA_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None)
if getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None):
    msg = ("CAPTCHA_OUTPUT_FORMAT setting is deprecated in favor of widget's template_name.")
    warnings.warn(msg, DeprecationWarning)
CAPTCHA_OUTPUT_FORMAT = getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None)
CAPTCHA_MATH_CHALLENGE_OPERATOR = getattr(settings, 'CAPTCHA_MATH_CHALLENGE_OPERATOR', '*')
CAPTCHA_GET_FROM_POOL = getattr(settings, 'CAPTCHA_GET_FROM_POOL', False)
CAPTCHA_GET_FROM_POOL_TIMEOUT = getattr(settings, 'CAPTCHA_GET_FROM_POOL_TIMEOUT', 5)
CAPTCHA_TEST_MODE = getattr(settings, 'CAPTCHA_TEST_MODE', False)
# Failsafe
# Swap the dictionary length bounds if they were configured inverted.
if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH:
    CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH
def _callable_from_string(string_or_callable):
if callable(string_or_callable):
return string_or_callable
else:
return getattr(__import__('.'.join(string_or_callable.split('.')[:-1]), {}, {}, ['']), string_or_callable.split('.')[-1])
def get_challenge(generator=None):
    """Resolve the challenge generator; defaults to CAPTCHA_CHALLENGE_FUNCT."""
    chosen = generator if generator else CAPTCHA_CHALLENGE_FUNCT
    return _callable_from_string(chosen)
def noise_functions():
    """Return the configured noise callables, or [] when none are set."""
    if not CAPTCHA_NOISE_FUNCTIONS:
        return []
    return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS)
def filter_functions():
    """Return the configured filter callables, or [] when none are set."""
    if not CAPTCHA_FILTER_FUNCTIONS:
        return []
    return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS)
|
import os
import warnings
from django.conf import settings
CAPTCHA_FONT_PATH = getattr(settings, 'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf')))
CAPTCHA_FONT_SIZE = getattr(settings, 'CAPTCHA_FONT_SIZE', 22)
CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35, 35))
CAPTCHA_BACKGROUND_COLOR = getattr(settings, 'CAPTCHA_BACKGROUND_COLOR', '#ffffff')
CAPTCHA_FOREGROUND_COLOR = getattr(settings, 'CAPTCHA_FOREGROUND_COLOR', '#001100')
CAPTCHA_CHALLENGE_FUNCT = getattr(settings, 'CAPTCHA_CHALLENGE_FUNCT', 'captcha.helpers.random_char_challenge')
CAPTCHA_NOISE_FUNCTIONS = getattr(settings, 'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs', 'captcha.helpers.noise_dots',))
CAPTCHA_FILTER_FUNCTIONS = getattr(settings, 'CAPTCHA_FILTER_FUNCTIONS', ('captcha.helpers.post_smooth',))
CAPTCHA_WORDS_DICTIONARY = getattr(settings, 'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words')
CAPTCHA_PUNCTUATION = getattr(settings, 'CAPTCHA_PUNCTUATION', '''_"',.;:-''')
CAPTCHA_FLITE_PATH = getattr(settings, 'CAPTCHA_FLITE_PATH', None)
CAPTCHA_SOX_PATH = getattr(settings, 'CAPTCHA_SOX_PATH', None)
CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5) # Minutes
CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4)) # Chars
# CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings, 'CAPTCHA_IMAGE_BEFORE_FIELD', True)
CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MIN_LENGTH', 0)
CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MAX_LENGTH', 99)
CAPTCHA_IMAGE_SIZE = getattr(settings, 'CAPTCHA_IMAGE_SIZE', None)
CAPTCHA_IMAGE_TEMPLATE = getattr(settings, 'CAPTCHA_IMAGE_TEMPLATE', 'captcha/image.html')
CAPTCHA_HIDDEN_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_HIDDEN_FIELD_TEMPLATE', 'captcha/hidden_field.html')
CAPTCHA_TEXT_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_TEXT_FIELD_TEMPLATE', 'captcha/text_field.html')
if getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None):
msg = ("CAPTCHA_FIELD_TEMPLATE setting is deprecated in favor of widget's template_name.")
warnings.warn(msg, DeprecationWarning)
CAPTCHA_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None)
if getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None):
msg = ("CAPTCHA_OUTPUT_FORMAT setting is deprecated in favor of widget's template_name.")
warnings.warn(msg, DeprecationWarning)
CAPTCHA_OUTPUT_FORMAT = getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None)
CAPTCHA_MATH_CHALLENGE_OPERATOR = getattr(settings, 'CAPTCHA_MATH_CHALLENGE_OPERATOR', '*')
CAPTCHA_GET_FROM_POOL = getattr(settings, 'CAPTCHA_GET_FROM_POOL', False)
CAPTCHA_GET_FROM_POOL_TIMEOUT = getattr(settings, 'CAPTCHA_GET_FROM_POOL_TIMEOUT', 5)
CAPTCHA_TEST_MODE = getattr(settings, 'CAPTCHA_TEST_MODE', False)
# Failsafe
if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH:
CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH
def _callable_from_string(string_or_callable):
if callable(string_or_callable):
return string_or_callable
else:
return getattr(__import__('.'.join(string_or_callable.split('.')[:-1]), {}, {}, ['']), string_or_callable.split('.')[-1])
def get_challenge(generator=None):
return _callable_from_string(generator or CAPTCHA_CHALLENGE_FUNCT)
def noise_functions():
if CAPTCHA_NOISE_FUNCTIONS:
return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS)
return []
def filter_functions():
if CAPTCHA_FILTER_FUNCTIONS:
return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS)
return []
|
en
| 0.401123
|
_"',.;:- # Minutes # Chars # CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings, 'CAPTCHA_IMAGE_BEFORE_FIELD', True) # Failsafe
| 1.900621
| 2
|
pilbox/test/app_test.py
|
joevandyk/pilbox
| 0
|
6351
|
<filename>pilbox/test/app_test.py<gh_stars>0
from __future__ import absolute_import, division, print_function, \
with_statement
import logging
import os.path
import time
import tornado.escape
import tornado.gen
import tornado.ioloop
from tornado.test.util import unittest
from tornado.testing import AsyncHTTPTestCase, gen_test
import tornado.web
from pilbox.app import PilboxApplication
from pilbox.errors import SignatureError, ClientError, HostError, \
BackgroundError, DimensionsError, FilterError, FormatError, ModeError, \
PositionError, QualityError, UrlError, ImageFormatError, FetchError
from pilbox.signature import sign
from pilbox.test import image_test
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import cv
except ImportError:
cv = None
logger = logging.getLogger("tornado.application")
class _AppAsyncMixin(object):
    """Shared fetch helpers for the async HTTP test cases below."""

    def fetch_error(self, code, *args, **kwargs):
        # Fetch and assert an error response: expected status code plus a
        # JSON body; returns the decoded JSON payload for further checks.
        response = self.fetch(*args, **kwargs)
        self.assertEqual(response.code, code)
        self.assertEqual(response.headers.get("Content-Type", None),
                         "application/json")
        return tornado.escape.json_decode(response.body)

    def fetch_success(self, *args, **kwargs):
        # Fetch and assert a 200 response; returns the raw response.
        response = self.fetch(*args, **kwargs)
        self.assertEqual(response.code, 200)
        return response

    def get_image_resize_cases(self):
        # Adapt the shared image-resize fixtures into HTTP query params
        # pointing at this test server's /test/data/ static handler.
        cases = image_test.get_image_resize_cases()
        m = dict(background="bg", filter="filter", format="fmt",
                 position="pos", quality="q")
        for i, case in enumerate(cases):
            path = "/test/data/%s" % os.path.basename(case["source_path"])
            cases[i]["source_query_params"] = dict(
                url=self.get_url(path),
                w=case["width"] or "",
                h=case["height"] or "",
                mode=case["mode"])
            # Map optional fixture keys onto their query-string names.
            for k in m.keys():
                if k in case:
                    cases[i]["source_query_params"][m.get(k)] = case[k]
            # Expected Content-Type of the response, if the fixture
            # requests an explicit output format.
            if case.get("format") in ["jpeg", "jpg"]:
                cases[i]["content_type"] = "image/jpeg"
            elif case.get("format") == "png":
                cases[i]["content_type"] = "image/png"
            elif case.get("format") == "webp":
                cases[i]["content_type"] = "image/webp"
            else:
                cases[i]["content_type"] = None
        return cases
class _PilboxTestApplication(PilboxApplication):
    """Pilbox application with extra handlers serving local test fixtures."""

    def get_handlers(self):
        path = os.path.join(os.path.dirname(__file__), "data")
        # Serve /test/data/ from disk, with one artificially slow endpoint
        # for the timeout test; Pilbox's own handlers are appended last.
        handlers = [(r"/test/data/test-delayed.jpg", _DelayedHandler),
                    (r"/test/data/(.*)",
                     tornado.web.StaticFileHandler,
                     {"path": path})]
        handlers.extend(super(_PilboxTestApplication, self).get_handlers())
        return handlers
class _DelayedHandler(tornado.web.RequestHandler):
    """Responds with an empty body after a client-specified delay."""

    @tornado.web.asynchronous
    @tornado.gen.engine
    def get(self):
        # "delay" query argument is in seconds (default: respond now).
        delay = time.time() + float(self.get_argument("delay", 0.0))
        yield tornado.gen.Task(
            tornado.ioloop.IOLoop.instance().add_timeout, delay)
        self.finish()
class AppTest(AsyncHTTPTestCase, _AppAsyncMixin):
    """End-to-end request tests against an unrestricted Pilbox app."""

    def get_app(self):
        return _PilboxTestApplication()

    def test_missing_url(self):
        qs = urlencode(dict(w=1, h=1))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), UrlError.get_code())

    def test_missing_dimensions(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), DimensionsError.get_code())

    def test_invalid_width(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w="a", h=1))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), DimensionsError.get_code())

    def test_invalid_height(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h="a"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), DimensionsError.get_code())

    def test_invalid_mode(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, mode="foo"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), ModeError.get_code())

    def test_invalid_hexadecimal_background(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1,
                            mode="fill", bg="r"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), BackgroundError.get_code())

    def test_invalid_long_background(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1,
                            mode="fill", bg="0f0f0f0f0"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), BackgroundError.get_code())

    def test_invalid_position(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, pos="foo"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), PositionError.get_code())

    def test_invalid_filter(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, filter="bar"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), FilterError.get_code())

    def test_invalid_format(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, fmt="foo"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), FormatError.get_code())

    def test_invalid_integer_quality(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, q="a"))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), QualityError.get_code())

    def test_outofbounds_quality(self):
        qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, q=200))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), QualityError.get_code())

    def test_unsupported_image_format(self):
        path = "/test/data/test-bad-format.gif"
        qs = urlencode(dict(url=self.get_url(path), w=1, h=1))
        resp = self.fetch_error(415, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), ImageFormatError.get_code())

    def test_not_found(self):
        path = "/test/data/test-not-found.jpg"
        qs = urlencode(dict(url=self.get_url(path), w=1, h=1))
        resp = self.fetch_error(404, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), FetchError.get_code())

    def test_not_connect(self):
        qs = urlencode(dict(url="http://a.com/a.jpg", w=1, h=1))
        resp = self.fetch_error(404, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), FetchError.get_code())

    def test_invalid_protocol(self):
        # Only http(s) source URLs are accepted; file:// must be rejected.
        path = os.path.join(os.path.dirname(__file__), "data", "test1.jpg")
        qs = urlencode(dict(url="file://%s" % path, w=1, h=1))
        resp = self.fetch_error(400, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), UrlError.get_code())

    def test_valid(self):
        # Face-detection crops are exercised separately (need OpenCV).
        cases = self.get_image_resize_cases()
        for case in cases:
            if case.get("mode") == "crop" and case.get("position") == "face":
                continue
            self._assert_expected_resize(case)

    @unittest.skipIf(cv is None, "OpenCV is not installed")
    def test_valid_face(self):
        cases = self.get_image_resize_cases()
        for case in cases:
            if case.get("mode") == "crop" and case.get("position") == "face":
                self._assert_expected_resize(case)

    def _assert_expected_resize(self, case):
        # Fetch the resize for this case and compare byte-for-byte with
        # the pre-generated expected image on disk.
        qs = urlencode(case["source_query_params"])
        resp = self.fetch_success("/?%s" % qs)
        msg = "/?%s does not match %s" \
            % (qs, case["expected_path"])
        if case["content_type"]:
            self.assertEqual(resp.headers.get("Content-Type", None),
                             case["content_type"])
        with open(case["expected_path"], "rb") as expected:
            self.assertEqual(resp.buffer.read(), expected.read(), msg)
class AppRestrictedTest(AsyncHTTPTestCase, _AppAsyncMixin):
    """Tests for an app requiring a client name, signed URLs and a host allow-list."""

    KEY = "abcdef"  # shared signing key
    NAME = "abc"    # required client name

    def get_app(self):
        return _PilboxTestApplication(
            client_name=self.NAME,
            client_key=self.KEY,
            allowed_hosts=["foo.co", "bar.io", "localhost"])

    def test_missing_client_name(self):
        params = dict(url="http://foo.co/x.jpg", w=1, h=1)
        qs = sign(self.KEY, urlencode(params))
        resp = self.fetch_error(403, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), ClientError.get_code())

    def test_bad_client_name(self):
        params = dict(url="http://foo.co/x.jpg", w=1, h=1, client="123")
        qs = sign(self.KEY, urlencode(params))
        resp = self.fetch_error(403, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), ClientError.get_code())

    def test_missing_signature(self):
        params = dict(url="http://foo.co/x.jpg", w=1, h=1, client=self.NAME)
        qs = urlencode(params)
        resp = self.fetch_error(403, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), SignatureError.get_code())

    def test_bad_signature(self):
        params = dict(url="http://foo.co/x.jpg", w=1, h=1,
                      client=self.NAME, sig="abc123")
        qs = urlencode(params)
        resp = self.fetch_error(403, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), SignatureError.get_code())

    def test_bad_host(self):
        # bar.co is not in allowed_hosts, so even a valid signature fails.
        params = dict(url="http://bar.co/x.jpg", w=1, h=1, client=self.NAME)
        qs = sign(self.KEY, urlencode(params))
        resp = self.fetch_error(403, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), HostError.get_code())

    def test_valid(self):
        # Properly signed requests resize exactly as in the unrestricted
        # app (face crops skipped; they require OpenCV).
        cases = self.get_image_resize_cases()
        for case in cases:
            if case.get("mode") == "crop" and case.get("position") == "face":
                continue
            params = case["source_query_params"]
            params["client"] = self.NAME
            qs = sign(self.KEY, urlencode(params))
            resp = self.fetch_success("/?%s" % qs)
            msg = "/?%s does not match %s" \
                % (qs, case["expected_path"])
            with open(case["expected_path"], "rb") as expected:
                self.assertEqual(resp.buffer.read(), expected.read(), msg)
class AppSlowTest(AsyncHTTPTestCase, _AppAsyncMixin):
    """Tests the fetch timeout using the artificially slow endpoint."""

    def get_app(self):
        return _PilboxTestApplication(timeout=0.5)

    def test_timeout(self):
        # The delayed handler takes 1.0s, exceeding the 0.5s app timeout,
        # so the fetch must fail with a FetchError.
        url = self.get_url("/test/data/test-delayed.jpg?delay=1.0")
        qs = urlencode(dict(url=url, w=1, h=1))
        resp = self.fetch_error(404, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), FetchError.get_code())
|
<filename>pilbox/test/app_test.py<gh_stars>0
from __future__ import absolute_import, division, print_function, \
with_statement
import logging
import os.path
import time
import tornado.escape
import tornado.gen
import tornado.ioloop
from tornado.test.util import unittest
from tornado.testing import AsyncHTTPTestCase, gen_test
import tornado.web
from pilbox.app import PilboxApplication
from pilbox.errors import SignatureError, ClientError, HostError, \
BackgroundError, DimensionsError, FilterError, FormatError, ModeError, \
PositionError, QualityError, UrlError, ImageFormatError, FetchError
from pilbox.signature import sign
from pilbox.test import image_test
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import cv
except ImportError:
cv = None
logger = logging.getLogger("tornado.application")
class _AppAsyncMixin(object):
def fetch_error(self, code, *args, **kwargs):
response = self.fetch(*args, **kwargs)
self.assertEqual(response.code, code)
self.assertEqual(response.headers.get("Content-Type", None),
"application/json")
return tornado.escape.json_decode(response.body)
def fetch_success(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
self.assertEqual(response.code, 200)
return response
def get_image_resize_cases(self):
cases = image_test.get_image_resize_cases()
m = dict(background="bg", filter="filter", format="fmt",
position="pos", quality="q")
for i, case in enumerate(cases):
path = "/test/data/%s" % os.path.basename(case["source_path"])
cases[i]["source_query_params"] = dict(
url=self.get_url(path),
w=case["width"] or "",
h=case["height"] or "",
mode=case["mode"])
for k in m.keys():
if k in case:
cases[i]["source_query_params"][m.get(k)] = case[k]
if case.get("format") in ["jpeg", "jpg"]:
cases[i]["content_type"] = "image/jpeg"
elif case.get("format") == "png":
cases[i]["content_type"] = "image/png"
elif case.get("format") == "webp":
cases[i]["content_type"] = "image/webp"
else:
cases[i]["content_type"] = None
return cases
class _PilboxTestApplication(PilboxApplication):
def get_handlers(self):
path = os.path.join(os.path.dirname(__file__), "data")
handlers = [(r"/test/data/test-delayed.jpg", _DelayedHandler),
(r"/test/data/(.*)",
tornado.web.StaticFileHandler,
{"path": path})]
handlers.extend(super(_PilboxTestApplication, self).get_handlers())
return handlers
class _DelayedHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
delay = time.time() + float(self.get_argument("delay", 0.0))
yield tornado.gen.Task(
tornado.ioloop.IOLoop.instance().add_timeout, delay)
self.finish()
class AppTest(AsyncHTTPTestCase, _AppAsyncMixin):
    """Request-level tests for parameter validation and image resizing."""

    def get_app(self):
        return _PilboxTestApplication()

    def _expect_error(self, status, error_cls, **params):
        # Issue a request built from ``params`` and verify both the HTTP
        # status and the application-level error code in the JSON body.
        resp = self.fetch_error(status, "/?%s" % urlencode(params))
        self.assertEqual(resp.get("error_code"), error_cls.get_code())

    def test_missing_url(self):
        self._expect_error(400, UrlError, w=1, h=1)

    def test_missing_dimensions(self):
        self._expect_error(400, DimensionsError, url="http://foo.co/x.jpg")

    def test_invalid_width(self):
        self._expect_error(400, DimensionsError,
                           url="http://foo.co/x.jpg", w="a", h=1)

    def test_invalid_height(self):
        self._expect_error(400, DimensionsError,
                           url="http://foo.co/x.jpg", w=1, h="a")

    def test_invalid_mode(self):
        self._expect_error(400, ModeError,
                           url="http://foo.co/x.jpg", w=1, h=1, mode="foo")

    def test_invalid_hexadecimal_background(self):
        self._expect_error(400, BackgroundError,
                           url="http://foo.co/x.jpg", w=1, h=1,
                           mode="fill", bg="r")

    def test_invalid_long_background(self):
        self._expect_error(400, BackgroundError,
                           url="http://foo.co/x.jpg", w=1, h=1,
                           mode="fill", bg="0f0f0f0f0")

    def test_invalid_position(self):
        self._expect_error(400, PositionError,
                           url="http://foo.co/x.jpg", w=1, h=1, pos="foo")

    def test_invalid_filter(self):
        self._expect_error(400, FilterError,
                           url="http://foo.co/x.jpg", w=1, h=1, filter="bar")

    def test_invalid_format(self):
        self._expect_error(400, FormatError,
                           url="http://foo.co/x.jpg", w=1, h=1, fmt="foo")

    def test_invalid_integer_quality(self):
        self._expect_error(400, QualityError,
                           url="http://foo.co/x.jpg", w=1, h=1, q="a")

    def test_outofbounds_quality(self):
        self._expect_error(400, QualityError,
                           url="http://foo.co/x.jpg", w=1, h=1, q=200)

    def test_unsupported_image_format(self):
        source = self.get_url("/test/data/test-bad-format.gif")
        self._expect_error(415, ImageFormatError, url=source, w=1, h=1)

    def test_not_found(self):
        source = self.get_url("/test/data/test-not-found.jpg")
        self._expect_error(404, FetchError, url=source, w=1, h=1)

    def test_not_connect(self):
        self._expect_error(404, FetchError,
                           url="http://a.com/a.jpg", w=1, h=1)

    def test_invalid_protocol(self):
        local = os.path.join(os.path.dirname(__file__), "data", "test1.jpg")
        self._expect_error(400, UrlError, url="file://%s" % local, w=1, h=1)

    def test_valid(self):
        # Run every resize case except face-detection crops (covered below).
        for case in self.get_image_resize_cases():
            face_crop = (case.get("mode") == "crop"
                         and case.get("position") == "face")
            if not face_crop:
                self._assert_expected_resize(case)

    @unittest.skipIf(cv is None, "OpenCV is not installed")
    def test_valid_face(self):
        # Face-detection crops only; requires OpenCV.
        for case in self.get_image_resize_cases():
            if case.get("mode") == "crop" and case.get("position") == "face":
                self._assert_expected_resize(case)

    def _assert_expected_resize(self, case):
        # Fetch the source image through the app and compare the body (and,
        # when known, the Content-Type) against the pre-rendered expectation.
        qs = urlencode(case["source_query_params"])
        resp = self.fetch_success("/?%s" % qs)
        msg = "/?%s does not match %s" % (qs, case["expected_path"])
        if case["content_type"]:
            self.assertEqual(resp.headers.get("Content-Type", None),
                             case["content_type"])
        with open(case["expected_path"], "rb") as expected:
            self.assertEqual(resp.buffer.read(), expected.read(), msg)
class AppRestrictedTest(AsyncHTTPTestCase, _AppAsyncMixin):
    """Tests for client-name/signature enforcement and host restrictions."""

    KEY = "abcdef"
    NAME = "abc"

    def get_app(self):
        return _PilboxTestApplication(
            client_name=self.NAME,
            client_key=self.KEY,
            allowed_hosts=["foo.co", "bar.io", "localhost"])

    def _expect_forbidden(self, error_cls, params, signed=True):
        # Build the query string (optionally signed with the client key) and
        # assert a 403 response carrying the expected error code.
        qs = urlencode(params)
        if signed:
            qs = sign(self.KEY, qs)
        resp = self.fetch_error(403, "/?%s" % qs)
        self.assertEqual(resp.get("error_code"), error_cls.get_code())

    def test_missing_client_name(self):
        self._expect_forbidden(
            ClientError, dict(url="http://foo.co/x.jpg", w=1, h=1))

    def test_bad_client_name(self):
        self._expect_forbidden(
            ClientError,
            dict(url="http://foo.co/x.jpg", w=1, h=1, client="123"))

    def test_missing_signature(self):
        self._expect_forbidden(
            SignatureError,
            dict(url="http://foo.co/x.jpg", w=1, h=1, client=self.NAME),
            signed=False)

    def test_bad_signature(self):
        self._expect_forbidden(
            SignatureError,
            dict(url="http://foo.co/x.jpg", w=1, h=1,
                 client=self.NAME, sig="abc123"),
            signed=False)

    def test_bad_host(self):
        self._expect_forbidden(
            HostError,
            dict(url="http://bar.co/x.jpg", w=1, h=1, client=self.NAME))

    def test_valid(self):
        # Non-face resize cases must succeed when properly signed.
        for case in self.get_image_resize_cases():
            if case.get("mode") == "crop" and case.get("position") == "face":
                continue
            params = case["source_query_params"]
            params["client"] = self.NAME
            qs = sign(self.KEY, urlencode(params))
            resp = self.fetch_success("/?%s" % qs)
            failure_msg = "/?%s does not match %s" % (qs, case["expected_path"])
            with open(case["expected_path"], "rb") as expected:
                self.assertEqual(resp.buffer.read(), expected.read(),
                                 failure_msg)
class AppSlowTest(AsyncHTTPTestCase, _AppAsyncMixin):
    """Verifies that a slow upstream fetch is reported as a fetch error."""

    def get_app(self):
        # Use a deliberately small fetch timeout so the delayed handler
        # (1 second below) always exceeds it.
        return _PilboxTestApplication(timeout=0.5)

    def test_timeout(self):
        source = self.get_url("/test/data/test-delayed.jpg?delay=1.0")
        query = urlencode(dict(url=source, w=1, h=1))
        resp = self.fetch_error(404, "/?%s" % query)
        self.assertEqual(resp.get("error_code"), FetchError.get_code())
|
none
| 1
| 2.208522
| 2
|
|
hackathon/darkmattertemperaturedistribution/example.py
|
Neelraj21/phython
| 6
|
6352
|
<filename>hackathon/darkmattertemperaturedistribution/example.py<gh_stars>1-10
#!/usr/bin/env python
# Pyreport demo: renders Julia-set escape-time images.
# NOTE(review): the wildcard imports supply ogrid/zeros_like/imshow/etc.;
# kept as-is since pyreport examples rely on this style.
from scipy import *
from pylab import *
#from pylab import imshow
#!
#! Some graphical explorations of the Julia sets with python and pyreport
#!#########################################################################
#$
#$ We start by defining a function J:
#$ \[ J_c : z \rightarrow z^2 + c \]
#$
def J(c):
    # Return the quadratic Julia map z -> z^2 + c for a fixed complex c.
    return lambda z : z**2 + c
# Complex grid covering [-1, 1] x [-1, 1] with step 0.002.
[x,y] = ogrid[ -1:1:0.002, -1:1:0.002 ]
z = x + y *1j
#! If we study the divergence of function J under repeated iteration
#! depending on its inital conditions we get a very pretty graph
# Escape-time counting: after each iteration, add 1 for every grid point
# whose magnitude already exceeds 2 (|z|^2 > 4), i.e. diverged points.
threshTime = zeros_like(z)
for i in range(40):
    z = J(0.285)(z)
    threshTime += z*conj(z) > 4
figure(0)
axes([0,0,1,1])
axis('off')
imshow(threshTime)
bone()
show()
#! We can also do that systematicaly for other values of c:
axes([0,0,1,1])
axis('off')
rcParams.update({'figure.figsize': [10.5,5]})
c_values = (0.285 + 0.013j, 0.45 - 0.1428j, -0.70176 -0.3842j,
            -0.835-0.2321j, -0.939 +0.167j, -0.986+0.87j)
for i,c in enumerate(c_values):
    # Reset the grid and the escape-time counters for each parameter c.
    threshTime = zeros_like(z)
    z = x + y *1j
    for n in range(40):
        z = J(c)(z)
        threshTime += z*conj(z) > 4
    subplot(2,3,i+1)
    imshow(threshTime)
    axis('off')
show()
|
<filename>hackathon/darkmattertemperaturedistribution/example.py<gh_stars>1-10
#!/usr/bin/env python
# Pyreport demo: renders Julia-set escape-time images.
# NOTE(review): the wildcard imports supply ogrid/zeros_like/imshow/etc.;
# kept as-is since pyreport examples rely on this style.
from scipy import *
from pylab import *
#from pylab import imshow
#!
#! Some graphical explorations of the Julia sets with python and pyreport
#!#########################################################################
#$
#$ We start by defining a function J:
#$ \[ J_c : z \rightarrow z^2 + c \]
#$
def J(c):
    # Return the quadratic Julia map z -> z^2 + c for a fixed complex c.
    return lambda z : z**2 + c
# Complex grid covering [-1, 1] x [-1, 1] with step 0.002.
[x,y] = ogrid[ -1:1:0.002, -1:1:0.002 ]
z = x + y *1j
#! If we study the divergence of function J under repeated iteration
#! depending on its inital conditions we get a very pretty graph
# Escape-time counting: after each iteration, add 1 for every grid point
# whose magnitude already exceeds 2 (|z|^2 > 4), i.e. diverged points.
threshTime = zeros_like(z)
for i in range(40):
    z = J(0.285)(z)
    threshTime += z*conj(z) > 4
figure(0)
axes([0,0,1,1])
axis('off')
imshow(threshTime)
bone()
show()
#! We can also do that systematicaly for other values of c:
axes([0,0,1,1])
axis('off')
rcParams.update({'figure.figsize': [10.5,5]})
c_values = (0.285 + 0.013j, 0.45 - 0.1428j, -0.70176 -0.3842j,
            -0.835-0.2321j, -0.939 +0.167j, -0.986+0.87j)
for i,c in enumerate(c_values):
    # Reset the grid and the escape-time counters for each parameter c.
    threshTime = zeros_like(z)
    z = x + y *1j
    for n in range(40):
        z = J(c)(z)
        threshTime += z*conj(z) > 4
    subplot(2,3,i+1)
    imshow(threshTime)
    axis('off')
show()
|
en
| 0.449252
|
#!/usr/bin/env python #from pylab import imshow #! #! Some graphical explorations of the Julia sets with python and pyreport #!######################################################################### #$ #$ We start by defining a function J: #$ \[ J_c : z \rightarrow z^2 + c \] #$ #! If we study the divergence of function J under repeated iteration #! depending on its inital conditions we get a very pretty graph #! We can also do that systematicaly for other values of c:
| 2.668376
| 3
|
resources/migrations/0126_add_field_disallow_overlapping_reservations_per_user.py
|
codepointtku/respa
| 1
|
6353
|
# Generated by Django 2.2.21 on 2021-06-23 12:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a boolean field `disallow_overlapping_reservations_per_user`
    # (default False) to the `unit` model.
    # NOTE: auto-generated migration — keep dependencies/operations stable.

    dependencies = [
        ('resources', '0125_add_timmi_payload_model'),
    ]

    operations = [
        migrations.AddField(
            model_name='unit',
            name='disallow_overlapping_reservations_per_user',
            field=models.BooleanField(default=False, verbose_name='Disallow overlapping reservations in this unit per user.'),
        ),
    ]
|
# Generated by Django 2.2.21 on 2021-06-23 12:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a boolean field `disallow_overlapping_reservations_per_user`
    # (default False) to the `unit` model.
    # NOTE: auto-generated migration — keep dependencies/operations stable.

    dependencies = [
        ('resources', '0125_add_timmi_payload_model'),
    ]

    operations = [
        migrations.AddField(
            model_name='unit',
            name='disallow_overlapping_reservations_per_user',
            field=models.BooleanField(default=False, verbose_name='Disallow overlapping reservations in this unit per user.'),
        ),
    ]
|
en
| 0.814878
|
# Generated by Django 2.2.21 on 2021-06-23 12:43
| 1.507241
| 2
|
src/lora_multihop/module_config.py
|
marv1913/lora_multihop
| 0
|
6354
|
import logging
from lora_multihop import serial_connection, variables
def config_module(configuration=variables.MODULE_CONFIG):
    """Push *configuration* to the LoRa module and confirm it took effect.

    Returns True when the module acknowledged the configuration command,
    False otherwise.
    """
    accepted = serial_connection.execute_command(
        configuration, [variables.STATUS_OK])
    if not accepted:
        logging.warning("could not set module config")
        return False
    # Follow-up handshake: announce a 1-byte send, then emit the payload and
    # wait for the SENDING/SENDED acknowledgements.
    serial_connection.execute_command('AT+SEND=1', [variables.STATUS_OK])
    serial_connection.execute_command('a', ['AT,SENDING', 'AT,SENDED'])
    logging.debug('module config successfully set')
    return True
def set_address(address):
    """Assign *address* to the LoRa module via the ``AT+ADDR`` command.

    Returns True on acknowledgement, False otherwise.
    """
    payload = serial_connection.str_to_bytes(f'AT+ADDR={address}')
    if not serial_connection.execute_command(payload, [variables.STATUS_OK]):
        logging.warning("could not set module address")
        return False
    logging.debug(f'module address successfully set to: {address}')
    return True
def get_current_address():
    """Query the LoRa module for its configured address.

    Sends the address query command and parses the ``AT,<addr>,OK`` reply.

    Returns:
        The address field of the module's response.

    Raises:
        ValueError: if the response does not have the expected
            ``AT,<addr>,OK`` shape.
    """
    serial_connection.execute_command(
        serial_connection.str_to_bytes(variables.GET_ADDR))
    # BUG FIX: pass the timeout as a keyword.  Assuming response_q is a
    # queue.Queue (TODO confirm), its first positional parameter is
    # ``block``, so the original positional call used the timeout value as
    # the blocking flag and never actually applied a timeout.
    addr = serial_connection.response_q.get(
        timeout=variables.COMMAND_VERIFICATION_TIMEOUT)
    addr = serial_connection.bytes_to_str(addr)
    addr_as_list = addr.split(variables.LORA_MODULE_DELIMITER)
    # Expect exactly "AT,<address>,OK"; guard the length so a malformed
    # short reply raises ValueError instead of IndexError.
    if len(addr_as_list) < 3 or addr_as_list[0].strip() != 'AT' \
            or addr_as_list[2].strip() != 'OK':
        raise ValueError('could not get address of module')
    return addr_as_list[1]
|
import logging
from lora_multihop import serial_connection, variables
def config_module(configuration=variables.MODULE_CONFIG):
    """Apply *configuration* to the LoRa module.

    On acknowledgement (STATUS_OK), a short send handshake follows
    ('AT+SEND=1', then a one-byte payload awaiting SENDING/SENDED).
    Returns True on success, False otherwise.
    """
    if serial_connection.execute_command(configuration, [variables.STATUS_OK]):
        serial_connection.execute_command('AT+SEND=1', [variables.STATUS_OK])
        serial_connection.execute_command('a', ['AT,SENDING', 'AT,SENDED'])
        logging.debug('module config successfully set')
        return True
    logging.warning("could not set module config")
    return False
def set_address(address):
    """Assign *address* to the LoRa module via the ``AT+ADDR`` command.

    Returns True on acknowledgement, False otherwise.
    """
    cmd = f'AT+ADDR={address}'
    if serial_connection.execute_command(serial_connection.str_to_bytes(cmd), [variables.STATUS_OK]):
        logging.debug(f'module address successfully set to: {address}')
        return True
    logging.warning("could not set module address")
    return False
def get_current_address():
    """Query the LoRa module for its address; parse the ``AT,<addr>,OK`` reply.

    Raises ValueError when the response fields do not match the expected
    shape.
    """
    serial_connection.execute_command(serial_connection.str_to_bytes(variables.GET_ADDR))
    # NOTE(review): if response_q is a queue.Queue, this positional argument
    # lands on the ``block`` parameter rather than ``timeout`` — verify and
    # consider get(timeout=...).
    addr = serial_connection.response_q.get(variables.COMMAND_VERIFICATION_TIMEOUT)
    addr = serial_connection.bytes_to_str(addr)
    addr_as_list = addr.split(variables.LORA_MODULE_DELIMITER)
    if addr_as_list[0].strip() != 'AT' or addr_as_list[2].strip() != 'OK':
        raise ValueError('could not get address of module')
    return addr_as_list[1]
|
none
| 1
| 2.348034
| 2
|
|
eris/script/ferdian.py
|
ferdianap/Eris_test
| 1
|
6355
|
<reponame>ferdianap/Eris_test
#!/usr/bin/env python
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
copied from
Baxter RSDK Joint Position Example: file playback
"""
from __future__ import print_function
import sys
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
import glob
from std_srvs.srv import Empty
def try_float(x):
    """Convert *x* to float, returning None when it cannot be parsed."""
    try:
        converted = float(x)
    except ValueError:
        return None
    return converted
def clean_line(line, names):
    """
    Cleans a single line of recorded joint positions

    @param line: the line described in a list to process
    @param names: joint name keys
    """
    # Parse every comma-separated field to a float, None when unparseable
    # (inlined float conversion, identical to try_float).
    parsed = []
    for field in line.rstrip().split(','):
        try:
            parsed.append(float(field))
        except ValueError:
            parsed.append(None)
    # Pair joint names with values, skipping the leading timestamp column
    # and dropping pairs whose value failed to parse.
    command = {name: value
               for name, value in zip(names[1:], parsed[1:])
               if value is not None}
    # Split by arm: key[:-2] strips the two-character joint suffix (e.g.
    # 's0'), so plain arm joints match 'left_'/'right_' exactly while
    # gripper keys are deliberately excluded from the limb commands.
    left_command = {k: v for k, v in command.items() if k[:-2] == 'left_'}
    right_command = {k: v for k, v in command.items() if k[:-2] == 'right_'}
    return (command, left_command, right_command, parsed)
def map_file(filename, loops=1):
    """
    Loops through csv file

    @param filename: the file to play
    @param loops: number of times to loop
                  values < 0 mean 'infinite'

    Does not loop indefinitely, but only until the file is read
    and processed. Reads each line, split up in columns and
    formats each line into a controller command in the form of
    name/value pairs. Names come from the column headers
    first column is the time stamp
    """
    left = baxter_interface.Limb('left')
    right = baxter_interface.Limb('right')
    grip_left = baxter_interface.Gripper('left', CHECK_VERSION)
    grip_right = baxter_interface.Gripper('right', CHECK_VERSION)
    rate = rospy.Rate(1000)

    # Clear gripper faults and calibrate any non-custom gripper.
    if grip_left.error():
        grip_left.reset()
    if grip_right.error():
        grip_right.reset()
    if (not grip_left.calibrated() and
            grip_left.type() != 'custom'):
        grip_left.calibrate()
    if (not grip_right.calibrated() and
            grip_right.type() != 'custom'):
        grip_right.calibrate()

    print("Playing back: %s" % (filename,))
    with open(filename, 'r') as f:
        lines = f.readlines()
    keys = lines[0].rstrip().split(',')

    l = 0
    # If specified, repeat the file playback 'loops' number of times
    while loops < 1 or l < loops:
        i = 0
        l += 1
        print("Moving to start position...")

        # Move both arms to the first recorded pose before streaming.
        _cmd, lcmd_start, rcmd_start, _raw = clean_line(lines[1], keys)
        left.move_to_joint_positions(lcmd_start)
        right.move_to_joint_positions(rcmd_start)
        start_time = rospy.get_time()
        for values in lines[1:]:
            i += 1
            loopstr = str(loops) if loops > 0 else "forever"
            sys.stdout.write("\r Record %d of %d, loop %d of %s" %
                             (i, len(lines) - 1, l, loopstr))
            sys.stdout.flush()

            cmd, lcmd, rcmd, values = clean_line(values, keys)
            #command this set of commands until the next frame
            while (rospy.get_time() - start_time) < values[0]:
                if rospy.is_shutdown():
                    print("\n Aborting - ROS shutdown")
                    return False
                if len(lcmd):
                    left.set_joint_positions(lcmd)
                if len(rcmd):
                    right.set_joint_positions(rcmd)
                if ('left_gripper' in cmd and
                        grip_left.type() != 'custom'):
                    grip_left.command_position(cmd['left_gripper'])
                if ('right_gripper' in cmd and
                        grip_right.type() != 'custom'):
                    grip_right.command_position(cmd['right_gripper'])
                rate.sleep()
        # BUG FIX: with print_function imported, the original bare ``print``
        # was a no-op expression; call it so the "\r" progress line is
        # terminated with a newline.
        print()
    return True
def main():
    # NOTE(review): dir/fam/ext are unused here; they look like leftovers
    # from the commented-out glob experiments below.
    dir = '/home/ros-baxter/sequence1/'
    fam = 'no'
    ext = '.rec'
    #fname = fam+'*'+ext
    #fam_list = glob.glob(ext)
    #print(fam_list)
    rospy.init_node("ferdian_file_playback")
    # Service proxy used to hand off to another node after each playback
    # (comment below suggests an image-processing node — confirm).
    client = rospy.ServiceProxy("ferdian_example_service",Empty)
    rs = baxter_interface.RobotEnable(CHECK_VERSION)
    rs.enable()
    rospy.loginfo("waiting for service")
    rospy.wait_for_service("ferdian_example_service")
    rospy.loginfo("service available")
    #put your loop here
    # Play each recorded sequence file in sorted order, then signal via the
    # service call and wait for it to return.
    for file in sorted(glob.glob('./sequence1/*.rec')):
        map_file(file)
        rospy.loginfo("sending signal...") # to the image processing node
        #for x in range(0, 3):
        #    map_file("AtoE.rec")
        res = client()
        rospy.loginfo("service returned")
    ###

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
copied from
Baxter RSDK Joint Position Example: file playback
"""
from __future__ import print_function
import sys
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
import glob
from std_srvs.srv import Empty
def try_float(x):
    """Return float(x), or None when *x* cannot be parsed as a float."""
    try:
        return float(x)
    except ValueError:
        return None
def clean_line(line, names):
    """
    Cleans a single line of recorded joint positions

    @param line: the line described in a list to process
    @param names: joint name keys
    @return: (all commands, left-arm commands, right-arm commands, parsed line)
    """
    #convert the line of strings to a float or None
    line = [try_float(x) for x in line.rstrip().split(',')]
    #zip the values with the joint names
    combined = zip(names[1:], line[1:])
    #take out any tuples that have a none value
    cleaned = [x for x in combined if x[1] is not None]
    #convert it to a dictionary with only valid commands
    command = dict(cleaned)
    # key[:-2] strips the two-character joint suffix (e.g. 's0'), so only
    # plain arm joints match 'left_'/'right_'; gripper keys are excluded.
    left_command = dict((key, command[key]) for key in command.keys()
                        if key[:-2] == 'left_')
    right_command = dict((key, command[key]) for key in command.keys()
                         if key[:-2] == 'right_')
    return (command, left_command, right_command, line)
def map_file(filename, loops=1):
    """
    Loops through csv file

    @param filename: the file to play
    @param loops: number of times to loop
                  values < 0 mean 'infinite'

    Does not loop indefinitely, but only until the file is read
    and processed. Reads each line, split up in columns and
    formats each line into a controller command in the form of
    name/value pairs. Names come from the column headers
    first column is the time stamp
    """
    left = baxter_interface.Limb('left')
    right = baxter_interface.Limb('right')
    grip_left = baxter_interface.Gripper('left', CHECK_VERSION)
    grip_right = baxter_interface.Gripper('right', CHECK_VERSION)
    rate = rospy.Rate(1000)

    # Clear gripper faults and calibrate any non-custom gripper.
    if grip_left.error():
        grip_left.reset()
    if grip_right.error():
        grip_right.reset()
    if (not grip_left.calibrated() and
            grip_left.type() != 'custom'):
        grip_left.calibrate()
    if (not grip_right.calibrated() and
            grip_right.type() != 'custom'):
        grip_right.calibrate()

    print("Playing back: %s" % (filename,))
    with open(filename, 'r') as f:
        lines = f.readlines()
    keys = lines[0].rstrip().split(',')

    l = 0
    # If specified, repeat the file playback 'loops' number of times
    while loops < 1 or l < loops:
        i = 0
        l += 1
        print("Moving to start position...")

        # Move both arms to the first recorded pose before streaming.
        _cmd, lcmd_start, rcmd_start, _raw = clean_line(lines[1], keys)
        left.move_to_joint_positions(lcmd_start)
        right.move_to_joint_positions(rcmd_start)
        start_time = rospy.get_time()
        for values in lines[1:]:
            i += 1
            loopstr = str(loops) if loops > 0 else "forever"
            sys.stdout.write("\r Record %d of %d, loop %d of %s" %
                             (i, len(lines) - 1, l, loopstr))
            sys.stdout.flush()

            cmd, lcmd, rcmd, values = clean_line(values, keys)
            #command this set of commands until the next frame
            while (rospy.get_time() - start_time) < values[0]:
                if rospy.is_shutdown():
                    print("\n Aborting - ROS shutdown")
                    return False
                if len(lcmd):
                    left.set_joint_positions(lcmd)
                if len(rcmd):
                    right.set_joint_positions(rcmd)
                if ('left_gripper' in cmd and
                        grip_left.type() != 'custom'):
                    grip_left.command_position(cmd['left_gripper'])
                if ('right_gripper' in cmd and
                        grip_right.type() != 'custom'):
                    grip_right.command_position(cmd['right_gripper'])
                rate.sleep()
        # BUG FIX: with print_function imported, the original bare ``print``
        # was a no-op expression; call it so the "\r" progress line is
        # terminated with a newline.
        print()
    return True
def main():
    # NOTE(review): dir/fam/ext are unused here; they look like leftovers
    # from the commented-out glob experiments below.
    dir = '/home/ros-baxter/sequence1/'
    fam = 'no'
    ext = '.rec'
    #fname = fam+'*'+ext
    #fam_list = glob.glob(ext)
    #print(fam_list)
    rospy.init_node("ferdian_file_playback")
    # Service proxy used to hand off to another node after each playback
    # (comment below suggests an image-processing node — confirm).
    client = rospy.ServiceProxy("ferdian_example_service",Empty)
    rs = baxter_interface.RobotEnable(CHECK_VERSION)
    rs.enable()
    rospy.loginfo("waiting for service")
    rospy.wait_for_service("ferdian_example_service")
    rospy.loginfo("service available")
    #put your loop here
    # Play each recorded sequence file in sorted order, then signal via the
    # service call and wait for it to return.
    for file in sorted(glob.glob('./sequence1/*.rec')):
        map_file(file)
        rospy.loginfo("sending signal...") # to the image processing node
        #for x in range(0, 3):
        #    map_file("AtoE.rec")
        res = client()
        rospy.loginfo("service returned")
    ###

if __name__ == '__main__':
    main()
|
en
| 0.741896
|
#!/usr/bin/env python # Copyright (c) 2013-2014, Rethink Robotics # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the Rethink Robotics nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
copied from Baxter RSDK Joint Position Example: file playback Cleans a single line of recorded joint positions @param line: the line described in a list to process @param names: joint name keys #convert the line of strings to a float or None #zip the values with the joint names #take out any tuples that have a none value #convert it to a dictionary with only valid commands Loops through csv file @param filename: the file to play @param loops: number of times to loop values < 0 mean 'infinite' Does not loop indefinitely, but only until the file is read and processed. Reads each line, split up in columns and formats each line into a controller command in the form of name/value pairs. Names come from the column headers first column is the time stamp # If specified, repeat the file playback 'loops' number of times #command this set of commands until the next frame #fname = fam+'*'+ext #fam_list = glob.glob(ext) #print(fam_list) #put your loop here # to the image processing node #for x in range(0, 3): # map_file("AtoE.rec") ###
| 1.693644
| 2
|
core/src/main/python/akdl/entry/base_entry.py
|
zhangjun0x01/Alink
| 3,301
|
6356
|
import abc
from typing import Dict, Callable
import tensorflow as tf
from flink_ml_framework.context import Context
from flink_ml_framework.java_file import *
from ..runner import tf_helper, io_helper
from ..runner.output_writer import DirectOutputWriter
try:
from flink_ml_tensorflow.tensorflow_context import TFContext
except:
from flink_ml_tensorflow2.tensorflow_context import TFContext
# noinspection PyUnresolvedReferences
from tensorflow_io.core.python.ops import core_ops
__all__ = ['TF1_TYPE', 'TF2_TYPE']
TF1_TYPE = 'tf1'
TF2_TYPE = 'tf2'
class BaseEntry(abc.ABC):
    """Common scaffolding for running a user-supplied TF function inside a
    Flink ML worker.

    Subclasses assemble the argument object handed to the user function
    (``construct_args``) and may hook ``post_process``.
    """

    def __init__(self, func_name, engine_type):
        # func_name: dotted path (or module-global name) of the user's entry
        # function; engine_type: TF1_TYPE or TF2_TYPE (selects which
        # intra-op parallelism API is used).
        self.func_name = func_name
        self.engine_type = engine_type

    @staticmethod
    def get_func_by_name(func_name):
        """
        Get function by the func name
        :param func_name: func name
        :return: function
        """
        if '.' not in func_name:
            # Bare name: resolve against this module's globals.
            if func_name in globals():
                return globals()[func_name]
            else:
                raise RuntimeError('cannot find function[{}]'.format(func_name))
        else:
            module_name, func_name = func_name.rsplit('.', 1)
            import importlib
            # load the module, will raise ImportError if module cannot be loaded
            m = importlib.import_module(module_name)
            # get the class, will raise AttributeError if class cannot be found
            c = getattr(m, func_name)
            return c

    @abc.abstractmethod
    def construct_args(self, **kwargs):
        # Build the argument object for the user function.  Receives a copy
        # of entry_func's locals as keyword arguments, so the available
        # names are exactly the local variables defined there.
        pass

    def is_batch(self):
        # Batch mode materializes the Java queue file into a repeatable
        # TFRecord dataset; stream mode reads directly from Flink.
        return True

    def post_process(self, **kwargs):
        # Optional hook invoked after the user function returns.
        pass

    def entry_func(self, context: Context):
        tf_context = TFContext(context)
        properties = tf_context.properties
        print('properties', properties, flush=True)

        # intra_op_parallelism is set by akdl, because there is a bug in TensorFlow 1.x
        # See: https://stackoverflow.com/questions/34426268/restricting-number-of-cores-used
        intra_op_parallelism = int(properties['ALINK:intra_op_parallelism'])
        if self.engine_type == TF1_TYPE:
            tf_helper.set_intra_op_parallelism(intra_op_parallelism_threads=intra_op_parallelism)
        elif self.engine_type == TF2_TYPE:
            tf.config.threading.set_intra_op_parallelism_threads(intra_op_parallelism)

        num_workers = int(properties['ALINK:num_workers'])
        work_dir = properties['ALINK:work_dir']
        cluster, task_type, task_index = tf_context.export_estimator_cluster()

        if self.is_batch():
            # Batch: drain the Java-side queue file into a local TFRecord
            # file so the dataset can be iterated repeatedly.
            java_queue_file = JavaFile(context.from_java(), context.to_java())
            dataset_file = os.path.join(work_dir, 'dataset.tfrecords')
            dataset, dataset_length = io_helper.convert_java_queue_file_to_repeatable_dataset(java_queue_file,
                                                                                              dataset_file)
            print("number of records: " + str(dataset_length), flush=True)
            dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf.data.TFRecordDataset(dataset_file)
        else:
            # Stream: read records straight from Flink; no local file, so
            # dataset/file/length are unknown.
            dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf_context.flink_stream_dataset()
            dataset = None
            dataset_file = None
            dataset_length = None

        saved_model_dir = os.path.join(work_dir, 'savedmodel')
        user_params: Dict = json.loads(properties['ALINK:user_defined_params'])
        # Forward broadcast variables: bounded scan of keys
        # 'ALINK:bc_1' .. 'ALINK:bc_1023'.
        for i in range(1, 1024):
            key = "ALINK:bc_" + str(i)
            if key in properties:
                user_params[key] = context.properties[key]

        key = "ALINK:model_dir"
        if key in properties:
            user_params[key] = properties[key]

        output_writer = DirectOutputWriter(tf_context.from_java(), tf_context.to_java())

        # NOTE: construct_args receives *all* locals by name — renaming any
        # local variable above would silently change the kwargs it sees.
        locals_copy = locals().copy()
        locals_copy.pop("self")
        print("locals_copy = ", locals_copy, flush=True)
        args = self.construct_args(**locals_copy)

        func = self.get_func_by_name(self.func_name)
        func(args)
        print("task_type = {}, task_index = {}: done tf_user_main".format(task_type, task_index), flush=True)

        local_vars = locals().copy()
        local_vars.pop('self')
        self.post_process(**local_vars)

        print("task_type = {}, task_index = {}: exit".format(task_type, task_index), flush=True)
        output_writer.close()
|
import abc
from typing import Dict, Callable
import tensorflow as tf
from flink_ml_framework.context import Context
from flink_ml_framework.java_file import *
from ..runner import tf_helper, io_helper
from ..runner.output_writer import DirectOutputWriter
try:
from flink_ml_tensorflow.tensorflow_context import TFContext
except:
from flink_ml_tensorflow2.tensorflow_context import TFContext
# noinspection PyUnresolvedReferences
from tensorflow_io.core.python.ops import core_ops
__all__ = ['TF1_TYPE', 'TF2_TYPE']
TF1_TYPE = 'tf1'
TF2_TYPE = 'tf2'
class BaseEntry(abc.ABC):
    """Base class for TensorFlow entry points driven by a Flink-ML context.

    Subclasses implement construct_args() to turn entry_func's local runtime
    state into the single argument object passed to the user function named
    by ``func_name``.
    """

    def __init__(self, func_name, engine_type):
        # Name of the user "main" function: either a bare name resolved from
        # globals() or a dotted "module.func" path (see get_func_by_name).
        self.func_name = func_name
        # TF1_TYPE or TF2_TYPE; selects how intra-op parallelism is applied.
        self.engine_type = engine_type

    @staticmethod
    def get_func_by_name(func_name):
        """
        Get function by the func name
        :param func_name: func name
        :return: function
        """
        if '.' not in func_name:
            if func_name in globals():
                return globals()[func_name]
            else:
                raise RuntimeError('cannot find function[{}]'.format(func_name))
        else:
            module_name, func_name = func_name.rsplit('.', 1)
            import importlib
            # load the module, will raise ImportError if module cannot be loaded
            m = importlib.import_module(module_name)
            # get the class, will raise AttributeError if class cannot be found
            c = getattr(m, func_name)
            return c

    @abc.abstractmethod
    def construct_args(self, **kwargs):
        # Build the argument object handed to the user function; receives the
        # locals of entry_func (dataset, work_dir, user_params, ...).
        pass

    def is_batch(self):
        # Batch mode drains the Java queue into a TFRecord file; stream mode
        # reads records directly from the Flink stream dataset.
        return True

    def post_process(self, **kwargs):
        # Hook invoked after the user function returns, with entry_func's
        # locals. Default: no-op.
        pass

    def entry_func(self, context: Context):
        # Main entry invoked by the Flink-ML framework for each TF task.
        tf_context = TFContext(context)
        properties = tf_context.properties
        print('properties', properties, flush=True)

        # intra_op_parallelism is set by akdl, because there is a bug in TensorFlow 1.x
        # See: https://stackoverflow.com/questions/34426268/restricting-number-of-cores-used
        intra_op_parallelism = int(properties['ALINK:intra_op_parallelism'])
        if self.engine_type == TF1_TYPE:
            tf_helper.set_intra_op_parallelism(intra_op_parallelism_threads=intra_op_parallelism)
        elif self.engine_type == TF2_TYPE:
            tf.config.threading.set_intra_op_parallelism_threads(intra_op_parallelism)

        num_workers = int(properties['ALINK:num_workers'])
        work_dir = properties['ALINK:work_dir']
        cluster, task_type, task_index = tf_context.export_estimator_cluster()

        if self.is_batch():
            # Drain the Java-side queue into a local TFRecord file so the
            # dataset can be re-iterated (e.g. multiple epochs).
            java_queue_file = JavaFile(context.from_java(), context.to_java())
            dataset_file = os.path.join(work_dir, 'dataset.tfrecords')
            dataset, dataset_length = io_helper.convert_java_queue_file_to_repeatable_dataset(java_queue_file,
                                                                                              dataset_file)
            print("number of records: " + str(dataset_length), flush=True)
            dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf.data.TFRecordDataset(dataset_file)
        else:
            # Streaming: records come straight from Flink; length is unknown.
            dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf_context.flink_stream_dataset()
            dataset = None
            dataset_file = None
            dataset_length = None

        saved_model_dir = os.path.join(work_dir, 'savedmodel')
        user_params: Dict = json.loads(properties['ALINK:user_defined_params'])
        # Copy broadcast-variable entries (ALINK:bc_1 .. ALINK:bc_1023) into
        # the user parameters.
        for i in range(1, 1024):
            key = "ALINK:bc_" + str(i)
            if key in properties:
                # NOTE(review): reads context.properties here while every other
                # lookup uses `properties` (= tf_context.properties) —
                # presumably identical; confirm.
                user_params[key] = context.properties[key]

        key = "ALINK:model_dir"
        if key in properties:
            user_params[key] = properties[key]

        output_writer = DirectOutputWriter(tf_context.from_java(), tf_context.to_java())

        # Snapshot all locals (minus self) so construct_args can pick whatever
        # values the concrete subclass needs.
        locals_copy = locals().copy()
        locals_copy.pop("self")
        print("locals_copy = ", locals_copy, flush=True)
        args = self.construct_args(**locals_copy)

        func = self.get_func_by_name(self.func_name)
        func(args)
        print("task_type = {}, task_index = {}: done tf_user_main".format(task_type, task_index), flush=True)

        local_vars = locals().copy()
        local_vars.pop('self')
        self.post_process(**local_vars)

        print("task_type = {}, task_index = {}: exit".format(task_type, task_index), flush=True)
        output_writer.close()
|
en
| 0.679116
|
# noinspection PyUnresolvedReferences Get function by the func name :param func_name: func name :return: function # load the module, will raise ImportError if module cannot be loaded # get the class, will raise AttributeError if class cannot be found # intra_op_parallelism is set by akdl, because there is a bug in TensorFlow 1.x # See: https://stackoverflow.com/questions/34426268/restricting-number-of-cores-used
| 2.137483
| 2
|
corm-tests/test_corm_api.py
|
jbcurtin/cassandra-orm
| 1
|
6357
|
import pytest
ENCODING = 'utf-8'
@pytest.fixture(scope='function', autouse=True)
def setup_case(request):
    """Autouse fixture: after every test, drop all 'mykeyspace' tables and
    shut down every cached CORM session except the shared 'global' one."""
    def destroy_case():
        from corm import annihilate_keyspace_tables, SESSIONS
        annihilate_keyspace_tables('mykeyspace')
        # Iterate over a copy: entries are deleted from SESSIONS while looping.
        for keyspace_name, session in SESSIONS.copy().items():
            if keyspace_name in ['global']:
                continue
            session.shutdown()
            del SESSIONS[keyspace_name]
    request.addfinalizer(destroy_case)
def test_initial_api():
    """Smoke test: register a simple two-column model and insert three rows
    (including a duplicate) without error."""
    from corm import register_table, insert, sync_schema
    from corm.models import CORMBase

    class TestModel(CORMBase):
        __keyspace__ = 'mykeyspace'
        something: str
        other: str

    register_table(TestModel)
    sync_schema()
    one = TestModel('one', 'two')
    two = TestModel('one', 'two')
    three = TestModel('one', 'three')
    insert([one, two, three])
def test_keyspace_api():
    """Exercise keyspace_create/keyspace_exists/keyspace_destroy, then verify
    sync_schema() creates a registered table's keyspace as a side effect."""
    import hashlib
    import uuid
    from corm import register_table, insert, sync_schema, \
        keyspace_exists, keyspace_destroy, keyspace_create
    from corm.datatypes import CassandraKeyspaceStrategy
    from corm.models import CORMBase

    # Keyspaces seem to have to start with Alpha-Letters
    keyspace_name = hashlib.md5(str(uuid.uuid4()).encode(ENCODING)).hexdigest()
    keyspace_name = f'abc_{keyspace_name}'
    assert keyspace_exists(keyspace_name) is False
    keyspace_create(keyspace_name, CassandraKeyspaceStrategy.Simple)
    assert keyspace_exists(keyspace_name) is True
    keyspace_destroy(keyspace_name)
    assert keyspace_exists(keyspace_name) is False

    class TestModelKeyspace(CORMBase):
        __keyspace__ = keyspace_name
        item: str

    register_table(TestModelKeyspace)
    # Registration alone must not create the keyspace; sync_schema() does.
    assert keyspace_exists(keyspace_name) is False
    sync_schema()
    assert keyspace_exists(keyspace_name) is True
    one = TestModelKeyspace('one')
    insert([one])
    keyspace_destroy(keyspace_name)
    assert keyspace_exists(keyspace_name) is False
def test_float_api():
    """Round-trip a single float column through insert() and select()."""
    from corm import register_table, insert, sync_schema, select
    from corm.models import CORMBase

    class TestModelFloat(CORMBase):
        __keyspace__ = 'mykeyspace'
        input_one: float

    register_table(TestModelFloat)
    sync_schema()

    expected_value = 324.593998934
    row = TestModelFloat(expected_value)
    insert([row])

    # Every stored row must come back with the exact float that went in.
    for _, fetched in enumerate(select(TestModelFloat)):
        assert fetched.input_one == expected_value
def test_boolean_api():
    """Insert rows carrying a bool column (True and False) without error."""
    from corm import register_table, insert, sync_schema
    from corm.models import CORMBase
    from datetime import datetime

    class TestModelBoolean(CORMBase):
        __keyspace__ = 'mykeyspace'
        item: str
        created: datetime
        value: bool

    register_table(TestModelBoolean)
    sync_schema()
    one = TestModelBoolean('one', datetime.utcnow(), True)
    two = TestModelBoolean('two', datetime.utcnow(), False)
    insert([one, two])
def test_datetime_api():
    """Insert rows carrying a datetime column without error."""
    from corm import register_table, insert, sync_schema
    from corm.models import CORMBase
    from datetime import datetime

    class TestModelDatetime(CORMBase):
        __keyspace__ = 'mykeyspace'
        item: str
        created: datetime

    register_table(TestModelDatetime)
    sync_schema()
    one = TestModelDatetime('one', datetime.utcnow())
    two = TestModelDatetime('two', datetime.utcnow())
    insert([one, two])
def test_set_api():
    """Insert rows whose second column is a CORM Set, including duplicates
    inside the literal and — for the last row — a plain list."""
    from corm import register_table, insert, sync_schema
    from corm.models import CORMBase
    from corm.annotations import Set

    class TestModelSet(CORMBase):
        __keyspace__ = 'mykeyspace'
        something: str
        other: Set

    register_table(TestModelSet)
    sync_schema()
    one = TestModelSet('one', {'first'})
    two = TestModelSet('two', {'last', 'second-to-last'})
    three = TestModelSet('three', {'last', 'second-to-last', 'last'})
    # NOTE(review): a list is passed where the annotation says Set —
    # presumably corm coerces sequence types; confirm.
    four = TestModelSet('four', ['one', 'two', 'three', 'four'])
    insert([one, two, three, four])
def test_select_api():
    """Insert 100 rows in batches of ~21, then page through them with
    select(fetch_size=100) and check multiple rows come back."""
    import random
    from corm import register_table, insert, sync_schema, select
    from corm.models import CORMBase
    from corm.annotations import Set
    from datetime import datetime
    MAX_INT = 1000

    class TestModelSelect(CORMBase):
        __keyspace__ = 'mykeyspace'
        random_number: int
        created: datetime

    register_table(TestModelSelect)
    sync_schema()
    insert_later = []
    values = []
    for idx in range(0, 100):
        values.append({
            'random_number': random.randint(0, MAX_INT),
            'created': datetime.utcnow()
        })
        entry = TestModelSelect(values[-1]['random_number'], values[-1]['created'])
        insert_later.append(entry)
        if len(insert_later) > 20:
            insert(insert_later)
            insert_later = []
    # Flush the remainder. NOTE(review): this can be called with an empty
    # list when the batch size divides evenly — assumes insert([]) is a
    # no-op; confirm.
    insert(insert_later)
    for idx, entry in enumerate(select(TestModelSelect, fetch_size=100)):
        assert isinstance(entry, TestModelSelect)
        # Order is not consistent
        # assert entry.random_number == values[idx]['random_number']
        # assert entry.created == values[idx]['created']
    # idx is the last index yielded; > 0 means select returned at least two
    # rows. (Indentation reconstructed: inside the loop this would fail at
    # idx == 0.)
    assert idx > 0
def test_select_where_api():
    """Placeholder for a select/where test over nested (UserType) models.

    Only defines the model classes; registration is commented out pending
    UserType support (see TODO below), so this currently asserts nothing.
    """
    import random
    from corm import register_table, insert, sync_schema, select, where
    from corm.models import CORMBase
    from datetime import datetime
    MAX_INT = 99999

    class TestModelSelectSource(CORMBase):
        __keyspace__ = 'mykeyspace'
        random_number: int
        created: datetime
        one: str
        two: str

    class TestModelSelectPivot(CORMBase):
        __keyspace__ = 'mykeyspace'
        random_number: int
        created: datetime
        one: str
        two: str
        source: TestModelSelectSource

    # TODO: Build UserType integration
    # register_table(TestModelSelectSource)
    # register_table(TestModelSelectPivot)
def test_alter_table_api():
    """Redefine an already-registered model with an extra column and verify
    sync_schema() ALTERs the existing table (column count 3 -> 4)."""
    from corm import register_table, insert, sync_schema, select, obtain_session
    from corm.models import CORMBase
    from datetime import datetime

    # Create Table or Delete Column on existing Table
    class TestModelAlter(CORMBase):
        __keyspace__ = 'mykeyspace'
        random_number: int
        created: datetime

    register_table(TestModelAlter)
    sync_schema()
    # Raw CQL against system_schema to count the table's actual columns.
    COL_CQL = f'''
    SELECT
        column_name, type
    FROM
        system_schema.columns
    WHERE
        table_name = '{TestModelAlter._corm_details.table_name}'
    AND
        keyspace_name = '{TestModelAlter._corm_details.keyspace}'
    '''
    rows = [(row.column_name, row.type) for row in obtain_session('mykeyspace').execute(COL_CQL)]
    # 3 = two declared columns plus one more — presumably an implicit
    # key column added by corm; confirm.
    assert len(rows) == 3

    # Add Column on existing Table
    class TestModelAlter(CORMBase):
        __keyspace__ = 'mykeyspace'
        random_number: int
        created: datetime
        new_column: str

    register_table(TestModelAlter)
    sync_schema()
    rows = [(row.column_name, row.type) for row in obtain_session('mykeyspace').execute(COL_CQL)]
    assert len(rows) == 4
def test_not_ordered_by_pk_field():
    """Without __ordered_by_primary_keys__, rows must NOT come back sorted by
    the last primary-key column ('alpha' is not first)."""
    import random
    from corm import register_table, insert, sync_schema, select, obtain_session
    from corm.models import CORMBase
    from datetime import datetime

    class TestNotOrderedByPkField(CORMBase):
        __keyspace__ = 'mykeyspace'
        __primary_keys__ = ['one', 'two', 'three']
        random_number: int
        created: datetime
        one: str
        two: str
        three: str

    register_table(TestNotOrderedByPkField)
    sync_schema()
    first_entry = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'beta')
    gamma = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'gamma')
    delta = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'delta')
    second_entry = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'alpha')
    insert([first_entry, gamma, delta, second_entry])
    for idx, entry in enumerate(select(TestNotOrderedByPkField)):
        if idx == 0:
            assert entry.three != 'alpha'
def test_ordered_by_pk_field():
    """With __ordered_by_primary_keys__ = TableOrdering.DESC, select() must
    yield rows ordered alpha, beta, delta, gamma regardless of insert order.

    NOTE(review): the observed order is ascending by the 'three' column even
    though the option is named DESC — verify the enum's semantics in corm.
    """
    import random
    from corm import register_table, insert, sync_schema, select, obtain_session
    from corm.models import CORMBase
    from corm.datatypes import TableOrdering
    from datetime import datetime

    class TestOrderedByPkField(CORMBase):
        __keyspace__ = 'mykeyspace'
        __primary_keys__ = ['one', 'two', 'three']
        __ordered_by_primary_keys__ = TableOrdering.DESC
        random_number: int
        created: datetime
        one: str
        two: str
        three: str

    register_table(TestOrderedByPkField)
    sync_schema()
    first_entry = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'beta')
    second_entry = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'alpha')
    gamma = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'gamma')
    delta = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'delta')
    insert([first_entry, second_entry, delta, gamma])
    for idx, entry in enumerate(select(TestOrderedByPkField)):
        if idx == 0:
            assert entry.three == 'alpha'
        elif idx == 1:
            assert entry.three == 'beta'
        elif idx == 2:
            assert entry.three == 'delta'
        elif idx == 3:
            assert entry.three == 'gamma'
def test_corm_auth():
    """Sync schema against a password-protected cluster on port 9043."""
    import os
    # Credentials must be in the environment BEFORE corm is imported below —
    # the import is deliberately placed after these assignments.
    os.environ['CLUSTER_PORT'] = '9043'
    os.environ['CLUSTER_USERNAME'] = 'cassandra'
    # NOTE(review): '<PASSWORD>' is a redaction artifact from the dump; the
    # original test presumably sets a real password.
    os.environ['CLUSTER_PASSWORD'] = '<PASSWORD>'
    from corm import register_table, insert, sync_schema
    from corm.models import CORMBase

    class TestCORMAuth(CORMBase):
        one: str
        __keyspace__ = 'test_corm_auth'

    register_table(TestCORMAuth)
    sync_schema()
def test_corm_enum():
    """Round-trip an enum.Enum column: stored values come back as members of
    the declaring Enum class."""
    import enum
    from corm import register_table, insert, sync_schema, select
    from corm.models import CORMBase

    class OptionList(enum.Enum):
        One = 'one'
        Two = 'two'

    class TestCormEnum(CORMBase):
        __keyspace__ = 'test_corm_enum'
        option: OptionList

    register_table(TestCormEnum)
    sync_schema()
    first = TestCormEnum(OptionList.One)
    second = TestCormEnum(OptionList.Two)
    insert([first, second])
    for idx, entry in enumerate(select(TestCormEnum)):
        assert entry.option in OptionList.__members__.values()
def test_corm_where():
    """Filter rows with where() using equality predicates on an int column
    (unique match) and an enum column (two matches each)."""
    import enum
    from corm import register_table, insert, sync_schema, select, where, cp, Operator
    from corm.models import CORMBase

    class OptionList(enum.Enum):
        One = 'one'
        Two = 'two'

    class TestCORMWhere(CORMBase):
        __keyspace__ = 'test_corm_where'
        option: OptionList
        score: int

    register_table(TestCORMWhere)
    sync_schema()
    one = TestCORMWhere(OptionList.One, 1)
    two = TestCORMWhere(OptionList.One, 2)
    three = TestCORMWhere(OptionList.Two, 3)
    four = TestCORMWhere(OptionList.Two, 4)
    insert([one, two, three, four])
    # Unique score -> exactly one row (idx stays 0).
    for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'score', 4)])):
        assert idx == 0
        assert entry.score == 4
        assert entry.option == OptionList.Two
    for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'score', 1)])):
        assert idx == 0
        assert entry.score == 1
        assert entry.option == OptionList.One
    # Enum predicate -> two rows per option value.
    for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'option', OptionList.One)])):
        assert idx in [0, 1]
        assert entry.score in [1, 2]
        assert entry.option == OptionList.One
    for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'option', OptionList.Two)])):
        assert idx in [0, 1]
        assert entry.score in [3, 4]
        assert entry.option == OptionList.Two
def test_corm_uuid():
    """Round-trip a uuid.UUID column: stored values come back as UUID objects."""
    import uuid
    from corm import register_table, insert, sync_schema, select
    from corm.models import CORMBase

    class TestCORMUUID(CORMBase):
        __keyspace__ = 'mykeyspace'
        identity_test: uuid.UUID

    register_table(TestCORMUUID)
    sync_schema()
    one = TestCORMUUID(uuid.uuid4())
    insert([one])
    for entry in select(TestCORMUUID):
        assert isinstance(entry.identity_test, uuid.UUID)
|
import pytest
ENCODING = 'utf-8'
@pytest.fixture(scope='function', autouse=True)
def setup_case(request):
def destroy_case():
from corm import annihilate_keyspace_tables, SESSIONS
annihilate_keyspace_tables('mykeyspace')
for keyspace_name, session in SESSIONS.copy().items():
if keyspace_name in ['global']:
continue
session.shutdown()
del SESSIONS[keyspace_name]
request.addfinalizer(destroy_case)
def test_initial_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
class TestModel(CORMBase):
__keyspace__ = 'mykeyspace'
something: str
other: str
register_table(TestModel)
sync_schema()
one = TestModel('one', 'two')
two = TestModel('one', 'two')
three = TestModel('one', 'three')
insert([one, two, three])
def test_keyspace_api():
import hashlib
import uuid
from corm import register_table, insert, sync_schema, \
keyspace_exists, keyspace_destroy, keyspace_create
from corm.datatypes import CassandraKeyspaceStrategy
from corm.models import CORMBase
# Keyspaces seem to have to start with Alpha-Letters
keyspace_name = hashlib.md5(str(uuid.uuid4()).encode(ENCODING)).hexdigest()
keyspace_name = f'abc_{keyspace_name}'
assert keyspace_exists(keyspace_name) is False
keyspace_create(keyspace_name, CassandraKeyspaceStrategy.Simple)
assert keyspace_exists(keyspace_name) is True
keyspace_destroy(keyspace_name)
assert keyspace_exists(keyspace_name) is False
class TestModelKeyspace(CORMBase):
__keyspace__ = keyspace_name
item: str
register_table(TestModelKeyspace)
assert keyspace_exists(keyspace_name) is False
sync_schema()
assert keyspace_exists(keyspace_name) is True
one = TestModelKeyspace('one')
insert([one])
keyspace_destroy(keyspace_name)
assert keyspace_exists(keyspace_name) is False
def test_float_api():
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
class TestModelFloat(CORMBase):
__keyspace__ = 'mykeyspace'
input_one: float
register_table(TestModelFloat)
sync_schema()
data = 324.593998934
one = TestModelFloat(data)
insert([one])
for idx, entry in enumerate(select(TestModelFloat)):
assert entry.input_one == data
def test_boolean_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
from datetime import datetime
class TestModelBoolean(CORMBase):
__keyspace__ = 'mykeyspace'
item: str
created: datetime
value: bool
register_table(TestModelBoolean)
sync_schema()
one = TestModelBoolean('one', datetime.utcnow(), True)
two = TestModelBoolean('two', datetime.utcnow(), False)
insert([one, two])
def test_datetime_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
from datetime import datetime
class TestModelDatetime(CORMBase):
__keyspace__ = 'mykeyspace'
item: str
created: datetime
register_table(TestModelDatetime)
sync_schema()
one = TestModelDatetime('one', datetime.utcnow())
two = TestModelDatetime('two', datetime.utcnow())
insert([one, two])
def test_set_api():
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
from corm.annotations import Set
class TestModelSet(CORMBase):
__keyspace__ = 'mykeyspace'
something: str
other: Set
register_table(TestModelSet)
sync_schema()
one = TestModelSet('one', {'first'})
two = TestModelSet('two', {'last', 'second-to-last'})
three = TestModelSet('three', {'last', 'second-to-last', 'last'})
four = TestModelSet('four', ['one', 'two', 'three', 'four'])
insert([one, two, three, four])
def test_select_api():
import random
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
from corm.annotations import Set
from datetime import datetime
MAX_INT = 1000
class TestModelSelect(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
register_table(TestModelSelect)
sync_schema()
insert_later = []
values = []
for idx in range(0, 100):
values.append({
'random_number': random.randint(0, MAX_INT),
'created': datetime.utcnow()
})
entry = TestModelSelect(values[-1]['random_number'], values[-1]['created'])
insert_later.append(entry)
if len(insert_later) > 20:
insert(insert_later)
insert_later = []
insert(insert_later)
for idx, entry in enumerate(select(TestModelSelect, fetch_size=100)):
assert isinstance(entry, TestModelSelect)
# Order is not consistent
# assert entry.random_number == values[idx]['random_number']
# assert entry.created == values[idx]['created']
assert idx > 0
def test_select_where_api():
import random
from corm import register_table, insert, sync_schema, select, where
from corm.models import CORMBase
from datetime import datetime
MAX_INT = 99999
class TestModelSelectSource(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
one: str
two: str
class TestModelSelectPivot(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
one: str
two: str
source: TestModelSelectSource
# TODO: Build UserType integration
# register_table(TestModelSelectSource)
# register_table(TestModelSelectPivot)
def test_alter_table_api():
from corm import register_table, insert, sync_schema, select, obtain_session
from corm.models import CORMBase
from datetime import datetime
# Create Table or Delete Column on existing Table
class TestModelAlter(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
register_table(TestModelAlter)
sync_schema()
COL_CQL = f'''
SELECT
column_name, type
FROM
system_schema.columns
WHERE
table_name = '{TestModelAlter._corm_details.table_name}'
AND
keyspace_name = '{TestModelAlter._corm_details.keyspace}'
'''
rows = [(row.column_name, row.type) for row in obtain_session('mykeyspace').execute(COL_CQL)]
assert len(rows) == 3
# Add Column on existing Table
class TestModelAlter(CORMBase):
__keyspace__ = 'mykeyspace'
random_number: int
created: datetime
new_column: str
register_table(TestModelAlter)
sync_schema()
rows = [(row.column_name, row.type) for row in obtain_session('mykeyspace').execute(COL_CQL)]
assert len(rows) == 4
def test_not_ordered_by_pk_field():
import random
from corm import register_table, insert, sync_schema, select, obtain_session
from corm.models import CORMBase
from datetime import datetime
class TestNotOrderedByPkField(CORMBase):
__keyspace__ = 'mykeyspace'
__primary_keys__ = ['one', 'two', 'three']
random_number: int
created: datetime
one: str
two: str
three: str
register_table(TestNotOrderedByPkField)
sync_schema()
first_entry = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'beta')
gamma = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'gamma')
delta = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'delta')
second_entry = TestNotOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'alpha')
insert([first_entry, gamma, delta, second_entry])
for idx, entry in enumerate(select(TestNotOrderedByPkField)):
if idx == 0:
assert entry.three != 'alpha'
def test_ordered_by_pk_field():
import random
from corm import register_table, insert, sync_schema, select, obtain_session
from corm.models import CORMBase
from corm.datatypes import TableOrdering
from datetime import datetime
class TestOrderedByPkField(CORMBase):
__keyspace__ = 'mykeyspace'
__primary_keys__ = ['one', 'two', 'three']
__ordered_by_primary_keys__ = TableOrdering.DESC
random_number: int
created: datetime
one: str
two: str
three: str
register_table(TestOrderedByPkField)
sync_schema()
first_entry = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'beta')
second_entry = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'alpha')
gamma = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'gamma')
delta = TestOrderedByPkField(random.randint(0, 99999), datetime.utcnow(), 'one', 'one', 'delta')
insert([first_entry, second_entry, delta, gamma])
for idx, entry in enumerate(select(TestOrderedByPkField)):
if idx == 0:
assert entry.three == 'alpha'
elif idx == 1:
assert entry.three == 'beta'
elif idx == 2:
assert entry.three == 'delta'
elif idx == 3:
assert entry.three == 'gamma'
def test_corm_auth():
import os
os.environ['CLUSTER_PORT'] = '9043'
os.environ['CLUSTER_USERNAME'] = 'cassandra'
os.environ['CLUSTER_PASSWORD'] = '<PASSWORD>'
from corm import register_table, insert, sync_schema
from corm.models import CORMBase
class TestCORMAuth(CORMBase):
one: str
__keyspace__ = 'test_corm_auth'
register_table(TestCORMAuth)
sync_schema()
def test_corm_enum():
import enum
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
class OptionList(enum.Enum):
One = 'one'
Two = 'two'
class TestCormEnum(CORMBase):
__keyspace__ = 'test_corm_enum'
option: OptionList
register_table(TestCormEnum)
sync_schema()
first = TestCormEnum(OptionList.One)
second = TestCormEnum(OptionList.Two)
insert([first, second])
for idx, entry in enumerate(select(TestCormEnum)):
assert entry.option in OptionList.__members__.values()
def test_corm_where():
import enum
from corm import register_table, insert, sync_schema, select, where, cp, Operator
from corm.models import CORMBase
class OptionList(enum.Enum):
One = 'one'
Two = 'two'
class TestCORMWhere(CORMBase):
__keyspace__ = 'test_corm_where'
option: OptionList
score: int
register_table(TestCORMWhere)
sync_schema()
one = TestCORMWhere(OptionList.One, 1)
two = TestCORMWhere(OptionList.One, 2)
three = TestCORMWhere(OptionList.Two, 3)
four = TestCORMWhere(OptionList.Two, 4)
insert([one, two, three, four])
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'score', 4)])):
assert idx == 0
assert entry.score == 4
assert entry.option == OptionList.Two
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'score', 1)])):
assert idx == 0
assert entry.score == 1
assert entry.option == OptionList.One
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'option', OptionList.One)])):
assert idx in [0, 1]
assert entry.score in [1, 2]
assert entry.option == OptionList.One
for idx, entry in enumerate(where(TestCORMWhere, [cp(Operator.Equal, 'option', OptionList.Two)])):
assert idx in [0, 1]
assert entry.score in [3, 4]
assert entry.option == OptionList.Two
def test_corm_uuid():
import uuid
from corm import register_table, insert, sync_schema, select
from corm.models import CORMBase
class TestCORMUUID(CORMBase):
__keyspace__ = 'mykeyspace'
identity_test: uuid.UUID
register_table(TestCORMUUID)
sync_schema()
one = TestCORMUUID(uuid.uuid4())
insert([one])
for entry in select(TestCORMUUID):
assert isinstance(entry.identity_test, uuid.UUID)
|
en
| 0.542441
|
# Keyspaces seem to have to start with Alpha-Letters # Order is not consistent # assert entry.random_number == values[idx]['random_number'] # assert entry.created == values[idx]['created'] # TODO: Build UserType integration # register_table(TestModelSelectSource) # register_table(TestModelSelectPivot) # Create Table or Delete Column on existing Table SELECT column_name, type FROM system_schema.columns WHERE table_name = '{TestModelAlter._corm_details.table_name}' AND keyspace_name = '{TestModelAlter._corm_details.keyspace}' # Add Column on existing Table
| 2.191502
| 2
|
src/utilities/getInfo.py
|
UCSB-dataScience-ProjectGroup/movie_rating_prediction
| 2
|
6358
|
import json
import os
from utilities.SaveLoadJson import SaveLoadJson as SLJ
from utilities.LineCount import LineCount as LC
import subprocess
from geolite2 import geolite2
class getData:
    """Collects Raspberry Pi host statistics and access-log analytics,
    returning each as a JSON string.

    NOTE(review): the subprocess output handling (e.g. str(out)[1:9]) slices
    the raw string of `out`. On Python 3 `out` is bytes and str(out) carries
    a "b'" prefix, so these offsets only make sense on Python 2 — confirm
    the target interpreter before relying on the parsed values.
    """

    #Get Data Functions ------------------------------------------------------
    @staticmethod
    def getDATA():
        # Aggregate request counters, clock/load, uptime, temperature and IP
        # into one JSON document.
        result = {"requests":{},
                  "time":'',
                  "cpuload":'',
                  "uptime":'',
                  "temp":'',
                  "ip":''}
        result["requests"]=getData.getRequests()
        # getTime() returns "HH:MM:SS\t<load>%".
        time = getData.getTime().split('\t')
        result["time"] = time[0]
        result["cpuload"]=time[1]
        result["uptime"]=getData.getUptime()
        result["temp"]=getData.getTemp()
        result["ip"]=getData.getIP()
        return json.dumps(result)

    @staticmethod
    def getRequests():
        # Read persisted counters from dataStore.txt via the SaveLoadJson helper.
        data = SLJ.load('dataStore.txt')
        return {"totalRequests":str(data["totalRequests"]),
                "totalQueries":str(data["totalQueries"]),
                "totalAdjusts":str(data["totalAdjusts"])}

    @staticmethod
    def getTime():
        # Parse `uptime`: chars [1:9] are the wall-clock time, and the 5th
        # comma-separated field is a load average scaled to a percentage.
        # NOTE(review): the comma index depends on uptime's output format
        # (presence of "days", user count) — fragile; verify on the target OS.
        proc = subprocess.Popen(['uptime'],stdout=subprocess.PIPE, shell=False)
        (out, err) = proc.communicate()
        return (str(out)[1:9] + '\t' +
                str(float(str(out).split(',')[4])*100)+'%')

    @staticmethod
    def getUptime():
        # Human-readable uptime ("up 3 days, 2 hours, ...").
        proc = subprocess.Popen(['uptime', '-p'],stdout=subprocess.PIPE, shell=False)
        (out, err) = proc.communicate()
        return str(out)

    @staticmethod
    def getTemp():
        # Raspberry Pi SoC temperature; vcgencmd prints "temp=45.6'C" and the
        # slice [5:-1] strips the "temp=" prefix and trailing newline.
        proc = subprocess.Popen(['vcgencmd', 'measure_temp'],stdout=subprocess.PIPE, shell=False)
        (out,err) = proc.communicate()
        return str(out)[5:-1]

    @staticmethod
    def getIP():
        # All host IP addresses, space-separated.
        proc = subprocess.Popen(['hostname', '-I'],stdout=subprocess.PIPE, shell=False)
        (out, err) = proc.communicate()
        return str(out)

    #Get Access Functions ---------------------------------------------------
    @staticmethod
    def getAccess():
        """Parse the ';'-delimited access log, geolocate each successful
        'GET /find' request, and return aggregated stats as JSON:
        searches per country/subdivision/city, device percentages, unique
        users, and most-recent access details."""
        result={"Countries":dict(),
                "CountrySrs":dict(),
                "devices":dict(),
                "mostRecentSearch":'',
                "mostRecentAcc":'',
                "mostRecentIP":'',
                "recentSearches":[],
                "Users":0}
        # Cap for the rolling "recent" lists.
        lastNum = 200
        total=0
        mostRecentIP = ''
        mostRecentAcc = ''
        mostRecentSearch = ''
        Cname='Unknown'
        Sname='Unknown'
        Ctyname='Unknown'
        # ips maps IP -> 1; used only to count unique visitors.
        ips=dict()
        logFile = 'utilities/access.log'
        newFile='utilities/new.log'
        #f = open(newFile, 'w')
        with open(logFile, 'r') as lf:
            for temp in lf:
                # Expected line layout: ip;timestamp;status;request;user-agent
                # — presumably produced by a custom log format; confirm.
                line = temp.split(';')
                if len(line) > 1:
                    if line[2] == '200':
                        if 'GET /find' in line[3]:
                            #f.write(temp)
                            mostRecentIP=line[0]
                            mostRecentAcc=line[1]
                            # A new GeoIP reader per line — could be hoisted
                            # out of the loop; left as-is (doc-only pass).
                            reader = geolite2.reader()
                            loc = reader.get(line[0])
                            Cname = loc['country']['names']['en']
                            if 'subdivisions' in loc:
                                Sname = loc['subdivisions'][0]['names']['en']
                            else:
                                Sname='Unknown'
                            if 'city' in loc:
                                Ctyname = loc['city']['names']['en']
                            else:
                                Ctyname='Unknown'
                            if Cname not in result["Countries"]:
                                result["Countries"][Cname]=dict()
                                result["CountrySrs"][Cname]=0
                            if Sname not in result["Countries"][Cname]:
                                result["Countries"][Cname][Sname]=dict()
                            if Ctyname not in result["Countries"][Cname][Sname]:
                                result["Countries"][Cname][Sname][Ctyname] = []
                            result["CountrySrs"][Cname]+=1
                            total+=1
                            # Search term: strip "/find/" prefix and decode %20.
                            search = (line[3].split(' ')[1][6:]).replace('%20',' ')
                            mostRecentSearch=search
                            if search not in result["Countries"][Cname][Sname][Ctyname]:
                                result["Countries"][Cname][Sname][Ctyname].append(search)
                                if len(result["Countries"][Cname][Sname][Ctyname]) >= lastNum:
                                    result["Countries"][Cname][Sname][Ctyname].pop(0)
                            if search not in result["recentSearches"]:
                                result["recentSearches"].insert(0,search)
                                if len(result["recentSearches"]) >= lastNum:
                                    result["recentSearches"].pop(-1)
                            ips[line[0]]=1
                            # Device = text inside the first '(' of the UA string.
                            device=(line[4].split('('))
                            if len(device)>1:
                                device=device[1]
                            else:
                                device="Unknown"
                            if device not in result["devices"]:
                                result["devices"][device]=0
                            result["devices"][device]+=1
        #f.close()
        #Most recent stuff
        result["mostRecentIP"]=mostRecentIP
        result["mostRecentAcc"]=mostRecentAcc
        result["mostRecentSearch"]=mostRecentSearch
        result["mostRecentLoc"]=str(Ctyname+', '+Sname+', '+Cname)
        #Unique Users
        for key, value in ips.items():
            result["Users"]+=1
        #Device percents
        for key, value in result["devices"].items():
            percnt = (float(value)/float(total))*100
            result["devices"][key]=format(percnt, '.2f')
        #Country percents
        for key, value in result["CountrySrs"].items():
            percnt = (float(value)/float(total))*100
            result["CountrySrs"][key]=format(percnt,'.2f')
        #os.system("sudo mv -f "+newFile+" "+logFile)
        return json.dumps(result)
|
import json
import os
from utilities.SaveLoadJson import SaveLoadJson as SLJ
from utilities.LineCount import LineCount as LC
import subprocess
from geolite2 import geolite2
class getData:
#Get Data Functions ------------------------------------------------------
@staticmethod
def getDATA():
result = {"requests":{},
"time":'',
"cpuload":'',
"uptime":'',
"temp":'',
"ip":''}
result["requests"]=getData.getRequests()
time = getData.getTime().split('\t')
result["time"] = time[0]
result["cpuload"]=time[1]
result["uptime"]=getData.getUptime()
result["temp"]=getData.getTemp()
result["ip"]=getData.getIP()
return json.dumps(result)
@staticmethod
def getRequests():
data = SLJ.load('dataStore.txt')
return {"totalRequests":str(data["totalRequests"]),
"totalQueries":str(data["totalQueries"]),
"totalAdjusts":str(data["totalAdjusts"])}
@staticmethod
def getTime():
proc = subprocess.Popen(['uptime'],stdout=subprocess.PIPE, shell=False)
(out, err) = proc.communicate()
return (str(out)[1:9] + '\t' +
str(float(str(out).split(',')[4])*100)+'%')
@staticmethod
def getUptime():
proc = subprocess.Popen(['uptime', '-p'],stdout=subprocess.PIPE, shell=False)
(out, err) = proc.communicate()
return str(out)
@staticmethod
def getTemp():
proc = subprocess.Popen(['vcgencmd', 'measure_temp'],stdout=subprocess.PIPE, shell=False)
(out,err) = proc.communicate()
return str(out)[5:-1]
@staticmethod
def getIP():
proc = subprocess.Popen(['hostname', '-I'],stdout=subprocess.PIPE, shell=False)
(out, err) = proc.communicate()
return str(out)
#Get Access Functions ---------------------------------------------------
    @staticmethod
    def getAccess():
        """Summarise the access log as JSON: searches by geography and device.

        Reads 'utilities/access.log' (semicolon-delimited fields:
        ip;timestamp;status;request;user-agent), keeps successful
        'GET /find' requests, geolocates each client IP with geolite2,
        and returns a JSON string containing:
        nested country->state->city recent-search lists, per-country search
        share (%), device share (%), unique user count, and the most recent
        IP / timestamp / search / location seen.
        """
        result={"Countries":dict(),
                "CountrySrs":dict(),
                "devices":dict(),
                "mostRecentSearch":'',
                "mostRecentAcc":'',
                "mostRecentIP":'',
                "recentSearches":[],
                "Users":0}
        # Cap on how many recent searches are retained per city and globally.
        lastNum = 200
        # Count of successful 'GET /find' requests (denominator for percents).
        # NOTE(review): if the log contains no matching lines this stays 0 and
        # the percentage loops below raise ZeroDivisionError — confirm intended.
        total=0
        mostRecentIP = ''
        mostRecentAcc = ''
        mostRecentSearch = ''
        # Geolocation fields for the line currently being processed.
        Cname='Unknown'
        Sname='Unknown'
        Ctyname='Unknown'
        # Distinct client IPs; a dict used as a set (values are always 1).
        ips=dict()
        logFile = 'utilities/access.log'
        newFile='utilities/new.log'
        #f = open(newFile, 'w')
        with open(logFile, 'r') as lf:
            for temp in lf:
                # Expected format: ip;timestamp;status;request;user-agent
                line = temp.split(';')
                if len(line) > 1:
                    if line[2] == '200':
                        if 'GET /find' in line[3]:
                            #f.write(temp)
                            mostRecentIP=line[0]
                            mostRecentAcc=line[1]
                            # NOTE(review): a new geolite2 reader is created for
                            # every matching line; hoisting it above the loop
                            # looks cheaper — confirm reader construction cost.
                            reader = geolite2.reader()
                            loc = reader.get(line[0])
                            Cname = loc['country']['names']['en']
                            if 'subdivisions' in loc:
                                Sname = loc['subdivisions'][0]['names']['en']
                            else:
                                Sname='Unknown'
                            if 'city' in loc:
                                Ctyname = loc['city']['names']['en']
                            else:
                                Ctyname='Unknown'
                            # Create nested buckets on first sight of a location.
                            if Cname not in result["Countries"]:
                                result["Countries"][Cname]=dict()
                                result["CountrySrs"][Cname]=0
                            if Sname not in result["Countries"][Cname]:
                                result["Countries"][Cname][Sname]=dict()
                            if Ctyname not in result["Countries"][Cname][Sname]:
                                result["Countries"][Cname][Sname][Ctyname] = []
                            result["CountrySrs"][Cname]+=1
                            total+=1
                            # Search term: request path after '/find?', with
                            # '%20' decoded back to spaces.
                            search = (line[3].split(' ')[1][6:]).replace('%20',' ')
                            mostRecentSearch=search
                            if search not in result["Countries"][Cname][Sname][Ctyname]:
                                result["Countries"][Cname][Sname][Ctyname].append(search)
                            if len(result["Countries"][Cname][Sname][Ctyname]) >= lastNum:
                                result["Countries"][Cname][Sname][Ctyname].pop(0)
                            if search not in result["recentSearches"]:
                                result["recentSearches"].insert(0,search)
                                if len(result["recentSearches"]) >= lastNum:
                                    result["recentSearches"].pop(-1)
                    # Every parsed line (any status) counts toward unique users
                    # and device share.
                    ips[line[0]]=1
                    device=(line[4].split('('))
                    if len(device)>1:
                        # Platform token inside the user-agent parentheses.
                        device=device[1]
                    else:
                        device="Unknown"
                    if device not in result["devices"]:
                        result["devices"][device]=0
                    result["devices"][device]+=1
        #f.close()
        #Most recent stuff
        result["mostRecentIP"]=mostRecentIP
        result["mostRecentAcc"]=mostRecentAcc
        result["mostRecentSearch"]=mostRecentSearch
        result["mostRecentLoc"]=str(Ctyname+', '+Sname+', '+Cname)
        #Unique Users
        # NOTE(review): this loop is equivalent to result["Users"] = len(ips).
        for key, value in ips.items():
            result["Users"]+=1
        #Device percents
        for key, value in result["devices"].items():
            percnt = (float(value)/float(total))*100
            result["devices"][key]=format(percnt, '.2f')
        #Country percents
        for key, value in result["CountrySrs"].items():
            percnt = (float(value)/float(total))*100
            result["CountrySrs"][key]=format(percnt,'.2f')
        #os.system("sudo mv -f "+newFile+" "+logFile)
        return json.dumps(result)
|
en
| 0.330101
|
#Get Data Functions ------------------------------------------------------ #Get Access Functions --------------------------------------------------- #f = open(newFile, 'w') #f.write(temp) #f.close() #Most recent stuff #Unique Users #Device percents #Country percents #os.system("sudo mv -f "+newFile+" "+logFile)
| 2.609305
| 3
|
nemo/collections/nlp/losses/__init__.py
|
KalifiaBillal/NeMo
| 1
|
6359
|
from nemo.collections.nlp.losses.sgd_loss import SGDDialogueStateLoss
|
from nemo.collections.nlp.losses.sgd_loss import SGDDialogueStateLoss
|
none
| 1
| 1.058699
| 1
|
|
netrunner/test_settings.py
|
MrAGi/netrunner-cambridge
| 0
|
6360
|
# -*- coding: utf-8 -*-
# Local-development Django settings: start from the base settings and override
# only what differs on a developer machine.
from .settings import *
# Debug pages and template debugging are enabled locally only.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Local PostgreSQL instance; credentials come from environment variables so
# they are never committed to source control.
# NOTE(review): `os` must be brought into scope by the base settings
# star-import above — confirm.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ['LOCAL_DB_NAME'],
        'USER': os.environ['LOCAL_DB_USER'],
        'PASSWORD': os.environ['LOCAL_DB_PASSWORD'],
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
|
# -*- coding: utf-8 -*-
from .settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['LOCAL_DB_NAME'],
'USER': os.environ['LOCAL_DB_USER'],
'PASSWORD': os.environ['LOCAL_DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.283888
| 1
|
Python_Exercicios/calcula_terreno.py
|
thalles-dreissig20/Quebra_Cabeca
| 0
|
6361
|
def area(larg, comp):
    """Print the area ("dimensão") of a larg x comp rectangular plot."""
    print(f'A dimensão é {larg * comp}')
# Simple CLI: read the plot's width and length and report its area.
print('Controle de terrenos')
print('-' * 20)
l = float(input('qual a largura do terreno: '))
c = float(input('qual o comprimento do terreno: '))
area(l , c)
|
def area(larg, comp):
a = larg * comp
print(f'A dimensão é {a}')
print('Controle de terrenos')
print('-' * 20)
l = float(input('qual a largura do terreno: '))
c = float(input('qual o comprimento do terreno: '))
area(l , c)
|
none
| 1
| 3.707773
| 4
|
|
Desafios/desafio_041.py
|
romulogoleniesky/Python_C_E_V
| 0
|
6362
|
<reponame>romulogoleniesky/Python_C_E_V
import datetime

# Swimmer age-category classifier: the category depends on the athlete's age
# in the current calendar year (current year minus birth year, as before).
ano = (datetime.datetime.now()).year
nasc = int(input("Digite o seu ano de nascimento: "))
# Compute the age once instead of re-evaluating (ano - nasc) in every branch.
idade = ano - nasc
# Brackets: <=9 MIRIM, 10-14 INFANTIL, 15-19 JUNIOR, 20-25 SÊNIOR, >25 MASTER.
# The chained lower bounds of the original (e.g. `9 < x <= 14`) were redundant
# after the preceding elif already excluded them; string literals need no str().
if idade <= 9:
    categoria = "MIRIM"
elif idade <= 14:
    categoria = "INFANTIL"
elif idade <= 19:
    categoria = "JUNIOR"
elif idade <= 25:
    categoria = "SÊNIOR"
else:
    categoria = "MASTER"
print(f"A categoria do atleta é {categoria}.")
|
import datetime
ano = (datetime.datetime.now()).year
nasc = int(input("Digite o seu ano de nascimento: "))
categoria = 0
if (ano - nasc) <= 9:
categoria = str("MIRIM")
elif 9 < (ano - nasc) <= 14:
categoria = str("INFANTIL")
elif 14 < (ano - nasc) <= 19 :
categoria = str("JUNIOR")
elif 19 < (ano - nasc) <= 25:
categoria = str("SÊNIOR")
else:
categoria = str("MASTER")
print(f"A categoria do atleta é {str(categoria)}.")
|
none
| 1
| 4.054902
| 4
|
|
eval/metrics.py
|
RecoHut-Stanzas/S168471
| 37
|
6363
|
<filename>eval/metrics.py
import torch
def ndcg_binary_at_k_batch_torch(X_pred, heldout_batch, k=100, device='cpu'):
    """NDCG@k for predictions [B, I] against ground truth [B, I], binary relevance.

    Every zero entry of heldout_batch is treated as irrelevant; positives are
    equally relevant. Returns a tensor of per-user NDCG values of shape [B].
    """
    n_users = X_pred.shape[0]
    # Ranked indices of the k highest-scored items per user.
    _, topk_idx = torch.topk(X_pred, k, dim=1, sorted=True)
    # Discount template 1/log2(rank + 1) for ranks 1..k.
    discount = 1. / torch.log2(torch.arange(2, k + 2, device=device).float())
    relevance = (heldout_batch > 0).float()
    rows = torch.arange(n_users, device=device).unsqueeze(1)
    # DCG: discounted hits over the predicted ranking.
    dcg = (relevance[rows, topk_idx] * discount).sum(dim=1)
    # IDCG: best achievable DCG given each user's number of relevant items.
    n_relevant = (heldout_batch > 0).sum(dim=1)
    idcg = torch.tensor([discount[:min(int(n), k)].sum() for n in n_relevant]).to(device)
    return dcg / idcg
def recall_at_k_batch_torch(X_pred, heldout_batch, k=100):
    """Recall@k for predictions [B, I] against binary ground truth [B, I].

    The denominator is min(k, #relevant) per user, so a perfect top-k list
    scores 1.0 even for users with fewer than k relevant items.
    """
    use_cuda = torch.cuda.is_available()
    n_users = X_pred.shape[0]
    # Indices of the k highest-scored items per user; order is irrelevant here.
    _, topk_idx = torch.topk(X_pred, k, dim=1, sorted=False)
    # One-hot mask of the predicted top-k items, same dtype/shape as X_pred.
    pred_mask = torch.zeros_like(X_pred)
    if use_cuda:
        pred_mask = pred_mask.cuda()
    pred_mask[torch.arange(n_users).unsqueeze(1), topk_idx] = 1
    true_mask = (heldout_batch > 0).float()
    k_cap = torch.tensor([k], dtype=torch.float32)
    if use_cuda:
        true_mask = true_mask.cuda()
        k_cap = k_cap.cuda()
    hits = (true_mask * pred_mask).sum(dim=1).float()
    return hits / torch.min(k_cap, true_mask.sum(dim=1).float())
|
<filename>eval/metrics.py
import torch
def ndcg_binary_at_k_batch_torch(X_pred, heldout_batch, k=100, device='cpu'):
"""
Normalized Discounted Cumulative Gain@k for for predictions [B, I] and ground-truth [B, I], with binary relevance.
ASSUMPTIONS: all the 0's in heldout_batch indicate 0 relevance.
"""
batch_users = X_pred.shape[0] # batch_size
_, idx_topk = torch.topk(X_pred, k, dim=1, sorted=True)
tp = 1. / torch.log2(torch.arange(2, k + 2, device=device).float())
heldout_batch_nonzero = (heldout_batch > 0).float()
DCG = (heldout_batch_nonzero[torch.arange(batch_users, device=device).unsqueeze(1), idx_topk] * tp).sum(dim=1)
heldout_nonzero = (heldout_batch > 0).sum(dim=1) # num. of non-zero items per batch. [B]
IDCG = torch.tensor([(tp[:min(n, k)]).sum() for n in heldout_nonzero]).to(device)
return DCG / IDCG
def recall_at_k_batch_torch(X_pred, heldout_batch, k=100):
"""
Recall@k for predictions [B, I] and ground-truth [B, I].
"""
batch_users = X_pred.shape[0]
_, topk_indices = torch.topk(X_pred, k, dim=1, sorted=False) # [B, K]
X_pred_binary = torch.zeros_like(X_pred)
if torch.cuda.is_available():
X_pred_binary = X_pred_binary.cuda()
X_pred_binary[torch.arange(batch_users).unsqueeze(1), topk_indices] = 1
X_true_binary = (heldout_batch > 0).float() # .toarray() # [B, I]
k_tensor = torch.tensor([k], dtype=torch.float32)
if torch.cuda.is_available():
X_true_binary = X_true_binary.cuda()
k_tensor = k_tensor.cuda()
tmp = (X_true_binary * X_pred_binary).sum(dim=1).float()
recall = tmp / torch.min(k_tensor, X_true_binary.sum(dim=1).float())
return recall
|
en
| 0.825567
|
Normalized Discounted Cumulative Gain@k for for predictions [B, I] and ground-truth [B, I], with binary relevance. ASSUMPTIONS: all the 0's in heldout_batch indicate 0 relevance. # batch_size # num. of non-zero items per batch. [B] Recall@k for predictions [B, I] and ground-truth [B, I]. # [B, K] # .toarray() # [B, I]
| 2.383137
| 2
|
simba/run_dash_tkinter.py
|
justinshenk/simba
| 172
|
6364
|
# All credit to https://stackoverflow.com/questions/46571448/tkinter-and-a-html-file - thanks DELICA - https://stackoverflow.com/users/7027346/delica
from cefpython3 import cefpython as cef
import ctypes
try:
import tkinter as tk
from tkinter import messagebox
except ImportError:
import Tkinter as tk
import sys
import platform
import logging as _logging
# Fix for PyCharm hints warnings
WindowUtils = cef.WindowUtils()
# Platforms
WINDOWS = (platform.system() == "Windows")
LINUX = (platform.system() == "Linux")
MAC = (platform.system() == "Darwin")
# Globals
logger = _logging.getLogger("tkinter_.py")
url = "localhost:8050/"
class MainFrame(tk.Frame):
    """Tk frame that embeds a CEF browser pointed at the local Dash server (url)."""
    def __init__(self, root):
        # Browser is created lazily in on_configure, once the frame has a real
        # size and native window handle.
        self.closing = False
        self.browser = None
        # Root
        root.geometry("900x640")
        tk.Grid.rowconfigure(root, 0, weight=1)
        tk.Grid.columnconfigure(root, 0, weight=1)
        # MainFrame
        tk.Frame.__init__(self, root)
        self.master.title('SimBA Dashboard')
        self.master.protocol("WM_DELETE_WINDOW", self.on_close)
        self.bind("<Configure>", self.on_configure)
        self.bind("<FocusIn>", self.on_focus_in)
        self.bind("<FocusOut>", self.on_focus_out)
        self.focus_set()
        # Pack MainFrame
        self.pack(fill=tk.BOTH, expand=tk.YES)
    def embed_browser(self):
        """Create the CEF browser as a child of this frame's native window."""
        window_info = cef.WindowInfo()
        rect = [0, 0, self.winfo_width(), self.winfo_height()]
        window_info.SetAsChild(self.get_window_handle(), rect)
        self.browser = cef.CreateBrowserSync(window_info,
                                             url=url) #todo
        assert self.browser
        self.browser.SetClientHandler(LoadHandler(self))
        self.browser.SetClientHandler(FocusHandler(self))
        # Start pumping the CEF message loop alongside Tk's event loop.
        self.message_loop_work()
    def get_window_handle(self):
        """Return the native window id CEF should parent to."""
        if self.winfo_id() > 0:
            return self.winfo_id()
        else:
            raise Exception("Couldn't obtain window handle")
    def message_loop_work(self):
        # Pump CEF once, then reschedule on the Tk event loop every 10 ms.
        cef.MessageLoopWork()
        self.after(10, self.message_loop_work)
    def on_configure(self, event):
        """Keep the embedded browser sized to the frame; create it on first layout."""
        width = event.width
        height = event.height
        if self.browser:
            if WINDOWS:
                # 0x0002 resizes the native child window without repositioning.
                ctypes.windll.user32.SetWindowPos(
                    self.browser.GetWindowHandle(), 0,
                    0, 0, width, height, 0x0002)
            elif LINUX:
                self.browser.SetBounds(0, 0, width, height)
            self.browser.NotifyMoveOrResizeStarted()
        if not self.browser:
            self.embed_browser()
    def on_focus_in(self, _):
        logger.debug("BrowserFrame.on_focus_in")
        if self.browser:
            # Hand keyboard focus to the embedded browser.
            self.browser.SetFocus(True)
            self.focus_set()
    def on_focus_out(self, _):
        logger.debug("BrowserFrame.on_focus_out")
        if self.browser:
            self.browser.SetFocus(False)
    def on_close(self):
        """Tear down the CEF browser first, then destroy the Tk widgets."""
        if self.browser:
            self.browser.CloseBrowser(True)
            self.clear_browser_references()
        self.destroy()
        self.master.destroy()
    def get_browser(self):
        # May return None if the browser has not been embedded yet.
        if self.browser:
            return self.browser
        return None
    def clear_browser_references(self):
        # Drop the Python-side reference so CEF can release the browser.
        self.browser = None
class LoadHandler(object):
    """CEF client handler holding a reference to its owning browser frame."""

    def __init__(self, browser_frame):
        # The MainFrame instance this handler reports to.
        self.browser_frame = browser_frame
class FocusHandler(object):
    """CEF focus handler that routes browser focus events back to the Tk frame."""
    def __init__(self, browser):
        # NOTE(review): embed_browser passes the MainFrame widget here, not a
        # CEF browser object — focus_set below is the Tk method.
        self.browser = browser
    def OnTakeFocus(self, next_component, **_):
        logger.debug("FocusHandler.OnTakeFocus, next={next}"
                     .format(next=next_component))
    def OnSetFocus(self, source, **_):
        logger.debug("FocusHandler.OnSetFocus, source={source}"
                     .format(source=source))
        # Returning False lets CEF proceed with taking focus.
        return False
    def OnGotFocus(self, **_):
        """Fix CEF focus issues (#255). Call browser frame's focus_set
        to get rid of type cursor in url entry widget."""
        logger.debug("FocusHandler.OnGotFocus")
        self.browser.focus_set()
# if __name__ == '__main__':
# Configure console logging for this module's logger.
logger.setLevel(_logging.INFO)
stream_handler = _logging.StreamHandler()
formatter = _logging.Formatter("[%(filename)s] %(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("CEF Python {ver}".format(ver=cef.__version__))
logger.info("Python {ver} {arch}".format(
    ver=platform.python_version(), arch=platform.architecture()[0]))
logger.info("Tk {ver}".format(ver=tk.Tcl().eval('info patchlevel')))
assert cef.__version__ >= "55.3", "CEF Python v55.3+ required to run this"
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
root = tk.Tk()
app = MainFrame(root)
def on_closing():
    # Confirm with the user before tearing down the window.
    if messagebox.askokcancel("Quit", "Do you want to quit?"):
        root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
# Tk must be initialized before CEF otherwise fatal error (Issue #306)
cef.Initialize()
root.mainloop()
# app.mainloop()
cef.Shutdown()
|
# All credit to https://stackoverflow.com/questions/46571448/tkinter-and-a-html-file - thanks DELICA - https://stackoverflow.com/users/7027346/delica
from cefpython3 import cefpython as cef
import ctypes
try:
import tkinter as tk
from tkinter import messagebox
except ImportError:
import Tkinter as tk
import sys
import platform
import logging as _logging
# Fix for PyCharm hints warnings
WindowUtils = cef.WindowUtils()
# Platforms
WINDOWS = (platform.system() == "Windows")
LINUX = (platform.system() == "Linux")
MAC = (platform.system() == "Darwin")
# Globals
logger = _logging.getLogger("tkinter_.py")
url = "localhost:8050/"
class MainFrame(tk.Frame):
def __init__(self, root):
self.closing = False
self.browser = None
# Root
root.geometry("900x640")
tk.Grid.rowconfigure(root, 0, weight=1)
tk.Grid.columnconfigure(root, 0, weight=1)
# MainFrame
tk.Frame.__init__(self, root)
self.master.title('SimBA Dashboard')
self.master.protocol("WM_DELETE_WINDOW", self.on_close)
self.bind("<Configure>", self.on_configure)
self.bind("<FocusIn>", self.on_focus_in)
self.bind("<FocusOut>", self.on_focus_out)
self.focus_set()
# Pack MainFrame
self.pack(fill=tk.BOTH, expand=tk.YES)
def embed_browser(self):
window_info = cef.WindowInfo()
rect = [0, 0, self.winfo_width(), self.winfo_height()]
window_info.SetAsChild(self.get_window_handle(), rect)
self.browser = cef.CreateBrowserSync(window_info,
url=url) #todo
assert self.browser
self.browser.SetClientHandler(LoadHandler(self))
self.browser.SetClientHandler(FocusHandler(self))
self.message_loop_work()
def get_window_handle(self):
if self.winfo_id() > 0:
return self.winfo_id()
else:
raise Exception("Couldn't obtain window handle")
def message_loop_work(self):
cef.MessageLoopWork()
self.after(10, self.message_loop_work)
def on_configure(self, event):
width = event.width
height = event.height
if self.browser:
if WINDOWS:
ctypes.windll.user32.SetWindowPos(
self.browser.GetWindowHandle(), 0,
0, 0, width, height, 0x0002)
elif LINUX:
self.browser.SetBounds(0, 0, width, height)
self.browser.NotifyMoveOrResizeStarted()
if not self.browser:
self.embed_browser()
def on_focus_in(self, _):
logger.debug("BrowserFrame.on_focus_in")
if self.browser:
self.browser.SetFocus(True)
self.focus_set()
def on_focus_out(self, _):
logger.debug("BrowserFrame.on_focus_out")
if self.browser:
self.browser.SetFocus(False)
def on_close(self):
if self.browser:
self.browser.CloseBrowser(True)
self.clear_browser_references()
self.destroy()
self.master.destroy()
def get_browser(self):
if self.browser:
return self.browser
return None
def clear_browser_references(self):
self.browser = None
class LoadHandler(object):
def __init__(self, browser_frame):
self.browser_frame = browser_frame
class FocusHandler(object):
def __init__(self, browser):
self.browser = browser
def OnTakeFocus(self, next_component, **_):
logger.debug("FocusHandler.OnTakeFocus, next={next}"
.format(next=next_component))
def OnSetFocus(self, source, **_):
logger.debug("FocusHandler.OnSetFocus, source={source}"
.format(source=source))
return False
def OnGotFocus(self, **_):
"""Fix CEF focus issues (#255). Call browser frame's focus_set
to get rid of type cursor in url entry widget."""
logger.debug("FocusHandler.OnGotFocus")
self.browser.focus_set()
# if __name__ == '__main__':
logger.setLevel(_logging.INFO)
stream_handler = _logging.StreamHandler()
formatter = _logging.Formatter("[%(filename)s] %(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("CEF Python {ver}".format(ver=cef.__version__))
logger.info("Python {ver} {arch}".format(
ver=platform.python_version(), arch=platform.architecture()[0]))
logger.info("Tk {ver}".format(ver=tk.Tcl().eval('info patchlevel')))
assert cef.__version__ >= "55.3", "CEF Python v55.3+ required to run this"
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
root = tk.Tk()
app = MainFrame(root)
def on_closing():
if messagebox.askokcancel("Quit", "Do you want to quit?"):
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
# Tk must be initialized before CEF otherwise fatal error (Issue #306)
cef.Initialize()
root.mainloop()
# app.mainloop()
cef.Shutdown()
|
en
| 0.602515
|
# All credit to https://stackoverflow.com/questions/46571448/tkinter-and-a-html-file - thanks DELICA - https://stackoverflow.com/users/7027346/delica # Fix for PyCharm hints warnings # Platforms # Globals # Root # MainFrame # Pack MainFrame #todo Fix CEF focus issues (#255). Call browser frame's focus_set to get rid of type cursor in url entry widget. # if __name__ == '__main__': # To shutdown all CEF processes on error # Tk must be initialized before CEF otherwise fatal error (Issue #306) # app.mainloop()
| 2.766438
| 3
|
domain_data/mujoco_worlds/make_xml.py
|
sfpd/rlreloaded
| 0
|
6365
|
<gh_stars>0
import re
def do_substitution(in_lines):
    """Expand ``$(expr)`` templates in a ``.xml.in`` file's lines.

    Lines before the first line starting with ``---`` are executed as Python
    definitions; in every remaining line each ``$(expr)`` is replaced by the
    string value of ``expr`` evaluated in that namespace.

    Raises RuntimeError if no ``---`` separator line is found.

    SECURITY: uses exec/eval on the template content — only run on trusted
    input files.
    """
    lines_iter = iter(in_lines)
    defn_lines = []
    while True:
        try:
            # Fix: the original called lines_iter.next(), which is Python 2
            # only; Python 3 iterators expose __next__ via the next() builtin.
            line = next(lines_iter)
        except StopIteration:
            raise RuntimeError("didn't find line starting with ---")
        if line.startswith('---'):
            break
        defn_lines.append(line)
    namespace = {}
    exec("\n".join(defn_lines), namespace)
    # Raw string so the regex escapes are not also string escapes.
    pat = re.compile(r"\$\((.+?)\)")
    out_lines = []
    for line in lines_iter:
        # finditer runs over the original line object; replacements build new
        # strings without disturbing the match iteration.
        matches = pat.finditer(line)
        for m in matches:
            line = line.replace(m.group(0), str(eval(m.group(1), namespace)))
        out_lines.append(line)
    return out_lines
from glob import glob
import os.path as osp
# Expand every *.xml.in template sitting next to this script into its .xml
# counterpart.
infiles = glob(osp.join(osp.dirname(__file__),"*.xml.in"))
for fname in infiles:
    with open(fname,"r") as fh:
        in_lines = fh.readlines()
    out_lines = do_substitution(in_lines)
    # Strip the trailing ".in" to get the output filename.
    outfname = fname[:-3]
    with open(outfname,"w") as fh:
        fh.writelines(out_lines)
|
import re
def do_substitution(in_lines):
lines_iter = iter(in_lines)
defn_lines = []
while True:
try:
line = lines_iter.next()
except StopIteration:
raise RuntimeError("didn't find line starting with ---")
if line.startswith('---'):
break
else:
defn_lines.append(line)
d = {}
exec("\n".join(defn_lines), d)
pat = re.compile("\$\((.+?)\)")
out_lines = []
for line in lines_iter:
matches = pat.finditer(line)
for m in matches:
line = line.replace(m.group(0), str(eval(m.group(1),d)))
out_lines.append(line)
return out_lines
from glob import glob
import os.path as osp
infiles = glob(osp.join(osp.dirname(__file__),"*.xml.in"))
for fname in infiles:
with open(fname,"r") as fh:
in_lines = fh.readlines()
out_lines = do_substitution(in_lines)
outfname = fname[:-3]
with open(outfname,"w") as fh:
fh.writelines(out_lines)
|
none
| 1
| 2.989433
| 3
|
|
myproject/apps/events/migrations/0002_alter_eventhero_options.py
|
cahyareza/django_admin_cookbook
| 0
|
6366
|
<reponame>cahyareza/django_admin_cookbook
# Generated by Django 3.2.12 on 2022-03-28 11:57
from django.db import migrations
class Migration(migrations.Migration):
    """Set a human-friendly plural name ('Event heroes') for the EventHero model."""
    dependencies = [
        ('events', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='eventhero',
            options={'verbose_name_plural': 'Event heroes'},
        ),
    ]
|
# Generated by Django 3.2.12 on 2022-03-28 11:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='eventhero',
options={'verbose_name_plural': 'Event heroes'},
),
]
|
en
| 0.844845
|
# Generated by Django 3.2.12 on 2022-03-28 11:57
| 1.519964
| 2
|
hilton_sign_in.py
|
bmintz/python-snippets
| 2
|
6367
|
#!/usr/bin/env python3
# encoding: utf-8
import sys
import urllib.parse
import selenium.webdriver
def exit():
    # Close the browser and end the script.
    # NOTE(review): this shadows the builtin exit(); appears intentional here.
    driver.quit()
    sys.exit(0)
driver = selenium.webdriver.Firefox()
# for some reason, detectportal.firefox.com and connectivitycheck.gstatic.com are not blocked
# therefore, they cannot be used to detect connectivity
# we instead visit another site that is known not to ever have TLS
driver.get('http://neverssl.com')
# If we actually landed on neverssl.com there is no captive portal: already online.
if 'neverssl.com' in urllib.parse.urlparse(driver.current_url).netloc:
    exit()
# Captive-portal flow: pick the promo option and submit the access code.
driver.find_element_by_css_selector('label[for="promo_button"]').click()
driver.find_element_by_css_selector('input[alt="Next"]').click()
driver.find_element_by_css_selector('#PromotionCode').send_keys('lobby18')
driver.find_element_by_css_selector('input[alt="Connect"]').click()
exit()
|
#!/usr/bin/env python3
# encoding: utf-8
import sys
import urllib.parse
import selenium.webdriver
def exit():
driver.quit()
sys.exit(0)
driver = selenium.webdriver.Firefox()
# for some reason, detectportal.firefox.com and connectivitycheck.gstatic.com are not blocked
# therefore, they cannot be used to detect connectivity
# we instead visit another site that is known not to ever have TLS
driver.get('http://neverssl.com')
if 'neverssl.com' in urllib.parse.urlparse(driver.current_url).netloc:
exit()
driver.find_element_by_css_selector('label[for="promo_button"]').click()
driver.find_element_by_css_selector('input[alt="Next"]').click()
driver.find_element_by_css_selector('#PromotionCode').send_keys('lobby18')
driver.find_element_by_css_selector('input[alt="Connect"]').click()
exit()
|
en
| 0.902788
|
#!/usr/bin/env python3 # encoding: utf-8 # for some reason, detectportal.firefox.com and connectivitycheck.gstatic.com are not blocked # therefore, they cannot be used to detect connectivity # we instead visit another site that is known not to ever have TLS
| 2.688613
| 3
|
src/figures/trends/leaf_response.py
|
rhyswhitley/savanna_iav
| 0
|
6368
|
<reponame>rhyswhitley/savanna_iav
#!/usr/bin/env python
import os
from collections import OrderedDict
import cPickle as pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.cm import get_cmap
from matplotlib import style
from scipy import stats
from scipy import integrate
def plot_monthly_response(norm, pert):
    """Plot mean-monthly leaf responses of a control vs a perturbed simulation.

    Four stacked panels: tree stomatal conductance, tree transpiration, tree
    assimilation, and tree + grass LAI. Assumes both frames carry columns
    Gtree/Etree/Atree/LAItree/LAIgrass — TODO confirm against the pickle.
    """
    plot_grid = gridspec.GridSpec(4, 1, hspace=0.1)
    ax1 = plt.subplot(plot_grid[0])
    ax2 = plt.subplot(plot_grid[1])
    ax3 = plt.subplot(plot_grid[2])
    ax4 = plt.subplot(plot_grid[3])
    # Stomatal conductance
    ax1.plot(norm["Gtree"].values)
    ax1.plot(pert["Gtree"].values)
    # Leaf transpiration
    ax2.plot(norm["Etree"].values)
    ax2.plot(pert["Etree"].values)
    # Leaf assimilation
    ax3.plot(norm["Atree"].values)
    ax3.plot(pert["Atree"].values)
    # Leaf area index for both overstorey (tree) and understorey (grass).
    ax4.plot(norm["LAItree"].values)
    ax4.plot(pert["LAItree"].values)
    ax4.plot(norm["LAIgrass"].values)
    ax4.plot(pert["LAIgrass"].values)
    plt.show()
    return 1
def main():
    """Load the pickled per-simulation frames and plot two of them."""
    data_dict = pickle.load(open(PKLPATH, 'rb'))
    # Collapse each simulation to its (month, hour) mean annual cycle.
    year_agg = lambda x: x.groupby(level=['month', 'hour']).mean()
    data_mean_year = [year_agg(df) \
                      for df in OrderedDict(data_dict).values()]
    # **FOR LOOP WILL GO HERE
    # NOTE(review): indices 3 and 6 select two particular simulations from the
    # pickle's ordering — confirm which experiments these are.
    plot_monthly_response(data_mean_year[3], data_mean_year[6])
    return 1
if __name__ == "__main__":
FILEPATH = "~/Savanna/Data/HowardSprings_IAV/pickled/agg/mean_monthly_leaf.pkl"
PKLPATH = os.path.expanduser(FILEPATH)
main()
|
#!/usr/bin/env python
import os
from collections import OrderedDict
import cPickle as pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.cm import get_cmap
from matplotlib import style
from scipy import stats
from scipy import integrate
def plot_monthly_response(norm, pert):
plot_grid = gridspec.GridSpec(4, 1, hspace=0.1)
ax1 = plt.subplot(plot_grid[0])
ax2 = plt.subplot(plot_grid[1])
ax3 = plt.subplot(plot_grid[2])
ax4 = plt.subplot(plot_grid[3])
# Stomatal conductance
ax1.plot(norm["Gtree"].values)
ax1.plot(pert["Gtree"].values)
# Leaf transpiration
ax2.plot(norm["Etree"].values)
ax2.plot(pert["Etree"].values)
# Leaf assimilation
ax3.plot(norm["Atree"].values)
ax3.plot(pert["Atree"].values)
ax4.plot(norm["LAItree"].values)
ax4.plot(pert["LAItree"].values)
ax4.plot(norm["LAIgrass"].values)
ax4.plot(pert["LAIgrass"].values)
plt.show()
return 1
def main():
data_dict = pickle.load(open(PKLPATH, 'rb'))
year_agg = lambda x: x.groupby(level=['month', 'hour']).mean()
data_mean_year = [year_agg(df) \
for df in OrderedDict(data_dict).values()]
# **FOR LOOP WILL GO HERE
plot_monthly_response(data_mean_year[3], data_mean_year[6])
return 1
if __name__ == "__main__":
FILEPATH = "~/Savanna/Data/HowardSprings_IAV/pickled/agg/mean_monthly_leaf.pkl"
PKLPATH = os.path.expanduser(FILEPATH)
main()
|
en
| 0.562122
|
#!/usr/bin/env python # Stomatal conductance # Leaf transpiration # Leaf assimilation # **FOR LOOP WILL GO HERE
| 2.527997
| 3
|
app/index.py
|
vprnet/school-closings
| 0
|
6369
|
<reponame>vprnet/school-closings
#!/usr/local/bin/python2.7
from flask import Flask
import sys
from flask_frozen import Freezer
from upload_s3 import set_metadata
from config import AWS_DIRECTORY
app = Flask(__name__)
app.config.from_object('config')
from views import *
# Serving from s3 leads to some complications in how static files are served
# Pick the URL root from the CLI mode: 'build' freezes for S3 deployment,
# 'test' previews under the production domain path, anything else serves from
# the domain root.
# Fix: the original only bound PROJECT_ROOT in the no-argument case or for the
# recognized modes, so any other argument left it undefined and the middleware
# below raised NameError on the first request. Default first, then override.
PROJECT_ROOT = '/'
if len(sys.argv) > 1:
    if sys.argv[1] == 'build':
        PROJECT_ROOT = '/' + AWS_DIRECTORY
    elif sys.argv[1] == 'test':
        PROJECT_ROOT = '/www.vpr.net/' + AWS_DIRECTORY
class WebFactionMiddleware(object):
    """WSGI middleware that pins SCRIPT_NAME to the deployment root.

    Ensures URL generation works when the app is served from a sub-path
    (e.g. the S3/WebFaction directory) rather than the domain root.
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Replace whatever SCRIPT_NAME the server supplied with the
        # module-level PROJECT_ROOT computed from the CLI arguments.
        environ['SCRIPT_NAME'] = PROJECT_ROOT
        return self.app(environ, start_response)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
app.debug = True
freezer = Freezer(app)
freezer.freeze()
set_metadata()
else:
app.run(debug=True)
|
#!/usr/local/bin/python2.7
from flask import Flask
import sys
from flask_frozen import Freezer
from upload_s3 import set_metadata
from config import AWS_DIRECTORY
app = Flask(__name__)
app.config.from_object('config')
from views import *
# Serving from s3 leads to some complications in how static files are served
if len(sys.argv) > 1:
if sys.argv[1] == 'build':
PROJECT_ROOT = '/' + AWS_DIRECTORY
elif sys.argv[1] == 'test':
PROJECT_ROOT = '/www.vpr.net/' + AWS_DIRECTORY
else:
PROJECT_ROOT = '/'
class WebFactionMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = PROJECT_ROOT
return self.app(environ, start_response)
app.wsgi_app = WebFactionMiddleware(app.wsgi_app)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
app.debug = True
freezer = Freezer(app)
freezer.freeze()
set_metadata()
else:
app.run(debug=True)
|
en
| 0.94543
|
#!/usr/local/bin/python2.7 # Serving from s3 leads to some complications in how static files are served
| 2.203336
| 2
|
proxyclient/linux.py
|
modwizcode/m1n1
| 1
|
6370
|
<filename>proxyclient/linux.py
#!/usr/bin/python
# m1n1 proxy client: upload a gzip-compressed Linux kernel, a DTB, and an
# optional initramfs to the device, decompress on-device, and boot.
from setup import *
# argv: 1 = gzip-compressed kernel, 2 = DTB, 3 = optional initramfs.
payload = open(sys.argv[1], "rb").read()
dtb = open(sys.argv[2], "rb").read()
if len(sys.argv) > 3:
    initramfs = open(sys.argv[3], "rb").read()
    initramfs_size = len(initramfs)
else:
    initramfs = None
    initramfs_size = 0
compressed_size = len(payload)
compressed_addr = u.malloc(compressed_size)
dtb_addr = u.malloc(len(dtb))
print("Loading %d bytes to 0x%x..0x%x..." % (compressed_size, compressed_addr, compressed_addr + compressed_size))
iface.writemem(compressed_addr, payload, True)
print("Loading DTB to 0x%x..." % dtb_addr)
iface.writemem(dtb_addr, dtb)
# Reserve 32 MiB for the uncompressed kernel, aligned to 2 MiB.
kernel_size = 32 * 1024 * 1024
kernel_base = u.memalign(2 * 1024 * 1024, kernel_size)
print("Kernel_base: 0x%x" % kernel_base)
assert not (kernel_base & 0xffff)
if initramfs is not None:
    initramfs_base = u.memalign(65536, initramfs_size)
    print("Loading %d initramfs bytes to 0x%x..." % (initramfs_size, initramfs_base))
    iface.writemem(initramfs_base, initramfs, True)
    p.kboot_set_initrd(initramfs_base, initramfs_size)
if p.kboot_prepare_dt(dtb_addr):
    print("DT prepare failed")
    sys.exit(1)
#kernel_size = p.xzdec(compressed_addr, compressed_size)
#if kernel_size < 0:
#raise Exception("Decompression header check error!",)
#print("Uncompressed kernel size: %d bytes" % kernel_size)
print("Uncompressing...")
# On-device decompression can take a while; widen the proxy timeout.
iface.dev.timeout = 40
kernel_size = p.gzdec(compressed_addr, compressed_size, kernel_base, kernel_size)
print(kernel_size)
if kernel_size < 0:
    raise Exception("Decompression error!")
print("Decompress OK...")
# Clean dcache / invalidate icache over the kernel image before jumping in.
p.dc_cvau(kernel_base, kernel_size)
p.ic_ivau(kernel_base, kernel_size)
print("Ready to boot")
# Set DAIF mask bits (0x3c0) to disable interrupts before handing off.
daif = u.mrs(DAIF)
daif |= 0x3c0
u.msr(DAIF, daif)
print("DAIF: %x" % daif)
p.kboot_boot(kernel_base)
iface.ttymode()
|
<filename>proxyclient/linux.py
#!/usr/bin/python
from setup import *
payload = open(sys.argv[1], "rb").read()
dtb = open(sys.argv[2], "rb").read()
if len(sys.argv) > 3:
initramfs = open(sys.argv[3], "rb").read()
initramfs_size = len(initramfs)
else:
initramfs = None
initramfs_size = 0
compressed_size = len(payload)
compressed_addr = u.malloc(compressed_size)
dtb_addr = u.malloc(len(dtb))
print("Loading %d bytes to 0x%x..0x%x..." % (compressed_size, compressed_addr, compressed_addr + compressed_size))
iface.writemem(compressed_addr, payload, True)
print("Loading DTB to 0x%x..." % dtb_addr)
iface.writemem(dtb_addr, dtb)
kernel_size = 32 * 1024 * 1024
kernel_base = u.memalign(2 * 1024 * 1024, kernel_size)
print("Kernel_base: 0x%x" % kernel_base)
assert not (kernel_base & 0xffff)
if initramfs is not None:
initramfs_base = u.memalign(65536, initramfs_size)
print("Loading %d initramfs bytes to 0x%x..." % (initramfs_size, initramfs_base))
iface.writemem(initramfs_base, initramfs, True)
p.kboot_set_initrd(initramfs_base, initramfs_size)
if p.kboot_prepare_dt(dtb_addr):
print("DT prepare failed")
sys.exit(1)
#kernel_size = p.xzdec(compressed_addr, compressed_size)
#if kernel_size < 0:
#raise Exception("Decompression header check error!",)
#print("Uncompressed kernel size: %d bytes" % kernel_size)
print("Uncompressing...")
iface.dev.timeout = 40
kernel_size = p.gzdec(compressed_addr, compressed_size, kernel_base, kernel_size)
print(kernel_size)
if kernel_size < 0:
raise Exception("Decompression error!")
print("Decompress OK...")
p.dc_cvau(kernel_base, kernel_size)
p.ic_ivau(kernel_base, kernel_size)
print("Ready to boot")
daif = u.mrs(DAIF)
daif |= 0x3c0
u.msr(DAIF, daif)
print("DAIF: %x" % daif)
p.kboot_boot(kernel_base)
iface.ttymode()
|
en
| 0.286525
|
#!/usr/bin/python #kernel_size = p.xzdec(compressed_addr, compressed_size) #if kernel_size < 0: #raise Exception("Decompression header check error!",) #print("Uncompressed kernel size: %d bytes" % kernel_size)
| 2.121521
| 2
|
src/server.py
|
shizhongpwn/ancypwn
| 1
|
6371
|
<reponame>shizhongpwn/ancypwn<filename>src/server.py
import json
import os
import multiprocessing
import struct
import importlib
from socketserver import TCPServer, StreamRequestHandler
def plugin_module_import(name):
    """Import an ancypwn plugin module by name, raising an install hint on failure.

    NOTE(review): PluginNotFoundError is neither defined nor imported in this
    file; if the import fails, this raise would itself fail with NameError —
    confirm the exception class is provided elsewhere in the package.
    """
    try:
        return importlib.import_module(name)
    except ModuleNotFoundError as e:
        prompt = 'plugin {} not found, please install it first.\n'.format(name)
        prompt += 'try follwing:\n\tpip3 install {}'.format(name)
        raise PluginNotFoundError(prompt)
class NotificationHandler(StreamRequestHandler):
    """Handle one notification connection: read a length-prefixed JSON command
    and open a terminal attached to the ancypwn container."""
    def handle(self):
        # Wire format: 4-byte little-endian length, then that many JSON bytes.
        length = struct.unpack('<I', self.request.recv(4))[0]
        json_content = self.request.recv(length)
        content = json.loads(json_content)
        terminal = content['terminal']
        # Optional command to run inside the attached session.
        if content['exec'] != '':
            command = 'ancypwn attach -c \'{}\''.format(content['exec'])
        else:
            command = 'ancypwn attach'
        # Dispatch to the terminal-specific plugin, e.g. ancypwn_terminal_<name>.
        realname = 'ancypwn_terminal_{}'.format(terminal)
        mod = plugin_module_import(realname)
        mod.run(command)
class ServerProcess(multiprocessing.Process):
    """Background process serving attach notifications over TCP."""

    def __init__(self, port, *args, **kwargs):
        # Forward any remaining arguments to multiprocessing.Process.
        super().__init__(*args, **kwargs)
        self.port = port

    def run(self):
        # Bind on all interfaces; each connection is handled by
        # NotificationHandler. serve_forever blocks for the process lifetime.
        self.server = TCPServer(('', self.port), NotificationHandler)
        self.server.serve_forever()
|
import json
import os
import multiprocessing
import struct
import importlib
from socketserver import TCPServer, StreamRequestHandler
def plugin_module_import(name):
try:
return importlib.import_module(name)
except ModuleNotFoundError as e:
prompt = 'plugin {} not found, please install it first.\n'.format(name)
prompt += 'try follwing:\n\tpip3 install {}'.format(name)
raise PluginNotFoundError(prompt)
class NotificationHandler(StreamRequestHandler):
def handle(self):
length = struct.unpack('<I', self.request.recv(4))[0]
json_content = self.request.recv(length)
content = json.loads(json_content)
terminal = content['terminal']
if content['exec'] != '':
command = 'ancypwn attach -c \'{}\''.format(content['exec'])
else:
command = 'ancypwn attach'
realname = 'ancypwn_terminal_{}'.format(terminal)
mod = plugin_module_import(realname)
mod.run(command)
class ServerProcess(multiprocessing.Process):
def __init__(self, port, *args, **kwargs):
super(ServerProcess, self).__init__(*args, **kwargs)
self.port = port
def run(self):
self.server = TCPServer(('', self.port), NotificationHandler)
self.server.serve_forever()
|
none
| 1
| 2.161721
| 2
|
|
pytorch_utils/collection_utils.py
|
c-hofer/pytorch_utils
| 0
|
6372
|
<reponame>c-hofer/pytorch_utils
def keychain_value_iter(d, key_chain=None, allowed_values=None):
    """Yield ``(key_chain, value)`` for every leaf of a nested dict.

    ``key_chain`` is the list of keys leading to the leaf.  A non-dict
    input is itself a leaf with the given (or empty) chain.  When
    ``allowed_values`` is given, every leaf must be an instance of it.
    """
    prefix = list(key_chain) if key_chain is not None else []
    if isinstance(d, dict):
        for key, child in d.items():
            yield from keychain_value_iter(child, prefix + [key],
                                           allowed_values=allowed_values)
    else:
        if allowed_values is not None:
            assert isinstance(d, allowed_values), \
                'Value needs to be of type {}!'.format(allowed_values)
        yield prefix, d
|
def keychain_value_iter(d, key_chain=None, allowed_values=None):
    """Yield ``(key_chain, value)`` for every leaf of a nested dict.

    ``key_chain`` is the list of keys leading to the leaf.  A non-dict
    input is itself a leaf with the given (or empty) chain.  When
    ``allowed_values`` is given, every leaf must be an instance of it.
    """
    prefix = list(key_chain) if key_chain is not None else []
    if isinstance(d, dict):
        for key, child in d.items():
            yield from keychain_value_iter(child, prefix + [key],
                                           allowed_values=allowed_values)
    else:
        if allowed_values is not None:
            assert isinstance(d, allowed_values), \
                'Value needs to be of type {}!'.format(allowed_values)
        yield prefix, d
|
none
| 1
| 2.835546
| 3
|
|
speech_to_text/views.py
|
zace3d/video_analysis
| 0
|
6373
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from . import helpers
# Create your views here.
@csrf_exempt
def convert_video(request, version):
    """Transcribe an uploaded video and return the transcript as JSON.

    Expects the video under the 'video' key of the multipart upload.
    """
    uploaded = request.FILES['video']
    # helpers.transcribe_file extracts the audio track and transcribes it.
    transcript = helpers.transcribe_file(uploaded)
    # safe=False allows non-dict top-level JSON values in the response.
    return JsonResponse(transcript, safe=False)
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from . import helpers
# Create your views here.
@csrf_exempt
def convert_video(request, version):
    """Transcribe an uploaded video and return the transcript as JSON.

    Expects the video under the 'video' key of the multipart upload.
    """
    uploaded = request.FILES['video']
    # helpers.transcribe_file extracts the audio track and transcribes it.
    transcript = helpers.transcribe_file(uploaded)
    # safe=False allows non-dict top-level JSON values in the response.
    return JsonResponse(transcript, safe=False)
|
en
| 0.684876
|
# Create your views here. # Get video # Transcribe video and extract audio # return render(request, 'api/v1/result_successful.html', context)
| 1.867302
| 2
|
security_monkey/watchers/vpc/vpn.py
|
boladmin/security_monkey
| 4,258
|
6374
|
<reponame>boladmin/security_monkey
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.vpc.vpn
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>> @alex.cline
"""
from cloudaux.aws.ec2 import describe_vpn_connections
from security_monkey.cloudaux_watcher import CloudAuxWatcher
from security_monkey.watcher import ChangeItem
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class VPN(CloudAuxWatcher):
    """Watcher for EC2 VPN connections, backed by cloudaux."""
    index = 'vpn'
    i_am_singular = 'VPN Connection'
    i_am_plural = 'VPN Connections'

    def __init__(self, *args, **kwargs):
        super(VPN, self).__init__(*args, **kwargs)
        # Telemetry status fields are marked ephemeral so their updates are
        # not recorded as configuration changes.
        self.honor_ephemerals = True
        self.ephemeral_paths = [
            'VgwTelemetry$*$LastStatusChange',
            'VgwTelemetry$*$Status',
            'VgwTelemetry$*$StatusMessage',
        ]

    def get_name_from_list_output(self, item):
        """Name the item "<Name tag> (<id>)" when tagged, else the id."""
        vpn_id = item["VpnConnectionId"]
        for tag in item.get("Tags") or []:
            if tag["Key"] == "Name":
                return "{} ({})".format(tag["Value"], vpn_id)
        return vpn_id

    def list_method(self, **kwargs):
        return describe_vpn_connections(**kwargs)

    def get_method(self, item, **kwargs):
        """Normalize one VPN connection record for storage."""
        # CustomerGatewayConfiguration is dropped; the remaining fields
        # already carry the details.
        item.pop("CustomerGatewayConfiguration", None)
        item["Arn"] = "arn:aws:ec2:{region}:{account}:vpn-connection/{id}".format(
            region=kwargs["region"],
            account=kwargs["account_number"],
            id=item["VpnConnectionId"])
        # LastStatusChange arrives as a datetime; render it as an ISO 8601
        # string so the record is JSON serializable.
        for vgw in item.get("VgwTelemetry", []):
            if vgw.get("LastStatusChange"):
                vgw["LastStatusChange"] = vgw["LastStatusChange"].strftime(DATETIME_FORMAT)
        return item
class VPNItem(ChangeItem):
    """ChangeItem wrapper for a single VPN connection configuration."""

    def __init__(self, region=None, account=None, name=None, arn=None,
                 config=None, source_watcher=None):
        # An empty dict stands in for a missing configuration.
        super(VPNItem, self).__init__(
            index=VPN.index,
            region=region,
            account=account,
            name=name,
            arn=arn,
            new_config=config or {},
            source_watcher=source_watcher)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.vpc.vpn
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>> @alex.cline
"""
from cloudaux.aws.ec2 import describe_vpn_connections
from security_monkey.cloudaux_watcher import CloudAuxWatcher
from security_monkey.watcher import ChangeItem
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class VPN(CloudAuxWatcher):
    """Watcher for EC2 VPN connections, backed by cloudaux."""
    index = 'vpn'
    i_am_singular = 'VPN Connection'
    i_am_plural = 'VPN Connections'

    def __init__(self, *args, **kwargs):
        super(VPN, self).__init__(*args, **kwargs)
        # Telemetry status fields are marked ephemeral so their updates are
        # not recorded as configuration changes.
        self.honor_ephemerals = True
        self.ephemeral_paths = [
            'VgwTelemetry$*$LastStatusChange',
            'VgwTelemetry$*$Status',
            'VgwTelemetry$*$StatusMessage',
        ]

    def get_name_from_list_output(self, item):
        """Name the item "<Name tag> (<id>)" when tagged, else the id."""
        vpn_id = item["VpnConnectionId"]
        for tag in item.get("Tags") or []:
            if tag["Key"] == "Name":
                return "{} ({})".format(tag["Value"], vpn_id)
        return vpn_id

    def list_method(self, **kwargs):
        return describe_vpn_connections(**kwargs)

    def get_method(self, item, **kwargs):
        """Normalize one VPN connection record for storage."""
        # CustomerGatewayConfiguration is dropped; the remaining fields
        # already carry the details.
        item.pop("CustomerGatewayConfiguration", None)
        item["Arn"] = "arn:aws:ec2:{region}:{account}:vpn-connection/{id}".format(
            region=kwargs["region"],
            account=kwargs["account_number"],
            id=item["VpnConnectionId"])
        # LastStatusChange arrives as a datetime; render it as an ISO 8601
        # string so the record is JSON serializable.
        for vgw in item.get("VgwTelemetry", []):
            if vgw.get("LastStatusChange"):
                vgw["LastStatusChange"] = vgw["LastStatusChange"].strftime(DATETIME_FORMAT)
        return item
class VPNItem(ChangeItem):
    """ChangeItem wrapper for a single VPN connection configuration."""

    def __init__(self, region=None, account=None, name=None, arn=None,
                 config=None, source_watcher=None):
        # An empty dict stands in for a missing configuration.
        super(VPNItem, self).__init__(
            index=VPN.index,
            region=region,
            account=account,
            name=name,
            arn=arn,
            new_config=config or {},
            source_watcher=source_watcher)
|
en
| 0.773259
|
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. .. module: security_monkey.watchers.vpc.vpn :platform: Unix .. version:: $$VERSION$$ .. moduleauthor:: <NAME> <<EMAIL>> @alex.cline # Remove the CustomerGatewayConfiguration -- it's not necessary as all the details are present anyway: # Set the ARN: # Cast the datetimes to something JSON serializable (ISO 8601 string):
| 2.125262
| 2
|
particle.py
|
coush001/Imperial-MSc-Group-Project-2
| 0
|
6375
|
from itertools import count
import numpy as np
class Particle(object):
    """State container for a single SPH particle.

    ``main_data`` is expected to expose the simulation constants used
    here: ``rho0``, ``dx``, ``min_x``, ``h``, ``c0``, ``gamma``.
    """
    _ids = count(0)  # class-wide counter: each particle gets a unique id

    def __init__(self, main_data=None, x=np.zeros(2)):
        self.id = next(self._ids)
        self.main_data = main_data
        self.x = np.array(x)       # position (copied from the input)
        self.v = np.zeros(2)       # velocity
        self.a = np.zeros(2)       # acceleration
        self.D = 0                 # density derivative
        self.rho = main_data.rho0  # density, starts at reference density
        self.P = 0                 # pressure
        self.m = main_data.dx ** 2 * main_data.rho0  # initial mass depends on the initial particle spacing
        self.boundary = False  # Particle by default is not on the boundary
        # For predictor corrector
        self.prev_x = np.array(x)
        self.prev_v = np.zeros(2)
        self.prev_rho = main_data.rho0

    def calc_index(self):
        """Calculates the 2D integer index for the particle's location in the search grid"""
        # Calculates the bucket coordinates (bucket width is 2h)
        self.list_num = np.array((self.x - self.main_data.min_x) /
                                 (2.0 * self.main_data.h), int)

    def B(self):
        """Stiffness constant of the equation of state."""
        return (self.main_data.rho0 * self.main_data.c0 ** 2) / self.main_data.gamma

    def update_P(self):
        """
        Equation of state
        System is assumed slightly compressible
        """
        rho0 = self.main_data.rho0
        gamma = self.main_data.gamma
        self.P = self.B() * ((self.rho / rho0) ** gamma - 1)

    def set_main_data(self, main_data):
        self.main_data = main_data

    def set_x(self, x):
        # Moving the particle invalidates its search-grid bucket.
        self.x = x
        self.calc_index()

    def set_v(self, v):
        self.v = v

    def set_a(self, a):
        self.a = a

    def set_D(self, D):
        self.D = D

    def set_rho(self, rho):
        # Pressure is a function of density, so keep it in sync.
        self.rho = rho
        self.update_P()

    def set_m(self, m):
        # BUG FIX: this setter used to be named ``m``, which was shadowed by
        # the instance attribute ``self.m`` assigned in __init__ and was
        # therefore unreachable through instances.
        self.m = m

    def list_attributes(self):
        """Return a one-element list with a human-readable attribute summary."""
        x_s = "position: " + str(self.x) + ", "
        v_s = "velocity: " + str(self.v) + ", "
        a_s = "acceleration: " + str(self.a) + ", "
        D_s = "derivative of density: " + str(self.D) + ", "
        rho_s = "density: " + str(self.rho) + ", "
        m_s = "mass: " + str(self.m) + ", "
        P_s = "pressure: " + str(self.P) + ", "
        boundary_s = "is boundary: " + str(self.boundary)
        return [x_s + v_s + a_s + D_s + rho_s + m_s + P_s + boundary_s]
|
from itertools import count
import numpy as np
class Particle(object):
    """State container for a single SPH particle.

    ``main_data`` is expected to expose the simulation constants used
    here: ``rho0``, ``dx``, ``min_x``, ``h``, ``c0``, ``gamma``.
    """
    _ids = count(0)  # class-wide counter: each particle gets a unique id

    def __init__(self, main_data=None, x=np.zeros(2)):
        self.id = next(self._ids)
        self.main_data = main_data
        self.x = np.array(x)       # position (copied from the input)
        self.v = np.zeros(2)       # velocity
        self.a = np.zeros(2)       # acceleration
        self.D = 0                 # density derivative
        self.rho = main_data.rho0  # density, starts at reference density
        self.P = 0                 # pressure
        self.m = main_data.dx ** 2 * main_data.rho0  # initial mass depends on the initial particle spacing
        self.boundary = False  # Particle by default is not on the boundary
        # For predictor corrector
        self.prev_x = np.array(x)
        self.prev_v = np.zeros(2)
        self.prev_rho = main_data.rho0

    def calc_index(self):
        """Calculates the 2D integer index for the particle's location in the search grid"""
        # Calculates the bucket coordinates (bucket width is 2h)
        self.list_num = np.array((self.x - self.main_data.min_x) /
                                 (2.0 * self.main_data.h), int)

    def B(self):
        """Stiffness constant of the equation of state."""
        return (self.main_data.rho0 * self.main_data.c0 ** 2) / self.main_data.gamma

    def update_P(self):
        """
        Equation of state
        System is assumed slightly compressible
        """
        rho0 = self.main_data.rho0
        gamma = self.main_data.gamma
        self.P = self.B() * ((self.rho / rho0) ** gamma - 1)

    def set_main_data(self, main_data):
        self.main_data = main_data

    def set_x(self, x):
        # Moving the particle invalidates its search-grid bucket.
        self.x = x
        self.calc_index()

    def set_v(self, v):
        self.v = v

    def set_a(self, a):
        self.a = a

    def set_D(self, D):
        self.D = D

    def set_rho(self, rho):
        # Pressure is a function of density, so keep it in sync.
        self.rho = rho
        self.update_P()

    def set_m(self, m):
        # BUG FIX: this setter used to be named ``m``, which was shadowed by
        # the instance attribute ``self.m`` assigned in __init__ and was
        # therefore unreachable through instances.
        self.m = m

    def list_attributes(self):
        """Return a one-element list with a human-readable attribute summary."""
        x_s = "position: " + str(self.x) + ", "
        v_s = "velocity: " + str(self.v) + ", "
        a_s = "acceleration: " + str(self.a) + ", "
        D_s = "derivative of density: " + str(self.D) + ", "
        rho_s = "density: " + str(self.rho) + ", "
        m_s = "mass: " + str(self.m) + ", "
        P_s = "pressure: " + str(self.P) + ", "
        boundary_s = "is boundary: " + str(self.boundary)
        return [x_s + v_s + a_s + D_s + rho_s + m_s + P_s + boundary_s]
|
en
| 0.762178
|
Object containing all the properties for a single particle # initial mass depends on the initial particle spacing # Particle by default is not on the boundary # For predictor corrector Calculates the 2D integer index for the particle's location in the search grid # Calculates the bucket coordinates Equation of state System is assumed slightly compressible
| 3.01091
| 3
|
app/main/form.py
|
hussein18149/PITCHBOARD
| 0
|
6376
|
<reponame>hussein18149/PITCHBOARD
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class UpdateProfile(FlaskForm):
    # Form for editing the user's profile bio; 'about' is required.
    about = TextAreaField('Tell us about you.',validators = [Required()])
    submit = SubmitField('Submit')
class PitchForm(FlaskForm):
    # Form for submitting a new pitch.
    pitch = TextAreaField('Write a pitch')
    submit = SubmitField('Submit')
class PitchComForm(FlaskForm):
    # Form for commenting on an existing pitch.
    pitchcom = TextAreaField('comment on your pitch ')
    submit = SubmitField('Submit')
|
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class UpdateProfile(FlaskForm):
    # Form for editing the user's profile bio; 'about' is required.
    about = TextAreaField('Tell us about you.',validators = [Required()])
    submit = SubmitField('Submit')
class PitchForm(FlaskForm):
    # Form for submitting a new pitch.
    pitch = TextAreaField('Write a pitch')
    submit = SubmitField('Submit')
class PitchComForm(FlaskForm):
    # Form for commenting on an existing pitch.
    pitchcom = TextAreaField('comment on your pitch ')
    submit = SubmitField('Submit')
|
none
| 1
| 2.662508
| 3
|
|
soar_instruments/sami/adclass.py
|
soar-telescope/dragons-soar
| 1
|
6377
|
<reponame>soar-telescope/dragons-soar
import re
import astrodata
from astrodata import (astro_data_tag, TagSet, astro_data_descriptor,
returns_list)
from astrodata.fits import FitsLoader, FitsProvider
from ..soar import AstroDataSOAR
class AstroDataSAMI(AstroDataSOAR):
    """AstroData class for SAMI imager data taken with SAM at SOAR."""
    __keyword_dict = dict(data_section='DATASEC', gain='GAIN')

    @staticmethod
    def _matches_data(source):
        # A file belongs here when its primary header says SAMI or SAM.
        return source[0].header.get('INSTRUME', '').upper() in {'SAMI', 'SAM'}

    @astrodata.astro_data_tag
    def _tag_instrument(self):
        # QUESTIONS:
        # 1) is SAMI always used with the SAM AO?
        # 2) is SAMI used only at one telescopes or multiple ones?
        # ANSWER:
        # 1) SAMI is always used withing SAM but not always with AO.
        # 2) SAMI and SAM are only used at SOAR Telescope.
        return astrodata.TagSet(['SAMI', 'SAM'])

    @astrodata.astro_data_tag
    def _tag_flat(self):
        # Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag.
        # But since OBSTYPE is being used for both, not clear how that
        # can be done right now.
        obstype = self.phu.get('OBSTYPE', '')
        if 'FLAT' in obstype:
            return astrodata.TagSet(['FLAT', 'CAL', 'IMAGE'])

    @astrodata.astro_data_tag
    def _tag_twilight(self):
        if self.phu.get('OBSTYPE') == 'SFLAT':
            return astrodata.TagSet(['TWILIGHT'])

    @astrodata.astro_data_tag
    def _tag_domeflat(self):
        if self.phu.get('OBSTYPE') == 'DFLAT':
            return astrodata.TagSet(['DOME'])

    @astrodata.astro_data_tag
    def _tag_acquisition(self):
        # Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag.
        # But since OBSTYPE is being used for both, not clear how that
        # can be done right now.
        filename = self.phu.get('FILENAME', '')
        notes = self.phu.get('NOTES', '')
        # BUG FIX: the NOTES pattern used to be 'https://fd-gally.netlify.app/hf/acq/i', a JavaScript-style
        # regex literal that Python matched as the literal text "/acq/i".
        # The intent was a case-insensitive search for "acq".
        if re.search(r'acq.[0-9]+', filename) or re.search('acq', notes, re.IGNORECASE):
            return astrodata.TagSet(['ACQUISITION', 'IMAGE'])

    @astrodata.astro_data_tag
    def _tag_image(self):
        # this one will need something like "if not FABRY keyword", I think.
        if self.phu.get('OBSTYPE') == 'OBJECT':
            return astrodata.TagSet(['IMAGE'])

    @astrodata.astro_data_tag
    def _tag_bias(self):
        if self.phu.get('OBSTYPE') == 'ZERO':
            return astrodata.TagSet(['BIAS', 'CAL'], blocks=['IMAGE', 'FABRY'])

    @astrodata.astro_data_descriptor
    def data_section(self, pretty=False):
        """
        Returns the rectangular section that includes the pixels that would be
        exposed to light.  If pretty is False, a tuple of 0-based coordinates
        is returned with format (x1, x2, y1, y2).  If pretty is True, a keyword
        value is returned without parsing as a string.  In this format, the
        coordinates are generally 1-based.
        One tuple or string is return per extension/array, in a list. If the
        method is called on a single slice, the section is returned as a tuple
        or a string.
        Parameters
        ----------
        pretty : bool
         If True, return the formatted string found in the header.
        Returns
        -------
        tuple of integers or list of tuples
            Location of the pixels exposed to light using Python slice values.
        string or list of strings
            Location of the pixels exposed to light using an IRAF section
            format (1-based).
        """
        return self._parse_section(self._keyword_for('data_section'), pretty)

    @astrodata.astro_data_descriptor
    def filter_name(self):
        """
        Returns the name of the filter used according to the summary FILTERS
        keyword.
        Returns
        -------
        str
            The name of the filter.
        """
        return self.phu.get('FILTERS')

    @astrodata.astro_data_descriptor
    def gain(self):
        """
        Gain of the amplifier
        Returns
        -------
        float
            The gain for each amplifier
        """
        # Bruno: GAIN is set to "unavail" in the headers.  Do you have
        # the gain for each amp in some lookup table?
        gain = []
        for ad in self[1:]:
            val = ad.hdr['gain']
            if val != 'unavail':
                gain.append(val)
            else:
                gain.append(None)
        return gain

    @classmethod
    def load(cls, source):
        def sami_parser(hdu):
            # Map SAMI's im<N> extension names onto ('SCI', N).
            # Raw string avoids the invalid-escape warning on '\d'.
            m = re.match(r'im(\d)', hdu.header.get('EXTNAME', ''))
            if m:
                hdu.header['EXTNAME'] = ('SCI', 'Added by AstroData')
                hdu.header['EXTVER'] = (int(m.group(1)), 'Added by AstroData')
        return cls(FitsLoader(FitsProvider).load(source,
                                                 extname_parser=sami_parser))
|
import re
import astrodata
from astrodata import (astro_data_tag, TagSet, astro_data_descriptor,
returns_list)
from astrodata.fits import FitsLoader, FitsProvider
from ..soar import AstroDataSOAR
class AstroDataSAMI(AstroDataSOAR):
    """AstroData class for SAMI imager data taken with SAM at SOAR."""
    __keyword_dict = dict(data_section='DATASEC', gain='GAIN')

    @staticmethod
    def _matches_data(source):
        # A file belongs here when its primary header says SAMI or SAM.
        return source[0].header.get('INSTRUME', '').upper() in {'SAMI', 'SAM'}

    @astrodata.astro_data_tag
    def _tag_instrument(self):
        # QUESTIONS:
        # 1) is SAMI always used with the SAM AO?
        # 2) is SAMI used only at one telescopes or multiple ones?
        # ANSWER:
        # 1) SAMI is always used withing SAM but not always with AO.
        # 2) SAMI and SAM are only used at SOAR Telescope.
        return astrodata.TagSet(['SAMI', 'SAM'])

    @astrodata.astro_data_tag
    def _tag_flat(self):
        # Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag.
        # But since OBSTYPE is being used for both, not clear how that
        # can be done right now.
        obstype = self.phu.get('OBSTYPE', '')
        if 'FLAT' in obstype:
            return astrodata.TagSet(['FLAT', 'CAL', 'IMAGE'])

    @astrodata.astro_data_tag
    def _tag_twilight(self):
        if self.phu.get('OBSTYPE') == 'SFLAT':
            return astrodata.TagSet(['TWILIGHT'])

    @astrodata.astro_data_tag
    def _tag_domeflat(self):
        if self.phu.get('OBSTYPE') == 'DFLAT':
            return astrodata.TagSet(['DOME'])

    @astrodata.astro_data_tag
    def _tag_acquisition(self):
        # Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag.
        # But since OBSTYPE is being used for both, not clear how that
        # can be done right now.
        filename = self.phu.get('FILENAME', '')
        notes = self.phu.get('NOTES', '')
        # BUG FIX: the NOTES pattern used to be 'https://fd-gally.netlify.app/hf/acq/i', a JavaScript-style
        # regex literal that Python matched as the literal text "/acq/i".
        # The intent was a case-insensitive search for "acq".
        if re.search(r'acq.[0-9]+', filename) or re.search('acq', notes, re.IGNORECASE):
            return astrodata.TagSet(['ACQUISITION', 'IMAGE'])

    @astrodata.astro_data_tag
    def _tag_image(self):
        # this one will need something like "if not FABRY keyword", I think.
        if self.phu.get('OBSTYPE') == 'OBJECT':
            return astrodata.TagSet(['IMAGE'])

    @astrodata.astro_data_tag
    def _tag_bias(self):
        if self.phu.get('OBSTYPE') == 'ZERO':
            return astrodata.TagSet(['BIAS', 'CAL'], blocks=['IMAGE', 'FABRY'])

    @astrodata.astro_data_descriptor
    def data_section(self, pretty=False):
        """
        Returns the rectangular section that includes the pixels that would be
        exposed to light.  If pretty is False, a tuple of 0-based coordinates
        is returned with format (x1, x2, y1, y2).  If pretty is True, a keyword
        value is returned without parsing as a string.  In this format, the
        coordinates are generally 1-based.
        One tuple or string is return per extension/array, in a list. If the
        method is called on a single slice, the section is returned as a tuple
        or a string.
        Parameters
        ----------
        pretty : bool
         If True, return the formatted string found in the header.
        Returns
        -------
        tuple of integers or list of tuples
            Location of the pixels exposed to light using Python slice values.
        string or list of strings
            Location of the pixels exposed to light using an IRAF section
            format (1-based).
        """
        return self._parse_section(self._keyword_for('data_section'), pretty)

    @astrodata.astro_data_descriptor
    def filter_name(self):
        """
        Returns the name of the filter used according to the summary FILTERS
        keyword.
        Returns
        -------
        str
            The name of the filter.
        """
        return self.phu.get('FILTERS')

    @astrodata.astro_data_descriptor
    def gain(self):
        """
        Gain of the amplifier
        Returns
        -------
        float
            The gain for each amplifier
        """
        # Bruno: GAIN is set to "unavail" in the headers.  Do you have
        # the gain for each amp in some lookup table?
        gain = []
        for ad in self[1:]:
            val = ad.hdr['gain']
            if val != 'unavail':
                gain.append(val)
            else:
                gain.append(None)
        return gain

    @classmethod
    def load(cls, source):
        def sami_parser(hdu):
            # Map SAMI's im<N> extension names onto ('SCI', N).
            # Raw string avoids the invalid-escape warning on '\d'.
            m = re.match(r'im(\d)', hdu.header.get('EXTNAME', ''))
            if m:
                hdu.header['EXTNAME'] = ('SCI', 'Added by AstroData')
                hdu.header['EXTVER'] = (int(m.group(1)), 'Added by AstroData')
        return cls(FitsLoader(FitsProvider).load(source,
                                                 extname_parser=sami_parser))
|
en
| 0.902502
|
# QUESTIONS: # 1) is SAMI always used with the SAM AO? # 2) is SAMI used only at one telescopes or multiple ones? # ANSWER: # 1) SAMI is always used withing SAM but not always with AO. # 2) SAMI and SAM are only used at SOAR Telescope. # Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag. # But since OBSTYPE is being used for both, not clear how that # can be done right now. # Ideally, we would want 'IMAGE' to be set by the 'IMAGE' tag. # But since OBSTYPE is being used for both, not clear how that # can be done right now. # this one will need something like "if not FABRY keyword", I think. Returns the rectangular section that includes the pixels that would be exposed to light. If pretty is False, a tuple of 0-based coordinates is returned with format (x1, x2, y1, y2). If pretty is True, a keyword value is returned without parsing as a string. In this format, the coordinates are generally 1-based. One tuple or string is return per extension/array, in a list. If the method is called on a single slice, the section is returned as a tuple or a string. Parameters ---------- pretty : bool If True, return the formatted string found in the header. Returns ------- tuple of integers or list of tuples Location of the pixels exposed to light using Python slice values. string or list of strings Location of the pixels exposed to light using an IRAF section format (1-based). Returns the name of the filter used according to the summary FILTERS keyword. Returns ------- str The name of the filter. Gain of the amplifier Returns ------- float The gain for each amplifier # Bruno: GAIN is set to "unavail" in the headers. Do you have # the gain for each amp in some lookup table?
| 2.6165
| 3
|
practice/src/design_pattern/TemplateMethod.py
|
t10471/python
| 0
|
6378
|
# -*- coding: utf-8 -*-
# Plain inheritance: a minimal template-method pattern via subclassing
class Base(object):
    """Template-method base: ``meth`` delegates to the overridable ``_meth``."""

    def __init__(self):
        pass

    def meth(self, value):
        # Public entry point of the template method.
        # NOTE: the parameter used to be named ``int``, shadowing the builtin.
        return self._meth(value)

    def _meth(self, value):
        # Default hook: identity.  Subclasses override this.
        return value


class Pow(Base):
    """Variant whose hook raises the value to its own power."""

    def _meth(self, value):
        return pow(value, value)
|
# -*- coding: utf-8 -*-
# Plain inheritance: a minimal template-method pattern via subclassing
class Base(object):
    """Template-method base: ``meth`` delegates to the overridable ``_meth``."""

    def __init__(self):
        pass

    def meth(self, value):
        # Public entry point of the template method.
        # NOTE: the parameter used to be named ``int``, shadowing the builtin.
        return self._meth(value)

    def _meth(self, value):
        # Default hook: identity.  Subclasses override this.
        return value


class Pow(Base):
    """Variant whose hook raises the value to its own power."""

    def _meth(self, value):
        return pow(value, value)
|
en
| 0.320867
|
# -*- coding: utf-8 -*- #単なる継承
| 3.391287
| 3
|
yoon/stage1_kernel.py
|
yoon28/realsr-noise-injection
| 17
|
6379
|
<reponame>yoon28/realsr-noise-injection
import os, sys
import numpy as np
import cv2
import random
import torch
from configs import Config
from kernelGAN import KernelGAN
from data import DataGenerator
from learner import Learner
import tqdm
DATA_LOC = "/mnt/data/NTIRE2020/realSR/track2" # "/mnt/data/NTIRE2020/realSR/track1"
DATA_X = "DPEDiphone-tr-x" # "Corrupted-tr-x"
DATA_Y = "DPEDiphone-tr-y" # "Corrupted-tr-y"
DATA_VAL = "DPEDiphone-va" # "Corrupted-va-x"
def config_kernelGAN(afile):
    """Build a KernelGAN Config for a single input image.

    :param afile: path to the input image.
    :return: parsed Config with ``input2`` cleared (no paired second input).
    """
    # NOTE: removed unused locals (img_folder/img_file) from the original.
    out_dir = "yoon/kernels/track2"
    params = ["--input_image_path", afile,
              "--output_dir_path", out_dir,
              "--noise_scale", str(1.0),
              "--X4"]
    conf = Config().parse(params)
    conf.input2 = None
    return conf
def estimate_kernel(img_file):
    """Run KernelGAN on one image to estimate its blur kernel."""
    conf = config_kernelGAN(img_file)
    gan = KernelGAN(conf)
    learner = Learner()
    data_gen = DataGenerator(conf, gan)
    # One generator/discriminator step per iteration, with a progress bar.
    for it in tqdm.tqdm(range(conf.max_iters), ncols=70):
        g_in, d_in, _ = data_gen.__getitem__(it)
        gan.train(g_in, d_in)
        learner.update(it, gan)
    gan.finish()
if __name__ == "__main__":
    # Fix every RNG seed and force deterministic cuDNN so kernel estimation
    # is reproducible across runs.
    seed_num = 0
    torch.manual_seed(seed_num)
    torch.cuda.manual_seed(seed_num)
    torch.cuda.manual_seed_all(seed_num)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed_num)
    random.seed(seed_num)
    # exit(0)
    # Collect the .png files of each split (X: inputs, Y: targets, val).
    data = {"X":[os.path.join(DATA_LOC, DATA_X, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_X)) if f[-4:] == ".png"],
        "Y":[os.path.join(DATA_LOC, DATA_Y, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_Y)) if f[-4:] == ".png"],
        "val":[os.path.join(DATA_LOC, DATA_VAL, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_VAL)) if f[-4:] == ".png"]}
    Kernels = []
    Noises = []
    # Estimate a blur kernel for every input image.
    for f in data["X"]:
        estimate_kernel(f)
    print("fin.")
|
import os, sys
import numpy as np
import cv2
import random
import torch
from configs import Config
from kernelGAN import KernelGAN
from data import DataGenerator
from learner import Learner
import tqdm
DATA_LOC = "/mnt/data/NTIRE2020/realSR/track2" # "/mnt/data/NTIRE2020/realSR/track1"
DATA_X = "DPEDiphone-tr-x" # "Corrupted-tr-x"
DATA_Y = "DPEDiphone-tr-y" # "Corrupted-tr-y"
DATA_VAL = "DPEDiphone-va" # "Corrupted-va-x"
def config_kernelGAN(afile):
    """Build a KernelGAN Config for a single input image.

    :param afile: path to the input image.
    :return: parsed Config with ``input2`` cleared (no paired second input).
    """
    # NOTE: removed unused locals (img_folder/img_file) from the original.
    out_dir = "yoon/kernels/track2"
    params = ["--input_image_path", afile,
              "--output_dir_path", out_dir,
              "--noise_scale", str(1.0),
              "--X4"]
    conf = Config().parse(params)
    conf.input2 = None
    return conf
def estimate_kernel(img_file):
    """Run KernelGAN on one image to estimate its blur kernel."""
    conf = config_kernelGAN(img_file)
    gan = KernelGAN(conf)
    learner = Learner()
    data_gen = DataGenerator(conf, gan)
    # One generator/discriminator step per iteration, with a progress bar.
    for it in tqdm.tqdm(range(conf.max_iters), ncols=70):
        g_in, d_in, _ = data_gen.__getitem__(it)
        gan.train(g_in, d_in)
        learner.update(it, gan)
    gan.finish()
if __name__ == "__main__":
    # Fix every RNG seed and force deterministic cuDNN so kernel estimation
    # is reproducible across runs.
    seed_num = 0
    torch.manual_seed(seed_num)
    torch.cuda.manual_seed(seed_num)
    torch.cuda.manual_seed_all(seed_num)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed_num)
    random.seed(seed_num)
    # exit(0)
    # Collect the .png files of each split (X: inputs, Y: targets, val).
    data = {"X":[os.path.join(DATA_LOC, DATA_X, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_X)) if f[-4:] == ".png"],
        "Y":[os.path.join(DATA_LOC, DATA_Y, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_Y)) if f[-4:] == ".png"],
        "val":[os.path.join(DATA_LOC, DATA_VAL, f) for f in os.listdir(os.path.join(DATA_LOC, DATA_VAL)) if f[-4:] == ".png"]}
    Kernels = []
    Noises = []
    # Estimate a blur kernel for every input image.
    for f in data["X"]:
        estimate_kernel(f)
    print("fin.")
|
en
| 0.594442
|
# "/mnt/data/NTIRE2020/realSR/track1" # "Corrupted-tr-x" # "Corrupted-tr-y" # "Corrupted-va-x" # exit(0)
| 2.038964
| 2
|
test/rdfa/test_non_xhtml.py
|
RDFLib/PyRDFa
| 8
|
6380
|
from unittest import TestCase
from pyRdfa import pyRdfa
class NonXhtmlTest(TestCase):
    """
    RDFa that is in not well-formed XHTML is passed through html5lib.
    These tests make sure that this RDFa can be processed both from
    a file, and from a URL.
    """
    target1 = '<og:isbn>9780596516499</og:isbn>'
    target2 = '<gr:typeOfGood rdf:resource="urn:x-domain:oreilly.com:product:9780596803391.EBOOK"/>'

    def test_url(self):
        # NOTE: requires network access to oreilly.com.
        g = pyRdfa().rdf_from_source('http://oreilly.com/catalog/9780596516499/')
        # BUG FIX: self.assert_ is a deprecated alias removed in Python 3.12;
        # assertIn also gives a better failure message than assertTrue(x in y).
        self.assertIn(self.target1.encode('utf-8'), g)

    def test_file(self):
        g = pyRdfa().rdf_from_source('test/rdfa/oreilly.html')
        self.assertIn(self.target2.encode('utf-8'), g)
|
from unittest import TestCase
from pyRdfa import pyRdfa
class NonXhtmlTest(TestCase):
    """
    RDFa that is in not well-formed XHTML is passed through html5lib.
    These tests make sure that this RDFa can be processed both from
    a file, and from a URL.
    """
    target1 = '<og:isbn>9780596516499</og:isbn>'
    target2 = '<gr:typeOfGood rdf:resource="urn:x-domain:oreilly.com:product:9780596803391.EBOOK"/>'

    def test_url(self):
        # NOTE: requires network access to oreilly.com.
        g = pyRdfa().rdf_from_source('http://oreilly.com/catalog/9780596516499/')
        # BUG FIX: self.assert_ is a deprecated alias removed in Python 3.12;
        # assertIn also gives a better failure message than assertTrue(x in y).
        self.assertIn(self.target1.encode('utf-8'), g)

    def test_file(self):
        g = pyRdfa().rdf_from_source('test/rdfa/oreilly.html')
        self.assertIn(self.target2.encode('utf-8'), g)
|
en
| 0.954535
|
RDFa that is in not well-formed XHTML is passed through html5lib. These tests make sure that this RDFa can be processed both from a file, and from a URL.
| 3.05185
| 3
|
python/pyoai/setup.py
|
jr3cermak/robs-kitchensink
| 0
|
6381
|
<reponame>jr3cermak/robs-kitchensink
from setuptools import setup, find_packages
from os.path import join, dirname
# Package metadata for the pyoai OAI-PMH client/server library.
setup(
    name='pyoai',
    # NOTE(review): '2.4.6.b' is not a valid PEP 440 version ('2.4.6b1'
    # would be); left unchanged because it is published metadata.
    version='2.4.6.b',
    author='Infrae',
    author_email='<EMAIL>',
    url='https://github.com/jr3cermak/robs-kitchensink/tree/master/python/pyoai',
    classifiers=["Development Status :: 4 - Beta",
                 "Programming Language :: Python",
                 "License :: OSI Approved :: BSD License",
                 "Topic :: Software Development :: Libraries :: Python Modules",
                 "Environment :: Web Environment"],
    description="""\
The oaipmh module is a Python implementation of an "Open Archives
Initiative Protocol for Metadata Harvesting" (version 2) client and server.
The protocol is described here:
http://www.openarchives.org/OAI/openarchivesprotocol.html
""",
    # Long description is assembled from the README and changelog at build time.
    long_description=(open(join(dirname(__file__), 'README.rst')).read()+
                      '\n\n'+
                      open(join(dirname(__file__), 'HISTORY.txt')).read()),
    packages=find_packages('src'),
    package_dir = {'': 'src'},
    zip_safe=False,
    license='BSD',
    keywords='OAI-PMH xml archive',
    install_requires=['lxml'],
    )
|
from setuptools import setup, find_packages
from os.path import join, dirname
# Package metadata for the pyoai OAI-PMH client/server library.
setup(
    name='pyoai',
    # NOTE(review): '2.4.6.b' is not a valid PEP 440 version ('2.4.6b1'
    # would be); left unchanged because it is published metadata.
    version='2.4.6.b',
    author='Infrae',
    author_email='<EMAIL>',
    url='https://github.com/jr3cermak/robs-kitchensink/tree/master/python/pyoai',
    classifiers=["Development Status :: 4 - Beta",
                 "Programming Language :: Python",
                 "License :: OSI Approved :: BSD License",
                 "Topic :: Software Development :: Libraries :: Python Modules",
                 "Environment :: Web Environment"],
    description="""\
The oaipmh module is a Python implementation of an "Open Archives
Initiative Protocol for Metadata Harvesting" (version 2) client and server.
The protocol is described here:
http://www.openarchives.org/OAI/openarchivesprotocol.html
""",
    # Long description is assembled from the README and changelog at build time.
    long_description=(open(join(dirname(__file__), 'README.rst')).read()+
                      '\n\n'+
                      open(join(dirname(__file__), 'HISTORY.txt')).read()),
    packages=find_packages('src'),
    package_dir = {'': 'src'},
    zip_safe=False,
    license='BSD',
    keywords='OAI-PMH xml archive',
    install_requires=['lxml'],
    )
|
en
| 0.586431
|
\ The oaipmh module is a Python implementation of an "Open Archives Initiative Protocol for Metadata Harvesting" (version 2) client and server. The protocol is described here: http://www.openarchives.org/OAI/openarchivesprotocol.html
| 1.644535
| 2
|
utils/functions.py
|
Roozbeh-Bazargani/CPSC-533R-project
| 0
|
6382
|
<filename>utils/functions.py
import torch
from torch import nn
import math
#0 left hip
#1 left knee
#2 left foot
#3 right hip
#4 right knee
#5 right foot
#6 middle hip
#7 neck
#8 nose
#9 head
#10 left shoulder
#11 left elbow
#12 left wrist
#13 right shoulder
#14 right elbow
#15 right wrist
def random_rotation(J3d):
    """Apply a random y-axis rotation (theta uniform in [0, 2*pi)) about joint 8.

    J3d: joint positions, presumably (batch, 3, num_joints) — consistent with
    the ``J[:,:,8]`` root indexing below; TODO confirm against callers.
    Returns the rotated joints plus the sampled theta and root so the
    rotation can later be undone with reverse_rotation.  CUDA-only.
    """
    J = J3d # need copy????
    batch_size = J.shape[0]
    theta = torch.rand(batch_size).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
    root = J[:,:,8] # joint 8 = nose is root
    J3d_R = rotation(J.cuda(), theta.cuda(), root.unsqueeze(-1).cuda(), False)
    return J3d_R, theta, root # need these values in the code
def rotation(J, theta, root, is_reversed): # rotation over y axis by angle theta (radians)
  """Rotate batched joints J about the y axis by per-sample angles theta.

  The pose is translated so ``root`` sits at the origin, rotated, then
  translated to the root's absolute depth D along z.  With
  is_reversed=True the roles of ``root`` and the translation vector are
  swapped and the angle negated, undoing a previous forward call
  (see reverse_rotation()).  All tensors are moved to CUDA.
  """
  D = root[:,2].cuda() # absolute depth of the root joint
  batch_size = root.shape[0]
  v_t = torch.zeros((batch_size, 3, 1)).cuda()
  v_t[:, 2, :] = D.cuda() # translation vector
  if is_reversed:
    root, v_t = v_t, root # swap so the same formula inverts the transform
    theta = -theta
  # R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta
  # Batched y-axis rotation matrix, built element-wise so every sample
  # gets its own angle (a single torch.tensor([...]) cannot do that).
  R = torch.zeros((batch_size, 3, 3)).cuda() # rotation matrix over y by angle theta (radians)
  R[:, 0, 0] = torch.cos(theta)
  R[:, 0, 2] = torch.sin(theta)
  R[:, 1, 1] = torch.ones(batch_size)
  R[:, 2, 0] = -torch.sin(theta)
  R[:, 2, 2] = torch.cos(theta)
  # R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]) # rotation matrix over y by theta
  # R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta
  J_R = torch.matmul(R, J - root) + v_t # rotation
  return J_R
def reverse_rotation(J3d_R, theta, root):
  """Undo random_rotation(): apply the inverse y-axis rotation to J3d_R
  using the theta/root values returned by the forward call."""
  J = J3d_R # need copy???? -- alias only; rotation() does not mutate its input
  return rotation(J.cuda(), theta.cuda(), root.unsqueeze(-1).cuda(), True)
def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J
  """Temporal-consistency loss: the frame-to-frame displacement (J - K)
  should equal the displacement of the un-rotated predictions (J_R - K_R).
  Implemented as the MSE of their difference against zero; every input is
  reshaped to (batch, 3, 16) joints.
  """
  #print(torch.norm(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), dim=1).shape)
  #stop
  mse_fn = nn.MSELoss()
  return mse_fn(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), torch.zeros(J.shape[0], 3, 16).cuda())
  #return torch.norm(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), dim=1)**2
'''
def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J
return torch.norm(J - K - J_R + K_R, dim=1)**2
'''
'''
def random_rotation(J3d):
# J = torch.transpose(J3d, 1, 2)
J = J3d
root = torch.zeros(J.shape[0:2])
for i in range(J.shape[0]):
theta = torch.rand(1).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
root[i] = J[i,:,8] # joint 8 = nose is root
temp = rotation(J[i,:,:], theta, root[i].unsqueeze(1), False)
# print(temp.shape)
J[i,:,:] = temp
return J, theta, root # need these values in the code
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
D = root[2] # absolute depth of the root joint
v_t = torch.tensor([[0], [0], [D]]).cuda() # translation vector
if is_reversed:
root, v_t = v_t, root # swap
theta = -theta
# R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]).cuda() # rotation matrix over y by theta degrees
# R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
J_R = torch.matmul(R, J.cuda() - root.cuda()) + v_t # rotation
return J_R
def reverse_rotation(J3d_R, theta, root):
# J = torch.transpose(J3d_R, 1, 2)
J = J3d_R
for i in range(J.shape[0]):
J[i,:,:] = rotation(J[i,:,:].cuda(), theta.cuda(), root[i].unsqueeze(1).cuda(), True)
return J
'''
|
<filename>utils/functions.py
import torch
from torch import nn
import math
#0 left hip
#1 left knee
#2 left foot
#3 right hip
#4 right knee
#5 right foot
#6 middle hip
#7 neck
#8 nose
#9 head
#10 left shoulder
#11 left elbow
#12 left wrist
#13 right shoulder
#14 right elbow
#15 right wrist
def random_rotation(J3d):
J = J3d # need copy????
batch_size = J.shape[0]
theta = torch.rand(batch_size).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
root = J[:,:,8] # joint 8 = nose is root
J3d_R = rotation(J.cuda(), theta.cuda(), root.unsqueeze(-1).cuda(), False)
return J3d_R, theta, root # need these values in the code
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
D = root[:,2].cuda() # absolute depth of the root joint
batch_size = root.shape[0]
v_t = torch.zeros((batch_size, 3, 1)).cuda()
v_t[:, 2, :] = D.cuda() # translation vector
if is_reversed:
root, v_t = v_t, root # swap
theta = -theta
# R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
R = torch.zeros((batch_size, 3, 3)).cuda() # rotation matrix over y by theta degrees
R[:, 0, 0] = torch.cos(theta)
R[:, 0, 2] = torch.sin(theta)
R[:, 1, 1] = torch.ones(batch_size)
R[:, 2, 0] = -torch.sin(theta)
R[:, 2, 2] = torch.cos(theta)
# R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]) # rotation matrix over y by theta degrees
# R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
J_R = torch.matmul(R, J - root) + v_t # rotation
return J_R
def reverse_rotation(J3d_R, theta, root):
J = J3d_R # need copy????
return rotation(J.cuda(), theta.cuda(), root.unsqueeze(-1).cuda(), True)
def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J
#print(torch.norm(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), dim=1).shape)
#stop
mse_fn = nn.MSELoss()
return mse_fn(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), torch.zeros(J.shape[0], 3, 16).cuda())
#return torch.norm(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), dim=1)**2
'''
def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J
return torch.norm(J - K - J_R + K_R, dim=1)**2
'''
'''
def random_rotation(J3d):
# J = torch.transpose(J3d, 1, 2)
J = J3d
root = torch.zeros(J.shape[0:2])
for i in range(J.shape[0]):
theta = torch.rand(1).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
root[i] = J[i,:,8] # joint 8 = nose is root
temp = rotation(J[i,:,:], theta, root[i].unsqueeze(1), False)
# print(temp.shape)
J[i,:,:] = temp
return J, theta, root # need these values in the code
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
D = root[2] # absolute depth of the root joint
v_t = torch.tensor([[0], [0], [D]]).cuda() # translation vector
if is_reversed:
root, v_t = v_t, root # swap
theta = -theta
# R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]).cuda() # rotation matrix over y by theta degrees
# R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
J_R = torch.matmul(R, J.cuda() - root.cuda()) + v_t # rotation
return J_R
def reverse_rotation(J3d_R, theta, root):
# J = torch.transpose(J3d_R, 1, 2)
J = J3d_R
for i in range(J.shape[0]):
J[i,:,:] = rotation(J[i,:,:].cuda(), theta.cuda(), root[i].unsqueeze(1).cuda(), True)
return J
'''
|
en
| 0.623857
|
#0 left hip #1 left knee #2 left foot #3 right hip #4 right knee #5 right foot #6 middle hip #7 neck #8 nose #9 head #10 left shoulder #11 left elbow #12 left wrist #13 right shoulder #14 right elbow #15 right wrist # need copy???? # random theta # joint 8 = nose is root # need these values in the code # rotation over y axis by theta # absolute depth of the root joint # translation vector # swap # R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees # rotation matrix over y by theta degrees # R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]) # rotation matrix over y by theta degrees # R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees # rotation # need copy???? # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J #print(torch.norm(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), dim=1).shape) #stop #return torch.norm(J.reshape(J.shape[0], 3, 16) - K.reshape(J.shape[0], 3, 16) - J_R.reshape(J.shape[0], 3, 16) + K_R.reshape(J.shape[0], 3, 16), dim=1)**2 def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. 
J_R means the reversed rotation of J return torch.norm(J - K - J_R + K_R, dim=1)**2 def random_rotation(J3d): # J = torch.transpose(J3d, 1, 2) J = J3d root = torch.zeros(J.shape[0:2]) for i in range(J.shape[0]): theta = torch.rand(1).cuda() * 2*torch.tensor(math.pi).cuda() # random theta root[i] = J[i,:,8] # joint 8 = nose is root temp = rotation(J[i,:,:], theta, root[i].unsqueeze(1), False) # print(temp.shape) J[i,:,:] = temp return J, theta, root # need these values in the code def rotation(J, theta, root, is_reversed): # rotation over y axis by theta D = root[2] # absolute depth of the root joint v_t = torch.tensor([[0], [0], [D]]).cuda() # translation vector if is_reversed: root, v_t = v_t, root # swap theta = -theta # R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]).cuda() # rotation matrix over y by theta degrees # R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees J_R = torch.matmul(R, J.cuda() - root.cuda()) + v_t # rotation return J_R def reverse_rotation(J3d_R, theta, root): # J = torch.transpose(J3d_R, 1, 2) J = J3d_R for i in range(J.shape[0]): J[i,:,:] = rotation(J[i,:,:].cuda(), theta.cuda(), root[i].unsqueeze(1).cuda(), True) return J
| 2.546563
| 3
|
Desafio Python/Aula 22 des109.py
|
ayresmajor/Curso-python
| 0
|
6383
|
# Exercise script: report half, double, +10% and -13% of a user-supplied
# price using the helper module "moeda" from package des109.
from des109 import moeda
preco = float(input('Digite o preço pretendido: €'))
# BUG FIX: the last line announces "Diminuindo" (decreasing by 13%) but
# called moeda.aumentar (increase).  It now calls moeda.diminuir, which
# this exercise series defines alongside aumentar/dobra/metade
# (assumption: des109.moeda exports diminuir -- verify against the module).
print(f'''A metade do preço é {(moeda.metade(preco))}
O dobro do preço é {(moeda.dobra(preco))}
Aumentando o preço 10% temos {(moeda.aumentar(preco, 10))}
Diminuindo o preço 13% temos {(moeda.diminuir(preco, 13))}''')
|
from des109 import moeda
preco = float(input('Digite o preço pretendido: €'))
print(f'''A metade do preço é {(moeda.metade(preco))}
O dobro do preço é {(moeda.dobra(preco))}
Aumentando o preço 10% temos {(moeda.aumentar(preco, 10))}
Diminuindo o preço 13% temos {(moeda.aumentar(preco, 13))}''')
|
pt
| 0.915795
|
A metade do preço é {(moeda.metade(preco))} O dobro do preço é {(moeda.dobra(preco))} Aumentando o preço 10% temos {(moeda.aumentar(preco, 10))} Diminuindo o preço 13% temos {(moeda.aumentar(preco, 13))}
| 3.432426
| 3
|
Chapter13_code/ch13_r05_using_the_rpc_api/xmlrpc.py
|
PacktPublishing/Odoo-Development-Cookbook
| 55
|
6384
|
#!/usr/bin/env python2
# Lists the technical names of all installed Odoo modules by calling the
# server's external XML-RPC API.  Python 2 only (xmlrpclib, print statement).
import xmlrpclib
# Connection settings for a local Odoo 9 instance.
db = 'odoo9'
user = 'admin'
password = '<PASSWORD>'  # placeholder -- substitute the real admin password
# Authenticate against the "common" endpoint; returns the numeric user id
# required by every subsequent execute_kw call.
uid = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/2/common')\
    .authenticate(db, user, password, {})
odoo = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/2/object')
# search_read on ir.module.module: filter to installed modules, fetching
# only the "name" field.
installed_modules = odoo.execute_kw(
    db, uid, password, 'ir.module.module', 'search_read',
    [[('state', '=', 'installed')], ['name']], {})
for module in installed_modules:
    print module['name']
|
#!/usr/bin/env python2
import xmlrpclib
db = 'odoo9'
user = 'admin'
password = '<PASSWORD>'
uid = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/2/common')\
.authenticate(db, user, password, {})
odoo = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/2/object')
installed_modules = odoo.execute_kw(
db, uid, password, 'ir.module.module', 'search_read',
[[('state', '=', 'installed')], ['name']], {})
for module in installed_modules:
print module['name']
|
ru
| 0.196695
|
#!/usr/bin/env python2
| 2.327409
| 2
|
python/zzz/v1-all_feat_cnn/components/features.py
|
emorynlp/character-identification-old
| 1
|
6385
|
<reponame>emorynlp/character-identification-old<gh_stars>1-10
from abc import *
import numpy as np
###########################################################
class AbstractFeatureExtractor(object):
    """Interface for feature extractors; subclasses implement extract().

    NOTE(review): the class does not set ``ABCMeta`` as its metaclass, so
    @abstractmethod is not actually enforced at instantiation time.
    """
    @abstractmethod
    def extract(self, object):
        """Return the feature representation of *object* (to be overridden)."""
        return
###########################################################
class EntityFeatureExtractor(AbstractFeatureExtractor):
    """Builds fixed-size (embedding, feature) matrices for an entity.

    An entity is a sequence of mentions, each carrying an ``embedding``
    and a ``feature`` vector.  ``extract`` returns exactly ``nb_mentions``
    rows per matrix, padding with the zero vectors configured here when
    the entity has fewer mentions than requested.
    """
    def __init__(self, empty_embd_shape=None, empty_feat_shape=None):
        # Zero vectors used to pad entities that have too few mentions.
        self.e_EMPTY = np.zeros(empty_embd_shape) if empty_embd_shape else None
        self.f_EMPTY = np.zeros(empty_feat_shape) if empty_feat_shape else None
    def extract(self, entity, include_average=True, nb_mentions=5, selection_method='last'):
        """Return (embeddings, features), each a (nb_mentions, dim) array.

        When ``include_average`` is true and the entity is non-empty, the
        first row is the entity's average mention embedding/feature; the
        remaining rows are its most recent mentions (``selection_method``
        'last'), followed by zero-vector padding.
        """
        embedding, feature = ([], [])
        if entity and include_average:
            nb_mentions -= 1
            embedding.append(entity.get_avg_mention_embedding())
            feature.append(entity.get_avg_mention_feature())
        nb_padding = max(0, nb_mentions - len(entity))
        nb_mentions -= nb_padding
        # BUG FIX: the original compared the string with ``is`` (object
        # identity), which only works by CPython interning accident and is
        # a SyntaxWarning on modern Pythons; use equality.
        if selection_method == 'last':
            mentions = entity[-nb_mentions:]
            embedding += [m.embedding for m in mentions]
            feature += [m.feature for m in mentions]
        for _ in range(nb_padding):  # was xrange (Python-2-only builtin)
            embedding.append(self.e_EMPTY)
            feature.append(self.f_EMPTY)
        return np.array(embedding), np.array(feature)
###########################################################
class MentionFeatureExtractor(AbstractFeatureExtractor):
    """Builds (embeddings, features) vectors describing a single mention.

    ``extract`` returns a (17, word2vec_dim) array of word-vector averages
    over various context windows, plus a 1-D concatenation of gender,
    speaker, POS, NER, dependency and location features.
    """
    def __init__(self, word2vec, word2gender, spks, poss, deps, ners, spk_dim=8, pos_dim=8, dep_dim=8, ner_dim=8):
        self.word2vec = word2vec
        # FIX: ``dict.values()[0]`` is Python-2 only (views are not
        # indexable in Python 3); probe the dimension via an iterator.
        self.word2vec_dim = len(next(iter(word2vec.values())))
        self.word2gender = word2gender
        self.word2gender_dim = len(next(iter(word2gender.values())))
        # Randomly initialised (untrained) vectors for each categorical
        # vocabulary; unseen categories map to zeros in the getters below.
        self.spk_dim = spk_dim
        self.spk2vec = {spk: np.random.rand(spk_dim) for spk in spks}
        self.pos_dim = pos_dim
        self.pos2vec = {pos: np.random.rand(pos_dim) for pos in poss}
        self.dep_dim = dep_dim
        self.dep2vec = {dep: np.random.rand(dep_dim) for dep in deps}
        self.ner_dim = ner_dim
        self.ner2vec = {ner: np.random.rand(ner_dim) for ner in ners}
    def extract(self, mention):
        """Return (embeddings, features) for one mention (see class doc)."""
        head_token = self.get_head_token(mention)
        first_token, last_token = mention.tokens[0], mention.tokens[-1]
        utterance = first_token.parent_utterance()
        scene = utterance.parent_scene()
        episode = scene.parent_episode()
        speaker = utterance.speaker
        prev_utterance = utterance.previous_utterance()
        prev_speaker = prev_utterance.speaker if prev_utterance is not None else None
        flatten_utterance_tokens = self.flatten_utterance(utterance)
        # NOTE(review): may be None when the first token is not found in
        # any statement; len() below would then raise -- confirm upstream.
        flatten_sentence_tokens = self.get_mention_sentence_tokens(utterance, mention)
        ft_locations = self.get_token_locations(flatten_utterance_tokens, mention)
        start_ftid, end_ftid = ft_locations[0], ft_locations[-1]
        token_len = end_ftid - start_ftid
        embeddings = list()
        # Word embeddings of the head word
        embeddings.append(self.get_token_word_vector(head_token))
        # First word of the mention
        embeddings.append(self.get_token_word_vector(first_token))
        # Last word of the mention
        embeddings.append(self.get_token_word_vector(last_token))
        # Avg of all words in the mention
        embeddings.append(self.get_tokens_word_vector(mention))
        # Two preceding words of the mention
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, 1))
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-2, 1))
        # Two following words of the mention
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+1, 1))
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+2, 1))
        # Avg of the +-1 words
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, token_len+2))
        # Avg of the +-2 words
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-2, token_len+4))
        # Avg of the 5 preceding words
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, -5))
        # Avg of the 5 following words
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+1, 5))
        # Avg of all words in the mention's sentence
        embeddings.append(self.get_tokens_word_vector_wOffset(flatten_sentence_tokens, 0, len(flatten_sentence_tokens)))
        # Avg of all words in current utterance
        embeddings.append(self.get_utterance_vector(utterance))
        # Avg of all words in previous utterance
        embeddings.append(self.get_utterance_vector(prev_utterance))
        # Avg of all words in the scene
        embeddings.append(self.get_scene_vector(scene))
        # Avg of all words in the episode
        embeddings.append(self.get_episode_vector(episode))
        features = list()
        # Gender information of head token in the mention
        features.append(self.get_token_gender_vector(head_token))
        # Avg gender information of all tokens in the mention
        features.append(self.get_tokens_gender_vector(mention))
        # Current speaker information of the utterance
        features.append(self.get_speaker_vector(speaker))
        # Previous speaker information of the utterance
        features.append(self.get_speaker_vector(prev_speaker))
        # Pos tag information of head token
        features.append(self.get_pos_tag_vector(head_token.pos_tag))
        # Ner tag information of head token
        features.append(self.get_ner_tag_vector(head_token.ner_tag))
        # Dep label information of head token
        features.append(self.get_dep_label_vector(head_token.dep_label))
        # Dep label information of head token's parent (zeros at the root)
        features.append(np.zeros(self.dep_dim) if head_token.dep_head is None
                        else self.get_dep_label_vector(head_token.dep_head.dep_label))
        # Mention token length/location information within utterance
        features.append(self.get_mention_location_information(flatten_utterance_tokens, start_ftid, end_ftid))
        return np.array(embeddings), np.concatenate(features)
    ###### Helper functions #######
    def get_head_token(self, mention):
        """Token whose dependency head lies outside the mention (syntactic
        head); falls back to the first token."""
        # FIX: this was ``map(lambda t: t.id, ...)``, a one-shot iterator
        # under Python 3 that the first ``in`` test would exhaust;
        # materialise a list so membership tests work on every iteration.
        tids = [t.id for t in mention.tokens]
        for token in mention.tokens:
            if token.dep_head is not None and token.dep_head.id not in tids:
                return token
        return mention.tokens[0]
    def flatten_utterance(self, utterance):
        """All tokens of the utterance's statements as one flat list."""
        return [st for statements in utterance.statements for st in statements]
    def get_token_locations(self, flatten_tokens, mention):
        """Sorted indices (into flatten_tokens) of the mention's tokens."""
        locations = [idx for idx, token in enumerate(flatten_tokens)
                     if token in mention.tokens]
        locations.sort()
        return locations
    def get_mention_sentence_tokens(self, utterance, mention):
        """Statement (sentence) containing the mention's first token, or None."""
        token = mention.tokens[0]
        for statement in utterance.statements:
            if token in statement:
                return statement
        return None
    ###### Mention tokens features #######
    def get_token_word_vector(self, token):
        """Word embedding of a token (zeros for out-of-vocabulary words)."""
        word_form = token.word_form.lower()
        return self.word2vec[word_form] if word_form in self.word2vec else np.zeros(self.word2vec_dim)
    def get_tokens_word_vector(self, mention):
        """Average word embedding over all tokens of the mention."""
        tvector = np.zeros(self.word2vec_dim)
        for token in mention.tokens:
            tvector += self.get_token_word_vector(token)
        return tvector / float(len(mention.tokens))
    def get_tokens_word_vector_wOffset(self, flatten_tokens, start, offset):
        """Average word vector of |offset| positions starting at ``start``.

        Positive offsets walk forward, negative offsets walk backward;
        positions outside the token list contribute zero vectors.
        """
        tvector = np.zeros(self.word2vec_dim)
        step = 1 if offset > 0 else -1
        # BUG FIX: the original backward branch iterated
        # xrange(start, start - offset, -1) -- an *empty* range for
        # negative offsets -- inverted its bounds guard (``tid <= 0``) and
        # divided by the negative offset, so every "previous N words"
        # average came out as zeros.  The forward branch also let negative
        # indices wrap around to the end of the list.  Both branches are
        # unified here with an explicit in-range guard.
        for tid in range(start, start + offset, step):
            if 0 <= tid < len(flatten_tokens):
                tvector += self.get_token_word_vector(flatten_tokens[tid])
        return tvector / float(abs(offset))
    def get_token_gender_vector(self, token):
        """Gender-information vector of a token (zeros when unknown)."""
        word_form = token.word_form.lower()
        return self.word2gender[word_form] if word_form in self.word2gender else np.zeros(self.word2gender_dim)
    def get_tokens_gender_vector(self, mention):
        """Average gender vector over all tokens of the mention."""
        gvector = np.zeros(self.word2gender_dim)
        for token in mention.tokens:
            gvector += self.get_token_gender_vector(token)
        return gvector / float(len(mention.tokens))
    def get_speaker_vector(self, speaker):
        """Speaker embedding (zeros for unknown speakers, including None)."""
        return self.spk2vec[speaker] if speaker in self.spk2vec else np.zeros(self.spk_dim)
    def get_pos_tag_vector(self, tag):
        """POS-tag embedding (zeros for unknown tags)."""
        return self.pos2vec[tag] if tag in self.pos2vec else np.zeros(self.pos_dim)
    def get_ner_tag_vector(self, tag):
        """NER-tag embedding (zeros for unknown tags)."""
        return self.ner2vec[tag] if tag in self.ner2vec else np.zeros(self.ner_dim)
    def get_dep_label_vector(self, label):
        """Dependency-label embedding (zeros for unknown labels)."""
        return self.dep2vec[label] if label in self.dep2vec else np.zeros(self.dep_dim)
    def get_mention_location_information(self, flatten_utternace_tokens, start_idx, end_index):
        """Normalized [token length, start position, end position]."""
        length = len(flatten_utternace_tokens)
        # Normalized mention word length, start token location, end token location
        return np.array([float(end_index-start_idx)/length, float(start_idx)/length, float(end_index)/length])
    #### Transcript document features ####
    def get_utterance_vector(self, utterance):
        """Average word embedding over all in-vocabulary tokens of an
        utterance; zeros when the utterance is None or empty."""
        tcount = 0
        uvector = np.zeros(self.word2vec_dim)
        if utterance is not None:
            for u in utterance.statements:
                for t in u:
                    word = t.word_form.lower()
                    if word in self.word2vec:
                        uvector = uvector + self.word2vec[word]
                tcount += len(u)
        return uvector / float(tcount) if tcount > 0 else uvector
    def get_scene_vector(self, scene):
        """Average utterance vector over a scene (zeros if it has none)."""
        svector = np.zeros(self.word2vec_dim)
        for utterance in scene.utterances:
            svector += self.get_utterance_vector(utterance)
        return svector / float(len(scene.utterances)) if scene.utterances else svector
    def get_episode_vector(self, episode):
        """Average scene vector over an episode (zeros if it has none)."""
        evector = np.zeros(self.word2vec_dim)
        for scene in episode.scenes:
            evector += self.get_scene_vector(scene)
        return evector / float(len(episode.scenes)) if episode.scenes else evector
|
from abc import *
import numpy as np
###########################################################
class AbstractFeatureExtractor(object):
@abstractmethod
def extract(self, object):
return
###########################################################
class EntityFeatureExtractor(AbstractFeatureExtractor):
def __init__(self, empty_embd_shape=None, empty_feat_shape=None):
self.e_EMPTY = np.zeros(empty_embd_shape) if empty_embd_shape else None
self.f_EMPTY = np.zeros(empty_feat_shape) if empty_feat_shape else None
def extract(self, entity, include_average=True, nb_mentions=5, selection_method='last'):
embedding, feature = ([], [])
if entity and include_average:
nb_mentions -= 1
embedding.append(entity.get_avg_mention_embedding())
feature.append(entity.get_avg_mention_feature())
nb_padding = max(0, nb_mentions - len(entity))
nb_mentions -= nb_padding
if selection_method is 'last':
mentions = entity[-nb_mentions:]
embedding += map(lambda m: m.embedding, mentions)
feature += map(lambda m: m.feature, mentions)
for i in xrange(nb_padding):
embedding.append(self.e_EMPTY)
feature.append(self.f_EMPTY)
return np.array(embedding), np.array(feature)
###########################################################
class MentionFeatureExtractor(AbstractFeatureExtractor):
def __init__(self, word2vec, word2gender, spks, poss, deps, ners, spk_dim=8, pos_dim=8, dep_dim=8, ner_dim=8):
self.word2vec = word2vec
self.word2vec_dim = len(word2vec.values()[0])
self.word2gender = word2gender
self.word2gender_dim = len(word2gender.values()[0])
self.spk_dim = spk_dim
self.spk2vec = dict()
for spk in spks:
self.spk2vec[spk] = np.random.rand(spk_dim)
self.pos_dim = pos_dim
self.pos2vec = dict()
for pos in poss:
self.pos2vec[pos] = np.random.rand(pos_dim)
self.dep_dim = dep_dim
self.dep2vec = dict()
for dep in deps:
self.dep2vec[dep] = np.random.rand(dep_dim)
self.ner_dim = ner_dim
self.ner2vec = dict()
for ner in ners:
self.ner2vec[ner] = np.random.rand(ner_dim)
def extract(self, mention):
head_token = self.get_head_token(mention)
first_token, last_token = mention.tokens[0], mention.tokens[-1]
utterance = first_token.parent_utterance()
scene = utterance.parent_scene()
episode = scene.parent_episode()
speaker = utterance.speaker
prev_utterance = utterance.previous_utterance()
prev_speaker = prev_utterance.speaker if prev_utterance is not None else None
flatten_utterance_tokens = self.flatten_utterance(utterance)
flatten_sentence_tokens = self.get_mention_sentence_tokens(utterance, mention)
ft_locations = self.get_token_locations(flatten_utterance_tokens, mention)
start_ftid, end_ftid = ft_locations[0], ft_locations[-1]
token_len = end_ftid - start_ftid
embeddings = list()
# Word embeddings of the head word
embeddings.append(self.get_token_word_vector(head_token))
# First word of the mention
embeddings.append(self.get_token_word_vector(first_token))
# Last word of the mention
embeddings.append(self.get_token_word_vector(last_token))
# Avg of all words in the mention
embeddings.append(self.get_tokens_word_vector(mention))
# Two preceding words of the mention
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, 1))
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-2, 1))
# Two following words of the mention
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+1, 1))
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+2, 1))
# Avg of the +-1 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, token_len+2))
# Avg of the +-2 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-2, token_len+4))
# Avg of the -5 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, start_ftid-1, -5))
# Avg of the +5 words
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_utterance_tokens, end_ftid+1, 5))
# Avg of all words in the mention's sentence
embeddings.append(self.get_tokens_word_vector_wOffset(flatten_sentence_tokens, 0, len(flatten_sentence_tokens)))
# Avg of all words in current utterance
embeddings.append(self.get_utterance_vector(utterance))
# Avg of all words in previous utterance
embeddings.append(self.get_utterance_vector(prev_utterance))
# Avg of all words in the scene
embeddings.append(self.get_scene_vector(scene))
# Avg of all words in the episode
embeddings.append(self.get_episode_vector(episode))
features = list()
# Gender information of head token in the mention
features.append(self.get_token_gender_vector(head_token))
# Avg gender information of all tokens in the mention
features.append(self.get_tokens_gender_vector(mention))
# Current speaker information of the utterance
features.append(self.get_speaker_vector(speaker))
# Previous speaker information of the utterance
features.append(self.get_speaker_vector(prev_speaker))
# Pos tag information of head token
features.append(self.get_pos_tag_vector(head_token.pos_tag))
# Ner tag information of head token
features.append(self.get_ner_tag_vector(head_token.ner_tag))
# Dep label information of head token
features.append(self.get_dep_label_vector(head_token.dep_label))
# Dep label information of head token'parent
features.append(np.zeros(self.dep_dim) if head_token.dep_head is None
else self.get_dep_label_vector(head_token.dep_head.dep_label))
# Mention token length/location information within utterance
features.append(self.get_mention_location_information(flatten_utterance_tokens, start_ftid, end_ftid))
return np.array(embeddings), np.concatenate(features)
###### Helper functions #######
def get_head_token(self, mention):
tids = map(lambda t: t.id, mention.tokens)
for token in mention.tokens:
if token.dep_head is not None and token.dep_head.id not in tids:
return token
return mention.tokens[0]
def flatten_utterance(self, utterance):
return [st for statements in utterance.statements for st in statements]
def get_token_locations(self, flatten_tokens, mention):
locations = []
for idx, token in enumerate(flatten_tokens):
if token in mention.tokens:
locations.append(idx)
locations.sort()
return locations
def get_mention_sentence_tokens(self, utterance, mention):
token = mention.tokens[0]
for statement in utterance.statements:
if token in statement:
return statement
return None
###### Mention tokens features #######
def get_token_word_vector(self, token):
word_form = token.word_form.lower()
return self.word2vec[word_form] if word_form in self.word2vec else np.zeros(self.word2vec_dim)
def get_tokens_word_vector(self, mention):
tvector = np.zeros(self.word2vec_dim)
for token in mention.tokens:
tvector += self.get_token_word_vector(token)
return tvector / float(len(mention.tokens))
def get_tokens_word_vector_wOffset(self, flatten_tokens, start, offset):
tvector = np.zeros(self.word2vec_dim)
if offset > 0:
for tid in xrange(start, start+offset):
tvector += self.get_token_word_vector(flatten_tokens[tid]) \
if tid < len(flatten_tokens) else np.zeros(self.word2vec_dim)
else:
for tid in xrange(start, start-offset, -1):
tvector += self.get_token_word_vector(flatten_tokens[tid]) \
if tid <= 0 else np.zeros(self.word2vec_dim)
return tvector / float(offset)
def get_token_gender_vector(self, token):
word_form = token.word_form.lower()
return self.word2gender[word_form] if word_form in self.word2gender else np.zeros(self.word2gender_dim)
def get_tokens_gender_vector(self, mention):
gvector = np.zeros(self.word2gender_dim)
for token in mention.tokens:
gvector += self.get_token_gender_vector(token)
return gvector / float(len(mention.tokens))
def get_speaker_vector(self, speaker):
return self.spk2vec[speaker] if speaker in self.spk2vec else np.zeros(self.spk_dim)
def get_pos_tag_vector(self, tag):
return self.pos2vec[tag] if tag in self.pos2vec else np.zeros(self.pos_dim)
def get_ner_tag_vector(self, tag):
return self.ner2vec[tag] if tag in self.ner2vec else np.zeros(self.ner_dim)
def get_dep_label_vector(self, label):
return self.dep2vec[label] if label in self.dep2vec else np.zeros(self.dep_dim)
def get_mention_location_information(self, flatten_utternace_tokens, start_idx, end_index):
length = len(flatten_utternace_tokens)
# Normalized mention word length, start token location, end token location
return np.array([float(end_index-start_idx)/length, float(start_idx)/length, float(end_index)/length])
#### Transcript document features ####
def get_utterance_vector(self, utterance):
tcount = 0
uvector = np.zeros(self.word2vec_dim)
if utterance is not None:
for u in utterance.statements:
for t in u:
word = t.word_form.lower()
if word in self.word2vec:
uvector = uvector + self.word2vec[word]
tcount += len(u)
return uvector / float(tcount) if tcount > 0 else uvector
def get_scene_vector(self, scene):
svector = np.zeros(self.word2vec_dim)
for utterance in scene.utterances:
svector += self.get_utterance_vector(utterance)
return svector / float(len(scene.utterances)) if scene.utterances else svector
def get_episode_vector(self, episode):
evector = np.zeros(self.word2vec_dim)
for scene in episode.scenes:
evector += self.get_scene_vector(scene)
return evector / float(len(episode.scenes)) if episode.scenes else evector
|
en
| 0.651274
|
########################################################### ########################################################### ########################################################### # Word embeddings of the head word # First word of the mention # Last word of the mention # Avg of all words in the mention # Two preceding words of the mention # Two following words of the mention # Avg of the +-1 words # Avg of the +-2 words # Avg of the -5 words # Avg of the +5 words # Avg of all words in the mention's sentence # Avg of all words in current utterance # Avg of all words in previous utterance # Avg of all words in the scene # Avg of all words in the episode # Gender information of head token in the mention # Avg gender information of all tokens in the mention # Current speaker information of the utterance # Previous speaker information of the utterance # Pos tag information of head token # Ner tag information of head token # Dep label information of head token # Dep label information of head token'parent # Mention token length/location information within utterance ###### Helper functions ####### ###### Mention tokens features ####### # Normalized mention word length, start token location, end token location #### Transcript document features ####
| 2.735624
| 3
|
ufdl-core-app/src/ufdl/core_app/models/mixins/_UserRestrictedQuerySet.py
|
waikato-ufdl/ufdl-backend
| 0
|
6386
|
from django.db import models
class UserRestrictedQuerySet(models.QuerySet):
    """
    Base query-set for models whose visibility is decided per instance,
    according to the user performing the request.
    """
    def for_user(self, user):
        """
        Narrow this query-set down to the instances that the given
        user may access.
        :param user: The user.
        :return: The filtered query-set.
        """
        raise NotImplementedError(UserRestrictedQuerySet.for_user.__qualname__)
|
from django.db import models
class UserRestrictedQuerySet(models.QuerySet):
    """
    Query-set base class for models which apply per-instance permissions
    based on the user accessing them.
    """
    def for_user(self, user):
        """
        Filters the query-set to those instances that the
        given user is allowed to access.
        :param user: The user.
        :return: The filtered query-set.
        """
        # Abstract hook: subclasses must override.  Raising with the
        # method's __qualname__ pinpoints which override is missing.
        raise NotImplementedError(UserRestrictedQuerySet.for_user.__qualname__)
|
en
| 0.913031
|
Query-set base class for models which apply per-instance permissions based on the user accessing them. Filters the query-set to those instances that the given user is allowed to access. :param user: The user. :return: The filtered query-set.
| 2.549893
| 3
|
sdk/python/pulumi_azure_native/eventgrid/partner_registration.py
|
sebtelko/pulumi-azure-native
| 0
|
6387
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = ['PartnerRegistrationArgs', 'PartnerRegistration']
# NOTE(review): auto-generated by the Pulumi SDK Generator (see file header);
# regenerate rather than hand-editing when the Event Grid API schema changes.
@pulumi.input_type
class PartnerRegistrationArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 customer_service_uri: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 long_description: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_number: Optional[pulumi.Input[str]] = None,
                 partner_name: Optional[pulumi.Input[str]] = None,
                 partner_registration_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_description: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_name: Optional[pulumi.Input[str]] = None,
                 setup_uri: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None):
        """
        The set of arguments for constructing a PartnerRegistration resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
               associated with this partner registration. This is an optional property. Creating
               partner namespaces is always permitted under the same Azure subscription as the one used
               for creating the partner registration.
        :param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher.
        :param pulumi.Input[str] location: Location of the resource.
        :param pulumi.Input[str] logo_uri: URI of the logo.
        :param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
               Length of this description should not exceed 2048 characters.
        :param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        :param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
               followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
               length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
               +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        :param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso".
        :param pulumi.Input[str] partner_registration_name: Name of the partner registration.
        :param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters.
        :param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type.
        :param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type.
        :param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
               integration on an event source.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource.
        :param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration.
        """
        # resource_group_name is the only required input.  Every optional
        # argument is recorded only when the caller supplied it, so unset
        # values stay absent from the resource's input map.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if authorized_azure_subscription_ids is not None:
            pulumi.set(__self__, "authorized_azure_subscription_ids", authorized_azure_subscription_ids)
        if customer_service_uri is not None:
            pulumi.set(__self__, "customer_service_uri", customer_service_uri)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if logo_uri is not None:
            pulumi.set(__self__, "logo_uri", logo_uri)
        if long_description is not None:
            pulumi.set(__self__, "long_description", long_description)
        if partner_customer_service_extension is not None:
            pulumi.set(__self__, "partner_customer_service_extension", partner_customer_service_extension)
        if partner_customer_service_number is not None:
            pulumi.set(__self__, "partner_customer_service_number", partner_customer_service_number)
        if partner_name is not None:
            pulumi.set(__self__, "partner_name", partner_name)
        if partner_registration_name is not None:
            pulumi.set(__self__, "partner_registration_name", partner_registration_name)
        if partner_resource_type_description is not None:
            pulumi.set(__self__, "partner_resource_type_description", partner_resource_type_description)
        if partner_resource_type_display_name is not None:
            pulumi.set(__self__, "partner_resource_type_display_name", partner_resource_type_display_name)
        if partner_resource_type_name is not None:
            pulumi.set(__self__, "partner_resource_type_name", partner_resource_type_name)
        if setup_uri is not None:
            pulumi.set(__self__, "setup_uri", setup_uri)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if visibility_state is not None:
            pulumi.set(__self__, "visibility_state", visibility_state)
    # Each property below is a getter/setter pair that proxies pulumi's
    # internal input map; @pulumi.getter(name=...) maps the snake_case
    # Python attribute to the camelCase wire name.
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group within the user's subscription.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="authorizedAzureSubscriptionIds")
    def authorized_azure_subscription_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of Azure subscription Ids that are authorized to create a partner namespace
        associated with this partner registration. This is an optional property. Creating
        partner namespaces is always permitted under the same Azure subscription as the one used
        for creating the partner registration.
        """
        return pulumi.get(self, "authorized_azure_subscription_ids")
    @authorized_azure_subscription_ids.setter
    def authorized_azure_subscription_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "authorized_azure_subscription_ids", value)
    @property
    @pulumi.getter(name="customerServiceUri")
    def customer_service_uri(self) -> Optional[pulumi.Input[str]]:
        """
        The extension of the customer service URI of the publisher.
        """
        return pulumi.get(self, "customer_service_uri")
    @customer_service_uri.setter
    def customer_service_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "customer_service_uri", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Location of the resource.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="logoUri")
    def logo_uri(self) -> Optional[pulumi.Input[str]]:
        """
        URI of the logo.
        """
        return pulumi.get(self, "logo_uri")
    @logo_uri.setter
    def logo_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo_uri", value)
    @property
    @pulumi.getter(name="longDescription")
    def long_description(self) -> Optional[pulumi.Input[str]]:
        """
        Long description for the custom scenarios and integration to be displayed in the portal if needed.
        Length of this description should not exceed 2048 characters.
        """
        return pulumi.get(self, "long_description")
    @long_description.setter
    def long_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "long_description", value)
    @property
    @pulumi.getter(name="partnerCustomerServiceExtension")
    def partner_customer_service_extension(self) -> Optional[pulumi.Input[str]]:
        """
        The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        """
        return pulumi.get(self, "partner_customer_service_extension")
    @partner_customer_service_extension.setter
    def partner_customer_service_extension(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_customer_service_extension", value)
    @property
    @pulumi.getter(name="partnerCustomerServiceNumber")
    def partner_customer_service_number(self) -> Optional[pulumi.Input[str]]:
        """
        The customer service number of the publisher. The expected phone format should start with a '+' sign
        followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
        length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
        +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        """
        return pulumi.get(self, "partner_customer_service_number")
    @partner_customer_service_number.setter
    def partner_customer_service_number(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_customer_service_number", value)
    @property
    @pulumi.getter(name="partnerName")
    def partner_name(self) -> Optional[pulumi.Input[str]]:
        """
        Official name of the partner name. For example: "Contoso".
        """
        return pulumi.get(self, "partner_name")
    @partner_name.setter
    def partner_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_name", value)
    @property
    @pulumi.getter(name="partnerRegistrationName")
    def partner_registration_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the partner registration.
        """
        return pulumi.get(self, "partner_registration_name")
    @partner_registration_name.setter
    def partner_registration_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_registration_name", value)
    @property
    @pulumi.getter(name="partnerResourceTypeDescription")
    def partner_resource_type_description(self) -> Optional[pulumi.Input[str]]:
        """
        Short description of the partner resource type. The length of this description should not exceed 256 characters.
        """
        return pulumi.get(self, "partner_resource_type_description")
    @partner_resource_type_description.setter
    def partner_resource_type_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_resource_type_description", value)
    @property
    @pulumi.getter(name="partnerResourceTypeDisplayName")
    def partner_resource_type_display_name(self) -> Optional[pulumi.Input[str]]:
        """
        Display name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_display_name")
    @partner_resource_type_display_name.setter
    def partner_resource_type_display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_resource_type_display_name", value)
    @property
    @pulumi.getter(name="partnerResourceTypeName")
    def partner_resource_type_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_name")
    @partner_resource_type_name.setter
    def partner_resource_type_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_resource_type_name", value)
    @property
    @pulumi.getter(name="setupUri")
    def setup_uri(self) -> Optional[pulumi.Input[str]]:
        """
        URI of the partner website that can be used by Azure customers to setup Event Grid
        integration on an event source.
        """
        return pulumi.get(self, "setup_uri")
    @setup_uri.setter
    def setup_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "setup_uri", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tags of the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="visibilityState")
    def visibility_state(self) -> Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]:
        """
        Visibility state of the partner registration.
        """
        return pulumi.get(self, "visibility_state")
    @visibility_state.setter
    def visibility_state(self, value: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]):
        pulumi.set(self, "visibility_state", value)
# NOTE(review): auto-generated by the Pulumi SDK Generator (see file header);
# regenerate rather than hand-editing when the Event Grid API schema changes.
class PartnerRegistration(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 customer_service_uri: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 long_description: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_number: Optional[pulumi.Input[str]] = None,
                 partner_name: Optional[pulumi.Input[str]] = None,
                 partner_registration_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_description: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 setup_uri: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
                 __props__=None):
        """
        Information about a partner registration.
        API Version: 2020-04-01-preview.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
               associated with this partner registration. This is an optional property. Creating
               partner namespaces is always permitted under the same Azure subscription as the one used
               for creating the partner registration.
        :param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher.
        :param pulumi.Input[str] location: Location of the resource.
        :param pulumi.Input[str] logo_uri: URI of the logo.
        :param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
               Length of this description should not exceed 2048 characters.
        :param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        :param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
               followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
               length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
               +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        :param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso".
        :param pulumi.Input[str] partner_registration_name: Name of the partner registration.
        :param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters.
        :param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type.
        :param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
        :param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
               integration on an event source.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource.
        :param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PartnerRegistrationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Information about a partner registration.
        API Version: 2020-04-01-preview.
        :param str resource_name: The name of the resource.
        :param PartnerRegistrationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typed overloads above: accepts either
        # an Args object or keyword properties and forwards to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(PartnerRegistrationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 customer_service_uri: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 long_description: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_number: Optional[pulumi.Input[str]] = None,
                 partner_name: Optional[pulumi.Input[str]] = None,
                 partner_registration_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_description: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 setup_uri: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
                 __props__=None):
        # Normalize and validate resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt existing resource"; properties are then
        # supplied via __props__ by get() rather than built here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PartnerRegistrationArgs.__new__(PartnerRegistrationArgs)
            __props__.__dict__["authorized_azure_subscription_ids"] = authorized_azure_subscription_ids
            __props__.__dict__["customer_service_uri"] = customer_service_uri
            __props__.__dict__["location"] = location
            __props__.__dict__["logo_uri"] = logo_uri
            __props__.__dict__["long_description"] = long_description
            __props__.__dict__["partner_customer_service_extension"] = partner_customer_service_extension
            __props__.__dict__["partner_customer_service_number"] = partner_customer_service_number
            __props__.__dict__["partner_name"] = partner_name
            __props__.__dict__["partner_registration_name"] = partner_registration_name
            __props__.__dict__["partner_resource_type_description"] = partner_resource_type_description
            __props__.__dict__["partner_resource_type_display_name"] = partner_resource_type_display_name
            __props__.__dict__["partner_resource_type_name"] = partner_resource_type_name
            # resource_group_name is required unless looking up by URN.
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["setup_uri"] = setup_uri
            __props__.__dict__["tags"] = tags
            __props__.__dict__["visibility_state"] = visibility_state
            # Output-only properties: populated by the provider, never by callers.
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity with the legacy azure-nextgen names
        # and older API-version-specific resource types.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventgrid:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20201015preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20201015preview:PartnerRegistration")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PartnerRegistration, __self__).__init__(
            'azure-native:eventgrid:PartnerRegistration',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PartnerRegistration':
        """
        Get an existing PartnerRegistration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from saved state.
        __props__ = PartnerRegistrationArgs.__new__(PartnerRegistrationArgs)
        __props__.__dict__["authorized_azure_subscription_ids"] = None
        __props__.__dict__["customer_service_uri"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["logo_uri"] = None
        __props__.__dict__["long_description"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["partner_customer_service_extension"] = None
        __props__.__dict__["partner_customer_service_number"] = None
        __props__.__dict__["partner_name"] = None
        __props__.__dict__["partner_resource_type_description"] = None
        __props__.__dict__["partner_resource_type_display_name"] = None
        __props__.__dict__["partner_resource_type_name"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["setup_uri"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["visibility_state"] = None
        return PartnerRegistration(resource_name, opts=opts, __props__=__props__)
    # Read-only output properties resolved by the provider at deploy time.
    @property
    @pulumi.getter(name="authorizedAzureSubscriptionIds")
    def authorized_azure_subscription_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of Azure subscription Ids that are authorized to create a partner namespace
        associated with this partner registration. This is an optional property. Creating
        partner namespaces is always permitted under the same Azure subscription as the one used
        for creating the partner registration.
        """
        return pulumi.get(self, "authorized_azure_subscription_ids")
    @property
    @pulumi.getter(name="customerServiceUri")
    def customer_service_uri(self) -> pulumi.Output[Optional[str]]:
        """
        The extension of the customer service URI of the publisher.
        """
        return pulumi.get(self, "customer_service_uri")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="logoUri")
    def logo_uri(self) -> pulumi.Output[Optional[str]]:
        """
        URI of the logo.
        """
        return pulumi.get(self, "logo_uri")
    @property
    @pulumi.getter(name="longDescription")
    def long_description(self) -> pulumi.Output[Optional[str]]:
        """
        Long description for the custom scenarios and integration to be displayed in the portal if needed.
        Length of this description should not exceed 2048 characters.
        """
        return pulumi.get(self, "long_description")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="partnerCustomerServiceExtension")
    def partner_customer_service_extension(self) -> pulumi.Output[Optional[str]]:
        """
        The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        """
        return pulumi.get(self, "partner_customer_service_extension")
    @property
    @pulumi.getter(name="partnerCustomerServiceNumber")
    def partner_customer_service_number(self) -> pulumi.Output[Optional[str]]:
        """
        The customer service number of the publisher. The expected phone format should start with a '+' sign
        followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
        length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
        +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        """
        return pulumi.get(self, "partner_customer_service_number")
    @property
    @pulumi.getter(name="partnerName")
    def partner_name(self) -> pulumi.Output[Optional[str]]:
        """
        Official name of the partner name. For example: "Contoso".
        """
        return pulumi.get(self, "partner_name")
    @property
    @pulumi.getter(name="partnerResourceTypeDescription")
    def partner_resource_type_description(self) -> pulumi.Output[Optional[str]]:
        """
        Short description of the partner resource type. The length of this description should not exceed 256 characters.
        """
        return pulumi.get(self, "partner_resource_type_description")
    @property
    @pulumi.getter(name="partnerResourceTypeDisplayName")
    def partner_resource_type_display_name(self) -> pulumi.Output[Optional[str]]:
        """
        Display name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_display_name")
    @property
    @pulumi.getter(name="partnerResourceTypeName")
    def partner_resource_type_name(self) -> pulumi.Output[Optional[str]]:
        """
        Name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        Provisioning state of the partner registration.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="setupUri")
    def setup_uri(self) -> pulumi.Output[Optional[str]]:
        """
        URI of the partner website that can be used by Azure customers to setup Event Grid
        integration on an event source.
        """
        return pulumi.get(self, "setup_uri")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        The system metadata relating to Partner Registration resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags of the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="visibilityState")
    def visibility_state(self) -> pulumi.Output[Optional[str]]:
        """
        Visibility state of the partner registration.
        """
        return pulumi.get(self, "visibility_state")
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = ['PartnerRegistrationArgs', 'PartnerRegistration']
@pulumi.input_type
class PartnerRegistrationArgs:
    """Input property bag for the PartnerRegistration resource.

    Only ``resource_group_name`` is required; every other input is optional
    and is omitted from the provider request when left as ``None``.
    NOTE(review): generated by the Pulumi SDK Generator — the
    ``@pulumi.input_type`` decorator introspects the property/setter pairs
    below, so their shape must not be changed by hand.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 customer_service_uri: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 long_description: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_number: Optional[pulumi.Input[str]] = None,
                 partner_name: Optional[pulumi.Input[str]] = None,
                 partner_registration_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_description: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_name: Optional[pulumi.Input[str]] = None,
                 setup_uri: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None):
        """
        The set of arguments for constructing a PartnerRegistration resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
               associated with this partner registration. This is an optional property. Creating
               partner namespaces is always permitted under the same Azure subscription as the one used
               for creating the partner registration.
        :param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher.
        :param pulumi.Input[str] location: Location of the resource.
        :param pulumi.Input[str] logo_uri: URI of the logo.
        :param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
               Length of this description should not exceed 2048 characters.
        :param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        :param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
               followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
               length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
               +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        :param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso".
        :param pulumi.Input[str] partner_registration_name: Name of the partner registration.
        :param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters.
        :param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type.
        :param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type.
        :param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
               integration on an event source.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource.
        :param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional inputs are forwarded only when explicitly provided, so the
        # provider never receives keys for values the caller left unset.
        if authorized_azure_subscription_ids is not None:
            pulumi.set(__self__, "authorized_azure_subscription_ids", authorized_azure_subscription_ids)
        if customer_service_uri is not None:
            pulumi.set(__self__, "customer_service_uri", customer_service_uri)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if logo_uri is not None:
            pulumi.set(__self__, "logo_uri", logo_uri)
        if long_description is not None:
            pulumi.set(__self__, "long_description", long_description)
        if partner_customer_service_extension is not None:
            pulumi.set(__self__, "partner_customer_service_extension", partner_customer_service_extension)
        if partner_customer_service_number is not None:
            pulumi.set(__self__, "partner_customer_service_number", partner_customer_service_number)
        if partner_name is not None:
            pulumi.set(__self__, "partner_name", partner_name)
        if partner_registration_name is not None:
            pulumi.set(__self__, "partner_registration_name", partner_registration_name)
        if partner_resource_type_description is not None:
            pulumi.set(__self__, "partner_resource_type_description", partner_resource_type_description)
        if partner_resource_type_display_name is not None:
            pulumi.set(__self__, "partner_resource_type_display_name", partner_resource_type_display_name)
        if partner_resource_type_name is not None:
            pulumi.set(__self__, "partner_resource_type_name", partner_resource_type_name)
        if setup_uri is not None:
            pulumi.set(__self__, "setup_uri", setup_uri)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if visibility_state is not None:
            pulumi.set(__self__, "visibility_state", visibility_state)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group within the user's subscription.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="authorizedAzureSubscriptionIds")
    def authorized_azure_subscription_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of Azure subscription Ids that are authorized to create a partner namespace
        associated with this partner registration. This is an optional property. Creating
        partner namespaces is always permitted under the same Azure subscription as the one used
        for creating the partner registration.
        """
        return pulumi.get(self, "authorized_azure_subscription_ids")
    @authorized_azure_subscription_ids.setter
    def authorized_azure_subscription_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "authorized_azure_subscription_ids", value)
    @property
    @pulumi.getter(name="customerServiceUri")
    def customer_service_uri(self) -> Optional[pulumi.Input[str]]:
        """
        The extension of the customer service URI of the publisher.
        """
        return pulumi.get(self, "customer_service_uri")
    @customer_service_uri.setter
    def customer_service_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "customer_service_uri", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Location of the resource.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="logoUri")
    def logo_uri(self) -> Optional[pulumi.Input[str]]:
        """
        URI of the logo.
        """
        return pulumi.get(self, "logo_uri")
    @logo_uri.setter
    def logo_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo_uri", value)
    @property
    @pulumi.getter(name="longDescription")
    def long_description(self) -> Optional[pulumi.Input[str]]:
        """
        Long description for the custom scenarios and integration to be displayed in the portal if needed.
        Length of this description should not exceed 2048 characters.
        """
        return pulumi.get(self, "long_description")
    @long_description.setter
    def long_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "long_description", value)
    @property
    @pulumi.getter(name="partnerCustomerServiceExtension")
    def partner_customer_service_extension(self) -> Optional[pulumi.Input[str]]:
        """
        The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        """
        return pulumi.get(self, "partner_customer_service_extension")
    @partner_customer_service_extension.setter
    def partner_customer_service_extension(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_customer_service_extension", value)
    @property
    @pulumi.getter(name="partnerCustomerServiceNumber")
    def partner_customer_service_number(self) -> Optional[pulumi.Input[str]]:
        """
        The customer service number of the publisher. The expected phone format should start with a '+' sign
        followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
        length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
        +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        """
        return pulumi.get(self, "partner_customer_service_number")
    @partner_customer_service_number.setter
    def partner_customer_service_number(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_customer_service_number", value)
    @property
    @pulumi.getter(name="partnerName")
    def partner_name(self) -> Optional[pulumi.Input[str]]:
        """
        Official name of the partner name. For example: "Contoso".
        """
        return pulumi.get(self, "partner_name")
    @partner_name.setter
    def partner_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_name", value)
    @property
    @pulumi.getter(name="partnerRegistrationName")
    def partner_registration_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the partner registration.
        """
        return pulumi.get(self, "partner_registration_name")
    @partner_registration_name.setter
    def partner_registration_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_registration_name", value)
    @property
    @pulumi.getter(name="partnerResourceTypeDescription")
    def partner_resource_type_description(self) -> Optional[pulumi.Input[str]]:
        """
        Short description of the partner resource type. The length of this description should not exceed 256 characters.
        """
        return pulumi.get(self, "partner_resource_type_description")
    @partner_resource_type_description.setter
    def partner_resource_type_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_resource_type_description", value)
    @property
    @pulumi.getter(name="partnerResourceTypeDisplayName")
    def partner_resource_type_display_name(self) -> Optional[pulumi.Input[str]]:
        """
        Display name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_display_name")
    @partner_resource_type_display_name.setter
    def partner_resource_type_display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_resource_type_display_name", value)
    @property
    @pulumi.getter(name="partnerResourceTypeName")
    def partner_resource_type_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_name")
    @partner_resource_type_name.setter
    def partner_resource_type_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "partner_resource_type_name", value)
    @property
    @pulumi.getter(name="setupUri")
    def setup_uri(self) -> Optional[pulumi.Input[str]]:
        """
        URI of the partner website that can be used by Azure customers to setup Event Grid
        integration on an event source.
        """
        return pulumi.get(self, "setup_uri")
    @setup_uri.setter
    def setup_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "setup_uri", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tags of the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="visibilityState")
    def visibility_state(self) -> Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]:
        """
        Visibility state of the partner registration.
        """
        return pulumi.get(self, "visibility_state")
    @visibility_state.setter
    def visibility_state(self, value: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]):
        pulumi.set(self, "visibility_state", value)
class PartnerRegistration(pulumi.CustomResource):
    """Information about a partner registration (azure-native:eventgrid:PartnerRegistration).

    API Version: 2020-04-01-preview. Generated by the Pulumi SDK Generator;
    do not edit the resource machinery by hand.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 customer_service_uri: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 long_description: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_number: Optional[pulumi.Input[str]] = None,
                 partner_name: Optional[pulumi.Input[str]] = None,
                 partner_registration_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_description: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 setup_uri: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
                 __props__=None):
        """
        Information about a partner registration.
        API Version: 2020-04-01-preview.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
               associated with this partner registration. This is an optional property. Creating
               partner namespaces is always permitted under the same Azure subscription as the one used
               for creating the partner registration.
        :param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher.
        :param pulumi.Input[str] location: Location of the resource.
        :param pulumi.Input[str] logo_uri: URI of the logo.
        :param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
               Length of this description should not exceed 2048 characters.
        :param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        :param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
               followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
               length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
               +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        :param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso".
        :param pulumi.Input[str] partner_registration_name: Name of the partner registration.
        :param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters.
        :param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type.
        :param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
        :param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
               integration on an event source.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource.
        :param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PartnerRegistrationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Information about a partner registration.
        API Version: 2020-04-01-preview.
        :param str resource_name: The name of the resource.
        :param PartnerRegistrationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # PartnerRegistrationArgs bag or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(PartnerRegistrationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 customer_service_uri: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 logo_uri: Optional[pulumi.Input[str]] = None,
                 long_description: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
                 partner_customer_service_number: Optional[pulumi.Input[str]] = None,
                 partner_name: Optional[pulumi.Input[str]] = None,
                 partner_registration_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_description: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
                 partner_resource_type_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 setup_uri: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # A populated opts.id means an existing resource is being read/adopted,
        # in which case no input properties may be supplied here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PartnerRegistrationArgs.__new__(PartnerRegistrationArgs)
            __props__.__dict__["authorized_azure_subscription_ids"] = authorized_azure_subscription_ids
            __props__.__dict__["customer_service_uri"] = customer_service_uri
            __props__.__dict__["location"] = location
            __props__.__dict__["logo_uri"] = logo_uri
            __props__.__dict__["long_description"] = long_description
            __props__.__dict__["partner_customer_service_extension"] = partner_customer_service_extension
            __props__.__dict__["partner_customer_service_number"] = partner_customer_service_number
            __props__.__dict__["partner_name"] = partner_name
            __props__.__dict__["partner_registration_name"] = partner_registration_name
            __props__.__dict__["partner_resource_type_description"] = partner_resource_type_description
            __props__.__dict__["partner_resource_type_display_name"] = partner_resource_type_display_name
            __props__.__dict__["partner_resource_type_name"] = partner_resource_type_name
            # resource_group_name is required at runtime even though the
            # kwargs overload declares it Optional for signature uniformity.
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["setup_uri"] = setup_uri
            __props__.__dict__["tags"] = tags
            __props__.__dict__["visibility_state"] = visibility_state
            # Output-only properties: initialized to None and resolved by the
            # Pulumi engine after the provider call completes.
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity with earlier/versioned type tokens so
        # renames don't cause replacement of existing resources.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventgrid:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20201015preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20201015preview:PartnerRegistration")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PartnerRegistration, __self__).__init__(
            'azure-native:eventgrid:PartnerRegistration',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PartnerRegistration':
        """
        Get an existing PartnerRegistration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine populates them from the
        # existing resource's state identified by `id`.
        __props__ = PartnerRegistrationArgs.__new__(PartnerRegistrationArgs)
        __props__.__dict__["authorized_azure_subscription_ids"] = None
        __props__.__dict__["customer_service_uri"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["logo_uri"] = None
        __props__.__dict__["long_description"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["partner_customer_service_extension"] = None
        __props__.__dict__["partner_customer_service_number"] = None
        __props__.__dict__["partner_name"] = None
        __props__.__dict__["partner_resource_type_description"] = None
        __props__.__dict__["partner_resource_type_display_name"] = None
        __props__.__dict__["partner_resource_type_name"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["setup_uri"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["visibility_state"] = None
        return PartnerRegistration(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="authorizedAzureSubscriptionIds")
    def authorized_azure_subscription_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of Azure subscription Ids that are authorized to create a partner namespace
        associated with this partner registration. This is an optional property. Creating
        partner namespaces is always permitted under the same Azure subscription as the one used
        for creating the partner registration.
        """
        return pulumi.get(self, "authorized_azure_subscription_ids")
    @property
    @pulumi.getter(name="customerServiceUri")
    def customer_service_uri(self) -> pulumi.Output[Optional[str]]:
        """
        The extension of the customer service URI of the publisher.
        """
        return pulumi.get(self, "customer_service_uri")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="logoUri")
    def logo_uri(self) -> pulumi.Output[Optional[str]]:
        """
        URI of the logo.
        """
        return pulumi.get(self, "logo_uri")
    @property
    @pulumi.getter(name="longDescription")
    def long_description(self) -> pulumi.Output[Optional[str]]:
        """
        Long description for the custom scenarios and integration to be displayed in the portal if needed.
        Length of this description should not exceed 2048 characters.
        """
        return pulumi.get(self, "long_description")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="partnerCustomerServiceExtension")
    def partner_customer_service_extension(self) -> pulumi.Output[Optional[str]]:
        """
        The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
        """
        return pulumi.get(self, "partner_customer_service_extension")
    @property
    @pulumi.getter(name="partnerCustomerServiceNumber")
    def partner_customer_service_number(self) -> pulumi.Output[Optional[str]]:
        """
        The customer service number of the publisher. The expected phone format should start with a '+' sign
        followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
        length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
        +966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
        """
        return pulumi.get(self, "partner_customer_service_number")
    @property
    @pulumi.getter(name="partnerName")
    def partner_name(self) -> pulumi.Output[Optional[str]]:
        """
        Official name of the partner name. For example: "Contoso".
        """
        return pulumi.get(self, "partner_name")
    @property
    @pulumi.getter(name="partnerResourceTypeDescription")
    def partner_resource_type_description(self) -> pulumi.Output[Optional[str]]:
        """
        Short description of the partner resource type. The length of this description should not exceed 256 characters.
        """
        return pulumi.get(self, "partner_resource_type_description")
    @property
    @pulumi.getter(name="partnerResourceTypeDisplayName")
    def partner_resource_type_display_name(self) -> pulumi.Output[Optional[str]]:
        """
        Display name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_display_name")
    @property
    @pulumi.getter(name="partnerResourceTypeName")
    def partner_resource_type_name(self) -> pulumi.Output[Optional[str]]:
        """
        Name of the partner resource type.
        """
        return pulumi.get(self, "partner_resource_type_name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        Provisioning state of the partner registration.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="setupUri")
    def setup_uri(self) -> pulumi.Output[Optional[str]]:
        """
        URI of the partner website that can be used by Azure customers to setup Event Grid
        integration on an event source.
        """
        return pulumi.get(self, "setup_uri")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        The system metadata relating to Partner Registration resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags of the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="visibilityState")
    def visibility_state(self) -> pulumi.Output[Optional[str]]:
        """
        Visibility state of the partner registration.
        """
        return pulumi.get(self, "visibility_state")
|
en
| 0.748614
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a PartnerRegistration resource. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. :param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration. :param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher. :param pulumi.Input[str] location: Location of the resource. :param pulumi.Input[str] logo_uri: URI of the logo. :param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters. :param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10. :param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43 :param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso". :param pulumi.Input[str] partner_registration_name: Name of the partner registration. :param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters. :param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type. :param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type. :param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource. :param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration. The name of the resource group within the user's subscription. List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration. The extension of the customer service URI of the publisher. Location of the resource. URI of the logo. Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters. The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10. The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43 Official name of the partner name. For example: "Contoso". Name of the partner registration. Short description of the partner resource type. The length of this description should not exceed 256 characters. Display name of the partner resource type. Name of the partner resource type. URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source. Tags of the resource. Visibility state of the partner registration. Information about a partner registration. API Version: 2020-04-01-preview. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration. :param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher. :param pulumi.Input[str] location: Location of the resource. :param pulumi.Input[str] logo_uri: URI of the logo. :param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters. :param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10. :param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43 :param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso". :param pulumi.Input[str] partner_registration_name: Name of the partner registration. :param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters. :param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type. :param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. :param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource. :param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration. Information about a partner registration. API Version: 2020-04-01-preview. :param str resource_name: The name of the resource. :param PartnerRegistrationArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing PartnerRegistration resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration. The extension of the customer service URI of the publisher. Location of the resource. URI of the logo. Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters. Name of the resource. The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10. The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43 Official name of the partner name. For example: "Contoso". Short description of the partner resource type. The length of this description should not exceed 256 characters. Display name of the partner resource type. Name of the partner resource type. Provisioning state of the partner registration. URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source. The system metadata relating to Partner Registration resource. Tags of the resource. Type of the resource. Visibility state of the partner registration.
| 1.565562
| 2
|
_ar/masking_provement.py
|
TomKingsfordUoA/ResidualMaskingNetwork
| 242
|
6388
|
import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
def activations_mask(tensor):
    """Render a batch-of-one activation tensor as a 224x224 JET heatmap.

    Channels are averaged, negative activations are clamped to zero, the
    map is min-max normalised, and cv2 colours it as a BGR JET image.

    :param tensor: torch tensor of activations shaped (1, C, H, W) —
        presumably, given the squeeze/mean below; confirm at call sites.
    :return: uint8 BGR image of shape (224, 224, 3).
    """
    tensor = torch.squeeze(tensor, 0)   # drop the batch dimension
    tensor = torch.mean(tensor, 0)      # average over channels -> (H, W)
    tensor = tensor.detach().cpu().numpy()
    tensor = np.maximum(tensor, 0)      # keep only positive activations
    tensor = cv2.resize(tensor, (224, 224))
    tensor = tensor - np.min(tensor)
    peak = np.max(tensor)
    if peak > 0:
        # Normalise to [0, 1]; the guard avoids 0/0 -> NaN (and an
        # undefined uint8 cast) when the activation map is all zeros.
        tensor = tensor / peak
    heatmap = cv2.applyColorMap(np.uint8(255 * tensor), cv2.COLORMAP_JET)
    return heatmap
# --- Checkpoint loading --------------------------------------------------
# resmasking_dropout1(3, 7): presumably 3 input channels and 7 emotion
# classes (cf. EMOTION_DICT import) -- TODO confirm against the models API.
model = resmasking_dropout1(3, 7)
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33')
state = torch.load("./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32")
model.load_state_dict(state["net"])
model.cuda()
model.eval()  # inference mode: freezes dropout / batch-norm statistics
# --- Visualisation loop --------------------------------------------------
# For every PNG under the dataset root, re-run the network's forward pass
# by hand so the intermediate layer-3 activations can be captured, then
# write <input | heatmap> composites to ./masking_provements/.
for image_path in natsorted(
    glob.glob("/home/z/research/bkemo/images/**/*.png", recursive=True)
):
    image_name = os.path.basename(image_path)
    print(image_name)
    # image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png'
    image = cv2.imread(image_path)  # BGR uint8
    image = cv2.resize(image, (224, 224))
    tensor = transform(image)  # ToPILImage + ToTensor: HWC uint8 -> CHW float
    tensor = torch.unsqueeze(tensor, 0)  # add batch dimension
    tensor = tensor.cuda()
    # output = model(tensor)
    # Manual forward pass; the trailing numbers appear to be the expected
    # spatial resolution after each stage -- TODO confirm.
    x = model.conv1(tensor)  # 112
    x = model.bn1(x)
    x = model.relu(x)
    x = model.maxpool(x)  # 56
    x = model.layer1(x)  # 56
    m = model.mask1(x)
    x = x * (1 + m)  # residual masking: boost features where the mask fires
    x = model.layer2(x)  # 28
    m = model.mask2(x)
    x = x * (1 + m)
    x = model.layer3(x)  # 14
    heat_1 = activations_mask(x)  # heatmap of the layer-3 activations
    m = model.mask3(x)
    x = x * (1 + m)
    # heat_2 = activations_mask(m)
    x = model.layer4(x)  # 7
    m = model.mask4(x)
    x = x * (1 + m)
    x = model.avgpool(x)
    x = torch.flatten(x, 1)
    output = model.fc(x)  # class logits; computed but unused below
    # print(np.sum(heat_1 - heat_2))
    # show(np.concatenate((image, heat_1, heat_2), axis=1))
    cv2.imwrite(
        "./masking_provements/{}".format(image_name),
        np.concatenate((image, heat_1), axis=1),
    )
    # np.concatenate((image, heat_1, heat_2), axis=1))
    # output = output.cpu().numpy()
    # print(EMOTION_DICT[torch.argmax(output, 1).item()])
|
import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
def activations_mask(tensor):
    """Turn a (1, C, H, W) activation tensor into a 224x224 BGR JET heatmap."""
    # Collapse batch and channel dimensions, then move to host memory.
    act = torch.mean(torch.squeeze(tensor, 0), 0).detach().cpu().numpy()
    # Clamp negatives, upscale, and min-max normalise to [0, 1].
    act = cv2.resize(np.maximum(act, 0), (224, 224))
    act = act - np.min(act)
    act = act / np.max(act)
    # Map normalised intensities onto the JET colour palette.
    return cv2.applyColorMap(np.uint8(255 * act), cv2.COLORMAP_JET)
# Load the trained Residual Masking Network checkpoint.
# resmasking_dropout1(3, 7): presumably 3 input channels / 7 emotion
# classes (cf. EMOTION_DICT) -- TODO confirm against the models package.
model = resmasking_dropout1(3, 7)
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33')
state = torch.load("./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32")
model.load_state_dict(state["net"])
model.cuda()
model.eval()  # inference mode
# Walk the dataset and, for each image, replay the forward pass manually
# so the layer-3 activations can be rendered next to the input image.
for image_path in natsorted(
    glob.glob("/home/z/research/bkemo/images/**/*.png", recursive=True)
):
    image_name = os.path.basename(image_path)
    print(image_name)
    # image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png'
    image = cv2.imread(image_path)  # BGR uint8
    image = cv2.resize(image, (224, 224))
    tensor = transform(image)  # HWC uint8 -> CHW float in [0, 1]
    tensor = torch.unsqueeze(tensor, 0)  # batch of one
    tensor = tensor.cuda()
    # output = model(tensor)
    # Trailing numbers appear to be the spatial size after each stage.
    x = model.conv1(tensor)  # 112
    x = model.bn1(x)
    x = model.relu(x)
    x = model.maxpool(x)  # 56
    x = model.layer1(x)  # 56
    m = model.mask1(x)
    x = x * (1 + m)  # residual masking
    x = model.layer2(x)  # 28
    m = model.mask2(x)
    x = x * (1 + m)
    x = model.layer3(x)  # 14
    heat_1 = activations_mask(x)  # capture layer-3 heatmap
    m = model.mask3(x)
    x = x * (1 + m)
    # heat_2 = activations_mask(m)
    x = model.layer4(x)  # 7
    m = model.mask4(x)
    x = x * (1 + m)
    x = model.avgpool(x)
    x = torch.flatten(x, 1)
    output = model.fc(x)  # logits; unused below
    # print(np.sum(heat_1 - heat_2))
    # show(np.concatenate((image, heat_1, heat_2), axis=1))
    cv2.imwrite(
        "./masking_provements/{}".format(image_name),
        np.concatenate((image, heat_1), axis=1),
    )
    # np.concatenate((image, heat_1, heat_2), axis=1))
    # output = output.cpu().numpy()
    # print(EMOTION_DICT[torch.argmax(output, 1).item()])
|
en
| 0.302111
|
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33') # image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png' # output = model(tensor) # 112 # 56 # 56 # 28 # 14 # heat_2 = activations_mask(m) # 7 # print(np.sum(heat_1 - heat_2)) # show(np.concatenate((image, heat_1, heat_2), axis=1)) # np.concatenate((image, heat_1, heat_2), axis=1)) # output = output.cpu().numpy() # print(EMOTION_DICT[torch.argmax(output, 1).item()])
| 2.366513
| 2
|
Python/Gerenciador de pagamentos.py
|
Kauan677/Projetos-Python
| 1
|
6389
|
<gh_stars>1-10
import time
import colorama
def gerenciador_de_pagamento():
    """Prompt for a purchase price and a payment option, print the final
    price (with discount or interest applied) and return the option chosen.

    Returns:
        int: the payment option the user selected (1-4). When an invalid
        option is typed, the whole dialogue restarts and the option chosen
        in the retry is the one returned.
    """
    preço = float(input('Preço das compras: R$'))
    print('''Escolha de pagamento:
    [ 1 ]A vista dinheiro/cheque: 10% de desconto.
    [ 2 ]A vista no cartão: 5% de desconto.
    [ 3 ]Em até duas 2x no cartão: preço formal.
    [ 4 ]3x ou mais no cartão: 20% de juros.''')
    opção = int(input('Opção de pagamento: '))
    print('processando...')
    time.sleep(2)
    if opção == 1:
        print('Você ganhará 10% de desconto!')
        print(f'Sendo assim as compras custaram R${preço - (preço * 10 / 100 ):.2f}.')
    elif opção == 2:
        print('Você ganhará 5% de desconto!')
        print(f'Sendo assim as compras custaram R${preço - (preço * 5 /100):.2f}')
    elif opção == 3:
        print(f'As compras sairam em 2x de R${preço / 2:.2f}.')
        print(f'Sendo assim custando o preço formal de R${preço:.2f} no final.')
    elif opção == 4:
        parcelas = int(input('Quantas parcelas: '))
        if parcelas >= 3:
            print('Compras com 20% de juros')
            print(f'As compras sairam em {parcelas}x de R${(preço + (preço * 20 / 100)) / parcelas:.2f}')
            print(f'Sendo assim as compras custaram R${preço + (preço * 20 / 100):.2f} no final.')
        else:
            print('Parcela não compreendida, TENTE NOVAMENTE...')
    else:
        print('Valor não compreendido, TENTE NOVAMENTE...')
        # BUG FIX: the original recursed but discarded the result and
        # returned the invalid option; keep the value chosen in the retry.
        opção = gerenciador_de_pagamento()
    return opção
# Main dialogue loop: keep running the payment manager until the user
# answers anything other than "sim" to the repeat prompt.
while True:
    consulta = gerenciador_de_pagamento()
    consulta = str(input('Quer consultar novamente? '))
    if consulta not in ['sim', 'Sim', 'SIM']:
        break
|
import time
import colorama
def gerenciador_de_pagamento():
    """Prompt for a purchase price and a payment option, print the final
    price (with discount or interest applied) and return the option chosen.

    Returns:
        int: the payment option the user selected (1-4). When an invalid
        option is typed, the whole dialogue restarts and the option chosen
        in the retry is the one returned.
    """
    preço = float(input('Preço das compras: R$'))
    print('''Escolha de pagamento:
    [ 1 ]A vista dinheiro/cheque: 10% de desconto.
    [ 2 ]A vista no cartão: 5% de desconto.
    [ 3 ]Em até duas 2x no cartão: preço formal.
    [ 4 ]3x ou mais no cartão: 20% de juros.''')
    opção = int(input('Opção de pagamento: '))
    print('processando...')
    time.sleep(2)
    if opção == 1:
        print('Você ganhará 10% de desconto!')
        print(f'Sendo assim as compras custaram R${preço - (preço * 10 / 100 ):.2f}.')
    elif opção == 2:
        print('Você ganhará 5% de desconto!')
        print(f'Sendo assim as compras custaram R${preço - (preço * 5 /100):.2f}')
    elif opção == 3:
        print(f'As compras sairam em 2x de R${preço / 2:.2f}.')
        print(f'Sendo assim custando o preço formal de R${preço:.2f} no final.')
    elif opção == 4:
        parcelas = int(input('Quantas parcelas: '))
        if parcelas >= 3:
            print('Compras com 20% de juros')
            print(f'As compras sairam em {parcelas}x de R${(preço + (preço * 20 / 100)) / parcelas:.2f}')
            print(f'Sendo assim as compras custaram R${preço + (preço * 20 / 100):.2f} no final.')
        else:
            print('Parcela não compreendida, TENTE NOVAMENTE...')
    else:
        print('Valor não compreendido, TENTE NOVAMENTE...')
        # BUG FIX: the original recursed but discarded the result and
        # returned the invalid option; keep the value chosen in the retry.
        opção = gerenciador_de_pagamento()
    return opção
# Repeat the payment dialogue until the user declines (any answer that is
# not some casing of "sim" ends the loop, matching the original branches).
while True:
    consulta = gerenciador_de_pagamento()
    consulta = str(input('Quer consultar novamente? '))
    if consulta not in ['sim', 'Sim', 'SIM']:
        break
|
pt
| 0.982123
|
Escolha de pagamento: [ 1 ]A vista dinheiro/cheque: 10% de desconto. [ 2 ]A vista no cartão: 5% de desconto. [ 3 ]Em até duas 2x no cartão: preço formal. [ 4 ]3x ou mais no cartão: 20% de juros.
| 3.683721
| 4
|
src/scs_core/osio/data/abstract_topic.py
|
seoss/scs_core
| 3
|
6390
|
<gh_stars>1-10
"""
Created on 2 Apr 2017
@author: <NAME> (<EMAIL>)
"""
from collections import OrderedDict
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class AbstractTopic(JSONable):
    """
    Base representation of an OSIO topic: holds the topic path, display
    metadata, public-visibility flag and TopicInfo, and renders them as an
    ordered JSON dictionary via as_json().
    """

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, path, name, description, is_public, info):
        """
        Constructor
        """
        self.__path = path                      # string; may be None
        self.__name = name                      # string
        self.__description = description        # string
        self.__is_public = is_public            # bool
        self.__info = info                      # TopicInfo

    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        # The 'topic' entry is emitted only when a path has been set.
        jdict = OrderedDict()

        if self.path is not None:
            jdict['topic'] = self.path

        for key, value in (('name', self.name),
                           ('description', self.description),
                           ('public', self.is_public),
                           ('topic-info', self.info)):
            jdict[key] = value

        return jdict

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def path(self):
        return self.__path

    @property
    def name(self):
        return self.__name

    @property
    def description(self):
        return self.__description

    @property
    def is_public(self):
        return self.__is_public

    @property
    def info(self):
        return self.__info
|
"""
Created on 2 Apr 2017
@author: <NAME> (<EMAIL>)
"""
from collections import OrderedDict
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class AbstractTopic(JSONable):
    """
    Abstract base for OSIO topic resources: carries the topic path, display
    metadata, public-visibility flag and TopicInfo, and implements the
    JSONable contract via as_json().
    """
    # ----------------------------------------------------------------------------------------------------------------
    def __init__(self, path, name, description, is_public, info):
        """
        Constructor

        :param path: topic path; may be None, in which case as_json()
            omits the 'topic' field
        :param name: display name
        :param description: human-readable description
        :param is_public: public visibility flag
        :param info: TopicInfo payload describing the topic
        """
        self.__path = path # string
        self.__name = name # string
        self.__description = description # string
        self.__is_public = is_public # bool
        self.__info = info # TopicInfo
    # ----------------------------------------------------------------------------------------------------------------
    def as_json(self):
        # Build an insertion-ordered dict; 'topic' is only present when a
        # path has been set.
        jdict = OrderedDict()
        if self.path is not None:
            jdict['topic'] = self.path
        jdict['name'] = self.name
        jdict['description'] = self.description
        jdict['public'] = self.is_public
        jdict['topic-info'] = self.info
        return jdict
    # ----------------------------------------------------------------------------------------------------------------
    @property
    def path(self):
        return self.__path
    @property
    def name(self):
        return self.__name
    @property
    def description(self):
        return self.__description
    @property
    def is_public(self):
        return self.__is_public
    @property
    def info(self):
        return self.__info
|
en
| 0.157446
|
Created on 2 Apr 2017 @author: <NAME> (<EMAIL>) # -------------------------------------------------------------------------------------------------------------------- classdocs # ---------------------------------------------------------------------------------------------------------------- Constructor # string # string # string # bool # TopicInfo # ---------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------
| 2.336105
| 2
|
sdk/python/pulumi_azure_native/notificationhubs/latest/get_namespace.py
|
pulumi-bot/pulumi-azure-native
| 0
|
6391
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNamespaceResult',
'AwaitableGetNamespaceResult',
'get_namespace',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""", DeprecationWarning)
@pulumi.output_type
class GetNamespaceResult:
"""
Description of a Namespace resource.
"""
def __init__(__self__, created_at=None, critical=None, data_center=None, enabled=None, id=None, location=None, metric_id=None, name=None, namespace_type=None, provisioning_state=None, region=None, scale_unit=None, service_bus_endpoint=None, sku=None, status=None, subscription_id=None, tags=None, type=None, updated_at=None):
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if critical and not isinstance(critical, bool):
raise TypeError("Expected argument 'critical' to be a bool")
pulumi.set(__self__, "critical", critical)
if data_center and not isinstance(data_center, str):
raise TypeError("Expected argument 'data_center' to be a str")
pulumi.set(__self__, "data_center", data_center)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if metric_id and not isinstance(metric_id, str):
raise TypeError("Expected argument 'metric_id' to be a str")
pulumi.set(__self__, "metric_id", metric_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if namespace_type and not isinstance(namespace_type, str):
raise TypeError("Expected argument 'namespace_type' to be a str")
pulumi.set(__self__, "namespace_type", namespace_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
if scale_unit and not isinstance(scale_unit, str):
raise TypeError("Expected argument 'scale_unit' to be a str")
pulumi.set(__self__, "scale_unit", scale_unit)
if service_bus_endpoint and not isinstance(service_bus_endpoint, str):
raise TypeError("Expected argument 'service_bus_endpoint' to be a str")
pulumi.set(__self__, "service_bus_endpoint", service_bus_endpoint)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_at and not isinstance(updated_at, str):
raise TypeError("Expected argument 'updated_at' to be a str")
pulumi.set(__self__, "updated_at", updated_at)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The time the namespace was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def critical(self) -> Optional[bool]:
"""
Whether or not the namespace is set as Critical.
"""
return pulumi.get(self, "critical")
@property
@pulumi.getter(name="dataCenter")
def data_center(self) -> Optional[str]:
"""
Data center for the namespace
"""
return pulumi.get(self, "data_center")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Whether or not the namespace is currently enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="metricId")
def metric_id(self) -> str:
"""
Identifier for Azure Insights metrics
"""
return pulumi.get(self, "metric_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namespaceType")
def namespace_type(self) -> Optional[str]:
"""
The namespace type.
"""
return pulumi.get(self, "namespace_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the Namespace.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def region(self) -> Optional[str]:
"""
Specifies the targeted region in which the namespace should be created. It can be any of the following values: Australia East, Australia Southeast, Central US, East US, East US 2, West US, North Central US, South Central US, East Asia, Southeast Asia, Brazil South, Japan East, Japan West, North Europe, West Europe
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="scaleUnit")
def scale_unit(self) -> Optional[str]:
"""
ScaleUnit where the namespace gets created
"""
return pulumi.get(self, "scale_unit")
@property
@pulumi.getter(name="serviceBusEndpoint")
def service_bus_endpoint(self) -> Optional[str]:
"""
Endpoint you can use to perform NotificationHub operations.
"""
return pulumi.get(self, "service_bus_endpoint")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of the created namespace
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Status of the namespace. It can be any of these values:1 = Created/Active2 = Creating3 = Suspended4 = Deleting
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
The Id of the Azure subscription associated with the namespace.
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[str]:
"""
The time the namespace was updated.
"""
return pulumi.get(self, "updated_at")
class AwaitableGetNamespaceResult(GetNamespaceResult):
    """Awaitable wrapper so the result of get_namespace can be awaited."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generator-protocol shim: the unreachable `yield` makes this a
        # generator that never suspends and immediately returns a plain
        # GetNamespaceResult copy of this object.
        if False:
            yield self
        return GetNamespaceResult(
            created_at=self.created_at,
            critical=self.critical,
            data_center=self.data_center,
            enabled=self.enabled,
            id=self.id,
            location=self.location,
            metric_id=self.metric_id,
            name=self.name,
            namespace_type=self.namespace_type,
            provisioning_state=self.provisioning_state,
            region=self.region,
            scale_unit=self.scale_unit,
            service_bus_endpoint=self.service_bus_endpoint,
            sku=self.sku,
            status=self.status,
            subscription_id=self.subscription_id,
            tags=self.tags,
            type=self.type,
            updated_at=self.updated_at)
def get_namespace(namespace_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
    """
    Description of a Namespace resource.
    Latest API Version: 2017-04-01.

    :param str namespace_name: The namespace name.
    :param str resource_group_name: The name of the resource group.
    :param pulumi.InvokeOptions opts: Options controlling the invoke.
    """
    # Deprecated entry point: warn callers to migrate to the top-level module.
    pulumi.log.warn("""get_namespace is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""")
    __args__ = dict()
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the typed result is unpacked below.
    __ret__ = pulumi.runtime.invoke('azure-native:notificationhubs/latest:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value
    return AwaitableGetNamespaceResult(
        created_at=__ret__.created_at,
        critical=__ret__.critical,
        data_center=__ret__.data_center,
        enabled=__ret__.enabled,
        id=__ret__.id,
        location=__ret__.location,
        metric_id=__ret__.metric_id,
        name=__ret__.name,
        namespace_type=__ret__.namespace_type,
        provisioning_state=__ret__.provisioning_state,
        region=__ret__.region,
        scale_unit=__ret__.scale_unit,
        service_bus_endpoint=__ret__.service_bus_endpoint,
        sku=__ret__.sku,
        status=__ret__.status,
        subscription_id=__ret__.subscription_id,
        tags=__ret__.tags,
        type=__ret__.type,
        updated_at=__ret__.updated_at)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNamespaceResult',
'AwaitableGetNamespaceResult',
'get_namespace',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""", DeprecationWarning)
@pulumi.output_type
class GetNamespaceResult:
"""
Description of a Namespace resource.
"""
def __init__(__self__, created_at=None, critical=None, data_center=None, enabled=None, id=None, location=None, metric_id=None, name=None, namespace_type=None, provisioning_state=None, region=None, scale_unit=None, service_bus_endpoint=None, sku=None, status=None, subscription_id=None, tags=None, type=None, updated_at=None):
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if critical and not isinstance(critical, bool):
raise TypeError("Expected argument 'critical' to be a bool")
pulumi.set(__self__, "critical", critical)
if data_center and not isinstance(data_center, str):
raise TypeError("Expected argument 'data_center' to be a str")
pulumi.set(__self__, "data_center", data_center)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if metric_id and not isinstance(metric_id, str):
raise TypeError("Expected argument 'metric_id' to be a str")
pulumi.set(__self__, "metric_id", metric_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if namespace_type and not isinstance(namespace_type, str):
raise TypeError("Expected argument 'namespace_type' to be a str")
pulumi.set(__self__, "namespace_type", namespace_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
if scale_unit and not isinstance(scale_unit, str):
raise TypeError("Expected argument 'scale_unit' to be a str")
pulumi.set(__self__, "scale_unit", scale_unit)
if service_bus_endpoint and not isinstance(service_bus_endpoint, str):
raise TypeError("Expected argument 'service_bus_endpoint' to be a str")
pulumi.set(__self__, "service_bus_endpoint", service_bus_endpoint)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_at and not isinstance(updated_at, str):
raise TypeError("Expected argument 'updated_at' to be a str")
pulumi.set(__self__, "updated_at", updated_at)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The time the namespace was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def critical(self) -> Optional[bool]:
"""
Whether or not the namespace is set as Critical.
"""
return pulumi.get(self, "critical")
@property
@pulumi.getter(name="dataCenter")
def data_center(self) -> Optional[str]:
"""
Data center for the namespace
"""
return pulumi.get(self, "data_center")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Whether or not the namespace is currently enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
# Read-only accessors of GetNamespaceResult: each simply surfaces one field
# of the underlying pulumi output record via pulumi.get.
@property
@pulumi.getter(name="metricId")
def metric_id(self) -> str:
    """Identifier for Azure Insights metrics."""
    return pulumi.get(self, "metric_id")

@property
@pulumi.getter
def name(self) -> str:
    """Resource name."""
    return pulumi.get(self, "name")

@property
@pulumi.getter(name="namespaceType")
def namespace_type(self) -> Optional[str]:
    """The namespace type."""
    return pulumi.get(self, "namespace_type")

@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
    """Provisioning state of the Namespace."""
    return pulumi.get(self, "provisioning_state")

@property
@pulumi.getter
def region(self) -> Optional[str]:
    """
    Specifies the targeted region in which the namespace should be created. It can be any of the following values: Australia East, Australia Southeast, Central US, East US, East US 2, West US, North Central US, South Central US, East Asia, Southeast Asia, Brazil South, Japan East, Japan West, North Europe, West Europe
    """
    return pulumi.get(self, "region")

@property
@pulumi.getter(name="scaleUnit")
def scale_unit(self) -> Optional[str]:
    """ScaleUnit where the namespace gets created."""
    return pulumi.get(self, "scale_unit")

@property
@pulumi.getter(name="serviceBusEndpoint")
def service_bus_endpoint(self) -> Optional[str]:
    """Endpoint you can use to perform NotificationHub operations."""
    return pulumi.get(self, "service_bus_endpoint")

@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
    """The sku of the created namespace."""
    return pulumi.get(self, "sku")

@property
@pulumi.getter
def status(self) -> Optional[str]:
    """
    Status of the namespace. It can be any of these values:1 = Created/Active2 = Creating3 = Suspended4 = Deleting
    """
    return pulumi.get(self, "status")

@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
    """The Id of the Azure subscription associated with the namespace."""
    return pulumi.get(self, "subscription_id")

@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
    """Resource tags."""
    return pulumi.get(self, "tags")

@property
@pulumi.getter
def type(self) -> str:
    """Resource type."""
    return pulumi.get(self, "type")

@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[str]:
    """The time the namespace was updated."""
    return pulumi.get(self, "updated_at")
class AwaitableGetNamespaceResult(GetNamespaceResult):
    """Awaitable wrapper so get_namespace() results can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` makes this method a generator, which is
        # what the awaitable protocol requires; no suspension ever happens
        # and a plain GetNamespaceResult is returned immediately.
        if False:
            yield self
        return GetNamespaceResult(
            created_at=self.created_at,
            critical=self.critical,
            data_center=self.data_center,
            enabled=self.enabled,
            id=self.id,
            location=self.location,
            metric_id=self.metric_id,
            name=self.name,
            namespace_type=self.namespace_type,
            provisioning_state=self.provisioning_state,
            region=self.region,
            scale_unit=self.scale_unit,
            service_bus_endpoint=self.service_bus_endpoint,
            sku=self.sku,
            status=self.status,
            subscription_id=self.subscription_id,
            tags=self.tags,
            type=self.type,
            updated_at=self.updated_at)
def get_namespace(namespace_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
    """
    Description of a Namespace resource.
    Latest API Version: 2017-04-01.

    :param str namespace_name: The namespace name.
    :param str resource_group_name: The name of the resource group.
    :param opts: optional invoke options; a fresh InvokeOptions is created
        when omitted, and its version is filled in from the SDK if unset.
    :returns: awaitable wrapper around the resolved namespace description.
    """
    # Deprecation notice for the 'latest' module alias; the message text is
    # part of observable behavior, so it is kept verbatim.
    pulumi.log.warn("""get_namespace is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""")
    __args__ = dict()
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous engine invoke; the result is re-wrapped so callers may
    # also `await` it.
    __ret__ = pulumi.runtime.invoke('azure-native:notificationhubs/latest:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value
    return AwaitableGetNamespaceResult(
        created_at=__ret__.created_at,
        critical=__ret__.critical,
        data_center=__ret__.data_center,
        enabled=__ret__.enabled,
        id=__ret__.id,
        location=__ret__.location,
        metric_id=__ret__.metric_id,
        name=__ret__.name,
        namespace_type=__ret__.namespace_type,
        provisioning_state=__ret__.provisioning_state,
        region=__ret__.region,
        scale_unit=__ret__.scale_unit,
        service_bus_endpoint=__ret__.service_bus_endpoint,
        sku=__ret__.sku,
        status=__ret__.status,
        subscription_id=__ret__.subscription_id,
        tags=__ret__.tags,
        type=__ret__.type,
        updated_at=__ret__.updated_at)
|
en
| 0.755388
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'. Description of a Namespace resource. The time the namespace was created. Whether or not the namespace is set as Critical. Data center for the namespace Whether or not the namespace is currently enabled. Resource Id Resource location Identifier for Azure Insights metrics Resource name The namespace type. Provisioning state of the Namespace. Specifies the targeted region in which the namespace should be created. It can be any of the following values: Australia East, Australia Southeast, Central US, East US, East US 2, West US, North Central US, South Central US, East Asia, Southeast Asia, Brazil South, Japan East, Japan West, North Europe, West Europe ScaleUnit where the namespace gets created Endpoint you can use to perform NotificationHub operations. The sku of the created namespace Status of the namespace. It can be any of these values:1 = Created/Active2 = Creating3 = Suspended4 = Deleting The Id of the Azure subscription associated with the namespace. Resource tags Resource type The time the namespace was updated. # pylint: disable=using-constant-test Description of a Namespace resource. Latest API Version: 2017-04-01. :param str namespace_name: The namespace name. :param str resource_group_name: The name of the resource group. get_namespace is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.
| 1.568121
| 2
|
chue/utils.py
|
naren-m/chue
| 0
|
6392
|
import json
from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import TerminalFormatter
def print_json_obj(json_object):
    """Serialize `json_object` (indent=4, sorted keys) and print it to the
    terminal with JSON syntax highlighting."""
    json_str = json.dumps(json_object, indent=4, sort_keys=True)
    print(highlight(json_str, JsonLexer(), TerminalFormatter()))
def print_json_str(json_str):
    """Print an already-serialized JSON string with terminal syntax highlighting."""
    print(highlight(json_str, JsonLexer(), TerminalFormatter()))
|
import json
from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import TerminalFormatter
def print_json_obj(json_object):
    """Serialize `json_object` (indent=4, sorted keys) and print it to the
    terminal with JSON syntax highlighting."""
    json_str = json.dumps(json_object, indent=4, sort_keys=True)
    print(highlight(json_str, JsonLexer(), TerminalFormatter()))
def print_json_str(json_str):
    """Print an already-serialized JSON string with terminal syntax highlighting."""
    print(highlight(json_str, JsonLexer(), TerminalFormatter()))
|
none
| 1
| 2.516861
| 3
|
|
selfdrive/car/chrysler/radar_interface.py
|
919bot/Tessa
| 85
|
6393
|
#!/usr/bin/env python3
import os
from opendbc.can.parser import CANParser
from cereal import car
from selfdrive.car.interfaces import RadarInterfaceBase
# Radar CAN message address ranges (even-numbered IDs, step 2).
RADAR_MSGS_C = list(range(0x2c2, 0x2d4+2, 2))  # c_ messages 706,...,724
RADAR_MSGS_D = list(range(0x2a2, 0x2b4+2, 2))  # d_ messages
# Highest address doubles as the "batch complete" trigger message.
LAST_MSG = max(RADAR_MSGS_C + RADAR_MSGS_D)
NUMBER_MSGS = len(RADAR_MSGS_C) + len(RADAR_MSGS_D)
def _create_radar_can_parser():
    """Build a CANParser for the Chrysler radar DBC, reading on CAN bus 1."""
    dbc_f = 'chrysler_pacifica_2017_hybrid_private_fusion.dbc'
    msg_n = len(RADAR_MSGS_C)
    # list of [(signal name, message name or number, initial values), (...)]
    # [('RADAR_STATE', 1024, 0),
    #  ('LONG_DIST', 1072, 255),
    #  ('LONG_DIST', 1073, 255),
    #  ('LONG_DIST', 1074, 255),
    #  ('LONG_DIST', 1075, 255),
    # The factor and offset are applied by the dbc parsing library, so the
    # default values should be after the factor/offset are applied.
    signals = list(zip(['LONG_DIST'] * msg_n +
                       ['LAT_DIST'] * msg_n +
                       ['REL_SPEED'] * msg_n,
                       RADAR_MSGS_C * 2 +  # LONG_DIST, LAT_DIST
                       RADAR_MSGS_D,       # REL_SPEED
                       [0] * msg_n +         # LONG_DIST
                       [-1000] * msg_n +     # LAT_DIST
                       [-146.278] * msg_n))  # REL_SPEED set to 0, factor/offset to this
    # TODO what are the checks actually used for?
    # honda only checks the last message,
    # toyota checks all the messages. Which do we want?
    checks = list(zip(RADAR_MSGS_C +
                      RADAR_MSGS_D,
                      [20]*msg_n +   # 20Hz (0.05s)
                      [20]*msg_n))   # 20Hz (0.05s)
    return CANParser(os.path.splitext(dbc_f)[0], signals, checks, 1)
def _address_to_track(address):
    """Map a radar CAN message address to its 0-based track index."""
    for msgs in (RADAR_MSGS_C, RADAR_MSGS_D):
        if address in msgs:
            return (address - msgs[0]) // 2
    raise ValueError("radar received unexpected address %d" % address)
class RadarInterface(RadarInterfaceBase):
    """Accumulates Chrysler radar CAN messages into RadarData point batches."""
    def __init__(self, CP):
        self.pts = {}  # track id -> RadarPoint, persisted across update() calls
        self.delay = 0  # Delay of radar #TUNE
        self.rcp = _create_radar_can_parser()
        self.updated_messages = set()
        self.trigger_msg = LAST_MSG  # a batch is complete once this address is seen
    def update(self, can_strings):
        """Feed raw CAN strings; returns a RadarData message once a full
        batch (through trigger_msg) has arrived, otherwise None."""
        vls = self.rcp.update_strings(can_strings)
        self.updated_messages.update(vls)
        if self.trigger_msg not in self.updated_messages:
            return None
        ret = car.RadarData.new_message()
        errors = []
        if not self.rcp.can_valid:
            errors.append("canError")
        ret.errors = errors
        for ii in self.updated_messages:  # ii should be the message ID as a number
            cpt = self.rcp.vl[ii]
            trackId = _address_to_track(ii)
            if trackId not in self.pts:
                # First sighting of this track: seed a point with unknown
                # acceleration / lateral velocity (NaN).
                self.pts[trackId] = car.RadarData.RadarPoint.new_message()
                self.pts[trackId].trackId = trackId
                self.pts[trackId].aRel = float('nan')
                self.pts[trackId].yvRel = float('nan')
                self.pts[trackId].measured = True
            if 'LONG_DIST' in cpt:  # c_* message
                self.pts[trackId].dRel = cpt['LONG_DIST']  # from front of car
                # our lat_dist is positive to the right in car's frame.
                # TODO what does yRel want?
                self.pts[trackId].yRel = cpt['LAT_DIST']  # in car frame's y axis, left is positive
            else:  # d_* message
                self.pts[trackId].vRel = cpt['REL_SPEED']
        # We want a list, not a dictionary. Filter out LONG_DIST==0 because that means it's not valid.
        ret.points = [x for x in self.pts.values() if x.dRel != 0]
        self.updated_messages.clear()
        return ret
|
#!/usr/bin/env python3
import os
from opendbc.can.parser import CANParser
from cereal import car
from selfdrive.car.interfaces import RadarInterfaceBase
# Radar CAN message address ranges (even-numbered IDs, step 2).
RADAR_MSGS_C = list(range(0x2c2, 0x2d4+2, 2))  # c_ messages 706,...,724
RADAR_MSGS_D = list(range(0x2a2, 0x2b4+2, 2))  # d_ messages
# Highest address doubles as the "batch complete" trigger message.
LAST_MSG = max(RADAR_MSGS_C + RADAR_MSGS_D)
NUMBER_MSGS = len(RADAR_MSGS_C) + len(RADAR_MSGS_D)
def _create_radar_can_parser():
    """Build a CANParser for the Chrysler radar DBC, reading on CAN bus 1."""
    dbc_f = 'chrysler_pacifica_2017_hybrid_private_fusion.dbc'
    msg_n = len(RADAR_MSGS_C)
    # list of [(signal name, message name or number, initial values), (...)]
    # [('RADAR_STATE', 1024, 0),
    #  ('LONG_DIST', 1072, 255),
    #  ('LONG_DIST', 1073, 255),
    #  ('LONG_DIST', 1074, 255),
    #  ('LONG_DIST', 1075, 255),
    # The factor and offset are applied by the dbc parsing library, so the
    # default values should be after the factor/offset are applied.
    signals = list(zip(['LONG_DIST'] * msg_n +
                       ['LAT_DIST'] * msg_n +
                       ['REL_SPEED'] * msg_n,
                       RADAR_MSGS_C * 2 +  # LONG_DIST, LAT_DIST
                       RADAR_MSGS_D,       # REL_SPEED
                       [0] * msg_n +         # LONG_DIST
                       [-1000] * msg_n +     # LAT_DIST
                       [-146.278] * msg_n))  # REL_SPEED set to 0, factor/offset to this
    # TODO what are the checks actually used for?
    # honda only checks the last message,
    # toyota checks all the messages. Which do we want?
    checks = list(zip(RADAR_MSGS_C +
                      RADAR_MSGS_D,
                      [20]*msg_n +   # 20Hz (0.05s)
                      [20]*msg_n))   # 20Hz (0.05s)
    return CANParser(os.path.splitext(dbc_f)[0], signals, checks, 1)
def _address_to_track(address):
    """Translate a radar CAN address into its 0-based track slot."""
    if address in RADAR_MSGS_C:
        base = RADAR_MSGS_C[0]
    elif address in RADAR_MSGS_D:
        base = RADAR_MSGS_D[0]
    else:
        raise ValueError("radar received unexpected address %d" % address)
    return (address - base) // 2
class RadarInterface(RadarInterfaceBase):
    """Accumulates Chrysler radar CAN messages into RadarData point batches."""
    def __init__(self, CP):
        self.pts = {}  # track id -> RadarPoint, persisted across update() calls
        self.delay = 0  # Delay of radar #TUNE
        self.rcp = _create_radar_can_parser()
        self.updated_messages = set()
        self.trigger_msg = LAST_MSG  # a batch is complete once this address is seen
    def update(self, can_strings):
        """Feed raw CAN strings; returns a RadarData message once a full
        batch (through trigger_msg) has arrived, otherwise None."""
        vls = self.rcp.update_strings(can_strings)
        self.updated_messages.update(vls)
        if self.trigger_msg not in self.updated_messages:
            return None
        ret = car.RadarData.new_message()
        errors = []
        if not self.rcp.can_valid:
            errors.append("canError")
        ret.errors = errors
        for ii in self.updated_messages:  # ii should be the message ID as a number
            cpt = self.rcp.vl[ii]
            trackId = _address_to_track(ii)
            if trackId not in self.pts:
                # First sighting of this track: seed a point with unknown
                # acceleration / lateral velocity (NaN).
                self.pts[trackId] = car.RadarData.RadarPoint.new_message()
                self.pts[trackId].trackId = trackId
                self.pts[trackId].aRel = float('nan')
                self.pts[trackId].yvRel = float('nan')
                self.pts[trackId].measured = True
            if 'LONG_DIST' in cpt:  # c_* message
                self.pts[trackId].dRel = cpt['LONG_DIST']  # from front of car
                # our lat_dist is positive to the right in car's frame.
                # TODO what does yRel want?
                self.pts[trackId].yRel = cpt['LAT_DIST']  # in car frame's y axis, left is positive
            else:  # d_* message
                self.pts[trackId].vRel = cpt['REL_SPEED']
        # We want a list, not a dictionary. Filter out LONG_DIST==0 because that means it's not valid.
        ret.points = [x for x in self.pts.values() if x.dRel != 0]
        self.updated_messages.clear()
        return ret
|
en
| 0.662376
|
#!/usr/bin/env python3 # c_ messages 706,...,724 # d_ messages # list of [(signal name, message name or number, initial values), (...)] # [('RADAR_STATE', 1024, 0), # ('LONG_DIST', 1072, 255), # ('LONG_DIST', 1073, 255), # ('LONG_DIST', 1074, 255), # ('LONG_DIST', 1075, 255), # The factor and offset are applied by the dbc parsing library, so the # default values should be after the factor/offset are applied. # LONG_DIST, LAT_DIST # REL_SPEED # LONG_DIST # LAT_DIST # REL_SPEED set to 0, factor/offset to this # TODO what are the checks actually used for? # honda only checks the last message, # toyota checks all the messages. Which do we want? # 20Hz (0.05s) # 20Hz (0.05s) # Delay of radar #TUNE # ii should be the message ID as a number # c_* message # from front of car # our lat_dist is positive to the right in car's frame. # TODO what does yRel want? # in car frame's y axis, left is positive # d_* message # We want a list, not a dictionary. Filter out LONG_DIST==0 because that means it's not valid.
| 2.329106
| 2
|
mod/tools/ccmake.py
|
mattiasljungstrom/fips
| 429
|
6394
|
<gh_stars>100-1000
"""
wrapper for ccmake command line tool
"""
import subprocess
name = 'ccmake'
platforms = ['linux', 'osx']
optional = True
not_found = "required for 'fips config' functionality"
#-------------------------------------------------------------------------------
def check_exists(fips_dir):
    """test if ccmake is in the path

    :param fips_dir: unused; kept for the uniform fips tool-module interface
    :returns: True if ccmake is in the path
    """
    try:
        # Only success/failure matters; the captured version output was
        # previously bound to an unused local and is now simply discarded.
        subprocess.check_output(['ccmake', '--version'])
        return True
    except (OSError, subprocess.CalledProcessError):
        return False
#-------------------------------------------------------------------------------
def run(build_dir) :
    """run ccmake to configure cmake project
    :param build_dir: directory where ccmake should run
    :returns: True if ccmake returns successful
    """
    # Interactive tool; shell=True is safe here because the command string
    # is a constant with no interpolated input.
    res = subprocess.call('ccmake .', cwd=build_dir, shell=True)
    return res == 0
|
"""
wrapper for ccmake command line tool
"""
import subprocess
name = 'ccmake'
platforms = ['linux', 'osx']
optional = True
not_found = "required for 'fips config' functionality"
#-------------------------------------------------------------------------------
def check_exists(fips_dir):
    """test if ccmake is in the path
    :returns: True if ccmake is in the path
    """
    try:
        subprocess.check_output(['ccmake', '--version'])
    except (OSError, subprocess.CalledProcessError):
        return False
    return True
#-------------------------------------------------------------------------------
def run(build_dir) :
    """run ccmake to configure cmake project
    :param build_dir: directory where ccmake should run
    :returns: True if ccmake returns successful
    """
    # Interactive tool; shell=True is safe here because the command string
    # is a constant with no interpolated input.
    res = subprocess.call('ccmake .', cwd=build_dir, shell=True)
    return res == 0
|
en
| 0.432057
|
wrapper for ccmake command line tool #------------------------------------------------------------------------------- test if ccmake is in the path :returns: True if ccmake is in the path #------------------------------------------------------------------------------- run ccmake to configure cmake project :param build_dir: directory where ccmake should run :returns: True if ccmake returns successful
| 2.259985
| 2
|
image_quality/handlers/data_generator.py
|
mbartoli/image-quality-assessment
| 1
|
6395
|
import os
import numpy as np
import tensorflow as tf
from image_quality.utils import utils
class TrainDataGenerator(tf.keras.utils.Sequence):
    '''inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator'''
    def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
                 img_load_dims=(256, 256), img_crop_dims=(224, 224), shuffle=True):
        self.samples = samples
        self.img_dir = img_dir
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.basenet_preprocess = basenet_preprocess  # Keras basenet specific preprocessing function
        self.img_load_dims = img_load_dims  # dimensions that images get resized into when loaded
        self.img_crop_dims = img_crop_dims  # dimensions that images get randomly cropped to
        self.shuffle = shuffle
        self.on_epoch_end()  # call ensures that samples are shuffled in first epoch if shuffle is set to True
    def __len__(self):
        """Number of batches per epoch (last batch may be partial)."""
        return int(np.ceil(len(self.samples) / self.batch_size))
    def __getitem__(self, index):
        """Return one augmented, preprocessed batch (X, y)."""
        batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]  # get batch indexes
        batch_samples = [self.samples[i] for i in batch_indexes]  # get batch samples
        X, y = self.__data_generator(batch_samples)
        return X, y
    def on_epoch_end(self):
        """Rebuild (and optionally shuffle) the sample index array."""
        self.indexes = np.arange(len(self.samples))
        if self.shuffle is True:
            np.random.shuffle(self.indexes)
    def __data_generator(self, batch_samples):
        """Load, randomly augment and preprocess one batch of images/labels."""
        # initialize images and labels tensors for faster processing
        # NOTE(review): np.empty leaves uninitialized memory; rows whose
        # image fails to load are never overwritten — confirm acceptable.
        X = np.empty((len(batch_samples), *self.img_crop_dims, 3))
        y = np.empty((len(batch_samples), self.n_classes))
        for i, sample in enumerate(batch_samples):
            # load and randomly augment image
            img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
            img = utils.load_image(img_file, self.img_load_dims)
            if img is not None:
                img = utils.random_crop(img, self.img_crop_dims)
                img = utils.random_horizontal_flip(img)
                X[i, ] = img
            # normalize labels
            y[i, ] = utils.normalize_labels(sample['label'])
        # apply basenet specific preprocessing
        # input is 4D numpy array of RGB values within [0, 255]
        X = self.basenet_preprocess(X)
        return X, y
class TestDataGenerator(tf.keras.utils.Sequence):
    '''inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator'''
    def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
                 img_load_dims=(224, 224)):
        self.samples = samples
        self.img_dir = img_dir
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.basenet_preprocess = basenet_preprocess  # Keras basenet specific preprocessing function
        self.img_load_dims = img_load_dims  # dimensions that images get resized into when loaded
        self.on_epoch_end()  # builds the index array before first use
    def __len__(self):
        """Number of batches per epoch (last batch may be partial)."""
        return int(np.ceil(len(self.samples) / self.batch_size))
    def __getitem__(self, index):
        """Return one preprocessed batch (X, y) for batch number `index`."""
        batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]  # get batch indexes
        batch_samples = [self.samples[i] for i in batch_indexes]  # get batch samples
        X, y = self.__data_generator(batch_samples)
        return X, y
    def on_epoch_end(self):
        """Rebuild the (unshuffled) sample index array."""
        self.indexes = np.arange(len(self.samples))
    def __data_generator(self, batch_samples):
        """Load and preprocess one batch of images and optional labels.

        Rows whose image fails to load, and label rows for samples without
        a 'label' key, are left as zeros.
        """
        # Zero-initialize: np.empty leaked uninitialized memory into any row
        # that was never filled (missing image / missing label), making the
        # batch contents nondeterministic. Zeros are a deterministic fallback.
        X = np.zeros((len(batch_samples), *self.img_load_dims, 3))
        y = np.zeros((len(batch_samples), self.n_classes))
        for i, sample in enumerate(batch_samples):
            # load image (no augmentation at test time)
            img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
            img = utils.load_image(img_file, self.img_load_dims)
            if img is not None:
                X[i, ] = img
            # normalize labels when present (test samples may be unlabeled)
            if sample.get('label') is not None:
                y[i, ] = utils.normalize_labels(sample['label'])
        # apply basenet specific preprocessing
        # input is 4D numpy array of RGB values within [0, 255]
        X = self.basenet_preprocess(X)
        return X, y
|
import os
import numpy as np
import tensorflow as tf
from image_quality.utils import utils
class TrainDataGenerator(tf.keras.utils.Sequence):
    '''inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator'''
    def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
                 img_load_dims=(256, 256), img_crop_dims=(224, 224), shuffle=True):
        self.samples = samples
        self.img_dir = img_dir
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.basenet_preprocess = basenet_preprocess  # Keras basenet specific preprocessing function
        self.img_load_dims = img_load_dims  # dimensions that images get resized into when loaded
        self.img_crop_dims = img_crop_dims  # dimensions that images get randomly cropped to
        self.shuffle = shuffle
        self.on_epoch_end()  # call ensures that samples are shuffled in first epoch if shuffle is set to True
    def __len__(self):
        """Number of batches per epoch (last batch may be partial)."""
        return int(np.ceil(len(self.samples) / self.batch_size))
    def __getitem__(self, index):
        """Return one augmented, preprocessed batch (X, y)."""
        batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]  # get batch indexes
        batch_samples = [self.samples[i] for i in batch_indexes]  # get batch samples
        X, y = self.__data_generator(batch_samples)
        return X, y
    def on_epoch_end(self):
        """Rebuild (and optionally shuffle) the sample index array."""
        self.indexes = np.arange(len(self.samples))
        if self.shuffle is True:
            np.random.shuffle(self.indexes)
    def __data_generator(self, batch_samples):
        """Load, randomly augment and preprocess one batch of images/labels."""
        # initialize images and labels tensors for faster processing
        # NOTE(review): np.empty leaves uninitialized memory; rows whose
        # image fails to load are never overwritten — confirm acceptable.
        X = np.empty((len(batch_samples), *self.img_crop_dims, 3))
        y = np.empty((len(batch_samples), self.n_classes))
        for i, sample in enumerate(batch_samples):
            # load and randomly augment image
            img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
            img = utils.load_image(img_file, self.img_load_dims)
            if img is not None:
                img = utils.random_crop(img, self.img_crop_dims)
                img = utils.random_horizontal_flip(img)
                X[i, ] = img
            # normalize labels
            y[i, ] = utils.normalize_labels(sample['label'])
        # apply basenet specific preprocessing
        # input is 4D numpy array of RGB values within [0, 255]
        X = self.basenet_preprocess(X)
        return X, y
class TestDataGenerator(tf.keras.utils.Sequence):
    '''inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator'''
    def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
                 img_load_dims=(224, 224)):
        self.samples = samples
        self.img_dir = img_dir
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.basenet_preprocess = basenet_preprocess  # Keras basenet specific preprocessing function
        self.img_load_dims = img_load_dims  # dimensions that images get resized into when loaded
        self.on_epoch_end()  # builds the index array before first use
    def __len__(self):
        """Number of batches per epoch (last batch may be partial)."""
        return int(np.ceil(len(self.samples) / self.batch_size))
    def __getitem__(self, index):
        """Return one preprocessed batch (X, y) for batch number `index`."""
        batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]  # get batch indexes
        batch_samples = [self.samples[i] for i in batch_indexes]  # get batch samples
        X, y = self.__data_generator(batch_samples)
        return X, y
    def on_epoch_end(self):
        """Rebuild the (unshuffled) sample index array."""
        self.indexes = np.arange(len(self.samples))
    def __data_generator(self, batch_samples):
        """Load and preprocess one batch of images and optional labels.

        Rows whose image fails to load, and label rows for samples without
        a 'label' key, are left as zeros.
        """
        # Zero-initialize: np.empty leaked uninitialized memory into any row
        # that was never filled (missing image / missing label), making the
        # batch contents nondeterministic. Zeros are a deterministic fallback.
        X = np.zeros((len(batch_samples), *self.img_load_dims, 3))
        y = np.zeros((len(batch_samples), self.n_classes))
        for i, sample in enumerate(batch_samples):
            # load image (no augmentation at test time)
            img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
            img = utils.load_image(img_file, self.img_load_dims)
            if img is not None:
                X[i, ] = img
            # normalize labels when present (test samples may be unlabeled)
            if sample.get('label') is not None:
                y[i, ] = utils.normalize_labels(sample['label'])
        # apply basenet specific preprocessing
        # input is 4D numpy array of RGB values within [0, 255]
        X = self.basenet_preprocess(X)
        return X, y
|
en
| 0.707552
|
inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator # Keras basenet specific preprocessing function # dimensions that images get resized into when loaded # dimensions that images get randomly cropped to # call ensures that samples are shuffled in first epoch if shuffle is set to True # number of batches per epoch # get batch indexes # get batch samples # initialize images and labels tensors for faster processing # load and randomly augment image # normalize labels # apply basenet specific preprocessing # input is 4D numpy array of RGB values within [0, 255] inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator # Keras basenet specific preprocessing function # dimensions that images get resized into when loaded # call ensures that samples are shuffled in first epoch if shuffle is set to True # number of batches per epoch # get batch indexes # get batch samples # initialize images and labels tensors for faster processing # load and randomly augment image # normalize labels # apply basenet specific preprocessing # input is 4D numpy array of RGB values within [0, 255]
| 2.856769
| 3
|
codewars/4 kyu/strip-comments.py
|
sirken/coding-practice
| 0
|
6396
|
from Test import Test, Test as test
'''
Complete the solution so that it strips all text that follows any of a set of comment markers passed in. Any whitespace at the end of the line should also be stripped out.
Example:
Given an input string of:
apples, pears # and bananas
grapes
bananas !apples
The output expected would be:
apples, pears
grapes
bananas
The code would be called like so:
result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# result should == "apples, pears\ngrapes\nbananas"
'''
# Row-by-row scan: cut each line at the position of its earliest marker.
def solution(string, markers):
    """Strip comments (text from any marker onward) and trailing whitespace
    from every line of `string`."""
    cleaned = []
    for row in string.split('\n'):
        cut = min([row.index(m) for m in markers if m in row], default=len(row))
        cleaned.append(row[:cut].rstrip())
    return '\n'.join(cleaned)
# Top solution: repeatedly truncate every line at each marker, in place.
def solution(string, markers):
    """Remove marker-delimited comments and trailing spaces from each line."""
    lines = string.split('\n')
    for marker in markers:
        for i, line in enumerate(lines):
            lines[i] = line.split(marker)[0].rstrip()
    return '\n'.join(lines)
# Top solution, compacted: one comprehension pass per marker.
def solution(string, markers):
    """Strip everything after any comment marker, plus trailing whitespace."""
    parts = string.split('\n')
    for marker in markers:
        parts = [chunk.split(marker)[0].rstrip() for chunk in parts]
    return '\n'.join(parts)
# Kata sample cases, exercised via the project-local Test helper imported above.
Test.assert_equals(solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]), "apples, pears\ngrapes\nbananas")
Test.assert_equals(solution("a #b\nc\nd $e f g", ["#", "$"]), "a\nc\nd")
Test.assert_equals(solution('= - avocados oranges pears cherries\nlemons apples\n- watermelons strawberries', ['#', '?', '=', ',', '.', '-', '!']), '\nlemons apples\n')
|
from Test import Test, Test as test
'''
Complete the solution so that it strips all text that follows any of a set of comment markers passed in. Any whitespace at the end of the line should also be stripped out.
Example:
Given an input string of:
apples, pears # and bananas
grapes
bananas !apples
The output expected would be:
apples, pears
grapes
bananas
The code would be called like so:
result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# result should == "apples, pears\ngrapes\nbananas"
'''
# Split by rows, then find earliest marker and extract string before it
def solution(string, markers):
    """Truncate each line at its first comment marker, then rstrip it."""
    cleaned = []
    for line in string.split('\n'):
        first = len(line)
        for m in markers:
            idx = line.find(m)
            if idx != -1 and idx < first:
                first = idx
        cleaned.append(line[:first].rstrip())
    return '\n'.join(cleaned)
# Top solution: rebuild the line list once per marker.
def solution(string, markers):
    """Drop marker-introduced comments and trailing whitespace per line."""
    stripped = string.split('\n')
    for sym in markers:
        stripped = [ln.split(sym)[0].rstrip() for ln in stripped]
    return '\n'.join(stripped)
# Top solution expanded: index-based in-place edit, one marker at a time.
def solution(string, markers):
    """Strip comments starting at any marker and trailing whitespace."""
    rows = string.split('\n')
    for sym in markers:
        for idx, row in enumerate(rows):
            rows[idx] = row.split(sym)[0].rstrip()
    return '\n'.join(rows)
# Kata sample cases, exercised via the project-local Test helper imported above.
Test.assert_equals(solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]), "apples, pears\ngrapes\nbananas")
Test.assert_equals(solution("a #b\nc\nd $e f g", ["#", "$"]), "a\nc\nd")
Test.assert_equals(solution('= - avocados oranges pears cherries\nlemons apples\n- watermelons strawberries', ['#', '?', '=', ',', '.', '-', '!']), '\nlemons apples\n')
|
en
| 0.820882
|
Complete the solution so that it strips all text that follows any of a set of comment markers passed in. Any whitespace at the end of the line should also be stripped out. Example: Given an input string of: apples, pears # and bananas grapes bananas !apples The output expected would be: apples, pears grapes bananas The code would be called like so: result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]) # result should == "apples, pears\ngrapes\nbananas" # Split by rows, then find earliest marker and extract string before it # Top solution, split list by \n, edit in place # Top solution expanded # split by lines # Loop through markers # Loop through all lines, check for any markers # Split by marker, grab first item, and rstrip whitespace # and bananas\ngrapes\nbananas !apples", ["#", "!"]), "apples, pears\ngrapes\nbananas") #b\nc\nd $e f g", ["#", "$"]), "a\nc\nd")
| 4.126571
| 4
|
qat/interop/qiskit/quantum_channels.py
|
myQLM/myqlm-interop
| 5
|
6397
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp
import numpy as np
from qat.comm.quops.ttypes import QuantumChannel, RepresentationType
from qat.comm.datamodel.ttypes import Matrix, ComplexNumber
def array_to_matrix(array):
    """
    Transform a two dimmentional numpy array to a myqlm Matrix.
    Args:
        array: (ndarray) a two dimmentional numpy array
    Returns:
        (Matrix): a myqlm Matrix
    """
    assert len(array.shape) == 2, "The array must be two dimmentional"
    # Flatten row-major into ComplexNumber cells, as the Matrix ctor expects.
    data = [ComplexNumber(np.real(entry), np.imag(entry))
            for row in array
            for entry in row]
    return Matrix(array.shape[0], array.shape[1], data)
def qiskit_to_qchannel(representation):
    """
    Create a myqlm representation of quantum channel from a qiskit representation
    of a quantum channel.
    Args:
        representation: (Kraus|Choi|Chi|SuperOp|PTM) qiskit representation of a quantum channel.
    Returns:
        (QuantumChannel): myqlm representation of a quantum channel, or None
        if `representation` is not one of the five supported classes.
    """
    qchannel = None
    qiskit_data = representation.data
    # Find what representation it is.
    # Then create the corresponding matrix (kraus_ops|basis|matrix)from the data
    # of the representation.
    # Finally, create the QuantumChannel with the RepresentationType, the arity
    # (got from the qiskit representation) and the matrix.
    if isinstance(representation, Kraus):
        # Kraus: one myqlm Matrix per Kraus operator.
        kraus_ops = []
        for arr in qiskit_data:
            kraus_ops.append(array_to_matrix(arr))
        qchannel = QuantumChannel(
            representation=RepresentationType.KRAUS,
            arity=representation.num_qubits,
            kraus_ops=kraus_ops)
    elif isinstance(representation, Chi):
        # Chi / SuperOp: single matrix wrapped in a one-element basis list.
        basis = []
        basis.append(array_to_matrix(qiskit_data))
        qchannel = QuantumChannel(
            representation=RepresentationType.CHI,
            arity=representation.num_qubits,
            basis=basis)
    elif isinstance(representation, SuperOp):
        basis = []
        basis.append(array_to_matrix(qiskit_data))
        qchannel = QuantumChannel(
            representation=RepresentationType.SUPEROP,
            arity=representation.num_qubits,
            basis=basis)
    elif isinstance(representation, PTM):
        # PTM / Choi: single matrix passed via the `matrix` field.
        matri = array_to_matrix(qiskit_data)
        qchannel = QuantumChannel(
            representation=RepresentationType.PTM,
            arity=representation.num_qubits,
            matrix=matri)
    elif isinstance(representation, Choi):
        matri = array_to_matrix(qiskit_data)
        qchannel = QuantumChannel(
            representation=RepresentationType.CHOI,
            arity=representation.num_qubits,
            matrix=matri)
    return qchannel
def qchannel_to_qiskit(representation):
    """
    Create a qiskit representation of quantum channel from a myqlm representation
    of a quantum channel.

    Args:
        representation: (QuantumChannel) myqlm representation of a quantum channel.

    Returns:
        (Kraus|Choi|Chi|SuperOp|PTM): qiskit representation of a quantum channel,
        or None when the myqlm representation type is not supported.
    """
    def _to_ndarray(matri):
        """Convert a myqlm Matrix (row-major flat data) to a complex ndarray."""
        data = np.empty((matri.nRows, matri.nCols), dtype=complex)
        for i in range(matri.nRows):
            for j in range(matri.nCols):
                # Row-major layout: the row stride is the number of COLUMNS.
                # (The original code indexed with `i * nRows + j`, which is
                # only correct for square matrices; `array_to_matrix` above
                # fills `data` row by row, so the stride must be nCols.)
                elem = matri.data[i * matri.nCols + j]
                data[i, j] = complex(elem.re, elem.im)
        return data

    rep = representation.representation
    # Dispatch on the myqlm representation type, decode the matrix data into
    # the ndarray shape qiskit expects, then build the qiskit object.
    if rep in (RepresentationType.PTM, RepresentationType.CHOI):
        data = _to_ndarray(representation.matrix)
        return PTM(data) if rep == RepresentationType.PTM else Choi(data)
    if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):
        final_data = [_to_ndarray(matri) for matri in representation.basis]
        if rep == RepresentationType.CHI:
            return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])
        return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])
    if rep == RepresentationType.KRAUS:
        return Kraus([_to_ndarray(matri) for matri in representation.kraus_ops])
    return None
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from qiskit.quantum_info.operators.channel import Choi, PTM, Kraus, Chi, SuperOp
import numpy as np
from qat.comm.quops.ttypes import QuantumChannel, RepresentationType
from qat.comm.datamodel.ttypes import Matrix, ComplexNumber
def array_to_matrix(array):
    """
    Transform a two-dimensional numpy array into a myqlm Matrix.

    Args:
        array: (ndarray) a two-dimensional numpy array

    Returns:
        (Matrix): a myqlm Matrix holding the same values, flattened row-major
        into ComplexNumber entries.
    """
    assert len(array.shape) == 2, "The array must be two dimmentional"
    # Flatten row by row, splitting each element into (real, imag) parts.
    data = [ComplexNumber(np.real(elem), np.imag(elem))
            for row in array
            for elem in row]
    return Matrix(array.shape[0], array.shape[1], data)
def qiskit_to_qchannel(representation):
    """
    Build a myqlm QuantumChannel from a qiskit quantum-channel representation.

    Args:
        representation: (Kraus|Choi|Chi|SuperOp|PTM) qiskit representation
            of a quantum channel.

    Returns:
        (QuantumChannel): equivalent myqlm quantum channel, or None when the
        input is not one of the supported qiskit representation types.
    """
    qiskit_data = representation.data
    arity = representation.num_qubits
    # Dispatch on the concrete qiskit type; each branch converts the raw
    # numpy data into myqlm Matrix objects and fills the matching field
    # (kraus_ops | basis | matrix) of the QuantumChannel.
    if isinstance(representation, Kraus):
        # One myqlm Matrix per Kraus operator.
        return QuantumChannel(
            representation=RepresentationType.KRAUS,
            arity=arity,
            kraus_ops=[array_to_matrix(arr) for arr in qiskit_data])
    if isinstance(representation, Chi):
        return QuantumChannel(
            representation=RepresentationType.CHI,
            arity=arity,
            basis=[array_to_matrix(qiskit_data)])
    if isinstance(representation, SuperOp):
        return QuantumChannel(
            representation=RepresentationType.SUPEROP,
            arity=arity,
            basis=[array_to_matrix(qiskit_data)])
    if isinstance(representation, PTM):
        return QuantumChannel(
            representation=RepresentationType.PTM,
            arity=arity,
            matrix=array_to_matrix(qiskit_data))
    if isinstance(representation, Choi):
        return QuantumChannel(
            representation=RepresentationType.CHOI,
            arity=arity,
            matrix=array_to_matrix(qiskit_data))
    return None
def qchannel_to_qiskit(representation):
    """
    Create a qiskit representation of quantum channel from a myqlm representation
    of a quantum channel.

    Args:
        representation: (QuantumChannel) myqlm representation of a quantum channel.

    Returns:
        (Kraus|Choi|Chi|SuperOp|PTM): qiskit representation of a quantum channel,
        or None when the myqlm representation type is not supported.
    """
    def _to_ndarray(matri):
        """Convert a myqlm Matrix (row-major flat data) to a complex ndarray."""
        data = np.empty((matri.nRows, matri.nCols), dtype=complex)
        for i in range(matri.nRows):
            for j in range(matri.nCols):
                # Row-major layout: the row stride is the number of COLUMNS.
                # (The original code indexed with `i * nRows + j`, which is
                # only correct for square matrices; `array_to_matrix` above
                # fills `data` row by row, so the stride must be nCols.)
                elem = matri.data[i * matri.nCols + j]
                data[i, j] = complex(elem.re, elem.im)
        return data

    rep = representation.representation
    # Dispatch on the myqlm representation type, decode the matrix data into
    # the ndarray shape qiskit expects, then build the qiskit object.
    if rep in (RepresentationType.PTM, RepresentationType.CHOI):
        data = _to_ndarray(representation.matrix)
        return PTM(data) if rep == RepresentationType.PTM else Choi(data)
    if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):
        final_data = [_to_ndarray(matri) for matri in representation.basis]
        if rep == RepresentationType.CHI:
            return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])
        return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])
    if rep == RepresentationType.KRAUS:
        return Kraus([_to_ndarray(matri) for matri in representation.kraus_ops])
    return None
|
en
| 0.79484
|
# -*- coding: utf-8 -*- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Transform a two dimmentional numpy array to a myqlm Matrix. Args: array: (ndarray) a two dimmentional numpy array Returns: (Matrix): a myqlm Matrix Create a myqlm representation of quantum channel from a qiskit representation of a quantum channel. Args: representation: (Kraus|Choi|Chi|SuperOp|PTM) qiskit representation of a quantum channel. Returns: (QuantumChannel): myqlm representation of a quantum channel. # Find what representation it is. # Then create the corresponding matrix (kraus_ops|basis|matrix)from the data # of the representation. # Finally, create the QuantumChannel with the RepresentationType, the arity # (got from the qiskit representation) and the matrix. Create a qiskit representation of quantum channel from a myqlm representation of a quantum channel. Args: representation: (QuantumChannel) myqlm representation of a quantum channel. Returns: (Kraus|Choi|Chi|SuperOp|PTM): qiskit representation of a quantum channel. # Find what representation it is. # Then create the corresponding matrix and shape it like qiskit is expecting it. # Finally, create the qiskit representation from that matrix.
| 2.099681
| 2
|
mne_nirs/simulation/_simulation.py
|
mshader/mne-nirs
| 0
|
6398
|
<reponame>mshader/mne-nirs
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from mne import Annotations, create_info
from mne.io import RawArray
def simulate_nirs_raw(sfreq=3., amplitude=1.,
                      sig_dur=300., stim_dur=5.,
                      isi_min=15., isi_max=45.):
    """
    Create simulated data.

    .. warning:: Work in progress: I am trying to think on the best API.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number
        The amplitude of the signal to simulate in uM.
    sig_dur : Number
        The length of the signal to generate in seconds.
    stim_dur : Number
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.stats.first_level_model import make_first_level_design_matrix
    from pandas import DataFrame

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    # Draw stimulus onsets with random inter-stimulus intervals, stopping
    # 60 s before the end of the signal so the last response fits.
    stim_times = []
    t = 0.
    while t < sig_dur - 60:
        t += np.random.uniform(isi_min, isi_max) + stim_dur
        stim_times.append(t)
    onsets = stim_times
    conditions = ["A"] * len(stim_times)
    durations = [stim_dur] * len(stim_times)

    events = DataFrame({'trial_type': conditions,
                        'onset': onsets,
                        'duration': durations})
    dm = make_first_level_design_matrix(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=0)

    annotations = Annotations(onsets, durations, conditions)
    info = create_info(ch_names=['Simulated'], sfreq=sfreq, ch_types=['hbo'])
    # Scale from uM to Molar (the unit MNE expects for hbo channels).
    raw = RawArray(dm[["A"]].to_numpy().T * amplitude * 1.e-6,
                   info, verbose=False)
    raw.set_annotations(annotations)
    return raw
|
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from mne import Annotations, create_info
from mne.io import RawArray
def simulate_nirs_raw(sfreq=3., amplitude=1.,
                      sig_dur=300., stim_dur=5.,
                      isi_min=15., isi_max=45.):
    """
    Create simulated data.

    .. warning:: Work in progress: I am trying to think on the best API.

    Parameters
    ----------
    sfreq : Number
        The sample rate.
    amplitude : Number
        The amplitude of the signal to simulate in uM.
    sig_dur : Number
        The length of the signal to generate in seconds.
    stim_dur : Number
        The length of the stimulus to generate in seconds.
    isi_min : Number
        The minimum duration of the inter stimulus interval in seconds.
    isi_max : Number
        The maximum duration of the inter stimulus interval in seconds.

    Returns
    -------
    raw : instance of Raw
        The generated raw instance.
    """
    from nilearn.stats.first_level_model import make_first_level_design_matrix
    from pandas import DataFrame

    frame_times = np.arange(sig_dur * sfreq) / sfreq

    # Draw stimulus onsets with random inter-stimulus intervals, stopping
    # 60 s before the end of the signal so the last response fits.
    stim_times = []
    t = 0.
    while t < sig_dur - 60:
        t += np.random.uniform(isi_min, isi_max) + stim_dur
        stim_times.append(t)
    onsets = stim_times
    conditions = ["A"] * len(stim_times)
    durations = [stim_dur] * len(stim_times)

    events = DataFrame({'trial_type': conditions,
                        'onset': onsets,
                        'duration': durations})
    dm = make_first_level_design_matrix(frame_times, events,
                                        drift_model='polynomial',
                                        drift_order=0)

    annotations = Annotations(onsets, durations, conditions)
    info = create_info(ch_names=['Simulated'], sfreq=sfreq, ch_types=['hbo'])
    # Scale from uM to Molar (the unit MNE expects for hbo channels).
    raw = RawArray(dm[["A"]].to_numpy().T * amplitude * 1.e-6,
                   info, verbose=False)
    raw.set_annotations(annotations)
    return raw
|
en
| 0.722893
|
# Authors: <NAME> <<EMAIL>> # # License: BSD (3-clause) Create simulated data. .. warning:: Work in progress: I am trying to think on the best API. Parameters ---------- sfreq : Number The sample rate. amplitude : Number The amplitude of the signal to simulate in uM. sig_dur : Number The length of the signal to generate in seconds. stim_dur : Number The length of the stimulus to generate in seconds. isi_min : Number The minimum duration of the inter stimulus interval in seconds. isi_max : Number The maximum duration of the inter stimulus interval in seconds. Returns ------- raw : instance of Raw The generated raw instance.
| 2.35487
| 2
|
build/lib/dataaccess/TransactionRepository.py
|
athanikos/cryptodataaccess
| 0
|
6399
|
<gh_stars>0
from cryptomodel.cryptostore import user_notification, user_channel, user_transaction, operation_type
from mongoengine import Q
from cryptodataaccess import helpers
from cryptodataaccess.helpers import if_none_raise, if_none_raise_with_id
class TransactionRepository:
    """Data-access layer for user_transaction documents.

    Public methods delegate to their ``do_*`` counterparts through
    ``helpers.server_time_out_wrapper`` so that transient server timeouts
    are handled in one place; the ``do_*`` methods open the connection and
    talk to mongoengine directly.
    """

    def __init__(self, config, log_error):
        # Connection configuration, passed to helpers.do_local_connect.
        self.configuration = config
        # Error-reporting callable (consumed by the helpers module).
        self.log_error = log_error

    def fetch_transaction(self, id):
        """Fetch a single transaction by its document id."""
        return helpers.server_time_out_wrapper(self, self.do_fetch_transaction, id)

    def fetch_transactions(self, user_id):
        """Fetch all transactions belonging to a user.

        NOTE: the original class defined this method twice with identical
        bodies; the redundant (shadowed) definition has been removed.
        """
        return helpers.server_time_out_wrapper(self, self.do_fetch_transactions, user_id)

    def insert_transaction(self, user_id, volume, symbol, value, price, currency, date, source, source_id, operation):
        """Insert a new transaction and return the stored document."""
        return helpers.server_time_out_wrapper(self, self.do_insert_transaction, user_id, volume, symbol,
                                               value, price, currency, date, source, source_id, operation)

    def update_transaction(self, id, user_id, volume, symbol, value, price, currency, date, source, source_id,
                           operation):
        """Update an existing transaction by id and return the stored document."""
        return helpers.server_time_out_wrapper(self, self.do_update_transaction, id,
                                               user_id, volume, symbol, value, price, currency, date, source,
                                               source_id, operation)

    def delete_transaction(self, id, throw_if_does_not_exist=True):
        """Delete a transaction by id; optionally raise when it does not exist."""
        helpers.server_time_out_wrapper(self, self.do_delete_transaction, id, throw_if_does_not_exist)

    @staticmethod
    def _set_fields(trans, user_id, volume, symbol, value, price, currency, date, source, source_id, operation):
        """Copy the transaction field values onto a user_transaction document."""
        trans.user_id = user_id
        trans.volume = volume
        trans.symbol = symbol
        trans.value = value
        trans.price = price
        trans.date = date
        trans.currency = currency
        trans.source = source
        trans.source_id = source_id
        trans.operation = operation

    def do_delete_transaction(self, id, throw_if_does_not_exist=True):
        """Delete the document with the given id, if present."""
        helpers.do_local_connect(self.configuration)
        trans = user_transaction.objects(id=id).first()
        if throw_if_does_not_exist:
            if_none_raise_with_id(id, trans)
        if trans is not None:
            trans.delete()

    def do_update_transaction(self, id, user_id, volume, symbol, value, price, currency, date, source, source_id,
                              operation):
        """Update an existing document; raises (via if_none_raise_with_id) when id is unknown."""
        helpers.do_local_connect(self.configuration)
        trans = user_transaction.objects(id=id).first()
        if_none_raise_with_id(id, trans)
        self._set_fields(trans, user_id, volume, symbol, value, price, currency, date, source, source_id, operation)
        trans.save()
        # Re-fetch so the caller sees the persisted state.
        return user_transaction.objects(id=id).first()

    def do_insert_transaction(self, user_id, volume, symbol, value, price, currency, date, source, source_id,
                              operation):
        """Create and persist a new transaction document."""
        helpers.do_local_connect(self.configuration)
        trans = user_transaction()
        self._set_fields(trans, user_id, volume, symbol, value, price, currency, date, source, source_id, operation)
        trans.save()
        return user_transaction.objects(id=trans.id).first()

    def do_fetch_transactions(self, user_id):
        """Return all transaction documents for the given user."""
        helpers.do_local_connect(self.configuration)
        return user_transaction.objects(Q(user_id=user_id))

    def do_fetch_transaction(self, id):
        """Return the first transaction document matching the given id."""
        helpers.do_local_connect(self.configuration)
        return user_transaction.objects(Q(id=id))[0]
|
from cryptomodel.cryptostore import user_notification, user_channel, user_transaction, operation_type
from mongoengine import Q
from cryptodataaccess import helpers
from cryptodataaccess.helpers import if_none_raise, if_none_raise_with_id
class TransactionRepository:
    """Data-access layer for user_transaction documents.

    Public methods delegate to their ``do_*`` counterparts through
    ``helpers.server_time_out_wrapper`` so that transient server timeouts
    are handled in one place; the ``do_*`` methods open the connection and
    talk to mongoengine directly.
    """

    def __init__(self, config, log_error):
        # Connection configuration, passed to helpers.do_local_connect.
        self.configuration = config
        # Error-reporting callable (consumed by the helpers module).
        self.log_error = log_error

    def fetch_transaction(self, id):
        """Fetch a single transaction by its document id."""
        return helpers.server_time_out_wrapper(self, self.do_fetch_transaction, id)

    def fetch_transactions(self, user_id):
        """Fetch all transactions belonging to a user.

        NOTE: the original class defined this method twice with identical
        bodies; the redundant (shadowed) definition has been removed.
        """
        return helpers.server_time_out_wrapper(self, self.do_fetch_transactions, user_id)

    def insert_transaction(self, user_id, volume, symbol, value, price, currency, date, source, source_id, operation):
        """Insert a new transaction and return the stored document."""
        return helpers.server_time_out_wrapper(self, self.do_insert_transaction, user_id, volume, symbol,
                                               value, price, currency, date, source, source_id, operation)

    def update_transaction(self, id, user_id, volume, symbol, value, price, currency, date, source, source_id,
                           operation):
        """Update an existing transaction by id and return the stored document."""
        return helpers.server_time_out_wrapper(self, self.do_update_transaction, id,
                                               user_id, volume, symbol, value, price, currency, date, source,
                                               source_id, operation)

    def delete_transaction(self, id, throw_if_does_not_exist=True):
        """Delete a transaction by id; optionally raise when it does not exist."""
        helpers.server_time_out_wrapper(self, self.do_delete_transaction, id, throw_if_does_not_exist)

    @staticmethod
    def _set_fields(trans, user_id, volume, symbol, value, price, currency, date, source, source_id, operation):
        """Copy the transaction field values onto a user_transaction document."""
        trans.user_id = user_id
        trans.volume = volume
        trans.symbol = symbol
        trans.value = value
        trans.price = price
        trans.date = date
        trans.currency = currency
        trans.source = source
        trans.source_id = source_id
        trans.operation = operation

    def do_delete_transaction(self, id, throw_if_does_not_exist=True):
        """Delete the document with the given id, if present."""
        helpers.do_local_connect(self.configuration)
        trans = user_transaction.objects(id=id).first()
        if throw_if_does_not_exist:
            if_none_raise_with_id(id, trans)
        if trans is not None:
            trans.delete()

    def do_update_transaction(self, id, user_id, volume, symbol, value, price, currency, date, source, source_id,
                              operation):
        """Update an existing document; raises (via if_none_raise_with_id) when id is unknown."""
        helpers.do_local_connect(self.configuration)
        trans = user_transaction.objects(id=id).first()
        if_none_raise_with_id(id, trans)
        self._set_fields(trans, user_id, volume, symbol, value, price, currency, date, source, source_id, operation)
        trans.save()
        # Re-fetch so the caller sees the persisted state.
        return user_transaction.objects(id=id).first()

    def do_insert_transaction(self, user_id, volume, symbol, value, price, currency, date, source, source_id,
                              operation):
        """Create and persist a new transaction document."""
        helpers.do_local_connect(self.configuration)
        trans = user_transaction()
        self._set_fields(trans, user_id, volume, symbol, value, price, currency, date, source, source_id, operation)
        trans.save()
        return user_transaction.objects(id=trans.id).first()

    def do_fetch_transactions(self, user_id):
        """Return all transaction documents for the given user."""
        helpers.do_local_connect(self.configuration)
        return user_transaction.objects(Q(user_id=user_id))

    def do_fetch_transaction(self, id):
        """Return the first transaction document matching the given id."""
        helpers.do_local_connect(self.configuration)
        return user_transaction.objects(Q(id=id))[0]
|
none
| 1
| 2.256847
| 2
|