Dataset schema (column, dtype, and value range or number of classes; ⌀ marks nullable columns):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] ⌀ | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] ⌀ | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |

Sample rows follow, one field per line in the column order above, with `|` separating fields; `null` marks missing values.
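For context, here is a minimal sketch of how a dataset with this schema could be loaded and filtered with the Hugging Face `datasets` library. The dataset id `org/dataset-name` is a placeholder, not the actual identifier, and the filter criteria are only an illustration of using the columns above.

```python
# Minimal sketch, assuming a Hugging Face datasets-style layout.
# "org/dataset-name" is a placeholder; substitute the real dataset id.
from datasets import load_dataset

# Streaming avoids downloading the full `content` column up front.
ds = load_dataset("org/dataset-name", split="train", streaming=True)

# Keep only permissively licensed, non-vendored, non-generated Python files.
python_files = (
    row
    for row in ds
    if row["language"] == "Python"
    and row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

for row in python_files:
    print(row["repo_name"], row["path"], row["length_bytes"])
    break  # inspect a single record
```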
366ec5a806ff6bf11680dcbbaff0751281364ddd
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/801_Minimum_Swaps_To_Make_Sequences_Increasing.py
|
6fb8c63199f28bbbb5cf3751df594d4bdf08e015
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626
| 2022-03-14T08:39:47
| 2022-03-14T08:39:47
| 69,802,909
| 862
| 438
|
MIT
| 2022-08-18T06:42:46
| 2016-10-02T14:51:31
|
Python
|
UTF-8
|
Python
| false
| false
| 910
|
py
|
801_Minimum_Swaps_To_Make_Sequences_Increasing.py
|
# Source: https://tinyurl.com/tzx7wpv
class Solution(object):
def minSwap(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
arrLen = len(A)
swaps, noSwaps = [arrLen] * arrLen, [arrLen] * arrLen
swaps[0], noSwaps[0] = 1, 0
for i in range(1, arrLen):
currentNumA, prevNumA = A[i], A[i - 1]
currentNumB, prevNumB = B[i], B[i - 1]
if currentNumA > prevNumA and currentNumB > prevNumB:
noSwaps[i] = noSwaps[i - 1]
swaps[i] = swaps[i - 1] + 1
if currentNumB > prevNumA and currentNumA > prevNumB:
noSwaps[i] = min(noSwaps[i], swaps[i - 1]) # If we do not make the swap in this case
swaps[i] = min(swaps[i], noSwaps[i - 1] + 1) # If we make the swap in this case
return min(swaps[-1], noSwaps[-1])
|
b64b3ba780687ce21d8651cc566d0f92d8d0fb64
|
66fc3d58e94e8340a0d825501776a1dea37c0198
|
/tests/python/ContextTest.py
|
5cfc847fd890f9d10a4ed25fd7a2b4e41fc0b032
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
AcademySoftwareFoundation/OpenColorIO
|
dad370b54be147ae94f18ed6414d53bd76e9ef74
|
96f528fdfb7f9fb24388e33f6a968d29a3909cf8
|
refs/heads/main
| 2023-08-29T08:51:45.625957
| 2023-08-29T01:42:37
| 2023-08-29T01:42:37
| 775,131
| 843
| 236
|
BSD-3-Clause
| 2023-09-14T02:56:01
| 2010-07-14T18:22:06
|
C++
|
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
ContextTest.py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import copy, unittest, os, sys
import PyOpenColorIO as OCIO
class ContextTest(unittest.TestCase):
def test_copy(self):
"""
Test the deepcopy() method.
"""
cont = OCIO.Context()
cont.setSearchPath('testing123:testing456')
cont.setWorkingDir('/dir/123')
cont.setEnvironmentMode(OCIO.ENV_ENVIRONMENT_LOAD_PREDEFINED)
cont['TeSt'] = 'foobar'
cont['Bar'] = 'Foo'
other = copy.deepcopy(cont)
self.assertFalse(other is cont)
self.assertEqual(other.getCacheID(), cont.getCacheID())
self.assertEqual(other.getSearchPath(), cont.getSearchPath())
self.assertEqual(other.getWorkingDir(), cont.getWorkingDir())
self.assertEqual(other.getEnvironmentMode(), cont.getEnvironmentMode())
self.assertEqual(list(other), list(cont))
def test_interface(self):
"""
Construct and use Context.
"""
cont = OCIO.Context()
cont.setSearchPath('testing123')
cont.setWorkingDir('/dir/123')
self.assertEqual('c79df3338e491627cd7c7b3a9d6fb08f', cont.getCacheID())
self.assertEqual('testing123', cont.getSearchPath())
self.assertEqual('/dir/123', cont.getWorkingDir())
cont['TeSt'] = 'foobar'
self.assertEqual('foobar', cont['TeSt'])
self.assertEqual(1, len(cont))
cont_iter = iter(cont)
self.assertEqual(len(cont_iter), 1)
self.assertEqual(cont_iter[0], 'TeSt')
cont.loadEnvironment()
self.assertEqual(len(cont), 1)
cont['TEST1'] = 'foobar'
self.assertEqual(len(cont), 2)
self.assertEqual('/foo/foobar/bar', cont.resolveStringVar('/foo/${TEST1}/bar'))
cont.clearStringVars()
self.assertEqual(len(cont), 0)
self.assertEqual(OCIO.ENV_ENVIRONMENT_LOAD_PREDEFINED, cont.getEnvironmentMode())
cont.setEnvironmentMode(OCIO.ENV_ENVIRONMENT_LOAD_ALL)
self.assertEqual(OCIO.ENV_ENVIRONMENT_LOAD_ALL, cont.getEnvironmentMode())
cont.clearSearchPaths()
sp = cont.getSearchPaths()
self.assertEqual(len(sp), 0)
cont.addSearchPath('First/ Path')
self.assertEqual(len(sp), 1)
cont.addSearchPath('D:\\Second\\Path\\')
self.assertEqual(len(sp), 2)
self.assertEqual(next(sp), 'First/ Path')
self.assertEqual(next(sp), 'D:\\Second\\Path\\')
cont.setSearchPath('testing123')
with self.assertRaises(OCIO.ExceptionMissingFile):
foo = cont.resolveFileLocation('test.lut')
|
f7ee13a4f32148e4c656056221b627061bd9ecce
|
703b4a4c5e1baa34e0b09628db99bc3bc06d1b6e
|
/wgdi/peaksfit.py
|
6834a8e07eeb2e8430630caf6e7f9527c87b30a0
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
SunPengChuan/wgdi
|
2c1c4b6a44e5fb18fc6045b0d4494991150d08b2
|
333a3910f32a0e5c273f034ce26c1a53b2aa92d5
|
refs/heads/master
| 2023-06-01T04:21:49.055744
| 2023-05-11T13:34:41
| 2023-05-11T13:34:41
| 146,754,483
| 103
| 20
|
BSD-2-Clause
| 2021-03-27T19:23:55
| 2018-08-30T13:33:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
peaksfit.py
|
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy.stats import gaussian_kde, linregress
import wgdi.base as base
class peaksfit():
def __init__(self, options):
self.figsize = 10, 6.18
self.fontsize = 9
self.area = 0, 3
self.mode = 'median'
self.histogram_only = 'false'
for k, v in options:
setattr(self, str(k), v)
print(str(k), ' = ', v)
self.figsize = [float(k) for k in self.figsize.split(',')]
self.area = [float(k) for k in self.area.split(',')]
self.bins_number = int(self.bins_number)
self.peaks = 1
def ks_values(self, df):
df.loc[df['ks'].str.startswith('_'),'ks']= df.loc[df['ks'].str.startswith('_'),'ks'].str[1:]
ks = df['ks'].str.split('_')
ks_total = []
ks_average = []
for v in ks.values:
ks_total.extend([float(k) for k in v])
ks_average = df['ks_average'].values
ks_median = df['ks_median'].values
return [ks_median, ks_average, ks_total]
def gaussian_fuc(self, x, *params):
y = np.zeros_like(x)
for i in range(0, len(params), 3):
amp = float(params[i])
ctr = float(params[i+1])
wid = float(params[i+2])
y = y + amp * np.exp(-((x - ctr)/wid)**2)
return y
def kde_fit(self, data, x):
kde = gaussian_kde(data)
kde.set_bandwidth(bw_method=kde.factor/3.)
p = kde(x)
guess = [1,1, 1]*self.peaks
popt, pcov = curve_fit(self.gaussian_fuc, x, p, guess, maxfev = 80000)
popt = [abs(k) for k in popt]
data = []
y = self.gaussian_fuc(x, *popt)
for i in range(0, len(popt), 3):
array = [popt[i], popt[i+1], popt[i+2]]
data.append(self.gaussian_fuc(x, *array))
slope, intercept, r_value, p_value, std_err = linregress(p, y)
print("\nR-square: "+str(r_value**2))
print("The gaussian fitting curve parameters are :")
print(' | '.join([str(k) for k in popt]))
return y, data
def run(self):
plt.rcParams['ytick.major.pad'] = 0
fig, ax = plt.subplots(figsize=self.figsize)
bkinfo = pd.read_csv(self.blockinfo)
ks_median, ks_average, ks_total = self.ks_values(bkinfo)
data = eval('ks_'+self.mode)
data = [k for k in data if self.area[0] <= k <= self.area[1]]
x = np.linspace(self.area[0], self.area[1], self.bins_number)
n, bins, patches = ax.hist(data, int(
self.bins_number), density=1, facecolor='blue', alpha=0.3, label='Histogram')
if self.histogram_only == True or self.histogram_only.upper() == 'TRUE':
pass
else:
y, fit = self.kde_fit(data, x)
ax.plot(x, y, color='black', linestyle='-', label='Gaussian fitting')
ax.grid()
align = dict(family='Arial', verticalalignment="center",
horizontalalignment="center")
ax.set_xlabel(r'${K_{s}}$', fontsize=20)
ax.set_ylabel('Frequency', fontsize=20)
ax.tick_params(labelsize=18)
ax.legend(fontsize=20)
ax.set_xlim(self.area)
plt.subplots_adjust(left=0.09, right=0.96, top=0.93, bottom=0.12)
plt.savefig(self.savefig, dpi=500)
plt.show()
sys.exit(0)
|
48beca0df04aef322e311d32cd17f9dd08691024
|
6146e33102797407ede06ce2daa56c28fdfa2812
|
/contrib/ops/convertAnimCache.py
|
a14000509f26f9955baf8c4eb5825f3db466c254
|
[
"BSD-3-Clause"
] |
permissive
|
GafferHQ/gaffer
|
e1eb78ba8682bfbb7b17586d6e7b47988c3b7d64
|
59cab96598c59b90bee6d3fc1806492a5c03b4f1
|
refs/heads/main
| 2023-09-01T17:36:45.227956
| 2023-08-30T09:10:56
| 2023-08-30T09:10:56
| 9,043,124
| 707
| 144
|
BSD-3-Clause
| 2023-09-14T09:05:37
| 2013-03-27T00:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,648
|
py
|
convertAnimCache.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import glob
import IECore
class convertAnimCache( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self, "Converts animation caches from an old skool format to a nice new one.", IECore.FileSequenceParameter( "result", "" ) )
self.parameters().addParameters(
[
IECore.FileSequenceParameter(
"inputSequence",
"The animation sequence to convert.",
defaultValue = "",
allowEmptyString = False,
check = IECore.FileSequenceParameter.CheckType.MustExist,
extensions = "fio",
),
IECore.FileSequenceParameter(
"outputSequence",
"The animation sequence to create",
defaultValue = "",
allowEmptyString = False,
extensions = "fio",
),
],
)
def doOperation( self, args ) :
src = self.parameters()["inputSequence"].getFileSequenceValue()
dst = self.parameters()["outputSequence"].getFileSequenceValue()
# if no frame list is specified on the dst parameter, then we use the same as src parameter.
if isinstance( dst.frameList, IECore.EmptyFrameList ):
dst.frameList = src.frameList
for ( sf, df ) in zip( src.fileNames(), dst.fileNames() ) :
sc = IECore.AttributeCache( sf, IECore.IndexedIOOpenMode.Read )
dc = IECore.AttributeCache( df, IECore.IndexedIOOpenMode.Write )
combinedBound = IECore.Box3f()
for objectName in sc.objects() :
p = b = None
with IECore.IgnoredExceptions( Exception ) :
p = sc.read( objectName, "vertCache.P" )
b = sc.read( objectName, "vertCache.boundingBox" )
if p is not None and b is not None :
combinedBound.extendBy( b.value )
dc.write( "-" + objectName, "primVar:P", p )
dc.write( "-" + objectName, "bound", b )
dc.write( "-", "bound", IECore.Box3fData( combinedBound ) )
return args["outputSequence"].value
IECore.registerRunTimeTyped( convertAnimCache )
|
0799190515b8e4e9b12bd97abd2b2d3f57efdad1
|
ed62b03278a3dec0237e9a405e624baf11724469
|
/fastmri_recon/models/training/image_tboard_cback.py
|
7a5fd4006b8dbcfe57c1c5d810c733bdcfecf8f4
|
[
"MIT"
] |
permissive
|
zaccharieramzi/fastmri-reproducible-benchmark
|
f93b460bade2b6301caa9526e5c6385369971366
|
4a4ec09524437d11153fc5a525621783689bed38
|
refs/heads/master
| 2023-05-01T01:27:19.117953
| 2022-05-04T11:31:11
| 2022-05-04T11:31:11
| 193,113,083
| 147
| 50
|
MIT
| 2023-04-12T12:17:00
| 2019-06-21T14:36:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
image_tboard_cback.py
|
"""Inspired by https://github.com/sicara/tf-explain/blob/master/tf_explain/callbacks/grad_cam.py"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
class TensorBoardImage(Callback):
def __init__(self, log_dir, image, model_input):
super().__init__()
self.log_dir = log_dir
self.image = image
self.model_input = model_input
def set_model(self, model):
self.model = model
self.writer = tf.summary.create_file_writer(self.log_dir, filename_suffix='images')
def on_train_begin(self, _):
self.write_image(self.image, 'Original Image', 0)
def on_train_end(self, _):
self.writer.close()
def write_image(self, image, tag, epoch):
image_to_write = np.copy(image)
image_to_write -= image_to_write.min()
image_to_write /= image_to_write.max()
with self.writer.as_default():
tf.summary.image(tag, image_to_write, step=epoch)
def on_epoch_end(self, epoch, logs={}):
reconstructed_image = self.model.predict_on_batch(self.model_input)
if isinstance(reconstructed_image, list):
reconstructed_image = reconstructed_image[0]
self.write_image(reconstructed_image, 'Reconstructed Image', epoch)
|
f48cb365ce355863f126c744d7db4ddf878cfba8
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/common/py_vulcanize/third_party/rcssmin/bench/cssmin.py
|
cbfbf8d4966b33c3014ee33354cc73e8c2a963cd
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 7,426
|
py
|
cssmin.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""`cssmin` - A Python port of the YUI CSS compressor.
:Copyright:
Copyright 2011 - 2014
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
from StringIO import StringIO # The pure-Python StringIO supports unicode.
except ImportError:
from io import StringIO
import re
__version__ = '0.2.0'
def remove_comments(css):
"""Remove all CSS comment blocks."""
iemac = False
preserve = False
comment_start = css.find("/*")
while comment_start >= 0:
# Preserve comments that look like `/*!...*/`.
# Slicing is used to make sure we don't get an IndexError.
preserve = css[comment_start + 2:comment_start + 3] == "!"
comment_end = css.find("*/", comment_start + 2)
if comment_end < 0:
if not preserve:
css = css[:comment_start]
break
elif comment_end >= (comment_start + 2):
if css[comment_end - 1] == "\\":
# This is an IE Mac-specific comment; leave this one and the
# following one alone.
comment_start = comment_end + 2
iemac = True
elif iemac:
comment_start = comment_end + 2
iemac = False
elif not preserve:
css = css[:comment_start] + css[comment_end + 2:]
else:
comment_start = comment_end + 2
comment_start = css.find("/*", comment_start)
return css
def remove_unnecessary_whitespace(css):
"""Remove unnecessary whitespace characters."""
def pseudoclasscolon(css):
"""
Prevents 'p :link' from becoming 'p:link'.
Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is
translated back again later.
"""
regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
match = regex.search(css)
while match:
css = ''.join([
css[:match.start()],
match.group().replace(":", "___PSEUDOCLASSCOLON___"),
css[match.end():]])
match = regex.search(css)
return css
css = pseudoclasscolon(css)
# Remove spaces from before things.
css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css)
# If there is a `@charset`, then only allow one, and move to the beginning.
css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)
# Put the space back in for a few cases, such as `@media screen` and
# `(-webkit-min-device-pixel-ratio:0)`.
css = re.sub(r"\band\(", "and (", css)
# Put the colons back.
css = css.replace('___PSEUDOCLASSCOLON___', ':')
# Remove spaces from after things.
css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css)
return css
def remove_unnecessary_semicolons(css):
"""Remove unnecessary semicolons."""
return re.sub(r";+\}", "}", css)
def remove_empty_rules(css):
"""Remove empty rules."""
return re.sub(r"[^\}\{]+\{\}", "", css)
def normalize_rgb_colors_to_hex(css):
"""Convert `rgb(51,102,153)` to `#336699`."""
regex = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)")
match = regex.search(css)
while match:
colors = map(lambda s: s.strip(), match.group(1).split(","))
hexcolor = '#%.2x%.2x%.2x' % tuple(map(int, colors))
css = css.replace(match.group(), hexcolor)
match = regex.search(css)
return css
def condense_zero_units(css):
"""Replace `0(px, em, %, etc)` with `0`."""
return re.sub(r"([\s:])(0)(px|em|%|in|cm|mm|pc|pt|ex)", r"\1\2", css)
def condense_multidimensional_zeros(css):
"""Replace `:0 0 0 0;`, `:0 0 0;` etc. with `:0;`."""
css = css.replace(":0 0 0 0;", ":0;")
css = css.replace(":0 0 0;", ":0;")
css = css.replace(":0 0;", ":0;")
# Revert `background-position:0;` to the valid `background-position:0 0;`.
css = css.replace("background-position:0;", "background-position:0 0;")
return css
def condense_floating_points(css):
"""Replace `0.6` with `.6` where possible."""
return re.sub(r"(:|\s)0+\.(\d+)", r"\1.\2", css)
def condense_hex_colors(css):
"""Shorten colors from #AABBCC to #ABC where possible."""
regex = re.compile(r"([^\"'=\s])(\s*)#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])")
match = regex.search(css)
while match:
first = match.group(3) + match.group(5) + match.group(7)
second = match.group(4) + match.group(6) + match.group(8)
if first.lower() == second.lower():
css = css.replace(match.group(), match.group(1) + match.group(2) + '#' + first)
match = regex.search(css, match.end() - 3)
else:
match = regex.search(css, match.end())
return css
def condense_whitespace(css):
"""Condense multiple adjacent whitespace characters into one."""
return re.sub(r"\s+", " ", css)
def condense_semicolons(css):
"""Condense multiple adjacent semicolon characters into one."""
return re.sub(r";;+", ";", css)
def wrap_css_lines(css, line_length):
"""Wrap the lines of the given CSS to an approximate length."""
lines = []
line_start = 0
for i, char in enumerate(css):
# It's safe to break after `}` characters.
if char == '}' and (i - line_start >= line_length):
lines.append(css[line_start:i + 1])
line_start = i + 1
if line_start < len(css):
lines.append(css[line_start:])
return '\n'.join(lines)
def cssmin(css, wrap=None):
css = remove_comments(css)
css = condense_whitespace(css)
# A pseudo class for the Box Model Hack
# (see http://tantek.com/CSS/Examples/boxmodelhack.html)
css = css.replace('"\\"}\\""', "___PSEUDOCLASSBMH___")
css = remove_unnecessary_whitespace(css)
css = remove_unnecessary_semicolons(css)
css = condense_zero_units(css)
css = condense_multidimensional_zeros(css)
css = condense_floating_points(css)
css = normalize_rgb_colors_to_hex(css)
css = condense_hex_colors(css)
if wrap is not None:
css = wrap_css_lines(css, wrap)
css = css.replace("___PSEUDOCLASSBMH___", '"\\"}\\""')
css = condense_semicolons(css)
return css.strip()
def main():
import optparse
import sys
p = optparse.OptionParser(
prog="cssmin", version=__version__,
usage="%prog [--wrap N]",
description="""Reads raw CSS from stdin, and writes compressed CSS to stdout.""")
p.add_option(
'-w', '--wrap', type='int', default=None, metavar='N',
help="Wrap output to approximately N chars per line.")
options, args = p.parse_args()
sys.stdout.write(cssmin(sys.stdin.read(), wrap=options.wrap))
if __name__ == '__main__':
main()
|
8e51b75357cde464cbd05440c64e8847e55a6667
|
1534531d248728e583310214c84cd329cfeb243b
|
/accelerator/shell/ds.py
|
c1b3cf76833047362a980999a28f77cd126312af
|
[
"Apache-2.0"
] |
permissive
|
eBay/accelerator
|
415a006d18283940661c0f3cbae2c311acc1ffaa
|
8376d289e39cd90562de7dc2e3cdaa0bf080587b
|
refs/heads/master
| 2023-03-10T11:08:58.828517
| 2022-07-14T19:15:46
| 2022-07-14T19:15:46
| 130,265,539
| 146
| 30
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,762
|
py
|
ds.py
|
############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# Modifications copyright (c) 2019-2021 Carl Drougge #
# Modifications copyright (c) 2019-2021 Anders Berkeman #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import division, print_function
import sys
import locale
from datetime import datetime, time, date
from math import ceil, floor, log10, isinf, isnan
from accelerator.compat import ArgumentParser
from accelerator.compat import terminal_size
from .parser import name2ds, name2job
from accelerator.colourwrapper import colour
from accelerator.error import NoSuchWhateverError
from accelerator.extras import quote
MINMAXWIDTH = 13 # minimum number of characters reserved for min/max values
COLUMNS, LINES = terminal_size()
def colwidth(rows):
# find max string len per column
return [max(len(s) for s in col) for col in zip(*rows)]
def printcolwise(data, template, printfunc, minrows=8, indent=4):
if not data:
return
cols = (COLUMNS - indent) // (len(template.format(*printfunc(data[0]))) + 2)
n = int(ceil(len(data) / cols))
n = max(n, minrows)
for r in range(n):
v = data[r::n]
if v:
print(' ' * indent + ' '.join(template.format(*printfunc(x)) for x in v))
def main(argv, cfg):
usage = "%(prog)s [options] ds [ds [...]]"
parser = ArgumentParser(prog=argv.pop(0), usage=usage)
parser.add_argument('-c', '--chain', action='store_true', help='list all datasets in a chain')
parser.add_argument('-C', '--non-empty-chain', action='store_true', help='list all non-empty datasets in a chain')
parser.add_argument('-l', '--list', action='store_true', help='list all datasets in a job with number of rows')
parser.add_argument('-L', '--chainedlist', action='store_true', help='list all datasets in a job with number of chained rows')
parser.add_argument('-m', '--suppress-minmax', action='store_true', help='do not print min/max column values')
parser.add_argument('-n', '--suppress-columns', action='store_true', help='do not print columns')
parser.add_argument('-q', '--suppress-errors', action='store_true', help='silently ignores bad input datasets/jobids')
parser.add_argument('-s', '--slices', action='store_true', help='list relative number of lines per slice in sorted order')
parser.add_argument('-S', '--chainedslices', action='store_true', help='same as -s but for full chain')
parser.add_argument('-w', '--location', action='store_true', help='show where (ds/filename) each column is stored')
parser.add_argument("dataset", nargs='+', help='the job part of the dataset name can be specified in the same ways as for "ax job". you can use ds~ or ds~N to follow the chain N steps backwards, or ^ to follow .parent. this requires specifying the ds-name, so wd-1~ will not do this, but wd-1/default~ will.')
args = parser.parse_intermixed_args(argv)
args.chain = args.chain or args.non_empty_chain
def finish(badinput):
if badinput and not args.suppress_errors:
print('Error, failed to resolve datasets:', file=sys.stderr)
for n, e in badinput:
print(' %r: %s' % (n, e,), file=sys.stderr)
exit(1)
exit()
badinput = []
if args.list or args.chainedlist:
for n in args.dataset:
try:
try:
dsvec = name2ds(cfg, n).job.datasets
except NoSuchWhateverError:
dsvec = name2job(cfg, n).datasets
except Exception as e:
badinput.append((n, e))
dsvec = None
if dsvec:
print('%s' % (dsvec[0].job,))
v = []
for ds in dsvec:
if args.chainedlist:
lines = sum(sum(x.lines) for x in ds.chain())
else:
lines = sum(ds.lines)
v.append((ds.name, '{:n}'.format(lines)))
len_n, len_l = colwidth(v)
template = "{0:%d} ({1:>%d})" % (len_n, len_l)
for name, numlines in sorted(v):
print(' ' + template.format(name, numlines))
finish(badinput)
for n in args.dataset:
try:
ds = name2ds(cfg, n)
except NoSuchWhateverError as e:
badinput.append((n, e))
continue
print(ds.quoted)
if ds.parent:
if isinstance(ds.parent, tuple):
print(" Parents:")
max_n = max(len(x.quoted) for x in ds.parent)
template = "{1:%d}" % (max_n,)
data = tuple((None, x.quoted) for ix, x in enumerate(ds.parent))
data = sorted(data, key = lambda x: x[1])
printcolwise(data, template, lambda x: x, minrows=8, indent=8)
else:
print(" Parent:", ds.parent.quoted)
print(" Method:", quote(ds.job.method))
if ds.filename:
print(" Filename:", quote(ds.filename))
if ds.previous:
print(" Previous:", ds.previous.quoted)
if ds.hashlabel is not None:
print(" Hashlabel:", quote(ds.hashlabel))
def prettyminmax(minval, maxval):
if args.suppress_minmax:
return ''
s = '[%%%ds, %%%ds]' % (MINMAXWIDTH, MINMAXWIDTH)
if minval is None:
return ''
elif isinstance(minval, float):
def intdigits(x):
if isinf(x) or isnan(x):
return 3
return min(MINMAXWIDTH - 2, floor(log10(abs(x)) + 1)) if x else (MINMAXWIDTH - 2)//2
ints = max(intdigits(minval), intdigits(maxval))
if ints > 0:
format = "%% %d.%df" % (ints, MINMAXWIDTH - ints - 2)
elif ints < -4:
format = "%% .%de" % (MINMAXWIDTH - 7,)
else:
format = "%% .%df" % (MINMAXWIDTH - 3,)
def format_or_int(v):
try:
i = int(v)
if v == i:
return i
except (OverflowError, ValueError):
pass
return locale.format_string(format, v)
return s % (format_or_int(minval), format_or_int(maxval))
elif isinstance(minval, int):
return s % (minval, maxval)
elif isinstance(minval, (date, time, datetime)):
return s % (minval, maxval)
else:
return s % (minval, maxval)
if not args.suppress_columns:
print(" Columns:")
name2typ = {n: c.type + '+None' if c.none_support else c.type for n, c in ds.columns.items()}
len_n, len_t = colwidth((quote(n), name2typ[n]) for n, c in ds.columns.items())
if args.location:
len_l = max(len(quote(c.location)) for c in ds.columns.values())
len_c = max(len(c.compression) for c in ds.columns.values())
template = ' {2} {0:%d} {1:%d} {4:%d} {5:%d} {3}' % (len_n, len_t, len_l, len_c,)
else:
template = ' {2} {0:%d} {1:%d} {3}' % (len_n, len_t,)
chain = False
if args.chainedslices or args.chain:
chain = ds.chain()
for n, c in sorted(ds.columns.items()):
if chain:
minval, maxval = chain.min(n), chain.max(n)
else:
minval, maxval = c.min, c.max
hashdot = colour.bold("*") if n == ds.hashlabel else " "
print(template.format(quote(n), name2typ[n], hashdot, prettyminmax(minval, maxval), quote(c.location), c.compression).rstrip())
print(" {0:n} columns".format(len(ds.columns)))
print(" {0:n} lines".format(sum(ds.lines)))
if ds.previous or args.chain:
chain = ds.chain()
if args.non_empty_chain:
print(" Full chain length {0:n}, from {1} to {2}".format(len(chain), chain[0], chain[-1]))
chain = [ds for ds in chain if sum(ds.lines)]
print(" Filtered chain length {0:n}".format(len(chain)))
if chain:
if not args.non_empty_chain:
print(" Chain length {0:n}, from {1} to {2}".format(len(chain), chain[0], chain[-1]))
if args.chain:
data = tuple((ix, "%s/%s" % (x.job, x.name), "{:n}".format(sum(x.lines))) for ix, x in enumerate(chain))
max_n, max_l = colwidth(x[1:] for x in data)
template = "{0:3}: {1:%d} ({2:>%d})" % (max_n, max_l)
printcolwise(data, template, lambda x: (x[0], x[1], x[2]), minrows=8, indent=8)
if args.slices or args.chainedslices:
if args.chainedslices and ds.previous:
data = ((ix, '{:n}'.format(sum(x)), sum(x)) for ix, x in enumerate(zip(*(x.lines for x in ds.chain()))))
print(' Balance, lines per slice, full chain:')
else:
data = ((ix, '{:n}'.format(x), x) for ix, x in enumerate(ds.lines))
if ds.previous:
print(' Balance, lines per slice, tip dataset:')
else:
print(' Balance, lines per slice:')
data = sorted(data, key=lambda x: -x[2])
s = sum(x[2] for x in data)
len_n = max(len(x[1]) for x in data)
template = "{0:3}: {1!s}%% ({2:>%d})" % (len_n,)
printcolwise(data, template, lambda x: (x[0], locale.format_string("%6.2f", (100 * x[2] / (s or 1e20))), x[1]), minrows=8, indent=8)
print(" Max to average ratio: " + locale.format_string("%2.3f", (max(x[2] for x in data) / ((s or 1e20) / len(data)),) ))
if ds.previous:
print(" {0:n} total lines in chain".format(sum(sum(ds.lines) for ds in chain)))
finish(badinput)
|
99fb39002c8c4487e0da3413a88b2b1b8b059835
|
32809f6f425bf5665fc19de2bc929bacc3eeb469
|
/src/0873-Length-of-Longest-Fibonacci-Subsequence/0873.py
|
41b03b83352a55448b812b2b9f717659cc431133
|
[] |
no_license
|
luliyucoordinate/Leetcode
|
9f6bf01f79aa680e2dff11e73e4d10993467f113
|
bcc04d49969654cb44f79218a7ef2fd5c1e5449a
|
refs/heads/master
| 2023-05-25T04:58:45.046772
| 2023-05-24T11:57:20
| 2023-05-24T11:57:20
| 132,753,892
| 1,575
| 569
| null | 2023-05-24T11:57:22
| 2018-05-09T12:30:59
|
C++
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
0873.py
|
class Solution:
def lenLongestFibSubseq(self, A: List[int]) -> int:
A_s = set(A)
n = len(A)
res = 0
for i in range(n):
for j in range(i+1, n):
x, y = A[j], A[i] + A[j]
l = 2
if x*(res - 2) > A[-1]:
break
while y in A_s:
x, y = y, x+y
l += 1
res = max(res, l)
return res if res >= 3 else 0
|
abf4336317ead36c9713540591d3a83f27edf720
|
057a475216e9beed41983481aafcaf109bbf58da
|
/tests/integration/test_grpc_protocol/test.py
|
efc7d98e820ce87853b3457370284465a5ae003d
|
[
"Apache-2.0"
] |
permissive
|
ClickHouse/ClickHouse
|
fece5204263a5b4d693854b6039699265f1bb27f
|
6649328db809d51a694c358571539bc5820464be
|
refs/heads/master
| 2023-08-31T18:48:36.615225
| 2023-08-31T17:51:24
| 2023-08-31T17:51:24
| 60,246,359
| 23,878
| 5,449
|
Apache-2.0
| 2023-09-14T20:10:52
| 2016-06-02T08:28:18
|
C++
|
UTF-8
|
Python
| false
| false
| 24,784
|
py
|
test.py
|
import os
import pytest
import sys
import time
import pytz
import uuid
import grpc
from helpers.cluster import ClickHouseCluster, run_and_check
from threading import Thread
import gzip
import lz4.frame
GRPC_PORT = 9100
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"
# Use grpcio-tools to generate *pb2.py files from *.proto.
proto_dir = os.path.join(SCRIPT_DIR, "./protos")
gen_dir = os.path.join(SCRIPT_DIR, "./_gen")
os.makedirs(gen_dir, exist_ok=True)
run_and_check(
"python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \
{proto_dir}/clickhouse_grpc.proto".format(
proto_dir=proto_dir, gen_dir=gen_dir
),
shell=True,
)
sys.path.append(gen_dir)
import clickhouse_grpc_pb2
import clickhouse_grpc_pb2_grpc
# Utilities
config_dir = os.path.join(SCRIPT_DIR, "./configs")
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
main_configs=["configs/grpc_config.xml"],
# Bug in TSAN reproduces in this test https://github.com/grpc/grpc/issues/29550#issuecomment-1188085387
env_variables={
"TSAN_OPTIONS": "report_atomic_races=0 " + os.getenv("TSAN_OPTIONS", default="")
},
)
main_channel = None
def create_channel():
node_ip_with_grpc_port = cluster.get_instance_ip("node") + ":" + str(GRPC_PORT)
channel = grpc.insecure_channel(node_ip_with_grpc_port)
grpc.channel_ready_future(channel).result(timeout=10)
global main_channel
if not main_channel:
main_channel = channel
return channel
def query_common(
query_text,
settings={},
input_data=[],
input_data_delimiter="",
output_format="TabSeparated",
send_output_columns=False,
external_tables=[],
user_name="",
password="",
query_id="123",
session_id="",
stream_output=False,
channel=None,
):
if type(input_data) is not list:
input_data = [input_data]
if type(input_data_delimiter) is str:
input_data_delimiter = input_data_delimiter.encode(DEFAULT_ENCODING)
if not channel:
channel = main_channel
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(channel)
def query_info():
input_data_part = input_data.pop(0) if input_data else b""
if type(input_data_part) is str:
input_data_part = input_data_part.encode(DEFAULT_ENCODING)
return clickhouse_grpc_pb2.QueryInfo(
query=query_text,
settings=settings,
input_data=input_data_part,
input_data_delimiter=input_data_delimiter,
output_format=output_format,
send_output_columns=send_output_columns,
external_tables=external_tables,
user_name=user_name,
password=password,
query_id=query_id,
session_id=session_id,
next_query_info=bool(input_data),
)
def send_query_info():
yield query_info()
while input_data:
input_data_part = input_data.pop(0)
if type(input_data_part) is str:
input_data_part = input_data_part.encode(DEFAULT_ENCODING)
yield clickhouse_grpc_pb2.QueryInfo(
input_data=input_data_part, next_query_info=bool(input_data)
)
stream_input = len(input_data) > 1
if stream_input and stream_output:
return list(stub.ExecuteQueryWithStreamIO(send_query_info()))
elif stream_input:
return [stub.ExecuteQueryWithStreamInput(send_query_info())]
elif stream_output:
return list(stub.ExecuteQueryWithStreamOutput(query_info()))
else:
return [stub.ExecuteQuery(query_info())]
def query_no_errors(*args, **kwargs):
results = query_common(*args, **kwargs)
if results and results[-1].HasField("exception"):
raise Exception(results[-1].exception.display_text)
return results
def query(*args, **kwargs):
output = b""
for result in query_no_errors(*args, **kwargs):
output += result.output
return output.decode(DEFAULT_ENCODING)
def query_and_get_error(*args, **kwargs):
results = query_common(*args, **kwargs)
if not results or not results[-1].HasField("exception"):
raise Exception("Expected to be failed but succeeded!")
return results[-1].exception
def query_and_get_totals(*args, **kwargs):
totals = b""
for result in query_no_errors(*args, **kwargs):
totals += result.totals
return totals.decode(DEFAULT_ENCODING)
def query_and_get_extremes(*args, **kwargs):
extremes = b""
for result in query_no_errors(*args, **kwargs):
extremes += result.extremes
return extremes.decode(DEFAULT_ENCODING)
def query_and_get_logs(*args, **kwargs):
logs = ""
for result in query_no_errors(*args, **kwargs):
for log_entry in result.logs:
# print(log_entry)
logs += log_entry.text + "\n"
return logs
class QueryThread(Thread):
def __init__(
self, query_text, expected_output, query_id, use_separate_channel=False
):
Thread.__init__(self)
self.query_text = query_text
self.expected_output = expected_output
self.use_separate_channel = use_separate_channel
self.query_id = query_id
def run(self):
if self.use_separate_channel:
with create_channel() as channel:
assert (
query(self.query_text, query_id=self.query_id, channel=channel)
== self.expected_output
)
else:
assert (
query(self.query_text, query_id=self.query_id) == self.expected_output
)
@pytest.fixture(scope="module", autouse=True)
def start_cluster():
cluster.start()
try:
with create_channel() as channel:
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def reset_after_test():
yield
node.query_with_retry("DROP TABLE IF EXISTS t")
# Actual tests
def test_select_one():
assert query("SELECT 1") == "1\n"
def test_ordinary_query():
assert query("SELECT count() FROM numbers(100)") == "100\n"
def test_insert_query():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
query("INSERT INTO t VALUES (1),(2),(3)")
query("INSERT INTO t FORMAT TabSeparated 4\n5\n6\n")
query("INSERT INTO t VALUES", input_data="(7),(8)")
query("INSERT INTO t FORMAT TabSeparated", input_data="9\n10\n")
assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n"
def test_insert_query_streaming():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
query(
"INSERT INTO t VALUES",
input_data=["(1),(2),(3),", "(5),(4),(6),", "(7),(8),(9)"],
)
assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n7\n8\n9\n"
def test_insert_query_delimiter():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
query(
"INSERT INTO t FORMAT CSV 1\n2",
input_data=["3", "4\n5"],
input_data_delimiter="\n",
)
assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n"
query("DROP TABLE t")
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
query("INSERT INTO t FORMAT CSV 1\n2", input_data=["3", "4\n5"])
assert query("SELECT a FROM t ORDER BY a") == "1\n5\n234\n"
def test_insert_default_column():
query(
"CREATE TABLE t (a UInt8, b Int32 DEFAULT 100 - a, c String DEFAULT 'c') ENGINE = Memory"
)
query("INSERT INTO t (c, a) VALUES ('x',1),('y',2)")
query("INSERT INTO t (a) FORMAT TabSeparated", input_data="3\n4\n")
assert (
query("SELECT * FROM t ORDER BY a") == "1\t99\tx\n"
"2\t98\ty\n"
"3\t97\tc\n"
"4\t96\tc\n"
)
def test_insert_splitted_row():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
query("INSERT INTO t VALUES", input_data=["(1),(2),(", "3),(5),(4),(6)"])
assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n"
def test_output_format():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
query("INSERT INTO t VALUES (1),(2),(3)")
assert (
query("SELECT a FROM t ORDER BY a FORMAT JSONEachRow")
== '{"a":1}\n{"a":2}\n{"a":3}\n'
)
assert (
query("SELECT a FROM t ORDER BY a", output_format="JSONEachRow")
== '{"a":1}\n{"a":2}\n{"a":3}\n'
)
def test_totals_and_extremes():
query("CREATE TABLE t (x UInt8, y UInt8) ENGINE = Memory")
query("INSERT INTO t VALUES (1, 2), (2, 4), (3, 2), (3, 3), (3, 4)")
assert (
query("SELECT sum(x), y FROM t GROUP BY y WITH TOTALS") == "4\t2\n3\t3\n5\t4\n"
)
assert (
query_and_get_totals("SELECT sum(x), y FROM t GROUP BY y WITH TOTALS")
== "12\t0\n"
)
assert query("SELECT x, y FROM t") == "1\t2\n2\t4\n3\t2\n3\t3\n3\t4\n"
assert (
query_and_get_extremes("SELECT x, y FROM t", settings={"extremes": "1"})
== "1\t2\n3\t4\n"
)
def test_get_query_details():
result = list(
query_no_errors("CREATE TABLE t (a UInt8) ENGINE = Memory", query_id="123")
)[0]
assert result.query_id == "123"
pytz.timezone(result.time_zone)
assert result.output_format == ""
assert len(result.output_columns) == 0
assert result.output == b""
#
result = list(
query_no_errors("SELECT 'a', 1", query_id="", output_format="TabSeparated")
)[0]
uuid.UUID(result.query_id)
pytz.timezone(result.time_zone)
assert result.output_format == "TabSeparated"
assert len(result.output_columns) == 0
assert result.output == b"a\t1\n"
#
result = list(
query_no_errors(
"SELECT 'a' AS x, 1 FORMAT JSONEachRow",
query_id="",
send_output_columns=True,
)
)[0]
uuid.UUID(result.query_id)
pytz.timezone(result.time_zone)
assert result.output_format == "JSONEachRow"
assert ([(col.name, col.type) for col in result.output_columns]) == [
("x", "String"),
("1", "UInt8"),
]
assert result.output == b'{"x":"a","1":1}\n'
def test_errors_handling():
e = query_and_get_error("")
# print(e)
assert "Empty query" in e.display_text
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
e = query_and_get_error("CREATE TABLE t (a UInt8) ENGINE = Memory")
assert "Table default.t already exists" in e.display_text
def test_authentication():
query("CREATE USER OR REPLACE john IDENTIFIED BY 'qwe123'")
assert (
query("SELECT currentUser()", user_name="john", password="qwe123") == "john\n"
)
query("DROP USER john")
def test_logs():
query = "SELECT has(groupArray(number), 42) FROM numbers(1000000) SETTINGS max_block_size=100000"
logs = query_and_get_logs(
query,
settings={"send_logs_level": "debug"},
)
assert query in logs
assert "Read 1000000 rows" in logs
assert "Peak memory usage" in logs
def test_progress():
results = query_no_errors(
"SELECT number, sleep(0.31) FROM numbers(8) SETTINGS max_block_size=2, interactive_delay=100000",
stream_output=True,
)
results = list(results)
for result in results:
result.time_zone = ""
result.query_id = ""
# print(results)
# Note: We can't convert those messages to string like `results = str(results)` and then compare it as a string
# because str() can serialize a protobuf message with any order of fields.
expected_results = [
clickhouse_grpc_pb2.Result(
output_format="TabSeparated",
progress=clickhouse_grpc_pb2.Progress(
read_rows=2, read_bytes=16, total_rows_to_read=8
),
),
clickhouse_grpc_pb2.Result(output=b"0\t0\n1\t0\n"),
clickhouse_grpc_pb2.Result(
progress=clickhouse_grpc_pb2.Progress(read_rows=2, read_bytes=16)
),
clickhouse_grpc_pb2.Result(output=b"2\t0\n3\t0\n"),
clickhouse_grpc_pb2.Result(
progress=clickhouse_grpc_pb2.Progress(read_rows=2, read_bytes=16)
),
clickhouse_grpc_pb2.Result(output=b"4\t0\n5\t0\n"),
clickhouse_grpc_pb2.Result(
progress=clickhouse_grpc_pb2.Progress(read_rows=2, read_bytes=16)
),
clickhouse_grpc_pb2.Result(output=b"6\t0\n7\t0\n"),
clickhouse_grpc_pb2.Result(
stats=clickhouse_grpc_pb2.Stats(
rows=8,
blocks=4,
allocated_bytes=1092,
applied_limit=True,
rows_before_limit=8,
)
),
]
assert results == expected_results
def test_session_settings():
session_a = "session A"
session_b = "session B"
query("SET custom_x=1", session_id=session_a)
query("SET custom_y=2", session_id=session_a)
query("SET custom_x=3", session_id=session_b)
query("SET custom_y=4", session_id=session_b)
assert (
query(
"SELECT getSetting('custom_x'), getSetting('custom_y')",
session_id=session_a,
)
== "1\t2\n"
)
assert (
query(
"SELECT getSetting('custom_x'), getSetting('custom_y')",
session_id=session_b,
)
== "3\t4\n"
)
def test_session_temp_tables():
session_a = "session A"
session_b = "session B"
query("CREATE TEMPORARY TABLE my_temp_table(a Int8)", session_id=session_a)
query("INSERT INTO my_temp_table VALUES (10)", session_id=session_a)
assert query("SELECT * FROM my_temp_table", session_id=session_a) == "10\n"
query("CREATE TEMPORARY TABLE my_temp_table(a Int8)", session_id=session_b)
query("INSERT INTO my_temp_table VALUES (20)", session_id=session_b)
assert query("SELECT * FROM my_temp_table", session_id=session_b) == "20\n"
assert query("SELECT * FROM my_temp_table", session_id=session_a) == "10\n"
def test_no_session():
e = query_and_get_error("SET custom_x=1")
assert "There is no session" in e.display_text
e = query_and_get_error("CREATE TEMPORARY TABLE my_temp_table(a Int8)")
assert "There is no session" in e.display_text
def test_input_function():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
query(
"INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV",
input_data=["5,4\n", "8,11\n", "10,12\n"],
)
assert query("SELECT a FROM t ORDER BY a") == "20\n88\n120\n"
query(
"INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV 11,13"
)
assert query("SELECT a FROM t ORDER BY a") == "20\n88\n120\n143\n"
query(
"INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV 20,10\n",
input_data="15,15\n",
)
assert query("SELECT a FROM t ORDER BY a") == "20\n88\n120\n143\n200\n225\n"
def test_external_table():
columns = [
clickhouse_grpc_pb2.NameAndType(name="UserID", type="UInt64"),
clickhouse_grpc_pb2.NameAndType(name="UserName", type="String"),
]
ext1 = clickhouse_grpc_pb2.ExternalTable(
name="ext1",
columns=columns,
data=b"1\tAlex\n2\tBen\n3\tCarl\n",
format="TabSeparated",
)
assert (
query("SELECT * FROM ext1 ORDER BY UserID", external_tables=[ext1])
== "1\tAlex\n"
"2\tBen\n"
"3\tCarl\n"
)
ext2 = clickhouse_grpc_pb2.ExternalTable(
name="ext2", columns=columns, data=b"4,Daniel\n5,Ethan\n", format="CSV"
)
assert (
query(
"SELECT * FROM (SELECT * FROM ext1 UNION ALL SELECT * FROM ext2) ORDER BY UserID",
external_tables=[ext1, ext2],
)
== "1\tAlex\n"
"2\tBen\n"
"3\tCarl\n"
"4\tDaniel\n"
"5\tEthan\n"
)
unnamed_columns = [
clickhouse_grpc_pb2.NameAndType(type="UInt64"),
clickhouse_grpc_pb2.NameAndType(type="String"),
]
unnamed_table = clickhouse_grpc_pb2.ExternalTable(
columns=unnamed_columns, data=b"6\tGeorge\n7\tFred\n"
)
assert (
query("SELECT * FROM _data ORDER BY _2", external_tables=[unnamed_table])
== "7\tFred\n"
"6\tGeorge\n"
)
def test_external_table_streaming():
columns = [
clickhouse_grpc_pb2.NameAndType(name="UserID", type="UInt64"),
clickhouse_grpc_pb2.NameAndType(name="UserName", type="String"),
]
def send_query_info():
yield clickhouse_grpc_pb2.QueryInfo(
query="SELECT * FROM exts ORDER BY UserID",
external_tables=[
clickhouse_grpc_pb2.ExternalTable(
name="exts", columns=columns, data=b"1\tAlex\n2\tBen\n3\tCarl\n"
)
],
next_query_info=True,
)
yield clickhouse_grpc_pb2.QueryInfo(
external_tables=[
clickhouse_grpc_pb2.ExternalTable(
name="exts", data=b"4\tDaniel\n5\tEthan\n"
)
]
)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
result = stub.ExecuteQueryWithStreamInput(send_query_info())
assert (
result.output == b"1\tAlex\n"
b"2\tBen\n"
b"3\tCarl\n"
b"4\tDaniel\n"
b"5\tEthan\n"
)
def test_simultaneous_queries_same_channel():
threads = []
try:
for i in range(0, 100):
thread = QueryThread(
"SELECT sum(number) FROM numbers(10)",
expected_output="45\n",
query_id="sqA" + str(i),
)
threads.append(thread)
thread.start()
finally:
for thread in threads:
thread.join()
def test_simultaneous_queries_multiple_channels():
threads = []
try:
for i in range(0, 100):
thread = QueryThread(
"SELECT sum(number) FROM numbers(10)",
expected_output="45\n",
query_id="sqB" + str(i),
use_separate_channel=True,
)
threads.append(thread)
thread.start()
finally:
for thread in threads:
thread.join()
def test_cancel_while_processing_input():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
def send_query_info():
yield clickhouse_grpc_pb2.QueryInfo(
query="INSERT INTO t FORMAT TabSeparated",
input_data=b"1\n2\n3\n",
next_query_info=True,
)
yield clickhouse_grpc_pb2.QueryInfo(
input_data=b"4\n5\n6\n", next_query_info=True
)
yield clickhouse_grpc_pb2.QueryInfo(cancel=True)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
result = stub.ExecuteQueryWithStreamInput(send_query_info())
assert result.cancelled == True
def test_cancel_while_generating_output():
def send_query_info():
yield clickhouse_grpc_pb2.QueryInfo(
query="SELECT number, sleep(0.2) FROM numbers(10) SETTINGS max_block_size=2"
)
time.sleep(0.5)
yield clickhouse_grpc_pb2.QueryInfo(cancel=True)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
results = list(stub.ExecuteQueryWithStreamIO(send_query_info()))
assert len(results) >= 1
assert results[-1].cancelled == True
output = b""
for result in results:
output += result.output
assert output == b"0\t0\n1\t0\n2\t0\n3\t0\n"
def test_compressed_output():
query_info = clickhouse_grpc_pb2.QueryInfo(
query="SELECT 0 FROM numbers(1000)", output_compression_type="lz4"
)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
result = stub.ExecuteQuery(query_info)
assert lz4.frame.decompress(result.output) == (b"0\n") * 1000
def test_compressed_output_streaming():
query_info = clickhouse_grpc_pb2.QueryInfo(
query="SELECT 0 FROM numbers(100000)", output_compression_type="lz4"
)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
d_context = lz4.frame.create_decompression_context()
data = b""
for result in stub.ExecuteQueryWithStreamOutput(query_info):
d1, _, _ = lz4.frame.decompress_chunk(d_context, result.output)
data += d1
assert data == (b"0\n") * 100000
def test_compressed_output_gzip():
query_info = clickhouse_grpc_pb2.QueryInfo(
query="SELECT 0 FROM numbers(1000)",
output_compression_type="gzip",
output_compression_level=6,
)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
result = stub.ExecuteQuery(query_info)
assert gzip.decompress(result.output) == (b"0\n") * 1000
def test_compressed_totals_and_extremes():
query("CREATE TABLE t (x UInt8, y UInt8) ENGINE = Memory")
query("INSERT INTO t VALUES (1, 2), (2, 4), (3, 2), (3, 3), (3, 4)")
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
query_info = clickhouse_grpc_pb2.QueryInfo(
query="SELECT sum(x), y FROM t GROUP BY y WITH TOTALS",
output_compression_type="lz4",
)
result = stub.ExecuteQuery(query_info)
assert lz4.frame.decompress(result.totals) == b"12\t0\n"
query_info = clickhouse_grpc_pb2.QueryInfo(
query="SELECT x, y FROM t",
settings={"extremes": "1"},
output_compression_type="lz4",
)
result = stub.ExecuteQuery(query_info)
assert lz4.frame.decompress(result.extremes) == b"1\t2\n3\t4\n"
def test_compressed_insert_query_streaming():
query("CREATE TABLE t (a UInt8) ENGINE = Memory")
data = lz4.frame.compress(b"(1),(2),(3),(5),(4),(6),(7),(8),(9)")
sz1 = len(data) // 3
sz2 = len(data) // 3
d1 = data[:sz1]
d2 = data[sz1 : sz1 + sz2]
d3 = data[sz1 + sz2 :]
def send_query_info():
yield clickhouse_grpc_pb2.QueryInfo(
query="INSERT INTO t VALUES",
input_data=d1,
input_compression_type="lz4",
next_query_info=True,
)
yield clickhouse_grpc_pb2.QueryInfo(input_data=d2, next_query_info=True)
yield clickhouse_grpc_pb2.QueryInfo(input_data=d3)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
stub.ExecuteQueryWithStreamInput(send_query_info())
assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n7\n8\n9\n"
def test_compressed_external_table():
columns = [
clickhouse_grpc_pb2.NameAndType(name="UserID", type="UInt64"),
clickhouse_grpc_pb2.NameAndType(name="UserName", type="String"),
]
d1 = lz4.frame.compress(b"1\tAlex\n2\tBen\n3\tCarl\n")
d2 = gzip.compress(b"4,Daniel\n5,Ethan\n")
ext1 = clickhouse_grpc_pb2.ExternalTable(
name="ext1",
columns=columns,
data=d1,
format="TabSeparated",
compression_type="lz4",
)
ext2 = clickhouse_grpc_pb2.ExternalTable(
name="ext2", columns=columns, data=d2, format="CSV", compression_type="gzip"
)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
query_info = clickhouse_grpc_pb2.QueryInfo(
query="SELECT * FROM (SELECT * FROM ext1 UNION ALL SELECT * FROM ext2) ORDER BY UserID",
external_tables=[ext1, ext2],
)
result = stub.ExecuteQuery(query_info)
assert (
result.output == b"1\tAlex\n"
b"2\tBen\n"
b"3\tCarl\n"
b"4\tDaniel\n"
b"5\tEthan\n"
)
def test_transport_compression():
query_info = clickhouse_grpc_pb2.QueryInfo(
query="SELECT 0 FROM numbers(1000000)",
transport_compression_type="gzip",
transport_compression_level=3,
)
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
result = stub.ExecuteQuery(query_info)
assert result.output == (b"0\n") * 1000000
def test_opentelemetry_context_propagation():
trace_id = "80c190b5-9dc1-4eae-82b9-6c261438c817"
parent_span_id = 123
trace_state = "some custom state"
trace_id_hex = trace_id.replace("-", "")
parent_span_id_hex = f"{parent_span_id:0>16X}"
metadata = [
("traceparent", f"00-{trace_id_hex}-{parent_span_id_hex}-01"),
("tracestate", trace_state),
]
stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel)
query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT 1")
result = stub.ExecuteQuery(query_info, metadata=metadata)
assert result.output == b"1\n"
node.query("SYSTEM FLUSH LOGS")
assert (
node.query(
f"SELECT attribute['db.statement'], attribute['clickhouse.tracestate'] FROM system.opentelemetry_span_log "
f"WHERE trace_id='{trace_id}' AND operation_name='query'"
)
== "SELECT 1\tsome custom state\n"
)
|
184a910ba8181ec327296287f0df4fe4cb052c8b
|
091a6200be74bf6577c86f623665bcc24e16b02b
|
/PyGamer_Improved_Thermal_Camera/code.py
|
d14feb1d8c2ca886d39441448ff7cbce1dd929c7
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Learning_System_Guides
|
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
|
5eaa7a15a437c533b89f359a25983e24bb6b5438
|
refs/heads/main
| 2023-09-05T18:31:41.621956
| 2023-09-05T15:36:09
| 2023-09-05T15:36:09
| 105,065,494
| 937
| 937
|
MIT
| 2023-09-12T18:48:53
| 2017-09-27T20:22:44
|
C
|
UTF-8
|
Python
| false
| false
| 18,544
|
py
|
code.py
|
# SPDX-FileCopyrightText: 2022 Jan Goolsbey for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
`thermalcamera`
================================================================================
PyGamer/PyBadge Thermal Camera Project
"""
import time
import gc
import board
import keypad
import busio
from ulab import numpy as np
import displayio
import neopixel
from analogio import AnalogIn
from digitalio import DigitalInOut
from simpleio import map_range, tone
from adafruit_display_text.label import Label
from adafruit_bitmap_font import bitmap_font
from adafruit_display_shapes.rect import Rect
import adafruit_amg88xx
from index_to_rgb.iron import index_to_rgb
from thermalcamera_converters import celsius_to_fahrenheit, fahrenheit_to_celsius
from thermalcamera_config import ALARM_F, MIN_RANGE_F, MAX_RANGE_F, SELFIE
# Instantiate the integral display and define its size
display = board.DISPLAY
display.brightness = 1.0
WIDTH = display.width
HEIGHT = display.height
# Load the text font from the fonts folder
font_0 = bitmap_font.load_font("/fonts/OpenSans-9.bdf")
# Instantiate the joystick if available
if hasattr(board, "JOYSTICK_X"):
# PyGamer with joystick
HAS_JOYSTICK = True
joystick_x = AnalogIn(board.JOYSTICK_X)
joystick_y = AnalogIn(board.JOYSTICK_Y)
else:
# PyBadge with buttons
HAS_JOYSTICK = False # PyBadge with buttons
# Enable the speaker
DigitalInOut(board.SPEAKER_ENABLE).switch_to_output(value=True)
# Instantiate and clear the NeoPixels
pixels = neopixel.NeoPixel(board.NEOPIXEL, 5, pixel_order=neopixel.GRB)
pixels.brightness = 0.25
pixels.fill(0x000000)
# Initialize ShiftRegisterKeys to read PyGamer/PyBadge buttons
panel = keypad.ShiftRegisterKeys(
clock=board.BUTTON_CLOCK,
data=board.BUTTON_OUT,
latch=board.BUTTON_LATCH,
key_count=8,
value_when_pressed=True,
)
# Define front panel button event values
BUTTON_LEFT = 7 # LEFT button
BUTTON_UP = 6 # UP button
BUTTON_DOWN = 5 # DOWN button
BUTTON_RIGHT = 4 # RIGHT button
BUTTON_FOCUS = 3 # SELECT button
BUTTON_SET = 2 # START button
BUTTON_HOLD = 1 # button A
BUTTON_IMAGE = 0 # button B
# Initiate the AMG8833 Thermal Camera
i2c = busio.I2C(board.SCL, board.SDA, frequency=400000)
amg8833 = adafruit_amg88xx.AMG88XX(i2c)
# Display splash graphics
splash = displayio.Group(scale=display.width // 160)
bitmap = displayio.OnDiskBitmap("/thermalcamera_splash.bmp")
splash.append(displayio.TileGrid(bitmap, pixel_shader=bitmap.pixel_shader))
board.DISPLAY.show(splash)
# Thermal sensor grid axis size; AMG8833 sensor is 8x8
SENSOR_AXIS = 8
# Display grid parameters
GRID_AXIS = (2 * SENSOR_AXIS) - 1 # Number of cells per axis
GRID_SIZE = HEIGHT # Axis size (pixels) for a square grid
GRID_X_OFFSET = WIDTH - GRID_SIZE # Right-align grid with display boundary
CELL_SIZE = GRID_SIZE // GRID_AXIS # Size of a grid cell in pixels
PALETTE_SIZE = 100 # Number of display colors in spectral palette (must be > 0)
# Set up the 2-D sensor data narray
SENSOR_DATA = np.array(range(SENSOR_AXIS**2)).reshape((SENSOR_AXIS, SENSOR_AXIS))
# Set up and load the 2-D display color index narray with a spectrum
GRID_DATA = np.array(range(GRID_AXIS**2)).reshape((GRID_AXIS, GRID_AXIS)) / (
GRID_AXIS**2
)
# Set up the histogram accumulation narray
# HISTOGRAM = np.zeros(GRID_AXIS)
# Convert default alarm and min/max range values from config file
ALARM_C = fahrenheit_to_celsius(ALARM_F)
MIN_RANGE_C = fahrenheit_to_celsius(MIN_RANGE_F)
MAX_RANGE_C = fahrenheit_to_celsius(MAX_RANGE_F)
# Default colors for temperature value sidebar
BLACK = 0x000000
RED = 0xFF0000
YELLOW = 0xFFFF00
CYAN = 0x00FFFF
BLUE = 0x0000FF
WHITE = 0xFFFFFF
# Text colors for setup helper's on-screen parameters
SETUP_COLORS = [("ALARM", WHITE), ("RANGE", RED), ("RANGE", CYAN)]
# ### Helpers ###
def play_tone(freq=440, duration=0.01):
"""Play a tone over the speaker"""
tone(board.A0, freq, duration)
def flash_status(text="", duration=0.05):
"""Flash status message once"""
status_label.color = WHITE
status_label.text = text
time.sleep(duration)
status_label.color = BLACK
time.sleep(duration)
status_label.text = ""
def update_image_frame(selfie=False):
"""Get camera data and update display"""
for _row in range(0, GRID_AXIS):
for _col in range(0, GRID_AXIS):
if selfie:
color_index = GRID_DATA[GRID_AXIS - 1 - _row][_col]
else:
color_index = GRID_DATA[GRID_AXIS - 1 - _row][GRID_AXIS - 1 - _col]
color = index_to_rgb(round(color_index * PALETTE_SIZE, 0) / PALETTE_SIZE)
if color != image_group[((_row * GRID_AXIS) + _col)].fill:
image_group[((_row * GRID_AXIS) + _col)].fill = color
def update_histo_frame():
"""Calculate and display histogram"""
min_histo.text = str(MIN_RANGE_F) # Display the legend
max_histo.text = str(MAX_RANGE_F)
histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array
# Collect camera data and calculate the histogram
for _row in range(0, GRID_AXIS):
for _col in range(0, GRID_AXIS):
histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))
histogram[histo_index] = histogram[histo_index] + 1
histo_scale = np.max(histogram) / (GRID_AXIS - 1)
if histo_scale <= 0:
histo_scale = 1
# Display the histogram
for _col in range(0, GRID_AXIS):
for _row in range(0, GRID_AXIS):
if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:
image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(
round((_col / GRID_AXIS), 3)
)
else:
image_group[((_row * GRID_AXIS) + _col)].fill = BLACK
def ulab_bilinear_interpolation():
"""2x bilinear interpolation to upscale the sensor data array; by @v923z
and @David.Glaude."""
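    # The even rows/columns of GRID_DATA already hold the raw sensor values
    # (copied in the main loop); fill the odd rows with the average of the
    # sensor rows above and below, then the odd columns with the average of
    # the neighboring grid columns.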
GRID_DATA[1::2, ::2] = SENSOR_DATA[:-1, :]
GRID_DATA[1::2, ::2] += SENSOR_DATA[1:, :]
GRID_DATA[1::2, ::2] /= 2
GRID_DATA[::, 1::2] = GRID_DATA[::, :-1:2]
GRID_DATA[::, 1::2] += GRID_DATA[::, 2::2]
GRID_DATA[::, 1::2] /= 2
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def setup_mode():
"""Change alarm threshold and minimum/maximum range values"""
status_label.color = WHITE
status_label.text = "-SET-"
ave_label.color = BLACK # Turn off average label and value display
ave_value.color = BLACK
max_value.text = str(MAX_RANGE_F) # Display maximum range value
min_value.text = str(MIN_RANGE_F) # Display minimum range value
time.sleep(0.8) # Show SET status text before setting parameters
status_label.text = "" # Clear status text
param_index = 0 # Reset index of parameter to set
setup_state = "SETUP" # Set initial state
while setup_state == "SETUP":
# Select parameter to set
setup_state = "SELECT_PARAM" # Parameter selection state
while setup_state == "SELECT_PARAM":
param_index = max(0, min(2, param_index))
status_label.text = SETUP_COLORS[param_index][0]
image_group[param_index + 226].color = BLACK
status_label.color = BLACK
time.sleep(0.25)
image_group[param_index + 226].color = SETUP_COLORS[param_index][1]
status_label.color = WHITE
time.sleep(0.25)
param_index -= get_joystick()
_buttons = panel.events.get()
if _buttons and _buttons.pressed:
                if _buttons.key_number == BUTTON_UP:  # UP button pressed
                    param_index = param_index - 1
                if _buttons.key_number == BUTTON_DOWN:  # DOWN button pressed
                    param_index = param_index + 1
if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed
play_tone(1319, 0.030) # Musical note E6
setup_state = "ADJUST_VALUE" # Next state
if _buttons.key_number == BUTTON_SET: # SET button pressed
play_tone(1319, 0.030) # Musical note E6
setup_state = "EXIT" # Next state
# Adjust parameter value
param_value = int(image_group[param_index + 230].text)
while setup_state == "ADJUST_VALUE":
param_value = max(32, min(157, param_value))
image_group[param_index + 230].text = str(param_value)
image_group[param_index + 230].color = BLACK
status_label.color = BLACK
time.sleep(0.05)
image_group[param_index + 230].color = SETUP_COLORS[param_index][1]
status_label.color = WHITE
time.sleep(0.2)
param_value += get_joystick()
_buttons = panel.events.get()
if _buttons and _buttons.pressed:
                if _buttons.key_number == BUTTON_UP:  # UP button pressed
                    param_value = param_value + 1
                if _buttons.key_number == BUTTON_DOWN:  # DOWN button pressed
                    param_value = param_value - 1
if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed
play_tone(1319, 0.030) # Musical note E6
setup_state = "SETUP" # Next state
if _buttons.key_number == BUTTON_SET: # SET button pressed
play_tone(1319, 0.030) # Musical note E6
setup_state = "EXIT" # Next state
# Exit setup process
status_label.text = "RESUME"
time.sleep(0.5)
status_label.text = ""
# Display average label and value
ave_label.color = YELLOW
ave_value.color = YELLOW
return int(alarm_value.text), int(max_value.text), int(min_value.text)
def get_joystick():
"""Read the joystick and interpret as up/down buttons (PyGamer)"""
if HAS_JOYSTICK:
if joystick_y.value < 20000:
# Up
return 1
if joystick_y.value > 44000:
# Down
return -1
return 0
play_tone(440, 0.1) # Musical note A4
play_tone(880, 0.1) # Musical note A5
# ### Define the display group ###
mkr_t0 = time.monotonic() # Time marker: Define Display Elements
image_group = displayio.Group(scale=1)
# Define the foundational thermal image grid cells; image_group[0:224]
# image_group[#] = image_group[ (row * GRID_AXIS) + column ]
for row in range(0, GRID_AXIS):
for col in range(0, GRID_AXIS):
cell_x = (col * CELL_SIZE) + GRID_X_OFFSET
cell_y = row * CELL_SIZE
cell = Rect(
x=cell_x,
y=cell_y,
width=CELL_SIZE,
height=CELL_SIZE,
fill=None,
outline=None,
stroke=0,
)
image_group.append(cell)
# Define labels and values
status_label = Label(font_0, text="", color=None)
status_label.anchor_point = (0.5, 0.5)
status_label.anchored_position = ((WIDTH // 2) + (GRID_X_OFFSET // 2), HEIGHT // 2)
image_group.append(status_label) # image_group[225]
alarm_label = Label(font_0, text="alm", color=WHITE)
alarm_label.anchor_point = (0, 0)
alarm_label.anchored_position = (1, 16)
image_group.append(alarm_label) # image_group[226]
max_label = Label(font_0, text="max", color=RED)
max_label.anchor_point = (0, 0)
max_label.anchored_position = (1, 46)
image_group.append(max_label) # image_group[227]
min_label = Label(font_0, text="min", color=CYAN)
min_label.anchor_point = (0, 0)
min_label.anchored_position = (1, 106)
image_group.append(min_label) # image_group[228]
ave_label = Label(font_0, text="ave", color=YELLOW)
ave_label.anchor_point = (0, 0)
ave_label.anchored_position = (1, 76)
image_group.append(ave_label) # image_group[229]
alarm_value = Label(font_0, text=str(ALARM_F), color=WHITE)
alarm_value.anchor_point = (0, 0)
alarm_value.anchored_position = (1, 5)
image_group.append(alarm_value) # image_group[230]
max_value = Label(font_0, text=str(MAX_RANGE_F), color=RED)
max_value.anchor_point = (0, 0)
max_value.anchored_position = (1, 35)
image_group.append(max_value) # image_group[231]
min_value = Label(font_0, text=str(MIN_RANGE_F), color=CYAN)
min_value.anchor_point = (0, 0)
min_value.anchored_position = (1, 95)
image_group.append(min_value) # image_group[232]
ave_value = Label(font_0, text="---", color=YELLOW)
ave_value.anchor_point = (0, 0)
ave_value.anchored_position = (1, 65)
image_group.append(ave_value) # image_group[233]
min_histo = Label(font_0, text="", color=None)
min_histo.anchor_point = (0, 0.5)
min_histo.anchored_position = (GRID_X_OFFSET, 121)
image_group.append(min_histo) # image_group[234]
max_histo = Label(font_0, text="", color=None)
max_histo.anchor_point = (1, 0.5)
max_histo.anchored_position = (WIDTH - 2, 121)
image_group.append(max_histo) # image_group[235]
range_histo = Label(font_0, text="-RANGE-", color=None)
range_histo.anchor_point = (0.5, 0.5)
range_histo.anchored_position = ((WIDTH // 2) + (GRID_X_OFFSET // 2), 121)
image_group.append(range_histo) # image_group[236]
# ###--- PRIMARY PROCESS SETUP ---###
mkr_t1 = time.monotonic() # Time marker: Primary Process Setup
# pylint: disable=no-member
mem_fm1 = gc.mem_free() # Monitor free memory
DISPLAY_IMAGE = True # Image display mode; False for histogram
DISPLAY_HOLD = False # Active display mode; True to hold display
DISPLAY_FOCUS = False # Standard display range; True to focus display range
# pylint: disable=invalid-name
orig_max_range_f = 0 # Establish temporary range variables
orig_min_range_f = 0
# Activate display, show preloaded sample spectrum, and play welcome tone
display.show(image_group)
update_image_frame()
flash_status("IRON", 0.75)
play_tone(880, 0.010) # Musical note A5
# ###--- PRIMARY PROCESS LOOP ---###
while True:
mkr_t2 = time.monotonic() # Time marker: Acquire Sensor Data
if DISPLAY_HOLD:
flash_status("-HOLD-", 0.25)
else:
        sensor = amg8833.pixels  # Get the sensor data
# Put sensor data in array; limit to the range of 0, 80
SENSOR_DATA = np.clip(np.array(sensor), 0, 80)
# Update and display alarm setting and max, min, and ave stats
mkr_t4 = time.monotonic() # Time marker: Display Statistics
v_max = np.max(SENSOR_DATA)
v_min = np.min(SENSOR_DATA)
v_ave = np.mean(SENSOR_DATA)
alarm_value.text = str(ALARM_F)
max_value.text = str(celsius_to_fahrenheit(v_max))
min_value.text = str(celsius_to_fahrenheit(v_min))
ave_value.text = str(celsius_to_fahrenheit(v_ave))
# Normalize temperature to index values and interpolate
mkr_t5 = time.monotonic() # Time marker: Normalize and Interpolate
SENSOR_DATA = (SENSOR_DATA - MIN_RANGE_C) / (MAX_RANGE_C - MIN_RANGE_C)
GRID_DATA[::2, ::2] = SENSOR_DATA # Copy sensor data to the grid array
ulab_bilinear_interpolation() # Interpolate to produce 15x15 result
# Display image or histogram
mkr_t6 = time.monotonic() # Time marker: Display Image
if DISPLAY_IMAGE:
update_image_frame(selfie=SELFIE)
else:
update_histo_frame()
# If alarm threshold is reached, flash NeoPixels and play alarm tone
if v_max >= ALARM_C:
pixels.fill(RED)
play_tone(880, 0.015) # Musical note A5
pixels.fill(BLACK)
# See if a panel button is pressed
buttons = panel.events.get()
if buttons and buttons.pressed:
if buttons.key_number == BUTTON_HOLD:
# Toggle display hold (shutter)
play_tone(1319, 0.030) # Musical note E6
DISPLAY_HOLD = not DISPLAY_HOLD
if buttons.key_number == BUTTON_IMAGE:
# Toggle image/histogram mode (display image)
play_tone(659, 0.030) # Musical note E5
DISPLAY_IMAGE = not DISPLAY_IMAGE
if DISPLAY_IMAGE:
min_histo.color = None
max_histo.color = None
range_histo.color = None
else:
min_histo.color = CYAN
max_histo.color = RED
range_histo.color = BLUE
if buttons.key_number == BUTTON_FOCUS: # Toggle display focus mode
play_tone(698, 0.030) # Musical note F5
DISPLAY_FOCUS = not DISPLAY_FOCUS
if DISPLAY_FOCUS:
# Set range values to image min/max for focused image display
orig_min_range_f = MIN_RANGE_F
orig_max_range_f = MAX_RANGE_F
MIN_RANGE_F = celsius_to_fahrenheit(v_min)
MAX_RANGE_F = celsius_to_fahrenheit(v_max)
# Update range min and max values in Celsius
MIN_RANGE_C = v_min
MAX_RANGE_C = v_max
flash_status("FOCUS", 0.2)
else:
# Restore previous (original) range values for image display
MIN_RANGE_F = orig_min_range_f
MAX_RANGE_F = orig_max_range_f
# Update range min and max values in Celsius
MIN_RANGE_C = fahrenheit_to_celsius(MIN_RANGE_F)
MAX_RANGE_C = fahrenheit_to_celsius(MAX_RANGE_F)
flash_status("ORIG", 0.2)
if buttons.key_number == BUTTON_SET:
# Activate setup mode
play_tone(784, 0.030) # Musical note G5
            # Invoke the setup helper; update alarm and range values
ALARM_F, MAX_RANGE_F, MIN_RANGE_F = setup_mode()
ALARM_C = fahrenheit_to_celsius(ALARM_F)
MIN_RANGE_C = fahrenheit_to_celsius(MIN_RANGE_F)
MAX_RANGE_C = fahrenheit_to_celsius(MAX_RANGE_F)
mkr_t7 = time.monotonic() # Time marker: End of Primary Process
gc.collect()
mem_fm7 = gc.mem_free()
# Print frame performance report
print("*** PyBadge/Gamer Performance Stats ***")
print(f" define display: {(mkr_t1 - mkr_t0):6.3f} sec")
print(f" free memory: {mem_fm1 / 1000:6.3f} Kb")
print("")
print(" rate")
print(f" 1) acquire: {(mkr_t4 - mkr_t2):6.3f} sec ", end="")
print(f"{(1 / (mkr_t4 - mkr_t2)):5.1f} /sec")
print(f" 2) stats: {(mkr_t5 - mkr_t4):6.3f} sec")
print(f" 3) convert: {(mkr_t6 - mkr_t5):6.3f} sec")
print(f" 4) display: {(mkr_t7 - mkr_t6):6.3f} sec")
print(" =======")
print(f"total frame: {(mkr_t7 - mkr_t2):6.3f} sec ", end="")
print(f"{(1 / (mkr_t7 - mkr_t2)):5.1f} /sec")
print(f" free memory: {mem_fm7 / 1000:6.3f} Kb")
print("")
--------------------------------------------------------------------------------
Dataset record: parser.py (mcclure/bitbucket-backup)
  blob_id: a1b5b68958d3eec2a719a580320b133816dfcc2d | directory_id: ab7d5ec2e40b26c33da957210b5d2da77f9b696d
  path: /repos/emily-python/contents/py/parser.py | content_id: daf8b8f3d03b532056999c5e126cf816210aed81
  detected_licenses: [] | license_type: no_license | repo_name: mcclure/bitbucket-backup
  snapshot_id: e49d280363ff7ef687f03473e463865a7ad8a817 | revision_id: b6a02ca8decf843fa0a765c842c24e7eccf59307 | branch_name: refs/heads/archive
  visit_date: 2023-01-24T21:15:14.875131 | revision_date: 2020-02-02T20:56:23 | committer_date: 2020-02-02T20:56:23
  github_id: 237,833,969 | star_events_count: 115 | fork_events_count: 6
  gha_license_id: null | gha_event_created_at: 2023-01-07T14:24:14 | gha_created_at: 2020-02-02T20:43:56
  gha_language: C | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
  length_bytes: 24,192 | extension: py | filename: parser.py
Content follows:
--------------------------------------------------------------------------------
# Parser: Transformations ("macros") applied to parse tree to gradually make it executable
from core import *
from util import switch
import reader
import execution
class ParserException(EmilyException):
pass
# Standard macros-- "ast"
class Macro(object):
def __init__(s, progress):
s.progress = progress
# Macro machinery
class MacroLevel(object):
def __init__(s, progress, contents):
s.progress = progress
s.contents = contents
class MacroShortCircuit(Exception):
def __init__(s, error):
s.error = error
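# Iterates over the statements of a group in order; a macro can also "steal"
# the following statement when it begins with a given symbol (used below for
# else/elif chaining).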
class SequenceTracker(object):
def __init__(s, statements):
s.statements = statements
s.idx = 0
s.argIdx = 0
def steal(s, symbol):
if s.more():
nodes = s.statements[s.idx].nodes
if nodes and isSymbol(nodes[0], symbol):
s.idx += 1
return nodes
return None
def __iter__(s):
return s
def more(s):
return s.idx < len(s.statements)
def next(s):
if s.more():
statement = s.statements[s.idx]
s.idx += 1
s.argIdx += 1
return statement
else:
raise StopIteration()
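# Holds a statement's node list split into "left" and "right" halves so each
# macro can see the context on both sides of the node under consideration;
# the scan direction depends on the parity of the macro level's progress.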
class BidiIterator(object):
def __init__(s, source, rightward):
if rightward:
s.left = source
s.right = []
else:
s.left = []
s.right = source
s.rightward = rightward
def source(s):
return s.left if s.rightward else s.right
def result(s):
return s.right if s.rightward else s.left
def push(s, v):
if s.rightward:
s.right.insert(0, v)
else:
s.left.append(v)
def pop(s):
if s.rightward:
return s.left.pop()
else:
return s.right.pop(0)
def replace(s, left, at, right):
s.left = left or []
s.right = right or []
if at:
s.push(at)
class Parser(object):
def __init__(s, clone = None, clear = False):
s.errors = clone.errors if clone else []
if clear:
s.macros = []
else:
if clone:
s.macros = [MacroLevel(level.progress, list(level.contents)) for level in clone.macros]
else:
s.macros = []
s.loadAll(defaultMacros)
def _innerLoad(s, macro):
for existing in s.macros:
if existing.progress == macro.progress:
existing.contents.append(macro)
return
s.macros.append(MacroLevel(macro.progress, [macro]))
def _sort(s):
s.macros.sort(key=lambda x:x.progress)
def load(s, macro):
s._innerLoad(macro)
s._sort()
def loadAll(s, macros): # FIXME: O(n^2)
for macro in macros:
s._innerLoad(macro)
s._sort()
def checkComplete(s, node):
if node.progress < ProgressBase.Executable:
return s.errorAt(node, "Internal error: Macro processing completed but this node is unfinished")
return None
def errorAt(s, loc, msg):
s.errors.append( Error(loc, msg) )
return execution.InvalidExec(loc)
def process(s, nodes, tracker = None):
if not nodes:
raise Exception("Internal error: macro process() cannot work on an empty list")
try:
for level in s.macros:
i = BidiIterator(nodes, 1 == level.progress % 2)
while i.source():
at = i.pop()
left = i.left
right = i.right
if at.progress > level.progress:
i.push(at)
continue
for macro in level.contents:
if macro.match(left, at, right):
result = macro.apply(s, left, at, right, tracker)
# TODO: Catch exceptions
if type(result) == Error:
raise MacroShortCircuit(result)
else:
(left, at, right) = result
break
i.replace(left, at, right)
nodes = i.result()
except MacroShortCircuit as e:
return s.errorAt( e.error.loc, e.error.msg )
if not nodes:
# at is known non-None because otherwise we would have failed earlier
return s.errorAt(at.loc, "Macro malfunctioned and produced an empty list")
if type(nodes[0]) == reader.ExpGroup:
if not nodes[0].nonempty():
result = s.makeUnit(nodes.pop(0))
else:
nodes0 = nodes.pop(0)
tracker = SequenceTracker(nodes0.statements)
result = s.process(next(tracker).nodes, tracker)
if tracker.more(): # Wait. There's more?
return s.errorAt(nodes0.loc, "Line started with a multiline parenthesis group. Did you mean to use \"do\"?")
nodes0 = None
else:
result = nodes.pop(0)
completenessError = s.checkComplete(result)
if completenessError:
return completenessError
while nodes:
arg = nodes.pop(0)
if type(arg) == reader.ExpGroup:
if not arg.nonempty():
result = execution.ApplyExec(result.loc, result, s.makeUnit(arg))
else:
tracker = SequenceTracker(arg.statements)
for statement in tracker:
if not statement.nodes:
return s.errorAt(arg.loc, "Argument #%s to function is blank" % (tracker.argIdx))
result = execution.ApplyExec(result.loc, result, s.process(statement.nodes, tracker))
else:
completenessError = s.checkComplete(arg)
if completenessError:
return completenessError
result = execution.ApplyExec(result.loc, result, arg)
s.checkComplete(result)
return result
def makeUnit(s, grp):
return execution.NullLiteralExec(grp.loc)
def makeSequence(s, loc, statements, shouldReturn = False):
execs = []
macros = []
m = None
tracker = SequenceTracker(statements)
for stm in tracker:
currentParser = m or s
exe = currentParser.process(stm.nodes, tracker)
if type(exe) == UserMacroList: # Apply these macros to all following lines
if not m or exe.profile: # Instantiate parser on first custom macro, or any "profile" call
m = Parser(currentParser, exe.profile) # Inherit from existing parser, clear if this is a "profile"
m.loadAll(exe.contents)
if exe.export:
macros += exe.contents # FIXME: This endures even after a "profile". Is that weird?
if exe.payload:
execs.append(exe.payload)
else:
execs.append(exe)
hasLets = bool(macros)
if not hasLets:
for exe in execs: # FIXME: This approach will do something weird if you = in a argument list or condition
if type(exe) == execution.SetExec and (exe.isLet or exe.isExport):
hasLets = True
break
return execution.SequenceExec(loc, shouldReturn, hasLets, execs, macros)
def makeArray(s, seq):
tracker = SequenceTracker(seq.statements)
return [s.process(stm.nodes, tracker) for stm in tracker] if seq.nonempty() else []
# Standard macros-- "make values"
# TODO: do, if, while
def isSymbol(exp, match):
return type(exp) == reader.SymbolExp and not exp.isAtom and not exp.isEscaped and exp.content == match
# Abstract macro: matches on just one known symbol
class OneSymbolMacro(Macro):
def match(s, left, node, right):
return isSymbol(node, s.symbol())
# Macro for loading macros -- Can masquerade as an Executable
class UserMacroList(execution.Executable):
def __init__(s, loc, contents, export, profile, payload = None):
super(UserMacroList, s).__init__(loc)
s.contents = contents
s.export = export
s.profile = profile
s.payload = payload # Executable to run after
def __unicode__(s):
return u"[Misplaced macro node]"
def eval(s, scope):
raise Exception("\"Macro\" statement in invalid place")
class MacroMacro(OneSymbolMacro):
def __init__(s, profile):
super(MacroMacro, s).__init__(progress=ProgressBase.Parser + 10)
s.profile = profile
def symbol(s):
return u"profile" if s.profile else u"macro"
def apply(s, m, left, node, right, _):
export = False
if left:
if not (len(left) == 1 and isSymbol(left[0], "export")):
return Error(node.loc, "Stray garbage before \"%s\"" % s.symbol())
export = True
if not right:
return Error(node.loc, u"Emptiness after \"%s\"" % s.symbol())
macroGroup = right[0]
if type(macroGroup) == reader.SymbolExp:
# TODO: Consider only allowing this if atom keys. TODO: Do something more sensible when this fails?
macroObject = m.process(right).eval(execution.profileScope)
try:
macroList = macroObject.apply(execution.AtomLiteralExec(node.loc, execution.macroExportList))
except execution.InternalExecutionException as e:
raise execution.ExecutionException(macroGroup.loc, u"macro load", unicode(e))
if type(macroList) != list:
return Error(macroGroup.loc, u"macro import path did not resolve to a valid module")
else:
payload = None
importObject = macroObject
if isinstance(importObject, execution.LazyMacroLoader): # TODO: What about module objects?
importObject = importObject.importObject()
if importObject:
payload = execution.ImportAllExec(node.loc,
execution.StoredLiteralExec(node.loc, importObject),
export)
return ([], UserMacroList(node.loc, macroList, export, s.profile, payload), [])
elif type(macroGroup) == reader.ExpGroup:
if len(right) > 1:
return Error(node.loc, u"Stray garbage after \"%s (group)\"" % s.symbol())
macros = m.makeArray(macroGroup)
return ([], UserMacroList(node.loc, [ast.eval(execution.defaultScope) for ast in macros], export, s.profile), [])
else:
return Error(node.loc, u"Expected a path or a (group) after \"%s\"" % s.symbol())
class ImportMacro(OneSymbolMacro):
def __init__(s):
super(ImportMacro, s).__init__(progress=ProgressBase.Parser + 10)
def symbol(s):
return u"import"
# FIXME: This entire setup too easily accepts nonsense like "import a + b from c + d"
def generateSetExec(s, m, loc, prefix, target):
if not target:
return Error(loc, u"Missing target to import")
if prefix:
if type(target[0]) == reader.SymbolExp:
if target[0].isAtom: # FIXME: This check should occur even if prefix does not
return Error(target[0].loc, u"Expected a symbol after \"import\"")
newSymbol = reader.SymbolExp(target[0].loc, True)
newSymbol.content = target[0].content
target = [newSymbol] + target[1:]
target = prefix + target
if len(target) == 1:
return Error(target[0].loc, u"import expects either multiple symbols or a \"from\" clause")
if type(target[-1]) != reader.SymbolExp or not target[-1].isAtom:
return Error(target[-1].loc, u"End of import path needs to be an atom")
symbol = execution.AtomLiteralExec(target[-1].loc, target[-1].content)
return execution.SetExec(loc, True, False, False, False, None, symbol, m.process(target))
def apply(s, m, left, node, right, tracker):
prefix = None
if left:
if isSymbol(left[0], u"from"):
if len(left) == 1:
return Error(left[0].loc, u"Expected symbols between \"from\" and \"import\"")
prefix = left[1:]
else:
return Error(node.loc, u"Stray garbage before \"import\"")
if len(right) == 1 and type(right[0]) == reader.ExpGroup:
setExecs = []
for stm in right[0].statements:
setExec = s.generateSetExec(m, node.loc, prefix, stm.nodes)
if type(setExec) == Error:
return setExec
setExecs.append(setExec)
result = execution.SequenceExec(node.loc, False, False, setExecs)
elif len(right) == 1 and isSymbol(right[0], "*"):
result = execution.ImportAllExec(node.loc, m.process(prefix))
else:
result = s.generateSetExec(m, node.loc, prefix, right)
if type(result) == Error:
return result
return ([], result, [])
# = sign
class SetMacro(OneSymbolMacro):
def __init__(s):
super(SetMacro, s).__init__(progress = ProgressBase.Parser + 100)
def symbol(s):
return u"="
def apply(s, m, left, node, right, tracker):
isLet = False
isMethod = False
isField = False
isExport = False
target = None
for idx in range(len(left)):
if isSymbol(left[idx], u"let"):
isLet = True
elif isSymbol(left[idx], u"method"):
isMethod = True
elif isSymbol(left[idx], u"field"):
isField = True
elif isSymbol(left[idx], u"export"):
isExport = True
else:
break
if isLet and isExport:
return Error(node.loc, "Cannot use \"let\" and \"export\" together")
if left:
left = left[idx:]
if len(left) == 0:
return Error(node.loc, "Missing name in =")
key = left[-1]
if len(left) > 1:
target = m.process(left[:-1])
key = m.process([key])
else: # Currently under all circumstances a = b is a flat atom assignment
if type(key) != reader.SymbolExp or key.isAtom:
return Error(key.loc, "Assigned name must be alphanumeric")
key = execution.AtomLiteralExec(key.loc, key.content)
value = m.process(right, tracker)
return ([], execution.SetExec(node.loc, isLet, isMethod, isField, isExport, target, key, value), [])
# Abstract macro: Expects SYMBOL (GROUP)
class SeqMacro(OneSymbolMacro):
def apply(s, m, left, node, right, _):
if not right:
return Error(node.loc, u"Emptiness after \"%s\"" % (s.symbol()))
seq = right.pop(0)
if type(seq) != reader.ExpGroup:
return Error(node.loc, u"Expected a (group) after \"%s\"" % (s.symbol()))
return (left, s.construct(m, seq), right)
# do (statements)
class DoMacro(SeqMacro):
def __init__(s):
super(DoMacro, s).__init__(progress = ProgressBase.Parser + 400)
def symbol(s):
return u"do"
def construct(s, m, seq):
return m.makeSequence(seq.loc, seq.statements, True)
# if (cond) (ifBlock) (elseBlock?) -- OR -- while (cond) (whileBlock)
class IfMacro(OneSymbolMacro):
def __init__(s, loop):
super(IfMacro, s).__init__(progress = ProgressBase.Parser + 400)
s.loop = loop
def symbol(s):
return u"while" if s.loop else u"if"
def apply(s, m, left, node, right, tracker):
if not right:
return Error(node.loc, u"Emptiness after \"%s\"" % (node.content))
cond = right.pop(0)
if not right:
return Error(node.loc, u"Emptiness after \"%s (condition)\"" % (node.content))
seq = right.pop(0)
if type(seq) != reader.ExpGroup:
return Error(node.loc, u"Expected a (group) after \"%s (condition)\"" % (node.content))
cond = m.process([cond])
seq = m.makeSequence(seq.loc, seq.statements, not s.loop)
elseq = None
if not s.loop:
if not right and tracker:
right = tracker.steal(u"else")
if not right and tracker:
right = tracker.steal(u"elif")
if right:
if isSymbol(right[0], "else"):
right.pop(0) # Throw away else symbol
elseq = right.pop(0)
if type(elseq) != reader.ExpGroup:
return Error(node.loc, u"Expected a (group) after \"else\"")
elseq = m.makeSequence(elseq.loc, elseq.statements, True)
elif isSymbol(right[0], "elif"):
elifSymbol = right.pop(0)
_, elseq, right = s.apply(m, [], elifSymbol, right, tracker)
return (left, execution.IfExec(node.loc, s.loop, cond, seq, elseq), right)
# function (args) (body) -- OR -- func (args) (body)
class FunctionMacro(OneSymbolMacro):
def __init__(s):
super(FunctionMacro, s).__init__(progress = ProgressBase.Parser + 400)
def symbol(s):
return u"function"
def apply(s, m, left, node, right, _):
name = node.content
if not right:
return Error(node.loc, u"Emptiness after \"%s\"" % (name))
argSymbols = right.pop(0)
if type(argSymbols) != reader.ExpGroup:
return Error(node.loc, u"Expected a (group) after \"%s\"" % (name))
if not right:
return Error(node.loc, u"Emptiness after \"%s (args)\"" % (name))
seq = right.pop(0)
if type(seq) != reader.ExpGroup:
return Error(node.loc, u"Expected a (group) after \"%s (args)\"" % (name))
args = []
if argSymbols.nonempty():
for stm in argSymbols.statements:
if not stm.nodes:
return Error(node.loc, u"Arg #%d on %s is blank" % (len(args)+1, name))
if type(stm.nodes[0]) != reader.SymbolExp:
return Error(node.loc, u"Arg #%d on %s is not a symbol" % (len(args)+1, name))
args.append(stm.nodes[0].content)
return (left, execution.MakeFuncExec(node.loc, args, m.makeSequence(seq.loc, seq.statements, True)), right)
# match (matchbody)
class MatchCase(object):
def __init__(s, targetExe, unpacks, statement):
s.targetExe = targetExe
s.unpacks = unpacks
s.statement = statement
class MatchMacro(OneSymbolMacro):
def __init__(s):
super(MatchMacro, s).__init__(progress = ProgressBase.Parser + 400)
def symbol(s):
return u"match"
def apply(s, m, left, node, right, tracker):
if not right:
return Error(node.loc, u"Emptiness after \"match\"")
lines = right.pop(0)
if type(lines) != reader.ExpGroup:
return Error(node.loc, u"Expected a (group) after \"match\"")
result = []
for stmIdx in range(len(lines.statements) if lines.statements else 0):
stm = lines.statements[stmIdx]
if not stm.nodes: # Match is "like code" so may contain a blank line...?
continue
eqIdx = None # Find = sign
for idx in range(len(stm.nodes)):
if isSymbol(stm.nodes[idx], '='):
eqIdx = idx
break
if eqIdx is None:
return Error(node.loc, u"match line #%d does not have an =" % (stmIdx+1))
eqNode = stm.nodes[eqIdx]
eqLeft = stm.nodes[:eqIdx]
eqRight = stm.nodes[eqIdx+1:]
if not eqLeft:
return Error(node.loc, u"On match line #%d, left of = is blank" % (stmIdx+1))
if len(eqLeft) > 2:
return Error(node.loc, u"On match line #%d, left of = has too many symbols. Try adding parenthesis?" % (stmIdx+1))
if not eqRight:
return Error(node.loc, u"On match line #%d, right of = is blank" % (stmIdx+1))
target = eqLeft.pop(0)
unpacksExp = None
unpacks = []
if eqLeft:
unpacksExp = eqLeft[0]
foundUnpack = False
if type(unpacksExp) == reader.SymbolExp:
unpacks = [execution.AtomLiteralExec(unpacksExp.loc, unpacksExp.content)]
foundUnpack = True
elif type(unpacksExp) == reader.ExpGroup:
for statement in unpacksExp.statements:
if not statement.nodes or type(statement.nodes[0]) != reader.SymbolExp:
foundUnpack = False
break
unpacks.append(execution.AtomLiteralExec(statement.nodes[0].loc, statement.nodes[0].content))
foundUnpack = True
if not foundUnpack:
return Error(node.loc, u"On match line #%d, variable unpack list on left of = is garbled" % (stmIdx+1))
if isSymbol(target, '_'):
if unpacksExp:
return Error(node.loc, u"On match line #%d, variable unpack list used with _" % (stmIdx+1))
target = None
elif isSymbol(target, 'array'):
if not unpacksExp:
return Error(node.loc, u"On match line #%d, \"array\" used but no unpack list found" % (stmIdx+1))
target = None
if target:
target = m.process([target])
tempStatement = m.process(eqRight, tracker)
result.append( MatchCase(target, unpacks, tempStatement) )
return (left, execution.MakeMatchExec(node.loc, result), right)
# array (contents)
class ArrayMacro(SeqMacro):
def __init__(s):
super(ArrayMacro, s).__init__(progress = ProgressBase.Parser + 500)
def symbol(s):
return u"array"
def construct(s, m, seq):
return execution.MakeArrayExec(seq.loc, m.makeArray(seq))
# new (contents) or inherit (contents)
class ObjectMacro(OneSymbolMacro):
def __init__(s, isInstance):
super(ObjectMacro, s).__init__(progress = ProgressBase.Parser + 500)
s.isInstance = isInstance
def symbol(s):
return u"new" if s.isInstance else u"inherit"
def apply(s, m, left, node, right, _):
if not right:
return Error(node.loc, u"Emptiness after \"new\"")
base = right.pop(0)
base = m.process([base])
if not right:
seq = reader.ExpGroup(base.loc)
else:
seq = right.pop(0)
if type(seq) != reader.ExpGroup:
return Error(node.loc, u"Expected a (group) after \"new [base]\"")
seq = m.makeSequence(seq.loc, seq.statements, False).execs if seq.nonempty() else []
values = []
assigns = []
foundSet = False
for assign in seq:
if type(assign) == execution.SetExec:
foundSet = True
if assign.target:
return Error(assign.loc, "Assignment inside object literal was not of form key=value")
if assign.isLet:
return Error(assign.loc, "\"let\" is redundant in an object literal")
if assign.isExport:
return Error(assign.loc, "\"export\" is redundant in an object literal")
assign.isLet = True
assigns.append(assign)
elif type(assign) == execution.ImportAllExec:
foundSet = True
assigns.append(assign)
else:
if foundSet:
return Error(assign.loc, "Found a stray value expression inside an object literal")
else:
values.append(assign)
return (left, execution.MakeObjectExec(node.loc, base, values, assigns, s.isInstance), right)
# Final pass: Turn everything not swallowed by a macro into a value
class ValueMacro(Macro):
def __init__(s):
super(ValueMacro, s).__init__(progress = ProgressBase.Parser + 900)
def match(s, left, node, right):
c = type(node)
return c == reader.QuoteExp or c == reader.NumberExp or c == reader.SymbolExp
def apply(s, m, left, node, right, _):
for case in switch(type(node)):
if case(reader.QuoteExp):
node = execution.StringLiteralExec(node.loc, node.content)
elif case(reader.NumberExp):
value = node.integer
if node.dot:
value += "."
if node.decimal is not None:
value += node.decimal
node = execution.NumberLiteralExec(node.loc, float(value))
elif case(reader.SymbolExp):
if node.isAtom:
node = execution.AtomLiteralExec(node.loc, node.content)
else:
node = execution.VarExec(node.loc, node.content)
else:
return Error(node.loc, "Internal error: AST node of indecipherable type %s found in a place that shouldn't be possible" % (type(node).__name__))
return (left, node, right)
# "Nonstandard" macros
# (left) && (right)
class FancySplitterMacro(OneSymbolMacro):
def __init__(s, progress):
super(FancySplitterMacro, s).__init__(progress = progress)
def apply(s, m, left, node, right, _):
if not left:
            return Error(node.loc, u"Emptiness before \"%s\"" % (node.content))
if not right:
return Error(node.loc, u"Emptiness after \"%s\"" % (node.content))
leftExe = m.process(left)
rightExe = m.process(right)
return s.expression(node.loc, leftExe, rightExe)
class AndMacro(FancySplitterMacro):
def __init__(s):
super(AndMacro, s).__init__(progress = ProgressBase.Parser + 605)
def symbol(s):
return "&&"
def expression(s, loc, leftExe, rightExe):
falseHere = execution.BooleanLiteralExec(loc, False)
trueHere = execution.BooleanLiteralExec(loc, True)
return ([],
execution.IfExec(loc, False, leftExe,
execution.IfExec(loc, False, rightExe, trueHere, falseHere),
falseHere),
[])
class OrMacro(FancySplitterMacro):
def __init__(s):
super(OrMacro, s).__init__(progress = ProgressBase.Parser + 603)
def symbol(s):
return "||"
def expression(s, loc, leftExe, rightExe):
falseHere = execution.BooleanLiteralExec(loc, False)
trueHere = execution.BooleanLiteralExec(loc, True)
return ([],
execution.IfExec(loc, False, leftExe, trueHere,
execution.IfExec(loc, False, rightExe, trueHere, falseHere)),
[])
# (left) || (right)
# User defined macro constructors
class UserMacro(OneSymbolMacro):
def __init__(s, progress, symbol):
super(UserMacro, s).__init__(progress = progress)
s.symbolCache = symbol
def symbol(s):
return s.symbolCache
class SplitMacro(UserMacro):
def __init__(s, progress, symbol):
super(SplitMacro, s).__init__(progress, symbol)
def apply(s, m, left, node, right, tracker):
if not left: # Slight code redundancy with FancySplitter?
            return Error(node.loc, u"Emptiness before \"%s\"" % (node.content))
if not right:
return Error(node.loc, u"Emptiness after \"%s\"" % (node.content))
return ([],
execution.ApplyExec(node.loc,
execution.ApplyExec(node.loc,
execution.VarExec(node.loc, s.symbolCache),
m.process(left)),
m.process(right, tracker)),
[])
class UnaryMacro(UserMacro):
def __init__(s, progress, symbol):
super(UnaryMacro, s).__init__(progress, symbol)
def apply(s, m, left, node, right, tracker):
if not right:
return Error(node.loc, u"Emptiness after \"%s\"" % (s.symbol()))
return (left,
execution.ApplyExec(node.loc,
execution.VarExec(node.loc, s.symbolCache),
m.process(right, tracker)),
[])
minimalMacros = [
SetMacro(),
ValueMacro()
]
defaultMacros = [
MacroMacro(False), MacroMacro(True), ImportMacro(),
DoMacro(), IfMacro(False), IfMacro(True), FunctionMacro(), MatchMacro(),
ArrayMacro(), ObjectMacro(True), ObjectMacro(False),
] + minimalMacros
shortCircuitBooleanMacros = [OrMacro(), AndMacro()]
def exeFromAst(ast):
parser = Parser()
result = parser.makeSequence(ast.loc, ast.statements) # TODO test to make sure it's a group
if parser.errors:
output = [errorFormat(e) for e in parser.errors]
raise ParserException(u"\n".join(output))
return result
--------------------------------------------------------------------------------
Dataset record: generate_parquet_test_file.py (OSGeo/gdal)
  blob_id: e7587fe68c668a903f57925f3a0669a0c21ce055 | directory_id: d3b468ef0938ec32edf71ea1ceeb5b5d06ebf171
  path: /autotest/generate_parquet_test_file.py | content_id: 2768254d2c26574a2b03de1a40314c035b489d35
  detected_licenses: [LicenseRef-scancode-warranty-disclaimer, SunPro, LicenseRef-scancode-info-zip-2005-02, BSD-3-Clause, MIT, ISC, Apache-2.0, LicenseRef-scancode-public-domain, BSD-2-Clause] | license_type: permissive
  repo_name: OSGeo/gdal | snapshot_id: 30a1e1fb0909d758d4f636d481bf03fcd7affe3c | revision_id: 1e7746b2546b8c4878f4bfdb20c87f87e561745b | branch_name: refs/heads/master
  visit_date: 2023-09-03T19:37:50.027999 | revision_date: 2023-09-03T18:29:31 | committer_date: 2023-09-03T18:29:31
  github_id: 6,148,317 | star_events_count: 4,100 | fork_events_count: 2,611
  gha_license_id: NOASSERTION | gha_event_created_at: 2023-09-14T20:23:19 | gha_created_at: 2012-10-09T21:39:58
  gha_language: C++ | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
  length_bytes: 26,536 | extension: py | filename: generate_parquet_test_file.py
Content follows:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for OGR Parquet driver.
# Author: Even Rouault <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2022, Planet Labs
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
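# Helper script that (re)generates the Parquet and Feather test files used by
# the OGR Parquet/Arrow driver autotests (written under ogr/data/parquet and
# ogr/data/arrow).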
wkt_epsg_4326 = (
'GEOGCRS["WGS 84",ENSEMBLE["World Geodetic '
+ 'System 1984 ensemble",MEMBER["World Geodetic '
+ 'System 1984 (Transit)"],MEMBER["World '
+ 'Geodetic System 1984 (G730)"],MEMBER["World '
+ 'Geodetic System 1984 (G873)"],MEMBER["World '
+ 'Geodetic System 1984 (G1150)"],MEMBER["World '
+ 'Geodetic System 1984 (G1674)"],MEMBER["World '
+ 'Geodetic System 1984 (G1762)"],MEMBER["World '
+ "Geodetic System 1984 "
+ '(G2139)"],ELLIPSOID["WGS '
+ '84",6378137,298.257223563],ENSEMBLEACCURACY[2.0]],CS[ellipsoidal,2],AXIS["geodetic '
+ 'latitude (Lat)",north],AXIS["geodetic '
+ "longitude "
+ '(Lon)",east],UNIT["degree",0.0174532925199433],USAGE[SCOPE["Horizontal '
+ "component of 3D "
+ 'system."],AREA["World."],BBOX[-90,-180,90,180]],ID["EPSG",4326]]'
)
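# The WKT2 definition above is used as the CRS entry of the "geo" metadata in
# several of the generated test files.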
def generate_test_parquet():
import datetime
import decimal
import json
import pathlib
import struct
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
boolean = pa.array([True, False, None, False, True], type=pa.bool_())
uint8 = pa.array([None if i == 2 else 1 + i for i in range(5)], type=pa.uint8())
int8 = pa.array([None if i == 2 else -2 + i for i in range(5)], type=pa.int8())
uint16 = pa.array(
[None if i == 2 else 1 + i * 10000 for i in range(5)], type=pa.uint16()
)
int16 = pa.array(
[None if i == 2 else -20000 + i * 10000 for i in range(5)], type=pa.int16()
)
uint32 = pa.array(
[None if i == 2 else 1 + i * 1000000000 for i in range(5)], type=pa.uint32()
)
int32 = pa.array(
[None if i == 2 else -2000000000 + i * 1000000000 for i in range(5)],
type=pa.int32(),
)
uint64 = pa.array(
[None if i == 2 else 1 + i * 100000000000 for i in range(5)], type=pa.uint64()
)
int64 = pa.array(
[None if i == 2 else -200000000000 + i * 100000000000 for i in range(5)],
type=pa.int64(),
)
float32 = pa.array(
[None if i == 2 else 1.5 + i for i in range(5)], type=pa.float32()
)
float64 = pa.array(
[None if i == 2 else 1.5 + i for i in range(5)], type=pa.float64()
)
string = pa.array(["abcd", "", None, "c", "d"], type=pa.string())
large_string = pa.array(["abcd", "", None, "c", "d"], type=pa.large_string())
gmt_plus_2 = datetime.timezone(datetime.timedelta(hours=2))
timestamp_ms_gmt_plus_2 = pa.array(
[
pd.Timestamp(
year=2019,
month=1,
day=1,
hour=14,
microsecond=500 * 1000,
tz=gmt_plus_2,
)
]
* 5,
type=pa.timestamp("ms", tz=gmt_plus_2),
)
gmt = datetime.timezone(datetime.timedelta(hours=0))
timestamp_ms_gmt = pa.array(
[
pd.Timestamp(
year=2019, month=1, day=1, hour=14, microsecond=500 * 1000, tz=gmt
)
]
* 5,
type=pa.timestamp("ms", tz=gmt),
)
gmt_minus_0215 = datetime.timezone(datetime.timedelta(hours=-2.25))
timestamp_ms_gmt_minus_0215 = pa.array(
[
pd.Timestamp(
year=2019,
month=1,
day=1,
hour=14,
microsecond=500 * 1000,
tz=gmt_minus_0215,
)
]
* 5,
type=pa.timestamp("ms", tz=gmt_minus_0215),
)
timestamp_s_no_tz = pa.array(
[pd.Timestamp(year=2019, month=1, day=1, hour=14)] * 5,
type=pa.timestamp("s"),
)
timestamp_us_no_tz = pa.array(
[pd.Timestamp(year=2019, month=1, day=1, hour=14, microsecond=500)] * 5,
type=pa.timestamp("us"),
)
timestamp_ns_no_tz = pa.array(
[pd.Timestamp(year=2019, month=1, day=1, hour=14, microsecond=1)] * 5,
type=pa.timestamp("ns"),
)
time32_s = pa.array([3600 + 120 + 3, None, 3, 4, 5], type=pa.time32("s"))
time32_ms = pa.array(
[(3600 + 120 + 3) * 1000 + 456, 2, 3, 4, 5], type=pa.time32("ms")
)
time64_us = pa.array([(3600 + 120 + 3) * 1e6, None, 3, 4, 5], type=pa.time64("us"))
time64_ns = pa.array(
[(3600 + 120 + 3) * 1e9 + 456, 2, 3, 4, 5], type=pa.time64("ns")
)
date32 = pa.array([1, 2, 3, 4, 5], type=pa.date32())
date64 = pa.array([86400 * 1000, 2, 3, 4, 5], type=pa.date64())
duration_s = pa.array([1, 2, 3, 4, 5], type=pa.duration("s"))
duration_ms = pa.array([1, 2, 3, 4, 5], type=pa.duration("ms"))
binary = pa.array([b"\x00\x01"] * 5, type=pa.binary())
large_binary = pa.array([b"\x00\x01"] * 5, type=pa.large_binary())
fixed_size_binary = pa.array(
[b"\x00\x01", b"\x00\x00", b"\x01\x01", b"\x01\x00", b"\x00\x01"],
type=pa.binary(2),
)
decimal128 = pa.array(
[
decimal.Decimal("1234.567"),
decimal.Decimal("-1234.567"),
None,
decimal.Decimal("1234.567"),
decimal.Decimal("-1234.567"),
],
type=pa.decimal128(7, 3),
)
decimal256 = pa.array(
[
decimal.Decimal("1234.567"),
decimal.Decimal("-1234.567"),
None,
decimal.Decimal("1234.567"),
decimal.Decimal("-1234.567"),
],
type=pa.decimal256(7, 3),
)
list_boolean = pa.array(
[
None
if i == 2
else [None if j == 0 else True if (j % 2) == 0 else False for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.bool_()),
)
list_uint8 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.uint8()),
)
list_int8 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.int8()),
)
list_uint16 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.uint16()),
)
list_int16 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.int16()),
)
list_uint32 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.uint32()),
)
list_int32 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.int32()),
)
list_uint64 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.uint64()),
)
list_int64 = pa.array(
[
None
if i == 2
else [None if j == 0 else j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.int64()),
)
list_float32 = pa.array(
[
None
if i == 2
else [None if j == 0 else 0.5 + j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.float32()),
)
list_float64 = pa.array(
[
None
if i == 2
else [None if j == 0 else 0.5 + j + i * (i - 1) // 2 for j in range(i)]
for i in range(5)
],
type=pa.list_(pa.float64()),
)
list_string = pa.array(
[
None
if i == 2
else [
"".join(["%c" % (65 + j + k) for k in range(1 + j)]) for j in range(i)
]
for i in range(5)
]
)
fixed_size_list_boolean = pa.array(
[[True, False], [False, True], [True, False], [False, True], [True, False]],
type=pa.list_(pa.bool_(), 2),
)
fixed_size_list_uint8 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.uint8(), 2)
)
fixed_size_list_int8 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.int8(), 2)
)
fixed_size_list_uint16 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.uint16(), 2)
)
fixed_size_list_int16 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.int16(), 2)
)
fixed_size_list_uint32 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.uint32(), 2)
)
fixed_size_list_int32 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.int32(), 2)
)
fixed_size_list_uint64 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.uint64(), 2)
)
fixed_size_list_int64 = pa.array(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.int64(), 2)
)
fixed_size_list_float32 = pa.array(
[[0, None], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.float32(), 2)
)
fixed_size_list_float64 = pa.array(
[[0, None], [2, 3], [4, 5], [6, 7], [8, 9]], type=pa.list_(pa.float64(), 2)
)
fixed_size_list_string = pa.array(
[["a", "b"], ["c", "d"], ["e", "f"], ["g", "h"], ["i", "j"]],
type=pa.list_(pa.string(), 2),
)
struct_field = pa.array(
[{"a": 1, "b": 2.5, "c": {"d": "e", "f": "g"}, "h": [5, 6], "i": 3}] * 5
)
# struct_val = { "a": 5 }
# for i in range(123):
# struct_val = { "a": struct_val }
# struct_field = pa.array([struct_val] * 5)
map_boolean = pa.array(
[[("x", None), ("y", True)], [("z", True)], None, [], []],
type=pa.map_(pa.string(), pa.bool_()),
)
map_uint8 = pa.array(
[[("x", 1), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.uint8()),
)
map_int8 = pa.array(
[[("x", 1), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.int8()),
)
map_uint16 = pa.array(
[[("x", 1), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.uint16()),
)
map_int16 = pa.array(
[[("x", 1), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.int16()),
)
map_uint32 = pa.array(
[[("x", 4 * 1000 * 1000 * 1000), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.uint32()),
)
map_int32 = pa.array(
[[("x", 2 * 1000 * 1000 * 1000), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.int32()),
)
map_uint64 = pa.array(
[[("x", 4 * 1000 * 1000 * 1000 * 1000), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.uint64()),
)
map_int64 = pa.array(
[
[("x", -2 * 1000 * 1000 * 1000 * 1000), ("y", None)],
[("z", 3)],
None,
[],
[],
],
type=pa.map_(pa.string(), pa.int64()),
)
map_float32 = pa.array(
[[("x", 1.5), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.float32()),
)
map_float64 = pa.array(
[[("x", 1.5), ("y", None)], [("z", 3)], None, [], []],
type=pa.map_(pa.string(), pa.float64()),
)
map_string = pa.array(
[[("x", "x_val"), ("y", None)], [("z", "z_val")], None, [], []],
type=pa.map_(pa.string(), pa.string()),
)
indices = pa.array([0, 1, 2, None, 2], type=pa.int32())
dictionary = pa.array(["foo", "bar", "baz"])
dict = pa.DictionaryArray.from_arrays(indices, dictionary)
map_list = pa.array(
[[("x", []), ("y", [])], [("z", [])], None, [], []],
type=pa.map_(pa.string(), pa.list_(pa.uint32())),
)
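    # Each non-null geometry is raw little-endian WKB for POINT(i 2): a 1-byte
    # byte-order marker, a 4-byte geometry type (1 = Point), then the packed
    # x/y doubles.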
geometry = pa.array(
[
None if i == 1 else (b"\x01\x01\x00\x00\x00" + struct.pack("<dd", i, 2))
for i in range(5)
],
type=pa.binary(),
)
names = [
"boolean",
"uint8",
"int8",
"uint16",
"int16",
"uint32",
"int32",
"uint64",
"int64",
"float32",
"float64",
"string",
"large_string",
"timestamp_ms_gmt",
"timestamp_ms_gmt_plus_2",
"timestamp_ms_gmt_minus_0215",
"timestamp_s_no_tz",
"timestamp_us_no_tz",
"timestamp_ns_no_tz",
"time32_s",
"time32_ms",
"time64_us",
"time64_ns",
"date32",
"date64",
# "duration_s",
# "duration_ms",
"binary",
"large_binary",
"fixed_size_binary",
"decimal128",
"decimal256",
"list_boolean",
"list_uint8",
"list_int8",
"list_uint16",
"list_int16",
"list_uint32",
"list_int32",
"list_uint64",
"list_int64",
"list_float32",
"list_float64",
"list_string",
"fixed_size_list_boolean",
"fixed_size_list_uint8",
"fixed_size_list_int8",
"fixed_size_list_uint16",
"fixed_size_list_int16",
"fixed_size_list_uint32",
"fixed_size_list_int32",
"fixed_size_list_uint64",
"fixed_size_list_int64",
"fixed_size_list_float32",
"fixed_size_list_float64",
"fixed_size_list_string",
"struct_field",
"map_boolean",
"map_uint8",
"map_int8",
"map_uint16",
"map_int16",
"map_uint32",
"map_int32",
"map_uint64",
"map_int64",
"map_float32",
"map_float64",
"map_string",
# "map_list",
"dict",
"geometry",
]
locals_ = locals()
table = pa.table([locals_[x] for x in names], names=names)
my_schema = table.schema.with_metadata(
{
"geo": json.dumps(
{
"version": "0.1.0",
"primary_column": "geometry",
"columns": {
"geometry": {
"crs": wkt_epsg_4326,
"bbox": [0, 2, 4, 2],
"encoding": "WKB",
}
},
}
)
}
)
table = table.cast(my_schema)
HERE = pathlib.Path(__file__).parent
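    # With 5 rows and row_group_size=3 the main test file ends up with two row
    # groups, which also exercises multi-row-group reading in the tests.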
pq.write_table(
table,
HERE / "ogr/data/parquet/test.parquet",
compression="NONE",
row_group_size=3,
version="1.0",
)
pq.write_table(
table,
HERE / "ogr/data/parquet/test_single_group.parquet",
compression="NONE",
version="1.0",
)
import pyarrow.feather as feather
feather.write_feather(table, HERE / "ogr/data/arrow/test.feather")
def generate_all_geoms_parquet():
import json
import pathlib
import pyarrow as pa
import pyarrow.parquet as pq
from osgeo import ogr
g1 = ogr.CreateGeometryFromWkt("POINT(1 2)")
g2 = ogr.CreateGeometryFromWkt("LINESTRING(3 4,5 6)")
g3 = ogr.CreateGeometryFromWkt(
"POLYGON((10 0,11 0,11 -1,11 0,10 0),(10.2 -0.2,10.8 -0.2,10.8 -0.8,10.2 -0.8,10.2 -0.2))"
)
g4 = ogr.CreateGeometryFromWkt("MULTIPOINT(7 8,9 10)")
g5 = ogr.CreateGeometryFromWkt("MULTILINESTRING((11 12,13 14),(15 16,17 18))")
g6 = ogr.CreateGeometryFromWkt(
"MULTIPOLYGON(((100 0,101 0,101 1,101 0,100 0),(100.2 0.2,100.8 0.2,100.8 0.8,100.2 0.8,100.2 0.2)))"
)
g7 = ogr.CreateGeometryFromWkt(
"GEOMETRYCOLLECTION(POINT(19 20),LINESTRING(21 22, 23 24))"
)
geometry = pa.array(
[x.ExportToWkb(byte_order=ogr.wkbXDR) for x in (g1, g2, g3, g4, g5, g6, g7)],
type=pa.binary(),
)
names = ["geometry"]
locals_ = locals()
table = pa.table([locals_[x] for x in names], names=names)
my_schema = table.schema.with_metadata(
{
"geo": json.dumps(
{
"version": "0.1.0",
"primary_column": "geometry",
"columns": {"geometry": {"crs": wkt_epsg_4326, "encoding": "WKB"}},
}
)
}
)
table = table.cast(my_schema)
HERE = pathlib.Path(__file__).parent
pq.write_table(
table, HERE / "ogr/data/parquet/all_geoms.parquet", compression="NONE"
)
def generate_parquet_wkt_with_dict():
import json
import pathlib
import pyarrow as pa
import pyarrow.parquet as pq
geometry = pa.array(
["POINT (1 2)", "POINT (3 4)", None, "POINT (7 8)", "POINT (9 10)"],
type=pa.string(),
)
indices = pa.array([0, 1, 2, None, 2])
dictionary = pa.array(["foo", "bar", "baz"])
dict = pa.DictionaryArray.from_arrays(indices, dictionary)
names = ["geometry", "dict"]
locals_ = locals()
table = pa.table([locals_[x] for x in names], names=names)
my_schema = table.schema.with_metadata(
{
"geo": json.dumps(
{
"version": "0.1.0",
"primary_column": "geometry",
"columns": {"geometry": {"encoding": "WKT"}},
}
)
}
)
table = table.cast(my_schema)
HERE = pathlib.Path(__file__).parent
pq.write_table(
table,
HERE / "ogr/data/parquet/wkt_with_dict.parquet",
compression="NONE",
row_group_size=3,
)
def generate_nested_types():
import pathlib
import pyarrow as pa
import pyarrow.parquet as pq
map_list_bool = pa.array(
[
[("x", [True]), ("y", [False, True])],
[("z", [])],
None,
[("w", [True, False])],
[("null", None)],
],
type=pa.map_(pa.string(), pa.list_(pa.bool_())),
)
map_list_uint8 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.uint8())),
)
map_list_int8 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.int8())),
)
map_list_uint16 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.uint16())),
)
map_list_int16 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.int16())),
)
map_list_uint32 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.uint32())),
)
map_list_int32 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.int32())),
)
map_list_uint64 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.uint64())),
)
map_list_int64 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.int64())),
)
map_list_float32 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.float32())),
)
map_list_float64 = pa.array(
[[("x", [2]), ("y", [3, 4])], [("z", [])], None, [("w", [5, 6])], []],
type=pa.map_(pa.string(), pa.list_(pa.float64())),
)
map_map_bool = pa.array(
[
[("a", [("b", True), ("c", None), ("d", None)]), ("e", None)],
None,
[("f", [("g", False)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.bool_())),
)
map_map_uint8 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.uint8())),
)
map_map_int8 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.int8())),
)
map_map_uint16 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.uint16())),
)
map_map_int16 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.int16())),
)
map_map_uint32 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.uint32())),
)
map_map_int32 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.int32())),
)
map_map_uint64 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.uint64())),
)
map_map_int64 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.int64())),
)
map_map_float32 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.float32())),
)
map_map_float64 = pa.array(
[
[("a", [("b", 1), ("c", None), ("d", 2)]), ("e", None)],
None,
[("f", [("g", 3)])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.float64())),
)
map_map_string = pa.array(
[
[("a", [("b", "c"), ("d", None)]), ("e", None)],
None,
[("f", [("g", "h")])],
None,
None,
],
type=pa.map_(pa.string(), pa.map_(pa.string(), pa.string())),
)
list_list_string = pa.array(
[[["a"], None, ["b", None, "cd"]], None, [["efg"]], [], []],
type=pa.list_(pa.list_(pa.string())),
)
list_map_string = pa.array(
[[[("a", "b"), ("c", "d")], [("e", "f")]], None, [None], [], []],
type=pa.list_(pa.map_(pa.string(), pa.string())),
)
names = [
"map_list_bool",
"map_list_uint8",
"map_list_int8",
"map_list_uint16",
"map_list_int16",
"map_list_uint32",
"map_list_int32",
"map_list_uint64",
"map_list_int64",
"map_list_float32",
"map_list_float64",
"map_map_bool",
"map_map_uint8",
"map_map_int8",
"map_map_uint16",
"map_map_int16",
"map_map_uint32",
"map_map_int32",
"map_map_uint64",
"map_map_int64",
"map_map_float32",
"map_map_float64",
"map_map_string",
"list_list_string",
"list_map_string",
]
locals_ = locals()
table = pa.table([locals_[x] for x in names], names=names)
HERE = pathlib.Path(__file__).parent
pq.write_table(
table,
HERE / "ogr/data/parquet/nested_types.parquet",
compression="NONE",
row_group_size=3,
)
if __name__ == "__main__":
generate_test_parquet()
generate_all_geoms_parquet()
generate_parquet_wkt_with_dict()
generate_nested_types()
--------------------------------------------------------------------------------
Dataset record: test_tensor.py (MegEngine/MegEngine)
  blob_id: c1d3125275bcdde936407abec26a9b88de181e87 | directory_id: 6ffd23679939f59f0a09c9507a126ba056b239d7
  path: /imperative/python/test/unit/functional/test_tensor.py | content_id: a4a81e3f9e986a9cb578d3d05fd3d495d1709fc5
  detected_licenses: [LicenseRef-scancode-generic-cla, Apache-2.0] | license_type: permissive
  repo_name: MegEngine/MegEngine | snapshot_id: 74c1c9b6022c858962caf7f27e6f65220739999f | revision_id: 66b79160d35b2710c00befede0c3fd729109e474 | branch_name: refs/heads/master
  visit_date: 2023-08-23T20:01:32.476848 | revision_date: 2023-08-01T07:12:01 | committer_date: 2023-08-11T06:04:12
  github_id: 248,175,118 | star_events_count: 5,697 | fork_events_count: 585
  gha_license_id: Apache-2.0 | gha_event_created_at: 2023-07-19T05:11:07 | gha_created_at: 2020-03-18T08:21:58
  gha_language: C++ | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
  length_bytes: 32,283 | extension: py | filename: test_tensor.py
Content follows:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import os
import platform
from typing import Tuple
import numpy as np
import pytest
from utils import get_var_value, make_tensor, opr_test
import megengine.functional as F
from megengine import Tensor
from megengine.core._imperative_rt.core2 import create_complex
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.tensor import megbrain_graph as G
from megengine.core.tensor.utils import astensor1d
from megengine.jit import trace
from megengine.utils.network import Network, set_symbolic_shape
from megengine.utils.network_node import VarNode
def test_eye():
    dtypes = [np.float32, bool]
cases = [{"input": [10, 20]}, {"input": [30]}]
for dtype in dtypes:
for case in cases:
np.testing.assert_allclose(
F.eye(case["input"], dtype=dtype).numpy(),
np.eye(*case["input"]).astype(dtype),
)
np.testing.assert_allclose(
F.eye(*case["input"], dtype=dtype).numpy(),
np.eye(*case["input"]).astype(dtype),
)
np.testing.assert_allclose(
F.eye(Tensor(case["input"]), dtype=dtype).numpy(),
np.eye(*case["input"]).astype(dtype),
)
@pytest.mark.parametrize("is_varnode", [False, True])
def test_diag(is_varnode):
if is_varnode:
network = Network()
else:
network = None
shapes = [(10, 10), (6, 9), (8, 7), (8,)]
cases = []
for shp in shapes:
cases.append({"input": [np.random.random(shp).astype("float32")]})
for axis in range(-2, 3):
def run(data):
return F.diag(data, k=axis)
opr_test(cases, run, ref_fn=lambda x: np.diag(x, axis), network=network)
def test_full():
shape = (2, 3)
values = [True, 4, 5.0]
for value in values:
np.testing.assert_allclose(F.full(shape, value).numpy(), np.full(shape, value))
assert F.full(shape, value).dtype == Tensor(value).dtype
@pytest.mark.parametrize("is_varnode", [True, False])
def test_cumsum(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = Tensor([[1, 2, 3], [4, 5, 6]], np.int32)
y = F.cumsum(x, -1)
np.testing.assert_equal(
y.numpy(), np.array([[1, 3, 6], [4, 9, 15]]).astype(np.int32)
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_concat(is_varnode):
if is_varnode:
network = Network()
else:
network = None
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]), network=network)
x1 = Tensor(np.arange(0, 6, dtype=np.float32).reshape((2, 3)))
x2 = Tensor(np.arange(6, 12, dtype=np.float32).reshape((2, 3)))
y = F.concat([x1, x2], axis=-1)
np.testing.assert_equal(
y.numpy(),
np.array([[0, 1, 2, 6, 7, 8], [3, 4, 5, 9, 10, 11]]).astype(np.float32),
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_condtake(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = np.array([[1, 2, 3], [4, 5, 6]]).astype("float32")
y = np.array([[True, False, True], [False, True, True]])
xx = make_tensor(x, network)
yy = make_tensor(y, network)
val, idx = F.cond_take(yy, xx)
if is_varnode:
np.testing.assert_equal(get_var_value(val), x[y])
np.testing.assert_equal(get_var_value(idx), np.where(y.reshape(-1))[0])
else:
np.testing.assert_equal(val.numpy(), x[y])
np.testing.assert_equal(idx.numpy(), np.where(y.reshape(-1))[0])
@pytest.mark.parametrize("is_varnode", [True, False])
def test_concat_stack_device(is_varnode):
if is_varnode:
network = Network()
else:
network = None
data1 = make_tensor(np.random.random((2, 2, 2)).astype("float32"), network, "cpu0")
data2 = make_tensor(np.random.random((2, 2, 2)).astype("float32"), network, "cpu1")
data3 = make_tensor(np.random.random((2, 2, 2)).astype("float32"), network, "cpu0")
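    # Both concat and stack should honor an explicit `device=` argument and
    # otherwise infer the device when all inputs already live on the same one.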
    for func in [F.concat, F.stack]:
        out = func([data1, data2], device="cpu1")
        assert str(out.device).split(":")[0] == "cpu1"
        out = func([data1, data3])
        assert str(out.device).split(":")[0] == "cpu0"
        with pytest.raises(RuntimeError):
            try:
                out = func([data1, data2])
            except:
                raise RuntimeError("inputs have different devices")
@pytest.mark.parametrize("is_varnode", [True, False])
def test_stack(is_varnode):
if is_varnode:
network = Network()
else:
network = None
data1 = np.random.random((3, 2, 2)).astype("float32")
data2 = np.random.random((3, 2, 2)).astype("float32")
data3 = np.random.random((3, 2, 2)).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
for ai in range(3):
def run(data1, data2):
return F.stack([data1, data2], axis=ai)
opr_test(
cases, run, ref_fn=lambda x, y: np.stack([x, y], axis=ai), network=network
)
x1 = Tensor(np.arange(0, 3, dtype=np.float32).reshape((3)))
x2 = Tensor(np.arange(6, 9, dtype=np.float32).reshape((3)))
y = F.stack([x1, x2], axis=-1)
np.testing.assert_equal(
y.numpy(), np.array([[0, 6], [1, 7], [2, 8]]).astype(np.float32)
)
x1 = Tensor(np.random.rand(600))
x2 = F.broadcast_to(Tensor(np.array(3)), (600,))
y = F.stack([x2, x1], axis=0)
np.testing.assert_equal(y.numpy(), np.stack((x2.numpy(), x1.numpy()), axis=0))
y = F.stack([x2, x2], axis=0)
np.testing.assert_equal(y.numpy(), np.stack((x2.numpy(), x2.numpy()), axis=0))
@pytest.mark.parametrize("is_varnode", [True, False])
def test_split_basic(is_varnode):
if is_varnode:
network = Network()
saved_symbolic_shape = set_symbolic_shape(False)
else:
network = None
data = np.random.random((2, 3, 4, 5)).astype(np.float32)
inp = make_tensor(data, network)
mge_out0 = F.split(inp, 2, axis=3)
mge_out1 = F.split(inp, [3], axis=3)
np_out = np.split(data, [3, 5], axis=3)
assert len(mge_out0) == 2
assert len(mge_out1) == 2
np.testing.assert_equal(mge_out0[0].numpy(), np_out[0])
np.testing.assert_equal(mge_out1[0].numpy(), np_out[0])
np.testing.assert_equal(mge_out0[1].numpy(), np_out[1])
np.testing.assert_equal(mge_out1[1].numpy(), np_out[1])
try:
F.split(inp, 4)
assert False
except ValueError as e:
pass
try:
F.split(inp, [3, 2, 5], axis=3)
assert False
except ValueError as e:
assert str(e) == "Invalid nsplits_or_secions: [3, 2, 5]"
if is_varnode:
set_symbolic_shape(saved_symbolic_shape)
@pytest.mark.parametrize("symbolic", [None, False, True])
def test_split(symbolic):
x = Tensor(np.random.random((10, 20)), dtype=np.float32)
y = F.split(x, 3, axis=-1)
z = F.split(x, [6, 17], axis=-1)
assert str([i.numpy().shape for i in y]) == "[(10, 7), (10, 7), (10, 6)]"
assert str([i.numpy().shape for i in z]) == "[(10, 6), (10, 11), (10, 3)]"
inp1 = np.random.random((3, 4, 5, 6)).astype(np.float32)
inp2 = np.random.random((0, 4, 5, 6)).astype(np.float32)
def ref(inp, nsplits_or_sections, axis):
return np.split(inp, nsplits_or_sections, axis)
def func(inp, nsplits_or_sections, axis):
return F.split(inp, nsplits_or_sections, axis)
cases = [
(inp1, 2, 3),
(inp1, [3], 3),
(inp1, [3, 3, 5], 3),
(inp2, 2, 3),
(inp2, [3], 3),
(inp2, [3, 3, 5], 3),
]
for case in cases:
if symbolic is None:
fn = func
else:
fn = trace(symbolic=symbolic)(func)
for i in range(3 if symbolic is not None else 1):
ref_out = ref(*case)
out = fn(Tensor(case[0]), case[1], case[2])
assert len(ref_out) == len(out)
for idx in range(len(ref_out)):
np.testing.assert_equal(ref_out[idx], out[idx].numpy())
def test_gather():
x = Tensor([[1, 2], [3, 4], [5, 6],])
index = Tensor([[0, 1], [1, 0], [1, 1]])
y = F.gather(x, 1, index)
np.testing.assert_equal(
y.numpy(), np.array([[1, 2], [4, 3], [6, 6]]).astype(np.int32)
)
def test_scatter():
x = Tensor(np.zeros(shape=(3, 5), dtype=np.float32))
source = Tensor(
[
[0.9935, 0.9465, 0.2256, 0.8926, 0.4396],
[0.7723, 0.0718, 0.5939, 0.357, 0.4576],
]
)
index = Tensor([[0, 2, 0, 2, 1], [2, 0, 1, 1, 2]])
y = F.scatter(x, -2, index, source)
np.testing.assert_equal(
y.numpy().round(decimals=4),
np.array(
[
[0.9935, 0.0718, 0.2256, 0.0, 0.0],
[0.0, 0.0, 0.5939, 0.357, 0.4396],
[0.7723, 0.9465, 0.0, 0.8926, 0.4576],
]
).astype(np.float32),
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_swapaxes(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = Tensor(np.array([[1, 2, 3]], dtype=np.int32))
y = F.swapaxes(x, 0, 1)
np.testing.assert_equal(y.numpy(), np.array([[1], [2], [3]]).astype(np.int32))
@pytest.mark.parametrize("is_varnode", [True, False])
def test_reshape(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = np.arange(6, dtype="float32")
xx = make_tensor(x, network)
y = x.reshape(1, 2, 3)
for shape in [
(1, 2, 3),
(1, -1, 3),
(1, make_tensor(-1, network), 3),
np.array([1, -1, 3], dtype="int32"),
make_tensor([1, -1, 3], network),
]:
yy = F.reshape(xx, shape)
np.testing.assert_equal(yy.numpy(), y)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_broadcast_auto_infer(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = np.random.random((1, 2, 3)).astype(np.float32)
xx = make_tensor(x, network)
for shape in [
(1, 2, 3),
(1, None, 3),
]:
yy = F.broadcast_to(xx, shape)
np.testing.assert_equal(yy.numpy(), x)
with pytest.raises(ValueError):
F.broadcast_to(xx, (1, -1, 3))
with pytest.raises(ValueError):
F.broadcast_to(xx, (None, 1, 2, 3))
F.broadcast_to(xx, (1, None, 2, 3))
t = make_tensor(2, network)
F.broadcast_to(xx, (t, None, 2, 3))
@pytest.mark.parametrize("is_trace", [True, False])
def test_reshape_on_empty_tensor(is_trace):
input1_shape = (100, 0, 1)
output1_shape = (100, 0, 10)
data1 = Tensor(np.random.random(input1_shape).astype(np.float32))
input2_shape = (10, 0)
output2_shape = (0,)
data2 = Tensor(np.random.random(input2_shape).astype(np.float32))
input3_shape = (10, 0, 10)
output3_shape = (0, 1, 2, 3)
data3 = Tensor(np.random.random(input3_shape).astype(np.float32))
def comp(out, target_shp):
assert out._tuple_shape == target_shp
def func(x, shp):
return F.reshape(x, shp)
cases = [
[data1, output1_shape],
[data2, output2_shape],
[data3, output3_shape],
]
def test(func, inp, comp, target_shp):
out = func(inp, target_shp)
comp(out, target_shp)
if is_trace:
for symbolic in [False, True]:
for inp, target_shp in cases:
func_traced = trace(symbolic=symbolic)(func)
test(func_traced, inp, comp, target_shp)
test(func_traced, inp, comp, target_shp)
test(func_traced, inp, comp, target_shp)
else:
for inp, target_shp in cases:
test(func, inp, comp, target_shp)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_reshape_shape_inference(is_varnode):
if is_varnode:
network = Network()
saved_symbolic_shape = set_symbolic_shape(False)
else:
network = None
x_shape_known = make_tensor([1, 2, 3, 4], network)
x_shape_unknown = F.broadcast_to(
make_tensor([1.0], network), shape=make_tensor([1, 1, 1, 1], network).sum()
)
tshp_unknown = astensor1d(
(make_tensor([2], network), make_tensor([2], network)), x_shape_known
)
tshp_known = astensor1d((2, 2), x_shape_known)
tshp_known_unspec = astensor1d((2, -1), x_shape_known)
def check_shape(output, target):
source = output.shape
if isinstance(source, Tensor):
source = source.numpy()
np.testing.assert_equal(source, target.shape)
def func(x, target_shape):
return x.reshape(target_shape)
cases = [
{"input": [x_shape_known, tshp_unknown], "output": [np.zeros((2, 2)),]},
{"input": [x_shape_unknown, tshp_unknown], "output": [np.zeros((2, 2)),]},
{"input": [x_shape_known, tshp_known], "output": [np.zeros((2, 2)),]},
{"input": [x_shape_known, tshp_known_unspec], "output": [np.zeros((2, 2)),]},
{"input": [x_shape_unknown, tshp_known], "output": [np.zeros((2, 2)),]},
{"input": [x_shape_unknown, tshp_known_unspec], "output": [np.zeros((2, 2)),]},
]
opr_test(cases, func, compare_fn=check_shape, test_trace=True, network=network)
if is_varnode:
set_symbolic_shape(saved_symbolic_shape)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_squeeze(is_varnode):
if is_varnode:
network = Network()
saved_symbolic_shape = set_symbolic_shape(False)
else:
network = None
x = Tensor(np.array([1, 2], dtype=np.int32).reshape(1, 1, 2, 1))
y = F.squeeze(x, -1)
np.testing.assert_equal(y.numpy(), np.array([[[1, 2]]]).astype(np.int32))
x = np.arange(6, dtype="float32").reshape(1, 2, 3, 1)
xx = make_tensor(x, network)
for axis in [None, 3, -4, (3, -4)]:
y = np.squeeze(x, axis)
yy = F.squeeze(xx, axis)
np.testing.assert_equal(y, yy.numpy())
if is_varnode:
set_symbolic_shape(saved_symbolic_shape)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_expand_dims(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = Tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
y = F.expand_dims(x, -1)
np.testing.assert_equal(
y.numpy(), np.array([[[1], [2], [3]], [[4], [5], [6]]]).astype(np.int32)
)
x = np.arange(6, dtype="float32").reshape(2, 3)
xx = make_tensor(x, network)
for axis in [2, -3, (3, -4), (1, -4)]:
y = np.expand_dims(x, axis)
yy = F.expand_dims(xx, axis)
np.testing.assert_equal(y, yy.numpy())
def test_expand_dims_for_scalar():
x = np.array(1, dtype="float32")
xx = make_tensor(x, None)
for axis in [0, -1, (0, 1), (-1, -2), (0, -1)]:
y = np.expand_dims(x, axis)
yy = F.expand_dims(xx, axis)
np.testing.assert_equal(y, yy.numpy())
for axis in [1, -2, (1, 2), (-2, -3)]:
np.testing.assert_raises(np.AxisError, np.expand_dims, x, axis)
np.testing.assert_raises(RuntimeError, F.expand_dims, xx, axis)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_elemwise_dtype_promotion(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = np.random.rand(2, 3).astype("float32")
y = np.random.rand(1, 3).astype("float16")
xx = make_tensor(x, network)
yy = make_tensor(y, network)
z = xx * yy
np.testing.assert_equal(z.numpy(), x * y)
z = xx + y
np.testing.assert_equal(z.numpy(), x + y)
z = x - yy
np.testing.assert_equal(z.numpy(), x - y)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_linspace(is_varnode):
if is_varnode:
network = Network()
else:
network = None
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
network=network,
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
network=network,
)
cases = [
{"input": [1, make_tensor(9, network), 9]},
{"input": [make_tensor(1, network), 9, make_tensor(9, network)]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(1, 9, 9, dtype=np.float32),
network=network,
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_arange(is_varnode):
if is_varnode:
network = Network()
else:
network = None
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
network=network,
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
network=network,
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
network=network,
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_round(is_varnode):
if is_varnode:
network = Network()
else:
network = None
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round, network=network)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_flatten(is_varnode):
if is_varnode:
network = Network()
else:
network = None
inp_shape = (2, 2, 3, 3)
x = Tensor(np.arange(36, dtype=np.int32).reshape(inp_shape),)
y = F.flatten(x, -2, -1)
np.testing.assert_equal(
y.numpy(),
np.array(
[
[[0, 1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16, 17]],
[
[18, 19, 20, 21, 22, 23, 24, 25, 26],
[27, 28, 29, 30, 31, 32, 33, 34, 35],
],
]
).astype(np.int32),
)
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
cases = [
{"input": data0, "output": data0.flatten()},
{"input": data1, "output": data1.flatten()},
]
opr_test(cases, F.flatten, network=network)
cases = [
{"input": data0, "output": data0.reshape(2, -1)},
{"input": data1, "output": data1.reshape(4, -1)},
]
opr_test(cases, F.flatten, start_axis=1, network=network)
cases = [
{"input": data0, "output": data0.reshape(2, 3, -1)},
{"input": data1, "output": data1.reshape(4, 5, -1)},
]
opr_test(cases, F.flatten, start_axis=2, network=network)
cases = [
{"input": data0, "output": data0.reshape(2, -1, 5)},
{"input": data1, "output": data1.reshape(4, -1, 7)},
]
opr_test(
cases, F.flatten, start_axis=1, end_axis=2, network=network,
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_broadcast(is_varnode):
if is_varnode:
network = Network()
else:
network = None
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 1)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
input3_shape = (10, 10)
output3_shape = (10, 10)
data3 = np.random.random(input3_shape).astype(np.float32)
cases = [
{
"input": [data1, output1_shape],
"output": np.broadcast_to(data1, output1_shape),
},
{
"input": [data2, output2_shape],
"output": np.broadcast_to(data2, output2_shape),
},
{
"input": [data3, output3_shape],
"output": np.broadcast_to(data3, output3_shape),
},
]
opr_test(cases, F.broadcast_to, network=network)
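    # Error cases: broadcast_to cannot drop dimensions or resize an axis whose
    # source length is not 1, so each of the following shapes is rejected.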
x = F.ones((2, 1, 3))
with pytest.raises(RuntimeError):
F.broadcast_to(x, (2, 3, 4))
with pytest.raises(RuntimeError):
F.broadcast_to(x, (4, 1, 3))
with pytest.raises(RuntimeError):
F.broadcast_to(x, (1, 3))
@pytest.mark.parametrize("is_trace", [True, False])
def test_broadcast_on_empty_tensor(is_trace):
input1_shape = (100, 0, 1)
output1_shape = (100, 0, 10)
data1 = Tensor(np.random.random(input1_shape).astype(np.float32))
input2_shape = (10, 0)
output2_shape = (10, 10, 0)
data2 = Tensor(np.random.random(input2_shape).astype(np.float32))
input3_shape = (0, 0, 1, 10)
output3_shape = (10, 0, 0, 10, 10)
data3 = Tensor(np.random.random(input3_shape).astype(np.float32))
def comp(out, target_shp):
assert out._tuple_shape == target_shp
def func(x, shp):
return F.broadcast_to(x, shp)
cases = [
[data1, output1_shape],
[data2, output2_shape],
[data3, output3_shape],
]
def test(func, inp, comp, target_shp):
out = func(inp, target_shp)
comp(out, target_shp)
if is_trace:
for symbolic in [False, True]:
for inp, target_shp in cases:
func_traced = trace(symbolic=symbolic)(func)
test(func_traced, inp, comp, target_shp)
test(func_traced, inp, comp, target_shp)
test(func_traced, inp, comp, target_shp)
else:
for inp, target_shp in cases:
test(func, inp, comp, target_shp)
@pytest.mark.parametrize(
"input_shape, target_shapes",
[
((3,), [(2, 1, 3), (1, 2, 3), (2, 2, 3)]),
((1, 3, 1), [(2, None, 3), (3, None, 3), (1, None, 1)]),
],
)
@pytest.mark.parametrize("is_symbolic", [True, False])
def test_broadcast_on_trace(is_symbolic, input_shape, target_shapes):
x = F.ones(input_shape)
@trace(symbolic=is_symbolic)
def broadcast(inp, shape):
return F.broadcast_to(inp, shape)
for target_shape in target_shapes:
if None in target_shape:
symbolic_target_shape = tuple(
map(lambda x: None if x is None else Tensor(x), target_shape)
)
output = broadcast(x, symbolic_target_shape)
for i in range(len(target_shape)):
if target_shape[i] is not None:
assert output._tuple_shape[i] == target_shape[i]
else:
assert (
output._tuple_shape[i] == x._tuple_shape[i - len(target_shape)]
)
else:
symbolic_target_shape = Tensor(target_shape)
output = broadcast(x, symbolic_target_shape)
assert output._tuple_shape == target_shape
@pytest.mark.parametrize("is_varnode", [True, False])
def test_utils_astensor1d(is_varnode):
if is_varnode:
network = Network()
else:
network = None
reference = make_tensor(0, network)
# literal
x = [1, 2, 3]
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert isinstance(xx, type(reference))
np.testing.assert_equal(xx.numpy(), x)
# numpy array
x = np.asarray([1, 2, 3], dtype="int32")
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert isinstance(xx, type(reference))
np.testing.assert_equal(xx.numpy(), x.astype(dtype) if dtype else x)
# tensor
x = make_tensor([1, 2, 3], network)
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert isinstance(xx, type(reference))
np.testing.assert_equal(xx.numpy(), x.numpy())
# mixed
x = [1, make_tensor(2, network), 3]
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert isinstance(xx, type(reference))
np.testing.assert_equal(xx.numpy(), [1, 2, 3])
# varnode
if is_varnode:
a = np.array([[1, 2, 3], [4, 5, 6]]).astype("float32")
b = np.array([[True, False, True], [False, True, True]])
aa = make_tensor(a, network)
bb = make_tensor(b, network)
x, y = F.cond_take(bb, aa)
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert isinstance(xx, type(reference))
np.testing.assert_equal(get_var_value(xx), get_var_value(x))
def test_device():
x = Tensor([1, 2, 3], dtype="float32")
y1 = F.eye(x.shape, dtype="float32")
y2 = F.eye(x.shape, dtype="float32", device=None)
np.testing.assert_almost_equal(y1.numpy(), y2.numpy())
y3 = F.eye(x.shape, dtype="float32", device="xpux")
y4 = F.eye(x.shape, dtype="float32", device=x.device)
np.testing.assert_almost_equal(y3.numpy(), y4.numpy())
y5 = F.full((3, 2), 4, device=x.device)
y6 = F.full((3, 2), 4, device="xpux")
np.testing.assert_almost_equal(y5.numpy(), y6.numpy())
@pytest.mark.parametrize("is_varnode", [True, False])
def test_identity(is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = make_tensor(np.random.random((5, 10)).astype(np.float32), network)
y = F.copy(x)
np.testing.assert_equal(y.numpy(), x)
def copy_test(dst, src, network):
data = np.random.random((2, 3)).astype(np.float32)
x = make_tensor(data, device=src, network=network)
y = F.copy(x, dst)
assert np.allclose(data, y.numpy())
if network is None:
z = x.to(dst)
assert np.allclose(data, z.numpy())
@pytest.mark.require_ngpu(1)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_copy_h2d(is_varnode):
if is_varnode:
network = Network()
else:
network = None
copy_test("cpu0", "gpu0", network=network)
@pytest.mark.require_ngpu(1)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_copy_d2h(is_varnode):
if is_varnode:
network = Network()
else:
network = None
copy_test("gpu0", "cpu0", network=network)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_copy_d2d(is_varnode):
if is_varnode:
network = Network()
else:
network = None
copy_test("gpu0", "gpu1", network=network)
copy_test("gpu0:0", "gpu0:1", network=network)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape, device_src, device_dst",
[
((0,), "cpu0", "cpu0"),
((10, 0), "cpu0", "cpu1"),
((2, 0, 3), "cpu0", "gpu0"),
((1, 0, 1, 0), "gpu0", "cpu0"),
((2, 3, 4, 5, 0), "gpu0", "gpu1"),
],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_copy_empty(shape, device_src, device_dst, is_symbolic):
inp = Tensor(np.random.randn(*shape).astype("float32"), device=device_src)
def func(inp):
return F.copy(inp, device_dst)
if is_symbolic is not None:
func = trace(symbolic=is_symbolic)(func)
for _ in range(3):
out = func(inp)
assert out.numpy().shape == shape
assert out.device == device_dst
if is_symbolic is None:
break
@pytest.mark.parametrize(
"shape, repeats, axis",
[
((2,), 2, 0),
((2, 3, 4, 5), 3, 0),
((2, 3, 4, 5), 4, 3),
((2,), 2, None),
((2, 3, 4, 5), 3, None),
((), 1, None),
((), 10, None),
],
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_repeat(shape, repeats, axis, is_varnode):
if is_varnode:
network = Network()
else:
network = None
def repeat_func(inp):
return F.repeat(inp=inp, repeats=repeats, axis=axis)
if shape != ():
cases = [
{"input": np.random.randn(*shape).astype("float32")},
]
else:
cases = [{"input": np.array(1.23)}]
opr_test(
cases,
repeat_func,
ref_fn=lambda inp: np.repeat(inp, repeats, axis),
network=network,
)
@pytest.mark.parametrize(
"shape, reps",
[
((2,), (2,)),
((2, 3, 4, 5), (1, 1, 1, 1)),
((2, 3, 4, 5), (1, 2, 3, 4)),
# FIXME: tile does not support ndim 7
# ((2, 3, 4, 5), (2, 2, 2, 2, 2, 2, 2)),
],
)
@pytest.mark.parametrize("is_varnode", [True])
def test_tile(shape, reps, is_varnode):
if is_varnode:
network = Network()
else:
network = None
def tile_func(inp):
return F.tile(inp=inp, reps=reps)
cases = [{"input": np.random.randn(*shape).astype("float32")}]
opr_test(cases, tile_func, ref_fn=lambda inp: np.tile(inp, reps), network=network)
@pytest.mark.parametrize(
"shape, shifts, axis",
[
((2, 3), 0, None),
((2, 3), 1, 0),
((2, 3), 100, 0),
((2, 3), -100, 0),
((2, 3, 4, 5), (-1, 1), (0, 1)),
((2, 3, 4, 5), (-2, 1, 2), (1, 2, 3)),
],
)
@pytest.mark.parametrize("is_varnode", [True, False])
def test_roll(shape, shifts, axis, is_varnode):
if is_varnode:
network = Network()
else:
network = None
x = Tensor([[1, 2], [3, 4], [5, 6]], np.int32)
y = F.roll(x, 1, -1)
np.testing.assert_equal(
y.numpy(), np.array([[2, 1], [4, 3], [6, 5]]).astype(np.int32)
)
inp = np.random.randn(*shape).astype("float32")
def func(inp):
return F.roll(inp, shifts, axis)
cases = [
{"input": inp},
]
opr_test(
cases, func, ref_fn=lambda inp: np.roll(inp, shifts, axis), network=network
)
@pytest.mark.parametrize(
"shape, shifts, axis", [((10, 0), 5, 1), ((10, 0), -10, 1),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_roll_empty_tensor(shape, shifts, axis, is_symbolic):
inp = Tensor(np.random.randn(*shape).astype("float32"))
def func(inp):
return F.roll(inp, shifts, axis)
if is_symbolic is not None:
func = trace(symbolic=is_symbolic)(func)
out_ref = np.roll(inp.numpy(), shifts, axis)
for _ in range(3):
        out = func(inp)
np.testing.assert_equal(out.numpy(), out_ref)
if is_symbolic is None:
break
def test_polar():
def polar(abs, angle):
return F.polar(abs, angle)
def numpy_polar(abs, angle):
return abs * np.cos(angle) + abs * np.sin(angle) * 1j
cases = [{"input": [np.random.random((2, 3, 4)), np.random.random((2, 3, 4))]}]
# complex can not be trace output
opr_test(cases, polar, ref_fn=numpy_polar, test_trace=False)
def test_create_complex():
real = Tensor(np.arange(0, 6).reshape((1, 2, 3)).astype("float32"))
imag = Tensor(np.arange(0, 6).reshape((1, 2, 3)).astype("float32"))
complex = create_complex(real, imag)
np.testing.assert_allclose(complex.numpy(), real.numpy() + imag.numpy() * 1j)
|
d55326a7676a41567a5e45b876039dd1dc120c60 | cde096ba977b63becc1b9066677331ef4594a797 | /csfieldguide/chapters/migrations/0006_chapter_number.py | 24ac6360c07eb778fbe7cf436b2862fb7bba6ba3 | ["CC-BY-NC-SA-4.0", "BSD-3-Clause", "CC0-1.0", "ISC", "Unlicense", "LicenseRef-scancode-secret-labs-2011", "WTFPL", "Apache-2.0", "LGPL-3.0-only", "MIT", "CC-BY-SA-4.0", "LicenseRef-scancode-public-domain", "CC-BY-NC-2.5", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference"] | permissive | uccser/cs-field-guide | 655524b161fab0ab422679dd80720f660f2cfa98 | ea3281ec6f4d17538f6d3cf6f88d74fa54581b34 | refs/heads/develop | 2023-08-28T14:33:58.789843 | 2023-08-28T08:24:03 | 2023-08-28T08:24:03 | 34,356,619 | 364 | 97 | MIT | 2023-09-14T08:58:55 | 2015-04-21T23:00:26 | JavaScript | UTF-8 | Python | false | false | 504 | py | 0006_chapter_number.py |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-01 01:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chapters', '0005_delete_interactive'),
]
operations = [
migrations.AddField(
model_name='chapter',
name='number',
field=models.SmallIntegerField(default=1, unique=True),
preserve_default=False,
),
]
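# Note (not part of the generated migration): because the new `number` column is
# unique, the one-off default of 1 (discarded again via preserve_default=False)
# only applies cleanly if at most one Chapter row exists when this migration runs.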
|
efb71d90b0b9f4ca50189d04f3b49f6f438e2efe | fbd2702e8c45d5e6cec39877295ef45f2f61d426 | /tests/test_expressions.py | 698366f503eb9e7ed56010abb8005a196805889a | ["MIT"] | permissive | pynamodb/PynamoDB | 365961a5cd7b2d7e924f8edf367dde641d00914a | 335c7cde6732c5121347207e60479d96e47338f6 | refs/heads/master | 2023-08-22T06:19:59.762796 | 2023-07-18T05:37:13 | 2023-07-18T05:37:13 | 16,058,979 | 1,987 | 412 | MIT | 2023-08-29T14:48:51 | 2014-01-20T02:18:35 | Python | UTF-8 | Python | false | false | 30,876 | py | test_expressions.py |
from typing import Any
from typing import Dict
from unittest import TestCase
from pynamodb.attributes import ListAttribute, MapAttribute, NumberSetAttribute, UnicodeAttribute, UnicodeSetAttribute, \
NumberAttribute
from pynamodb.expressions.condition import Condition, size
from pynamodb.expressions.operand import Path, Value
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Action, Update
class PathTestCase(TestCase):
def test_document_path(self):
path = Path('foo.bar')
assert str(path) == 'foo.bar'
assert repr(path) == "Path(['foo', 'bar'])"
def test_attribute_name(self):
path = Path(['foo.bar'])
assert str(path) == "'foo.bar'"
assert repr(path) == "Path(['foo.bar'])"
def test_index_document_path(self):
path = Path('foo.bar')[0]
assert str(path) == 'foo.bar[0]'
assert repr(path) == "Path(['foo', 'bar[0]'])"
def test_index_attribute_name(self):
path = Path(['foo.bar'])[0]
assert str(path) == "'foo.bar'[0]"
assert repr(path) == "Path(['foo.bar[0]'])"
def test_index_map_attribute(self):
path = Path(['foo.bar'])['baz']
assert str(path) == "'foo.bar'.baz"
assert repr(path) == "Path(['foo.bar', 'baz'])"
def test_index_invalid(self):
with self.assertRaises(TypeError):
_ = Path('foo.bar')[0.0] # type: ignore
class ActionTestCase(TestCase):
def test_action(self):
action = Action(Path('foo.bar'))
action.format_string = '{0}'
assert repr(action) == 'foo.bar'
def test_action_eq(self):
action = Action(Path('foo.bar'))
assert action == action
action_eq = Action(Path('foo.bar'))
assert action == action_eq
action_not_eq = Action(Path('spam.ham'))
assert action != action_not_eq
attr_s = UnicodeAttribute(attr_name='foo')
assert attr_s.set('bar') == attr_s.set('bar')
assert attr_s.set('bar') != attr_s.set('baz')
attr_n = NumberAttribute(attr_name='num')
assert attr_n.add(42) == attr_n.add(42)
assert attr_n.add(42) != attr_n.set(42)
assert attr_n.add(42) != attr_n.add(7)
attr_s2 = UnicodeAttribute(attr_name='foo')
assert attr_s.set('bar') != attr_s2.set('bar')
class ProjectionExpressionTestCase(TestCase):
def setUp(self):
self.placeholders: Dict[str, str] = {}
def test_create_projection_expression(self):
attributes_to_get = ['Description', 'RelatedItems[0]', 'ProductReviews.FiveStar']
projection_expression = create_projection_expression(attributes_to_get, self.placeholders)
assert projection_expression == "#0, #1[0], #2.#3"
assert self.placeholders == {
'Description': '#0',
'RelatedItems': '#1',
'ProductReviews': '#2',
'FiveStar': '#3',
}
def test_create_projection_expression_repeated_names(self):
attributes_to_get = ['ProductReviews.FiveStar', 'ProductReviews.ThreeStar', 'ProductReviews.OneStar']
projection_expression = create_projection_expression(attributes_to_get, self.placeholders)
assert projection_expression == "#0.#1, #0.#2, #0.#3"
assert self.placeholders == {'ProductReviews': '#0', 'FiveStar': '#1', 'ThreeStar': '#2', 'OneStar': '#3'}
def test_create_projection_expression_invalid_attribute_raises(self):
invalid_attributes = ['', '[0]', 'foo[bar]', 'MyList[-1]', 'MyList[0.4]']
for attribute in invalid_attributes:
with self.assertRaises(ValueError):
create_projection_expression([attribute], {})
def test_create_project_expression_with_document_paths(self):
attributes_to_get = [Path('foo.bar')[0]]
projection_expression = create_projection_expression(attributes_to_get, self.placeholders)
assert projection_expression == "#0.#1[0]"
assert self.placeholders == {'foo': '#0', 'bar': '#1'}
def test_create_project_expression_with_attribute_names(self):
attributes_to_get = [Path(['foo.bar'])[0]]
projection_expression = create_projection_expression(attributes_to_get, self.placeholders)
assert projection_expression == "#0[0]"
assert self.placeholders == {'foo.bar': '#0'}
def test_create_projection_expression_with_attributes(self):
attributes_to_get = [
UnicodeAttribute(attr_name='ProductReviews.FiveStar'),
UnicodeAttribute(attr_name='ProductReviews.ThreeStar'),
UnicodeAttribute(attr_name='ProductReviews.OneStar')
]
projection_expression = create_projection_expression(attributes_to_get, self.placeholders)
assert projection_expression == "#0, #1, #2"
assert self.placeholders == {
'ProductReviews.FiveStar': '#0',
'ProductReviews.ThreeStar': '#1',
'ProductReviews.OneStar': '#2',
}
def test_create_projection_expression_not_a_list(self):
attributes_to_get = 'Description'
projection_expression = create_projection_expression(attributes_to_get, self.placeholders)
assert projection_expression == "#0"
assert self.placeholders == {'Description': '#0'}
class ConditionExpressionTestCase(TestCase):
def setUp(self):
self.attribute = UnicodeAttribute(attr_name='foo')
self.placeholder_names: Dict[str, str] = {}
self.expression_attribute_values: Dict[str, str] = {}
def test_condition_eq(self):
condition = self.attribute == 'foo'
condition_eq = self.attribute == 'foo'
condition_not_eq = self.attribute == 'bar'
assert condition == condition_eq
assert condition != condition_not_eq
different_attr = UnicodeAttribute(attr_name='foo')
condition_not_eq = different_attr == 'foo'
assert condition != condition_not_eq
def test_equal(self):
condition = self.attribute == 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_not_equal(self):
condition = self.attribute != 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 <> :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_less_than(self):
condition = self.attribute < 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 < :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_less_than_or_equal(self):
condition = self.attribute <= 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 <= :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_greater_than(self):
condition = self.attribute > 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 > :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_greater_than_or_equal(self):
condition = self.attribute >= 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 >= :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_between(self):
condition = self.attribute.between('bar', 'baz')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 BETWEEN :0 AND :1"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}, ':1': {'S': 'baz'}}
def test_in(self):
condition = self.attribute.is_in('bar', 'baz')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 IN (:0, :1)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}, ':1': {'S': 'baz'}}
def test_exists(self):
condition = self.attribute.exists()
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "attribute_exists (#0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {}
def test_does_not_exist(self):
condition = self.attribute.does_not_exist()
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "attribute_not_exists (#0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {}
def test_is_type(self):
condition = self.attribute.is_type()
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "attribute_type (#0, :0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'S'}}
def test_begins_with(self):
condition = self.attribute.startswith('bar')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "begins_with (#0, :0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_contains(self):
condition = self.attribute.contains('bar')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "contains (#0, :0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_contains_string_set(self):
condition = UnicodeSetAttribute(attr_name='foo').contains('bar')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "contains (#0, :0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_contains_number_set(self):
condition = NumberSetAttribute(attr_name='foo').contains(1)
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "contains (#0, :0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'N': '1'}}
def test_contains_list(self):
condition = ListAttribute(attr_name='foo').contains('bar')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "contains (#0, :0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_contains_attribute(self):
condition = ListAttribute(attr_name='foo').contains(Path('bar'))
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "contains (#0, #1)"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {}
def test_size(self):
condition = size(self.attribute) == 3
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "size (#0) = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'N': '3'}}
def test_sizes(self):
condition = size(self.attribute) == size(Path('bar'))
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "size (#0) = size (#1)"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {}
def test_and(self):
condition = (self.attribute < 'bar') & (self.attribute > 'baz')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "(#0 < :0 AND #0 > :1)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}, ':1': {'S': 'baz'}}
def test_invalid_and(self):
condition = self.attribute < 'bar'
with self.assertRaises(TypeError):
condition &= None
def test_rand(self):
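        # Starting from None exercises Condition.__rand__: `None & condition`
        # evaluates to the condition itself, so filters can be built up from None.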
condition = None
condition &= self.attribute < 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 < :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_invalid_rand(self):
condition = 42
with self.assertRaises(TypeError):
condition &= self.attribute < 'bar'
def test_or(self):
condition = (self.attribute < 'bar') | (self.attribute > 'baz')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "(#0 < :0 OR #0 > :1)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}, ':1': {'S': 'baz'}}
def test_invalid_or(self):
condition = self.attribute < 'bar'
with self.assertRaises(TypeError):
condition |= None
def test_not(self):
condition = ~(self.attribute < 'bar')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "(NOT #0 < :0)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_compound_logic(self):
condition = (~(self.attribute < 'bar') & (self.attribute > 'baz')) | (self.attribute == 'foo')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "(((NOT #0 < :0) AND #0 > :1) OR #0 = :2)"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}, ':1': {'S': 'baz'}, ':2': {'S': 'foo'}}
def test_indexing(self):
condition = ListAttribute(attr_name='foo')[0] == 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0[0] = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_invalid_indexing(self):
with self.assertRaises(TypeError):
_ = self.attribute[0]
def test_double_indexing(self):
condition = ListAttribute(attr_name='foo')[0][1] == 'bar'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0[0][1] = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_typed_list_indexing(self):
class StringMap(MapAttribute):
bar = UnicodeAttribute()
condition = ListAttribute(attr_name='foo', of=StringMap)[0].bar == 'baz' # type: ignore
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0[0].#1 = :0"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'S': 'baz'}}
def test_map_comparison(self):
# Simulate initialization from inside an AttributeContainer
my_map_attribute = MapAttribute[str, str](attr_name='foo')
my_map_attribute._make_attribute()
my_map_attribute._update_attribute_paths(my_map_attribute.attr_name)
condition = my_map_attribute == MapAttribute(bar='baz')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'M': {'bar': {'S': 'baz'}}}}
def test_map_comparison_rhs(self):
# Simulate initialization from inside an AttributeContainer
my_map_attribute = MapAttribute[str, str](attr_name='foo')
my_map_attribute._make_attribute()
my_map_attribute._update_attribute_paths(my_map_attribute.attr_name)
condition = MapAttribute(bar='baz') == my_map_attribute
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'M': {'bar': {'S': 'baz'}}}}
def test_list_comparison(self):
condition = ListAttribute(attr_name='foo') == ['bar', 'baz']
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'L': [{'S': 'bar'}, {'S': 'baz'}]}}
def test_dotted_attribute_name(self):
self.attribute.attr_name = 'foo.bar'
condition = self.attribute == 'baz'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0"
assert self.placeholder_names == {'foo.bar': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'baz'}}
def test_map_attribute_indexing(self):
# Simulate initialization from inside an AttributeContainer
my_map_attribute = MapAttribute[str, str](attr_name='foo.bar')
my_map_attribute._make_attribute()
my_map_attribute._update_attribute_paths(my_map_attribute.attr_name)
condition: Condition = my_map_attribute['foo'] == 'baz' # type: ignore
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0.#1 = :0"
assert self.placeholder_names == {'foo.bar': '#0', 'foo': '#1'}
assert self.expression_attribute_values == {':0': {'S': 'baz'}}
def test_map_attribute_dereference(self):
class MyMapAttribute(MapAttribute):
nested_string = self.attribute
# Simulate initialization from inside an AttributeContainer
my_map_attribute = MyMapAttribute(attr_name='foo.bar')
my_map_attribute._make_attribute()
my_map_attribute._update_attribute_paths(my_map_attribute.attr_name)
condition: Condition = my_map_attribute.nested_string == 'baz' # type: ignore
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0.#1 = :0"
assert self.placeholder_names == {'foo.bar': '#0', 'foo': '#1'}
assert self.expression_attribute_values == {':0': {'S': 'baz'}}
def test_map_attribute_dereference_via_indexing(self):
class MyMapAttribute(MapAttribute):
nested_string = self.attribute
# Simulate initialization from inside an AttributeContainer
my_map_attribute = MyMapAttribute(attr_name='foo.bar')
my_map_attribute._make_attribute()
my_map_attribute._update_attribute_paths(my_map_attribute.attr_name)
condition = my_map_attribute['nested_string'] == 'baz'
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0.#1 = :0"
assert self.placeholder_names == {'foo.bar': '#0', 'foo': '#1'}
assert self.expression_attribute_values == {':0': {'S': 'baz'}}
def test_map_attribute_dereference_via_indexing_missing_attribute(self):
class MyMapAttribute(MapAttribute):
nested_string = self.attribute
# Simulate initialization from inside an AttributeContainer
my_map_attribute = MyMapAttribute(attr_name='foo.bar')
my_map_attribute._make_attribute()
my_map_attribute._update_attribute_paths(my_map_attribute.attr_name)
with self.assertRaises(AttributeError):
_ = my_map_attribute['missing_attribute'] == 'baz'
def test_attribute_comparison(self):
condition = self.attribute == UnicodeAttribute(attr_name='bar')
expression = condition.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = #1"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {}
class UpdateExpressionTestCase(TestCase):
def setUp(self):
self.attribute = UnicodeAttribute(attr_name='foo')
self.set_attribute = NumberSetAttribute(attr_name='foo_set')
self.list_attribute = ListAttribute[Any](attr_name='foo_list')
self.placeholder_names: Dict[str, str] = {}
self.expression_attribute_values: Dict[str, str] = {}
def test_set_action(self):
action = self.attribute.set('bar')
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'S': 'bar'}}
def test_set_action_as_remove(self):
action = self.set_attribute.set([])
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0"
assert self.placeholder_names == {'foo_set': '#0'}
assert self.expression_attribute_values == {}
def test_set_action_attribute_container(self):
# Simulate initialization from inside an AttributeContainer
my_map_attribute = MapAttribute[str, str](attr_name='foo')
my_map_attribute._make_attribute()
my_map_attribute._update_attribute_paths(my_map_attribute.attr_name)
action = my_map_attribute.set(MapAttribute(bar='baz'))
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'M': {'bar': {'S': 'baz'}}}}
def test_increment_action(self):
action = self.attribute.set(Path('bar') + 0)
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = #1 + :0"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'N': '0'}}
def test_increment_action_value(self):
action = self.attribute.set(Value(0) + Path('bar'))
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0 + #1"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'N': '0'}}
def test_decrement_action(self):
action = self.attribute.set(Path('bar') - 0)
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = #1 - :0"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'N': '0'}}
def test_decrement_action_value(self):
action = self.attribute.set(Value(0) - Path('bar'))
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = :0 - #1"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'N': '0'}}
def test_append_action(self):
action = self.attribute.set(Path('bar').append(['baz']))
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = list_append (#1, :0)"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'L': [{'S': 'baz'}]}}
def test_prepend_action(self):
action = self.attribute.set(Path('bar').prepend(['baz']))
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = list_append (:0, #1)"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'L': [{'S': 'baz'}]}}
def test_conditional_set_action(self):
action = self.attribute.set(Path('bar') | 'baz')
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 = if_not_exists (#1, :0)"
assert self.placeholder_names == {'foo': '#0', 'bar': '#1'}
assert self.expression_attribute_values == {':0': {'S': 'baz'}}
def test_remove_action(self):
action = self.attribute.remove()
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {}
def test_remove_action_list_element(self):
action = self.list_attribute[10].remove()
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0[10]"
assert self.placeholder_names == {'foo_list': '#0'}
assert self.expression_attribute_values == {}
def test_add_action(self):
action = Path('foo').add(0)
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
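        # The bare "#0 :0" form is expected here: individual actions serialize
        # without the ADD/SET/REMOVE/DELETE keyword, which Update prepends when
        # composing the full expression (see test_update below).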
assert expression == "#0 :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'N': '0'}}
def test_add_action_set(self):
action = NumberSetAttribute(attr_name='foo').add(0, 1)
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'NS': ['0', '1']}}
def test_add_action_serialized(self):
action = NumberSetAttribute(attr_name='foo').add({'NS': ['0']})
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'NS': ['0']}}
def test_add_action_list(self):
with self.assertRaises(ValueError):
Path('foo').add({'L': [{'N': '0'}]})
def test_delete_action(self):
action = NumberSetAttribute(attr_name='foo').delete(0, 1)
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'NS': ['0', '1']}}
def test_delete_action_set(self):
action = NumberSetAttribute(attr_name='foo').delete({0, 1})
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'NS': ['0', '1']}}
def test_delete_action_serialized(self):
action = NumberSetAttribute(attr_name='foo').delete({'NS': ['0']})
expression = action.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "#0 :0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {':0': {'NS': ['0']}}
def test_delete_action_non_set(self):
with self.assertRaises(ValueError):
Path('foo').delete({'N': '0'})
def test_update(self):
update = Update(
self.attribute.set({'S': 'bar'}),
self.attribute.remove(),
self.set_attribute.add({'NS': ['0']}),
self.set_attribute.delete({'NS': ['1']})
)
expression = update.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "SET #0 = :0 REMOVE #0 ADD #1 :1 DELETE #1 :2"
assert self.placeholder_names == {'foo': '#0', 'foo_set': '#1'}
assert self.expression_attribute_values == {
':0': {'S': 'bar'},
':1': {'NS': ['0']},
':2': {'NS': ['1']}
}
def test_update_set_to_empty(self):
update = Update(
self.set_attribute.set([]),
)
expression = update.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "REMOVE #0"
assert self.placeholder_names == {'foo_set': '#0'}
assert self.expression_attribute_values == {}
def test_update_skips_empty_clauses(self):
update = Update(self.attribute.remove())
expression = update.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression == "REMOVE #0"
assert self.placeholder_names == {'foo': '#0'}
assert self.expression_attribute_values == {}
def test_update_empty(self):
update = Update()
expression = update.serialize(self.placeholder_names, self.expression_attribute_values)
assert expression is None
assert self.placeholder_names == {}
assert self.expression_attribute_values == {}
|
56f937ba3b0b0f2c6789f7b035c7ab06752ee842 | d139ef8d18fcde584b06c1d7d25477d7d31ee59b | /google/ads/googleads/v14/resources/types/google_ads_field.py | 4b6069782ab5ad0aac6903f1d4a25c337ed7bfda | ["Apache-2.0"] | permissive | googleads/google-ads-python | a53993e6be057d3aa61f276b69e97b8b338d1c12 | 146d7070c1ea2140555d49d73c77892430b37314 | refs/heads/main | 2023-08-31T01:58:16.738997 | 2023-06-05T08:18:42 | 2023-08-28T19:08:38 | 143,435,091 | 422 | 525 | Apache-2.0 | 2023-09-12T17:46:52 | 2018-08-03T14:08:04 | Python | UTF-8 | Python | false | false | 6,179 | py | google_ads_field.py |
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableSequence
import proto # type: ignore
from google.ads.googleads.v14.enums.types import google_ads_field_category
from google.ads.googleads.v14.enums.types import google_ads_field_data_type
__protobuf__ = proto.module(
package="google.ads.googleads.v14.resources",
marshal="google.ads.googleads.v14",
manifest={
"GoogleAdsField",
},
)
class GoogleAdsField(proto.Message):
r"""A field or resource (artifact) used by GoogleAdsService.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
resource_name (str):
Output only. The resource name of the artifact. Artifact
resource names have the form:
``googleAdsFields/{name}``
name (str):
Output only. The name of the artifact.
This field is a member of `oneof`_ ``_name``.
category (google.ads.googleads.v14.enums.types.GoogleAdsFieldCategoryEnum.GoogleAdsFieldCategory):
Output only. The category of the artifact.
selectable (bool):
Output only. Whether the artifact can be used
in a SELECT clause in search queries.
This field is a member of `oneof`_ ``_selectable``.
filterable (bool):
Output only. Whether the artifact can be used
in a WHERE clause in search queries.
This field is a member of `oneof`_ ``_filterable``.
sortable (bool):
Output only. Whether the artifact can be used
in an ORDER BY clause in search queries.
This field is a member of `oneof`_ ``_sortable``.
selectable_with (MutableSequence[str]):
Output only. The names of all resources,
segments, and metrics that are selectable with
the described artifact.
attribute_resources (MutableSequence[str]):
Output only. The names of all resources that
are selectable with the described artifact.
Fields from these resources do not segment
metrics when included in search queries.
This field is only set for artifacts whose
category is RESOURCE.
metrics (MutableSequence[str]):
Output only. This field lists the names of
all metrics that are selectable with the
described artifact when it is used in the FROM
clause. It is only set for artifacts whose
category is RESOURCE.
segments (MutableSequence[str]):
Output only. This field lists the names of
all artifacts, whether a segment or another
resource, that segment metrics when included in
search queries and when the described artifact
is used in the FROM clause. It is only set for
artifacts whose category is RESOURCE.
enum_values (MutableSequence[str]):
Output only. Values the artifact can assume
if it is a field of type ENUM.
This field is only set for artifacts of category
SEGMENT or ATTRIBUTE.
data_type (google.ads.googleads.v14.enums.types.GoogleAdsFieldDataTypeEnum.GoogleAdsFieldDataType):
Output only. This field determines the
operators that can be used with the artifact in
WHERE clauses.
type_url (str):
Output only. The URL of proto describing the
artifact's data type.
This field is a member of `oneof`_ ``_type_url``.
is_repeated (bool):
Output only. Whether the field artifact is
repeated.
This field is a member of `oneof`_ ``_is_repeated``.
"""
resource_name: str = proto.Field(
proto.STRING,
number=1,
)
name: str = proto.Field(
proto.STRING,
number=21,
optional=True,
)
category: google_ads_field_category.GoogleAdsFieldCategoryEnum.GoogleAdsFieldCategory = proto.Field(
proto.ENUM,
number=3,
enum=google_ads_field_category.GoogleAdsFieldCategoryEnum.GoogleAdsFieldCategory,
)
selectable: bool = proto.Field(
proto.BOOL,
number=22,
optional=True,
)
filterable: bool = proto.Field(
proto.BOOL,
number=23,
optional=True,
)
sortable: bool = proto.Field(
proto.BOOL,
number=24,
optional=True,
)
selectable_with: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=25,
)
attribute_resources: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=26,
)
metrics: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=27,
)
segments: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=28,
)
enum_values: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=29,
)
data_type: google_ads_field_data_type.GoogleAdsFieldDataTypeEnum.GoogleAdsFieldDataType = proto.Field(
proto.ENUM,
number=12,
enum=google_ads_field_data_type.GoogleAdsFieldDataTypeEnum.GoogleAdsFieldDataType,
)
type_url: str = proto.Field(
proto.STRING,
number=30,
optional=True,
)
is_repeated: bool = proto.Field(
proto.BOOL,
number=31,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
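# A hypothetical construction sketch for the proto-plus message defined above; the field
# values below are illustrative placeholders, not data returned by the Google Ads API.
def _example_google_ads_field() -> GoogleAdsField:
    return GoogleAdsField(
        resource_name="googleAdsFields/campaign.name",
        name="campaign.name",
        selectable=True,
        filterable=True,
        sortable=True,
    )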
|
a3d80089ac2e850fa5304023e43ed71333948028
|
831c6ac1fa3253a5ef49b493ab211590d39e220e
|
/platforms/windows-64/SConscript
|
db11486675a8ace59bf23d2bd0a716e66fee53b4
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
touilleMan/godot-python
|
a61bd686e929f62962b196243c0edf17b662271f
|
b9757da859a4d9fae86c330224881738d6b97392
|
refs/heads/master
| 2023-09-04T15:02:44.858077
| 2022-08-20T13:22:02
| 2022-08-20T13:22:02
| 69,164,674
| 1,766
| 168
|
NOASSERTION
| 2023-02-11T21:26:19
| 2016-09-25T13:03:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,313
|
SConscript
|
import zstandard
import tarfile
import json
import shutil
import subprocess
from pathlib import Path
Import("env")
cpython_build = Dir("cpython_build")
env["bits"] = "64"
env["godot_binary_download_platform"] = "win64.exe"
env["cpython_build"] = cpython_build
env["cpython_build_dir"] = cpython_build
env["DIST_SITE_PACKAGES"] = Dir(f"{env['DIST_PLATFORM']}/Lib/site-packages")
### Build config for pythonscript ###
# Cannot use CPPPATH & LIBPATH here given the headers are within the `cpython_build` target,
# so SCons would consider the headers a missing target
env.AppendUnique(CFLAGS=[f"-I{cpython_build.abspath}/include"])
env.AppendUnique(LINKFLAGS=[f"/LIBPATH:{cpython_build.abspath}/libs"])
env.AppendUnique(CYTHON_COMPILE_DEPS=[cpython_build])
### Fetch Python prebuild ###
CPYTHON_PREBUILD_URL = "https://github.com/indygreg/python-build-standalone/releases/download/20200830/cpython-3.8.5-x86_64-pc-windows-msvc-shared-pgo-20200830T2254.tar.zst"
cpython_prebuild_archive = env.Download(
target=File(CPYTHON_PREBUILD_URL.rsplit("/", 1)[1]), url=CPYTHON_PREBUILD_URL
)
env.NoClean(cpython_prebuild_archive)
### Extract prebuild ###
def extract_cpython_prebuild(target, source, env):
archive_path = source[0].abspath
target_path = target[0].abspath
with open(archive_path, "rb") as fh:
dctx = zstandard.ZstdDecompressor()
with dctx.stream_reader(fh) as reader:
with tarfile.open(mode="r|", fileobj=reader) as tf:
tf.extractall(target_path)
cpython_prebuild_src = env.Command(
Dir("cpython_prebuild"), cpython_prebuild_archive, extract_cpython_prebuild
)
env.NoClean(cpython_prebuild_src)
### Generate custom build from the prebuild ###
def generate_cpython_build(target, source, env):
build = Path(target[0].abspath)
prebuild = Path(source[0].abspath) / "python"
conf = json.loads((prebuild / "PYTHON.json").read_text())
assert conf["version"] == "5"
assert conf["libpython_link_mode"] == "shared"
assert conf["target_triple"] == "x86_64-pc-windows-msvc"
shutil.copytree(str(prebuild / "install"), str(build), symlinks=True)
shutil.copytree(str(prebuild / "licenses"), str(build / "licenses"), symlinks=True)
stdlib_path = build / "Lib"
# Remove tests lib (pretty big and basically useless)
shutil.rmtree(str(stdlib_path / "test"))
# Remove .pdb debug symbols
for pdbfile in (build / "DLLs").glob("*.pdb"):
pdbfile.unlink()
# Also remove __pycache__ & .pyc stuff
for pycache in stdlib_path.glob("**/__pycache__"):
shutil.rmtree(str(pycache))
# Make sure site-packages is empty to avoid including pip (ensurepip should be used instead)
shutil.rmtree(str(stdlib_path / "site-packages"))
# Zip the stdlib to save plenty of space \o/
if env["compressed_stdlib"]:
shutil.make_archive(base_name=build / "python38", format="zip", root_dir=str(stdlib_path))
shutil.rmtree(str(stdlib_path))
stdlib_path.mkdir()
# Oddly enough, os.py must be present (even if empty!) otherwise
# Python fails to find its home...
(stdlib_path / "os.py").touch()
(stdlib_path / "site-packages").mkdir()
env.Command(cpython_build, cpython_prebuild_src, generate_cpython_build)
env.NoClean(cpython_build)
|
|
e21dcdf5810279381f2617911ce2aabb071f802a
|
b5ce6908490cfb8e6a1e1cbe4745d675122ddce0
|
/questions/combination-sum-iv/Solution.py
|
05f19a9f1046663685a097106615a806a5e3cc67
|
[
"MIT"
] |
permissive
|
franklingu/leetcode-solutions
|
8895910f13208e1d8e604100d84c2dd35684cde4
|
7ad7e5c1c040510b7b7bd225ed4297054464dbc6
|
refs/heads/master
| 2023-01-09T01:34:08.097518
| 2023-01-02T02:05:35
| 2023-01-02T02:05:35
| 43,345,677
| 155
| 66
|
MIT
| 2020-10-02T03:41:36
| 2015-09-29T04:54:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
Solution.py
|
"""
Given an integer array with all positive numbers and no duplicates, find the number of possible combinations that add up to a positive integer target.
Example:
nums = [1, 2, 3]
target = 4
The possible combination ways are:
(1, 1, 1, 1)
(1, 1, 2)
(1, 2, 1)
(1, 3)
(2, 1, 1)
(2, 2)
(3, 1)
Note that different sequences are counted as different combinations.
Therefore the output is 7.
Follow up:
What if negative numbers are allowed in the given array?
How does it change the problem?
What limitation do we need to add to the question to allow negative numbers?
Credits:
Special thanks to @pbrother for adding this problem and creating all test cases.
"""
from typing import List
class Solution:
def combinationSum4(self, nums: List[int], target: int) -> int:
def get_num_of_ways(nums, target, track):
if target < 0:
return 0
elif target == 0:
return 1
if target in track:
return track[target]
ss = 0
for n in nums:
ss += get_num_of_ways(nums, target - n, track)
track[target] = ss
return ss
if target == 0:
return 0
track = {}
return get_num_of_ways(nums, target, track)
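# A small usage sketch of the memoized solution above; the numbers mirror the example in
# the problem statement, and the scaffolding below is not part of the original submission.
if __name__ == "__main__":
    solver = Solution()
    print(solver.combinationSum4([1, 2, 3], 4))  # expected: 7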
|
c4231224fbe39e55ae5f4d9c14f5a99dce586e43
|
4d6935a26f211987f54b980fe174971e4a1366e8
|
/ddparser/run.py
|
d6f00f4ba03df25a047c4d9eb3f37d25a915d197
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
baidu/DDParser
|
17e6d7c653172a93a30caa5aa52c01264067d41a
|
144c09bd058cea53810a45789812aa50aa0f711c
|
refs/heads/master
| 2023-08-29T00:02:21.543273
| 2023-02-05T03:05:09
| 2023-02-05T03:05:57
| 284,605,403
| 946
| 165
|
Apache-2.0
| 2022-10-29T16:12:08
| 2020-08-03T04:45:58
|
Python
|
UTF-8
|
Python
| false
| false
| 20,002
|
py
|
run.py
|
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import sys
import os
import datetime
import logging
import math
import six
import paddle
from six.moves import input
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
try:
reload(sys)
sys.setdefaultencoding('utf8')
except:
pass
import LAC
import numpy as np
import paddle.distributed as dist
from paddle import fluid
from paddle.fluid import dygraph
from paddle.fluid import layers
from ddparser.ernie.optimization import AdamW
from ddparser.ernie.optimization import LinearDecay
from ddparser.parser import epoch_train
from ddparser.parser import epoch_evaluate
from ddparser.parser import epoch_predict
from ddparser.parser import save
from ddparser.parser import load
from ddparser.parser import decode
from ddparser.parser import ArgConfig
from ddparser.parser import Environment
from ddparser.parser import Model
from ddparser.parser.data_struct import Corpus
from ddparser.parser.data_struct import TextDataset
from ddparser.parser.data_struct import batchify
from ddparser.parser.data_struct import Field
from ddparser.parser.data_struct import utils
from ddparser.parser.data_struct import Metric
"""
Program entry point; defines the training, evaluation, and prediction functions.
"""
def train(env):
"""Train"""
args = env.args
logging.info("loading data.")
train = Corpus.load(args.train_data_path, env.fields)
dev = Corpus.load(args.valid_data_path, env.fields)
test = Corpus.load(args.test_data_path, env.fields)
logging.info("init dataset.")
train = TextDataset(train, env.fields, args.buckets)
dev = TextDataset(dev, env.fields, args.buckets)
test = TextDataset(test, env.fields, args.buckets)
logging.info("set the data loaders.")
train.loader = batchify(train, args.batch_size, args.use_data_parallel, True)
dev.loader = batchify(dev, args.batch_size)
test.loader = batchify(test, args.batch_size)
logging.info("{:6} {:5} sentences, ".format('train:', len(train)) + "{:3} batches, ".format(len(train.loader)) +
"{} buckets".format(len(train.buckets)))
logging.info("{:6} {:5} sentences, ".format('dev:', len(dev)) + "{:3} batches, ".format(len(dev.loader)) +
"{} buckets".format(len(dev.buckets)))
logging.info("{:6} {:5} sentences, ".format('test:', len(test)) + "{:3} batches, ".format(len(test.loader)) +
"{} buckets".format(len(test.buckets)))
logging.info("Create the model")
model = Model(args)
# init parallel strategy
if args.use_data_parallel:
dist.init_parallel_env()
model = paddle.DataParallel(model)
if args.encoding_model.startswith(
"ernie") and args.encoding_model != "ernie-lstm" or args.encoding_model == 'transformer':
args['lr'] = args.ernie_lr
else:
args['lr'] = args.lstm_lr
if args.encoding_model.startswith("ernie") and args.encoding_model != "ernie-lstm":
max_steps = 100 * len(train.loader)
decay = LinearDecay(args.lr, int(args.warmup_proportion * max_steps), max_steps)
else:
decay = dygraph.ExponentialDecay(learning_rate=args.lr, decay_steps=args.decay_steps, decay_rate=args.decay)
grad_clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=args.clip)
if args.encoding_model.startswith("ernie") and args.encoding_model != "ernie-lstm":
optimizer = AdamW(
learning_rate=decay,
parameter_list=model.parameters(),
weight_decay=args.weight_decay,
grad_clip=grad_clip,
)
else:
optimizer = fluid.optimizer.AdamOptimizer(
learning_rate=decay,
beta1=args.mu,
beta2=args.nu,
epsilon=args.epsilon,
parameter_list=model.parameters(),
grad_clip=grad_clip,
)
total_time = datetime.timedelta()
best_e, best_metric = 1, Metric()
puncts = dygraph.to_variable(env.puncts, zero_copy=False)
logging.info("start training.")
for epoch in range(1, args.epochs + 1):
start = datetime.datetime.now()
# train one epoch and update the parameter
logging.info("Epoch {} / {}:".format(epoch, args.epochs))
epoch_train(args, model, optimizer, train.loader, epoch)
if args.local_rank == 0:
loss, dev_metric = epoch_evaluate(args, model, dev.loader, puncts)
logging.info("{:6} Loss: {:.4f} {}".format('dev:', loss, dev_metric))
loss, test_metric = epoch_evaluate(args, model, test.loader, puncts)
logging.info("{:6} Loss: {:.4f} {}".format('test:', loss, test_metric))
t = datetime.datetime.now() - start
# save the model if it is the best so far
if dev_metric > best_metric and epoch > args.patience // 10:
best_e, best_metric = epoch, dev_metric
save(args.model_path, args, model, optimizer)
logging.info("{}s elapsed (saved)\n".format(t))
else:
logging.info("{}s elapsed\n".format(t))
total_time += t
if epoch - best_e >= args.patience:
break
if args.local_rank == 0:
model = load(args.model_path, model)
loss, metric = epoch_evaluate(args, model, test.loader, puncts)
logging.info("max score of dev is {:.2%} at epoch {}".format(best_metric.score, best_e))
logging.info("the score of test at epoch {} is {:.2%}".format(best_e, metric.score))
logging.info("average time of each epoch is {}s".format(total_time / epoch))
logging.info("{}s elapsed".format(total_time))
def evaluate(env):
"""Evaluate"""
args = env.args
puncts = dygraph.to_variable(env.puncts, zero_copy=False)
logging.info("Load the dataset")
evaluates = Corpus.load(args.test_data_path, env.fields)
dataset = TextDataset(evaluates, env.fields, args.buckets)
# set the data loader
dataset.loader = batchify(dataset, args.batch_size)
logging.info("{} sentences, ".format(len(dataset)) + "{} batches, ".format(len(dataset.loader)) +
"{} buckets".format(len(dataset.buckets)))
logging.info("Load the model")
model = load(args.model_path)
logging.info("Evaluate the dataset")
start = datetime.datetime.now()
loss, metric = epoch_evaluate(args, model, dataset.loader, puncts)
total_time = datetime.datetime.now() - start
logging.info("Loss: {:.4f} {}".format(loss, metric))
logging.info("{}s elapsed, {:.2f} Sents/s".format(total_time, len(dataset) / total_time.total_seconds()))
def predict(env):
"""Predict"""
args = env.args
logging.info("Load the dataset")
if args.prob:
env.fields = env.fields._replace(PHEAD=Field("prob"))
predicts = Corpus.load(args.infer_data_path, env.fields)
dataset = TextDataset(predicts, [env.WORD, env.FEAT], args.buckets)
# set the data loader
dataset.loader = batchify(dataset, args.batch_size)
logging.info("{} sentences, {} batches".format(len(dataset), len(dataset.loader)))
logging.info("Load the model")
model = load(args.model_path)
model.args = args
logging.info("Make predictions on the dataset")
start = datetime.datetime.now()
model.eval()
pred_arcs, pred_rels, pred_probs = epoch_predict(env, args, model, dataset.loader)
total_time = datetime.datetime.now() - start
# restore the order of sentences in the buckets
indices = np.argsort(np.array([i for bucket in dataset.buckets.values() for i in bucket]))
predicts.head = [pred_arcs[i] for i in indices]
predicts.deprel = [pred_rels[i] for i in indices]
if args.prob:
predicts.prob = [pred_probs[i] for i in indices]
logging.info("Save the predicted result to {}".format(args.infer_result_path))
predicts.save(args.infer_result_path)
logging.info("{}s elapsed, {:.2f} Sents/s".format(total_time, len(dataset) / total_time.total_seconds()))
def predict_query(env):
"""Predict one query"""
args = env.args
logging.info("Load the model")
model = load(args.model_path)
model.eval()
lac_mode = "seg" if args.feat != "pos" else "lac"
lac = LAC.LAC(mode=lac_mode)
if args.prob:
env.fields = env.fields._replace(PHEAD=Field("prob"))
while True:
query = input()
if isinstance(query, six.text_type):
pass
else:
query = query.decode("utf-8")
if not query:
logging.info("quit!")
return
if len(query) > 200:
logging.info("The length of the query should be less than 200!")
continue
start = datetime.datetime.now()
lac_results = lac.run([query])
predicts = Corpus.load_lac_results(lac_results, env.fields)
dataset = TextDataset(predicts, [env.WORD, env.FEAT])
# set the data loader
dataset.loader = batchify(dataset, args.batch_size, use_multiprocess=False, sequential_sampler=True)
pred_arcs, pred_rels, pred_probs = epoch_predict(env, args, model, dataset.loader)
predicts.head = pred_arcs
predicts.deprel = pred_rels
if args.prob:
predicts.prob = pred_probs
predicts._print()
total_time = datetime.datetime.now() - start
logging.info("{}s elapsed, {:.2f} Sents/s, {:.2f} ms/Sents".format(
total_time,
len(dataset) / total_time.total_seconds(),
total_time.total_seconds() / len(dataset) * 1000))
class DDParser(object):
"""
DDParser
Args:
use_cuda: BOOL, whether to use the GPU
tree: BOOL, whether to return a tree structure
prob: BOOL, whether to return the probability of each arc
use_pos: BOOL, whether to return POS tags (only effective in the parse function)
model_files_path: str, path to the model files; the default model is downloaded when None
buckets: BOOL, whether to bucket the samples. If buckets=True, inputs are bucketed by length, which speeds up processing of inputs with uneven lengths. default=False
batch_size: INT, batch size. When buckets is False, every batch contains exactly batch_size sentences; when buckets is True, each batch contains roughly 'batch_size / average sentence length of the current bucket' sentences.
When left as the default None, batch_size defaults to 1000 with bucketing and to 50 without it.
encoding_model: specifies the model; options include lstm, transformer, ernie-1.0, ernie-tiny, etc.
"""
def __init__(
self,
use_cuda=False,
tree=True,
prob=False,
use_pos=False,
model_files_path=None,
buckets=False,
batch_size=None,
encoding_model="ernie-lstm",
):
if model_files_path is None:
if encoding_model in ["lstm", "transformer", "ernie-1.0", "ernie-tiny", "ernie-lstm"]:
model_files_path = self._get_abs_path(os.path.join("./model_files/", encoding_model))
else:
raise KeyError("Unknown encoding model.")
if not os.path.exists(model_files_path):
try:
utils.download_model_from_url(model_files_path, encoding_model)
except Exception as e:
logging.error("Failed to download model, please try again")
logging.error("error: {}".format(e))
raise e
args = [
"--model_files={}".format(model_files_path), "--config_path={}".format(self._get_abs_path('config.ini')),
"--encoding_model={}".format(encoding_model)
]
if use_cuda:
args.append("--use_cuda")
if tree:
args.append("--tree")
if prob:
args.append("--prob")
if batch_size:
args.append("--batch_size={}".format(batch_size))
args = ArgConfig(args)
# Don't instantiate the log handle
args.log_path = None
self.env = Environment(args)
self.args = self.env.args
paddle.set_device(self.env.place)
self.model = load(self.args.model_path)
self.model.eval()
self.lac = None
self.use_pos = use_pos
# buckets=None if not buckets else defaults
if not buckets:
self.args.buckets = None
if args.prob:
self.env.fields = self.env.fields._replace(PHEAD=Field("prob"))
if self.use_pos:
self.env.fields = self.env.fields._replace(CPOS=Field("postag"))
# set default batch size if batch_size is None and not buckets
if batch_size is None and not buckets:
self.args.batch_size = 50
def parse(self, inputs):
"""
Parse sentences that have not been word-segmented.
Args:
x: list(str) | str, unsegmented sentences, of type str or list
Returns:
outputs: list, dependency parsing results
Example:
>>> ddp = DDParser()
>>> inputs = "百度是一家高科技公司"
>>> ddp.parse(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB']}]
>>> inputs = ["百度是一家高科技公司", "他送了一本书"]
>>> ddp.parse(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB']},
{'word': ['他', '送', '了', '一本', '书'], 'head': [2, 0, 2, 5, 2], 'deprel': ['SBV', 'HED', 'MT', 'ATT', 'VOB']}]
>>> ddp = DDParser(prob=True, use_pos=True)
>>> inputs = "百度是一家高科技公司"
>>> ddp.parse(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'postag': ['ORG', 'v', 'm', 'n', 'n'],
'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB'], 'prob': [1.0, 1.0, 1.0, 1.0, 1.0]}]
"""
if not self.lac:
self.lac = LAC.LAC(mode="lac" if self.use_pos else "seg", use_cuda=self.args.use_cuda)
if not inputs:
return
if isinstance(inputs, six.string_types):
inputs = [inputs]
if all([isinstance(i, six.string_types) and i for i in inputs]):
lac_results = []
position = 0
try:
inputs = [query if isinstance(query, six.text_type) else query.decode("utf-8") for query in inputs]
except UnicodeDecodeError:
logging.warning("encoding only supports UTF-8!")
return
while position < len(inputs):
lac_results += self.lac.run(inputs[position:position + self.args.batch_size])
position += self.args.batch_size
predicts = Corpus.load_lac_results(lac_results, self.env.fields)
else:
logging.warning("please check the format of your inputs.")
return
dataset = TextDataset(predicts, [self.env.WORD, self.env.FEAT], self.args.buckets)
# set the data loader
dataset.loader = batchify(
dataset,
self.args.batch_size,
use_multiprocess=False,
sequential_sampler=True if not self.args.buckets else False,
)
pred_arcs, pred_rels, pred_probs = epoch_predict(self.env, self.args, self.model, dataset.loader)
if self.args.buckets:
indices = np.argsort(np.array([i for bucket in dataset.buckets.values() for i in bucket]))
else:
indices = range(len(pred_arcs))
predicts.head = [pred_arcs[i] for i in indices]
predicts.deprel = [pred_rels[i] for i in indices]
if self.args.prob:
predicts.prob = [pred_probs[i] for i in indices]
outputs = predicts.get_result()
return outputs
def parse_seg(self, inputs):
"""
Parse sentences that have already been word-segmented.
Args:
x: list(list(str)), word-segmented sentences, of type list
Returns:
outputs: list, dependency parsing results
Example:
>>> ddp = DDParser()
>>> inputs = [['百度', '是', '一家', '高科技', '公司'], ['他', '送', '了', '一本', '书']]
>>> ddp.parse_seg(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2], 'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB']},
{'word': ['他', '送', '了', '一本', '书'], 'head': [2, 0, 2, 5, 2], 'deprel': ['SBV', 'HED', 'MT', 'ATT', 'VOB']}]
>>> ddp = DDParser(prob=True)
>>> inputs = [['百度', '是', '一家', '高科技', '公司']]
>>> ddp.parse_seg(inputs)
[{'word': ['百度', '是', '一家', '高科技', '公司'], 'head': [2, 0, 5, 5, 2],
'deprel': ['SBV', 'HED', 'ATT', 'ATT', 'VOB'], 'prob': [1.0, 1.0, 1.0, 1.0, 1.0]}]
"""
if not inputs:
return
if all([isinstance(i, list) and i and all(i) for i in inputs]):
predicts = Corpus.load_word_segments(inputs, self.env.fields)
else:
logging.warning("please check the format of your inputs.")
return
dataset = TextDataset(predicts, [self.env.WORD, self.env.FEAT], self.args.buckets)
# set the data loader
dataset.loader = batchify(
dataset,
self.args.batch_size,
use_multiprocess=False,
sequential_sampler=True if not self.args.buckets else False,
)
pred_arcs, pred_rels, pred_probs = epoch_predict(self.env, self.args, self.model, dataset.loader)
if self.args.buckets:
indices = np.argsort(np.array([i for bucket in dataset.buckets.values() for i in bucket]))
else:
indices = range(len(pred_arcs))
predicts.head = [pred_arcs[i] for i in indices]
predicts.deprel = [pred_rels[i] for i in indices]
if self.args.prob:
predicts.prob = [pred_probs[i] for i in indices]
outputs = predicts.get_result()
if outputs[0].get("postag", None):
for output in outputs:
del output["postag"]
return outputs
def _get_abs_path(self, path):
return os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), path))
if __name__ == "__main__":
logging.info("init arguments.")
args = ArgConfig()
logging.info("init environment.")
env = Environment(args)
logging.info("Override the default configs\n{}".format(env.args))
logging.info("{}\n{}\n{}\n{}".format(env.WORD, env.FEAT, env.ARC, env.REL))
logging.info("Set the max num of threads to {}".format(env.args.threads))
logging.info("Set the seed for generating random numbers to {}".format(env.args.seed))
logging.info("Run the subcommand in mode {}".format(env.args.mode))
paddle.set_device(env.place)
mode = env.args.mode
if mode == "train":
train(env)
elif mode == "evaluate":
evaluate(env)
elif mode == "predict":
predict(env)
elif mode == "predict_q":
predict_query(env)
else:
logging.error("Unknown task mode: {}.".format(mode))
|
cd570a27b8dc15900facf858ac100b50adc2f260
|
448dccb21fdd5be407d7202fefecb00a1b5ef65d
|
/cadquery/vis.py
|
401ef94c3d6293d0fd3ac332443d43ec1342cf8a
|
[
"Apache-2.0"
] |
permissive
|
CadQuery/cadquery
|
6be365a29713f8ddef15da142e85910d2f965313
|
e8d74474ca4869d014cba6e1f835cdabad86bf88
|
refs/heads/master
| 2023-08-28T06:23:43.229880
| 2023-08-21T16:54:37
| 2023-08-21T16:54:37
| 155,099,400
| 2,345
| 272
|
NOASSERTION
| 2023-09-09T21:21:52
| 2018-10-28T17:57:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,888
|
py
|
vis.py
|
from . import Shape, Workplane, Assembly, Sketch, Compound, Color
from .occ_impl.exporters.assembly import _vtkRenderWindow
from .occ_impl.jupyter_tools import DEFAULT_COLOR
from typing import Union
from OCP.TopoDS import TopoDS_Shape
from vtkmodules.vtkInteractionWidgets import vtkOrientationMarkerWidget
from vtkmodules.vtkRenderingAnnotation import vtkAxesActor
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleTrackballCamera
from vtkmodules.vtkRenderingCore import vtkMapper, vtkRenderWindowInteractor
def _to_assy(*objs: Union[Shape, Workplane, Assembly, Sketch]) -> Assembly:
assy = Assembly(color=Color(*DEFAULT_COLOR))
for obj in objs:
if isinstance(obj, (Shape, Workplane, Assembly)):
assy.add(obj)
elif isinstance(obj, Sketch):
assy.add(obj._faces)
assy.add(Compound.makeCompound(obj._edges))
assy.add(Compound.makeCompound(obj._wires))
elif isinstance(obj, TopoDS_Shape):
assy.add(Shape(obj))
else:
raise ValueError(f"{obj} has unsupported type {type(obj)}")
return assy
def show(*objs: Union[Shape, Workplane, Assembly, Sketch]):
"""
Show CQ objects using VTK
"""
# construct the assy
assy = _to_assy(*objs)
# create a VTK window
win = _vtkRenderWindow(assy)
win.SetWindowName("CQ viewer")
# rendering related settings
win.SetMultiSamples(16)
vtkMapper.SetResolveCoincidentTopologyToPolygonOffset()
vtkMapper.SetResolveCoincidentTopologyPolygonOffsetParameters(1, 0)
vtkMapper.SetResolveCoincidentTopologyLineOffsetParameters(-1, 0)
# create a VTK interactor
inter = vtkRenderWindowInteractor()
inter.SetInteractorStyle(vtkInteractorStyleTrackballCamera())
inter.SetRenderWindow(win)
# construct an axes indicator
axes = vtkAxesActor()
axes.SetDragable(0)
tp = axes.GetXAxisCaptionActor2D().GetCaptionTextProperty()
tp.SetColor(0, 0, 0)
axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().ShallowCopy(tp)
axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().ShallowCopy(tp)
# add to an orientation widget
orient_widget = vtkOrientationMarkerWidget()
orient_widget.SetOrientationMarker(axes)
orient_widget.SetViewport(0.9, 0.0, 1.0, 0.2)
orient_widget.SetZoom(1.1)
orient_widget.SetInteractor(inter)
orient_widget.EnabledOn()
orient_widget.InteractiveOff()
# use gradient background
renderer = win.GetRenderers().GetFirstRenderer()
renderer.GradientBackgroundOn()
# set size and camera
win.SetSize(*win.GetScreenSize())
win.SetPosition(-10, 0)
camera = renderer.GetActiveCamera()
camera.Roll(-35)
camera.Elevation(-45)
renderer.ResetCamera()
# show and return
inter.Initialize()
win.Render()
inter.Start()
# alias
show_object = show
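# A minimal usage sketch, assuming a desktop session with a working display; the
# 10x10x10 box is a made-up example object, not part of the original module.
if __name__ == "__main__":
    example = Workplane("XY").box(10, 10, 10)
    show(example)  # opens the VTK viewer window configured above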
|
35abbfde518ad23d420f268846a90c3b89503438
|
0c8ac66ae050e1a98dd8afd7525c9ed74ec5d300
|
/config/settings/base.py
|
fda579e9752e4dc387901f87cb4b33541da6b38d
|
[] |
no_license
|
TareqMonwer/Django-School-Management
|
5b1c8145d04082063bc14fc9db1ce38b4db97a9d
|
3d425d300a77ad505089a3a4c0a9dc71cacbe89a
|
refs/heads/master
| 2023-08-19T23:36:34.359488
| 2023-08-13T05:53:42
| 2023-08-13T05:53:42
| 221,053,244
| 409
| 163
| null | 2023-08-13T05:53:44
| 2019-11-11T19:22:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 9,517
|
py
|
base.py
|
import os
from pathlib import Path
import environ
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from django.core.exceptions import ImproperlyConfigured
from django.contrib.messages import constants as messages
from utilities.constants import settings_message_constants
######################## Django Core & Custom Configs ########################
##############################################################################
BASE_DIR = Path(__file__).parent.parent.parent
env = environ.Env(
# set casting, default value
DEBUG=(bool, True),
USE_CELERY_REDIS=(bool, False),
USE_PAYMENT_OPTIONS=(bool, True),
USE_SENTRY=(bool, False),
USE_MAILCHIMP=(bool, False),
SSL_ISSANDBOX=(bool, True),
)
# reading .env file
env.read_env(str(BASE_DIR / "envs/.env"))
SECRET_KEY = env('SECRET_KEY')
DEBUG = env('DEBUG')
try:
DJANGO_ADMIN_URL = env('DJANGO_ADMIN_URL')
except ImproperlyConfigured:
DJANGO_ADMIN_URL = 'admin'
DEFAULT_APPS = [
'django_school_management.accounts.apps.AccountsConfig', # must be on top
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# allauth required
'django.contrib.sites',
]
LOCAL_APPS = [
'django_school_management.students.apps.StudentsConfig',
'django_school_management.teachers.apps.TeachersConfig',
'django_school_management.result.apps.ResultConfig',
'django_school_management.academics.apps.AcademicsConfig',
'django_school_management.pages.apps.PagesConfig',
'django_school_management.articles.apps.ArticlesConfig',
'django_school_management.institute.apps.InstituteConfig',
'django_school_management.payments.apps.PaymentsConfig',
'django_school_management.notices.apps.NoticesConfig',
]
# third party apps
THIRD_PARTY_APPS = [
'rest_framework',
'corsheaders',
'crispy_forms',
'crispy_bootstrap4',
'rolepermissions',
'taggit',
'django_extensions',
'django_filters',
'allauth',
'allauth.account',
'allauth.socialaccount',
'ckeditor',
'ckeditor_uploader',
'mptt',
'widget_tweaks',
'django_social_share',
'django_countries',
'import_export',
# 'admin_honeypot', # admin_honeypot doesn't support Django 4
'django_tables2',
'bootstrap4',
'django_file_form',
'tinymce',
]
INSTALLED_APPS = DEFAULT_APPS + LOCAL_APPS + THIRD_PARTY_APPS
SITE_ID = 1
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# attach_institute_data_ctx_processor was implemented to provide the same support.
# 'institute.middleware.AttachInstituteDataMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(BASE_DIR / 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# ctx processor to attach institute data in templates
'context_processors.dj_sms_context_processor.attach_institute_data_ctx_processor',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
DATABASES = {
'default': env.db(),
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': f'{env("REDIS_HOST")}:{env("REDIS_PORT")}',
},
}
# Write session to the DB, only load it from the cache
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# SET MYSQLDB charset for storing Bengali text
if 'mysql' in DATABASES['default']['ENGINE']:
DATABASES['default']['OPTIONS'] = {'charset': 'utf8mb4'}
# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
# Validators are opted out intentionally,
# please customize this as per your application requirements.
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'accounts.User'
AUTHENTICATION_BACKENDS = [
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = env('TIME_ZONE')
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = str(BASE_DIR / 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
str(BASE_DIR / 'static')
]
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger'
}
MEDIA_ROOT = str(BASE_DIR / 'media')
MEDIA_URL = '/media/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = env('EMAIL_HOST')
EMAIL_PORT = env('EMAIL_PORT')
EMAIL_HOST_USER = env('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')
# login/register redirects
LOGIN_REDIRECT_URL = 'account:profile_complete'
LOGOUT_REDIRECT_URL = 'account_login'
LOGIN_URL = 'account:profile_complete'
LOGOUT_URL = 'account_logout'
######################## Third Party Configs ########################
#####################################################################
# DRF CONFIGS
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
CORS_ALLOW_ALL_ORIGINS = True
# SENTRY - For logging and monitoring purposes
USE_SENTRY = env('USE_SENTRY')
if USE_SENTRY:
sentry_sdk.init(
dsn=env('SENTRY_DSN'),
integrations=[DjangoIntegration()],
traces_sample_rate=1.0,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True,
# debug=True will work even if DEBUG=False in Django.
debug=True
)
# for permission management
ROLEPERMISSIONS_MODULE = 'django_school_management.academics.roles'
# ROLEPERMISSIONS_REGISTER_ADMIN = True
CRISPY_TEMPLATE_PACK = 'bootstrap4'
CKEDITOR_UPLOAD_PATH = 'ck-uploads/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_ALLOW_NONIMAGE_FILES = False
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full',
'extraPlugins': ['codesnippet', 'markdown'], 'width': '100%',
},
}
# STOP SENDING EMAIL FOR USER REGISTRATION
ACCOUNT_EMAIL_VERIFICATION = 'none' # use 'mandatory' or 'optional' for respective cases.
# Django taggit.
TAGGIT_CASE_INSENSITIVE = True
# =========================== PAYMENTS ===========================
# BRAINTREE FOR HANDLING PAYMENTS
USE_PAYMENT_OPTIONS = env('USE_PAYMENT_OPTIONS')
if USE_PAYMENT_OPTIONS:
try:
# Braintree
BRAINTREE_MERCHANT_ID = env('BRAINTREE_MERCHANT_ID')
BRAINTREE_PUBLIC_KEY = env('BRAINTREE_PUBLIC_KEY')
BRAINTREE_PRIVATE_KEY = env('BRAINTREE_PRIVATE_KEY')
# SSLCommerz
STORE_ID = env('STORE_ID')
STORE_PASS = env('STORE_PASS')
SSL_ISSANDBOX = env('SSL_ISSANDBOX')
except ImproperlyConfigured:
raise ImproperlyConfigured(settings_message_constants.INCORRECT_PAYMENT_GATEWAY_SETUP_MESSAGE)
# CELERY BROKER CONFIG
USE_CELERY_REDIS = env('USE_CELERY_REDIS')
if USE_CELERY_REDIS:
try:
CELERY_BROKER_URL = env('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = env('CELERY_RESULT_BACKEND')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Dhaka'
except ImproperlyConfigured:
raise ImproperlyConfigured(settings_message_constants.INCORRECT_CELERY_REDIS_SETUP_MESSAGE)
# MAILCHIMP INTEGRATION
USE_MAILCHIMP = env('USE_MAILCHIMP')
if USE_MAILCHIMP:
MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY')
MAILCHIMP_DATA_CENTER = env('MAILCHIMP_DATA_CENTER')
MAILCHIMP_LIST_ID = env('MAILCHIMP_LIST_ID')
TINYMCE_DEFAULT_CONFIG = {
"theme": "silver",
"height": 500,
"menubar": False,
"plugins": "advlist,autolink,lists,link,image,charmap,print,preview,anchor,"
"searchreplace,visualblocks,code,fullscreen,insertdatetime,media,table,paste,"
"code,help,wordcount",
"toolbar": "undo redo | formatselect | "
"bold italic backcolor | alignleft aligncenter "
"alignright alignjustify | bullist numlist outdent indent | "
"removeformat | help",
}
|
c9b1cce05b5c81ea7dc66f2f0c063733513302b6
|
391fb5b11425d59ea917c6fed51fe1fa9c672764
|
/tests/opytimizer/optimizers/science/test_wwo.py
|
a0fb03596c61e38e5494dc3d8a1d3350e767bf3a
|
[
"Apache-2.0"
] |
permissive
|
gugarosa/opytimizer
|
89e60d582dee9e31b1723e35d08103d7f8f5d3e1
|
7326a887ed8e3858bc99c8815048d56d02edf88c
|
refs/heads/master
| 2023-08-01T08:09:12.055317
| 2023-05-11T15:21:58
| 2023-05-11T15:21:58
| 109,152,650
| 602
| 45
|
Apache-2.0
| 2023-09-07T14:26:13
| 2017-11-01T16:04:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
test_wwo.py
|
import numpy as np
from opytimizer.optimizers.science import wwo
from opytimizer.spaces import search
np.random.seed(0)
def test_wwo_params():
params = {"h_max": 5, "alpha": 1.001, "beta": 0.001, "k_max": 1}
new_wwo = wwo.WWO(params=params)
assert new_wwo.h_max == 5
assert new_wwo.alpha == 1.001
assert new_wwo.beta == 0.001
assert new_wwo.k_max == 1
def test_wwo_params_setter():
new_wwo = wwo.WWO()
try:
new_wwo.h_max = "a"
except:
new_wwo.h_max = 5
try:
new_wwo.h_max = -1
except:
new_wwo.h_max = 5
assert new_wwo.h_max == 5
try:
new_wwo.alpha = "b"
except:
new_wwo.alpha = 1.001
try:
new_wwo.alpha = -1
except:
new_wwo.alpha = 1.001
assert new_wwo.alpha == 1.001
try:
new_wwo.beta = "c"
except:
new_wwo.beta = 0.001
try:
new_wwo.beta = -1
except:
new_wwo.beta = 0.001
assert new_wwo.beta == 0.001
try:
new_wwo.k_max = "d"
except:
new_wwo.k_max = 1
try:
new_wwo.k_max = -1
except:
new_wwo.k_max = 1
assert new_wwo.k_max == 1
def test_wwo_compile():
search_space = search.SearchSpace(
n_agents=50, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]
)
new_wwo = wwo.WWO()
new_wwo.compile(search_space)
try:
new_wwo.height = 1
except:
new_wwo.height = np.array([1])
assert new_wwo.height == np.array([1])
try:
new_wwo.length = 1
except:
new_wwo.length = np.array([1])
assert new_wwo.length == np.array([1])
def test_wwo_propagate_wave():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(
n_agents=50, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]
)
new_wwo = wwo.WWO()
new_wwo.compile(search_space)
wave = new_wwo._propagate_wave(search_space.agents[0], square, 0)
assert type(wave).__name__ == "Agent"
def test_wwo_refract_wave():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(
n_agents=50, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]
)
new_wwo = wwo.WWO()
new_wwo.compile(search_space)
height, length = new_wwo._refract_wave(
search_space.agents[0], search_space.best_agent, square, 0
)
assert height == 5
assert length != 0
def test_wwo_break_wave():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(
n_agents=50, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]
)
new_wwo = wwo.WWO()
new_wwo.compile(search_space)
broken_wave = new_wwo._break_wave(search_space.agents[0], square, 0)
assert type(broken_wave).__name__ == "Agent"
def test_wwo_update_wave_length():
search_space = search.SearchSpace(
n_agents=50, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]
)
new_wwo = wwo.WWO()
new_wwo.compile(search_space)
new_wwo._update_wave_length(search_space.agents)
def test_wwo_update():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(
n_agents=50, n_variables=2, lower_bound=[0, 0], upper_bound=[10, 10]
)
new_wwo = wwo.WWO()
new_wwo.compile(search_space)
new_wwo.update(search_space, square)
new_wwo.update(search_space, square)
|
04f64bda474a229148ed249d175902f4e78830a2
|
2d7b921f9f82268fdfa871d7087de5422c230c24
|
/src/roles/harlot.py
|
d0a7be7b2692da095831f1e7b8a9329c13005b76
|
[
"BSD-2-Clause"
] |
permissive
|
lykoss/lykos
|
11e01119d1d63e97b2c0fa35835accd3e4960365
|
f4b52e0e5a31645d67bac7713c0b35c4308c50b9
|
refs/heads/master
| 2023-08-31T05:49:08.117378
| 2023-08-27T04:41:56
| 2023-08-27T04:41:56
| 16,919,993
| 128
| 103
|
NOASSERTION
| 2023-07-13T22:09:19
| 2014-02-17T17:02:57
|
Python
|
UTF-8
|
Python
| false
| false
| 5,426
|
py
|
harlot.py
|
from __future__ import annotations
import random
import re
from typing import Optional, Union
from src import users
from src.cats import Wolf
from src.containers import UserSet, UserDict
from src.decorators import command
from src.dispatcher import MessageDispatcher
from src.events import Event, event_listener
from src.functions import get_players, get_all_players, get_main_role, get_reveal_role, get_target
from src.gamestate import GameState
from src.messages import messages
from src.status import try_misdirection, try_exchange
from src.users import User
VISITED: UserDict[users.User, users.User] = UserDict()
PASSED = UserSet()
FORCE_PASSED = UserSet()
@command("visit", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("harlot",))
def hvisit(wrapper: MessageDispatcher, message: str):
"""Visit a player. You will die if you visit a wolf or a target of the wolves."""
if VISITED.get(wrapper.source):
wrapper.pm(messages["harlot_already_visited"].format(VISITED[wrapper.source]))
return
if wrapper.source in FORCE_PASSED:
wrapper.pm(messages["already_being_visited"])
return
var = wrapper.game_state
target = get_target(wrapper, re.split(" +", message)[0], not_self_message="harlot_not_self")
if not target:
return
target = try_misdirection(var, wrapper.source, target)
if try_exchange(var, wrapper.source, target):
return
VISITED[wrapper.source] = target
PASSED.discard(wrapper.source)
house = var.players.index(target)
var.locations[wrapper.source] = f"house_{house}"
wrapper.pm(messages["harlot_success"].format(target))
if target is not wrapper.source:
target.send(messages["harlot_success"].format(wrapper.source))
revt = Event("visit", {})
revt.dispatch(var, "harlot", wrapper.source, target)
@command("pass", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("harlot",))
def pass_cmd(wrapper: MessageDispatcher, message: str):
"""Do not visit someone tonight."""
if VISITED.get(wrapper.source):
wrapper.pm(messages["harlot_already_visited"].format(VISITED[wrapper.source]))
return
PASSED.add(wrapper.source)
wrapper.pm(messages["no_visit"])
@event_listener("visit")
def on_visit(evt: Event, var: GameState, visitor_role: str, visitor: User, visited: User):
if visited in get_all_players(var, ("harlot",)):
# if we're being visited by anyone and we haven't visited yet, we have to stay home with them
if visited not in VISITED:
FORCE_PASSED.add(visited)
PASSED.add(visited)
visited.send(messages["already_being_visited"])
@event_listener("night_kills")
def on_night_kills(evt: Event, var: GameState):
wolves = get_players(var, Wolf)
for harlot, target in VISITED.items():
if target in wolves:
evt.data["victims"].add(harlot)
evt.data["killers"][harlot].append("@wolves")
@event_listener("night_death_message")
def on_night_death_message(evt: Event, var: GameState, victim: User, killer: Union[User, str]):
if killer == "@wolves" and victim in VISITED:
if VISITED[victim] in get_players(var, Wolf):
evt.data["key"] = "harlot_visited_wolf"
else:
evt.data["key"] = "visited_victim" if var.role_reveal in ("on", "team") else "visited_victim_no_reveal"
@event_listener("retribution_kill")
def on_retribution_kill(evt: Event, var: GameState, victim: User, loser: User):
if VISITED.get(victim) in get_players(var, Wolf):
evt.data["target"] = VISITED[victim]
@event_listener("chk_nightdone")
def on_chk_nightdone(evt: Event, var: GameState):
evt.data["acted"].extend(VISITED)
evt.data["acted"].extend(PASSED)
evt.data["nightroles"].extend(get_all_players(var, ("harlot",)))
@event_listener("new_role")
def on_new_role(evt: Event, var: GameState, player: User, old_role: Optional[str]):
if old_role == "harlot" and evt.data["role"] != "harlot":
PASSED.discard(player)
FORCE_PASSED.discard(player)
if player in VISITED:
VISITED.pop(player).send(messages["harlot_disappeared"].format(player))
@event_listener("send_role")
def on_send_role(evt: Event, var: GameState):
for harlot in get_all_players(var, ("harlot",)):
pl = get_players(var)
random.shuffle(pl)
pl.remove(harlot)
harlot.send(messages["harlot_notify"])
if var.next_phase == "night":
harlot.send(messages["players_list"].format(pl))
@event_listener("begin_day")
def on_begin_day(evt: Event, var: GameState):
VISITED.clear()
PASSED.clear()
FORCE_PASSED.clear()
@event_listener("del_player")
def on_del_player(evt: Event, var: GameState, player: User, all_roles: set[str], death_triggers: bool):
if "harlot" not in all_roles:
return
del VISITED[:player:]
PASSED.discard(player)
FORCE_PASSED.discard(player)
@event_listener("reset")
def on_reset(evt: Event, var: GameState):
VISITED.clear()
PASSED.clear()
FORCE_PASSED.clear()
@event_listener("get_role_metadata")
def on_get_role_metadata(evt: Event, var: Optional[GameState], kind: str):
if kind == "role_categories":
evt.data["harlot"] = {"Village", "Safe", "Nocturnal"}
elif kind == "lycanthropy_role":
evt.data["harlot"] = {"prefix": "harlot"}
|
f18c569758f070cbf22d50ad8e3ab5ffd5180fb0
|
1e528494a929deada984822438b3ab569762e6c6
|
/rx/testing/mockdisposable.py
|
4307a4ee0b375de5d308e00466d4b7b5ede038fa
|
[
"MIT"
] |
permissive
|
Sprytile/Sprytile
|
a0233a00a243f263691921d7e1f6af05c5eb5442
|
6b68d0069aef5bfed6ab40d1d5a94a3382b41619
|
refs/heads/master
| 2022-07-10T06:54:01.003723
| 2020-09-26T07:25:35
| 2020-09-26T07:25:35
| 72,276,917
| 860
| 91
|
MIT
| 2022-07-07T23:37:19
| 2016-10-29T09:47:09
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
mockdisposable.py
|
from .reactive_assert import AssertList
class MockDisposable():
def __init__(self, scheduler):
self.scheduler = scheduler
self.disposes = AssertList()
self.disposes.append(self.scheduler.clock)
def dispose(self):
self.disposes.append(self.scheduler.clock)
|
bc9e6a1bdf6da52ffd2c2c835100a52d67d09595
|
5f21b3194ca63301ea9eaa22f7d46a11b4068ea2
|
/iredis/__init__.py
|
4b67c39d7e3b39d3ea20d65b8a3c081a50320293
|
[
"BSD-3-Clause"
] |
permissive
|
laixintao/iredis
|
f326b36bb21293d4ba7710d624d180924bb6cecd
|
5d9832f7d18d098c6c763fd25f40d513c9db1738
|
refs/heads/master
| 2023-08-08T22:28:39.269150
| 2023-07-24T05:30:28
| 2023-07-24T05:30:28
| 166,371,259
| 2,455
| 131
|
BSD-3-Clause
| 2023-07-24T05:30:29
| 2019-01-18T08:31:15
|
Python
|
UTF-8
|
Python
| false
| false
| 23
|
py
|
__init__.py
|
__version__ = "1.13.1"
|
5a03c4e592404caafab87045b6037a4f5426c957
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/PolylineSegments.py
|
3bf33d71ff0d114ea28c4f7c411ff3e9a9ecdebf
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
PolylineSegments.py
|
class PolylineSegments(object,IDisposable):
""" An output node that represents a tessellated polyline segments. """
def Dispose(self):
""" Dispose(self: PolylineSegments) """
pass
def GetVertices(self):
"""
GetVertices(self: PolylineSegments) -> IList[XYZ]
Returns an array of vertices of the polyline segments.
Returns: Array of XYZ points.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: PolylineSegments,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
EndLocalParameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Local parameter associated with the end point.
Get: EndLocalParameter(self: PolylineSegments) -> float
"""
EndParameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Parameter associated with the end point.
Get: EndParameter(self: PolylineSegments) -> float
"""
IsFilled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates whether the area enclosed by the polyline is to be filled or not.
Get: IsFilled(self: PolylineSegments) -> bool
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: PolylineSegments) -> bool
"""
LineProperties=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Access to the line (pen) properties of the polyline
Get: LineProperties(self: PolylineSegments) -> LineProperties
"""
StartLocalParameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Local parameter associated with the start point.
Get: StartLocalParameter(self: PolylineSegments) -> float
"""
StartParameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Parameter associated with the start point.
Get: StartParameter(self: PolylineSegments) -> float
"""
|
3999197948855f5952eca9705da1d2178f14f93a
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/puppet/zulip/files/nagios_plugins/zulip_app_frontend/check_queue_worker_errors
|
f7b83d6b1949e28db9e7af230ba75075c10e4d9a
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
check_queue_worker_errors
|
#!/usr/bin/env python3
"""
Nagios plugin to check that none of our queue workers have reported errors.
"""
import glob
import os
import sys
# settings.QUEUE_ERROR_DIR; not importing Django so that this can run
# as the nagios user.
wildcard = os.path.join("/var/log/zulip/queue_error", "*.errors")
clean = True
for fn in glob.glob(wildcard):
print(f"WARNING: Queue errors logged in {fn}")
clean = False
if not clean:
sys.exit(1)
sys.exit(0)
|
|
0223724403c931b04c866f37c8c65d7b5db3ab6e
|
b097b7caa954a0447bef9a7144e15fbc1b08a96b
|
/examples/hubert/evaluate.py
|
55c58226a9c8a0d45219aa1f94a07455bdd07468
|
[
"CC-BY-NC-4.0",
"BSD-2-Clause"
] |
permissive
|
pytorch/audio
|
3fa7006404020c9ce731f27b94f0257195d2efe3
|
e057d7d144e2716588b80255f0a143662fd5c10d
|
refs/heads/main
| 2023-09-03T15:46:06.918708
| 2023-09-02T00:39:15
| 2023-09-02T00:39:15
| 90,321,822
| 2,319
| 675
|
BSD-2-Clause
| 2023-09-13T22:09:20
| 2017-05-05T00:38:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,462
|
py
|
evaluate.py
|
import argparse
import logging
from typing import Dict, List
import torch
import torch.nn.functional as F
import torchaudio
from torchaudio.models.decoder import ctc_decoder, CTCDecoder, download_pretrained_files
from utils import _get_id2label
logger = logging.getLogger(__name__)
def _load_checkpoint(checkpoint: str) -> torch.nn.Module:
model = torchaudio.models.hubert_base(aux_num_out=29)
checkpoint = torch.load(checkpoint, map_location="cpu")
state_dict = checkpoint["state_dict"]
new_state_dict = {}
for k in state_dict:
if "model.wav2vec2" in k:
new_state_dict[k.replace("model.wav2vec2.", "")] = state_dict[k]
elif "aux" in k:
new_state_dict[k] = state_dict[k]
model.load_state_dict(new_state_dict)
return model
def _viterbi_decode(emission: torch.Tensor, id2token: Dict, blank_idx: int = 0) -> List[str]:
"""Run greedy decoding for ctc outputs.
Args:
emission (torch.Tensor): Output of CTC layer. Tensor with dimensions (..., time, num_tokens).
id2token (Dictionary): The dictionary that maps indices of emission's last dimension
to the corresponding tokens.
Returns:
(List of str): The decoding result. List of string in lower case.
"""
hypothesis = emission.argmax(-1).unique_consecutive()
hypothesis = hypothesis[hypothesis != blank_idx]
hypothesis = "".join(id2token[int(i)] for i in hypothesis).replace("|", " ").strip()
return hypothesis.split()
def _ctc_decode(emission, decoder: CTCDecoder) -> List[str]:
"""Run CTC decoding with a KenLM language model.
Args:
emission (torch.Tensor): Output of CTC layer. Tensor with dimensions `(..., time, num_tokens)`.
decoder (CTCDecoder): The initialized CTCDecoder.
Returns:
(List of str): The decoding result. List of string in lower case.
"""
hypothesis = decoder(emission)
hypothesis = hypothesis[0][0].words
hypothesis = [word for word in hypothesis if word != " "]
return hypothesis
def run_inference(args):
if args.use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Load the fine-tuned HuBERTPretrainModel from checkpoint.
model = _load_checkpoint(args.checkpoint)
model.eval().to(device)
if args.use_lm:
# get decoder files
files = download_pretrained_files("librispeech-4-gram")
decoder = ctc_decoder(
lexicon=files.lexicon,
tokens=files.tokens,
lm=files.lm,
nbest=args.nbest,
beam_size=args.beam_size,
beam_size_token=args.beam_size_token,
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_score,
sil_score=args.sil_score,
log_add=False,
)
else:
id2token = _get_id2label()
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url=args.split)
total_edit_distance = 0
total_length = 0
for idx, sample in enumerate(dataset):
waveform, _, transcript, _, _, _ = sample
transcript = transcript.strip().lower().strip().replace("\n", "")
with torch.inference_mode():
emission, _ = model(waveform.to(device))
emission = F.log_softmax(emission, dim=-1)
if args.use_lm:
hypothesis = _ctc_decode(emission.cpu(), decoder)
else:
hypothesis = _viterbi_decode(emission, id2token)
total_edit_distance += torchaudio.functional.edit_distance(hypothesis, transcript.split())
total_length += len(transcript.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER: {total_edit_distance / total_length}")
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--librispeech-path",
type=str,
help="Folder where LibriSpeech dataset is stored.",
)
parser.add_argument(
"--split",
type=str,
choices=["dev-clean", "dev-other", "test-clean", "test-other"],
help="LibriSpeech dataset split. (Default: 'test-clean')",
default="test-clean",
)
parser.add_argument(
"--checkpoint",
type=str,
help="The checkpoint path of fine-tuned HuBERTPretrainModel.",
)
parser.add_argument("--use-lm", action="store_true", help="Whether to use language model for decoding.")
parser.add_argument("--nbest", type=int, default=1, help="Number of best hypotheses to return.")
parser.add_argument(
"--beam-size",
type=int,
default=1500,
help="Beam size for determining number of hypotheses to store. (Default: 1500)",
)
parser.add_argument(
"--beam-size-token",
type=int,
default=29,
help="Number of tokens to consider at each beam search step. (Default: 29)",
)
parser.add_argument(
"--beam-threshold", type=int, default=100, help="Beam threshold for pruning hypotheses. (Default: 100)"
)
parser.add_argument(
"--lm-weight",
type=float,
default=2.46,
help="Languge model weight in decoding. (Default: 2.46)",
)
parser.add_argument(
"--word-score",
type=float,
default=-0.59,
help="Word insertion score in decoding. (Default: -0.59)",
)
parser.add_argument(
"--unk-score", type=float, default=float("-inf"), help="Unknown word insertion score. (Default: -inf)"
)
parser.add_argument("--sil-score", type=float, default=0, help="Silence insertion score. (Default: 0)")
parser.add_argument("--use-gpu", action="store_true", help="Whether to use GPU for decoding.")
parser.add_argument("--debug", action="store_true", help="Whether to use debug level for logging.")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def _main():
args = _parse_args()
_init_logger(args.debug)
run_inference(args)
if __name__ == "__main__":
_main()
|
d3bfa5b311b3598ce09cc87a645c36ac253e0a04
|
8d3ec6437b83eb3d6f7176e6bd2a756848f6956b
|
/src/nsupdate/main/iptools.py
|
9309afa7ad5664bc636dbf3d731a1897835a9948
|
[
"BSD-3-Clause"
] |
permissive
|
nsupdate-info/nsupdate.info
|
4430c6b6a47a72d045f9a24ccf052cda004cee30
|
a9df0ae0d5a7d8480142678bfd7d1834c90160a0
|
refs/heads/master
| 2023-09-05T09:24:24.562117
| 2023-08-28T19:40:15
| 2023-08-28T19:40:15
| 13,284,138
| 918
| 114
|
NOASSERTION
| 2023-09-05T12:26:02
| 2013-10-02T22:01:02
|
Python
|
UTF-8
|
Python
| false
| false
| 636
|
py
|
iptools.py
|
"""
Misc. IP tools: normalize, handle mapped addresses
"""
from netaddr import IPAddress
def normalize_mapped_address(ipaddr):
"""
    Converts an IPv4-mapped IPv6 address into an IPv4 address. Handles both the
    ::ffff:192.0.2.128 format and the deprecated ::192.0.2.128 format.
:param ipaddr: IP address [str]
:return: normalized IP address [str]
"""
ipaddr = IPAddress(ipaddr)
if ipaddr.is_ipv4_compat() or ipaddr.is_ipv4_mapped():
ipaddr = ipaddr.ipv4()
return str(ipaddr)
# currently, normalize_ip does no more than normalize_mapped_address:
normalize_ip = normalize_mapped_address
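# Hedged usage sketch, not part of the original module; the addresses below are
# documentation examples (RFC 5737 / RFC 3849) used only as illustration.
if __name__ == "__main__":
    print(normalize_mapped_address("::ffff:192.0.2.128"))  # -> 192.0.2.128
    print(normalize_mapped_address("::192.0.2.128"))       # -> 192.0.2.128 (deprecated form)
    print(normalize_ip("2001:db8::1"))                     # -> 2001:db8::1 (unchanged)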
|
89b7c017b78f94764d52916d453384b9cb2dc34a
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster/dagster_tests/asset_defs_tests/asset_package/__init__.py
|
0c14133ec993e946a1be608a83008ee7290db21a
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 767
|
py
|
__init__.py
|
from dagster import AssetKey, SourceAsset, asset
@asset
def little_richard():
pass
def make_list_of_assets():
# these assets are stashed inside a function so that they need to be discovered through
# a list
@asset
def james_brown():
pass
@asset
def fats_domino():
pass
return [james_brown, fats_domino]
def make_list_of_source_assets():
# these source assets are stashed inside a function so that they need to be discovered through
# a list
buddy_holly = SourceAsset(key=AssetKey("buddy_holly"))
jerry_lee_lewis = SourceAsset(key=AssetKey("jerry_lee_lewis"))
return [buddy_holly, jerry_lee_lewis]
list_of_assets_and_source_assets = [*make_list_of_assets(), *make_list_of_source_assets()]
|
45048fcc9036d7f3ca2dcb0917fba3f95a8c14d7
|
f74e34e1f308f2d453d66b4a531a88c44cfd4a5f
|
/fiducial_slam/scripts/move_origin.py
|
0cb7b17eb352d8b40a6657bdafef0e79267f7c55
|
[
"BSD-3-Clause"
] |
permissive
|
UbiquityRobotics/fiducials
|
40059bd4a3e3098dba18e3408ec84c638c39c43d
|
6c09104dd183925549e73825d50123ba5339d258
|
refs/heads/noetic-devel
| 2023-01-06T19:04:33.108565
| 2022-09-23T20:37:23
| 2022-09-23T20:37:23
| 30,819,640
| 263
| 143
|
BSD-3-Clause
| 2022-11-17T12:20:13
| 2015-02-15T06:11:15
|
C
|
UTF-8
|
Python
| false
| false
| 648
|
py
|
move_origin.py
|
#!/usr/bin/python
"""
Move origin of fiducial co-ordinate system
"""
import numpy, sys, os
from fiducial_slam.map import Map
if __name__ == "__main__":
argc = len(sys.argv)
if argc != 4 and argc != 5:
print "Usage: %s x y z [file]" % sys.argv[0]
sys.exit(1)
offset = numpy.array([float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3])])
if argc == 5:
filename = sys.argv[4]
else:
filename = "~/.ros/slam/map.txt"
filename = os.path.expanduser(filename)
map = Map(filename)
fids = map.keys()
for fid in fids:
f = map[fid]
f.position += offset
map.save()
|
251cc20cbcabc16ebc6ed19d563927f53738651a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/recycle_instance.py
|
8f495c2566c86db0733490324b1e57b98684bf87
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 10,792
|
py
|
recycle_instance.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecycleInstance:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'name': 'str',
'mode': 'str',
'datastore': 'RecycleDatastore',
'pay_mode': 'str',
'enterprise_project_id': 'str',
'backup_id': 'str',
'created_at': 'str',
'deleted_at': 'str',
'retained_until': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'mode': 'mode',
'datastore': 'datastore',
'pay_mode': 'pay_mode',
'enterprise_project_id': 'enterprise_project_id',
'backup_id': 'backup_id',
'created_at': 'created_at',
'deleted_at': 'deleted_at',
'retained_until': 'retained_until'
}
def __init__(self, id=None, name=None, mode=None, datastore=None, pay_mode=None, enterprise_project_id=None, backup_id=None, created_at=None, deleted_at=None, retained_until=None):
"""RecycleInstance
The model defined in huaweicloud sdk
        :param id: Instance ID
:type id: str
        :param name: Instance name
:type name: str
        :param mode: Instance type. Cluster, replica set, and single-node instances are supported. Values: - Sharding - ReplicaSet - Single
:type mode: str
:param datastore:
:type datastore: :class:`huaweicloudsdkdds.v3.RecycleDatastore`
        :param pay_mode: Billing mode. - The value "0" indicates pay-per-use billing. - The value "1" indicates yearly/monthly billing.
:type pay_mode: str
        :param enterprise_project_id: Enterprise project ID. The value "0" indicates the default enterprise project.
:type enterprise_project_id: str
        :param backup_id: Backup ID
:type backup_id: str
        :param created_at: Creation time
:type created_at: str
        :param deleted_at: Deletion time
:type deleted_at: str
        :param retained_until: Retention end time
:type retained_until: str
"""
self._id = None
self._name = None
self._mode = None
self._datastore = None
self._pay_mode = None
self._enterprise_project_id = None
self._backup_id = None
self._created_at = None
self._deleted_at = None
self._retained_until = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if mode is not None:
self.mode = mode
if datastore is not None:
self.datastore = datastore
if pay_mode is not None:
self.pay_mode = pay_mode
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if backup_id is not None:
self.backup_id = backup_id
if created_at is not None:
self.created_at = created_at
if deleted_at is not None:
self.deleted_at = deleted_at
if retained_until is not None:
self.retained_until = retained_until
@property
def id(self):
"""Gets the id of this RecycleInstance.
        Instance ID
:return: The id of this RecycleInstance.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this RecycleInstance.
        Instance ID
:param id: The id of this RecycleInstance.
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this RecycleInstance.
        Instance name
:return: The name of this RecycleInstance.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RecycleInstance.
        Instance name
:param name: The name of this RecycleInstance.
:type name: str
"""
self._name = name
@property
def mode(self):
"""Gets the mode of this RecycleInstance.
        Instance type. Cluster, replica set, and single-node instances are supported. Values: - Sharding - ReplicaSet - Single
:return: The mode of this RecycleInstance.
:rtype: str
"""
return self._mode
@mode.setter
def mode(self, mode):
"""Sets the mode of this RecycleInstance.
        Instance type. Cluster, replica set, and single-node instances are supported. Values: - Sharding - ReplicaSet - Single
:param mode: The mode of this RecycleInstance.
:type mode: str
"""
self._mode = mode
@property
def datastore(self):
"""Gets the datastore of this RecycleInstance.
:return: The datastore of this RecycleInstance.
:rtype: :class:`huaweicloudsdkdds.v3.RecycleDatastore`
"""
return self._datastore
@datastore.setter
def datastore(self, datastore):
"""Sets the datastore of this RecycleInstance.
:param datastore: The datastore of this RecycleInstance.
:type datastore: :class:`huaweicloudsdkdds.v3.RecycleDatastore`
"""
self._datastore = datastore
@property
def pay_mode(self):
"""Gets the pay_mode of this RecycleInstance.
        Billing mode. - The value "0" indicates pay-per-use billing. - The value "1" indicates yearly/monthly billing.
:return: The pay_mode of this RecycleInstance.
:rtype: str
"""
return self._pay_mode
@pay_mode.setter
def pay_mode(self, pay_mode):
"""Sets the pay_mode of this RecycleInstance.
        Billing mode. - The value "0" indicates pay-per-use billing. - The value "1" indicates yearly/monthly billing.
:param pay_mode: The pay_mode of this RecycleInstance.
:type pay_mode: str
"""
self._pay_mode = pay_mode
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this RecycleInstance.
        Enterprise project ID. The value "0" indicates the default enterprise project.
:return: The enterprise_project_id of this RecycleInstance.
:rtype: str
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this RecycleInstance.
        Enterprise project ID. The value "0" indicates the default enterprise project.
:param enterprise_project_id: The enterprise_project_id of this RecycleInstance.
:type enterprise_project_id: str
"""
self._enterprise_project_id = enterprise_project_id
@property
def backup_id(self):
"""Gets the backup_id of this RecycleInstance.
        Backup ID
:return: The backup_id of this RecycleInstance.
:rtype: str
"""
return self._backup_id
@backup_id.setter
def backup_id(self, backup_id):
"""Sets the backup_id of this RecycleInstance.
        Backup ID
:param backup_id: The backup_id of this RecycleInstance.
:type backup_id: str
"""
self._backup_id = backup_id
@property
def created_at(self):
"""Gets the created_at of this RecycleInstance.
        Creation time
:return: The created_at of this RecycleInstance.
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this RecycleInstance.
        Creation time
:param created_at: The created_at of this RecycleInstance.
:type created_at: str
"""
self._created_at = created_at
@property
def deleted_at(self):
"""Gets the deleted_at of this RecycleInstance.
        Deletion time
:return: The deleted_at of this RecycleInstance.
:rtype: str
"""
return self._deleted_at
@deleted_at.setter
def deleted_at(self, deleted_at):
"""Sets the deleted_at of this RecycleInstance.
        Deletion time
:param deleted_at: The deleted_at of this RecycleInstance.
:type deleted_at: str
"""
self._deleted_at = deleted_at
@property
def retained_until(self):
"""Gets the retained_until of this RecycleInstance.
        Retention end time
:return: The retained_until of this RecycleInstance.
:rtype: str
"""
return self._retained_until
@retained_until.setter
def retained_until(self, retained_until):
"""Sets the retained_until of this RecycleInstance.
        Retention end time
:param retained_until: The retained_until of this RecycleInstance.
:type retained_until: str
"""
self._retained_until = retained_until
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecycleInstance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
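# Hedged usage sketch, not part of the generated SDK module; every field value below
# is an invented placeholder used only to show the to_dict() behaviour.
if __name__ == "__main__":
    demo = RecycleInstance(id="inst-123", name="demo-instance", mode="ReplicaSet", pay_mode="0")
    print(demo.to_dict())  # attributes left unset are returned as None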
|
57319f356674a0580e608ec6b895595890192554
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/weblayer/browser/safe_browsing/DEPS
|
031570e0746cfbfc1b689d7ea1385f89ed01d18b
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 141
|
DEPS
|
include_rules = [
"+components/safe_browsing/content/browser",
"+components/safe_browsing/content/common",
"+services/network/test",
]
|
|
f5c577714b2ca640617169c3d0a2d6467493c93c
|
7d60cde80c322ff41b233967456f9a5b9f938d16
|
/sqlalchemy/orm-6-joins.py
|
861866cc3ba5939bfa4f2d091dccbdde0ea24547
|
[] |
no_license
|
besnik/tutorials
|
e8a9f9a0a6709f69746b51f5a5bdc3c7520c49d5
|
600494b7296854f873aac7b9ff3c4cc23be7d989
|
refs/heads/master
| 2020-05-21T20:28:21.944713
| 2020-03-28T07:25:14
| 2020-03-28T07:25:14
| 65,710,896
| 113
| 39
| null | 2017-03-07T19:13:13
| 2016-08-15T06:47:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,623
|
py
|
orm-6-joins.py
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
# Base
Base = declarative_base()
# Concrete type
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String)
def __repr__(self):
return "<User(id: %r, name: %r)>" % (self.id, self.name)
# Engine and create tables
from sqlalchemy import create_engine
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
# Session with identity map
from sqlalchemy.orm import Session
session = Session(bind=engine)
# adding multiple objects as *pending*
u1 = User(name="slavo")
session.add_all([
u1,
User(name="jano"),
User(name="vlado"),
User(name="peter"),
User(name="brano")
])
# finalize transaction
session.commit();
# Many-to-One relationship (Adr->User - one user can live on multiple addresses)
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
# in SQLAlchemy we have to declare the relationship twice:
# 1) the relation type at the Core level (the ForeignKey column)
# 2) the relationship at the ORM/object level (relationship())
class Address(Base):
__tablename__ = "address"
id = Column(Integer, primary_key=True)
email = Column(String, nullable=False)
user_id = Column(Integer, ForeignKey("user.id"))
user = relationship("User", backref="addresses") # creates addresses property on referenced object
def __repr__(self):
return "<Address(%r)>" % self.email
# Creates addresses table
Base.metadata.create_all(engine)
u1 = User(name="Matus")
u1.addresses = [
Address(email="matus@matus.com"),
Address(email="matus@woho.com"),
Address(email="matus@microsoft.io")
]
u2 = User(name="Martin")
u2.addresses = [Address(email="martin@martin.com")]
session.add(u1) # also added addresses
session.add(u2)
session.commit()
# -----------------------------------------------------------
# Joins
# -----------------------------------------------------------
# a query can select from multiple tables at once; joins start from the left-most entity
# example of *implicit join*
rows = session.query(User, Address).filter(User.id == Address.user_id).all();
for row in rows:
print(row)
# *explicit join*
rows = session.query(User, Address).join(Address, User.id == Address.user_id).all();
for row in rows:
print(row)
# *join using relationship()*
rows = session.query(User, Address).join(User.addresses).all()
for row in rows:
print(row)
# automatic join if there is no ambiguity (e.g. a single foreign key); if SQLAlchemy can't figure it out, it raises an error
# the recommended pattern is to avoid this and use an explicit join
rows = session.query(User, Address).join(Address).all()
for row in rows:
print(row)
# filtering
row = session.query(User.name).join(User.addresses).filter(Address.email == "matus@woho.com").first()
print(row)
# join from right entity using select_from()
rows = session.query(User, Address).select_from(Address).join(Address.user).all();
for row in rows:
print(row)
# join with *subquery*
# subquery returns "alias" construct for us to use
from sqlalchemy import func
# subquery is selectable unit, acts like a table (metadata)
subq = session.query( func.count(Address.id).label("count"), User.id.label("user_id") ).\
join(Address.user).\
group_by(User.id).\
subquery()
rows = session.query(User.name, func.coalesce(subq.c.count, 0)).\
outerjoin(subq, User.id == subq.c.user_id).all()
for row in rows:
print(row)
print(subq)
print(subq.element)
print(subq.element.froms)
print(repr(subq.element.froms[0].left))
print(repr(subq.element.froms[0].right))
|
13bcc19a509d5d4c73d250a508a03c69bde9212e
|
4cccbd59c06b10f3bbe1d5e8c3082c8b0c9a8145
|
/edb/server/protocol/auth_ext/data.py
|
e713d5a7146323725af8731e750e4e2f5158f25f
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
edgedb/edgedb
|
ff26656ee449208b88ae85a6ad9823fce4f2ecad
|
4d614ce5de15e0b08575b0bf6738ece02c516ded
|
refs/heads/master
| 2023-09-05T07:10:05.409260
| 2023-09-01T23:20:13
| 2023-09-01T23:20:13
| 95,817,032
| 11,683
| 404
|
Apache-2.0
| 2023-09-14T17:25:49
| 2017-06-29T20:30:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
data.py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dataclasses
from typing import Optional
@dataclasses.dataclass
class UserInfo:
"""
OpenID Connect compatible user info.
See: https://openid.net/specs/openid-connect-core-1_0.html
"""
sub: str
name: Optional[str] = None
given_name: Optional[str] = None
family_name: Optional[str] = None
middle_name: Optional[str] = None
nickname: Optional[str] = None
preferred_username: Optional[str] = None
profile: Optional[str] = None
picture: Optional[str] = None
website: Optional[str] = None
email: Optional[str] = None
email_verified: Optional[bool] = None
gender: Optional[str] = None
birthdate: Optional[str] = None
zoneinfo: Optional[str] = None
locale: Optional[str] = None
phone_number: Optional[str] = None
phone_number_verified: Optional[bool] = None
address: Optional[dict[str, str]] = None
updated_at: Optional[float] = None
def __str__(self) -> str:
return self.sub
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"sub={self.sub!r} "
f"name={self.name!r} "
f"email={self.email!r} "
f"preferred_username={self.preferred_username!r})"
)
@dataclasses.dataclass
class Email:
"""Email address"""
address: str
is_verified: bool
is_primary: bool
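# Hedged usage sketch, not part of the EdgeDB source; the subject id and e-mail
# address below are invented placeholders.
if __name__ == "__main__":
    user = UserInfo(sub="github|12345", name="Ada Lovelace", email="ada@example.com")
    print(str(user))   # __str__ returns the OIDC subject: "github|12345"
    print(repr(user))  # sub, name, email and preferred_username only
    primary = Email(address="ada@example.com", is_verified=True, is_primary=True)
    print(primary)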
|
edd0128e167ee91824904ea48d5f88b634106bf6
|
7d571b303508d302bf22e4fba3e19044af309fb8
|
/batchflow/run_notebook.py
|
436ade8df27c0ca518b26959da055e7410ae7914
|
[
"Apache-2.0"
] |
permissive
|
analysiscenter/batchflow
|
0a62943836ff41c24274118c410af763750b490b
|
bcc2c723976cb5780d7b2876f2c2df74c186d343
|
refs/heads/master
| 2023-09-01T08:29:06.906776
| 2023-08-23T14:11:03
| 2023-08-23T14:11:03
| 84,835,419
| 110
| 42
|
Apache-2.0
| 2023-09-08T10:25:34
| 2017-03-13T14:22:53
|
Python
|
UTF-8
|
Python
| false
| false
| 10,615
|
py
|
run_notebook.py
|
""" Utility functions for running Jupyter Notebooks."""
import os
import time
def run_notebook(path, inputs=None, outputs=None, inputs_pos=1, out_path_db=None, execute_kwargs=None,
out_path_ipynb=None, out_path_html=None, add_timestamp=True, hide_code_cells=False, display_links=True,
raise_exception=False, return_notebook=False):
""" Run a notebook and save the execution result.
    Allows passing `inputs` arguments that are used as inputs for notebook execution. Under the hood,
    we place all of them into a separate cell inserted into the notebook; hence, all of the keys must be valid Python
    names, and the values must be valid for re-creating the objects.
Heavily inspired by https://github.com/tritemio/nbrun.
    Also allows passing an `outputs` parameter, which is a list of local variables that you need returned from
    the executed notebook. Under the hood, we insert a cell that saves local variables with names from `outputs`
    into the shelve db. If the notebook failed, that cell is executed directly. After that, this method extracts
    the output variables and returns them.
Parameters
----------
path : str
Path to the notebook to execute.
inputs : dict, optional
Inputs for notebook execution. Converted into a cell of variable assignments and inserted
into the notebook on `inputs_pos` place.
outputs : str or iterable of str
        List of notebook local variables to return as outputs.
inputs_pos : int
Position to insert the cell with inputs into the notebook.
out_path_db : str, optional
        Path to save the shelve database files. There is no need to include a file extension.
        If None and `inputs` or `outputs` are provided, then `out_path_db` is created from `out_path_ipynb`.
execute_kwargs : dict, optional
Other parameters of `:class:ExecutePreprocessor`.
out_path_ipynb : str, optional
Path to save the output .ipynb file.
out_path_html : str, optional
Path to save the output .html file.
add_timestamp : bool
Whether to add a cell with execution information at the beginning of the executed notebook.
hide_code_cells : bool
Whether to hide the code cells in the executed notebook.
display_links : bool
Whether to display links to the executed notebook and html at execution.
raise_exception : bool
Whether to re-raise exceptions from the notebook.
return_notebook : bool
Whether to return the notebook object from this function.
Returns
-------
exec_res : dict
Dictionary with the notebook execution results.
        It provides the following information:
- 'failed' : bool
Whether the execution was failed.
- 'outputs' : dict
The notebook saved outputs.
- 'failed cell number': int
An error cell execution number (if exists).
- 'traceback' : str
Traceback message from the notebook (if exists).
- 'notebook' : :class:`nbformat.notebooknode.NotebookNode`
Executed notebook object.
Note that this output is provided only if `return_notebook` is True.
"""
# pylint: disable=bare-except, lost-exception
import nbformat
from jupyter_client.manager import KernelManager
from nbconvert.preprocessors import ExecutePreprocessor
import shelve
from dill import Pickler, Unpickler
from textwrap import dedent
if inputs or outputs:
# Set `out_path_db` value
if out_path_db is None:
if out_path_ipynb:
out_path_db = os.path.splitext(out_path_ipynb)[0] + '_db'
else:
error_message = """\
Invalid value for `out_path_db` argument. If `inputs` or `outputs` are provided,
then you need to provide `out_path_db` or `out_path_ipynb` arguments."""
error_message = dedent(error_message)
raise ValueError(error_message)
# (Re)create a shelve database
shelve.Pickler = Pickler
shelve.Unpickler = Unpickler
with shelve.open(out_path_db) as notebook_db:
notebook_db.clear()
if isinstance(outputs, str):
outputs = [outputs]
working_dir = './'
execute_kwargs = execute_kwargs or {'timeout': -1}
executor = ExecutePreprocessor(**execute_kwargs)
kernel_manager = KernelManager()
# Notebook preparation:
# Read the notebook, insert a cell with inputs, insert another cell for outputs extraction
notebook = nbformat.read(path, as_version=4)
if hide_code_cells:
notebook["metadata"].update({"hide_input": True})
if inputs or outputs:
# Code for work with the shelve database from the notebook
comment_header = "# Cell inserted during automated execution\n"
code_header = f"""\
import os, shelve
from dill import Pickler, Unpickler
shelve.Pickler = Pickler
shelve.Unpickler = Unpickler
out_path_db = {repr(out_path_db)}"""
code_header = dedent(code_header)
if inputs:
# Save `inputs` in the shelve database and create a cell in the notebook
# for parameters extraction
with shelve.open(out_path_db) as notebook_db:
notebook_db.update(inputs)
code = """\n
# Inputs loading
with shelve.open(out_path_db) as notebook_db:
inputs = {**notebook_db}
locals().update(inputs)"""
code = dedent(code)
code = comment_header + code_header + code
notebook['cells'].insert(inputs_pos, nbformat.v4.new_code_cell(code))
if outputs:
# Create a cell to extract outputs from the notebook
# It saves locals from the notebook with preferred names in the shelve database
# This cell will be executed in error case too
code = f"""\n
# Output dict preparation
output = {{}}
outputs = {outputs}
for value_name in outputs:
if value_name in locals():
output[value_name] = locals()[value_name]
with shelve.open(out_path_db) as notebook_db:
notebook_db['outputs'] = output"""
code = dedent(code)
code = comment_header + (code_header if not inputs else "") + code
output_cell = nbformat.v4.new_code_cell(code)
notebook['cells'].append(output_cell)
# Execute the notebook
start_time = time.time()
exec_failed = False
try:
executor.preprocess(notebook, {'metadata': {'path': working_dir}}, km=kernel_manager)
except:
exec_failed = True
# Save notebook outputs in the shelve db
if outputs is not None:
executor.kc = kernel_manager.client() # For compatibility with 5.x.x version
executor.preprocess_cell(output_cell, {'metadata': {'path': working_dir}}, -1)
if raise_exception:
raise
finally:
# Check if something went wrong
failed, error_cell_num, traceback_message = extract_traceback(notebook=notebook)
failed = failed or exec_failed
# Prepare execution results: execution state, notebook outputs and error info (if exists)
if failed:
exec_res = {'failed': failed, 'failed cell number': error_cell_num, 'traceback': traceback_message}
else:
exec_res = {'failed': failed, 'failed cell number': None, 'traceback': ''}
if outputs is not None:
with shelve.open(out_path_db) as notebook_db:
exec_res['outputs'] = notebook_db.get('outputs', {})
if add_timestamp:
timestamp = (f"**Executed:** {time.ctime(start_time)}<br>"
f"**Duration:** {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}<br>"
f"**Autogenerated from:** [{path}]\n\n---")
timestamp_cell = nbformat.v4.new_markdown_cell(timestamp)
notebook['cells'].insert(0, timestamp_cell)
# Save the executed notebook/HTML to disk
if out_path_ipynb:
save_notebook(notebook=notebook, out_path_ipynb=out_path_ipynb, display_link=display_links)
if out_path_html:
notebook_to_html(notebook=notebook, out_path_html=out_path_html, display_link=display_links)
# Remove shelve files if the notebook is successfully executed
if out_path_db and not failed:
for ext in ['bak', 'dat', 'dir']:
os.remove(out_path_db + '.' + ext)
if return_notebook:
exec_res['notebook'] = notebook
return exec_res
# Save notebook functions
def save_notebook(notebook, out_path_ipynb, display_link):
""" Save notebook as ipynb file."""
import nbformat
from IPython.display import display, FileLink
with open(out_path_ipynb, 'w', encoding='utf-8') as file:
nbformat.write(notebook, file)
if display_link:
display(FileLink(out_path_ipynb))
def notebook_to_html(notebook, out_path_html, display_link):
""" Save notebook as ipynb file."""
from nbconvert import HTMLExporter
from IPython.display import display, FileLink
html_exporter = HTMLExporter()
body, _ = html_exporter.from_notebook_node(notebook)
with open(out_path_html, 'w', encoding='utf-8') as f:
f.write(body)
if display_link:
display(FileLink(out_path_html))
def extract_traceback(notebook):
""" Extracts information about an error from the notebook.
Parameters
----------
notebook: :class:`nbformat.notebooknode.NotebookNode`
Executed notebook to find an error traceback.
Returns
-------
bool
Whether the executed notebook has an error traceback.
int or None
Number of a cell with a traceback.
If None, then the notebook doesn't contain an error traceback.
str
Error traceback if exists.
"""
for cell in notebook['cells']:
# Find a cell output with a traceback and extract the traceback
outputs = cell.get('outputs', [])
for output in outputs:
traceback = output.get('traceback', [])
if traceback:
traceback = '\n'.join(traceback)
return True, cell['execution_count'], traceback
return False, None, ""
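# Hedged usage sketch, not part of batchflow: the notebook name and variable names
# below are placeholders; `inputs` becomes an injected cell and `outputs` are pulled
# back out of the shelve database, as described in the run_notebook docstring above.
if __name__ == "__main__":
    result = run_notebook(
        path="train.ipynb",
        inputs={"batch_size": 64, "n_iters": 10},
        outputs=["loss_history"],
        out_path_ipynb="train_executed.ipynb",
        display_links=False,
    )
    if result["failed"]:
        print(result["traceback"])
    else:
        print(result["outputs"].get("loss_history"))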
|
1bb431dbee6001d1761a9cb491f279ee5391131d
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/test/modules/test_higher_derivative_slice.py
|
c06842c02c5e1e15c04664a43250647fb7d380c8
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,288
|
py
|
test_higher_derivative_slice.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def random_index(dim):
start = np.random.choice(list(range(dim)))
stop = np.random.choice(list(range(1, dim + 1)))
if start >= stop:
start, stop = stop - 1, start + 1
step = np.random.randint(1, dim)
return f"{start}:{stop}:{step}"
def random_slice(dim_vec):
slice_index = ", ".join(random_index(dim) for dim in dim_vec)
return slice_index
def _test_slice_grad_grad_impl(test_case):
ndim = np.random.randint(2, 5)
x_shape = [np.random.randint(3, 8) for _ in range(ndim)]
x = random_tensor(len(x_shape), *x_shape).requires_grad_(True)
slice_index = random_slice(x_shape)
y = eval(f"x[{slice_index}]")
init_grad = random_tensor(len(y.oneflow.shape), *y.oneflow.shape).requires_grad_()
x_grad = torch.autograd.grad(y, x, init_grad, create_graph=True)[0]
test_case.assertTrue(
np.allclose(
x_grad.pytorch.detach().cpu().numpy(), x_grad.oneflow.detach().numpy()
)
)
init_grad_grad = random_tensor(
len(x_grad.oneflow.shape), *x_grad.oneflow.shape
).requires_grad_()
dgrad = torch.autograd.grad(x_grad, init_grad, init_grad_grad, create_graph=False)[
0
]
test_case.assertTrue(
np.allclose(
dgrad.pytorch.detach().cpu().numpy(), dgrad.oneflow.detach().numpy(),
)
)
class TestSliceHigherDerivative(flow.unittest.TestCase):
def test_slice_grad_grad(test_case):
for i in range(10):
_test_slice_grad_grad_impl(test_case)
if __name__ == "__main__":
unittest.main()
|
a2369fea8d42797ae48d2b3ac1a387f16f09d63b
|
a0447b03ad89a41a5c2e2073e32aeaf4d6279340
|
/ironic/drivers/modules/redfish/firmware_utils.py
|
feeec2df23b5fb821f9e2edbcd9b8820b4fc4b5b
|
[
"Apache-2.0"
] |
permissive
|
openstack/ironic
|
2ae87e36d7a62d44b7ed62cad4e2e294d48e061b
|
ab76ff12e1c3c2208455e917f1a40d4000b4e990
|
refs/heads/master
| 2023-08-31T11:08:34.486456
| 2023-08-31T04:45:05
| 2023-08-31T04:45:05
| 10,066,301
| 411
| 365
|
Apache-2.0
| 2023-07-25T02:05:53
| 2013-05-14T22:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 10,767
|
py
|
firmware_utils.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
from urllib import parse as urlparse
import jsonschema
from oslo_log import log
from oslo_utils import fileutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import image_service
from ironic.common import swift
from ironic.conf import CONF
LOG = log.getLogger(__name__)
_UPDATE_FIRMWARE_SCHEMA = {
"$schema": "http://json-schema.org/schema#",
"title": "update_firmware clean step schema",
"type": "array",
# list of firmware update images
"items": {
"type": "object",
"required": ["url", "checksum"],
"properties": {
"url": {
"description": "URL for firmware file",
"type": "string",
"minLength": 1
},
"checksum": {
"description": "SHA1 checksum for firmware file",
"type": "string",
"minLength": 1
},
"wait": {
"description": "optional wait time for firmware update",
"type": "integer",
"minimum": 1
},
"source":
{
"description": "optional firmware_source to override global "
"setting for firmware file",
"type": "string",
"enum": ["http", "local", "swift"]
}
},
"additionalProperties": False
}
}
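# Hedged example, not part of the Ironic source: an argument list that validates
# against the schema above. The URL is a placeholder and the checksum shown is
# simply the SHA1 of empty input.
_EXAMPLE_FIRMWARE_ARGS = [
    {
        "url": "http://198.51.100.10/firmware/bmc_fw.bin",
        "checksum": "da39a3ee5e6b4b0d3255bfef95601890afd80709",
        "wait": 300,
        "source": "http",
    },
]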
_FIRMWARE_SUBDIR = 'firmware'
def validate_update_firmware_args(firmware_images):
"""Validate ``update_firmware`` step input argument
:param firmware_images: args to validate.
:raises: InvalidParameterValue When argument is not valid
"""
try:
jsonschema.validate(firmware_images, _UPDATE_FIRMWARE_SCHEMA)
except jsonschema.ValidationError as err:
raise exception.InvalidParameterValue(
_('Invalid firmware update %(firmware_images)s. Errors: %(err)s')
% {'firmware_images': firmware_images, 'err': err})
def get_swift_temp_url(parsed_url):
"""Gets Swift temporary URL
:param parsed_url: Parsed URL from URL in format
swift://container/[sub-folder/]file
:returns: Swift temporary URL
"""
return swift.SwiftAPI().get_temp_url(
parsed_url.netloc, parsed_url.path.lstrip('/'),
CONF.redfish.swift_object_expiry_timeout)
def download_to_temp(node, url):
"""Downloads to temporary location from given URL
:param node: Node for which to download to temporary location
:param url: URL to download from
:returns: File path of temporary location file is downloaded to
"""
parsed_url = urlparse.urlparse(url)
scheme = parsed_url.scheme.lower()
if scheme not in ('http', 'swift', 'file'):
raise exception.InvalidParameterValue(
_('%(scheme)s is not supported for %(url)s.')
% {'scheme': scheme, 'url': parsed_url.geturl()})
tempdir = os.path.join(tempfile.gettempdir(), node.uuid)
os.makedirs(tempdir, exist_ok=True)
temp_file = os.path.join(
tempdir,
os.path.basename(parsed_url.path))
LOG.debug('For node %(node)s firmware at %(url)s will be downloaded to '
'temporary location at %(temp_file)s',
{'node': node.uuid, 'url': url, 'temp_file': temp_file})
if scheme == 'http':
with open(temp_file, 'wb') as tf:
image_service.HttpImageService().download(url, tf)
elif scheme == 'swift':
swift_url = get_swift_temp_url(parsed_url)
with open(temp_file, 'wb') as tf:
image_service.HttpImageService().download(swift_url, tf)
elif scheme == 'file':
with open(temp_file, 'wb') as tf:
image_service.FileImageService().download(
parsed_url.path, tf)
return temp_file
def verify_checksum(node, checksum, file_path):
"""Verify checksum.
:param node: Node for which file to verify checksum
:param checksum: Expected checksum value
:param file_path: File path for which to verify checksum
:raises RedfishError: When checksum does not match
"""
if len(checksum) <= 41:
        # SHA1: 40 hex characters long
calculated_checksum = fileutils.compute_file_checksum(
file_path, algorithm='sha1')
elif len(checksum) <= 64:
calculated_checksum = fileutils.compute_file_checksum(
file_path, algorithm='sha256')
elif len(checksum) <= 128:
calculated_checksum = fileutils.compute_file_checksum(
file_path, algorithm='sha512')
else:
raise exception.RedfishError(
_('Unable to identify checksum to perform firmware file checksum '
              'calculation. Please validate your input and try again. '
'Received: %(checksum)s')
% {'checksum': checksum})
if checksum != calculated_checksum:
raise exception.RedfishError(
_('For node %(node)s firmware file %(temp_file)s checksums do not '
'match. Expected: %(checksum)s, calculated: '
'%(calculated_checksum)s.')
% {'node': node.uuid, 'temp_file': file_path, 'checksum': checksum,
'calculated_checksum': calculated_checksum})
def stage(node, source, temp_file):
"""Stage temporary file to configured location
:param node: Node for which to stage the file
:param source: Where to stage the file. Corresponds to
CONF.redfish.firmware_source.
:param temp_file: File path of temporary file to stage
:returns: Tuple of staged URL and source (http or swift) that needs
cleanup of staged files afterwards.
:raises RedfishError: If staging to HTTP server has failed.
"""
staged_url = None
filename = os.path.basename(temp_file)
if source in ('http', 'local'):
http_url = CONF.deploy.external_http_url or CONF.deploy.http_url
staged_url = urlparse.urljoin(
http_url, "/".join([_FIRMWARE_SUBDIR, node.uuid, filename]))
staged_folder = os.path.join(
CONF.deploy.http_root, _FIRMWARE_SUBDIR, node.uuid)
staged_path = os.path.join(staged_folder, filename)
LOG.debug('For node %(node)s temporary file %(temp_file)s will be '
'hard-linked or copied to %(staged_path)s and served over '
'%(staged_url)s',
{'node': node.uuid, 'temp_file': temp_file,
'staged_path': staged_path, 'staged_url': staged_url})
os.makedirs(staged_folder, exist_ok=True)
try:
os.link(temp_file, staged_path)
os.chmod(temp_file, CONF.redfish.file_permission)
except OSError as oserror:
LOG.debug("Could not hardlink file %(temp_file)s to location "
"%(staged_path)s. Will try to copy it. Error: %(error)s",
{'temp_file': temp_file, 'staged_path': staged_path,
'error': oserror})
try:
shutil.copyfile(temp_file, staged_path)
os.chmod(staged_path, CONF.redfish.file_permission)
except IOError as ioerror:
raise exception.RedfishError(
_('For %(node)s failed to copy firmware file '
'%(temp_file)s to HTTP server root. Error %(error)s')
% {'node': node.uuid, 'temp_file': temp_file,
'error': ioerror})
elif source == 'swift':
container = CONF.redfish.swift_container
timeout = CONF.redfish.swift_object_expiry_timeout
swift_api = swift.SwiftAPI()
object_name = "/".join([node.uuid, filename])
swift_api.create_object(
container,
object_name,
temp_file,
object_headers={'X-Delete-After': str(timeout)})
staged_url = swift_api.get_temp_url(
container, object_name, timeout)
LOG.debug('For node %(node)s temporary file at %(temp_file)s will be '
'served from Swift temporary URL %(staged_url)s',
{'node': node.uuid, 'temp_file': temp_file,
'staged_url': staged_url})
need_cleanup = 'swift' if source == 'swift' else 'http'
return staged_url, need_cleanup
def cleanup(node):
"""Clean up staged files
:param node: Node for which to clean up. Should contain
'firmware_cleanup' entry in `driver_internal_info` to indicate
source(s) to be cleaned up.
"""
    # Clean up the temporary directory just in case something is left over after
    # staging to http or swift has failed.
temp_dir = os.path.join(tempfile.gettempdir(), node.uuid)
LOG.debug('For node %(node)s cleaning up temporary files, if any, from '
'%(temp_dir)s.', {'node': node.uuid, 'temp_dir': temp_dir})
shutil.rmtree(temp_dir, ignore_errors=True)
cleanup = node.driver_internal_info.get('firmware_cleanup')
if not cleanup:
return
if 'http' in cleanup:
http_dir = os.path.join(
CONF.deploy.http_root, _FIRMWARE_SUBDIR, node.uuid)
LOG.debug('For node %(node)s cleaning up files from %(http_dir)s.',
{'node': node.uuid, 'http_dir': http_dir})
shutil.rmtree(http_dir, ignore_errors=True)
if 'swift' in cleanup:
swift_api = swift.SwiftAPI()
container = CONF.redfish.swift_container
LOG.debug('For node %(node)s cleaning up files from Swift container '
'%(container)s.',
{'node': node.uuid, 'container': container})
_, objects = swift_api.connection.get_container(container)
for o in objects:
name = o.get('name')
if name and name.startswith(node.uuid):
try:
swift_api.delete_object(container, name)
except exception.SwiftOperationError as error:
LOG.warning('For node %(node)s failed to clean up '
'%(object)s. Error: %(error)s',
{'node': node.uuid, 'object': name,
'error': error})
|
9452f475137fe8d7cbcad999a67b96554872b28c
|
b728c792b5171f6be6ad91919b4a76a6f198b3e9
|
/src/lib/python/bundy/util/cio/__init__.py
|
935160a7ee10e012622b3d37df1632e9e38d31a6
|
[
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"BSL-1.0"
] |
permissive
|
bundy-dns/bundy
|
c8beeca2c051924590794c92a3a58d1980a86024
|
3d41934996b82b0cd2fe22dd74d2abc1daba835d
|
refs/heads/master
| 2021-09-28T16:24:39.037808
| 2021-09-22T06:04:17
| 2021-09-22T06:04:17
| 19,160,469
| 110
| 33
|
NOASSERTION
| 2021-09-22T06:04:18
| 2014-04-25T20:54:37
|
C++
|
UTF-8
|
Python
| false
| false
| 88
|
py
|
__init__.py
|
"""
Here are functions and classes for forwarding socket sessions between processes.
"""
|
f3061407c826cdc3a18800d76611a4094caa6dac
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/recorder/table_managers/recorder_runs.py
|
455c8375b1cf043b7a24f687fcbff94f30295ed2
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,031
|
py
|
recorder_runs.py
|
"""Track recorder run history."""
from __future__ import annotations
import bisect
from dataclasses import dataclass
from datetime import datetime
from sqlalchemy.orm.session import Session
import homeassistant.util.dt as dt_util
from ..db_schema import RecorderRuns
from ..models import process_timestamp
def _find_recorder_run_for_start_time(
run_history: _RecorderRunsHistory, start: datetime
) -> RecorderRuns | None:
"""Find the recorder run for a start time in _RecorderRunsHistory."""
run_timestamps = run_history.run_timestamps
runs_by_timestamp = run_history.runs_by_timestamp
    # bisect_left tells us where we would insert
# a value in the list of runs after the start timestamp.
#
# The run before that (idx-1) is when the run started
#
# If idx is 0, history never ran before the start timestamp
#
if idx := bisect.bisect_left(run_timestamps, start.timestamp()):
return runs_by_timestamp[run_timestamps[idx - 1]]
return None
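# Hedged sketch, not part of Home Assistant: the same bisect_left pattern on plain
# integers with made-up timestamps, to show why index - 1 is the covering run.
def _demo_bisect_lookup() -> int | None:
    run_timestamps = [100, 200, 300]   # sorted run start timestamps
    start_timestamp = 250              # we want the run that was active at t=250
    if idx := bisect.bisect_left(run_timestamps, start_timestamp):
        return run_timestamps[idx - 1]  # -> 200, the run started before t=250
    return None                         # history never ran before t=250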
@dataclass(frozen=True)
class _RecorderRunsHistory:
"""Bisectable history of RecorderRuns."""
run_timestamps: list[int]
runs_by_timestamp: dict[int, RecorderRuns]
class RecorderRunsManager:
"""Track recorder run history."""
def __init__(self) -> None:
"""Track recorder run history."""
self._recording_start = dt_util.utcnow()
self._current_run_info: RecorderRuns | None = None
self._run_history = _RecorderRunsHistory([], {})
@property
def recording_start(self) -> datetime:
"""Return the time the recorder started recording states."""
return self._recording_start
@property
def first(self) -> RecorderRuns:
"""Get the first run."""
if runs_by_timestamp := self._run_history.runs_by_timestamp:
return next(iter(runs_by_timestamp.values()))
return self.current
@property
def current(self) -> RecorderRuns:
"""Get the current run."""
# If start has not been called yet because the recorder is
# still starting up we want history to use the current time
# as the created time to ensure we can still return results
# and we do not try to pull data from the previous run.
return self._current_run_info or RecorderRuns(
start=self.recording_start, created=dt_util.utcnow()
)
@property
def active(self) -> bool:
"""Return if a run is active."""
return self._current_run_info is not None
def get(self, start: datetime) -> RecorderRuns | None:
"""Return the recorder run that started before or at start.
If the first run started after the start, return None
"""
if start >= self.recording_start:
return self.current
return _find_recorder_run_for_start_time(self._run_history, start)
def start(self, session: Session) -> None:
"""Start a new run.
Must run in the recorder thread.
"""
self._current_run_info = RecorderRuns(
start=self.recording_start, created=dt_util.utcnow()
)
session.add(self._current_run_info)
session.flush()
session.expunge(self._current_run_info)
self.load_from_db(session)
def reset(self) -> None:
"""Reset the run when the database is changed or fails.
Must run in the recorder thread.
"""
self._recording_start = dt_util.utcnow()
self._current_run_info = None
def end(self, session: Session) -> None:
"""End the current run.
Must run in the recorder thread.
"""
assert self._current_run_info is not None
self._current_run_info.end = dt_util.utcnow()
session.add(self._current_run_info)
def load_from_db(self, session: Session) -> None:
"""Update the run cache.
Must run in the recorder thread.
"""
run_timestamps: list[int] = []
runs_by_timestamp: dict[int, RecorderRuns] = {}
for run in session.query(RecorderRuns).order_by(RecorderRuns.start.asc()).all():
session.expunge(run)
if run_dt := process_timestamp(run.start):
# Not sure if this is correct or runs_by_timestamp annotation should be changed
timestamp = int(run_dt.timestamp())
run_timestamps.append(timestamp)
runs_by_timestamp[timestamp] = run
#
# self._run_history is accessed in get()
# which is allowed to be called from any thread
#
# We use a dataclass to ensure that when we update
# run_timestamps and runs_by_timestamp
# are never out of sync with each other.
#
self._run_history = _RecorderRunsHistory(run_timestamps, runs_by_timestamp)
def clear(self) -> None:
"""Clear the current run after ending it.
Must run in the recorder thread.
"""
if self._current_run_info:
self._current_run_info = None
|
d59be39c9409831bc132f879d3b880b3339c1805
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/olap/doris/extension/dbt-doris/dbt/include/doris/profile_template.yml
|
1e62e6de94a9dd238107cb8926df19d8b76544f5
|
[
"OpenSSL",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-facebook-patent-rights-2",
"PSF-2.0",
"dtoa",
"MIT",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,386
|
yml
|
profile_template.yml
|
#!/usr/bin/env python
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
fixed:
type: doris
prompts:
host:
hint: 'hostname for your instance(your doris fe host)'
port:
default: 9030
type: 'int'
hint: 'port for your instance(your doris fe query_port)'
schema:
default: 'dbt'
    hint: 'the schema name as stored in the database; Doris does not use schemas to group tables or views'
username:
hint: 'your doris username'
password:
hint: 'your doris password, if no password, just Enter'
hide_input: true
default: ''
threads:
hint: "1 or more"
type: "int"
default: 1
|
11b374bff1a583893023a8bb12380f3db840e151
|
fbe68d84e97262d6d26dd65c704a7b50af2b3943
|
/third_party/virtualbox/src/VBox/ValidationKit/testboxscript/win/fix_stale_refs.py
|
5a17161df45401e78cbd3d68d771ae8fce9483b7
|
[
"MIT",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"MPL-1.0",
"LicenseRef-scancode-generic-exception",
"Apache-2.0",
"OpenSSL"
] |
permissive
|
thalium/icebox
|
c4e6573f2b4f0973b6c7bb0bf068fe9e795fdcfb
|
6f78952d58da52ea4f0e55b2ab297f28e80c1160
|
refs/heads/master
| 2022-08-14T00:19:36.984579
| 2022-02-22T13:10:31
| 2022-02-22T13:10:31
| 190,019,914
| 585
| 109
|
MIT
| 2022-01-13T20:58:15
| 2019-06-03T14:18:12
|
C++
|
UTF-8
|
Python
| false
| false
| 6,098
|
py
|
fix_stale_refs.py
|
# -*- coding: utf-8 -*-
# $Id: fix_stale_refs.py $
"""
This module must be used interactively!
Use with caution as it will delete some values from the registry!
It tries to locate client references to products that no longer exist.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 118781 $"
from _winreg import HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS
from _winreg import OpenKey, CloseKey, EnumKey, QueryInfoKey, EnumValue, DeleteValue, QueryValueEx
from distutils.util import strtobool
def reverse_bytes(hex_string):
"""
This function reverses the order of bytes in the provided string.
Each byte is represented by two characters which are reversed as well.
"""
#print 'reverse_bytes(' + hex_string + ')'
chars = len(hex_string)
if chars > 2:
return reverse_bytes(hex_string[chars/2:]) + reverse_bytes(hex_string[:chars/2])
else:
return hex_string[1] + hex_string[0]
def transpose_guid(guid):
"""
    Windows Installer uses a different way to present GUID strings. This function converts a GUID
    from the installer's presentation to the more conventional form.
"""
return '{' + reverse_bytes(guid[0:8]) + '-' + reverse_bytes(guid[8:12]) + \
'-' + reverse_bytes(guid[12:16]) + \
'-' + reverse_bytes(guid[16:18]) + reverse_bytes(guid[18:20]) + \
'-' + ''.join([reverse_bytes(guid[i:i+2]) for i in range(20, 32, 2)]) + '}'
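# Hedged illustration, not part of the original script: the packed value below is an
# invented registry-style GUID; feeding it to transpose_guid() yields
# '{87654321-BA09-FEDC-2131-4151617181A1}' (each GUID field's hex string is reversed).
EXAMPLE_PACKED_GUID = '1234567890ABCDEF121314151617181A'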
PRODUCTS_KEY = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Installer\UserData\S-1-5-18\Products'
COMPONENTS_KEY = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Installer\UserData\S-1-5-18\Components'
def get_installed_products():
"""
Enumerate all installed products.
"""
products = {}
hkey_products = OpenKey(HKEY_LOCAL_MACHINE, PRODUCTS_KEY, 0, KEY_ALL_ACCESS)
try:
product_index = 0
while True:
product_guid = EnumKey(hkey_products, product_index)
hkey_product_properties = OpenKey(hkey_products, product_guid + r'\InstallProperties', 0, KEY_ALL_ACCESS)
try:
value = QueryValueEx(hkey_product_properties, 'DisplayName')[0]
except WindowsError, exception:
if exception.winerror != 2:
raise
value = '<unknown>'
CloseKey(hkey_product_properties)
products[product_guid] = value
product_index += 1
except WindowsError, exceptione:
if exceptione.winerror != 259:
print exceptione.strerror + '.', 'error', exceptione.winerror
CloseKey(hkey_products)
print 'Installed products:'
for product_key in sorted(products.keys()):
print transpose_guid(product_key), '=', products[product_key]
print
return products
def get_missing_products(hkey_components):
"""
Detect references to missing products.
"""
products = get_installed_products()
missing_products = {}
for component_index in xrange(0, QueryInfoKey(hkey_components)[0]):
component_guid = EnumKey(hkey_components, component_index)
hkey_component = OpenKey(hkey_components, component_guid, 0, KEY_ALL_ACCESS)
clients = []
for value_index in xrange(0, QueryInfoKey(hkey_component)[1]):
client_guid, client_path = EnumValue(hkey_component, value_index)[:2]
clients.append((client_guid, client_path))
if not client_guid in products:
if client_guid in missing_products:
missing_products[client_guid].append((component_guid, client_path))
else:
missing_products[client_guid] = [(component_guid, client_path)]
CloseKey(hkey_component)
return missing_products
def main():
"""
    Enumerate all installed products, go through all components and check if client references
    point to valid products. Remove references to non-existent products if the user allows it.
"""
hkey_components = OpenKey(HKEY_LOCAL_MACHINE, COMPONENTS_KEY, 0, KEY_ALL_ACCESS)
missing_products = get_missing_products(hkey_components)
print 'Missing products refer the following components:'
for product_guid in sorted(missing_products.keys()):
if product_guid[1:] == '0'*31:
continue
print 'Product', transpose_guid(product_guid) + ':'
for component_guid, component_file in missing_products[product_guid]:
print ' ' + transpose_guid(component_guid), '=', component_file
print 'Remove all references to product', transpose_guid(product_guid) + '? [y/n]'
if strtobool(raw_input().lower()):
for component_guid, component_file in missing_products[product_guid]:
hkey_component = OpenKey(hkey_components, component_guid, 0, KEY_ALL_ACCESS)
print 'Removing reference in ' + transpose_guid(component_guid), '=', component_file
DeleteValue(hkey_component, product_guid)
CloseKey(hkey_component)
else:
print 'Cancelled removal of product', transpose_guid(product_guid)
CloseKey(hkey_components)
if __name__ == "__main__":
main()
|
951088bc16807a0a5243b515d7c5c7c854599ada
|
b06437eddb77e6d4fb6a4196295fce08845874b0
|
/packages/python-google-compute-engine/google_compute_engine/distro_lib/helpers.py
|
c20b24198587048ebb44419e18d13a9f6c793f71
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
GoogleCloudPlatform/compute-image-packages
|
0601abba643dd95d7ae730994bf92dbecae2a2c7
|
cf4b33214f770da2299923a5fa73d3d95f66ec35
|
refs/heads/master
| 2023-08-11T18:26:53.981415
| 2021-10-28T19:42:19
| 2021-10-28T19:42:19
| 11,852,493
| 329
| 220
|
Apache-2.0
| 2021-03-30T18:03:34
| 2013-08-02T20:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,922
|
py
|
helpers.py
|
#!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distro helpers."""
import os
import subprocess
import time
def CallDhclient(
interfaces, logger, dhclient_script=None):
"""Configure the network interfaces using dhclient.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
"""
logger.info('Enabling the Ethernet interfaces %s.', interfaces)
dhclient_command = ['dhclient']
if dhclient_script and os.path.exists(dhclient_script):
dhclient_command += ['-sf', dhclient_script]
try:
subprocess.check_call(dhclient_command + ['-x'] + interfaces)
subprocess.check_call(dhclient_command + interfaces)
except subprocess.CalledProcessError:
logger.warning('Could not enable interfaces %s.', interfaces)
def CallDhclientIpv6(interfaces, logger, dhclient_script=None,
release_lease=False):
"""Configure the network interfaces for IPv6 using dhclient.
Args:
    interfaces: list of string, the output device names for enabling IPv6.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
release_lease: Release the IPv6 lease.
"""
logger.info('Calling Dhclient for IPv6 configuration '
'on the Ethernet interfaces %s.', interfaces)
timeout_command = ['timeout', '5']
dhclient_command = ['dhclient']
if release_lease:
try:
subprocess.check_call(
timeout_command + dhclient_command + [
'-6', '-r', '-v'] + interfaces)
except subprocess.CalledProcessError:
logger.warning('Could not release IPv6 lease on interface %s.',
interfaces)
return
# Check for a 'tentative' IPv6 address which would prevent `dhclient -6` from
# succeeding below. This should only take 1 second, but we try for up to 5.
command = ['ip', '-6', '-o', 'a', 's', 'dev', interfaces[0], 'scope',
'link', 'tentative']
for i in range(5):
output = ''
try:
output = subprocess.check_output(command)
except subprocess.CalledProcessError as e:
logger.warning('Could not confirm tentative IPv6 address: %s.', e.output)
if output:
logger.info('Found tentative ipv6 link address %s, sleeping 1 second.',
output.strip())
time.sleep(1)
else:
break
if dhclient_script and os.path.exists(dhclient_script):
dhclient_command += ['-sf', dhclient_script]
try:
subprocess.check_call(
timeout_command + dhclient_command + ['-1', '-6', '-v'] + interfaces)
except subprocess.CalledProcessError:
logger.warning('Could not enable IPv6 on interface %s.', interfaces)
def CallEnableRouteAdvertisements(interfaces, logger):
"""Enable route advertisements.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
"""
for interface in interfaces:
accept_ra = (
'net.ipv6.conf.{interface}.accept_ra_rt_info_max_plen'.format(
interface=interface))
CallSysctl(logger, accept_ra, 128)
def CallHwclock(logger):
"""Sync clock using hwclock.
Args:
logger: logger object, used to write to SysLog and serial port.
"""
command = ['/sbin/hwclock', '--hctosys']
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
logger.warning('Failed to sync system time with hardware clock.')
else:
logger.info('Synced system time with hardware clock.')
def CallNtpdate(logger):
"""Sync clock using ntpdate.
Args:
logger: logger object, used to write to SysLog and serial port.
"""
ntpd_inactive = subprocess.call(['service', 'ntpd', 'status'])
try:
if not ntpd_inactive:
subprocess.check_call(['service', 'ntpd', 'stop'])
subprocess.check_call(
'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`', shell=True)
if not ntpd_inactive:
subprocess.check_call(['service', 'ntpd', 'start'])
except subprocess.CalledProcessError:
logger.warning('Failed to sync system time with ntp server.')
else:
logger.info('Synced system time with ntp server.')
def CallSysctl(logger, name, value):
"""Write a variable using sysctl.
Args:
logger: logger object, used to write to SysLog and serial port.
name: string name of the sysctl variable.
value: value of the sysctl variable.
"""
logger.info('Configuring sysctl %s.', name)
sysctl_command = [
'sysctl', '-w', '{name}={value}'.format(name=name, value=value)]
try:
subprocess.check_call(sysctl_command)
except subprocess.CalledProcessError:
logger.warning('Unable to configure sysctl %s.', name)
def SystemctlRestart(service, logger):
"""Restart a service using systemctl.
Args:
service: the name of the service to restart.
logger: logger object, used to write to SysLog and serial port.
"""
logger.info('Restarting service via "systemctl restart %s".', service)
systemctl_command = ['systemctl', 'restart', service]
try:
subprocess.check_call(systemctl_command)
except subprocess.CalledProcessError:
logger.warning('Failed to restart service %s.', service)
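# --- Editor's note: minimal usage sketch, not part of the original module. ---
# A stdlib logger stands in for the package's SysLog logger; 'eth0' is an
# illustrative interface name, and the block assumes the dhclient/hwclock
# binaries exist (as they do on a GCE Linux guest).
if __name__ == '__main__':
  import logging
  logging.basicConfig(level=logging.INFO)
  demo_logger = logging.getLogger('helpers-demo')
  # Bring up DHCP on a hypothetical interface, then sync the system clock.
  CallDhclient(['eth0'], demo_logger)
  CallHwclock(demo_logger)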
|
992bfe4a9d23f9389807ee2fe97f124c465ffc9b
|
bd2cef65bfaad1d7a3b675c26d8fa9010611bb97
|
/core/friendly_organizer.py
|
6c3d4e8bf31faec892c2813281455b168e5527aa
|
[] |
no_license
|
nyu-mlab/iot-inspector-client
|
44310d94abe803cc1c8bc1fb5c524e83ddb71105
|
7eba33c62f74fdf842dbefbda3db56ba7f0eb135
|
refs/heads/master
| 2023-08-31T01:53:25.305641
| 2023-08-24T18:28:55
| 2023-08-24T18:28:55
| 158,865,806
| 101
| 24
| null | 2023-09-11T10:27:14
| 2018-11-23T18:23:07
|
Python
|
UTF-8
|
Python
| false
| false
| 9,735
|
py
|
friendly_organizer.py
|
"""
Add friendly names to entities.
"""
import core.model as model
import core.common as common
import core.global_state as global_state
import core.networking as networking
import core.config as config
import core.anonymization as anonymization
from core.oui_parser import get_vendor
from core.ttl_cache import ttl_cache
import os
import geoip2.database
import functools
import tldextract
import json
ip_country_parser = geoip2.database.Reader(
os.path.join(
common.get_python_code_directory(), '..', 'data', 'maxmind-country.mmdb'
)
)
tracker_directory = os.path.join(
common.get_python_code_directory(), '..', 'data'
)
# Source: https://github.com/duckduckgo/tracker-blocklists/tree/main
tracker_json_list = [
os.path.join(tracker_directory, 'tds.json'),
os.path.join(tracker_directory, 'apple-tds.json'),
os.path.join(tracker_directory, 'android-tds.json')
]
def add_product_info_to_devices():
updated_row_count = 0
# Find all distinct MAC addresses for which the is_inspected field is 1
with model.db:
q = model.Device.select(model.Device.mac_addr) \
.group_by(model.Device.mac_addr) \
.where(model.Device.is_inspected == 1)
mac_addr_list = [device.mac_addr for device in q]
# For each MAC address, find the corresponding product name
inferred_product_name_dict = dict()
for mac_addr in mac_addr_list:
friendly_names = []
product_name = infer_product_name(mac_addr)
oui_vendor = get_vendor(mac_addr)
if product_name:
friendly_names.append(product_name.split('/')[-1])
if oui_vendor:
friendly_names.append(oui_vendor)
if not friendly_names:
continue
inferred_product_name_dict[mac_addr] = ' / '.join(friendly_names)
    # Write the inferred product names into the `friendly_product` field of the database
with model.write_lock:
with model.db:
for mac_addr, product_name in inferred_product_name_dict.items():
row_count = model.Device.update(
friendly_product=product_name
).where(model.Device.mac_addr == mac_addr
).execute()
updated_row_count += row_count
common.log(f'[Friendly Organizer] Updated {updated_row_count} rows of product info.')
def infer_product_name(device_mac_addr: str) -> str:
# Ask NYU server, but first we make sure that we're donating data
if config.get('donation_start_ts', 0) == 0:
return ''
# Also we make sure that the user_key has been set
user_key = config.get('user_key', '')
if not user_key:
return ''
# Anonymize the MAC address
device_id = anonymization.get_device_id(device_mac_addr)
# Send an HTTP GET request to the NYU server and ask
url = global_state.DEVICE_INSIGHTS_URL + f'/{user_key}/{device_id}'
try:
return common.http_request(
method='get',
field_to_extract='product_name',
args=[url],
kwargs=dict(timeout=10)
)
except IOError:
return ''
@ttl_cache(maxsize=8192, ttl=15)
def get_hostname_from_ip_addr(ip_addr: str, in_memory_only=False) -> str:
"""
Returns the hostname associated with an IP address.
Returns an empty string if the hostname is not found.
"""
if networking.is_private_ip_addr(ip_addr):
return '(local network)'
# Ask the in-memory cache
try:
with global_state.global_state_lock:
return global_state.hostname_dict[ip_addr]
except KeyError:
pass
if in_memory_only:
return ''
# Ask the database
try:
with model.db:
hostname = model.Hostname.get(model.Hostname.ip_addr == ip_addr).hostname
if hostname:
# Save the hostname value in memory
with global_state.global_state_lock:
global_state.hostname_dict[ip_addr] = hostname
return hostname
except model.Hostname.DoesNotExist:
pass
# Ask NYU server, but first we make sure that we're donating data
if config.get('donation_start_ts', 0) == 0:
return ''
# Also we make sure that the user_key has been set
user_key = config.get('user_key', '')
if not user_key:
return ''
# Send an HTTP GET request to the NYU server and ask
url = global_state.IP_INSIGHTS_URL + f'/{user_key}/{ip_addr}'
hostname = ''
try:
hostname = common.http_request(
method='get',
field_to_extract='hostname',
args=[url],
kwargs=dict(timeout=10)
)
except IOError:
pass
if not hostname:
return ''
# Remove trailing dots
if hostname.endswith('.'):
hostname = hostname[:-1]
    # Add question mark to denote uncertainty only if the hostname is not parenthesized
if '(' not in hostname and '?' not in hostname:
hostname += '?'
# Save the hostname value in memory
with global_state.global_state_lock:
global_state.hostname_dict[ip_addr] = hostname
return hostname
def add_hostname_info_to_flows():
"""
Adds hostname, reg_domain, and tracker_company to flows retroactively.
"""
updated_row_count = 0
for direction in ['src', 'dst']:
ip_addr_col = getattr(model.Flow, f'{direction}_ip_addr')
hostname_col = getattr(model.Flow, f'{direction}_hostname')
mac_addr_col = getattr(model.Flow, f'{direction}_device_mac_addr')
ip_addr_list = list()
# Find all distinct IP addresses for which the hostname field is empty
with model.db:
q = model.Flow.select(ip_addr_col) \
.group_by(ip_addr_col) \
.where((ip_addr_col != '') & (hostname_col == '') & (mac_addr_col == ''))
ip_addr_list = [getattr(flow, f'{direction}_ip_addr') for flow in q]
# For each IP address, find the corresponding hostname and update the
# reg_domain and tracker_company fields
for ip_addr in ip_addr_list:
# Find the hostname from various sources; could be a slow operation
hostname = get_hostname_from_ip_addr(ip_addr)
if not hostname:
continue
reg_domain = get_reg_domain(hostname)
tracker_company = get_tracker_company(reg_domain)
with model.write_lock:
with model.db:
row_count = model.Flow.update(
**{
f'{direction}_hostname': hostname,
f'{direction}_reg_domain': reg_domain,
f'{direction}_tracker_company': tracker_company
}
).where(
(ip_addr_col == ip_addr) &
(hostname_col == '') &
(mac_addr_col == '')
).execute()
updated_row_count += row_count
common.log(f'[Friendly Organizer] Updated {updated_row_count} rows of hostname info.')
@functools.lru_cache(maxsize=8192)
def get_country_from_ip_addr(remote_ip_addr):
"""Returns country for IP."""
if networking.is_private_ip_addr(remote_ip_addr):
return '(local network)'
try:
country = ip_country_parser.country(remote_ip_addr).country.name
if country:
return country
except Exception:
pass
return ''
def parse_tracking_json(json_contents):
block_list_dict = dict()
for domain, info in json_contents['trackers'].items():
tracker_company = info['owner']['displayName']
if tracker_company:
block_list_dict[domain] = tracker_company
return block_list_dict
@functools.lru_cache(maxsize=1)
def initialize_ad_tracking_db():
"""
    Initializes the AdTracker table with the default list of trackers. Run only once at startup.
"""
# If the AdTracker table is empty, initialize it with the default list
with model.db:
if model.AdTracker.select().count() > 0:
return
block_list_dict = dict()
# Load trackers from file; may be outdated -- TODO: Update these lists
# in future versions
for tracker_json_file in tracker_json_list:
with open(tracker_json_file, 'r') as f:
block_list_dict.update(parse_tracking_json(json.load(f)))
# Add trackers to database
for hostname, tracker_company in block_list_dict.items():
model.AdTracker.create(
hostname=hostname,
tracker_company=tracker_company
)
@functools.lru_cache(maxsize=8192)
def get_tracker_company(hostname: str) -> str:
"""
    Returns the tracker company for a given hostname; returns an empty string if the hostname is not a known tracker.
"""
initialize_ad_tracking_db()
uncertain = '?' in hostname
hostname = hostname.replace('?', '')
try:
company = model.AdTracker.get(model.AdTracker.hostname == hostname).tracker_company
except model.AdTracker.DoesNotExist:
return ''
else:
if uncertain:
company += '?'
return company
@functools.lru_cache(maxsize=8192)
def get_reg_domain(full_domain):
if not full_domain:
return ''
if full_domain == '(local network)':
return full_domain
reg_domain = tldextract.extract(full_domain.replace('?', '')) \
.registered_domain
if reg_domain:
if '?' in full_domain:
reg_domain += '?'
return reg_domain
return full_domain
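# --- Editor's note: illustrative sketch, not part of the original module. ---
# Shows how the hostname helpers compose: a trailing '?' marks an uncertain
# hostname and is preserved by get_reg_domain() and get_tracker_company().
# The hostname below is hypothetical; whether it maps to a tracker owner
# depends on the bundled DuckDuckGo block lists loaded by
# initialize_ad_tracking_db().
#
#   reg_domain = get_reg_domain('metrics.example-tracker.com?')  # -> 'example-tracker.com?'
#   company = get_tracker_company(reg_domain)                    # -> '<owner name>?' or ''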
|
889b61372e44b3f3e131f6b5aae8f7d1690ff62d
|
cde096ba977b63becc1b9066677331ef4594a797
|
/csfieldguide/tests/chapters/loaders/test_chapter_sections_loader.py
|
cff76b39715de194d335e1845d67c0919ce7b093
|
[
"CC-BY-NC-SA-4.0",
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Unlicense",
"LicenseRef-scancode-secret-labs-2011",
"WTFPL",
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"CC-BY-NC-2.5",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uccser/cs-field-guide
|
655524b161fab0ab422679dd80720f660f2cfa98
|
ea3281ec6f4d17538f6d3cf6f88d74fa54581b34
|
refs/heads/develop
| 2023-08-28T14:33:58.789843
| 2023-08-28T08:24:03
| 2023-08-28T08:24:03
| 34,356,619
| 364
| 97
|
MIT
| 2023-09-14T08:58:55
| 2015-04-21T23:00:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 11,744
|
py
|
test_chapter_sections_loader.py
|
import os.path
from unittest import mock
from tests.BaseTestWithDB import BaseTestWithDB
from tests.chapters.ChaptersTestDataGenerator import ChaptersTestDataGenerator
from tests.interactives.InteractivesTestDataGenerator import InteractivesTestDataGenerator
from chapters.management.commands._ChapterSectionsLoader import ChapterSectionsLoader
from chapters.models import Chapter, ChapterSection
from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
from utils.errors.NoHeadingFoundInMarkdownFileError import NoHeadingFoundInMarkdownFileError
from utils.errors.InvalidYAMLValueError import InvalidYAMLValueError
from utils.errors.KeyNotFoundError import KeyNotFoundError
class ChapterSectionsLoaderTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_data = ChaptersTestDataGenerator()
self.interactives_test_data = InteractivesTestDataGenerator()
self.loader_name = "chapter-sections"
self.base_path = os.path.join(self.test_data.LOADER_ASSET_PATH, self.loader_name)
def test_chapters_chapter_section_loader_single_section(self):
test_slug = "single-section"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
["<ChapterSection: This is the section heading>"],
transform=repr
)
def test_chapters_chapter_section_loader_multiple_sections(self):
test_slug = "multiple-sections"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
[
"<ChapterSection: This is the first section>",
"<ChapterSection: This is the second section>"
],
transform=repr
)
def test_chapters_chapter_section_loader_missing_section_data(self):
test_slug = "missing-section-data"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
self.assertRaises(
MissingRequiredFieldError,
chapter_section_loader.load
)
def test_chapters_chapter_section_loader_missing_section_number(self):
test_slug = "missing-section-number"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
self.assertRaises(
MissingRequiredFieldError,
chapter_section_loader.load
)
def test_chapters_chapter_section_loader_invalid_section_number(self):
test_slug = "invalid-section-number"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
self.assertRaises(
InvalidYAMLValueError,
chapter_section_loader.load
)
def test_chapters_chapter_section_loader_duplicate_section_numbers(self):
test_slug = "duplicate-section-numbers"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
self.assertRaises(
InvalidYAMLValueError,
chapter_section_loader.load
)
def test_chapters_chapter_section_loader_missing_name(self):
test_slug = "missing-name"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
self.assertRaises(
NoHeadingFoundInMarkdownFileError,
chapter_section_loader.load
)
def test_chapters_chapter_section_loader_interactive(self):
test_slug = "interactives"
chapter = self.test_data.create_chapter("1")
interactive1 = self.interactives_test_data.create_interactive(1)
interactive2 = self.interactives_test_data.create_interactive(2)
interactive3 = self.interactives_test_data.create_interactive(3)
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
["<ChapterSection: Interactives>"],
transform=repr
)
self.assertEqual(
list(Chapter.objects.get(slug=chapter.slug).interactives.order_by("slug")),
[
interactive1,
interactive2,
interactive3,
],
)
def test_chapters_chapter_section_loader_interactive_invalid(self):
test_slug = "invalid-interactive"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
self.assertRaises(
KeyNotFoundError,
chapter_section_loader.load
)
def test_chapters_chapter_section_loader_non_sequential_section_number(self):
test_slug = "non-sequential-section-numbers"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
self.assertRaises(
InvalidYAMLValueError,
chapter_section_loader.load
)
def test_chapters_chapter_section_loader_added_section(self):
test_slug = "single-section"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
["<ChapterSection: This is the section heading>"],
transform=repr
)
# Now add the section once the previous one is in the database
test_slug = "added-section"
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
[
"<ChapterSection: This is the section heading>",
"<ChapterSection: This is the added section heading>"
],
transform=repr
)
def test_chapters_chapter_section_loader_insert_middle_section(self):
test_slug = "multiple-sections"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
[
"<ChapterSection: This is the first section>",
"<ChapterSection: This is the second section>"
],
transform=repr
)
# Now add the section to the middle now that the previous
# ones are in the database
test_slug = "middle-section"
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
[
"<ChapterSection: This is the first section>",
"<ChapterSection: This is the middle section heading>",
"<ChapterSection: This is the second section>"
],
transform=repr
)
def test_chapters_chapter_section_loader_delete_middle_section(self):
test_slug = "middle-section"
chapter = self.test_data.create_chapter("1")
factory = mock.Mock()
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
[
"<ChapterSection: This is the first section>",
"<ChapterSection: This is the middle section heading>",
"<ChapterSection: This is the second section>"
],
transform=repr
)
# Delete the middle section from the database
test_slug = "multiple-sections"
chapter_section_loader = ChapterSectionsLoader(
factory,
chapter,
base_path=self.base_path,
content_path=test_slug,
structure_filename="{}.yaml".format(test_slug),
)
chapter_section_loader.load()
self.assertQuerysetEqual(
ChapterSection.objects.all(),
[
"<ChapterSection: This is the first section>",
"<ChapterSection: This is the second section>"
],
transform=repr
)
|
eb1cb8f1cf6a06ecc37ceaca94c160ae830a3e81
|
21800ee29401e359877efa834dee08fe175cfd06
|
/panostretch.py
|
97f5efd813819cc537e497e402b3298cc02d2fd9
|
[
"MIT"
] |
permissive
|
zouchuhang/LayoutNetv2
|
7050fdcf899d54a85102cd8d1851fa47c9d1677b
|
74002f8d4535534f24d0ac44520d66b0283c0bad
|
refs/heads/master
| 2021-11-24T02:43:22.587330
| 2021-11-18T19:16:45
| 2021-11-18T19:16:45
| 199,546,402
| 207
| 36
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,044
|
py
|
panostretch.py
|
import functools
import numpy as np
from scipy.ndimage import map_coordinates
def uv_meshgrid(w, h):
uv = np.stack(np.meshgrid(range(w), range(h)), axis=-1)
uv = uv.astype(np.float64)
uv[..., 0] = ((uv[..., 0] + 0.5) / w - 0.5) * 2 * np.pi
uv[..., 1] = ((uv[..., 1] + 0.5) / h - 0.5) * np.pi
return uv
@functools.lru_cache()
def _uv_tri(w, h):
uv = uv_meshgrid(w, h)
sin_u = np.sin(uv[..., 0])
cos_u = np.cos(uv[..., 0])
tan_v = np.tan(uv[..., 1])
return sin_u, cos_u, tan_v
def uv_tri(w, h):
sin_u, cos_u, tan_v = _uv_tri(w, h)
return sin_u.copy(), cos_u.copy(), tan_v.copy()
def coorx2u(x, w=1024):
return ((x + 0.5) / w - 0.5) * 2 * np.pi
def coory2v(y, h=512):
return ((y + 0.5) / h - 0.5) * np.pi
def u2coorx(u, w=1024):
return (u / (2 * np.pi) + 0.5) * w - 0.5
def v2coory(v, h=512):
return (v / np.pi + 0.5) * h - 0.5
def uv2xy(u, v, z=-50):
c = z / np.tan(v)
x = c * np.cos(u)
y = c * np.sin(u)
return x, y
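# Editor's note (descriptive comment, added): pano_connect_points samples, one
# image column at a time, the pixel coordinates of the straight 3D segment at
# plane height z joining corner points p1 and p2 of a w x h equirectangular
# panorama.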
def pano_connect_points(p1, p2, z=-50, w=1024, h=512):
if p1[0] == p2[0]:
return np.array([p1, p2], np.float32)
u1 = coorx2u(p1[0], w)
v1 = coory2v(p1[1], h)
u2 = coorx2u(p2[0], w)
v2 = coory2v(p2[1], h)
x1, y1 = uv2xy(u1, v1, z)
x2, y2 = uv2xy(u2, v2, z)
if abs(p1[0] - p2[0]) < w / 2:
pstart = np.ceil(min(p1[0], p2[0]))
pend = np.floor(max(p1[0], p2[0]))
else:
pstart = np.ceil(max(p1[0], p2[0]))
pend = np.floor(min(p1[0], p2[0]) + w)
coorxs = (np.arange(pstart, pend + 1) % w).astype(np.float64)
vx = x2 - x1
vy = y2 - y1
us = coorx2u(coorxs, w)
ps = (np.tan(us) * x1 - y1) / (vy - np.tan(us) * vx)
cs = np.sqrt((x1 + ps * vx) ** 2 + (y1 + ps * vy) ** 2)
vs = np.arctan2(z, cs)
coorys = v2coory(vs)
return np.stack([coorxs, coorys], axis=-1)
def pano_stretch(img, mask, corners, kx, ky, order=1):
'''
    img: [H, W, C]
    mask: [H, W, C], stretched with the same coordinate mapping as img
corners: [N, 2] in image coordinate (x, y) format
kx: Stretching along front-back direction
ky: Stretching along left-right direction
order: Interpolation order. 0 for nearest-neighbor. 1 for bilinear.
'''
# Process image
sin_u, cos_u, tan_v = uv_tri(img.shape[1], img.shape[0])
u0 = np.arctan2(sin_u * kx / ky, cos_u)
v0 = np.arctan(tan_v * np.sin(u0) / sin_u * ky)
refx = (u0 / (2 * np.pi) + 0.5) * img.shape[1] - 0.5
refy = (v0 / np.pi + 0.5) * img.shape[0] - 0.5
# [TODO]: using opencv remap could probably speedup the process a little
stretched_img = np.stack([
map_coordinates(img[..., i], [refy, refx], order=order, mode='wrap')
for i in range(img.shape[-1])
], axis=-1)
stretched_mask = np.stack([
map_coordinates(mask[..., i], [refy, refx], order=order, mode='wrap')
for i in range(mask.shape[-1])
], axis=-1)
#stretched_label = np.stack([
# map_coordinates(label[..., i], [refy, refx], order=order, mode='wrap')
# for i in range(label.shape[-1])
#], axis=-1)
# Process corners
corners_u0 = coorx2u(corners[:, 0], img.shape[1])
corners_v0 = coory2v(corners[:, 1], img.shape[0])
corners_u = np.arctan2(np.sin(corners_u0) * ky / kx, np.cos(corners_u0))
corners_v = np.arctan(np.tan(corners_v0) * np.sin(corners_u) / np.sin(corners_u0) / ky)
cornersX = u2coorx(corners_u, img.shape[1])
cornersY = v2coory(corners_v, img.shape[0])
stretched_corners = np.stack([cornersX, cornersY], axis=-1)
return stretched_img, stretched_mask, stretched_corners
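# --- Editor's note: minimal usage sketch, not part of the original module. ---
# Stretches a panorama, a per-pixel mask, and its layout corners by a factor
# of 2 along the front-back axis; shapes and values below are illustrative only.
#
#   pano = np.random.rand(512, 1024, 3)
#   mask = np.zeros((512, 1024, 1))
#   cor = np.array([[c * 128, 128 if c % 2 == 0 else 384] for c in range(8)])
#   out_img, out_mask, out_cor = pano_stretch(pano, mask, cor, kx=2, ky=1)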
def visualize_pano_stretch(stretched_img, stretched_cor, title):
'''
Helper function for visualizing the effect of pano_stretch
'''
    thickness = 2
color = (0, 255, 0)
for i in range(4):
xys = pano_connect_points(stretched_cor[i*2], stretched_cor[(i*2+2) % 8], z=-50)
xys = xys.astype(int)
blue_split = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
if len(blue_split) == 0:
cv2.polylines(stretched_img, [xys], False, color, 2)
else:
t = blue_split[0] + 1
            cv2.polylines(stretched_img, [xys[:t]], False, color, thickness)
            cv2.polylines(stretched_img, [xys[t:]], False, color, thickness)
for i in range(4):
xys = pano_connect_points(stretched_cor[i*2+1], stretched_cor[(i*2+3) % 8], z=50)
xys = xys.astype(int)
blue_split = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
if len(blue_split) == 0:
cv2.polylines(stretched_img, [xys], False, color, 2)
else:
t = blue_split[0] + 1
            cv2.polylines(stretched_img, [xys[:t]], False, color, thickness)
            cv2.polylines(stretched_img, [xys[t:]], False, color, thickness)
cv2.putText(stretched_img, title, (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 0, 0), 2, cv2.LINE_AA)
return stretched_img.astype(np.uint8)
if __name__ == '__main__':
import argparse
import time
from PIL import Image
import cv2
parser = argparse.ArgumentParser()
parser.add_argument('--i', default='data/valid/img/pano_abpohapclcyuuz.png')
parser.add_argument('--i_gt', default='data/valid/label_cor/pano_abpohapclcyuuz.txt')
parser.add_argument('--o', default='sample_stretched_pano.png')
parser.add_argument('--kx', default=2, type=float,
help='Stretching along front-back direction')
parser.add_argument('--ky', default=1, type=float,
help='Stretching along left-right direction')
args = parser.parse_args()
img = np.array(Image.open(args.i), np.float64)
with open(args.i_gt) as f:
cor = np.array([line.strip().split() for line in f], np.int32)
    # pano_stretch() also expects a mask and returns it alongside the image and
    # corners; a dummy single-channel mask keeps this demo self-contained.
    mask = np.zeros(img.shape[:2] + (1,), np.float64)
    stretched_img, _, stretched_cor = pano_stretch(img, mask, cor, args.kx, args.ky)
title = 'kx=%3.2f, ky=%3.2f' % (args.kx, args.ky)
visual_stretched_img = visualize_pano_stretch(stretched_img, stretched_cor, title)
Image.fromarray(visual_stretched_img).save(args.o)
|
2c1d1c66690c635b6808855b42e743f8889ded3f
|
50dd46b8ece33f3cdd174284b15d1d51f89669d4
|
/2018/finals/re-drm/validator/hash_asparagus.py
|
4a1d95d154648ea47f80e169fcd9dfd0f2a67bbd
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
google/google-ctf
|
f99da1ee07729bbccb869fff1cbaed6a80e43bcc
|
df02323eaf945d15e124801c74abaadca2749dc7
|
refs/heads/master
| 2023-08-31T14:30:27.548081
| 2023-08-29T13:04:20
| 2023-08-29T13:04:20
| 131,317,137
| 4,136
| 607
|
Apache-2.0
| 2023-08-30T22:17:02
| 2018-04-27T15:56:03
|
Go
|
UTF-8
|
Python
| false
| false
| 3,373
|
py
|
hash_asparagus.py
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Ian Eldred Pudney"
import hashlib
import subprocess
import sys
import threading
import re
import time
def clean_string(string):
"""Removes ANSI escape sequences and non-alphanumeric chars, and converts to lowercase."""
string = re.sub(r'\x1B\[[0-?]*[ -/]*[@-~]', "", string)
ret = []
for c in string:
if c.isalnum():
ret.append(c)
return "".join(ret).lower()
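# Editor's note (illustrative, not part of the original script): clean_string
# strips ANSI colour codes and punctuation before hashing, e.g.
#   clean_string('\x1b[31mFLAG: ok!\x1b[0m')  ->  'flagok'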
def run_command(command, channel, enable_logging, wait_for="$> "):
if enable_logging:
sys.stdout.write(command)
try:
channel.stdin.write(command)
except:
pass # avoid broken pipes
buf = []
while True:
c = channel.stdout.read(1)
if enable_logging:
sys.stdout.write(c)
if len(c) == 0:
break
buf.append(c)
if len(buf) >= len(wait_for) and "".join(buf[-len(wait_for):]) == wait_for:
break
result = "".join(buf)
return result
def shash(data):
return hashlib.sha256(data).hexdigest()
def run_asparagus(path, serial, infile, outfile, enable_logging, expected_outfile=None):
"""Run the ASPARAGUS binary at the specified path, with input.
Sends input from the specified file to ASPARAGUS, once per line.
Writes the hash of each line's result to the specified file.
If enable_logging is true, shows the data transmitted in the
terminal. Finally, returns the hash of the whole output."""
process = subprocess.Popen([path], executable=path, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
outhashes = ""
result = run_command("", process, enable_logging, wait_for=": ")
h = shash(clean_string(result))
outfile.write(h + "\n")
outhashes += h + "\n"
if expected_outfile is not None:
expected_line = expected_outfile.readline()
if expected_line[-1] == "\n":
expected_line = expected_line[0:-1]
if expected_line != shash(h): # double-hashed to prevent reverse-engineering
print "Got wrong pre-serial output: " + h[0:8] + "/" + shash(h)[0:8] + "/" + expected_line[0:8]
return
for line in infile:
if line[-1] != "\n":
line = line + "\n"
line = line.replace("__SERIAL__", serial)
line = line.replace("__PROGRAM_NAME__", path)
result = run_command(line, process, enable_logging)
h = shash(clean_string(result))
outfile.write(h + "\n")
outhashes += h + "\n"
if expected_outfile is not None:
expected_line = expected_outfile.readline()
if expected_line[-1] == "\n":
expected_line = expected_line[0:-1]
if expected_line != shash(h): # double-hashed to prevent reverse-engineering
print "Got wrong output for command '" + line[0:-1] + "': " + h[0:8] + "/" + shash(h)[0:8] + "/" + expected_line[0:8]
return
if not result:
break
process.wait()
return shash(outhashes)
|
2b72900f1815d99fe13e9eee61e2751c34c8b0f8
|
82a7c9ae5392d847df4b01ae864c81bd14caab74
|
/users/models.py
|
c113aca796eab9b49c13bb32d533418f96f0e99c
|
[
"MIT"
] |
permissive
|
DjangoChinaOrg/Django-China-API
|
d4ab9be99d94d74f9f5f93620ea6d062d4fa18a6
|
79a5d85fe88ba7784d08d370b8e7519f7274f208
|
refs/heads/dev
| 2021-07-23T17:37:59.471725
| 2018-11-08T09:55:03
| 2018-11-08T09:55:03
| 122,849,421
| 190
| 52
|
MIT
| 2018-10-13T12:18:09
| 2018-02-25T15:37:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
models.py
|
import os
from django.core.files.base import ContentFile
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.signals import user_logged_in
from django.db import models
from .mugshot import Avatar
from .utils import get_ip_address_from_request
def user_mugshot_path(instance, filename):
return os.path.join('mugshots', instance.username, filename)
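# Editor's note (illustrative, not part of the original code): for a user named
# "alice" uploading "avatar.png", the function above returns
# "mugshots/alice/avatar.png" (with POSIX path separators).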
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
class User(AbstractUser):
"""
    User model definition.
"""
last_login_ip = models.GenericIPAddressField(
"最近一次登陆IP",
unpack_ipv4=True,
blank=True,
null=True
)
ip_joined = models.GenericIPAddressField("注册IP", unpack_ipv4=True, blank=True, null=True)
nickname = models.CharField("昵称", max_length=50, unique=True)
mugshot = models.ImageField("头像", upload_to=user_mugshot_path)
mugshot_thumbnail = ImageSpecField(source='mugshot',
processors=[ResizeToFill(100, 100)],
format='JPEG',
options={'quality': 60})
def __str__(self):
return self.username
def save(self, *args, **kwargs):
if not self.mugshot:
avatar = Avatar(rows=10, columns=10)
image_byte_array = avatar.get_image(
string=self.username,
width=480,
height=480,
pad=10
)
self.mugshot.save('default_mugshot.png', ContentFile(image_byte_array), save=False)
if not self.pk and not self.nickname:
            # Automatically copy username into the nickname field
self.nickname = self.username
super(User, self).save(*args, **kwargs)
def update_last_login_ip(sender, user, request, **kwargs):
"""
    Update the IP address from which the user last logged in.
"""
ip = get_ip_address_from_request(request)
if ip:
user.last_login_ip = ip
user.save()
user_logged_in.connect(update_last_login_ip)
|
fca474ec43d169fa6aa9c699eb1bb64f42690802
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/Pipl/Scripts/CheckSender/CheckSender_test.py
|
3a5cfd69380084b10c5c8bf9a73615d32101664c
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
CheckSender_test.py
|
from CheckSender import get_sender_from_text, format_data
def test_get_sender_from_text():
"""
Given:
- Text with email address.
When:
- Running the get_sender_from_text function.
Then:
- Validating the sender email is correct.
"""
sender = get_sender_from_text('from: test1@gmail.com')
assert sender == 'test1@gmail.com'
def test_get_sender_from_text_no_address_found():
"""
Given:
- Text without email address.
When:
- Running the get_sender_from_text function.
Then:
- Validating the sender email is empty.
"""
sender = get_sender_from_text('from: test1gmail.com')
assert sender == ''
def test_format_data():
"""
Given:
- data output from pipl-search command.
When:
- Running the format_data function.
Then:
- Validating the outputs as expected.
"""
data = format_data([{'Account': {'IDs': '1,2,3', 'Addresses': ['test1@gmail.com', 'test2@gmail.com']}}])
assert data == [{'Account': 'IDs: 1,2,3\nAddresses: test1@gmail.com,\ntest2@gmail.com'}]
|
f0af48e1b30edb9d4be3bb0d8468f01a4144236d
|
59bb398c5f23770e4725f35f932f3a5fd013efae
|
/jwst/residual_fringe/residual_fringe_step.py
|
a422f3c69e37bb0d3085d5f84c064cea40fa83b5
|
[
"BSD-2-Clause"
] |
permissive
|
spacetelescope/jwst
|
9826d86781c6e01aced951882471f8b967fa1f6e
|
a4a0e8ad2b88249f01445ee1dcf175229c51033f
|
refs/heads/master
| 2023-09-04T09:54:04.504036
| 2023-08-31T20:19:27
| 2023-08-31T20:19:27
| 60,551,519
| 449
| 106
|
NOASSERTION
| 2023-09-14T21:21:33
| 2016-06-06T18:34:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,109
|
py
|
residual_fringe_step.py
|
#! /usr/bin/env python
from stdatamodels.jwst import datamodels
from ..stpipe import Step
from . import residual_fringe
from functools import partial
__all__ = ["ResidualFringeStep"]
class ResidualFringeStep(Step):
"""
ResidualFringeStep: Apply residual fringe correction to a science image
using parameters in the residual fringe reference file.
Parameters
----------
input_data : asn file or single file
"""
class_alias = 'residual_fringe'
spec = """
skip = boolean(default=True)
save_intermediate_results = boolean(default = False)
search_output_file = boolean(default = False)
ignore_region_min = list(default = None)
ignore_region_max = list(default = None)
suffix = string(default = 'residual_fringe')
"""
reference_file_types = ['fringefreq', 'regions']
def process(self, input):
self.transmission_level = 80 # sets the transmission level to use in the regions file
# 80% is what other steps use.
# set up the dictionary to ignore wavelength regions in the residual fringe correction
ignore_regions = {}
ignore_regions['num'] = 0
ignore_regions['min'] = []
ignore_regions['max'] = []
if self.ignore_region_min is not None:
for region in self.ignore_region_min:
ignore_regions['min'].append(float(region))
min_num = len(ignore_regions['min'])
if self.ignore_region_max is not None:
for region in self.ignore_region_max:
ignore_regions['max'].append(float(region))
max_num = len(ignore_regions['max'])
if max_num != min_num:
self.log.error("Number of minimum and maximum wavelengths to ignore are not the same")
raise ValueError("Number of ignore_region_min does not match ignore_region_max")
ignore_regions['num'] = min_num
if min_num > 0:
self.log.info('Ignoring {} wavelength regions'.format(min_num))
self.ignore_regions = ignore_regions
input = datamodels.open(input)
if isinstance(input, datamodels.IFUImageModel):
exptype = input.meta.exposure.type
else:
raise TypeError("Failed to process file type {}".format(type(input)))
# Setup output path naming if associations are involved.
asn_id = None
try:
            asn_id = input.meta.asn_table.asn_id
except (AttributeError, KeyError):
pass
if asn_id is None:
asn_id = self.search_attr('asn_id')
if asn_id is not None:
_make_output_path = self.search_attr(
'_make_output_path', parent_first=True
)
self._make_output_path = partial(
_make_output_path,
asn_id=asn_id
)
# Set up residual fringe correction parameters
pars = {
'transmission_level': self.transmission_level,
'save_intermediate_results': self.save_intermediate_results,
'make_output_path': self.make_output_path
}
if exptype != 'MIR_MRS':
            self.log.info("Residual fringe correction is only for MIRI MRS data")
            self.log.error(f"Unsupported exposure type: {exptype}")
input.meta.cal_step.residual_fringe = "SKIPPED"
return input
# 1. set up the reference files
# 2. correct the model
# 3. return from step
self.residual_fringe_filename = self.get_reference_file(input, 'fringefreq')
self.log.info('Using FRINGEFREQ reference file:{}'.
format(self.residual_fringe_filename))
# set up regions reference file
self.regions_filename = self.get_reference_file(input, 'regions')
self.log.info('Using MRS regions reference file: {}'.
format(self.regions_filename))
        # Check for valid reference files. If they are not found, skip the step.
if self.residual_fringe_filename == 'N/A' or self.regions_filename == 'N/A':
if self.residual_fringe_filename == 'N/A':
self.log.warning('No FRINGEFREQ reference file found')
self.log.warning('Residual Fringe step will be skipped')
if self.regions_filename == 'N/A':
self.log.warning('No MRS regions reference file found')
self.log.warning('Residual Fringe step will be skipped')
input.meta.cal_step.residual_fringe = "SKIPPED"
return input
# Do the correction
rfc = residual_fringe.ResidualFringeCorrection(input,
self.residual_fringe_filename,
self.regions_filename,
self.ignore_regions,
**pars)
result = rfc.do_correction()
result.meta.cal_step.residual_fringe = 'COMPLETE'
return result
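# --- Editor's note: minimal usage sketch, not part of the original module. ---
# Assumes the usual stpipe Step.call() invocation pattern; the input filename is
# hypothetical, and skip=False is needed because the step is skipped by default.
#
#   result = ResidualFringeStep.call('jw01234_mirifushort_cal.fits',
#                                    skip=False, save_results=True)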
|
d5197bb74f7e6c581efc946fe09059d973f8d924
|
ccb4cb8358fb896a88bbf0c6771462d898d7a492
|
/skyfield/naifcodes.py
|
08de5dc64081da58a42d3fa53f19ae6461e418ad
|
[
"MIT"
] |
permissive
|
skyfielders/python-skyfield
|
a30d34a680dcd285bc8cd39cedc2629f792d5821
|
61fb6324e312715e20aa75ec24dc87286442be1a
|
refs/heads/master
| 2023-08-31T13:10:32.863587
| 2023-08-10T14:25:56
| 2023-08-10T14:25:56
| 7,924,113
| 1,040
| 204
|
MIT
| 2023-08-28T19:44:50
| 2013-01-30T21:19:21
|
Python
|
UTF-8
|
Python
| false
| false
| 797
|
py
|
naifcodes.py
|
from jplephem.names import (
target_name_pairs as code_name_pairs,
target_names as code_names
)
name_codes = dict((name, code) for code, name in code_name_pairs)
def numbered_name_of(code):
"""Given a code, return a string giving both the code and name.
>>> numbered_name_of(301)
'301 Moon'
"""
name = code_names.get(code, '(Unnamed)')
return '{0} {1}'.format(code, name)
def _target_name(target):
"""Return `target` annotated for display to the user.
* A string target is quoted to make clear that it's a string, like 'comet'.
* A numeric target has its NAIF name appended, like 399 EARTH.
"""
if isinstance(target, str):
return repr(target)
name = code_names.get(target, 'UNKNOWN')
return '{0} {1}'.format(target, name)
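# Editor's note (illustrative, not part of the original module): _target_name
# quotes string targets and annotates numeric ones with their NAIF name, e.g.
#   _target_name('comet')  ->  "'comet'"
#   _target_name(399)      ->  '399 Earth'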
|
cbc6a7ff60b0e7ac1a96e61852c83966853c524c
|
07c27d72f10dbf64c11b6d6cae83d4dc757a2384
|
/tests/pbxsections/TestPBXCopyFilesBuildPhase.py
|
a683cf5c57f584a076e598cf5b5df86b8f37a851
|
[
"MIT"
] |
permissive
|
kronenthaler/mod-pbxproj
|
a481cded65a4f082338c1c8b290aa3382124b6ce
|
e0c42005cdcee3b10522ba5709f4228ec10c049c
|
refs/heads/master
| 2023-06-09T03:12:32.120613
| 2022-10-04T12:14:11
| 2022-10-04T12:14:11
| 8,949,989
| 1,089
| 278
|
MIT
| 2023-01-09T10:50:43
| 2013-03-22T10:50:15
|
Python
|
UTF-8
|
Python
| false
| false
| 446
|
py
|
TestPBXCopyFilesBuildPhase.py
|
import unittest
from pbxproj.pbxsections.PBXCopyFilesBuildPhase import PBXCopyFilesBuildPhase
class PBXCopyFilesBuildPhaseTest(unittest.TestCase):
def testGetComment(self):
phase = PBXCopyFilesBuildPhase()
self.assertEqual(phase._get_comment(), "CopyFiles")
def testGetCommentFromParent(self):
phase = PBXCopyFilesBuildPhase()
phase.name = "copy"
self.assertEqual(phase._get_comment(), "copy")
|
98692834263b40517c502ddef7617ae937e4a985
|
f7e0780b4d73ebf6e50fe4053c01fd3cc4d6b227
|
/auctioning_platform/auctions/auctions/application/repositories/__init__.py
|
21cd12bce44f5828c90ecac831edf6bfcf29fae8
|
[
"MIT"
] |
permissive
|
Enforcer/clean-architecture
|
78d663585f913c51a0460bcafa4af35515cdf549
|
f0c1c0a8364996d309e7381b44933807529200b1
|
refs/heads/master
| 2023-02-20T01:40:24.653512
| 2022-08-02T20:59:03
| 2022-08-02T20:59:03
| 208,138,785
| 454
| 51
|
MIT
| 2023-02-16T01:31:26
| 2019-09-12T20:16:08
|
Python
|
UTF-8
|
Python
| false
| false
| 108
|
py
|
__init__.py
|
__all__ = ["AuctionsRepository"]
from auctions.application.repositories.auctions import AuctionsRepository
|
e9f0ce4e2e6103bb7a28224181ede025518d2b3d
|
b3cde44e48d293d94b612a34506e03feb2ab7fcc
|
/src/python/fsqio/pants/tags/validate.py
|
8e40208ecdb863b87fa404fabc515720dbbed29e
|
[
"Apache-2.0"
] |
permissive
|
foursquare/fsqio
|
0cfd51e71d572f9f5682c03fdaba7c79ffff6785
|
f5a5699c18babaa6810de84bcc6dd1522bcfe259
|
refs/heads/master
| 2022-08-10T10:29:28.004139
| 2022-06-28T22:42:39
| 2022-06-28T22:42:39
| 48,192,426
| 267
| 66
|
Apache-2.0
| 2022-06-28T22:42:40
| 2015-12-17T18:53:44
|
Scala
|
UTF-8
|
Python
| false
| false
| 8,054
|
py
|
validate.py
|
# coding=utf-8
# Copyright 2013 Foursquare Labs Inc. All Rights Reserved.
from __future__ import absolute_import, division, print_function
from hashlib import sha1
import os
import textwrap
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import FingerprintStrategy
from pants.task.task import Task
class NameTagsAndDepFingerprintStrategy(FingerprintStrategy):
def compute_fingerprint(self, target):
hasher = sha1()
hasher.update(target.address.spec)
for tag in sorted(target.tags):
hasher.update(tag)
return hasher.hexdigest()
def __hash__(self):
return 1
def __eq__(self, other):
return isinstance(other, type(self))
class Tagger(Task):
@classmethod
def register_options(cls, register):
super(Tagger, cls).register_options(register)
register(
'--by-basename',
type=dict,
fingerprint=True,
default={},
advanced=True,
)
register(
'--by-prefix',
type=dict,
fingerprint=True,
default={},
advanced=True,
)
register(
'--by-tag',
type=dict,
fingerprint=True,
default={},
advanced=True,
)
@classmethod
def product_types(cls):
return ['tagged_build_graph']
def execute(self):
basenames = self.get_options().by_basename
prefixes = self.get_options().by_prefix
tags = self.get_options().by_tag
if prefixes or basenames or tags:
for target in self.context.targets():
this_basename = os.path.basename(target.address.spec_path)
target._tags |= set(basenames.get(this_basename, []))
for prefix, p_tags in prefixes.items():
if target.address.spec.startswith(prefix):
target._tags |= set(p_tags)
for tag, t_tags in tags.items():
if tag in target._tags:
target._tags |= set(t_tags)
class BuildGraphRuleViolation(object):
def __init__(self, target, dep, tag):
self.target = target
self.dep = dep
self.tag = tag
self.direct = dep in target.dependencies
class TagValidationError(TaskError):
pass
class BannedTag(BuildGraphRuleViolation):
def msg(self):
return '{} bans dependency on {} (via tag: {})'.format(
self.target.address.spec, self.dep.address.spec, self.tag)
class MissingTag(BuildGraphRuleViolation):
def msg(self):
return '{} requires dependencies to have tag {} and thus cannot depend on {}'.format(
self.target.address.spec, self.tag, self.dep.address.spec)
class MissingOneOfTag(BuildGraphRuleViolation):
def msg(self):
return '{} requires dependencies to have at least one tag from {} and thus cannot depend on {}'.format(
self.target.address.spec, self.tag, self.dep.address.spec)
class MustHaveViolation(BuildGraphRuleViolation):
def msg(self):
return '{} cannot depend on {} without having tag {}'.format(
self.target.address.spec, self.dep.address.spec, self.tag)
class MustHaveOneOfViolation(BuildGraphRuleViolation):
def msg(self):
return '{} cannot depend on {} without having one of the following tags {}'.format(
self.target.address.spec, self.dep.address.spec, self.tag)
class FSCommonViolation(BuildGraphRuleViolation):
def msg(self):
return textwrap.dedent("""{} cannot have '{}' tag because it is not in 'src/jvm/io/fsq/'
or 'src/jvm/com/foursquare/common'""".format(
self.target, self.tag))
class Validate(Task):
def __init__(self, *args, **kwargs):
super(Validate, self).__init__(*args, **kwargs)
self._transitive_closure_cache = {}
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data('tagged_build_graph')
@classmethod
def product_types(cls):
return ['validated_build_graph']
def execute(self):
if 'buildgen' in self.context.requested_goals:
return
violations = []
with self.invalidated(self.context.targets(),
invalidate_dependents=True,
fingerprint_strategy=NameTagsAndDepFingerprintStrategy(),
topological_order=True) as invalidation_check:
for vts in invalidation_check.invalid_vts:
invalid_targets = vts.targets
for target in invalid_targets:
if 'exempt' not in target.tags:
violations.extend(self.dependee_violations(target))
violations.extend(self.banned_tag_violations(target))
violations.extend(self.required_tag_violations(target))
violations.extend(self.fscommon_violations(target))
direct_violations = [v for v in violations if v.direct]
if direct_violations:
violations = direct_violations
for v in violations:
self.context.log.error(v.msg())
if violations:
raise TagValidationError('The graph validation failed, please check the failures above.')
def extract_matching_tags(self, prefix, target):
return {tag.split(':', 1)[1] for tag in target.tags if tag.startswith(prefix)}
def nonexempt_deps(self, address):
if address not in self._transitive_closure_cache:
computed_closure = self.context.build_graph.transitive_subgraph_of_addresses([address])
self._transitive_closure_cache[address] = [
dep for dep in computed_closure
if (
dep.address != address and
dep.address not in self.context.build_graph.synthetic_addresses and
'exempt' not in dep.tags
)
]
return self._transitive_closure_cache[address]
def dependee_violations(self, target, include_transitive=True):
for dep in self.nonexempt_deps(target.address):
for must_have in self.extract_matching_tags('dependees_must_have:', dep):
if must_have not in target.tags:
violation = MustHaveViolation(target, dep, must_have)
if include_transitive or violation.direct:
yield violation
for must_have_one_of in self.extract_matching_tags('dependees_must_have_one_of:', dep):
has_one = False
for one_of in must_have_one_of.split(','):
if one_of.strip() in target.tags:
has_one = True
break
if not has_one:
violation = MustHaveOneOfViolation(target, dep, must_have_one_of)
if include_transitive or violation.direct:
yield violation
def fscommon_violations(self, target):
if('fscommon' in target.tags and 'should_remove_fscommon_tag' not in target.tags):
path = target.address.spec_path
# TODO:Jamie untangle resources, but ignore for now
allowed = [
'com/foursquare/common',
'com/twitter/finagle',
'io/fsq',
'src/resources',
'src/webapp',
'test/resources',
]
if not any(sub in target.address.spec_path for sub in allowed):
yield FSCommonViolation(target, dep=None, tag='fscommon')
def required_tag_violations(self, target):
required_tags = self.extract_matching_tags('dependencies_must_have:', target)
if required_tags:
for dep in self.nonexempt_deps(target.address):
for required in required_tags:
if required not in dep.tags:
yield MissingTag(target, dep, required)
required_tags = self.extract_matching_tags('dependencies_must_have_one_of:', target)
if required_tags:
required_tags = map(lambda x: x.split(','), required_tags)
for dep in self.nonexempt_deps(target.address):
has_one = False
for tag_group in required_tags:
for tag in tag_group:
if tag.strip() in dep.tags:
has_one = True
break
if not has_one:
yield MissingOneOfTag(target, dep, required_tags)
def banned_tag_violations(self, target):
banned_tags = self.extract_matching_tags('dependencies_cannot_have:', target)
if banned_tags:
for dep in self.nonexempt_deps(target.address):
for banned in banned_tags:
if banned in dep.tags:
yield BannedTag(target, dep, banned)
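# --- Editor's note: illustrative BUILD-tag semantics, not part of the original task. ---
# Tag names below are hypothetical; the prefixes are the ones parsed by
# extract_matching_tags() above.
#
#   'dependees_must_have:service' on a target T means every non-exempt target
#       depending on T must itself carry the 'service' tag (MustHaveViolation).
#   'dependencies_cannot_have:deprecated' on T flags any non-exempt transitive
#       dependency of T that carries the 'deprecated' tag (BannedTag).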
|
bbda5458c7c6d1f9f7cfc1bb83b79b0346dedc6c
|
bb12da8a0d637b68255b04b182fdd47558bec12e
|
/tests/test_eyaml_eyamlprocessor.py
|
150749df92e90efd95520ce93f36d5cb8e691af1
|
[
"ISC"
] |
permissive
|
wwkimball/yamlpath
|
03f1dffc0c5d5208d43fca33578de9ad0074c395
|
be7af7de60e920659b535aaae39046e84c85c248
|
refs/heads/master
| 2023-08-25T07:42:53.174510
| 2023-03-30T22:32:16
| 2023-03-30T22:32:16
| 184,194,939
| 102
| 20
|
ISC
| 2023-03-30T22:32:17
| 2019-04-30T05:05:17
|
Python
|
UTF-8
|
Python
| false
| false
| 12,201
|
py
|
test_eyaml_eyamlprocessor.py
|
import pytest
from subprocess import run, CalledProcessError
from ruamel.yaml import YAML
from yamlpath.func import unwrap_node_coords
from yamlpath.enums import YAMLValueFormats
from yamlpath.eyaml.enums import EYAMLOutputFormats
from yamlpath.eyaml import EYAMLProcessor
from yamlpath.wrappers import ConsolePrinter
from yamlpath.eyaml.exceptions import EYAMLCommandException
from tests.conftest import requireseyaml, quiet_logger, old_eyaml_keys
@requireseyaml
@pytest.fixture
def eyamldata_f():
data = """---
aliases:
- &secretIdentity >
ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEw
DQYJKoZIhvcNAQEBBQAEggEAUVX4h0PQVnxj5niYRvDPce/TisckEBqkOOcL
ukGr+AFewRfLQ03zMUcr13jS5w7N6K9TIMPyc0QIvzL82a6jWpNAB7kFD+Ua
lQcNwFIERYbo3SVn5+r8GTPzS82z59icEgFeL1ChNkL/vRYgys8IJrrJC/uS
6QQ463hspwF2JyzUF7LM9Jc1EyGuJ1uektj/6jLxnYINrMazC61vb92++2Bk
eMyFRZyCpJ/0ooHvhtF8ZxlLujPbgaUFCRpCxpXIYOGeTcrqgCZzkU3eUv2r
PcCqlxHMOjN2SUXBY1pz8ApqErJ9/x0H9lZvD02XYclAMIWb8jouWJA0LaQ0
Vvji7zBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBVTmGQjl06z3JmnY65
2STRgDB+D8ySgg5OezkWVRCWXyaei2yeLx4NhKUftXz1G4vbM2rCkFd5Unps
u30g09oF92k=]
- &secretPhrase >
ENC[PKCS7,MIIBiQYJKoZIhvcNAQcDoIIBejCCAXYCAQAxggEhMIIBHQIBADAFMAACAQEw
DQYJKoZIhvcNAQEBBQAEggEAXut3j+WbWxcl5Qh857pldvob8b+Av4VwtboA
14g0slbMCemTtAW8fgpDzmrOGnvPvH5cjcY9FFXdvwa9U02NqosQKV0m7msD
NirSWcQMGXaOBZqdedIohSHtUKz0QQ0GuEWxFGgXZjGbSwYSzdY6av5zEOFl
IxGywYD0jLLFm+NkhKxj9wpPyq7qB1JsIqdAbuW33QwgYI8Hp6IBuQCZNFk/
wnVn9ctInnPhLrvaDt6DN/ikT1d7F7VhteVQzFl3QoL6WW5pcg/vNVp7kvE9
xJi6eop2BNOuv0Cvm5gJ6OsRX/a/JmA3jZmiweWk5Z3M6OEPpPwnqx1oz3cR
ES762TBMBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBlMyRzGtnxUiel+T+d
uZFwgCA+DMcg3EllW8xXA7Kc4dhERJLb7VX2mtM9E2s4KrsIyw==]
anchored::secrets: &anchoredSecrets
exposed_values:
ident: This identity is in the wild.
phrase: This phrase is in the wild.
aliased_values:
ident: *secretIdentity
phrase: *secretPhrase
array_of_array_idents:
-
- >
ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEw
DQYJKoZIhvcNAQEBBQAEggEAxum+Uyt3ETjkaQ9C5PqnpCUVCU6wrUYuVBk+
PV7t7hayWGrG+dixzUUP9HKbIh6kbVYIwGCpEhMOmJQZ8TLiu/ye+KQzX/CE
wz4uk7qvv/OvsFiMqmApxcvzNl2Qq7unCScXfngZKPjv4BxAFI1axzsUmxLx
ChOUSkLMkuIJ5myAw43Sfan9Yx3lk96IoN97gN74ZzXTRGjl3n0zxrHy3obT
M12f+MZqHuaTnuvksakk32nQ7jGX82QqxX3HChEkzUkKXG5ceS/cFzSTj9QG
xbYrUXDNq/uviShfVk6tUey76VJAguLlw1ONqRkjonjAz7iR+YIu4RzvPvyJ
Grz/ezBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBDcP+GwmSrNro9UALHI
FoRFgDCwH91AbH9DOpDMj3HAOlxD2JzAkpy4X9SOZVn6Vht2do38Y1Z02Ccf
pHj/ObATQ9M=]
aliased::secrets:
<<: *anchoredSecrets
novel_values:
ident: >
ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEw
DQYJKoZIhvcNAQEBBQAEggEAxum+Uyt3ETjkaQ9C5PqnpCUVCU6wrUYuVBk+
PV7t7hayWGrG+dixzUUP9HKbIh6kbVYIwGCpEhMOmJQZ8TLiu/ye+KQzX/CE
wz4uk7qvv/OvsFiMqmApxcvzNl2Qq7unCScXfngZKPjv4BxAFI1axzsUmxLx
ChOUSkLMkuIJ5myAw43Sfan9Yx3lk96IoN97gN74ZzXTRGjl3n0zxrHy3obT
M12f+MZqHuaTnuvksakk32nQ7jGX82QqxX3HChEkzUkKXG5ceS/cFzSTj9QG
xbYrUXDNq/uviShfVk6tUey76VJAguLlw1ONqRkjonjAz7iR+YIu4RzvPvyJ
Grz/ezBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBDcP+GwmSrNro9UALHI
FoRFgDCwH91AbH9DOpDMj3HAOlxD2JzAkpy4X9SOZVn6Vht2do38Y1Z02Ccf
pHj/ObATQ9M=]
phrase: >
ENC[PKCS7,MIIBygYJKoZIhvcNAQcDoIIBuzCCAbcCAQAxggEhMIIBHQIBADAFMAACAQEw
DQYJKoZIhvcNAQEBBQAEggEAqaxguhCDS3afSGGsUvQ2bqDXQ71yIRa2sT3N
1BH8d1fVDEo371jydJhRNg4bAwWjfvOIsuwhkEAtPSycvPNCimvAYz32dybA
GsQxWo9KuQbmakQ+EDrkxtmS1yxQiUbWk7xw7XercPU8jKSqd3FfpRgKQAKV
lQeFsCb8Cx/uWg2rS00SZ78LciYeFMwEhx2GYpjmOjdGkMVUKtJhPA24QAeF
9QSpOmzpIdUho1hxBg9IP5K6HwgNBoYUMhL4xd8hhpRZUn0VSuERvKZAlRyh
syT4tpPx6MWZw1UXWhPkZg66Fyq+MGJk5Q1zval6Kypqq83SisqsN2z4h1Zx
WcXJMzCBjAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQZRyFQ+GeBvF2A9Yy
QjUr5oBgwxfr7sa3oJfZNssJy4JlmUhfGVGN5GvxSGHM4zB+2c4lLx9Hk8MZ
BBdpMsMcD8QqbTFpgOhKTh/pkT9DKP6CUcvSm7oxD7E8RtEyeIP3vH1UlulK
A0INSZQsnGO+uGIX]
string_values:
ident: ENC[PKCS7,MIIBygYJKoZIhvcNAQcDoIIBuzCCAbcCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEADFShQa5BcW0ctXJ6KGiadZYB2hPJrkO6tpOjz3qJzC6zuOrsL80NGOt9njDSEDQhpwvWHaREYJiv4WdBTdRuS2wVkxev/xDMCJrtrTSZ8aFZ2rFy7bkqBx5qiklOtgX2s9jUwzZ1y6YP7HbrBO2d3tO0Df79FvmcgwQOVRUk03BTczbA3xQc1Y50CBoS2d1VE8UxnSUUij3J/tOmugL9QkdSvBIyHwiKiy6brGgwaU1ddGPLMFdRXYN/gpyIbG595YhwTQfDWMp/2gBA7KZf941QJiIvxvq4LoYInNyBK+qTyaVmRRhDTxC4Cs0WfFlkPTkUdGu/GBc32+UDGwDOpTCBjAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQ/Ox/sWRra4DBDVh5a4QTD4Bg8v37JkY8S0fNbG/Dq5lRCz7/iQW7c1/f0JfRVIi7qTZfTRElrJo/+/o/SWq6bsj8eUA3UjooR07L0gerjOmd2p1kQQlZ1vEukbF/pnuptS07Gdrs4WlN/6KBIEJ//0Rc]
phrase: ENC[PKCS7,MIIBqQYJKoZIhvcNAQcDoIIBmjCCAZYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAR4DfxkRrYAHFQv97lXvaMyxy2iygBWgXWpKUBskCXbUAU0AZ56dsJS00ibVoRNpBoOkIwVN67G7/z084YA+Oqsg4Tw3NIIek14xChqL9m4ehtv1iMMutPM97wF2Yn2JRs63wKSN4l3nmTp/TFpko5rwj1rKap72mpDwrjJEWwRf4nzcdIzp6a7uWcBUVtG09Cu3VLUtoeAtKsIXVhMAZ2r/ozCSAbIQsFKiRPi2I8fL0ovhnmOXAmuB3eRStMDuGey0vCGYtFvWsmBoXkztIlfHB7/oXUJ5ABgu8D+9JmeXYQA4TjdL6gcQA+cNq9otvorTXnbNLgaRBCGFAqTvMhDBsBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBCw5QXmetCPTaxcpJWAefs+gEDo1hDNBXPFVhtvqqXUzicYZVxDADp2aUo/AdchuG15+8ic7K01aCdL4qkAtyx4HM16Hz0WVYIeiFyUgpCLY1EA]
"""
yaml = YAML()
return yaml.load(data)
@pytest.fixture
def force_subprocess_run_cpe(monkeypatch):
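    # Replace the eyaml module's subprocess ``run`` so every external eyaml call
    # fails with a CalledProcessError.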
import yamlpath.eyaml.eyamlprocessor as break_module
def fake_run(*args, **kwargs):
raise CalledProcessError(42, "bad eyaml")
monkeypatch.setattr(break_module, "run", fake_run)
@pytest.fixture
def force_no_access(monkeypatch):
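    # Make the eyaml module's ``access`` check report every path as non-executable.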
import yamlpath.eyaml.eyamlprocessor as break_module
def fake_access(*args, **kwargs):
return False
monkeypatch.setattr(break_module, "access", fake_access)
class Test_eyaml_EYAMLProcessor():
def test_find_eyaml_paths(self, quiet_logger, eyamldata_f):
processor = EYAMLProcessor(quiet_logger, eyamldata_f)
expected = [
"aliases[&secretIdentity]",
"aliases[&secretPhrase]",
"anchored::secrets.aliased_values.ident",
"anchored::secrets.aliased_values.phrase",
"anchored::secrets.array_of_array_idents[0][0]",
"aliased::secrets.novel_values.ident",
"aliased::secrets.novel_values.phrase",
"aliased::secrets.string_values.ident",
"aliased::secrets.string_values.phrase",
]
actual = []
for path in processor.find_eyaml_paths():
actual.append(str(path))
assert actual == expected
@requireseyaml
@pytest.mark.parametrize("yaml_path,compare", [
("aliases[&secretIdentity]", "This is not the identity you are looking for."),
("aliases[&secretPhrase]", "There is no secret phrase."),
("aliases", ["This is not the identity you are looking for.", "There is no secret phrase."]),
("(aliases)", ["This is not the identity you are looking for.", "There is no secret phrase."]),
("((aliases)*)[0]", "This is not the identity you are looking for."),
])
def test_happy_get_eyaml_values(self, quiet_logger, eyamldata_f, old_eyaml_keys, yaml_path, compare):
processor = EYAMLProcessor(quiet_logger, eyamldata_f, privatekey=old_eyaml_keys[0], publickey=old_eyaml_keys[1])
for node in processor.get_eyaml_values(yaml_path, True):
assert unwrap_node_coords(node) == compare
@requireseyaml
@pytest.mark.parametrize("yaml_path,compare,mustexist,output_format", [
("aliases[&secretIdentity]", "This is your new identity.", True, EYAMLOutputFormats.STRING),
("aliases[&brandNewEntry]", "This key doesn't already exist.", False, EYAMLOutputFormats.BLOCK),
])
def test_happy_set_eyaml_value(self, quiet_logger, eyamldata_f, old_eyaml_keys, yaml_path, compare, mustexist, output_format):
processor = EYAMLProcessor(quiet_logger, eyamldata_f, privatekey=old_eyaml_keys[0], publickey=old_eyaml_keys[1])
# Set the test value
processor.set_eyaml_value(yaml_path, compare, output_format, mustexist)
# Ensure the new value is encrypted
encvalue = None
for encnode in processor.get_nodes(yaml_path):
encvalue = unwrap_node_coords(encnode)
break
assert EYAMLProcessor.is_eyaml_value(encvalue)
@requireseyaml
@pytest.mark.parametrize("yaml_path,newval,eoformat,yvformat", [
("/aliased::secrets/novel_values/ident", "New, novel, encrypted identity in BLOCK format", EYAMLOutputFormats.BLOCK, YAMLValueFormats.FOLDED),
("/aliased::secrets/string_values/ident", "New, novel, encrypted identity in STRING format", EYAMLOutputFormats.STRING, YAMLValueFormats.BARE),
])
def test_preserve_old_blockiness(self, quiet_logger, eyamldata_f, old_eyaml_keys, yaml_path, newval, eoformat, yvformat):
processor = EYAMLProcessor(quiet_logger, eyamldata_f, privatekey=old_eyaml_keys[0], publickey=old_eyaml_keys[1])
processor.set_eyaml_value(yaml_path, newval, output=eoformat)
encvalue = None
encformat = YAMLValueFormats.DEFAULT
for encnode in processor.get_nodes(yaml_path):
encvalue = unwrap_node_coords(encnode)
encformat = YAMLValueFormats.from_node(encvalue)
break
assert EYAMLProcessor.is_eyaml_value(encvalue) and yvformat == encformat
def test_none_eyaml_value(self):
assert False == EYAMLProcessor.is_eyaml_value(None)
@pytest.mark.parametrize("exe", [
("/no/such/file/anywhere"),
("this-file-does-not-exist"),
(None),
])
def test_impossible_eyaml_exe(self, exe):
assert None == EYAMLProcessor.get_eyaml_executable(exe)
def test_not_can_run_eyaml(self, quiet_logger):
processor = EYAMLProcessor(quiet_logger, None)
processor.eyaml = None
assert False == processor._can_run_eyaml()
@requireseyaml
def test_bad_encryption_keys(self, quiet_logger):
processor = EYAMLProcessor(quiet_logger, None)
processor.privatekey = "/no/such/file"
processor.publickey = "/no/such/file"
with pytest.raises(EYAMLCommandException):
processor.encrypt_eyaml("test")
def test_no_encrypt_without_eyaml(self, quiet_logger):
processor = EYAMLProcessor(quiet_logger, None)
processor.eyaml = None
with pytest.raises(EYAMLCommandException):
processor.encrypt_eyaml("test")
def test_no_decrypt_without_eyaml(self, quiet_logger):
processor = EYAMLProcessor(quiet_logger, None)
processor.eyaml = None
with pytest.raises(EYAMLCommandException):
processor.decrypt_eyaml("ENC[...]")
def test_ignore_already_encrypted_cryps(self, quiet_logger):
processor = EYAMLProcessor(quiet_logger, None)
testval = "ENC[...]"
assert testval == processor.encrypt_eyaml(testval)
def test_ignore_already_decrypted_cryps(self, quiet_logger):
processor = EYAMLProcessor(quiet_logger, None)
testval = "some value"
assert testval == processor.decrypt_eyaml(testval)
@requireseyaml
def test_impossible_decryption(self, quiet_logger, old_eyaml_keys):
processor = EYAMLProcessor(quiet_logger, None)
testval = "ENC[...]"
with pytest.raises(EYAMLCommandException):
processor.decrypt_eyaml(testval)
def test_encrypt_calledprocesserror(self, quiet_logger, force_subprocess_run_cpe):
processor = EYAMLProcessor(quiet_logger, None)
with pytest.raises(EYAMLCommandException):
processor.encrypt_eyaml("any value")
def test_decrypt_calledprocesserror(self, quiet_logger, force_subprocess_run_cpe):
processor = EYAMLProcessor(quiet_logger, None)
with pytest.raises(EYAMLCommandException):
processor.decrypt_eyaml("ENC[...]")
@requireseyaml
def test_non_executable(self, old_eyaml_keys, force_no_access):
assert EYAMLProcessor.get_eyaml_executable(str(old_eyaml_keys[0])) is None
|
969e09d079505ff4a84fff3ff9347497a60aea81
|
e9ee565cfff9e6b2a1ea6f73368f4a8948274795
|
/src/pybel/io/line_utils.py
|
b33d8ea6eef8583d3a519fc53c2767c4154e4769
|
[
"MIT"
] |
permissive
|
pybel/pybel
|
7e79530b454e23ae48486a5c0e3207744b7fa139
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
refs/heads/master
| 2022-08-26T18:41:25.724850
| 2022-02-11T12:22:35
| 2022-02-11T12:22:35
| 68,376,693
| 133
| 40
|
MIT
| 2022-02-11T12:11:24
| 2016-09-16T12:09:49
|
Python
|
UTF-8
|
Python
| false
| false
| 11,090
|
py
|
line_utils.py
|
# -*- coding: utf-8 -*-
"""This module contains helper functions for reading BEL scripts."""
import logging
import os
import re
import time
from typing import Any, Iterable, List, Mapping, Optional, Tuple
from bel_resources import ResourceError, split_file_to_annotations_and_definitions
from pyparsing import ParseException
from sqlalchemy.exc import OperationalError
from tqdm.autonotebook import tqdm
from ..constants import INVERSE_DOCUMENT_KEYS, REQUIRED_METADATA
from ..exceptions import (
BELParserWarning,
BELSyntaxError,
InconsistentDefinitionError,
MalformedMetadataException,
MissingMetadataException,
PlaceholderAminoAcidWarning,
VersionFormatWarning,
)
from ..manager import Manager
from ..parser import BELParser, MetadataParser
from ..struct.graph import BELGraph
__all__ = [
"parse_lines",
]
logger = logging.getLogger(__name__)
parser_logger = logging.getLogger("pybel.parser")
METADATA_LINE_RE = re.compile(r"(SET\s+DOCUMENT|DEFINE\s+NAMESPACE|DEFINE\s+ANNOTATION)")
LOG_FMT = "%d:%d %s %s"
LOG_FMT_PATH = "%s:%d:%d %s %s"
def parse_lines(
graph: BELGraph,
lines: Iterable[str],
manager: Optional[Manager] = None,
disallow_nested: bool = False,
citation_clearing: bool = True,
use_tqdm: bool = False,
tqdm_kwargs: Optional[Mapping[str, Any]] = None,
no_identifier_validation: bool = False,
disallow_unqualified_translocations: bool = False,
allow_redefinition: bool = False,
allow_definition_failures: bool = False,
allow_naked_names: bool = False,
required_annotations: Optional[List[str]] = None,
upgrade_urls: bool = False,
) -> None:
"""Parse an iterable of lines into this graph.
Delegates to :func:`parse_document`, :func:`parse_definitions`, and :func:`parse_statements`.
:param graph: A BEL graph
:param lines: An iterable over lines of BEL script
:param manager: A PyBEL database manager
:param disallow_nested: If true, turns on nested statement failures
:param citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations?
Delegated to :class:`pybel.parser.ControlParser`
:param use_tqdm: Use :mod:`tqdm` to show a progress bar?
:param tqdm_kwargs: Keywords to pass to ``tqdm``
    :param disallow_unqualified_translocations: If true, disallow translocations without TO and FROM clauses.
:param required_annotations: Annotations that are required for all statements
:param upgrade_urls: Automatically upgrade old namespace URLs. Defaults to false.
.. warning::
        These options allow concessions for parsing BEL that is either **WRONG** or **UNSCIENTIFIC**. Use them at
        your own risk to the reproducibility and validity of your results.
:param no_identifier_validation: If true, turns off namespace validation
:param allow_naked_names: If true, turns off naked namespace failures
:param allow_redefinition: If true, doesn't fail on second definition of same name or annotation
:param allow_definition_failures: If true, allows parsing to continue if a terminology file download/parse fails
"""
docs, definitions, statements = split_file_to_annotations_and_definitions(lines)
if manager is None:
manager = Manager()
metadata_parser = MetadataParser(
manager,
allow_redefinition=allow_redefinition,
skip_validation=no_identifier_validation,
upgrade_urls=upgrade_urls,
)
parse_document(
graph,
docs,
metadata_parser,
)
parse_definitions(
graph,
definitions,
metadata_parser,
allow_failures=allow_definition_failures,
use_tqdm=use_tqdm,
tqdm_kwargs=tqdm_kwargs,
)
bel_parser = BELParser(
graph=graph,
# terminologies
namespace_to_term_to_encoding=metadata_parser.namespace_to_term_to_encoding,
namespace_to_pattern=metadata_parser.namespace_to_pattern,
annotation_to_term=metadata_parser.annotation_to_term,
annotation_to_pattern=metadata_parser.annotation_to_pattern,
annotation_to_local=metadata_parser.annotation_to_local,
# language settings
disallow_nested=disallow_nested,
citation_clearing=citation_clearing,
skip_validation=no_identifier_validation,
allow_naked_names=allow_naked_names,
disallow_unqualified_translocations=disallow_unqualified_translocations,
required_annotations=required_annotations,
)
parse_statements(
graph,
statements,
bel_parser,
use_tqdm=use_tqdm,
tqdm_kwargs=tqdm_kwargs,
)
logger.info(
"Network has %d nodes and %d edges",
graph.number_of_nodes(),
graph.number_of_edges(),
)
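# Illustrative sketch (not part of the original module): one plausible way to
# call :func:`parse_lines` on a BEL script. The file name below is a
# hypothetical placeholder; the default manager and options are used.
def _example_parse_lines_usage(path: str = "example.bel") -> BELGraph:
    """Parse the BEL script at ``path`` into a fresh graph (illustration only)."""
    graph = BELGraph()
    with open(path) as file:
        parse_lines(graph, file, use_tqdm=False)
    return graph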
def parse_document(
graph: BELGraph,
enumerated_lines: Iterable[Tuple[int, str]],
metadata_parser: MetadataParser,
) -> None:
"""Parse the lines in the document section of a BEL script."""
parse_document_start_time = time.time()
for line_number, line in enumerated_lines:
try:
metadata_parser.parseString(line, line_number=line_number)
except VersionFormatWarning as exc:
_log_parse_exception(graph, exc)
graph.add_warning(exc)
except Exception as e:
exc = MalformedMetadataException(line_number, line, 0)
_log_parse_exception(graph, exc)
raise exc from e
for required in REQUIRED_METADATA:
required_metadatum = metadata_parser.document_metadata.get(required)
if required_metadatum is not None:
continue
required_metadatum_key = INVERSE_DOCUMENT_KEYS[required]
        # This has to use ``insert`` since it needs to go at the front of the list!
exc = MissingMetadataException.make(required_metadatum_key)
graph.warnings.insert(0, (None, exc, {}))
_log_parse_exception(graph, exc)
graph.document.update(metadata_parser.document_metadata)
logger.info(
"Finished parsing document section in %.02f seconds",
time.time() - parse_document_start_time,
)
def parse_definitions(
graph: BELGraph,
enumerated_lines: Iterable[Tuple[int, str]],
metadata_parser: MetadataParser,
allow_failures: bool = False,
use_tqdm: bool = False,
tqdm_kwargs: Optional[Mapping[str, Any]] = None,
) -> None:
"""Parse the lines in the definitions section of a BEL script.
:param graph: A BEL graph
:param enumerated_lines: An enumerated iterable over the lines in the definitions section of a BEL script
:param metadata_parser: A metadata parser
:param allow_failures: If true, allows parser to continue past strange failures
:param use_tqdm: Use :mod:`tqdm` to show a progress bar?
:param tqdm_kwargs: Keywords to pass to ``tqdm``
:raises: pybel.parser.parse_exceptions.InconsistentDefinitionError
:raises: pybel.resources.exc.ResourceError
:raises: sqlalchemy.exc.OperationalError
"""
parse_definitions_start_time = time.time()
if use_tqdm:
_tqdm_kwargs = dict(desc="Definitions", leave=False)
if tqdm_kwargs:
_tqdm_kwargs.update(tqdm_kwargs)
enumerated_lines = tqdm(list(enumerated_lines), **_tqdm_kwargs)
for line_number, line in enumerated_lines:
try:
metadata_parser.parseString(line, line_number=line_number)
except (InconsistentDefinitionError, ResourceError) as e:
parser_logger.exception(LOG_FMT, line_number, 0, e.__class__.__name__, line)
raise e
except OperationalError as e:
parser_logger.warning(
"Need to upgrade database. See http://pybel.readthedocs.io/en/latest/installation.html#upgrading",
)
raise e
except Exception as e:
if not allow_failures:
exc = MalformedMetadataException(line_number, line, 0)
_log_parse_exception(graph, exc)
raise exc from e
graph.namespace_url.update(metadata_parser.namespace_url_dict)
graph.namespace_pattern.update(
{keyword: pattern.pattern for keyword, pattern in metadata_parser.namespace_to_pattern.items()}
)
graph.annotation_url.update(metadata_parser.annotation_url_dict)
graph.annotation_pattern.update(
{keyword: pattern.pattern for keyword, pattern in metadata_parser.annotation_to_pattern.items()}
)
graph.annotation_list.update(metadata_parser.annotation_to_local)
logger.info(
"Finished parsing definitions section in %.02f seconds",
time.time() - parse_definitions_start_time,
)
metadata_parser.ensure_resources()
logger.info("Finished ensuring namespaces in cache")
def parse_statements(
graph: BELGraph,
enumerated_lines: Iterable[Tuple[int, str]],
bel_parser: BELParser,
use_tqdm: bool = True,
tqdm_kwargs: Optional[Mapping[str, Any]] = None,
) -> None:
"""Parse a list of statements from a BEL Script.
:param graph: A BEL graph
:param enumerated_lines: An enumerated iterable over the lines in the statements section of a BEL script
:param bel_parser: A BEL parser
:param use_tqdm: Use :mod:`tqdm` to show a progress bar? Requires reading whole file to memory.
:param tqdm_kwargs: Keywords to pass to ``tqdm``
"""
parse_statements_start_time = time.time()
if use_tqdm:
tqdm_kwargs = {} if tqdm_kwargs is None else dict(tqdm_kwargs)
tqdm_kwargs.setdefault("desc", "Statements")
tqdm_kwargs.setdefault("leave", False)
enumerated_lines = tqdm(list(enumerated_lines), **tqdm_kwargs)
for line_number, line in enumerated_lines:
try:
bel_parser.parseString(line, line_number=line_number)
except ParseException as e:
exc = BELSyntaxError(line_number, line, e.loc)
_log_parse_exception(graph, exc)
graph.add_warning(exc, bel_parser.get_annotations())
except PlaceholderAminoAcidWarning as exc:
exc.line_number = line_number
_log_parse_exception(graph, exc)
graph.add_warning(exc, bel_parser.get_annotations())
except BELParserWarning as exc:
_log_parse_exception(graph, exc)
graph.add_warning(exc, bel_parser.get_annotations())
except Exception:
parser_logger.exception(LOG_FMT, line_number, 0, "General Failure", line)
raise
logger.info(
"Parsed statements section in %.02f seconds with %d warnings",
time.time() - parse_statements_start_time,
len(graph.warnings),
)
def _log_parse_exception(graph: BELGraph, exc: BELParserWarning):
if graph.path:
s = LOG_FMT_PATH % (
os.path.basename(graph.path),
exc.line_number,
exc.position,
exc.__class__.__name__,
exc,
)
else:
s = LOG_FMT % (exc.line_number, exc.position, exc.__class__.__name__, exc)
tqdm.write(s)
|
756effc61738a99f933fd7b70642c529e2bea1e9
|
e75c5412063078c9ea3e7c71a8dc7a2026083a34
|
/astropy/table/tests/conftest.py
|
a888d9222a18ab899d11f9db5a271ebe3f0aa9c4
|
[
"BSD-3-Clause"
] |
permissive
|
astropy/astropy
|
d6636f24acdf2b18fc3e413ca0c4b1162a63dd41
|
53188c39a23c33b72df5850ec59e31886f84e29d
|
refs/heads/main
| 2023-08-27T18:16:44.061375
| 2023-08-27T16:07:35
| 2023-08-27T16:07:35
| 2,081,289
| 3,922
| 1,935
|
BSD-3-Clause
| 2023-09-14T09:23:26
| 2011-07-21T01:33:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,690
|
py
|
conftest.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
All of the pytest fixtures used by astropy.table are defined here.
`conftest.py` is a "special" module name for pytest that is always
imported, but is not searched for tests, and it is the recommended
place to put fixtures that are shared between modules. These fixtures
cannot be defined in a module with a different name and still be shared
between modules.
"""
import pickle
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import pytest
from astropy import coordinates, table, time
from astropy import units as u
from astropy.table import QTable, Table, pprint
from astropy.table.table_helpers import ArrayWrapper
@pytest.fixture(params=[table.Column, table.MaskedColumn])
def Column(request):
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
return request.param
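# Illustrative sketch (not part of the original file): any test module under
# astropy/table/tests can request the ``Column`` fixture above simply by naming
# it as an argument; pytest injects it without an explicit import. A
# hypothetical test could look like:
#
# def test_column_length(Column):
#     col = Column(name="x", data=[1, 2, 3])
#     assert len(col) == 3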
class MaskedTable(table.Table):
def __init__(self, *args, **kwargs):
kwargs["masked"] = True
table.Table.__init__(self, *args, **kwargs)
class MyRow(table.Row):
pass
class MyColumn(table.Column):
pass
class MyMaskedColumn(table.MaskedColumn):
pass
class MyTableColumns(table.TableColumns):
pass
class MyTableFormatter(pprint.TableFormatter):
pass
class MyTable(table.Table):
Row = MyRow
Column = MyColumn
MaskedColumn = MyMaskedColumn
TableColumns = MyTableColumns
TableFormatter = MyTableFormatter
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=["unmasked", "masked", "subclass"])
def table_types(request):
class TableTypes:
def __init__(self, request):
if request.param == "unmasked":
self.Table = table.Table
self.Column = table.Column
elif request.param == "masked":
self.Table = MaskedTable
self.Column = table.MaskedColumn
elif request.param == "subclass":
self.Table = MyTable
self.Column = MyColumn
return TableTypes(request)
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_data(request):
class TableData:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
self.COLS = [
self.Column(
name="a",
data=[1, 2, 3],
description="da",
format="%i",
meta={"ma": 1},
unit="ua",
),
self.Column(
name="b",
data=[4, 5, 6],
description="db",
format="%d",
meta={"mb": 1},
unit="ub",
),
self.Column(
name="c",
data=[7, 8, 9],
description="dc",
format="%f",
meta={"mc": 1},
unit="ub",
),
]
self.DATA = self.Table(self.COLS)
return TableData(request)
class SubclassTable(table.Table):
pass
@pytest.fixture(params=[True, False])
def tableclass(request):
return table.Table if request.param else SubclassTable
@pytest.fixture(params=list(range(0, pickle.HIGHEST_PROTOCOL + 1)))
def protocol(request):
"""
Fixture to run all the tests for all available pickle protocols.
"""
return request.param
# Fixture to run all tests for both an unmasked (ndarray) and masked
# (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_type(request):
return MaskedTable if request.param else table.Table
# Stuff for testing mixin columns
MIXIN_COLS = {
"quantity": [0, 1, 2, 3] * u.m,
"longitude": coordinates.Longitude(
[0.0, 1.0, 5.0, 6.0] * u.deg, wrap_angle=180.0 * u.deg
),
"latitude": coordinates.Latitude([5.0, 6.0, 10.0, 11.0] * u.deg),
"time": time.Time([2000, 2001, 2002, 2003], format="jyear"),
"timedelta": time.TimeDelta([1, 2, 3, 4], format="jd"),
"skycoord": coordinates.SkyCoord(ra=[0, 1, 2, 3] * u.deg, dec=[0, 1, 2, 3] * u.deg),
"sphericalrep": coordinates.SphericalRepresentation(
[0, 1, 2, 3] * u.deg, [0, 1, 2, 3] * u.deg, 1 * u.kpc
),
"cartesianrep": coordinates.CartesianRepresentation(
[0, 1, 2, 3] * u.pc, [4, 5, 6, 7] * u.pc, [9, 8, 8, 6] * u.pc
),
"sphericaldiff": coordinates.SphericalCosLatDifferential(
[0, 1, 2, 3] * u.mas / u.yr, [0, 1, 2, 3] * u.mas / u.yr, 10 * u.km / u.s
),
"arraywrap": ArrayWrapper([0, 1, 2, 3]),
"arrayswap": ArrayWrapper(np.arange(4, dtype="i").byteswap().newbyteorder()),
"ndarraylil": np.array(
[(7, "a"), (8, "b"), (9, "c"), (9, "c")], dtype="<i4,|S1"
).view(table.NdarrayMixin),
"ndarraybig": np.array(
[(7, "a"), (8, "b"), (9, "c"), (9, "c")], dtype=">i4,|S1"
).view(table.NdarrayMixin),
"stokescoord": coordinates.StokesCoord(range(1, 5)),
}
MIXIN_COLS["earthlocation"] = coordinates.EarthLocation(
lon=MIXIN_COLS["longitude"],
lat=MIXIN_COLS["latitude"],
height=MIXIN_COLS["quantity"],
)
MIXIN_COLS["sphericalrepdiff"] = coordinates.SphericalRepresentation(
MIXIN_COLS["sphericalrep"], differentials=MIXIN_COLS["sphericaldiff"]
)
@pytest.fixture(params=sorted(MIXIN_COLS))
def mixin_cols(request):
"""
Fixture to return a set of columns for mixin testing which includes
an index column 'i', two string cols 'a', 'b' (for joins etc), and
one of the available mixin column types.
"""
cols = OrderedDict()
mixin_cols = deepcopy(MIXIN_COLS)
cols["i"] = table.Column([0, 1, 2, 3], name="i")
cols["a"] = table.Column(["a", "b", "b", "c"], name="a")
cols["b"] = table.Column(["b", "c", "a", "d"], name="b")
cols["m"] = mixin_cols[request.param]
return cols
def _get_test_table():
T = QTable.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
T["q"] = np.arange(len(T)) * u.m
T.meta.update({"ta": 1})
T["c"].meta.update({"a": 1})
T["c"].description = "column c"
return T
@pytest.fixture()
def T1b(request):
"""Basic table"""
T = _get_test_table()
return T
@pytest.fixture(params=[False, True])
def T1(request):
"""Basic table with or without index on integer column a"""
T = _get_test_table()
if request.param:
T.add_index("a")
return T
@pytest.fixture(params=[False, True])
def T1q(request):
"""Basic table where a column is integer or Quantity"""
T = _get_test_table()
if request.param:
T["a"] = T["a"] * u.m
return T
@pytest.fixture(params=[(False, False), (False, True), (True, False), (True, True)])
def T1m(request):
"""Basic table with or without index on column a, where a is integer or Quantity"""
T = _get_test_table()
add_index, is_quantity = request.param
if is_quantity:
T["a"] = T["a"] * u.m
if add_index:
T.add_index("a")
return T
@pytest.fixture(params=[Table, QTable])
def operation_table_type(request):
return request.param
|
274c678dcfe33ced8748712cc6c34d69d56eb291
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/tests/models/auto/test_modeling_tf_auto.py
|
c8754ca42702fc07482c393e6f5083bf3ff7cfba
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 13,164
|
py
|
test_modeling_tf_auto.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
model_type = "new-model"
if is_tf_available():
class TFNewModel(TFBertModel):
config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
model_name = "bert-base-cased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModel.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertModel)
@slow
def test_model_for_pretraining_from_pretrained(self):
model_name = "bert-base-cased"
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForPreTraining.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForPreTraining)
@slow
def test_model_for_causal_lm(self):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = TFAutoModelForCausalLM.from_pretrained(model_name)
model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFGPT2LMHeadModel)
@slow
def test_lmhead_model_from_pretrained(self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelWithLMHead.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForMaskedLM)
@slow
def test_model_for_masked_lm(self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForMaskedLM.from_pretrained(model_name)
model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFT5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForQuestionAnswering)
@slow
@require_tensorflow_probability
def test_table_question_answering_model_from_pretrained(self):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, TapasConfig)
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
model_name, output_loading_info=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFTapasForQuestionAnswering)
def test_from_pretrained_identifier(self):
model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, TFBertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_identifier_from_model_type(self):
model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
self.assertIsInstance(model, TFRobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_pretrained_with_tuple_values(self):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
self.assertIsInstance(model, TFFunnelModel)
config = copy.deepcopy(model.config)
config.architectures = ["FunnelBaseModel"]
model = TFAutoModel.from_config(config)
self.assertIsInstance(model, TFFunnelBaseModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
model = TFAutoModel.from_pretrained(tmp_dir)
self.assertIsInstance(model, TFFunnelBaseModel)
def test_new_model_registration(self):
try:
AutoConfig.register("new-model", NewModelConfig)
auto_classes = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(ValueError):
auto_class.register(BertConfig, TFNewModel)
auto_class.register(NewModelConfig, TFNewModel)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError):
auto_class.register(BertConfig, TFBertModel)
# Now that the config is registered, it can be used as any other config with the auto-API
tiny_config = BertModelTester(self).get_config()
config = NewModelConfig(**tiny_config.to_dict())
model = auto_class.from_config(config)
self.assertIsInstance(model, TFNewModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
new_model = auto_class.from_pretrained(tmp_dir)
self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def test_repo_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
):
_ = TFAutoModel.from_pretrained("bert-base")
def test_revision_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
):
_ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def test_model_file_not_found(self):
with self.assertRaisesRegex(
EnvironmentError,
"hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
):
_ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")
def test_model_from_pt_suggestion(self):
with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
def test_cached_model_has_minimum_calls_to_head(self):
# Make sure we have cached the model.
_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count, 0)
self.assertEqual(counter.head_request_count, 1)
self.assertEqual(counter.other_request_count, 0)
# With a sharded checkpoint
_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
with RequestCounter() as counter:
_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
self.assertEqual(counter.get_request_count, 0)
self.assertEqual(counter.head_request_count, 1)
self.assertEqual(counter.other_request_count, 0)
|
1dd16a6c44e6509c31d623fb400ff7bb88e886a1
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/home_connect/switch.py
|
61dd11dbc6f248c892f9eed1f942361f49192b48
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,024
|
py
|
switch.py
|
"""Provides a switch for Home Connect."""
import logging
from typing import Any
from homeconnect.api import HomeConnectError
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_DEVICE, CONF_ENTITIES
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
ATTR_VALUE,
BSH_ACTIVE_PROGRAM,
BSH_OPERATION_STATE,
BSH_POWER_ON,
BSH_POWER_STATE,
DOMAIN,
)
from .entity import HomeConnectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Home Connect switch."""
def get_entities():
"""Get a list of entities."""
entities = []
hc_api = hass.data[DOMAIN][config_entry.entry_id]
for device_dict in hc_api.devices:
entity_dicts = device_dict.get(CONF_ENTITIES, {}).get("switch", [])
entity_list = [HomeConnectProgramSwitch(**d) for d in entity_dicts]
entity_list += [HomeConnectPowerSwitch(device_dict[CONF_DEVICE])]
entities += entity_list
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class HomeConnectProgramSwitch(HomeConnectEntity, SwitchEntity):
"""Switch class for Home Connect."""
def __init__(self, device, program_name):
"""Initialize the entity."""
desc = " ".join(["Program", program_name.split(".")[-1]])
if device.appliance.type == "WasherDryer":
desc = " ".join(
["Program", program_name.split(".")[-3], program_name.split(".")[-1]]
)
super().__init__(device, desc)
self.program_name = program_name
self._state = None
self._remote_allowed = None
@property
def is_on(self):
"""Return true if the switch is on."""
return bool(self._state)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Start the program."""
_LOGGER.debug("Tried to turn on program %s", self.program_name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.start_program, self.program_name
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to start program: %s", err)
self.async_entity_update()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Stop the program."""
_LOGGER.debug("Tried to stop program %s", self.program_name)
try:
await self.hass.async_add_executor_job(self.device.appliance.stop_program)
except HomeConnectError as err:
_LOGGER.error("Error while trying to stop program: %s", err)
self.async_entity_update()
async def async_update(self) -> None:
"""Update the switch's status."""
state = self.device.appliance.status.get(BSH_ACTIVE_PROGRAM, {})
if state.get(ATTR_VALUE) == self.program_name:
self._state = True
else:
self._state = False
_LOGGER.debug("Updated, new state: %s", self._state)
class HomeConnectPowerSwitch(HomeConnectEntity, SwitchEntity):
"""Power switch class for Home Connect."""
def __init__(self, device):
"""Inititialize the entity."""
super().__init__(device, "Power")
self._state = None
@property
def is_on(self):
"""Return true if the switch is on."""
return bool(self._state)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Switch the device on."""
_LOGGER.debug("Tried to switch on %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, BSH_POWER_STATE, BSH_POWER_ON
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn on device: %s", err)
self._state = False
self.async_entity_update()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Switch the device off."""
_LOGGER.debug("tried to switch off %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting,
BSH_POWER_STATE,
self.device.power_off_state,
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn off device: %s", err)
self._state = True
self.async_entity_update()
async def async_update(self) -> None:
"""Update the switch's status."""
if (
self.device.appliance.status.get(BSH_POWER_STATE, {}).get(ATTR_VALUE)
== BSH_POWER_ON
):
self._state = True
elif (
self.device.appliance.status.get(BSH_POWER_STATE, {}).get(ATTR_VALUE)
== self.device.power_off_state
):
self._state = False
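        # Fall back on the operation state when the power state is unavailable:
        # an active or ready operation state implies the appliance is on, while
        # an inactive state implies it is off.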
elif self.device.appliance.status.get(BSH_OPERATION_STATE, {}).get(
ATTR_VALUE, None
) in [
"BSH.Common.EnumType.OperationState.Ready",
"BSH.Common.EnumType.OperationState.DelayedStart",
"BSH.Common.EnumType.OperationState.Run",
"BSH.Common.EnumType.OperationState.Pause",
"BSH.Common.EnumType.OperationState.ActionRequired",
"BSH.Common.EnumType.OperationState.Aborting",
"BSH.Common.EnumType.OperationState.Finished",
]:
self._state = True
elif (
self.device.appliance.status.get(BSH_OPERATION_STATE, {}).get(ATTR_VALUE)
== "BSH.Common.EnumType.OperationState.Inactive"
):
self._state = False
else:
self._state = None
_LOGGER.debug("Updated, new state: %s", self._state)
|
6bad3e27e2536a83572d834ddc4a32b5e1a14c7d
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/hqadmin/urls.py
|
dcb3fef66c3bd4308691b2c2d5130108a3f9a736
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,629
|
py
|
urls.py
|
from django.conf.urls import include, re_path as url
from corehq.apps.api.urls import admin_urlpatterns as admin_api_urlpatterns
from corehq.apps.domain.views.tombstone import TombstoneManagement, create_tombstone
from corehq.apps.hqadmin.views.data import doc_in_es, download_blob, raw_doc
from corehq.apps.hqadmin.views.operations import (
CallcenterUCRCheck,
ReprocessMessagingCaseUpdatesView,
mass_email,
)
from corehq.apps.hqadmin.views.reports import (
DownloadGIRView,
DownloadMALTView,
)
from corehq.apps.hqadmin.views.system import (
SystemInfoView,
branches_on_staging,
GlobalThresholds,
check_services,
pillow_operation_api,
system_ajax,
)
from corehq.apps.hqadmin.views.users import (
AdminRestoreView,
AppBuildTimingsView,
DisableTwoFactorView,
DisableUserView,
SuperuserManagement,
OffboardingUserList,
WebUserDataView,
superuser_table,
web_user_lookup,
)
from corehq.apps.hqadmin.views.utils import default
from corehq.apps.reports.dispatcher import AdminReportDispatcher
urlpatterns = [
url(r'^$', default, name="default_admin_report"),
url(r'^system/$', SystemInfoView.as_view(), name=SystemInfoView.urlname),
url(r'^system/system_ajax$', system_ajax, name="system_ajax"),
url(r'^system/check_services$', check_services, name="check_services"),
url(r'^system/autostaging/$', branches_on_staging, name="branches_on_staging"),
url(r'^global_thresholds/$', GlobalThresholds.as_view(), name=GlobalThresholds.urlname),
url(r'^mass_email/$', mass_email, name="mass_email"),
# Same view supported with three possible urls to support tracking
# username and domain in the url via audit
url(r'^superuser_management/$', SuperuserManagement.as_view(), name=SuperuserManagement.urlname),
url(r'^get_offboarding_list/$', OffboardingUserList.as_view(), name=OffboardingUserList.urlname),
url(r'^superuser_table.csv$', superuser_table, name='superuser_table'),
url(r'^tombstone_management/$', TombstoneManagement.as_view(), name=TombstoneManagement.urlname),
url(r'^create_tombstone/$', create_tombstone, name='create_tombstone'),
url(r'^phone/restore/$', AdminRestoreView.as_view(), name="admin_restore"),
url(r'^phone/restore/(?P<app_id>[\w-]+)/$', AdminRestoreView.as_view(), name='app_aware_admin_restore'),
url(r'^app_build_timings/$', AppBuildTimingsView.as_view(), name="app_build_timings"),
url(r'^do_pillow_op/$', pillow_operation_api, name="pillow_operation_api"),
url(r'^web_user_lookup/$', web_user_lookup, name='web_user_lookup'),
url(r'^disable_two_factor/$', DisableTwoFactorView.as_view(), name=DisableTwoFactorView.urlname),
url(r'^disable_account/$', DisableUserView.as_view(), name=DisableUserView.urlname),
url(r'^doc_in_es/$', doc_in_es, name='doc_in_es'),
url(r'^raw_couch/$', raw_doc, name='raw_couch'),
url(r'^raw_doc/$', raw_doc, name='raw_doc'),
url(r'^download_blob/$', download_blob, name='download_blob'),
url(r'^api/', include(admin_api_urlpatterns)),
url(r'^callcenter_ucr_check/$', CallcenterUCRCheck.as_view(), name=CallcenterUCRCheck.urlname),
url(r'^download_malt/$',
DownloadMALTView.as_view(), name=DownloadMALTView.urlname),
url(r'^download_gir', DownloadGIRView.as_view(), name=DownloadGIRView.urlname),
url(r'^reprocess_messaging_case_updates/$', ReprocessMessagingCaseUpdatesView.as_view(),
name=ReprocessMessagingCaseUpdatesView.urlname),
url(r'^web_user_data', WebUserDataView.as_view(), name=WebUserDataView.urlname),
AdminReportDispatcher.url_pattern(),
]
|
a8d4dedd161d9e0a036ce0ed9c3cafd70d5c4d27
|
3257372291236aac1737b057c9ac6c61da9ccca0
|
/tutorials/W0D3_LinearAlgebra/solutions/W0D3_Tutorial1_Solution_b89ad2ef.py
|
14f3bd4d630901803f32927f70feaea19936b55c
|
[
"CC-BY-4.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
NeuromatchAcademy/precourse
|
230ead0d11ae7b0dba21c8df97695a1796e9797d
|
b7f2432c6a68a7984ca923ceed8e07d5cfdb77c3
|
refs/heads/main
| 2023-07-26T11:18:24.493966
| 2023-07-09T14:42:49
| 2023-07-09T14:42:49
| 256,327,558
| 639
| 174
|
MIT
| 2023-07-09T14:42:50
| 2020-04-16T20:54:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 546
|
py
|
W0D3_Tutorial1_Solution_b89ad2ef.py
|
"""
1) They are linearly dependent as one can be formed as a linear combination of
the others (a + b = c). You could also have known this because you have four
3D vectors. You don't need 4 vectors to get anywhere in 3D space!
2) The span of a, b, c, and d is all of 3D space (R^3).
3) The span of a, b, and c is a 2D plane through all of 3D space (note this is not R^2)
4) The span of a is a 1D line through 3D space (note this is not R^1)
5) The span of a and b is a 2D plane through 3D space (note this is not R^2)
""";
|
5fa8052413b1ef4a3330828dead0fa1f9fd56c4f
|
993f18c21402d7a4ff21ddb7ff2ec6c80e466f20
|
/onnx/backend/test/case/node/det.py
|
9b28320eff557d8ad0c95af83ae960fe3b0e93a4
|
[
"Apache-2.0"
] |
permissive
|
onnx/onnx
|
10d3916803c7babff89ec0fa9045127bcccad376
|
8a475b34cb3875df311a46f57571646498f5bda7
|
refs/heads/main
| 2023-08-18T18:50:03.388353
| 2023-08-16T22:18:46
| 2023-08-16T22:18:46
| 102,692,863
| 16,164
| 4,150
|
Apache-2.0
| 2023-09-14T17:10:38
| 2017-09-07T04:53:45
|
Python
|
UTF-8
|
Python
| false
| false
| 975
|
py
|
det.py
|
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class Det(Base):
@staticmethod
def export_2d() -> None:
node = onnx.helper.make_node(
"Det",
inputs=["x"],
outputs=["y"],
)
x = np.arange(4).reshape(2, 2).astype(np.float32)
y = np.linalg.det(x) # expect -2
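        # (Quick check, added for clarity: x == [[0, 1], [2, 3]], so det(x) = 0*3 - 1*2 = -2.)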
expect(node, inputs=[x], outputs=[y], name="test_det_2d")
@staticmethod
def export_nd() -> None:
node = onnx.helper.make_node(
"Det",
inputs=["x"],
outputs=["y"],
)
x = np.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]).astype(
np.float32
)
y = np.linalg.det(x) # expect array([-2., -3., -8.])
expect(node, inputs=[x], outputs=[y], name="test_det_nd")
|
03a1323a0e4894fc33fa2ac1d88cb69cbb5e8fae
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/s3prl/problem/superb/ks.py
|
2d0bf71457c691a90d3c809a8d6fdbb325362be0
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,987
|
py
|
ks.py
|
from s3prl import Container
from s3prl.corpus.speech_commands import gsc_v1_for_superb
from s3prl.dataset.utterance_classification_pipe import UtteranceClassificationPipe
from s3prl.nn import MeanPoolingLinear
from s3prl.sampler import BalancedWeightedSampler, FixedBatchSizeBatchSampler
from s3prl.task.utterance_classification_task import UtteranceClassificationTask
from s3prl.util.configuration import default_cfg
from .base import SuperbProblem
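# sox effects applied to every utterance: downmix to a single channel,
# resample to 16 kHz, and attenuate by 3 dB.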
EFFECTS = [["channels", "1"], ["rate", "16000"], ["gain", "-3.0"]]
class SuperbKS(SuperbProblem):
@default_cfg(
**SuperbProblem.setup.default_except(
corpus=dict(
CLS=gsc_v1_for_superb,
dataset_root="???",
),
train_datapipe=dict(
CLS=UtteranceClassificationPipe,
train_category_encoder=True,
sox_effects=EFFECTS,
),
train_sampler=dict(
CLS=BalancedWeightedSampler,
batch_size=32,
),
valid_datapipe=dict(
CLS=UtteranceClassificationPipe,
sox_effects=EFFECTS,
),
valid_sampler=dict(
CLS=BalancedWeightedSampler,
batch_size=32,
),
test_datapipe=dict(
CLS=UtteranceClassificationPipe,
sox_effects=EFFECTS,
),
test_sampler=dict(
CLS=FixedBatchSizeBatchSampler,
batch_size=32,
),
downstream=dict(
CLS=MeanPoolingLinear,
hidden_size=256,
),
task=dict(
CLS=UtteranceClassificationTask,
),
)
)
@classmethod
def setup(cls, **cfg):
super().setup(**cfg)
@default_cfg(
**SuperbProblem.train.default_except(
optimizer=dict(
CLS="torch.optim.Adam",
lr=1.0e-4,
),
trainer=dict(
total_steps=200000,
log_step=100,
eval_step=5000,
save_step=1000,
gradient_clipping=1.0,
gradient_accumulate_steps=1,
valid_metric="accuracy",
valid_higher_better=True,
),
)
)
@classmethod
def train(cls, **cfg):
super().train(**cfg)
@default_cfg(**SuperbProblem.inference.default_cfg)
@classmethod
def inference(cls, **cfg):
super().inference(**cfg)
@default_cfg(
stages=["setup", "train", "inference"],
start_stage="setup",
final_stage="inference",
setup=setup.default_cfg.deselect("workspace", "resume"),
train=train.default_cfg.deselect("workspace", "resume"),
inference=inference.default_cfg.deselect("workspace", "resume"),
)
@classmethod
def run(cls, **cfg):
super().run(**cfg)
|
5932013716c9b33959771252d3dfa6a715a5bbe8
|
681ee1b3339a394a7a8c1dcab4529fbf371975d3
|
/tasks/__behave.py
|
1c98389b02ab680c1b733d52758fe080860941f5
|
[
"BSD-2-Clause"
] |
permissive
|
behave/behave
|
0c26624d48a8a3e8dce53e792cf6a71fabd4a79a
|
1c6197b35c15e07b5bae62b3131a98f9caa88f4e
|
refs/heads/main
| 2023-08-31T13:23:37.644488
| 2023-07-31T20:06:53
| 2023-07-31T20:11:27
| 2,642,784
| 2,744
| 665
|
NOASSERTION
| 2023-09-01T19:32:05
| 2011-10-25T11:02:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
__behave.py
|
# -*- coding: UTF-8 -*-
"""
Invoke build script (python based).
.. seealso:: https://github.com/pyinvoke/invoke
"""
from __future__ import print_function
import sys
from invoke import task, Collection
# USE_PTY = os.isatty(sys.stdout)
USE_PTY = sys.stdout.isatty()
# ---------------------------------------------------------------------------
# TASKS
# ---------------------------------------------------------------------------
# MAYBE: echo=False):
@task(help={
"args": "Command line args for behave",
"format": "Formatter to use",
})
def behave_test(ctx, args="", format=""): # pylint: disable=redefined-builtin
"""Run behave tests."""
format = format or ctx.behave_test.format
options = ctx.behave_test.options or ""
args = args or ctx.behave_test.args
behave = "{python} bin/behave".format(python=sys.executable)
ctx.run("{behave} -f {format} {options} {args}".format(
behave=behave, format=format, options=options, args=args), pty=USE_PTY)
# ---------------------------------------------------------------------------
# TASK MANAGEMENT / CONFIGURATION
# ---------------------------------------------------------------------------
# namespace.add_task(behave_test, default=True)
namespace = Collection()
namespace.add_task(behave_test, default=True)
namespace.configure({
"behave_test": {
"args": "",
"format": "progress2",
"options": "", # -- NOTE: Overide in configfile "invoke.yaml"
},
})
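# Illustrative sketch (not part of the original file): the defaults above can be
# overridden from a project-level "invoke.yaml" config file, e.g.
#
#   behave_test:
#     format: pretty
#     options: "--stop"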
|
5b674f65a4d3596eba97516113fc4893f142c2d0
|
d8a5893ed1c75f4da55ca904b9f61ca2690f1caf
|
/databricks_cli/jobs/api.py
|
9e64071cb9a5455d0ed2ba36aae14b7b0b7f0e03
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
databricks/databricks-cli
|
082718a7729c67160a316cae76d687bdcde49ccd
|
9bb7f5b0e3da5d6386b0ca89bfbe68c942d8298c
|
refs/heads/main
| 2023-08-31T10:14:17.970777
| 2023-07-05T11:09:13
| 2023-07-05T11:09:13
| 93,211,371
| 375
| 270
|
NOASSERTION
| 2023-08-08T19:45:28
| 2017-06-02T23:38:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,069
|
py
|
api.py
|
# Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from databricks_cli.sdk import JobsService
class JobsApi(object):
def __init__(self, api_client):
self.client = JobsService(api_client)
def create_job(self, json, headers=None, version=None):
return self.client.client.perform_query('POST', '/jobs/create', data=json, headers=headers,
version=version)
def list_jobs(self, job_type=None, expand_tasks=None, offset=None, limit=None, headers=None,
version=None, name=None):
resp = self.client.list_jobs(job_type=job_type, expand_tasks=expand_tasks, offset=offset,
limit=limit, headers=headers, version=version,
name=name)
if 'jobs' not in resp:
resp['jobs'] = []
return resp
def delete_job(self, job_id, headers=None, version=None):
return self.client.delete_job(job_id, headers=headers, version=version)
def get_job(self, job_id, headers=None, version=None):
return self.client.get_job(job_id, headers=headers, version=version)
def reset_job(self, json, headers=None, version=None):
return self.client.client.perform_query('POST', '/jobs/reset', data=json, headers=headers,
version=version)
def run_now(self, job_id, jar_params, notebook_params, python_params, spark_submit_params,
python_named_params=None, idempotency_token=None, headers=None, version=None):
return self.client.run_now(job_id, jar_params, notebook_params, python_params,
spark_submit_params, python_named_params,
idempotency_token, headers=headers, version=version)
def _list_jobs_by_name(self, name, headers=None):
jobs = self.list_jobs(headers=headers, name=name)['jobs']
result = list(filter(lambda job: job['settings']['name'] == name, jobs))
return result
|
4189e4b9175e3a001cca7fbe0afcd5d09d36e182
|
2b9f0e11bca4cbaad224654884cac1e50667d7d5
|
/egs/dns_challenge/baseline/eval_on_synthetic.py
|
eaf91752ff5c2847111979deee403726038c1f31
|
[
"MIT"
] |
permissive
|
asteroid-team/asteroid
|
6dcfe6bc1571c7cc92dbab49962b002bab195c25
|
cd5b35d84381f325ad5f4bee861d8cb26ba7dae9
|
refs/heads/master
| 2023-08-22T19:18:14.649055
| 2023-07-19T11:38:26
| 2023-07-19T11:38:26
| 215,969,476
| 1,284
| 278
|
MIT
| 2023-09-12T07:09:36
| 2019-10-18T07:48:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,569
|
py
|
eval_on_synthetic.py
|
import glob
import os
import random
import soundfile as sf
import torch
import yaml
import json
import argparse
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from asteroid.metrics import get_metrics
from model import load_best_model
from local.preprocess_dns import make_wav_id_dict
parser = argparse.ArgumentParser()
parser.add_argument(
"--test_dir", type=str, required=True, help="Test directory including wav files"
)
parser.add_argument(
"--use_gpu", type=int, default=0, help="Whether to use the GPU for model execution"
)
parser.add_argument("--exp_dir", default="exp/tmp", help="Experiment root")
parser.add_argument(
"--n_save_ex", type=int, default=50, help="Number of audio examples to save, -1 means all"
)
ALL_METRICS = ["si_sdr", "sdr", "sir", "sar", "stoi", "pesq"]
COMPUTE_METRICS = ALL_METRICS
def main(conf):
# Get best trained model
model = load_best_model(conf["train_conf"], conf["exp_dir"])
if conf["use_gpu"]:
model = model.cuda()
# Evaluate performances separately w/ and w/o reverb
for subdir in ["with_reverb", "no_reverb"]:
dict_list = get_wavs_dict_list(os.path.join(conf["test_dir"], subdir))
save_dir = os.path.join(conf["exp_dir"], subdir + "examples/")
os.makedirs(save_dir, exist_ok=True)
all_metrics_df = evaluate(dict_list, model, conf=conf, save_dir=save_dir)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics_{}.csv".format(subdir)))
# Print and save summary metrics
final_results = {}
for metric_name in COMPUTE_METRICS:
input_metric_name = "input_" + metric_name
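            # Improvement of the estimate over the raw noisy input, e.g. si_sdr - input_si_sdr.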
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics {} :".format(subdir))
pprint(final_results)
filename = os.path.join(conf["exp_dir"], "final_metrics_{}.json".format(subdir))
with open(filename, "w") as f:
json.dump(final_results, f, indent=0)
def get_wavs_dict_list(test_dir):
"""Creates a list of example pair dictionaries.
Args:
test_dir (str): Directory where clean/ and noisy/ subdirectories can
be found.
Returns:
List[dict] : list of noisy/clean pair dictionaries.
Each dict looks like :
{'clean': clean_path,
'noisy': noisy_path,
'id': 3}
"""
# Find all clean files and make an {id: filepath} dictionary
clean_wavs = glob.glob(os.path.join(test_dir, "clean/*.wav"))
clean_dic = make_wav_id_dict(clean_wavs)
# Same for noisy files
noisy_wavs = glob.glob(os.path.join(test_dir, "noisy/*.wav"))
noisy_dic = make_wav_id_dict(noisy_wavs)
assert clean_dic.keys() == noisy_dic.keys()
# Combine both dictionaries
dict_list = [dict(clean=clean_dic[k], noisy=noisy_dic[k], id=k) for k in clean_dic.keys()]
return dict_list
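# Hedged illustration (added, not part of the original recipe): with the
# DNS-challenge naming convention the clean/ and noisy/ folders pair up by the
# trailing file id, so the returned list has the shape below. Paths are
# hypothetical and only show the structure.
#
#   [{'clean': 'test_dir/no_reverb/clean/clean_fileid_0.wav',
#     'noisy': 'test_dir/no_reverb/noisy/noisy_fileid_0.wav',
#     'id': '0'},
#    ...]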
def evaluate(dict_list, model, conf, save_dir=None):
model_device = next(model.parameters()).device
# Randomly choose the indexes of sentences to save.
if save_dir is None:
conf["n_save_ex"] = 0
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(dict_list)
save_idx = random.sample(range(len(dict_list)), conf["n_save_ex"])
series_list = []
for idx, wav_dic in enumerate(tqdm(dict_list)):
# Forward the network on the mixture.
noisy_np, clean_np, fs = load_wav_dic(wav_dic)
with torch.no_grad():
net_input = torch.tensor(noisy_np)[None, None].to(model_device)
est_clean_np = model.denoise(net_input).squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix=noisy_np,
clean=clean_np,
estimate=est_clean_np,
sample_rate=fs,
metrics_list=COMPUTE_METRICS,
)
utt_metrics["noisy_path"] = wav_dic["noisy"]
utt_metrics["clean_path"] = wav_dic["clean"]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "noisy.wav", noisy_np, fs)
sf.write(local_save_dir + "clean.wav", clean_np, fs)
sf.write(local_save_dir + "estimate.wav", est_clean_np, fs)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
return all_metrics_df
def load_wav_dic(wav_dic):
"""Load wavs files from a dictionary with path entries.
Returns:
tuple: noisy speech waveform, clean speech waveform.
"""
noisy_path, clean_path = wav_dic["noisy"], wav_dic["clean"]
noisy, fs = sf.read(noisy_path, dtype="float32")
clean, fs = sf.read(clean_path, dtype="float32")
return noisy, clean, fs
if __name__ == "__main__":
args = parser.parse_args()
arg_dic = dict(vars(args))
# Load training config
conf_path = os.path.join(args.exp_dir, "conf.yml")
with open(conf_path) as conf_file:
train_conf = yaml.safe_load(conf_file)
arg_dic["train_conf"] = train_conf
main(arg_dic)
|
65d228fae63e010dc0fd30b98faeeee2aa3455fb
|
e9869359c839c8c175ae7877bc35dcfdfe4058f8
|
/kornia/utils/_compat.py
|
5e94f77a98b531131d4e1bed912151a7f96d1649
|
[
"Apache-2.0"
] |
permissive
|
kornia/kornia
|
80f93eae6a70b8bc0c9784f92a842ab9a6ab54ae
|
1e0f8baa7318c05b17ea6dbb48605691bca8972f
|
refs/heads/master
| 2023-08-31T06:32:45.960859
| 2023-08-30T21:59:41
| 2023-08-30T21:59:41
| 145,693,916
| 7,351
| 833
|
Apache-2.0
| 2023-09-12T21:59:29
| 2018-08-22T10:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,857
|
py
|
_compat.py
|
from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, TypeVar
import torch
from packaging import version
from torch import Tensor
def torch_version() -> str:
"""Parse the `torch.__version__` variable and removes +cu*/cpu."""
return torch.__version__.split('+')[0]
def torch_version_lt(major: int, minor: int, patch: int) -> bool:
_version = version.parse(torch_version())
return _version < version.parse(f"{major}.{minor}.{patch}")
def torch_version_le(major: int, minor: int, patch: int) -> bool:
_version = version.parse(torch_version())
return _version <= version.parse(f"{major}.{minor}.{patch}")
def torch_version_ge(major: int, minor: int, patch: Optional[int] = None) -> bool:
_version = version.parse(torch_version())
if patch is None:
return _version >= version.parse(f"{major}.{minor}")
else:
return _version >= version.parse(f"{major}.{minor}.{patch}")
if TYPE_CHECKING:
# TODO: remove this branch when kornia relies on torch >= 1.10.0
def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
...
elif torch_version_ge(1, 10, 0):
def torch_meshgrid(tensors: List[Tensor], indexing: str):
return torch.meshgrid(tensors, indexing=indexing)
else:
# TODO: remove this branch when kornia relies on torch >= 1.10.0
def torch_meshgrid(tensors: List[Tensor], indexing: str):
return torch.meshgrid(tensors)
if TYPE_CHECKING:
# TODO: remove this branch when kornia relies on torch >= 1.10.0
_T = TypeVar('_T')
torch_inference_mode: Callable[..., ContextManager[_T]]
elif torch_version_ge(1, 10, 0):
torch_inference_mode = torch.inference_mode
else:
# TODO: remove this branch when kornia relies on torch >= 1.10.0
torch_inference_mode = torch.no_grad
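# Hedged usage sketch (added for illustration, not part of the kornia module):
# the helpers above act as feature gates that keep meshgrid / inference-mode
# calls uniform across torch versions. The shapes below assume only a standard
# torch install.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    with torch_inference_mode():
        ys, xs = torch_meshgrid([torch.arange(3), torch.arange(4)], indexing="ij")
    # Both grids are 3x4 regardless of the installed torch version.
    print(torch_version(), ys.shape, xs.shape)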
|
b1f1204c41a44118727a5da64ea1a0a5f2c4343f
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/healthcareapis/v20221001preview/_inputs.py
|
98f71cba7c9085b56c5816cd74cdbdf20d9021d9
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 8,659
|
py
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AnalyticsConnectorDataLakeDataDestinationArgs',
'AnalyticsConnectorFhirServiceDataSourceArgs',
'AnalyticsConnectorFhirToParquetMappingArgs',
'ServiceManagedIdentityIdentityArgs',
]
@pulumi.input_type
class AnalyticsConnectorDataLakeDataDestinationArgs:
def __init__(__self__, *,
data_lake_name: pulumi.Input[str],
type: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The Data Lake data destination for Analytics Connector.
:param pulumi.Input[str] data_lake_name: The name for the Data Lake.
:param pulumi.Input[str] type: Type of data destination.
Expected value is 'datalake'.
:param pulumi.Input[str] name: Name of data destination.
"""
pulumi.set(__self__, "data_lake_name", data_lake_name)
pulumi.set(__self__, "type", 'datalake')
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="dataLakeName")
def data_lake_name(self) -> pulumi.Input[str]:
"""
The name for the Data Lake.
"""
return pulumi.get(self, "data_lake_name")
@data_lake_name.setter
def data_lake_name(self, value: pulumi.Input[str]):
pulumi.set(self, "data_lake_name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of data destination.
Expected value is 'datalake'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of data destination.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class AnalyticsConnectorFhirServiceDataSourceArgs:
def __init__(__self__, *,
kind: pulumi.Input[Union[str, 'FhirServiceVersion']],
type: pulumi.Input[str],
url: pulumi.Input[str]):
"""
The FHIR service data source for Analytics Connector.
:param pulumi.Input[Union[str, 'FhirServiceVersion']] kind: The kind of FHIR Service.
:param pulumi.Input[str] type: Type of data source.
Expected value is 'fhirservice'.
:param pulumi.Input[str] url: The URL of FHIR service.
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "type", 'fhirservice')
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[Union[str, 'FhirServiceVersion']]:
"""
The kind of FHIR Service.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[Union[str, 'FhirServiceVersion']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of data source.
Expected value is 'fhirservice'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def url(self) -> pulumi.Input[str]:
"""
The URL of FHIR service.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: pulumi.Input[str]):
pulumi.set(self, "url", value)
@pulumi.input_type
class AnalyticsConnectorFhirToParquetMappingArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
extension_schema_reference: Optional[pulumi.Input[str]] = None,
filter_configuration_reference: Optional[pulumi.Input[str]] = None):
"""
FHIR Service data mapping configuration for Analytics Connector.
:param pulumi.Input[str] type: Type of data mapping.
Expected value is 'fhirToParquet'.
:param pulumi.Input[str] extension_schema_reference: Artifact reference for extension schema.
:param pulumi.Input[str] filter_configuration_reference: Artifact reference for filter configurations.
"""
pulumi.set(__self__, "type", 'fhirToParquet')
if extension_schema_reference is not None:
pulumi.set(__self__, "extension_schema_reference", extension_schema_reference)
if filter_configuration_reference is not None:
pulumi.set(__self__, "filter_configuration_reference", filter_configuration_reference)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of data mapping.
Expected value is 'fhirToParquet'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="extensionSchemaReference")
def extension_schema_reference(self) -> Optional[pulumi.Input[str]]:
"""
Artifact reference for extension schema.
"""
return pulumi.get(self, "extension_schema_reference")
@extension_schema_reference.setter
def extension_schema_reference(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extension_schema_reference", value)
@property
@pulumi.getter(name="filterConfigurationReference")
def filter_configuration_reference(self) -> Optional[pulumi.Input[str]]:
"""
Artifact reference for filter configurations.
"""
return pulumi.get(self, "filter_configuration_reference")
@filter_configuration_reference.setter
def filter_configuration_reference(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "filter_configuration_reference", value)
@pulumi.input_type
class ServiceManagedIdentityIdentityArgs:
def __init__(__self__, *,
type: pulumi.Input[Union[str, 'ServiceManagedIdentityType']],
user_assigned_identities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Setting indicating whether the service has a managed identity associated with it.
:param pulumi.Input[Union[str, 'ServiceManagedIdentityType']] type: Type of identity being specified, currently SystemAssigned and None are allowed.
:param pulumi.Input[Sequence[pulumi.Input[str]]] user_assigned_identities: The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty objects ({}) in requests.
"""
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter
def type(self) -> pulumi.Input[Union[str, 'ServiceManagedIdentityType']]:
"""
Type of identity being specified, currently SystemAssigned and None are allowed.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[Union[str, 'ServiceManagedIdentityType']]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The set of user assigned identities associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty objects ({}) in requests.
"""
return pulumi.get(self, "user_assigned_identities")
@user_assigned_identities.setter
def user_assigned_identities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "user_assigned_identities", value)
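# Hedged construction sketch (added, not part of the generated module): how
# these input classes might be wired into an Analytics Connector resource.
# All names and URLs are made up, and the 'fhir-R4' kind is only illustrative
# of a FhirServiceVersion value.
#
#   destination = AnalyticsConnectorDataLakeDataDestinationArgs(
#       data_lake_name="exampledatalake",
#       type="datalake")
#   source = AnalyticsConnectorFhirServiceDataSourceArgs(
#       kind="fhir-R4",
#       type="fhirservice",
#       url="https://example-workspace-fhir.fhir.azurehealthcareapis.com")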
|
d70ffa5205db4b3f5a6d6bb2861d7f29434ac583
|
a411a55762de11dc2c9d913ff33d2f1477ac02cf
|
/dp/cloud/python/magma/configuration_controller/tests/unit/test_dp_logs.py
|
b20911641a4b76c066d2d8b01df26733f057c1a5
|
[
"BSD-3-Clause"
] |
permissive
|
magma/magma
|
0dc48c1513d9968bd05fb7589f302c192b7c0f94
|
0e1d895dfe625681229e181fbc2dbad83e13c5cb
|
refs/heads/master
| 2023-09-04T09:31:56.140395
| 2023-08-29T13:54:49
| 2023-08-29T13:54:49
| 170,803,235
| 1,219
| 525
|
NOASSERTION
| 2023-09-07T17:45:42
| 2019-02-15T04:46:24
|
C++
|
UTF-8
|
Python
| false
| false
| 4,126
|
py
|
test_dp_logs.py
|
"""
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
from freezegun import freeze_time
from magma.configuration_controller.custom_types.custom_types import DBResponse
from magma.db_service.models import (
DBCbsd,
DBCbsdState,
DBRequest,
DBRequestType,
)
from magma.db_service.tests.local_db_test_case import LocalDBTestCase
from magma.fluentd_client.client import DPLog
from magma.fluentd_client.dp_logs import make_dp_log, now
from parameterized import parameterized
DP = 'DP'
CBSD = 'CBSD'
SAS = 'SAS'
SOME_FCC_ID = 'some_fcc_id'
HEARTBEAT_REQUEST = 'heartbeatRequest'
SOME_SERIAL_NUMBER = 'some_serial_number'
SOME_NETWORK_ID = 'some_network_id'
SOME_MESSAGE = 'some_message'
SOME_DATE = datetime.datetime.now(datetime.timezone.utc)
SOME_TIMESTAMP = int(SOME_DATE.timestamp())
class IncorrectDPLog(object):
...
@freeze_time(SOME_DATE)
class DPLogsTestCase(LocalDBTestCase):
def setUp(self):
super().setUp()
cbsd_state = DBCbsdState(name='some_cbsd_state')
cbsd = DBCbsd(
state=cbsd_state,
desired_state=cbsd_state,
fcc_id=SOME_FCC_ID,
cbsd_serial_number=SOME_SERIAL_NUMBER,
network_id=SOME_NETWORK_ID,
)
req_type = DBRequestType(name=HEARTBEAT_REQUEST)
self.session.add_all([cbsd, req_type])
self.session.commit()
@parameterized.expand([
(False, '', '', ''),
(True, SOME_SERIAL_NUMBER, SOME_FCC_ID, SOME_NETWORK_ID),
])
def test_dp_log_created_from_db_request(self, with_cbsd, serial_num, fcc_id, network_id):
# Given
req_type = self.session.query(DBRequestType).first()
cbsd = None
if with_cbsd:
cbsd = self.session.query(DBCbsd).first()
request = DBRequest(type=req_type, cbsd=cbsd, payload=SOME_MESSAGE)
# When
actual_log = make_dp_log(request)
# Then
expected_log = DPLog(
event_timestamp=SOME_TIMESTAMP,
cbsd_serial_number=serial_num,
fcc_id=fcc_id,
log_from=DP,
log_message=SOME_MESSAGE,
log_name=HEARTBEAT_REQUEST,
log_to=SAS,
network_id=network_id,
response_code=None,
)
self.assertEqual(expected_log, actual_log)
@parameterized.expand([
(False, '', '', ''),
(True, SOME_SERIAL_NUMBER, SOME_FCC_ID, SOME_NETWORK_ID),
])
def test_dp_log_created_from_db_response(self, with_cbsd, serial_num, fcc_id, network_id):
# Given
req_type = self.session.query(DBRequestType).first()
cbsd = None
if with_cbsd:
cbsd = self.session.query(DBCbsd).first()
request = DBRequest(type=req_type, cbsd=cbsd, payload='some_request_message')
resp_payload = {"response": {"responseCode": "0"}}
response = DBResponse(request=request, response_code=200, payload=resp_payload)
# When
actual_log = make_dp_log(response)
# Then
expected_log = DPLog(
event_timestamp=SOME_TIMESTAMP,
cbsd_serial_number=serial_num,
fcc_id=fcc_id,
log_from=SAS,
log_message="{'response': {'responseCode': '0'}}",
log_name='heartbeatResponse',
log_to=DP,
network_id=network_id,
response_code="0",
)
self.assertEqual(expected_log, actual_log)
def test_make_dp_log_returns_type_error_for_unknown_message_type(self):
with self.assertRaises(TypeError):
make_dp_log(IncorrectDPLog())
def test_datetime_now(self):
self.assertEqual(SOME_TIMESTAMP, now())
|
d91ff182242877278244dc2bdedab8547ecf430b
|
c19bcbc98555ef06276f9f0dcffc9ac35942a7c4
|
/jc/parsers/clf_s.py
|
fc43ed1e46d47af62c0e03257d10779bbaa5fb4d
|
[
"MIT"
] |
permissive
|
kellyjonbrazil/jc
|
4e81a5421cd20be5965baf375f4a5671c2ef0410
|
4cd721be8595db52b620cc26cd455d95bf56b85b
|
refs/heads/master
| 2023-08-30T09:53:18.284296
| 2023-07-30T17:08:39
| 2023-07-30T17:08:39
| 215,404,927
| 6,278
| 185
|
MIT
| 2023-09-08T14:52:22
| 2019-10-15T22:04:52
|
Python
|
UTF-8
|
Python
| false
| false
| 6,863
|
py
|
clf_s.py
|
"""jc - JSON Convert Common Log Format file streaming parser
> This streaming parser outputs JSON Lines (cli) or returns an Iterable of
> Dictionaries (module)
This parser will handle the Common Log Format standard as specified at
https://www.w3.org/Daemon/User/Config/Logging.html#common-logfile-format.
Combined Log Format is also supported. (Referer and User Agent fields added)
Extra fields may be present and will be enclosed in the `extra` field as
a single string.
If a log line cannot be parsed, an object with an `unparsable` field will
be present with a value of the original line.
The `epoch` calculated timestamp field is naive. (i.e. based on the
local time of the system the parser is run on)
The `epoch_utc` calculated timestamp field is timezone-aware and is
only available if the timezone field is UTC.
Usage (cli):
$ cat file.log | jc --clf-s
Usage (module):
import jc
result = jc.parse('clf_s', common_log_file_output.splitlines())
for item in result:
# do something
Schema:
Empty strings and `-` values are converted to `null`/`None`.
{
"host": string,
"ident": string,
"authuser": string,
"date": string,
"day": integer,
"month": string,
"year": integer,
"hour": integer,
"minute": integer,
"second": integer,
"tz": string,
"request": string,
"request_method": string,
"request_url": string,
"request_version": string,
"status": integer,
"bytes": integer,
"referer": string,
"user_agent": string,
"extra": string,
"epoch": integer, # [0]
"epoch_utc": integer, # [1]
"unparsable": string # [2]
}
[0] naive timestamp
[1] timezone-aware timestamp. Only available if timezone field is UTC
[2] exists if the line was not able to be parsed
Examples:
$ cat file.log | jc --clf-s
{"host":"127.0.0.1","ident":"user-identifier","authuser":"frank","...}
{"host":"1.1.1.2","ident":null,"authuser":null,"date":"11/Nov/2016...}
...
$ cat file.log | jc --clf-s -r
{"host":"127.0.0.1","ident":"user-identifier","authuser":"frank","...}
{"host":"1.1.1.2","ident":"-","authuser":"-","date":"11/Nov/2016:0...}
...
"""
import re
from typing import Dict, Iterable
import jc.utils
from jc.streaming import (
add_jc_meta, streaming_input_type_check, streaming_line_input_type_check, raise_or_yield
)
from jc.jc_types import JSONDictType, StreamingOutputType
from jc.exceptions import ParseError
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.0'
description = 'Common and Combined Log Format file streaming parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
tags = ['standard', 'file', 'string']
streaming = True
__version__ = info.version
def _process(proc_data: JSONDictType) -> JSONDictType:
"""
Final processing to conform to the schema.
Parameters:
proc_data: (Dictionary) raw structured data to process
Returns:
Dictionary. Structured data to conform to the schema.
"""
int_list = {'day', 'year', 'hour', 'minute', 'second', 'status', 'bytes'}
for key, val in proc_data.items():
# integer conversions
if key in int_list:
proc_data[key] = jc.utils.convert_to_int(val)
# convert `-` and blank values to None
if val == '-' or val == '':
proc_data[key] = None
# add unix timestamps
if 'date' in proc_data:
ts = jc.utils.timestamp(proc_data['date'], format_hint=(1800,))
proc_data['epoch'] = ts.naive
proc_data['epoch_utc'] = ts.utc
return proc_data
@add_jc_meta
def parse(
data: Iterable[str],
raw: bool = False,
quiet: bool = False,
ignore_exceptions: bool = False
) -> StreamingOutputType:
"""
Main text parsing generator function. Returns an iterable object.
Parameters:
data: (iterable) line-based text data to parse
(e.g. sys.stdin or str.splitlines())
raw: (boolean) unprocessed output if True
quiet: (boolean) suppress warning messages if True
ignore_exceptions: (boolean) ignore parsing exceptions if True
Returns:
Iterable of Dictionaries
"""
jc.utils.compatibility(__name__, info.compatible, quiet)
streaming_input_type_check(data)
clf_pattern = re.compile(r'''
^(?P<host>-|\S+)\s
(?P<ident>-|\S+)\s
(?P<authuser>-|\S+)\s
\[
(?P<date>
(?P<day>\d+)/
(?P<month>\S\S\S)/
(?P<year>\d\d\d\d):
(?P<hour>\d\d):
(?P<minute>\d\d):
(?P<second>\d\d)\s
(?P<tz>\S+)
)
\]\s
\"(?P<request>.*?)\"\s
(?P<status>-|\d\d\d)\s
(?P<bytes>-|\d+)\s?
(?:\"(?P<referer>.*?)\"\s?)?
(?:\"(?P<user_agent>.*?)\"\s?)?
(?P<extra>.*)
''', re.VERBOSE
)
request_pattern = re.compile(r'''
(?P<request_method>\S+)\s
(?P<request_url>.*?(?=\sHTTPS?/|$))\s? # positive lookahead for HTTP(S)/ or end of string
(?P<request_version>HTTPS?/[\d\.]+)?
''', re.VERBOSE
)
for line in data:
try:
streaming_line_input_type_check(line)
output_line: Dict = {}
if not line.strip():
continue
clf_match = re.match(clf_pattern, line)
if clf_match:
output_line = clf_match.groupdict()
if output_line.get('request', None):
request_string = output_line['request']
request_match = re.match(request_pattern, request_string)
if request_match:
output_line.update(request_match.groupdict())
else:
output_line = {"unparsable": line.strip()}
if output_line:
yield output_line if raw else _process(output_line)
else:
raise ParseError('Not Common Log Format data')
except Exception as e:
yield raise_or_yield(ignore_exceptions, e, line)
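# Hedged example (added, not part of the original parser): feeding one classic
# CLF line from the docstring through the streaming parser. It assumes only the
# jc package itself is installed; the output keys follow the schema above.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    sample = ('127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] '
              '"GET /apache_pb.gif HTTP/1.0" 200 2326')
    for item in parse([sample], quiet=True):
        print(item['host'], item['request_method'], item['status'], item['bytes'])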
|
08f5773a359cd26b113b39e26be7e70ddbe15bb0
|
0621aabcfeac5e4a08d49aa3f791fe46f366a734
|
/doc/Programs/LecturePrograms/programs/PDE/python/2dwave/testnew.py
|
f6f940b6e110e1be424ccfbdc0a0314c93c3dde7
|
[
"CC0-1.0"
] |
permissive
|
CompPhysics/ComputationalPhysics
|
16341df05a7094ea6442c98466a704249c7085dc
|
be73eb8bbb989a69fb27e87d7d2686451e7e81f9
|
refs/heads/master
| 2022-03-20T17:41:39.981703
| 2022-03-04T17:36:23
| 2022-03-04T17:36:23
| 48,985,734
| 287
| 146
|
CC0-1.0
| 2020-12-04T12:55:37
| 2016-01-04T09:01:24
| null |
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
testnew.py
|
#!/usr/bin/env python
# This script reads in data from file with the solutions of the
# 2dim wave function. The data are organized as
# time
# l, i, j, u(i,j) where l is the time index t_l, i refers to x_i and j to y_j
# At the end it converts a series of png files to a movie
# file movie.gif. You can run this movie file using the imagemagick
# software animate as - animate movie.gif et voila', Hollywood next
# It creates a movie of the time evolution with the scitools.easyviz library.
# To fetch this addition to python go to the link
# http://code.google.com/p/scitools/wiki/Installation
# This additional tool is the same as that used in INF1100 and should
# be installed on most machines.
from numpy import *
from scitools.easyviz import *
import sys, os
try:
inputfilename = sys.argv[1]
except:
print "Usage of this script", sys.argv[0], "inputfile"; sys.exit(1)
# Read file with data
ifile = open(inputfilename, 'r')
uarray = {}
for line in ifile:
elements = line.split()
if len(elements) < 4: continue
l, i, j, uvalue = elements
l = int(l); i = int(i)
j = int(j); uvalue = float(uvalue)
if not l in uarray:
uarray[l] = {}
if not i in uarray[l]:
uarray[l][i] = {}
if not j in uarray[l][i]:
uarray[l][i][j] = {}
uarray[l][i][j] = uvalue
ifile.close()
# Fixed Lengths used in other function to set up the grids.
Lx = 1; nx = len(uarray[1]);
Ly = 1; ny = len(uarray[1][0]);
ntime = len(uarray);
print nx, ny, ntime
x,y = ndgrid(linspace(0,Lx,nx),linspace(0,Ly,ny),sparse=False)
plotnr = 0
u = zeros([nx, ny])
# Loop over time steps
for l in xrange(1, ntime):
for i in xrange(0,nx):
for j in xrange(0,ny):
u[i,j] = uarray[l][i][j]
plotnr += 1
mesh(x,y,u,hardcopy='frame%04d.png'%plotnr,show=False,axis=[0,1,0,1,-1,1])
#Make movie
movie('frame*.png',encoder='convert', output_file='movie.gif', fps=10)
cmd = 'animate movie.gif'
os.system(cmd)
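# Hedged data-format note (added): each data line "l i j u" fills the nested
# dictionary built above; e.g. a hypothetical line "0 1 2 0.5" ends up as
# uarray[0][1][2] = 0.5, i.e. the displacement u(x_1, y_2) at time step t_0.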
|
d91ee45167e7f11bcdc52d699d3c59607a8d7040
|
646ed67b2b4d730fa17bbc8693771c6047f5213b
|
/post.py
|
0e5acd4903d667d707b35875a9166d0a6c0783db
|
[
"MIT"
] |
permissive
|
zhou13/lcnn
|
f5d67b5c50d67b5d298a94ee676eed44678654c6
|
57524636bc4614a32beac1af3b31f66ded2122ae
|
refs/heads/master
| 2022-05-25T20:11:21.470252
| 2022-05-10T06:44:05
| 2022-05-10T06:44:05
| 185,550,840
| 448
| 101
|
MIT
| 2021-09-27T03:44:49
| 2019-05-08T07:03:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,160
|
py
|
post.py
|
#!/usr/bin/env python3
"""Post-processing the output of neural network
Usage:
post.py [options] <input-dir> <output-dir>
post.py ( -h | --help )
Examples:
post.py logs/logname/npz/000336000 result/logname
Arguments:
input-dir Directory that stores the npz
output-dir Output directory
Options:
-h --help Show this screen.
--plot Generate images besides npz files
--thresholds=<thresholds> A comma-separated list for thresholding
[default: 0.006,0.010,0.015]
"""
import glob
import math
import os
import os.path as osp
import sys
import cv2
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
from lcnn.postprocess import postprocess
from lcnn.utils import parmap
PLTOPTS = {"color": "#33FFFF", "s": 1.2, "edgecolors": "none", "zorder": 5}
cmap = plt.get_cmap("jet")
norm = mpl.colors.Normalize(vmin=0.92, vmax=1.02)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
def c(x):
return sm.to_rgba(x)
def imshow(im):
plt.close()
sizes = im.shape
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width / height, 1, forward=False)
ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
ax.set_axis_off()
fig.add_axes(ax)
plt.xlim([-0.5, sizes[1] - 0.5])
plt.ylim([sizes[0] - 0.5, -0.5])
plt.imshow(im)
def main():
args = docopt(__doc__)
files = sorted(glob.glob(osp.join(args["<input-dir>"], "*.npz")))
inames = sorted(glob.glob("data/wireframe/valid-images/*.jpg"))
gts = sorted(glob.glob("data/wireframe/valid/*.npz"))
prefix = args["<output-dir>"]
inputs = list(zip(files, inames, gts))
thresholds = list(map(float, args["--thresholds"].split(",")))
def handle(allname):
fname, iname, gtname = allname
print("Processing", fname)
im = cv2.imread(iname)
with np.load(fname) as f:
lines = f["lines"]
scores = f["score"]
with np.load(gtname) as f:
gtlines = f["lpos"][:, :, :2]
gtlines[:, :, 0] *= im.shape[0] / 128
gtlines[:, :, 1] *= im.shape[1] / 128
for i in range(1, len(lines)):
if (lines[i] == lines[0]).all():
lines = lines[:i]
scores = scores[:i]
break
lines[:, :, 0] *= im.shape[0] / 128
lines[:, :, 1] *= im.shape[1] / 128
diag = (im.shape[0] ** 2 + im.shape[1] ** 2) ** 0.5
for threshold in thresholds:
nlines, nscores = postprocess(lines, scores, diag * threshold, 0, False)
outdir = osp.join(prefix, f"{threshold:.3f}".replace(".", "_"))
os.makedirs(outdir, exist_ok=True)
npz_name = osp.join(outdir, osp.split(fname)[-1])
if args["--plot"]:
# plot gt
imshow(im[:, :, ::-1])
for (a, b) in gtlines:
plt.plot([a[1], b[1]], [a[0], b[0]], c="orange", linewidth=0.5)
plt.scatter(a[1], a[0], **PLTOPTS)
plt.scatter(b[1], b[0], **PLTOPTS)
plt.savefig(npz_name.replace(".npz", ".png"), dpi=500, bbox_inches=0)
thres = [0.96, 0.97, 0.98, 0.99]
for i, t in enumerate(thres):
imshow(im[:, :, ::-1])
for (a, b), s in zip(nlines[nscores > t], nscores[nscores > t]):
plt.plot([a[1], b[1]], [a[0], b[0]], c=c(s), linewidth=0.5)
plt.scatter(a[1], a[0], **PLTOPTS)
plt.scatter(b[1], b[0], **PLTOPTS)
plt.savefig(
npz_name.replace(".npz", f"_{i}.png"), dpi=500, bbox_inches=0
)
nlines[:, :, 0] *= 128 / im.shape[0]
nlines[:, :, 1] *= 128 / im.shape[1]
np.savez_compressed(npz_name, lines=nlines, score=nscores)
parmap(handle, inputs, 12)
if __name__ == "__main__":
main()
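# Hedged worked example (added): the pixel tolerance handed to postprocess() is
# diag * threshold. For a hypothetical 512x512 image, diag = sqrt(512^2 + 512^2)
# ~= 724.1 px, so the default thresholds 0.006 / 0.010 / 0.015 correspond to
# roughly 4.3, 7.2 and 10.9 pixels respectively.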
|
68495114a874133690291bc785504ade50aa9ecf
|
d7b9b490c954c7a9160b69f8ce2c907ef4681ecb
|
/jobs/migrations/0014_merge.py
|
0b65016dae5f0d0f98a6760dd4eb4c30017b77a0
|
[
"Apache-2.0"
] |
permissive
|
python/pythondotorg
|
00db93a4b1789a4d438806d106d9cee3349ad78c
|
c4ee749942227ca75c8e670546afe67232d647b2
|
refs/heads/main
| 2023-08-28T20:04:24.735314
| 2023-08-03T19:12:29
| 2023-08-03T19:12:29
| 6,127,047
| 1,131
| 646
|
Apache-2.0
| 2023-08-24T15:57:04
| 2012-10-08T16:00:15
|
Python
|
UTF-8
|
Python
| false
| false
| 226
|
py
|
0014_merge.py
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0013_auto_20170810_1627'),
('jobs', '0013_auto_20170810_1625'),
]
operations = [
]
|
a460f20ac1fc2c8a48f26cb233aa2a1f2076d170
|
9335c48ecf8e8c003d014b8fc2a2fe1ad22ea379
|
/pytorch_toolbelt/losses/logcosh.py
|
abc82e5d0d8cf1f66a00e65aab84376dd962a9a4
|
[
"MIT"
] |
permissive
|
BloodAxe/pytorch-toolbelt
|
7f86f3f3f9a7cdcb8d49a5f45882f7d16556c535
|
75e6f467472702acbbb7e690d8cbf5496b859c29
|
refs/heads/develop
| 2023-08-28T18:57:51.377858
| 2023-08-27T09:50:01
| 2023-08-27T09:50:01
| 175,851,515
| 1,503
| 126
|
MIT
| 2023-08-19T14:23:55
| 2019-03-15T16:02:49
|
Python
|
UTF-8
|
Python
| false
| false
| 334
|
py
|
logcosh.py
|
import torch
from pytorch_toolbelt.losses.functional import log_cosh_loss
from torch import nn
__all__ = ["LogCoshLoss"]
class LogCoshLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
return log_cosh_loss(y_pred, y_true)
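# Hedged usage sketch (added, not part of the original module): LogCoshLoss is a
# drop-in nn.Module criterion; the example assumes log_cosh_loss reduces the
# element-wise log(cosh(pred - target)) to a scalar tensor.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    criterion = LogCoshLoss()
    y_pred = torch.randn(8, 1)
    y_true = torch.randn(8, 1)
    print(criterion(y_pred, y_true))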
|
2cbe247c6eb733a37d8fbbe96fe97f2f66820b39
|
06cb7dc8d34b13de8c3ab617089a986ce91d9510
|
/pymysql/tests/test_basic.py
|
e77605fdf477d011f1539d13060f9ba85ebd43fc
|
[
"MIT"
] |
permissive
|
PyMySQL/PyMySQL
|
060c12ead432563f5f881f65475fb7ab308b1908
|
6b10225c94087d47782049aafc8e12efa512337b
|
refs/heads/main
| 2023-08-29T19:37:22.643725
| 2023-06-30T16:29:58
| 2023-06-30T16:29:58
| 2,114,213
| 7,442
| 1,604
|
MIT
| 2023-09-14T21:55:03
| 2011-07-27T17:38:47
|
Python
|
UTF-8
|
Python
| false
| false
| 15,010
|
py
|
test_basic.py
|
import datetime
import json
import time
import pytest
import pymysql.cursors
from pymysql.tests import base
__all__ = ["TestConversion", "TestCursor", "TestBulkInserts"]
class TestConversion(base.PyMySQLTestCase):
def test_datatypes(self):
"""test every data type"""
conn = self.connect()
c = conn.cursor()
c.execute(
"""
create table test_datatypes (
b bit,
i int,
l bigint,
f real,
s varchar(32),
u varchar(32),
bb blob,
d date,
dt datetime,
ts timestamp,
td time,
t time,
st datetime)
"""
)
try:
# insert values
v = (
True,
-3,
123456789012,
5.7,
"hello'\" world",
"Espa\xc3\xb1ol",
"binary\x00data".encode(conn.encoding),
datetime.date(1988, 2, 2),
datetime.datetime(2014, 5, 15, 7, 45, 57),
datetime.timedelta(5, 6),
datetime.time(16, 32),
time.localtime(),
)
c.execute(
"insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values"
" (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
v,
)
c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
r = c.fetchone()
self.assertEqual(b"\x01", r[0])
self.assertEqual(v[1:10], r[1:10])
self.assertEqual(
datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10]
)
self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])
c.execute("delete from test_datatypes")
# check nulls
c.execute(
"insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st)"
" values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
[None] * 12,
)
c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
r = c.fetchone()
self.assertEqual(tuple([None] * 12), r)
c.execute("delete from test_datatypes")
# check sequences type
for seq_type in (tuple, list, set, frozenset):
c.execute(
"insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)"
)
seq = seq_type([2, 6])
c.execute(
"select l from test_datatypes where i in %s order by i", (seq,)
)
r = c.fetchall()
self.assertEqual(((4,), (8,)), r)
c.execute("delete from test_datatypes")
finally:
c.execute("drop table test_datatypes")
def test_dict(self):
"""test dict escaping"""
conn = self.connect()
c = conn.cursor()
c.execute("create table test_dict (a integer, b integer, c integer)")
try:
c.execute(
"insert into test_dict (a,b,c) values (%(a)s, %(b)s, %(c)s)",
{"a": 1, "b": 2, "c": 3},
)
c.execute("select a,b,c from test_dict")
self.assertEqual((1, 2, 3), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_string(self):
conn = self.connect()
c = conn.cursor()
c.execute("create table test_dict (a text)")
test_value = "I am a test string"
try:
c.execute("insert into test_dict (a) values (%s)", test_value)
c.execute("select a from test_dict")
self.assertEqual((test_value,), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_integer(self):
conn = self.connect()
c = conn.cursor()
c.execute("create table test_dict (a integer)")
test_value = 12345
try:
c.execute("insert into test_dict (a) values (%s)", test_value)
c.execute("select a from test_dict")
self.assertEqual((test_value,), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_binary(self):
"""test binary data"""
data = bytes(bytearray(range(255)))
conn = self.connect()
self.safe_create_table(
conn, "test_binary", "create table test_binary (b binary(255))"
)
with conn.cursor() as c:
c.execute("insert into test_binary (b) values (_binary %s)", (data,))
c.execute("select b from test_binary")
self.assertEqual(data, c.fetchone()[0])
def test_blob(self):
"""test blob data"""
data = bytes(bytearray(range(256)) * 4)
conn = self.connect()
self.safe_create_table(conn, "test_blob", "create table test_blob (b blob)")
with conn.cursor() as c:
c.execute("insert into test_blob (b) values (_binary %s)", (data,))
c.execute("select b from test_blob")
self.assertEqual(data, c.fetchone()[0])
def test_untyped(self):
"""test conversion of null, empty string"""
conn = self.connect()
c = conn.cursor()
c.execute("select null,''")
self.assertEqual((None, ""), c.fetchone())
c.execute("select '',null")
self.assertEqual(("", None), c.fetchone())
def test_timedelta(self):
"""test timedelta conversion"""
conn = self.connect()
c = conn.cursor()
c.execute(
"select time('12:30'), time('23:12:59'), time('23:12:59.05100'),"
+ " time('-12:30'), time('-23:12:59'), time('-23:12:59.05100'), time('-00:30')"
)
self.assertEqual(
(
datetime.timedelta(0, 45000),
datetime.timedelta(0, 83579),
datetime.timedelta(0, 83579, 51000),
-datetime.timedelta(0, 45000),
-datetime.timedelta(0, 83579),
-datetime.timedelta(0, 83579, 51000),
-datetime.timedelta(0, 1800),
),
c.fetchone(),
)
def test_datetime_microseconds(self):
"""test datetime conversion w microseconds"""
conn = self.connect()
c = conn.cursor()
dt = datetime.datetime(2013, 11, 12, 9, 9, 9, 123450)
c.execute("create table test_datetime (id int, ts datetime(6))")
try:
c.execute("insert into test_datetime values (%s, %s)", (1, dt))
c.execute("select ts from test_datetime")
self.assertEqual((dt,), c.fetchone())
finally:
c.execute("drop table test_datetime")
class TestCursor(base.PyMySQLTestCase):
# this test case does not work quite right yet, however,
# we substitute in None for the erroneous field which is
# compatible with the DB-API 2.0 spec and has not broken
# any unit tests for anything we've tried.
# def test_description(self):
# """ test description attribute """
# # result is from MySQLdb module
# r = (('Host', 254, 11, 60, 60, 0, 0),
# ('User', 254, 16, 16, 16, 0, 0),
# ('Password', 254, 41, 41, 41, 0, 0),
# ('Select_priv', 254, 1, 1, 1, 0, 0),
# ('Insert_priv', 254, 1, 1, 1, 0, 0),
# ('Update_priv', 254, 1, 1, 1, 0, 0),
# ('Delete_priv', 254, 1, 1, 1, 0, 0),
# ('Create_priv', 254, 1, 1, 1, 0, 0),
# ('Drop_priv', 254, 1, 1, 1, 0, 0),
# ('Reload_priv', 254, 1, 1, 1, 0, 0),
# ('Shutdown_priv', 254, 1, 1, 1, 0, 0),
# ('Process_priv', 254, 1, 1, 1, 0, 0),
# ('File_priv', 254, 1, 1, 1, 0, 0),
# ('Grant_priv', 254, 1, 1, 1, 0, 0),
# ('References_priv', 254, 1, 1, 1, 0, 0),
# ('Index_priv', 254, 1, 1, 1, 0, 0),
# ('Alter_priv', 254, 1, 1, 1, 0, 0),
# ('Show_db_priv', 254, 1, 1, 1, 0, 0),
# ('Super_priv', 254, 1, 1, 1, 0, 0),
# ('Create_tmp_table_priv', 254, 1, 1, 1, 0, 0),
# ('Lock_tables_priv', 254, 1, 1, 1, 0, 0),
# ('Execute_priv', 254, 1, 1, 1, 0, 0),
# ('Repl_slave_priv', 254, 1, 1, 1, 0, 0),
# ('Repl_client_priv', 254, 1, 1, 1, 0, 0),
# ('Create_view_priv', 254, 1, 1, 1, 0, 0),
# ('Show_view_priv', 254, 1, 1, 1, 0, 0),
# ('Create_routine_priv', 254, 1, 1, 1, 0, 0),
# ('Alter_routine_priv', 254, 1, 1, 1, 0, 0),
# ('Create_user_priv', 254, 1, 1, 1, 0, 0),
# ('Event_priv', 254, 1, 1, 1, 0, 0),
# ('Trigger_priv', 254, 1, 1, 1, 0, 0),
# ('ssl_type', 254, 0, 9, 9, 0, 0),
# ('ssl_cipher', 252, 0, 65535, 65535, 0, 0),
# ('x509_issuer', 252, 0, 65535, 65535, 0, 0),
# ('x509_subject', 252, 0, 65535, 65535, 0, 0),
# ('max_questions', 3, 1, 11, 11, 0, 0),
# ('max_updates', 3, 1, 11, 11, 0, 0),
# ('max_connections', 3, 1, 11, 11, 0, 0),
# ('max_user_connections', 3, 1, 11, 11, 0, 0))
# conn = self.connect()
# c = conn.cursor()
# c.execute("select * from mysql.user")
#
# self.assertEqual(r, c.description)
def test_fetch_no_result(self):
"""test a fetchone() with no rows"""
conn = self.connect()
c = conn.cursor()
c.execute("create table test_nr (b varchar(32))")
try:
data = "pymysql"
c.execute("insert into test_nr (b) values (%s)", (data,))
self.assertEqual(None, c.fetchone())
finally:
c.execute("drop table test_nr")
def test_aggregates(self):
"""test aggregate functions"""
conn = self.connect()
c = conn.cursor()
try:
c.execute("create table test_aggregates (i integer)")
for i in range(0, 10):
c.execute("insert into test_aggregates (i) values (%s)", (i,))
c.execute("select sum(i) from test_aggregates")
(r,) = c.fetchone()
self.assertEqual(sum(range(0, 10)), r)
finally:
c.execute("drop table test_aggregates")
def test_single_tuple(self):
"""test a single tuple"""
conn = self.connect()
c = conn.cursor()
self.safe_create_table(
conn, "mystuff", "create table mystuff (id integer primary key)"
)
c.execute("insert into mystuff (id) values (1)")
c.execute("insert into mystuff (id) values (2)")
c.execute("select id from mystuff where id in %s", ((1,),))
self.assertEqual([(1,)], list(c.fetchall()))
c.close()
def test_json(self):
args = self.databases[0].copy()
args["charset"] = "utf8mb4"
conn = pymysql.connect(**args)
# MariaDB only has limited JSON support, stores data as longtext
# https://mariadb.com/kb/en/json-data-type/
if not self.mysql_server_is(conn, (5, 7, 0)):
pytest.skip("JSON type is only supported on MySQL >= 5.7")
self.safe_create_table(
conn,
"test_json",
"""\
create table test_json (
id int not null,
json JSON not null,
primary key (id)
);""",
)
cur = conn.cursor()
json_str = '{"hello": "こんにちは"}'
cur.execute("INSERT INTO test_json (id, `json`) values (42, %s)", (json_str,))
cur.execute("SELECT `json` from `test_json` WHERE `id`=42")
res = cur.fetchone()[0]
self.assertEqual(json.loads(res), json.loads(json_str))
cur.execute("SELECT CAST(%s AS JSON) AS x", (json_str,))
res = cur.fetchone()[0]
self.assertEqual(json.loads(res), json.loads(json_str))
class TestBulkInserts(base.PyMySQLTestCase):
cursor_type = pymysql.cursors.DictCursor
def setUp(self):
super().setUp()
self.conn = conn = self.connect()
# create a table and some data to query
self.safe_create_table(
conn,
"bulkinsert",
"""\
CREATE TABLE bulkinsert
(
id int,
name char(20),
age int,
height int,
PRIMARY KEY (id)
)
""",
)
def _verify_records(self, data):
conn = self.connect()
cursor = conn.cursor()
cursor.execute("SELECT id, name, age, height from bulkinsert")
result = cursor.fetchall()
self.assertEqual(sorted(data), sorted(result))
def test_bulk_insert(self):
conn = self.connect()
cursor = conn.cursor()
data = [(0, "bob", 21, 123), (1, "jim", 56, 45), (2, "fred", 100, 180)]
cursor.executemany(
"insert into bulkinsert (id, name, age, height) " "values (%s,%s,%s,%s)",
data,
)
self.assertEqual(
cursor._executed,
bytearray(
b"insert into bulkinsert (id, name, age, height) values "
b"(0,'bob',21,123),(1,'jim',56,45),(2,'fred',100,180)"
),
)
cursor.execute("commit")
self._verify_records(data)
def test_bulk_insert_multiline_statement(self):
conn = self.connect()
cursor = conn.cursor()
data = [(0, "bob", 21, 123), (1, "jim", 56, 45), (2, "fred", 100, 180)]
cursor.executemany(
"""insert
into bulkinsert (id, name,
age, height)
values (%s,
%s , %s,
%s )
""",
data,
)
self.assertEqual(
cursor._executed.strip(),
bytearray(
b"""insert
into bulkinsert (id, name,
age, height)
values (0,
'bob' , 21,
123 ),(1,
'jim' , 56,
45 ),(2,
'fred' , 100,
180 )"""
),
)
cursor.execute("commit")
self._verify_records(data)
def test_bulk_insert_single_record(self):
conn = self.connect()
cursor = conn.cursor()
data = [(0, "bob", 21, 123)]
cursor.executemany(
"insert into bulkinsert (id, name, age, height) " "values (%s,%s,%s,%s)",
data,
)
cursor.execute("commit")
self._verify_records(data)
def test_issue_288(self):
"""executemany should work with "insert ... on update" """
conn = self.connect()
cursor = conn.cursor()
data = [(0, "bob", 21, 123), (1, "jim", 56, 45), (2, "fred", 100, 180)]
cursor.executemany(
"""insert
into bulkinsert (id, name,
age, height)
values (%s,
%s , %s,
%s ) on duplicate key update
age = values(age)
""",
data,
)
self.assertEqual(
cursor._executed.strip(),
bytearray(
b"""insert
into bulkinsert (id, name,
age, height)
values (0,
'bob' , 21,
123 ),(1,
'jim' , 56,
45 ),(2,
'fred' , 100,
180 ) on duplicate key update
age = values(age)"""
),
)
cursor.execute("commit")
self._verify_records(data)
|
8bd19c783cf2196afad3657801691244fd15065d
|
0a3c2a81392328633e59a1020304c31b50acde62
|
/skmob/measures/tests/test_individual.py
|
548b65aac3d3666b781cf5f3acb73dd533ec7fc7
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-mobility/scikit-mobility
|
dcd350163263ade9455ad8c5e8ab64cd405b37cf
|
9433d05a4cf7f42144e2e92279098740521493e2
|
refs/heads/master
| 2023-08-13T10:54:04.719179
| 2023-01-20T17:43:20
| 2023-01-20T17:43:20
| 184,337,448
| 679
| 161
|
BSD-3-Clause
| 2023-07-02T03:30:10
| 2019-04-30T22:02:13
|
Python
|
UTF-8
|
Python
| false
| false
| 17,616
|
py
|
test_individual.py
|
from ...core.trajectorydataframe import TrajDataFrame
from ...utils import constants, gislib
from ...measures import individual
import numpy as np
import pandas as pd
import math
from collections import Counter
import operator
import pytest
earth_distance_km = gislib.getDistance
latitude = constants.LATITUDE
longitude = constants.LONGITUDE
date_time = constants.DATETIME
user_id = constants.UID
atol = 1e-12
def all_equal(a, b):
return np.allclose(a, b, rtol=0., atol=atol)
lats_lngs = np.array([[39.978253, 116.3272755],
[40.013819, 116.306532],
[39.878987, 116.1266865],
[40.013819, 116.306532],
[39.97958, 116.313649],
[39.978696, 116.3262205],
[39.98153775, 116.31079],
[39.978161, 116.3272425],
[38.978161, 115.3272425]])
traj = pd.DataFrame(lats_lngs, columns=[latitude, longitude])
traj[date_time ] = pd.to_datetime([
'20130101 8:34:04', '20130101 10:34:08', '20130105 10:34:08',
'20130110 12:34:15', '20130101 1:34:28', '20130101 3:34:54',
'20130101 4:34:55', '20130105 5:29:12', '20130115 00:29:12'])
traj[user_id] = [1 for _ in range(5)] + [2 for _ in range(3)] + [3]
etraj = pd.DataFrame([
[0, 0, 0, 1],
[1, 0, 1, 1],
[1, 1, 2, 1],
[0, 0, 3, 1],
[1, 0, 4, 1],
[2, 0, 5, 1],
[0, 0, 6, 1],
[1, 0, 7, 1],
[1, 1, 8, 1],
[0, 0, 9, 1],
[1, 0, 1, 2],
[1, 1, 2, 2],
[1, 0, 3, 2],
[1, 0, 0, 3]],
columns=[latitude, longitude, date_time, user_id])
all_users = [1, 2, 3]
@pytest.mark.parametrize('traj', [traj])
def test_radius_of_gyration(traj):
result = individual.radius_of_gyration(traj)
for user in all_users:
traj_1 = traj[traj[user_id] == user].sort_values(by=date_time)
lats_lngs_1 = list(map(tuple, traj_1[[latitude, longitude]].values))
cm = np.mean(lats_lngs_1, axis=0)
expected_result = np.mean(np.array([earth_distance_km(cm, lats_lngs_1[i]) ** 2.
for i in range(len(lats_lngs_1))])) ** 0.5
assert all_equal(result[result[user_id] == user]['radius_of_gyration'].values[0], expected_result)
@pytest.mark.parametrize('traj', [traj])
@pytest.mark.parametrize('k', [2, 3, 300])
def test_k_radius_of_gyration(traj, k):
result = individual.k_radius_of_gyration(traj, k=k)
for user in all_users:
traj_1 = traj[traj[user_id] == user].sort_values(by=date_time)
lats_lngs_0 = list(map(tuple, traj_1[[latitude, longitude]].values))
top_k = sorted(Counter(lats_lngs_0).items(), key=operator.itemgetter(1), reverse=True)[:k]
lats_lngs_1 = np.array([list(l[0]) for l in top_k for _ in range(l[1])])
cm = np.mean(lats_lngs_1, axis=0)
expected_result = np.mean(np.array([earth_distance_km(cm, lats_lngs_1[i]) ** 2.
for i in range(len(lats_lngs_1))])) ** 0.5
assert all_equal(result[result[user_id] == user][str(k)+'k_radius_of_gyration'].values[0], expected_result)
@pytest.mark.parametrize('traj', [etraj])
def test_random_entropy(traj):
result = individual.random_entropy(traj)
for user in all_users:
traj_1 = traj[traj[user_id] == user].sort_values(by=date_time)
lats_lngs_1 = np.unique(traj_1[[latitude, longitude]].values, axis=0)
expected_result = np.log2(len(lats_lngs_1))
assert all_equal(result[result[user_id] == user]['random_entropy'].values[0], expected_result)
@pytest.mark.parametrize('traj', [etraj])
def test_uncorrelated_entropy(traj):
result = individual.uncorrelated_entropy(traj)
for user in all_users:
traj_1 = traj[traj[user_id] == user].sort_values(by=date_time)
lats_lngs_0 = list(map(tuple, traj_1[[latitude, longitude]].values))
expected_result = individual.stats.entropy(list(Counter(lats_lngs_0).values()), base=2.)
# assert np.abs(individual.uncorrelated_entropy(traj)[user] - expected_result) < atol
assert all_equal(result[result[user_id] == user]['uncorrelated_entropy'].values[0], expected_result)
@pytest.mark.parametrize('traj', [etraj])
def test_real_entropy(traj):
result = individual.real_entropy(traj)
# entropy of 1
H1 = 1. / (1 + 1 + 1 + 3 + 2 + 1 + 5 + 4 + 3 + 2) * 10. * np.log2(10.)
# entropy of 2
H2 = 1. / (1 + 1 + 2) * 3. * np.log2(3.)
# entropy of 3
H3 = 1. / (1) * 10. * np.log2(1.)
expected_result = [H1, H2, H3]
for i, user in enumerate(all_users):
assert all_equal(result[result[user_id] == user]['real_entropy'].values[0], expected_result[i])
@pytest.mark.parametrize('traj', [traj])
def test_jump_lengths(traj):
result = individual.jump_lengths(traj)
for user in all_users:
traj_1 = traj[traj[user_id] == user].sort_values(by=date_time)
lats_lngs_1 = traj_1[[latitude, longitude]].values
expected_result = np.array([earth_distance_km(lats_lngs_1[i - 1], lats_lngs_1[i])
for i in range(1, len(lats_lngs_1))])
assert all_equal(result[result[user_id] == user]['jump_lengths'].values[0], expected_result)
class TestIndividualMetrics:
def setup_method(self):
latitude = constants.LATITUDE
longitude = constants.LONGITUDE
date_time = constants.DATETIME
user_id = constants.UID
lat_lons = np.array([[43.8430139, 10.5079940],
[43.5442700, 10.3261500],
[43.7085300, 10.4036000],
[43.7792500, 11.2462600],
[43.8430139, 10.5079940],
[43.7085300, 10.4036000],
[43.8430139, 10.5079940],
[43.5442700, 10.3261500],
[43.5442700, 10.3261500],
[43.7085300, 10.4036000],
[43.8430139, 10.5079940],
[43.7792500, 11.2462600],
[43.7085300, 10.4036000],
[43.5442700, 10.3261500],
[43.7792500, 11.2462600],
[43.7085300, 10.4036000],
[43.7792500, 11.2462600],
[43.8430139, 10.5079940],
[43.8430139, 10.5079940],
[43.5442700, 10.3261500]])
traj = pd.DataFrame(lat_lons, columns=[latitude, longitude])
traj[date_time] = pd.to_datetime([
'20110203 8:34:04', '20110203 9:34:04', '20110203 10:34:04', '20110204 10:34:04',
'20110203 8:34:04', '20110203 9:34:04', '20110204 10:34:04', '20110204 11:34:04',
'20110203 8:34:04', '20110203 9:34:04', '20110204 10:34:04', '20110204 11:34:04',
'20110204 10:34:04', '20110204 11:34:04', '20110204 12:34:04',
'20110204 10:34:04', '20110204 11:34:04', '20110205 12:34:04',
'20110204 10:34:04', '20110204 11:34:04'])
traj[user_id] = [1 for _ in range(4)] + [2 for _ in range(4)] + \
[3 for _ in range(4)] + [4 for _ in range(3)] + \
[5 for _ in range(3)] + [6 for _ in range(2)]
self.unique_users = [1,2,3,4,5,6]
self.traj = traj.sort_values([user_id, date_time])
self.trjdat = TrajDataFrame(traj, user_id=user_id)
def test_radius_of_gyration(self):
output = individual.radius_of_gyration(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid==1]['radius_of_gyration'].values[0], 31.964885737))
assert (math.isclose(output[output.uid == 2]['radius_of_gyration'].values[0], 14.988909726))
assert (math.isclose(output[output.uid == 3]['radius_of_gyration'].values[0], 31.964885737))
assert (math.isclose(output[output.uid == 4]['radius_of_gyration'].values[0], 35.241089869))
assert (math.isclose(output[output.uid == 5]['radius_of_gyration'].values[0], 30.727237693))
assert (math.isclose(output[output.uid == 6]['radius_of_gyration'].values[0], 18.146860183))
def test_k_radius_of_gyration(self):
output = individual.k_radius_of_gyration(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['2k_radius_of_gyration'].values[0], 18.14686018))
assert (math.isclose(output[output.uid == 2]['2k_radius_of_gyration'].values[0], 8.0811433))
assert (math.isclose(output[output.uid == 3]['2k_radius_of_gyration'].values[0], 9.64969996))
assert (math.isclose(output[output.uid == 4]['2k_radius_of_gyration'].values[0], 9.64969996))
assert (math.isclose(output[output.uid == 5]['2k_radius_of_gyration'].values[0], 34.07360735))
assert (math.isclose(output[output.uid == 6]['2k_radius_of_gyration'].values[0], 18.14686018))
output = individual.k_radius_of_gyration(self.trjdat, k=1)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['1k_radius_of_gyration'].values[0], 0))
assert (math.isclose(output[output.uid == 2]['1k_radius_of_gyration'].values[0], 0))
assert (math.isclose(output[output.uid == 3]['1k_radius_of_gyration'].values[0], 0))
assert (math.isclose(output[output.uid == 4]['1k_radius_of_gyration'].values[0], 0))
assert (math.isclose(output[output.uid == 5]['1k_radius_of_gyration'].values[0], 0))
assert (math.isclose(output[output.uid == 6]['1k_radius_of_gyration'].values[0], 0))
def test_random_entropy(self):
output = individual.random_entropy(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['random_entropy'].values[0], 2))
assert (math.isclose(output[output.uid == 2]['random_entropy'].values[0], 1.5849625))
assert (math.isclose(output[output.uid == 3]['random_entropy'].values[0], 2))
assert (math.isclose(output[output.uid == 4]['random_entropy'].values[0], 1.5849625))
assert (math.isclose(output[output.uid == 5]['random_entropy'].values[0], 1.5849625))
assert (math.isclose(output[output.uid == 6]['random_entropy'].values[0], 1))
def test_uncorrelated_entropy(self):
output = individual.uncorrelated_entropy(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['uncorrelated_entropy'].values[0], 2))
assert (math.isclose(output[output.uid == 2]['uncorrelated_entropy'].values[0], 1.5000000))
assert (math.isclose(output[output.uid == 3]['uncorrelated_entropy'].values[0], 2))
assert (math.isclose(output[output.uid == 4]['uncorrelated_entropy'].values[0], 1.5849625))
assert (math.isclose(output[output.uid == 5]['uncorrelated_entropy'].values[0], 1.5849625))
assert (math.isclose(output[output.uid == 6]['uncorrelated_entropy'].values[0], 1))
output = individual.uncorrelated_entropy(self.trjdat, normalize=True)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['norm_uncorrelated_entropy'].values[0], 1))
assert (math.isclose(output[output.uid == 2]['norm_uncorrelated_entropy'].values[0], 0.94639463))
assert (math.isclose(output[output.uid == 3]['norm_uncorrelated_entropy'].values[0], 1))
assert (math.isclose(output[output.uid == 4]['norm_uncorrelated_entropy'].values[0], 1))
assert (math.isclose(output[output.uid == 5]['norm_uncorrelated_entropy'].values[0], 1))
assert (math.isclose(output[output.uid == 6]['norm_uncorrelated_entropy'].values[0], 1))
def test_real_entropy(self):
output = individual.real_entropy(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['real_entropy'].values[0], 1.60000000))
assert (math.isclose(output[output.uid == 2]['real_entropy'].values[0], 1.14285714285))
assert (math.isclose(output[output.uid == 3]['real_entropy'].values[0], 1.60000000))
assert (math.isclose(output[output.uid == 4]['real_entropy'].values[0], 1.1887218755))
assert (math.isclose(output[output.uid == 5]['real_entropy'].values[0], 1.1887218755))
assert (math.isclose(output[output.uid == 6]['real_entropy'].values[0], 0.6666666666))
def test_maximum_distance(self):
output = individual.maximum_distance(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['maximum_distance'].values[0], 68.14698568))
assert (math.isclose(output[output.uid == 2]['maximum_distance'].values[0], 36.29370121))
assert (math.isclose(output[output.uid == 3]['maximum_distance'].values[0], 59.66188292))
assert (math.isclose(output[output.uid == 4]['maximum_distance'].values[0], 78.4910639))
assert (math.isclose(output[output.uid == 5]['maximum_distance'].values[0], 68.14698568))
assert (math.isclose(output[output.uid == 6]['maximum_distance'].values[0], 36.29370121))
def test_distance_straight_line(self):
output = individual.distance_straight_line(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['distance_straight_line'].values[0], 123.74008488))
assert (math.isclose(output[output.uid == 2]['distance_straight_line'].values[0], 70.57908362))
assert (math.isclose(output[output.uid == 3]['distance_straight_line'].values[0], 96.10397212))
assert (math.isclose(output[output.uid == 4]['distance_straight_line'].values[0], 97.79046189))
assert (math.isclose(output[output.uid == 5]['distance_straight_line'].values[0], 127.8088686))
assert (math.isclose(output[output.uid == 6]['distance_straight_line'].values[0], 36.29370121))
def test_number_of_location(self):
output = individual.number_of_locations(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (output[output.uid == 1]['number_of_locations'].values[0] == 4)
assert (output[output.uid == 2]['number_of_locations'].values[0] == 3)
assert (output[output.uid == 3]['number_of_locations'].values[0] == 4)
assert (output[output.uid == 4]['number_of_locations'].values[0] == 3)
assert (output[output.uid == 5]['number_of_locations'].values[0] == 3)
assert (output[output.uid == 6]['number_of_locations'].values[0] == 2)
def test_home_location(self):
output = individual.home_location(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (output[output.uid == 1]['lat'].values[0] == 43.544270)
assert (output[output.uid == 2]['lat'].values[0] == 43.8430139)
assert (output[output.uid == 3]['lat'].values[0] == 43.544270)
assert (output[output.uid == 4]['lat'].values[0] == 43.544270)
assert (output[output.uid == 5]['lat'].values[0] == 43.708530)
assert (output[output.uid == 6]['lat'].values[0] == 43.544270)
assert (output[output.uid == 1]['lng'].values[0] == 10.326150)
assert (output[output.uid == 2]['lng'].values[0] == 10.507994)
assert (output[output.uid == 3]['lng'].values[0] == 10.326150)
assert (output[output.uid == 4]['lng'].values[0] == 10.326150)
assert (output[output.uid == 5]['lng'].values[0] == 10.403600)
assert (output[output.uid == 6]['lng'].values[0] == 10.326150)
def test_max_distance_from_home(self):
output = individual.max_distance_from_home(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (math.isclose(output[output.uid == 1]['max_distance_from_home'].values[0], 78.4910639))
assert (math.isclose(output[output.uid == 2]['max_distance_from_home'].values[0], 36.29370121))
assert (math.isclose(output[output.uid == 3]['max_distance_from_home'].values[0], 78.4910639))
assert (math.isclose(output[output.uid == 4]['max_distance_from_home'].values[0], 78.4910639))
assert (math.isclose(output[output.uid == 5]['max_distance_from_home'].values[0], 68.14698568))
assert (math.isclose(output[output.uid == 6]['max_distance_from_home'].values[0], 36.29370121))
def test_number_of_visits(self):
output = individual.number_of_visits(self.trjdat)
assert (len(output) == 6)
assert (isinstance(output, pd.core.frame.DataFrame))
assert (output[output.uid == 1]['number_of_visits'].values[0] == 4)
assert (output[output.uid == 2]['number_of_visits'].values[0] == 4)
assert (output[output.uid == 3]['number_of_visits'].values[0] == 4)
assert (output[output.uid == 4]['number_of_visits'].values[0] == 3)
assert (output[output.uid == 5]['number_of_visits'].values[0] == 3)
assert (output[output.uid == 6]['number_of_visits'].values[0] == 2)
|
3b849b674f31ab326abcd61317187709d56fda43
|
da769d44cfb931914ff51c0f1f302b056837c388
|
/elpis/engines/__init__.py
|
e61417d8f48651a96da4d443d46342aa2a36a9d2
|
[
"Apache-2.0"
] |
permissive
|
CoEDL/elpis
|
d7ef5d8c5daf450df10ca57371291d953555eaa7
|
9a019483b4440a96f80486142fb53c7b95c8f983
|
refs/heads/master
| 2023-07-08T05:06:13.276450
| 2023-03-09T00:17:37
| 2023-03-09T00:17:37
| 154,595,187
| 142
| 40
|
Apache-2.0
| 2023-09-05T09:41:13
| 2018-10-25T02:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
__init__.py
|
from abc import ABC
from typing import Type
from elpis.engines.common.objects.interface import Interface
from elpis.engines.common.objects.model import Model
from elpis.engines.common.objects.transcription import Transcription
from elpis.engines.kaldi.objects.model import KaldiModel
from elpis.engines.kaldi.objects.transcription import KaldiTranscription
from elpis.engines.hft.objects.model import HFTModel
from elpis.engines.hft.objects.transcription import HFTTranscription
class Engine(ABC):
def __init__(self, model: Type[Model], transcription: Type[Transcription]):
self._model = model
self._transcription = transcription
@property
def model(self) -> Type[Model]:
return self._model
@property
def transcription(self) -> Type[Transcription]:
return self._transcription
def __str__(self):
return f"{type(self).__name__} {type(self.model)} {type(self.transcription)}"
class KaldiEngine(Engine):
def __init__(self):
super().__init__(KaldiModel, KaldiTranscription)
class HFTEngine(Engine):
def __init__(self):
super().__init__(HFTModel, HFTTranscription)
ENGINES = {
"kaldi": KaldiEngine(),
"hft": HFTEngine(),
}
|
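The registry above maps an engine name to the pair of classes that implement it. A minimal lookup sketch, assuming only the names defined in this file (the constructor arguments of KaldiModel/HFTModel are project-specific and not shown here, so instantiation is left out):

from elpis.engines import ENGINES

engine = ENGINES["hft"]                    # or ENGINES["kaldi"]
ModelCls = engine.model                    # HFTModel
TranscriptionCls = engine.transcription    # HFTTranscription
# Constructing these classes needs project-specific arguments that are not
# part of this file, so the sketch stops at resolving the classes.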
29a51fe8647264764cc64f1250607f5132167034
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/examples/appearance/examples_hke.py
|
392c377a780047ead3b3837efd941e75b1737b34
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,225
|
py
|
examples_hke.py
|
"""Showcases Helmholtz—Kohlrausch effect estimation computations."""
import colour
from colour.plotting import colour_style, plot_multi_colour_swatches
from colour.utilities import message_box
wp = colour.xy_to_Luv_uv([0.31271, 0.32902])
average_luminance = 0.14
swatches = [
[0.45079660, 0.52288689],
[0.19124902, 0.55444488],
[0.13128455, 0.51210591],
[0.14889223, 0.37091478],
[0.28992574, 0.30964533],
]
swatches_XYZ = []
for patch in swatches:
in_XYZ = colour.Luv_to_XYZ(colour.uv_to_Luv(patch))
swatches_XYZ.append(in_XYZ * (average_luminance / in_XYZ[1]))
# Adapting Luminance, 250 cd/m^2 represents a typical modern computer
# display peak luminance.
L_a = 250 * average_luminance
bg_grey = colour.xy_to_XYZ(colour.Luv_uv_to_xy(wp)) * average_luminance
swatches_normal = []
swatches_VCC = []
swatches_VAC = []
for i in range(len(swatches)):
VCC = colour.HelmholtzKohlrausch_effect_luminous_Nayatani1997(
swatches[i], wp, L_a, method="VCC"
)
VAC = colour.HelmholtzKohlrausch_effect_luminous_Nayatani1997(
swatches[i], wp, L_a, method="VAC"
)
swatches_normal.append(colour.XYZ_to_sRGB(bg_grey))
swatches_normal.append(colour.XYZ_to_sRGB(swatches_XYZ[i]))
swatches_VCC.append(colour.XYZ_to_sRGB(bg_grey))
swatches_VCC.append(colour.XYZ_to_sRGB(swatches_XYZ[i] / VCC))
swatches_VAC.append(colour.XYZ_to_sRGB(bg_grey * VAC))
swatches_VAC.append(colour.XYZ_to_sRGB(swatches_XYZ[i]))
colour_style()
message_box(
"Plotting swatches with the same luminance (Y).\n"
"The Helmholtz—Kohlrausch effect will be very noticeable."
)
plot_multi_colour_swatches(swatches_normal, compare_swatches="stacked")
message_box(
"Plotting HKE-compensated swatches with VCC method.\n"
"The Helmholtz—Kohlrausch effect has been compensated using VCC"
"(variable chromatic colour) method."
)
plot_multi_colour_swatches(swatches_VCC, compare_swatches="stacked")
message_box(
"Plotting HKE-compensated swatches with VAC method.\n"
"The Helmholtz—Kohlrausch effect has been compensated for using VAC"
"(variable achromatic colour) method."
)
plot_multi_colour_swatches(swatches_VAC, compare_swatches="stacked")
|
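For a single swatch, the compensation logic in the script reduces to: compute the VCC factor for the swatch's uv coordinates against the white point, then divide the swatch's XYZ by that factor so its perceived brightness matches the neutral background. A minimal sketch reusing only the API calls and values already shown above:

import colour

wp = colour.xy_to_Luv_uv([0.31271, 0.32902])
uv = [0.45079660, 0.52288689]
average_luminance = 0.14
L_a = 250 * average_luminance
vcc = colour.HelmholtzKohlrausch_effect_luminous_Nayatani1997(uv, wp, L_a, method="VCC")
XYZ = colour.Luv_to_XYZ(colour.uv_to_Luv(uv))
XYZ = XYZ * (average_luminance / XYZ[1])    # normalise to the background luminance
XYZ_compensated = XYZ / vcc                 # dimmer by the VCC factor
print(vcc, XYZ_compensated)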
fca61a03d4778c5ae0426866df4cfeb99a975440
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/server/grr_response_server/databases/db_signed_binaries_test.py
|
83c1983e9c904f63679a5f9ada2a58285564258b
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,325
|
py
|
db_signed_binaries_test.py
|
#!/usr/bin/env python
"""Tests for signed-binary DB functionality."""
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
_test_id1 = rdf_objects.SignedBinaryID(
binary_type=rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE,
path="linux/test/hello")
_test_id2 = rdf_objects.SignedBinaryID(
binary_type=rdf_objects.SignedBinaryID.BinaryType.PYTHON_HACK,
path="windows/test/hello")
_test_references1 = rdf_objects.BlobReferences(items=[
rdf_objects.BlobReference(offset=0, size=2, blob_id=b"\xaa" * 32),
rdf_objects.BlobReference(offset=2, size=3, blob_id=b"\xbb" * 32),
])
_test_references2 = rdf_objects.BlobReferences(items=[
rdf_objects.BlobReference(offset=0, size=3, blob_id=b"\xcc" * 32),
rdf_objects.BlobReference(offset=3, size=2, blob_id=b"\xdd" * 32),
])
class DatabaseTestSignedBinariesMixin(object):
"""Mixin that adds tests for signed binary DB functionality."""
def testReadSignedBinaryReferences(self):
self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
stored_hash_id, stored_timestamp = self.db.ReadSignedBinaryReferences(
_test_id1)
self.assertEqual(stored_hash_id, _test_references1)
self.assertGreater(stored_timestamp.AsMicrosecondsSinceEpoch(), 0)
def testUpdateSignedBinaryReferences(self):
self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
stored_references1, timestamp1 = self.db.ReadSignedBinaryReferences(
_test_id1)
self.assertEqual(stored_references1, _test_references1)
self.db.WriteSignedBinaryReferences(_test_id1, _test_references2)
stored_references2, timestamp2 = self.db.ReadSignedBinaryReferences(
_test_id1)
self.assertEqual(stored_references2, _test_references2)
self.assertGreater(timestamp2, timestamp1)
def testUnknownSignedBinary(self):
with self.assertRaises(db.UnknownSignedBinaryError):
self.db.ReadSignedBinaryReferences(_test_id1)
def testReadIDsForAllSignedBinaries(self):
self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
self.db.WriteSignedBinaryReferences(_test_id2, _test_references2)
self.assertCountEqual(self.db.ReadIDsForAllSignedBinaries(),
[_test_id1, _test_id2])
def testDeleteSignedBinaryReferences(self):
self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
self.assertNotEmpty(self.db.ReadIDsForAllSignedBinaries())
self.db.DeleteSignedBinaryReferences(_test_id1)
self.assertEmpty(self.db.ReadIDsForAllSignedBinaries())
# Trying to delete again shouldn't raise.
self.db.DeleteSignedBinaryReferences(_test_id1)
def testWriteAndReadLongUnicodePath(self):
test_id = rdf_objects.SignedBinaryID(
binary_type=rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE,
path="linux/" + "🚀" * 1000 + "/hello")
self.db.WriteSignedBinaryReferences(test_id, _test_references1)
stored_hash_id, stored_timestamp = self.db.ReadSignedBinaryReferences(
test_id)
self.assertEqual(stored_hash_id, _test_references1)
self.assertGreater(stored_timestamp.AsMicrosecondsSinceEpoch(), 0)
self.assertEqual(self.db.ReadIDsForAllSignedBinaries(), [test_id])
# This file is a test library and thus does not require a __main__ block.
|
5722dfb22d4abd50af03f73be475d2dc90a713f0
|
bffbde8cc7a544f1b5d6c1bc4b84ca607226e134
|
/benchmarks/engine/utils/imagenet_utils/__init__.py
|
24340fca4777d63c4c6e97b994a17f42cd3cc884
|
[
"MIT"
] |
permissive
|
VainF/Torch-Pruning
|
c006d274e69c5c592ca1e302a70f6603504b8e07
|
e2478a72022c96af3b9053da359a726939e1adaf
|
refs/heads/master
| 2023-09-05T02:38:36.804176
| 2023-09-04T11:26:29
| 2023-09-04T11:26:29
| 228,203,350
| 1,606
| 231
|
MIT
| 2023-09-06T16:45:28
| 2019-12-15T15:07:24
|
Python
|
UTF-8
|
Python
| false
| false
| 49
|
py
|
__init__.py
|
from . import presets, sampler, transforms, utils
|
dcf15c81af2b9cabd6eca2b7b08819ecb287941a
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/jinja2/visitor.py
|
17c6aaba570742652f70bf1e7bf1a576c9d256ae
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
visitor.py
|
"""API for traversing the AST nodes. Implemented by the compiler and
meta introspection.
"""
import typing as t
from .nodes import Node
if t.TYPE_CHECKING:
import typing_extensions as te
class VisitCallable(te.Protocol):
def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
...
class NodeVisitor:
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
return getattr(self, f"visit_{type(node).__name__}", None)
def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Called if no explicit visitor function exists for a node."""
for child_node in node.iter_child_nodes():
self.visit(child_node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
return [rv]
return rv
|
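The dispatch rule described in the NodeVisitor docstring ("visit_" plus the node class name, falling back to generic_visit) is easiest to see with a small subclass. A sketch, assuming the standard jinja2 package layout so that this module is importable as jinja2.visitor:

from jinja2 import Environment
from jinja2.visitor import NodeVisitor

class NameCollector(NodeVisitor):
    """Collect every variable name referenced in a template's AST."""
    def __init__(self):
        self.names = []
    def visit_Name(self, node):       # called for every nodes.Name in the tree
        self.names.append(node.name)
        self.generic_visit(node)      # keep walking the children
ast = Environment().parse("Hello {{ user.name }}, you have {{ count }} items.")
collector = NameCollector()
collector.visit(ast)
print(collector.names)                # ['user', 'count']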
aeecc9f3ba8317bd6088a0e1400af3c605b16405
|
51b38967e871e1d8bbdf170fbcb9bd7a4c48eda3
|
/python/RawNet1/Keras/01-trn_RawNet.py
|
fcaa82c3d5f4af8dc2914e42ec768ac82d3563c6
|
[
"MIT"
] |
permissive
|
Jungjee/RawNet
|
e9c97ac67aeb3d6f756d7b3bfc657149776dfd60
|
a49fc21942dd3414ac2100f26f5c208379f90adc
|
refs/heads/master
| 2022-06-25T09:12:44.572839
| 2022-06-22T08:53:01
| 2022-06-22T08:53:01
| 176,226,132
| 298
| 64
|
MIT
| 2022-06-22T08:52:55
| 2019-03-18T07:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 14,745
|
py
|
01-trn_RawNet.py
|
import os
import numpy as np
np.random.seed(1016)
import yaml
import queue
import struct
import pickle as pk
from multiprocessing import Process
from threading import Thread
from tqdm import tqdm
from time import sleep
from keras.utils import multi_gpu_model, plot_model, to_categorical
from keras.optimizers import *
from keras.models import Model
from sklearn.metrics import roc_curve
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from model_RawNet_pre_train import get_model as get_model_pretrn
from model_RawNet import get_model
def cos_sim(a,b):
return np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))
def simple_loss(y_true, y_pred):
return K.mean(y_pred)
def zero_loss(y_true, y_pred):
return 0.5 * K.sum(y_pred, axis=0)
def compose_spkFeat_dic(lines, model, f_desc_dic, base_dir):
'''
Extracts speaker embeddings from a given model
=====
lines: (list) A list of strings that indicate each utterance
model: (keras model) DNN that extracts speaker embeddings,
            output layer should be removed (model_pred)
f_desc_dic: (dictionary) A dictionary of file objects
'''
dic_spkFeat = {}
for line in tqdm(lines, desc='extracting spk feats'):
k, f, p = line.strip().split(' ')
p = int(p)
if f not in f_desc_dic:
f_tmp = '/'.join([base_dir, f])
f_desc_dic[f] = open(f_tmp, 'rb')
f_desc_dic[f].seek(p)
l = struct.unpack('i', f_desc_dic[f].read(4))[0]# number of samples of each utterance
utt = np.asarray(struct.unpack('%df'%l, f_desc_dic[f].read(l * 4)), dtype=np.float32)# read binary utterance
spkFeat = model.predict(utt.reshape(1,-1,1))[0]# extract speaker embedding from utt
dic_spkFeat[k] = spkFeat
return dic_spkFeat
def make_spkdic(lines):
'''
Returns a dictionary where
key: (str) speaker name
value: (int) unique integer for each speaker
'''
idx = 0
dic_spk = {}
list_spk = []
for line in lines:
k, f, p = line.strip().split(' ')
spk = k.split('/')[0]
if spk not in dic_spk:
dic_spk[spk] = idx
list_spk.append(spk)
idx += 1
return (dic_spk, list_spk)
def compose_batch(lines, f_desc_dic, dic_spk, nb_samp, base_dir):
'''
Compose one mini-batch using utterances in `lines'
nb_samp: (int) duration of utterance at train phase.
Fixed for each mini-batch for mini-batch training.
'''
batch = []
ans = []
for line in lines:
k, f, p = line.strip().split(' ')
ans.append(dic_spk[k.split('/')[0]])
p = int(p)
if f not in f_desc_dic:
f_tmp = '/'.join([base_dir, f])
f_desc_dic[f] = open(f_tmp, 'rb')
f_desc_dic[f].seek(p)
l = struct.unpack('i', f_desc_dic[f].read(4))[0]
utt = struct.unpack('%df'%l, f_desc_dic[f].read(l * 4))
_nb_samp = len(utt)
#need to verify this part later!!!!!!
assert _nb_samp >= nb_samp
cut = np.random.randint(low = 0, high = _nb_samp - nb_samp)
utt = utt[cut:cut+nb_samp]
batch.append(utt)
return (np.asarray(batch, dtype=np.float32).reshape(len(lines), -1, 1), np.asarray(ans))
def process_epoch(lines, q, batch_size, nb_samp, dic_spk, base_dir):
'''
Wrapper function for processing mini-batches for the train set once.
'''
f_desc_dic = {}
nb_batch = int(len(lines) / batch_size)
for i in range(nb_batch):
while True:
if q.full():
sleep(0.1)
else:
q.put(compose_batch(lines = lines[i*batch_size: (i+1)*batch_size],
f_desc_dic = f_desc_dic,
dic_spk = dic_spk,
nb_samp = nb_samp,
base_dir = base_dir))
break
for k in f_desc_dic.keys():
f_desc_dic[k].close()
return
#======================================================================#
#======================================================================#
if __name__ == '__main__':
#======================================================================#
#==Yaml load===========================================================#
#======================================================================#
_abspath = os.path.abspath(__file__)
dir_yaml = os.path.splitext(_abspath)[0] + '.yaml'
with open(dir_yaml, 'r') as f_yaml:
        parser = yaml.load(f_yaml, Loader=yaml.FullLoader)
dir_dev_scp = parser['dev_scp']
with open(dir_dev_scp, 'r') as f_dev_scp:
dev_lines = f_dev_scp.readlines()
dic_spk, list_spk = make_spkdic(dev_lines)
parser['model']['nb_spk'] = len(list_spk)
print('# spk: ', len(list_spk))
parser['model']['batch_size'] = int(parser['batch_size'] / parser['nb_gpu'])
assert parser['batch_size'] % parser['nb_gpu'] == 0
#select utterances for validation; speaker with 'B'
val_lines = []
for l in dev_lines:
if l[0] == 'B':
val_lines.append(l)
eval_lines = open(parser['eval_scp'], 'r').readlines()
trials = open(parser['trials'], 'r').readlines()
val_trials = open(parser['val_trials'], 'r').readlines()
nb_batch = int(len(dev_lines) / parser['batch_size'])
global q
q = queue.Queue(maxsize=1000)
dummy_y = np.zeros((parser['batch_size'], 1))
#======================================================================#
#==Pre-train===========================================================#
#======================================================================#
model, m_name = get_model_pretrn(argDic = parser['model'])
model_pred = Model(inputs=model.get_layer('input_pretrn').input, outputs=model.get_layer('code_pretrn').output)
save_dir = parser['save_dir'] + m_name + '_' + parser['name'] + '/'
#make folders
if not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(save_dir + 'summary_pretrn.txt' ,'w+') as f_summary:
model.summary(print_fn=lambda x: f_summary.write(x + '\n'))
f_params = open(save_dir + 'f_params.txt', 'w')
for k, v in parser.items():
print(k, v)
f_params.write('{}:\t{}\n'.format(k, v))
f_params.write('DNN model params\n')
for k, v in parser['model'].items():
f_params.write('{}:\t{}\n'.format(k, v))
print(m_name)
f_params.write('model_name: %s\n'%m_name)
f_params.close()
'''#uncomment to save model architecture in json
model_json = model.to_json()
with open(save_dir + 'arc.json', 'w') as f_json:
f_json.write(model_json)
'''
if not os.path.exists(save_dir + 'results_pretrn/'):
os.makedirs(save_dir + 'results_pretrn/')
if not os.path.exists(save_dir + 'models_pretrn/'):
os.makedirs(save_dir + 'models_pretrn/')
f_eer = open(save_dir + 'eers_pretrn.txt', 'w', buffering=1)
    #uncomment for model graph visualization (needs extra libraries)
#plot_model(model, to_file=parser['save_dir'] +'visualization.png', show_shapes=True)
optimizer = eval(parser['optimizer'])(lr=parser['lr'], decay = 0.0, amsgrad = bool(parser['amsgrad']))
if bool(parser['mg']):
model_mg = multi_gpu_model(model, gpus=parser['nb_gpu'])
model_mg.compile(optimizer = optimizer,
loss = {'s_bs_loss':simple_loss,
'c_loss':zero_loss},
loss_weights = {'s_bs_loss':1, 'c_loss':parser['c_lambda']},
metrics=['accuracy'])
model.compile(optimizer = optimizer,
loss = {'s_bs_loss':simple_loss,
'c_loss':zero_loss},
loss_weights = {'s_bs_loss':1, 'c_loss': parser['c_lambda']},
metrics=['accuracy'])
best_val_eer = 99.
for epoch in tqdm(range(parser['epoch'])):
np.random.shuffle(dev_lines)
p = Thread(target = process_epoch, args = (dev_lines,
q,
parser['batch_size'],
parser['nb_samp'],
dic_spk,
parser['base_dir']))
p.start()
#train one epoch!
loss = 999.
loss1 = 999.
loss2 = 999.
pbar = tqdm(range(nb_batch))
for b in pbar:
pbar.set_description('epoch: %d, loss: %.3f, loss_s+bs: %.3f, loss_c: %.3f'%(epoch, loss, loss1, loss2))
while True:
if q.empty():
sleep(0.1)
else:
x, y = q.get()
y = to_categorical(y, num_classes=parser['model']['nb_spk'])
if bool(parser['mg']):
loss, loss1, loss2, acc1, acc2 = model_mg.train_on_batch([x, y], [dummy_y, dummy_y])
else:
loss, loss1, loss2, acc1, acc2 = model.train_on_batch([x, y], [dummy_y, dummy_y])
break
p.join()
#validate!
dic_val = compose_spkFeat_dic(lines = val_lines,
model = model_pred,
f_desc_dic = {},
base_dir = parser['base_dir'])
y = []
y_score = []
for smpl in val_trials:
target, spkMd, utt = smpl.strip().split(' ')
target = int(target)
cos_score = cos_sim(dic_val[spkMd], dic_val[utt])
y.append(target)
y_score.append(cos_score)
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
print('\nepoch: %d, val_eer: %f'%(int(epoch), eer))
f_eer.write('%d %f '%(epoch, eer))
if float(eer) < best_val_eer:
best_val_eer = float(eer)
model.save_weights(save_dir + 'models_pretrn/best_model_on_validation.h5')
#evaluate!
dic_eval = compose_spkFeat_dic(lines = eval_lines,
model = model_pred,
f_desc_dic = {},
base_dir = parser['base_dir'])
f_res = open(save_dir + 'results_pretrn/epoch%s.txt'%(epoch), 'w')
y = []
y_score = []
for smpl in trials:
target, spkMd, utt = smpl.strip().split(' ')
target = int(target)
cos_score = cos_sim(dic_eval[spkMd], dic_eval[utt])
y.append(target)
y_score.append(cos_score)
f_res.write('{score} {target}\n'.format(score=cos_score,target=target))
f_res.close()
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
'''
#prints threshold
#thresh = interp1d(fpr, thresholds)(eer)
print(thresh)
'''
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
print('\nepoch: %d, eer: %f'%(int(epoch), eer))
f_eer.write('%f\n'%(eer))
if not bool(parser['save_best_only']):
model.save_weights(save_dir + 'models_pretrn/%d-%.4f.h5'%(epoch, eer))
f_eer.close()
#======================================================================#
#==Train RawNet========================================================#
#======================================================================#
model, m_name = get_model(argDic = parser['model'])
model_pred = Model(inputs=model.get_layer('input_RawNet').input, outputs=model.get_layer('code_RawNet').output)
model.load_weights(save_dir+'models_pretrn/best_model_on_validation.h5', by_name = True)
with open(save_dir + 'summary_RawNet.txt' ,'w+') as f_summary:
model.summary(print_fn=lambda x: f_summary.write(x + '\n'))
if not os.path.exists(save_dir + 'results_RawNet/'):
os.makedirs(save_dir + 'results_RawNet/')
if not os.path.exists(save_dir + 'models_RawNet/'):
os.makedirs(save_dir + 'models_RawNet/')
f_eer = open(save_dir + 'eers_RawNet.txt', 'w', buffering=1)
optimizer = eval(parser['optimizer'])(lr=parser['lr'], decay = parser['opt_decay'], amsgrad = bool(parser['amsgrad']))
parser['c_lambda'] = parser['c_lambda'] * 0.01
if bool(parser['mg']):
model_mg = multi_gpu_model(model, gpus=parser['nb_gpu'])
model_mg.compile(optimizer = optimizer,
loss = {'gru_s_bs_loss':simple_loss,
'gru_c_loss':zero_loss},
loss_weights = {'gru_s_bs_loss':1, 'gru_c_loss':parser['c_lambda']},
metrics=['accuracy'])
model.compile(optimizer = optimizer,
loss = {'gru_s_bs_loss':simple_loss,
'gru_c_loss':zero_loss},
loss_weights = {'gru_s_bs_loss':1, 'gru_c_loss': parser['c_lambda']},
metrics=['accuracy'])
best_val_eer = 99.
for epoch in tqdm(range(parser['epoch'])):
np.random.shuffle(dev_lines)
p = Thread(target = process_epoch, args = (dev_lines,
q,
parser['batch_size'],
parser['nb_samp'],
dic_spk,
parser['base_dir']))
p.start()
#train one epoch!
loss = 999.
loss1 = 999.
loss2 = 999.
pbar = tqdm(range(nb_batch))
for b in pbar:
pbar.set_description('epoch: %d, loss: %.3f, loss_s+bs: %.3f, loss_c: %.3f'%(epoch, loss, loss1, loss2))
while True:
if q.empty():
sleep(0.1)
else:
x, y = q.get()
y = to_categorical(y, num_classes=parser['model']['nb_spk'])
if bool(parser['mg']):
loss, loss1, loss2, acc1, acc2 = model_mg.train_on_batch([x, y], [dummy_y, dummy_y])
else:
loss, loss1, loss2, acc1, acc2 = model.train_on_batch([x, y], [dummy_y, dummy_y])
break
p.join()
#validate!
dic_val = compose_spkFeat_dic(lines = val_lines,
model = model_pred,
f_desc_dic = {},
base_dir = parser['base_dir'])
y = []
y_score = []
for smpl in val_trials:
target, spkMd, utt = smpl.strip().split(' ')
target = int(target)
cos_score = cos_sim(dic_val[spkMd], dic_val[utt])
y.append(target)
y_score.append(cos_score)
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
print('\nepoch: %d, val_eer: %f'%(int(epoch), eer))
f_eer.write('%d %f '%(epoch, eer))
if float(eer) < best_val_eer:
best_val_eer = float(eer)
model.save_weights(save_dir + 'models_RawNet/best_model_on_validation.h5')
#evaluate!
dic_eval = compose_spkFeat_dic(lines = eval_lines,
model = model_pred,
f_desc_dic = {},
base_dir = parser['base_dir'])
f_res = open(save_dir + 'results_RawNet/epoch%s.txt'%(epoch), 'w')
y = []
y_score = []
for smpl in trials:
target, spkMd, utt = smpl.strip().split(' ')
target = int(target)
cos_score = cos_sim(dic_eval[spkMd], dic_eval[utt])
y.append(target)
y_score.append(cos_score)
f_res.write('{score} {target}\n'.format(score=cos_score,target=target))
f_res.close()
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
'''
#prints threshold
#thresh = interp1d(fpr, thresholds)(eer)
print(thresh)
'''
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
print('\nepoch: %d, eer: %f'%(int(epoch), eer))
f_eer.write('%f\n'%(eer))
if not bool(parser['save_best_only']):
model.save_weights(save_dir + 'models_RawNet/%d-%.4f.h5'%(epoch, eer))
f_eer.close()
#======================================================================#
#==Extract RawNet Embeddings===========================================#
#======================================================================#
model.load_weights(save_dir + 'models_RawNet/best_model_on_validation.h5')
if not os.path.exists(parser['gru_embeddings']):
os.makedirs(parser['gru_embeddings'])
print('Extracting Embeddings from GRU model: dev set')
dev_dic_embeddings = compose_spkFeat_dic(lines = dev_lines,
model = model_pred,
f_desc_dic = {},
base_dir = parser['base_dir'])
print('Extracting Embeddings from GRU model: eval set')
eval_dic_embeddings = compose_spkFeat_dic(lines = eval_lines,
model = model_pred,
f_desc_dic = {},
base_dir = parser['base_dir'])
f_embeddings = open(parser['gru_embeddings'] + 'speaker_embeddings_RawNet', 'wb')
pk.dump({'dev_dic_embeddings': dev_dic_embeddings, 'eval_dic_embeddings': eval_dic_embeddings},
f_embeddings,
protocol = pk.HIGHEST_PROTOCOL)
|
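The EER line used twice in the script above (brentq over 1 - x - interp1d(fpr, tpr)(x)) finds the ROC operating point where the false-accept rate equals the false-reject rate. A self-contained sketch with hypothetical toy trial scores, using the same library calls as the script:

from sklearn.metrics import roc_curve
from scipy.optimize import brentq
from scipy.interpolate import interp1d

def compute_eer(labels, scores):
    # EER: the point x on the ROC curve where FPR(x) == 1 - TPR(x).
    fpr, tpr, _ = roc_curve(labels, scores, pos_label=1)
    return brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)

labels = [0, 1, 1, 1, 0, 1, 0, 0]                    # 1 = target trial, 0 = non-target
scores = [0.95, 0.9, 0.8, 0.7, 0.65, 0.6, 0.3, 0.2]  # toy cosine scores
print(compute_eer(labels, scores))                   # equal error rate in [0, 1]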
459502d6a2a402385efa0b56791367e1cd0af205
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/cpmpy/volsay3.py
|
ce064f5f75ec6b6216a2c086f4f8a39c84b4a717
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
volsay3.py
|
"""
Volsay problem in cpmpy.
From the OPL model volsay.mod
'''
Consider a Belgian company Volsay, which specializes in producing ammoniac gas
(NH3) and ammonium chloride (NH4Cl). Volsay has at its disposal 50 units of
nitrogen (N), 180 units of hydrogen (H), and 40 units of chlorine (Cl). The company
makes a profit of 40 Euros for each sale of an ammoniac gas unit and 50 Euros
for each sale of an ammonium chloride unit. Volsay would like a production plan
maximizing its profits given its available stocks.
'''
This cpmpy model was written by Hakan Kjellerstrand (hakank@gmail.com)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def volsay3(obj_value=None):
# data
num_products = 2
Gas, Chloride = range(num_products)
products = ['Gas', 'Chloride']
num_components = 3
Nitrogen, Hydrogen, Chlorine = range(num_components)
components = ['Nitrogen', 'Hydrogen', 'Chlorine']
# Demands of Component per Product
Demand = [[1, 3, 0],
[1, 4, 1]]
Profit = [30, 40] # per product
Stock = [50, 180, 40] # per component
# declare variables
Production = intvar(0,10000,shape=num_products)
obj = intvar(0, 10000,name="obj")
if obj_value == None:
model = Model(maximize=obj)
else:
model = Model(obj==obj_value)
#
# constraints
#
for c in range(num_components):
model += (sum([Demand[p][c]* Production[p] for p in range(num_products)]) <= Stock[c])
    # define the objective (total profit, to be maximized)
model += (obj == sum([Profit[p]*Production[p] for p in range(num_products)]))
def print_sol():
print("obj:", obj.value())
for i in range(num_products):
print(products[i], '=', Production[i].value())
print()
ss = CPM_ortools(model)
if obj_value == None:
if ss.solve():
print_sol()
return obj.value()
else:
num_solutions = ss.solveAll(display=print_sol)
print("num_solutions:",num_solutions)
obj = volsay3(None)
print("\nAll obtimal solutions:")
volsay3(obj)
|
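In symbols, the model above is the small integer program

\max \sum_{p} \mathrm{Profit}_p \, x_p
\quad \text{subject to} \quad
\sum_{p} \mathrm{Demand}_{p,c} \, x_p \le \mathrm{Stock}_c \ \ \text{for each component } c,
\qquad x_p \ge 0 \text{ and integer},

which is exactly what the two `model +=` lines encode: the stock constraints per component, and the definition of `obj` as total profit.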
42b14b3f9d1a30b8bb3b87cc96210636f4e05b15
|
3f5f778f973e229037007b1eb00e5171cbe2560e
|
/examples/statisticslearning/gaussian_statistics_learning_DrawFromPrior_reload_NNs.py
|
3363ade6664728647f5fad86edb715188a8fa5f5
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] |
permissive
|
eth-cscs/abcpy
|
9f568b66f66ebc0b835c837dac481c9c2ef199fe
|
caf0fd899424da69c0ef0bcd499696c5a077cdb1
|
refs/heads/master
| 2023-03-16T12:34:34.650734
| 2023-03-13T16:07:26
| 2023-03-13T16:07:26
| 79,544,398
| 107
| 43
|
BSD-3-Clause-Clear
| 2023-08-16T11:04:51
| 2017-01-20T09:15:25
|
Python
|
UTF-8
|
Python
| false
| false
| 7,971
|
py
|
gaussian_statistics_learning_DrawFromPrior_reload_NNs.py
|
import logging
import numpy as np
def infer_parameters(steps=2, n_sample=50, n_samples_per_param=1, logging_level=logging.WARN):
"""Perform inference for this example.
Parameters
----------
    steps : integer, optional
        Number of iterations in the sequential PMCABC algorithm ("generations"). The default value is 2.
    n_sample : integer, optional
        Number of posterior samples to generate. The default value is 50.
    n_samples_per_param : integer, optional
        Number of data points in each simulated data set. The default value is 1.
Returns
-------
abcpy.output.Journal
A journal containing simulation results, metadata and optionally intermediate results.
"""
logging.basicConfig(level=logging_level)
# define backend
# Note, the dummy backend does not parallelize the code!
from abcpy.backends import BackendDummy as Backend
backend = Backend()
# define observation for true parameters mean=170, std=15
height_obs = [160.82499176, 167.24266737, 185.71695756, 153.7045709, 163.40568812, 140.70658699, 169.59102084,
172.81041696, 187.38782738, 179.66358934, 176.63417241, 189.16082803, 181.98288443, 170.18565017,
183.78493886, 166.58387299, 161.9521899, 155.69213073, 156.17867343, 144.51580379, 170.29847515,
197.96767899, 153.36646527, 162.22710198, 158.70012047, 178.53470703, 170.77697743, 164.31392633,
165.88595994, 177.38083686, 146.67058471763457, 179.41946565658628, 238.02751620619537,
206.22458790620766, 220.89530574344568, 221.04082532837026, 142.25301427453394, 261.37656571434275,
171.63761180867033, 210.28121820385866, 237.29130237612236, 175.75558340169619, 224.54340549862235,
197.42448680731226, 165.88273684581381, 166.55094082844519, 229.54308602661584, 222.99844054358519,
185.30223966014586, 152.69149367593846, 206.94372818527413, 256.35498655339154, 165.43140916577741,
250.19273595481803, 148.87781549665536, 223.05547559193792, 230.03418198709608, 146.13611923127021,
138.24716809523139, 179.26755740864527, 141.21704876815426, 170.89587081800852, 222.96391329259626,
188.27229523693822, 202.67075179617672, 211.75963110985992, 217.45423324370509]
# define prior
from abcpy.continuousmodels import Uniform
mu = Uniform([[150], [200]], name="mu")
sigma = Uniform([[5], [25]], name="sigma")
# define the model
from abcpy.continuousmodels import Normal
height = Normal([mu, sigma], )
# 1) generate simulations from prior
from abcpy.inferences import DrawFromPrior
draw_from_prior = DrawFromPrior([height], backend=backend)
# notice the use of the `.sample_par_sim_pairs` method rather than `.sample` to obtain data suitably formatted
# for the summary statistics learning routines
parameters, simulations = draw_from_prior.sample_par_sim_pairs(100, n_samples_per_param=1)
# if you want to use the test loss to do early stopping in the training:
parameters_val, simulations_val = draw_from_prior.sample_par_sim_pairs(100, n_samples_per_param=1)
    # discard the mid dimension (n_samples_per_param), which the StatisticsLearning classes assume to be 1
simulations = simulations.reshape(simulations.shape[0], simulations.shape[2])
simulations_val = simulations_val.reshape(simulations_val.shape[0], simulations_val.shape[2])
# 2) now train the NNs with the different methods with the generated data
from abcpy.statistics import Identity
identity = Identity() # to apply before computing the statistics
logging.info("semiNN")
from abcpy.statisticslearning import SemiautomaticNN, TripletDistanceLearning
semiNN = SemiautomaticNN([height], identity, backend=backend, parameters=parameters,
simulations=simulations, parameters_val=parameters_val, simulations_val=simulations_val,
early_stopping=True, # early stopping
seed=1, n_epochs=10, scale_samples=False, use_tqdm=False)
logging.info("triplet")
triplet = TripletDistanceLearning([height], identity, backend=backend, parameters=parameters,
simulations=simulations, parameters_val=parameters_val,
simulations_val=simulations_val,
early_stopping=True, # early stopping
seed=1, n_epochs=10, scale_samples=True, use_tqdm=False)
# 3) save and re-load NNs:
# get the statistics from the already fit StatisticsLearning object 'semiNN':
learned_seminn_stat = semiNN.get_statistics()
learned_triplet_stat = triplet.get_statistics()
# this has a save net method:
learned_seminn_stat.save_net("seminn_net.pth")
# if you used `scale_samples=True` in learning the NNs, need to provide a path where pickle stores the scaler too:
learned_triplet_stat.save_net("triplet_net.pth", path_to_scaler="scaler.pkl")
# to reload: need to use the Neural Embedding statistics fromFile; this needs to know which kind of NN it is using;
# need therefore to pass either the input/output size (it data size and number parameters) or the network class if
# that was specified explicitly in the StatisticsLearning class. Check the docstring for NeuralEmbedding.fromFile
# for more details.
from abcpy.statistics import NeuralEmbedding
learned_seminn_stat_loaded = NeuralEmbedding.fromFile("seminn_net.pth", input_size=1, output_size=2)
learned_triplet_stat_loaded = NeuralEmbedding.fromFile("triplet_net.pth", input_size=1, output_size=2,
path_to_scaler="scaler.pkl")
    # 4) you can optionally rescale the different summary statistics by their standard deviation on a reference dataset
# of simulations. To do this, it is enough to pass at initialization the reference dataset, and the rescaling will
# be applied every time the statistics is computed on some simulation or observation.
learned_triplet_stat_loaded = NeuralEmbedding.fromFile("triplet_net.pth", input_size=1, output_size=2,
path_to_scaler="scaler.pkl",
reference_simulations=simulations_val)
# 5) perform inference
# define distance
from abcpy.distances import Euclidean
distance_calculator = Euclidean(learned_seminn_stat_loaded)
# define kernel
from abcpy.perturbationkernel import DefaultKernel
kernel = DefaultKernel([mu, sigma])
# define sampling scheme
from abcpy.inferences import PMCABC
sampler = PMCABC([height], [distance_calculator], backend, kernel, seed=1)
eps_arr = np.array([500]) # starting value of epsilon; the smaller, the slower the algorithm.
# at each iteration, take as epsilon the epsilon_percentile of the distances obtained by simulations at previous
# iteration from the observation
epsilon_percentile = 10
journal = sampler.sample([height_obs], steps, eps_arr, n_sample, n_samples_per_param, epsilon_percentile)
return journal
def analyse_journal(journal):
# output parameters and weights
print(journal.opt_values)
print(journal.get_weights())
# do post analysis
print(journal.posterior_mean())
print(journal.posterior_cov())
# print configuration
print(journal.configuration)
# plot posterior
journal.plot_posterior_distr(path_to_save="posterior.png")
# save and load journal
journal.save("experiments.jnl")
from abcpy.output import Journal
new_journal = Journal.fromFile('experiments.jnl')
if __name__ == "__main__":
journal = infer_parameters(logging_level=logging.INFO)
analyse_journal(journal)
|
a0d767d773751a0f3039216f4b33aed3b98eaaa8
|
d6bcc2a87c2e419528c0edc98ebd3d3717a16716
|
/test/lib/ufe/testComboCmd.py
|
ca216dd2b1e098dc586f8fce8c224caff58476bd
|
[
"DOC"
] |
permissive
|
Autodesk/maya-usd
|
ac9e03f39132c6b221032f21dc98805b4aa52d31
|
dc1c13a3f8012b2a99a45e46fb30250fd4b82487
|
refs/heads/dev
| 2023-09-05T07:39:58.640296
| 2023-09-01T19:56:30
| 2023-09-01T19:56:30
| 198,889,624
| 692
| 208
| null | 2023-09-14T20:49:17
| 2019-07-25T19:25:28
|
Mathematica
|
UTF-8
|
Python
| false
| false
| 44,894
|
py
|
testComboCmd.py
|
#!/usr/bin/env python
#
# Copyright 2020 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fixturesUtils
import mayaUtils
from testUtils import assertVectorAlmostEqual
import testTRSBase
import ufeUtils
import usdUtils
import mayaUsd.ufe
import mayaUsd.lib
from pxr import UsdGeom, Vt, Gf
from maya import cmds
from maya import standalone
from maya.api import OpenMaya as om
import ufe
from functools import partial
from math import degrees
from math import radians
from math import cos
import os
import unittest
def transform3dScale(transform3d):
matrix = om.MMatrix(transform3d.inclusiveMatrix().matrix)
return om.MTransformationMatrix(matrix).scale(om.MSpace.kObject)
def transform3dRotation(transform3d):
matrix = om.MMatrix(transform3d.inclusiveMatrix().matrix)
return om.MTransformationMatrix(matrix).rotation()
def matrix4dTranslation(matrix4d):
translation = matrix4d.matrix[-1]
return translation[:-1]
def transform3dTranslation(transform3d):
return matrix4dTranslation(transform3d.inclusiveMatrix())
def addVec(mayaVec, usdVec):
return mayaVec + om.MVector(*usdVec)
def combineScales(scale1, scale2):
return [scale1[0]*scale2[0], scale1[1]*scale2[1], scale1[2]*scale2[2] ]
def nameToPlug(nodeName):
selection = om.MSelectionList()
selection.add(nodeName)
return selection.getPlug(0)
def checkPivotsAndCompensations(testCase, mayaObjName, usdT3d):
'''Confirm matching Maya and UFE object pivots and pivot compensations.'''
# getAttr() returns a single-element vector that holds a 3-element tuple.
assertVectorAlmostEqual(testCase, cmds.getAttr(mayaObjName+".rp")[0],
usdT3d.rotatePivot().vector, places=6)
assertVectorAlmostEqual(testCase, cmds.getAttr(mayaObjName+".sp")[0],
usdT3d.scalePivot().vector, places=6)
assertVectorAlmostEqual(testCase, cmds.getAttr(mayaObjName+".rpt")[0],
usdT3d.rotatePivotTranslation().vector, places=6)
assertVectorAlmostEqual(testCase, cmds.getAttr(mayaObjName+".spt")[0],
usdT3d.scalePivotTranslation().vector, places=6)
def checkWorldSpaceXform(testCase, objects):
'''Confirm matching Maya and UFE object world space positions.
The Maya object is the first object in the objects argument, and is used
as the benchmark.'''
mayaWorld = cmds.xform(objects[0], q=True, ws=True, matrix=True)
for t3d in objects[1:]:
# Flatten out UFE matrices for comparison with Maya output.
usdWorld = [y for x in t3d.inclusiveMatrix().matrix for y in x]
assertVectorAlmostEqual(testCase, mayaWorld, usdWorld)
class TestObserver(ufe.Observer):
def __init__(self):
super(TestObserver, self).__init__()
self._transform3d = 0
self._valueChanged = 0
def __call__(self, notification):
if (ufeUtils.ufeFeatureSetVersion() >= 2):
if (ufeUtils.ufeFeatureSetVersion() >= 4):
if isinstance(notification, ufe.AttributeChanged):
self._valueChanged += 1
else:
if isinstance(notification, ufe.AttributeValueChanged):
self._valueChanged += 1
if isinstance(notification, ufe.Transform3dChanged):
self._transform3d += 1
@property
def nbValueChanged(self):
return self._valueChanged
@property
def nbTransform3d(self):
return self._transform3d
@property
def notifications(self):
return self._valueChanged + self._transform3d
def reset(self):
self._transform3d = 0
self._valueChanged = 0
class ComboCmdTestCase(testTRSBase.TRSTestCaseBase):
'''Verify the Transform3d UFE interface, for multiple runtimes.
The Maya move, rotate, and scale commands is used to test setting object
translation, rotation, and scale.
As of 05-May-2020, object space relative moves, rotates, and scales are
supported by Maya code, and move and rotate are supported in world space
as well, although scale is not supported in world space.
Object translation, rotation, and scale is read using the Transform3d
interface and the native run-time interface.
This test performs a sequence of the possible types of operations, and
    verifies that the position, rotation, and scale of the object have been
    modified according to how such operations should accumulate.
The expected value consists of the translate, rotate, and scale vectors
(in world space). It is computed by:
- initializing the translate, rotate, and scale vectors
- calling updateTRS after each operation; this method will reassemble
the transformation matrix from the three vectors, apply the
appropriate matrix transformation for the given operation, in the
given space, and extract the translate, rotate, and scale vector,
once again in world space.
    When a snapshot is taken for comparison purposes, the value of each
    component is read from the runtime objects and assembled into a vector
    that can be compared to the computed expected value vector.
UFE Feature : Transform3d
Maya Feature : move, rotate, scale
Action : Relative move, rotate, and scale in object space; move, rotate in
    world space.
Applied On Selection :
- No selection - Given node as param
- Single Selection [Maya, Non-Maya]
Undo/Redo Test : Yes
Expect Results To Test :
- Maya Dag object world space position.
- USD object world space position.
Edge Cases :
- None.
'''
pluginsLoaded = False
@classmethod
def setUpClass(cls):
fixturesUtils.setUpClass(__file__, loadPlugin=False)
if not cls.pluginsLoaded:
cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def setUp(self):
''' Called initially to set up the maya test environment '''
# Load plugins
self.assertTrue(self.pluginsLoaded)
# Set up memento, a list of snapshots.
self.memento = []
# Callables to get current object translation and rotation using the
# run-time and UFE.
self.move = 0
self.rotate = 1
self.scale = 2
self.ops = [self.move, self.rotate, self.scale]
self.runTimes = [None,None,None]
self.ufes = [None,None,None]
self.noSpace = None
self.spaces = [om.MSpace.kObject, om.MSpace.kWorld]
# Open top_layer.ma scene in testSamples
mayaUtils.openTopLayerScene()
# Create some extra Maya nodes
cmds.polySphere()
cmds.polyCube()
# Clear selection to start off
cmds.select(clear=True)
def updateTRS(self, expectedTRS, op, v, space=om.MSpace.kWorld):
'''Update the expected vector based on given operation, vector and space
The expectedTRS vector has 9 entries:
* 0-2 The world position
* 3-5 The world rotation (in degrees)
* 6-8 The world scale
The possible operations are move, rotate, and scale.
The possible spaces are kObject and kWorld (default)
'''
if( expectedTRS is None ):
expectedTRS = [None] * 9
# trs starts as the identity matrix
#
trs = om.MTransformationMatrix()
# Add translation, rotation, and scale, in world space, to recreate
# the last transformation matrix.
#
if( expectedTRS[0] is not None ):
trs.setTranslation( om.MVector( expectedTRS[0], expectedTRS[1], expectedTRS[2]), om.MSpace.kWorld )
if( expectedTRS[3] is not None ):
trs.setRotation( om.MEulerRotation( radians(expectedTRS[3]), radians(expectedTRS[4]), radians(expectedTRS[5])) )
if( expectedTRS[6] is not None ):
trs.setScale( om.MVector( expectedTRS[6], expectedTRS[7], expectedTRS[8]), om.MSpace.kWorld )
# Apply the requested operation. If the space is kObject, and we had a
# scale factor, we must counteract it to get the right matrix, by
# dividing the translation vector by it (otherwise it ends up being
# scaled twice, and the expected value is incorrect).
#
if op == self.move:
if( space == om.MSpace.kObject and expectedTRS[6] is not None):
trs.translateBy( om.MVector( v[0]/expectedTRS[6], v[1]/expectedTRS[7], v[2]/expectedTRS[8] ), space )
else:
trs.translateBy( om.MVector( v[0], v[1], v[2] ), space )
elif op == self.rotate:
trs.rotateBy( om.MEulerRotation( radians(v[0]), radians(v[1]), radians(v[2])), space)
elif op == self.scale:
trs.scaleBy( om.MVector( v[0], v[1], v[2] ), space )
# Recover the world space translate, rotate, and scale, and updated
# the expected vector
#
expectedTRS[0:3] = trs.translation(om.MSpace.kWorld)
        r = trs.rotation().asVector()
expectedTRS[3] = degrees(r[0])
expectedTRS[4] = degrees(r[1])
expectedTRS[5] = degrees(r[2])
expectedTRS[6:9] = trs.scale(om.MSpace.kWorld)
return expectedTRS
def extractTRS(self, expectedTRS, op):
'''Extract the move, rotate, or scale component
'''
if op == self.move:
# Translation (x, y, z)
#
return expectedTRS[0:3]
elif op == self.rotate:
# Rotation vector in degrees (x, y, z)
#
return expectedTRS[3:6]
elif op == self.scale:
# Scale (x, y, z)
#
return expectedTRS[6:9]
def snapshotRunTimeUFE(self):
'''Return a pair with an op read from the run-time and from UFE.
Tests that the op read from the run-time interface matches the
UFE op.
'''
# Get translation
#
rtAll = None
ufeAll = None
offset = 0
for op in self.ops:
runTimeVec = self.runTimes[op]()
ufeVec = self.ufes[op]()
if op == self.rotate:
# The runtimes for rotate return an MEulerRotation, which we
# must convert to a vector in degrees, since updateTRS expects
# it in that format.
#
r = runTimeVec.asVector()
rtAll = self.updateTRS( rtAll, op, [degrees(r[0]), degrees(r[1]), degrees(r[2])] )
r = ufeVec.asVector()
ufeAll = self.updateTRS( ufeAll, op, [degrees(r[0]), degrees(r[1]), degrees(r[2])] )
else:
rtAll = self.updateTRS( rtAll, op, runTimeVec )
ufeAll = self.updateTRS( ufeAll, op, ufeVec )
assertVectorAlmostEqual(self, runTimeVec, ufeVec)
assertVectorAlmostEqual(self, rtAll, ufeAll)
return (rtAll, ufeAll)
def runTestCombo(self, expectedTRS):
'''Engine method to run move, rotate, and scale test.'''
# Save the initial values to the memento list.
self.snapShotAndTest(expectedTRS, 6)
# Do a combination of commands, and compare with expected.
        # Note: scale not supported in kWorld space, hence, no test
# rotate values are in degrees
#
ops = [ [self.rotate,[10,20,30], om.MSpace.kObject]
, [self.move,[4,5,6], om.MSpace.kWorld]
, [self.move,[4,5,6], om.MSpace.kObject]
, [self.scale,[.1,10,100], om.MSpace.kObject]
, [self.rotate,[-10,-20,-30], om.MSpace.kWorld]
, [self.move,[-3,-2,-1], om.MSpace.kWorld]
, [self.scale,[1000,.01,.1], om.MSpace.kObject]
, [self.move,[-3,-2,-1], om.MSpace.kObject]
]
for item in ops:
op = item[0]
if( op not in self.ops ):
continue
v = item[1]
space = item[2]
if( op == self.move ):
if( space == om.MSpace.kObject ):
cmds.move(v[0], v[1], v[2], relative=True, os=True, wd=True)
else:
cmds.move(v[0], v[1], v[2], relative=True)
elif( op == self.rotate ):
if( space == om.MSpace.kObject ):
cmds.rotate(v[0], v[1], v[2], relative=True, os=True, forceOrderXYZ=True)
else:
cmds.rotate(v[0], v[1], v[2], relative=True, ws=True, forceOrderXYZ=True)
elif( op == self.scale ):
if( space == om.MSpace.kObject ):
cmds.scale(v[0], v[1], v[2], relative=True)
else:
# scale is only supported in object space; if it is
# eventually supported in world space, this would be the
# command to emit:
#cmds.scale(v[0], v[1], v[2], relative=True, ws=True)
# Fail if we attempt to test this type of operation
self.assertEqual( space, om.MSpace.kObject, 'scale only supported in object space' )
continue
expectedTRS = self.updateTRS( expectedTRS, op, v, space )
self.snapShotAndTest(expectedTRS, 6)
# Test undo, redo.
self.rewindMemento()
self.fforwardMemento()
def testComboMaya(self):
'''Move, rotate, and scale Maya object, read through the Transform3d interface.'''
# Give the sphere an initial position, rotation, scale, and select it.
sphereObj = om.MSelectionList().add('pSphere1').getDagPath(0).node()
sphereFn = om.MFnTransform(sphereObj)
expectedTRS = None
if( self.move in self.ops ):
expectedTRS = self.updateTRS( expectedTRS, self.move, [1,2,3] )
t = self.extractTRS(expectedTRS,self.move)
sphereFn.setTranslation(om.MVector( t[0], t[1], t[2] ), om.MSpace.kTransform)
if( self.rotate in self.ops ):
expectedTRS = self.updateTRS( expectedTRS, self.rotate, [30,60,90] )
r = self.extractTRS(expectedTRS,self.rotate)
sphereFn.setRotation(om.MEulerRotation(radians(r[0]),radians(r[1]),radians(r[2])), om.MSpace.kTransform)
if( self.scale in self.ops ):
expectedTRS = self.updateTRS( expectedTRS, self.scale, [1,2,3] )
s = self.extractTRS(expectedTRS,self.scale)
sphereFn.setScale(om.MVector( s[0], s[1], s[2] ) )
spherePath = ufe.Path(mayaUtils.createUfePathSegment("|pSphere1"))
sphereItem = ufe.Hierarchy.createItem(spherePath)
ufe.GlobalSelection.get().append(sphereItem)
# Create a Transform3d interface for it.
transform3d = ufe.Transform3d.transform3d(sphereItem)
# Set up the callables that will retrieve the translation.
self.runTimes[self.move] = partial(
sphereFn.translation, om.MSpace.kTransform)
self.ufes[self.move] = partial(transform3dTranslation, transform3d)
# Set up the callables that will retrieve the rotation.
self.runTimes[self.rotate] = partial(
sphereFn.rotation, om.MSpace.kTransform)
self.ufes[self.rotate] = partial(transform3dRotation, transform3d)
# Set up the callables that will retrieve the scale.
self.runTimes[self.scale] = partial(
sphereFn.scale)
self.ufes[self.scale] = partial(transform3dScale, transform3d)
self.runTestCombo(expectedTRS)
def testComboUSD(self):
'''Move, rotate, and scale USD object, read through the Transform3d interface.'''
# Select Ball_35 to move, rotate, and scale it.
ball35Path = ufe.Path([
mayaUtils.createUfePathSegment("|transform1|proxyShape1"),
usdUtils.createUfePathSegment("/Room_set/Props/Ball_35")])
ball35Item = ufe.Hierarchy.createItem(ball35Path)
ufe.GlobalSelection.get().append(ball35Item)
# Create a Transform3d interface for it.
transform3d = ufe.Transform3d.transform3d(ball35Item)
# We compare the UFE ops with the USD run-time ops. To
# obtain the full ops of Ball_35, we need to add the USD
# ops to the Maya proxy shape ops.
proxyShapeXformObj = om.MSelectionList().add('transform1').getDagPath(0).node()
proxyShapeXformFn = om.MFnTransform(proxyShapeXformObj)
def ball35Translation():
ball35Prim = usdUtils.getPrimFromSceneItem(ball35Item)
return addVec(
proxyShapeXformFn.translation(om.MSpace.kTransform),
ball35Prim.GetAttribute('xformOp:translate').Get())
def ball35Rotation():
ball35Prim = usdUtils.getPrimFromSceneItem(ball35Item)
if not ball35Prim.HasAttribute('xformOp:rotateXYZ'):
return proxyShapeXformFn.rotation(om.MSpace.kTransform)
else:
x,y,z = ball35Prim.GetAttribute('xformOp:rotateXYZ').Get()
return proxyShapeXformFn.rotation(om.MSpace.kTransform) + om.MEulerRotation(radians(x), radians(y), radians(z))
def ball35Scale():
ball35Prim = usdUtils.getPrimFromSceneItem(ball35Item)
if not ball35Prim.HasAttribute('xformOp:scale'):
return proxyShapeXformFn.scale()
else:
return combineScales(proxyShapeXformFn.scale(), ball35Prim.GetAttribute('xformOp:scale').Get())
# Set up the callables that will retrieve the translation.
self.runTimes[self.move] = ball35Translation
self.ufes[self.move] = partial(transform3dTranslation, transform3d)
# Set up the callables that will retrieve the rotation.
self.runTimes[self.rotate] = ball35Rotation
self.ufes[self.rotate] = partial(transform3dRotation, transform3d)
# Set up the callables that will retrieve the scale.
self.runTimes[self.scale] = ball35Scale
self.ufes[self.scale] = partial(transform3dScale, transform3d)
# Save the initial position to the memento list.
expectedTRS = None
if( self.move in self.ops ):
v = ball35Translation()
expectedTRS = self.updateTRS( expectedTRS, self.move, [v[0], v[1], v[2]] )
if( self.rotate in self.ops ):
r = ball35Rotation().asVector()
expectedTRS = self.updateTRS( expectedTRS, self.rotate, [degrees(r[0]), degrees(r[1]), degrees(r[2])] )
if( self.scale in self.ops ):
s = ball35Scale()
expectedTRS = self.updateTRS( expectedTRS, self.scale, [s[0], s[1], s[2]] )
self.runTestCombo(expectedTRS)
@unittest.skipUnless(mayaUtils.mayaMajorVersion() >= 2022, 'Rotate and scale pivot compensation only available in Maya 2022 or greater.')
def testRotateScalePivotCompensation(self):
'''Test that rotate and scale pivot compensation match Maya object.'''
cmds.file(new=True, force=True)
mayaSphere = cmds.polySphere()[0]
mayaSpherePath = ufe.PathString.path('|pSphere1')
mayaSphereItem = ufe.Hierarchy.createItem(mayaSpherePath)
import mayaUsd_createStageWithNewLayer
proxyShape = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePath = ufe.PathString.path(proxyShape)
proxyShapeItem = ufe.Hierarchy.createItem(proxyShapePath)
proxyShapeContextOps = ufe.ContextOps.contextOps(proxyShapeItem)
proxyShapeContextOps.doOp(['Add New Prim', 'Sphere'])
usdSpherePath = ufe.PathString.path('%s,/Sphere1' % proxyShape)
usdSphereItem = ufe.Hierarchy.createItem(usdSpherePath)
usdSphereT3d = ufe.Transform3d.transform3d(usdSphereItem)
# If the Transform3d interface can't handle rotate or scale pivot
# compensation, skip this test.
if usdSphereT3d.translateRotatePivotCmd() is None or \
usdSphereT3d.translateScalePivotCmd() is None:
raise unittest.SkipTest("Rotate or scale pivot compensation unsupported.")
# Select both spheres.
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(mayaSphereItem)
sn.append(usdSphereItem)
# Rotate both spheres around X, and scale them.
cmds.rotate(30, 0, 0, r=True, os=True, fo=True)
cmds.scale(1, 1, 2, r=True)
# Move pivots in world space. At time of writing (20-Oct-20) UFE
        # rotate pivot and scale pivot arguments to the move command don't accept
# an object argument, so use the selection.
cmds.move(0, -2.104143, 3.139701, "pSphere1.scalePivot", "pSphere1.rotatePivot", r=True)
sn.remove(mayaSphereItem)
cmds.move(0, -2.104143, 3.139701, r=True, urp=True, usp=True)
checkPivotsAndCompensations(self, "pSphere1", usdSphereT3d)
# Scale the spheres again
sn.append(mayaSphereItem)
cmds.scale(1, 1, 2, r=True)
# Move the pivots again.
cmds.move(0, 5.610465, 3.239203, "pSphere1.scalePivot", "pSphere1.rotatePivot", r=True)
sn.remove(mayaSphereItem)
cmds.move(0, 5.610465, 3.239203, r=True, urp=True, usp=True)
checkPivotsAndCompensations(self, "pSphere1", usdSphereT3d)
# Move only the rotate pivot.
cmds.move(0, 0, 3, r=True, urp=True)
cmds.move(0, 0, 3, "pSphere1.rotatePivot", r=True)
checkPivotsAndCompensations(self, "pSphere1", usdSphereT3d)
# Move only the scale pivot.
cmds.move(0, 0, -4, r=True, usp=True)
cmds.move(0, 0, -4, "pSphere1.scalePivot", r=True)
checkPivotsAndCompensations(self, "pSphere1", usdSphereT3d)
@unittest.skipUnless(mayaUtils.mayaMajorVersion() >= 2022, 'Rotate and scale pivot compensation only available in Maya 2022 or greater.')
def testRotateScalePivotCompensationAfterExport(self):
'''Rotate and scale pivots must match after export.'''
cmds.file(new=True, force=True)
mayaSphere = cmds.polySphere()[0]
cmds.rotate(0, 0, -45, r=True, os=True, fo=True)
cmds.scale(4, 3, 2, r=True)
cmds.move(-2, -3, -4, "pSphere1.rotatePivot", r=True)
cmds.move(7, 6, 5, "pSphere1.scalePivot", r=True)
# Export out, reference back in using proxy shape.
usdFilePath = os.path.abspath('UsdExportMayaXformStack.usda')
cmds.mayaUSDExport(file=usdFilePath)
# Reference it back in.
proxyShape = cmds.createNode('mayaUsdProxyShape')
cmds.setAttr('mayaUsdProxyShape1.filePath', usdFilePath, type='string')
# MAYA-101766: awkward plug access for non-interactive stage loading.
outStageData = nameToPlug('mayaUsdProxyShape1.outStageData')
outStageData.asMDataHandle()
proxyShapeMayaPath = cmds.ls(proxyShape, long=True)[0]
proxyShapePathSegment = mayaUtils.createUfePathSegment(
proxyShapeMayaPath)
spherePathSegment = usdUtils.createUfePathSegment('/pSphere1')
spherePath = ufe.Path([proxyShapePathSegment, spherePathSegment])
sphereItem = ufe.Hierarchy.createItem(spherePath)
usdSphereT3d = ufe.Transform3d.transform3d(sphereItem)
# If the Transform3d interface can't handle rotate or scale pivot
# compensation, skip this test.
if usdSphereT3d.translateRotatePivotCmd() is None or \
usdSphereT3d.translateScalePivotCmd() is None:
raise unittest.SkipTest("Rotate or scale pivot compensation unsupported.")
# Maya object and its exported USD object twin should have the
# same pivots and pivot compensations.
checkPivotsAndCompensations(self, "pSphere1", usdSphereT3d)
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(sphereItem)
# Move only the rotate pivot.
cmds.move(-1, -2, -3, r=True, urp=True)
cmds.move(-1, -2, -3, "pSphere1.rotatePivot", r=True)
checkPivotsAndCompensations(self, "pSphere1", usdSphereT3d)
# Move only the scale pivot.
cmds.move(-4, -3, -2, r=True, usp=True)
cmds.move(-4, -3, -2, "pSphere1.scalePivot", r=True)
checkPivotsAndCompensations(self, "pSphere1", usdSphereT3d)
@unittest.skipUnless(mayaUtils.mayaMajorVersion() >= 2022, 'Fallback transform op handling only available in Maya 2022 or greater.')
def testFallbackCases(self):
'''Fallback handler test cases.'''
cmds.file(new=True, force=True)
import mayaUsd_createStageWithNewLayer
proxyShape = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePath = ufe.PathString.path(proxyShape)
proxyShapeItem = ufe.Hierarchy.createItem(proxyShapePath)
proxyShapeContextOps = ufe.ContextOps.contextOps(proxyShapeItem)
proxyShapeContextOps.doOp(['Add New Prim', 'Sphere'])
spherePath = ufe.PathString.path('%s,/Sphere1' % proxyShape)
sphereItem = ufe.Hierarchy.createItem(spherePath)
sphereT3d = ufe.Transform3d.transform3d(sphereItem)
spherePrim = mayaUsd.ufe.ufePathToPrim(ufe.PathString.string(spherePath))
sphereXformable = UsdGeom.Xformable(spherePrim)
# Add transform ops that do not match either the Maya transform stack,
# the USD common API transform stack, or a matrix stack.
sphereXformable.AddTranslateOp()
sphereXformable.AddTranslateOp(UsdGeom.XformOp.PrecisionFloat, "pivotCustom")
sphereXformable.AddRotateZOp()
sphereXformable.AddTranslateOp(
UsdGeom.XformOp.PrecisionFloat, "pivotCustom", True)
self.assertEqual(
sphereXformable.GetXformOpOrderAttr().Get(), Vt.TokenArray((
"xformOp:translate", "xformOp:translate:pivotCustom",
"xformOp:rotateZ", "!invert!xformOp:translate:pivotCustom")))
self.assertFalse(UsdGeom.XformCommonAPI(sphereXformable))
self.assertFalse(mayaUsd.lib.XformStack.MayaStack().MatchingSubstack(
sphereXformable.GetOrderedXformOps()))
# Select sphere.
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(sphereItem)
# Rotate sphere around X.
cmds.rotate(30, 0, 0, r=True, os=True, fo=True)
# Fallback interface will have added a RotXYZ transform op.
self.assertEqual(
sphereXformable.GetXformOpOrderAttr().Get(), Vt.TokenArray((
"xformOp:translate", "xformOp:translate:pivotCustom",
"xformOp:rotateZ", "!invert!xformOp:translate:pivotCustom",
"xformOp:rotateXYZ:maya_fallback")))
@unittest.skipUnless(mayaUtils.mayaMajorVersion() >= 2023, 'Requires Maya fixes only available in Maya 2023 or greater.')
def testBrokenFallback(self):
'''Maya fallback transform stack must be final on prim transform op stack.'''
# Create a prim and add transform ops to it that don't match the Maya
# transform stack. Then, transform it with Maya: this will trigger the
# creation of a Maya fallback transform stack. Finally, append another
# transform op to the prim transform stack. Because there is now a
# transform op beyond the Maya fallback transform stack, the Maya
# fallback has been "corrupted", and any further transformation of the
# prim must be a no-op.
cmds.file(new=True, force=True)
import mayaUsd_createStageWithNewLayer
proxyShape = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePath = ufe.PathString.path(proxyShape)
proxyShapeItem = ufe.Hierarchy.createItem(proxyShapePath)
proxyShapeContextOps = ufe.ContextOps.contextOps(proxyShapeItem)
proxyShapeContextOps.doOp(['Add New Prim', 'Capsule'])
capsulePath = ufe.PathString.path('%s,/Capsule1' % proxyShape)
capsuleItem = ufe.Hierarchy.createItem(capsulePath)
capsulePrim = mayaUsd.ufe.ufePathToPrim(ufe.PathString.string(capsulePath))
capsuleXformable = UsdGeom.Xformable(capsulePrim)
capsuleXformable.AddRotateXOp()
capsuleXformable.AddRotateYOp()
self.assertEqual(
capsuleXformable.GetXformOpOrderAttr().Get(), Vt.TokenArray((
"xformOp:rotateX", "xformOp:rotateY")))
# Select capsule.
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(capsuleItem)
# Rotate capsule around X.
cmds.rotate(30, 0, 0, r=True, os=True, fo=True)
# Fallback interface will have added a RotXYZ transform op.
self.assertEqual(
capsuleXformable.GetXformOpOrderAttr().Get(), Vt.TokenArray((
"xformOp:rotateX", "xformOp:rotateY",
"xformOp:rotateXYZ:maya_fallback")))
capsuleT3d = ufe.Transform3d.transform3d(capsuleItem)
self.assertIsNotNone(capsuleT3d)
# Add another transform op to break the Maya fallback stack.
capsuleXformable.AddRotateZOp()
self.assertEqual(
capsuleXformable.GetXformOpOrderAttr().Get(), Vt.TokenArray((
"xformOp:rotateX", "xformOp:rotateY",
"xformOp:rotateXYZ:maya_fallback", "xformOp:rotateZ")))
# Do any transform editing with Maya.
cmds.rotate(0, 0, 30, r=True, os=True, fo=True)
capsuleT3d = ufe.Transform3d.transform3d(capsuleItem)
self.assertIsNone(capsuleT3d)
@unittest.skipUnless(mayaUtils.mayaMajorVersion() >= 2023, 'Fallback transform op handling only available in Maya 2023 or greater.')
def testFallback(self):
'''Transformables not handled by standard Transform3d handlers must be
handled by the fallback handler.'''
mayaUtils.openTestScene("xformOpFallback", "fallbackTest.ma")
# We have three objects in the scene, one Maya, one USD with a Maya
# transform stack, and one USD which does not match any Transform3d
# handler. This last object is the one to which fallback transform ops
# will be appended.
mayaObj = '|null1|pSphere1'
mayaSpherePath = ufe.PathString.path(mayaObj)
usdSpherePath = ufe.PathString.path('|fallbackTest|fallbackTestShape,/parent/sphere1')
usdFallbackSpherePath = ufe.PathString.path('|fallbackTest|fallbackTestShape,/sphere1')
mayaSphereItem = ufe.Hierarchy.createItem(mayaSpherePath)
usdSphereItem = ufe.Hierarchy.createItem(usdSpherePath)
usdFallbackSphereItem = ufe.Hierarchy.createItem(usdFallbackSpherePath)
# For scene items with fallback transform ops, the transform3d()
# interface considers the complete object (i.e. all transform ops in
# the stack), which is undesirable when setting and getting fallback
# pivot transform ops. To consider only the fallback transform ops,
# use the editTransform3d() interface. For scene items with only a
# Maya transform stack, editTransform3d() and transform3d() are
# equivalent, so arbitrarily choose editTransform3d().
usdSphere3d = ufe.Transform3d.editTransform3d(usdSphereItem, ufe.EditTransform3dHint())
usdFallbackSphere3d = ufe.Transform3d.editTransform3d(usdFallbackSphereItem, ufe.EditTransform3dHint())
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(mayaSphereItem)
sn.append(usdSphereItem)
sn.append(usdFallbackSphereItem)
# All objects should have the same world transform. We use the Maya
# world transform as the benchmark.
checkWorldSpaceXform(self, [mayaObj, usdSphere3d, usdFallbackSphere3d])
# Count the number of transform ops in the Maya transform stack sphere
# and the fallback transform stack sphere.
spherePrim = mayaUsd.ufe.ufePathToPrim(
ufe.PathString.string(usdSpherePath))
fallbackSpherePrim = mayaUsd.ufe.ufePathToPrim(
ufe.PathString.string(usdFallbackSpherePath))
sphereXformable = UsdGeom.Xformable(spherePrim)
fallbackSphereXformable = UsdGeom.Xformable(fallbackSpherePrim)
sphereOps = sphereXformable.GetOrderedXformOps()
fallbackSphereOps = fallbackSphereXformable.GetOrderedXformOps()
# Both prims have TRS transform ops.
self.assertEqual(len(sphereOps), 3)
self.assertEqual(len(fallbackSphereOps), 3)
# First, translate all objects.
cmds.move(0, 0, 5, r=True, os=True, wd=True)
checkWorldSpaceXform(self, [mayaObj, usdSphere3d, usdFallbackSphere3d])
# The sphere with the Maya transform stack has no additional transform
# op; the sphere with the fallback stack will have an additional
# translate op.
sphereOps = sphereXformable.GetOrderedXformOps()
fallbackSphereOps = fallbackSphereXformable.GetOrderedXformOps()
self.assertEqual(len(sphereOps), 3)
self.assertEqual(len(fallbackSphereOps), 4)
# Rotate
cmds.rotate(40, 0, 0, r=True, os=True, fo=True)
checkWorldSpaceXform(self, [mayaObj, usdSphere3d, usdFallbackSphere3d])
# The sphere with the Maya transform stack has no additional transform
# op; the sphere with the fallback stack will have an additional
# rotate op.
sphereOps = sphereXformable.GetOrderedXformOps()
fallbackSphereOps = fallbackSphereXformable.GetOrderedXformOps()
self.assertEqual(len(sphereOps), 3)
self.assertEqual(len(fallbackSphereOps), 5)
# Scale
cmds.scale(1, 1, 2.0, r=True)
checkWorldSpaceXform(self, [mayaObj, usdSphere3d, usdFallbackSphere3d])
# The sphere with the Maya transform stack has no additional transform
# op; the sphere with the fallback stack will have an additional
# scale op.
sphereOps = sphereXformable.GetOrderedXformOps()
fallbackSphereOps = fallbackSphereXformable.GetOrderedXformOps()
self.assertEqual(len(sphereOps), 3)
self.assertEqual(len(fallbackSphereOps), 6)
# Command to change the pivots on Maya items and UFE items is
# different, so remove Maya item from selection.
sn.remove(mayaSphereItem)
mayaPivots = [
mayaObj+"."+attrName for attrName in ["scalePivot", "rotatePivot"]]
cmds.move(0, -2.5, 2.5, *mayaPivots, r=True)
cmds.move(0, -2.5, 2.5, r=True, urp=True, usp=True)
checkPivotsAndCompensations(self, mayaObj, usdSphere3d)
checkPivotsAndCompensations(self, mayaObj, usdFallbackSphere3d)
# Both spheres have 6 additional transform ops: rotate pivot and its
# inverse, scale pivot and its inverse, rotate pivot translate, and
# scale pivot translate.
sphereOps = sphereXformable.GetOrderedXformOps()
fallbackSphereOps = fallbackSphereXformable.GetOrderedXformOps()
self.assertEqual(len(sphereOps), 9)
self.assertEqual(len(fallbackSphereOps), 12)
# Perform an additional pivot move, to ensure that the existing pivot
# values are properly considered.
cmds.move(0, -1, 1, *mayaPivots, r=True)
cmds.move(0, -1, 1, r=True, urp=True, usp=True)
checkPivotsAndCompensations(self, mayaObj, usdSphere3d)
checkPivotsAndCompensations(self, mayaObj, usdFallbackSphere3d)
@unittest.skipIf(int(cmds.about(apiVersion=True)) <= 20220000, 'Center pivot command is only available in Maya 2022 or later.')
def testCenterPivotUndo(self):
cmds.file(new=True, force=True)
import mayaUsd_createStageWithNewLayer
mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePath = ufe.PathString.path('|stage1|stageShape1')
proxyShapeItem = ufe.Hierarchy.createItem(proxyShapePath)
proxyShapeContextOps = ufe.ContextOps.contextOps(proxyShapeItem)
proxyShapeContextOps.doOp(['Add New Prim', 'Capsule'])
capsulePath = ufe.PathString.path('|stage1|stageShape1,/Capsule1')
capsuleItem = ufe.Hierarchy.createItem(capsulePath)
usdT3d = ufe.Transform3d.transform3d(capsuleItem)
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(capsuleItem)
kwArgs = [{'cp' : True}]
if mayaUtils.ufeSupportFixLevel() >= 4:
kwArgs.append({'cpc' : True})
for kwArg in kwArgs:
# center point is expected to be at [0.0, 0.0, 0.0]
assertVectorAlmostEqual(self, usdT3d.rotatePivot().vector, [0.0, 0.0, 0.0])
assertVectorAlmostEqual(self, usdT3d.scalePivot().vector, [0.0, 0.0, 0.0])
# move the pivot location
cmds.move(7, 2, 1, r=True, urp=True, usp=True)
assertVectorAlmostEqual(self, usdT3d.rotatePivot().vector, [7.0, 2.0, 1.0])
assertVectorAlmostEqual(self, usdT3d.scalePivot().vector, [7.0, 2.0, 1.0])
# call center pivot command
cmds.xform(**kwArg)
# center point is expected to be at [0.0, 0.0, 0.0]
assertVectorAlmostEqual(self, usdT3d.rotatePivot().vector, [0.0, 0.0, 0.0])
assertVectorAlmostEqual(self, usdT3d.scalePivot().vector, [0.0, 0.0, 0.0])
# undo
cmds.undo()
assertVectorAlmostEqual(self, usdT3d.rotatePivot().vector, [7.0, 2.0, 1.0])
assertVectorAlmostEqual(self, usdT3d.scalePivot().vector, [7.0, 2.0, 1.0])
# redo
cmds.redo()
assertVectorAlmostEqual(self, usdT3d.rotatePivot().vector, [0.0, 0.0, 0.0])
assertVectorAlmostEqual(self, usdT3d.scalePivot().vector, [0.0, 0.0, 0.0])
@unittest.skipUnless(mayaUtils.ufeSupportFixLevel() > 0, "Requires center pivot Maya fix.")
def testCenterPivotUpdatePivotCompensations(self):
'''Center pivot must correctly update rotate, scale pivot compensations.'''
cmds.file(new=True, force=True)
import mayaUsd_createStageWithNewLayer
mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapeItem = ufeUtils.createItem('|stage1|stageShape1')
proxyShapeContextOps = ufe.ContextOps.contextOps(proxyShapeItem)
proxyShapeContextOps.doOp(['Add New Prim', 'Capsule'])
capsuleItem = ufeUtils.createItem('|stage1|stageShape1,/Capsule1')
t3d = ufe.Transform3d.transform3d(capsuleItem)
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(capsuleItem)
# Pivots and pivot compensations initially [0.0, 0.0, 0.0].
def checkNullPivotsAndCompensations():
for v in [t3d.rotatePivot(), t3d.scalePivot(),
t3d.rotatePivotTranslation(),
t3d.scalePivotTranslation()]:
assertVectorAlmostEqual(self, v.vector, [0.0, 0.0, 0.0])
checkNullPivotsAndCompensations()
# Rotate around x axis so that moving the pivot will create a rotate
# pivot translation as compensation, which keeps the total object
# transformation constant.
cmds.rotate(45, 0, 0, r=True, os=True, fo=True)
# Move the pivot.
cmds.move(0, 5, 0, r=True, urp=True, usp=True)
d = cos(radians(45)) * 5
assertVectorAlmostEqual(self, t3d.rotatePivot().vector, [0.0, d, -d])
assertVectorAlmostEqual(self, t3d.scalePivot().vector, [0, d, -d])
assertVectorAlmostEqual(self, t3d.rotatePivotTranslation().vector,
[0.0, 1.464466095, d])
assertVectorAlmostEqual(self, t3d.scalePivotTranslation().vector,
[0.0, 0.0, 0.0])
# call center pivot command
cmds.xform(cp=True)
# Pivots and pivot compensations expected to be at [0.0, 0.0, 0.0]
checkNullPivotsAndCompensations()
@unittest.skipUnless(mayaUtils.ufeSupportFixLevel() > 0, "Requires center pivot Maya fix.")
def testCenterPivotMatrixOp(self):
'''Center pivot must correctly update prim with matrix transform op.'''
cmds.file(new=True, force=True)
import mayaUsd_createStageWithNewLayer
mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePathStr = '|stage1|stageShape1'
stage = mayaUsd.lib.GetPrim(proxyShapePathStr).GetStage()
xform = stage.DefinePrim('/Xform1', 'Xform')
xformable = UsdGeom.Xformable(xform)
transformOp = xformable.AddTransformOp()
transformOp.Set(Gf.Matrix4d(1.0))
self.assertEqual(xformable.GetXformOpOrderAttr().Get(), Vt.TokenArray([
"xformOp:transform"]))
xformItem = ufeUtils.createItem(proxyShapePathStr + ',/Xform1')
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(xformItem)
t3d = ufe.Transform3d.transform3d(xformItem)
# Set the rotate and scale pivot. Since the matrix op does not support
# these, this will create a Maya fallback transform stack after the
# matrix op.
cmds.move(3, 2, 1, r=True, urp=True, usp=True)
# Read back the rotate pivot using the Transform3d interface.
t3d = ufe.Transform3d.transform3d(xformItem)
self.assertEqual(t3d.rotatePivot().vector, [3, 2, 1])
# call center pivot command
cmds.xform(cp=True)
self.assertEqual(t3d.rotatePivot().vector, [0, 0, 0])
@unittest.skipUnless(ufeUtils.ufeFeatureSetVersion() >= 2, 'testPrimPropertyPathNotifs only available in UFE v2 or greater.')
def testPrimPropertyPathNotifs(self):
import mayaUsd_createStageWithNewLayer
proxyShape = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePath = ufe.PathString.path(proxyShape)
proxyShapeItem = ufe.Hierarchy.createItem(proxyShapePath)
proxyShapeContextOps = ufe.ContextOps.contextOps(proxyShapeItem)
proxyShapeContextOps.doOp(['Add New Prim', 'Capsule'])
# Select the capsule
capPath = ufe.PathString.path('%s,/Capsule1' % proxyShape)
capItem = ufe.Hierarchy.createItem(capPath)
ufe.GlobalSelection.get().clear()
ufe.GlobalSelection.get().append(capItem)
# No notifications yet.
obs = TestObserver()
ufe.Attributes.addObserver(capItem, obs)
ufe.Transform3d.addObserver(capItem, obs)
self.assertEqual(obs.notifications, 0)
# Move the capsule
cmds.move(0, 10, 10)
# Verify that we got both ValueChanged and Transform3d notifs.
# Note: we should get notifs on both the "xformOp:translate" and
# "xformOpOrder" attributes. We don't care how many, just that
# we are getting both of these kinds of notifs on both the move
# and undo.
self.assertTrue(obs.nbValueChanged > 0)
self.assertTrue(obs.nbTransform3d > 0)
# Reset observer and then undo and again verify notifs.
obs.reset()
self.assertEqual(obs.notifications, 0)
cmds.undo()
self.assertTrue(obs.nbValueChanged > 0)
self.assertTrue(obs.nbTransform3d > 0)
# Reset and test same thing with Rotate.
obs.reset()
cmds.rotate(10, 0, 0)
self.assertTrue(obs.nbValueChanged > 0)
self.assertTrue(obs.nbTransform3d > 0)
obs.reset()
self.assertEqual(obs.notifications, 0)
cmds.undo()
self.assertTrue(obs.nbValueChanged > 0)
self.assertTrue(obs.nbTransform3d > 0)
# Reset and test same thing with Scale.
obs.reset()
cmds.scale(2, 2, 2)
self.assertTrue(obs.nbValueChanged > 0)
self.assertTrue(obs.nbTransform3d > 0)
obs.reset()
self.assertEqual(obs.notifications, 0)
cmds.undo()
self.assertTrue(obs.nbValueChanged > 0)
self.assertTrue(obs.nbTransform3d > 0)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
48183dcf00dbb48bafca3bd51c3517b8e64c103b
|
0fd8922e6b9c6ed20a9c89fb2887056aad16b5e6
|
/examples/bidirect_functional.py
|
661b7c7a1a80b1f6e5a7f0e28d154efac7637891
|
[
"MIT"
] |
permissive
|
philipperemy/cond_rnn
|
52b1fbe4bdf44927d73b7de31b68f5b3ff69b778
|
fd6b2c33f0d961b6bab78255a950deb4e51b87b9
|
refs/heads/master
| 2023-08-16T14:14:34.440172
| 2023-08-08T04:49:03
| 2023-08-08T04:49:03
| 191,509,198
| 219
| 37
|
MIT
| 2023-08-07T08:05:53
| 2019-06-12T06:15:04
|
Python
|
UTF-8
|
Python
| false
| false
| 941
|
py
|
bidirect_functional.py
|
import numpy as np
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import LSTM, Bidirectional
from cond_rnn import ConditionalRecurrent
forward_layer = ConditionalRecurrent(LSTM(units=12, return_sequences=True))
backward_layer = ConditionalRecurrent(LSTM(units=13, return_sequences=True, go_backwards=True))
NUM_SAMPLES = 100
TIME_STEPS = 10
INPUT_DIM = 3
NUM_CLASSES = 2
inputs = (
Input(shape=(TIME_STEPS, INPUT_DIM)),
Input(shape=(NUM_CLASSES,)) # conditions.
)
outputs = Bidirectional(
layer=forward_layer,
backward_layer=backward_layer,
)(inputs=inputs)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy')
model.summary()
train_inputs = np.random.uniform(size=(NUM_SAMPLES, TIME_STEPS, INPUT_DIM))
train_targets = np.zeros(shape=[NUM_SAMPLES, NUM_CLASSES])
assert model.predict(x=[train_inputs, train_targets]).shape == (NUM_SAMPLES, 10, 12 + 13)
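# A minimal, hedged sketch of fitting the model; the target array below is an assumption
# made purely for illustration (it matches the asserted prediction shape above) and is not
# part of the original example.
# train_outputs = np.random.uniform(size=(NUM_SAMPLES, TIME_STEPS, 12 + 13))
# model.fit(x=[train_inputs, train_targets], y=train_outputs, epochs=1)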
|
c4bc2c7d36e9f27a54922e621a8ca2fd7e6f84b3
|
45d48130937378975cd7b9f356f53ae815ee8fb6
|
/examples/smime-sign-hsm.py
|
be19ea751c42b9f6350a4c248f7ff5d03ba62a17
|
[
"MIT",
"LGPL-3.0-only",
"BSD-3-Clause"
] |
permissive
|
m32/endesive
|
e8e329d97f6531acbaa7f2010ce91c70857b7ea0
|
20df6cb8a45b126cc50c604a41cdd36c4c170e11
|
refs/heads/master
| 2023-05-11T11:12:37.525467
| 2023-05-05T04:49:18
| 2023-05-05T04:49:18
| 143,915,688
| 222
| 101
|
MIT
| 2023-04-27T07:41:48
| 2018-08-07T19:03:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,264
|
py
|
smime-sign-hsm.py
|
#!/usr/bin/env vpython3
# *-* coding: utf-8 *-*
import os
import stat
import subprocess
import datetime
import base64
import email
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import pkcs12
from endesive.hsm import SSHAgentHSM
from endesive import signer
def compose(From, To, Subject, Body, Attachment, signer):
# create message object instance
msg = MIMEMultipart(_subtype="signed", micalg="SHA1", protocol="application/pkcs7-signature")
# setup the parameters of the message
msg['From'] = From
msg['To'] = To
msg['Subject'] = Subject
msg['Date'] = email.utils.format_datetime(datetime.datetime.now())
msg.preamble = "This is a multipart message in MIME format."
env = MIMEMultipart(_subtype='mixed')
body = MIMEText(Body.decode())
del body['MIME-Version']
env.attach(body)
app = MIMEApplication(open(Attachment, 'rb').read(), _subtype="pdf")
app.add_header('content-disposition', 'attachment', filename=Attachment)
env.attach(app)
msg.attach(env)
sig = MIMEBase(_maintype='application', _subtype='pkcs7-signature', name="smime.p7s")
sig.add_header('Content-Disposition', 'attachment', filename='smime.p7s')
sig.add_header('Content-Transfer-Encoding', 'base64')
sig.set_payload(signer(env.as_string().encode()))
del sig['MIME-Version']
msg.attach(sig)
return msg, env, sig
def sign(datau, key, cert, othercerts, hashalgo, hsm):
datau = datau.replace(b'\n', b'\r\n')
datas = signer.sign(datau, key, cert, othercerts, hashalgo, attrs=True, pss=False, hsm=hsm)
return base64.encodebytes(datas)
def main():
# split certificate
# we need the key as a separate file
with open('ca/demo2_user1.p12', 'rb') as fp:
key, cert, othercerts = pkcs12.load_key_and_certificates(fp.read(), b'1234', backends.default_backend())
agent = SSHAgentHSM(cert)
# lookup the ssh fingerprint for the certificates public key
keyid, _ = agent.certificate()
keyfile = None
try:
# is the public key known to the ssh-agent yet?
agent.key(keyid)
except ValueError:
# set file permissions to something ssh-agent accepts
def perms(path):
if stat.S_IMODE(os.stat(path).st_mode) & ~stat.S_IRWXU:
os.chmod(path, (stat.S_IRUSR | stat.S_IWUSR))
# we have to add the key to the ssh-agent
# remove the key password, dump in traditional openssl so ssh-agent can add the key
keyfile = 'ca/demo2_user1.key.nopass.pem'
with open(keyfile, 'wb') as fp:
# dump the key
fp.write(key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
))
perms(keyfile)
# pub file so ssh-add -d can be used
pubfile = 'ca/demo2_user1.key.nopass.pem.pub'
with open(pubfile, 'wb') as fp:
# convert the public key of the certificate to ssh public key format
fp.write(cert.public_key().public_bytes(
encoding=serialization.Encoding.OpenSSH,
format=serialization.PublicFormat.OpenSSH
))
perms(pubfile)
subprocess.call(["ssh-add", keyfile])
# reconnect the agent so the key is visible to paramiko
agent = SSHAgentHSM(cert)
datau = open('smime-unsigned.txt', 'rb').read()
msg, env, sig = compose(
From='root+from@localhost',
To='root+to@localhost',
Subject='this is the subject',
Body=datau,
Attachment='pdf-acrobat.pdf',
signer=lambda data: sign(data, None, cert, othercerts, 'sha256', agent)
)
datas = msg.as_bytes(unixfrom=True)
open('smime-signed-hsm.txt', 'wb').write(datas)
# we added, so we remove the key from ssh-agent
if keyfile:
subprocess.call(['ssh-add', '-d', keyfile])
main()
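# Hedged verification sketch (not part of the original script); a standard OpenSSL invocation,
# untested here:
# openssl smime -verify -noverify -in smime-signed-hsm.txt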
|
7390683472dc6541e6e361551e1240edf807bbe3
|
83963c19fd120dcc7498b726cc56de7fbb900a47
|
/osxphotos/cli/info.py
|
84012d07c6e515eeb5838db2a76980a6f6cc00d4
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
RhetTbull/osxphotos
|
55ad4f1257bcd26bb3fbadde6ce5dd59c0917354
|
2cb5a4d18a27be6ccf68f5f35abd39418d238016
|
refs/heads/main
| 2023-09-02T18:11:06.227191
| 2023-09-02T16:06:51
| 2023-09-02T16:06:51
| 192,160,985
| 1,287
| 93
|
MIT
| 2023-09-14T14:10:58
| 2019-06-16T07:07:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,292
|
py
|
info.py
|
"""info command for osxphotos CLI"""
import json
import click
import yaml
import osxphotos
from osxphotos._constants import _PHOTOS_4_VERSION
from .cli_params import DB_ARGUMENT, DB_OPTION, JSON_OPTION
from .common import get_photos_db
from .list import _list_libraries
@click.command()
@DB_OPTION
@JSON_OPTION
@DB_ARGUMENT
@click.pass_obj
@click.pass_context
def info(ctx, cli_obj, db, json_, photos_library):
"""Print out descriptive info of the Photos library database."""
db = get_photos_db(*photos_library, db, cli_obj.db)
if db is None:
click.echo(ctx.obj.group.commands["info"].get_help(ctx), err=True)
click.echo("\n\nLocated the following Photos library databases: ", err=True)
_list_libraries()
return
photosdb = osxphotos.PhotosDB(dbfile=db)
info = {"database_path": photosdb.db_path, "database_version": photosdb.db_version}
photos = photosdb.photos(movies=False)
not_shared_photos = [p for p in photos if not p.shared]
info["photo_count"] = len(not_shared_photos)
hidden = [p for p in photos if p.hidden]
info["hidden_photo_count"] = len(hidden)
movies = photosdb.photos(images=False, movies=True)
not_shared_movies = [p for p in movies if not p.shared]
info["movie_count"] = len(not_shared_movies)
if photosdb.db_version > _PHOTOS_4_VERSION:
shared_photos = [p for p in photos if p.shared]
info["shared_photo_count"] = len(shared_photos)
shared_movies = [p for p in movies if p.shared]
info["shared_movie_count"] = len(shared_movies)
keywords = photosdb.keywords_as_dict
info["keywords_count"] = len(keywords)
info["keywords"] = keywords
albums = photosdb.albums_as_dict
info["albums_count"] = len(albums)
info["albums"] = albums
if photosdb.db_version > _PHOTOS_4_VERSION:
albums_shared = photosdb.albums_shared_as_dict
info["shared_albums_count"] = len(albums_shared)
info["shared_albums"] = albums_shared
persons = photosdb.persons_as_dict
info["persons_count"] = len(persons)
info["persons"] = persons
if cli_obj.json or json_:
click.echo(json.dumps(info, ensure_ascii=False))
else:
click.echo(yaml.dump(info, sort_keys=False, allow_unicode=True))
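# Hedged usage sketch (hypothetical library path); the flag names mirror the options wired above:
# osxphotos info --json "~/Pictures/Photos Library.photoslibrary"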
|
239fb3bee3b32daa93c423eb45f43bac460d1887
|
45ba55b4fbdaf1657fde92beaeba4f173265afcd
|
/tests/tools/test_merge_types.py
|
76315752177494d93092fea9b58ec0fd5605feff
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
strawberry-graphql/strawberry
|
af96afd4edd1788c59e150597a12501fbc7bf444
|
6d86d1c08c1244e00535840d9d87925431bc6a1c
|
refs/heads/main
| 2023-08-30T03:34:12.929874
| 2023-08-24T12:01:09
| 2023-08-24T12:01:09
| 162,690,887
| 3,408
| 529
|
MIT
| 2023-09-14T21:49:44
| 2018-12-21T08:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
test_merge_types.py
|
from textwrap import dedent
import pytest
import strawberry
from strawberry.tools import merge_types
@strawberry.type
class Person:
@strawberry.field
def name(self) -> str:
return "Eve"
@strawberry.field
def age(self) -> int:
return 42
@strawberry.type
class SimpleGreeter:
@strawberry.field
def hi(self) -> str:
return "Hi"
@strawberry.type
class ComplexGreeter:
@strawberry.field
def hi(self, name: str = "world") -> str:
return f"Hello, {name}!"
@strawberry.field
def bye(self, name: str = "world") -> str:
return f"Bye, {name}!"
def test_custom_name():
"""The resulting type should have a custom name is one is specified"""
custom_name = "SuperQuery"
ComboQuery = merge_types(custom_name, (ComplexGreeter, Person))
assert ComboQuery.__name__ == custom_name
def test_inheritance():
"""It should merge multiple types following the regular inheritance rules"""
ComboQuery = merge_types("SuperType", (ComplexGreeter, Person))
definition = ComboQuery.__strawberry_definition__
assert len(definition.fields) == 4
actuals = [(field.python_name, field.type) for field in definition.fields]
expected = [("hi", str), ("bye", str), ("name", str), ("age", int)]
assert actuals == expected
def test_empty_list():
"""It should raise when the `types` argument is empty"""
with pytest.raises(ValueError):
merge_types("EmptyType", ())
def test_schema():
"""It should create a valid, usable schema based on a merged query"""
ComboQuery = merge_types("SuperSchema", (ComplexGreeter, Person))
schema = strawberry.Schema(query=ComboQuery)
sdl = """
schema {
query: SuperSchema
}
type SuperSchema {
hi(name: String! = "world"): String!
bye(name: String! = "world"): String!
name: String!
age: Int!
}
"""
assert dedent(sdl).strip() == str(schema)
result = schema.execute_sync("query { hi }")
assert not result.errors
assert result.data == {"hi": "Hello, world!"}
def test_fields_override():
"""It should warn when merging results in overriding fields"""
with pytest.warns(Warning):
merge_types("FieldsOverride", (ComplexGreeter, SimpleGreeter))
|
92bfb0e56773f453264b4f1ce7ee9ea9cce02950
|
fcc3fcd8da44b7d6bd46098df9693d6fb01cef73
|
/jans-linux-setup/jans_setup/setup_app/installers/eleven.py
|
1dd263a93374b8f6fb34b995fc451bb50c64dd96
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JanssenProject/jans
|
cb633472825787b68ecfba7db97b5b7e5c87e7a5
|
66c4ef766a62788437cce88974357a9a2b20de21
|
refs/heads/main
| 2023-09-01T07:04:48.645163
| 2023-08-31T10:57:05
| 2023-08-31T10:57:05
| 309,721,058
| 400
| 68
|
Apache-2.0
| 2023-09-14T17:42:33
| 2020-11-03T15:00:37
|
Java
|
UTF-8
|
Python
| false
| false
| 2,673
|
py
|
eleven.py
|
import os
import glob
import shutil
from setup_app import paths
from setup_app.utils import base
from setup_app.utils.package_utils import packageUtils
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.installers.jetty import JettyInstaller
class ElevenInstaller(JettyInstaller):
source_files = [
(os.path.join(Config.dist_jans_dir, 'jans-eleven.war'), os.path.join(base.current_app.app_info['JANS_MAVEN'], 'maven/io/jans/jans-eleven-server/{0}/jans-eleven-server-{0}.war').format(base.current_app.app_info['ox_version']))
]
def __init__(self):
setattr(base.current_app, self.__class__.__name__, self)
self.service_name = 'jans-eleven'
self.needdb = True
self.app_type = AppType.SERVICE
self.install_type = InstallOption.OPTONAL
self.install_var = 'installEleven'
self.register_progess()
self.output_folder = os.path.join(Config.output_dir, 'jans-eleven')
self.template_folder = os.path.join(Config.templateFolder, 'jans-eleven')
self.eleven_conf_json = os.path.join(self.output_folder, 'jans-eleven.json')
def install(self):
# install softhsm
if base.clone_type == 'rpm':
self.softhsm_path = '/lib64/pkcs11/libsofthsm2.so'
if not os.path.exists(self.softhsm_path):
self.logIt("Installing softhsm", pbar=self.service_name)
if base.os_version == '7':
packageUtils.installPackage('softhsm', remote=True)
elif base.os_version == '8':
packageUtils.installPackage('http://repo.okay.com.mx/centos/8/x86_64/release/softhsm-2.4.0-2.el8.x86_64.rpm', remote=True)
elif base.clone_type == 'deb':
self.logIt("Installing softhsm", pbar=self.service_name)
self.softhsm_path = '/usr/lib/softhsm/libsofthsm2.so'
if not os.path.exists(self.softhsm_path):
packageUtils.installPackage('softhsm2', remote=True)
self.installJettyService(self.jetty_app_configuration[self.service_name], True)
self.logIt("Copying {} into jetty webapps folder...".format(self.source_files[0][0]))
jettyServiceWebapps = os.path.join(self.jetty_base, self.service_name, 'webapps')
self.copyFile(self.source_files[0][0], jettyServiceWebapps)
self.enable()
def render_import_templates(self):
self.renderTemplateInOut(self.eleven_conf_json, self.template_folder, self.output_folder)
self.copyFile(self.eleven_conf_json, Config.configFolder)
def create_folders(self):
pass
|
e5a87e410b74ba113fe8cea0296b43f956588d26
|
15eb68a30bd1bcd8c153ce3c8774e09ef3f4135d
|
/Examples/torch/quantization/qat_range_learning_ddp_training_lightining.py
|
3e909b0ced3cce9c399aa2a23013412d60690bd2
|
[
"BSD-3-Clause"
] |
permissive
|
quic/aimet
|
77a984af68fc3c46d98c707d18a14c95a3efdacf
|
5a406e657082b6a4f6e4bf48f0e46e085cb1e351
|
refs/heads/develop
| 2023-08-21T12:51:10.500286
| 2023-08-18T18:35:39
| 2023-08-18T18:35:39
| 257,688,216
| 1,676
| 339
|
NOASSERTION
| 2023-09-08T06:59:39
| 2020-04-21T18:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 10,036
|
py
|
qat_range_learning_ddp_training_lightining.py
|
# =============================================================================
#
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
#
# =============================================================================
"""
This script uses PyTorch Lightning and is an example of how DDP (distributed data parallel) can be used with AIMET for evaluation.
Please run the qat_range_learning_ddp_eval script first to get a saved model whose checkpoint is passed to args in this module.
For this example we use an MV2 model and perform QAT with range learning.
The steps are as follows:
1) Pass a quantized model for which compute encodings has been already performed as part of args
2) Use PyTorch lightning DDP for evaluating the model
3) Use PyTorch lightning DDP for Training the model
"""
# pylint: skip-file
import os
import argparse
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
from torchmetrics import Accuracy
from aimet_torch import quantsim
#=======================define module==========================#
class LitImageNet(LightningModule):
"""
Creates a Lightning Module for ImageNet dataset
"""
def __init__(self, imagenet_dir, batch_size=1024, model_path="na", learning_rate=0.000001, num_classes=1000):
super().__init__()
# Set up hyper-parameters. Set up weight decay and other optimizer parameters here, and pass them down in the configure_optimizers function.
self.imagenet_dir = imagenet_dir
self.learning_rate = learning_rate
self.batch_size = batch_size
# ImageNet specific attributes
self.num_classes = num_classes
file_path = model_path
quant_sim = quantsim.load_checkpoint(file_path)
self.model = quant_sim.model
self.accuracy = Accuracy()
def forward(self, x):
"""
Model forward pass
"""
x = self.model(x)
return x
def training_step(self, batch, _):
""" Training step """
# Notice that no explicit optimizer.step(), torch.no_grad() or model.eval() is required; Lightning handles these.
images, labels = batch
logits = self.model(images)
loss = F.cross_entropy(logits, labels)
return loss
def validation_step(self, batch, _):
""" Validation step used b lightning """
images, labels = batch
logits = self.model(images)
loss = F.cross_entropy(logits, labels)
pred = torch.argmax(logits, dim=1)
self.accuracy(pred, labels)
# self.log is PyTorch Lightning's built-in logging method.
self.log("val_loss", loss, on_epoch=True, on_step=False, prog_bar=True)
self.log("val_acc", self.accuracy, on_epoch=True, on_step=False, prog_bar=True)
return loss
def validation_epoch_end(self, _):
""" Runs at the end of validation step to print accuracy """
val_accuracy = self.accuracy.compute()
if self.trainer.is_global_zero:
print("\n------------------------------------------------------------")
print("\nVALIDATION ACCURACY ===> ", val_accuracy.cpu().detach().numpy())
print("\n------------------------------------------------------------")
# Resetting the accuracy metric is not mandatory in more recent releases.
self.accuracy.reset()
def test_step(self, batch, batch_idx):
""" Runs validation """
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
""" Configures optimizer for training """
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
def train_dataloader(self):
""" Loads training data """
train_dataset = datasets.ImageFolder(os.path.join(self.imagenet_dir, 'train'),
transform=transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]))
return DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=4)
def test_dataloader(self):
""" Loads test data """
#test_dataset = datasets.ImageFolder('/nvme/dataset/imagenet/val',
test_dataset = datasets.ImageFolder(os.path.join(self.imagenet_dir, 'val'),
transform=transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]))
test_sampler = DistributedSampler(dataset=test_dataset)
return DataLoader(test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=4, sampler=test_sampler)
def val_dataloader(self):
""" Loads validation data """
# test_dataloader could be reused here since the test and validation dataloaders are the same for ImageNet.
test_dataset = datasets.ImageFolder(os.path.join(self.imagenet_dir, 'val'),
transform=transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]))
return DataLoader(test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=4)
#=======================setting up arguments===================
def main():
""" Main function """
# STEP 1
parser = argparse.ArgumentParser(description='PyTorch Lightning DDP')
parser.add_argument('--epochs', default=1, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch_size', default=128, type=int,
metavar='N')
parser.add_argument('--learning_rate', default=0.000001, type=float,
help='initial learning rate')
parser.add_argument('--num_classes', default=1000, type=int,
help='Number of classes for the network.')
parser.add_argument('--model_path',
help="path to the quantized model's saved checkpoint for QAT", default='na')
parser.add_argument('--imagenet_dir',
help="path to imagenet_dir", required=True)
args = parser.parse_args()
print("TRAINING QUANTIZED MODEL ON DDP ...")
print("================================================"+str(args.batch_size))
print("================================================"+str(torch.cuda.device_count()))
# For fully deterministic, reproducible behavior call seed_everything and set the deterministic flag to True in the Trainer.
pl.seed_everything(0, workers=True)
# Instantiate the LitImageNet Lightning module defined above.
model = LitImageNet(
imagenet_dir=args.imagenet_dir,
batch_size=args.batch_size,
model_path=args.model_path,
learning_rate=args.learning_rate,
num_classes=args.num_classes,
)
#Define the trainer here.
trainer = pl.Trainer(
deterministic=True,# For full reproducibility. Does not work with DP. Only for DDP.
strategy='ddp',# Sets the DDP strategy for Lightning (lowercase 'ddp' is the registered strategy name)
accelerator='gpu',
devices=-1,# -1 means all available GPUs
max_epochs=args.epochs,
limit_train_batches=10
)
# STEP 2
trainer.test(model)
# STEP 3
trainer.fit(model)
if __name__ == "__main__":
main()
|
8da45e9f12719a0da1544fde1d3d7d6f6b12528f
|
8cb7399499d582efbc900b530cd7075dd82ec0bd
|
/spockbot/mcdata/recipes.py
|
5525b37d5e854b425684cc9cd0b1a4550af9f3c7
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SpockBotMC/SpockBot
|
115334d3ebb6db806e859a9ea20ea476761702b3
|
f89911551f18357720034fbaa52837a0d09f66ea
|
refs/heads/master
| 2021-01-15T15:33:00.003492
| 2016-05-01T14:57:55
| 2016-05-01T14:57:55
| 7,111,791
| 188
| 61
| null | 2016-04-25T00:06:56
| 2012-12-11T13:01:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,639
|
py
|
recipes.py
|
from collections import defaultdict, namedtuple
from minecraft_data.v1_8 import recipes as raw_recipes
RecipeItem = namedtuple('RecipeItem', 'id meta amount')
class Recipe(object):
def __init__(self, raw):
self.result = reformat_item(raw['result'], None)
if 'ingredients' in raw:
self.ingredients = [reformat_item(item, 0)
for item in raw['ingredients']]
self.in_shape = None
self.out_shape = None
else:
self.in_shape = reformat_shape(raw['inShape'])
self.out_shape = reformat_shape(raw['outShape']) \
if 'outShape' in raw else None
self.ingredients = [item for row in self.in_shape for item in row]
@property
def total_ingredient_amounts(self):
"""
Returns:
dict: In the form { (item_id, metadata) -> amount }
"""
totals = defaultdict(int)
for id, meta, amount in self.ingredients:
totals[(id, meta)] += amount
return totals
@property
def ingredient_positions(self):
"""
Returns:
dict: In the form { (item_id, metadata) -> [(x, y, amount), ...] }
"""
positions = defaultdict(list)
for y, row in enumerate(self.in_shape):
for x, (item_id, metadata, amount) in enumerate(row):
positions[(item_id, metadata)].append((x, y, amount))
return positions
def reformat_item(raw, default_meta=None):
if isinstance(raw, dict):
raw = raw.copy() # do not modify arg
if 'metadata' not in raw:
raw['metadata'] = default_meta
if 'count' not in raw:
raw['count'] = 1
return RecipeItem(raw['id'], raw['metadata'], raw['count'])
elif isinstance(raw, list):
return RecipeItem(raw[0], raw[1], 1)
else: # single ID or None
return RecipeItem(raw or None, default_meta, 1)
def reformat_shape(shape):
return [[reformat_item(item, None) for item in row] for row in shape]
def iter_recipes(item_id, meta=None):
item_id = str(item_id)
meta = meta and int(meta)
try:
recipes_for_item = raw_recipes[item_id]
except KeyError:
return # no recipe found, do not yield anything
else:
for raw in recipes_for_item:
recipe = Recipe(raw)
if meta is None or meta == recipe.result.meta:
yield recipe
def get_any_recipe(item, meta=None):
# TODO return small recipes if present
for matching in iter_recipes(item, meta):
return matching
return None
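# Hedged usage sketch; the item id below is a placeholder chosen purely for illustration:
# recipe = get_any_recipe(5)
# if recipe is not None:
#     print(recipe.result, recipe.total_ingredient_amounts)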
|
77206b6dfa06e01cdb36d756f80d9f6a7d269c1a
|
8a40a3db07eec18178c9b8757aafdb35724ff324
|
/tests/common/test_payload.py
|
06c3289b125f0657ea44adca00b6ae259a144201
|
[
"MIT"
] |
permissive
|
miguelgrinberg/python-engineio
|
52313e7fd2cd740e5a083976231c056d53c9a590
|
35cc5ec0a69b5274697928af4a163e0ca42e1afb
|
refs/heads/main
| 2023-08-18T05:55:37.901376
| 2023-08-15T18:02:04
| 2023-08-15T18:02:04
| 37,830,040
| 236
| 175
|
MIT
| 2023-09-03T15:13:49
| 2015-06-21T23:17:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
test_payload.py
|
import unittest
import pytest
from engineio import packet
from engineio import payload
class TestPayload(unittest.TestCase):
def test_encode_empty_payload(self):
p = payload.Payload()
assert p.packets == []
assert p.encode() == ''
def test_decode_empty_payload(self):
p = payload.Payload(encoded_payload='')
assert p.encode() == ''
def test_encode_payload_text(self):
pkt = packet.Packet(packet.MESSAGE, data='abc')
p = payload.Payload([pkt])
assert p.packets == [pkt]
assert p.encode() == '4abc'
def test_encode_payload_text_multiple(self):
pkt = packet.Packet(packet.MESSAGE, data='abc')
pkt2 = packet.Packet(packet.MESSAGE, data='def')
p = payload.Payload([pkt, pkt2])
assert p.packets == [pkt, pkt2]
assert p.encode() == '4abc\x1e4def'
def test_encode_payload_binary(self):
pkt = packet.Packet(packet.MESSAGE, data=b'\x00\x01\x02')
p = payload.Payload([pkt])
assert p.packets == [pkt]
assert p.encode() == 'bAAEC'
def test_encode_payload_binary_multiple(self):
pkt = packet.Packet(packet.MESSAGE, data=b'\x00\x01\x02')
pkt2 = packet.Packet(packet.MESSAGE, data=b'\x03\x04\x05\x06')
p = payload.Payload([pkt, pkt2])
assert p.packets == [pkt, pkt2]
assert p.encode() == 'bAAEC\x1ebAwQFBg=='
def test_encode_payload_text_binary_multiple(self):
pkt = packet.Packet(packet.MESSAGE, data='abc')
pkt2 = packet.Packet(packet.MESSAGE, data=b'\x03\x04\x05\x06')
p = payload.Payload([pkt, pkt2, pkt2, pkt])
assert p.packets == [pkt, pkt2, pkt2, pkt]
assert p.encode() == '4abc\x1ebAwQFBg==\x1ebAwQFBg==\x1e4abc'
def test_encode_jsonp_payload(self):
pkt = packet.Packet(packet.MESSAGE, data='abc')
p = payload.Payload([pkt])
assert p.packets == [pkt]
assert p.encode(jsonp_index=233) == '___eio[233]("4abc");'
def test_decode_jsonp_payload(self):
p = payload.Payload(encoded_payload='d=4abc')
assert p.encode() == '4abc'
def test_decode_invalid_payload(self):
with pytest.raises(ValueError):
payload.Payload(encoded_payload='bad payload')
def test_decode_multi_payload_with_too_many_packets(self):
with pytest.raises(ValueError):
payload.Payload(encoded_payload='4abc\x1e4def\x1e' * 9 + '6')
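# Hedged sketch of the wire format exercised above (assumes the same decoding API as the tests):
# packets are joined with the record separator '\x1e'; binary packets are prefixed with 'b'
# and base64-encoded.
# p = payload.Payload(encoded_payload='4abc\x1e4def')
# assert [pkt.data for pkt in p.packets] == ['abc', 'def']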
|
5a5bbc8083d052e249073c819383126a7d1e0a29
|
7ff3ede9c16952a3874bf941f92a04fdbfe5a089
|
/flamby/strategies/utils.py
|
4f552cd8604e3244f94d1c96a3e200a640f9b799
|
[
"MIT"
] |
permissive
|
owkin/FLamby
|
2be93062d4b4f3db4552d2b62bb491cc97f616a3
|
6d403afca752d21dd25b509cfd9e698a2e7f9cc5
|
refs/heads/main
| 2023-07-19T18:15:45.830543
| 2023-07-17T15:39:25
| 2023-07-17T15:39:25
| 463,146,427
| 156
| 18
|
MIT
| 2023-07-21T13:08:50
| 2022-02-24T12:49:40
|
Python
|
UTF-8
|
Python
| false
| false
| 17,629
|
py
|
utils.py
|
import copy
import os
import time
from datetime import datetime
import numpy as np
import torch
from opacus import PrivacyEngine
from torch.utils.tensorboard import SummaryWriter
class DataLoaderWithMemory:
"""This class allows to iterate the dataloader infinitely batch by batch.
When there are no more batches the iterator is reset silently.
This class allows to keep the memory of the state of the iterator hence its
name.
"""
def __init__(self, dataloader):
"""This initialization takes a dataloader and creates an iterator object
from it.
Parameters
----------
dataloader : torch.utils.data.dataloader
A dataloader object built from one of the datasets of this repository.
"""
self._dataloader = dataloader
self._iterator = iter(self._dataloader)
def _reset_iterator(self):
self._iterator = iter(self._dataloader)
def __len__(self):
return len(self._dataloader.dataset)
def get_samples(self):
"""This method generates the next batch from the iterator or resets it
if needed. It can be called an infinite amount of times.
Returns
-------
tuple
a batch from the iterator
"""
try:
X, y = next(self._iterator)
except StopIteration:
self._reset_iterator()
X, y = next(self._iterator)
return X, y
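# Hedged usage sketch (hypothetical names): the wrapper can be queried indefinitely,
# silently restarting the underlying iterator when it is exhausted.
# infinite_dl = DataLoaderWithMemory(train_dl)
# for _ in range(num_updates):
#     X, y = infinite_dl.get_samples()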
class _Model:
"""This is a helper class allowing to train a copy of a given model for
num_updates steps by instantiating the user-provided optimizer.
This class posesses method to retrieve current parameters set in np.ndarrays
and to update the weights with a numpy list of the same size as the
parameters of the model.
"""
def __init__(
self,
model,
train_dl,
optimizer_class,
lr,
loss,
nrounds,
client_id=0,
dp_target_epsilon=None,
dp_target_delta=None,
dp_max_grad_norm=None,
log=False,
log_period=100,
log_basename="local_model",
logdir="./runs",
seed=None,
):
"""_summary_
Parameters
----------
model : torch.nn.Module
_description_
train_dl : torch.utils.data.DataLoader
_description_
optimizer_class : torch.optim
A torch optimizer class that will be instantiated by calling:
optimizer_class(self.model.parameters(), lr)
lr : float
The learning rate to use with the optimizer class.
loss : torch.nn.modules.loss._Loss
An instantiated torch loss.
nrounds: int
The number of communication rounds to do.
log: bool
Whether or not to log quantities with tensorboard. Defaults to False.
client_id: int
The id of the client for logging purposes. Default to 0.
dp_target_epsilon: float
The target epsilon for (epsilon, delta)-differential
private guarantee. Defaults to None.
dp_target_delta: float
The target delta for (epsilon, delta)-differential
private guarantee. Defaults to None.
dp_max_grad_norm: float
The maximum L2 norm of per-sample gradients;
used to enforce differential privacy. Defaults to None.
log_period: int
The period at which to log quantities. Defaults to 100.
log_basename: str
The basename of the created log file if log=True. Defaults to local_model.
logdir: str
Where to create the log file. Defaults to ./runs.
seed: int
Seed provided to torch.Generator. Defaults to None.
"""
self.model = copy.deepcopy(model)
self._train_dl = train_dl
self._optimizer = optimizer_class(self.model.parameters(), lr)
self._loss = copy.deepcopy(loss)
self._device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = self.model.to(self._device)
self.num_batches_seen = 0
self.log = log
self.log_period = log_period
self.client_id = client_id
self.dp_target_epsilon = dp_target_epsilon
self.dp_target_delta = dp_target_delta
self.dp_max_grad_norm = dp_max_grad_norm
if self.log:
os.makedirs(logdir, exist_ok=True)
date_now = str(datetime.now())
self.writer = SummaryWriter(
log_dir=os.path.join(logdir, f"{log_basename}-{date_now}")
)
self._apply_dp = (
(self.dp_target_epsilon is not None)
and (self.dp_max_grad_norm is not None)
and (self.dp_target_delta is not None)
)
if self._apply_dp:
seed = seed if seed is not None else int(time.time())
privacy_engine = PrivacyEngine()
(
self.model,
self._optimizer,
self._train_dl,
) = privacy_engine.make_private_with_epsilon(
module=self.model,
optimizer=self._optimizer,
data_loader=self._train_dl,
epochs=nrounds,
target_epsilon=dp_target_epsilon,
target_delta=dp_target_delta,
max_grad_norm=dp_max_grad_norm,
noise_generator=torch.Generator(self._device).manual_seed(seed),
)
self.current_epoch = 0
self.batch_size = None
self.num_batches_per_epoch = None
def _local_train(self, dataloader_with_memory, num_updates):
"""This method trains the model using the dataloader_with_memory given
for num_updates steps.
Parameters
----------
dataloader_with_memory : DataLoaderWithMemory
A dataloader that can be called infinitely using its get_samples()
method.
num_updates : int
The number of batches to train on.
"""
# Local train
_size = len(dataloader_with_memory)
self.model = self.model.train()
for _batch in range(num_updates):
X, y = dataloader_with_memory.get_samples()
X, y = X.to(self._device), y.to(self._device)
if _batch == 0:
# Initialize the batch-size using the first batch to avoid
# edge cases with drop_last=False
_batch_size = X.shape[0]
_num_batches_per_epoch = (_size // _batch_size) + int(
(_size % _batch_size) != 0
)
# Compute prediction and loss
_pred = self.model(X)
_loss = self._loss(_pred, y)
# Backpropagation
_loss.backward()
self._optimizer.step()
self._optimizer.zero_grad()
self.num_batches_seen += 1
_loss, _current_epoch = (
_loss.item(),
self.num_batches_seen // _num_batches_per_epoch,
)
if self.log:
if _batch % self.log_period == 0:
print(
f"loss: {_loss:>7f} after {self.num_batches_seen:>5d}"
f" batches of data amounting to {_current_epoch:>5d}"
" epochs."
)
self.writer.add_scalar(
f"client{self.client_id}/train/Loss",
_loss,
self.num_batches_seen,
)
if _current_epoch > self.current_epoch:
# At each epoch we look at the histograms of all the
# network's parameters
for name, p in self.model.named_parameters():
self.writer.add_histogram(
f"client{self.client_id}/{name}", p, _current_epoch
)
self.current_epoch = _current_epoch
def _prox_local_train(self, dataloader_with_memory, num_updates, mu):
"""This method trains the model using the dataloader_with_memory given
for num_updates steps.
Parameters
----------
dataloader_with_memory : DataLoaderWithMemory
A dataloader that can be called infinitely using its get_samples()
method.
num_updates : int
The number of batches to train on.
mu: float
The mu parameter involved in the proximal term.
"""
# Model used for FedProx for regularization at every optimization round
model_initial = copy.deepcopy(self.model)
# Local train
_size = len(dataloader_with_memory)
self.model = self.model.train()
for idx, _batch in enumerate(range(num_updates)):
X, y = dataloader_with_memory.get_samples()
X, y = X.to(self._device), y.to(self._device)
if idx == 0:
# Initialize the batch-size using the first batch to avoid
# edge cases with drop_last=False
_batch_size = X.shape[0]
_num_batches_per_epoch = (_size // _batch_size) + int(
(_size % _batch_size) != 0
)
# Compute prediction and loss
_pred = self.model(X)
_prox_loss = self._loss(_pred, y)
# We preserve the true loss before adding the proximal term
# and doing the backward step on the sum.
_loss = _prox_loss.detach()
if mu > 0.0:
squared_norm = compute_model_diff_squared_norm(model_initial, self.model)
_prox_loss += mu / 2 * squared_norm
# Backpropagation
_prox_loss.backward()
self._optimizer.step()
self._optimizer.zero_grad()
self.num_batches_seen += 1
_loss, _current_epoch = (
_loss.item(),
self.num_batches_seen // _num_batches_per_epoch,
)
if self.log:
if _batch % self.log_period == 0:
if _current_epoch > self.current_epoch:
# At each epoch we look at the histograms of all the
# network's parameters
for name, p in self.model.named_parameters():
self.writer.add_histogram(
f"client{self.client_id}/{name}", p, _current_epoch
)
print(
f"loss: {_loss:>7f} after {self.num_batches_seen:>5d}"
f" batches of data amounting to {_current_epoch:>5d}"
" epochs."
)
self.writer.add_scalar(
f"client{self.client_id}/train/Loss",
_loss,
self.num_batches_seen,
)
self.current_epoch = _current_epoch
def _local_train_with_correction(
self, dataloader_with_memory, num_updates, correction_state
):
"""This method trains the model using the dataloader_with_memory given
for num_updates steps while applying a correction during every update.
Parameters
----------
dataloader_with_memory : DataLoaderWithMemory
A dataloader that can be called infinitely using its get_samples()
method.
num_updates : int
The number of batches to train on.
correction_state: List
Correction to be applied to the model state during every local update.
"""
# Local train
_size = len(dataloader_with_memory)
self.model = self.model.train()
for idx, _batch in enumerate(range(num_updates)):
X, y = dataloader_with_memory.get_samples()
X, y = X.to(self._device), y.to(self._device)
if idx == 0:
# Initialize the batch-size using the first batch to avoid
# edge cases with drop_last=False
_batch_size = X.shape[0]
_num_batches_per_epoch = (_size // _batch_size) + int(
(_size % _batch_size) != 0
)
# We will implement correction by modifying loss as
# corrected_loss = loss - correction @ model_params.
# Then, we have corrected gradient = gradient - correction.
# Compute prediction and loss
_pred = self.model(X)
_corrected_loss = self._loss(_pred, y)
# We preserve the true loss before adding the correction term
# and doing the backward step on the sum.
_loss = _corrected_loss.detach()
_corrected_loss -= compute_dot_product(self.model, correction_state)
# Backpropagation
_corrected_loss.backward()
self._optimizer.step()
self._optimizer.zero_grad()
self.num_batches_seen += 1
_loss, _current_epoch = (
_loss.item(),
self.num_batches_seen // _num_batches_per_epoch,
)
if self.log:
if _batch % self.log_period == 0:
if _current_epoch > self.current_epoch:
# At each epoch we look at the histograms of all the
# network's parameters
for name, p in self.model.named_parameters():
self.writer.add_histogram(
f"client{self.client_id}/{name}", p, _current_epoch
)
print(
f"loss: {_loss:>7f} after {self.num_batches_seen:>5d}"
f" batches of data amounting to {_current_epoch:>5d}"
" epochs."
)
self.writer.add_scalar(
f"client{self.client_id}/train/Loss",
_loss,
self.num_batches_seen,
)
self.current_epoch = _current_epoch
@torch.no_grad()
def _get_current_params(self):
"""Returns the current weights of the pytorch model.
Returns
-------
list[np.ndarray]
A list of numpy versions of the weights.
"""
return [
param.cpu().detach().clone().numpy() for param in self.model.parameters()
]
@torch.no_grad()
def _update_params(self, new_params):
"""Update in place the weights of the pytorch model by adding the
new_params list of the same size to it.
"""
# update all the parameters
for old_param, new_param in zip(self.model.parameters(), new_params):
old_param.data += torch.from_numpy(new_param).to(old_param.device)
def compute_model_diff_squared_norm(model1: torch.nn.Module, model2: torch.nn.Module):
"""Compute the squared norm of the difference between two models.
Parameters
----------
model1 : torch.nn.Module
model2 : torch.nn.Module
"""
tensor1 = list(model1.parameters())
tensor2 = list(model2.parameters())
norm = sum([torch.sum((tensor1[i] - tensor2[i]) ** 2) for i in range(len(tensor1))])
return norm
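# A minimal sanity-check sketch (not part of the original API, the helper name is
# illustrative): it verifies that compute_model_diff_squared_norm matches a hand-rolled
# sum of squared parameter differences, i.e. the ||w - w0||^2 quantity used for the
# FedProx-style proximal loss mu / 2 * ||w - w0||^2 in the training loop above.
def _example_squared_norm_check():
    import torch

    model_a = torch.nn.Linear(2, 2)
    model_b = torch.nn.Linear(2, 2)
    expected = sum(
        torch.sum((p_a - p_b) ** 2)
        for p_a, p_b in zip(model_a.parameters(), model_b.parameters())
    )
    assert torch.isclose(compute_model_diff_squared_norm(model_a, model_b), expected)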
def compute_dot_product(model: torch.nn.Module, params):
"""Compute the dot prodcut between model and input parameters.
Parameters
----------
model : torch.nn.Module
params : List containing model parameters
"""
model_p = list(model.parameters())
device = model_p[0].device
dot_prod = sum([torch.sum(m * p.to(device)) for m, p in zip(model_p, params)])
return dot_prod
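# A minimal sketch (hypothetical helper, for illustration only) of the correction trick
# used in _local_train_with_correction: subtracting compute_dot_product(model, correction)
# from the loss shifts every parameter gradient by exactly -correction, because
# d/dw (correction . w) = correction.
def _example_correction_gradient():
    import torch

    torch.manual_seed(0)
    model = torch.nn.Linear(3, 1)
    correction = [torch.ones_like(p) for p in model.parameters()]
    x = torch.randn(4, 3)
    plain_grads = torch.autograd.grad(model(x).sum(), list(model.parameters()))
    corrected_loss = model(x).sum() - compute_dot_product(model, correction)
    corrected_grads = torch.autograd.grad(corrected_loss, list(model.parameters()))
    for g, cg, c in zip(plain_grads, corrected_grads, correction):
        assert torch.allclose(cg, g - c)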
def check_exchange_compliance(tensors_list, max_bytes, units="bytes"):
"""
Check that for each round the quantities exchanged are below the dataset
specific limit.
Parameters
----------
tensors_list: List[Union[torch.Tensor, np.ndarray]]
The list of quantities sent by the client.
max_bytes: int
The number of bytes max to exchange per round per client.
units: str
        The units in which to return the result. Defaults to bytes.
Returns
-------
    int or float
        The amount of data exchanged, expressed in the requested unit. A
        ValueError is raised if the total exceeds max_bytes.
"""
assert units in ["bytes", "bits", "megabytes", "gigabytes"]
assert isinstance(tensors_list, list), "You should provide a list of tensors."
assert all(
[
(isinstance(t, np.ndarray) or isinstance(t, torch.Tensor))
for t in tensors_list
]
)
bytes_count = 0
for t in tensors_list:
if isinstance(t, np.ndarray):
bytes_count += t.nbytes
else:
            # element_size() covers integer dtypes as well, unlike torch.finfo,
            # which only handles floating-point tensors.
            bytes_count += t.numel() * t.element_size()
if bytes_count > max_bytes:
raise ValueError(
f"You cannot send more than {max_bytes} bytes, this "
f"round. You tried sending more than {bytes_count} bytes already"
)
if units == "bytes":
res = bytes_count
elif units == "bits":
res = bytes_count * 8
elif units == "megabytes":
res = 1e-6 * bytes_count
elif units == "gigabytes":
res = 1e-9 * bytes_count
else:
raise NotImplementedError(f"{units} is not a possible unit")
return res
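# A minimal usage sketch (values and names are illustrative): the list below stands for
# the quantities a client would send in one round; going above max_bytes raises ValueError.
def _example_exchange_compliance():
    import numpy as np
    import torch

    round_payload = [np.zeros((10, 10), dtype=np.float32), torch.zeros(100)]
    sent = check_exchange_compliance(round_payload, max_bytes=10_000, units="megabytes")
    print(f"sent {sent} megabytes this round")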
|
2b09b17248bbcec59d7490a26fd7d585d54e8c54
|
49b861ac5ca3adfe8861a10839b18d9448eb8020
|
/python/blur.py
|
1369c47d5075a0f7296e8a55a5b8cb7910dac3f9
|
[
"BSD-2-Clause"
] |
permissive
|
symisc/pixlab
|
deee8dcd8b4816c135ad666340e4023dbcefbb66
|
b4ffb1d6b8ff2204ac0db51842ada921478ed66c
|
refs/heads/master
| 2023-08-04T19:39:44.813119
| 2023-07-31T03:51:16
| 2023-07-31T03:51:16
| 85,357,712
| 107
| 34
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
blur.py
|
import requests
import json
# Blur the bird picture
# https://pixlab.io/#/cmd?id=blur for more info.
req = requests.get('https://api.pixlab.io/blur',params={
'img':'https://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg',
'radius':50,
'sigma':30,
'key':'My_PixLab_Key'
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the Blurred picture: "+ reply['link'])
|
dfea669c4519269a2654b492fe8a992552b69e3a
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/scipy/py2/scipy/optimize/tests/test__basinhopping.py
|
84deeb847253a4b53ed032c0aaecd9b4a43171a0
|
[
"Python-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Qhull",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 15,398
|
py
|
test__basinhopping.py
|
"""
Unit tests for the basin hopping global minimization algorithm.
"""
from __future__ import division, print_function, absolute_import
import copy
from numpy.testing import assert_almost_equal, assert_equal, assert_
from pytest import raises as assert_raises
import numpy as np
from numpy import cos, sin
from scipy.optimize import basinhopping, OptimizeResult
from scipy.optimize._basinhopping import (
Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
def func1d(x):
f = cos(14.5 * x - 0.3) + (x + 0.2) * x
df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
return f, df
def func2d_nograd(x):
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
return f
def func2d(x):
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
df = np.zeros(2)
df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
df[1] = 2. * x[1] + 0.2
return f, df
def func2d_easyderiv(x):
f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
df = np.zeros(2)
df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
df[1] = 2.0*x[0] + 4.0*x[1]
return f, df
class MyTakeStep1(RandomDisplacement):
"""use a copy of displace, but have it set a special parameter to
make sure it's actually being used."""
def __init__(self):
self.been_called = False
super(MyTakeStep1, self).__init__()
def __call__(self, x):
self.been_called = True
return super(MyTakeStep1, self).__call__(x)
def myTakeStep2(x):
"""redo RandomDisplacement in function form without the attribute stepsize
to make sure everything still works ok
"""
s = 0.5
x += np.random.uniform(-s, s, np.shape(x))
return x
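# A minimal usage sketch (not one of the test cases below): a plain function with the
# same signature as myTakeStep2 can be handed straight to basinhopping via take_step.
def _example_custom_take_step():
    np.random.seed(1234)
    res = basinhopping(func2d_nograd, [1.0, 1.0],
                       minimizer_kwargs={"method": "L-BFGS-B"},
                       niter=25, take_step=myTakeStep2)
    return res.x  # expected to land near the known minimum, roughly [-0.195, -0.1]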
class MyAcceptTest(object):
"""pass a custom accept test
This does nothing but make sure it's being used and ensure all the
possible return values are accepted
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
self.testres = [False, 'force accept', True, np.bool_(True),
np.bool_(False), [], {}, 0, 1]
def __call__(self, **kwargs):
self.been_called = True
self.ncalls += 1
if self.ncalls - 1 < len(self.testres):
return self.testres[self.ncalls - 1]
else:
return True
class MyCallBack(object):
"""pass a custom callback function
This makes sure it's being used. It also returns True after 10
steps to ensure that it's stopping early.
"""
def __init__(self):
self.been_called = False
self.ncalls = 0
def __call__(self, x, f, accepted):
self.been_called = True
self.ncalls += 1
if self.ncalls == 10:
return True
class TestBasinHopping(object):
def setup_method(self):
""" Tests setup.
Run tests based on the 1-D and 2-D functions described above.
"""
self.x0 = (1.0, [1.0, 1.0])
self.sol = (-0.195, np.array([-0.195, -0.1]))
self.tol = 3 # number of decimal places
self.niter = 100
self.disp = False
# fix random seed
np.random.seed(1234)
self.kwargs = {"method": "L-BFGS-B", "jac": True}
self.kwargs_nograd = {"method": "L-BFGS-B"}
def test_TypeError(self):
# test the TypeErrors are raised on bad input
i = 1
# if take_step is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
take_step=1)
# if accept_test is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
accept_test=1)
def test_1d_grad(self):
# test 1d minimizations with gradient
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_2d(self):
# test 2d minimizations with gradient
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(res.nfev > 0)
def test_njev(self):
# test njev is returned correctly
i = 1
minimizer_kwargs = self.kwargs.copy()
# L-BFGS-B doesn't use njev, but BFGS does
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(res.nfev > 0)
assert_equal(res.nfev, res.njev)
def test_jac(self):
# test jacobian returned
minimizer_kwargs = self.kwargs.copy()
# BFGS returns a Jacobian
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d_easyderiv, [0.0, 0.0],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(hasattr(res.lowest_optimization_result, "jac"))
# in this case, the jacobian is just [df/dx, df/dy]
_, jacobian = func2d_easyderiv(res.x)
assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
self.tol)
def test_2d_nograd(self):
# test 2d minimizations without gradient
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_all_minimizers(self):
# test 2d minimizations with gradient. Nelder-Mead, Powell and COBYLA
# don't accept jac=True, so aren't included here.
i = 1
methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
minimizer_kwargs = copy.copy(self.kwargs)
for method in methods:
minimizer_kwargs["method"] = method
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_all_nograd_minimizers(self):
# test 2d minimizations without gradient. Newton-CG requires jac=True,
# so not included here.
i = 1
methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
'Nelder-Mead', 'Powell', 'COBYLA']
minimizer_kwargs = copy.copy(self.kwargs_nograd)
for method in methods:
minimizer_kwargs["method"] = method
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=self.niter, disp=self.disp)
tol = self.tol
if method == 'COBYLA':
tol = 2
assert_almost_equal(res.x, self.sol[i], decimal=tol)
def test_pass_takestep(self):
# test that passing a custom takestep works
# also test that the stepsize is being adjusted
takestep = MyTakeStep1()
initial_step_size = takestep.stepsize
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(takestep.been_called)
# make sure that the built in adaptive step size has been used
assert_(initial_step_size != takestep.stepsize)
def test_pass_simple_takestep(self):
# test that passing a custom takestep without attribute stepsize
takestep = myTakeStep2
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_pass_accept_test(self):
# test passing a custom accept test
# makes sure it's being used and ensures all the possible return values
# are accepted.
accept_test = MyAcceptTest()
i = 1
# there's no point in running it more than a few steps.
basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=10, disp=self.disp, accept_test=accept_test)
assert_(accept_test.been_called)
def test_pass_callback(self):
# test passing a custom callback function
# This makes sure it's being used. It also returns True after 10 steps
# to ensure that it's stopping early.
callback = MyCallBack()
i = 1
# there's no point in running it more than a few steps.
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=30, disp=self.disp, callback=callback)
assert_(callback.been_called)
assert_("callback" in res.message[0])
assert_equal(res.nit, 10)
def test_minimizer_fail(self):
# test if a minimizer fails
i = 1
self.kwargs["options"] = dict(maxiter=0)
self.niter = 10
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
# the number of failed minimizations should be the number of
# iterations + 1
assert_equal(res.nit + 1, res.minimization_failures)
def test_niter_zero(self):
# gh5915, what happens if you call basinhopping with niter=0
i = 0
basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=0, disp=self.disp)
def test_seed_reproducibility(self):
# seed should ensure reproducibility between runs
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
f_1 = []
def callback(x, f, accepted):
f_1.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback, seed=10)
f_2 = []
def callback2(x, f, accepted):
f_2.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback2, seed=10)
assert_equal(np.array(f_1), np.array(f_2))
def test_monotonic_basin_hopping(self):
# test 1d minimizations with gradient and T=0
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp, T=0)
assert_almost_equal(res.x, self.sol[i], self.tol)
class Test_Storage(object):
def setup_method(self):
self.x0 = np.array(1)
self.f0 = 0
minres = OptimizeResult()
minres.x = self.x0
minres.fun = self.f0
self.storage = Storage(minres)
def test_higher_f_rejected(self):
new_minres = OptimizeResult()
new_minres.x = self.x0 + 1
new_minres.fun = self.f0 + 1
ret = self.storage.update(new_minres)
minres = self.storage.get_lowest()
assert_equal(self.x0, minres.x)
assert_equal(self.f0, minres.fun)
assert_(not ret)
def test_lower_f_accepted(self):
new_minres = OptimizeResult()
new_minres.x = self.x0 + 1
new_minres.fun = self.f0 - 1
ret = self.storage.update(new_minres)
minres = self.storage.get_lowest()
assert_(self.x0 != minres.x)
assert_(self.f0 != minres.fun)
assert_(ret)
class Test_RandomDisplacement(object):
def setup_method(self):
self.stepsize = 1.0
self.displace = RandomDisplacement(stepsize=self.stepsize)
self.N = 300000
self.x0 = np.zeros([self.N])
def test_random(self):
# the mean should be 0
# the variance should be (2*stepsize)**2 / 12
# note these tests are random, they will fail from time to time
x = self.displace(self.x0)
v = (2. * self.stepsize) ** 2 / 12
assert_almost_equal(np.mean(x), 0., 1)
assert_almost_equal(np.var(x), v, 1)
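# A quick numerical sketch (illustrative only): the displacement is uniform on
# [-stepsize, stepsize], so its variance should approach (2 * stepsize)**2 / 12,
# i.e. stepsize**2 / 3, which is what the test above checks.
def _example_displacement_variance(stepsize=1.0, n=200000):
    np.random.seed(0)
    x = RandomDisplacement(stepsize=stepsize)(np.zeros(n))
    return np.var(x)  # expected to be close to stepsize**2 / 3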
class Test_Metropolis(object):
def setup_method(self):
self.T = 2.
self.met = Metropolis(self.T)
def test_boolean_return(self):
# the return must be a bool. else an error will be raised in
# basinhopping
ret = self.met(f_new=0., f_old=1.)
assert isinstance(ret, bool)
def test_lower_f_accepted(self):
assert_(self.met(f_new=0., f_old=1.))
def test_KeyError(self):
# should raise KeyError if kwargs f_old or f_new is not passed
assert_raises(KeyError, self.met, f_old=1.)
assert_raises(KeyError, self.met, f_new=1.)
def test_accept(self):
# test that steps are randomly accepted for f_new > f_old
one_accept = False
one_reject = False
for i in range(1000):
if one_accept and one_reject:
break
ret = self.met(f_new=1., f_old=0.5)
if ret:
one_accept = True
else:
one_reject = True
assert_(one_accept)
assert_(one_reject)
def test_GH7495(self):
# an overflow in exp was producing a RuntimeWarning
# create own object here in case someone changes self.T
met = Metropolis(2)
with np.errstate(over='raise'):
met.accept_reject(0, 2000)
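# A minimal sketch (illustrative only): the Metropolis criterion accepts an uphill move
# with probability roughly exp(-(f_new - f_old) / T), so higher temperatures accept
# worse candidates more often.
def _example_metropolis_accept_rate(T=2.0, n=10000):
    np.random.seed(0)
    met = Metropolis(T)
    accepted = sum(met(f_new=1.0, f_old=0.5) for _ in range(n))
    return accepted / n  # expected to be close to exp(-0.25), i.e. about 0.78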
class Test_AdaptiveStepsize(object):
def setup_method(self):
self.stepsize = 1.
self.ts = RandomDisplacement(stepsize=self.stepsize)
self.target_accept_rate = 0.5
self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
accept_rate=self.target_accept_rate)
def test_adaptive_increase(self):
# if few steps are rejected, the stepsize should increase
x = 0.
self.takestep(x)
self.takestep.report(False)
for i in range(self.takestep.interval):
self.takestep(x)
self.takestep.report(True)
assert_(self.ts.stepsize > self.stepsize)
def test_adaptive_decrease(self):
        # if few steps are accepted, the stepsize should decrease
x = 0.
self.takestep(x)
self.takestep.report(True)
for i in range(self.takestep.interval):
self.takestep(x)
self.takestep.report(False)
assert_(self.ts.stepsize < self.stepsize)
def test_all_accepted(self):
# test that everything works OK if all steps were accepted
x = 0.
for i in range(self.takestep.interval + 1):
self.takestep(x)
self.takestep.report(True)
assert_(self.ts.stepsize > self.stepsize)
def test_all_rejected(self):
# test that everything works OK if all steps were rejected
x = 0.
for i in range(self.takestep.interval + 1):
self.takestep(x)
self.takestep.report(False)
assert_(self.ts.stepsize < self.stepsize)
|
da3e79c306cbf178f8d704d99e3a83120d52f3d6
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/prb_control/entities/epic_battle_training/requester.py
|
e175785666bc58a391f7c40e78ed909aca29047e
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
requester.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/prb_control/entities/epic_battle_training/requester.py
import BigWorld
from PlayerEvents import g_playerEvents
from constants import PREBATTLE_TYPE, PREBATTLE_CACHE_KEY
from debug_utils import LOG_ERROR
from gui.prb_control.entities.base.requester import IPrbListRequester
from gui.prb_control.items import prb_seqs
class EpicBattleTrainingListRequester(IPrbListRequester):
UPDATE_LIST_TIMEOUT = 5
def __init__(self):
super(EpicBattleTrainingListRequester, self).__init__()
self.__callbackID = None
self.__callback = None
return
def start(self, callback):
if self.__callbackID is not None:
LOG_ERROR('EpicBattleListRequester already started')
return
else:
if callback is not None and callable(callback):
g_playerEvents.onPrebattlesListReceived += self.__pe_onPrebattlesListReceived
self.__callback = callback
self.__request()
else:
LOG_ERROR('Callback is None or is not callable')
return
return
def stop(self):
g_playerEvents.onPrebattlesListReceived -= self.__pe_onPrebattlesListReceived
self.__callback = None
if self.__callbackID is not None:
BigWorld.cancelCallback(self.__callbackID)
self.__callbackID = None
return
def request(self, ctx=None):
self.__request()
def __request(self):
self.__callbackID = None
if hasattr(BigWorld.player(), 'requestPrebattles'):
BigWorld.player().requestPrebattles(PREBATTLE_TYPE.EPIC_TRAINING, PREBATTLE_CACHE_KEY.CREATE_TIME, False, 0, 50)
return
def __setTimeout(self):
self.__callbackID = BigWorld.callback(self.UPDATE_LIST_TIMEOUT, self.__request)
def __pe_onPrebattlesListReceived(self, prbType, count, prebattles):
if prbType != PREBATTLE_TYPE.EPIC_TRAINING:
return
self.__callback(prb_seqs.PrbListIterator(prebattles))
self.__setTimeout()
|
b36b771fc1ec53b7dcd30750d0bbeec71f58c927
|
c24b28c0dc4ad8f83845f4c61882f1e04d49b5cd
|
/Dash Components/Daq_Components/daq.py
|
92ddc4567612b37bd027b41631c04a84f113ba77
|
[] |
no_license
|
Coding-with-Adam/Dash-by-Plotly
|
759e927759513d96060a770b1e0b0a66db13f54f
|
9f178f1d52536efd33827758b741acc4039d8d9b
|
refs/heads/master
| 2023-08-31T17:23:02.029281
| 2023-08-08T05:12:50
| 2023-08-08T05:12:50
| 234,687,337
| 1,293
| 1,822
| null | 2023-07-31T15:47:07
| 2020-01-18T05:36:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
daq.py
|
# Video: [Introduction to Dash DAQ for manufacturing dashboards](https://youtu.be/t3cLkzJAUgo)
# Docs: [Dash DAQ](https://dash.plotly.com/dash-daq)
import dash # pip install dash
from dash import dcc, html, Input, Output
import dash_daq as daq
import plotly.graph_objects as go
from random import randrange
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# *************************************************************************
app.layout = html.Div(
id="dark-light-theme",
children=[
html.Div(
[
html.H1("Water Valve Pressure", style={"textAlign": "center"}),
html.Div(
daq.Tank(
id="my-tank",
max=400,
value=197,
showCurrentValue=True,
units="gallons",
style={"margin-left": "50px"},
),
className="three columns",
),
html.Div(
daq.Gauge(
id="my-daq-gauge1", min=0, max=10, value=6, label="Valve 1"
),
className="four columns",
),
html.Div(
daq.Gauge(
id="my-daq-gauge2", min=0, max=10, value=9, label="Valve 2"
),
className="four columns",
),
],
className="row",
),
html.Div(
html.Div(
daq.ToggleSwitch(
id="my-toggle-switch", label="Liters | Gallons", value=True
),
className="three columns",
),
className="row",
),
html.Div(
dcc.Graph(id="my-graph", figure={}),
className="row",
),
dcc.Interval(id="timing", interval=1000, n_intervals=0),
],
)
# *************************************************************************
# must have Dash 1.16.0 or higher for this to work
@app.callback(
Output("my-daq-gauge1", "value"),
Output("my-daq-gauge2", "value"),
Output("my-graph", "figure"),
Input("timing", "n_intervals"),
)
def update_g(n_intervals):
pressure_1 = randrange(10) # mimics data pulled from live database
pressure_2 = randrange(10) # mimics data pulled from live database
fig = go.Figure(
[
go.Bar(
x=["valve 1", "valve 2"],
y=[pressure_1, pressure_2],
)
]
)
fig.update_layout(yaxis={"range": [0, 10]})
return pressure_1, pressure_2, fig
@app.callback(
Output("my-tank", "units"),
Input("my-toggle-switch", "value"),
)
def update_units(toggle):
if toggle:
return "gallons"
else:
return "liters"
if __name__ == "__main__":
app.run_server(debug=True, port=3030)
|
a41c271c6e7ac3392f3423024226cc4b77aa72a7
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/importer/common/broadcast_mixin.py
|
3cda3a186ccda48059fcb38aa30df8bf90a39bdd
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 3,562
|
py
|
broadcast_mixin.py
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from nntool.graph.dim import Dim
from nntool.graph.types import ConstantInputNode
from .provisional_dim import ProvisionalDim
# Reduces broadcasted constants on unknown dimensions.
# Setting this to False can lead to conceptual errors in the matchers.
FIX_CONSTANTS = True
class BroadcastMixin(object):
@classmethod
def get_broadcasted_shape(cls, x, y, is_constant=None):
if is_constant is None:
is_constant = (False, False)
if len(x) < len(y):
x = ([1] * (len(y) - len(x))) + x
elif len(y) < len(x):
y = ([1] * (len(x) - len(y))) + y
assert all(elem_x == elem_y or (elem_x == 1 or elem_y == 1) for elem_x, elem_y in zip(x, y)
if elem_x is not None and elem_y is not None),\
"{} and {} cannot be broadcasted".format(x, y)
def broad(elem_x, elem_y):
# if one element is not None then take it since that dimension will be broadcasted
if elem_x is None:
if elem_y is None or (FIX_CONSTANTS and is_constant[1] and elem_y == 1):
return None
else:
return elem_y
else:
if elem_y is None:
if FIX_CONSTANTS and is_constant[0] and elem_x == 1:
return None
else:
return elem_x
else:
return elem_x if elem_y == 1 else elem_y
return [broad(elem_x, elem_y) for elem_x, elem_y in zip(x, y)]
@classmethod
def _fix_constant_inputs(cls, inputs, shape):
const_inputs = list([inp
for inp in inputs if isinstance(inp[0], ConstantInputNode)])
if not const_inputs or not shape:
return
for inp in const_inputs:
node = inp[0]
cur_shape = tuple(node.value.shape)
new_shape = []
for i in range(1, len(cur_shape) + 1):
if shape[-i] is None:
if cur_shape[-i] != 1:
raise ValueError('unknown dimension in constant is not equal to 1')
else:
new_shape.insert(0, cur_shape[-i])
new_shape = tuple(new_shape)
if new_shape != cur_shape:
node.value = np.reshape(node.value, new_shape)
inp[2].shape = list(new_shape)
node.dims = Dim.unnamed(new_shape)
@classmethod
def implied_broadcast(cls, inputs):
is_constant = [isinstance(inp[0], ConstantInputNode) for inp in inputs]
x = inputs[0][2].shape
y = inputs[1][2].shape
shape = cls.get_broadcasted_shape(x, y, is_constant=is_constant)
cls._fix_constant_inputs(inputs, shape)
return [ProvisionalDim(shape)]
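# A minimal sketch (illustrative only, assuming NumPy >= 1.20 for np.broadcast_shapes):
# for fully-known dimensions, get_broadcasted_shape follows the usual numpy broadcasting
# rules, which can be cross-checked against np.broadcast_shapes.
def _example_broadcast_shapes():
    assert BroadcastMixin.get_broadcasted_shape([1, 3], [4, 1]) == [4, 3]
    assert list(np.broadcast_shapes((1, 3), (4, 1))) == [4, 3]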
|
97a80ac286d7cd3339989f454b918b38779c7460
|
32712c478ff9dff44de085cb50a1302bfc2eba67
|
/debug/utils_for_tests.py
|
bd30ee48d73779bb33c03004fa34a8b65f449b5e
|
[
"MIT"
] |
permissive
|
vas3k/vas3k.club
|
158af17c329fe693178ca1bce36466922604df3b
|
b3ff2fd95ef1d6c593c57d3bcd501240f2705fbb
|
refs/heads/master
| 2023-09-03T07:10:10.859004
| 2023-09-01T09:08:32
| 2023-09-01T09:08:32
| 254,190,180
| 697
| 326
|
MIT
| 2023-09-04T09:02:12
| 2020-04-08T20:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
utils_for_tests.py
|
def todict(obj, include_class_attrs=False, convert_private=False, include_none_fields=True):
"""Convert object to dict"""
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = todict(v, include_class_attrs, convert_private)
return data
elif hasattr(obj, "_ast"):
return todict(obj._ast(), convert_private=convert_private)
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
return [todict(v, include_class_attrs, convert_private) for v in obj]
elif hasattr(obj, "__dict__"):
if convert_private:
instance_attributes = [(key, value) for key, value in obj.__dict__.items() if not callable(value)]
else:
instance_attributes = [(key, value) for key, value in obj.__dict__.items() if
not callable(value) and not key.startswith('_')]
if include_class_attrs and hasattr(obj, "__class__"):
class_attributes = [(key, value) for key, value in obj.__class__.__dict__.items() if
(key[:2] != "__") and (not callable(value))]
else:
class_attributes = []
items = instance_attributes
items.extend(class_attributes)
        # include_none_fields controls whether None/empty values are kept in the result
data = dict(
[(key, todict(value, include_class_attrs, convert_private, include_none_fields)) for key, value in items if
include_none_fields or (value is not None and value != [] and value != "")])
return data
else:
return obj
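if __name__ == "__main__":
    # A minimal usage sketch with a hypothetical class, for illustration only.
    class _Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y
            self._secret = "hidden"  # dropped unless convert_private=True

    print(todict(_Point(1, 2)))                         # {'x': 1, 'y': 2}
    print(todict(_Point(1, 2), convert_private=True))   # also includes '_secret'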
|