hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e37655c7659dc37db772ca1af7fa4bf6b6ece46 | 6,218 | py | Python | shape/shape.py | jcolekaplan/computer_vision | 48d39b081a7b6b699019052eeae36ab703bb34eb | [
"MIT"
] | null | null | null | shape/shape.py | jcolekaplan/computer_vision | 48d39b081a7b6b699019052eeae36ab703bb34eb | [
"MIT"
] | null | null | null | shape/shape.py | jcolekaplan/computer_vision | 48d39b081a7b6b699019052eeae36ab703bb34eb | [
"MIT"
] | null | null | null | """
Jacob Kaplan
shape.py
Purpose: Given a set of point coordinates in R^2 , compute and output the following:
(a) The minimum and maximum x and y values.
(b) The center of mass (average) x and y values.
(c) The axis along which the data varies the least and the standard deviation of this variation.
(d) The axis along which the data varies the most and the standard deviation of that variation.
(e) The closest point form of the best fitting line (through the original data).
(f) The implicit form of the line.
(g) A decision about the shape that best describes the data
(h) Output a MatPlotLib plot saved as an image containing a scatter plot of
the points and of the center of mass
* ,MMM8&&&. *
MMMM88&&&&& .
MMMM88&&&&&&&
* MMM88&&&&&&&&
MMM88&&&&&&&&
'MMM88&&&&&&'
'MMM8&&&' *
|\___/|
) ( . '
=\ /=
)===( *
/ \
| |
/ \
\ /
_/\_/\_/\__ _/_/\_/\_/\_/\_/\_/\_/\_/\_/\_
| | | |( ( | | | | | | | | | |
| | | | ) ) | | | | | | | | | |
| | | |(_( | | | | | | | | | |
| | | | | | | | | | | | | | |
| | | | | | | | | | | | | | |
Sure you can take the eigenvalue of some points, but can you
ever find the eigenvalue of your soul?
"""
import sys
import numpy as np
from numpy import linalg as la
#import matplotlib.pyplot as plt
def xyValues(points):
    """
    Split an N x 2 array of points into its two coordinate columns.
    Returns the x column followed by the y column (numpy views, not copies).
    """
    xColumn = points[:, 0]
    yColumn = points[:, 1]
    return xColumn, yColumn
def eigen(points):
    """
    Compute the principal-axis decomposition of an N x 2 point array.

    Centers the data around the center of mass, builds the 2 x 2
    covariance matrix, and eigendecomposes it. The eigenvalues are
    converted from unbiased (ddof=1) variances to population standard
    deviations via sqrt(v - v/N) == sqrt(v * (N-1)/N).

    Fix over the original: centering is done on copies, so the caller's
    `points` array is no longer mutated (previously the in-place `-=` on
    column views silently modified `points`, forcing callers to make
    defensive copies beforehand).

    Returns:
        (eigenvals, eigenvecs): standard deviations along the principal
        axes, and the eigenvector matrix as returned by numpy.linalg.eig.
    """
    N = points.shape[0]
    # Subtract the means on fresh arrays; do not touch the input in place.
    xVals = points[:, 0] - np.mean(points[:, 0])
    yVals = points[:, 1] - np.mean(points[:, 1])
    stackPoints = np.stack((xVals, yVals))
    covarMatrix = np.cov(stackPoints)
    eigenvals, eigenvecs = la.eig(covarMatrix)
    # Convert sample variances to population standard deviations.
    eigenvals = np.sqrt(eigenvals - eigenvals / N)
    return eigenvals, eigenvecs
def getMinAxis(evals, evecs):
    """
    Take in eigenvalues and eigenvectors of the points
    Return the first eigenvector and the second eigenvalue (these correspond
    to the minimum axis of the points)
    """
    # NOTE(review): this hard-codes which eigenpair is the "minimum".
    # numpy.linalg.eig does not sort its eigenvalues, and eigenvectors live
    # in the *columns* of evecs (evecs[:, i]) while evecs[0] is the first
    # *row*. This only behaves as intended if the 2x2 decomposition happens
    # to come back in that arrangement -- confirm, or select with argmin.
    minAxis = evecs[0]
    sMin = evals[1]
    return minAxis, sMin
def getMaxAxis(evals, evecs):
    """
    Take in eigenvalues and eigenvectors of the points
    Return the second eigenvector and the first eigenvalue (these correspond
    to the maximum axis of the points)
    """
    # NOTE(review): mirror image of getMinAxis -- the pairing of the second
    # eigenvector row with the first eigenvalue assumes a fixed, unsorted
    # ordering from numpy.linalg.eig; confirm with argmax/argsort.
    maxAxis = evecs[1]
    sMax = evals[0]
    return maxAxis, sMax
def getClosestPoint(minAxis, xAvg, yAvg):
    """
    Compute the closest-point form (rho, theta) of the best-fit line from
    the minimum-variance axis and the center of mass.

    rho is the projection of the center of mass onto the axis components;
    theta is the angle whose cosine is the axis' second component.
    """
    axisA, axisB = minAxis[0], minAxis[1]
    rho = axisA * yAvg + axisB * xAvg
    theta = np.arccos(axisB)
    return rho, theta
def getShape(sMin, sMax, tau):
    """
    Classify the point cloud: a 'line' when the spread along the minimum
    axis is below tau times the spread along the maximum axis, otherwise
    an 'ellipse'.
    """
    return "line" if sMin < tau * sMax else "ellipse"
def plot(xVals, yVals, comX, comY, a, b, c, outfig):
    """
    Take in x and y values, the average of x and y values, the line of best fit,
    and the name of the file to save plot
    Set axes, plot x and y values, plot line of best fit, plot center of mass

    NOTE(review): `plt` is unavailable because the matplotlib import at the
    top of the file is commented out; this function (and its call site in
    __main__) are currently disabled and would raise NameError if invoked.
    """
    axes = plt.gca()
    # Fixed 0-55 viewport in both dimensions.
    axes.set_xlim([0,55])
    axes.set_ylim([0,55])
    plt.scatter(xVals, yVals)
    x = np.linspace(0,51,102)
    # Rearrange the implicit line ax + by + c = 0 into y = -(c/b) - (a/b)*x.
    a = (a/b)
    c = -1*(c/b)
    y = c - a*x
    plt.plot(x, y,'-k')
    # Center of mass drawn as a red marker.
    plt.plot(comX, comY, markersize=8, color="red")
    plt.savefig(outfig)
if __name__ == "__main__":
    # --- Command line handling: p3_shape <points-file> <tau> <outfig> ---
    if len(sys.argv) != 4:
        print("Correct usage: p3_shape points tau outfig")
        sys.exit()
    else:
        pointsFile = sys.argv[1]
        tau = sys.argv[2]
        outfig = sys.argv[3]

    # Load the N x 2 point data. The with-block guarantees the file handle
    # is closed (the original opened it and never closed it). Error
    # messages and exit behavior are unchanged.
    try:
        with open(pointsFile, "r") as openFile:
            points = np.loadtxt(openFile, dtype=np.float64)
    except FileNotFoundError:
        print("No file {} found".format(pointsFile))
        sys.exit()
    except ValueError:
        print("Malformed points file: {}, must be numbers".format(pointsFile))
        sys.exit()

    try:
        tau = float(tau)
    except ValueError:
        print("Tau must be number!")
        sys.exit()

    # --- Calculate and output stats ---
    xValues, yValues = xyValues(points)
    xCopy = np.copy(xValues)  # preserved for the (disabled) plot call below
    yCopy = np.copy(yValues)
    xAvg, yAvg = np.mean(xValues), np.mean(yValues)
    print("min: ({:.3f},{:.3f})".format(np.min(xValues), np.min(yValues)))
    print("max: ({:.3f},{:.3f})".format(np.max(xValues), np.max(yValues)))
    print("com: ({:.3f},{:.3f})".format(xAvg, yAvg))

    eigenvals, eigenvecs = eigen(points)
    minAxis, sMin = getMinAxis(eigenvals, eigenvecs)
    maxAxis, sMax = getMaxAxis(eigenvals, eigenvecs)
    print("min axis: ({:.3f},{:.3f}), sd {:.3f}".format(minAxis[1], minAxis[0], sMin))
    print("max axis: ({:.3f},{:.3f}), sd {:.3f}".format(maxAxis[1], maxAxis[0], sMax))

    rho, theta = getClosestPoint(minAxis, xAvg, yAvg)
    a,b,c = minAxis[1], minAxis[0], -1*rho
    print("closest point: rho {:.3f}, theta {:.3f}".format(rho, theta))
    print("implicit: a {:.3f}, b {:.3f}, c {:.3f}".format(a,b,c))
    print("best as {}".format(getShape(sMin, sMax, tau)))
    #plot(xCopy, yCopy, xAvg, yAvg, a, b, c, outfig)
| 33.793478 | 100 | 0.542618 |
1940e4a739882716d2e9e4f8e6a181ae12b793c5 | 2,641 | py | Python | bot1.py | Nihal-Srivastava05/Hackoween-Hacktoberfest2021 | 531041ab6e68488d4d491c10b7a5949c61618cc8 | [
"MIT"
] | 19 | 2021-10-03T06:12:28.000Z | 2021-10-30T13:07:56.000Z | bot1.py | Nihal-Srivastava05/Hackoween-Hacktoberfest2021 | 531041ab6e68488d4d491c10b7a5949c61618cc8 | [
"MIT"
] | 68 | 2021-10-03T05:59:13.000Z | 2021-10-31T17:34:40.000Z | bot1.py | Nihal-Srivastava05/Hackoween-Hacktoberfest2021 | 531041ab6e68488d4d491c10b7a5949c61618cc8 | [
"MIT"
] | 122 | 2021-10-01T03:01:59.000Z | 2021-11-02T16:45:42.000Z | import pyautogui
import webbrowser
import time
import os
import fnmatch
import shutil
pyautogui.moveTo(427,398, duration=0.25)
pyautogui.click(427,398, button='left',duration=0.25)
a=0.25
b=a*2
c=a/2
pyautogui.press('space')
pyautogui.moveTo(412, 275, duration=0.25)
pyautogui.click(412, 275,button='left',duration=0.25)
time.sleep(0.25)
pyautogui.moveTo(478,163, duration=0.25)
pyautogui.click(478,163,button='left',duration=0.25)
time.sleep(a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('160921', interval=a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('cola', interval=a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('fixar objetos', interval=a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('250', interval=a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('ml', interval=a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('5', interval=a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('5', interval=a)
pyautogui.press('tab')
time.sleep(a)
pyautogui.write('estoque', interval=a)
pyautogui.moveTo(476,663, duration=0.25)
pyautogui.click(476,663,button='left',duration=0.25)
time.sleep(b)
pyautogui.press('space')
time.sleep(a)
pyautogui.press('space')
pyautogui.moveTo(654, 219, duration=0.25)
pyautogui.click(654, 219,button='left',duration=0.25)
time.sleep(b)
pyautogui.press('space')
time.sleep(a)
pyautogui.press('space')
time.sleep(a)
aux=0
while aux<30:
aux=aux+1
time.sleep(c)
pyautogui.press('up')
pyautogui.moveTo(74, 171, duration=0.25)
pyautogui.click(74, 171,button='left',duration=0.25)
time.sleep(b)
pyautogui.press('space')
time.sleep(a)
pyautogui.press('space')
pyautogui.moveTo(641,509, duration=0.25)
pyautogui.click(641,509,button='left',duration=0.25)
time.sleep(b)
pyautogui.moveTo(449,403, duration=0.25)
pyautogui.click(449,403,button='left',duration=0.25)
time.sleep(b)
pyautogui.moveTo(563,212, duration=0.25)
pyautogui.click(563,212,button='left',duration=0.25)
aux=0
while aux<25:
aux=aux+1
time.sleep(c)
pyautogui.press('tab')
pyautogui.press('down')
time.sleep(c)
pyautogui.press('down')
pyautogui.press('enter')
time.sleep(c)
pyautogui.press('tab')
pyautogui.write('100', interval=a)
pyautogui.press('tab')
time.sleep(c)
pyautogui.press('enter')
time.sleep(a)
pyautogui.moveTo(448,400, duration=0.25)
pyautogui.click(448,400,button='left',duration=0.25)
time.sleep(a)
aux=0
while aux<25:
aux=aux+1
time.sleep(c)
pyautogui.press('tab')
| 21.471545 | 54 | 0.699356 |
0f6c965351a0ecfe39185b5276edd6f185010e1d | 707 | py | Python | src/oop/deck.py | JadielTeofilo/General-Algorithms | dfcf86c6ecd727573079f8971187c47bdb7a37bb | [
"MIT"
] | null | null | null | src/oop/deck.py | JadielTeofilo/General-Algorithms | dfcf86c6ecd727573079f8971187c47bdb7a37bb | [
"MIT"
] | null | null | null | src/oop/deck.py | JadielTeofilo/General-Algorithms | dfcf86c6ecd727573079f8971187c47bdb7a37bb | [
"MIT"
] | null | null | null | """
Deck of Cards: Design the data structures for a generic deck of cards. Explain how you would subclass the data structures to implement blackjack.
Does it mean the classical deck of cards?
Yes, 52 cards, 13 ranks, 4 suits
"""
import enum
import dataclasses
class Suit(enum.Enum):
    """The four French playing-card suits, numbered 1 through 4."""
    hearts = enum.auto()
    spades = enum.auto()
    diamonds = enum.auto()
    clubs = enum.auto()
class Rank(enum.Enum):
    """Card ranks from ace (1) through king (13)."""
    ace = enum.auto()
    two = enum.auto()
    three = enum.auto()
    four = enum.auto()
    five = enum.auto()
    six = enum.auto()
    seven = enum.auto()
    eight = enum.auto()
    nine = enum.auto()
    ten = enum.auto()
    jack = enum.auto()
    queen = enum.auto()
    king = enum.auto()
@dataclasses.dataclass
class Card:
    """A single playing card: one rank paired with one suit."""
    rank: Rank
    suit: Suit
@dataclasses.dataclass
class Deck:
    """An ordered collection of Cards (a standard deck holds 52).

    Fix over the original: the annotation used `List[Card]` but
    `typing.List` was never imported, raising NameError when the module was
    loaded. A string forward reference keeps the documented type without
    requiring a new import.
    """
    cards: "list[Card]"
| 13.862745 | 145 | 0.595474 |
57d9718124f9a19f52eb845ea535d6fe90b3ce4b | 1,626 | py | Python | admin/base/urls.py | h-ci-user01/osf.h-test | a61db2c639a26031aa5b7f58c4dd719919aa5ece | [
"Apache-2.0"
] | null | null | null | admin/base/urls.py | h-ci-user01/osf.h-test | a61db2c639a26031aa5b7f58c4dd719919aa5ece | [
"Apache-2.0"
] | 18 | 2020-03-24T15:26:02.000Z | 2022-03-08T21:30:39.000Z | admin/base/urls.py | h-ci-user01/osf.h-test | a61db2c639a26031aa5b7f58c4dd719919aa5ece | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from settings import ADMIN_BASE
from . import views
# Every admin route is mounted under the configurable ADMIN_BASE prefix.
base_pattern = '^{}'.format(ADMIN_BASE)

urlpatterns = [
    ### ADMIN ###
    url(
        base_pattern,
        include([
            url(r'^$', views.home, name='home'),
            # Built-in Django admin site.
            url(r'^admin/', admin.site.urls),
            url(r'^spam/', include('admin.spam.urls', namespace='spam')),
            url(r'^institutions/', include('admin.institutions.urls', namespace='institutions')),
            url(r'^preprint_providers/', include('admin.preprint_providers.urls', namespace='preprint_providers')),
            url(r'^account/', include('admin.common_auth.urls', namespace='auth')),
            url(r'^password/', include('password_reset.urls')),
            url(r'^nodes/', include('admin.nodes.urls', namespace='nodes')),
            url(r'^preprints/', include('admin.preprints.urls', namespace='preprints')),
            url(r'^subjects/', include('admin.subjects.urls', namespace='subjects')),
            url(r'^users/', include('admin.users.urls', namespace='users')),
            url(r'^meetings/', include('admin.meetings.urls',
                                       namespace='meetings')),
            url(r'^project/', include('admin.pre_reg.urls', namespace='pre_reg')),
            url(r'^metrics/', include('admin.metrics.urls',
                                      namespace='metrics')),
            url(r'^desk/', include('admin.desk.urls',
                                   namespace='desk')),
        ]),
    ),
]

# Title shown in the Django admin header.
admin.site.site_header = 'OSF-Admin administration'
3a2232d1f5aa555cb815acc2f1fdfae2beb52c82 | 7,274 | py | Python | mars/tensor/stats/tests/test_stats_execute.py | snsnlou/mars | 6b8eec162eccc8bb980a98ca2cf1e6a4b866d302 | [
"Apache-2.0"
] | 1 | 2021-11-30T12:07:21.000Z | 2021-11-30T12:07:21.000Z | mars/tensor/stats/tests/test_stats_execute.py | snsnlou/mars | 6b8eec162eccc8bb980a98ca2cf1e6a4b866d302 | [
"Apache-2.0"
] | null | null | null | mars/tensor/stats/tests/test_stats_execute.py | snsnlou/mars | 6b8eec162eccc8bb980a98ca2cf1e6a4b866d302 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import unittest
from distutils.version import LooseVersion
import numpy as np
from mars.tensor import tensor
from mars.tests.core import TestBase
try:
import scipy
from scipy.stats import (
entropy as sp_entropy,
power_divergence as sp_power_divergence,
chisquare as sp_chisquare,
ttest_rel as sp_ttest_rel,
ttest_ind as sp_ttest_ind,
ttest_ind_from_stats as sp_ttest_ind_from_stats,
ttest_1samp as sp_ttest_1samp,
)
from mars.tensor.stats import (
entropy, power_divergence, chisquare,
ttest_ind, ttest_rel, ttest_1samp, ttest_ind_from_stats,
)
except ImportError:
scipy = None
@unittest.skipIf(scipy is None, 'scipy not installed')
class Test(TestBase):
    """Execution tests comparing mars.tensor.stats results against scipy.stats."""

    def setUp(self):
        # Each test runs inside a fresh mars execution context.
        self.ctx, self.executor = self._create_test_context()
        self.ctx.__enter__()

    def tearDown(self) -> None:
        self.ctx.__exit__()

    def testEntropyExecution(self):
        rs = np.random.RandomState(0)
        a = rs.rand(10)

        t1 = tensor(a, chunk_size=4)
        r = entropy(t1)

        result = self.executor.execute_tensor(r, concat=True)[0]
        expected = sp_entropy(a)
        np.testing.assert_array_almost_equal(result, expected)

        # Relative entropy (KL divergence) against a second distribution,
        # with a non-default logarithm base.
        b = rs.rand(10)
        base = 3.1

        t2 = tensor(b, chunk_size=4)
        r = entropy(t1, t2, base)

        result = self.executor.execute_tensor(r, concat=True)[0]
        expected = sp_entropy(a, b, base)
        np.testing.assert_array_almost_equal(result, expected)

        # NOTE(review): the block above is repeated with a fresh draw of
        # `b`; presumably intentional (exercises different data), but it
        # may simply be a copy-paste duplicate -- confirm.
        b = rs.rand(10)
        base = 3.1

        t2 = tensor(b, chunk_size=4)
        r = entropy(t1, t2, base)

        result = self.executor.execute_tensor(r, concat=True)[0]
        expected = sp_entropy(a, b, base)
        np.testing.assert_array_almost_equal(result, expected)

        # The base may itself be a (scalar) tensor expression.
        r = entropy(t1, t2, t1.sum())

        result = self.executor.execute_tensor(r, concat=True)[0]
        expected = sp_entropy(a, b, a.sum())
        np.testing.assert_array_almost_equal(result, expected)

        # Mismatched lengths must be rejected.
        with self.assertRaises(ValueError):
            entropy(t1, t2[:7])

    def testPowerDivergenceExecution(self):
        f_obs_raw = np.array([16, 18, 16, 14, 12, 12])
        f_exp_raw = np.array([16, 16, 16, 16, 16, 8])

        f_obs = tensor(f_obs_raw, chunk_size=4)
        f_exp = tensor(f_exp_raw, chunk_size=4)

        with self.assertRaises(ValueError):
            power_divergence(f_obs, f_exp, lambda_='non-exist-lambda')

        r = power_divergence(f_obs, lambda_='pearson')
        result = r.execute().fetch()

        expected = sp_power_divergence(f_obs_raw, lambda_='pearson')
        np.testing.assert_almost_equal(expected[0], result[0])
        np.testing.assert_almost_equal(expected[1], result[1])

        # All lambda_ aliases accepted by scipy's power_divergence.
        modes = [
            None,
            'pearson',
            'log-likelihood',
            'mod-log-likelihood',
            'neyman',
        ]

        for mode in modes:
            r = power_divergence(f_obs, f_exp, lambda_=mode)
            result = r.execute().fetch()

            expected = sp_power_divergence(
                f_obs_raw, f_exp_raw, lambda_=mode)
            np.testing.assert_almost_equal(expected[0], result[0])
            np.testing.assert_almost_equal(expected[1], result[1])

    def testChisquareExecution(self):
        f_obs_raw = np.array([16, 18, 16, 14, 12, 12])
        f_exp_raw = np.array([16, 16, 16, 16, 16, 8])

        f_obs = tensor(f_obs_raw, chunk_size=4)
        f_exp = tensor(f_exp_raw, chunk_size=4)

        r = chisquare(f_obs, f_exp)
        result = r.execute().fetch()

        expected = sp_chisquare(f_obs_raw, f_exp_raw)
        np.testing.assert_almost_equal(expected[0], result[0])
        np.testing.assert_almost_equal(expected[1], result[1])

    def testTTestExecution(self):
        # scipy added the `alternative` kwarg to the t-tests in 1.6.0;
        # build version-appropriate wrappers around ttest_ind_from_stats.
        if LooseVersion(scipy.__version__) >= '1.6.0':
            alternatives = ['less', 'greater', 'two-sided']
            mt_from_stats = lambda a, b, alternative=None, equal_var=True: ttest_ind_from_stats(
                a.mean(), a.std(), a.shape[0], b.mean(), b.std(), b.shape[0],
                alternative=alternative, equal_var=equal_var)
            sp_from_stats = lambda a, b, alternative=None, equal_var=True: sp_ttest_ind_from_stats(
                a.mean(), a.std(), a.shape[0], b.mean(), b.std(), b.shape[0],
                alternative=alternative, equal_var=equal_var)
        else:
            alternatives = ['two-sided']
            mt_from_stats = lambda a, b, equal_var=True: ttest_ind_from_stats(
                a.mean(), a.std(), a.shape[0], b.mean(), b.std(), b.shape[0],
                equal_var=equal_var)
            sp_from_stats = lambda a, b, equal_var=True: sp_ttest_ind_from_stats(
                a.mean(), a.std(), a.shape[0], b.mean(), b.std(), b.shape[0],
                equal_var=equal_var)

        # Pairs of (mars implementation, scipy reference) to compare.
        funcs = [
            (ttest_rel, sp_ttest_rel),
            (
                functools.partial(ttest_ind, equal_var=True),
                functools.partial(sp_ttest_ind, equal_var=True),
            ),
            (
                functools.partial(ttest_ind, equal_var=False),
                functools.partial(sp_ttest_ind, equal_var=False),
            ),
            (
                functools.partial(mt_from_stats, equal_var=True),
                functools.partial(sp_from_stats, equal_var=True),
            ),
            (
                functools.partial(mt_from_stats, equal_var=False),
                functools.partial(sp_from_stats, equal_var=False),
            ),
            (ttest_1samp, sp_ttest_1samp),
        ]

        fa_raw = np.array([16, 18, 16, 14, 12, 12])
        fb_raw = np.array([16, 16, 16, 16, 16, 8])

        fa = tensor(fa_raw, chunk_size=4)
        fb = tensor(fb_raw, chunk_size=4)

        for mt_func, sp_func in funcs:
            if LooseVersion(scipy.__version__) >= '1.6.0':
                with self.assertRaises(ValueError):
                    mt_func(fa, fb, alternative='illegal-alternative')

            for alt in alternatives:
                if LooseVersion(scipy.__version__) >= '1.6.0':
                    r = mt_func(fa, fb, alternative=alt)
                else:
                    r = mt_func(fa, fb)
                result = self.executor.execute_tensors(r)
                if LooseVersion(scipy.__version__) >= '1.6.0':
                    expected = sp_func(fa_raw, fb_raw, alternative=alt)
                else:
                    expected = sp_func(fa_raw, fb_raw)
                np.testing.assert_almost_equal(expected[0], result[0])
                np.testing.assert_almost_equal(expected[1], result[1])
| 35.140097 | 99 | 0.60022 |
6af2f20742d8aa62cfe724b8ce8112d7b6bc9b34 | 4,349 | py | Python | vyvodi/layers/dense_hierarchical.py | nickolasgryga/vyvodi | e5390119152f7f40b3ba2a748e75e1ef25b5d240 | [
"Apache-2.0"
] | 1 | 2022-01-31T15:21:45.000Z | 2022-01-31T15:21:45.000Z | vyvodi/layers/dense_hierarchical.py | nickolasgryga/vyvodi | e5390119152f7f40b3ba2a748e75e1ef25b5d240 | [
"Apache-2.0"
] | 6 | 2022-01-31T15:22:31.000Z | 2022-02-02T16:22:44.000Z | vyvodi/layers/dense_hierarchical.py | vyvodi/vyvodi | 06702fc59c90766f6d15e975ce4f8d60fa3481ee | [
"Apache-2.0"
] | 1 | 2022-02-04T18:30:39.000Z | 2022-02-04T18:30:39.000Z | import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.layers.dense_variational_v2 import (
_make_kl_divergence_penalty
)
tfk = tf.keras
tfkl = tf.keras.layers
tfpl = tfp.layers
tfd = tfp.distributions
tfb = tfp.bijectors
class DenseHierarchical(tfkl.Layer):
    """Dense layer with random `kernel` and `bias` which are sampled from
    a shared normal distribution.

    This layers uses variational inference to approximate the posterior
    distribution of the random effects.
    """

    def __init__(
        self, n_units, n_samples,
        kl_weight=None,
        kl_use_exact=False,
        activation=None,
        use_bias=True,
        activity_regularizer=None,
        **kwargs
    ):
        """Create a random effect layer with the specified number of units
        and samples.

        Args:
            n_units: Number of units in the random effect layer.
            n_samples: Number of per-group posterior parameter sets; the
                posterior stores `n_samples + 1` weight groups, indexed by
                the `category` input at call time.
            kl_weight: Weight of the KL divergence term in the loss function.
            kl_use_exact: Whether to use the exact KL divergence or
                approximate KL divergence.
            activation: Activation function to use.
            use_bias: Whether to use a bias term.
            activity_regularizer: Regularizer function for the output.
            **kwargs: Extra arguments forwards to `tf.keras.layers.Layer`.
        """
        super().__init__(
            activity_regularizer=tfk.regularizers.get(activity_regularizer),
            **kwargs
        )
        self.n_units = int(n_units)
        self.n_samples = int(n_samples)
        self.activation = tfk.activations.get(activation)
        self.use_bias = use_bias
        # Penalty added to the layer loss in call(); weights KL(q || p).
        self._kl_divergence_fn = _make_kl_divergence_penalty(
            kl_use_exact, weight=kl_weight
        )

    def build(self, input_shape):
        # NOTE(review): `dtype` is computed but never used in this method.
        dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())
        x_input_shape = input_shape[0]
        x_input_shape = tf.TensorShape(x_input_shape)
        last_dim = tf.compat.dimension_value(x_input_shape[-1])
        if last_dim is None:
            raise ValueError(
                'The last dimension of the inputs to `RandomEffects` '
                'must be defined. Found `None`.'
            )
        # NOTE(review): error text above says `RandomEffects` though the
        # class is DenseHierarchical -- likely a former class name.
        # One normal per kernel entry (plus a bias column when use_bias).
        n_priors = (last_dim + self.use_bias) * self.n_units
        self._prior = tfk.Sequential([
            tfpl.VariableLayer(tfpl.IndependentNormal.params_size(n_priors)),
            tfpl.IndependentNormal((self.n_units, last_dim + self.use_bias))
        ])
        n_posteriors = n_priors * (self.n_samples + 1)
        self._posterior = tfk.Sequential([
            tfpl.VariableLayer(
                tfpl.IndependentNormal.params_size(n_posteriors)
            ),
            tfpl.IndependentNormal(
                (self.n_samples + 1, self.n_units, last_dim + self.use_bias)
            )
        ])  # mean-field approximation
        self.built = True

    def call(self, inputs, **kwargs):
        dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())
        x, category = self._parse_inputs(inputs)
        q = self._posterior(x)
        p = self._prior(x)
        # Variational regularizer: KL between posterior and prior.
        self.add_loss(self._kl_divergence_fn(q, p))
        w = tf.convert_to_tensor(q)
        # Select the sampled weight group belonging to each input's category.
        w = tf.gather(w, category, axis=0)
        if self.use_bias:
            # NOTE(review): split sizes (n_units, 1) along axis 2 look like
            # they assume last_dim == n_units; (last_dim, 1) would match the
            # shapes built in build() -- confirm.
            w, b = tf.split(w, (self.n_units, 1), axis=2)
        else:
            b = tf.zeros((self.n_units, 1), dtype=dtype)
        outputs = tf.matmul(w, tf.expand_dims(x, axis=1), transpose_b=True)
        outputs = outputs + b
        outputs = tf.squeeze(outputs, axis=-1)
        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs

    def _parse_inputs(self, inputs):
        # Coerce the (x, category) input pair to the layer dtype / int32.
        dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())
        if isinstance(inputs, (list, tuple)):
            x, category = inputs
            # TODO: add support for other types of inputs
        else:
            raise ValueError(
                '`RandomEffects` expects a list or tuple of two tensors: '
                '`x` and `category`.'
            )
        x = tf.cast(x, dtype)
        category = tf.cast(category, tf.int32)
        return x, category
fc71eeae71cee189adb8cbd414ce26921b83fd25 | 9,282 | py | Python | kolga/libs/docker.py | riksu-raksu/k-lga | d9f940a34decbf805456e6b04bc3b641b0e860fd | [
"MIT"
] | null | null | null | kolga/libs/docker.py | riksu-raksu/k-lga | d9f940a34decbf805456e6b04bc3b641b0e860fd | [
"MIT"
] | null | null | null | kolga/libs/docker.py | riksu-raksu/k-lga | d9f940a34decbf805456e6b04bc3b641b0e860fd | [
"MIT"
] | null | null | null | import re
from pathlib import Path
from typing import Dict, List
from kolga.utils.logger import logger
from kolga.utils.models import DockerImage, ImageStage
from ..settings import settings
from ..utils.general import get_environment_vars_by_prefix, run_os_command
class Docker:
    """
    A wrapper class around various Docker tools
    """

    # Matches Dockerfile FROM lines, capturing the optional "AS <stage>" name.
    STAGE_REGEX = re.compile(
        r"^FROM .*?(?: +AS +(?P<stage>.*))?$", re.IGNORECASE | re.MULTILINE
    )
    ICON = "🐳"

    def __init__(self, dockerfile: str = settings.DOCKER_BUILD_SOURCE) -> None:
        self.dockerfile = Path(dockerfile)
        self.docker_context = Path(settings.DOCKER_BUILD_CONTEXT)
        self.image_repo = f"{settings.CONTAINER_REGISTRY_REPO}"
        if settings.DOCKER_IMAGE_NAME:
            self.image_repo = f"{self.image_repo}/{settings.DOCKER_IMAGE_NAME}"
        self.image_tag = f"{self.image_repo}:{settings.GIT_COMMIT_SHA}"
        self.cache_repo = f"{self.image_repo}/{settings.BUILDKIT_CACHE_REPO}"
        # Fail fast on an invalid build setup.
        if not self.dockerfile.exists():
            raise FileNotFoundError(f"No Dockerfile found at {self.dockerfile}")
        if not self.docker_context.exists():
            raise NotADirectoryError(f"No such folder found, {self.docker_context}")
        if self.docker_context not in self.dockerfile.parents:
            raise ValueError(
                f"Dockerfile {self.dockerfile} not in build context {self.docker_context}"
            )

    def stage_image_tag(self, stage: str) -> str:
        # Stage-specific tags are "<image_tag>-<stage>"; empty stage means
        # the plain image tag.
        if not stage:
            return self.image_tag
        return f"{self.image_tag}-{stage}"

    def test_image_tag(self, stage: str = settings.DOCKER_TEST_IMAGE_STAGE) -> str:
        return self.stage_image_tag(stage)

    def setup_buildkit(self, name: str = "kolgabk") -> None:
        # Create and activate a named docker buildx builder instance.
        setup_command = [
            "docker",
            "buildx",
            "create",
            "--name",
            name,
            "--use",
        ]
        result = run_os_command(setup_command)
        if result.return_code:
            logger.std(result, raise_exception=True)
        else:
            # NOTE(review): "instace" typo in the log message below
            # (runtime string, left unchanged here).
            logger.success(
                icon=f"{self.ICON} 🔑",
                message=f"New buildx builder instace is set up (Instance name: {name})",
            )

    def login(
        self,
        username: str = settings.CONTAINER_REGISTRY_USER,
        password: str = settings.CONTAINER_REGISTRY_PASSWORD,
        registry: str = settings.CONTAINER_REGISTRY,
    ) -> None:
        # NOTE(review): passing the password via "-p" exposes it in the
        # process list; docker recommends --password-stdin -- consider.
        login_command = [
            "docker",
            "login",
            "-u",
            username,
            "-p",
            password,
            registry,
        ]
        result = run_os_command(login_command)
        if result.return_code:
            logger.std(result, raise_exception=True)
        else:
            logger.success(
                icon=f"{self.ICON} 🔑",
                message=f"Logged in to registry (User: {username})",
            )

    @staticmethod
    def get_docker_git_ref_tag(
        git_commit_ref: str = settings.GIT_COMMIT_REF_NAME,
    ) -> str:
        """
        Creates a tag from the git reference that can be used as a Docker tag
        Docker does not support all characters in its tag names, for instance
        / would be seen as a separator which would break the docker tag command.
        :return:
        """
        return git_commit_ref.translate(str.maketrans("_/", "--"))

    @staticmethod
    def get_build_arguments() -> Dict[str, str]:
        """
        Get build arguments from environment

        Returns:
            Dict of build arguments
        """
        return get_environment_vars_by_prefix(settings.DOCKER_BUILD_ARG_PREFIX)

    def get_stage_names(self) -> List[str]:
        # Scan the Dockerfile line by line for FROM clauses; unnamed stages
        # are recorded as "".
        stage_names = []
        with open(self.dockerfile) as f:
            while True:
                line = f.readline()
                if not line:
                    break
                matched_stage = self.STAGE_REGEX.match(line)
                if not matched_stage:
                    continue
                stage_name = (
                    matched_stage.group("stage") if matched_stage.group("stage") else ""
                )
                stage_names.append(stage_name)
        return stage_names

    def get_stages(self) -> List[ImageStage]:
        # The last FROM in the Dockerfile is the final image; any stage that
        # matches DOCKER_TEST_IMAGE_STAGE is also marked for building.
        stages: List[ImageStage] = []
        stage_names = self.get_stage_names()
        if not stage_names:
            return stages
        for stage in stage_names[:-1]:
            image_stage = ImageStage(name=stage)
            if (
                settings.DOCKER_TEST_IMAGE_STAGE
                and stage == settings.DOCKER_TEST_IMAGE_STAGE
            ):
                image_stage.development = True
                image_stage.build = True
            stages.append(image_stage)
        final_image = ImageStage(name=stage_names[-1], final=True, build=True)
        stages.append(final_image)
        return stages

    def get_image_tags(self, stage: str = "", final_image: bool = False) -> List[str]:
        # Add - prefix to tag name if prefix is present
        stage_tag = f"-{stage}" if stage else stage
        git_ref_tag = self.get_docker_git_ref_tag()
        tags = {f"{settings.GIT_COMMIT_SHA}{stage_tag}", f"{git_ref_tag}{stage_tag}"}
        if final_image:
            # The final image additionally gets the un-suffixed tags.
            tags |= {f"{settings.GIT_COMMIT_SHA}", f"{git_ref_tag}"}
        return sorted(tags)

    def pull_image(self, image: str) -> bool:
        # Returns True on a successful pull, False otherwise (errors are
        # logged, not raised).
        logger.info(icon=f"{self.ICON} ⏬", title=f"Pulling {image}:", end=" ")
        pull_command = ["docker", "pull", image]
        result = run_os_command(pull_command, shell=False)
        if result.return_code:
            logger.std(result, raise_exception=False)
        else:
            logger.success()
            return True
        return False

    def create_cache_tag(self, postfix: str = "") -> str:
        git_ref_tag = self.get_docker_git_ref_tag()
        stage_postfix = f"-{postfix}" if postfix else ""
        return f"{self.cache_repo}:{git_ref_tag}{stage_postfix}"

    def get_cache_tags(self) -> List[str]:
        # Cache candidates: the (default) target branch image plus a
        # per-stage cache tag for every buildable stage.
        cache_tags = []
        target_branch = settings.GIT_TARGET_BRANCH or settings.GIT_DEFAULT_TARGET_BRANCH
        target_image = f"{self.cache_repo}:{target_branch}"
        cache_tags.append(target_image)
        for stage in self.get_stages():
            if stage.build:
                cache_tags.append(self.create_cache_tag(postfix=stage.name))
        return cache_tags

    def build_stages(self, push_images: bool = True) -> List[DockerImage]:
        """
        Build all stages of a Dockerfile and tag them
        """
        built_images = []
        stages = self.get_stages()
        for stage in stages:
            if not stage.build:
                continue
            if stage.development:
                logger.info(
                    icon="ℹ️",
                    title=f"Found test/development stage '{stage.name}', building that as well",
                )
            built_images.append(
                self.build_stage(
                    stage.name, final_image=stage.final, push_images=push_images
                )
            )
        return built_images

    def build_stage(
        self, stage: str = "", final_image: bool = False, push_images: bool = True
    ) -> DockerImage:
        # Build one stage via docker buildx, wiring up registry cache
        # import/export and all computed tags.
        logger.info(icon=f"{self.ICON} 🔨", title=f"Building stage '{stage}': ")
        cache_tags = self.get_cache_tags()
        postfix = stage if not final_image else ""
        build_command = [
            "docker",
            "buildx",
            "build",
            f"--file={self.dockerfile.absolute()}",
            f"--target={stage}",
            "--progress=plain",
        ]
        if push_images:
            build_command.append("--push")
            # Exporting cache to the registry only makes sense when pushing.
            cache_to = self.create_cache_tag(postfix=postfix)
            logger.info(title=f"\t ℹ️ Cache to: {cache_to}")
            build_command.append(f"--cache-to=type=registry,ref={cache_to},mode=max")
        for cache_tag in cache_tags:
            logger.info(title=f"\t ℹ️ Cache from: {cache_tag}")
            build_command.append(f"--cache-from=type=registry,ref={cache_tag}")
        tags = self.get_image_tags(stage, final_image=final_image)
        for tag in tags:
            build_command.append(f"--tag={self.image_repo}:{tag}")
        build_command.append(f"{self.docker_context.absolute()}")
        result = run_os_command(build_command, shell=False)
        if result.return_code:
            logger.std(result, raise_exception=True)
        else:
            for tag in tags:
                logger.info(title=f"\t 🏷 Tagged: {self.image_repo}:{tag}")
        image = DockerImage(repository=self.image_repo, tags=tags)
        return image

    def delete_image(self, image: DockerImage) -> None:
        # Best-effort local removal of every tag; failures are logged only.
        logger.warning(icon=f"{self.ICON}", message="Removing Docker image")
        for tag in image.tags:
            logger.info(message=f"\t {image.repository}:{tag}: ", end="")
            delete_command = ["docker", "rmi", f"{image.repository}:{tag}"]
            result = run_os_command(delete_command, shell=False)
            if result.return_code:
                logger.std(result, raise_exception=False)
            else:
                logger.success()
| 33.268817 | 96 | 0.58371 |
40eeb811b0e8536a5fe9bf934debf738fc287e89 | 21,089 | py | Python | TICC_solver.py | PKandarp/TICC | 6d380ec45720027080f59857991bb33337058977 | [
"BSD-2-Clause"
] | 393 | 2017-04-21T18:18:16.000Z | 2022-03-30T10:55:34.000Z | TICC_solver.py | PKandarp/TICC | 6d380ec45720027080f59857991bb33337058977 | [
"BSD-2-Clause"
] | 70 | 2017-05-01T13:21:00.000Z | 2022-03-24T03:02:26.000Z | TICC_solver.py | PKandarp/TICC | 6d380ec45720027080f59857991bb33337058977 | [
"BSD-2-Clause"
] | 150 | 2017-03-04T00:07:16.000Z | 2022-03-23T23:59:03.000Z | import numpy as np
import math, time, collections, os, errno, sys, code, random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn import mixture
from sklearn.cluster import KMeans
import pandas as pd
from multiprocessing import Pool
from src.TICC_helper import *
from src.admm_solver import ADMMSolver
class TICC:
    """Time-series clustering solver.

    Alternates between (a) assigning every stacked window of points to a
    cluster under a temporal-consistency ("switch") penalty and (b) fitting a
    sparse inverse covariance for each cluster via ADMM, until the assignment
    stops changing or ``maxIters`` is reached.
    """

    def __init__(self, window_size=10, number_of_clusters=5, lambda_parameter=11e-2,
                 beta=400, maxIters=1000, threshold=2e-5, write_out_file=False,
                 prefix_string="", num_proc=1, compute_BIC=False, cluster_reassignment=20, biased=False):
        """
        Parameters:
            - window_size: size of the sliding window
            - number_of_clusters: number of clusters
            - lambda_parameter: sparsity parameter
            - beta (stored as switch_penalty): temporal consistency parameter
            - maxIters: number of iterations
            - threshold: convergence threshold
            - write_out_file: (bool) if true, prefix_string is output file dir
            - prefix_string: output directory if necessary
            - num_proc: worker processes for the per-cluster ADMM solves
            - compute_BIC: if true, fit() also returns the BIC score
            - cluster_reassignment: number of points to reassign to a 0 cluster
            - biased: Using the biased or the unbiased covariance
        """
        self.window_size = window_size
        self.number_of_clusters = number_of_clusters
        self.lambda_parameter = lambda_parameter
        self.switch_penalty = beta
        # NOTE(review): threshold is stored but no method in this class reads it;
        # convergence is tested with exact assignment equality in fit().
        self.threshold = threshold
        self.write_out_file = write_out_file
        self.prefix_string = prefix_string
        self.num_proc = num_proc
        self.compute_BIC = compute_BIC
        self.cluster_reassignment = cluster_reassignment
        self.num_blocks = self.window_size + 1
        self.biased = biased
        pd.set_option('display.max_columns', 500)
        np.set_printoptions(formatter={'float': lambda x: "{0:0.4f}".format(x)})
        # fixed seed so the random empty-cluster reassignment is reproducible
        np.random.seed(102)

    def fit(self, input_file):
        """
        Main method for TICC solver.
        Parameters:
            - input_file: location of the data file
        Returns (clustered_points, train_cluster_inverse) and, when
        compute_BIC is set, the BIC score as a third element.
        """
        assert self.maxIters > 0  # must have at least one iteration
        self.log_parameters()
        # Get data into proper format
        times_series_arr, time_series_rows_size, time_series_col_size = self.load_data(input_file)
        ############
        # The basic folder to be created
        str_NULL = self.prepare_out_directory()
        # Train test split
        training_indices = getTrainTestSplit(time_series_rows_size, self.num_blocks,
                                             self.window_size)  # indices of the training samples
        num_train_points = len(training_indices)
        # Stack the training data
        complete_D_train = self.stack_training_data(times_series_arr, time_series_col_size, num_train_points,
                                                    training_indices)
        # Initialization
        # Gaussian Mixture
        gmm = mixture.GaussianMixture(n_components=self.number_of_clusters, covariance_type="full")
        gmm.fit(complete_D_train)
        clustered_points = gmm.predict(complete_D_train)
        gmm_clustered_pts = clustered_points + 0
        # K-means
        kmeans = KMeans(n_clusters=self.number_of_clusters, random_state=0).fit(complete_D_train)
        clustered_points_kmeans = kmeans.labels_  # todo, is there a difference between these two?
        kmeans_clustered_pts = kmeans.labels_
        train_cluster_inverse = {}
        log_det_values = {}  # log dets of the thetas
        computed_covariance = {}
        cluster_mean_info = {}
        cluster_mean_stacked_info = {}
        old_clustered_points = None  # points from last iteration
        empirical_covariances = {}
        # PERFORM TRAINING ITERATIONS
        pool = Pool(processes=self.num_proc)  # multi-threading
        for iters in range(self.maxIters):
            print("\n\n\nITERATION ###", iters)
            # Get the train and test points
            train_clusters_arr = collections.defaultdict(list)  # {cluster: [point indices]}
            for point, cluster_num in enumerate(clustered_points):
                train_clusters_arr[cluster_num].append(point)
            len_train_clusters = {k: len(train_clusters_arr[k]) for k in range(self.number_of_clusters)}
            # train_clusters holds the indices in complete_D_train
            # for each of the clusters
            opt_res = self.train_clusters(cluster_mean_info, cluster_mean_stacked_info, complete_D_train,
                                          empirical_covariances, len_train_clusters, time_series_col_size, pool,
                                          train_clusters_arr)
            self.optimize_clusters(computed_covariance, len_train_clusters, log_det_values, opt_res,
                                   train_cluster_inverse)
            # update old computed covariance
            old_computed_covariance = computed_covariance
            print("UPDATED THE OLD COVARIANCE")
            self.trained_model = {'cluster_mean_info': cluster_mean_info,
                                  'computed_covariance': computed_covariance,
                                  'cluster_mean_stacked_info': cluster_mean_stacked_info,
                                  'complete_D_train': complete_D_train,
                                  'time_series_col_size': time_series_col_size}
            clustered_points = self.predict_clusters()
            # recalculate lengths
            new_train_clusters = collections.defaultdict(list)  # {cluster: [point indices]}
            for point, cluster in enumerate(clustered_points):
                new_train_clusters[cluster].append(point)
            len_new_train_clusters = {k: len(new_train_clusters[k]) for k in range(self.number_of_clusters)}
            before_empty_cluster_assign = clustered_points.copy()
            if iters != 0:
                # Re-seed any cluster that came back empty with a run of points
                # borrowed from the highest-norm non-empty clusters.
                cluster_norms = [(np.linalg.norm(old_computed_covariance[self.number_of_clusters, i]), i) for i in
                                 range(self.number_of_clusters)]
                norms_sorted = sorted(cluster_norms, reverse=True)
                # clusters that are not 0 as sorted by norm
                valid_clusters = [cp[1] for cp in norms_sorted if len_new_train_clusters[cp[1]] != 0]
                # Add a point to the empty clusters
                # assuming more non empty clusters than empty ones
                counter = 0
                for cluster_num in range(self.number_of_clusters):
                    if len_new_train_clusters[cluster_num] == 0:
                        cluster_selected = valid_clusters[counter]  # a cluster that is not len 0
                        counter = (counter + 1) % len(valid_clusters)
                        print("cluster that is zero is:", cluster_num, "selected cluster instead is:", cluster_selected)
                        start_point = np.random.choice(
                            new_train_clusters[cluster_selected])  # random point number from that cluster
                        for i in range(0, self.cluster_reassignment):
                            # put cluster_reassignment points from point_num in this cluster
                            point_to_move = start_point + i
                            if point_to_move >= len(clustered_points):
                                break
                            clustered_points[point_to_move] = cluster_num
                            computed_covariance[self.number_of_clusters, cluster_num] = old_computed_covariance[
                                self.number_of_clusters, cluster_selected]
                            cluster_mean_stacked_info[self.number_of_clusters, cluster_num] = complete_D_train[
                                point_to_move, :]
                            cluster_mean_info[self.number_of_clusters, cluster_num] = complete_D_train[point_to_move, :][
                                (self.window_size - 1) * time_series_col_size:self.window_size * time_series_col_size]
            for cluster_num in range(self.number_of_clusters):
                print("length of cluster #", cluster_num, "-------->", sum([x == cluster_num for x in clustered_points]))
            self.write_plot(clustered_points, str_NULL, training_indices)
            # TEST SETS STUFF
            # LLE + swtiching_penalty
            # Segment length
            # Create the F1 score from the graphs from k-means and GMM
            # Get the train and test points
            train_confusion_matrix_EM = compute_confusion_matrix(self.number_of_clusters, clustered_points,
                                                                 training_indices)
            train_confusion_matrix_GMM = compute_confusion_matrix(self.number_of_clusters, gmm_clustered_pts,
                                                                  training_indices)
            train_confusion_matrix_kmeans = compute_confusion_matrix(self.number_of_clusters, kmeans_clustered_pts,
                                                                     training_indices)
            ###compute the matchings
            matching_EM, matching_GMM, matching_Kmeans = self.compute_matches(train_confusion_matrix_EM,
                                                                              train_confusion_matrix_GMM,
                                                                              train_confusion_matrix_kmeans)
            print("\n\n\n")
            # Converged when the assignment did not change since last iteration.
            if np.array_equal(old_clustered_points, clustered_points):
                print("\n\n\n\nCONVERGED!!! BREAKING EARLY!!!")
                break
            old_clustered_points = before_empty_cluster_assign
        # end of training
        if pool is not None:
            pool.close()
            pool.join()
        train_confusion_matrix_EM = compute_confusion_matrix(self.number_of_clusters, clustered_points,
                                                             training_indices)
        train_confusion_matrix_GMM = compute_confusion_matrix(self.number_of_clusters, gmm_clustered_pts,
                                                              training_indices)
        train_confusion_matrix_kmeans = compute_confusion_matrix(self.number_of_clusters, clustered_points_kmeans,
                                                                 training_indices)
        self.compute_f_score(matching_EM, matching_GMM, matching_Kmeans, train_confusion_matrix_EM,
                             train_confusion_matrix_GMM, train_confusion_matrix_kmeans)
        if self.compute_BIC:
            bic = computeBIC(self.number_of_clusters, time_series_rows_size, clustered_points, train_cluster_inverse,
                             empirical_covariances)
            return clustered_points, train_cluster_inverse, bic
        return clustered_points, train_cluster_inverse

    def compute_f_score(self, matching_EM, matching_GMM, matching_Kmeans, train_confusion_matrix_EM,
                        train_confusion_matrix_GMM, train_confusion_matrix_kmeans):
        """Print (placeholder) training F1 scores and tally matched points.

        NOTE(review): the real F1 computations are commented out and -1 is
        printed instead; the correct_* tallies below are computed but unused.
        """
        f1_EM_tr = -1  # computeF1_macro(train_confusion_matrix_EM,matching_EM,num_clusters)
        f1_GMM_tr = -1  # computeF1_macro(train_confusion_matrix_GMM,matching_GMM,num_clusters)
        f1_kmeans_tr = -1  # computeF1_macro(train_confusion_matrix_kmeans,matching_Kmeans,num_clusters)
        print("\n\n")
        print("TRAINING F1 score:", f1_EM_tr, f1_GMM_tr, f1_kmeans_tr)
        correct_e_m = 0
        correct_g_m_m = 0
        correct_k_means = 0
        for cluster in range(self.number_of_clusters):
            matched_cluster__e_m = matching_EM[cluster]
            matched_cluster__g_m_m = matching_GMM[cluster]
            matched_cluster__k_means = matching_Kmeans[cluster]
            correct_e_m += train_confusion_matrix_EM[cluster, matched_cluster__e_m]
            correct_g_m_m += train_confusion_matrix_GMM[cluster, matched_cluster__g_m_m]
            correct_k_means += train_confusion_matrix_kmeans[cluster, matched_cluster__k_means]

    def compute_matches(self, train_confusion_matrix_EM, train_confusion_matrix_GMM, train_confusion_matrix_kmeans):
        """Find the best cluster-label matching for each initialization's
        confusion matrix and return the three matchings.

        NOTE(review): the correct_* counters are accumulated but never
        returned or printed here.
        """
        matching_Kmeans = find_matching(train_confusion_matrix_kmeans)
        matching_GMM = find_matching(train_confusion_matrix_GMM)
        matching_EM = find_matching(train_confusion_matrix_EM)
        correct_e_m = 0
        correct_g_m_m = 0
        correct_k_means = 0
        for cluster in range(self.number_of_clusters):
            matched_cluster_e_m = matching_EM[cluster]
            matched_cluster_g_m_m = matching_GMM[cluster]
            matched_cluster_k_means = matching_Kmeans[cluster]
            correct_e_m += train_confusion_matrix_EM[cluster, matched_cluster_e_m]
            correct_g_m_m += train_confusion_matrix_GMM[cluster, matched_cluster_g_m_m]
            correct_k_means += train_confusion_matrix_kmeans[cluster, matched_cluster_k_means]
        return matching_EM, matching_GMM, matching_Kmeans

    def write_plot(self, clustered_points, str_NULL, training_indices):
        """Plot the cluster assignment over time; save to disk only when
        write_out_file is set."""
        # Save a figure of segmentation
        plt.figure()
        plt.plot(training_indices[0:len(clustered_points)], clustered_points, color="r")  # ,marker = ".",s =100)
        plt.ylim((-0.5, self.number_of_clusters + 0.5))
        if self.write_out_file: plt.savefig(
            str_NULL + "TRAINING_EM_lam_sparse=" + str(self.lambda_parameter) + "switch_penalty = " + str(
                self.switch_penalty) + ".jpg")
        plt.close("all")
        print("Done writing the figure")

    def smoothen_clusters(self, cluster_mean_info, computed_covariance,
                          cluster_mean_stacked_info, complete_D_train, n):
        """Compute the negative log-likelihood estimate (LLE) of every point
        under every cluster's Gaussian; returns a (num_points, num_clusters)
        array used by the temporal smoothing step."""
        clustered_points_len = len(complete_D_train)
        inv_cov_dict = {}  # cluster to inv_cov
        log_det_dict = {}  # cluster to log_det
        # Precompute each cluster's inverse covariance and log-determinant once.
        for cluster in range(self.number_of_clusters):
            cov_matrix = computed_covariance[self.number_of_clusters, cluster][0:(self.num_blocks - 1) * n,
                                                                               0:(self.num_blocks - 1) * n]
            inv_cov_matrix = np.linalg.inv(cov_matrix)
            log_det_cov = np.log(np.linalg.det(cov_matrix))  # log(det(sigma2|1))
            inv_cov_dict[cluster] = inv_cov_matrix
            log_det_dict[cluster] = log_det_cov
        # For each point compute the LLE
        print("beginning the smoothening ALGORITHM")
        LLE_all_points_clusters = np.zeros([clustered_points_len, self.number_of_clusters])
        for point in range(clustered_points_len):
            if point + self.window_size - 1 < complete_D_train.shape[0]:
                for cluster in range(self.number_of_clusters):
                    cluster_mean = cluster_mean_info[self.number_of_clusters, cluster]
                    cluster_mean_stacked = cluster_mean_stacked_info[self.number_of_clusters, cluster]
                    x = complete_D_train[point, :] - cluster_mean_stacked[0:(self.num_blocks - 1) * n]
                    inv_cov_matrix = inv_cov_dict[cluster]
                    log_det_cov = log_det_dict[cluster]
                    # Mahalanobis-style quadratic form plus the log-det term.
                    lle = np.dot(x.reshape([1, (self.num_blocks - 1) * n]),
                                 np.dot(inv_cov_matrix, x.reshape([n * (self.num_blocks - 1), 1]))) + log_det_cov
                    LLE_all_points_clusters[point, cluster] = lle
        return LLE_all_points_clusters

    def optimize_clusters(self, computed_covariance, len_train_clusters, log_det_values, optRes, train_cluster_inverse):
        """Collect the async ADMM results and store each cluster's estimated
        inverse covariance, covariance and log-determinant."""
        for cluster in range(self.number_of_clusters):
            if optRes[cluster] == None:  # NOTE(review): "is None" would be the idiomatic test
                continue
            val = optRes[cluster].get()
            print("OPTIMIZATION for Cluster #", cluster, "DONE!!!")
            # THIS IS THE SOLUTION
            S_est = upperToFull(val, 0)
            X2 = S_est
            u, _ = np.linalg.eig(S_est)  # NOTE(review): eigendecomposition result is unused
            cov_out = np.linalg.inv(X2)
            # Store the log-det, covariance, inverse-covariance, cluster means, stacked means
            log_det_values[self.number_of_clusters, cluster] = np.log(np.linalg.det(cov_out))
            computed_covariance[self.number_of_clusters, cluster] = cov_out
            train_cluster_inverse[cluster] = X2
        for cluster in range(self.number_of_clusters):
            print("length of the cluster ", cluster, "------>", len_train_clusters[cluster])

    def train_clusters(self, cluster_mean_info, cluster_mean_stacked_info, complete_D_train, empirical_covariances,
                       len_train_clusters, n, pool, train_clusters_arr):
        """Kick off one ADMM solve per non-empty cluster on the process pool;
        returns a list of AsyncResult handles (None for empty clusters).
        Also records each cluster's mean, stacked mean and empirical covariance."""
        optRes = [None for i in range(self.number_of_clusters)]
        for cluster in range(self.number_of_clusters):
            cluster_length = len_train_clusters[cluster]
            if cluster_length != 0:
                size_blocks = n
                indices = train_clusters_arr[cluster]
                D_train = np.zeros([cluster_length, self.window_size * n])
                for i in range(cluster_length):
                    point = indices[i]
                    D_train[i, :] = complete_D_train[point, :]
                # Mean of the last (most recent) block, reshaped to a row vector.
                cluster_mean_info[self.number_of_clusters, cluster] = np.mean(D_train, axis=0)[
                    (self.window_size - 1) * n:self.window_size * n].reshape([1, n])
                cluster_mean_stacked_info[self.number_of_clusters, cluster] = np.mean(D_train, axis=0)
                ##Fit a model - OPTIMIZATION
                probSize = self.window_size * size_blocks
                lamb = np.zeros((probSize, probSize)) + self.lambda_parameter
                S = np.cov(np.transpose(D_train), bias=self.biased)
                empirical_covariances[cluster] = S
                rho = 1  # NOTE(review): unused; the solver receives the literal 1 below
                solver = ADMMSolver(lamb, self.window_size, size_blocks, 1, S)
                # apply to process pool
                optRes[cluster] = pool.apply_async(solver, (1000, 1e-6, 1e-6, False,))
        return optRes

    def stack_training_data(self, Data, n, num_train_points, training_indices):
        """Stack each training point with its following (window_size - 1)
        points into one row of width window_size * n."""
        complete_D_train = np.zeros([num_train_points, self.window_size * n])
        for i in range(num_train_points):
            for k in range(self.window_size):
                if i + k < num_train_points:
                    idx_k = training_indices[i + k]
                    complete_D_train[i][k * n:(k + 1) * n] = Data[idx_k][0:n]
        return complete_D_train

    def prepare_out_directory(self):
        """Create (if needed) and return the run's output directory path."""
        str_NULL = self.prefix_string + "lam_sparse=" + str(self.lambda_parameter) + "maxClusters=" + str(
            self.number_of_clusters + 1) + "/"
        if not os.path.exists(os.path.dirname(str_NULL)):
            try:
                os.makedirs(os.path.dirname(str_NULL))
            except OSError as exc:  # Guard against race condition of path already existing
                if exc.errno != errno.EEXIST:
                    raise
        return str_NULL

    def load_data(self, input_file):
        """Load a comma-separated file into an (m, n) array."""
        Data = np.loadtxt(input_file, delimiter=",")
        (m, n) = Data.shape  # m: num of observations, n: size of observation vector
        print("completed getting the data")
        return Data, m, n

    def log_parameters(self):
        """Print the solver's hyper-parameters."""
        print("lam_sparse", self.lambda_parameter)
        print("switch_penalty", self.switch_penalty)
        print("num_cluster", self.number_of_clusters)
        print("num stacked", self.window_size)

    def predict_clusters(self, test_data = None):
        '''
        Given the current trained model, predict clusters.  If the cluster segmentation has not been optimized yet,
        than this will be part of the interative process.

        Args:
            numpy array of data for which to predict clusters.  Columns are dimensions of the data, each row is
            a different timestamp

        Returns:
            vector of predicted cluster for the points
        '''
        if test_data is not None:
            if not isinstance(test_data, np.ndarray):
                raise TypeError("input must be a numpy array!")
        else:
            test_data = self.trained_model['complete_D_train']
        # SMOOTHENING
        lle_all_points_clusters = self.smoothen_clusters(self.trained_model['cluster_mean_info'],
                                                         self.trained_model['computed_covariance'],
                                                         self.trained_model['cluster_mean_stacked_info'],
                                                         test_data,
                                                         self.trained_model['time_series_col_size'])
        # Update cluster points - using NEW smoothening
        clustered_points = updateClusters(lle_all_points_clusters, switch_penalty=self.switch_penalty)
        return(clustered_points)
| 52.200495 | 130 | 0.611978 |
1faf2a8187170ea8e67cbb26c38caaaf98862a06 | 5,076 | py | Python | profiles_api/views.py | ConnorDBurge/profiles-rest-api | cea2adcbad7a8089af2489d3e062650623b15171 | [
"MIT"
] | null | null | null | profiles_api/views.py | ConnorDBurge/profiles-rest-api | cea2adcbad7a8089af2489d3e062650623b15171 | [
"MIT"
] | null | null | null | profiles_api/views.py | ConnorDBurge/profiles-rest-api | cea2adcbad7a8089af2489d3e062650623b15171 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from .serializers import HelloSerializer
from .serializers import UserProfileSerializer
from .serializers import ProfileFeedItemSerializer
from .permissions import UpdateOwnProfile, UpdateOwnStatus
from .models import UserProfile, ProfileFeedItem
class HelloApiView(APIView):
    """Demonstration APIView: one handler per HTTP verb, mapped manually to a URL."""

    serializer_class = HelloSerializer

    def get(self, request, format=None):
        """GET api/hello-view/ -- list notable APIView traits."""
        features = [
            'Uses HTTP methods as function (get, post, path, put, delete)',
            'Is similar to traditional Django view',
            'Gives you the most control over your application logic',
            'is mapped manually to URLs',
        ]
        return Response({'message': 'Hello', 'an_apiview': features})

    def post(self, request):
        """POST api/hello-view/ -- greet the caller by the submitted name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def put(self, request, pk=None):
        """PUT api/hello-view/ -- placeholder for a complete update."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """PATCH api/hello-view/ -- placeholder for a partial update."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """DELETE api/hello-view/ -- placeholder for a delete."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet: action methods that a Router maps to URLs."""

    serializer_class = HelloSerializer

    def list(self, request):
        """GET api/hello-viewset/ -- describe ViewSet behaviour."""
        traits = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message': 'Hello', 'a_viewset': traits})

    def create(self, request):
        """POST api/hello-viewset/ -- greet the caller by the submitted name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def retrieve(self, request, pk=None):
        """GET api/hello-viewset/:pk -- placeholder for fetching one object."""
        return Response({'method': 'GET'})

    def update(self, request, pk=None):
        """PUT api/hello-viewset/:pk -- placeholder for a complete update."""
        return Response({'method': 'PUT'})

    def partial_update(self, request, pk=None):
        """PATCH api/hello-viewset/:pk -- placeholder for a partial update."""
        return Response({'method': 'PATCH'})

    def destroy(self, request, pk=None):
        """DELETE api/hello-viewset/:pk -- placeholder for a delete."""
        return Response({'method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    # Routes (registered via a Router):
    # api/profile/ (GET, POST)
    # api/profile/:id (GET, POST, PUT, PATCH, DELETE)
    # api/profile/?search=<TERM>  (searches name and email)
    """Handle creating and updating profiles"""
    serializer_class = UserProfileSerializer
    queryset = UserProfile.objects.all()
    # Token auth; UpdateOwnProfile restricts writes (see permissions module).
    authentication_classes = (TokenAuthentication,)  # tuple
    permission_classes = (UpdateOwnProfile,)  # tuple
    filter_backends = (filters.SearchFilter,)  # tuple
    search_fields = ('name', 'email',)  # tuple
class UserLoginApiView(ObtainAuthToken):
    """Handle creating user authentication tokens"""
    # Reuses the framework's default renderer classes -- presumably so this
    # endpoint renders in the browsable API like the other views (ObtainAuthToken
    # does not set renderer_classes itself); confirm against DRF settings.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """Handle creating and updating user profile feed items"""
    serializer_class = ProfileFeedItemSerializer
    authentication_classes = (TokenAuthentication,)  # tuple
    queryset = ProfileFeedItem.objects.all()
    # Writes limited to the item's owner; all access requires authentication.
    permission_classes = (UpdateOwnStatus, IsAuthenticated,)  # tuple

    def perform_create(self, serializer):
        """Sets the user profile to the logged in user"""
        # Stamp the feed item with the authenticated user instead of trusting input.
        serializer.save(user_profile=self.request.user)
49c3c55b9bd427040a9e09e289661866fbbb79ce | 2,472 | py | Python | src/pytest_zebrunner/selenium_integration.py | aliscovsky/python-agent-pytest | d1bd4017fc5355c18da1da92272b689386d9d790 | [
"Apache-2.0"
] | null | null | null | src/pytest_zebrunner/selenium_integration.py | aliscovsky/python-agent-pytest | d1bd4017fc5355c18da1da92272b689386d9d790 | [
"Apache-2.0"
] | null | null | null | src/pytest_zebrunner/selenium_integration.py | aliscovsky/python-agent-pytest | d1bd4017fc5355c18da1da92272b689386d9d790 | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Any, Dict
from pytest_zebrunner.context import zebrunner_context
logger = logging.getLogger(__name__)
class SeleniumSession:
    """Tracks live Selenium driver sessions and mirrors them in Zebrunner.

    Maps a Selenium ``session_id`` to the Zebrunner session id (when the
    reporting backend returned one) and to the ids of tests that ran while
    the session was open.
    """

    def __init__(self, reporting_service) -> None:  # type: ignore
        # session_id -> {"zebrunner_session_id": str (optional), "related_tests": [ids]}
        self._active_sessions: Dict[str, Any] = {}
        self.reporting_service = reporting_service

    def start_session(self, session_id: str, capabilities: dict, desired_capabilities: dict) -> None:
        """Register a new driver session and announce it to the backend."""
        self._active_sessions[session_id] = {"related_tests": []}
        zebrunner_session_id = self.reporting_service.start_test_session(
            session_id, capabilities, desired_capabilities
        )
        # The backend may return a falsy id (e.g. it is unreachable); only
        # remember it when it is real.
        if zebrunner_session_id:
            self._active_sessions[session_id]["zebrunner_session_id"] = zebrunner_session_id

    def finish_session(self, session_id: str) -> None:
        """Report one session as finished and forget it locally.

        BUG FIX: previously this indexed "zebrunner_session_id" directly and
        raised KeyError whenever start_test_session had returned a falsy id
        (the key is only stored conditionally in start_session). Now the
        backend call is skipped in that case and local state is still cleaned up.
        """
        session = self._active_sessions[session_id]
        zebrunner_session_id = session.get("zebrunner_session_id")
        if zebrunner_session_id is not None:
            self.reporting_service.finish_test_session(
                zebrunner_session_id,
                session.get("related_tests", []),
            )
        del self._active_sessions[session_id]

    def finish_all_sessions(self) -> None:
        """Finish every session still marked active."""
        for session_id in list(self._active_sessions):
            self.finish_session(session_id)

    def add_test(self, test_id: int) -> None:
        """Attach a test id to every currently active session."""
        for session_id in self._active_sessions:
            if self._active_sessions[session_id].get("related_tests") is not None:
                self._active_sessions[session_id]["related_tests"].append(test_id)
            else:
                self._active_sessions[session_id]["related_tests"] = [test_id]
def inject_driver(session_manager: SeleniumSession) -> None:
    """Monkey-patch selenium's WebDriver so every driver is tracked.

    Wraps ``WebDriver.__init__`` to register the new session with
    *session_manager* (and link the currently running test, if any), and
    replaces ``WebDriver.quit`` so the session is reported as finished.
    Logs a warning and does nothing when selenium is not importable.
    """
    try:
        from selenium.webdriver.remote.webdriver import WebDriver
        # Keep references to the originals so the wrappers can delegate.
        base_init = WebDriver.__init__
        base_close = WebDriver.close

        def init(session, *args, **kwargs) -> None:  # type: ignore
            base_init(session, *args, **kwargs)
            session_manager.start_session(
                session.session_id, session.capabilities, kwargs.get("desired_capabilities", {})
            )
            # A driver may be created mid-test; link it to that test.
            if zebrunner_context.test_is_active:
                session_manager.add_test(zebrunner_context.test_id)

        def quit(session) -> None:  # type: ignore
            session_manager.finish_session(session.session_id)
            # NOTE(review): this delegates to the saved WebDriver.close (which
            # closes the current window), not the original quit -- confirm the
            # remote session is actually terminated.
            base_close(session)

        WebDriver.__init__ = init
        WebDriver.quit = quit
    except ImportError:
        logger.warning("Selenium library is not installed.")
| 38.030769 | 101 | 0.677184 |
c64a67bf463c782bb227f972e74d0a2f39b8aa1e | 1,641 | py | Python | examples/ad_manager/v202011/label_service/create_labels.py | bx2/googleads-python-lib | 72481b1dd05266a760034ef853596e014cc48805 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v202011/label_service/create_labels.py | bx2/googleads-python-lib | 72481b1dd05266a760034ef853596e014cc48805 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v202011/label_service/create_labels.py | bx2/googleads-python-lib | 72481b1dd05266a760034ef853596e014cc48805 | [
"Apache-2.0"
] | 1 | 2021-06-23T09:15:34.000Z | 2021-06-23T09:15:34.000Z | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new labels.
To determine which labels exist, run get_all_labels.py. This feature is only
available to Ad Manager 360 solution networks.
"""
import uuid
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Create five labels with unique names and print the results.

    Args:
        client: an initialized ad_manager.AdManagerClient.
    """
    # Initialize appropriate service.
    label_service = client.GetService('LabelService', version='v202011')
    # Create label objects.
    labels = []
    # BUG FIX: was `xrange(5)` -- Python 2 only; this file otherwise targets
    # Python 3 (parenthesized print calls), so it raised NameError at runtime.
    for _ in range(5):
        label = {
            'name': 'Label #%s' % uuid.uuid4(),
            'isActive': 'true',
            'types': ['COMPETITIVE_EXCLUSION']
        }
        labels.append(label)
    # Add Labels.
    labels = label_service.createLabels(labels)
    # Display results.
    for label in labels:
        print('Label with id "%s", name "%s", and types {%s} was found.'
              % (label['id'], label['name'], ','.join(label['types'])))
if __name__ == '__main__':
    # Initialize client object.
    # LoadFromStorage reads credentials/network settings from the default
    # googleads.yaml storage file.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
| 29.303571 | 77 | 0.707495 |
4f90b9bc9da8c49b3e133dd01375bd7cb77e832a | 2,912 | py | Python | pkgs/burp-ui-monitor/burpui_monitor-decoy/security.py | PaliPalo/burp-ui | affbed705f5b35a630ca1a96c01e6dea1bfbeddb | [
"BSD-3-Clause"
] | 93 | 2015-02-10T16:01:46.000Z | 2021-12-02T21:21:42.000Z | pkgs/burp-ui-monitor/burpui_monitor-decoy/security.py | PaliPalo/burp-ui | affbed705f5b35a630ca1a96c01e6dea1bfbeddb | [
"BSD-3-Clause"
] | 5 | 2015-12-18T19:34:46.000Z | 2021-09-17T14:18:10.000Z | pkgs/burp-ui-monitor/burpui_monitor-decoy/security.py | PaliPalo/burp-ui | affbed705f5b35a630ca1a96c01e6dea1bfbeddb | [
"BSD-3-Clause"
] | 17 | 2015-09-21T22:24:05.000Z | 2021-10-01T14:28:47.000Z | # -*- coding: utf8 -*-
"""
.. module:: burpui.security
:platform: Unix
:synopsis: Burp-UI security module.
.. moduleauthor:: Ziirish <hi+burpui@ziirish.me>
"""
from ._compat import to_unicode, urlparse, urljoin
def sanitize_string(string, strict=True, paranoid=False):
    """Return a 'safe' version of the string (ie. remove malicious chars like
    newlines).

    :param string: String to escape
    :type string: str
    """
    if not string:
        return ""
    if paranoid:
        # Escape every control character.
        return to_unicode(string.encode("unicode_escape"))
    if strict:
        # Keep only the first line.
        return to_unicode(string).split("\n")[0]
    import re

    # Lenient mode: escape via repr(), then strip the surrounding quotes
    # (and an optional leading "u" prefix) that repr() added.
    unquoted = repr(string).replace("\\\\", "\\")
    unquoted = re.sub(r"^u?(?P<quote>['\"])(.*)(?P=quote)$", r"\2", unquoted)
    return to_unicode(unquoted)
def basic_login_from_request(request, app):
    """Check 'Authorization' headers and log the user in if possible.

    Returns the logged-in user object on success, None otherwise.
    Requests coming from the UI itself (X-From-UI header) are ignored, as is
    everything when authentication is disabled.

    :param request: The input request
    :type request: :class:`flask.Request`
    :param app: The application context
    :type app: :class:`burpui.engines.server.BUIServer`
    """
    if app.auth != "none":
        # The web UI authenticates via its own session, not Basic auth.
        if request.headers.get("X-From-UI", False):
            return None
        auth = request.authorization
        if auth:
            from flask import session, g

            app.logger.debug("Found Basic user: {}".format(auth.username))
            # Reuse the cached user when the session already belongs to them.
            refresh = True
            if "login" in session and session["login"] == auth.username:
                refresh = False
            session["language"] = request.headers.get("X-Language", "en")
            user = app.uhandler.user(auth.username, refresh)
            if user and user.active and user.login(auth.password):
                from flask_login import login_user
                from .sessions import session_manager

                # A different user was attached to this session: start clean.
                if "login" in session and session["login"] != auth.username:
                    session.clear()
                    session["login"] = auth.username
                    session["language"] = request.headers.get("X-Language", "en")
                login_user(user)
                if request.headers.get("X-Reuse-Session", False):
                    # Persist the session so subsequent API calls can reuse it.
                    session_manager.store_session(
                        auth.username,
                        request.remote_addr,
                        request.headers.get("User-Agent"),
                        remember=False,
                        api=True,
                    )
                else:
                    # Mark this as a one-shot Basic-auth session.
                    g.basic_session = True
                app.logger.debug("Successfully logged in")
                return user
            app.logger.warning("Failed to log-in")
    return None
def is_safe_url(target):
    """Return True when *target* redirects within the current host over http(s)."""
    from flask import request

    host_url = urlparse(request.host_url)
    redirect_url = urlparse(urljoin(request.host_url, target))
    return (
        redirect_url.scheme in ("http", "https")
        and host_url.netloc == redirect_url.netloc
    )
| 33.471264 | 85 | 0.567308 |
5ec56f32c96b212941cfaee86e77c94b1e1bd129 | 2,512 | py | Python | metrics/accf1.py | ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer | 93871bed9078d5bf6b4bb37407c9dce87c569b55 | [
"MIT"
] | null | null | null | metrics/accf1.py | ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer | 93871bed9078d5bf6b4bb37407c9dce87c569b55 | [
"MIT"
] | null | null | null | metrics/accf1.py | ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer | 93871bed9078d5bf6b4bb37407c9dce87c569b55 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.metrics import f1_score, accuracy_score
def acc_f1_score(y_true, y_pred, ignore_index=None, normalize=False, average='macro', **kwargs):
    """Multi-class accuracy and f1 score.

    Entries whose true label equals ``ignore_index`` are dropped before
    scoring.  Returns ``(acc, f1)``; with ``normalize=False`` ``acc`` is the
    raw count of correct predictions (sklearn ``accuracy_score`` semantics).
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    if ignore_index is not None:
        leave = y_true != ignore_index
    else:
        # BUG FIX: the mask must be boolean. np.ones_like(y_true) produces
        # integer ones, and y_true[leave] then performs *fancy* indexing --
        # selecting index 1 over and over -- instead of keeping every element.
        leave = np.ones_like(y_true, dtype=bool)
    y_true = y_true[leave]
    y_pred = y_pred[leave]
    f1 = f1_score(y_true=y_true, y_pred=y_pred, average=average, **kwargs)
    acc = accuracy_score(y_true=y_true, y_pred=y_pred, normalize=normalize)
    return acc, f1
class AccF1Metric(object):
    """Accumulates predictions and labels across batches, then reports
    (accuracy, f1) over everything seen since the last clear()."""

    def __init__(self, ignore_index, average='macro'):
        self.ignore_index = ignore_index
        self.average = average
        self.y_pred = []
        self.y_true = []

    def update(self, y_pred, y_true):
        """Buffer one batch of predictions and ground-truth labels."""
        self.y_pred.append(y_pred)
        self.y_true.append(y_true)

    def clear(self):
        """Drop everything accumulated so far."""
        self.y_pred = []
        self.y_true = []

    def get(self):
        """Flatten the buffered batches and score them."""
        flat_true = np.stack(self.y_true, axis=0).reshape(-1)
        flat_pred = np.stack(self.y_pred, axis=0).reshape(-1)
        return acc_f1_score(y_true=flat_true, y_pred=flat_pred,
                            average=self.average,
                            normalize=True,
                            ignore_index=self.ignore_index)
class MultiLabelAccF1(object):
    """Accuracy and mean F1 over multi-label predictions.

    Batches are stacked into (num_samples, num_labels) arrays; each label
    column is scored independently (entries equal to ``ignore_index`` are
    dropped), then correct counts are pooled into one accuracy and the F1
    scores are averaged.
    """

    def __init__(self, ignore_index=None, average='binary'):
        self.ignore_index = ignore_index
        self.average = average
        self.y_pred = []
        self.y_true = []

    def update(self, y_pred, y_true):
        """Buffer one batch of predictions and ground-truth labels."""
        self.y_pred.append(y_pred)
        self.y_true.append(y_true)

    def clear(self):
        """Drop everything accumulated so far."""
        self.y_true = []
        self.y_pred = []

    def get(self):
        """Return (accuracy over all labeled entries, mean per-label F1)."""
        y_true = np.vstack(self.y_true)
        y_pred = np.vstack(self.y_pred)
        # (removed unused `total_num` local)
        labeled_idx = y_true != self.ignore_index
        labeled_num = np.sum(labeled_idx)
        acc = 0
        f1 = []
        for i in range(y_pred.shape[1]):
            # normalize=False makes acc_i a raw count of correct predictions,
            # so the pooled accuracy below is correct across columns.
            acc_i, f1_i = acc_f1_score(y_true=y_true[:, i], y_pred=y_pred[:, i],
                                       average=self.average,
                                       normalize=False,
                                       ignore_index=self.ignore_index)
            acc += acc_i
            f1.append(f1_i)
        acc = acc / labeled_num
        f1 = np.mean(f1)
        return acc, f1
| 32.205128 | 96 | 0.57285 |
77e13aec01637c051d31ff315fefbf23b734a76d | 4,025 | py | Python | flickr-data-tool.py | lucasrangit/flickr-data-tool | 1df74c7910fc02b29fadf835cb623f1699638d0f | [
"Apache-2.0"
] | 1 | 2019-01-04T00:22:53.000Z | 2019-01-04T00:22:53.000Z | flickr-data-tool.py | lucasrangit/flickr-data-tool | 1df74c7910fc02b29fadf835cb623f1699638d0f | [
"Apache-2.0"
] | null | null | null | flickr-data-tool.py | lucasrangit/flickr-data-tool | 1df74c7910fc02b29fadf835cb623f1699638d0f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Flickr Data Tool
Requires Flickr data in two directories: metadata (json) and data (photo/video).
Features:
* Recreate albums as "Title - Description" and hardlink photos/videos.
"""
import argparse
from glob import glob
import json
import os
from pprint import pprint
import shutil
import sys
# Flickr photo ids already moved into an album; guards against one photo being
# claimed by more than one destination album (see photo_handler).
photos_processed = list()
def photo_get_path(args, photo_json):
    """Locate the media file for *photo_json* under args.src, or None.

    Flickr names downloaded files "<name lowercased, dots removed>_<id><rest>".
    Returns None when nothing matches (lets an interrupted run restart);
    raises when the pattern is ambiguous.
    """
    stem = photo_json["name"].lower().replace(".", "")
    pattern = "{}_{}*".format(stem, photo_json["id"])
    matches = glob(os.path.join(args.src, pattern))
    if not matches:
        return None  # not found: skip, and don't mark as processed
    if len(matches) > 1:
        print(matches)
        raise Exception("FIXME multiple file match found")
    return matches[0]
def photo_handler(args, photo, album_path):
    """
    Move one exported file (photo or movie) into *album_path*.
    """
    print("Photo: %s (id %s albums %d)" % (photo["name"], photo["id"], len(photo["albums"])))
    # print(photo)
    # NOTE(review): photo["albums"] is not reliable and may disagree with albums.json
    src_path = photo_get_path(args, photo)
    if not src_path:
        # not found on disk — probably already moved by a previous run
        return
    # guard: a photo id must not land in two destination albums
    if photo["id"] in photos_processed:
        print(photo)
        raise Exception("FIXME multiple destination albums")
    photos_processed.append(photo["id"])
    # relocate the file into the album directory, keeping its base name
    dest = os.path.join(album_path, os.path.basename(src_path))
    shutil.move(src_path, dest)
def album_handler(args, album):
    """Create the album's destination directory and move its photos into it.

    For each photo id listed in the album, loads the matching
    ``photo_<id>.json`` from args.metadata (silently skipping ids with no
    metadata file) and hands it to photo_handler.
    """
    print("Album: %s (id %s %d photos)" % (album["title"], album["id"], int(album["photo_count"])))
    # print(album)
    # create album directories from titles
    album_path = os.path.join(args.dst, album["title"])
    if not os.path.exists(album_path):
        try:
            os.makedirs(album_path)
        except OSError as e:
            # was a bare except; narrow it and chain the cause so the real
            # filesystem error is not lost
            raise Exception("FIXME failed to create directory") from e
    for photo_id in album["photos"]:
        photo_json_path = os.path.join(args.metadata, "photo_" + photo_id + ".json")
        if not os.path.exists(photo_json_path):
            # print("%s not found" % (photo_json_path))
            continue
        with open(photo_json_path) as read_file:
            data = json.load(read_file)
        photo_handler(args, data, album_path)
def main(arguments):
    """Entry point: parse CLI arguments, validate albums.json, rebuild albums.

    Returns 0 on success, 1 when source and destination paths coincide.
    Raises Exception when albums.json is missing, an album has no title,
    or two albums share a title.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--metadata', help="metadata path")
    parser.add_argument('--src', help="path to source photo and video files")
    parser.add_argument('--dst', help="path to source files organized in albums")
    args = parser.parse_args(arguments)

    # moving files onto themselves would be destructive — refuse
    if os.path.realpath(args.src) == os.path.realpath(args.dst):
        print("Source and Destination must be different")
        return 1

    albums_json_path = os.path.join(args.metadata, "albums.json")
    if not os.path.exists(albums_json_path):
        raise Exception("metadata/albums.json not found")
    with open(albums_json_path) as read_file:
        data = json.load(read_file)

    if not os.path.exists(args.dst):
        os.makedirs(args.dst)

    print("Albums:", len(data["albums"]))
    _validate_albums(data["albums"])
    for album in data["albums"]:
        album_handler(args, album)
    # pprint(data)
    return 0


def _validate_albums(albums):
    """Fail fast when an album lacks a title or two albums share one."""
    # all albums must have titles (they become directory names)
    for album in albums:
        if not album["title"]:
            print(album)
            raise Exception("FIXME missing album title")
    # duplicate detection in one pass instead of O(n^2) list scans
    seen = set()
    dups = set()
    for album in albums:
        title = album["title"]
        if title in seen:
            dups.add(title)
        seen.add(title)
    if dups:
        print(sorted(dups))
        raise Exception("FIXME duplicate album title")
# Script entry: run main() with the CLI arguments and propagate its exit code.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 28.75 | 99 | 0.637764 |
aab3fafdeb8199a03db2a05c91b3a195bd861531 | 74,997 | py | Python | API_Automation/Shared/resources/shared_nbs_nasa/parametros_nbs_nasa.py | yjoaoMarco/automacao-site-vw-epeca | 7c3ab025b280b0af5e8bae9c3060e8335e978e78 | [
"MIT"
] | null | null | null | API_Automation/Shared/resources/shared_nbs_nasa/parametros_nbs_nasa.py | yjoaoMarco/automacao-site-vw-epeca | 7c3ab025b280b0af5e8bae9c3060e8335e978e78 | [
"MIT"
] | null | null | null | API_Automation/Shared/resources/shared_nbs_nasa/parametros_nbs_nasa.py | yjoaoMarco/automacao-site-vw-epeca | 7c3ab025b280b0af5e8bae9c3060e8335e978e78 | [
"MIT"
] | null | null | null | def nbs_nasa_token():
return{
"method": "POST",
"endpoint": "http://201.47.184.196:8080/nbs-infra/security/token?usuario=EPECASVW&senha=nbs&idioma=PT&pacote=ASSOBRAV",
"body": "",
"headers": ""
}
def nbs_nasa_conexao():
    """Build the request descriptor that checks the NBS/NASA link status.

    Returns a dict with the HTTP method, endpoint, empty body and the
    (hard-coded) authorization headers expected by the request helper.
    """
    auth = "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w"
    return {
        "method": "GET",
        "endpoint": "http://201.47.184.196:8080/assobrav/conexao/status",
        "body": "",
        "headers": {"Authorization": auth, "Content-Type": "application/json"},
    }
def nbs_nasa_ativar_10_produtos():
    """Request descriptor that activates ten products via POST /api/produtos.

    Every payload entry repeats the product id as both ``produtoId`` and
    ``codigoOriginal`` and shares the same status and timestamp.
    """
    auth = "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w"
    product_ids = [
        "JZZ915105", "JZZ698302B", "JZZ698151AB", "JZZ998051", "JZZ698151AH",
        "JZW698451A", "JZZ129620B", "JZW615301H", "JZW698151AC", "JZW998002",
    ]
    body = [
        {
            "produtoId": pid,
            "erpEmpresaId": "2",
            "codigoOriginal": pid,
            "status": "ATIVAR",
            "dtAtualizacao": "2021-12-01T20:18:11.994Z",
        }
        for pid in product_ids
    ]
    return {
        "method": "POST",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/produtos",
        "body": body,
        "headers": {"Authorization": auth, "Content-Type": "application/json"},
    }
def nbs_nasa_ativar_2_produtos():
    """Request descriptor that activates two products via POST /api/produtos."""
    auth = "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w"
    body = [
        {
            "produtoId": pid,
            "erpEmpresaId": "2",
            "codigoOriginal": pid,
            "status": "ATIVAR",
            "dtAtualizacao": "2021-12-01T20:18:11.994Z",
        }
        for pid in ["JZZ915105", "JZZ698302B"]
    ]
    return {
        "method": "POST",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/produtos",
        "body": body,
        "headers": {"Authorization": auth, "Content-Type": "application/json"},
    }
def nbs_nasa_retirar_1_produto_da_fila():
    """Request descriptor: PUT removing one product from the integration queue.

    NOTE(review): here ``erpEmpresaId`` is the integer 2, unlike the
    activation payloads where it is the string "2" — confirm the API accepts
    both.
    """
    auth = "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w"
    return {
        "method": "PUT",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/produtos/fila",
        "body": [{"produtoId": "JZZ915105", "erpEmpresaId": 2}],
        "headers": {"Authorization": auth, "Content-Type": "application/json"},
    }
def nbs_nasa_ativar_5_produtos():
    """Request descriptor that activates five products via POST /api/produtos."""
    auth = "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w"
    product_ids = ["JZZ915105", "JZZ698302B", "JZZ698151AB", "JZZ998051", "JZZ698151AH"]
    body = [
        {
            "produtoId": pid,
            "erpEmpresaId": "2",
            "codigoOriginal": pid,
            "status": "ATIVAR",
            "dtAtualizacao": "2021-12-01T20:18:11.994Z",
        }
        for pid in product_ids
    ]
    return {
        "method": "POST",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/produtos",
        "body": body,
        "headers": {"Authorization": auth, "Content-Type": "application/json"},
    }
def nbs_nasa_buscar_produtos_fila():
    """Request descriptor: GET the products currently on the integration queue."""
    auth = "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w"
    return {
        "method": "GET",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/produtos/fila",
        "body": "",
        "headers": {"Authorization": auth, "Content-Type": "application/json"},
    }
def nbs_nasa_retirar_5_produtos_da_fila():
    """Request descriptor: PUT removing five products from the queue.

    NOTE(review): ``erpEmpresaId`` is the integer 2 here, not the string "2"
    used by the activation payloads.
    """
    auth = "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w"
    product_ids = ["JZZ915105", "JZZ698302B", "JZZ698151AB", "JZZ998051", "JZZ698151AH"]
    return {
        "method": "PUT",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/produtos/fila",
        "body": [{"produtoId": pid, "erpEmpresaId": 2} for pid in product_ids],
        "headers": {"Authorization": auth, "Content-Type": "application/json"},
    }
def nbs_nasa_limpar_fila():
return{
"method": "PUT",
"endpoint": "http://201.47.184.196:8080/assobrav/api/produtos/fila",
"body": [
{
"produtoId": "APR057001IV",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GD",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005FB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QE",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005KS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004F",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031K",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998002K",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CP",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QL",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ198015B",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005M",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003AP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004PG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JG",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615301",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ML",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002T",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FH",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ198015E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025H",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698451AE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004J",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005LT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JS",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051A",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NA",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998003A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004G",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DS",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998002Q",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998002",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004MN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ES",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005KT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005LB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MJ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ198015C",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698520",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002TA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KL",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW413031A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002SS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615301A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JB",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031G",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TG",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005Q",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002ST",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TF",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QS",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ME",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025G",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ819653D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005N",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005RB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ915105",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ915105B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698302B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698151AC",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615301H",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004MM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698451A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GA",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HP",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ915105A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CT",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698451C",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031F",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005RA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005LA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NH",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003AQ",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054B",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698302",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HJ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998003C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TA",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005T",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005FD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QF",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005R",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ250R2BRA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CD",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ530R2BRA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003AN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HF",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004K",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998003B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AE",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HD",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998002M",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002A",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002E",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ819653B",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZW012R2",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005H",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002RF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BL",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JD",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW413031B",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ115561A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057001PJ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698151AD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004ED",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998003D",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ST",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DT",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QG",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005C",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CA",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LL",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ502M2BRA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HC",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031H",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ040R3",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QD",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698451AB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005T",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005FD",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051C",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004K",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HD",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KH",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004B",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005R",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698520",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003AN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615301A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005L",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW413031B",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002ST",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002RF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005FC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005D",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002F",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004ED",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ915105B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005P",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PF",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004S",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FD",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ040R3",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NQ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698151AD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698451C",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031F",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EA",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998051B",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005RA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005LA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NH",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003AQ",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054B",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698302",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GQ",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ502M2BRA",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698451AC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002TA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CQ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW413031A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025H",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TS",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698451AE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004J",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005LT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SP",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998003A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005KS",
"erpEmpresaId": "2"
},
{
"produtoId": "V04010054A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004F",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TB",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031K",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RQ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004G",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005CP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PE",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998002Q",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QS",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ME",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005GR",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ819653D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005PA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004TK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005N",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004MN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004KA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004PG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004NS",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615301",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ML",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057002T",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998003C",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031J",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HD",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NS",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ250R2BRA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GM",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZZ530R2BRA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005HE",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ819653C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FH",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ198015E",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW201511",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620D",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998003B",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AE",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW998002M",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AJ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ198015D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ819653B",
"erpEmpresaId": "2"
},
{
"produtoId": "GJZW012R2",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004D",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DJ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ998002C",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BP",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615301J",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BL",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ819653",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025F",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ST",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003BF",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MC",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EN",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QQ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AG",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615601F",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698151AA",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ129620A",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698302A",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004ES",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TM",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ413031H",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004RE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004QD",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698451AB",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005FA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003AM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FP",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698451",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004ET",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ND",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FT",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ698451AA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MH",
"erpEmpresaId": "2"
},
{
"produtoId": "JZZ513025C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005BL",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004MK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004LE",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003BF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MC",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QQ",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW615601F",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057001JA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NP",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004ES",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JH",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ER",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ET",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005SS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005FA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004GG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004E",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004CR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004HG",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057003AM",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QA",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004DR",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BS",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004EJ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FP",
"erpEmpresaId": "2"
},
{
"produtoId": "JZW698451",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004ET",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004JK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005QK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DQ",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004C",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005MT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FN",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005TT",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005ND",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004BK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005NF",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057005DK",
"erpEmpresaId": "2"
},
{
"produtoId": "APR057004FT",
"erpEmpresaId": "2"
},
],
"headers": {
"Authorization": "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w",
"Content-Type": "application/json"
}
}
def nbs_nasa_retirar_10_produtos_da_fila():
    """Request fixture: PUT that removes 10 products from the NBS/NASA product queue.

    Returns a dict describing the HTTP call (method, endpoint, body, headers)
    consumed by the test driver. The body lists ten product ids, all belonging
    to ERP company 2 (note: ``erpEmpresaId`` is an int here, unlike the string
    form used by other fixtures in this module).
    """
    # Product codes to be taken off the queue; each is paired with company id 2.
    codigos_produto = [
        "JZZ915105",
        "JZZ698302B",
        "JZZ698151AB",
        "JZZ998051",
        "JZZ698151AH",
        "JZW698451A",
        "JZZ129620B",
        "JZW615301H",
        "JZW698151AC",
        "JZW998002",
    ]
    corpo_fila = [
        {"produtoId": codigo, "erpEmpresaId": 2} for codigo in codigos_produto
    ]
    return {
        "method": "PUT",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/produtos/fila",
        "body": corpo_fila,
        "headers": {
            "Authorization": "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w",
            "Content-Type": "application/json",
        },
    }
def nbs_nasa_pedidos():
    """Request fixture: POST creating a single order on the NBS/NASA API.

    Returns a dict describing the HTTP call (method, endpoint, body, headers).
    The body is a one-element list holding the order payload: customer data,
    logistics (delivery address duplicates the customer address), payment
    details, the ordered product, and intermediary information.
    """
    # Address used both as the customer's registered address and,
    # as an independent copy, as the delivery address.
    endereco = {
        "rua": "MARCO BIGARELLA",
        "numero": "455",
        "complemento": "apto",
        "uf": "PR",
        "cidade": "Curitiba",
        "bairro": "Tarumã",
        "cep": "82530350",
        "observacao": "edificio teste",
        "codigoIbge": "4106902",
    }
    cliente = {
        "tipoCliente": "PF",
        "cnpjCpf": "16487465003",
        "inscricaoRg": "196182098",
        "nomeRazao": "Sergio Teste",
        "nomeFantasia": "None",
        "dddTelefone01": "41",
        "telefone01": "996786632",
        "dddTelefone02": "41",
        "telefone02": "996786632",
        "email": "sergio@e-peca.com.br",
        "genero": "M",
        "dtNascimento": "1986-06-24",
        "endereco": endereco,
    }
    logistica = {
        "freteId": "421",
        "dtEntregaEstimada": "2021-11-06",
        "presente": False,
        "presenteMensagem": "",
        # dict() keeps the delivery address a distinct object, as in the
        # original literal, while carrying identical values.
        "enderecoEntrega": dict(endereco),
    }
    pagamento = {
        "formaPagamentoId": "25366",
        "bandeiraCartao": "Hipercard Crédito",
        "numeroParcelas": "1",
        "nsu": "1233",
        "authCodePagamento": "1233",
        "transactionId": "1233",
    }
    produtos = [
        {
            "produtoId": "G1CL003",
            "precoVenda": 545.95,
            "quantidade": 1,
        }
    ]
    informacoes_adicionais = {
        "intermediador": {
            "cnpjAdquirencia": "14338304000178",
            "razaoSocialAdquirencia": "YAPAY PAGAMENTOS ONLINE LTDA",
            "cnpjIntermediador": "25382893000108",
            "razaoSocialIntermediador": "E-PECA DESENVOLVIMENTO DE SISTEMAS LTDA",
            "idRegraValidacao": "2",
        }
    }
    pedido = {
        "ecommercePedidoId": "251800972",
        "vendedorId": "EPECASVW",
        "erpEmpresaId": "2",
        "valorPedido": 714.08,
        "valorFrete": 168.13,
        "dtCriacao": "2021-11-01",
        "dtPagamento": "2021-11-01",
        "cliente": cliente,
        "logistica": logistica,
        "pagamento": pagamento,
        "produtos": produtos,
        "informacoesAdicionais": informacoes_adicionais,
    }
    return {
        "method": "POST",
        "endpoint": "http://201.47.184.196:8080/assobrav/api/pedidos",
        "body": [pedido],
        "headers": {
            "Authorization": "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjZmMzNiOTk4LTFjNjAtNGI0ZC1hZjk3LWQwZTE2ZGQ5OTFhYiIsIm4iOiJpQ1VWcjNELXZpYnZ1bV9BYnRjQjRkTzFkZ09wYWRjTllLSXF4M3B5MUphTDI4M1F3cUY4Q0V2VVE0UDFxZkFsSkR0SWFQSTlxQ3haVy1VNThDZ1ByVmxHemxLVmsyN1o3SlVOZVU2UGF6SlBwOW12QVJ2dnd5ci1IbWlnQzFmOXdmdE53ZGZmN05vUjI3X3Y4S2NMOE94TnQ3RndrTmdWcHY2NkNzZnIxRlg5dzM2ZDNFV0NUUFdtRlNsalhxbXJWYXl1VXNxRlI3RlFFMEFPSEl0dWZZaVNZZU1nY0d2SU5XcGZGZlRUVzV0Tm56NzlHOUhUOTVXVjBnTUJwT0lBV0JsNWR2WngyUmtjWFg0Zm1yS2xyWEl4Y2JfQm4yT0JvdkhBbGloQk1ibEFMYjBFSklWQ3pqZnF3bUozaVNUV0pTUkNKTUVrbmJBaEhBY3gxNkJaOFEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjM5MjMzNjgyLCJ1c2VySWQiOjIsImlhdCI6MTYzOTE0NzI4Mn0.EG9zHwWbHgChv9xmN8KHgsbkDEDAwRFhahHpF2C3nUY7XYaz-N8pX15GugWz5L8vpFVYoaFItUMut1u5jprcRef3dR7pTl1CZAI_ms4F1ByMkuPnfQufSFqsLzirMjw7okJOeziqKFE8H6t-0sm-ubzO-RQamj2L64Be3LQ7cJzWrrJmI4TV5fr7HO7-xO-5ot8-0bV0SucFbUAMegyjz4jVaUjyRzUYORw9V_GzKo0_4Mfj95EJB_Tx8la-PblYt7zvFp0DHFOdUv_ip0SUqLobEqkURaXBGqSiT3Yrxshf-SzcMS9AK-pj_oXufLC7EQ1EH5dhWL73ozBrHSDCog",
            "Content-Type": "application/json",
        },
    }
def nbs_nasa_nota_fiscal():
    """Request fixture: GET fetching the invoice (nota fiscal) of order 251794972.

    Returns a dict describing the HTTP call (method, endpoint, body, headers);
    the body is empty, as expected for a GET request.
    """
    url_nota_fiscal = (
        "http://201.47.184.196:8080/assobrav/api/pedido/251794972/nota_fiscal"
    )
    cabecalhos = {
        "Authorization": "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp3ayI6eyJrdHkiOiJSU0EiLCJlIjoiQVFBQiIsImtpZCI6IjdjNzRlODU4LTQ1NTItNGMzYS1iZjVkLTEyNzAyOTk2NGExOSIsIm4iOiJxUnB2VVhSWHAtUTVIYkQ2QVFnSkFYVkRXU3l0cWItVnNJam43LU5oYXd2dl9wa3d6QnhrR2ZpZDFYekdLdTgzNjZFdXJhazZjZzRpcGdSUTVhYnM2STNHdzFkTzFVT0lMY0hBdnJHV0NEVVZseU9hX2hMaEwySUJOOFlpQ052YkdnUDZhUkxRVHljY1BBMFpfZUNKbm1maDM0X1QtQkFSRjM0eUl2RkhJakt6MjgzR1VHc1phMEh6VEZjUG5VQW5GN1ExVElTYzBzRjVFWUpKUUhIN1N1ZVJxZF8wbjlkNWR2ZVhsekJCNy1OWTVIajhwUUdOQlY3bHhRMlJfY0dSenBUaW9VZk9nak1lOTQwbDdMeWJUb3FEcWxZdFB4ZWI4Q19EMUNPVEFpcHdEMmh2TVhfa1BXcHFTR1FvNW80YU5CVkdiNE5MNVpZMXhlc19wQnNMZVEifX0.eyJpc3MiOiJodHRwOlwvXC9sb2NhbGhvc3Q6ODA4MCIsInN1YiI6IkVQRUNBU1ZXIiwiZXhwIjoxNjQxMzE4ODg2LCJ1c2VySWQiOjIsImlhdCI6MTY0MTIzMjQ4Nn0.fW7J_BAN_eF3M6eaxTim-uoSKbM_wjxJxvhBbOASrMJ3W0KqppVRLzN61WbRXAzFlrV76-dn4uODoH8Fgx0R8vQezd54lXRYUWGaBrVlWA8oVtpIthMX7NwKvhgAKAOEwnqtthXww7msuOY3DEzdNky5bQpdFDR7Ros3DhyG0zQgWewvkqIs94GDQ8mV7fbiIczQVL5bSY8BgT4D73RvrLI66qYyvQ2LEL_w5PVdLkpSIjcFbW6KP89WxOXXpI0r2hABRkQ4JoqvjCZB02a6RX68qAfxEXxasFr2S_wA69VhAicAgWYWv58Xkwmq2BSQfwZoLkHxFHcL5omE0wOJ9w",
        "Content-Type": "application/json",
    }
    return {
        "method": "GET",
        "endpoint": url_nota_fiscal,
        "body": [],
        "headers": cabecalhos,
    }
| 22.913841 | 1,113 | 0.536115 |
7f467675fad3ea982c5bed72aa0a85f787798221 | 8,763 | py | Python | tasklib/serializing.py | xerus2000/tasklib | d912f4371ceef0c7028a63d76d2152b27cc12943 | [
"BSD-3-Clause"
] | null | null | null | tasklib/serializing.py | xerus2000/tasklib | d912f4371ceef0c7028a63d76d2152b27cc12943 | [
"BSD-3-Clause"
] | null | null | null | tasklib/serializing.py | xerus2000/tasklib | d912f4371ceef0c7028a63d76d2152b27cc12943 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import importlib
import json
import pytz
import tzlocal
from .lazy import LazyUUIDTaskSet, LazyUUIDTask
DATE_FORMAT = '%Y%m%dT%H%M%SZ'
local_zone = tzlocal.get_localzone()
class SerializingObject(object):
"""
Common ancestor for TaskResource & TaskWarriorFilter, since they both
need to serialize arguments.
Serializing method should hold the following contract:
- any empty value (meaning removal of the attribute)
is deserialized into a empty string
- None denotes a empty value for any attribute
Deserializing method should hold the following contract:
- None denotes a empty value for any attribute (however,
this is here as a safeguard, TaskWarrior currently does
not export empty-valued attributes) if the attribute
is not iterable (e.g. list or set), in which case
a empty iterable should be used.
Normalizing methods should hold the following contract:
- They are used to validate and normalize the user input.
Any attribute value that comes from the user (during Task
initialization, assignign values to Task attributes, or
filtering by user-provided values of attributes) is first
validated and normalized using the normalize_{key} method.
- If validation or normalization fails, normalizer is expected
to raise ValueError.
"""
def __init__(self, backend):
self.backend = backend
def _deserialize(self, key, value):
hydrate_func = getattr(self, 'deserialize_{0}'.format(key),
lambda x: x if x != '' else None)
return hydrate_func(value)
def _serialize(self, key, value):
dehydrate_func = getattr(self, 'serialize_{0}'.format(key),
lambda x: x if x is not None else '')
return dehydrate_func(value)
def _normalize(self, key, value):
"""
Use normalize_<key> methods to normalize user input. Any user
input will be normalized at the moment it is used as filter,
or entered as a value of Task attribute.
"""
# None value should not be converted by normalizer
if value is None:
return None
normalize_func = getattr(self, 'normalize_{0}'.format(key),
lambda x: x)
return normalize_func(value)
def timestamp_serializer(self, date):
if not date:
return ''
# Any serialized timestamp should be localized, we need to
# convert to UTC before converting to string (DATE_FORMAT uses UTC)
date = date.astimezone(pytz.utc)
return date.strftime(DATE_FORMAT)
def timestamp_deserializer(self, date_str):
if not date_str:
return None
# Return timestamp localized in the local zone
naive_timestamp = datetime.datetime.strptime(date_str, DATE_FORMAT)
localized_timestamp = pytz.utc.localize(naive_timestamp)
return localized_timestamp.astimezone(local_zone)
    # Per-attribute hooks for the datetime fields (entry, modified, start,
    # end, due, scheduled, until, wait).  All of them delegate to the shared
    # timestamp (de)serializers and the datetime normalizer defined above.
    def serialize_entry(self, value):
        return self.timestamp_serializer(value)
    def deserialize_entry(self, value):
        return self.timestamp_deserializer(value)
    def normalize_entry(self, value):
        return self.datetime_normalizer(value)
    def serialize_modified(self, value):
        return self.timestamp_serializer(value)
    def deserialize_modified(self, value):
        return self.timestamp_deserializer(value)
    def normalize_modified(self, value):
        return self.datetime_normalizer(value)
    def serialize_start(self, value):
        return self.timestamp_serializer(value)
    def deserialize_start(self, value):
        return self.timestamp_deserializer(value)
    def normalize_start(self, value):
        return self.datetime_normalizer(value)
    def serialize_end(self, value):
        return self.timestamp_serializer(value)
    def deserialize_end(self, value):
        return self.timestamp_deserializer(value)
    def normalize_end(self, value):
        return self.datetime_normalizer(value)
    def serialize_due(self, value):
        return self.timestamp_serializer(value)
    def deserialize_due(self, value):
        return self.timestamp_deserializer(value)
    def normalize_due(self, value):
        return self.datetime_normalizer(value)
    def serialize_scheduled(self, value):
        return self.timestamp_serializer(value)
    def deserialize_scheduled(self, value):
        return self.timestamp_deserializer(value)
    def normalize_scheduled(self, value):
        return self.datetime_normalizer(value)
    def serialize_until(self, value):
        return self.timestamp_serializer(value)
    def deserialize_until(self, value):
        return self.timestamp_deserializer(value)
    def normalize_until(self, value):
        return self.datetime_normalizer(value)
    def serialize_wait(self, value):
        return self.timestamp_serializer(value)
    def deserialize_wait(self, value):
        return self.timestamp_deserializer(value)
    def normalize_wait(self, value):
        return self.datetime_normalizer(value)
    def serialize_annotations(self, value):
        # Serialize annotations as a list of plain dicts; '' when empty.
        value = value if value is not None else []
        # This may seem weird, but it's correct, we want to export
        # a list of dicts as serialized value
        serialized_annotations = [json.loads(annotation.export_data())
                                  for annotation in value]
        return serialized_annotations if serialized_annotations else ''
    def deserialize_annotations(self, data):
        # Import tasklib.task lazily to avoid a circular import at module
        # load time.
        task_module = importlib.import_module('tasklib.task')
        TaskAnnotation = getattr(task_module, 'TaskAnnotation')
        return [TaskAnnotation(self, d) for d in data] if data else []
def serialize_tags(self, tags):
return list(tags or [])
def deserialize_tags(self, tags):
if isinstance(tags, str):
return set(tags.split(','))
return set(tags or [])
    def serialize_parent(self, parent):
        # Only the parent's uuid is stored; '' when the task has no parent.
        return parent['uuid'] if parent else ''
    def deserialize_parent(self, uuid):
        # Wrap the uuid in a lazy reference so the parent task is only
        # fetched from the backend on demand.
        return LazyUUIDTask(self.backend, uuid) if uuid else None
    def serialize_depends(self, value):
        # Return the list of uuids
        value = value if value is not None else set()
        if isinstance(value, LazyUUIDTaskSet):
            # Use the raw uuids directly to avoid resolving the lazy set.
            return ','.join(value._uuids)
        else:
            return ','.join(task['uuid'] for task in value)
def deserialize_depends(self, raw_uuids):
raw_uuids = raw_uuids or [] # Convert None to empty list
if not raw_uuids:
return set()
# TW 2.4.4 encodes list of dependencies as a single string
if type(raw_uuids) is not list:
uuids = raw_uuids.split(',')
# TW 2.4.5 and later exports them as a list, no conversion needed
else:
uuids = raw_uuids
return LazyUUIDTaskSet(self.backend, uuids)
    def datetime_normalizer(self, value):
        """
        Normalizes date/datetime value (considered to come from user input)
        to localized datetime value. Following conversions happen:
        naive date -> localized datetime with the same date, and time=midnight
        naive datetime -> localized datetime with the same value
        localized datetime -> localized datetime (no conversion)
        """
        # NOTE: the date check must exclude datetime, since datetime is a
        # subclass of date.
        if (
            isinstance(value, datetime.date)
            and not isinstance(value, datetime.datetime)
        ):
            # Convert to local midnight
            value_full = datetime.datetime.combine(value, datetime.time.min)
            localized = local_zone.localize(value_full)
        elif isinstance(value, datetime.datetime):
            if value.tzinfo is None:
                # Convert to localized datetime object
                localized = local_zone.localize(value)
            else:
                # If the value is already localized, there is no need to change
                # time zone at this point. Also None is a valid value too.
                localized = value
        elif isinstance(value, str):
            # Strings are parsed by the backend — presumably TaskWarrior date
            # expressions; confirm supported formats against the backend.
            localized = self.backend.convert_datetime_string(value)
        else:
            raise ValueError("Provided value could not be converted to "
                             "datetime, its type is not supported: {}"
                             .format(type(value)))
        return localized
def normalize_uuid(self, value):
# Enforce sane UUID
if not isinstance(value, str) or value == '':
raise ValueError("UUID must be a valid non-empty string, "
"not: {}".format(value))
return value
| 34.5 | 79 | 0.652288 |
d39c256d6bc350df51df242952f9c1953dae4c50 | 36,385 | py | Python | EQTransformer/utils/associator.py | malcolmw/EQTransformer | 130dcfbeb72d1d2044fe06cdf755d70e241d3281 | [
"MIT"
] | null | null | null | EQTransformer/utils/associator.py | malcolmw/EQTransformer | 130dcfbeb72d1d2044fe06cdf755d70e241d3281 | [
"MIT"
] | null | null | null | EQTransformer/utils/associator.py | malcolmw/EQTransformer | 130dcfbeb72d1d2044fe06cdf755d70e241d3281 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 18:52:42 2019
@author: mostafamousavi
last update: 06/23/2020
"""
from datetime import datetime, timedelta
from tqdm import tqdm
import numpy as np
import json
import os
import platform
import sqlite3
import pandas as pd
import csv
from os import listdir
import h5py
#import matplotlib.pyplot as plt
from obspy import UTCDateTime
from obspy.signal.trigger import ar_pick
from obspy.signal.trigger import recursive_sta_lta, trigger_onset
from itertools import combinations
from obspy.core.event import Catalog, Event, Origin, Arrival, Pick, WaveformStreamID
def run_associator(input_dir,
                   start_time,
                   end_time,
                   moving_window=15,
                   pair_n=3,
                   output_dir='.',
                   consider_combination=False):

    """
    It performs a very simple association based on detection times on multiple stations. It works fine when you have a small and local network of seismic stations.

    Parameters
    ----------
    input_dir: str, default=None
        Directory name containing hdf5 and csv files-preprocessed data.

    start_time: str, default=None
        Start of a time period of interest in 'YYYY-MM-DD hh:mm:ss.f' format.

    end_time: str, default=None
        End of a timeperiod of interest in 'YYYY-MM-DD hh:mm:ss.f' format.

    moving_window: int, default=15
        The length of time window used for association in second.

    pair_n: int, default=2
        The minimum number of stations used for the association.

    output_dir: str, default='.'
        Path to the directory to write the output file.

    consider_combination: bool, default=False
        If True, it will write down all possible combinations of picked arrival times for each event. This will generate multiple events with the same ID, and you will need to remove those with poor solutions after location. This helps to remove the false positives from the associated event.

    Returns
    ----------
    output_dir/Y2000.phs: Phase information for the associated events in hypoInverse format.

    output_dir/traceNmae_dic.json: A dictionary where the trace name for all the detections associated to an event are listed. This can be used later to access the traces for calculating the cross-correlations during the relocation process.

    Warning
    ----------
    Unlike the other modules, this function does not create the output directory. So if the given path does not exist will give an error.

    """
    # Always start from a clean scratch database; it is deleted again below.
    if os.path.exists("phase_dataset"):
        os.remove("phase_dataset")
    conn = sqlite3.connect("phase_dataset")
    cur = conn.cursor()

    cur.execute('''
    CREATE TABLE phase_dataset (traceID TEXT,
                                network TEXT,
                                station TEXT,
                                instrument_type TEXT,
                                stlat NUMERIC,
                                stlon NUMERIC,
                                stelv NUMERIC,
                                event_start_time DateTime,
                                event_end_time DateTime,
                                detection_prob NUMERIC,
                                detection_unc NUMERIC,
                                p_arrival_time DateTime,
                                p_prob NUMERIC,
                                p_unc NUMERIC,
                                p_snr NUMERIC,
                                s_arrival_time DateTime,
                                s_prob NUMERIC,
                                s_unc NUMERIC,
                                s_snr NUMERIC,
                                amp NUMERIC
                                )''')

    # os.listdir returns bare entry names (no separators), so no
    # platform-specific splitting is needed to drop macOS metadata entries.
    station_list = sorted({ev for ev in listdir(input_dir) if ev != ".DS_Store"})

    for st in station_list:
        print(f'reading {st} ...')
        # BUG FIX: the original Windows branch concatenated stray quote
        # characters into the path ('"\\"X_prediction_results.csv');
        # os.path.join builds a correct path on every platform.
        _pick_database_maker(conn, cur,
                             os.path.join(input_dir, st,
                                          'X_prediction_results.csv'))

    # Read the accumulated picks back as a dataframe.
    conn = sqlite3.connect("phase_dataset")
    tbl = pd.read_sql_query("SELECT * FROM phase_dataset", conn)

    # SQLite stores the timestamps as strings; convert them back to datetime.
    for col in ('event_start_time', 'event_end_time',
                'p_arrival_time', 's_arrival_time'):
        tbl[col] = tbl[col].apply(_date_convertor)

    _dbs_associator(start_time,
                    end_time,
                    moving_window,
                    tbl,
                    pair_n,
                    output_dir,
                    station_list,
                    consider_combination)

    os.remove("phase_dataset")
def _pick_database_maker(conn, cur, input_file):
csv_file = open(input_file)
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
# print(f'Column names are {", ".join(row)}')
line_count += 1
else:
line_count += 1
traceID = row[0]
network = row[1]
station = row[2]
instrument_type = row[3]
stlat = float(row[4])
stlon = float(row[5])
stelv = float(row[6])
mls = row[7].split('.')
if len(mls) == 1:
event_start_time = datetime.strptime(row[7], '%Y-%m-%d %H:%M:%S')
else:
event_start_time = datetime.strptime(row[7], '%Y-%m-%d %H:%M:%S.%f')
mls = row[8].split('.')
if len(mls) == 1:
event_end_time = datetime.strptime(row[8], '%Y-%m-%d %H:%M:%S')
else:
event_end_time = datetime.strptime(row[8], '%Y-%m-%d %H:%M:%S.%f')
detection_prob = float(row[9])
try:
detection_unc = float(row[10])
except Exception:
detection_unc = None
if len(row[11]) > 10:
# p_arrival_time = UTCDateTime(row[11].replace(' ', 'T')+'Z')
mls = row[11].split('.')
if len(mls) == 1:
p_arrival_time = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S')
else:
p_arrival_time = datetime.strptime(row[11], '%Y-%m-%d %H:%M:%S.%f')
p_prob = float(row[12])
try:
p_unc = float(row[13])
except Exception:
p_unc = None
else:
p_arrival_time = None
p_prob = None
p_unc = None
try:
p_snr = float(row[14])
except Exception:
p_snr = None
if len(row[15]) > 10:
mls = row[15].split('.')
if len(mls) == 1:
s_arrival_time = datetime.strptime(row[15], '%Y-%m-%d %H:%M:%S')
else:
s_arrival_time = datetime.strptime(row[15], '%Y-%m-%d %H:%M:%S.%f')
s_prob = float(row[16])
try:
s_unc = float(row[17])
except Exception:
s_unc = None
else:
s_arrival_time = None
s_prob = None
s_unc = None
try:
s_snr = float(row[18])
except Exception:
s_snr = None
amp = None
cur.execute('''INSERT INTO phase_dataset VALUES
(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?, ?)''',
(traceID, network, station, instrument_type, stlat, stlon, stelv,
event_start_time, event_end_time, detection_prob, detection_unc,
p_arrival_time, p_prob, p_unc, p_snr, s_arrival_time, s_prob, s_unc, s_snr,
amp))
conn.commit()
def _decimalDegrees2DMS(value,type):
'Converts a Decimal Degree Value into Degrees Minute Seconds Notation. Pass value as double type = {Latitude or Longitude} as string returns a string as D:M:S:Direction created by: anothergisblog.blogspot.com'
degrees = int(value)
submin = abs( (value - int(value) ) * 60)
direction = ""
if type == "Longitude":
if degrees < 0:
direction = "W"
elif degrees > 0:
direction = " "
else:
direction = ""
notation = ["{:>3}".format(str(abs(degrees))), direction, "{:>5}".format(str(round(submin, 2)))]
elif type == "Latitude":
if degrees < 0:
direction = "S"
elif degrees > 0:
direction = " "
else:
direction = ""
notation =["{:>2}".format(str(abs(degrees))), direction, "{:>5}".format(str(round(submin, 2)))]
return notation
def _weighcalculator_prob(pr):
'calculate the picks weights'
weight = 4
if pr > 0.6:
weight = 0
elif pr <= 0.6 and pr > 0.5:
weight = 1
elif pr <= 0.5 and pr > 0.2:
weight = 2
elif pr <= 0.2 and pr > 0.1:
weight = 3
elif pr <= 0.1:
weight = 4
return weight
def _date_convertor(r):
'convert datatime form string'
if r and len(r)>5:
mls = r.split('.')
if len(mls) == 1:
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S')
else:
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S.%f')
return new_t
def _doubleChecking(station_list, detections, preprocessed_dir, moving_window, thr_on=3.7, thr_of=0.5):
    'this function perform traditional detection (STA/LTA) and picker (AIC) to double check for events on the remaining stations when an event has been detected on more than two stations'
    # For every station with no deep-learning detection in this window, run a
    # recursive STA/LTA trigger plus an AR-AIC picker on the preprocessed
    # waveform and, if a consistent P pick is found, append it to `detections`.
    for stt in station_list:
        sttt = stt.split('_')[0]
        # print(sttt)
        if sttt not in detections['station'].to_list():
            new_picks = {}
            # Locate the station's preprocessed hdf5/csv pair.
            if platform.system() == 'Windows':
                file_name = preprocessed_dir+"\\"+sttt+".hdf5"
                file_csv = preprocessed_dir+"\\"+sttt+".csv"
            else:
                file_name = preprocessed_dir+"/"+sttt+".hdf5"
                file_csv = preprocessed_dir+"/"+sttt+".csv"
            df = pd.read_csv(file_csv)
            df['start_time'] = pd.to_datetime(df['start_time'])
            # Keep only traces within +/- moving_window seconds of the event.
            mask = (df['start_time'] > detections.iloc[0]['event_start_time']-timedelta(seconds = moving_window)) & (df['start_time'] < detections.iloc[0]['event_start_time']+timedelta(seconds = moving_window))
            df = df.loc[mask]
            dtfl = h5py.File(file_name, 'r')
            dataset = dtfl.get('data/'+df['trace_name'].to_list()[0])
            data = np.array(dataset)
            # STA/LTA on the vertical component; the 2.5 s / 10 s windows
            # assume 100 Hz sampling — TODO confirm for this dataset.
            cft = recursive_sta_lta(data[:,2], int(2.5 * 100), int(10. * 100))
            on_of = trigger_onset(cft, thr_on, thr_of)
            if len(on_of) >= 1:
                p_pick, s_pick = ar_pick(data[:,2], data[:,1], data[:,0], 100, 1.0, 20.0, 1.0, 0.1, 4.0, 1.0, 2, 8, 0.1, 0.2)
                # Accept the AIC pick only if it falls within ~1 s of the
                # STA/LTA trigger window.
                if (on_of[0][1]+100)/100 > p_pick > (on_of[0][0]-100)/100:
                    # print('got one')
                    new_picks['traceID'] = df['trace_name'].to_list()[0]
                    new_picks['network'] = dataset.attrs["network_code"]
                    new_picks['station'] = sttt
                    new_picks['instrument_type'] = df['trace_name'].to_list()[0].split('_')[2]
                    new_picks['stlat'] = round(dataset.attrs["receiver_latitude"], 4)
                    new_picks['stlon'] = round(dataset.attrs["receiver_longitude"], 4)
                    new_picks['stelv'] = round(dataset.attrs["receiver_elevation_m"], 2)
                    new_picks['event_start_time'] = datetime.strptime(str(UTCDateTime(dataset.attrs['trace_start_time'].replace(' ', 'T')+'Z')+(on_of[0][0]/100)).replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S.%f')
                    new_picks['event_end_time'] = datetime.strptime(str(UTCDateTime(dataset.attrs['trace_start_time'].replace(' ', 'T')+'Z')+(on_of[0][1]/100)).replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S.%f')
                    # Fixed low confidence values mark these as fallback picks.
                    new_picks['detection_prob'] = 0.3
                    new_picks['detection_unc'] = 0.6
                    new_picks['p_arrival_time'] = datetime.strptime(str(UTCDateTime(dataset.attrs['trace_start_time'].replace(' ', 'T')+'Z')+p_pick).replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S.%f')
                    new_picks['p_prob'] = 0.3
                    new_picks['p_unc'] = 0.6
                    new_picks['p_snr'] = None
                    new_picks['s_arrival_time'] = None
                    new_picks['s_prob'] = 0.0
                    new_picks['s_unc'] = None
                    new_picks['s_snr'] = None
                    new_picks['amp'] = None
                    detections = detections.append(new_picks , ignore_index=True)
    return detections
def _dbs_associator(start_time, end_time, moving_window,
                    tbl, pair_n, save_dir, station_list,
                    consider_combination=False):
    """Slide a moving_window-second window over [start_time, end_time] and
    group detections from >= pair_n stations into events.

    Writes hypoInverse phase lines to Y2000.phs and a {event_id: trace names}
    map to traceNmae_dic.json; the non-combination branch also builds a
    QuakeML catalog (associations.xml).
    """

    if consider_combination==True:
        if platform.system() == 'Windows':
            Y2000_writer = open(save_dir+"\\"+"Y2000.phs", "w")
        else:
            Y2000_writer = open(save_dir+"/"+"Y2000.phs", "w")

        traceNmae_dic = dict()
        st = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S.%f')
        et = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S.%f')
        total_t = et-st;
        evid = 0;
        tt = st
        pbar = tqdm(total= int(np.ceil(total_t.total_seconds()/moving_window)), ncols=100)
        while tt < et:
            detections = tbl[(tbl.event_start_time >= tt) & (tbl.event_start_time < tt+timedelta(seconds = moving_window))]
            pbar.update()
            if len(detections) >= pair_n:
                evid += 1

                # Event header fields are taken from the earliest detection.
                yr = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[0])
                mo = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[1])
                dy = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[2])
                hr = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[0])
                mi = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[1])
                sec = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[2])
                st_lat_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlat']), "Latitude")
                st_lon_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlon']), "Longitude")
                depth = 5.0
                mag = 0.0

                # QuakeML
                print(detections.iloc[0]['event_start_time'])

                if len(detections)/pair_n <= 2:
                    ch = pair_n
                else:
                    ch = int(len(detections)-pair_n)
                picks = []
                # Write one phase block per station combination of size >= ch.
                for ns in range(ch, len(detections)+1):
                    comb = 0
                    for ind in list(combinations(detections.index, ns)):
                        comb+=1
                        selected_detections = detections.loc[ind,:]
                        sorted_detections = selected_detections.sort_values('p_arrival_time')

                        Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%
                                           (int(yr),int(mo),int(dy), int(hr),int(mi),float(sec),float(st_lat_DMS[0]),
                                            str(st_lat_DMS[1]), float(st_lat_DMS[2]),float(st_lon_DMS[0]), str(st_lon_DMS[1]),
                                            float(st_lon_DMS[2]),float(depth), float(mag)));

                        station_buffer=[]; row_buffer=[]; tr_names=[]; tr_names2=[]
                        for _, row in sorted_detections.iterrows():
                            trace_name = row['traceID']+'*'+row['station']+'*'+str(row['event_start_time'])
                            p_unc = row['p_unc']
                            p_prob = row['p_prob']
                            s_unc = row['s_unc']
                            s_prob = row['s_prob']

                            # Uncertainty-discounted probabilities set the weights.
                            if p_unc:
                                Pweihgt = _weighcalculator_prob(p_prob*(1-p_unc))
                            else:
                                Pweihgt = _weighcalculator_prob(p_prob)
                            try:
                                Pweihgt = int(Pweihgt)
                            except Exception:
                                Pweihgt = 4

                            if s_unc:
                                Sweihgt = _weighcalculator_prob(s_prob*(1-s_unc))
                            else:
                                Sweihgt = _weighcalculator_prob(s_prob)
                            try:
                                Sweihgt = int(Sweihgt)
                            except Exception:
                                Sweihgt = 4

                            station = "{:<5}".format(row['station'])
                            network = "{:<2}".format(row['network'])

                            try:
                                yrp = "{:>4}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[0])
                                mop = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[1])
                                dyp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[2])
                                hrp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[0])
                                mip = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[1])
                                sec_p = "{:>4}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[2])
                                p = Pick(time=UTCDateTime(row['p_arrival_time']),
                                         waveform_id=WaveformStreamID(network_code=network,
                                                                      station_code=station.rstrip()),
                                         phase_hint="P")
                                picks.append(p)
                            except Exception:
                                sec_p = None

                            try:
                                yrs = "{:>4}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[0])
                                mos = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[1])
                                dys = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[2])
                                hrs = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[0])
                                mis = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[1])
                                sec_s = "{:>4}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[2])
                                p = Pick(time=UTCDateTime(row['p_arrival_time']),
                                         waveform_id=WaveformStreamID(network_code=network, station_code=station.rstrip()),
                                         phase_hint="S")
                                picks.append(p)
                            except Exception:
                                sec_s = None

                            # Only the first detection per station goes into the
                            # main block; duplicates are buffered separately.
                            if row['station'] not in station_buffer:
                                tr_names.append(trace_name)
                                station_buffer.append(row['station'])
                                if sec_s:
                                    Y2000_writer.write("%5s%2s  HHE     %4d%2d%2d%2d%2d%5.2f       %5.2fES %1d\n"%
                                                       (station,network,int(yrs),int(mos),int(dys),int(hrs),int(mis),
                                                        float(0.0),float(sec_s), Sweihgt))
                                if sec_p:
                                    Y2000_writer.write("%5s%2s  HHZ IP %1d%4d%2d%2d%2d%2d%5.2f       %5.2f   0\n"%
                                                       (station,network,Pweihgt,int(yrp),int(mop),int(dyp),int(hrp),
                                                        int(mip),float(sec_p),float(0.0)))
                            else :
                                tr_names2.append(trace_name)
                                if sec_s:
                                    row_buffer.append("%5s%2s  HHE     %4d%2d%2d%2d%2d%5.2f       %5.2fES %1d\n"%(station,network,
                                                                                                                 int(yrs),int(mos),int(dys),
                                                                                                                 int(hrs),int(mis),0.0,
                                                                                                                 float(sec_s), Sweihgt));
                                if sec_p:
                                    row_buffer.append("%5s%2s  HHZ IP %1d%4d%2d%2d%2d%2d%5.2f       %5.2f   0\n"%(station,network,
                                                                                                                 Pweihgt,
                                                                                                                 int(yrp),int(mop),int(dyp),
                                                                                                                 int(hrp),int(mip),float(sec_p),
                                                                                                                 float(0.0)));
                        Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
                        traceNmae_dic[str(evid)] = tr_names

                    if len(row_buffer) >= 2*pair_n:
                        Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%
                                           (int(yr),int(mo),int(dy),int(hr),int(mi),float(sec),
                                            float(st_lat_DMS[0]), str(st_lat_DMS[1]), float(st_lat_DMS[2]),
                                            float(st_lon_DMS[0]), str(st_lon_DMS[1]), float(st_lon_DMS[2]),
                                            float(depth), float(mag)));
                        for rr in row_buffer:
                            Y2000_writer.write(rr);
                        Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
                        traceNmae_dic[str(evid)] = tr_names2

            tt += timedelta(seconds= moving_window)

    #    plt.scatter(LTTP, TTP, s=10, marker='o', c='b', alpha=0.4, label='P')
    #    plt.scatter(LTTS, TTS, s=10, marker='o', c='r', alpha=0.4, label='S')
    #    plt.legend('upper right')
    #    plt.show()

        print('The Number of Realizations: '+str(evid)+'\n', flush=True)

        jj = json.dumps(traceNmae_dic)
        if platform.system() == 'Windows':
            f = open(save_dir+"\\"+"traceNmae_dic.json","w")
        else:
            f = open(save_dir+"/"+"traceNmae_dic.json","w")

        f.write(jj)
        f.close()

    else:
        if platform.system() == 'Windows':
            Y2000_writer = open(save_dir+"\\"+"Y2000.phs", "w")
        else:
            Y2000_writer = open(save_dir+"/"+"Y2000.phs", "w")

        cat = Catalog()
        traceNmae_dic = dict()
        st = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S.%f')
        et = datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S.%f')
        total_t = et-st;
        # evid numbers associated events from 200000; evidd numbers the
        # unassociated ("orphan") trace groups from 100000.
        evid = 200000; evidd = 100000
        tt = st
        pbar = tqdm(total= int(np.ceil(total_t.total_seconds()/moving_window)))
        while tt < et:
            detections = tbl[(tbl.event_start_time >= tt) & (tbl.event_start_time < tt+timedelta(seconds = moving_window))]
            pbar.update()
            if len(detections) >= pair_n:

                yr = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[0])
                mo = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[1])
                dy = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[0].split('-')[2])
                hr = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[0])
                mi = "{:>2}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[1])
                sec = "{:>4}".format(str(detections.iloc[0]['event_start_time']).split(' ')[1].split(':')[2])
                st_lat_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlat']), "Latitude")
                st_lon_DMS = _decimalDegrees2DMS(float(detections.iloc[0]['stlon']), "Longitude")
                depth = 5.0
                mag = 0.0

                Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%(int(yr),int(mo),int(dy),
                                                                                                 int(hr),int(mi),float(sec),
                                                                                                 float(st_lat_DMS[0]), str(st_lat_DMS[1]), float(st_lat_DMS[2]),
                                                                                                 float(st_lon_DMS[0]), str(st_lon_DMS[1]), float(st_lon_DMS[2]),
                                                                                                 float(depth), float(mag)));
                event = Event()
                origin = Origin(time=UTCDateTime(detections.iloc[0]['event_start_time']),
                                longitude=detections.iloc[0]['stlon'],
                                latitude=detections.iloc[0]['stlat'],
                                method="EqTransformer")
                event.origins.append(origin)

                station_buffer = []
                row_buffer = []
                sorted_detections = detections.sort_values('p_arrival_time')
                tr_names = []
                tr_names2 = []
                picks = []
                for _, row in sorted_detections.iterrows():
                    trace_name = row['traceID']+'*'+row['station']+'*'+str(row['event_start_time'])
                    p_unc = row['p_unc']
                    p_prob = row['p_prob']
                    s_unc = row['s_unc']
                    s_prob = row['s_prob']

                    if p_unc:
                        Pweihgt = _weighcalculator_prob(p_prob*(1-p_unc))
                    else:
                        Pweihgt =_weighcalculator_prob(p_prob)
                    try:
                        Pweihgt = int(Pweihgt)
                    except Exception:
                        Pweihgt = 4

                    if s_unc:
                        Sweihgt = _weighcalculator_prob(s_prob*(1-s_unc))
                    else:
                        Sweihgt = _weighcalculator_prob(s_prob)
                    try:
                        Sweihgt = int(Sweihgt)
                    except Exception:
                        Sweihgt = 4

                    station = "{:<5}".format(row['station'])
                    network = "{:<2}".format(row['network'])

                    try:
                        yrp = "{:>4}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[0])
                        mop = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[1])
                        dyp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[0].split('-')[2])
                        hrp = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[0])
                        mip = "{:>2}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[1])
                        sec_p = "{:>4}".format(str(row['p_arrival_time']).split(' ')[1].split(':')[2])
                        p = Pick(time=UTCDateTime(row['p_arrival_time']),
                                 waveform_id=WaveformStreamID(network_code=network, station_code=station.rstrip()),
                                 phase_hint="P", method_id="EqTransformer")
                        picks.append(p)
                    except Exception:
                        sec_p = None

                    try:
                        yrs = "{:>4}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[0])
                        mos = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[1])
                        dys = "{:>2}".format(str(row['s_arrival_time']).split(' ')[0].split('-')[2])
                        hrs = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[0])
                        mis = "{:>2}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[1])
                        sec_s = "{:>4}".format(str(row['s_arrival_time']).split(' ')[1].split(':')[2])
                        p = Pick(time=UTCDateTime(row['s_arrival_time']),
                                 waveform_id=WaveformStreamID(network_code=network, station_code=station.rstrip()),
                                 phase_hint="S", method_id="EqTransformer")
                        picks.append(p)
                    except Exception:
                        sec_s = None

                    # First detection per station goes to the main block;
                    # repeats from the same station are buffered.
                    if row['station'] not in station_buffer:
                        tr_names.append(trace_name)
                        station_buffer.append(row['station'])
                        if sec_s:
                            Y2000_writer.write("%5s%2s  HHE     %4d%2d%2d%2d%2d%5.2f       %5.2fES %1d\n"%(station,network,
                                                                                                           int(yrs),int(mos),int(dys),
                                                                                                           int(hrs),int(mis),float(0.0),
                                                                                                           float(sec_s), Sweihgt))
                        if sec_p:
                            Y2000_writer.write("%5s%2s  HHZ IP %1d%4d%2d%2d%2d%2d%5.2f       %5.2f   0\n"%(station,network,
                                                                                                           Pweihgt,
                                                                                                           int(yrp),int(mop),int(dyp),
                                                                                                           int(hrp),int(mip),float(sec_p),
                                                                                                           float(0.0)))
                    else :
                        tr_names2.append(trace_name)
                        if sec_s:
                            row_buffer.append("%5s%2s  HHE     %4d%2d%2d%2d%2d%5.2f       %5.2fES %1d\n"%(station,network,
                                                                                                          int(yrs),int(mos),int(dys),
                                                                                                          int(hrs),int(mis),0.0,
                                                                                                          float(sec_s), Sweihgt));
                        if sec_p:
                            row_buffer.append("%5s%2s  HHZ IP %1d%4d%2d%2d%2d%2d%5.2f       %5.2f   0\n"%(station,network,
                                                                                                          Pweihgt,
                                                                                                          int(yrp),int(mop),int(dyp),
                                                                                                          int(hrp),int(mip),float(sec_p),
                                                                                                          float(0.0)));
                event.picks = picks
                event.preferred_origin_id = event.origins[0].resource_id
                cat.append(event)

                evid += 1
                Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
                traceNmae_dic[str(evid)] = tr_names

                # Duplicate-station picks form a secondary event when enough
                # of them exist; otherwise they become an orphan group.
                if len(row_buffer) >= 2*pair_n:
                    Y2000_writer.write("%4d%2d%2d%2d%2d%4.2f%2.0f%1s%4.2f%3.0f%1s%4.2f%5.2f%3.2f\n"%
                                       (int(yr),int(mo),int(dy),int(hr),int(mi),float(sec),
                                        float(st_lat_DMS[0]), str(st_lat_DMS[1]), float(st_lat_DMS[2]),
                                        float(st_lon_DMS[0]), str(st_lon_DMS[1]), float(st_lon_DMS[2]),
                                        float(depth), float(mag)));
                    for rr in row_buffer:
                        Y2000_writer.write(rr);
                    evid += 1
                    Y2000_writer.write("{:<62}".format(' ')+"%10d"%(evid)+'\n');
                    traceNmae_dic[str(evid)] = tr_names2

                elif len(row_buffer) < pair_n and len(row_buffer) != 0:
                    evidd += 1
                    traceNmae_dic[str(evidd)] = tr_names2

            elif len(detections) < pair_n and len(detections) != 0:
                tr_names = []
                for _, row in detections.iterrows():
                    trace_name = row['traceID']
                    tr_names.append(trace_name)
                evidd += 1
                traceNmae_dic[str(evidd)] = tr_names

            tt += timedelta(seconds= moving_window)

        print('The Number of Associated Events: '+str(evid-200000)+'\n', flush=True)

        jj = json.dumps(traceNmae_dic)
        if platform.system() == 'Windows':
            f = open(save_dir+"\\"+"traceNmae_dic.json","w")
        else:
            f = open(save_dir+"/"+"traceNmae_dic.json","w")

        f.write(jj)
        f.close()
        print(cat.__str__(print_all=True))
        cat.write(save_dir+"/associations.xml", format="QUAKEML")
| 51.246479 | 293 | 0.425587 |
dfed16dafca39835d8c05ee21101ae4b27c154ef | 29,989 | py | Python | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 37 | 2020-10-20T08:30:53.000Z | 2020-12-22T13:15:45.000Z | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 13 | 2020-09-07T07:24:35.000Z | 2022-02-24T04:56:16.000Z | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.datasets import make_classification, make_regression
from sklearn.datasets import make_low_rank_matrix
from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.base import clone, BaseEstimator, TransformerMixin
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_poisson_deviance
from sklearn.dummy import DummyRegressor
# To use this experimental feature, we need to explicitly ask for it:
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES
from sklearn.ensemble._hist_gradient_boosting.loss import LeastSquares
from sklearn.ensemble._hist_gradient_boosting.loss import BinaryCrossEntropy
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.utils import shuffle
# Small shared toy datasets reused across the tests below.
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
def _make_dumb_dataset(n_samples):
"""Make a dumb dataset to test early stopping."""
rng = np.random.RandomState(42)
X_dumb = rng.randn(n_samples, 1)
y_dumb = (X_dumb[:, 0] > 0).astype('int64')
return X_dumb, y_dumb
@pytest.mark.parametrize('GradientBoosting, X, y', [
    (HistGradientBoostingClassifier, X_classification, y_classification),
    (HistGradientBoostingRegressor, X_regression, y_regression)
])
@pytest.mark.parametrize(
    'params, err_msg',
    [({'loss': 'blah'}, 'Loss blah is not supported for'),
     ({'learning_rate': 0}, 'learning_rate=0 must be strictly positive'),
     ({'learning_rate': -1}, 'learning_rate=-1 must be strictly positive'),
     ({'max_iter': 0}, 'max_iter=0 must not be smaller than 1'),
     ({'max_leaf_nodes': 0}, 'max_leaf_nodes=0 should not be smaller than 2'),
     ({'max_leaf_nodes': 1}, 'max_leaf_nodes=1 should not be smaller than 2'),
     ({'max_depth': 0}, 'max_depth=0 should not be smaller than 1'),
     ({'min_samples_leaf': 0}, 'min_samples_leaf=0 should not be smaller'),
     ({'l2_regularization': -1}, 'l2_regularization=-1 must be positive'),
     ({'max_bins': 1}, 'max_bins=1 should be no smaller than 2 and no larger'),
     ({'max_bins': 256}, 'max_bins=256 should be no smaller than 2 and no'),
     ({'n_iter_no_change': -1}, 'n_iter_no_change=-1 must be positive'),
     ({'validation_fraction': -1}, 'validation_fraction=-1 must be strictly'),
     ({'validation_fraction': 0}, 'validation_fraction=0 must be strictly'),
     ({'tol': -1}, 'tol=-1 must not be smaller than 0')]
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
    # Each invalid constructor parameter must raise ValueError at fit time
    # with the documented message (parameters are validated lazily in fit).
    with pytest.raises(ValueError, match=err_msg):
        GradientBoosting(**params).fit(X, y)
def test_invalid_classification_loss():
    # binary_crossentropy is undefined for more than two classes; the error
    # must point users to categorical_crossentropy instead.
    binary_clf = HistGradientBoostingClassifier(loss="binary_crossentropy")
    err_msg = ("loss='binary_crossentropy' is not defined for multiclass "
               "classification with n_classes=3, use "
               "loss='categorical_crossentropy' instead")
    with pytest.raises(ValueError, match=err_msg):
        binary_clf.fit(np.zeros(shape=(3, 2)), np.arange(3))
@pytest.mark.parametrize(
    'scoring, validation_fraction, early_stopping, n_iter_no_change, tol', [
        ('neg_mean_squared_error', .1, True, 5, 1e-7),  # use scorer
        ('neg_mean_squared_error', None, True, 5, 1e-1),  # use scorer on train
        (None, .1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ('loss', .1, True, 5, 1e-7),  # use loss
        ('loss', None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, None),  # no early stopping
        ])
def test_early_stopping_regression(scoring, validation_fraction,
                                   early_stopping, n_iter_no_change, tol):
    # With early stopping enabled the regressor must stop before max_iter
    # (but not before n_iter_no_change rounds); without it, it must run the
    # full max_iter iterations.
    max_iter = 200

    X, y = make_regression(n_samples=50, random_state=0)

    gb = HistGradientBoostingRegressor(
        verbose=1,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0
    )
    gb.fit(X, y)

    if early_stopping:
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter
@pytest.mark.parametrize('data', (
    make_classification(n_samples=30, random_state=0),
    make_classification(n_samples=30, n_classes=3, n_clusters_per_class=1,
                        random_state=0)
))
@pytest.mark.parametrize(
    'scoring, validation_fraction, early_stopping, n_iter_no_change, tol', [
        ('accuracy', .1, True, 5, 1e-7),  # use scorer
        ('accuracy', None, True, 5, 1e-1),  # use scorer on training data
        (None, .1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ('loss', .1, True, 5, 1e-7),  # use loss
        ('loss', None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, None),  # no early stopping
        ])
def test_early_stopping_classification(data, scoring, validation_fraction,
                                       early_stopping, n_iter_no_change, tol):
    """Classifier counterpart of test_early_stopping_regression: early
    stopping must halt before max_iter iff it is enabled."""
    max_iter = 50
    X, y = data
    gb = HistGradientBoostingClassifier(
        verbose=1,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0
    )
    gb.fit(X, y)
    if early_stopping is True:
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter
@pytest.mark.parametrize('GradientBoosting, X, y', [
    (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),
    (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),
    (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),
    (HistGradientBoostingRegressor, *_make_dumb_dataset(10001))
])
def test_early_stopping_default(GradientBoosting, X, y):
    """Early stopping must be on by default iff n_samples > 10000."""
    # Test that early stopping is enabled by default if and only if there
    # are more than 10000 samples
    gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)
    gb.fit(X, y)
    if X.shape[0] > 10000:
        assert gb.n_iter_ < gb.max_iter
    else:
        assert gb.n_iter_ == gb.max_iter
@pytest.mark.parametrize(
    'scores, n_iter_no_change, tol, stopping',
    [
        ([], 1, 0.001, False),  # not enough iterations
        ([1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 1, 1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 2, 3, 4, 5, 6], 5, 0.001, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0., False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.999, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False),  # significant improvement
        ([1] * 6, 5, 0., True),  # no significant improvement
        ([1] * 6, 5, 0.001, True),  # no significant improvement
        ([1] * 6, 5, 5, True),  # no significant improvement
    ]
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
    """_should_stop must flag convergence from the recent score history."""
    gbdt = HistGradientBoostingClassifier(
        n_iter_no_change=n_iter_no_change, tol=tol
    )
    assert gbdt._should_stop(scores) == stopping
def test_least_absolute_deviation():
    """Smoke test for the 'least_absolute_deviation' loss (coverage only)."""
    X, y = make_regression(n_samples=500, random_state=0)
    model = HistGradientBoostingRegressor(loss='least_absolute_deviation',
                                          random_state=0)
    model.fit(X, y)
    # A fit on its own training data should reach a high R^2.
    assert model.score(X, y) > .9
@pytest.mark.parametrize('y', [([1., -2., 0.]), ([0., 0., 0.])])
def test_poisson_y_positive(y):
    """Poisson loss must reject targets that are negative or sum to zero."""
    # Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.
    err_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0."
    gbdt = HistGradientBoostingRegressor(loss='poisson', random_state=0)
    with pytest.raises(ValueError, match=err_msg):
        gbdt.fit(np.zeros(shape=(len(y), 1)), y)
def test_poisson():
    """Poisson loss must beat least squares (and a mean dummy) in Poisson
    deviance on a Poisson-distributed target."""
    # For Poisson distributed target, Poisson loss should give better results
    # than least squares measured in Poisson deviance as metric.
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 100, 100
    X = make_low_rank_matrix(n_samples=n_train+n_test, n_features=n_features,
                             random_state=rng)
    # We create a log-linear Poisson model and downscale coef as it will get
    # exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=n_test,
                                                        random_state=rng)
    gbdt_pois = HistGradientBoostingRegressor(loss='poisson', random_state=rng)
    gbdt_ls = HistGradientBoostingRegressor(loss='least_squares',
                                            random_state=rng)
    gbdt_pois.fit(X_train, y_train)
    gbdt_ls.fit(X_train, y_train)
    dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
    for X, y in [(X_train, y_train), (X_test, y_test)]:
        metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))
        # least_squares might produce non-positive predictions => clip
        metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15,
                                                     None))
        metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
        assert metric_pois < metric_ls
        assert metric_pois < metric_dummy
def test_binning_train_validation_are_separated():
    """The bin mapper must be fitted on the training split only, not on the
    whole dataset (regression test for issue 13926)."""
    # Make sure training and validation data are binned separately.
    # See issue 13926
    rng = np.random.RandomState(0)
    validation_fraction = .2
    gb = HistGradientBoostingClassifier(
        early_stopping=True,
        validation_fraction=validation_fraction,
        random_state=rng
    )
    gb.fit(X_classification, y_classification)
    mapper_training_data = gb.bin_mapper_
    # Note that since the data is small there is no subsampling and the
    # random_state doesn't matter
    mapper_whole_data = _BinMapper(random_state=0)
    mapper_whole_data.fit(X_classification)
    n_samples = X_classification.shape[0]
    assert np.all(mapper_training_data.n_bins_non_missing_ ==
                  int((1 - validation_fraction) * n_samples))
    assert np.all(mapper_training_data.n_bins_non_missing_ !=
                  mapper_whole_data.n_bins_non_missing_)
def test_missing_values_trivial():
    """Sanity check for missing-value support: with a single feature and
    y == isnan(X), the model must reach perfect training accuracy."""
    # sanity check for missing values support. With only one feature and
    # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the
    # training set.
    n_samples = 100
    n_features = 1
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    # Use the builtin ``bool``: the ``np.bool`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24 (raises AttributeError there).
    mask = rng.binomial(1, .5, size=X.shape).astype(bool)
    X[mask] = np.nan
    y = mask.ravel()
    gb = HistGradientBoostingClassifier()
    gb.fit(X, y)
    assert gb.score(X, y) == pytest.approx(1)
@pytest.mark.parametrize('problem', ('classification', 'regression'))
@pytest.mark.parametrize(
    'missing_proportion, expected_min_score_classification, '
    'expected_min_score_regression', [
        (.1, .97, .89),
        (.2, .93, .81),
        (.5, .79, .52)])
def test_missing_values_resilience(problem, missing_proportion,
                                   expected_min_score_classification,
                                   expected_min_score_regression):
    """Estimators must keep decent scores when a fraction of the feature
    values is missing completely at random."""
    # Make sure the estimators can deal with missing values and still yield
    # decent predictions
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 2
    if problem == 'regression':
        X, y = make_regression(n_samples=n_samples, n_features=n_features,
                               n_informative=n_features, random_state=rng)
        gb = HistGradientBoostingRegressor()
        expected_min_score = expected_min_score_regression
    else:
        X, y = make_classification(n_samples=n_samples, n_features=n_features,
                                   n_informative=n_features, n_redundant=0,
                                   n_repeated=0, random_state=rng)
        gb = HistGradientBoostingClassifier()
        expected_min_score = expected_min_score_classification
    # Use the builtin ``bool``: the ``np.bool`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24 (raises AttributeError there).
    mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)
    X[mask] = np.nan
    gb.fit(X, y)
    assert gb.score(X, y) > expected_min_score
@pytest.mark.parametrize('data', [
    make_classification(random_state=0, n_classes=2),
    make_classification(random_state=0, n_classes=3, n_informative=3)
], ids=['binary_crossentropy', 'categorical_crossentropy'])
def test_zero_division_hessians(data):
    """Fitting must not crash with zero hessians (issue #14018)."""
    # non regression test for issue #14018
    # make sure we avoid zero division errors when computing the leaves values.
    # If the learning rate is too high, the raw predictions are bad and will
    # saturate the softmax (or sigmoid in binary classif). This leads to
    # probabilities being exactly 0 or 1, gradients being constant, and
    # hessians being zero.
    X, y = data
    gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)
    gb.fit(X, y)
def test_small_trainset():
    """_get_small_trainset must return 10k samples with the original class
    proportions preserved (stratified subsampling)."""
    # Make sure that the small trainset is stratified and has the expected
    # length (10k samples)
    n_samples = 20000
    original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples).reshape(n_samples, 1)
    y = [[class_] * int(prop * n_samples) for (class_, prop)
         in original_distrib.items()]
    y = shuffle(np.concatenate(y))
    gb = HistGradientBoostingClassifier()
    # Compute the small training set
    X_small, y_small, _ = gb._get_small_trainset(X, y, seed=42,
                                                 sample_weight_train=None)
    # Compute the class distribution in the small training set
    unique, counts = np.unique(y_small, return_counts=True)
    small_distrib = {class_: count / 10000 for (class_, count)
                     in zip(unique, counts)}
    # Test that the small training set has the expected length
    assert X_small.shape[0] == 10000
    assert y_small.shape[0] == 10000
    # Test that the class distributions in the whole dataset and in the small
    # training set are identical
    assert small_distrib == pytest.approx(original_distrib)
def test_missing_values_minmax_imputation():
    """Built-in NaN handling must learn the same decision function as an
    explicit min/max ("MIA") imputation strategy."""
    # Compare the built-in missing value handling of Histogram GBC with an
    # a-priori missing value imputation strategy that should yield the same
    # results in terms of decision function.
    #
    # Each feature (containing NaNs) is replaced by 2 features:
    # - one where the nans are replaced by min(feature) - 1
    # - one where the nans are replaced by max(feature) + 1
    # A split where nans go to the left has an equivalent split in the
    # first (min) feature, and a split where nans go to the right has an
    # equivalent split in the second (max) feature.
    #
    # Assuming the data is such that there is never a tie to select the best
    # feature to split on during training, the learned decision trees should be
    # strictly equivalent (learn a sequence of splits that encode the same
    # decision function).
    #
    # The MinMaxImputer transformer is meant to be a toy implementation of the
    # "Missing In Attributes" (MIA) missing value handling for decision trees
    # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305
    # The implementation of MIA as an imputation transformer was suggested by
    # "Remark 3" in https://arxiv.org/abs/1902.06931
    class MinMaxImputer(BaseEstimator, TransformerMixin):
        # Toy MIA imputer: duplicates each feature, filling NaNs with
        # min - 1 in the first copy and max + 1 in the second.
        def fit(self, X, y=None):
            mm = MinMaxScaler().fit(X)
            self.data_min_ = mm.data_min_
            self.data_max_ = mm.data_max_
            return self
        def transform(self, X):
            X_min, X_max = X.copy(), X.copy()
            for feature_idx in range(X.shape[1]):
                nan_mask = np.isnan(X[:, feature_idx])
                X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
                X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
            return np.concatenate([X_min, X_max], axis=1)
    def make_missing_value_data(n_samples=int(1e4), seed=0):
        # Build a regression problem with several distinct missingness
        # patterns (random, censoring, target-informative), pre-binned so
        # both strategies see identical candidate thresholds.
        rng = np.random.RandomState(seed)
        X, y = make_regression(n_samples=n_samples, n_features=4,
                               random_state=rng)
        # Pre-bin the data to ensure a deterministic handling by the 2
        # strategies and also make it easier to insert np.nan in a structured
        # way:
        X = KBinsDiscretizer(n_bins=42, encode="ordinal").fit_transform(X)
        # First feature has missing values completely at random:
        rnd_mask = rng.rand(X.shape[0]) > 0.9
        X[rnd_mask, 0] = np.nan
        # Second and third features have missing values for extreme values
        # (censoring missingness):
        low_mask = X[:, 1] == 0
        X[low_mask, 1] = np.nan
        high_mask = X[:, 2] == X[:, 2].max()
        X[high_mask, 2] = np.nan
        # Make the last feature nan pattern very informative:
        y_max = np.percentile(y, 70)
        y_max_mask = y >= y_max
        y[y_max_mask] = y_max
        X[y_max_mask, 3] = np.nan
        # Check that there is at least one missing value in each feature:
        for feature_idx in range(X.shape[1]):
            assert any(np.isnan(X[:, feature_idx]))
        # Let's use a test set to check that the learned decision function is
        # the same as evaluated on unseen data. Otherwise it could just be the
        # case that we find two independent ways to overfit the training set.
        return train_test_split(X, y, random_state=rng)
    # n_samples need to be large enough to minimize the likelihood of having
    # several candidate splits with the same gain value in a given tree.
    X_train, X_test, y_train, y_test = make_missing_value_data(
        n_samples=int(1e4), seed=0)
    # Use a small number of leaf nodes and iterations so as to keep
    # under-fitting models to minimize the likelihood of ties when training the
    # model.
    gbm1 = HistGradientBoostingRegressor(max_iter=100,
                                         max_leaf_nodes=5,
                                         random_state=0)
    gbm1.fit(X_train, y_train)
    gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
    gbm2.fit(X_train, y_train)
    # Check that the model reach the same score:
    assert gbm1.score(X_train, y_train) == \
        pytest.approx(gbm2.score(X_train, y_train))
    assert gbm1.score(X_test, y_test) == \
        pytest.approx(gbm2.score(X_test, y_test))
    # Check the individual prediction match as a finer grained
    # decision function check.
    assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
    assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
def test_infinite_values():
    """+/-inf feature values must be handled; the fit should be exact."""
    features = np.array([[-np.inf], [0], [1], [np.inf]])
    targets = np.array([0, 0, 1, 1])
    model = HistGradientBoostingRegressor(min_samples_leaf=1)
    model.fit(features, targets)
    predictions = model.predict(features)
    np.testing.assert_allclose(predictions, targets, atol=1e-4)
def test_consistent_lengths():
    """Mismatched lengths of X, y, or sample_weight must raise ValueError."""
    X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    y = np.array([0, 0, 1, 1])
    sample_weight = np.array([.1, .3, .1])  # one element short on purpose
    gbdt = HistGradientBoostingRegressor()
    with pytest.raises(ValueError,
                       match=r"sample_weight.shape == \(3,\), expected"):
        gbdt.fit(X, y, sample_weight)
    with pytest.raises(ValueError,
                       match="Found input variables with inconsistent number"):
        gbdt.fit(X, y[1:])
def test_infinite_values_missing_values():
    """inf and nan must both be handled when present simultaneously."""
    # High level test making sure that inf and nan values are properly handled
    # when both are present. This is similar to
    # test_split_on_nan_with_infinite_values() in test_grower.py, though we
    # cannot check the predictions for binned values here.
    X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)
    y_isnan = np.isnan(X.ravel())
    y_isinf = X.ravel() == np.inf
    stump_clf = HistGradientBoostingClassifier(min_samples_leaf=1, max_iter=1,
                                               learning_rate=1, max_depth=2)
    assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1
    assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1
def test_crossentropy_binary_problem():
    """categorical_crossentropy on a binary target must raise ValueError."""
    # categorical_crossentropy should only be used if there are more than two
    # classes present. PR #14869
    X = [[1], [0]]
    y = [0, 1]
    gbrt = HistGradientBoostingClassifier(loss='categorical_crossentropy')
    with pytest.raises(ValueError,
                       match="'categorical_crossentropy' is not suitable for"):
        gbrt.fit(X, y)
@pytest.mark.parametrize("scoring", [None, 'loss'])
def test_string_target_early_stopping(scoring):
    """Early stopping must work with string class labels (issue #14709)."""
    # Regression tests for #14709 where the targets need to be encoded before
    # to compute the score
    rng = np.random.RandomState(42)
    X = rng.randn(100, 10)
    y = np.array(['x'] * 50 + ['y'] * 50, dtype=object)
    gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)
    gbrt.fit(X, y)
def test_zero_sample_weights_regression():
    """A sample weight of 0 must make the regressor ignore that sample."""
    # Make sure setting a SW to zero amounts to ignoring the corresponding
    # sample
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    gb = HistGradientBoostingRegressor(min_samples_leaf=1)
    gb.fit(X, y, sample_weight=sample_weight)
    assert gb.predict([[1, 0]])[0] > 0.5
def test_zero_sample_weights_classification():
    """A sample weight of 0 must make the classifier ignore that sample,
    for both the binary and the multiclass losses."""
    # Make sure setting a SW to zero amounts to ignoring the corresponding
    # sample
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    gb = HistGradientBoostingClassifier(loss='binary_crossentropy',
                                        min_samples_leaf=1)
    gb.fit(X, y, sample_weight=sample_weight)
    assert_array_equal(gb.predict([[1, 0]]), [1])
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1],
         [1, 1]]
    y = [0, 0, 1, 0, 2]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1, 1]
    gb = HistGradientBoostingClassifier(loss='categorical_crossentropy',
                                        min_samples_leaf=1)
    gb.fit(X, y, sample_weight=sample_weight)
    assert_array_equal(gb.predict([[1, 0]]), [1])
@pytest.mark.parametrize('problem', (
    'regression',
    'binary_classification',
    'multiclass_classification'
))
@pytest.mark.parametrize('duplication', ('half', 'all'))
def test_sample_weight_effect(problem, duplication):
    """Duplicating samples must be equivalent to doubling their weights."""
    # High level test to make sure that duplicating a sample is equivalent to
    # giving it weight of 2.
    # fails for n_samples > 255 because binning does not take sample weights
    # into account. Keeping n_samples <= 255 makes
    # sure only unique values are used so SW have no effect on binning.
    n_samples = 255
    n_features = 2
    if problem == 'regression':
        X, y = make_regression(n_samples=n_samples, n_features=n_features,
                               n_informative=n_features, random_state=0)
        Klass = HistGradientBoostingRegressor
    else:
        n_classes = 2 if problem == 'binary_classification' else 3
        X, y = make_classification(n_samples=n_samples, n_features=n_features,
                                   n_informative=n_features, n_redundant=0,
                                   n_clusters_per_class=1,
                                   n_classes=n_classes, random_state=0)
        Klass = HistGradientBoostingClassifier
    # This test can't pass if min_samples_leaf > 1 because that would force 2
    # samples to be in the same node in est_sw, while these samples would be
    # free to be separate in est_dup: est_dup would just group together the
    # duplicated samples.
    est = Klass(min_samples_leaf=1)
    # Create dataset with duplicate and corresponding sample weights
    if duplication == 'half':
        lim = n_samples // 2
    else:
        lim = n_samples
    X_dup = np.r_[X, X[:lim]]
    y_dup = np.r_[y, y[:lim]]
    sample_weight = np.ones(shape=(n_samples))
    sample_weight[:lim] = 2
    est_sw = clone(est).fit(X, y, sample_weight=sample_weight)
    est_dup = clone(est).fit(X_dup, y_dup)
    # checking raw_predict is stricter than just predict for classification
    assert np.allclose(est_sw._raw_predict(X_dup),
                       est_dup._raw_predict(X_dup))
@pytest.mark.parametrize('loss_name', ('least_squares',
                                       'least_absolute_deviation'))
def test_sum_hessians_are_sample_weight(loss_name):
    """For constant-hessian losses, each histogram bin's sum_hessians must
    equal the sum of the sample weights falling into that bin."""
    # For losses with constant hessians, the sum_hessians field of the
    # histograms must be equal to the sum of the sample weight of samples at
    # the corresponding bin.
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 2
    X, y = make_regression(n_samples=n_samples, n_features=n_features,
                           random_state=rng)
    bin_mapper = _BinMapper()
    X_binned = bin_mapper.fit_transform(X)
    sample_weight = rng.normal(size=n_samples)
    loss = _LOSSES[loss_name](sample_weight=sample_weight)
    gradients, hessians = loss.init_gradients_and_hessians(
        n_samples=n_samples, prediction_dim=1, sample_weight=sample_weight)
    raw_predictions = rng.normal(size=(1, n_samples))
    loss.update_gradients_and_hessians(gradients, hessians, y,
                                       raw_predictions, sample_weight)
    # build sum_sample_weight which contains the sum of the sample weights at
    # each bin (for each feature). This must be equal to the sum_hessians
    # field of the corresponding histogram
    sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))
    for feature_idx in range(n_features):
        for sample_idx in range(n_samples):
            sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += (
                sample_weight[sample_idx])
    # Build histogram
    grower = TreeGrower(X_binned, gradients[0], hessians[0],
                        n_bins=bin_mapper.n_bins)
    histograms = grower.histogram_builder.compute_histograms_brute(
        grower.root.sample_indices)
    for feature_idx in range(n_features):
        for bin_idx in range(bin_mapper.n_bins):
            assert histograms[feature_idx, bin_idx]['sum_hessians'] == (
                pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5))
def test_max_depth_max_leaf_nodes():
    """max_leaf_nodes must be respected even when max_depth is hit on the
    same split (regression test for issue #16179)."""
    # Non regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/16179
    # there was a bug when the max_depth and the max_leaf_nodes criteria were
    # met at the same time, which would lead to max_leaf_nodes not being
    # respected.
    X, y = make_classification(random_state=0)
    est = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3,
                                         max_iter=1).fit(X, y)
    tree = est._predictors[0][0]
    assert tree.get_max_depth() == 2
    assert tree.get_n_leaf_nodes() == 3  # would be 4 prior to bug fix
def test_early_stopping_on_test_set_with_warm_start():
    """A second warm-started fit must not fail when early stopping is on
    and there is no validation set (regression test for #16661)."""
    # Non regression test for #16661 where second fit fails with
    # warm_start=True, early_stopping is on, and no validation set
    X, y = make_classification(random_state=0)
    gb = HistGradientBoostingClassifier(
        max_iter=1, scoring='loss', warm_start=True, early_stopping=True,
        n_iter_no_change=1, validation_fraction=None)
    gb.fit(X, y)
    # does not raise on second call
    gb.set_params(max_iter=2)
    gb.fit(X, y)
@pytest.mark.parametrize('Est', (HistGradientBoostingClassifier,
                                 HistGradientBoostingRegressor))
def test_single_node_trees(Est):
    """Single-node trees must be buildable, carry value 0, and leave the
    baseline prediction untouched."""
    # Make sure it's still possible to build single-node trees. In that case
    # the value of the root is set to 0. That's a correct value: if the tree is
    # single-node that's because min_gain_to_split is not respected right from
    # the root, so we don't want the tree to have any impact on the
    # predictions.
    X, y = make_classification(random_state=0)
    y[:] = 1  # constant target will lead to a single root node
    est = Est(max_iter=20)
    est.fit(X, y)
    assert all(len(predictor[0].nodes) == 1 for predictor in est._predictors)
    assert all(predictor[0].nodes[0]['value'] == 0
               for predictor in est._predictors)
    # Still gives correct predictions thanks to the baseline prediction
    assert_allclose(est.predict(X), y)
@pytest.mark.parametrize('Est, loss, X, y', [
    (
        HistGradientBoostingClassifier,
        BinaryCrossEntropy(sample_weight=None),
        X_classification,
        y_classification
    ),
    (
        HistGradientBoostingRegressor,
        LeastSquares(sample_weight=None),
        X_regression,
        y_regression
    )
])
def test_custom_loss(Est, loss, X, y):
    """Passing a loss instance (not a string) must be accepted by fit."""
    est = Est(loss=loss, max_iter=20)
    est.fit(X, y)
| 40.145917 | 79 | 0.661476 |
ea27b534b8676ffc1abddcfeb4380adcf0e073ab | 934 | py | Python | useful_scripts/src/worm_gene_length_calculator.py | dangeles/WormFiles | fbdddc1700cb9c21a6ca0fc9430f63f6e32b441b | [
"MIT"
] | null | null | null | useful_scripts/src/worm_gene_length_calculator.py | dangeles/WormFiles | fbdddc1700cb9c21a6ca0fc9430f63f6e32b441b | [
"MIT"
] | null | null | null | useful_scripts/src/worm_gene_length_calculator.py | dangeles/WormFiles | fbdddc1700cb9c21a6ca0fc9430f63f6e32b441b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Spyder Editor
A script to obtain all gene lengths in c. elegans
@david angeles
dangeles@caltech.edu
"""
import pandas as pd
# Parse a FASTA file of C. elegans cDNA sequences: collect each record's
# WormBase gene ID (from the '>' header lines) and the length of its
# concatenated sequence, then tabulate them with pandas.
fname= '../input/Caenorhabditis_elegans.WBcel235.rel79.cdna.all.fa'
wbids=[]
gene_lengths= []
with open(fname, 'r') as f:
    i= 0
    gene= ''
    for line in f:
        if line[0] == '>':
            # Header line: the WBGene ID follows the 'gene:' tag and is
            # assumed to be 14 characters long (15-char slice, rstripped).
            start= line.find('gene:') + len('gene:')
            end= start + 15
            wbid= line[start:end].rstrip()
            wbids.append(wbid)
            if i != 0:
                # Close out the previous record's accumulated sequence.
                gene= gene.rstrip()
                gene_lengths.append(len(gene))
            else:
                i+=1
                gene= ''
            # NOTE(review): as rendered, ``gene`` is only reset on the very
            # first header (the ``else`` branch); if so, later records would
            # accumulate onto earlier ones — verify the reset's indentation
            # against the original file.
        else:
            gene= gene+line.rstrip()
# Append the final record, which has no trailing header to close it.
gene_lengths.append(len(gene))
cols= ['WBID', 'length']
data= list(zip(wbids, gene_lengths))
df= pd.DataFrame(data, columns= cols)
df.to_csv('../output/c_elegans_gene_lengths_PRJNA13758.txt', index = False) | 21.72093 | 75 | 0.555675 |
64f9d7d9a3fab2422aedfe04a673b3ff93a5372a | 2,649 | py | Python | SRC/orientationmap/GUI/orientmapwhowidget.py | usnistgov/OOF3D | 4fd423a48aea9c5dc207520f02de53ae184be74c | [
"X11"
] | 31 | 2015-04-01T15:59:36.000Z | 2022-03-18T20:21:47.000Z | SRC/orientationmap/GUI/orientmapwhowidget.py | usnistgov/OOF3D | 4fd423a48aea9c5dc207520f02de53ae184be74c | [
"X11"
] | 3 | 2015-02-06T19:30:24.000Z | 2017-05-25T14:14:31.000Z | SRC/orientationmap/GUI/orientmapwhowidget.py | usnistgov/OOF3D | 4fd423a48aea9c5dc207520f02de53ae184be74c | [
"X11"
] | 7 | 2015-01-23T15:19:22.000Z | 2021-06-09T09:03:59.000Z | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
# Widgets for listing Microstructures with or without Orientation Map data.
from ooflib.SWIG.orientationmap import orientmapdata
from ooflib.common import debug
from ooflib.common.IO import whoville
from ooflib.common.IO.GUI import whowidget
from ooflib.orientationmap import orientmapmenu
def _withOrientationMap(who):
    # Predicate: non-proxy Microstructures that carry orientation map data.
    non_proxy = whoville.excludeProxies(who)
    return non_proxy and (
        orientmapdata.getOrientationMap(who.getObject()) is not None)
class MicrostructureWithOrientationMapWidget(whowidget.WhoParameterWidget):
    # Widget listing only the Microstructures that have orientation map
    # data attached (filtered via the _withOrientationMap condition).
    def __init__(self, value=None, scope=None, name=None, verbose=False):
        whowidget.WhoParameterWidget.__init__(
            self, whoclass=whoville.getClass('Microstructure'),
            value=value, scope=scope, name=name,
            condition=_withOrientationMap,
            verbose=verbose)
def _MicrostructureWithOrientMapParameter_makeWidget(self, scope=None,
                                                     verbose=False):
    # GUI factory for MicrostructureWithOrientMapParameter.
    return MicrostructureWithOrientationMapWidget(
        self.value, scope=scope, name=self.name, verbose=verbose)
# Install the factory on the parameter class (GUI modules patch makeWidget).
orientmapmenu.MicrostructureWithOrientMapParameter.makeWidget = \
    _MicrostructureWithOrientMapParameter_makeWidget
##########
def _withoutOrientationMap(who):
    # Predicate: non-proxy Microstructures with NO orientation map data.
    return (whoville.excludeProxies(who) and
            orientmapdata.getOrientationMap(who.getObject()) is None)
class MicrostructureWithoutOrientationMapWidget(whowidget.WhoParameterWidget):
    # Widget listing only the Microstructures that do NOT have orientation
    # map data (filtered via the _withoutOrientationMap condition).
    def __init__(self, value=None, scope=None, name=None, verbose=False):
        whowidget.WhoParameterWidget.__init__(
            self, whoclass=whoville.getClass('Microstructure'),
            value=value, scope=scope, name=name,
            condition=_withoutOrientationMap, verbose=verbose)
def _MicrostructureWithoutOrientMapParameter_makeWidget(self, scope=None,
                                                        verbose=False):
    # GUI factory for MicrostructureWithoutOrientMapParameter.
    return MicrostructureWithoutOrientationMapWidget(
        self.value, scope=scope, name=self.name, verbose=verbose)
# Install the factory on the parameter class (GUI modules patch makeWidget).
orientmapmenu.MicrostructureWithoutOrientMapParameter.makeWidget = \
    _MicrostructureWithoutOrientMapParameter_makeWidget
| 43.42623 | 79 | 0.727444 |
1da5d5ca65dffbf2f838fd8b83b18eea6c102d73 | 536 | py | Python | code/test.py | tas09009/Thredup-database | c52532239800463c850676f3e827ae955bef6e32 | [
"CNRI-Python"
] | null | null | null | code/test.py | tas09009/Thredup-database | c52532239800463c850676f3e827ae955bef6e32 | [
"CNRI-Python"
] | null | null | null | code/test.py | tas09009/Thredup-database | c52532239800463c850676f3e827ae955bef6e32 | [
"CNRI-Python"
] | 4 | 2020-11-02T17:10:19.000Z | 2022-03-18T18:08:45.000Z | import glob
import pandas as pd
import os, re
# Filter a scraped listing CSV: drop rows whose Materials field mentions
# any banned (synthetic) fiber, then write the cleaned file back out.
file_name = "coats"
# product = '61% Cotton, 36% Acrylic, 3% Other'
df = pd.read_csv(f'/home/taniya/Projects/thredup-scraper-api/data/test_runs/merged_{file_name}.csv')
# Regex alternation of banned materials; "Polymide" is presumably a
# deliberate catch for a common misspelling of Polyamide — TODO confirm.
df_materials_banned = ~df.Materials.str.contains("Polyester|Polyamide|Polyethylene|Polymide|Acrylic|Synthetic|No Fabric Content")
df_materials_banned_removed = df[df_materials_banned]
df_materials_banned_removed.to_csv(f'/home/taniya/Projects/thredup-scraper-api/data/test_runs/clean_{file_name}.csv', index=False)
| 33.5 | 130 | 0.798507 |
7e00bd9760cda68822ea1b1ad416fc8a995bb3ed | 24,284 | py | Python | test/test_uri_parser.py | naomielst/mongo-python-driver | e3d1d6f5b48101654a05493fd6eec7fe3fa014bd | [
"Apache-2.0"
] | null | null | null | test/test_uri_parser.py | naomielst/mongo-python-driver | e3d1d6f5b48101654a05493fd6eec7fe3fa014bd | [
"Apache-2.0"
] | 1 | 2021-12-24T11:32:17.000Z | 2021-12-24T11:32:17.000Z | test/test_uri_parser.py | naomielst/mongo-python-driver | e3d1d6f5b48101654a05493fd6eec7fe3fa014bd | [
"Apache-2.0"
] | null | null | null | # Copyright 2011-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the pymongo uri_parser module."""
import copy
import sys
import warnings
from urllib.parse import quote_plus
sys.path[0:0] = [""]
from bson.binary import JAVA_LEGACY
from pymongo import ReadPreference
from pymongo.errors import ConfigurationError, InvalidURI
from pymongo.uri_parser import (parse_userinfo,
split_hosts,
split_options,
parse_uri)
from test import unittest
class TestURI(unittest.TestCase):
    def test_validate_userinfo(self):
        """parse_userinfo must reject malformed credentials and decode
        percent- and plus-escapes in user and password."""
        self.assertRaises(InvalidURI, parse_userinfo,
                          'foo@')
        self.assertRaises(InvalidURI, parse_userinfo,
                          ':password')
        self.assertRaises(InvalidURI, parse_userinfo,
                          'fo::o:p@ssword')
        self.assertRaises(InvalidURI, parse_userinfo, ':')
        self.assertTrue(parse_userinfo('user:password'))
        self.assertEqual(('us:r', 'p@ssword'),
                         parse_userinfo('us%3Ar:p%40ssword'))
        self.assertEqual(('us er', 'p ssword'),
                         parse_userinfo('us+er:p+ssword'))
        self.assertEqual(('us er', 'p ssword'),
                         parse_userinfo('us%20er:p%20ssword'))
        self.assertEqual(('us+er', 'p+ssword'),
                         parse_userinfo('us%2Ber:p%2Bssword'))
        self.assertEqual(('dev1@FOO.COM', ''),
                         parse_userinfo('dev1%40FOO.COM'))
        self.assertEqual(('dev1@FOO.COM', ''),
                         parse_userinfo('dev1%40FOO.COM:'))
    def test_split_hosts(self):
        """split_hosts must parse host lists, default ports, Unix socket
        paths and bracketed IPv6 literals, rejecting malformed input."""
        self.assertRaises(ConfigurationError, split_hosts,
                          'localhost:27017,')
        self.assertRaises(ConfigurationError, split_hosts,
                          ',localhost:27017')
        self.assertRaises(ConfigurationError, split_hosts,
                          'localhost:27017,,localhost:27018')
        self.assertEqual([('localhost', 27017), ('example.com', 27017)],
                         split_hosts('localhost,example.com'))
        self.assertEqual([('localhost', 27018), ('example.com', 27019)],
                         split_hosts('localhost:27018,example.com:27019'))
        self.assertEqual([('/tmp/mongodb-27017.sock', None)],
                         split_hosts('/tmp/mongodb-27017.sock'))
        self.assertEqual([('/tmp/mongodb-27017.sock', None),
                          ('example.com', 27017)],
                         split_hosts('/tmp/mongodb-27017.sock,'
                                     'example.com:27017'))
        self.assertEqual([('example.com', 27017),
                          ('/tmp/mongodb-27017.sock', None)],
                         split_hosts('example.com:27017,'
                                     '/tmp/mongodb-27017.sock'))
        # IPv6 literals must be wrapped in brackets.
        self.assertRaises(ValueError, split_hosts, '::1', 27017)
        self.assertRaises(ValueError, split_hosts, '[::1:27017')
        self.assertRaises(ValueError, split_hosts, '::1')
        self.assertRaises(ValueError, split_hosts, '::1]:27017')
        self.assertEqual([('::1', 27017)], split_hosts('[::1]:27017'))
        self.assertEqual([('::1', 27017)], split_hosts('[::1]'))
def test_split_options(self):
self.assertRaises(ConfigurationError, split_options, 'foo')
self.assertRaises(ConfigurationError, split_options, 'foo=bar;foo')
self.assertTrue(split_options('ssl=true'))
self.assertTrue(split_options('connect=true'))
self.assertTrue(split_options('tlsAllowInvalidHostnames=false'))
# Test Invalid URI options that should throw warnings.
with warnings.catch_warnings():
warnings.filterwarnings('error')
self.assertRaises(Warning, split_options,
'foo=bar', warn=True)
self.assertRaises(Warning, split_options,
'socketTimeoutMS=foo', warn=True)
self.assertRaises(Warning, split_options,
'socketTimeoutMS=0.0', warn=True)
self.assertRaises(Warning, split_options,
'connectTimeoutMS=foo', warn=True)
self.assertRaises(Warning, split_options,
'connectTimeoutMS=0.0', warn=True)
self.assertRaises(Warning, split_options,
'connectTimeoutMS=1e100000', warn=True)
self.assertRaises(Warning, split_options,
'connectTimeoutMS=-1e100000', warn=True)
self.assertRaises(Warning, split_options,
'ssl=foo', warn=True)
self.assertRaises(Warning, split_options,
'connect=foo', warn=True)
self.assertRaises(Warning, split_options,
'tlsAllowInvalidHostnames=foo', warn=True)
self.assertRaises(Warning, split_options,
'connectTimeoutMS=inf', warn=True)
self.assertRaises(Warning, split_options,
'connectTimeoutMS=-inf', warn=True)
self.assertRaises(Warning, split_options, 'wtimeoutms=foo',
warn=True)
self.assertRaises(Warning, split_options, 'wtimeoutms=5.5',
warn=True)
self.assertRaises(Warning, split_options, 'fsync=foo',
warn=True)
self.assertRaises(Warning, split_options, 'fsync=5.5',
warn=True)
self.assertRaises(Warning,
split_options, 'authMechanism=foo',
warn=True)
# Test invalid options with warn=False.
self.assertRaises(ConfigurationError, split_options, 'foo=bar')
self.assertRaises(ValueError, split_options, 'socketTimeoutMS=foo')
self.assertRaises(ValueError, split_options, 'socketTimeoutMS=0.0')
self.assertRaises(ValueError, split_options, 'connectTimeoutMS=foo')
self.assertRaises(ValueError, split_options, 'connectTimeoutMS=0.0')
self.assertRaises(ValueError, split_options,
'connectTimeoutMS=1e100000')
self.assertRaises(ValueError, split_options,
'connectTimeoutMS=-1e100000')
self.assertRaises(ValueError, split_options, 'ssl=foo')
self.assertRaises(ValueError, split_options, 'connect=foo')
self.assertRaises(ValueError, split_options, 'tlsAllowInvalidHostnames=foo')
self.assertRaises(ValueError, split_options, 'connectTimeoutMS=inf')
self.assertRaises(ValueError, split_options, 'connectTimeoutMS=-inf')
self.assertRaises(ValueError, split_options, 'wtimeoutms=foo')
self.assertRaises(ValueError, split_options, 'wtimeoutms=5.5')
self.assertRaises(ValueError, split_options, 'fsync=foo')
self.assertRaises(ValueError, split_options, 'fsync=5.5')
self.assertRaises(ValueError,
split_options, 'authMechanism=foo')
# Test splitting options works when valid.
self.assertTrue(split_options('socketTimeoutMS=300'))
self.assertTrue(split_options('connectTimeoutMS=300'))
self.assertEqual({'sockettimeoutms': 0.3},
split_options('socketTimeoutMS=300'))
self.assertEqual({'sockettimeoutms': 0.0001},
split_options('socketTimeoutMS=0.1'))
self.assertEqual({'connecttimeoutms': 0.3},
split_options('connectTimeoutMS=300'))
self.assertEqual({'connecttimeoutms': 0.0001},
split_options('connectTimeoutMS=0.1'))
self.assertTrue(split_options('connectTimeoutMS=300'))
self.assertTrue(isinstance(split_options('w=5')['w'], int))
self.assertTrue(isinstance(split_options('w=5.5')['w'], str))
self.assertTrue(split_options('w=foo'))
self.assertTrue(split_options('w=majority'))
self.assertTrue(split_options('wtimeoutms=500'))
self.assertEqual({'fsync': True}, split_options('fsync=true'))
self.assertEqual({'fsync': False}, split_options('fsync=false'))
self.assertEqual({'authmechanism': 'GSSAPI'},
split_options('authMechanism=GSSAPI'))
self.assertEqual({'authmechanism': 'MONGODB-CR'},
split_options('authMechanism=MONGODB-CR'))
self.assertEqual({'authmechanism': 'SCRAM-SHA-1'},
split_options('authMechanism=SCRAM-SHA-1'))
self.assertEqual({'authsource': 'foobar'},
split_options('authSource=foobar'))
self.assertEqual({'maxpoolsize': 50}, split_options('maxpoolsize=50'))
def test_parse_uri(self):
self.assertRaises(InvalidURI, parse_uri, "http://foobar.com")
self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com")
self.assertRaises(ValueError,
parse_uri, "mongodb://::1", 27017)
orig = {
'nodelist': [("localhost", 27017)],
'username': None,
'password': None,
'database': None,
'collection': None,
'options': {},
'fqdn': None
}
res = copy.deepcopy(orig)
self.assertEqual(res, parse_uri("mongodb://localhost"))
res.update({'username': 'fred', 'password': 'foobar'})
self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost"))
res.update({'database': 'baz'})
self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/baz"))
res = copy.deepcopy(orig)
res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)]
self.assertEqual(res,
parse_uri("mongodb://example1.com:27017,"
"example2.com:27017"))
res = copy.deepcopy(orig)
res['nodelist'] = [("localhost", 27017),
("localhost", 27018),
("localhost", 27019)]
self.assertEqual(res,
parse_uri("mongodb://localhost,"
"localhost:27018,localhost:27019"))
res = copy.deepcopy(orig)
res['database'] = 'foo'
self.assertEqual(res, parse_uri("mongodb://localhost/foo"))
res = copy.deepcopy(orig)
self.assertEqual(res, parse_uri("mongodb://localhost/"))
res.update({'database': 'test', 'collection': 'yield_historical.in'})
self.assertEqual(res, parse_uri("mongodb://"
"localhost/test.yield_historical.in"))
res.update({'username': 'fred', 'password': 'foobar'})
self.assertEqual(res,
parse_uri("mongodb://fred:foobar@localhost/"
"test.yield_historical.in"))
res = copy.deepcopy(orig)
res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)]
res.update({'database': 'test', 'collection': 'yield_historical.in'})
self.assertEqual(res,
parse_uri("mongodb://example1.com:27017,example2.com"
":27017/test.yield_historical.in"))
# Test socket path without escaped characters.
self.assertRaises(InvalidURI, parse_uri,
"mongodb:///tmp/mongodb-27017.sock")
# Test with escaped characters.
res = copy.deepcopy(orig)
res['nodelist'] = [("example2.com", 27017),
("/tmp/mongodb-27017.sock", None)]
self.assertEqual(res,
parse_uri("mongodb://example2.com,"
"%2Ftmp%2Fmongodb-27017.sock"))
res = copy.deepcopy(orig)
res['nodelist'] = [("shoe.sock.pants.co.uk", 27017),
("/tmp/mongodb-27017.sock", None)]
res['database'] = "nethers_db"
self.assertEqual(res,
parse_uri("mongodb://shoe.sock.pants.co.uk,"
"%2Ftmp%2Fmongodb-27017.sock/nethers_db"))
res = copy.deepcopy(orig)
res['nodelist'] = [("/tmp/mongodb-27017.sock", None),
("example2.com", 27017)]
res.update({'database': 'test', 'collection': 'yield_historical.in'})
self.assertEqual(res,
parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock,"
"example2.com:27017"
"/test.yield_historical.in"))
res = copy.deepcopy(orig)
res['nodelist'] = [("/tmp/mongodb-27017.sock", None),
("example2.com", 27017)]
res.update({'database': 'test', 'collection': 'yield_historical.sock'})
self.assertEqual(res,
parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock,"
"example2.com:27017/test.yield_historical"
".sock"))
res = copy.deepcopy(orig)
res['nodelist'] = [("example2.com", 27017)]
res.update({'database': 'test', 'collection': 'yield_historical.sock'})
self.assertEqual(res,
parse_uri("mongodb://example2.com:27017"
"/test.yield_historical.sock"))
res = copy.deepcopy(orig)
res['nodelist'] = [("/tmp/mongodb-27017.sock", None)]
res.update({'database': 'test', 'collection': 'mongodb-27017.sock'})
self.assertEqual(res,
parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock"
"/test.mongodb-27017.sock"))
res = copy.deepcopy(orig)
res['nodelist'] = [('/tmp/mongodb-27020.sock', None),
("::1", 27017),
("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018),
("192.168.0.212", 27019),
("localhost", 27018)]
self.assertEqual(res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27020.sock"
",[::1]:27017,[2001:0db8:"
"85a3:0000:0000:8a2e:0370:7334],"
"192.168.0.212:27019,localhost",
27018))
res = copy.deepcopy(orig)
res.update({'username': 'fred', 'password': 'foobar'})
res.update({'database': 'test', 'collection': 'yield_historical.in'})
self.assertEqual(res,
parse_uri("mongodb://fred:foobar@localhost/"
"test.yield_historical.in"))
res = copy.deepcopy(orig)
res['database'] = 'test'
res['collection'] = 'name/with "delimiters'
self.assertEqual(
res, parse_uri("mongodb://localhost/test.name/with \"delimiters"))
res = copy.deepcopy(orig)
res['options'] = {
'readpreference': ReadPreference.SECONDARY.mongos_mode
}
self.assertEqual(res, parse_uri(
"mongodb://localhost/?readPreference=secondary"))
# Various authentication tests
res = copy.deepcopy(orig)
res['options'] = {'authmechanism': 'MONGODB-CR'}
res['username'] = 'user'
res['password'] = 'password'
self.assertEqual(res,
parse_uri("mongodb://user:password@localhost/"
"?authMechanism=MONGODB-CR"))
res = copy.deepcopy(orig)
res['options'] = {'authmechanism': 'MONGODB-CR', 'authsource': 'bar'}
res['username'] = 'user'
res['password'] = 'password'
res['database'] = 'foo'
self.assertEqual(res,
parse_uri("mongodb://user:password@localhost/foo"
"?authSource=bar;authMechanism=MONGODB-CR"))
res = copy.deepcopy(orig)
res['options'] = {'authmechanism': 'MONGODB-CR'}
res['username'] = 'user'
res['password'] = ''
self.assertEqual(res,
parse_uri("mongodb://user:@localhost/"
"?authMechanism=MONGODB-CR"))
res = copy.deepcopy(orig)
res['username'] = 'user@domain.com'
res['password'] = 'password'
res['database'] = 'foo'
self.assertEqual(res,
parse_uri("mongodb://user%40domain.com:password"
"@localhost/foo"))
res = copy.deepcopy(orig)
res['options'] = {'authmechanism': 'GSSAPI'}
res['username'] = 'user@domain.com'
res['password'] = 'password'
res['database'] = 'foo'
self.assertEqual(res,
parse_uri("mongodb://user%40domain.com:password"
"@localhost/foo?authMechanism=GSSAPI"))
res = copy.deepcopy(orig)
res['options'] = {'authmechanism': 'GSSAPI'}
res['username'] = 'user@domain.com'
res['password'] = ''
res['database'] = 'foo'
self.assertEqual(res,
parse_uri("mongodb://user%40domain.com"
"@localhost/foo?authMechanism=GSSAPI"))
res = copy.deepcopy(orig)
res['options'] = {
'readpreference': ReadPreference.SECONDARY.mongos_mode,
'readpreferencetags': [
{'dc': 'west', 'use': 'website'},
{'dc': 'east', 'use': 'website'}
]
}
res['username'] = 'user@domain.com'
res['password'] = 'password'
res['database'] = 'foo'
self.assertEqual(res,
parse_uri("mongodb://user%40domain.com:password"
"@localhost/foo?readpreference=secondary&"
"readpreferencetags=dc:west,use:website&"
"readpreferencetags=dc:east,use:website"))
res = copy.deepcopy(orig)
res['options'] = {
'readpreference': ReadPreference.SECONDARY.mongos_mode,
'readpreferencetags': [
{'dc': 'west', 'use': 'website'},
{'dc': 'east', 'use': 'website'},
{}
]
}
res['username'] = 'user@domain.com'
res['password'] = 'password'
res['database'] = 'foo'
self.assertEqual(res,
parse_uri("mongodb://user%40domain.com:password"
"@localhost/foo?readpreference=secondary&"
"readpreferencetags=dc:west,use:website&"
"readpreferencetags=dc:east,use:website&"
"readpreferencetags="))
res = copy.deepcopy(orig)
res['options'] = {'uuidrepresentation': JAVA_LEGACY}
res['username'] = 'user@domain.com'
res['password'] = 'password'
res['database'] = 'foo'
self.assertEqual(res,
parse_uri("mongodb://user%40domain.com:password"
"@localhost/foo?uuidrepresentation="
"javaLegacy"))
with warnings.catch_warnings():
warnings.filterwarnings('error')
self.assertRaises(Warning, parse_uri,
"mongodb://user%40domain.com:password"
"@localhost/foo?uuidrepresentation=notAnOption",
warn=True)
self.assertRaises(ValueError, parse_uri,
"mongodb://user%40domain.com:password"
"@localhost/foo?uuidrepresentation=notAnOption")
def test_parse_ssl_paths(self):
# Turn off "validate" since these paths don't exist on filesystem.
self.assertEqual(
{'collection': None,
'database': None,
'nodelist': [('/MongoDB.sock', None)],
'options': {'tlsCertificateKeyFile': '/a/b'},
'password': 'foo/bar',
'username': 'jesse',
'fqdn': None},
parse_uri(
'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b',
validate=False))
self.assertEqual(
{'collection': None,
'database': None,
'nodelist': [('/MongoDB.sock', None)],
'options': {'tlsCertificateKeyFile': 'a/b'},
'password': 'foo/bar',
'username': 'jesse',
'fqdn': None},
parse_uri(
'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b',
validate=False))
def test_tlsinsecure_simple(self):
# check that tlsInsecure is expanded correctly.
self.maxDiff = None
uri = "mongodb://example.com/?tlsInsecure=true"
res = {
"tlsAllowInvalidHostnames": True,
"tlsAllowInvalidCertificates": True,
"tlsInsecure": True, 'tlsDisableOCSPEndpointCheck': True}
self.assertEqual(res, parse_uri(uri)["options"])
def test_normalize_options(self):
# check that options are converted to their internal names correctly.
uri = ("mongodb://example.com/?ssl=true&appname=myapp")
res = {"tls": True, "appname": "myapp"}
self.assertEqual(res, parse_uri(uri)["options"])
def test_unquote_after_parsing(self):
quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc"
unquoted_val = "val!@#$%^&*()_+,: etc"
uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS"
"&authMechanismProperties=AWS_SESSION_TOKEN:"+quoted_val)
res = parse_uri(uri)
options = {
'authmechanism': 'MONGODB-AWS',
'authmechanismproperties': {
'AWS_SESSION_TOKEN': unquoted_val}}
self.assertEqual(options, res['options'])
uri = (("mongodb://localhost/foo?readpreference=secondary&"
"readpreferencetags=dc:west,"+quoted_val+":"+quoted_val+"&"
"readpreferencetags=dc:east,use:"+quoted_val))
res = parse_uri(uri)
options = {
'readpreference': ReadPreference.SECONDARY.mongos_mode,
'readpreferencetags': [
{'dc': 'west', unquoted_val: unquoted_val},
{'dc': 'east', 'use': unquoted_val}
]
}
self.assertEqual(options, res['options'])
def test_redact_AWS_SESSION_TOKEN(self):
unquoted_colon = "token:"
uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS"
"&authMechanismProperties=AWS_SESSION_TOKEN:"+unquoted_colon)
with self.assertRaisesRegex(
ValueError,
'auth mechanism properties must be key:value pairs like '
'SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:<redacted token>'
', did you forget to percent-escape the token with '
'quote_plus?'):
parse_uri(uri)
def test_special_chars(self):
user = "user@ /9+:?~!$&'()*+,;="
pwd = "pwd@ /9+:?~!$&'()*+,;="
uri = 'mongodb://%s:%s@localhost' % (quote_plus(user), quote_plus(pwd))
res = parse_uri(uri)
self.assertEqual(user, res['username'])
self.assertEqual(pwd, res['password'])
if __name__ == "__main__":
unittest.main()
| 46.079696 | 88 | 0.543444 |
fed67d4f8ed2abe095aa1b142c375288f7119fbc | 8,139 | py | Python | easyai/model/seg/encnet_seg.py | lpj0822/image_point_cloud_det | 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f | [
"MIT"
] | 1 | 2020-09-05T09:18:56.000Z | 2020-09-05T09:18:56.000Z | easyai/model/seg/encnet_seg.py | lpj0822/image_point_cloud_det | 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f | [
"MIT"
] | 8 | 2020-04-20T02:18:55.000Z | 2022-03-12T00:24:50.000Z | easyai/model/seg/encnet_seg.py | lpj0822/image_point_cloud_det | 7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
"""
title = {FastFCN: Rethinking Dilated Convolution in the Backbone for Semantic Segmentation},
author = {Wu, Huikai and Zhang, Junge and Huang, Kaiqi and Liang, Kongming and Yu Yizhou},
booktitle = {arXiv preprint arXiv:1903.11816},
year = {2019}
"""
from easyai.base_name.model_name import ModelName
from easyai.base_name.backbone_name import BackboneName
from easyai.base_name.block_name import NormalizationType, ActivationType
from easyai.base_name.block_name import LayerType, BlockType
from easyai.base_name.loss_name import LossType
from easyai.loss.seg.encnet_loss import EncNetLoss
from easyai.model.base_block.utility.upsample_layer import Upsample
from easyai.model.base_block.utility.utility_layer import RouteLayer
from easyai.model.base_block.utility.utility_block import ConvBNActivationBlock
from easyai.model.base_block.seg.encnet_block import EncNetBlockName
from easyai.model.base_block.seg.encnet_block import JPUBlock, EncBlock, FCNHeadBlock
from easyai.model.utility.base_model import *
from easyai.model.backbone.utility.backbone_factory import BackboneFactory
class EncNetSeg(BaseModel):
    """
    EncNet-style semantic segmentation network (FastFCN variant) on a
    ResNet50 backbone.

    Reference: "FastFCN: Rethinking Dilated Convolution in the Backbone for
    Semantic Segmentation" (Wu et al., arXiv:1903.11816).

    The network is assembled as an ordered list of named blocks;
    ``forward`` walks that list and wires intermediate outputs between
    blocks by block name.
    """

    def __init__(self, data_channel=3, class_num=150):
        """
        :param data_channel: number of input image channels
        :param class_num: number of segmentation classes
        """
        super().__init__()
        self.set_name(ModelName.EncNetSeg)
        self.data_channel = data_channel
        self.class_number = class_num
        self.is_jpu = True      # use Joint Pyramid Upsampling instead of dilated convs
        self.lateral = False    # enable lateral skip-connection branch in enc_head
        self.is_se_loss = True  # Enc block also returns a semantic-encoding output
        self.is_aux = True      # add an auxiliary FCN head on an intermediate layer
        self.bn_name = NormalizationType.BatchNormalize2d
        self.activation_name = ActivationType.ReLU

        self.factory = BackboneFactory()
        self.create_block_list()

    def create_block_list(self):
        """Build backbone, optional JPU, Enc head, loss, and optional aux head."""
        self.clear_list()

        backbone = self.factory.get_base_model(BackboneName.ResNet50)
        base_out_channels = backbone.get_outchannel_list()
        self.add_block_list(BlockType.BaseNet, backbone, base_out_channels[-1])

        if self.is_jpu:
            # JPU fuses backbone stages 4/8/14/17; output is 4 x width channels.
            jup = JPUBlock(layers='4,8,14,17', in_planes=(512, 1024, 2048), width=512,
                           bn_name=self.bn_name, activation_name=self.activation_name)
            self.add_block_list(jup.get_name(), jup, 512 + 512 + 512 + 512)

        self.enc_head(2048, base_out_channels)

        self.create_loss()

        if self.is_aux:
            # Auxiliary FCN head fed from backbone layer 14 (1024 channels).
            route = RouteLayer('14')
            output_channel = sum([base_out_channels[i] if i >= 0
                                  else self.block_out_channels[i] for i in route.layers])
            self.add_block_list(route.get_name(), route, output_channel)

            fcn_head = FCNHeadBlock(1024, self.class_number, 16,
                                    bn_name=self.bn_name, activation_name=self.activation_name)
            self.add_block_list(fcn_head.get_name(), fcn_head, self.class_number)

    def enc_head(self, in_channels, base_out_channels):
        """
        Build the EncNet head: input conv, optional lateral branch,
        Enc (context encoding) block, dropout, classifier conv, 8x upsample.
        """
        if self.is_jpu:
            # JPU output already mixes scales, so a 1x1 reduction suffices.
            conv1 = ConvBNActivationBlock(in_channels=in_channels,
                                          out_channels=512,
                                          kernel_size=1,
                                          bias=False,
                                          bnName=self.bn_name,
                                          activationName=self.activation_name)
            self.add_block_list(conv1.get_name(), conv1, 512)
        else:
            conv1 = ConvBNActivationBlock(in_channels=in_channels,
                                          out_channels=512,
                                          kernel_size=3,
                                          padding=1,
                                          bias=False,
                                          bnName=self.bn_name,
                                          activationName=self.activation_name)
            self.add_block_list(conv1.get_name(), conv1, 512)

        if self.lateral:
            route1 = RouteLayer('8')
            output_channel = sum([base_out_channels[i] if i >= 0
                                  else self.block_out_channels[i] for i in route1.layers])
            self.add_block_list(route1.get_name(), route1, output_channel)

            connect1 = ConvBNActivationBlock(in_channels=output_channel,
                                             out_channels=512,
                                             kernel_size=1,
                                             bias=False,
                                             bnName=self.bn_name,
                                             activationName=self.activation_name)
            self.add_block_list(connect1.get_name(), connect1, 512)

            route2 = RouteLayer('14')
            output_channel = sum([base_out_channels[i] if i >= 0
                                  else self.block_out_channels[i] for i in route2.layers])
            self.add_block_list(route2.get_name(), route2, output_channel)

            connect2 = ConvBNActivationBlock(in_channels=output_channel,
                                             out_channels=512,
                                             kernel_size=1,
                                             bias=False,
                                             bnName=self.bn_name,
                                             activationName=self.activation_name)
            self.add_block_list(connect2.get_name(), connect2, 512)

            route3 = RouteLayer('-5,-3,-1')
            # BUGFIX: was summing over route2.layers, which reported the
            # channels of backbone layer 14 instead of the concatenation of
            # the three 512-channel branches selected by route3.
            output_channel = sum([base_out_channels[i] if i >= 0
                                  else self.block_out_channels[i] for i in route3.layers])
            self.add_block_list(route3.get_name(), route3, output_channel)

            fusion = ConvBNActivationBlock(in_channels=output_channel,
                                           out_channels=512,
                                           kernel_size=3,
                                           padding=1,
                                           bias=False,
                                           bnName=self.bn_name,
                                           activationName=self.activation_name)
            self.add_block_list(fusion.get_name(), fusion, 512)

        encmodule = EncBlock(in_channels=512, nclass=self.class_number, se_loss=self.is_se_loss,
                             bn_name=self.bn_name, activation_name=self.activation_name)
        self.add_block_list(encmodule.get_name(), encmodule, 512)

        dropout = nn.Dropout2d(0.1, False)
        self.add_block_list(LayerType.Dropout, dropout, self.block_out_channels[-1])

        conv2 = nn.Conv2d(self.block_out_channels[-1], self.class_number, 1)
        self.add_block_list(LayerType.Convolutional, conv2, self.class_number)

        up = Upsample(scale_factor=8, mode='bilinear')
        self.add_block_list(up.get_name(), up, self.class_number)

    def create_loss(self, input_dict=None):
        """Register the EncNet loss (segmentation + optional se/aux terms)."""
        self.lossList = []
        loss = EncNetLoss(self.class_number, se_loss=self.is_se_loss,
                          aux=self.is_aux, ignore_index=250)
        self.add_block_list(LossType.EncNetLoss, loss, self.block_out_channels[-1])
        self.lossList.append(loss)

    def forward(self, x):
        """
        Run the block list on *x*.

        Returns ``[seg_output, aux_output, se_output]`` where the last two
        may be ``None`` when the corresponding branch is disabled.
        """
        base_outputs = []
        layer_outputs = []
        output = []
        se_loss = None
        aux_loss = None
        for key, block in self._modules.items():
            if BlockType.BaseNet in key:
                base_outputs = block(x)
                x = base_outputs[-1]
            elif LayerType.RouteLayer in key:
                x = block(layer_outputs, base_outputs)
            elif EncNetBlockName.JPUBlock in key:
                x = block(layer_outputs, base_outputs)
            elif EncNetBlockName.EncBlock in key:
                x, se_loss = block(x)
            elif LossType.EncNetLoss in key:
                # The loss marker just records the head output; no compute here.
                output.append(x)
            elif EncNetBlockName.FCNHeadBlock in key:
                x = block(x)
                aux_loss = x
            else:
                x = block(x)
            layer_outputs.append(x)
            # (removed debug print of every block's output shape)
        output.append(aux_loss)
        output.append(se_loss)
        return output
| 45.724719 | 96 | 0.56948 |
706214c426fea3b063bf62fdcb4e466ba022aca3 | 897 | py | Python | share/qt/clean_mac_info_plist.py | GWaddell/Musicoin | c90377b2ac16423733011d174882d211140ff4d1 | [
"MIT"
] | 1 | 2016-11-06T09:28:35.000Z | 2016-11-06T09:28:35.000Z | share/qt/clean_mac_info_plist.py | Musicoin/musicoin | c90377b2ac16423733011d174882d211140ff4d1 | [
"MIT"
] | 1 | 2016-12-18T14:34:12.000Z | 2016-12-18T14:34:12.000Z | share/qt/clean_mac_info_plist.py | GWaddell/Musicoin | c90377b2ac16423733011d174882d211140ff4d1 | [
"MIT"
] | 1 | 2016-10-04T00:53:47.000Z | 2016-10-04T00:53:47.000Z | #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Musicoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Musicoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
| 29.9 | 109 | 0.725753 |
8684821d1ca5ef889b49372c0b609217f147f8d5 | 366 | py | Python | QuotesScrape/QuotesScrape/pipelines.py | Andrewzekid/BrainyQuoteScraper | 741f4696e17d052e967570dd922b49f337abf16f | [
"MIT"
] | null | null | null | QuotesScrape/QuotesScrape/pipelines.py | Andrewzekid/BrainyQuoteScraper | 741f4696e17d052e967570dd922b49f337abf16f | [
"MIT"
] | null | null | null | QuotesScrape/QuotesScrape/pipelines.py | Andrewzekid/BrainyQuoteScraper | 741f4696e17d052e967570dd922b49f337abf16f | [
"MIT"
] | null | null | null | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class QuotesscrapePipeline:
    """Pass-through item pipeline: hands every scraped item on unchanged."""

    def process_item(self, item, spider):
        """Return *item* as-is; *spider* is accepted but unused."""
        return item
| 26.142857 | 66 | 0.770492 |
c2c499001bcc1a12394c049b0acc5611dde8bda4 | 18,730 | py | Python | lib/id3c/api/datastore.py | UWIT-IAM/uw-redcap-client | 38a1eb426fa80697446df7a466a41e0305382606 | [
"MIT"
] | null | null | null | lib/id3c/api/datastore.py | UWIT-IAM/uw-redcap-client | 38a1eb426fa80697446df7a466a41e0305382606 | [
"MIT"
] | null | null | null | lib/id3c/api/datastore.py | UWIT-IAM/uw-redcap-client | 38a1eb426fa80697446df7a466a41e0305382606 | [
"MIT"
] | null | null | null | """
Datastore abstraction for our database.
"""
import logging
import psycopg2
from functools import wraps
from psycopg2 import DataError, DatabaseError, IntegrityError, ProgrammingError
from psycopg2.errors import InsufficientPrivilege
from typing import Any
from uuid import UUID
from werkzeug.exceptions import Forbidden, NotFound
from .. import db
from ..db import find_identifier, upsert_sample
from ..db.session import DatabaseSession
from .exceptions import AuthenticationRequired, BadRequest
from .utils import export
LOG = logging.getLogger(__name__)
def catch_permission_denied(function):
    """
    Decorator translating database permission errors into HTTP errors.

    Any :class:`psycopg2.errors.InsufficientPrivilege` (a
    :class:`psycopg2.ProgrammingError` with the ``INSUFFICIENT_PRIVILEGE``
    error code) raised by the wrapped callable is logged and re-raised as a
    :class:`~werkzeug.exceptions.Forbidden`.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except InsufficientPrivilege as error:
            LOG.error("Forbidden: %s", error)
            raise Forbidden()

    return wrapper
@export
def login(username: str, password: str) -> DatabaseSession:
    """
    Creates a new database session authenticated as the given user.

    Returns an opaque session object which other functions in this module
    require.

    Raises an :class:`AuthenticationRequired` exception if the database
    rejects the credentials.
    """
    # Lazy %-style args: the message is only formatted when DEBUG is enabled.
    LOG.debug("Logging into PostgreSQL database as '%s'", username)

    try:
        return DatabaseSession(username = username, password = password)
    except DatabaseError:
        # "from None" deliberately hides the driver error, which could
        # contain connection details, behind a generic auth failure.
        raise AuthenticationRequired() from None
@export
@catch_permission_denied
def store_enrollment(session: DatabaseSession, document: str) -> None:
    """
    Insert the enrollment JSON *document* (a **string**) into the receiving
    area of the database via *session*.

    An invalid *document* raises :class:`BadRequestDatabaseError`; a database
    `permission denied` surfaces as :class:`Forbidden`.
    """
    sql = "INSERT INTO receiving.enrollment (document) VALUES (%s)"

    with session, session.cursor() as cursor:
        try:
            cursor.execute(sql, (document,))
        except (DataError, IntegrityError) as error:
            raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_presence_absence(session: DatabaseSession, document: str) -> None:
    """
    Insert the presence/absence *document* (a **string**) into the receiving
    area of the database via *session*.

    An invalid *document* raises :class:`BadRequestDatabaseError`; a database
    `permission denied` surfaces as :class:`Forbidden`.
    """
    sql = "insert into receiving.presence_absence (document) VALUES (%s)"

    with session, session.cursor() as cursor:
        try:
            cursor.execute(sql, (document,))
        except (DataError, IntegrityError) as error:
            raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_sequence_read_set(session: DatabaseSession, document: str) -> None:
    """
    Insert the sequence read set *document* (a **string**) into the receiving
    area of the database via *session*.

    An invalid *document* raises :class:`BadRequestDatabaseError`; a database
    `permission denied` surfaces as :class:`Forbidden`.
    """
    sql = "insert into receiving.sequence_read_set (document) values (%s)"

    with session, session.cursor() as cursor:
        try:
            cursor.execute(sql, (document,))
        except (DataError, IntegrityError) as error:
            raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_consensus_genome(session: DatabaseSession, document: str) -> None:
    """
    Insert the consensus genome *document* (a **string**) into the receiving
    area of the database via *session*.

    An invalid *document* raises :class:`BadRequestDatabaseError`; a database
    `permission denied` surfaces as :class:`Forbidden`.
    """
    sql = "insert into receiving.consensus_genome (document) values (%s)"

    with session, session.cursor() as cursor:
        try:
            cursor.execute(sql, (document,))
        except (DataError, IntegrityError) as error:
            raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_redcap_det(session: DatabaseSession, document: str) -> None:
    """
    Insert the REDCap DET *document* (a **string**) into the receiving area
    of the database via *session*.

    An invalid *document* raises :class:`BadRequestDatabaseError`; a database
    `permission denied` surfaces as :class:`Forbidden`.
    """
    sql = "insert into receiving.redcap_det (document) values (%s)"

    with session, session.cursor() as cursor:
        try:
            cursor.execute(sql, (document,))
        except (DataError, IntegrityError) as error:
            raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def store_fhir(session: DatabaseSession, document: str) -> None:
    """
    Insert the FHIR *document* (a **string**) into the receiving area of the
    database via *session*.

    An invalid *document* raises :class:`BadRequestDatabaseError`; a database
    `permission denied` surfaces as :class:`Forbidden`.
    """
    sql = "insert into receiving.fhir (document) values (%s)"

    with session, session.cursor() as cursor:
        try:
            cursor.execute(sql, (document,))
        except (DataError, IntegrityError) as error:
            raise BadRequestDatabaseError(error) from None
@export
@catch_permission_denied
def verify_barcode_use_list(session: DatabaseSession, barcode_use_list: list) -> Any:
    """
    Check the given *barcode_use_list* containing objects with ``barcode`` and ``use``
    keys and values to verify that each barcode exists in the backing database and that the
    given use matches the stored use.

    Returns a list of objects in the same order as the input, with each object including the
    ``barcode`` (string) and ``use`` (string) being verified, ``barcode_found`` (boolean)
    indicating whether the given barcode exists, and ``use_match`` (boolean) indicating whether
    the given use matches the stored use. The ``use_match`` value will be `null` if the barcode
    does not exist.
    """
    # An empty input would produce invalid SQL ("values ()"); short-circuit
    # with an empty result instead of a database error.
    if not barcode_use_list:
        return []

    barcode_use_tuples = [(bu["barcode"],bu["use"]) for bu in barcode_use_list]

    # args_str consists only of literal "%s" placeholders (one per tuple), so
    # interpolating it into the query is safe; all user-supplied data is
    # passed as bound parameters in fetch_all() below.
    args_str = ','.join(['%s'] * len(barcode_use_tuples))

    sql = "select q.barcode, q.use, \
    case \
        when identifier.barcode is not null then true else false \
    end as barcode_found, \
    case \
        when identifier_set.use IS NULL then null \
        when q.use::citext=identifier_set.use then true \
        else false \
    end as use_match \
    from (values {}) as q (barcode, use) \
    left join warehouse.identifier on q.barcode::citext = identifier.barcode \
    left join warehouse.identifier_set using (identifier_set_id)".format(args_str)

    result = session.fetch_all(sql, tuple(barcode_use_tuples))
    return result
@export
@catch_permission_denied
def fetch_identifier(session: DatabaseSession, id: str) -> Any:
    """
    Fetch the identifier *id* from the backing database using *session*.

    *id* may be a full UUID or shortened barcode.

    Returns a named tuple with ``uuid``, ``barcode``, ``generated``, ``set``,
    and ``use`` attributes.  If the identifier doesn't exist, raises a
    :class:`~werkzeug.exceptions.NotFound` exception.
    """
    # Decide which column to match on: a value that parses as a UUID is
    # looked up by uuid, anything else by barcode.  (Removed an unused
    # binding of the parsed UUID — only the success/failure matters.)
    try:
        UUID(id)
        id_field = "uuid"
    except ValueError:
        id_field = "barcode"

    with session:
        # id_field is one of two hard-coded column names, so interpolating it
        # is safe; the user value is bound as a parameter.
        identifier = session.fetch_row(f"""
            select uuid, barcode, generated, identifier_set.name as set, identifier_set.use
            from warehouse.identifier
            join warehouse.identifier_set using (identifier_set_id)
            where {id_field} = %s
            """, (id,))

    if not identifier:
        LOG.error(f"Identifier {id_field} «{id}» not found")
        raise NotFound(f"Identifier {id_field} «{id}» not found")

    return identifier
@export
@catch_permission_denied
def fetch_identifier_sets(session: DatabaseSession) -> Any:
    """
    Retrieve every identifier set from the backing database via *session*.

    Returns a list of named tuples with ``name``, ``description``, and
    ``use`` attributes.
    """
    query = """
            select name, description, use
            from warehouse.identifier_set
            """

    with session, session.cursor() as cursor:
        cursor.execute(query)
        sets = list(cursor)

    return sets
@export
@catch_permission_denied
def fetch_identifier_set(session: DatabaseSession, name: str) -> Any:
    """
    Look up the single identifier set called *name* via *session*.

    Returns a named tuple with ``name``, ``description``, and ``use``
    attributes; raises :class:`~werkzeug.exceptions.NotFound` when no set
    with that name exists.
    """
    query = """
            select name, description, use
            from warehouse.identifier_set
            where name = %s
            """

    with session:
        identifier_set = session.fetch_row(query, (name,))

    if not identifier_set:
        LOG.error(f"Identifier set «{name}» not found")
        raise NotFound(f"Identifier set «{name}» not found")

    return identifier_set
@export
@catch_permission_denied
def make_identifier_set(session: DatabaseSession, name: str, **fields) -> bool:
    """
    Create a new identifier set *name* in the backing database using *session*
    if it doesn't already exist, or update if it does exist.
    If *use* and/or *description* are provided as keyword arguments, their values
    are set in the database. Because *use* is a required field in the target table,
    if it is not provided as a keyword argument the query will attempt to retrieve
    its value from an existing record.
    Returns ``True`` if the set was created or updated and ``False`` if it
    already existed and was not updated.
    Raises a :class:`BadRequestDatabaseError` exception if the database reports a
    `constraint` error and a :class:`Forbidden` exception if the database reports a
    `permission denied` error.
    """
    with session, session.cursor() as cursor:
        # Four upsert variants, depending on which optional fields were
        # provided.  Each one guards its UPDATE with a WHERE clause so the
        # rowcount only counts real changes (see the return value below).
        if "use" in fields and "description" in fields:
            # Both fields supplied: straightforward upsert of both columns.
            try:
                cursor.execute("""
                    insert into warehouse.identifier_set (name, use, description)
                    values (%s, %s, nullif(%s, ''))
                    on conflict (name) do update
                    set use = excluded.use,
                    description = excluded.description
                    where identifier_set.use <> excluded.use
                    or coalesce(identifier_set.description,'') <> coalesce(excluded.description,'')
                    """, (name, fields["use"], fields["description"]))
            except (DataError, IntegrityError) as error:
                raise BadRequestDatabaseError(error) from None
        elif "use" in fields:
            # Only *use* supplied: leave any existing description untouched.
            try:
                cursor.execute("""
                    insert into warehouse.identifier_set (name, use)
                    values (%s, %s)
                    on conflict (name) do update
                    set use = excluded.use
                    where identifier_set.use <> excluded.use
                    """, (name, fields["use"]))
            except (DataError, IntegrityError) as error:
                raise BadRequestDatabaseError(error) from None
        elif "description" in fields:
            # Only *description* supplied: *use* is NOT NULL, so pull it from
            # the existing row via the left join (NULL — and thus a
            # constraint error — if the set doesn't exist yet).
            try:
                cursor.execute("""
                    insert into warehouse.identifier_set (name, use, description)
                    select s.name, t.use, s.description
                    from (values(%s, nullif(%s,''))) s(name, description)
                    left join (
                    select name, use
                    FROM warehouse.identifier_set WHERE name = %s
                    ) t using (name)
                    on conflict (name) do update
                    set use = excluded.use, description = excluded.description
                    where identifier_set.use <> excluded.use
                    or coalesce(identifier_set.description,'') <> coalesce(excluded.description,'')
                    """, (name, fields["description"], name))
            except (DataError, IntegrityError) as error:
                raise BadRequestDatabaseError(error) from None
        else:
            # Neither field supplied: effectively a create-if-missing, again
            # sourcing *use* from any existing row.
            try:
                cursor.execute("""
                    insert into warehouse.identifier_set (name, use)
                    select s.name, t.use
                    from (values(%s)) s(name)
                    left join (
                    select name, use
                    FROM warehouse.identifier_set WHERE name = %s
                    ) t using (name)
                    on conflict (name) do update
                    set use = excluded.use
                    where identifier_set.use <> excluded.use
                    """, (name, name))
            except (DataError, IntegrityError) as error:
                raise BadRequestDatabaseError(error) from None
        # rowcount is 1 only when a row was actually inserted or changed.
        return cursor.rowcount == 1
@export
@catch_permission_denied
def fetch_identifier_set_uses(session: DatabaseSession) -> Any:
    """
    Return every identifier set use known to the backing database,
    fetched via *session*, as a list of named tuples with ``use`` and
    ``description`` attributes.
    """
    with session, session.cursor() as cursor:
        cursor.execute("""
            select use, description
            from warehouse.identifier_set_use
            """)
        uses = [row for row in cursor]
    return uses
@export
@catch_permission_denied
def store_sample(session: DatabaseSession, sample: dict) -> Any:
    """
    Validate the given *sample* and insert or update it in the backing database.

    Returns a result dict echoing the sample and collection barcodes, with a
    ``status`` (string) indicating whether the record was inserted, updated,
    or failed validation, and ``details`` giving the reason for any failure.
    """
    with session:
        sample_barcode = sample.pop("sample_id", None)
        sample_identifier = find_identifier(session, sample_barcode) if sample_barcode else None
        collection_barcode = sample.pop("collection_id", None)
        collection_identifier = find_identifier(session, collection_barcode) if collection_barcode else None
        result = {
            "sample_barcode": sample_barcode,
            "collection_barcode": collection_barcode
        }
        # validate barcodes: each provided barcode must resolve to a known
        # identifier whose set has the expected use type.
        if sample_barcode and not sample_identifier:
            result["status"] = "validation_failed"
            result["details"] = f"sample barcode «{sample_barcode}» not found"
        elif sample_identifier and sample_identifier.set_use != 'sample':
            result["status"] = "validation_failed"
            result["details"] = f"barcode «{sample_barcode}» has use type «{sample_identifier.set_use}» instead of expected use type «sample»"
        elif collection_barcode and not collection_identifier:
            result["status"] = "validation_failed"
            result["details"] = f"collection barcode «{collection_barcode}» not found"
        elif collection_identifier and collection_identifier.set_use != 'collection':
            result["status"] = "validation_failed"
            result["details"] = f"barcode «{collection_barcode}» has use type «{collection_identifier.set_use}» instead of expected use type «collection»"
        if result.get("status", None) == "validation_failed":
            LOG.debug(f"Validation failed for {sample} with details: {result.get('details')}")
            return result
        collected_date = sample.pop("collection_date", None)
        # Add date to sample so that it gets written to the 'details' column in warehouse.sample
        if collected_date:
            sample["date"] = collected_date
        # Rename specific properties to include in 'details' column in warehouse.sample
        if "clia_id" in sample:
            sample["clia_barcode"] = sample.pop("clia_id")
        if "aliquoted_date" in sample:
            sample["aliquot_date"] = sample.pop("aliquoted_date")
        if "received_date" in sample:
            sample["arrival_date"] = sample.pop("received_date")
        # When updating an existing row, update the identifiers only if the record has both
        # the 'sample_barcode' and 'collection_barcode' keys
        should_update_identifiers = bool(sample_identifier and collection_identifier)
        try:
            sample, status = upsert_sample(session,
                update_identifiers = should_update_identifiers,
                identifier = sample_identifier.uuid if sample_identifier else None,
                collection_identifier = collection_identifier.uuid if collection_identifier else None,
                collection_date = collected_date,
                encounter_id = None,
                additional_details = sample)
            result["sample"] = sample
            result["status"] = status
        except Exception as e:  # pylint-style broad catch: report the failure
            # back to the caller in the result instead of aborting the request.
            result["status"] = "upsert_error"
            result["details"] = f"error upserting sample record: {str(e)}"
            LOG.debug(f"Error on upsert_sample: {str(e)}")
        return result
@export
class BadRequestDatabaseError(BadRequest):
    """
    Subclass of :class:`id3c.api.exceptions.BadRequest` which takes a
    :class:`psycopg2.DatabaseError` and forms a JSON response detailing the
    error.
    This intentionally does not expose the query context itself, only the
    context related to the data handling.
    """
    def __init__(self, error: DatabaseError) -> None:
        # Surface only the diagnostic fields of the database error; the
        # primary message becomes the top-level error string.
        super().__init__(
            error = error.diag.message_primary,
            extra = {
                "detail": error.diag.message_detail,
                "context": error.diag.context,
            }
        )
| 39.020833 | 154 | 0.625734 |
a04534a0667316cefe26778be85a10d3eb9fe38e | 6,780 | py | Python | finance/migrations/0001_initial.py | Evineit/Savings-django-webapp | 710ddf2a9b5287f769a299168c4741751c756d8d | [
"Apache-2.0"
] | null | null | null | finance/migrations/0001_initial.py | Evineit/Savings-django-webapp | 710ddf2a9b5287f769a299168c4741751c756d8d | [
"Apache-2.0"
] | null | null | null | finance/migrations/0001_initial.py | Evineit/Savings-django-webapp | 710ddf2a9b5287f769a299168c4741751c756d8d | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1 on 2020-10-19 03:25
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.DecimalField(decimal_places=3, max_digits=10)),
('name', models.CharField(blank=True, max_length=50)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accounts', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='RecurringPayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
('amount', models.DecimalField(decimal_places=3, max_digits=10)),
('added_date', models.DateTimeField(auto_now_add=True)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('schedule_type', models.CharField(max_length=50)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.account')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='recpayments', to='finance.category')),
],
),
migrations.CreateModel(
name='RecurringIncome',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
('amount', models.DecimalField(decimal_places=3, max_digits=10)),
('added_date', models.DateTimeField(auto_now_add=True)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('schedule_type', models.CharField(max_length=50)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.account')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='recincomes', to='finance.category')),
],
),
migrations.CreateModel(
name='Income',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=3, max_digits=10)),
('added_date', models.DateTimeField(auto_now_add=True)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.account')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='incomes', to='finance.category')),
],
),
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=3, max_digits=10)),
('added_date', models.DateTimeField(auto_now_add=True)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.account')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='expenses', to='finance.category')),
],
),
]
| 61.081081 | 329 | 0.626254 |
d5cd175f46699b2362d6b184be3aa9b7b62cc13b | 7,873 | py | Python | examples/DeepWisdom/Auto_Tabular/explore.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | 3 | 2020-12-15T02:40:43.000Z | 2021-01-14T02:32:13.000Z | examples/DeepWisdom/Auto_Tabular/explore.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | null | null | null | examples/DeepWisdom/Auto_Tabular/explore.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | 4 | 2021-01-07T05:41:38.000Z | 2021-04-07T08:02:22.000Z | import numpy as np
import gc
import collections
from tensorflow.python.keras import backend as K
from Auto_Tabular.feature.feat_engine import FeatEngine
from Auto_Tabular.utils.log_utils import log, timeit
class Explore:
def __init__(self, metadata, info, model_space, data_space):
self.metadata = metadata
self.info = info
self.info['mode'] = 'first_round'
self.model_space = model_space
self.data_space = data_space
self.model = None
self.model_prior = model_space.model_prior
self.model_idx = 0
self.input_shape = None
self.patience = 3
self.auc_gain_threshold = 1e-4
self.ensemble_std_threshold = 1e-2
self.round_num = 1
self.hist_info = {}
self.dataloader = None
self.feat_engine = FeatEngine()
self.update_predcit = True
self.use_all_data = False
def explore_space(self, train_loop_num, time_remain=None):
self.explore_model_space(train_loop_num)
self.explore_data_space(train_loop_num)
self.create_model(self.metadata.output_dim)
# train and evaluate
self.model.epoch_train(self.dataloader, run_num=self.model.run_num,
is_multi_label=self.info['is_multi_label'], info=self.info, time_remain=time_remain)
if not self.use_all_data:
val_auc = self.model.epoch_valid(self.dataloader)
log('explore model {}, val auc is {}'.format(self.model.name, val_auc))
else:
val_auc = self.model.best_auc+0.0001
self.use_all_data = False
self.update_model_hist(val_auc)
def explore_model_space(self, train_loop_num):
if train_loop_num == 1:
self.model = self.model_space.get_model(self.model_prior[self.model_idx], self.round_num)
self.last_model_type = self.model.type
else:
if self.model.not_rise_num == self.model.patience \
or (self.model.not_gain_num > self.model.not_gain_threhlod) \
or self.model.run_num >= self.model.max_run or self.info['mode'] =='bagging':
log('model {}'.format(self.model.name))
log('not rise num {}'.format(self.model.not_rise_num))
log('not gain num {}'.format(self.model.not_gain_num))
log('run num {}'.format(self.model.run_num))
log('last auc gain {}'.format(self.model.auc_gain))
self.model_idx += 1
self.reset_model_cache()
if self.model_idx == len(self.model_prior):
self.sort_model_prior()
self.info['mode'] = 'bagging'
self.data_space.update = True
self.model = self.model_space.get_model(self.model_prior[self.model_idx], self.round_num)
self.use_all_data = False
if self.model.type != self.last_model_type:
self.dataloader = None
gc.collect()
def explore_data_space(self, train_loop_num):
self.feat_engine.fit_transform(self.data_space, train_loop_num, info=self.info)
if self.data_space.update or self.dataloader is None:
self.dataloader = self.data_space.get_dataloader(train_loop_num=train_loop_num,
round_num=self.round_num,
run_num=self.model.run_num,
use_all_data=self.use_all_data,
model_type=self.model.type)
self.data_space.update = False
def update_model_hist(self, val_auc):
self.model.run_num += 1
self.model.auc_gain = val_auc - self.model.hist_auc[-1]
if self.model.auc_gain < self.auc_gain_threshold:
self.model.not_gain_num += 1
else:
self.model.not_gain_num = 0
self.model.hist_auc.append(val_auc)
if val_auc > self.model.best_auc:
self.model.best_auc = val_auc
self.update_predcit = True
else:
self.update_predcit = False
self.model.not_rise_num += 1
if self.model.run_num >= self.model.all_data_round or self.model.not_gain_num > 3:
self.use_all_data = True
else:
self.use_all_data = False
if hasattr(self.model, 'all_data_round_pre'):
if self.model.run_num == self.model.all_data_round_pre:
self.use_all_data = True
def reset_model_cache(self):
log('clear model cache')
del self.model
self.model = None
gc.collect()
K.clear_session()
def create_model(self, class_num):
if not self.model.is_init:
if self.model.type == 'nn_keras':
self.model.init_model(class_num, shape=self.dataloader['shape'], is_multi_label=self.info['is_multi_label'])
else:
self.model.init_model(class_num)
def sort_model_prior(self):
log('old models prior is {}'.format(self.model_prior))
model_perform = collections.defaultdict(list)
for name, info in self.hist_info.items():
first_name = name.split('_')[0]
auc = info[0]
if first_name in model_perform:
model_perform[first_name].append(auc)
self.model_prior = sorted(self.model_prior, key=lambda x: np.mean(model_perform[x]), reverse=True)
log('new models prior is {}'.format(self.model_prior))
self.model_idx = 0
self.round_num += 1
def get_top_preds(self):
models_name = self.hist_info.keys()
models_auc = [self.hist_info[name][0] for name in models_name]
models_name_sorted, models_auc_sored = (list(i) for i in
zip(*sorted(zip(models_name, models_auc), key=lambda x: x[1], reverse=True)))
for i in range(len(models_auc_sored), 0, -1):
std = np.std(models_auc_sored[:i])
top_num = i
if std < self.ensemble_std_threshold:
break
log('top {} model auc std is {}'.format(top_num, std))
top_auc = np.array(models_auc_sored[:top_num])
# weights = top_auc / top_auc.sum()
# print(weights)
top_auc = top_auc + 15*(top_auc - top_auc.mean())
top_auc = np.array([max(0.01, i) for i in top_auc])
weights = top_auc / top_auc.sum()
print(weights)
top_preds = []
for i in range(top_num):
name = models_name_sorted[i]
rank = i + 1
auc = models_auc_sored[i]
weight = weights[i]
preds = self.hist_info[name][1]
top_preds.append((name, rank, auc, weight, preds))
return top_preds
def predict(self):
if self.update_predcit:
preds = self.model.predict(self.dataloader)
if self.model.hist_auc[-1] == self.model.best_auc:
self.model.best_preds = preds
self.hist_info[self.model.name] = (self.model.best_auc, self.model.best_preds)
preds = self.blending_predict()
return preds
#@timeit
def blending_predict(self):
top_preds = self.get_top_preds()
ensemble_preds = 0
for name, rank, auc, weight, preds in top_preds:
m = np.mean(preds)
log('blending: {}, rank: {}, mean {}, val auc: {} weight {}'.format(name, rank, m, auc, weight))
ensemble_preds += weight * preds/m
return ensemble_preds
def stacking_predict(self):
pass
def softmax(self, x):
x = x - x.max()
e = np.exp(x)
return e / e.sum()
| 36.962441 | 125 | 0.583259 |
9e37b7caeb8933cd6154466cb8ae149fcf13b5ba | 52,801 | py | Python | superset/utils/core.py | ditutu/superset | 4cb79e50172cc857d73dc3ba76f9f2063d97d762 | [
"Apache-2.0"
] | 2 | 2021-03-17T18:41:18.000Z | 2021-05-27T16:45:12.000Z | superset/utils/core.py | ditutu/superset | 4cb79e50172cc857d73dc3ba76f9f2063d97d762 | [
"Apache-2.0"
] | 17 | 2021-03-18T21:17:31.000Z | 2021-12-06T13:54:03.000Z | superset/utils/core.py | ditutu/superset | 4cb79e50172cc857d73dc3ba76f9f2063d97d762 | [
"Apache-2.0"
] | 1 | 2022-01-10T13:31:22.000Z | 2022-01-10T13:31:22.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility functions used across Superset"""
import collections
import decimal
import errno
import json
import logging
import os
import platform
import re
import signal
import smtplib
import tempfile
import threading
import traceback
import uuid
import zlib
from datetime import date, datetime, time, timedelta
from distutils.util import strtobool
from email.mime.application import MIMEApplication
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from enum import Enum, IntEnum
from timeit import default_timer
from types import TracebackType
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from urllib.parse import unquote_plus
import bleach
import markdown as md
import numpy as np
import pandas as pd
import sqlalchemy as sa
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from flask import current_app, flash, g, Markup, render_template, request
from flask_appbuilder import SQLA
from flask_appbuilder.security.sqla.models import Role, User
from flask_babel import gettext as __
from flask_babel.speaklater import LazyString
from pandas.api.types import infer_dtype
from pandas.core.dtypes.common import is_numeric_dtype
from sqlalchemy import event, exc, select, Text
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.engine import Connection, Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.type_api import Variant
from sqlalchemy.types import TEXT, TypeDecorator, TypeEngine
from typing_extensions import TypedDict
import _thread # pylint: disable=C0411
from superset.constants import (
EXAMPLES_DB_UUID,
EXTRA_FORM_DATA_APPEND_KEYS,
EXTRA_FORM_DATA_OVERRIDE_EXTRA_KEYS,
EXTRA_FORM_DATA_OVERRIDE_REGULAR_MAPPINGS,
)
from superset.errors import ErrorLevel, SupersetErrorType
from superset.exceptions import (
CertificateException,
SupersetException,
SupersetTimeoutException,
)
from superset.typing import AdhocMetric, FlaskResponse, FormData, Metric
from superset.utils.dates import datetime_to_epoch, EPOCH
from superset.utils.hashing import md5_sha_from_dict, md5_sha_from_str
try:
from pydruid.utils.having import Having
except ImportError:
pass
if TYPE_CHECKING:
from superset.connectors.base.models import BaseColumn, BaseDatasource
from superset.models.core import Database
# Quiet the markdown library's verbose logger before module use.
logging.getLogger("MARKDOWN").setLevel(logging.INFO)
logger = logging.getLogger(__name__)
# Canonical alias for the temporal column in query result sets.
DTTM_ALIAS = "__timestamp"
# (sic) separator used when naming time-comparison columns; the misspelling
# is part of the public value and must be preserved.
TIME_COMPARISION = "__"
JS_MAX_INTEGER = 9007199254740991  # Largest int Java Script can handle 2^53-1
InputType = TypeVar("InputType")
class LenientEnum(Enum):
    """Enum base whose :meth:`get` looks a member up by value, yielding
    ``None`` instead of raising for values that aren't defined."""
    @classmethod
    def get(cls, value: Any) -> Any:
        try:
            member = super().__new__(cls, value)
        except ValueError:
            return None
        return member
class AdhocMetricExpressionType(str, Enum):
    """How an ad-hoc metric is expressed: SIMPLE or free-form SQL."""
    SIMPLE = "SIMPLE"
    SQL = "SQL"
class AnnotationType(str, Enum):
    """Kinds of annotation layers."""
    FORMULA = "FORMULA"
    INTERVAL = "INTERVAL"
    EVENT = "EVENT"
    TIME_SERIES = "TIME_SERIES"
class GenericDataType(IntEnum):
    """
    Generic database column type that fits both frontend and backend.
    Int-valued so it serializes compactly in API responses.
    """
    NUMERIC = 0
    STRING = 1
    TEMPORAL = 2
    BOOLEAN = 3
    # ARRAY = 4 # Mapping all the complex data types to STRING for now
    # JSON = 5 # and leaving these as a reminder.
    # MAP = 6
    # ROW = 7
class ChartDataResultFormat(str, Enum):
    """
    Chart data response format (serialization requested by the caller).
    """
    CSV = "csv"
    JSON = "json"
class ChartDataResultType(str, Enum):
    """
    Chart data response type — which portion of the chart-data payload
    the caller asked for.
    """
    COLUMNS = "columns"
    FULL = "full"
    QUERY = "query"
    RESULTS = "results"
    SAMPLES = "samples"
    TIMEGRAINS = "timegrains"
    POST_PROCESSED = "post_processed"
class DatasourceDict(TypedDict):
    """Serialized reference to a datasource: its type name and numeric id."""
    type: str
    id: int
class ExtraFiltersTimeColumnType(str, Enum):
    """Reserved pseudo-column names used to carry time-related extra filters."""
    GRANULARITY = "__granularity"
    TIME_COL = "__time_col"
    TIME_GRAIN = "__time_grain"
    TIME_ORIGIN = "__time_origin"
    TIME_RANGE = "__time_range"
class FilterOperator(str, Enum):
    """
    Operators used by filter controls
    """
    EQUALS = "=="
    NOT_EQUALS = "!="
    GREATER_THAN = ">"
    LESS_THAN = "<"
    GREATER_THAN_OR_EQUALS = ">="
    LESS_THAN_OR_EQUALS = "<="
    LIKE = "LIKE"
    ILIKE = "ILIKE"
    IS_NULL = "IS NULL"
    IS_NOT_NULL = "IS NOT NULL"
    IN = "IN"  # pylint: disable=invalid-name
    NOT_IN = "NOT IN"
    REGEX = "REGEX"
    IS_TRUE = "IS TRUE"
    IS_FALSE = "IS FALSE"
class PostProcessingBoxplotWhiskerType(str, Enum):
    """
    Ways of computing whisker bounds for the boxplot post-processing
    operation. (Previous docstring was copy-pasted from the contribution
    enum and did not describe this class.)
    """
    TUKEY = "tukey"
    MINMAX = "min/max"
    PERCENTILE = "percentile"
class PostProcessingContributionOrientation(str, Enum):
    """
    Axis along which a cell's contribution to the row/column total is
    calculated.
    """
    ROW = "row"
    COLUMN = "column"
class QueryMode(str, LenientEnum):
    """
    Whether the query runs on aggregate or returns raw records.
    Lenient: ``QueryMode.get`` yields None for unknown values.
    """
    RAW = "raw"
    AGGREGATE = "aggregate"
class QuerySource(Enum):
    """
    The source of a SQL query.
    """
    CHART = 0
    DASHBOARD = 1
    SQL_LAB = 2
class QueryStatus(str, Enum):  # pylint: disable=too-few-public-methods
    """Enum-type class for query statuses (a query's lifecycle states)."""
    STOPPED: str = "stopped"
    FAILED: str = "failed"
    PENDING: str = "pending"
    RUNNING: str = "running"
    SCHEDULED: str = "scheduled"
    SUCCESS: str = "success"
    FETCHING: str = "fetching"
    TIMED_OUT: str = "timed_out"
class DashboardStatus(str, Enum):
    """Dashboard status used for frontend filters"""
    PUBLISHED = "published"
    DRAFT = "draft"
class ReservedUrlParameters(str, Enum):
    """
    Reserved URL parameters that are used internally by Superset. These will not be
    passed to chart queries, as they control the behavior of the UI.
    """
    STANDALONE = "standalone"
    EDIT_MODE = "edit"
    @staticmethod
    def is_standalone_mode() -> Optional[bool]:
        """Return True when the current request asked for standalone mode.
        Previously this could return the raw query-string value (a str) or
        None despite the ``Optional[bool]`` annotation; coerce to a real
        bool, which is truthiness-compatible for all existing callers.
        """
        standalone_param = request.args.get(ReservedUrlParameters.STANDALONE.value)
        standalone: Optional[bool] = bool(
            standalone_param and standalone_param not in ("false", "0")
        )
        return standalone
class RowLevelSecurityFilterType(str, Enum):
    """Kinds of row-level security filters."""
    REGULAR = "Regular"
    BASE = "Base"
class TimeRangeEndpoint(str, Enum):
    """
    The time range endpoint types which represent inclusive, exclusive, or unknown.
    Unknown represents endpoints which are ill-defined as though the interval may be
    [start, end] the filter may behave like (start, end] due to mixed data types and
    lexicographical ordering.
    :see: https://github.com/apache/superset/issues/6360
    """
    EXCLUSIVE = "exclusive"
    INCLUSIVE = "inclusive"
    UNKNOWN = "unknown"  # ill-defined endpoint behavior; see docstring
class TemporalType(str, Enum):
    """
    Supported temporal types (column type names treated as temporal)
    """
    DATE = "DATE"
    DATETIME = "DATETIME"
    SMALLDATETIME = "SMALLDATETIME"
    TEXT = "TEXT"
    TIME = "TIME"
    TIMESTAMP = "TIMESTAMP"
class ColumnTypeSource(Enum):
    """Where a column's type information was obtained from.
    (Member name CURSOR_DESCRIPION keeps its original misspelling.)"""
    GET_TABLE = 1
    CURSOR_DESCRIPION = 2
class ColumnSpec(NamedTuple):
    """Resolved specification of a column: its SQLAlchemy type, generic
    type, temporal flag, and optional python datetime format string."""
    sqla_type: Union[TypeEngine, str]
    generic_type: GenericDataType
    is_dttm: bool
    python_date_format: Optional[str] = None
try:
    # Having might not have been imported.
    class DimSelector(Having):
        """Druid ``having`` clause that selects on a dimension value
        rather than on an aggregation."""
        def __init__(self, **args: Any) -> None:
            # Just a hack to prevent any exceptions
            Having.__init__(self, type="equalTo", aggregation=None, value=None)
            self.having = {
                "having": {
                    "type": "dimSelector",
                    "dimension": args["dimension"],
                    "value": args["value"],
                }
            }
except NameError:
    # pydruid isn't installed; DimSelector is simply unavailable.
    pass
def flasher(msg: str, severity: str = "message") -> None:
    """Show *msg* to the user via Flask's flash(); outside a request
    context (where flash raises RuntimeError) fall back to logging."""
    try:
        flash(msg, severity)
    except RuntimeError:
        if severity != "danger":
            logger.info(msg)
        else:
            # Danger messages also capture the active exception context.
            logger.error(msg, exc_info=True)
def parse_js_uri_path_item(
    item: Optional[str], unquote: bool = True, eval_undefined: bool = False
) -> Optional[str]:
    """Normalize a URI path component produced by JavaScript.
    :param item: a uri path component
    :param unquote: when True, decode the component with
        urllib.parse.unquote_plus()
    :param eval_undefined: when True, treat the JS literals 'null' and
        'undefined' as missing values and return None for them
    :return: None, the original item, or the unquoted item
    """
    if eval_undefined and item in ("null", "undefined"):
        item = None
    if item and unquote:
        return unquote_plus(item)
    return item
def cast_to_num(value: Optional[Union[float, int, str]]) -> Optional[Union[float, int]]:
    """Cast *value* to an int or float when possible, otherwise None.
    Numbers pass through unchanged; all-digit strings become ints, other
    numeric-looking strings become floats, and anything else yields None.
    >>> cast_to_num('1 ')
    1.0
    >>> cast_to_num('5')
    5
    >>> cast_to_num('5.2')
    5.2
    >>> cast_to_num(10)
    10
    >>> cast_to_num(None) is None
    True
    >>> cast_to_num('this is not a string') is None
    True
    """
    if value is None or isinstance(value, (int, float)):
        return value
    if value.isdigit():
        return int(value)
    try:
        return float(value)
    except ValueError:
        return None
def list_minus(l: List[Any], minus: List[Any]) -> List[Any]:
    """Return the elements of *l* that do not appear in *minus*.
    >>> list_minus([1, 2, 3], [2])
    [1, 3]
    """
    kept = []
    for element in l:
        if element not in minus:
            kept.append(element)
    return kept
class DashboardEncoder(json.JSONEncoder):
    """JSON encoder for dashboard export: always sorts keys, stringifies
    UUIDs, and wraps arbitrary objects / datetimes in tagged dicts so they
    can be identified on import."""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.sort_keys = True
    def default(self, o: Any) -> Union[Dict[Any, Any], str]:
        if isinstance(o, uuid.UUID):
            return str(o)
        try:
            # Generic objects: serialize their attribute dict, minus
            # SQLAlchemy's internal bookkeeping entry.
            attrs = {
                key: value
                for key, value in o.__dict__.items()
                if key != "_sa_instance_state"
            }
            return {"__{}__".format(o.__class__.__name__): attrs}
        except Exception:  # pylint: disable=broad-except
            if isinstance(o, datetime):
                # Second precision is enough for export purposes.
                return {"__datetime__": o.replace(microsecond=0).isoformat()}
            return json.JSONEncoder(sort_keys=True).default(o)
class JSONEncodedDict(TypeDecorator):  # pylint: disable=abstract-method
    """Represents an immutable structure as a json-encoded string."""
    impl = TEXT
    def process_bind_param(
        self, value: Optional[Dict[Any, Any]], dialect: str
    ) -> Optional[str]:
        # Serialize on the way into the database; NULL stays NULL.
        return json.dumps(value) if value is not None else None
    def process_result_value(
        self, value: Optional[str], dialect: str
    ) -> Optional[Dict[Any, Any]]:
        # Deserialize on the way out of the database; NULL stays NULL.
        return json.loads(value) if value is not None else None
def format_timedelta(time_delta: timedelta) -> str:
    """
    Render *time_delta* so negative deltas are easy for humans to read.
    ``str()`` on a negative timedelta shows the normalized-but-confusing
    form; instead show a minus sign followed by the absolute delta.
    >>> td = timedelta(0) - timedelta(days=1, hours=5,minutes=6)
    >>> str(td)
    '-2 days, 18:54:00'
    >>> format_timedelta(td)
    '-1 day, 5:06:00'
    """
    if time_delta >= timedelta(0):
        return str(time_delta)
    return "-" + str(abs(time_delta))
def base_json_conv(  # pylint: disable=inconsistent-return-statements,too-many-return-statements
    obj: Any,
) -> Any:
    """Convert common non-JSON-serializable values to serializable ones.

    Handles numpy scalars/arrays, sets, Decimal, UUID, timedelta, bytes and
    lazy i18n strings. Returns ``None`` implicitly for any unhandled type so
    callers can fall back to their own serialization.
    """
    # A memoryview is first collapsed to bytes so the bytes branch below
    # handles it.
    if isinstance(obj, memoryview):
        obj = obj.tobytes()
    if isinstance(obj, np.int64):
        return int(obj)
    if isinstance(obj, np.bool_):
        return bool(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, set):
        return list(obj)
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    if isinstance(obj, uuid.UUID):
        return str(obj)
    if isinstance(obj, timedelta):
        return format_timedelta(obj)
    if isinstance(obj, bytes):
        try:
            return obj.decode("utf-8")
        except Exception:  # pylint: disable=broad-except
            # Non-UTF-8 payloads get an opaque placeholder instead of raising.
            return "[bytes]"
    if isinstance(obj, LazyString):
        return str(obj)
def json_iso_dttm_ser(obj: Any, pessimistic: bool = False) -> str:
    """
    json serializer that deals with dates

    Dates/times are rendered as ISO-8601 strings; other known types are
    handled by ``base_json_conv``. With ``pessimistic=True`` unknown types
    produce a placeholder string instead of raising TypeError.

    >>> dttm = datetime(1970, 1, 1)
    >>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
    '{"dttm": "1970-01-01T00:00:00"}'
    """
    # Try the generic conversions (numpy, Decimal, UUID, ...) first.
    val = base_json_conv(obj)
    if val is not None:
        return val
    if isinstance(obj, (datetime, date, time, pd.Timestamp)):
        obj = obj.isoformat()
    else:
        if pessimistic:
            return "Unserializable [{}]".format(type(obj))

        raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
    return obj
def pessimistic_json_iso_dttm_ser(obj: Any) -> str:
    """Proxy to call json_iso_dttm_ser in a pessimistic way

    If one of object is not serializable to json, it will still succeed
    (a placeholder string is emitted instead of raising TypeError)."""
    return json_iso_dttm_ser(obj, pessimistic=True)
def json_int_dttm_ser(obj: Any) -> float:
    """json serializer that deals with dates

    Datetimes/timestamps are converted to epoch *milliseconds*; plain dates
    to milliseconds since the epoch date. Other known types are delegated to
    ``base_json_conv``; anything else raises TypeError.
    """
    val = base_json_conv(obj)
    if val is not None:
        return val
    if isinstance(obj, (datetime, pd.Timestamp)):
        obj = datetime_to_epoch(obj)
    elif isinstance(obj, date):
        # date has no time component: seconds since EPOCH date, scaled to ms.
        obj = (obj - EPOCH.date()).total_seconds() * 1000
    else:
        raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
    return obj
def json_dumps_w_dates(payload: Dict[Any, Any]) -> str:
    """Serialize ``payload`` to JSON, encoding dates as epoch milliseconds."""
    return json.dumps(payload, default=json_int_dttm_ser)
def error_msg_from_exception(ex: Exception) -> str:
    """Translate exception into error message

    Database have different ways to handle exception. This function attempts
    to make sense of the exception object and construct a human readable
    sentence.

    TODO(bkyryliuk): parse the Presto error message from the connection
        created via create_engine.
    engine = create_engine('presto://localhost:3506/silver') -
        gives an e.message as the str(dict)
    presto.connect('localhost', port=3506, catalog='silver') - as a dict.
    The latter version is parsed correctly by this function.
    """
    # Some DBAPI drivers attach a ``message`` attribute (str or dict).
    message = getattr(ex, "message", None)
    extracted = ""
    if isinstance(message, dict):
        extracted = message.get("message")
    elif message:
        extracted = message
    # Fall back to the exception's own string representation.
    return extracted or str(ex)
def markdown(raw: str, markup_wrap: Optional[bool] = False) -> str:
    """Render markdown to sanitized HTML.

    The output is restricted to a whitelist of tags/attributes via bleach to
    prevent XSS from user-authored markdown. With ``markup_wrap=True`` the
    result is wrapped in a Markup object so templates won't re-escape it.

    :param raw: markdown source (``None``/empty is treated as "")
    :param markup_wrap: wrap result in ``Markup`` when True
    :returns: sanitized HTML string (or Markup)
    """
    safe_markdown_tags = [
        "h1",
        "h2",
        "h3",
        "h4",
        "h5",
        "h6",
        "b",
        "i",
        "strong",
        "em",
        "tt",
        "p",
        "br",
        "span",
        "div",
        "blockquote",
        "code",
        "hr",
        "ul",
        "ol",
        "li",
        "dd",
        "dt",
        "img",
        "a",
    ]
    safe_markdown_attrs = {
        "img": ["src", "alt", "title"],
        "a": ["href", "alt", "title"],
    }
    safe = md.markdown(
        raw or "",
        extensions=[
            "markdown.extensions.tables",
            "markdown.extensions.fenced_code",
            "markdown.extensions.codehilite",
        ],
    )
    # Strip anything outside the whitelist (e.g. <script>).
    safe = bleach.clean(safe, safe_markdown_tags, safe_markdown_attrs)
    if markup_wrap:
        safe = Markup(safe)
    return safe
def readfile(file_path: str) -> Optional[str]:
    """Return the entire text content of ``file_path``."""
    with open(file_path) as file_handle:
        return file_handle.read()
def generic_find_constraint_name(
    table: str, columns: Set[str], referenced: str, database: SQLA
) -> Optional[str]:
    """Utility to find a constraint name in alembic migrations

    Reflects ``table`` from the live database and returns the name of the
    foreign-key constraint that points at ``referenced`` through exactly
    ``columns``, or ``None`` when no such constraint exists.
    """
    # autoload reflects the table definition from the connected database.
    tbl = sa.Table(
        table, database.metadata, autoload=True, autoload_with=database.engine
    )

    for fk in tbl.foreign_key_constraints:
        if fk.referred_table.name == referenced and set(fk.column_keys) == columns:
            return fk.name

    return None
def generic_find_fk_constraint_name(  # pylint: disable=invalid-name
    table: str, columns: Set[str], referenced: str, insp: Inspector
) -> Optional[str]:
    """Utility to find a foreign-key constraint name in alembic migrations

    Returns the name of the first foreign key on ``table`` that references
    ``referenced`` through exactly ``columns``, or ``None``.
    """
    for foreign_key in insp.get_foreign_keys(table):
        points_at_target = foreign_key["referred_table"] == referenced
        same_columns = set(foreign_key["referred_columns"]) == columns
        if points_at_target and same_columns:
            return foreign_key["name"]
    return None
def generic_find_fk_constraint_names(  # pylint: disable=invalid-name
    table: str, columns: Set[str], referenced: str, insp: Inspector
) -> Set[str]:
    """Utility to find foreign-key constraint names in alembic migrations

    Returns the names of every foreign key on ``table`` that references
    ``referenced`` through exactly ``columns`` (possibly an empty set).
    """
    return {
        fk_def["name"]
        for fk_def in insp.get_foreign_keys(table)
        if fk_def["referred_table"] == referenced
        and set(fk_def["referred_columns"]) == columns
    }
def generic_find_uq_constraint_name(
    table: str, columns: Set[str], insp: Inspector
) -> Optional[str]:
    """Utility to find a unique constraint name in alembic migrations

    Returns the name of the first unique constraint on ``table`` covering
    exactly ``columns``, or ``None``.
    """
    return next(
        (
            constraint["name"]
            for constraint in insp.get_unique_constraints(table)
            if set(constraint["column_names"]) == columns
        ),
        None,
    )
def get_datasource_full_name(
    database_name: str, datasource_name: str, schema: Optional[str] = None
) -> str:
    """Assemble a bracketed full name: ``[db].[schema].[table]``.

    The schema segment is omitted when ``schema`` is falsy.
    """
    parts = [database_name, schema, datasource_name] if schema else [
        database_name,
        datasource_name,
    ]
    return ".".join("[{}]".format(part) for part in parts)
def validate_json(obj: Union[bytes, bytearray, str]) -> None:
    """Validate that ``obj`` contains parseable JSON.

    Falsy values (empty string/bytes, ``None``) are accepted without parsing.

    :param obj: JSON document as ``str``/``bytes``/``bytearray``
    :raises SupersetException: if the payload cannot be parsed as JSON
    """
    if obj:
        try:
            json.loads(obj)
        except Exception as ex:  # pylint: disable=broad-except
            logger.error("JSON is not valid %s", str(ex), exc_info=True)
            # Chain the original parse error (PEP 3134) so the root cause is
            # visible in tracebacks instead of being silently discarded.
            raise SupersetException("JSON is not valid") from ex
class SigalrmTimeout:
    """
    To be used in a ``with`` block and timeout its content.

    Uses ``signal.SIGALRM``, so it only works in the main thread of the main
    interpreter and is not available on Windows (see ``TimerTimeout``).
    """

    def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(  # pylint: disable=unused-argument
        self, signum: int, frame: Any
    ) -> None:
        # SIGALRM handler: convert the alarm into a Superset timeout error.
        logger.error("Process timed out", exc_info=True)
        raise SupersetTimeoutException(
            error_type=SupersetErrorType.BACKEND_TIMEOUT_ERROR,
            message=self.error_message,
            level=ErrorLevel.ERROR,
            extra={"timeout": self.seconds},
        )

    def __enter__(self) -> None:
        try:
            # signal.signal raises ValueError outside the main thread, hence
            # the explicit thread check and the broad warning below.
            if threading.current_thread() == threading.main_thread():
                signal.signal(signal.SIGALRM, self.handle_timeout)
                signal.alarm(self.seconds)
        except ValueError as ex:
            logger.warning("timeout can't be used in the current context")
            logger.exception(ex)

    def __exit__(  # pylint: disable=redefined-outer-name,unused-variable,redefined-builtin
        self, type: Any, value: Any, traceback: TracebackType
    ) -> None:
        try:
            # Cancel any pending alarm so it can't fire after the block.
            signal.alarm(0)
        except ValueError as ex:
            logger.warning("timeout can't be used in the current context")
            logger.exception(ex)
class TimerTimeout:
    """Timer-based timeout context manager (Windows-safe SIGALRM fallback).

    A background ``threading.Timer`` calls ``_thread.interrupt_main`` after
    ``seconds``, which raises ``KeyboardInterrupt`` in the main thread; that
    interrupt is translated into a ``SupersetTimeoutException`` on exit.
    """

    def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
        self.seconds = seconds
        self.error_message = error_message
        self.timer = threading.Timer(seconds, _thread.interrupt_main)

    def __enter__(self) -> None:
        self.timer.start()

    def __exit__(  # pylint: disable=redefined-outer-name,unused-variable,redefined-builtin
        self, type: Any, value: Any, traceback: TracebackType
    ) -> None:
        self.timer.cancel()
        if type is KeyboardInterrupt:  # raised by _thread.interrupt_main
            raise SupersetTimeoutException(
                error_type=SupersetErrorType.BACKEND_TIMEOUT_ERROR,
                message=self.error_message,
                level=ErrorLevel.ERROR,
                extra={"timeout": self.seconds},
            )
# Windows has no support for SIGALRM, so we use the timer based timeout
# there; everywhere else the signal-based implementation is preferred since
# it can interrupt blocking C calls.
timeout: Union[Type[TimerTimeout], Type[SigalrmTimeout]] = (
    TimerTimeout if platform.system() == "Windows" else SigalrmTimeout
)
def pessimistic_connection_handling(some_engine: Engine) -> None:
    """Install a pre-use connection ping on ``some_engine``.

    Registers an ``engine_connect`` listener that issues ``SELECT 1`` before
    each checkout; stale/disconnected connections are transparently
    invalidated and re-established (SQLAlchemy's "pessimistic disconnect
    handling" recipe).
    """

    @event.listens_for(some_engine, "engine_connect")
    def ping_connection(  # pylint: disable=unused-variable
        connection: Connection, branch: bool
    ) -> None:
        if branch:
            # 'branch' refers to a sub-connection of a connection,
            # we don't want to bother pinging on these.
            return

        # turn off 'close with result'. This flag is only used with
        # 'connectionless' execution, otherwise will be False in any case
        save_should_close_with_result = connection.should_close_with_result
        connection.should_close_with_result = False

        try:
            # run a SELECT 1. use a core select() so that
            # the SELECT of a scalar value without a table is
            # appropriately formatted for the backend
            connection.scalar(select([1]))
        except exc.DBAPIError as err:
            # catch SQLAlchemy's DBAPIError, which is a wrapper
            # for the DBAPI's exception. It includes a .connection_invalidated
            # attribute which specifies if this connection is a 'disconnect'
            # condition, which is based on inspection of the original exception
            # by the dialect in use.
            if err.connection_invalidated:
                # run the same SELECT again - the connection will re-validate
                # itself and establish a new connection. The disconnect detection
                # here also causes the whole connection pool to be invalidated
                # so that all stale connections are discarded.
                connection.scalar(select([1]))
            else:
                raise
        finally:
            # restore 'close with result'
            connection.should_close_with_result = save_should_close_with_result
def notify_user_about_perm_udate(  # pylint: disable=too-many-arguments
    granter: User,
    user: User,
    role: Role,
    datasource: "BaseDatasource",
    tpl_name: str,
    config: Dict[str, Any],
) -> None:
    """Email ``user`` that ``granter`` gave them access to ``datasource``.

    Renders ``tpl_name`` with the grant details and sends it via SMTP,
    BCC'ing the granter. Honors the EMAIL_NOTIFICATIONS config flag (dry-run
    when disabled).
    """
    msg = render_template(
        tpl_name, granter=granter, user=user, role=role, datasource=datasource
    )
    logger.info(msg)
    subject = __(
        "[Superset] Access to the datasource %(name)s was granted",
        name=datasource.full_name,
    )
    send_email_smtp(
        user.email,
        subject,
        msg,
        config,
        bcc=granter.email,
        # When notifications are disabled only log the message (dry run).
        dryrun=not config["EMAIL_NOTIFICATIONS"],
    )
def send_email_smtp(  # pylint: disable=invalid-name,too-many-arguments,too-many-locals
    to: str,
    subject: str,
    html_content: str,
    config: Dict[str, Any],
    files: Optional[List[str]] = None,
    data: Optional[Dict[str, str]] = None,
    images: Optional[Dict[str, bytes]] = None,
    dryrun: bool = False,
    cc: Optional[str] = None,
    bcc: Optional[str] = None,
    mime_subtype: str = "mixed",
) -> None:
    """
    Send an email with html content, eg:
    send_email_smtp(
        'test@example.com', 'foo', '<b>Foo</b> bar',['/dev/null'], dryrun=True)

    :param to: recipient address(es); comma/semicolon/space separated
    :param subject: email subject line
    :param html_content: HTML body
    :param config: app config providing the SMTP_* settings
    :param files: paths of files to attach (read from disk)
    :param data: name -> body mapping of in-memory attachments
    :param images: Content-ID -> bytes mapping of inline images
    :param dryrun: when True, only log the message instead of sending
    :param cc: CC address(es)
    :param bcc: BCC address(es); kept out of the headers
    :param mime_subtype: multipart subtype (default "mixed")
    """
    smtp_mail_from = config["SMTP_MAIL_FROM"]
    smtp_mail_to = get_email_address_list(to)

    msg = MIMEMultipart(mime_subtype)
    msg["Subject"] = subject
    msg["From"] = smtp_mail_from
    msg["To"] = ", ".join(smtp_mail_to)
    msg.preamble = "This is a multi-part message in MIME format."

    recipients = smtp_mail_to
    if cc:
        smtp_mail_cc = get_email_address_list(cc)
        msg["CC"] = ", ".join(smtp_mail_cc)
        recipients = recipients + smtp_mail_cc

    if bcc:
        # don't add bcc in header
        smtp_mail_bcc = get_email_address_list(bcc)
        recipients = recipients + smtp_mail_bcc

    msg["Date"] = formatdate(localtime=True)
    mime_text = MIMEText(html_content, "html")
    msg.attach(mime_text)

    # Attach files by reading them from disk
    for fname in files or []:
        basename = os.path.basename(fname)
        with open(fname, "rb") as f:
            msg.attach(
                MIMEApplication(
                    f.read(),
                    Content_Disposition="attachment; filename='%s'" % basename,
                    Name=basename,
                )
            )

    # Attach any files passed directly
    for name, body in (data or {}).items():
        msg.attach(
            MIMEApplication(
                body, Content_Disposition="attachment; filename='%s'" % name, Name=name
            )
        )

    # Attach any inline images, which may be required for display in
    # HTML content (inline)
    for msgid, imgdata in (images or {}).items():
        image = MIMEImage(imgdata)
        image.add_header("Content-ID", "<%s>" % msgid)
        image.add_header("Content-Disposition", "inline")
        msg.attach(image)

    send_mime_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun)
def send_mime_email(
    e_from: str,
    e_to: List[str],
    mime_msg: MIMEMultipart,
    config: Dict[str, Any],
    dryrun: bool = False,
) -> None:
    """Deliver a prepared MIME message over SMTP.

    Supports SSL/STARTTLS and optional authentication based on the SMTP_*
    config keys. With ``dryrun=True`` the message is logged instead of sent.
    """
    smtp_host = config["SMTP_HOST"]
    smtp_port = config["SMTP_PORT"]
    smtp_user = config["SMTP_USER"]
    smtp_password = config["SMTP_PASSWORD"]
    smtp_starttls = config["SMTP_STARTTLS"]
    smtp_ssl = config["SMTP_SSL"]

    if not dryrun:
        # Pick implicit-TLS vs plain connection up front; STARTTLS upgrades
        # a plain connection afterwards when requested.
        smtp = (
            smtplib.SMTP_SSL(smtp_host, smtp_port)
            if smtp_ssl
            else smtplib.SMTP(smtp_host, smtp_port)
        )
        if smtp_starttls:
            smtp.starttls()
        if smtp_user and smtp_password:
            smtp.login(smtp_user, smtp_password)
        logger.debug("Sent an email to %s", str(e_to))
        smtp.sendmail(e_from, e_to, mime_msg.as_string())
        smtp.quit()
    else:
        logger.info("Dryrun enabled, email notification content is below:")
        logger.info(mime_msg.as_string())
def get_email_address_list(address_string: str) -> List[str]:
    """Split a comma/semicolon/whitespace separated address string.

    Non-string input yields an empty list; blank fragments are dropped.
    """
    if not isinstance(address_string, str):
        return []
    fragments = re.split(r",|\s|;", address_string)
    return [fragment.strip() for fragment in fragments if fragment.strip()]
def get_email_address_str(address_string: str) -> str:
    """Normalize an address string to a canonical ", "-joined form."""
    return ", ".join(get_email_address_list(address_string))
def choicify(values: Iterable[Any]) -> List[Tuple[Any, Any]]:
    """Takes an iterable and makes a list of (value, value) choice tuples."""
    pairs: List[Tuple[Any, Any]] = []
    for value in values:
        pairs.append((value, value))
    return pairs
def zlib_compress(data: Union[bytes, str]) -> bytes:
    """
    Compress things in a py2/3 safe fashion

    Strings are UTF-8 encoded before compression; bytes pass straight through.

    >>> json_str = '{"test": 1}'
    >>> blob = zlib_compress(json_str)
    """
    raw = data.encode("utf-8") if isinstance(data, str) else data
    return zlib.compress(raw)
def zlib_decompress(blob: bytes, decode: Optional[bool] = True) -> Union[bytes, str]:
    """
    Decompress things to a string in a py2/3 safe fashion

    >>> json_str = '{"test": 1}'
    >>> blob = zlib_compress(json_str)
    >>> got_str = zlib_decompress(blob)
    >>> got_str == json_str
    True
    """
    # Tolerate a str blob by encoding it first (legacy py2 behavior).
    raw = blob if isinstance(blob, bytes) else bytes(blob, "utf-8")
    decompressed = zlib.decompress(raw)
    if decode:
        return decompressed.decode("utf-8")
    return decompressed
def to_adhoc(
    filt: Dict[str, Any], expression_type: str = "SIMPLE", clause: str = "where"
) -> Dict[str, Any]:
    """Convert a legacy filter dict into the ad-hoc filter format.

    SIMPLE filters carry subject/operator/comparator; SQL filters carry a
    free-form expression pulled from the given clause key.
    """
    result: Dict[str, Any] = {
        "clause": clause.upper(),
        "expressionType": expression_type,
        "isExtra": bool(filt.get("isExtra")),
    }

    if expression_type == "SIMPLE":
        result["comparator"] = filt.get("val")
        result["operator"] = filt.get("op")
        result["subject"] = filt.get("col")
    elif expression_type == "SQL":
        result["sqlExpression"] = filt.get(clause)

    # Deterministic name so identical filters dedupe consistently.
    result["filterOptionName"] = md5_sha_from_dict(result)

    return result
def merge_extra_form_data(form_data: Dict[str, Any]) -> None:
    """
    Merge extra form data (appends and overrides) into the main payload
    and add applied time extras to the payload.

    Mutates ``form_data`` in place; the ``extra_form_data`` key is consumed.
    """
    filter_keys = ["filters", "adhoc_filters"]
    extra_form_data = form_data.pop("extra_form_data", {})
    append_filters = extra_form_data.get("filters", None)

    # merge append extras
    for key in [key for key in EXTRA_FORM_DATA_APPEND_KEYS if key not in filter_keys]:
        # BUG FIX: both payloads are dicts, so ``dict.get`` must be used
        # (``getattr`` on a dict always returned the ``{}`` default), and the
        # merged value must be written back under the actual ``key`` — the
        # previous code stored ``extra_value`` under the literal string "key".
        extra_value = extra_form_data.get(key, {})
        form_value = form_data.get(key, {})
        form_value.update(extra_value)
        if form_value:
            form_data[key] = form_value

    # map regular extras that apply to form data properties
    for src_key, target_key in EXTRA_FORM_DATA_OVERRIDE_REGULAR_MAPPINGS.items():
        value = extra_form_data.get(src_key)
        if value is not None:
            form_data[target_key] = value

    # map extras that apply to form data extra properties
    extras = form_data.get("extras", {})
    for key in EXTRA_FORM_DATA_OVERRIDE_EXTRA_KEYS:
        value = extra_form_data.get(key)
        if value is not None:
            extras[key] = value
    if extras:
        form_data["extras"] = extras

    adhoc_filters = form_data.get("adhoc_filters", [])
    form_data["adhoc_filters"] = adhoc_filters
    append_adhoc_filters = extra_form_data.get("adhoc_filters", [])
    # Everything coming from extra_form_data is flagged as "extra".
    adhoc_filters.extend({"isExtra": True, **fltr} for fltr in append_adhoc_filters)
    if append_filters:
        adhoc_filters.extend(
            to_adhoc({"isExtra": True, **fltr}) for fltr in append_filters if fltr
        )
def merge_extra_filters(  # pylint: disable=too-many-branches
    form_data: Dict[str, Any],
) -> None:
    """Fold ``extra_filters`` and ``extra_form_data`` into ``form_data``.

    Time-related extra filters become top-level form-data fields (and are
    recorded in ``applied_time_extras``); column filters are converted to
    ad-hoc filters, skipping exact duplicates of existing simple filters.
    Mutates ``form_data`` in place and removes the ``extra_filters`` key.
    """
    # extra_filters are temporary/contextual filters (using the legacy constructs)
    # that are external to the slice definition. We use those for dynamic
    # interactive filters like the ones emitted by the "Filter Box" visualization.
    # Note extra_filters only support simple filters.
    applied_time_extras: Dict[str, str] = {}
    form_data["applied_time_extras"] = applied_time_extras
    adhoc_filters = form_data.get("adhoc_filters", [])
    form_data["adhoc_filters"] = adhoc_filters
    merge_extra_form_data(form_data)
    if "extra_filters" in form_data:
        # __form and __to are special extra_filters that target time
        # boundaries. The rest of extra_filters are simple
        # [column_name in list_of_values]. `__` prefix is there to avoid
        # potential conflicts with column that would be named `from` or `to`
        date_options = {
            "__time_range": "time_range",
            "__time_col": "granularity_sqla",
            "__time_grain": "time_grain_sqla",
            "__time_origin": "druid_time_origin",
            "__granularity": "granularity",
        }

        # Grab list of existing filters 'keyed' on the column and operator
        def get_filter_key(f: Dict[str, Any]) -> str:
            if "expressionType" in f:
                return "{}__{}".format(f["subject"], f["operator"])

            return "{}__{}".format(f["col"], f["op"])

        existing_filters = {}
        for existing in adhoc_filters:
            if (
                existing["expressionType"] == "SIMPLE"
                and existing.get("comparator") is not None
                and existing.get("subject") is not None
            ):
                existing_filters[get_filter_key(existing)] = existing["comparator"]

        for filtr in form_data[  # pylint: disable=too-many-nested-blocks
            "extra_filters"
        ]:
            filtr["isExtra"] = True
            # Pull out time filters/options and merge into form data
            filter_column = filtr["col"]
            time_extra = date_options.get(filter_column)
            if time_extra:
                time_extra_value = filtr.get("val")
                if time_extra_value:
                    form_data[time_extra] = time_extra_value
                    applied_time_extras[filter_column] = time_extra_value
            elif filtr["val"]:
                # Merge column filters
                filter_key = get_filter_key(filtr)
                if filter_key in existing_filters:
                    # Check if the filter already exists
                    if isinstance(filtr["val"], list):
                        if isinstance(existing_filters[filter_key], list):
                            # Add filters for unequal lists
                            # order doesn't matter
                            if set(existing_filters[filter_key]) != set(filtr["val"]):
                                adhoc_filters.append(to_adhoc(filtr))
                        else:
                            adhoc_filters.append(to_adhoc(filtr))
                    else:
                        # Do not add filter if same value already exists
                        if filtr["val"] != existing_filters[filter_key]:
                            adhoc_filters.append(to_adhoc(filtr))
                else:
                    # Filter not found, add it
                    adhoc_filters.append(to_adhoc(filtr))
        # Remove extra filters from the form data since no longer needed
        del form_data["extra_filters"]
def merge_request_params(form_data: Dict[str, Any], params: Dict[str, Any]) -> None:
    """
    Merge request parameters to the key `url_params` in form_data. Only updates
    or appends parameters to `form_data` that are defined in `params; pre-existing
    parameters not defined in params are left unchanged.

    :param form_data: object to be updated
    :param params: request parameters received via query string
    """
    url_params = form_data.get("url_params", {})
    # These keys carry the payload itself and must never leak into url_params.
    reserved = ("form_data", "r")
    for param_name, param_value in params.items():
        if param_name in reserved:
            continue
        url_params[param_name] = param_value
    form_data["url_params"] = url_params
def user_label(user: User) -> Optional[str]:
    """Given a user ORM FAB object, returns a label

    Prefers "First Last"; falls back to the username; ``None`` for no user.
    """
    if not user:
        return None
    if user.first_name and user.last_name:
        return "{} {}".format(user.first_name, user.last_name)
    return user.username
def get_or_create_db(
    database_name: str, sqlalchemy_uri: str, always_create: Optional[bool] = True
) -> "Database":
    """Fetch the Database record named ``database_name``, creating it if needed.

    The SQLAlchemy URI is (re)applied on every call so config changes take
    effect. With ``always_create=False`` a missing database returns ``None``
    rather than being created.
    """
    # Imported lazily to avoid circular imports at module load time.
    from superset import db
    from superset.models import core as models

    database = (
        db.session.query(models.Database).filter_by(database_name=database_name).first()
    )

    # databases with a fixed UUID
    uuids = {
        "examples": EXAMPLES_DB_UUID,
    }

    if not database and always_create:
        logger.info("Creating database reference for %s", database_name)
        database = models.Database(
            database_name=database_name, uuid=uuids.get(database_name)
        )
        db.session.add(database)

    if database:
        database.set_sqlalchemy_uri(sqlalchemy_uri)
        db.session.commit()

    return database
def get_example_database() -> "Database":
    """Return the "examples" database, falling back to the main DB URI."""
    # Imported lazily to avoid circular imports at module load time.
    from superset import conf

    db_uri = conf.get("SQLALCHEMY_EXAMPLES_URI") or conf.get("SQLALCHEMY_DATABASE_URI")
    return get_or_create_db("examples", db_uri)
def get_main_database() -> "Database":
    """Return (creating if needed) the "main" metadata database record."""
    # Imported lazily to avoid circular imports at module load time.
    from superset import conf

    db_uri = conf.get("SQLALCHEMY_DATABASE_URI")
    return get_or_create_db("main", db_uri)
def backend() -> str:
    """Return the backend name (e.g. "sqlite") of the examples database."""
    return get_example_database().backend
def is_adhoc_metric(metric: Metric) -> bool:
    """Return True when ``metric`` is an ad-hoc (dict) metric definition."""
    if not isinstance(metric, dict):
        return False
    return "expressionType" in metric
def get_metric_name(metric: Metric) -> str:
    """Return an ad-hoc metric's label, or the metric name itself."""
    if is_adhoc_metric(metric):
        return metric["label"]  # type: ignore
    return metric  # type: ignore
def get_metric_names(metrics: Sequence[Metric]) -> List[str]:
    """Return the display name of every metric in ``metrics``."""
    return list(map(get_metric_name, metrics))
def get_main_metric_name(metrics: Sequence[Metric]) -> Optional[str]:
    """Return the first metric's display name, or ``None`` if empty."""
    labels = get_metric_names(metrics)
    if labels:
        return labels[0]
    return None
def ensure_path_exists(path: str) -> None:
    """Create ``path`` (and any missing parents) as a directory.

    A no-op when the directory already exists; still raises OSError when the
    path exists but is not a directory, or on other failures (permissions).
    """
    # exist_ok=True replaces the legacy try/except-EEXIST-and-isdir dance:
    # an existing non-directory still raises FileExistsError (an OSError
    # with errno EEXIST), matching the old behavior.
    os.makedirs(path, exist_ok=True)
def convert_legacy_filters_into_adhoc(  # pylint: disable=invalid-name
    form_data: FormData,
) -> None:
    """Rewrite legacy filter fields into the ``adhoc_filters`` list.

    Free-form ``where``/``having`` strings become SQL ad-hoc filters; the
    structured ``filters``/``having_filters`` lists become SIMPLE ad-hoc
    filters. The legacy keys are deleted afterwards. Mutates in place.
    """
    # Legacy clause name -> legacy structured-filter list key.
    mapping = {"having": "having_filters", "where": "filters"}

    if not form_data.get("adhoc_filters"):
        form_data["adhoc_filters"] = []

        for clause, filters in mapping.items():
            if clause in form_data and form_data[clause] != "":
                # The SQL expression lives directly on form_data[clause].
                form_data["adhoc_filters"].append(to_adhoc(form_data, "SQL", clause))

            if filters in form_data:
                for filt in filter(lambda x: x is not None, form_data[filters]):
                    form_data["adhoc_filters"].append(to_adhoc(filt, "SIMPLE", clause))

    for key in ("filters", "having", "having_filters", "where"):
        if key in form_data:
            del form_data[key]
def split_adhoc_filters_into_base_filters(  # pylint: disable=invalid-name
    form_data: FormData,
) -> None:
    """
    Mutates form data to restructure the adhoc filters in the form of the four base
    filters, `where`, `having`, `filters`, and `having_filters` which represent
    free form where sql, free form having sql, structured where clauses and structured
    having clauses.
    """
    adhoc_filters = form_data.get("adhoc_filters")
    if not isinstance(adhoc_filters, list):
        return

    # One bucket per (expressionType, clause) combination.
    buckets: Dict[Tuple[str, str], List[Any]] = {
        ("SIMPLE", "WHERE"): [],
        ("SIMPLE", "HAVING"): [],
        ("SQL", "WHERE"): [],
        ("SQL", "HAVING"): [],
    }
    for adhoc_filter in adhoc_filters:
        bucket_key = (
            adhoc_filter.get("expressionType"),
            adhoc_filter.get("clause"),
        )
        if bucket_key not in buckets:
            continue
        if bucket_key[0] == "SIMPLE":
            buckets[bucket_key].append(
                {
                    "col": adhoc_filter.get("subject"),
                    "op": adhoc_filter.get("operator"),
                    "val": adhoc_filter.get("comparator"),
                }
            )
        else:
            buckets[bucket_key].append(adhoc_filter.get("sqlExpression"))

    # Free-form SQL fragments are parenthesized and AND-ed together.
    form_data["where"] = " AND ".join(
        "({})".format(expression) for expression in buckets[("SQL", "WHERE")]
    )
    form_data["having"] = " AND ".join(
        "({})".format(expression) for expression in buckets[("SQL", "HAVING")]
    )
    form_data["having_filters"] = buckets[("SIMPLE", "HAVING")]
    form_data["filters"] = buckets[("SIMPLE", "WHERE")]
def get_username() -> Optional[str]:
    """Get username if within the flask context, otherwise return noffin'"""
    try:
        return g.user.username
    except Exception:  # pylint: disable=broad-except
        # No request context / anonymous user -> no username.
        return None
def parse_ssl_cert(certificate: str) -> _Certificate:
    """
    Parses the contents of a certificate and returns a valid certificate object
    if valid.

    :param certificate: Contents of certificate file
    :return: Valid certificate instance
    :raises CertificateException: If certificate is not valid/unparseable
    """
    try:
        return x509.load_pem_x509_certificate(
            certificate.encode("utf-8"), default_backend()
        )
    except ValueError as ex:
        # Chain the parse error (PEP 3134) so the underlying reason is
        # preserved in tracebacks instead of being discarded.
        raise CertificateException("Invalid certificate") from ex
def create_ssl_cert_file(certificate: str) -> str:
    """
    This creates a certificate file that can be used to validate HTTPS
    sessions. A certificate is only written to disk once; on subsequent calls,
    only the path of the existing certificate is returned.

    :param certificate: The contents of the certificate
    :return: The path to the certificate file
    :raises CertificateException: If certificate is not valid/unparseable
    """
    filename = f"{md5_sha_from_str(certificate)}.crt"
    cert_dir = current_app.config["SSL_CERT_PATH"]
    path = cert_dir if cert_dir else tempfile.gettempdir()
    path = os.path.join(path, filename)
    if not os.path.exists(path):
        # Validate certificate prior to persisting to temporary directory
        parse_ssl_cert(certificate)
        # Context manager guarantees the handle is closed even if the
        # write fails (the original open/close pair leaked on error).
        with open(path, "w") as cert_file:
            cert_file.write(certificate)
    return path
def time_function(
    func: Callable[..., FlaskResponse], *args: Any, **kwargs: Any
) -> Tuple[float, Any]:
    """
    Measures the amount of time a function takes to execute in ms

    :param func: The function execution time to measure
    :param args: args to be passed to the function
    :param kwargs: kwargs to be passed to the function
    :return: A tuple with the duration and response from the function
    """
    started_at = default_timer()
    result = func(*args, **kwargs)
    elapsed_ms = (default_timer() - started_at) * 1000.0
    return elapsed_ms, result
def MediumText() -> Variant:  # pylint:disable=invalid-name
    """Text column type that maps to MEDIUMTEXT on MySQL, TEXT elsewhere."""
    return Text().with_variant(MEDIUMTEXT(), "mysql")
def shortid() -> str:
    """Return a short (12 hex-ish characters) pseudo-random identifier."""
    # Take the tail of a random UUID's string form.
    full_uuid = str(uuid.uuid4())
    return full_uuid[-12:]
class DatasourceName(NamedTuple):
    """Lightweight (table, schema) pair identifying a datasource."""

    # Table (or datasource) name.
    table: str
    # Schema the table lives in.
    schema: str
def get_stacktrace() -> Optional[str]:
    """Return the current traceback string, or None when SHOW_STACKTRACE is off."""
    if current_app.config["SHOW_STACKTRACE"]:
        return traceback.format_exc()
    return None
def split(
    string: str, delimiter: str = " ", quote: str = '"', escaped_quote: str = r"\""
) -> Iterator[str]:
    """
    A split function that is aware of quotes and parentheses.

    :param string: string to split
    :param delimiter: string defining where to split, usually a comma or space
    :param quote: string, either a single or a double quote
    :param escaped_quote: string representing an escaped quote
    :return: list of strings
    """
    paren_depth = 0
    in_quotes = False
    segment_start = 0
    for position, char in enumerate(string):
        at_top_level = paren_depth == 0 and not in_quotes
        if at_top_level and char == delimiter:
            yield string[segment_start:position]
            segment_start = position + len(delimiter)
        elif char == "(":
            paren_depth += 1
        elif char == ")":
            paren_depth -= 1
        elif char == quote:
            # A closing quote only counts when not preceded by the escape
            # sequence; otherwise it stays inside the quoted region.
            preceding = string[position - len(escaped_quote) + 1 : position + 1]
            if in_quotes and preceding != escaped_quote:
                in_quotes = False
            elif not in_quotes:
                in_quotes = True
    yield string[segment_start:]
def get_iterable(x: Any) -> List[Any]:
    """
    Get an iterable (list) representation of the object.

    :param x: The object
    :returns: An iterable representation
    """
    if isinstance(x, list):
        return x
    return [x]
def get_form_data_token(form_data: Dict[str, Any]) -> str:
    """
    Return the token contained within form data or generate a new one.

    :param form_data: chart form data
    :return: original token if predefined, otherwise new uuid4 based token
    """
    existing_token = form_data.get("token")
    if existing_token:
        return existing_token
    return "token_" + uuid.uuid4().hex[:8]
def get_column_name_from_metric(metric: Metric) -> Optional[str]:
    """
    Extract the column that a metric is referencing. If the metric isn't
    a simple metric, always returns `None`.

    :param metric: Ad-hoc metric
    :return: column name if simple metric, otherwise None
    """
    if not is_adhoc_metric(metric):
        return None
    adhoc = cast(AdhocMetric, metric)
    if adhoc["expressionType"] != AdhocMetricExpressionType.SIMPLE:
        return None
    return cast(Dict[str, Any], adhoc["column"])["column_name"]
def get_column_names_from_metrics(metrics: List[Metric]) -> List[str]:
    """
    Extract the columns that a list of metrics are referencing. Excludes all
    SQL metrics.

    :param metrics: Ad-hoc metrics
    :return: column names referenced by simple metrics
    """
    return [
        column_name
        for column_name in (get_column_name_from_metric(m) for m in metrics)
        if column_name
    ]
def extract_dataframe_dtypes(df: pd.DataFrame) -> List[GenericDataType]:
    """Serialize pandas/numpy dtypes to generic types

    One GenericDataType per DataFrame column, in column order, based on
    pandas' ``infer_dtype``.
    """
    # omitting string types as those will be the default type
    inferred_type_map: Dict[str, GenericDataType] = {
        "floating": GenericDataType.NUMERIC,
        "integer": GenericDataType.NUMERIC,
        "mixed-integer-float": GenericDataType.NUMERIC,
        "decimal": GenericDataType.NUMERIC,
        "boolean": GenericDataType.BOOLEAN,
        "datetime64": GenericDataType.TEMPORAL,
        "datetime": GenericDataType.TEMPORAL,
        "date": GenericDataType.TEMPORAL,
    }

    generic_types: List[GenericDataType] = []
    for column in df.columns:
        series = df[column]
        inferred_type = infer_dtype(series)
        # Anything not in the map (strings, mixed, ...) defaults to STRING.
        generic_type = inferred_type_map.get(inferred_type, GenericDataType.STRING)
        generic_types.append(generic_type)

    return generic_types
def extract_column_dtype(col: "BaseColumn") -> GenericDataType:
    """Map a datasource column's flags to a GenericDataType.

    Temporal wins over numeric; everything else defaults to STRING.
    """
    if col.is_temporal:
        return GenericDataType.TEMPORAL
    if col.is_numeric:
        return GenericDataType.NUMERIC
    # TODO: add check for boolean data type when proper support is added
    return GenericDataType.STRING
def indexed(
    items: List[Any], key: Union[str, Callable[[Any], Any]]
) -> Dict[Any, List[Any]]:
    """Build an index for a list of objects

    ``key`` is either an attribute name or a callable; items sharing a key
    are grouped in insertion order.
    """
    index: Dict[Any, Any] = {}
    for obj in items:
        if isinstance(key, str):
            obj_key = getattr(obj, key)
        else:
            obj_key = key(obj)
        if obj_key not in index:
            index[obj_key] = []
        index[obj_key].append(obj)
    return index
def is_test() -> bool:
    """Return True when the SUPERSET_TESTENV env var holds a truthy value.

    Mirrors the semantics of ``distutils.util.strtobool`` (distutils was
    removed in Python 3.12): y/yes/t/true/on/1 are true; n/no/f/false/off/0
    are false; any other value raises ValueError.
    """
    value = os.environ.get("SUPERSET_TESTENV", "false").lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError("invalid truth value {!r}".format(value))
def get_time_filter_status(  # pylint: disable=too-many-branches
    datasource: "BaseDatasource", applied_time_extras: Dict[str, str],
) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
    """Classify requested time extras as applied or rejected for a datasource.

    Each extra (time column, grain, range, origin, granularity) is accepted
    only when the datasource can honor it (has a matching temporal column,
    or is a Druid datasource for the Druid-only extras). Returns
    ``(applied, rejected)`` where rejected entries carry a ``reason``.
    """
    temporal_columns = {col.column_name for col in datasource.columns if col.is_dttm}
    applied: List[Dict[str, str]] = []
    rejected: List[Dict[str, str]] = []
    time_column = applied_time_extras.get(ExtraFiltersTimeColumnType.TIME_COL)
    if time_column:
        if time_column in temporal_columns:
            applied.append({"column": ExtraFiltersTimeColumnType.TIME_COL})
        else:
            rejected.append(
                {
                    "reason": "not_in_datasource",
                    "column": ExtraFiltersTimeColumnType.TIME_COL,
                }
            )

    if ExtraFiltersTimeColumnType.TIME_GRAIN in applied_time_extras:
        # are there any temporal columns to assign the time grain to?
        if temporal_columns:
            applied.append({"column": ExtraFiltersTimeColumnType.TIME_GRAIN})
        else:
            rejected.append(
                {
                    "reason": "no_temporal_column",
                    "column": ExtraFiltersTimeColumnType.TIME_GRAIN,
                }
            )

    if ExtraFiltersTimeColumnType.TIME_RANGE in applied_time_extras:
        # are there any temporal columns to assign the time grain to?
        if temporal_columns:
            applied.append({"column": ExtraFiltersTimeColumnType.TIME_RANGE})
        else:
            rejected.append(
                {
                    "reason": "no_temporal_column",
                    "column": ExtraFiltersTimeColumnType.TIME_RANGE,
                }
            )

    if ExtraFiltersTimeColumnType.TIME_ORIGIN in applied_time_extras:
        # Time origin only makes sense for Druid datasources.
        if datasource.type == "druid":
            applied.append({"column": ExtraFiltersTimeColumnType.TIME_ORIGIN})
        else:
            rejected.append(
                {
                    "reason": "not_druid_datasource",
                    "column": ExtraFiltersTimeColumnType.TIME_ORIGIN,
                }
            )

    if ExtraFiltersTimeColumnType.GRANULARITY in applied_time_extras:
        # Druid-style granularity is likewise Druid-only.
        if datasource.type == "druid":
            applied.append({"column": ExtraFiltersTimeColumnType.GRANULARITY})
        else:
            rejected.append(
                {
                    "reason": "not_druid_datasource",
                    "column": ExtraFiltersTimeColumnType.GRANULARITY,
                }
            )

    return applied, rejected
def format_list(items: Sequence[str], sep: str = ", ", quote: str = '"') -> str:
    """Quote each item (escaping embedded quotes) and join with ``sep``."""
    quote_escaped = "\\" + quote
    quoted_items = [
        quote + item.replace(quote, quote_escaped) + quote for item in items
    ]
    return sep.join(quoted_items)
def find_duplicates(items: Iterable[InputType]) -> List[InputType]:
    """Find duplicate items in an iterable."""
    occurrence_counts = collections.Counter(items)
    return [value for value, count in occurrence_counts.items() if count > 1]
def remove_duplicates(
    items: Iterable[InputType], key: Optional[Callable[[InputType], Any]] = None
) -> List[InputType]:
    """Remove duplicate items in an iterable, preserving first-seen order.

    With ``key``, two items are duplicates when their keys compare equal.
    """
    if not key:
        # dict preserves insertion order and dedupes in one pass.
        return list(dict.fromkeys(items).keys())
    seen_keys = set()
    unique_items = []
    for candidate in items:
        candidate_key = key(candidate)
        if candidate_key in seen_keys:
            continue
        seen_keys.add(candidate_key)
        unique_items.append(candidate)
    return unique_items
def normalize_dttm_col(
    df: pd.DataFrame,
    timestamp_format: Optional[str],
    offset: int,
    time_shift: Optional[timedelta],
) -> None:
    """Normalize the DTTM_ALIAS column of ``df`` to datetimes, in place.

    ``timestamp_format`` may be "epoch_s"/"epoch_ms" (numeric epoch values)
    or a strptime-style format string. ``offset`` (hours) and ``time_shift``
    are then added to the parsed timestamps. No-op when the column is absent.
    """
    if DTTM_ALIAS not in df.columns:
        return
    if timestamp_format in ("epoch_s", "epoch_ms"):
        dttm_col = df[DTTM_ALIAS]
        if is_numeric_dtype(dttm_col):
            # Column is formatted as a numeric value
            unit = timestamp_format.replace("epoch_", "")
            df[DTTM_ALIAS] = pd.to_datetime(
                dttm_col, utc=False, unit=unit, origin="unix"
            )
        else:
            # Column has already been formatted as a timestamp.
            df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
    else:
        df[DTTM_ALIAS] = pd.to_datetime(
            df[DTTM_ALIAS], utc=False, format=timestamp_format
        )
    if offset:
        df[DTTM_ALIAS] += timedelta(hours=offset)
    if time_shift is not None:
        df[DTTM_ALIAS] += time_shift
def parse_boolean_string(bool_str: Optional[str]) -> bool:
    """
    Convert a string representation of a true/false value into a boolean
    >>> parse_boolean_string(None)
    False
    >>> parse_boolean_string('false')
    False
    >>> parse_boolean_string('true')
    True
    >>> parse_boolean_string('False')
    False
    >>> parse_boolean_string('True')
    True
    >>> parse_boolean_string('foo')
    False
    >>> parse_boolean_string('0')
    False
    >>> parse_boolean_string('1')
    True
    :param bool_str: string representation of a value that is assumed to be boolean
    :return: parsed boolean value
    """
    if bool_str is None:
        return False
    # BUG FIX: distutils (and thus distutils.util.strtobool) was removed in
    # Python 3.12 (PEP 632).  Replicate strtobool's truth table locally:
    # unrecognised values map to False, exactly as the previous
    # `except ValueError: return False` branch did.
    return bool_str.lower() in ("y", "yes", "t", "true", "on", "1")
| 31.504177 | 96 | 0.633605 |
d77ef7dc921cda66823367da292df6f07d81b68a | 170 | py | Python | tests/examples-bad/classmethod3.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-05T08:53:26.000Z | 2020-06-05T08:53:26.000Z | tests/examples-bad/classmethod3.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-04T13:47:19.000Z | 2020-06-04T13:47:57.000Z | tests/examples-bad/classmethod3.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-11-07T17:02:46.000Z | 2020-11-07T17:02:46.000Z | class FooMany(object):
    def __init__(self, a):
        # Store the single constructor argument; note that no `b` attribute
        # is ever assigned on instances of this class.
        self.a = a
    def foo(self, b):
        # NOTE(review): `self.b` is never assigned anywhere in this class, so
        # this raises AttributeError at runtime.  This file lives under
        # tests/examples-bad/, so the defect appears to be the deliberate
        # fixture content this checker test exercises -- do not "fix" it.
        self.fookwargs(self.a, self.b, c=1, d=2)
    def fookwargs(self, a, b, **kwargs):
        # Intentionally empty sink accepting positional and keyword arguments.
        pass
| 15.454545 | 42 | 0.623529 |
475799d7b4e75a2f2a5961d9d9e64ae80589e8d9 | 17,233 | py | Python | tests/tests.py | malware-revealer/extractor | c92e7c845024126daacc5e7c4f84af800bf86dcd | [
"MIT"
] | 10 | 2019-08-12T21:50:31.000Z | 2021-07-23T15:42:30.000Z | tests/tests.py | malware-revealer/extractor | c92e7c845024126daacc5e7c4f84af800bf86dcd | [
"MIT"
] | 4 | 2019-10-05T14:00:25.000Z | 2019-10-06T22:05:53.000Z | tests/tests.py | malware-revealer/extractor | c92e7c845024126daacc5e7c4f84af800bf86dcd | [
"MIT"
] | 2 | 2019-10-06T12:31:20.000Z | 2020-10-03T13:33:45.000Z | import unittest
import json
import mrextractor
# Directories holding the sample executables fed to the extractor.
PE_EXE_DIR = "./test_assets/executables/pe"
ELF_EXE_DIR = "test_assets/executables/elf"
# Stored expected outputs, extraction destination, and per-feature configs.
EXPECTED_FEATURES_DIR = "test_assets/expected_features"
EXTRACTED_FEATURES_DIR = "test_assets/extracted_features"
CONFS_DIR = "test_assets/extractor_confs"
# Hashes naming the per-sample output files written by the extractor.
PE_0_HASH = "071df5b74f08fb5a4ce13a6cd2e7f485"
ELF_0_HASH = "0e1631f5eaadf5ac5010530077727092"
class TestExtractor(unittest.TestCase):
    """Integration tests for mrextractor feature extraction.

    Almost every test follows the same recipe: run a batch extraction with a
    feature-specific configuration file, then compare the JSON produced for a
    known sample with a stored expected JSON file.  That recipe is factored
    into the two private helpers below; each test method only supplies the
    feature name, the executable directory, the sample hash and the failure
    message.
    """

    def _run_extraction(self, feature_name, exe_dir):
        """Run a batch extraction for *feature_name*; return the output folder."""
        conf_file = "{}/{}_conf.yaml".format(CONFS_DIR, feature_name)
        out_folder = "{}/{}".format(EXTRACTED_FEATURES_DIR, feature_name)
        extractor = mrextractor.new(conf_file, exe_dir, out_folder)
        extractor.extract_batch()
        return out_folder

    def _assert_json_feature(self, feature_name, exe_dir, exe_hash, message):
        """Extract *feature_name* and compare the produced JSON with the expected one."""
        out_folder = self._run_extraction(feature_name, exe_dir)
        expected = "{}/{}.json".format(EXPECTED_FEATURES_DIR, feature_name)
        extracted = "{}/json/0/{}.json".format(out_folder, exe_hash)
        with open(expected, "rb") as expected_file:
            expected_feature_dict = json.load(expected_file)
        with open(extracted, "rb") as extracted_file:
            extracted_feature_dict = json.load(extracted_file)
        # assertEqual replaces the deprecated assertEquals alias used before.
        self.assertEqual(extracted_feature_dict, expected_feature_dict, message)

    def test_creation(self):
        """Test the extractor creation using a test conf file."""
        conf_file = "test_assets/extractor_conf.yaml"
        out_folder = "test_assets/extracted_features"
        extractor = mrextractor.new(conf_file, PE_EXE_DIR, out_folder)
        feature_list = list(extractor.features.keys())
        expected_feature_list = sorted([
            'base.ByteCounts',
            'base.BinaryImage',
            'base.FileSize',
            'base.URLs',
            'base.ImportedFunctions',
            'base.ExportedFunctions',
            'base.Strings',
            'pe.PEGeneralFileInfo',
            'pe.PEMSDOSHeader',
            'pe.PEHeader',
            'pe.PEOptionalHeader',
            'pe.PELibraries',
            'pe.PESections',
            'elf.ELFHeader',
            'elf.ELFLibraries',
            'elf.ELFSections',
        ])
        self.assertEqual(
            sorted(feature_list),
            expected_feature_list,
            "Imported features don't match"
        )

    def test_PE_Header(self):
        """Test the extracted features of the PE header."""
        self._assert_json_feature(
            "pe_header", PE_EXE_DIR, PE_0_HASH,
            "The extracted features of Pe Header don't match")

    def test_Libraries(self):
        """Test the extracted features of the imported libraries."""
        self._assert_json_feature(
            "libraries", PE_EXE_DIR, PE_0_HASH,
            "The extracted features of Libraries don't match")

    def test_Sections(self):
        """Test the extracted features of the PE sections."""
        self._assert_json_feature(
            "sections", PE_EXE_DIR, PE_0_HASH,
            "The extracted features of Sections don't match")

    def test_general_file_info(self):
        """Test the general file information extraction."""
        self._assert_json_feature(
            "general_file_info", PE_EXE_DIR, PE_0_HASH,
            "extracted general file informations don't match")

    def test_msdos_header(self):
        """Test the MS-DOS header extraction."""
        self._assert_json_feature(
            "msdos_header", PE_EXE_DIR, PE_0_HASH,
            "msdos header dosen't match")

    def test_optional_header(self):
        """Test the optional header extraction."""
        self._assert_json_feature(
            "optional_header", PE_EXE_DIR, PE_0_HASH,
            "Optional Header dosen't match")

    def test_file_size(self):
        """Test the file size extraction."""
        self._assert_json_feature(
            "file_size", PE_EXE_DIR, PE_0_HASH,
            "file size dosen't match")

    def test_urls(self):
        """Test the URLs extraction."""
        self._assert_json_feature(
            "urls", PE_EXE_DIR, PE_0_HASH,
            "urls don't match")

    def test_imported_functions(self):
        """Test the imported functions extraction."""
        self._assert_json_feature(
            "imported_functions", PE_EXE_DIR, PE_0_HASH,
            "imported functions don't match")

    def test_byte_counts(self):
        """Test the byte counts extraction."""
        self._assert_json_feature(
            "byte_counts", PE_EXE_DIR, PE_0_HASH,
            "Byte Counts dosen't match")

    def test_exported_functions(self):
        """Test the exported functions extraction."""
        self._assert_json_feature(
            "exported_functions", PE_EXE_DIR, PE_0_HASH,
            "exported functions don't match")

    def test_binary_image(self):
        """Test the binary image extraction using a test conf file."""
        from PIL import Image, ImageChops

        def image_difference(pic_1, pic_2):
            # Pixel-wise difference of the two images; an all-black result
            # (empty getbbox) means the images are identical.
            diff = ImageChops.difference(pic_1, pic_2)
            difference_image = diff.convert('RGB')
            difference_image.paste(pic_2, mask=diff)
            return difference_image

        feature_name = "binary_image"
        out_folder = self._run_extraction(feature_name, PE_EXE_DIR)
        expected = "{}/{}.png".format(EXPECTED_FEATURES_DIR, feature_name)
        extracted = "{}/image/binary_image/0/{}.png".format(out_folder, PE_0_HASH)
        # BUG FIX: the two Image.open calls previously had their arguments
        # swapped (harmless for the symmetric diff, but misleading to read).
        expected_image = Image.open(expected)
        extracted_image = Image.open(extracted)
        difference = image_difference(extracted_image, expected_image)
        # getbbox() returns None when every pixel is black, i.e. no difference.
        self.assertTrue(not difference.getbbox(), "Binary images don't match")

    def test_strings(self):
        """Test the strings extraction."""
        self._assert_json_feature(
            "strings", PE_EXE_DIR, PE_0_HASH,
            "strings don't match")

    def test_elf_header(self):
        """Test the extraction of information from an ELF header."""
        self._assert_json_feature(
            "elf_header", ELF_EXE_DIR, ELF_0_HASH,
            "ELF header don't match the expected output")

    def test_elf_sections(self):
        """Test the extraction of information from ELF sections."""
        self._assert_json_feature(
            "elf_sections", ELF_EXE_DIR, ELF_0_HASH,
            "ELF Sections don't match the expected output")

    def test_elf_libraries(self):
        """Test the extraction of ELF library names."""
        # BUG FIX: the failure message previously said "ELF Sections"
        # (copy-paste error from the preceding test).
        self._assert_json_feature(
            "elf_libraries", ELF_EXE_DIR, ELF_0_HASH,
            "ELF Libraries don't match the expected output")
# Allow running this test module directly with `python tests.py`.
if __name__ == '__main__':
    unittest.main()
| 34.328685 | 78 | 0.593512 |
4ed65632fbb63741a05f0b6bafebe6d5ec456286 | 1,400 | py | Python | jtyoui/web/interfaces.py | vanton/Jtyoui | c44d66b038ac5f4e2d75b68b3493d02f7b7b385e | [
"MIT"
] | 1 | 2019-12-24T00:57:47.000Z | 2019-12-24T00:57:47.000Z | jtyoui/web/interfaces.py | liangxioa/Jtyoui | 5a584cbf12d644b6c4fb13167d8841a383afbbac | [
"MIT"
] | null | null | null | jtyoui/web/interfaces.py | liangxioa/Jtyoui | 5a584cbf12d644b6c4fb13167d8841a383afbbac | [
"MIT"
] | null | null | null | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/8/13 16:23
# @Author: Jtyoui@qq.com
import json
import time
import requests
def interface_test(dict_, address, record_time=True, **kwargs):
    """Simple interface test: POST a JSON payload and decode the JSON reply.

    :param dict_: payload to serialize as JSON
    :param address: target URL
    :param record_time: whether to also return the elapsed time in seconds
    :param kwargs: forwarded to requests.post
    :return: decoded JSON response, plus elapsed seconds when record_time is True
    """
    start = time.time()
    # BUG FIX: json.dumps expects a *bool* for ensure_ascii; the previous
    # value 'utf8' was a truthy string, which silently forced ASCII escaping
    # of non-ASCII text -- the opposite of the apparent intent.
    payload = json.dumps(dict_, ensure_ascii=False)
    response = requests.post(address, payload, **kwargs)
    page = json.loads(response.text)
    end = time.time()
    if record_time:
        return page, end - start
    return page
def interface_tests(method, url, data, record_time=True, **kwargs):
    """Simple interface test using an arbitrary HTTP method.

    :param method: HTTP method name, e.g. 'post' or 'get'
    :param url: target URL
    :param data: request body
    :param record_time: whether to also return the elapsed time in seconds
    :param kwargs: forwarded to requests.request
    :return: response text, plus elapsed seconds when record_time is True
    """
    started = time.time()
    reply = requests.request(method=method.upper(), url=url, data=data, **kwargs)
    elapsed = time.time() - started
    if record_time:
        return reply.text, elapsed
    return reply.text
interface_post_test = interface_test  # POST interface test (backward-compatible alias)
interface_all_tests = interface_tests  # generic interface test (backward-compatible alias)
# Manual smoke test against an internal service; requires network access.
if __name__ == '__main__':
    d = {'answer': '我要告南明区政府贪污。', 'event_id': 'df99f4bb7f94c69c1b37ece4b41f1d05'}
    ji = interface_test(d, 'http://222.85.147.140:10056/commit_org')
    print(ji)
| 25.925926 | 84 | 0.659286 |
42395c3c5ff5316e5f0fc5d517f37052f03088ee | 3,386 | py | Python | openml_data_integration/protobuf_generator/openml_1497/server.py | tuix/tutorials | 733d35a8a39df079e8c2432c441b70785ab08440 | [
"Apache-2.0"
] | 8 | 2020-04-21T13:29:04.000Z | 2021-12-13T08:59:09.000Z | openml_data_integration/protobuf_generator/openml_1497/server.py | tuix/tutorials | 733d35a8a39df079e8c2432c441b70785ab08440 | [
"Apache-2.0"
] | 3 | 2021-04-27T11:03:04.000Z | 2021-05-24T18:22:57.000Z | openml_data_integration/protobuf_generator/openml_1497/server.py | tuix/tutorials | 733d35a8a39df079e8c2432c441b70785ab08440 | [
"Apache-2.0"
] | 6 | 2020-07-06T08:23:25.000Z | 2021-11-24T10:39:34.000Z | # date: 2021.07.14
# author: Raul Saavedra raul.saavedra.felipe@iais.fraunhofer.de
import grpc
from concurrent import futures
import time
import numpy
# import constant with the hardcoded openml data ID number
import myconstants
# import the generated grpc related classes for python
import model_pb2
import model_pb2_grpc
# import utility file to get the data
import openml_data_fetcher as odf
# TCP port the gRPC server listens on.
port_address = 8061
# Shared data source wrapping the OpenML rows; it tracks a `current_row` cursor.
openml_obj = odf.FetchOpenMLData()
# NOTE(review): this module-level counter is shadowed by the local variable of
# the same name inside get_next_row and never updated here -- likely leftover.
current_row = 0
class get_next_rowServicer(model_pb2_grpc.get_next_rowServicer):
    """gRPC servicer that serves the OpenML dataset one row per call."""

    def get_next_row(self, request, context):
        """Fill a Features message with the next row, or set NOT_FOUND once
        every available row has been served."""
        response = model_pb2.Features()
        total_rows = openml_obj.get_num_rows()
        row_index = openml_obj.current_row
        if row_index == total_rows:
            # Every row has been served: report it through the gRPC status.
            context.set_code(grpc.StatusCode.NOT_FOUND)
            context.set_details('All available data has been processed')
            print("All data processed. Exception raised")
            return response
        row = openml_obj.get_next_row(row_index)
        openml_obj.current_row = openml_obj.current_row + 1
        # The Features message has 24 feature fields V1..V24 plus a Class
        # label; assign them from the row in order.
        for position in range(24):
            setattr(response, "V%d" % (position + 1), row[position])
        response.Class = row[24]
        return response
# Following House_Price_Prediction/csv_databroker/csv_server.py
# Create a thread-pool backed gRPC server and register the servicer on it.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
model_pb2_grpc.add_get_next_rowServicer_to_server(get_next_rowServicer(), server)
print("Starting OpenML data node server")
server.add_insecure_port(f'[::]:{port_address}')
server.start()
try:
    # start() does not block, so sleep in day-long intervals until Ctrl-C.
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
| 39.835294 | 81 | 0.492912 |
0c53f0c9c0aeac49ecac14399579494a64d7c1d4 | 1,929 | py | Python | tryalgo/matrix_chain_mult.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | tryalgo/matrix_chain_mult.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | tryalgo/matrix_chain_mult.py | Shloub/tryalgo | ec01a16dd6a6053047f1948531bd5e9b2abf0fab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Matrix chain multiplication
# multiplication de matrices
# jill-jenn vie et christoph durr - 2014-2015
# snip{
def matrix_mult_opt_order(M):
    """Matrix chain multiplication optimal order.

    :param M: list of matrices
    :returns: matrices opt, arg, such that opt[i][j] is the optimal number of
             operations to compute M[i] * ... * M[j] when done in the order
             (M[i] * ... * M[k]) * (M[k + 1] * ... * M[j]) for k = arg[i][j]
    :complexity: :math:`O(n^3)` (three nested loops over chain length,
             start index and split point)
    """
    n = len(M)
    rows = [len(mat) for mat in M]
    cols = [len(mat[0]) for mat in M]
    opt = [[0] * n for _ in range(n)]
    arg = [[None] * n for _ in range(n)]
    for span in range(1, n):            # increasing chain length j - i
        for i in range(n - span):
            j = i + span
            best = float('inf')
            best_split = None
            for k in range(i, j):
                cost = opt[i][k] + opt[k + 1][j] + rows[i] * cols[k] * cols[j]
                if cost < best:
                    best = cost
                    best_split = k
            opt[i][j] = best
            arg[i][j] = best_split
    return opt, arg
def matrix_chain_mult(M):
    """Matrix chain multiplication.

    :param M: list of matrices
    :returns: M[0] * ... * M[-1], computed in time optimal order
    :complexity: whatever is needed by the multiplications
    """
    _, arg = matrix_mult_opt_order(M)
    last = len(M) - 1
    return _apply_order(M, arg, 0, last)
def _apply_order(M, arg, i, j):
    """Multiply matrices M[i] through M[j] inclusive, following the
    parenthesization stored in arg."""
    if i == j:
        return M[i]
    split = arg[i][j]                  # split point chosen by the DP
    left = _apply_order(M, arg, i, split)
    right = _apply_order(M, arg, split + 1, j)
    inner = range(len(right))
    return [
        [sum(left_row[m] * right[m][c] for m in inner)
         for c in range(len(right[0]))]
        for left_row in left
    ]
# snip}
| 32.15 | 77 | 0.523069 |
c3fc26f37f7163d03a71200dd7080e11ddc2cf09 | 940 | py | Python | mi/dataset/driver/ctdbp_p/dcl/test/test_ctdbp_p_dcl_recovered_driver.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 1 | 2015-05-10T01:08:44.000Z | 2015-05-10T01:08:44.000Z | mi/dataset/driver/ctdbp_p/dcl/test/test_ctdbp_p_dcl_recovered_driver.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/driver/ctdbp_p/dcl/test/test_ctdbp_p_dcl_recovered_driver.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 9 | 2015-04-15T21:09:08.000Z | 2019-11-15T03:18:53.000Z | import os
import unittest
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.ctdbp_p.dcl.ctdbp_p_dcl_recovered_driver import parse
from mi.dataset.driver.ctdbp_p.dcl.resource import RESOURCE_PATH
# BUG FIX: the dunder name was misspelled as `_author__`.
__author__ = 'jeff roy'
log = get_logger()
@attr('UNIT', group='mi')
class DriverTest(unittest.TestCase):
    """Unit test driving the recovered ctdbp_p DCL parser over a sample file."""

    def test_one(self):
        """Parse one recovered data file and assert no parser failure occurred."""
        source_file_path = os.path.join(RESOURCE_PATH, 'ctdbp01_20150804_061734.DAT')
        particle_data_handler = ParticleDataHandler()
        particle_data_handler = parse(None, source_file_path, particle_data_handler)
        log.debug("SAMPLES: %s", particle_data_handler._samples)
        log.debug("FAILURE: %s", particle_data_handler._failure)
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(particle_data_handler._failure, False)
# Allow running this single test directly without a test runner.
if __name__ == '__main__':
    test = DriverTest('test_one')
    test.test_one()
| 27.647059 | 85 | 0.757447 |
e1c626a9cf2bd6b774cae0ab45064f6c0079180d | 214 | py | Python | .history/spider_20210123234046.py | KustomApe/yahoauc_spider | bea630bbe1aa88e5138a98137c21865f316fdc96 | [
"MIT"
] | null | null | null | .history/spider_20210123234046.py | KustomApe/yahoauc_spider | bea630bbe1aa88e5138a98137c21865f316fdc96 | [
"MIT"
] | null | null | null | .history/spider_20210123234046.py | KustomApe/yahoauc_spider | bea630bbe1aa88e5138a98137c21865f316fdc96 | [
"MIT"
] | null | null | null | import requests
import urllib.request as urlreq
from bs4 import BeautifulSoup
def main():
    """Fetch the Yahoo Auctions front page and read the response body.

    The body is currently discarded; presumably a later revision of this
    work-in-progress spider parses it with BeautifulSoup.
    """
    page_url = 'https://auctions.yahoo.co.jp/'
    page_request = urlreq.Request(page_url)
    with urlreq.urlopen(page_request) as page_response:
        page_response.read()
08e33bdce52c5502afd883eb8b07e5f6cff1db7f | 10,101 | py | Python | grodddroid/AnalyseAndroBlareTools/algo/logconverter_m.py | demirdagemir/thesis | 4a48bddf815c91729e27484548bb7bbf7ddeda64 | [
"MIT"
] | null | null | null | grodddroid/AnalyseAndroBlareTools/algo/logconverter_m.py | demirdagemir/thesis | 4a48bddf815c91729e27484548bb7bbf7ddeda64 | [
"MIT"
] | null | null | null | grodddroid/AnalyseAndroBlareTools/algo/logconverter_m.py | demirdagemir/thesis | 4a48bddf815c91729e27484548bb7bbf7ddeda64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import graphtodot
import argparse
import time
import ply.lex as lex
import networkx as nx
import ply.yacc as yacc
import math
import string
import libsystemflowgraph as lsfg
import re
import os.path
import os
from multiprocessing import Pool
# Define a list of couple (pattern, substitute) that can serve to format correctly a log entry
# (applied in order by clean_entry() before parsing; the commented-out pair is kept for reference)
regexp_list = [("> itag\[(-*[0-9 ]*)\]", "> {\\1}"), ("}select.*$", "}"), ("> \(null\)", "> {0}"), ("}failed.*$", "}"), ("}.+$", "}"), #("^.* > file /dev/cpuctl/tasks .*$", ""),
("re-initialized>", "re-initialized"), ("(process .*)\]", "\\1"), ("([^ >\\n\\t])\[", "\\1"), ("'", ""), (" #", "#"), ("Binder Thread", "BinderThread"), ("> process ([0-9])", "> process a\\1"), ("\] process ([0-9])", "\] process a\\1")]
# List of regexp that will serve to discard unwanted log entries
discard_filter = ["> file /proc/.*/oom_adj", "> {}$", "> file /sys/power/wake_", "/dev/cpuctl/bg_non_interactive/tasks", "\[BLARE\]", "^.* > file /dev/cpuctl/tasks .*$"]
# Command-line interface; the resulting `args` is read by the ply action
# functions below (info_id and thread_node in particular).
argparser = argparse.ArgumentParser(description="Process blare log to build graphs", usage="logconverter-nx.py -i input [--cluster_system] --o_type <TYPE1> <TYPE2> -j N --id N")
argparser.add_argument('-i', help='Specify the input file to be parsed', action='store', required=False, dest='input')
argparser.add_argument('--cluster_system', help='Cluster threads running in system_server process', action='store_true', required=False, default=False)
argparser.add_argument('--thread_node', help='Specify if threads of the same process should be represented by different nodes or not', action='store_true', required=False, default=False)
argparser.add_argument('--o_type', help='Specify the file output type(s). Supported types are dot, gexf and pickle', required=False, nargs='+')
argparser.add_argument('--id', help='Specify an information identifier. The output graph will describe how the information flow', action='store', required=False, default=False, dest='info_id', type=int)
argparser.add_argument('--ntime', help='Replace each timestamp with an integer value t such as the timestamp of the previous flow was t-1 and the next one is t+1 ', action='store_true', required=False, default=False)
argparser.add_argument('-j', help='Define the number of jobs to use. It can be useful if we want to parse more than one Blare log', required=False, default=1, dest='job', type=int)
args = argparser.parse_args()
def clean_entry(line, sub_re=None, disc_re=None):
    """
    Return a new version of line that was stripped from any noise due to other kernel messages.
    line is a string that represent a log entry (a description of a flow).
    sub_re is a list of (regexp, substitute) couples applied in order to line.
    disc_re is a list of discarding patterns. If line contains one of the pattern, clean_entry will return None to indicate that line should be ignored
    """
    res = line
    # BUG FIX: both arguments default to None but were iterated directly,
    # so calling clean_entry(line) raised TypeError.  Treat None as "empty".
    for pattern, repl in (sub_re or ()):
        res = re.sub(pattern, repl, res)
    for pattern in (disc_re or ()):
        if re.search(pattern, res) is not None:
            return None
    return res
def same_container(cont1, cont2):
    """
    Return True if cont1 and cont2 are the same containers. We assume that processes
    that share the same PID are the same container even if their name differ. We
    assume that files that are located in the same directory and share the same
    inode are the same containers too even if their name differ. In reality
    this should not be limited to files in the same directory but located in the
    same partition.

    Each container is a (type, name, id) tuple, e.g. ('process', name, pid)
    or ('file', path, inode).
    """
    # BUG FIX: removed the unused local `partition_list` (dead code).
    if (cont1 == cont2):
        return True
    if (cont1[0] == cont2[0]):
        if (cont1[0] == 'process'):
            # Same PID => same process, whatever the name.
            return cont1[2] == cont2[2]
        elif (cont1[0] == 'file') and (cont1[2] == cont2[2]):
            # Same inode: compare the directory part of the two paths.
            s1 = cont1[1].split("/")
            s2 = cont2[1].split("/")
            if len(s1) == len(s2):
                i = 0
                equal = True
                # NOTE(review): the bound len(s1) - 2 skips the last TWO path
                # components, not just the filename -- possibly off by one;
                # kept unchanged to preserve the established matching behavior.
                while equal and (i < (len(s1) - 2)):
                    if not (s1[i] == s2[i]):
                        equal = False
                    i += 1
                if equal:
                    return True
        elif (cont1[0] == 'socket'):
            # Sockets are matched by name only.
            return cont1[1] == cont2[1]
    return False
def cleansfg(sfg):
    """
    Drop every node of sfg that has neither incoming nor outgoing edges.
    """
    isolated = [node for node in sfg.nodes()
                if len(lsfg.get_out_edges(sfg, node)) == 0
                and len(lsfg.get_in_edges(sfg, node)) == 0]
    for node in isolated:
        sfg.remove_node(node)
# Tokens
# Token names recognized by the ply lexer; each must have a matching t_<NAME>
# rule (or value) below.
tokens = (
    'TIMESTAMP',
    'INTEGER',
    'STRING',
    #'LEVEL',
)
# Single-character tokens returned to the parser as literal characters.
literals = ['[', ']', '-', '<', '>', '{', '}']
def t_TIMESTAMP(t):
    r'\d+\.\d+'
    # ply rule: the raw-string docstring above IS the token's regex.
    # Convert the "seconds.microseconds" kernel timestamp to integer microseconds.
    t.value = (int) (math.pow(10, 6) * float(t.value))
    return t
def t_INTEGER(t):
    r'[+-]?\d+'
    # ply rule: the raw-string docstring above IS the token's regex.
    t.value = int(t.value)
    return t
#t_STRING = r'[^ \n<>()\[\]{}+]'
# Characters allowed in bare string tokens (names, paths, identifiers).
t_STRING = r'[a-zA-Z0-9/#.()$:@_-]+'
# Whitespace (including newlines) is skipped by the lexer.
t_ignore = ' \n\t'
#def t_newline(t):
# r'\n+'
# t.lexer.lineno += len(t.value)
def t_error(t):
    # Report the offending character (lineno is the global line counter
    # maintained by buildsfg) and skip it so lexing can continue.
    # (Python 2 print statement, consistent with the rest of this module.)
    print "Illegal character at line " + str(lineno) + " : *+ " + t.value + " +*"
    t.lexer.skip(1)
    #quit()
def find_column(input,token):
    """Return the 1-based column of *token* within *input*.

    BUG FIX: the previous version kept the index of the newline itself, so
    every token past the first line was reported one column too far right.
    This is the canonical ply recipe (rfind the newline, then add 1).
    """
    last_cr = input.rfind('\n',0,token.lexpos)
    line_start = last_cr + 1          # 0 when the token is on the first line
    return (token.lexpos - line_start) + 1
# Build the lexer from the t_* rules above.
lexer = lex.lex(debug=0)
# Grammar of the alert-log that can be parsed
# ALERT : KERN_MSG_LEVEL '['timestamp']' '['MSG_TAG']' CONTAINER '>' CONTAINER > '{' FLOW '}'
#
# CONTAINER : string string C_ID ;; first string is the container type and the second one is its name
#
# C_ID : integer
#
# FLOW : integer FLOW
# | integer
#
# A container is meant to be converted into a vertex and a flow into an edge
# Module-level graph populated by the parser actions; reset by buildsfg().
flow_graph = nx.MultiDiGraph()
new_node_id = 0
def p_alert(p):
    ''' alert : level '[' TIMESTAMP ']' '[' STRING ']' container '>' container '>' '{' flow '}' '''
    # NOTE: the docstring above is the ply grammar rule -- do not edit it.
    # Add code to link vertexes corresponding to the flow
    # NOTE(review): edge_flows is computed but never used below.
    edge_flows = nx.get_edge_attributes(flow_graph, 'flow')
    current_flow = []
    # info_id defaults to False: keep the whole itag list unless the user
    # asked to track a single information identifier.
    if (args.info_id == False):
        current_flow = p[13]
    elif (args.info_id in p[13]):
        current_flow = [args.info_id]
    if (current_flow != []):
        flow_set = set(current_flow)
        # If an edge with the same flow already links the same two containers,
        # only append the new timestamp instead of adding a parallel edge.
        for edge in flow_graph.edges(data=True, keys=True):
            if ((edge[0], edge[1]) == (p[8], p[10])) and (set(edge[3]['flow']) == flow_set):
                edge[3]['timestamp'].append(p[3])
                return
        flow_graph.add_edge(p[8], p[10], flow=current_flow, timestamp=[p[3]])
    # Check if p[8] is the apk that was analysed. If that is the case then give its name
    # as the value of the attribute app_name of the SFG
    # (dict.has_key is Python 2 only, consistent with the rest of this module)
    if not (flow_graph.graph.has_key('app_name') and (flow_graph.graph['app_name'] != "")):
        if ((p[8][0] == "file") and (p[10][0] == "process") and p[8][1].startswith("/data/app/")
            and p[8][1].endswith("apk")):
            f_name = p[8][1].split("/")[3]
            # NOTE(review): str.find returns -1 (truthy) when absent and 0
            # (falsy) when the process name is a prefix of the file name --
            # this condition looks inverted; confirm the intended test.
            if f_name.find(p[10][1]):
                flow_graph.graph["app_name"] = f_name.replace("-1.apk", "").replace(".apk", "")
def p_level(p):
    ''' level : '<' INTEGER '>' '''
    # The kernel message level is the integer between the angle brackets.
    p[0] = p[2]
def p_container(p):
    'container : STRING container_name INTEGER'
    # Grammar rule: STRING is the container type ('process', 'file', ...),
    # container_name its name and INTEGER its identifier (PID or inode).
    #global new_node_id
    global args
    cont_name = p[2]
    # Unless --thread_node was given, strip the ":<thread>" suffix so all
    # threads of the same process map onto a single node.
    if (args.thread_node == False):
        cont_name = p[2].split(":", 1)[0]
    new_node = (p[1], cont_name, p[3])
    if (not flow_graph.has_node(new_node)):
        flow_graph.add_node(new_node)
        if (new_node[0] == 'process') or (new_node[0] == 'file'):
            # We assume that a PID is a unique identifier for all processes listed in the log.
            # So if two processes have the same PID we assume that they are the same process but
            # its name changed during its execution. It can happen
            former = None
            for n in flow_graph.nodes():
                if same_container(n, new_node):
                    former = n
                    break
            if not (former is None):
                # Move every edge of the matching old node onto the new one,
                # then remove the old node.
                for e in lsfg.get_out_edges(flow_graph, former):
                    flow_graph.add_edge(new_node, e[1], flow=e[3]['flow'], timestamp=e[3]['timestamp'])
                for e in lsfg.get_in_edges(flow_graph, former):
                    flow_graph.add_edge(e[0], new_node, flow=e[3]['flow'], timestamp=e[3]['timestamp'])
                flow_graph.remove_node(former)
    p[0] = new_node
def p_container_name(p):
    ''' container_name : STRING
                       | container_name2'''
    # A name is either a single STRING token or a re-joined multi-token name
    p[0] = p[1]
def p_container_name2(p):
    'container_name2 : STRING container_name'
    # Container names may be split into several STRING tokens; concatenate them
    p[0] = p[1] + p[2]
def p_flow(p):
    'flow : flow INTEGER'
    # Left-recursive list of information ids; extend the list in place
    p[1].append(p[2])
    p[0] = p[1]
def p_flow_single_lement(p):
    'flow : INTEGER'
    # Base case: a flow carrying a single information id.
    # NOTE(review): "lement" is a typo for "element"; PLY only keys on the
    # "p_" prefix, so the misspelling is functionally harmless.
    p[0] = [p[1]]
def p_error(p):
    """PLY parser error handler: report the offending token and current line."""
    print "Syntax error at line " + str(lineno) + " : " + str(p)
def buildsfg(filename):
    """
    Build a SFG from logfile.
    The SFG will have an attribute app_name of which value is supposed to be the application that
    was analysed to produce the Blare log

    Returns a (MultiDiGraph, filename) tuple.
    Fix: the log file handle is now closed via a context manager (it was
    previously left open).
    """
    global regexp_list
    global discard_filter
    global flow_graph
    global lineno
    lineno = 1
    parser = yacc.yacc(debug=False)
    flow_graph = nx.MultiDiGraph()
    previous_line = ""
    with open(filename) as logfile:
        for line in logfile:
            new_line = clean_entry(line, regexp_list, discard_filter)
            # Skip unparseable lines, consecutive duplicates and near-empty lines
            if (not (new_line is None)) and (new_line != previous_line) and (len(new_line) > 5):
                parser.parse(new_line)
                previous_line = new_line
            lineno += 1
    cleansfg(flow_graph)
    return (flow_graph, filename)
# NOTE(review): module-level smoke test with a hard-coded user-specific path;
# it runs on import and will fail on any other machine -- consider guarding
# with "if __name__ == '__main__':" and taking the path as an argument.
(G, a) = buildsfg('/home/tr4ckt3r/Documents/Projet3A/BlareLogs/simple_cond/log')
print "Les noeuds du graphe sont :"
print G.nodes()
print "Les arretes du graphe sont :"
print G.edges()
| 37.690299 | 240 | 0.604891 |
110a0d936cbf3f495059983a0b5237314fb45221 | 32,427 | py | Python | src/dawgdictionary.py | tommyy911/Netskrafl | 6d2cee482290ae64693ea3bcf7e1695abd8fede1 | [
"MIT",
"Unlicense"
] | null | null | null | src/dawgdictionary.py | tommyy911/Netskrafl | 6d2cee482290ae64693ea3bcf7e1695abd8fede1 | [
"MIT",
"Unlicense"
] | 1 | 2021-05-07T11:39:47.000Z | 2021-05-07T11:39:47.000Z | src/dawgdictionary.py | tommyy911/Netskrafl | 6d2cee482290ae64693ea3bcf7e1695abd8fede1 | [
"MIT",
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
""" Word dictionary implemented with a DAWG
Copyright (C) 2019 Miðeind ehf.
Author: Vilhjálmur Þorsteinsson
The GNU General Public License, version 3, applies to this software.
For further information, see https://github.com/mideind/Netskrafl
DawgDictionary uses a Directed Acyclic Word Graph (DAWG) internally
to store a large set of words in an efficient structure in terms
of storage and speed.
The graph is pre-built using the code in dawgbuilder.py and stored
in a text-based file to be loaded at run-time by DawgDictionary.
The main class supports three fundamental query functions:
DawgDictionary.find(word)
Returns True if the word is found in the dictionary, or False if not.
The __contains__ operator is supported, so "'myword' in dawgdict" also works.
DawgDictionary.find_matches(pattern)
Returns a list of words that match the pattern. The pattern can contain
wildcards ('?'). For example, result = dawgdict.find_matches("ex???") returns
a list of all 5-letter words starting with "ex".
DawgDictionary.find_permutations(rack)
Returns a list of all permutations of the given rack, i.e. valid words
consisting of one or more letters from the rack in various orders.
The rack may contain wildcards ('?'). For example,
result = dawgdict.find_permutations("se?")
returns a list of all words from 1 to 3 characters that can be constructed from
the letters "s" and "e" and any one additional letter.
All of the above query functions are built on top of a generic DAWG navigation function:
DawgDictionary.navigate(navigator)
Uses a navigation object to control the traversal of the graph and tabulate
results. The navigation object should implement a number of interface functions,
as documented in comments for the navigate() function.
DawgDictionary.FindNavigator(word)
A navigation class to find words by exact match. Used by DawgDictionary.find()
DawgDictionary.PermutationNavigator(rack, minlen)
A navigation class to find rack permutations. Used by DawgDictionary.find_permutations()
DawgDictionary.MatchNavigator(rack, minlen)
A navigation class to find words matching a pattern. Used by DawgDictionary.find_matches()
See also comments in dawgbuilder.py
Test code for this module is found in dawgtester.py
"""
import os
import codecs
import threading
import logging
import time
import struct
import sys
from languages import Alphabet
# Mask away differences between Python 2 and 3
if sys.version_info >= (3, 0):
    # Python 3
    import pickle

    # dict.items() is already a cheap view object in Python 3
    items = lambda d: d.items()
else:
    # Python 2
    # noinspection PyPep8Naming
    import cPickle as pickle

    # iteritems() avoids materializing the full (key, value) list
    items = lambda d: d.iteritems()
class _Node:
""" This class must be at module level for pickling """
def __init__(self):
self.final = False
self.edges = dict()
class DawgDictionary:

    """ A 'classic' DAWG dictionary, loaded either from a text
        file or from a pickle. This implementation has largely
        been surpassed by PackedDawgDictionary, defined below. """

    def __init__(self):
        # Initialize an empty graph
        # The root entry will eventually be self._nodes[0]
        self._nodes = None
        # Running counter of nodes read
        self._index = 1
        # Lock to ensure that only one thread loads the dictionary
        self._lock = threading.Lock()

    def _parse_and_add(self, line):
        """ Parse a single line of a DAWG text file and add to the graph structure """
        # The first line is the root (by convention nodeid 0)
        # The first non-root node is in line 2 and has nodeid 2
        assert self._nodes is not None
        nodeid = self._index if self._index > 1 else 0
        self._index += 1
        # Edges are underscore-separated; a leading "|" marks a final node
        edgedata = line.split(u"_")
        final = False
        firstedge = 0
        if len(edgedata) >= 1 and edgedata[0] == u"|":
            # Vertical bar denotes final node
            final = True
            firstedge = 1
        if nodeid in self._nodes:
            # We have already seen this node id: use the previously created instance
            newnode = self._nodes[nodeid]
        else:
            # The id is appearing for the first time: add it
            newnode = _Node()
            self._nodes[nodeid] = newnode
        newnode.final = final
        # Process the edges; each edge is "prefix:edgeid"
        for edge in edgedata[firstedge:]:
            e = edge.split(u":")
            prefix = e[0]
            edgeid = int(e[1])
            if edgeid == 0:
                # Edge leads to null/zero, i.e. is final
                newnode.edges[prefix] = None
            elif edgeid in self._nodes:
                # Edge leads to a node we've already seen
                newnode.edges[prefix] = self._nodes[edgeid]
            else:
                # Edge leads to a new, previously unseen node: Create it
                newterminal = _Node()
                newnode.edges[prefix] = newterminal
                self._nodes[edgeid] = newterminal

    def load(self, fname):
        """ Load a DAWG from a text file """
        # Reset the graph contents
        with self._lock:
            # Ensure that we don't have multiple threads trying to load simultaneously
            if self._nodes is not None:
                # Already loaded
                return
            self._nodes = dict()
            self._index = 1
            with codecs.open(fname, mode="r", encoding="utf-8") as fin:
                for line in fin:
                    line = line.strip()
                    if line:
                        self._parse_and_add(line)

    def store_pickle(self, fname):
        """ Store a DAWG in a Python pickle file """
        # noinspection Restricted_Python_calls
        with open(fname, "wb") as pf:
            pickle.dump(self._nodes, pf, pickle.HIGHEST_PROTOCOL)

    def load_pickle(self, fname):
        """ Load a DAWG from a Python pickle file """
        with self._lock:
            if self._nodes is not None:
                # Already loaded
                return
            with open(fname, "rb") as pf:
                self._nodes = pickle.load(pf)

    def num_nodes(self):
        """ Return a count of unique nodes in the DAWG """
        return 0 if self._nodes is None else len(self._nodes)

    def find(self, word):
        """ Look for a word in the graph, returning True if it is found or False if not """
        nav = FindNavigator(word)
        self.navigate(nav)
        return nav.is_found()

    def __contains__(self, word):
        """ Enable simple lookup syntax: "word" in dawgdict """
        return self.find(word)

    def find_matches(self, pattern, sort=True):
        """ Returns a list of words matching a pattern.
            The pattern contains characters and '?'-signs denoting wildcards.
            Characters are matched exactly, while the wildcards match any character.
        """
        nav = MatchNavigator(pattern, sort)
        self.navigate(nav)
        return nav.result()

    def find_permutations(self, rack, minlen=0):
        """ Returns a list of legal permutations of a rack of letters.
            The list is sorted in descending order by permutation length.
            The rack may contain question marks '?' as wildcards, matching all letters.
            Question marks should be used carefully as they can
            yield very large result sets.
        """
        nav = PermutationNavigator(rack, minlen)
        self.navigate(nav)
        return nav.result()

    def navigate(self, nav):
        """ A generic function to navigate through the DAWG under
            the control of a navigation object.

            The navigation object should implement the following interface:

            def push_edge(firstchar)
                returns True if the edge should be entered or False if not
            def accepting()
                returns False if the navigator does not want more characters
            def accepts(newchar)
                returns True if the navigator will accept and 'eat' the new character
            def accept(matched, final)
                called to inform the navigator of a match and whether it is a final word
            def pop_edge()
                called when leaving an edge that has been navigated; returns False
                if there is no need to visit other edges
            def done()
                called when the navigation is completed
        """
        if self._nodes is None:
            # No graph: no navigation
            nav.done()
            return
        root = self._nodes[0]  # Start at the root
        Navigation(nav).go(root)

    # noinspection PyMethodMayBeStatic
    def resume_navigation(self, nav, prefix, nextnode, leftpart):
        """ Continue a previous navigation of the DAWG, using saved
            state information """
        return Navigation(nav).resume(prefix, nextnode, leftpart)
class Wordbase:

    """ Container for two singleton instances of the word database,
        one for the main dictionary and the other for common words
    """

    # Lazily loaded singletons (main dictionary and common-word dictionary)
    _dawg = None
    _dawg_common = None

    # Separate locks so the two dictionaries can load independently
    _lock = threading.Lock()
    _lock_common = threading.Lock()

    def __init__(self):
        pass

    @staticmethod
    def _load_resource(resource):
        """ Load a dictionary, from either a text file or a pickle file """
        # Assumes that the appropriate lock has been acquired
        # Compare the file times of the text version vs. the pickled version
        bname = os.path.abspath(os.path.join("resources", resource + ".bin.dawg"))
        pname = os.path.abspath(os.path.join("resources", resource + ".dawg.pickle"))
        fname = os.path.abspath(os.path.join("resources", resource + ".text.dawg"))
        # A missing file is represented by a None timestamp
        try:
            fname_t = os.path.getmtime(fname)
        except os.error:
            fname_t = None
        try:
            bname_t = os.path.getmtime(bname)
        except os.error:
            bname_t = None
        try:
            pname_t = os.path.getmtime(pname)
        except os.error:
            pname_t = None
        if bname_t is not None and (fname_t is None or bname_t > fname_t):
            # Load binary file if it exists and is newer than the text file
            logging.info(
                u"Instance {0} loading DAWG from binary file {1}"
                .format(os.environ.get("INSTANCE_ID", ""), bname)
            )
            # print("Loading binary DAWG")
            t0 = time.time()
            dawg = PackedDawgDictionary()
            dawg.load(bname)
            t1 = time.time()
            logging.info(u"Loaded complete graph in {0:.2f} seconds".format(t1 - t0))
        elif fname_t is not None and (pname_t is None or fname_t > pname_t):
            # We have a newer text file (or no pickle): load it
            logging.info(
                u"Instance {0} loading DAWG from text file {1}"
                .format(os.environ.get("INSTANCE_ID", ""), fname)
            )
            # print("Loading text DAWG")
            t0 = time.time()
            dawg = DawgDictionary()
            dawg.load(fname)
            t1 = time.time()
            logging.info(
                u"Loaded {0} graph nodes in {1:.2f} seconds"
                .format(dawg.num_nodes(), t1 - t0)
            )
        else:
            # Newer pickle file or no text file: load the pickle
            logging.info(
                u"Instance {0} loading DAWG from pickle file {1}"
                .format(os.environ.get("INSTANCE_ID", ""), pname)
            )
            # print("Loading pickled DAWG")
            t0 = time.time()
            dawg = DawgDictionary()
            dawg.load_pickle(pname)
            t1 = time.time()
            logging.info(
                u"Loaded {0} graph nodes in {1:.2f} seconds"
                .format(dawg.num_nodes(), t1 - t0)
            )
        # Do not assign Wordbase._dawg until fully loaded, to prevent race conditions
        return dawg

    @staticmethod
    def dawg():
        """ Return the main dictionary DAWG object, loading it if required """
        with Wordbase._lock:
            if Wordbase._dawg is None:
                # Main dictionary
                Wordbase._dawg = Wordbase._load_resource("ordalisti")
            assert Wordbase._dawg is not None
            return Wordbase._dawg

    @staticmethod
    def dawg_common():
        """ Return the common words DAWG object, loading it if required """
        with Wordbase._lock_common:
            if Wordbase._dawg_common is None:
                # Common words
                Wordbase._dawg_common = Wordbase._load_resource("algeng")
            assert Wordbase._dawg_common is not None
            return Wordbase._dawg_common
class Navigation:

    """ Manages the state for a navigation while it is in progress """

    def __init__(self, nav):
        self._nav = nav
        # If the navigator has a method called accept_resumable(),
        # note it and call it with additional state information instead of
        # plain accept()
        self._resumable = callable(getattr(nav, "accept_resumable", None))

    def _navigate_from_node(self, node, matched):
        """ Starting from a given node, navigate outgoing edges """
        # Go through the edges of this node and follow the ones
        # okayed by the navigator
        nav = self._nav
        for prefix, nextnode in items(node.edges):
            if nav.push_edge(prefix[0]):
                # This edge is a candidate: navigate through it
                self._navigate_from_edge(prefix, nextnode, matched)
                if not nav.pop_edge():
                    # Short-circuit and finish the loop if pop_edge() returns False
                    break

    def _navigate_from_edge(self, prefix, nextnode, matched):
        """ Navigate along an edge, accepting partial and full matches """
        # Go along the edge as long as the navigator is accepting
        lenp = len(prefix)
        j = 0
        nav = self._nav
        while j < lenp and nav.accepting():
            # See if the navigator is OK with accepting the current character
            if not nav.accepts(prefix[j]):
                # Nope: we're done with this edge
                return
            # So far, we have a match: add a letter to the matched path
            matched += prefix[j]
            j += 1
            # Check whether the next prefix character is a vertical bar, denoting finality
            final = False
            if j < lenp:
                if prefix[j] == u"|":
                    final = True
                    j += 1
            elif (nextnode is None) or nextnode.final:
                # If we're at the final char of the prefix and the next node is final,
                # set the final flag as well (there is no trailing vertical bar in this case)
                final = True
            # Tell the navigator where we are
            if self._resumable:
                # The navigator wants to know the position in the graph
                # so that navigation can be resumed later from this spot
                nav.accept_resumable(prefix[j:], nextnode, matched)
            else:
                # Normal navigator: tell it about the match
                nav.accept(matched, final)
        # We're done following the prefix for as long as it goes and
        # as long as the navigator was accepting
        if j < lenp:
            # We didn't complete the prefix, so the navigator must no longer
            # be interested (accepting): we're done
            return
        if nav.accepting() and (nextnode is not None):
            # Gone through the entire edge and still have rack letters left:
            # continue with the next node
            self._navigate_from_node(nextnode, matched)

    def go(self, root):
        """ Perform the navigation using the given navigator """
        if root is None:
            # No root: no navigation
            self._nav.done()
            return
        # The ship is ready to go
        if self._nav.accepting():
            # Leave shore and navigate the open seas
            self._navigate_from_node(root, u"")
        self._nav.done()

    def resume(self, prefix, nextnode, matched):
        """ Resume navigation from a previously saved state """
        self._navigate_from_edge(prefix, nextnode, matched)
class FindNavigator:

    """ Navigator for DawgDictionary.navigate() that checks, via an
        exact character-by-character match, whether a single word
        is present in the graph """

    def __init__(self, word):
        self._word = word
        self._len = len(word)
        self._index = 0
        self._found = False

    def push_edge(self, firstchar):
        """ Returns True if the edge should be entered or False if not """
        # Only descend into the edge whose first letter continues the word
        return firstchar == self._word[self._index]

    def accepting(self):
        """ Returns False if the navigator does not want more characters """
        # Stop asking for characters once the whole word has been consumed
        return self._index < self._len

    def accepts(self, newchar):
        """ Returns True if the navigator will accept the new character """
        if self._word[self._index] != newchar:
            return False
        # The character continues the word: advance the cursor
        self._index += 1
        return True

    # pylint: disable=unused-argument
    def accept(self, matched, final):
        """ Called to inform the navigator of a match and whether it is a final word """
        # The word exists only if the graph marks this exact length as final
        if final and self._len == self._index:
            self._found = True

    def pop_edge(self):
        """ Called when leaving an edge that has been navigated """
        # At most one edge can match the word; skip the sibling edges
        return False

    def done(self):
        """ Called when the whole navigation is done """
        return

    def is_found(self):
        """ Return True if the sought word was found in the DAWG """
        return self._found
class PermutationNavigator:

    """ Navigator for DawgDictionary.navigate() that collects every
        valid word that can be laid down from a rack of letters,
        where '?' is a blank tile matching any letter """

    def __init__(self, rack, minlen=0):
        self._rack = rack
        self._stack = []
        self._result = []
        self._minlen = minlen

    def push_edge(self, firstchar):
        """ Returns True if the edge should be entered or False if not """
        rack = self._rack
        if firstchar not in rack and u"?" not in rack:
            # Neither a matching tile nor a blank: this edge is a dead end
            return False
        # Remember the rack so pop_edge() can restore it afterwards
        self._stack.append(rack)
        return True

    def accepting(self):
        """ Returns False if the navigator does not want more characters """
        # Keep going while any tile remains on the rack
        return len(self._rack) > 0

    def accepts(self, newchar):
        """ Returns True if the navigator will accept the new character """
        rack = self._rack
        if newchar in rack:
            # Spend the exact tile
            self._rack = rack.replace(newchar, u"", 1)
            return True
        if u"?" in rack:
            # Spend a blank tile to stand in for this letter
            self._rack = rack.replace(u"?", u"", 1)
            return True
        # No tile on the rack can produce this character
        return False

    def accept(self, matched, final):
        """ Called to inform the navigator of a match and whether it is a final word """
        if final and len(matched) >= self._minlen:
            self._result.append(matched)

    def pop_edge(self):
        """ Called when leaving an edge that has been navigated """
        self._rack = self._stack.pop()
        # Sibling edges may fit the restored rack as well: keep iterating
        return True

    def done(self):
        """ Called when the whole navigation is done """
        # Longest words first, ties broken by the locale collation order
        self._result.sort(key=lambda x: (-len(x), Alphabet.sortkey(x)))

    def result(self):
        """ Return the list of results accumulated during the navigation """
        return self._result
class MatchNavigator:

    """ Navigator for DawgDictionary.navigate() that collects every word
        matching a pattern, where '?' stands for any single letter """

    def __init__(self, pattern, sort):
        self._pattern = pattern
        self._lenp = len(pattern)
        self._index = 0
        # Cache the pattern character (and its wildcard-ness) at the cursor
        self._chmatch = pattern[0]
        self._wildcard = self._chmatch == u"?"
        self._stack = []
        self._result = []
        self._sort = sort

    def push_edge(self, firstchar):
        """ Returns True if the edge should be entered or False if not """
        if firstchar != self._chmatch and not self._wildcard:
            return False
        # Save the cursor state so pop_edge() can rewind it
        self._stack.append((self._index, self._chmatch, self._wildcard))
        return True

    def accepting(self):
        """ Returns False if the navigator does not want more characters """
        # Keep accepting until the whole pattern has been consumed
        return self._index < self._lenp

    def accepts(self, newchar):
        """ Returns True if the navigator will accept the new character """
        if newchar != self._chmatch and not self._wildcard:
            return False
        self._index += 1
        if self._index < self._lenp:
            # Advance the cached match character to the next pattern position
            self._chmatch = self._pattern[self._index]
            self._wildcard = self._chmatch == u"?"
        return True

    def accept(self, matched, final):
        """ Called to inform the navigator of a match and whether it is a final word """
        # Register only full-length pattern matches that are valid words
        # (Note that this could be relaxed to also return partial (shorter) pattern matches)
        if final and self._index == self._lenp:
            self._result.append(matched)

    def pop_edge(self):
        """ Called when leaving an edge that has been navigated """
        self._index, self._chmatch, self._wildcard = self._stack.pop()
        # Sibling edges can only match when the cursor sits on a wildcard
        return self._wildcard

    def done(self):
        """ Called when the whole navigation is done """
        if self._sort:
            self._result.sort(key=Alphabet.sortkey)

    def result(self):
        """ Return the list of results accumulated during the navigation """
        return self._result
class PackedDawgDictionary:

    """ Encapsulates a DAWG dictionary that is initialized from a packed
        binary file on disk and navigated as a byte buffer. """

    def __init__(self):
        # The packed byte buffer
        self._b = None
        # Lock to ensure that only one thread loads the dictionary
        self._lock = threading.Lock()

    def load(self, fname):
        """ Load a packed DAWG from a binary file """
        with self._lock:
            # Ensure that we don't have multiple threads trying to load simultaneously
            if self._b is not None:
                # Already loaded
                return
            # Quickly gulp the file contents into the byte buffer
            with open(fname, mode="rb") as fin:
                self._b = bytearray(fin.read())

    def num_nodes(self):
        """ Return a count of unique nodes in the DAWG """
        return 0  # !!! TBD - maybe not required

    def find(self, word):
        """ Look for a word in the graph, returning True if it is found or False if not """
        nav = FindNavigator(word)
        self.navigate(nav)
        return nav.is_found()

    def __contains__(self, word):
        """ Enable simple lookup syntax: "word" in dawgdict """
        return self.find(word)

    def find_matches(self, pattern, sort=True):
        """ Returns a list of words matching a pattern.
            The pattern contains characters and '?'-signs denoting wildcards.
            Characters are matched exactly, while the wildcards match any character.
        """
        nav = MatchNavigator(pattern, sort)
        self.navigate(nav)
        return nav.result()

    def find_permutations(self, rack, minlen=0):
        """ Returns a list of legal permutations of a rack of letters.
            The list is sorted in descending order by permutation length.
            The rack may contain question marks '?' as wildcards, matching all letters.
            Question marks should be used carefully as they can
            yield very large result sets.
        """
        nav = PermutationNavigator(rack, minlen)
        self.navigate(nav)
        return nav.result()

    def navigate(self, nav):
        """ A generic function to navigate through the DAWG under
            the control of a navigation object.

            The navigation object should implement the following interface:

            def push_edge(firstchar)
                returns True if the edge should be entered or False if not
            def accepting()
                returns False if the navigator does not want more characters
            def accepts(newchar)
                returns True if the navigator will accept and 'eat' the new character
            def accept(matched, final)
                called to inform the navigator of a match and whether it is a final word
            def pop_edge()
                called when leaving an edge that has been navigated; returns False
                if there is no need to visit other edges
            def done()
                called when the navigation is completed
        """
        if self._b is None:
            # No graph: no navigation
            nav.done()
        else:
            PackedNavigation(nav, self._b).go()

    def resume_navigation(self, nav, prefix, nextnode, leftpart):
        """ Continue a previous navigation of the DAWG, using saved
            state information """
        return PackedNavigation(nav, self._b).resume(prefix, nextnode, leftpart)
class PackedNavigation:

    """ Manages the state for a navigation while it is in progress """

    # Assemble a decoding dictionary where encoded indices are mapped to
    # characters, eventually with a suffixed vertical bar '|' to denote finality
    _CODING = {i: c for i, c in enumerate(Alphabet.order)}
    _CODING.update({i | 0x80: c + u"|" for i, c in enumerate(Alphabet.order)})

    # The structure used to decode an edge offset from bytes
    _UINT32 = struct.Struct("<L")

    # Dictionary of edge iteration caches, keyed by byte buffer.
    # NOTE(review): keyed by id(buffer); if a buffer is garbage collected
    # its id may be reused by another buffer -- confirm buffers live for
    # the process lifetime (they do for the Wordbase singletons).
    _iter_caches = dict()

    def __init__(self, nav, b):
        # Store the associated navigator
        self._nav = nav
        # The DAWG bytearray
        self._b = b
        if id(b) in self._iter_caches:
            # We already have a cache associated with this byte buffer
            self._iter_cache = self._iter_caches[id(b)]
        else:
            # Create a fresh cache for this byte buffer
            self._iter_cache = self._iter_caches[id(b)] = dict()
        # If the navigator has a method called accept_resumable(),
        # note it and call it with additional state information instead of
        # plain accept()
        self._resumable = callable(getattr(nav, "accept_resumable", None))

    def _iter_from_node(self, offset):
        """ A generator for yielding prefixes and next node offset along an edge
            starting at the given offset in the DAWG bytearray """
        b = self._b
        coding = self._CODING
        # Low 7 bits of the node header byte hold the edge count
        num_edges = b[offset] & 0x7f
        offset += 1
        for _ in range(num_edges):
            len_byte = b[offset]
            offset += 1
            if len_byte & 0x40:
                prefix = coding[len_byte & 0x3f]  # Single character
            else:
                # Multi-character prefix: low 6 bits give its length
                len_byte &= 0x3f
                prefix = u"".join(coding[b[offset + j]] for j in range(len_byte))
                offset += len_byte
            if b[offset - 1] & 0x80:
                # The last character of the prefix had a final marker: nextnode is 0
                nextnode = 0
            else:
                # Read the next node offset
                # Tuple of length 1, i.e. (n, )
                nextnode, = self._UINT32.unpack_from(b, offset)
                offset += 4
            yield prefix, nextnode

    def _make_iter_from_node(self, offset):
        """ Return an iterator over the prefixes and next node pointers
            of the edge at the given offset. If this is the first time
            that the edge is iterated, cache its unpacked contents
            in a dictionary for quicker subsequent iteration. """
        try:
            d = self._iter_cache[offset]
        except KeyError:
            # First visit: unpack the node once and memoize the result
            d = {prefix: nextnode for prefix, nextnode in self._iter_from_node(offset)}
            self._iter_cache[offset] = d
        return items(d)

    def _navigate_from_node(self, offset, matched):
        """ Starting from a given node, navigate outgoing edges """
        # Go through the edges of this node and follow the ones
        # okayed by the navigator
        nav = self._nav
        for prefix, nextnode in self._make_iter_from_node(offset):
            if nav.push_edge(prefix[0]):
                # This edge is a candidate: navigate through it
                self._navigate_from_edge(prefix, nextnode, matched)
                if not nav.pop_edge():
                    # Short-circuit and finish the loop if pop_edge() returns False
                    break

    def _navigate_from_edge(self, prefix, nextnode, matched):
        """ Navigate along an edge, accepting partial and full matches """
        # Go along the edge as long as the navigator is accepting
        b = self._b
        lenp = len(prefix)
        j = 0
        nav = self._nav
        while j < lenp and nav.accepting():
            # See if the navigator is OK with accepting the current character
            if not nav.accepts(prefix[j]):
                # Nope: we're done with this edge
                return
            # So far, we have a match: add a letter to the matched path
            matched += prefix[j]
            j += 1
            # Check whether the next prefix character is a vertical bar, denoting finality
            final = False
            if j < lenp:
                if prefix[j] == u"|":
                    final = True
                    j += 1
            elif nextnode == 0 or b[nextnode] & 0x80:
                # If we're at the final char of the prefix and the next node is final,
                # set the final flag as well (there is no trailing vertical bar in this case)
                final = True
            # Tell the navigator where we are
            if self._resumable:
                # The navigator wants to know the position in the graph
                # so that navigation can be resumed later from this spot
                nav.accept_resumable(prefix[j:], nextnode, matched)
            else:
                # Normal navigator: tell it about the match
                nav.accept(matched, final)
        # We're done following the prefix for as long as it goes and
        # as long as the navigator was accepting
        if j < lenp:
            # We didn't complete the prefix, so the navigator must no longer
            # be interested (accepting): we're done
            return
        if nextnode != 0 and nav.accepting():
            # Gone through the entire edge and still have rack letters left:
            # continue with the next node
            self._navigate_from_node(nextnode, matched)

    def go(self):
        """ Perform the navigation using the given navigator """
        # The ship is ready to go
        if self._nav.accepting():
            # Leave shore and navigate the open seas
            self._navigate_from_node(0, u"")
        self._nav.done()

    def resume(self, prefix, nextnode, matched):
        """ Resume navigation from a previously saved state """
        self._navigate_from_edge(prefix, nextnode, matched)
| 38.881295 | 98 | 0.599007 |
f173e7dbf50ff691f2be74f836777c5fb53cb2b8 | 1,082 | py | Python | IO/PreferenceManager.py | EpicTofuu/Froggers | 0395ef801fe11a7881fd32fd570bf3135a4a761f | [
"MIT"
] | 1 | 2020-11-17T04:32:55.000Z | 2020-11-17T04:32:55.000Z | IO/PreferenceManager.py | EpicTofuu/Froggers | 0395ef801fe11a7881fd32fd570bf3135a4a761f | [
"MIT"
] | null | null | null | IO/PreferenceManager.py | EpicTofuu/Froggers | 0395ef801fe11a7881fd32fd570bf3135a4a761f | [
"MIT"
] | null | null | null | import pickle
# handles ALL save data
# despite the file being called pref, the file contains main game data
# that needs to persist during runs. The naming is used to deter tampering.
class PreferenceManager:
    """Persists game state between runs as a pickled dict (pref.pickle).

    Despite the name, the file contains main game data that needs to persist
    during runs; the naming is used to deter tampering.
    """

    def __init__(self) -> None:
        # key -> saved value; mirrors the contents of pref.pickle
        self.Preferences = dict()

    def write(self):
        """Serialize the current state to pref.pickle."""
        with open('pref.pickle', 'wb') as handle:
            pickle.dump(self.Preferences, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def read(self):
        """Replace the current state with the contents of pref.pickle."""
        with open('pref.pickle', 'rb') as handle:
            self.Preferences = pickle.load(handle)

    def get(self, key, defaultValue, setDefault=True):
        """Safely retrieve a value, falling back to defaultValue.

        Fix: membership is now tested with ``in`` instead of
        ``.get(key) is not None``, so a legitimately stored None value is
        returned instead of being treated as missing (and overwritten).
        If the key is absent and setDefault is true, the default is stored
        and immediately persisted to disk.
        """
        if key in self.Preferences:
            return self.Preferences[key]
        if setDefault:
            # write the value when requested
            self.Preferences[key] = defaultValue
            self.write()
        return defaultValue
| 36.066667 | 91 | 0.625693 |
2188a931c434e3191ccee29456c6f1f507d0febd | 38,893 | py | Python | pymc/tests/test_distributions_moments.py | astoeriko/pymc | 7b4bccda2b2f5b0a3de2fd6505d2056b54ddeb98 | [
"Apache-2.0"
] | 1 | 2020-01-18T05:28:55.000Z | 2020-01-18T05:28:55.000Z | pymc/tests/test_distributions_moments.py | astoeriko/pymc | 7b4bccda2b2f5b0a3de2fd6505d2056b54ddeb98 | [
"Apache-2.0"
] | 1 | 2020-08-03T09:42:56.000Z | 2020-08-03T09:42:56.000Z | pymc/tests/test_distributions_moments.py | astoeriko/pymc | 7b4bccda2b2f5b0a3de2fd6505d2056b54ddeb98 | [
"Apache-2.0"
] | null | null | null | import aesara
import numpy as np
import pytest
import scipy.stats as st
from aesara import tensor as at
from scipy import special
import pymc as pm
from pymc.distributions import (
AsymmetricLaplace,
Bernoulli,
Beta,
BetaBinomial,
Binomial,
Categorical,
Cauchy,
ChiSquared,
Constant,
DensityDist,
Dirichlet,
DiscreteUniform,
ExGaussian,
Exponential,
Flat,
Gamma,
Geometric,
Gumbel,
HalfCauchy,
HalfFlat,
HalfNormal,
HalfStudentT,
HyperGeometric,
Interpolated,
InverseGamma,
Kumaraswamy,
Laplace,
Logistic,
LogitNormal,
LogNormal,
MatrixNormal,
Moyal,
Multinomial,
MvStudentT,
NegativeBinomial,
Normal,
Pareto,
Poisson,
PolyaGamma,
Rice,
Simulator,
SkewNormal,
StudentT,
Triangular,
TruncatedNormal,
Uniform,
VonMises,
Wald,
Weibull,
ZeroInflatedBinomial,
ZeroInflatedNegativeBinomial,
ZeroInflatedPoisson,
)
from pymc.distributions.distribution import _get_moment, get_moment
from pymc.distributions.logprob import logpt
from pymc.distributions.multivariate import MvNormal
from pymc.distributions.shape_utils import rv_size_is_none, to_tuple
from pymc.initial_point import make_initial_point_fn
from pymc.model import Model
def test_all_distributions_have_moments():
    """Check that every distribution either registers a moment or is explicitly exempted."""
    import pymc.distributions as dist_module

    from pymc.distributions.distribution import DistributionMeta

    # Every exported name that is actually a distribution class
    all_dists = [
        obj
        for obj in (getattr(dist_module, name) for name in dist_module.__all__)
        if isinstance(obj, DistributionMeta)
    ]
    # Distributions whose rv_op type has no moment registered
    missing_moments = set()
    for dist in all_dists:
        if type(getattr(dist, "rv_op", None)) not in _get_moment.registry:
            missing_moments.add(dist)

    # Ignore super classes
    missing_moments -= {
        dist_module.Distribution,
        dist_module.Discrete,
        dist_module.Continuous,
        dist_module.NoDistribution,
        dist_module.DensityDist,
        dist_module.simulator.Simulator,
    }

    # Distributions that have not been refactored for V4 yet
    not_implemented = {
        dist_module.multivariate.LKJCorr,
        dist_module.mixture.Mixture,
        dist_module.mixture.MixtureSameFamily,
        dist_module.mixture.NormalMixture,
        dist_module.timeseries.AR,
        dist_module.timeseries.AR1,
        dist_module.timeseries.GARCH11,
        dist_module.timeseries.GaussianRandomWalk,
        dist_module.timeseries.MvGaussianRandomWalk,
        dist_module.timeseries.MvStudentTRandomWalk,
    }

    # Distributions that have been refactored but don't yet have moments
    not_implemented |= {
        dist_module.discrete.DiscreteWeibull,
        dist_module.multivariate.CAR,
        dist_module.multivariate.DirichletMultinomial,
        dist_module.multivariate.KroneckerNormal,
        dist_module.multivariate.Wishart,
    }

    unexpected_implemented = not_implemented - missing_moments
    if unexpected_implemented:
        raise Exception(
            f"Distributions {unexpected_implemented} have a `get_moment` implemented. "
            "This test must be updated to expect this."
        )

    unexpected_not_implemented = missing_moments - not_implemented
    if unexpected_not_implemented:
        raise NotImplementedError(
            f"Unexpected by this test, distributions {unexpected_not_implemented} do "
            "not have a `get_moment` implementation. Either add a moment or filter "
            "these distributions in this test."
        )
def test_rv_size_is_none():
    """Check `rv_size_is_none` on the size input (owner.inputs[1]) of RVs."""
    # No size given: the size input should be recognized as "none".
    rv = Normal.dist(0, 1, size=None)
    assert rv_size_is_none(rv.owner.inputs[1])
    # Explicit scalar size is not "none".
    rv = Normal.dist(0, 1, size=1)
    assert not rv_size_is_none(rv.owner.inputs[1])
    # A random variable used as size is not "none".
    size = Bernoulli.dist(0.5)
    rv = Normal.dist(0, 1, size=size)
    assert not rv_size_is_none(rv.owner.inputs[1])
    # A symbolic size derived from another RV is not "none" either.
    size = Normal.dist(0, 1).size
    rv = Normal.dist(0, 1, size=size)
    assert not rv_size_is_none(rv.owner.inputs[1])
def assert_moment_is_expected(model, expected, check_finite_logp=True):
    """Assert that the "moment" initial point of model["x"] matches `expected`.

    Builds the initial-point function with the "moment" strategy, evaluates it
    for the variable named "x", and checks both shape and values. When
    `check_finite_logp` is True, additionally verifies that the log-probability
    evaluated at the moment is finite.
    """
    fn = make_initial_point_fn(
        model=model,
        return_transformed=False,
        default_strategy="moment",
    )
    moment = fn(0)["x"]
    expected = np.asarray(expected)
    try:
        random_draw = model["x"].eval()
    except NotImplementedError:
        # Some distributions cannot be sampled from; fall back to the moment
        # itself so the shape comparison below still runs.
        random_draw = moment
    # Moment, expectation, and a random draw must all share the same shape.
    assert moment.shape == expected.shape == random_draw.shape
    assert np.allclose(moment, expected)
    if check_finite_logp:
        logp_moment = logpt(model["x"], at.constant(moment), transformed=False).eval()
        assert np.isfinite(logp_moment)
@pytest.mark.parametrize(
"size, expected",
[
(None, 0),
(5, np.zeros(5)),
((2, 5), np.zeros((2, 5))),
],
)
def test_flat_moment(size, expected):
with Model() as model:
Flat("x", size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"size, expected",
[
(None, 1),
(5, np.ones(5)),
((2, 5), np.ones((2, 5))),
],
)
def test_halfflat_moment(size, expected):
with Model() as model:
HalfFlat("x", size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"lower, upper, size, expected",
[
(-1, 1, None, 0),
(-1, 1, 5, np.zeros(5)),
(0, np.arange(1, 6), None, np.arange(1, 6) / 2),
(0, np.arange(1, 6), (2, 5), np.full((2, 5), np.arange(1, 6) / 2)),
],
)
def test_uniform_moment(lower, upper, size, expected):
with Model() as model:
Uniform("x", lower=lower, upper=upper, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, sigma, size, expected",
[
(0, 1, None, 0),
(0, np.ones(5), None, np.zeros(5)),
(np.arange(5), 1, None, np.arange(5)),
(np.arange(5), np.arange(1, 6), (2, 5), np.full((2, 5), np.arange(5))),
],
)
def test_normal_moment(mu, sigma, size, expected):
with Model() as model:
Normal("x", mu=mu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"sigma, size, expected",
[
(1, None, 1),
(1, 5, np.ones(5)),
(np.arange(1, 6), None, np.arange(1, 6)),
(np.arange(1, 6), (2, 5), np.full((2, 5), np.arange(1, 6))),
],
)
def test_halfnormal_moment(sigma, size, expected):
with Model() as model:
HalfNormal("x", sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"nu, sigma, size, expected",
[
(1, 1, None, 1),
(1, 1, 5, np.ones(5)),
(1, np.arange(1, 6), (2, 5), np.full((2, 5), np.arange(1, 6))),
(np.arange(1, 6), 1, None, np.full(5, 1)),
],
)
def test_halfstudentt_moment(nu, sigma, size, expected):
with Model() as model:
HalfStudentT("x", nu=nu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, sigma, lower, upper, size, expected",
[
(0.9, 1, -5, 5, None, 0),
(1, np.ones(5), -10, np.inf, None, np.full(5, -9)),
(np.arange(5), 1, None, 10, (2, 5), np.full((2, 5), 9)),
(1, 1, [-np.inf, -np.inf, -np.inf], 10, None, np.full(3, 9)),
],
)
def test_truncatednormal_moment(mu, sigma, lower, upper, size, expected):
with Model() as model:
TruncatedNormal("x", mu=mu, sigma=sigma, lower=lower, upper=upper, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"p, size, expected",
[
(0.3, None, 0),
(0.9, 5, np.ones(5)),
(np.linspace(0, 1, 4), None, [0, 0, 1, 1]),
(np.linspace(0, 1, 4), (2, 4), np.full((2, 4), [0, 0, 1, 1])),
],
)
def test_bernoulli_moment(p, size, expected):
with Model() as model:
Bernoulli("x", p=p, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"alpha, beta, size, expected",
[
(1, 1, None, 0.5),
(1, 1, 5, np.full(5, 0.5)),
(1, np.arange(1, 6), None, 1 / np.arange(2, 7)),
(1, np.arange(1, 6), (2, 5), np.full((2, 5), 1 / np.arange(2, 7))),
],
)
def test_beta_moment(alpha, beta, size, expected):
with Model() as model:
Beta("x", alpha=alpha, beta=beta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"n, alpha, beta, size, expected",
[
(10, 1, 1, None, 5),
(10, 1, 1, 5, np.full(5, 5)),
(10, 1, np.arange(1, 6), None, np.round(10 / np.arange(2, 7))),
(10, 1, np.arange(1, 6), (2, 5), np.full((2, 5), np.round(10 / np.arange(2, 7)))),
],
)
def test_beta_binomial_moment(alpha, beta, n, size, expected):
with Model() as model:
BetaBinomial("x", alpha=alpha, beta=beta, n=n, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"nu, size, expected",
[
(1, None, 1),
(1, 5, np.full(5, 1)),
(np.arange(1, 6), None, np.arange(1, 6)),
],
)
def test_chisquared_moment(nu, size, expected):
with Model() as model:
ChiSquared("x", nu=nu, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"lam, size, expected",
[
(2, None, 0.5),
(2, 5, np.full(5, 0.5)),
(np.arange(1, 5), None, 1 / np.arange(1, 5)),
(np.arange(1, 5), (2, 4), np.full((2, 4), 1 / np.arange(1, 5))),
],
)
def test_exponential_moment(lam, size, expected):
with Model() as model:
Exponential("x", lam=lam, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, b, size, expected",
[
(0, 1, None, 0),
(0, np.ones(5), None, np.zeros(5)),
(np.arange(5), 1, None, np.arange(5)),
(np.arange(5), np.arange(1, 6), (2, 5), np.full((2, 5), np.arange(5))),
],
)
def test_laplace_moment(mu, b, size, expected):
with Model() as model:
Laplace("x", mu=mu, b=b, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, nu, sigma, size, expected",
[
(0, 1, 1, None, 0),
(0, np.ones(5), 1, None, np.zeros(5)),
(np.arange(5), 10, np.arange(1, 6), None, np.arange(5)),
(
np.arange(5),
10,
np.arange(1, 6),
(2, 5),
np.full((2, 5), np.arange(5)),
),
],
)
def test_studentt_moment(mu, nu, sigma, size, expected):
with Model() as model:
StudentT("x", mu=mu, nu=nu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"alpha, beta, size, expected",
[
(0, 1, None, 0),
(0, np.ones(5), None, np.zeros(5)),
(np.arange(5), 1, None, np.arange(5)),
(np.arange(5), np.arange(1, 6), (2, 5), np.full((2, 5), np.arange(5))),
],
)
def test_cauchy_moment(alpha, beta, size, expected):
with Model() as model:
Cauchy("x", alpha=alpha, beta=beta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"a, b, size, expected",
[
(1, 1, None, 0.5),
(1, 1, 5, np.full(5, 0.5)),
(1, np.arange(1, 6), None, 1 / np.arange(2, 7)),
(np.arange(1, 6), 1, None, np.arange(1, 6) / np.arange(2, 7)),
(1, np.arange(1, 6), (2, 5), np.full((2, 5), 1 / np.arange(2, 7))),
],
)
def test_kumaraswamy_moment(a, b, size, expected):
with Model() as model:
Kumaraswamy("x", a=a, b=b, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, sigma, size, expected",
[
(0, 1, None, np.exp(0.5)),
(0, 1, 5, np.full(5, np.exp(0.5))),
(np.arange(5), 1, None, np.exp(np.arange(5) + 0.5)),
(
np.arange(5),
np.arange(1, 6),
(2, 5),
np.full((2, 5), np.exp(np.arange(5) + 0.5 * np.arange(1, 6) ** 2)),
),
],
)
def test_lognormal_moment(mu, sigma, size, expected):
with Model() as model:
LogNormal("x", mu=mu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"beta, size, expected",
[
(1, None, 1),
(1, 5, np.ones(5)),
(np.arange(1, 5), None, np.arange(1, 5)),
(
np.arange(1, 5),
(2, 4),
np.full((2, 4), np.arange(1, 5)),
),
],
)
def test_halfcauchy_moment(beta, size, expected):
with Model() as model:
HalfCauchy("x", beta=beta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"alpha, beta, size, expected",
[
(1, 1, None, 1),
(1, 1, 5, np.full(5, 1)),
(np.arange(1, 6), 1, None, np.arange(1, 6)),
(
np.arange(1, 6),
2 * np.arange(1, 6),
(2, 5),
np.full((2, 5), 0.5),
),
],
)
def test_gamma_moment(alpha, beta, size, expected):
with Model() as model:
Gamma("x", alpha=alpha, beta=beta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"alpha, beta, size, expected",
[
(5, 1, None, 1 / 4),
(0.5, 1, None, 1 / 1.5),
(5, 1, 5, np.full(5, 1 / (5 - 1))),
(np.arange(1, 6), 1, None, np.array([0.5, 1, 1 / 2, 1 / 3, 1 / 4])),
],
)
def test_inverse_gamma_moment(alpha, beta, size, expected):
with Model() as model:
InverseGamma("x", alpha=alpha, beta=beta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"alpha, m, size, expected",
[
(2, 1, None, 1 * 2 ** (1 / 2)),
(2, 1, 5, np.full(5, 1 * 2 ** (1 / 2))),
(np.arange(2, 7), np.arange(1, 6), None, np.arange(1, 6) * 2 ** (1 / np.arange(2, 7))),
(
np.arange(2, 7),
np.arange(1, 6),
(2, 5),
np.full((2, 5), np.arange(1, 6) * 2 ** (1 / np.arange(2, 7))),
),
],
)
def test_pareto_moment(alpha, m, size, expected):
with Model() as model:
Pareto("x", alpha=alpha, m=m, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, kappa, size, expected",
[
(0, 1, None, 0),
(0, np.ones(4), None, np.zeros(4)),
(np.arange(4), 0.5, None, np.arange(4)),
(np.arange(4), np.arange(1, 5), (2, 4), np.full((2, 4), np.arange(4))),
],
)
def test_vonmises_moment(mu, kappa, size, expected):
with Model() as model:
VonMises("x", mu=mu, kappa=kappa, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, lam, phi, size, expected",
[
(2, None, None, None, 2),
(None, 1, 1, 5, np.full(5, 1)),
(1, None, np.ones(5), None, np.full(5, 1)),
(3, np.full(5, 2), None, None, np.full(5, 3)),
(np.arange(1, 6), None, np.arange(1, 6), (2, 5), np.full((2, 5), np.arange(1, 6))),
],
)
def test_wald_moment(mu, lam, phi, size, expected):
with Model() as model:
Wald("x", mu=mu, lam=lam, phi=phi, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"alpha, beta, size, expected",
[
(1, 1, None, 1),
(1, 1, 5, np.full(5, 1)),
(np.arange(1, 6), 1, None, special.gamma(1 + 1 / np.arange(1, 6))),
(
np.arange(1, 6),
np.arange(2, 7),
(2, 5),
np.full(
(2, 5),
np.arange(2, 7) * special.gamma(1 + 1 / np.arange(1, 6)),
),
),
],
)
def test_weibull_moment(alpha, beta, size, expected):
with Model() as model:
Weibull("x", alpha=alpha, beta=beta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"n, p, size, expected",
[
(7, 0.7, None, 5),
(7, 0.3, 5, np.full(5, 2)),
(10, np.arange(1, 6) / 10, None, np.arange(1, 6)),
(10, np.arange(1, 6) / 10, (2, 5), np.full((2, 5), np.arange(1, 6))),
],
)
def test_binomial_moment(n, p, size, expected):
with Model() as model:
Binomial("x", n=n, p=p, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, size, expected",
[
(2.7, None, 2),
(2.3, 5, np.full(5, 2)),
(np.arange(1, 5), None, np.arange(1, 5)),
(np.arange(1, 5), (2, 4), np.full((2, 4), np.arange(1, 5))),
],
)
def test_poisson_moment(mu, size, expected):
with Model() as model:
Poisson("x", mu=mu, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"n, p, size, expected",
[
(10, 0.7, None, 4),
(10, 0.7, 5, np.full(5, 4)),
(np.full(3, 10), np.arange(1, 4) / 10, None, np.array([90, 40, 23])),
(
10,
np.arange(1, 4) / 10,
(2, 3),
np.full((2, 3), np.array([90, 40, 23])),
),
],
)
def test_negative_binomial_moment(n, p, size, expected):
with Model() as model:
NegativeBinomial("x", n=n, p=p, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"c, size, expected",
[
(1, None, 1),
(1, 5, np.full(5, 1)),
(np.arange(1, 6), None, np.arange(1, 6)),
],
)
def test_constant_moment(c, size, expected):
with Model() as model:
Constant("x", c=c, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"psi, theta, size, expected",
[
(0.9, 3.0, None, 2),
(0.8, 2.9, 5, np.full(5, 2)),
(0.2, np.arange(1, 5) * 5, None, np.arange(1, 5)),
(0.2, np.arange(1, 5) * 5, (2, 4), np.full((2, 4), np.arange(1, 5))),
],
)
def test_zero_inflated_poisson_moment(psi, theta, size, expected):
with Model() as model:
ZeroInflatedPoisson("x", psi=psi, theta=theta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"psi, n, p, size, expected",
[
(0.8, 7, 0.7, None, 4),
(0.8, 7, 0.3, 5, np.full(5, 2)),
(0.4, 25, np.arange(1, 6) / 10, None, np.arange(1, 6)),
(
0.4,
25,
np.arange(1, 6) / 10,
(2, 5),
np.full((2, 5), np.arange(1, 6)),
),
],
)
def test_zero_inflated_binomial_moment(psi, n, p, size, expected):
with Model() as model:
ZeroInflatedBinomial("x", psi=psi, n=n, p=p, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, s, size, expected",
[
(1, 1, None, 1),
(1, 1, 5, np.full(5, 1)),
(2, np.arange(1, 6), None, np.full(5, 2)),
(
np.arange(1, 6),
np.arange(1, 6),
(2, 5),
np.full((2, 5), np.arange(1, 6)),
),
],
)
def test_logistic_moment(mu, s, size, expected):
with Model() as model:
Logistic("x", mu=mu, s=s, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, nu, sigma, size, expected",
[
(1, 1, 1, None, 2),
(1, 1, np.ones((2, 5)), None, np.full([2, 5], 2)),
(1, 1, 3, 5, np.full(5, 2)),
(1, np.arange(1, 6), 5, None, np.arange(2, 7)),
(1, np.arange(1, 6), 1, (2, 5), np.full((2, 5), np.arange(2, 7))),
],
)
def test_exgaussian_moment(mu, nu, sigma, size, expected):
with Model() as model:
ExGaussian("x", mu=mu, sigma=sigma, nu=nu, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"p, size, expected",
[
(0.5, None, 2),
(0.2, 5, 5 * np.ones(5)),
(np.linspace(0.25, 1, 4), None, [4, 2, 1, 1]),
(np.linspace(0.25, 1, 4), (2, 4), np.full((2, 4), [4, 2, 1, 1])),
],
)
def test_geometric_moment(p, size, expected):
with Model() as model:
Geometric("x", p=p, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"N, k, n, size, expected",
[
(50, 10, 20, None, 4),
(50, 10, 23, 5, np.full(5, 5)),
(50, 10, np.arange(23, 28), None, np.full(5, 5)),
(
50,
10,
np.arange(18, 23),
(2, 5),
np.full((2, 5), 4),
),
],
)
def test_hyper_geometric_moment(N, k, n, size, expected):
with Model() as model:
HyperGeometric("x", N=N, k=k, n=n, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
    "lower, upper, size, expected",
    [
        (1, 5, None, 3),
        (1, 5, 5, np.full(5, 3)),
        (1, np.arange(5, 22, 4), None, np.arange(3, 13, 2)),
        (
            1,
            np.arange(5, 22, 4),
            (2, 5),
            np.full((2, 5), np.arange(3, 13, 2)),
        ),
    ],
)
def test_discrete_uniform_moment(lower, upper, size, expected):
    """Check that the DiscreteUniform moment equals the (floored) midpoint."""
    with Model() as model:
        DiscreteUniform("x", lower=lower, upper=upper, size=size)
    # Bug fix: this call was missing, so the test previously built the model
    # but asserted nothing. Every sibling moment test ends with this check.
    assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"a, size, expected",
[
(
np.array([2, 3, 5, 7, 11]),
None,
np.array([2, 3, 5, 7, 11]) / 28,
),
(
np.array([[1, 2, 3], [5, 6, 7]]),
None,
np.array([[1, 2, 3], [5, 6, 7]]) / np.array([6, 18])[..., np.newaxis],
),
(
np.array([[1, 2, 3], [5, 6, 7]]),
7,
np.apply_along_axis(
lambda x: np.divide(x, np.array([6, 18])),
1,
np.broadcast_to([[1, 2, 3], [5, 6, 7]], shape=[7, 2, 3]),
),
),
(
np.full(shape=np.array([7, 3]), fill_value=np.array([13, 17, 19])),
(
11,
5,
),
np.broadcast_to([13, 17, 19], shape=[11, 5, 7, 3]) / 49,
),
],
)
def test_dirichlet_moment(a, size, expected):
with Model() as model:
Dirichlet("x", a=a, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, beta, size, expected",
[
(0, 2, None, 2 * np.euler_gamma),
(1, np.arange(1, 4), None, 1 + np.arange(1, 4) * np.euler_gamma),
(np.arange(5), 2, None, np.arange(5) + 2 * np.euler_gamma),
(1, 2, 5, np.full(5, 1 + 2 * np.euler_gamma)),
(
np.arange(5),
np.arange(1, 6),
(2, 5),
np.full((2, 5), np.arange(5) + np.arange(1, 6) * np.euler_gamma),
),
],
)
def test_gumbel_moment(mu, beta, size, expected):
with Model() as model:
Gumbel("x", mu=mu, beta=beta, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"c, lower, upper, size, expected",
[
(1, 0, 5, None, 2),
(3, np.arange(-3, 6, 3), np.arange(3, 12, 3), None, np.array([1, 3, 5])),
(np.arange(-3, 6, 3), -3, 3, None, np.array([-1, 0, 1])),
(3, -3, 6, 5, np.full(5, 2)),
(
np.arange(-3, 6, 3),
np.arange(-9, -2, 3),
np.arange(3, 10, 3),
(2, 3),
np.full((2, 3), np.array([-3, 0, 3])),
),
],
)
def test_triangular_moment(c, lower, upper, size, expected):
with Model() as model:
Triangular("x", c=c, lower=lower, upper=upper, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, sigma, size, expected",
[
(1, 2, None, special.expit(1)),
(0, np.arange(1, 5), None, special.expit(np.zeros(4))),
(np.arange(4), 1, None, special.expit(np.arange(4))),
(1, 5, 4, special.expit(np.ones(4))),
(np.arange(4), np.arange(1, 5), (2, 4), np.full((2, 4), special.expit(np.arange(4)))),
],
)
def test_logitnormal_moment(mu, sigma, size, expected):
with Model() as model:
LogitNormal("x", mu=mu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"p, size, expected",
[
(np.array([0.1, 0.3, 0.6]), None, 2),
(np.array([0.6, 0.1, 0.3]), 5, np.full(5, 0)),
(np.full((2, 3), np.array([0.6, 0.1, 0.3])), None, [0, 0]),
(
np.full((2, 3), np.array([0.1, 0.3, 0.6])),
(3, 2),
np.full((3, 2), [2, 2]),
),
],
)
def test_categorical_moment(p, size, expected):
with Model() as model:
Categorical("x", p=p, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"x_points, pdf_points, size, expected",
[
(np.array([-1, 1]), np.array([0.4, 0.6]), None, 0.2),
(
np.array([-4, -1, 3, 9, 19]),
np.array([0.1, 0.15, 0.2, 0.25, 0.3]),
None,
1.5458937198067635,
),
(
np.array([-22, -4, 0, 8, 13]),
np.tile(1 / 5, 5),
(5, 3),
np.full((5, 3), -0.14285714285714296),
),
(
np.arange(-100, 10),
np.arange(1, 111) / 6105,
(2, 5, 3),
np.full((2, 5, 3), -27.584097859327223),
),
],
)
def test_interpolated_moment(x_points, pdf_points, size, expected):
with Model() as model:
Interpolated("x", x_points=x_points, pdf_points=pdf_points, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, cov, size, expected",
[
(np.ones(1), np.identity(1), None, np.ones(1)),
(np.ones(3), np.identity(3), None, np.ones(3)),
(np.ones((2, 2)), np.identity(2), None, np.ones((2, 2))),
(np.array([1, 0, 3.0]), np.identity(3), None, np.array([1, 0, 3.0])),
(np.array([1, 0, 3.0]), np.identity(3), (4, 2), np.full((4, 2, 3), [1, 0, 3.0])),
(
np.array([1, 3.0]),
np.identity(2),
5,
np.full((5, 2), [1, 3.0]),
),
(
np.array([1, 3.0]),
np.array([[1.0, 0.5], [0.5, 2]]),
(4, 5),
np.full((4, 5, 2), [1, 3.0]),
),
(
np.array([[3.0, 5], [1, 4]]),
np.identity(2),
(4, 5),
np.full((4, 5, 2, 2), [[3.0, 5], [1, 4]]),
),
],
)
def test_mv_normal_moment(mu, cov, size, expected):
with Model() as model:
x = MvNormal("x", mu=mu, cov=cov, size=size)
# MvNormal logp is only impemented for up to 2D variables
assert_moment_is_expected(model, expected, check_finite_logp=x.ndim < 3)
@pytest.mark.parametrize(
"mu, sigma, size, expected",
[
(4.0, 3.0, None, 7.8110885363844345),
(4.0, np.full(5, 3), None, np.full(5, 7.8110885363844345)),
(np.arange(5), 1, None, np.arange(5) + 1.2703628454614782),
(np.arange(5), np.ones(5), (2, 5), np.full((2, 5), np.arange(5) + 1.2703628454614782)),
],
)
def test_moyal_moment(mu, sigma, size, expected):
with Model() as model:
Moyal("x", mu=mu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
rand1d = np.random.rand(2)
rand2d = np.random.rand(2, 3)
@pytest.mark.parametrize(
"nu, mu, cov, size, expected",
[
(2, np.ones(1), np.eye(1), None, np.ones(1)),
(2, rand1d, np.eye(2), None, rand1d),
(2, rand1d, np.eye(2), 2, np.full((2, 2), rand1d)),
(2, rand1d, np.eye(2), (2, 5), np.full((2, 5, 2), rand1d)),
(2, rand2d, np.eye(3), None, rand2d),
(2, rand2d, np.eye(3), 2, np.full((2, 2, 3), rand2d)),
(2, rand2d, np.eye(3), (2, 5), np.full((2, 5, 2, 3), rand2d)),
],
)
def test_mvstudentt_moment(nu, mu, cov, size, expected):
with Model() as model:
x = MvStudentT("x", nu=nu, mu=mu, cov=cov, size=size)
# MvStudentT logp is only impemented for up to 2D variables
assert_moment_is_expected(model, expected, check_finite_logp=x.ndim < 3)
def check_matrixnormal_moment(mu, rowchol, colchol, size, expected):
    """Build a MatrixNormal model and verify its moment against `expected`.

    Helper for test_matrixnormal_moment. The assertion must actually run here:
    for sized cases, test_matrixnormal_moment wraps this call in
    pytest.raises(NotImplementedError) and expects the moment machinery to raise.
    """
    with Model() as model:
        MatrixNormal("x", mu=mu, rowchol=rowchol, colchol=colchol, size=size)
    # Bug fix: without this call the helper verified nothing and the
    # NotImplementedError expected by test_matrixnormal_moment could never
    # be triggered.
    assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"alpha, mu, sigma, size, expected",
[
(1.0, 1.0, 1.0, None, 1.56418958),
(1.0, np.ones(5), 1.0, None, np.full(5, 1.56418958)),
(np.ones(5), 1, np.ones(5), None, np.full(5, 1.56418958)),
(
np.arange(5),
np.arange(1, 6),
np.arange(1, 6),
None,
(1.0, 3.12837917, 5.14094894, 7.02775903, 8.87030861),
),
(
np.arange(5),
np.arange(1, 6),
np.arange(1, 6),
(2, 5),
np.full((2, 5), (1.0, 3.12837917, 5.14094894, 7.02775903, 8.87030861)),
),
],
)
def test_skewnormal_moment(alpha, mu, sigma, size, expected):
with Model() as model:
SkewNormal("x", alpha=alpha, mu=mu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"b, kappa, mu, size, expected",
[
(1.0, 1.0, 1.0, None, 1.0),
(1.0, np.ones(5), 1.0, None, np.full(5, 1.0)),
(np.arange(1, 6), 1.0, np.ones(5), None, np.full(5, 1.0)),
(
np.arange(1, 6),
np.arange(1, 6),
np.arange(1, 6),
None,
(1.0, 1.25, 2.111111111111111, 3.0625, 4.04),
),
(
np.arange(1, 6),
np.arange(1, 6),
np.arange(1, 6),
(2, 5),
np.full((2, 5), (1.0, 1.25, 2.111111111111111, 3.0625, 4.04)),
),
],
)
def test_asymmetriclaplace_moment(b, kappa, mu, size, expected):
with Model() as model:
AsymmetricLaplace("x", b=b, kappa=kappa, mu=mu, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"mu, rowchol, colchol, size, expected",
[
(np.ones((1, 1)), np.eye(1), np.eye(1), None, np.ones((1, 1))),
(np.ones((1, 1)), np.eye(2), np.eye(3), None, np.ones((2, 3))),
(rand2d, np.eye(2), np.eye(3), None, rand2d),
(rand2d, np.eye(2), np.eye(3), 2, np.full((2, 2, 3), rand2d)),
(rand2d, np.eye(2), np.eye(3), (2, 5), np.full((2, 5, 2, 3), rand2d)),
],
)
def test_matrixnormal_moment(mu, rowchol, colchol, size, expected):
if size is None:
check_matrixnormal_moment(mu, rowchol, colchol, size, expected)
else:
with pytest.raises(NotImplementedError):
check_matrixnormal_moment(mu, rowchol, colchol, size, expected)
@pytest.mark.parametrize(
"nu, sigma, size, expected",
[
(1.0, 1.0, None, 1.5485724605511453),
(1.0, np.ones(5), None, np.full(5, 1.5485724605511453)),
(
np.arange(1, 6),
1.0,
None,
(
1.5485724605511453,
2.2723834280687427,
3.1725772879007166,
4.127193542536757,
5.101069639492123,
),
),
(
np.arange(1, 6),
np.ones(5),
(2, 5),
np.full(
(2, 5),
(
1.5485724605511453,
2.2723834280687427,
3.1725772879007166,
4.127193542536757,
5.101069639492123,
),
),
),
],
)
def test_rice_moment(nu, sigma, size, expected):
with Model() as model:
Rice("x", nu=nu, sigma=sigma, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
    "get_moment, size, expected",
    [
        (None, None, 0.0),
        (None, 5, np.zeros(5)),
        ("custom_moment", None, 5),
        ("custom_moment", (2, 5), np.full((2, 5), 5)),
    ],
)
def test_density_dist_default_moment_univariate(get_moment, size, expected):
    """DensityDist moment: zeros by default, or the user-supplied get_moment."""
    if get_moment == "custom_moment":
        # Replace the sentinel string with an actual moment callable returning 5s.
        get_moment = lambda rv, size, *rv_inputs: 5 * at.ones(size, dtype=rv.dtype)
    with Model() as model:
        DensityDist("x", get_moment=get_moment, size=size)
    # DensityDist has no logp here, so skip the finite-logp check.
    assert_moment_is_expected(model, expected, check_finite_logp=False)
@pytest.mark.parametrize("size", [(), (2,), (3, 2)], ids=str)
def test_density_dist_custom_moment_univariate(size):
def moment(rv, size, mu):
return (at.ones(size) * mu).astype(rv.dtype)
mu_val = np.array(np.random.normal(loc=2, scale=1)).astype(aesara.config.floatX)
with pm.Model():
mu = pm.Normal("mu")
a = pm.DensityDist("a", mu, get_moment=moment, size=size)
evaled_moment = get_moment(a).eval({mu: mu_val})
assert evaled_moment.shape == to_tuple(size)
assert np.all(evaled_moment == mu_val)
@pytest.mark.parametrize("size", [(), (2,), (3, 2)], ids=str)
def test_density_dist_custom_moment_multivariate(size):
def moment(rv, size, mu):
return (at.ones(size)[..., None] * mu).astype(rv.dtype)
mu_val = np.random.normal(loc=2, scale=1, size=5).astype(aesara.config.floatX)
with pm.Model():
mu = pm.Normal("mu", size=5)
a = pm.DensityDist("a", mu, get_moment=moment, ndims_params=[1], ndim_supp=1, size=size)
evaled_moment = get_moment(a).eval({mu: mu_val})
assert evaled_moment.shape == to_tuple(size) + (5,)
assert np.all(evaled_moment == mu_val)
@pytest.mark.parametrize(
    "with_random, size",
    [
        (True, ()),
        (True, (2,)),
        (True, (3, 2)),
        (False, ()),
        (False, (2,)),
    ],
)
def test_density_dist_default_moment_multivariate(with_random, size):
    """Multivariate DensityDist default moment: zeros when a `random` is given,
    otherwise a TypeError because the moment's size cannot be inferred."""
    def _random(mu, rng=None, size=None):
        # Draw with the support dimension appended to the requested size.
        return rng.normal(mu, scale=1, size=to_tuple(size) + mu.shape)
    if with_random:
        random = _random
    else:
        random = None
    mu_val = np.random.normal(loc=2, scale=1, size=5).astype(aesara.config.floatX)
    with pm.Model():
        mu = pm.Normal("mu", size=5)
        a = pm.DensityDist("a", mu, random=random, ndims_params=[1], ndim_supp=1, size=size)
    if with_random:
        # Default moment is all zeros, shaped (size..., support_dim).
        evaled_moment = get_moment(a).eval({mu: mu_val})
        assert evaled_moment.shape == to_tuple(size) + (5,)
        assert np.all(evaled_moment == 0)
    else:
        # Without a random implementation the moment size cannot be inferred.
        with pytest.raises(
            TypeError,
            match="Cannot safely infer the size of a multivariate random variable's moment.",
        ):
            evaled_moment = get_moment(a).eval({mu: mu_val})
@pytest.mark.parametrize(
"h, z, size, expected",
[
(1.0, 0.0, None, 0.25),
(
1.0,
np.arange(5),
None,
(
0.25,
0.23105857863000487,
0.1903985389889412,
0.1508580422741444,
0.12050344750947711,
),
),
(
np.arange(1, 6),
np.arange(5),
None,
(
0.25,
0.46211715726000974,
0.5711956169668236,
0.6034321690965776,
0.6025172375473855,
),
),
(
np.arange(1, 6),
np.arange(5),
(2, 5),
np.full(
(2, 5),
(
0.25,
0.46211715726000974,
0.5711956169668236,
0.6034321690965776,
0.6025172375473855,
),
),
),
],
)
def test_polyagamma_moment(h, z, size, expected):
with Model() as model:
PolyaGamma("x", h=h, z=z, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"p, n, size, expected",
[
(np.array([0.25, 0.25, 0.25, 0.25]), 1, None, np.array([1, 0, 0, 0])),
(np.array([0.3, 0.6, 0.05, 0.05]), 2, None, np.array([1, 1, 0, 0])),
(np.array([0.3, 0.6, 0.05, 0.05]), 10, None, np.array([4, 6, 0, 0])),
(
np.array([[0.3, 0.6, 0.05, 0.05], [0.25, 0.25, 0.25, 0.25]]),
10,
None,
np.array([[4, 6, 0, 0], [4, 2, 2, 2]]),
),
(
np.array([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]]),
np.array([1, 10]),
None,
np.array([[1, 0, 0, 0], [2, 3, 3, 2]]),
),
(
np.array([0.26, 0.26, 0.26, 0.22]),
np.array([1, 10]),
None,
np.array([[1, 0, 0, 0], [2, 3, 3, 2]]),
),
(
np.array([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]]),
np.array([1, 10]),
2,
np.full((2, 2, 4), [[1, 0, 0, 0], [2, 3, 3, 2]]),
),
],
)
def test_multinomial_moment(p, n, size, expected):
with Model() as model:
Multinomial("x", n=n, p=p, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize(
"psi, mu, alpha, size, expected",
[
(0.2, 10, 3, None, 2),
(0.2, 10, 4, 5, np.full(5, 2)),
(0.4, np.arange(1, 5), np.arange(2, 6), None, np.array([0, 0, 1, 1])),
(
np.linspace(0.2, 0.6, 3),
np.arange(1, 10, 4),
np.arange(1, 4),
(2, 3),
np.full((2, 3), np.array([0, 2, 5])),
),
],
)
def test_zero_inflated_negative_binomial_moment(psi, mu, alpha, size, expected):
with Model() as model:
ZeroInflatedNegativeBinomial("x", psi=psi, mu=mu, alpha=alpha, size=size)
assert_moment_is_expected(model, expected)
@pytest.mark.parametrize("mu", [0, np.arange(3)], ids=str)
@pytest.mark.parametrize("sigma", [1, np.array([1, 2, 5])], ids=str)
@pytest.mark.parametrize("size", [None, 3, (5, 3)], ids=str)
def test_simulator_moment(mu, sigma, size):
def normal_sim(rng, mu, sigma, size):
return rng.normal(mu, sigma, size=size)
with Model() as model:
x = Simulator("x", normal_sim, mu, sigma, size=size)
fn = make_initial_point_fn(
model=model,
return_transformed=False,
default_strategy="moment",
)
random_draw = model["x"].eval()
result = fn(0)["x"]
assert result.shape == random_draw.shape
# We perform a z-test between the moment and expected mean from a sample of 10 draws
# This test fails if the number of samples averaged in get_moment(Simulator)
# is much smaller than 10, but would not catch the case where the number of samples
# is higher than the expected 10
n = 10 # samples
expected_sample_mean = mu
expected_sample_mean_std = np.sqrt(sigma ** 2 / n)
# Multiple test adjustment for z-test to maintain alpha=0.01
alpha = 0.01
alpha /= 2 * 2 * 3 # Correct for number of test permutations
alpha /= random_draw.size # Correct for distribution size
cutoff = st.norm().ppf(1 - (alpha / 2))
assert np.all(np.abs((result - expected_sample_mean) / expected_sample_mean_std) < cutoff)
| 29.486732 | 96 | 0.536729 |
d09c6a2310b00e4391111e7b58661f4ab0beab08 | 179 | py | Python | studyPython2/advanced_usage/para_attr/parrot.py | fairylyk/studyPy | b227b92ac5707fba665942adbaba6943940819fd | [
"Apache-2.0"
] | null | null | null | studyPython2/advanced_usage/para_attr/parrot.py | fairylyk/studyPy | b227b92ac5707fba665942adbaba6943940819fd | [
"Apache-2.0"
] | 1 | 2021-03-25T22:44:19.000Z | 2021-03-25T22:44:19.000Z | studyPython2/advanced_usage/para_attr/parrot.py | fairylyk/studyPy | b227b92ac5707fba665942adbaba6943940819fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# vim: set fileencoding=utf-8 :
from bird import Bird
from flyable import Flyable
class Parrot(Bird, Flyable):
    """A parrot: a Bird combined with the Flyable mixin."""
    def wing(self):
        """Report that this bird has wings (Python 2 print statement)."""
        print "I have wing"
a4450707aa7cbf09c272826de40930d3c18cf350 | 10,701 | py | Python | scripts/updateversion.py | heyjohnlim/ADOdb | ee5c4364da4e5e557b48db84d94e526fab4e072a | [
"BSD-3-Clause"
] | null | null | null | scripts/updateversion.py | heyjohnlim/ADOdb | ee5c4364da4e5e557b48db84d94e526fab4e072a | [
"BSD-3-Clause"
] | null | null | null | scripts/updateversion.py | heyjohnlim/ADOdb | ee5c4364da4e5e557b48db84d94e526fab4e072a | [
"BSD-3-Clause"
] | 1 | 2019-05-27T11:24:29.000Z | 2019-05-27T11:24:29.000Z | #!/usr/bin/python -u
'''
ADOdb version update script
Updates the version number, and release date in all php and html files
'''
from datetime import date
import getopt
import os
from os import path
import re
import subprocess
import sys
# ADOdb version validation regex
# These are used by sed - they are not PCRE !
# Suffix that marks a development (pre-)release, e.g. "5.22.0-dev".
_version_dev = "dev"
# Groups: 1=X.Y, 2=.Z, 3=Z, 4=optional "-dev" suffix (5 groups total once
# sed_script prepends its own "(-?)" group).
_version_regex = "[Vv]?([0-9]\.[0-9]+)(\.([0-9]+))?(-?%s)?" % _version_dev
# Release date, e.g. "19-Jan-2021" or the dev placeholder "??-???-2021".
_release_date_regex = "[0-9?]+-.*-[0-9]+"
_changelog_file = "docs/changelog.md"
# Prefix used when creating git tags, e.g. "v5.22.0".
_tag_prefix = "v"
# Command-line options (getopt short and long forms; must stay in sync)
options = "hct"
long_options = ["help", "commit", "tag"]
def usage():
    """Print the command-line usage message (Python 2 print statement)."""
    print '''Usage: %s version

Parameters:
    version         ADOdb version, format: [v]X.YY[a-z|dev]

Options:
    -c | --commit   Automatically commit the changes
    -t | --tag      Create a tag for the new release
    -h | --help     Show this usage message
''' % (
        path.basename(__file__)
    )
#end usage()
def version_is_dev(version):
    ''' True when the version string carries the development suffix
        (module constant _version_dev, normally "dev").
    '''
    suffix_len = len(_version_dev)
    return version[-suffix_len:] == _version_dev
def version_is_patch(version):
    ''' True for a patch release, i.e. any version whose last component
        is not ".0" (X.Y.Z with Z > 0).
    '''
    # A non-patch release always ends with the literal ".0".
    return version[-2:] != ".0"
def version_parse(version):
    ''' Break the version down into regex groups (Z and -dev are optional):
        1:(X.Y), 2:(.Z), 3:(Z), 4:(-dev).  Returns the re.Match or None.
    '''
    pattern = '^' + _version_regex + '$'
    return re.match(pattern, version)
def version_check(version):
    ''' Checks that the given version is valid, exits with error if not.
        Returns the SemVer-normalized version without the "v" prefix
        - add '.0' if missing patch bit
        - add '-' before dev release suffix if needed
    '''
    vparse = version_parse(version)
    if not vparse:
        # Invalid format: show usage and abort the whole script.
        usage()
        print "ERROR: invalid version ! \n"
        sys.exit(1)
    vnorm = vparse.group(1)  # the mandatory X.Y part
    # Add .patch version component
    if vparse.group(2):
        vnorm += vparse.group(2)
    else:
        # None was specified, assume a .0 release
        vnorm += '.0'
    # Normalize version number: re-append the dev suffix with a dash
    if version_is_dev(version):
        vnorm += '-' + _version_dev
    return vnorm
def get_release_date(version):
    ''' Return today's date formatted DD-MMM-YYYY.
        For development releases the day/month are masked as "??-???".
    '''
    date_format = "??-???-%Y" if version_is_dev(version) else "%d-%b-%Y"
    return date.today().strftime(date_format)
def sed_script(version):
    ''' Builds sed script to update version information in source files.

    The substitution matches "vX.Y[.Z][-dev] - DD-MMM-YYYY" and rewrites it
    with the new version and today's release date.
    '''
    # Version number and release date.
    # \5 back-references the (-?) group: _version_regex contributes groups
    # 1-4, so the dash group inserted here is group 5 — it preserves whatever
    # separator style (dash or none) the file originally used.
    script = r"s/{}\s+(-?)\s+{}/v{} \5 {}/".format(
        _version_regex,
        _release_date_regex,
        version,
        get_release_date(version)
    )
    return script
def sed_filelist():
    ''' Build the list of files to update: every .php/.htm/.html file
        (case-insensitive) under the current directory, recursively.
    '''
    pattern = re.compile(r'\.(php|html?)$', re.IGNORECASE)
    found = []
    for root, _dirs, files in os.walk(".", topdown=True):
        found.extend(path.join(root, name)
                     for name in files if pattern.search(name))
    return found
def tag_name(version):
    ''' Git tag name for a version: the configured prefix plus the version. '''
    return "%s%s" % (_tag_prefix, version)
def tag_check(version):
    ''' Checks if the tag for the specified version exists in the repository
        by attempting to check it out.
        Raises subprocess.CalledProcessError if the checkout (and thus the
        tag) does not exist.
    '''
    # NOTE(review): stderr=PIPE with check_call discards git's error output;
    # if git writes a lot to stderr this can block — confirm intent.
    subprocess.check_call(
        "git checkout --quiet " + tag_name(version),
        stderr=subprocess.PIPE,
        shell=True)
    print "Tag '%s' already exists" % tag_name(version)
def tag_delete(version):
    ''' Delete the local git tag for the given version.
        Raises subprocess.CalledProcessError if git fails.
    '''
    command = "git tag --delete " + tag_name(version)
    subprocess.check_call(command, stderr=subprocess.PIPE, shell=True)
def tag_create(version):
    ''' Creates the signed tag for the specified version.
        Returns True if tag created (git exit status 0).
    '''
    print "Creating release tag '%s'" % tag_name(version)
    # Annotated, GPG-signed tag; message embeds version and release date.
    result = subprocess.call(
        "git tag --sign --message '%s' %s" % (
            "ADOdb version %s released %s" % (
                version,
                get_release_date(version)
            ),
            tag_name(version)
        ),
        shell=True
    )
    return result == 0
def section_exists(filename, version, print_message=True):
    ''' Checks given file for an existing markdown level-2 section
        ("## <version>") with the specified version.
        Returns True at the first match, False otherwise.
    '''
    script = True  # NOTE(review): never used — dead assignment, candidate for removal
    # NOTE(review): `i` is unused; the file handle from open() is never
    # explicitly closed (relies on GC).
    for i, line in enumerate(open(filename)):
        if re.search(r'^## ' + version, line):
            if print_message:
                # Trailing comma: Python 2 print without newline.
                print " Existing section for v%s found," % version,
            return True
    return False
def version_get_previous(version):
    ''' Return the previous version number by decrementing the rightmost
        non-zero component after the major version.
        Never decreases the major version (raises ValueError instead).
    '''
    parts = version.split('.')
    # Walk right-to-left, stopping before the major component (index 0).
    for idx in range(len(parts) - 1, 0, -1):
        value = int(parts[idx])
        if value > 0:
            parts[idx] = str(value - 1)
            return '.'.join(parts)
    raise ValueError('Refusing to decrease major version number')
def update_changelog(version):
    ''' Updates the release date in the Change Log.

    Builds a sed script appropriate for the release type (dev / stable /
    hotfix) and applies it in-place to docs/changelog.md.
    '''
    print "Updating Changelog"
    vparse = version_parse(version)
    # Version number without '-dev' suffix
    version_release = vparse.group(1) + vparse.group(2)
    version_previous = version_get_previous(version_release)
    if not section_exists(_changelog_file, version_previous, False):
        raise ValueError(
            "ERROR: previous version %s does not exist in changelog" %
            version_previous
        )
    # Check if version already exists in changelog
    version_exists = section_exists(_changelog_file, version_release)
    # A brand-new stable X.Y.0 release is treated as dev so a new section
    # gets inserted for it first.
    if (not version_exists
            and not version_is_patch(version)
            and not version_is_dev(version)):
        version += '-' + _version_dev
    release_date = get_release_date(version)
    # Development release
    # Insert a new section for next release before the most recent one
    if version_is_dev(version):
        # Check changelog file for existing section
        if version_exists:
            print "nothing to do"
            return
        # No existing section found, insert new one
        if version_is_patch(version_release):
            print " Inserting new section for hotfix release v%s" % version
        else:
            print " Inserting new section for v%s" % version_release
            # Adjust previous version number (remove patch component)
            version_previous = version_parse(version_previous).group(1)
        # sed: before the first "## <previous>" header, insert the new header
        # followed by a blank line (\0 re-emits the matched previous header).
        script = "1,/^## {0}/s/^## {0}.*$/## {1} - {2}\\n\\n\\0/".format(
            version_previous,
            version_release,
            release_date
        )
    # Stable release (X.Y.0)
    # Replace the 1st occurrence of markdown level 2 header matching version
    # and release date patterns
    elif not version_is_patch(version):
        print " Updating release date for v%s" % version
        script = r"s/^(## ){0}(\.0)? - {1}.*$/\1{2} - {3}/".format(
            vparse.group(1),
            _release_date_regex,
            version,
            release_date
        )
    # Hotfix release (X.Y.[0-9])
    # Insert a new section for the hotfix release before the most recent
    # section for version X.Y and display a warning message
    else:
        if version_exists:
            print 'updating release date'
            script = "s/^## {0}.*$/## {1} - {2}/".format(
                version.replace('.', '\.'),
                version,
                release_date
            )
        else:
            print " Inserting new section for hotfix release v%s" % version
            script = "1,/^## {0}/s/^## {0}.*$/## {1} - {2}\\n\\n\\0/".format(
                version_previous,
                version,
                release_date
            )
            print " WARNING: review '%s' to ensure added section is correct" % (
                _changelog_file
            )
    # Apply the computed script in-place with GNU sed extended regexes.
    subprocess.call(
        "sed -r -i '%s' %s " % (
            script,
            _changelog_file
        ),
        shell=True
    )
#end update_changelog
def version_set(version, do_commit=True, do_tag=True):
    ''' Bump version number and set release date in source files.

    Updates the changelog, rewrites all .php/.html files via sed, then
    optionally commits the result and creates a signed release tag.
    '''
    print "Preparing version bump commit"
    update_changelog(version)
    print "Updating version and date in source files"
    # One sed invocation over every candidate file at once.
    subprocess.call(
        "sed -r -i '%s' %s " % (
            sed_script(version),
            " ".join(sed_filelist())
        ),
        shell=True
    )
    print "Version set to %s" % version
    if do_commit:
        # Commit changes
        print "Committing"
        commit_ok = subprocess.call(
            "git commit --all --message '%s'" % (
                "Bump version to %s" % version
            ),
            shell=True
        )
        if do_tag:
            tag_ok = tag_create(version)
        else:
            tag_ok = False
        if commit_ok == 0:
            print '''
NOTE: you should carefully review the new commit, making sure updates
to the files are correct and no additional changes are required.
If everything is fine, then the commit can be pushed upstream;
otherwise:
- Make the required corrections
- Amend the commit ('git commit --all --amend' ) or create a new one'''
            if tag_ok:
                print ''' - Drop the tag ('git tag --delete %s')
- run this script again
''' % (
                    tag_name(version)
                )
    else:
        print "Note: changes have been staged but not committed."
#end version_set()
def main():
    """Parse command-line options and drive the version bump (Python 2)."""
    # Get command-line options
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], options, long_options)
    except getopt.GetoptError, err:
        print str(err)
        usage()
        sys.exit(2)
    if len(args) < 1:
        usage()
        print "ERROR: please specify the version"
        sys.exit(1)
    do_commit = False
    do_tag = False
    for opt, val in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif opt in ("-c", "--commit"):
            do_commit = True
        elif opt in ("-t", "--tag"):
            do_tag = True
    # Mandatory parameters
    version = version_check(args[0])
    # Let's do it
    # NOTE(review): "git root" is a custom git alias (repo top-level dir);
    # fails if the alias is not configured — confirm.
    os.chdir(subprocess.check_output('git root', shell=True).rstrip())
    version_set(version, do_commit, do_tag)
#end main()
if __name__ == "__main__":
main()
| 26.7525 | 77 | 0.577423 |
63b6f6375cb76cdfb4493c51f8278ecec72edd16 | 63 | py | Python | python/doit/05/sleep1.py | gangserver/py_test | 869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4 | [
"Apache-2.0"
] | null | null | null | python/doit/05/sleep1.py | gangserver/py_test | 869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4 | [
"Apache-2.0"
] | null | null | null | python/doit/05/sleep1.py | gangserver/py_test | 869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4 | [
"Apache-2.0"
] | null | null | null | import time
for i in range(10):
print(i)
time.sleep(1) | 12.6 | 19 | 0.619048 |
6fcddb22e525f98d8e4a4c08e039e92d93321664 | 3,528 | py | Python | petstagram/petstagram/accounts/views.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | 1 | 2021-07-20T12:16:34.000Z | 2021-07-20T12:16:34.000Z | petstagram/petstagram/accounts/views.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | null | null | null | petstagram/petstagram/accounts/views.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | null | null | null | from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import LoginView
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, FormView
from petstagram.accounts.forms import LoginForm, RegisterForm, ProfileForm
from petstagram.accounts.models import Profile
from petstagram.pets.models import Pet
class LoginUserView(LoginView):
    """Class-based login view using the project's custom LoginForm."""
    template_name = 'accounts/login.html'
    authentication_form = LoginForm
    # NOTE(review): Django's LoginView does not consult `success_url`; the
    # redirect target comes from get_success_url()/LOGIN_REDIRECT_URL —
    # confirm this attribute actually has an effect.
    success_url = reverse_lazy('index')
# def login_user(request):
# if request.method == 'POST':
# form = LoginForm(request.POST)
# if form.is_valid():
# user = form.save()
# login(request, user)
# return redirect('index')
# else:
# form = LoginForm()
#
# context = {
# 'form': form,
# }
#
# return render(request, 'accounts/login.html', context)
class RegisterView(CreateView):
    """Sign-up view: creates the user account and logs it in immediately."""
    form_class = RegisterForm
    template_name = 'accounts/register.html'
    success_url = reverse_lazy('index')

    def form_valid(self, form):
        """Save the new user (CreateView sets self.object), then start a session."""
        result = super().form_valid(form)
        login(self.request, self.object)
        return result
# def register_user(request):
# if request.method == 'POST':
# form = RegisterForm(request.POST)
# if form.is_valid():
# user = form.save()
# login(request, user)
# return redirect('index')
# else:
# form = RegisterForm()
#
# context = {
# 'form': form,
# }
#
# return render(request, 'accounts/register.html', context)
def logout_user(request):
    """Terminate the current session and redirect to the home page."""
    logout(request)
    return redirect('index')
class ProfileDetailsView(LoginRequiredMixin, FormView):
    """Show the logged-in user's profile and allow updating the profile image.

    NOTE(review): assumes Profile.pk equals the User pk (request.user.id) —
    confirm against the Profile model.
    """
    template_name = 'accounts/user_profile.html'
    form_class = ProfileForm
    success_url = reverse_lazy('profile details')
    object = None  # the Profile being displayed/edited; populated in get()/post()

    def get(self, request, *args, **kwargs):
        # Load the profile before rendering so get_context_data can use it.
        self.object = Profile.objects.get(pk=request.user.id)
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = Profile.objects.get(pk=request.user.id)
        return super().post(request, *args, **kwargs)

    def form_valid(self, form):
        # Only the image is editable here; persist it on the profile.
        self.object.profile_image = form.cleaned_data['profile_image']
        self.object.save()
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['pets'] = Pet.objects.filter(user_id=self.request.user.id)
        context['profile'] = self.object
        return context
# @login_required
# def profile_details(request):
# profile = Profile.objects.get(pk=request.user.id)
# if request.method == 'POST':
# form = ProfileForm(
# request.POST,
# request.FILES,
# instance=profile,
# )
# if form.is_valid():
# form.save()
# return redirect('profile details')
# else:
# form = ProfileForm(instance=profile)
#
# user_pets = Pet.objects.filter(user_id=request.user.id)
#
# context = {
# 'form': form,
# 'pets': user_pets,
# 'profile': profile,
# }
#
# return render(request, 'accounts/user_profile.html', context)
| 28.918033 | 74 | 0.641723 |
4191dbd87dcc57324d738a02328559017a72554c | 1,066 | py | Python | email_notification/tests/test_senders.py | juhasuv/tilavarauspalvelu-core | ba1f3241f7ea5b3949c410c7de2a58c4be951966 | [
"MIT"
] | null | null | null | email_notification/tests/test_senders.py | juhasuv/tilavarauspalvelu-core | ba1f3241f7ea5b3949c410c7de2a58c4be951966 | [
"MIT"
] | null | null | null | email_notification/tests/test_senders.py | juhasuv/tilavarauspalvelu-core | ba1f3241f7ea5b3949c410c7de2a58c4be951966 | [
"MIT"
] | null | null | null | from assertpy import assert_that
from django.core import mail
from django.test import override_settings
from email_notification.models import EmailType
from email_notification.sender.senders import send_reservation_email_notification
from email_notification.tests.base import ReservationEmailBaseTestCase
@override_settings(EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend")
class SendReservationEmailNotificationTestCase(ReservationEmailBaseTestCase):
    """Verify that a reservation-confirmed email is rendered and sent.

    Uses the in-memory email backend so mail.outbox captures the message.
    """

    def test_send_email_success(self):
        send_reservation_email_notification(
            EmailType.RESERVATION_CONFIRMED, self.reservation
        )
        # Expected body/subject come from the template fixtures set up in
        # the base test case (id zero-padded to 10 digits).
        should_be_body = f"This is the { str(self.reservation.id).zfill(10) } content"
        should_be_subject = f"Los subjectos { self.reservation.name }"
        assert_that(len(mail.outbox)).is_equal_to(1)
        assert_that(mail.outbox[0].subject).is_equal_to(should_be_subject)
        assert_that(mail.outbox[0].body).is_equal_to(should_be_body)
        assert_that(mail.outbox[0].to).is_equal_to([self.reservation.reservee_email])
| 46.347826 | 86 | 0.789869 |
501e53a42c5c6295ec17ee9c2af82dd40481bbc3 | 24,879 | py | Python | autoload/leaderf/python/leaderf/bufTagExpl.py | lu5je0/LeaderF | 6cf6862013892200e64945af3a01157a4eb76293 | [
"Apache-2.0"
] | 1,914 | 2015-01-16T07:39:58.000Z | 2022-03-31T15:19:52.000Z | autoload/leaderf/python/leaderf/bufTagExpl.py | lu5je0/LeaderF | 6cf6862013892200e64945af3a01157a4eb76293 | [
"Apache-2.0"
] | 833 | 2015-07-20T08:57:34.000Z | 2022-03-24T07:33:12.000Z | autoload/leaderf/python/leaderf/bufTagExpl.py | lu5je0/LeaderF | 6cf6862013892200e64945af3a01157a4eb76293 | [
"Apache-2.0"
] | 232 | 2015-08-25T08:18:13.000Z | 2022-03-08T11:18:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import re
import os
import sys
import os.path
import tempfile
import itertools
import multiprocessing
from .utils import *
from .explorer import *
from .manager import *
from .asyncExecutor import AsyncExecutor
#*****************************************************
# BufTagExplorer
#*****************************************************
class BufTagExplorer(Explorer):
    def __init__(self):
        """Read ctags configuration and initialize the per-buffer tag caches."""
        self._ctags = lfEval("g:Lf_Ctags")
        self._supports_preview = int(lfEval("g:Lf_PreviewCode"))
        self._tag_list = {}         # a dict with (key, value) = (buffer number, taglist)
        self._buf_changedtick = {}  # a dict with (key, value) = (buffer number, changedtick)
        self._executor = []         # AsyncExecutors for in-flight ctags processes
    def getContent(self, *args, **kwargs):
        """Return the tag lines to display.

        With "--all", tags of every listed buffer are returned (cached
        results are reused when no buffer changed); otherwise only the
        current buffer's tags.
        """
        if "--all" in kwargs.get("arguments", {}):  # all buffers
            cur_buffer = vim.current.buffer
            # Force-load every listed buffer so its contents are available.
            for b in vim.buffers:
                if b.options["buflisted"]:
                    if lfEval("bufloaded(%d)" % b.number) == '0':
                        vim.current.buffer = b
            if vim.current.buffer != cur_buffer:
                vim.current.buffer = cur_buffer

            for b in vim.buffers:
                if b.options["buflisted"] and b.name:
                    changedtick = int(lfEval("getbufvar(%d, 'changedtick')" % b.number))
                    if changedtick != self._buf_changedtick.get(b.number, -1):
                        break
            else:
                # No buffer changed since last run: serve everything from cache.
                return itertools.chain.from_iterable(self._tag_list.values())

            return itertools.chain.from_iterable(self._getTagList())
        else:
            result = self._getTagResult(vim.current.buffer)
            if not isinstance(result, list):
                result = self._formatResult(*result)
            tag_list = []
            for i, line in enumerate(result):
                if self._supports_preview and i & 1:
                    # odd lines are the preview (source code) lines
                    tag_list.append(line)
                else:
                    # strip the file name from "file:line" for the current buffer
                    first, second = line.rsplit(":", 1)
                    tag_list.append("{}\t :{}".format(first.rsplit("\t", 1)[0], second))
            return tag_list
    def _getTagList(self):
        """Yield tag lists for all listed buffers, batching ctags runs.

        Buffers are processed cpu_count() at a time so their ctags
        subprocesses run concurrently.
        """
        buffers = [b for b in vim.buffers]
        n = multiprocessing.cpu_count()
        for i in range(0, len(vim.buffers), n):
            tag_list = []    # cached results, already formatted
            exe_result = []  # (buffer, async result) pairs still to format
            for b in buffers[i:i+n]:
                if b.options["buflisted"] and b.name:
                    result = self._getTagResult(b)
                    if isinstance(result, list):
                        tag_list.extend(result)
                    else:
                        exe_result.append(result)
            if not exe_result:
                yield tag_list
            else:
                # Lazily format the async ctags output as it is consumed.
                exe_taglist = (self._formatResult(*r) for r in exe_result)
                yield itertools.chain(tag_list, itertools.chain.from_iterable(exe_taglist))
    def _getTagResult(self, buffer):
        """Return cached tags for *buffer*, or start an async ctags run.

        Returns either a list (cache hit / nothing to do) or a
        (buffer, async_result) tuple to be passed to _formatResult().
        """
        if not buffer.name or lfEval("bufloaded(%d)" % buffer.number) == '0':
            return []
        changedtick = int(lfEval("getbufvar(%d, 'changedtick')" % buffer.number))
        # there is no change since last call
        if changedtick == self._buf_changedtick.get(buffer.number, -1):
            if buffer.number in self._tag_list:
                return self._tag_list[buffer.number]
            else:
                return []
        else:
            self._buf_changedtick[buffer.number] = changedtick

        # Filetype-specific ctags tweaks (e.g. include prototypes).
        if lfEval("getbufvar(%d, '&filetype')" % buffer.number) == "cpp":
            extra_options = "--language-force=C++ --c++-kinds=+p"
        elif lfEval("getbufvar(%d, '&filetype')" % buffer.number) == "c":
            extra_options = "--c-kinds=+p"
        elif lfEval("getbufvar(%d, '&filetype')" % buffer.number) == "python":
            extra_options = "--language-force=Python"
        else:
            extra_options = ""

        executor = AsyncExecutor()
        self._executor.append(executor)
        if buffer.options["modified"] == True:
            # Unsaved changes: dump the buffer to a temp file for ctags.
            if sys.version_info >= (3, 0):
                tmp_file = partial(tempfile.NamedTemporaryFile, encoding=lfEval("&encoding"))
            else:
                tmp_file = tempfile.NamedTemporaryFile
            with tmp_file(mode='w+', suffix='_'+os.path.basename(buffer.name), delete=False) as f:
                for line in buffer[:]:
                    f.write(line + '\n')
                file_name = f.name
            # {tagname}<Tab>{tagfile}<Tab>{tagaddress}[;"<Tab>{tagfield}..]
            # {tagname}<Tab>{tagfile}<Tab>{tagaddress};"<Tab>{kind}<Tab>{scope}
            cmd = '{} -n -u --fields=Ks {} -f- "{}"'.format(self._ctags, extra_options, lfDecode(file_name))
            # Remove the temp file once ctags has finished with it.
            result = executor.execute(cmd, cleanup=partial(os.remove, file_name))
        else:
            cmd = '{} -n -u --fields=Ks {} -f- "{}"'.format(self._ctags, extra_options, lfDecode(buffer.name))
            result = executor.execute(cmd)
        return (buffer, result)
    def _formatResult(self, buffer, result):
        """Turn raw ctags output lines into aligned display lines and cache them.

        Display format: "{tag}{pad}{kind}{pad}\t{scope}\t  {file}:{line}\t{bufnr}";
        when preview is enabled a second line with the source code follows
        each tag line.
        """
        if not buffer.name or lfEval("bufloaded(%d)" % buffer.number) == '0':
            return []

        # a list of [tag, file, line, kind, scope]
        output = [line.split('\t') for line in result]
        if not output:
            return []
        if len(output[0]) < 4:
            lfCmd("echoerr '%s'" % escQuote(str(output[0])))
            return []

        # Measure tag/kind widths so the columns can be aligned.
        tag_total_len = 0
        max_kind_len = 0
        max_tag_len = 0
        for _, item in enumerate(output):
            tag_len = len(item[0])
            tag_total_len += tag_len
            if tag_len > max_tag_len:
                max_tag_len = tag_len
            kind_len = len(item[3])
            if kind_len > max_kind_len:
                max_kind_len = kind_len
        ave_taglen = tag_total_len // len(output)
        # Cap the tag column at twice the average tag length.
        tag_len = min(max_tag_len, ave_taglen * 2)

        tab_len = buffer.options["shiftwidth"]
        if tab_len == 0:
            tab_len = 4
        # Round the tag+kind column width up to a tab stop.
        std_tag_kind_len = tag_len // tab_len * tab_len + tab_len + max_kind_len

        tag_list = []
        for _, item in enumerate(output):
            scope = item[4] if len(item) > 4 else "Global"
            tag_kind = "{:{taglen}s}\t{}".format(item[0],   # tag
                                                 item[3],   # kind
                                                 taglen=tag_len
                                                 )
            # Pad using the on-screen width (handles multi-byte characters).
            tag_kind_len = int(lfEval("strdisplaywidth('%s')" % escQuote(tag_kind)))
            num = std_tag_kind_len - tag_kind_len
            space_num = num if num > 0 else 0
            bufname = buffer.name if vim.options["autochdir"] else lfRelpath(buffer.name)
            line = "{}{}\t{}\t{:2s}{}:{}\t{}".format(tag_kind,
                                                     ' ' * space_num,
                                                     scope,          # scope
                                                     ' ',
                                                     bufname,        # file
                                                     item[2][:-2],   # line (strip trailing ';"')
                                                     buffer.number
                                                     )
            tag_list.append(line)
            if self._supports_preview:
                # code = "{:{taglen}s}\t{}".format(' ' * len(item[0]),
                #                                  buffer[int(item[2][:-2]) - 1].lstrip(),
                #                                  taglen=tag_len
                #                                  )
                code = "\t\t{}".format(buffer[int(item[2][:-2]) - 1].lstrip())
                tag_list.append(code)

        self._tag_list[buffer.number] = tag_list
        return tag_list
    def getStlCategory(self):
        """Category name shown in the LeaderF statusline."""
        return 'BufTag'
    def getStlCurDir(self):
        """Current working directory, escaped for statusline display."""
        return escQuote(lfEncode(lfGetCwd()))
def removeCache(self, buf_number):
if buf_number in self._tag_list:
del self._tag_list[buf_number]
if buf_number in self._buf_changedtick:
del self._buf_changedtick[buf_number]
    def cleanup(self):
        """Kill any still-running ctags subprocesses (called on VimLeavePre)."""
        for exe in self._executor:
            exe.killProcess()
        self._executor = []
#*****************************************************
# BufTagExplManager
#*****************************************************
class BufTagExplManager(Manager):
    def __init__(self):
        """Initialize base manager state plus preview support flag."""
        super(BufTagExplManager, self).__init__()
        self._supports_preview = int(lfEval("g:Lf_PreviewCode"))
        self._orig_line = ''  # last line a popup preview was shown for
    def _getExplClass(self):
        """Explorer class this manager drives."""
        return BufTagExplorer
    def _defineMaps(self):
        """Install the BufTag-specific buffer-local key mappings (vimscript side)."""
        lfCmd("call leaderf#BufTag#Maps()")
    def _acceptSelection(self, *args, **kwargs):
        """Jump to the tag on the selected line.

        args: (line[, buffer, line_nr]); kwargs: mode ('t' = new tabpage),
        preview (present when called for preview only).
        """
        if len(args) == 0:
            return
        line = args[0]
        if line[0].isspace():  # if g:Lf_PreviewCode == 1
            # The cursor is on a preview (code) line; use the tag line next to it.
            buffer = args[1]
            line_nr = args[2]
            if self._getInstance().isReverseOrder():
                line = buffer[line_nr]
            else:
                line = buffer[line_nr - 2]
        # {tag} {kind} {scope} {file}:{line} {buf_number}
        items = re.split(" *\t *", line)
        tagname = items[0]
        line_nr = items[3].rsplit(":", 1)[1]
        buf_number = items[4]

        if kwargs.get("mode", '') == 't':
            buf_name = lfEval("bufname(%s)" % buf_number)
            lfDrop('tab', buf_name, line_nr)
        else:
            lfCmd("hide buffer +%s %s" % (line_nr, buf_number))
        if "preview" not in kwargs:
            lfCmd("norm! ^")
            # Place the cursor on the tag name within the line (\V = very nomagic).
            lfCmd("call search('\V%s', 'Wc', line('.'))" % escQuote(tagname))
        lfCmd("norm! zv")
        lfCmd("norm! zz")

        if "preview" not in kwargs:
            # Briefly flash cursorline to make the jump target visible.
            lfCmd("setlocal cursorline! | redraw | sleep 150m | setlocal cursorline!")

        # Remember the window's original cursorline setting so it can be
        # restored in _beforeExit().
        if vim.current.window not in self._cursorline_dict:
            self._cursorline_dict[vim.current.window] = vim.current.window.options["cursorline"]

        lfCmd("setlocal cursorline")
def _getDigest(self, line, mode):
"""
specify what part in the line to be processed and highlighted
Args:
mode: 0, return the whole line
1, return the tagname
2, return the remaining part
"""
if mode == 0:
return line
elif mode == 1:
return re.split(" *\t *", line, 1)[0]
else:
return re.split(" *\t *", line, 1)[1]
def _getDigestStartPos(self, line, mode):
"""
return the start position of the digest returned by _getDigest()
Args:
mode: 0, return the start position of the whole line
1, return the start position of tagname
2, return the start position remaining part
"""
if mode == 0:
return 0
elif mode == 1:
return 0
else:
return len(line) - len(re.split(" *\t *", line, 1)[1])
def _createHelp(self):
help = []
help.append('" <CR>/<double-click>/o : open file under cursor')
help.append('" x : open file under cursor in a horizontally split window')
help.append('" v : open file under cursor in a vertically split window')
help.append('" t : open file under cursor in a new tabpage')
help.append('" i/<Tab> : switch to input mode')
help.append('" p : preview the result')
help.append('" q : quit')
help.append('" <F1> : toggle this help')
help.append('" ---------------------------------------------------------')
return help
    def _afterEnter(self):
        """Set up autocmds and highlight matches after the LeaderF window opens."""
        super(BufTagExplManager, self)._afterEnter()
        # Invalidate the cache when a buffer is wiped out; kill ctags on Vim exit.
        lfCmd("augroup Lf_BufTag")
        lfCmd("autocmd!")
        lfCmd("autocmd BufWipeout * call leaderf#BufTag#removeCache(expand('<abuf>'))")
        lfCmd("autocmd VimLeavePre * call leaderf#BufTag#cleanup()")
        lfCmd("augroup END")
        # NOTE(review): `id` shadows the builtin; kept as-is (doc-only change).
        if self._getInstance().getWinPos() == 'popup':
            # Popup windows require matchadd() to run inside the popup context.
            lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_buftagKind'', ''^[^\t]*\t\zs\S\+'')')"""
                  % self._getInstance().getPopupWinId())
            id = int(lfEval("matchid"))
            self._match_ids.append(id)
            lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_buftagScopeType'', ''[^\t]*\t\S\+\s*\zs\w\+:'')')"""
                  % self._getInstance().getPopupWinId())
            id = int(lfEval("matchid"))
            self._match_ids.append(id)
            lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_buftagScope'', ''^[^\t]*\t\S\+\s*\(\w\+:\)\=\zs\S\+'')')"""
                  % self._getInstance().getPopupWinId())
            id = int(lfEval("matchid"))
            self._match_ids.append(id)
            lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_buftagDirname'', ''[^\t]*\t\S\+\s*\S\+\s*\zs[^\t]\+'')')"""
                  % self._getInstance().getPopupWinId())
            id = int(lfEval("matchid"))
            self._match_ids.append(id)
            lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_buftagLineNum'', ''\d\+\t\ze\d\+$'')')"""
                  % self._getInstance().getPopupWinId())
            id = int(lfEval("matchid"))
            self._match_ids.append(id)
            lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_buftagCode'', ''^\s\+.*'')')"""
                  % self._getInstance().getPopupWinId())
            id = int(lfEval("matchid"))
            self._match_ids.append(id)
        else:
            id = int(lfEval('''matchadd('Lf_hl_buftagKind', '^[^\t]*\t\zs\S\+')'''))
            self._match_ids.append(id)
            id = int(lfEval('''matchadd('Lf_hl_buftagScopeType', '[^\t]*\t\S\+\s*\zs\w\+:')'''))
            self._match_ids.append(id)
            id = int(lfEval('''matchadd('Lf_hl_buftagScope', '^[^\t]*\t\S\+\s*\(\w\+:\)\=\zs\S\+')'''))
            self._match_ids.append(id)
            id = int(lfEval('''matchadd('Lf_hl_buftagDirname', '[^\t]*\t\S\+\s*\S\+\s*\zs[^\t]\+')'''))
            self._match_ids.append(id)
            id = int(lfEval('''matchadd('Lf_hl_buftagLineNum', '\d\+\t\ze\d\+$')'''))
            self._match_ids.append(id)
            id = int(lfEval('''matchadd('Lf_hl_buftagCode', '^\s\+.*')'''))
            self._match_ids.append(id)
    def _beforeExit(self):
        """Stop the refresh timer and restore each window's cursorline setting."""
        super(BufTagExplManager, self)._beforeExit()
        if self._timer_id is not None:
            lfCmd("call timer_stop(%s)" % self._timer_id)
            self._timer_id = None
        for k, v in self._cursorline_dict.items():
            if k.valid:  # the window may have been closed meanwhile
                k.options["cursorline"] = v
        self._cursorline_dict.clear()
def _getUnit(self):
"""
indicates how many lines are considered as a unit
"""
if self._supports_preview:
return 2
else:
return 1
    def _supportsRefine(self):
        """Refine mode (separate name/path patterns) is supported."""
        return True
    def _fuzzyFilter(self, is_full_path, get_weight, iterable):
        """
        return a list, each item is a pair (weight, (line1, line2))

        With preview enabled, lines come in (tag, code) pairs; only the tag
        line is weighted, but both lines are kept together in the result.
        """
        if self._supports_preview:
            if len(iterable) < 2:
                return []
            getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
            # iterable[::2] are the tag lines; iterable[2*i+1] is the matching code line.
            pairs = ((get_weight(getDigest(line)), (line, iterable[2*i+1]))
                     for i, line in enumerate(iterable[::2]))
            MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
            return (t for t in pairs if t[0] > MIN_WEIGHT)
        else:
            return super(BufTagExplManager, self)._fuzzyFilter(is_full_path,
                                                               get_weight,
                                                               iterable)
    def _refineFilter(self, first_get_weight, get_weight, iterable):
        """Refine-mode filter: weight the tag name and the rest separately.

        Yields (combined_weight, (tag_line, code_line)) pairs; both partial
        weights must clear MIN_WEIGHT.
        """
        if self._supports_preview:
            if len(iterable) < 2:
                return []
            getDigest = self._getDigest
            tuples = ((first_get_weight(getDigest(line, 1)), get_weight(getDigest(line, 2)),
                       line, iterable[2*i+1]) for i, line in enumerate(iterable[::2]))
            MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
            return ((i[0] + i[1], (i[2], i[3])) for i in tuples if i[0] > MIN_WEIGHT and i[1] > MIN_WEIGHT)
        else:
            return super(BufTagExplManager, self)._refineFilter(first_get_weight,
                                                                get_weight,
                                                                iterable)
    def _regexFilter(self, iterable):
        """Regex-mode filter via Vim's own matcher (g:LfNoErrMsgMatch).

        Keeps matching tag lines together with their preview (code) lines.
        """
        if self._supports_preview:
            try:
                # '-2' means the pattern itself is invalid — return nothing.
                if ('-2' == lfEval("g:LfNoErrMsgMatch('', '%s')" % escQuote(self._cli.pattern))):
                    return iter([])
                else:
                    result = []
                    for i, line in enumerate(iterable[::2]):
                        # '-1' means no match on the tag name.
                        if ('-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" %
                                (escQuote(self._getDigest(line, 1).strip()),
                                 escQuote(self._cli.pattern)))):
                            result.append(line)
                            result.append(iterable[2*i+1])
                    return result
            except vim.error:
                return iter([])
        else:
            return super(BufTagExplManager, self)._regexFilter(iterable)
    def _getList(self, pairs):
        """
        return a list constructed from `pairs`
        Args:
            pairs: a list of tuple(weight, (line1, line2))

        With preview enabled each pair expands to its two display lines.
        """
        if self._supports_preview:
            result = []
            for _, p in enumerate(pairs):
                result.extend(p[1])
            return result
        else:
            return super(BufTagExplManager, self)._getList(pairs)
    def _toUp(self):
        """Move the selection up; with preview, skip over the code lines (2 at a time)."""
        if self._supports_preview:
            if self._getInstance().isReverseOrder() and self._getInstance().getCurrentPos()[0] <= 3:
                # Near the top in reverse order: refill the window content.
                self._setResultContent()
                if self._cli.pattern and len(self._highlight_pos) < len(self._getInstance().buffer) // 2 \
                        and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
                    self._highlight_method()

            if self._getInstance().isReverseOrder():
                lfCmd("norm! 3kj")
                self._getInstance().setLineNumber()
            else:
                if self._getInstance().getWinPos() == 'popup':
                    lfCmd("call win_execute(%d, 'norm! 2k')" % (self._getInstance().getPopupWinId()))
                else:
                    lfCmd("norm! 2k")
        else:
            super(BufTagExplManager, self)._toUp()

        lfCmd("setlocal cursorline!")   # these two help to redraw the statusline,
        lfCmd("setlocal cursorline!")   # also fix a weird bug of vim
    def _toDown(self):
        """Move the selection down; with preview, skip over the code lines."""
        if self._supports_preview:
            if self._getInstance().isReverseOrder():
                lfCmd("norm! 2j")
                self._getInstance().setLineNumber()
            else:
                if self._getInstance().getWinPos() == 'popup':
                    lfCmd("call win_execute(%d, 'norm! 3jk')" % (self._getInstance().getPopupWinId()))
                else:
                    lfCmd("norm! 3jk")
        else:
            super(BufTagExplManager, self)._toDown()

        lfCmd("setlocal cursorline!")   # these two help to redraw the statusline,
        lfCmd("setlocal cursorline!")   # also fix a weird bug of vim
    def removeCache(self, buf_number):
        """Forward cache invalidation for a wiped-out buffer to the explorer."""
        self._getExplorer().removeCache(buf_number)
    def _previewResult(self, preview):
        """Preview the tag under the cursor, in a popup or in the original window."""
        if self._getInstance().getWinPos() == 'floatwin':
            self._cli.buildPopupPrompt()

        if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
            if self._orig_line != self._getInstance().currentLine:
                # Selection changed: close the stale preview popup.
                self._closePreviewPopup()
            else:
                return

        if not self._needPreview(preview):
            return

        line = self._getInstance().currentLine
        line_nr = self._getInstance().window.cursor[0]
        if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
            self._previewInPopup(line, self._getInstance().buffer, line_nr)
            return

        # Fall back to jumping in the original window; suppress autocmds and
        # restore the previous tabpage/window/buffer afterwards.
        orig_pos = self._getInstance().getOriginalPos()
        cur_pos = (vim.current.tabpage, vim.current.window, vim.current.buffer)

        saved_eventignore = vim.options['eventignore']
        vim.options['eventignore'] = 'BufLeave,WinEnter,BufEnter'
        try:
            vim.current.tabpage, vim.current.window, vim.current.buffer = orig_pos
            self._acceptSelection(line, self._getInstance().buffer, line_nr, preview=True)
        finally:
            vim.current.tabpage, vim.current.window, vim.current.buffer = cur_pos
            vim.options['eventignore'] = saved_eventignore
    def _bangEnter(self):
        """Handle the "!" (bang) variant: start async population for --all mode."""
        super(BufTagExplManager, self)._bangEnter()
        if "--all" in self._arguments and not self._is_content_list:
            if lfEval("exists('*timer_start')") == '0':
                lfCmd("echohl Error | redraw | echo ' E117: Unknown function: timer_start' | echohl NONE")
                return
            self._callback(bang=True)
            if self._read_finished < 2:
                # Poll for more async ctags output until reading finishes.
                self._timer_id = lfEval("timer_start(1, 'leaderf#BufTag#TimerCallback', {'repeat': -1})")
        else:
            self._relocateCursor()
    def _bangReadFinished(self):
        """Once async reading finishes, position the cursor on the nearest tag."""
        self._relocateCursor()
    def _relocateCursor(self):
        """Move the LeaderF cursor to the tag nearest the original cursor line.

        Scans the result buffer for tags belonging to the originating buffer
        and selects the last one at or before the original line number.
        """
        remember_last_status = "--recall" in self._arguments \
                or lfEval("g:Lf_RememberLastSearch") == '1' and self._cli.pattern
        if remember_last_status:
            return

        inst = self._getInstance()
        if inst.empty():
            return
        orig_buf_nr = inst.getOriginalPos()[2].number
        orig_line = inst.getOriginalCursor()[0]
        tags = []  # (result-buffer index, buffer number, source line)
        for index, line in enumerate(inst.buffer, 1):
            if self._supports_preview:
                # Skip the interleaved preview (code) lines.
                if self._getInstance().isReverseOrder():
                    if index & 1 == 1:
                        continue
                elif index & 1 == 0:
                    continue
            items = re.split(" *\t *", line)
            line_nr = int(items[3].rsplit(":", 1)[1])
            buf_number = int(items[4])
            if orig_buf_nr == buf_number:
                tags.append((index, buf_number, line_nr))

        if self._getInstance().isReverseOrder():
            tags = tags[::-1]
        # Find the last tag whose source line is at or before the original line.
        last = len(tags) - 1
        while last >= 0:
            if tags[last][2] <= orig_line:
                break
            last -= 1
        if last >= 0:
            index = tags[last][0]
            if self._getInstance().getWinPos() == 'popup':
                # Temporarily switch the popup filter so the cursor move works.
                lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')"
                      % (self._getInstance().getPopupWinId(), 'leaderf#PopupFilter'))
                lfCmd("""call win_execute(%d, "exec 'norm! %dG'")""" % (self._getInstance().getPopupWinId(), int(index)))
                if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
                    lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
                          'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
                else:
                    lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
                          % (self._getInstance().getPopupWinId(), id(self)))
            else:
                lfCmd(str(index))
                lfCmd("norm! zz")
    def _previewInPopup(self, *args, **kwargs):
        """Open a popup preview for the tag on the given result line.

        args[0] is the current buffer line; when g:Lf_PreviewCode == 1 the
        cursor may sit on an indented code-preview line, in which case the
        real tag line is fetched from args[1] (buffer) / args[2] (line nr).
        """
        if len(args) == 0:
            return
        line = args[0]
        if line[0].isspace(): # if g:Lf_PreviewCode == 1
            buffer = args[1]
            line_nr = args[2]
            # The tag line is adjacent to the code line; which neighbour
            # depends on the result ordering.
            if self._getInstance().isReverseOrder():
                line = buffer[line_nr]
            else:
                line = buffer[line_nr - 2]
        # {tag} {kind} {scope} {file}:{line} {buf_number}
        items = re.split(" *\t *", line)
        tagname = items[0]
        line_nr = items[3].rsplit(":", 1)[1]
        buf_number = int(items[4])
        self._createPopupPreview(tagname, buf_number, line_nr)
#*****************************************************
# bufTagExplManager is a singleton
#*****************************************************
# Module-level instance shared by every leaderf#BufTag command.
bufTagExplManager = BufTagExplManager()

# Public API of this module.
__all__ = ['bufTagExplManager']
| 41.673367 | 133 | 0.514571 |
78e6eeffcc8cb5f473de5f785facfa4e28a37fb7 | 293 | py | Python | PythonExercicios/ex109/teste.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex109/teste.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex109/teste.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | from ex109 import moeda
# Demo driver for the `moeda` currency helpers (exercise 109).
preco = float(input('Digite o preço: R$'))
print(f'A metade de {moeda.moeda(preco)} é {moeda.metade(preco)}')
print(f'O dobro de {moeda.moeda(preco)} é {moeda.dobro(preco, True)}')
print(f'Aumentando 10% temos {moeda.aumentar(preco, True)}')
print(f'Reduzindo 13% temos {moeda.diminuir(preco)}')
adc49f2d95f0d3ae888c59f393cd8731fecfa808 | 3,597 | py | Python | src/greentranslator/livy.py | ResearchSoftwareInstitute/greendatatranslator | b20fb52288ed0560158e3a0dec375888ea90e400 | [
"BSD-3-Clause"
] | 2 | 2018-06-25T18:36:45.000Z | 2019-01-29T16:29:36.000Z | src/greentranslator/livy.py | ResearchSoftwareInstitute/greendatatranslator | b20fb52288ed0560158e3a0dec375888ea90e400 | [
"BSD-3-Clause"
] | 183 | 2017-02-07T18:50:59.000Z | 2020-04-01T15:10:27.000Z | src/greentranslator/livy.py | ResearchSoftwareInstitute/greendatatranslator | b20fb52288ed0560158e3a0dec375888ea90e400 | [
"BSD-3-Clause"
] | null | null | null | import json, pprint, requests, textwrap, time, sys, atexit
from string import Template
class LivyContext(object):
def __init__(self, host='http://localhost:8998', kind='pyspark'):
self.host = host
self.data = { 'kind' : kind }
self.headers = { 'Content-Type': 'application/json' }
r = requests.post(host + '/sessions', data=json.dumps(self.data), headers=self.headers)
self.session_url = host + r.headers['location']
self.statements_url = self.session_url + '/statements'
print ("Waiting for Spark connection")
while True:
r = requests.get (self.session_url, headers=self.headers)
if r.json()['state'] == 'idle':
break
else:
time.sleep (2)
print ("Connected to Spark session")
def execute (self, code):
data = { 'code': textwrap.dedent (code) }
r = requests.post(self.statements_url, data=json.dumps(data), headers=self.headers)
statement_url = self.host + r.headers['location']
r = None
while True:
r = requests.get(statement_url, headers=self.headers).json ()
if r['state'] == 'available':
break
result = None
output = r['output']
if output is None:
print ("Error: result is 'available' but output is None")
print (r)
else:
if not r['output']['status'] == 'ok':
print ("Encountered error: {}".format (r))
raise "Error: {}".format (r)
if 'data' in output:
output_data = output['data']
if 'text/plain' in output_data:
result = output_data['text/plain']
return result
def close (self):
requests.delete(self.session_url, headers=self.headers)
acsLoaderCode="""
class ACSLoader(object):
def __init__(self, path, table, sample_size=1.0):
self.path = path
self.rdd = self.load (sample_size=sample_size)
self.rdd.toDF().registerTempTable (table)
def load (self, sample_size=1.0):
return sqlContext.read. \
format('com.databricks.spark.csv'). \
options(comment='#'). \
options(delimiter=","). \
options(header='true'). \
load(self.path).rdd. \
sample (False, sample_size, 1234)
acs = ACSLoader (
path = "/projects/stars/translator/var/acs/dataworld/uscensusbureau-acs-2015-5-e-income/data/USA_All_States.csv",
table = "acs_income")
"""
class ACSIncome(LivyContext):
    """Livy session pre-loaded with the ACS income table (see acsLoaderCode)."""

    def __init__(self):
        LivyContext.__init__(self)
        # Register the "acs_income" temp table in the remote Spark session.
        r = self.execute (acsLoaderCode)

    def get_col (self, col):
        """Print (remotely) and return the given acs_income column as ints."""
        code = Template ("""
print(sqlContext.sql('select $column from acs_income').rdd.map(lambda r : int(r.$column)).collect ())
""").substitute (column=col)
        return self.execute (code)
def main0 ():
    """Run the code in the file named by argv[1] in a fresh Livy session."""
    code_path = sys.argv[1]
    lc = LivyContext ()
    with open(code_path, 'r') as stream:
        code = stream.read ()
    print (code)
    print(lc.execute (code))
    lc.close ()

# Shared session handle so the atexit hook can clean up on abnormal exit.
acs_income = None

def cleanup ():
    """atexit hook: close the shared ACS session if one was opened."""
    if acs_income:
        acs_income.close ()
atexit.register (cleanup)

def main ():
    """Open an ACS income session and repeatedly fetch one column."""
    # BUG FIX: without `global`, the session was bound to a local name, the
    # module-level `acs_income` stayed None and cleanup() never closed it.
    global acs_income
    acs_income = ACSIncome ()
    for x in range (0, 100):
        print (acs_income.get_col('B19037E_036'))
    acs_income.close ()

#main ()
28c7828a6c24cd0c3000d66f6e3b0c7b5980263b | 2,723 | py | Python | etc/reservoir_operation/dam_params/src/get_annualmax_mean.py | DirkEilander/CaMa-Flood_v4 | a8e6a157a08c2a0144b8143bc2eb78d5d81eb9a7 | [
"Apache-2.0"
] | 22 | 2021-01-17T15:22:33.000Z | 2022-01-22T15:14:50.000Z | etc/reservoir_operation/dam_params/src/get_annualmax_mean.py | zhongwangwei/CaMa-Flood_v4 | da1d1745568648858f02984b1e5b7ad05bc9bd3c | [
"Apache-2.0"
] | 3 | 2021-01-19T08:30:50.000Z | 2021-07-16T08:19:01.000Z | etc/reservoir_operation/dam_params/src/get_annualmax_mean.py | zhongwangwei/CaMa-Flood_v4 | da1d1745568648858f02984b1e5b7ad05bc9bd3c | [
"Apache-2.0"
] | 25 | 2021-01-17T15:22:35.000Z | 2022-01-15T08:32:48.000Z | import calendar
from datetime import datetime
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import pandas as pd
import matplotlib.dates as mdates
from matplotlib import colors
from scipy.signal import argrelmax
import sys
print(os.path.basename(__file__))

#### initial setting =====================================
# CLI: start year, end year, time step (currently unused) and run tag.
syear=int(sys.argv[1])
eyear=int(sys.argv[2])
dt  =int(sys.argv[3])
tag =sys.argv[4]

outdir = './inp/natsim/'
mapdir = './inp/map/'
dam_file = './'+tag+'/damloc_modified.csv'

## get map nx, ny ----------------------------------------
# params.txt: line 1 starts with nx, line 2 starts with ny.
f = open(mapdir+'/params.txt', 'r')
data = f.readline()
nx = int(data.strip().split(' ')[0])
data = f.readline()
ny = int(data.strip().split(' ')[0])
f.close()
print('CaMa map dim (nx,ny):', nx,ny)

damcsv = pd.read_csv(dam_file)
ndams = len(damcsv)
print('number of dams:', ndams)

##--------------------------------------------------
maxdays = 1 #number of days to consider extreme values in a year
max_outf = './'+tag+'/tmp_p01_AnnualMax.bin'
mean_outf= './'+tag+'/tmp_p01_AnnualMean.bin'

### calculate annual maximum -------------------------------
years = eyear - syear + 1
max_finarray = np.zeros((years*maxdays, ndams))
mean_yeararray = np.zeros((years, ndams))
# 1-based grid indices in the CSV -> 0-based array indices.
x_arr = damcsv['ix'].values - 1
y_arr = damcsv['iy'].values - 1
for i, year in enumerate(range(syear, eyear+1, 1)):
    print(' ')
    print('read natsim outflw: year=', year)
    ## read NAT outflw
    outflw_file = outdir + '/outflw' + str(year) + '.bin'
    outflw_all = np.fromfile(outflw_file, 'float32').reshape(-1,ny,nx)
    print(outflw_file)
    # Extract the discharge time series at every dam cell: (time, ndams).
    outflw_dam = outflw_all[:,y_arr,x_arr]
    print('outflw_dam.shape:', outflw_dam.shape)
    ## annual mean
    mean_yeararray[i,:] = np.mean(outflw_dam, axis=0)
    print('mean:', mean_yeararray[i,:5])
    ## annual maximum
    for j, row in damcsv.iterrows():
        outflw = outflw_dam[:,j]
        # Local maxima separated by order=8*7 samples, i.e. independent
        # flood peaks (~8 weeks apart assuming daily data — TODO confirm).
        maxindex = argrelmax(outflw, order=8*7)
        maxarray = outflw[maxindex]
        maxarray_sorted = np.sort(maxarray)[::-1]
        if len(maxarray_sorted) > 0:
            max_finarray[i*maxdays:(i+1)*maxdays, j] = maxarray_sorted[0:maxdays]
        else:
            # No local peak found: fall back to the largest raw values.
            outflw_sorted = np.sort(outflw)[::-1]
            max_finarray[i*maxdays:(i+1)*maxdays, j] = outflw_sorted[0:maxdays]
    print('max:', max_finarray[i*maxdays,:5])

print('save flood and mean discharge at dam grids')
max_finarray.astype('float32').tofile(max_outf)
mean_finarray = np.mean(mean_yeararray, axis=0)
mean_finarray.astype('float32').tofile(mean_outf)
print('-- flood discharge', max_outf)
print('-- mean discharge', mean_outf)
print('#########################')
print(' ')
# %%
e47ea2d194f0472f02d705994cc2bfe843ae1930 | 15 | py | Python | example/multiple_sources/settings.py | RonnyPfannschmidt/dynaconf | 3223f6586aa6ae3ef7b5cd7d198fb950f5038526 | [
"MIT"
] | 2,293 | 2015-08-14T22:39:31.000Z | 2022-03-31T12:44:49.000Z | example/multiple_sources/settings.py | RonnyPfannschmidt/dynaconf | 3223f6586aa6ae3ef7b5cd7d198fb950f5038526 | [
"MIT"
] | 676 | 2015-08-20T19:29:56.000Z | 2022-03-31T13:45:51.000Z | example/multiple_sources/settings.py | RonnyPfannschmidt/dynaconf | 3223f6586aa6ae3ef7b5cd7d198fb950f5038526 | [
"MIT"
] | 255 | 2015-12-02T21:16:33.000Z | 2022-03-20T22:03:46.000Z | PYTHON_VAR = 1
| 7.5 | 14 | 0.733333 |
09b0d84080bc62f7fe29fcdaab0e19c519f84b65 | 806 | py | Python | setup.py | admariner/GA4-Measurement-Protocol-Python | c42cb0f62be6d7fea2f96e880559e513d7707672 | [
"BSD-3-Clause"
] | 19 | 2020-11-18T20:49:12.000Z | 2022-02-08T04:49:36.000Z | setup.py | admariner/GA4-Measurement-Protocol-Python | c42cb0f62be6d7fea2f96e880559e513d7707672 | [
"BSD-3-Clause"
] | 18 | 2020-11-20T21:04:20.000Z | 2022-01-20T03:28:52.000Z | setup.py | admariner/GA4-Measurement-Protocol-Python | c42cb0f62be6d7fea2f96e880559e513d7707672 | [
"BSD-3-Clause"
] | 6 | 2020-11-18T15:16:44.000Z | 2022-01-18T01:24:19.000Z | from setuptools import setup
import sys
try:
long_description=open('DESCRIPTION.rst', 'rt').read()
except Exception:
long_description="Google Analytics 4 Measurement Protocol in Python; an implementation of Google's Analytics 4 Measurement Protocol"
VERSION = '1.1.1'
setup(
name = "ga4mp",
description = "Google Analytics 4 Measurement Protocol Python Module",
long_description = long_description,
version = VERSION or 'NOTFOUND',
author = 'Nate Bukowski',
author_email = 'nate.bukowski@adswerve.com',
url = 'https://github.com/adswerve/GA4-Measurement-Protocol-Python',
download_url = "https://github.com/adswerve/GA4-Measurement-Protocol-Python" + VERSION,
license = 'BSD',
packages = ["ga4mp"],
install_requires = [],
zip_safe = True,
)
| 26 | 136 | 0.705955 |
49758cecb4fe1316090ecc7a38b40915b6bf6792 | 1,588 | py | Python | tests/ex_redundant_ik_grad_descent.py | DerekYJC/bmi_python | 7b9cf3f294a33688db24b0863c1035e9cc6999ea | [
"Apache-2.0"
] | null | null | null | tests/ex_redundant_ik_grad_descent.py | DerekYJC/bmi_python | 7b9cf3f294a33688db24b0863c1035e9cc6999ea | [
"Apache-2.0"
] | 12 | 2020-07-31T18:58:31.000Z | 2022-02-10T14:36:00.000Z | tests/ex_redundant_ik_grad_descent.py | DerekYJC/bmi_python | 7b9cf3f294a33688db24b0863c1035e9cc6999ea | [
"Apache-2.0"
] | 4 | 2020-03-06T15:39:00.000Z | 2021-05-26T17:03:21.000Z | #!/usr/bin/python
'''
Example of inverse kinematics using the simple gradient descent method
'''
from riglib.bmi import robot_arms
reload(robot_arms)
import numpy as np
import matplotlib.pyplot as plt
import time
pi = np.pi

# 12-element joint vector in degrees -> radians; only every third entry
# (starting at index 1) drives the planar sub-chain used below.
q = np.array([0, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) * pi/180
q_sub = q[1::3]

# Full 3D chain with link lengths [15, 15, 5, 5].
chain = robot_arms.KinematicChain([15, 15, 5, 5])
[t, allt] = chain.forward_kinematics(q);

# Planar (XZ) version of the same chain, driven by the 4 sub-joints.
planar_chain = robot_arms.PlanarXZKinematicChain([15, 15, 5, 5])
[t, allt] = planar_chain.forward_kinematics(q_sub);

# TODO check the sign for the finger joint limits
inf = np.inf
planar_chain.joint_limits = [(-inf, inf), (-inf, inf), (-pi/2, pi/2), (-pi/2, 10*pi/180)]

# target_pos = np.array([10, 0, 10])
shoulder_anchor = np.array([2, 0, -15])
# Random target in the workspace, expressed relative to the shoulder anchor...
x_target_pos = (np.random.randn() - 0.5)*25
z_target_pos = (np.random.randn() - 0.5)*14
target_pos = np.array([x_target_pos, 0, z_target_pos]) - shoulder_anchor
# ...but immediately overridden by a fixed target (kept for reproducibility).
target_pos = np.array([-14.37130744, 0. , 22.97472612])
print("target position")
print(target_pos)
# target_pos = np.array([3., 0, 20])

q = q_sub[:]
q_star, path = planar_chain.inverse_kinematics(q_sub.copy(), target_pos, verbose=True, return_path=True)
# plt.close('all')
# planar_chain.plot(q_star)
print(planar_chain.endpoint_pos(q_star))

# plt.figure()
# plt.plot(endpoint_traj[:k,0], endpoint_traj[:k,2])
# plt.show()

## New algorithm: for planar arms, lock the more distal links into a single joint for initialization
# Then try to move the joint back toward its current configuration without moving the endpoint
# NOTE(review): `ik` is never imported in this file, so this line raises
# NameError at runtime — presumably `from riglib.bmi import ik` is missing.
inv_kin = ik.inv_kin_2D(target_pos, 15., 25.)
735659b0a525ca3326e55b0957523487606d5119 | 1,835 | py | Python | github-announcer.py | NURDspace/github-announcer | d59938c54704ae4a46ba31fbdcff81e39b22e71d | [
"BSD-3-Clause"
] | null | null | null | github-announcer.py | NURDspace/github-announcer | d59938c54704ae4a46ba31fbdcff81e39b22e71d | [
"BSD-3-Clause"
] | null | null | null | github-announcer.py | NURDspace/github-announcer | d59938c54704ae4a46ba31fbdcff81e39b22e71d | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/python3
# pip3 install pygithub
# pip3 install feedgen
from dateutil import parser
from feedgen.feed import FeedGenerator
import github
import sqlite3
# Tunables --------------------------------------------------------------
n_items = 25  # stop after this many newly created repositories
db = '/root/nurdspace/github-announcer.db'
file_out = '/var/www/htdocs.keetweej.vanheusden.com/ns-gh-rss.xml'
github_auth_token = ' something sensible here '

# Static feed skeleton; entries are appended below.
fg = FeedGenerator()
fg.title('NURDSpace affiliates github-repo thing')
fg.description('See title')
fg.link(href='https://github.com/NURDspace/github-announcer')

g = github.Github(github_auth_token)

dbcon = sqlite3.connect(db)
dbcur = dbcon.cursor()
dbcur.execute('SELECT DISTINCT user, last_check FROM users')

to_announce = set()   # (repo_name, description) of newly created repos
users_update = set()  # (latest_event_epoch, user) to persist afterwards

for record in dbcur:
    user = g.get_user(record[0])
    print(record[0])
    latest = None
    for event in user.get_events():
        event_epoch = event.created_at.timestamp()
        if latest is None:
            # First (newest) event: remember its timestamp as the new watermark.
            latest = int(event_epoch)
        users_update.add((latest, record[0]))
        print('\t', event.created_at, latest, record[0])
        # Events arrive newest-first: stop once we reach already-seen ones.
        if event_epoch < record[1]:
            break
        if event.type == 'CreateEvent':
            add = (event.repo.name, event.payload['description'])
            print(f'\t{add}')
            to_announce.add(add)
        if len(to_announce) >= n_items:
            break
    if len(to_announce) >= n_items:
        break

try:
    for a in to_announce:
        fe = fg.add_entry()
        fe.title(f'{a[0]}: {a[1]}')
        fe.description(f'{a[0]}: {a[1]}')
        fe.link(href='https://www.github.com/%s' % a[0])

    fg.rss_file(file_out)

    dbcur = dbcon.cursor()
    for u in users_update:
        # FIX: parameterized query — the old %-interpolation was injectable
        # and relied on SQLite's non-standard double-quoted string literals.
        dbcur.execute('UPDATE users SET last_check=? WHERE user=?', (u[0], u[1]))
    dbcon.commit()

except Exception as e:
    # Top-level boundary: report and exit without a traceback.
    print(f'Failed: {e}')
d2e558b503c7a1229c174a811f891b3c13b3df0a | 11,605 | py | Python | main_custom_modified_cross_attention.py | agoel00/LowFER | 4723cb12e1d89c58621ec34c4eb5221c1b51d018 | [
"MIT"
] | null | null | null | main_custom_modified_cross_attention.py | agoel00/LowFER | 4723cb12e1d89c58621ec34c4eb5221c1b51d018 | [
"MIT"
] | null | null | null | main_custom_modified_cross_attention.py | agoel00/LowFER | 4723cb12e1d89c58621ec34c4eb5221c1b51d018 | [
"MIT"
] | 2 | 2021-01-06T15:18:01.000Z | 2021-01-07T04:20:37.000Z | import os
from load_data import Data
import numpy as np
import torch
import time
from collections import defaultdict
from model_cross_attention_modified import *
from torch.optim.lr_scheduler import ExponentialLR
import argparse
import logging
import math
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__file__)
def add_logging_handlers(params, dir_name="logs"):
    """Attach a file handler to the module logger.

    Log lines go to ``<dir_name>/<params>_clowfer_modified.log``; the
    directory is created when missing.
    """
    global logger
    os.makedirs(dir_name, exist_ok=True)
    handler = logging.FileHandler(
        os.path.join(dir_name, "%s_clowfer_modified.log" % params))
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        '%m/%d/%Y %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
class Experiment:
    """Training/evaluation harness for the LowFER link-prediction model.

    NOTE(review): relies on the module-level dataset ``d`` (created in the
    ``__main__`` block) for vocabularies and the train/valid/test splits.
    """

    def __init__(self, learning_rate=0.0005, ent_vec_dim=200, rel_vec_dim=200,
                 num_iterations=500, batch_size=128, decay_rate=0., cuda=False,
                 input_dropout=0.3, hidden_dropout1=0.4, hidden_dropout2=0.5,
                 label_smoothing=0., k=30, output_dir=None, subspace=10):
        self.learning_rate = learning_rate
        self.ent_vec_dim = ent_vec_dim
        self.rel_vec_dim = rel_vec_dim
        self.num_iterations = num_iterations
        self.decay_rate = decay_rate
        self.label_smoothing = label_smoothing
        self.cuda = cuda
        self.n_gpu = torch.cuda.device_count() if cuda else None
        # Scale the effective batch across GPUs when DataParallel is used.
        self.batch_size = batch_size * self.n_gpu if self.n_gpu > 1 else batch_size
        self.device = torch.device("cuda") if cuda else None
        self.output_dir = output_dir
        # Extra hyper-parameters forwarded to the LowFER constructor.
        self.kwargs = {"input_dropout": input_dropout, "hidden_dropout1": hidden_dropout1,
                       "hidden_dropout2": hidden_dropout2, "k": k, "subspace": subspace}

    def get_data_idxs(self, data):
        """Map (head, relation, tail) string triples to integer-id triples."""
        data_idxs = [(self.entity_idxs[data[i][0]], self.relation_idxs[data[i][1]], \
                      self.entity_idxs[data[i][2]]) for i in range(len(data))]
        return data_idxs

    def get_er_vocab(self, data):
        """Group triples by (head, relation): {(h, r): [all known tails]}."""
        er_vocab = defaultdict(list)
        for triple in data:
            er_vocab[(triple[0], triple[1])].append(triple[2])
        return er_vocab

    def get_batch(self, er_vocab, er_vocab_pairs, idx):
        """Return one batch of (h, r) pairs starting at `idx` plus a 0/1
        multi-label target matrix over all entities (1-vs-all training)."""
        batch = er_vocab_pairs[idx:idx+self.batch_size]
        targets = np.zeros((len(batch), len(d.entities)))
        # NOTE(review): the loop variable shadows the `idx` parameter, which
        # has already been consumed by the slice above.
        for idx, pair in enumerate(batch):
            targets[idx, er_vocab[pair]] = 1.
        targets = torch.FloatTensor(targets)
        if self.label_smoothing:
            # Mix hard targets with a uniform distribution over entities.
            targets = ((1.0-self.label_smoothing)*targets) + (1.0/targets.size(1))
        if self.cuda:
            targets = targets.to(self.device)
        return np.array(batch), targets

    def evaluate(self, model, data):
        """Filtered link-prediction evaluation: Hits@{1,3,10}, MR and MRR."""
        hits = []
        ranks = []
        for i in range(10):
            hits.append([])

        test_data_idxs = self.get_data_idxs(data)
        # Filter against ALL known triples (train + valid + test).
        er_vocab = self.get_er_vocab(self.get_data_idxs(d.data))

        logger.info("Number of data points: %d" % len(test_data_idxs))

        for i in range(0, len(test_data_idxs), self.batch_size):
            data_batch, _ = self.get_batch(er_vocab, test_data_idxs, i)
            e1_idx = torch.tensor(data_batch[:,0])
            r_idx = torch.tensor(data_batch[:,1])
            e2_idx = torch.tensor(data_batch[:,2])
            if self.cuda:
                e1_idx = e1_idx.to(self.device)
                r_idx = r_idx.to(self.device)
                e2_idx = e2_idx.to(self.device)
            predictions = model.forward(e1_idx, r_idx)

            for j in range(data_batch.shape[0]):
                # "Filtered" setting: zero the scores of every other known
                # true tail while keeping the gold tail's own score.
                filt = er_vocab[(data_batch[j][0], data_batch[j][1])]
                target_value = predictions[j,e2_idx[j]].item()
                predictions[j, filt] = 0.0
                predictions[j, e2_idx[j]] = target_value

            sort_values, sort_idxs = torch.sort(predictions.cpu(), dim=1, descending=True)

            sort_idxs = sort_idxs.cpu().numpy()
            for j in range(data_batch.shape[0]):
                # 0-based position of the gold tail in the sorted scores.
                rank = np.where(sort_idxs[j]==e2_idx[j].item())[0][0]
                ranks.append(rank+1)

                for hits_level in range(10):
                    if rank <= hits_level:
                        hits[hits_level].append(1.0)
                    else:
                        hits[hits_level].append(0.0)

        metrics = {
            'h10': np.mean(hits[9]),
            'h3': np.mean(hits[2]),
            'h1': np.mean(hits[0]),
            'mr': np.mean(ranks),
            'mrr': np.mean(1./np.array(ranks))
        }
        logger.info('Hits @10: {0}'.format(metrics['h10']))
        logger.info('Hits @3: {0}'.format(metrics['h3']))
        logger.info('Hits @1: {0}'.format(metrics['h1']))
        logger.info('Mean rank: {0}'.format(metrics['mr']))
        logger.info('Mean reciprocal rank: {0}'.format(metrics['mrr']))
        return metrics

    def train_and_eval(self):
        """Train with 1-vs-all targets, validate every 10 epochs (saving a
        checkpoint each time) and report final validation/test metrics."""
        logger.info("Training the LowFER model...")
        self.entity_idxs = {d.entities[i]:i for i in range(len(d.entities))}
        self.relation_idxs = {d.relations[i]:i for i in range(len(d.relations))}

        train_data_idxs = self.get_data_idxs(d.train_data)
        logger.info("Number of training data points: %d" % len(train_data_idxs))

        # model = LowFER(d, self.ent_vec_dim, self.rel_vec_dim, **self.kwargs)
        model = LowFER(
            d, self.ent_vec_dim, self.rel_vec_dim,
            self.kwargs['input_dropout'],
            self.kwargs['hidden_dropout1'],
            self.kwargs['hidden_dropout2'],
            self.kwargs['k'],
            self.kwargs['subspace']
        )
        if self.cuda:
            if self.n_gpu > 1:
                model = torch.nn.DataParallel(model)
            model.to(self.device)
        # DataParallel wraps the real model in `.module`.
        if hasattr(model, 'module'):
            model.module.init()
        else:
            model.init()
        opt = torch.optim.Adam(model.parameters(), lr=self.learning_rate)
        if self.decay_rate:
            scheduler = ExponentialLR(opt, self.decay_rate)

        er_vocab = self.get_er_vocab(train_data_idxs)
        er_vocab_pairs = list(er_vocab.keys())

        logger.info("Starting training...")
        logger.info("Params: %d", sum(p.numel() for p in model.parameters() if p.requires_grad))
        # for name, p in model.named_parameters():
        #     logger.info(name)
        #     logger.info(p.shape)
        #     logger.info(p.numel())
        for it in range(1, self.num_iterations+1):
            start_train = time.time()
            model.train()
            losses = []
            np.random.shuffle(er_vocab_pairs)
            for j in range(0, len(er_vocab_pairs), self.batch_size):
                data_batch, targets = self.get_batch(er_vocab, er_vocab_pairs, j)
                opt.zero_grad()
                e1_idx = torch.tensor(data_batch[:,0])
                r_idx = torch.tensor(data_batch[:,1])
                if self.cuda:
                    e1_idx = e1_idx.to(self.device)
                    r_idx = r_idx.to(self.device)
                predictions = model.forward(e1_idx, r_idx)
                if hasattr(model, 'module'):
                    # DataParallel returns per-replica losses; reduce them.
                    loss = model.module.loss(predictions, targets)
                    loss = loss.mean()
                else:
                    loss = model.loss(predictions, targets)
                loss.backward()
                opt.step()
                losses.append(loss.item())
            if self.decay_rate:
                scheduler.step()
            logger.info("Epoch %d / time %0.5f / loss %0.9f" % (it, time.time()-start_train, np.mean(losses)))
            model.eval()
            if it % 10 == 0 and it != 0:
                with torch.no_grad():
                    logger.info("Validation:")
                    valid_metrics = self.evaluate(model, d.valid_data)
                torch.save(model.state_dict(), self.output_dir + "/%d.pt" % it)

        logger.info("Final Validation:")
        valid_metrics = self.evaluate(model, d.valid_data)
        logger.info("Final Test:")
        test_metrics = self.evaluate(model, d.test_data)
        torch.save(model.state_dict(), self.output_dir + "/final.pt")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, default="FB15k-237", nargs="?",
                        help="Which dataset to use: FB15k, FB15k-237, WN18 or WN18RR.")
    parser.add_argument("--num_iterations", type=int, default=500, nargs="?",
                        help="Number of iterations.")
    parser.add_argument("--batch_size", type=int, default=128, nargs="?",
                        help="Batch size.")
    parser.add_argument("--lr", type=float, default=0.0005, nargs="?",
                        help="Learning rate.")
    parser.add_argument("--dr", type=float, default=1.0, nargs="?",
                        help="Decay rate.")
    parser.add_argument("--edim", type=int, default=200, nargs="?",
                        help="Entity embedding dimensionality.")
    parser.add_argument("--rdim", type=int, default=200, nargs="?",
                        help="Relation embedding dimensionality.")
    parser.add_argument("--k", type=int, default=30, nargs="?",
                        help="Latent dimension of MFB.")
    # NOTE(review): `type=bool` does not parse "False" -> False; any non-empty
    # string is truthy. Kept for CLI compatibility; consider a str2bool.
    parser.add_argument("--cuda", type=bool, default=True, nargs="?",
                        help="Whether to use cuda (GPU) or not (CPU).")
    parser.add_argument("--input_dropout", type=float, default=0.3, nargs="?",
                        help="Input layer dropout.")
    parser.add_argument("--hidden_dropout1", type=float, default=0.4, nargs="?",
                        help="Dropout after the first hidden layer.")
    parser.add_argument("--hidden_dropout2", type=float, default=0.5, nargs="?",
                        help="Dropout after the second hidden layer.")
    parser.add_argument("--label_smoothing", type=float, default=0.1, nargs="?",
                        help="Amount of label smoothing.")
    parser.add_argument("--subspace", type=int, default=10, nargs="?")
    args = parser.parse_args()

    # Run identifier used for the log file and the checkpoint directory.
    params = "{}_lr_{}_dr_{}_e_{}_r_{}_k_{}_id_{}_hd1_{}_hd2_{}_ls_{}_subspace_{}".format(
        args.dataset, args.lr, args.dr, args.edim, args.rdim,
        args.k, args.input_dropout, args.hidden_dropout1,
        args.hidden_dropout2, args.label_smoothing, args.subspace
    )
    add_logging_handlers(params)
    dataset = args.dataset
    data_dir = "data/%s/" % dataset
    output_dir = "output/%s/%s" % (dataset, params)
    os.makedirs(output_dir, exist_ok=True)
    # Fix all RNG seeds for reproducibility.
    torch.backends.cudnn.deterministic = True
    seed = 20
    np.random.seed(seed)
    torch.manual_seed(seed)
    # BUG FIX: `torch.cuda.is_available` is a function; without the call it
    # is always truthy, so the CUDA branch ran even on CPU-only hosts.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    d = Data(data_dir=data_dir, reverse=True)
    experiment = Experiment(num_iterations=args.num_iterations, batch_size=args.batch_size, learning_rate=args.lr,
                            decay_rate=args.dr, ent_vec_dim=args.edim, rel_vec_dim=args.rdim, cuda=args.cuda,
                            input_dropout=args.input_dropout, hidden_dropout1=args.hidden_dropout1,
                            hidden_dropout2=args.hidden_dropout2, label_smoothing=args.label_smoothing, k=args.k,
                            output_dir=output_dir, subspace=args.subspace)
    experiment.train_and_eval()
7fa22151185c7447c108c593c0b72bd5fdad1c45 | 5,194 | py | Python | tests/test_face_areas_normals.py | janEbert/pytorch3d | accdac80fb29e82f72d4e8e73135ba8fd790b6c0 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2022-01-24T20:51:16.000Z | 2022-01-24T20:51:16.000Z | tests/test_face_areas_normals.py | janEbert/pytorch3d | accdac80fb29e82f72d4e8e73135ba8fd790b6c0 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | tests/test_face_areas_normals.py | janEbert/pytorch3d | accdac80fb29e82f72d4e8e73135ba8fd790b6c0 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2022-03-29T04:29:06.000Z | 2022-03-29T04:29:06.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d.ops import mesh_face_areas_normals
from pytorch3d.structures.meshes import Meshes
class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
    """Checks the fused mesh_face_areas_normals op (C++/CUDA) against a pure
    PyTorch reference implementation, forward and backward."""

    def setUp(self) -> None:
        super().setUp()
        torch.manual_seed(1)

    @staticmethod
    def init_meshes(
        num_meshes: int = 10,
        num_verts: int = 1000,
        num_faces: int = 3000,
        device: str = "cpu",
    ):
        """Build a batch of random meshes; verts require grad for backward tests."""
        device = torch.device(device)
        verts_list = []
        faces_list = []
        for _ in range(num_meshes):
            verts = torch.rand(
                (num_verts, 3), dtype=torch.float32, device=device, requires_grad=True
            )
            faces = torch.randint(
                num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
            )
            verts_list.append(verts)
            faces_list.append(faces)
        meshes = Meshes(verts_list, faces_list)
        return meshes

    @staticmethod
    def face_areas_normals_python(verts, faces):
        """
        Pytorch implementation for face areas & normals.
        """
        # TODO(gkioxari) Change cast to floats once we add support for doubles.
        verts = verts.float()
        vertices_faces = verts[faces]  # (F, 3, 3)
        # vector pointing from v0 to v1
        v01 = vertices_faces[:, 1] - vertices_faces[:, 0]
        # vector pointing from v0 to v2
        v02 = vertices_faces[:, 2] - vertices_faces[:, 0]
        normals = torch.cross(v01, v02, dim=1)  # (F, 3)
        # Triangle area is half the magnitude of the edge cross product.
        face_areas = normals.norm(dim=-1) / 2
        face_normals = torch.nn.functional.normalize(normals, p=2, dim=1, eps=1e-6)
        return face_areas, face_normals

    def _test_face_areas_normals_helper(self, device, dtype=torch.float32):
        """
        Check the results from face_areas cuda/cpp and PyTorch implementation are
        the same.
        """
        meshes = self.init_meshes(10, 200, 400, device=device)
        # make them leaf nodes
        verts = meshes.verts_packed().detach().clone().to(dtype)
        verts.requires_grad = True
        faces = meshes.faces_packed().detach().clone()

        # forward
        areas, normals = mesh_face_areas_normals(verts, faces)

        # Independent leaf tensors for the reference implementation so the
        # two backward passes do not share a graph.
        verts_torch = verts.detach().clone().to(dtype)
        verts_torch.requires_grad = True
        faces_torch = faces.detach().clone()
        (areas_torch, normals_torch) = TestFaceAreasNormals.face_areas_normals_python(
            verts_torch, faces_torch
        )
        self.assertClose(areas_torch, areas, atol=1e-7)
        # normals get normalized by area thus sensitivity increases as areas
        # in our tests can be arbitrarily small. Thus we compare normals after
        # multiplying with areas
        unnormals = normals * areas.view(-1, 1)
        unnormals_torch = normals_torch * areas_torch.view(-1, 1)
        self.assertClose(unnormals_torch, unnormals, atol=1e-6)

        # backward
        grad_areas = torch.rand(areas.shape, device=device, dtype=dtype)
        grad_normals = torch.rand(normals.shape, device=device, dtype=dtype)
        areas.backward((grad_areas, grad_normals))
        grad_verts = verts.grad
        areas_torch.backward((grad_areas, grad_normals))
        grad_verts_torch = verts_torch.grad
        self.assertClose(grad_verts_torch, grad_verts, atol=1e-6)

    def test_face_areas_normals_cpu(self):
        self._test_face_areas_normals_helper("cpu")

    def test_face_areas_normals_cuda(self):
        device = get_random_cuda_device()
        self._test_face_areas_normals_helper(device)

    def test_nonfloats_cpu(self):
        self._test_face_areas_normals_helper("cpu", dtype=torch.double)

    def test_nonfloats_cuda(self):
        device = get_random_cuda_device()
        self._test_face_areas_normals_helper(device, dtype=torch.double)

    @staticmethod
    def face_areas_normals_with_init(
        num_meshes: int, num_verts: int, num_faces: int, device: str = "cpu"
    ):
        """Benchmark closure: times only the fused op, not mesh construction."""
        meshes = TestFaceAreasNormals.init_meshes(
            num_meshes, num_verts, num_faces, device
        )
        verts = meshes.verts_packed()
        faces = meshes.faces_packed()
        torch.cuda.synchronize()

        def face_areas_normals():
            mesh_face_areas_normals(verts, faces)
            torch.cuda.synchronize()

        return face_areas_normals

    @staticmethod
    def face_areas_normals_with_init_torch(
        num_meshes: int, num_verts: int, num_faces: int, device: str = "cpu"
    ):
        """Benchmark closure for the pure-PyTorch reference implementation."""
        meshes = TestFaceAreasNormals.init_meshes(
            num_meshes, num_verts, num_faces, device
        )
        verts = meshes.verts_packed()
        faces = meshes.faces_packed()
        torch.cuda.synchronize()

        def face_areas_normals():
            TestFaceAreasNormals.face_areas_normals_python(verts, faces)
            torch.cuda.synchronize()

        return face_areas_normals
| 36.069444 | 86 | 0.651136 |
4cb61360a5b75518595430b3c7519ff390f0be01 | 2,628 | py | Python | fase.py | joaovicentefs/pythonbirds | aed41a4d8eecd6dccbb1aede74eae1cd62bbba94 | [
"MIT"
] | null | null | null | fase.py | joaovicentefs/pythonbirds | aed41a4d8eecd6dccbb1aede74eae1cd62bbba94 | [
"MIT"
] | null | null | null | fase.py | joaovicentefs/pythonbirds | aed41a4d8eecd6dccbb1aede74eae1cd62bbba94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from itertools import chain
from atores import ATIVO
# Game-status sentinels returned by Fase.status().
VITORIA = 'VITORIA'
DERROTA = 'DERROTA'
EM_ANDAMENTO = 'EM_ANDAMENTO'
class Ponto():
def __init__(self, x, y, caracter):
self.caracter = caracter
self.x = round(x)
self.y = round(y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.caracter == other.caracter
def __hash__(self):
return hash(self.x) ^ hash(self.y)
def __repr__(self, *args, **kwargs):
return "Ponto(%s,%s,'%s')" % (self.x, self.y, self.caracter)
class Fase():
def __init__(self, intervalo_de_colisao=1):
"""
Método que inicializa uma fase.
:param intervalo_de_colisao:
"""
self.intervalo_de_colisao = intervalo_de_colisao
self._passaros = []
self._porcos = []
self._obstaculos = []
def adicionar_obstaculo(self, *obstaculos):
"""
Adiciona obstáculos em uma fase
:param obstaculos:
"""
def adicionar_porco(self, *porcos):
"""
Adiciona porcos em uma fase
:param porcos:
"""
pass
def adicionar_passaro(self, *passaros):
"""
Adiciona pássaros em uma fase
:param passaros:
"""
pass
def status(self):
"""
Método que indica com mensagem o status do jogo
Se o jogo está em andamento (ainda tem porco ativo e pássaro ativo), retorna essa mensagem.
Se o jogo acabou com derrota (ainda existe porco ativo), retorna essa mensagem
Se o jogo acabou com vitória (não existe porco ativo), retorna essa mensagem
:return:
"""
return EM_ANDAMENTO
def lancar(self, angulo, tempo):
"""
Método que executa lógica de lançamento.
Deve escolher o primeiro pássaro não lançado da lista e chamar seu método lançar
Se não houver esse tipo de pássaro, não deve fazer nada
:param angulo: ângulo de lançamento
:param tempo: Tempo de lançamento
"""
pass
def calcular_pontos(self, tempo):
"""
Lógica que retorna os pontos a serem exibidos na tela.
Cada ator deve ser transformado em um Ponto.
:param tempo: tempo para o qual devem ser calculados os pontos
:return: objeto do tipo Ponto
"""
pontos=[self._transformar_em_ponto(a) for a in self._passaros+self._obstaculos+self._porcos]
return pontos
def _transformar_em_ponto(self, ator):
return Ponto(ator.x, ator.y, ator.caracter())
| 24.560748 | 100 | 0.606164 |
3eae84d575bedf9a7ce3cda9378e31d167cd0f05 | 1,160 | py | Python | passl/hooks/byolClip_hook.py | WangFeng18/PASSL | d03c0928434a26d4eefe2c24b229168d620f864c | [
"Apache-2.0"
] | 1 | 2021-04-02T09:59:20.000Z | 2021-04-02T09:59:20.000Z | passl/hooks/byolClip_hook.py | WangFeng18/PASSL | d03c0928434a26d4eefe2c24b229168d620f864c | [
"Apache-2.0"
] | null | null | null | passl/hooks/byolClip_hook.py | WangFeng18/PASSL | d03c0928434a26d4eefe2c24b229168d620f864c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .hook import Hook
from .builder import HOOKS
import paddle.distributed as dist
@HOOKS.register()
class BYOLClipHook(Hook):
    """Hook that clips/updates the BYOL target network after each train iteration."""

    def __init__(self, priority=1):
        # Priority orders this hook relative to the other registered hooks.
        self.priority = priority

    def train_iter_end(self, trainer):
        model = trainer.model
        if dist.get_world_size() > 1:
            # Under data parallelism the real network is wrapped in `_layers`.
            model._layers.update_target_network_clip()
        else:
            model.update_target_network_clip()
| 36.25 | 74 | 0.675862 |
3333bc1cf17aafa82f16a178b098b89cc5119452 | 3,511 | py | Python | env/lib/python3.7/site-packages/docusign_rooms/models/e_sign_permission_profile_list.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/docusign_rooms/models/e_sign_permission_profile_list.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/docusign_rooms/models/e_sign_permission_profile_list.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
DocuSign Rooms API - v2
An API for an integrator to access the features of DocuSign Rooms # noqa: E501
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ESignPermissionProfileList(object):
    """Container model for a list of eSign permission profiles.

    NOTE: This class is auto generated by the swagger code generator program.

    Attributes:
      swagger_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> JSON key in the API definition.
    """
    swagger_types = {
        'permission_profiles': 'list[ESignPermissionProfile]'
    }

    attribute_map = {
        'permission_profiles': 'permissionProfiles'
    }

    def __init__(self, permission_profiles=None):  # noqa: E501
        """ESignPermissionProfileList - a model defined in Swagger"""  # noqa: E501
        self._permission_profiles = None
        self.discriminator = None
        if permission_profiles is not None:
            self.permission_profiles = permission_profiles

    @property
    def permission_profiles(self):
        """Gets the permission_profiles of this ESignPermissionProfileList.  # noqa: E501

        :return: The permission_profiles of this ESignPermissionProfileList.  # noqa: E501
        :rtype: list[ESignPermissionProfile]
        """
        return self._permission_profiles

    @permission_profiles.setter
    def permission_profiles(self, permission_profiles):
        """Sets the permission_profiles of this ESignPermissionProfileList.

        :param permission_profiles: The permission_profiles of this ESignPermissionProfileList.  # noqa: E501
        :type: list[ESignPermissionProfile]
        """
        self._permission_profiles = permission_profiles

    def to_dict(self):
        """Returns the model properties as a dict, recursively serializing
        any nested model objects that expose to_dict()."""
        result = {}

        # Native dict iteration replaces six.iteritems(): this package runs
        # under Python 3 (installed in a python3.7 site-packages tree), so the
        # py2 compatibility shim is unnecessary.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        if issubclass(ESignPermissionProfileList, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ESignPermissionProfileList):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 30.267241 | 109 | 0.606665 |
6af33f209b68c2133eb30f8c95f288f6cb392146 | 2,541 | py | Python | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/rtabmap_ros/cfg/CameraConfig.py | QianheYu/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | 1 | 2022-03-11T03:31:15.000Z | 2022-03-11T03:31:15.000Z | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/rtabmap_ros/cfg/CameraConfig.py | bravetree/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | null | null | null | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/rtabmap_ros/cfg/CameraConfig.py | bravetree/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | null | null | null | ## *********************************************************
##
## File autogenerated for the rtabmap_ros package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Camera device ID', 'max': 7, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'device_id', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Frame rate', 'max': 100.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'frame_rate', 'edit_method': '', 'default': 15.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Video or images directory path', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'video_or_images_path', 'edit_method': '', 'default': '', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Pause', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'pause', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}], 'type': '', 'id': 0}
# Per-parameter lookup tables keyed by parameter name.  NOTE: these names
# intentionally shadow the `min`/`max`/`type` builtins -- they are part of the
# generated dynamic_reconfigure module API and must not be renamed.
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0

#def extract_params(config):
#    params = []
#    params.extend(config['parameters'])
#    for group in config['groups']:
#        params.extend(extract_params(group))
#    return params

# Flatten the (possibly nested) config description and index each parameter's
# bounds, default, reconfigure level and type; OR all levels into one bitmask.
for param in extract_params(config_description):
    min[param['name']] = param['min']
    max[param['name']] = param['max']
    defaults[param['name']] = param['default']
    level[param['name']] = param['level']
    type[param['name']] = param['type']
    all_level = all_level | param['level']
| 68.675676 | 1,666 | 0.63046 |
70e9ba91eeffc5e2f2f01af06eb66aa8d489d51f | 368 | py | Python | irs/search/schema.py | nitish6174/TFIDF_vs_BM25 | bf7962d37ffc3ff8e236393ad57f3f9cf2ead655 | [
"MIT"
] | 1 | 2020-08-13T03:04:14.000Z | 2020-08-13T03:04:14.000Z | irs/search/schema.py | nitish6174/TFIDF_vs_BM25 | bf7962d37ffc3ff8e236393ad57f3f9cf2ead655 | [
"MIT"
] | null | null | null | irs/search/schema.py | nitish6174/TFIDF_vs_BM25 | bf7962d37ffc3ff8e236393ad57f3f9cf2ead655 | [
"MIT"
] | null | null | null | from whoosh.fields import SchemaClass, TEXT, ID, DATETIME
from whoosh.analysis import StemmingAnalyzer
class RedditSchema(SchemaClass):
    """Whoosh index schema for Reddit posts."""
    # Post URL; ID stores the whole value as a single token so it can be
    # returned with results and used for exact lookups.
    url = ID(stored=True)
    # Title is stemmed and weighted 5x relative to the body for ranking.
    title = TEXT(analyzer=StemmingAnalyzer(), stored=True, field_boost=5.0)
    # Post body, also stemmed; stored so snippets can be shown.
    body = TEXT(analyzer=StemmingAnalyzer(), stored=True)
    created = DATETIME(stored=True)
    subreddit = TEXT(stored=True)
| 33.454545 | 75 | 0.75 |
9437ce36788c025a98483c092b2eb6681ce9882c | 10,664 | py | Python | data_loader.py | Shreypandey/crispy-enigma | 59d49e659c44063fea52fa8ea30fb9bb4d8f6f5e | [
"Apache-2.0"
] | null | null | null | data_loader.py | Shreypandey/crispy-enigma | 59d49e659c44063fea52fa8ea30fb9bb4d8f6f5e | [
"Apache-2.0"
] | null | null | null | data_loader.py | Shreypandey/crispy-enigma | 59d49e659c44063fea52fa8ea30fb9bb4d8f6f5e | [
"Apache-2.0"
] | null | null | null | import os
import copy
import json
import logging
import torch
from torch.utils.data import TensorDataset
from utils import get_intent_labels, get_slot_labels
# Module-level logger, namespaced to this module's import path.
logger = logging.getLogger(__name__)
class InputExample(object):
    """One training/test example for joint intent/slot classification.

    Args:
        guid: Unique id for the example.
        words: list. The words of the sequence.
        intent_label: (Optional) string. The intent label of the example.
        slot_labels: (Optional) list. The slot labels of the example.
    """

    def __init__(self, guid, words, intent_label=None, slot_labels=None):
        self.guid = guid
        self.words = words
        self.intent_label = intent_label
        self.slot_labels = slot_labels

    def __repr__(self):
        return self.to_json_string()

    def to_dict(self):
        """Serialize this instance to a plain dictionary (deep copy)."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serialize this instance to a pretty-printed JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
    """A single set of model-ready features for one example."""

    def __init__(self, input_ids, attention_mask, token_type_ids, intent_label_id, slot_labels_ids):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.intent_label_id = intent_label_id
        self.slot_labels_ids = slot_labels_ids

    def __repr__(self):
        return self.to_json_string()

    def to_dict(self):
        """Serialize this instance to a plain dictionary (deep copy)."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serialize this instance to a pretty-printed JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class JointProcessor(object):
    """Processor for the JointBERT datasets (seq.in / label / seq.out files)."""

    def __init__(self, args):
        self.args = args
        self.intent_labels = get_intent_labels(args)
        self.slot_labels = get_slot_labels(args)

        # Fixed file names inside each split directory.
        self.input_text_file = 'seq.in'
        self.intent_label_file = 'label'
        self.slot_labels_file = 'seq.out'

    @classmethod
    def _read_file(cls, input_file, quotechar=None):
        """Read a UTF-8 file and return its lines, stripped of whitespace."""
        with open(input_file, "r", encoding="utf-8") as f:
            return [line.strip() for line in f]

    def _create_examples(self, texts, intents, slots, set_type):
        """Build InputExample objects from parallel text/intent/slot lists."""
        examples = []
        for i, (text, intent, slot) in enumerate(zip(texts, intents, slots)):
            guid = "%s-%s" % (set_type, i)
            # 1. input text (split() also collapses doubled spaces)
            words = text.split()
            # 2. intent: unknown labels fall back to the "UNK" index
            try:
                intent_label = self.intent_labels.index(intent)
            except ValueError:
                intent_label = self.intent_labels.index("UNK")
            # 3. slots: same "UNK" fallback, one label per word
            slot_labels = []
            for s in slot.split():
                try:
                    slot_labels.append(self.slot_labels.index(s))
                except ValueError:
                    slot_labels.append(self.slot_labels.index("UNK"))

            assert len(words) == len(slot_labels)
            examples.append(InputExample(guid=guid, words=words, intent_label=intent_label, slot_labels=slot_labels))
        return examples

    def get_examples(self, mode):
        """Load the examples for one split.

        Args:
            mode: train, dev, test
        """
        data_path = os.path.join(self.args.data_dir, self.args.task, mode)
        logger.info("LOOKING AT {}".format(data_path))
        return self._create_examples(
            texts=self._read_file(os.path.join(data_path, self.input_text_file)),
            intents=self._read_file(os.path.join(data_path, self.intent_label_file)),
            slots=self._read_file(os.path.join(data_path, self.slot_labels_file)),
            set_type=mode)
# Registry mapping task name -> processor class; every task currently uses
# the same JointProcessor since the on-disk layout is identical.
processors = {
    "atis": JointProcessor,
    "snips": JointProcessor,
    'bot': JointProcessor
}
def convert_examples_to_features(examples, max_seq_len, tokenizer,
                                 pad_token_label_id=-100,
                                 cls_token_segment_id=0,
                                 pad_token_segment_id=0,
                                 sequence_a_segment_id=0,
                                 mask_padding_with_zero=True):
    """Convert InputExample objects into padded, BERT-style InputFeatures.

    Each word is tokenized into sub-tokens; only the FIRST sub-token of a
    word keeps the real slot label, the rest receive pad_token_label_id so
    the loss ignores them. Sequences are truncated to leave room for
    [CLS]/[SEP] and then padded out to max_seq_len.
    """
    # Setting based on the current model type
    cls_token = tokenizer.cls_token
    sep_token = tokenizer.sep_token
    unk_token = tokenizer.unk_token
    pad_token_id = tokenizer.pad_token_id

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 5000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))

        # Tokenize word by word (for NER)
        tokens = []
        slot_labels_ids = []
        for word, slot_label in zip(example.words, example.slot_labels):
            word_tokens = tokenizer.tokenize(word)
            if not word_tokens:
                word_tokens = [unk_token]  # For handling the bad-encoded word
            tokens.extend(word_tokens)
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            slot_labels_ids.extend([int(slot_label)] + [pad_token_label_id] * (len(word_tokens) - 1))

        # Account for [CLS] and [SEP]
        special_tokens_count = 2
        if len(tokens) > max_seq_len - special_tokens_count:
            tokens = tokens[:(max_seq_len - special_tokens_count)]
            slot_labels_ids = slot_labels_ids[:(max_seq_len - special_tokens_count)]

        # Add [SEP] token
        tokens += [sep_token]
        slot_labels_ids += [pad_token_label_id]
        token_type_ids = [sequence_a_segment_id] * len(tokens)

        # Add [CLS] token (prepended, so its segment id goes first too)
        tokens = [cls_token] + tokens
        slot_labels_ids = [pad_token_label_id] + slot_labels_ids
        token_type_ids = [cls_token_segment_id] + token_type_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_len - len(input_ids)
        input_ids = input_ids + ([pad_token_id] * padding_length)
        attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
        token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
        slot_labels_ids = slot_labels_ids + ([pad_token_label_id] * padding_length)

        # All four parallel sequences must line up exactly at max_seq_len.
        assert len(input_ids) == max_seq_len, "Error with input length {} vs {}".format(len(input_ids), max_seq_len)
        assert len(attention_mask) == max_seq_len, "Error with attention mask length {} vs {}".format(len(attention_mask), max_seq_len)
        assert len(token_type_ids) == max_seq_len, "Error with token type length {} vs {}".format(len(token_type_ids), max_seq_len)
        assert len(slot_labels_ids) == max_seq_len, "Error with slot labels length {} vs {}".format(len(slot_labels_ids), max_seq_len)

        intent_label_id = int(example.intent_label)

        # Log the first few examples for manual sanity checking.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % example.guid)
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            logger.info("intent_label: %s (id = %d)" % (example.intent_label, intent_label_id))
            logger.info("slot_labels: %s" % " ".join([str(x) for x in slot_labels_ids]))

        features.append(
            InputFeatures(input_ids=input_ids,
                          attention_mask=attention_mask,
                          token_type_ids=token_type_ids,
                          intent_label_id=intent_label_id,
                          slot_labels_ids=slot_labels_ids
                          ))

    return features
def load_and_cache_examples(args, tokenizer, mode):
    """Return a TensorDataset for `mode`, building and caching features on first use.

    :param args: parsed CLI namespace (data_dir, task, model_name_or_path, ...)
    :param tokenizer: tokenizer used to convert examples to features
    :param mode: one of "train", "dev", "test"
    """
    processor = processors[args.task](args)

    # Cache file name encodes split, task, model name and sequence length.
    cached_features_file = os.path.join(
        args.data_dir,
        'cached_{}_{}_{}_{}'.format(
            mode,
            args.task,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            args.max_seq_len
        )
    )

    if os.path.exists(cached_features_file):
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        if mode not in ("train", "dev", "test"):
            raise Exception("For mode, Only train, dev, test is available")
        examples = processor.get_examples(mode)

        # Use cross entropy ignore index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id = args.ignore_index
        features = convert_examples_to_features(examples, args.max_seq_len, tokenizer,
                                                pad_token_label_id=pad_token_label_id)
        logger.info("Saving features into cached file %s", cached_features_file)
        torch.save(features, cached_features_file)

    # Convert to tensors and assemble the dataset.
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_intent_label_ids = torch.tensor([f.intent_label_id for f in features], dtype=torch.long)
    all_slot_labels_ids = torch.tensor([f.slot_labels_ids for f in features], dtype=torch.long)

    return TensorDataset(all_input_ids, all_attention_mask,
                         all_token_type_ids, all_intent_label_ids, all_slot_labels_ids)
| 41.494163 | 135 | 0.632689 |
a76da428275ea926d6b150c208314c68e95e618b | 2,011 | py | Python | experiments/karla/diplomski-rad/blade/pb/datasets/n20-indel-classes/finished-experiments/model-n20-mix-pb-indel-classes-9.py | lvrcek/consensus-net | 560957f315751822e1ddf8c097eb7b712ceadff3 | [
"MIT"
] | null | null | null | experiments/karla/diplomski-rad/blade/pb/datasets/n20-indel-classes/finished-experiments/model-n20-mix-pb-indel-classes-9.py | lvrcek/consensus-net | 560957f315751822e1ddf8c097eb7b712ceadff3 | [
"MIT"
] | null | null | null | experiments/karla/diplomski-rad/blade/pb/datasets/n20-indel-classes/finished-experiments/model-n20-mix-pb-indel-classes-9.py | lvrcek/consensus-net | 560957f315751822e1ddf8c097eb7b712ceadff3 | [
"MIT"
] | 1 | 2018-12-23T13:50:29.000Z | 2018-12-23T13:50:29.000Z | from comet_ml import Experiment
# NOTE(review): hard-coded Comet API key committed in source -- it should be
# moved to an environment variable or an untracked config file and rotated.
experiment = Experiment(api_key="oda8KKpxlDgWmJG5KsYrrhmIV", project_name="consensusnet")
import numpy as np
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input
from keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPool2D
from keras.callbacks import LearningRateScheduler, EarlyStopping
import sys
# Make the project's utils directory importable when running this script
# directly (path is machine-specific).
module_path = '/home/diplomski-rad/consensus-net/src/python/utils/'
if module_path not in sys.path:
    print('Adding utils module.')
    sys.path.append(module_path)
# Must come after the sys.path tweak above.
from args_parsers import parse_train_args
def main(args):
    """Train a small 2-conv-layer CNN on the given .npy arrays and save it.

    :param args: raw CLI argument list (parsed by parse_train_args)
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)
    model_save_path = args.model_save_path

    def lr_schedule(epoch, lr):
        # Hold the learning rate for 50 epochs, then decay 5% every 5th epoch.
        if epoch > 50 and epoch % 5 == 0:
            return lr * 0.95
        return lr

    callbacks = [LearningRateScheduler(lr_schedule),
                 EarlyStopping(monitor='val_loss', patience=3)]

    # Two conv blocks (40 then 20 filters) followed by a softmax over 6 classes.
    input_layer = Input(shape=X_train.shape[1:])
    features = Conv2D(filters=40, kernel_size=2, padding='same', activation='relu')(input_layer)
    features = MaxPool2D(pool_size=(2, 1))(features)
    features = Conv2D(filters=20, kernel_size=4, padding='same', activation='relu')(features)
    features = Flatten()(features)
    predictions = Dense(6, activation='softmax')(features)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())

    model.fit(X_train, y_train, batch_size=10000, epochs=150,
              validation_data=(X_validate, y_validate), callbacks=callbacks)
    model.save(model_save_path)
if __name__ == '__main__':
    # Forward the CLI arguments (minus the program name) to the trainer.
    main(sys.argv[1:])
7d6632f06cb204e8e8d04481fe6c36268354a60e | 9,764 | py | Python | script/model/mini_trainer.py | eppingere/terrier | ffe1b96cfa364ad7053f1224655472b2eb69f910 | [
"MIT"
] | 2 | 2020-06-03T19:46:43.000Z | 2020-07-11T00:46:08.000Z | script/model/mini_trainer.py | thepinetree/terrier | eeb6a17e4927b9d8ccafbc3ee23f1ff6e4365069 | [
"MIT"
] | 11 | 2020-06-28T03:43:06.000Z | 2020-10-28T02:33:57.000Z | script/model/mini_trainer.py | thepinetree/terrier | eeb6a17e4927b9d8ccafbc3ee23f1ff6e4365069 | [
"MIT"
] | 1 | 2020-06-03T03:21:51.000Z | 2020-06-03T03:21:51.000Z | #!/usr/bin/env python3
import glob
import os
import numpy as np
import argparse
import pickle
import logging
from sklearn import model_selection
import model
from util import io_util, logging_util
from data_class import opunit_data
from info import data_info
from training_util import data_transforming_util, result_writing_util
from type import Target, ExecutionFeature
# Make array logging readable: 4 decimal places, truncate long arrays to
# their edges, and suppress scientific notation.
np.set_printoptions(precision=4)
np.set_printoptions(edgeitems=10)
np.set_printoptions(suppress=True)
class MiniTrainer:
    """
    Trainer for the mini models
    """

    def __init__(self, input_path, model_metrics_path, ml_models, test_ratio, trim, expose_all, txn_sample_interval):
        """
        :param input_path: directory containing the mini-runner CSV files
        :param model_metrics_path: directory where metrics/prediction CSVs are written
        :param ml_models: list of ML method names (e.g. "lr", "rf", "gbm") to evaluate
        :param test_ratio: fraction of the data held out as the test split
        :param trim: fraction of extreme values removed from both ends of the data
        :param expose_all: if truthy, retrain the winning configuration on ALL data
        :param txn_sample_interval: sampling interval for the transactional operating units
        """
        self.input_path = input_path
        self.model_metrics_path = model_metrics_path
        self.ml_models = ml_models
        self.test_ratio = test_ratio
        self.model_map = {}
        self.stats_map = {}
        self.trim = trim
        self.expose_all = expose_all
        self.txn_sample_interval = txn_sample_interval

    def get_model_map(self):
        """Return the map from operating unit -> trained model."""
        return self.model_map

    def train_specific_model(self, data, y_transformer_idx, method_idx):
        """Retrain one (transformer, method) configuration on the full dataset.

        :param data: opunit dataset (exposes .opunit, .x, .y)
        :param y_transformer_idx: 0 = no target transformer, 1 = the opunit's transformer
        :param method_idx: index into self.ml_models of the method to use
        """
        methods = self.ml_models
        method = methods[method_idx]
        label = method if y_transformer_idx == 0 else method + " transform"
        logging.info("Finalizing model {} {}".format(data.opunit.name, label))
        y_transformers = [None, data_transforming_util.OPUNIT_Y_TRANSFORMER_MAP[data.opunit]]
        x_transformer = data_transforming_util.OPUNIT_X_TRANSFORMER_MAP[data.opunit]
        regressor = model.Model(methods[method_idx], y_transformer=y_transformers[y_transformer_idx],
                                x_transformer=x_transformer)
        # Train on the complete dataset -- no train/test split at this stage.
        regressor.train(data.x, data.y)
        self.model_map[data.opunit] = regressor

    def train_data(self, data, summary_file):
        """Evaluate every (method, transformer) combination for one opunit dataset.

        Writes per-opunit metrics and prediction CSVs, records the best model
        (or just its indices when expose_all is set), and returns the winning
        configuration.

        :param data: opunit dataset (exposes .opunit, .x, .y)
        :param summary_file: CSV path accumulating test errors across opunits
        :return: (best_y_transformer_idx, best_method_idx); both -1 when
                 expose_all is falsy or no model beat the error ceiling
        """
        x_train, x_test, y_train, y_test = model_selection.train_test_split(data.x, data.y,
                                                                            test_size=self.test_ratio,
                                                                            random_state=0)

        # Write the first header row to the result file
        metrics_path = "{}/{}.csv".format(self.model_metrics_path, data.opunit.name.lower())
        prediction_path = "{}/{}_prediction.csv".format(self.model_metrics_path, data.opunit.name.lower())
        result_writing_util.create_metrics_and_prediction_files(metrics_path, prediction_path, False)

        methods = self.ml_models

        # Test the prediction with/without the target transformer
        y_transformers = [None, data_transforming_util.OPUNIT_Y_TRANSFORMER_MAP[data.opunit]]
        # modeling_transformer = data_transforming_util.OPUNIT_MODELING_TRANSFORMER_MAP[data.opunit]
        # if modeling_transformer is not None:
        #     transformers.append(modeling_transformer)
        x_transformer = data_transforming_util.OPUNIT_X_TRANSFORMER_MAP[data.opunit]

        # Added to the denominator below so a zero label cannot divide by zero.
        error_bias = 1
        # Best-so-far error; models are only recorded when they beat this,
        # so the initial 2 (i.e. 200%) acts as an acceptance ceiling.
        min_percentage_error = 2
        pred_results = None
        elapsed_us_index = data_info.TARGET_CSV_INDEX[Target.ELAPSED_US]
        memory_b_index = data_info.TARGET_CSV_INDEX[Target.MEMORY_B]

        best_y_transformer = -1
        best_method = -1
        for i, y_transformer in enumerate(y_transformers):
            for m, method in enumerate(methods):
                # Train the model
                label = method if i == 0 else method + " transform"
                logging.info("{} {}".format(data.opunit.name, label))
                regressor = model.Model(method, y_transformer=y_transformer, x_transformer=x_transformer)
                regressor.train(x_train, y_train)

                # Evaluate on both the training and test set
                results = []
                evaluate_data = [(x_train, y_train), (x_test, y_test)]
                train_test_label = ["Train", "Test"]
                for j, d in enumerate(evaluate_data):
                    evaluate_x = d[0]
                    evaluate_y = d[1]

                    y_pred = regressor.predict(evaluate_x)
                    logging.debug("x shape: {}".format(evaluate_x.shape))
                    logging.debug("y shape: {}".format(y_pred.shape))
                    # In order to avoid the percentage error to explode when the actual label is very small,
                    # we omit the data point with the actual label <= 5 when calculating the percentage error (by
                    # essentially giving the data points with small labels a very small weight)
                    evaluate_threshold = 5
                    weights = np.where(evaluate_y > evaluate_threshold, np.ones(evaluate_y.shape), np.full(evaluate_y.shape, 1e-6))
                    percentage_error = np.average(np.abs(evaluate_y - y_pred) / (evaluate_y + error_bias), axis=0,
                                                  weights=weights)
                    results += list(percentage_error) + [""]

                    logging.info('{} Percentage Error: {}'.format(train_test_label[j], percentage_error))

                    # The default method of determining whether a model is better is by comparing the model error
                    # on the elapsed us. For any opunits in MEM_EVALUATE_OPUNITS, we evaluate by comparing the
                    # model error on memory_b.
                    eval_error = percentage_error[elapsed_us_index]
                    if data.opunit in data_info.MEM_EVALUATE_OPUNITS:
                        eval_error = percentage_error[memory_b_index]

                    # Record the model with the lowest elapsed time prediction (since that might be the most
                    # important prediction)
                    # Only use linear regression for the arithmetic operating units
                    # (j == 1 restricts the comparison to the test split only.)
                    if (j == 1 and eval_error < min_percentage_error
                            and y_transformer == y_transformers[-1]
                            and (data.opunit not in data_info.ARITHMETIC_OPUNITS or method == 'lr')):
                        min_percentage_error = eval_error
                        if self.expose_all:
                            best_y_transformer = i
                            best_method = m
                        else:
                            self.model_map[data.opunit] = regressor
                        pred_results = (evaluate_x, y_pred, evaluate_y)

                    if j == 1:
                        io_util.write_csv_result(summary_file, data.opunit.name, [label] + list(percentage_error))

                # Dump the prediction results
                io_util.write_csv_result(metrics_path, label, results)

                logging.info("")

            io_util.write_csv_result(metrics_path, "", [])

        # Record the best prediction results on the test data
        result_writing_util.record_predictions(pred_results, prediction_path)
        return best_y_transformer, best_method

    def train(self):
        """Train the mini-models

        :return: the map of the trained models
        """
        self.model_map = {}

        # Create the results files for the paper
        header = ["OpUnit", "Method"] + [target.name for target in data_info.MINI_MODEL_TARGET_LIST]
        summary_file = "{}/mini_runner.csv".format(self.model_metrics_path)
        io_util.create_csv_file(summary_file, header)

        # First get the data for all mini runners
        for filename in sorted(glob.glob(os.path.join(self.input_path, '*.csv'))):
            print(filename)
            data_list = opunit_data.get_mini_runner_data(filename, self.model_metrics_path, self.txn_sample_interval,
                                                         self.model_map, self.stats_map, self.trim)
            for data in data_list:
                best_y_transformer, best_method = self.train_data(data, summary_file)
                if self.expose_all:
                    # Retrain the winning configuration on the full dataset.
                    self.train_specific_model(data, best_y_transformer, best_method)

        return self.model_map
# ==============================================
# main
# ==============================================
if __name__ == '__main__':
    aparser = argparse.ArgumentParser(description='Mini Trainer')
    aparser.add_argument('--input_path', default='mini_runner_input',
                         help='Input file path for the mini runners')
    aparser.add_argument('--model_results_path', default='mini_runner_model_results',
                         help='Prediction results of the mini models')
    aparser.add_argument('--save_path', default='trained_model', help='Path to save the mini models')
    aparser.add_argument('--ml_models', nargs='*', type=str,
                         default=["lr", "rf", "gbm"],
                         help='ML models for the mini trainer to evaluate')
    aparser.add_argument('--test_ratio', type=float, default=0.2, help='Test data split ratio')
    aparser.add_argument('--trim', default=0.2, type=float, help='% of values to remove from both top and bottom')
    aparser.add_argument('--expose_all', default=True, help='Should expose all data to the model')
    aparser.add_argument('--txn_sample_interval', type=int, default=49,
                         help='Sampling interval for the transaction OUs')
    aparser.add_argument('--log', default='info', help='The logging level')
    args = aparser.parse_args()

    logging_util.init_logging(args.log)

    trainer = MiniTrainer(args.input_path, args.model_results_path, args.ml_models, args.test_ratio, args.trim,
                          args.expose_all, args.txn_sample_interval)
    trained_model_map = trainer.train()
    # Persist the opunit -> model map so inference can load it later.
    with open(args.save_path + '/mini_model_map.pickle', 'wb') as file:
        pickle.dump(trained_model_map, file)
| 48.577114 | 131 | 0.620442 |
f86a3dd3476977dc1a1b1649aed647baf6c46ab2 | 3,772 | py | Python | translator/kor_to_braille.py | firekim2/korean_to_braille | 3d516488486e04de835f3c5c92612ed4f10c64ae | [
"MIT"
] | 2 | 2019-08-07T12:22:06.000Z | 2021-07-20T15:17:44.000Z | translator/kor_to_braille.py | firekim2/korean_to_braille | 3d516488486e04de835f3c5c92612ed4f10c64ae | [
"MIT"
] | null | null | null | translator/kor_to_braille.py | firekim2/korean_to_braille | 3d516488486e04de835f3c5c92612ed4f10c64ae | [
"MIT"
] | 1 | 2021-04-27T04:28:33.000Z | 2021-04-27T04:28:33.000Z | from . import map_kor_to_braille
import re
# Placeholder emitted for characters that cannot be translated.
UNRECOGNIZED = '?'
# Toggle tracking whether the next quote mark opens or closes a quotation.
open_quotes = True

# Hangul syllable decomposition constants: BASE_CODE is ord('가'); a syllable
# code point = BASE_CODE + choseong*588 + jungseong*28 + jongseong.
BASE_CODE, CHOSUNG, JUNGSUNG = 44032, 588, 28

# Leading consonants (choseong), indices 00-18.
CHOSUNG_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ',
                'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ',
                'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']

# Vowels (jungseong), indices 00-20.
JUNGSUNG_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ',
                 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ',
                 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']

# Trailing consonants (jongseong), indices 00-27; index 0 means "no final".
JONGSUNG_LIST = [' ', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ',
                 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ',
                 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ','ㅆ', 'ㅇ', 'ㅈ', 'ㅊ',
                 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
def extract_words(string):
    """Split `string` on single spaces, then on newlines, preserving empties.

    Consecutive spaces intentionally yield empty strings so the translator
    can emit one braille space cell per original separator.
    """
    result = []
    for chunk in string.split(" "):
        result.extend(chunk.split("\n"))
    return result
def check_contraction(word, index, braille):
    """Try to match a braille contraction starting at word[index].

    Appends the contraction cell to `braille` on success and returns the
    number of characters consumed; returns 0 when nothing matched.
    """
    suffix = word[index:]
    for key, value in map_kor_to_braille.contractions.items():
        if suffix.startswith(key):
            braille.append({'braille': value, 'category': '약어', 'original': key})
            return len(key)
    return 0
def check_number(word, index, braille):
    """Append the braille cell(s) for a digit at word[index].

    The number-start prefix cell is emitted only for the first digit of a
    run of digits. Returns True when a digit was consumed, False otherwise.
    """
    if not word[index].isdigit():
        return False
    cell = map_kor_to_braille.numbers[word[index]]
    # A digit continuing a run of digits needs no number-start prefix.
    if index == 0 or not word[index - 1].isdigit():
        cell = map_kor_to_braille.number_start + cell
    braille.append({'braille': cell, 'category': '숫자', 'original': word[index]})
    return True
def check_punctuation(word, index, braille):
    """Append the braille cell for a punctuation mark at word[index].

    :return: True when the character is a known punctuation mark, else False.
    """
    # Bug fix: the original compared `key is word[index]`, i.e. object
    # identity, which only works when CPython happens to intern both strings.
    # Dict membership compares with == and is also O(1) instead of a scan.
    ch = word[index]
    if ch in map_kor_to_braille.punctuation:
        braille.append({'braille': map_kor_to_braille.punctuation[ch],
                        'category': '문장기호', 'original': ch})
        return True
    return False
def check_character(word, index, braille):
    """Decompose a Hangul syllable at word[index] into jamo braille cells.

    Appends choseong/jungseong cells (and a jongseong cell when present) to
    `braille`; returns True if the character was Hangul, False otherwise.
    """
    key = word[index]
    # Matches complete syllables (가-힣) as well as bare jamo (ㄱ-ㅎ, ㅏ-ㅣ).
    # NOTE(review): a bare jamo also matches but makes `char` negative below,
    # producing garbage indices -- presumably callers only pass full
    # syllables; confirm against the input pipeline.
    if re.match('.*[ㄱ-ㅎㅏ-ㅣ가-힣]+.*', key) is not None:
        # Standard Unicode decomposition:
        # code = BASE_CODE + choseong*588 + jungseong*28 + jongseong
        char = ord(key) - BASE_CODE
        char1 = int(char / CHOSUNG)
        char2 = int((char - (CHOSUNG * char1)) / JUNGSUNG)
        char3 = int((char - (CHOSUNG * char1) - (JUNGSUNG * char2)))
        braille.append({'braille' : map_kor_to_braille.CHOSUNG_letters.get(CHOSUNG_LIST[char1]), 'category' : '초성', 'original' : CHOSUNG_LIST[char1]})
        braille.append({'braille' : map_kor_to_braille.JUNGSUNG_letters.get(JUNGSUNG_LIST[char2]), 'category' : '중성', 'original' : JUNGSUNG_LIST[char2]})
        # Bug fix: `char3 is not 0` relied on CPython's small-int caching and
        # raises SyntaxWarning on Python 3.8+; use value comparison instead.
        # Index 0 means the syllable has no final consonant.
        if char3 != 0:
            braille.append({'braille' : map_kor_to_braille.JONGSUNG_letters.get(JONGSUNG_LIST[char3]), 'category' : '종성', 'original' : JONGSUNG_LIST[char3]})
        return True
    return False
def translate(string):
    """Translate *string* into a list of braille-cell dicts.

    Each word is scanned left to right; contractions take priority, then
    digits, then punctuation, then Hangul syllables.  A space cell is
    appended after every word.
    """
    braille = []
    for word in extract_words(string):
        index = 0
        while index < len(word):
            consumed = check_contraction(word, index, braille)
            if consumed:
                index += consumed
            elif check_number(word, index, braille):
                index += 1
            elif check_punctuation(word, index, braille):
                index += 1
            else:
                check_character(word, index, braille)
                index += 1
        braille.append({'braille' : ' ', 'category' : 'space', 'original' : ' '})
    return braille
if __name__ == "__main__":
    # Demo: translate a sample Korean sentence when run as a script.
    print(translate("오늘 밤에도 별은 바람에 스치운다."))
| 35.584906 | 157 | 0.537911 |
003c761f532b196555958a4ed97e09da46eae940 | 2,239 | py | Python | contrib/tasks/wsss/train_val/validate.py | HAL-42/AlchemyCat | ca924755ff48e2ff74543bb0e446376eb2b1f150 | [
"Apache-2.0"
] | 8 | 2020-01-08T19:42:01.000Z | 2021-12-28T08:30:56.000Z | contrib/tasks/wsss/train_val/validate.py | HAL-42/AlchemyCat | ca924755ff48e2ff74543bb0e446376eb2b1f150 | [
"Apache-2.0"
] | 2 | 2020-09-10T12:22:57.000Z | 2022-02-17T05:21:22.000Z | contrib/tasks/wsss/train_val/validate.py | HAL-42/AlchemyCat | ca924755ff48e2ff74543bb0e446376eb2b1f150 | [
"Apache-2.0"
] | 1 | 2021-05-12T01:50:27.000Z | 2021-05-12T01:50:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Xiaobo Yang
@Contact : hal_42@zju.edu.cn
@Time : 2021/7/23 3:45
@File : validate.py
@Software: PyCharm
@Desc :
"""
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from alchemy_cat.data import DataAuger
from alchemy_cat.contrib.voc import VOC_CLASSES
from alchemy_cat.contrib.metrics import SegmentationMetric
from alchemy_cat.alg import msc_flip_inference, find_nearest_odd_size
__all__ = ['validate_seg']
def validate_seg(model: nn.Module, val_auger: DataAuger, iteration: int, writer: SummaryWriter=None):
    """Evaluate a segmentation model on the validation set.

    Args:
        model: torch model; the device of its first parameter is used for inputs.
        val_auger: data auger yielding (_, img, label) validation samples.
        iteration: current training iteration; metrics are logged at iteration + 1.
        writer: optional tensorboard writer for mIoU/precision/recall/accuracy.

    Returns:
        None.  Statistics are printed and optionally written to tensorboard.
    """
    print("\n================================== Validation ==================================")
    metric = SegmentationMetric(len(VOC_CLASSES), VOC_CLASSES)
    device = list(model.parameters())[0].device

    for _, img, label in tqdm(val_auger, total=len(val_auger),
                              desc="Validation Process", unit='samples', dynamic_ncols=True):
        # Add a batch dimension; assumes img is CHW float data — TODO confirm.
        bt_img = torch.from_numpy(img).to(device=device, dtype=torch.float32)[None, ...]
        # Single-scale (msc_factors=[1.]), no-flip inference.
        score_map = msc_flip_inference(imgs=bt_img,
                                       model=model,
                                       msc_factors=[1.],
                                       is_flip=False,
                                       msc_aligner=lambda x: find_nearest_odd_size(x, min_n=3),
                                       softmax_norm=False
                                       ).cpu().numpy()[0]

        metric.update(np.argmax(score_map, axis=0), label)

    metric.print_statistics(importance=1)
    if writer is not None:
        writer.add_scalar('mIoU', metric.mIoU, iteration + 1)
        writer.add_scalar('Precision', metric.macro_avg_precision, iteration + 1)
        writer.add_scalar('Recall', metric.macro_avg_recall, iteration + 1)
        writer.add_scalar('Accuracy', metric.accuracy, iteration + 1)
    print("\n================================ Validation End ================================")
| 36.112903 | 101 | 0.584636 |
f2f2684410d42ed760d238e77b89c2faff492445 | 298 | py | Python | selenium_stealth/navigator_webdriver.py | anhdhbn/selenium-stealth | c79e8b319faab1dfb81a0a90b80c2f8303b89b4b | [
"MIT"
] | 154 | 2020-11-05T13:24:25.000Z | 2022-03-31T13:30:40.000Z | selenium_stealth/navigator_webdriver.py | anhdhbn/selenium-stealth | c79e8b319faab1dfb81a0a90b80c2f8303b89b4b | [
"MIT"
] | 21 | 2020-11-05T13:25:47.000Z | 2022-02-16T21:33:57.000Z | selenium_stealth/navigator_webdriver.py | anhdhbn/selenium-stealth | c79e8b319faab1dfb81a0a90b80c2f8303b89b4b | [
"MIT"
] | 57 | 2020-11-06T19:06:44.000Z | 2022-03-31T07:17:50.000Z | from pathlib import Path
from .wrapper import evaluateOnNewDocument
from selenium.webdriver import Chrome as Driver
def navigator_webdriver(driver: Driver, **kwargs) -> None:
    """Inject the bundled navigator.webdriver patch script into every new page."""
    script = Path(__file__).parent.joinpath("js/navigator.webdriver.js").read_text()
    evaluateOnNewDocument(driver, script)
| 29.8 | 87 | 0.765101 |
ab3ba9e8fa4243b739a999964b4a9c69dbfc0baf | 768 | py | Python | alembic/versions/7e30cf9b2d8b_.py | dudeisbrendan03/here | 9ff28572d49b1be038c1798cc353142e64d3fbef | [
"EFL-2.0"
] | 16 | 2015-11-11T06:35:14.000Z | 2020-12-04T14:36:31.000Z | alembic/versions/7e30cf9b2d8b_.py | dudeisbrendan03/here | 9ff28572d49b1be038c1798cc353142e64d3fbef | [
"EFL-2.0"
] | 142 | 2015-11-16T22:07:20.000Z | 2020-04-26T04:18:01.000Z | alembic/versions/7e30cf9b2d8b_.py | dudeisbrendan03/here | 9ff28572d49b1be038c1798cc353142e64d3fbef | [
"EFL-2.0"
] | 32 | 2015-11-15T09:38:12.000Z | 2020-02-29T19:25:20.000Z | """Use C collation by default for starsystem tables.
Revision ID: 7e30cf9b2d8b
Revises: 46e931c30648
Create Date: 2016-02-15 15:19:49.306682
"""
# revision identifiers, used by Alembic.
revision = '7e30cf9b2d8b'
down_revision = '46e931c30648'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Switch starsystem text columns to the C collation and force a refresh.

    The final UPDATE clears the refresh timestamp so the next run rebuilds
    the starsystem data under the new collation.
    """
    op.alter_column("starsystem", "name", type_=sa.Text(collation="C"))
    op.alter_column("starsystem", "name_lower", type_=sa.Text(collation="C"))
    op.alter_column("starsystem_prefix", "first_word", type_=sa.Text(collation="C"))
    op.alter_column("starsystem_prefix", "const_words", type_=sa.Text(collation="C"))
    op.execute("UPDATE status SET starsystem_refreshed = NULL")
def downgrade():
    # Intentional no-op: reverting the collation change is not supported.
    pass
| 25.6 | 85 | 0.736979 |
85d3770c0f6f107aef51c0fab5eaaeb4a589da37 | 9,626 | py | Python | run.py | wellupgeek/qq_auto_sign_in | 58c058419cd1cfbea65f187ce3f75c91c76c9401 | [
"MIT"
] | 1 | 2020-04-25T09:16:58.000Z | 2020-04-25T09:16:58.000Z | run.py | wellupgeek/qq_auto_sign_in | 58c058419cd1cfbea65f187ce3f75c91c76c9401 | [
"MIT"
] | 3 | 2021-06-08T21:10:39.000Z | 2022-03-12T00:20:59.000Z | run.py | wellupgeek/qq_auto_sign_in | 58c058419cd1cfbea65f187ce3f75c91c76c9401 | [
"MIT"
] | null | null | null | import win32con, win32gui
from PIL import ImageGrab
import os, time, re, json
from time import sleep
from aip import AipOcr
import logging
import logging.handlers
import win32clipboard as w
# --- Screenshot capture section ---
class QQ_shot_screen(object):
    """Locate a window by exact title via Win32 and save screenshots of it."""

    def __init__(self, name, savepath):
        # name: exact window title to capture; savepath: screenshot root dir.
        self.name = name
        self.savepath = savepath

    def get_window_pos(self):
        """Return the window rectangle (x1, y1, x2, y2), or None if not found."""
        handle = win32gui.FindWindow(0, self.name)
        if handle == 0:
            return None
        return win32gui.GetWindowRect(handle)

    def get_image(self):
        """Restore and focus the window, then grab its on-screen area.

        Returns a PIL image, or None when the window cannot be found.
        Fix: the original unpacked get_window_pos() without a None check and
        raised TypeError when the window had disappeared.
        """
        handle = win32gui.FindWindow(0, self.name)
        if handle == 0:
            return None
        # Restore a minimised window, then raise it so the grab sees content.
        win32gui.SendMessage(handle, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE, 0)
        win32gui.SetForegroundWindow(handle)
        pos = self.get_window_pos()
        if pos is None:
            return None
        x1, y1, x2, y2 = pos
        return ImageGrab.grab((x1, y1, x2, y2))

    def save_image(self, num=5, sleep_time=2, logger=None):
        """Save *num* screenshots, sleep_time seconds apart, under
        savepath/<date>/<window name>/, and return that directory.

        Fix: *logger* defaults to None but was used unconditionally; logging
        is now optional, and a failed grab skips the shot instead of crashing.
        """
        now = time.strftime("%Y-%m-%d")
        dirpath = os.path.join(self.savepath, now)
        if not os.path.exists(dirpath):
            os.mkdir(dirpath)
            if logger:
                logger.info('创建文件夹: %s' %(dirpath))
        dirpath = os.path.join(dirpath, self.name)
        if not os.path.exists(dirpath):
            os.mkdir(dirpath)
            if logger:
                logger.info('创建文件夹: %s' %(dirpath))
        for i in range(1, num + 1):
            image = self.get_image()
            if image is None:
                continue  # window vanished between shots
            image.save(os.path.join(dirpath, self.name + '-' + str(i) + '.jpg'))
            if logger:
                logger.info('保存图片: %s' %(self.name + '-' + str(i) + '.jpg'))
            sleep(sleep_time)
        return dirpath
# --- OCR text-detection section ---
class Detecter_pic(object):
    """Run Baidu OCR over saved screenshots and extract key-word matches."""

    def __init__(self, det_path, key_word):
        # det_path: directory of screenshots; key_word: message prefix to find.
        self.det_path = det_path
        self.key_word = key_word

    def get_regex(self, num, length):
        """Return the counter-suffix pattern for mode *num* (cycles over 3)."""
        regex = [r'\d{1,' + str(length) + '}', r'\w{1,' + str(length) + '}',
                 r'[\u96f6\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d\u5341]{1,' + str(length) + '}']
        return regex[(num - 1) % 3]

    def regrex_mode(self, mode_num, length):
        """Compile key_word + suffix pattern.

        Mode 1: digits (签到3); mode 2: word chars (签到C); mode 3: Chinese
        numerals (签到七); other values cycle through these three.
        json.dumps escapes the key word to \\uXXXX sequences, which the re
        module interprets back to the original characters.
        """
        key_word = json.dumps(self.key_word).replace('"', '')
        regrex = self.get_regex(mode_num, length)
        return re.compile(key_word + regrex)

    def detector(self, app_id, api_key, secret_key, mode_num=1, length=2, logger=None):
        """OCR every file under det_path; return a list holding the best match.

        Fixes: image file handles are now closed (the original leaked one per
        screenshot); *logger* is optional; an empty directory or a response
        without 'words_result' no longer raises.
        """
        client = AipOcr(app_id, api_key, secret_key)
        start = time.time()
        if logger:
            logger.info('开始识别图片')
        nameRegex = self.regrex_mode(mode_num, length)
        ans = []
        dirlist = os.listdir(self.det_path)
        dirlist.sort()
        num = 0
        message = {}
        for file in dirlist:
            num += 1
            with open(os.path.join(self.det_path, file), 'rb') as image:
                message = client.basicAccurate(image.read())
            # Baidu's free tier allows qps = 2, so pause after every 2 requests.
            if num % 2 == 0:
                time.sleep(1)
        end = time.time()
        if logger:
            logger.info('识别完成,共花时: %.2fs' %(end - start))
        # NOTE(review): only the *last* OCR response is parsed (preserving the
        # original behaviour); the caller uses ans[-1], i.e. the best match of
        # the final screenshot.  temp is sorted descending so the "largest"
        # match string wins.
        words, temp = [], []
        for i in message.get('words_result') or []:
            words.append(str(i.get('words')).replace(' ', ''))
        text = '\n'.join(words)
        for group in nameRegex.findall(text):
            temp.append(group)
            if logger:
                logger.info('group = %s' %(group))
        temp.sort(reverse=True)
        if len(temp) > 0:
            ans.append(temp[0])
        return ans
# --- Message sending section ---
class Send_message(object):
    """Send a text message to a named window by pasting from the clipboard."""

    def __init__(self, name):
        # name: exact title of the target chat window.
        self.name = name

    def sendAQQMessage(self, msg, logger=None):
        """Copy *msg* to the clipboard, paste it into the window, press Enter.

        Fix: *logger* defaults to None but was used unconditionally; logging
        is now optional.
        """
        # Put the message on the clipboard as unicode text.
        w.OpenClipboard()
        w.EmptyClipboard()
        w.SetClipboardData(win32con.CF_UNICODETEXT, msg)
        w.CloseClipboard()
        if logger:
            logger.info('%s复制到剪切板中' %msg)
        # Find the chat window, restore it from minimised state and focus it.
        handle = win32gui.FindWindow(0, self.name)
        win32gui.SendMessage(handle, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE, 0)
        win32gui.SetForegroundWindow(handle)
        # 770 == WM_PASTE: paste the clipboard into the input box.
        win32gui.SendMessage(handle, 770, 0, 0)
        if logger:
            logger.info('在(%s)窗口中填充消息:%s' %(self.name, msg))
        # Enter key submits the message.
        win32gui.SendMessage(handle, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)
        if logger:
            logger.info('回车发送')
        win32gui.SetBkMode(handle, win32con.TRANSPARENT)
        win32gui.ShowWindow(handle, win32con.SW_MINIMIZE)
# --- Conversion between Chinese numerals and Arabic integers ---
class Convert(object):
    """Convert between Chinese numeral strings and integers."""

    def __init__(self):
        # (numeral, value) pairs; 十 and 百 act as place multipliers.
        hanzi = ['一', '二', '三', '四', '五', '六', '七', '八', '九', '十', '百']
        number = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100]
        self.han_num = list(zip(hanzi, number))

    def pop(self, temp, num):
        """Remove multiplier *num* from temp, zero-padding to keep the width."""
        temp.remove(num)
        while len(temp) < len(str(num)):
            temp.append(0)

    def hanzi_to_number(self, convert_str):
        """Parse a Chinese numeral string (e.g. '二十一') into an int."""
        digits = []
        for symbol in convert_str:
            for hanzi, value in self.han_num:
                if symbol == hanzi:
                    digits.append(value)
                    break
        # Drop place multipliers; padding keeps positional weights correct.
        if 10 in digits:
            self.pop(digits, 10)
        if 100 in digits:
            self.pop(digits, 100)
        total = 0
        for offset, value in enumerate(digits):
            total += value * (10 ** (len(digits) - 1 - offset))
        return total

    def number_to_hanzi(self, convert_num):
        """Render an int as Chinese numerals (inverse of hanzi_to_number)."""
        parts, place = [], 0
        remaining = convert_num
        while remaining > 0:
            base = 10 ** place
            if base > 1:
                parts.append(base)
            parts.append(remaining % 10)
            remaining //= 10
            place += 1
        pieces = []
        for value in reversed(parts):
            for hanzi, number in self.han_num:
                if value == number:
                    pieces.append(hanzi)
                    break
        return ''.join(pieces)
# --- Logging setup ---
def setMyLogger(Filename, log):
    """Attach a rotating, UTF-8, INFO-level file handler to *log*."""
    log.setLevel(logging.INFO)
    # NOTE(review): maxBytes=10241024 (~10 MB-ish) may have been intended as
    # 10*1024*1024 — confirm before changing.
    handler = logging.handlers.RotatingFileHandler(
        Filename, mode='a', maxBytes=10241024, backupCount=5, encoding='utf-8')
    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    log.addHandler(handler)
# Mode 1: send a fixed phrase only; no need to inspect previously sent content
def function_one(win_list, logger):
    """Mode 1: send the fixed key-word phrase to the window (name, phrase)."""
    window_name, phrase = win_list[0], win_list[1]
    Send_message(window_name).sendAQQMessage(phrase, logger)
def function_two(win_list, others_list, logger):
    """Mode 2: screenshot the window, OCR the latest '<key_word><counter>'
    message and send the key word with the counter incremented by one.

    win_list: (window name, key word, mode, max suffix length);
    others_list: (num, sleep_time, save_path, APP_ID, API_KEY, SECRET_KEY).
    """
    variables = ['num', 'sleep_time', 'save_path', 'APP_ID', 'API_KEY', 'SECRET_KEY']
    var_dict = {variables[i]: others_list[i] for i in range(6)}
    win_vars = ['win_name', 'key_word', 'mode', 'max_len']
    win_dict = {win_vars[i]: win_list[i] for i in range(4)}
    qqshot = QQ_shot_screen(name=win_dict['win_name'], savepath=var_dict['save_path'])
    dirpath = qqshot.save_image(num=int(var_dict['num']), sleep_time=int(var_dict['sleep_time']), logger=logger)
    det_obj = Detecter_pic(det_path=dirpath, key_word=win_dict['key_word'])
    ans = det_obj.detector(app_id=var_dict['APP_ID'], api_key=var_dict['API_KEY'], secret_key=var_dict['SECRET_KEY'],
                           mode_num=int(win_dict['mode']), length=int(win_dict['max_len']), logger=logger)
    msg = win_dict['key_word']
    mode = int(win_dict['mode'])
    start_len = len(msg)
    # Bug fix: the original read ans[-1] *before* checking len(ans), raising
    # IndexError whenever OCR found no match.  When nothing matched we still
    # send the bare key word, as before.
    if len(ans) > 0:
        strtext = ans[-1]
        suffix = strtext[start_len:]
        if mode == 1:  # key word + decimal counter, e.g. 签到3 -> 签到4
            logger.info('模式1')
            msg += str(int(suffix) + 1)
        elif mode == 2:  # key word + single letter, e.g. 签到C -> 签到D
            logger.info('模式2')
            # NOTE(review): ord() needs a single char; max_len > 1 in mode 2
            # would raise here.  'Z'/'z' are deliberately not advanced.
            uni_num = ord(suffix)
            if 65 <= uni_num < 90 or 97 <= uni_num < 122:
                msg += chr(uni_num + 1)
        elif mode == 3:  # key word + Chinese numeral, e.g. 签到七 -> 签到八
            logger.info('模式3')
            num = Convert().hanzi_to_number(suffix)
            msg += Convert().number_to_hanzi(num + 1)
    send_obj = Send_message(win_dict['win_name'])
    send_obj.sendAQQMessage(msg, logger=logger)
def main():
    """Read document.txt, configure logging, then run the chosen action
    (choice '1' = fixed phrase, '2' = OCR counter) for each window.

    Fix: the config file is now opened with a context manager so the handle
    is closed even when parsing raises.
    """
    logFile = 'record.log'
    log = logging.getLogger()
    setMyLogger(logFile, log)
    # Each configuration value is stored single-quoted, one per line.
    with open("document.txt", 'r', encoding='utf-8') as fp:
        cont = fp.read()
    pattern = re.compile("'(.*)'")
    contRe = pattern.findall(cont)
    # Per-window settings are ';'-separated inside each quoted value.
    window_name_list = contRe[0].split(';')
    choose_list = contRe[1].split(';')
    key_words_list = contRe[2].split(';')
    mode_list = contRe[3].split(';')
    max_len_list = contRe[4].split(';')
    others_list = contRe[5:]
    # Default any missing choices to '1' (fixed-phrase mode).
    while len(choose_list) < len(window_name_list):
        choose_list.append('1')
    for index in range(len(choose_list)):
        if choose_list[index] == '1':
            temp = (window_name_list[index], key_words_list[index])
            function_one(temp, log)
        if choose_list[index] == '2':
            temp = (window_name_list[index], key_words_list[index], mode_list[index], max_len_list[index])
            function_two(temp, others_list, log)
if __name__ == '__main__':
    # Entry point: read the config file and run the sign-in automation once.
    main()
| 34.134752 | 126 | 0.569915 |
e7beed384e0bd9d0dff1e12a164d18341be031b8 | 6,653 | py | Python | tests/ut/python/parallel/test_auto_parallel_two_matmul.py | TommyLike/mindspore | 401dabb786a9097d6dd84f391657d266b04e9a37 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_auto_parallel_two_matmul.py | TommyLike/mindspore | 401dabb786a9097d6dd84f391657d266b04e9a37 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_auto_parallel_two_matmul.py | TommyLike/mindspore | 401dabb786a9097d6dd84f391657d266b04e9a37 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mindspore import context
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore import Tensor
from tests.ut.python.ops.test_math_ops import VirtualLoss
import mindspore as ms
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.parallel import _cost_model_context as cost_model_context
from mindspore.parallel import set_algo_parameters, get_algo_parameters, reset_algo_parameters
from mindspore.parallel._utils import _reset_op_id as reset_op_id
class NetWithLoss(nn.Cell):
    """Wrap *network* so its prediction is fed through a virtual loss."""

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        return self.loss(self.network(x, y, b))
class GradWrap(nn.Cell):
    """Expose gradients of the wrapped network w.r.t. all of its inputs."""

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        grad_fn = C.grad_all(self.network)
        return grad_fn(x, y, b)
# model_parallel test
def test_two_matmul():
    """Exercise set/get/reset round-trips of the cost-model context and the
    algo parameters, then compile a two-MatMul net under auto-parallel and
    check the chosen sharding strategies.

    Fix: replaced ``assert x == True`` / ``== False`` comparisons (PEP 8 /
    flake8 E712) with plain truthiness asserts.
    """
    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.matmul1 = P.MatMul()
            self.matmul2 = P.MatMul()

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.matmul2(out, b)
            return out

    size = 16
    context.set_auto_parallel_context(device_num=size, global_rank=0)
    cost_model_context.set_cost_model_context(device_memory_capacity=32.0 * 1024.0 * 1024.0 * 1024.0,
                                              costmodel_alpha=1.0,
                                              costmodel_beta=60.0,
                                              costmodel_gamma=0.1,
                                              costmodel_communi_threshold=1024.0,
                                              costmodel_communi_const=2222.0,
                                              costmodel_communi_bias=1111.0)
    # The getters must reflect the values just set.
    dev_mem_cap = cost_model_context.get_cost_model_context("device_memory_capacity")
    assert dev_mem_cap == 32.0 * 1024.0 * 1024.0 * 1024.0
    costmodel_alpha = cost_model_context.get_cost_model_context("costmodel_alpha")
    assert costmodel_alpha == 1.0
    costmodel_beta = cost_model_context.get_cost_model_context("costmodel_beta")
    assert costmodel_beta == 60.0
    costmodel_gamma = cost_model_context.get_cost_model_context("costmodel_gamma")
    assert costmodel_gamma == 0.1
    costmodel_communi_threshold = cost_model_context.get_cost_model_context("costmodel_communi_threshold")
    assert costmodel_communi_threshold == 1024.0
    costmodel_communi_const = cost_model_context.get_cost_model_context("costmodel_communi_const")
    assert costmodel_communi_const == 2222.0
    costmodel_communi_bias = cost_model_context.get_cost_model_context("costmodel_communi_bias")
    assert costmodel_communi_bias == 1111.0

    # reset_cost_model_context() must restore the defaults.
    cost_model_context.reset_cost_model_context()
    dev_mem_cap = cost_model_context.get_cost_model_context("device_memory_capacity")
    assert dev_mem_cap == 16.0 * 1024.0 * 1024.0 * 1024.0
    costmodel_alpha = cost_model_context.get_cost_model_context("costmodel_alpha")
    assert costmodel_alpha == 1.0
    costmodel_beta = cost_model_context.get_cost_model_context("costmodel_beta")
    assert costmodel_beta == 260.0
    costmodel_gamma = cost_model_context.get_cost_model_context("costmodel_gamma")
    assert costmodel_gamma == 0.001
    costmodel_communi_threshold = cost_model_context.get_cost_model_context("costmodel_communi_threshold")
    assert costmodel_communi_threshold == 2048.0
    costmodel_communi_const = cost_model_context.get_cost_model_context("costmodel_communi_const")
    assert costmodel_communi_const == 3072.0
    costmodel_communi_bias = cost_model_context.get_cost_model_context("costmodel_communi_bias")
    assert costmodel_communi_bias == 1024.0

    # Same round-trip for the strategy-search algo parameters.
    set_algo_parameters(simplify_cal=True,
                        tensor_slice_align_enable=False,
                        tensor_slice_align_size=32,
                        not_fully_use_devices=True,
                        elementwise_op_strategy_follow=False)
    para_simplify_cal = get_algo_parameters("simplify_cal")
    assert para_simplify_cal
    para_slice_align_enable = get_algo_parameters("tensor_slice_align_enable")
    assert not para_slice_align_enable
    para_slice_align_size = get_algo_parameters("tensor_slice_align_size")
    assert para_slice_align_size == 32
    not_fully_use_devices = get_algo_parameters("not_fully_use_devices")
    assert not_fully_use_devices
    elementwise_op_strategy_follow = get_algo_parameters("elementwise_op_strategy_follow")
    assert not elementwise_op_strategy_follow

    reset_algo_parameters()
    para_simplify_cal = get_algo_parameters("simplify_cal")
    assert para_simplify_cal
    para_slice_align_enable = get_algo_parameters("tensor_slice_align_enable")
    assert not para_slice_align_enable
    para_slice_align_size = get_algo_parameters("tensor_slice_align_size")
    assert para_slice_align_size == 16
    not_fully_use_devices = get_algo_parameters("not_fully_use_devices")
    assert not not_fully_use_devices
    elementwise_op_strategy_follow = get_algo_parameters("elementwise_op_strategy_follow")
    assert not elementwise_op_strategy_follow

    # Compile a two-MatMul chain and verify the auto-selected strategies.
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)

    net = NetWithLoss(Net())
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    reset_op_id()

    _executor.compile(net, x, y, b, phase='train')
    strategies = _executor._get_strategy(net)
    expected_strategies = {'Default/network-Net/MatMul-op2': [[16, 1], [1, 1]],
                           'Default/network-Net/MatMul-op3': [[16, 1], [1, 1]]}
    assert strategies == expected_strategies
| 46.852113 | 106 | 0.711258 |
fb5831d9ba93e06aa241c75309e7d3d162666010 | 3,646 | py | Python | mergify_engine/actions/post_check.py | jsoref/mergify-engine | 90f24bfb33136e180c722f9d33f8704859e655d6 | [
"Apache-2.0"
] | null | null | null | mergify_engine/actions/post_check.py | jsoref/mergify-engine | 90f24bfb33136e180c722f9d33f8704859e655d6 | [
"Apache-2.0"
] | null | null | null | mergify_engine/actions/post_check.py | jsoref/mergify-engine | 90f24bfb33136e180c722f9d33f8704859e655d6 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import voluptuous
from mergify_engine import actions
from mergify_engine import check_api
from mergify_engine import context
from mergify_engine import rules
from mergify_engine import signals
from mergify_engine import subscription
from mergify_engine.rules import types
def CheckRunJinja2(v):
    """Validate *v* as a Jinja2 template with the check-run variables bound."""
    sample_variables = {
        "check_rule_name": "whatever",
        "check_succeed": True,
        "check_conditions": "the expected condition conditions",
    }
    return types.Jinja2(v, sample_variables)
class PostCheckAction(actions.Action):
    """Publish a GitHub check-run reporting whether the rule's conditions matched.

    ``title`` and ``summary`` are Jinja2 templates with check_rule_name,
    check_succeed and check_conditions available (see CheckRunJinja2).
    """
    validator = {
        voluptuous.Required(
            "title",
            default="'{{ check_rule_name }}' {% if check_succeed %}succeed{% else %}failed{% endif %}",  # noqa:FS003
        ): CheckRunJinja2,
        voluptuous.Required(
            "summary", default="{{ check_conditions }}"
        ): CheckRunJinja2,
    }

    # Run even when conditions do not fully match, so failures get reported too.
    always_run = True
    allow_retrigger_mergify = True

    async def _post(
        self, ctxt: context.Context, rule: rules.EvaluatedRule
    ) -> check_api.Result:
        # TODO(sileht): Don't run it if conditions contains the rule itself, as it can
        # created an endless loop of events.

        # Feature-gated: requires the CUSTOM_CHECKS subscription feature.
        if not ctxt.subscription.has_feature(subscription.Features.CUSTOM_CHECKS):
            return check_api.Result(
                check_api.Conclusion.ACTION_REQUIRED,
                "Custom checks are disabled",
                ctxt.subscription.missing_feature_reason(
                    ctxt.pull["base"]["repo"]["owner"]["login"]
                ),
            )

        check_succeed = not bool(rule.missing_conditions)
        # Render each condition as a markdown checkbox: [X] matched, [ ] missing.
        check_conditions = ""
        for cond in rule.conditions:
            checked = " " if cond in rule.missing_conditions else "X"
            check_conditions += f"\n- [{checked}] `{cond}`"
        extra_variables = {
            "check_rule_name": rule.name,
            "check_succeed": check_succeed,
            "check_conditions": check_conditions,
        }
        # Template errors are surfaced as FAILURE results, not exceptions.
        try:
            title = await ctxt.pull_request.render_template(
                self.config["title"],
                extra_variables,
            )
        except context.RenderTemplateFailure as rmf:
            return check_api.Result(
                check_api.Conclusion.FAILURE,
                "Invalid title template",
                str(rmf),
            )

        try:
            summary = await ctxt.pull_request.render_template(
                self.config["summary"], extra_variables
            )
        except context.RenderTemplateFailure as rmf:
            return check_api.Result(
                check_api.Conclusion.FAILURE,
                "Invalid summary template",
                str(rmf),
            )

        await signals.send(ctxt, "action.post_check")

        if rule.missing_conditions:
            return check_api.Result(check_api.Conclusion.FAILURE, title, summary)
        else:
            return check_api.Result(check_api.Conclusion.SUCCESS, title, summary)

    # Both run and cancel publish the same check.
    run = _post
    cancel = _post
| 33.145455 | 117 | 0.619035 |
b0c8dfc754dfacfa3d3e35c1bb1f30b8728bb032 | 26,424 | py | Python | rpython/rlib/clibffi.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | rpython/rlib/clibffi.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | rpython/rlib/clibffi.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 30 | 2018-08-20T03:16:34.000Z | 2022-01-12T17:39:22.000Z | """ Libffi wrapping
"""
from __future__ import with_statement
from rpython.rtyper.tool import rffi_platform
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.tool import rffi_platform
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.rarithmetic import intmask, is_emulated_long
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.rmmap import alloc
from rpython.rlib.rdynload import dlopen, dlclose, dlsym, dlsym_byordinal
from rpython.rlib.rdynload import DLOpenError, DLLHANDLE
from rpython.rlib import jit, rposix
from rpython.rlib.objectmodel import specialize
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.platform import platform
from rpython.translator import cdir
from platform import machine
import py
import os
import sys
import ctypes.util
# maaaybe isinstance here would be better. Think
_MSVC = platform.name == "msvc"
_MINGW = platform.name == "mingw32"
_WIN32 = _MSVC or _MINGW
_WIN64 = _WIN32 and is_emulated_long
_MAC_OS = platform.name == "darwin"
_LITTLE_ENDIAN = sys.byteorder == 'little'
_BIG_ENDIAN = sys.byteorder == 'big'
_ARM = rffi_platform.getdefined('__arm__', '')
if _WIN32:
from rpython.rlib import rwin32
if _WIN32:
separate_module_sources = ['''
#include "src/precommondefs.h"
#include <stdio.h>
#include <windows.h>
/* Get the module where the "fopen" function resides in */
RPY_EXTERN
HANDLE pypy_get_libc_handle() {
MEMORY_BASIC_INFORMATION mi;
char buf[1000];
memset(&mi, 0, sizeof(mi));
if( !VirtualQueryEx(GetCurrentProcess(), &fopen, &mi, sizeof(mi)) )
return 0;
GetModuleFileName((HMODULE)mi.AllocationBase, buf, 500);
return (HMODULE)mi.AllocationBase;
}
''']
else:
separate_module_sources = []
if not _WIN32:
includes = ['ffi.h']
if _MAC_OS:
pre_include_bits = ['#define MACOSX']
else:
pre_include_bits = []
libraries = ['ffi']
link_files = []
eci = ExternalCompilationInfo(
pre_include_bits = pre_include_bits,
includes = includes,
libraries = libraries,
separate_module_sources = separate_module_sources,
include_dirs = platform.include_dirs_for_libffi(),
library_dirs = platform.library_dirs_for_libffi(),
link_files = link_files,
testonly_libraries = ['ffi'],
)
elif _MINGW:
includes = ['ffi.h']
libraries = ['libffi-5']
eci = ExternalCompilationInfo(
libraries = libraries,
includes = includes,
separate_module_sources = separate_module_sources,
)
eci = rffi_platform.configure_external_library(
'ffi-5', eci,
[dict(prefix='libffi-',
include_dir='include', library_dir='.libs'),
dict(prefix=r'c:\\mingw64', include_dir='include', library_dir='lib'),
])
else:
USE_C_LIBFFI_MSVC = True
libffidir = py.path.local(cdir).join('src', 'libffi_msvc')
if not _WIN64:
asm_ifc = 'win32.c'
else:
asm_ifc = 'win64.asm'
eci = ExternalCompilationInfo(
includes = ['ffi.h', 'windows.h'],
libraries = ['kernel32'],
include_dirs = [libffidir, cdir],
separate_module_sources = separate_module_sources,
separate_module_files = [libffidir.join('ffi.c'),
libffidir.join('prep_cif.c'),
libffidir.join(asm_ifc),
libffidir.join('pypy_ffi.c'),
],
)
FFI_TYPE_P = lltype.Ptr(lltype.ForwardReference())
FFI_TYPE_PP = rffi.CArrayPtr(FFI_TYPE_P)
FFI_TYPE_NULL = lltype.nullptr(FFI_TYPE_P.TO)
class CConfig:
    # rffi_platform probe class: each attribute below is resolved against the
    # real libffi headers (via eci) at configuration time.
    _compilation_info_ = eci

    FFI_OK = rffi_platform.ConstantInteger('FFI_OK')
    FFI_BAD_TYPEDEF = rffi_platform.ConstantInteger('FFI_BAD_TYPEDEF')
    FFI_DEFAULT_ABI = rffi_platform.ConstantInteger('FFI_DEFAULT_ABI')
    if _WIN32 and not _WIN64:
        # stdcall ABI constant only exists on 32-bit Windows builds of libffi.
        FFI_STDCALL = rffi_platform.ConstantInteger('FFI_STDCALL')
    if _ARM:
        FFI_SYSV = rffi_platform.ConstantInteger('FFI_SYSV')
        FFI_VFP = rffi_platform.ConstantInteger('FFI_VFP')
    FFI_TYPE_STRUCT = rffi_platform.ConstantInteger('FFI_TYPE_STRUCT')
    size_t = rffi_platform.SimpleType("size_t", rffi.ULONG)
    ffi_abi = rffi_platform.SimpleType("ffi_abi", rffi.USHORT)
    ffi_arg = rffi_platform.SimpleType("ffi_arg", lltype.Signed)

    # Mirrors libffi's struct ffi_type layout.
    ffi_type = rffi_platform.Struct('ffi_type', [('size', rffi.ULONG),
                                                 ('alignment', rffi.USHORT),
                                                 ('type', rffi.USHORT),
                                                 ('elements', FFI_TYPE_PP)])

    ffi_cif = rffi_platform.Struct('ffi_cif', [])
    ffi_closure = rffi_platform.Struct('ffi_closure',
                                       [('user_data', rffi.VOIDP)])
def add_simple_type(type_name):
    """Register size/alignment/type constant probes of a libffi type on CConfig."""
    for field in ('size', 'alignment', 'type'):
        constant = rffi_platform.ConstantInteger(type_name + '.' + field)
        setattr(CConfig, type_name + '_' + field, constant)
def configure_simple_type(type_name):
    """Build an immortal raw ffi_type struct from the configured constants."""
    ffitype = lltype.malloc(FFI_TYPE_P.TO, flavor='raw', immortal=True)
    fields = [(size_t, 'size'),
              (rffi.USHORT, 'alignment'),
              (rffi.USHORT, 'type')]
    for fieldtype, fieldname in fields:
        configured = getattr(cConfig, '%s_%s' % (type_name, fieldname))
        setattr(ffitype, 'c_' + fieldname, rffi.cast(fieldtype, configured))
    ffitype.c_elements = lltype.nullptr(FFI_TYPE_PP.TO)
    return ffitype
base_names = ['double', 'uchar', 'schar', 'sshort', 'ushort', 'uint', 'sint',
# ffi_type_slong and ffi_type_ulong are omitted because
# their meaning changes too much from one libffi version to
# another. DON'T USE THEM! use cast_type_to_ffitype().
'float', 'longdouble', 'pointer', 'void',
# by size
'sint8', 'uint8', 'sint16', 'uint16', 'sint32', 'uint32',
'sint64', 'uint64']
type_names = ['ffi_type_%s' % name for name in base_names]
for i in type_names:
add_simple_type(i)
class cConfig:
pass
for k, v in rffi_platform.configure(CConfig).items():
setattr(cConfig, k, v)
FFI_TYPE_P.TO.become(cConfig.ffi_type)
size_t = cConfig.size_t
FFI_ABI = cConfig.ffi_abi
ffi_arg = cConfig.ffi_arg
for name in type_names:
locals()[name] = configure_simple_type(name)
def _signed_type_for(TYPE):
    """Return the signed ffi int type matching sizeof(TYPE), by byte size."""
    size = rffi.sizeof(TYPE)
    if size == 1:
        return ffi_type_sint8
    if size == 2:
        return ffi_type_sint16
    if size == 4:
        return ffi_type_sint32
    if size == 8:
        return ffi_type_sint64
    raise ValueError("unsupported type size for %r" % (TYPE,))
def _unsigned_type_for(TYPE):
    """Return the unsigned ffi int type matching sizeof(TYPE), by byte size."""
    size = rffi.sizeof(TYPE)
    if size == 1:
        return ffi_type_uint8
    if size == 2:
        return ffi_type_uint16
    if size == 4:
        return ffi_type_uint32
    if size == 8:
        return ffi_type_uint64
    raise ValueError("unsupported type size for %r" % (TYPE,))
__int_type_map = [
(rffi.UCHAR, ffi_type_uchar),
(rffi.SIGNEDCHAR, ffi_type_schar),
(rffi.SHORT, ffi_type_sshort),
(rffi.USHORT, ffi_type_ushort),
(rffi.UINT, ffi_type_uint),
(rffi.INT, ffi_type_sint),
# xxx don't use ffi_type_slong and ffi_type_ulong - their meaning
# changes from a libffi version to another :-((
(rffi.ULONG, _unsigned_type_for(rffi.ULONG)),
(rffi.LONG, _signed_type_for(rffi.LONG)),
(rffi.ULONGLONG, _unsigned_type_for(rffi.ULONGLONG)),
(rffi.LONGLONG, _signed_type_for(rffi.LONGLONG)),
(lltype.UniChar, _unsigned_type_for(lltype.UniChar)),
(lltype.Bool, _unsigned_type_for(lltype.Bool)),
(lltype.Char, _signed_type_for(lltype.Char)),
]
__float_type_map = [
(rffi.DOUBLE, ffi_type_double),
(rffi.FLOAT, ffi_type_float),
(rffi.LONGDOUBLE, ffi_type_longdouble),
]
__ptr_type_map = [
(rffi.VOIDP, ffi_type_pointer),
]
__type_map = __int_type_map + __float_type_map + [
(lltype.Void, ffi_type_void)
]
TYPE_MAP_INT = dict(__int_type_map)
TYPE_MAP_FLOAT = dict(__float_type_map)
TYPE_MAP = dict(__type_map)
ffitype_map_int = unrolling_iterable(__int_type_map)
ffitype_map_int_or_ptr = unrolling_iterable(__int_type_map + __ptr_type_map)
ffitype_map_float = unrolling_iterable(__float_type_map)
ffitype_map = unrolling_iterable(__type_map)
del __int_type_map, __float_type_map, __ptr_type_map, __type_map
def external(name, args, result, **kwds):
    # Shorthand for declaring a C function compiled against this module's eci.
    return rffi.llexternal(name, args, result, compilation_info=eci, **kwds)
def winexternal(name, args, result):
    # Like external(), but using the Windows ('win') calling convention.
    return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv='win')
if not _MSVC:
    def check_fficall_result(result, flags):
        pass # No check: non-MSVC builds have nothing meaningful to verify here.
else:
    def check_fficall_result(result, flags):
        # On MSVC, ffi_call returns a stack-pointer delta: 0 means the call was
        # balanced; nonzero indicates an argument-count or calling-convention
        # mismatch.  FUNCFLAG_CDECL and StackCheckError are defined elsewhere
        # in this module — presumably below this chunk; confirm in full file.
        if result == 0:
            return
        # if win64:
        #     raises ValueError("ffi_call failed with code %d" % (result,))
        if result < 0:
            if flags & FUNCFLAG_CDECL:
                raise StackCheckError(
                    "Procedure called with not enough arguments"
                    " (%d bytes missing)"
                    " or wrong calling convention" % (-result,))
            else:
                raise StackCheckError(
                    "Procedure called with not enough arguments "
                    " (%d bytes missing) " % (-result,))
        else:
            raise StackCheckError(
                "Procedure called with too many "
                "arguments (%d bytes in excess) " % (result,))
if not _WIN32:
libc_name = ctypes.util.find_library('c')
assert libc_name is not None, "Cannot find C library, ctypes.util.find_library('c') returned None"
def get_libc_name():
return libc_name
elif _MSVC:
get_libc_handle = external('pypy_get_libc_handle', [], DLLHANDLE)
@jit.dont_look_inside
def get_libc_name():
return rwin32.GetModuleFileName(get_libc_handle())
libc_name = get_libc_name().lower()
assert "msvcr" in libc_name or 'ucrtbase' in libc_name, \
"Suspect msvcrt library: %s" % (get_libc_name(),)
elif _MINGW:
def get_libc_name():
return 'msvcrt.dll'
if _WIN32:
    LoadLibrary = rwin32.LoadLibrary
# libffi status codes and ABI constants, as probed by cConfig.
FFI_OK = cConfig.FFI_OK
FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF
FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI
if _WIN32 and not _WIN64:
    FFI_STDCALL = cConfig.FFI_STDCALL
if _ARM:
    FFI_SYSV = cConfig.FFI_SYSV
    FFI_VFP = cConfig.FFI_VFP
FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT
FFI_CIFP = lltype.Ptr(cConfig.ffi_cif)
FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure)
VOIDPP = rffi.CArrayPtr(rffi.VOIDP)
# Low-level bindings to the three core libffi entry points.
c_ffi_prep_cif = external('ffi_prep_cif', [FFI_CIFP, FFI_ABI, rffi.UINT,
                                           FFI_TYPE_P, FFI_TYPE_PP], rffi.INT)
if _MSVC:
    c_ffi_call_return_type = rffi.INT
else:
    c_ffi_call_return_type = lltype.Void
c_ffi_call = external('ffi_call', [FFI_CIFP, rffi.VOIDP, rffi.VOIDP,
                                   VOIDPP], c_ffi_call_return_type,
                      save_err=rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)
# Note: the RFFI_ALT_ERRNO flag matches the one in pyjitpl.direct_libffi_call
CALLBACK_TP = rffi.CCallback([FFI_CIFP, rffi.VOIDP, rffi.VOIDPP, rffi.VOIDP],
                             lltype.Void)
c_ffi_prep_closure = external('ffi_prep_closure', [FFI_CLOSUREP, FFI_CIFP,
                                                   CALLBACK_TP, rffi.VOIDP],
                              rffi.INT)
# An ffi_type for a struct: the regular ffi_type header followed inline by
# the NULL-terminated array of member ffi_type pointers.
FFI_STRUCT_P = lltype.Ptr(lltype.Struct('FFI_STRUCT',
                                        ('ffistruct', FFI_TYPE_P.TO),
                                        ('members', lltype.Array(FFI_TYPE_P))))
@specialize.arg(3)
def make_struct_ffitype_e(size, aligment, field_types, track_allocation=True):
    """Compute the type of a structure. Returns a FFI_STRUCT_P out of
    which the 'ffistruct' member is a regular FFI_TYPE.

    The result is raw-allocated; the caller owns it and must free it.
    """
    tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types)+1, flavor='raw',
                        track_allocation=track_allocation)
    tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT)
    tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size)
    tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment)
    # c_elements points into the trailing 'members' array allocated above
    tpe.ffistruct.c_elements = rffi.cast(FFI_TYPE_PP,
                                         lltype.direct_arrayitems(tpe.members))
    n = 0
    while n < len(field_types):
        tpe.members[n] = field_types[n]
        n += 1
    # NULL terminator required by libffi
    tpe.members[n] = lltype.nullptr(FFI_TYPE_P.TO)
    return tpe
@specialize.memo()
def cast_type_to_ffitype(tp):
    """ This function returns ffi representation of rpython type tp
    """
    return TYPE_MAP[tp]
@specialize.argtype(1)
def push_arg_as_ffiptr(ffitp, arg, ll_buf):
    """Store the primitive value *arg* into the raw buffer *ll_buf*,
    formatted as the libffi type *ffitp*."""
    # This is for primitive types. Note that the exact type of 'arg' may be
    # different from the expected 'c_size'. To cope with that, we fall back
    # to a byte-by-byte copy.
    TP = lltype.typeOf(arg)
    TP_P = lltype.Ptr(rffi.CArray(TP))
    TP_size = rffi.sizeof(TP)
    c_size = intmask(ffitp.c_size)
    # if both types have the same size, we can directly write the
    # value to the buffer
    if c_size == TP_size:
        buf = rffi.cast(TP_P, ll_buf)
        buf[0] = arg
    else:
        # needs byte-by-byte copying. Make sure 'arg' is an integer type.
        # Note that this won't work for rffi.FLOAT/rffi.DOUBLE.
        assert TP is not rffi.FLOAT and TP is not rffi.DOUBLE
        if TP_size <= rffi.sizeof(lltype.Signed):
            arg = rffi.cast(lltype.Unsigned, arg)
        else:
            arg = rffi.cast(lltype.UnsignedLongLong, arg)
        # emit the low-order bytes first or last depending on endianness,
        # shifting 'arg' right by one byte per iteration
        if _LITTLE_ENDIAN:
            for i in range(c_size):
                ll_buf[i] = chr(arg & 0xFF)
                arg >>= 8
        elif _BIG_ENDIAN:
            for i in range(c_size-1, -1, -1):
                ll_buf[i] = chr(arg & 0xFF)
                arg >>= 8
        else:
            raise AssertionError
# type defs for callback and closure userdata
USERDATA_P = lltype.Ptr(lltype.ForwardReference())
CALLBACK_TP = lltype.Ptr(lltype.FuncType([rffi.VOIDPP, rffi.VOIDP, USERDATA_P],
                                         lltype.Void))
USERDATA_P.TO.become(lltype.Struct('userdata',
                                   ('callback', CALLBACK_TP),
                                   ('addarg', lltype.Signed),
                                   hints={'callback':True}))
@jit.jit_callback("CLIBFFI")
def _ll_callback(ffi_cif, ll_res, ll_args, ll_userdata):
    """ Callback specification.
    ffi_cif - something ffi specific, don't care
    ll_args - rffi.VOIDPP - pointer to array of pointers to args
    ll_restype - rffi.VOIDP - pointer to result
    ll_userdata - a special structure which holds necessary information
                  (what the real callback is for example), casted to VOIDP
    """
    userdata = rffi.cast(USERDATA_P, ll_userdata)
    userdata.callback(ll_args, ll_res, userdata)
def ll_callback(ffi_cif, ll_res, ll_args, ll_userdata):
    # Entry point handed to c_ffi_prep_closure (see CallbackFuncPtr):
    # swap errno in and out of the alternate slot around the user callback.
    rposix._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)
    _ll_callback(ffi_cif, ll_res, ll_args, ll_userdata)
    rposix._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)
class StackCheckError(ValueError):
    # Raised by check_fficall_result() when ffi_call reports an
    # argument-size / calling-convention mismatch.
    message = None
    def __init__(self, message):
        self.message = message
class LibFFIError(Exception):
    # Raised when ffi_prep_cif / ffi_prep_closure does not return FFI_OK.
    pass
CHUNK = 4096
CLOSURES = rffi.CArrayPtr(FFI_CLOSUREP.TO)
class ClosureHeap(object):
    # Free-list allocator for ffi_closure slots, carved out of CHUNK-sized
    # raw memory blocks.  The first word of each free slot links to the
    # next free slot.
    def __init__(self):
        self.free_list = lltype.nullptr(rffi.VOIDP.TO)
    def _more(self):
        # Allocate a fresh chunk and thread every closure-sized slot in it
        # onto the free list.
        chunk = rffi.cast(CLOSURES, alloc(CHUNK))
        count = CHUNK//rffi.sizeof(FFI_CLOSUREP.TO)
        for i in range(count):
            rffi.cast(rffi.VOIDPP, chunk)[0] = self.free_list
            self.free_list = rffi.cast(rffi.VOIDP, chunk)
            chunk = rffi.ptradd(chunk, 1)
    def alloc(self):
        # Pop one slot off the free list, refilling it first if empty.
        if not self.free_list:
            self._more()
        p = self.free_list
        self.free_list = rffi.cast(rffi.VOIDPP, p)[0]
        return rffi.cast(FFI_CLOSUREP, p)
    def free(self, p):
        # Push the slot back onto the free list for reuse.
        rffi.cast(rffi.VOIDPP, p)[0] = self.free_list
        self.free_list = rffi.cast(rffi.VOIDP, p)
closureHeap = ClosureHeap()
# Function flags, mirroring ctypes' FUNCFLAG_* values.
FUNCFLAG_STDCALL = 0 # on Windows: for WINAPI calls
FUNCFLAG_CDECL = 1 # on Windows: for __cdecl calls
FUNCFLAG_PYTHONAPI = 4
FUNCFLAG_USE_ERRNO = 8
FUNCFLAG_USE_LASTERROR = 16
@specialize.arg(1) # hack :-/
def get_call_conv(flags, from_jit):
    # 32-bit Windows defaults to stdcall unless FUNCFLAG_CDECL is set;
    # every other platform uses libffi's default ABI.
    if _WIN32 and not _WIN64 and (flags & FUNCFLAG_CDECL == 0):
        return FFI_STDCALL
    else:
        return FFI_DEFAULT_ABI
class AbstractFuncPtr(object):
    """Base class owning the raw ffi_cif and argument-type array for one
    C function signature; subclasses add calling / callback behavior."""
    ll_cif = lltype.nullptr(FFI_CIFP.TO)
    ll_argtypes = lltype.nullptr(FFI_TYPE_PP.TO)
    _immutable_fields_ = ['argtypes', 'restype']
    def __init__(self, name, argtypes, restype, flags=FUNCFLAG_CDECL):
        self.name = name
        self.argtypes = argtypes
        self.restype = restype
        self.flags = flags
        argnum = len(argtypes)
        self.ll_argtypes = lltype.malloc(FFI_TYPE_PP.TO, argnum, flavor='raw',
                                         track_allocation=False) # freed by the __del__
        for i in range(argnum):
            self.ll_argtypes[i] = argtypes[i]
        self.ll_cif = lltype.malloc(FFI_CIFP.TO, flavor='raw',
                                    track_allocation=False) # freed by the __del__
        if _MSVC:
            # This little trick works correctly with MSVC.
            # It returns small structures in registers
            if intmask(restype.c_type) == FFI_TYPE_STRUCT:
                if restype.c_size <= 4:
                    restype = ffi_type_sint32
                elif restype.c_size <= 8:
                    restype = ffi_type_sint64
        res = c_ffi_prep_cif(self.ll_cif,
                             rffi.cast(rffi.USHORT, get_call_conv(flags,False)),
                             rffi.cast(rffi.UINT, argnum), restype,
                             self.ll_argtypes)
        if not res == FFI_OK:
            raise LibFFIError
    def __del__(self):
        # Free the raw cif/argtypes buffers and null the pointers so a
        # second __del__ (e.g. via subclass chaining) is a no-op.
        if self.ll_cif:
            lltype.free(self.ll_cif, flavor='raw', track_allocation=False)
            self.ll_cif = lltype.nullptr(FFI_CIFP.TO)
        if self.ll_argtypes:
            lltype.free(self.ll_argtypes, flavor='raw', track_allocation=False)
            self.ll_argtypes = lltype.nullptr(FFI_TYPE_PP.TO)
# as long as CallbackFuncPtr is kept alive, the underlaying userdata
# is kept alive as well
class CallbackFuncPtr(AbstractFuncPtr):
    """A C-callable closure that forwards into the rpython function
    *func* via ll_callback; owns its closure slot and userdata."""
    ll_closure = lltype.nullptr(FFI_CLOSUREP.TO)
    ll_userdata = lltype.nullptr(USERDATA_P.TO)
    # additional_arg should really be a non-heap type like a integer,
    # it cannot be any kind of movable gc reference
    def __init__(self, argtypes, restype, func, additional_arg=0,
                 flags=FUNCFLAG_CDECL):
        AbstractFuncPtr.__init__(self, "callback", argtypes, restype, flags)
        self.ll_closure = closureHeap.alloc()
        self.ll_userdata = lltype.malloc(USERDATA_P.TO, flavor='raw',
                                         track_allocation=False)
        self.ll_userdata.callback = rffi.llhelper(CALLBACK_TP, func)
        self.ll_userdata.addarg = additional_arg
        res = c_ffi_prep_closure(self.ll_closure, self.ll_cif,
                                 ll_callback, rffi.cast(rffi.VOIDP,
                                                        self.ll_userdata))
        if not res == FFI_OK:
            raise LibFFIError
    def __del__(self):
        AbstractFuncPtr.__del__(self)
        # Return the closure slot to the heap and free the userdata.
        if self.ll_closure:
            closureHeap.free(self.ll_closure)
            self.ll_closure = lltype.nullptr(FFI_CLOSUREP.TO)
        if self.ll_userdata:
            lltype.free(self.ll_userdata, flavor='raw', track_allocation=False)
            self.ll_userdata = lltype.nullptr(USERDATA_P.TO)
class RawFuncPtr(AbstractFuncPtr):
    """Function pointer called with caller-managed raw argument/result
    buffers (no per-call allocation besides the pointer array)."""
    def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL,
                 keepalive=None):
        AbstractFuncPtr.__init__(self, name, argtypes, restype, flags)
        self.keepalive = keepalive
        self.funcsym = funcsym
    def call(self, args_ll, ll_result):
        # adjust_return_size() should always be used here on ll_result
        assert len(args_ll) == len(self.argtypes), (
            "wrong number of arguments in call to %s(): "
            "%d instead of %d" % (self.name, len(args_ll), len(self.argtypes)))
        ll_args = lltype.malloc(rffi.VOIDPP.TO, len(args_ll), flavor='raw')
        for i in range(len(args_ll)):
            assert args_ll[i] # none should be NULL
            ll_args[i] = args_ll[i]
        ffires = c_ffi_call(self.ll_cif, self.funcsym, ll_result, ll_args)
        lltype.free(ll_args, flavor='raw')
        check_fficall_result(ffires, self.flags)
class FuncPtr(AbstractFuncPtr):
    """Function pointer with pre-allocated per-argument raw buffers:
    arguments are staged with push_arg() and the call is made with
    call(RES_TP)."""
    ll_args = lltype.nullptr(rffi.VOIDPP.TO)
    ll_result = lltype.nullptr(rffi.VOIDP.TO)
    def __init__(self, name, argtypes, restype, funcsym, flags=FUNCFLAG_CDECL,
                 keepalive=None):
        # initialize each one of pointers with null
        AbstractFuncPtr.__init__(self, name, argtypes, restype, flags)
        self.keepalive = keepalive
        self.funcsym = funcsym
        self.argnum = len(self.argtypes)
        self.pushed_args = 0
        self.ll_args = lltype.malloc(rffi.VOIDPP.TO, self.argnum, flavor='raw')
        for i in range(self.argnum):
            # space for each argument
            self.ll_args[i] = lltype.malloc(rffi.VOIDP.TO,
                                            intmask(argtypes[i].c_size),
                                            flavor='raw')
        if restype != ffi_type_void:
            self.restype_size = intmask(restype.c_size)
            # libffi may write up to 8 bytes of result, hence the adjust
            size = adjust_return_size(self.restype_size)
            self.ll_result = lltype.malloc(rffi.VOIDP.TO, size,
                                           flavor='raw')
        else:
            self.restype_size = -1
    @specialize.argtype(1)
    def push_arg(self, value):
        """Stage the next positional argument into its raw buffer."""
        #if self.pushed_args == self.argnum:
        # raise TypeError("Too many arguments, eats %d, pushed %d" %
        # (self.argnum, self.argnum + 1))
        if not we_are_translated():
            # untranslated-only sanity checks on what may be pushed
            TP = lltype.typeOf(value)
            if isinstance(TP, lltype.Ptr):
                if TP.TO._gckind != 'raw':
                    raise ValueError("Can only push raw values to C, not 'gc'")
                # XXX probably we should recursively check for struct fields
                # here, lets just ignore that for now
                if isinstance(TP.TO, lltype.Array):
                    try:
                        TP.TO._hints['nolength']
                    except KeyError:
                        raise ValueError("Can only push to C arrays without length info")
        push_arg_as_ffiptr(self.argtypes[self.pushed_args], value,
                           self.ll_args[self.pushed_args])
        self.pushed_args += 1
    def _check_args(self):
        if self.pushed_args < self.argnum:
            raise TypeError("Did not specify arg nr %d" % (self.pushed_args + 1))
    def _clean_args(self):
        self.pushed_args = 0
    @specialize.arg(1)
    def call(self, RES_TP):
        """Invoke the C function; read the result back as RES_TP
        (lltype.Void for no result)."""
        self._check_args()
        ffires = c_ffi_call(self.ll_cif, self.funcsym,
                            rffi.cast(rffi.VOIDP, self.ll_result),
                            rffi.cast(VOIDPP, self.ll_args))
        if RES_TP is not lltype.Void:
            TP = lltype.Ptr(rffi.CArray(RES_TP))
            ptr = self.ll_result
            if _BIG_ENDIAN and RES_TP in TYPE_MAP_INT:
                # we get a 8 byte value in big endian
                n = rffi.sizeof(lltype.Signed) - self.restype_size
                ptr = rffi.ptradd(ptr, n)
            res = rffi.cast(TP, ptr)[0]
        else:
            res = None
        self._clean_args()
        check_fficall_result(ffires, self.flags)
        return res
    def __del__(self):
        # Free the per-argument buffers, the pointer array and the result
        # buffer, nulling each pointer afterwards.
        if self.ll_args:
            argnum = len(self.argtypes)
            for i in range(argnum):
                if self.ll_args[i]:
                    lltype.free(self.ll_args[i], flavor='raw')
            lltype.free(self.ll_args, flavor='raw')
            self.ll_args = lltype.nullptr(rffi.VOIDPP.TO)
        if self.ll_result:
            lltype.free(self.ll_result, flavor='raw')
            self.ll_result = lltype.nullptr(rffi.VOIDP.TO)
        AbstractFuncPtr.__del__(self)
class RawCDLL(object):
    """Thin wrapper around an already-opened DLL handle, building FuncPtr
    / RawFuncPtr objects for symbols looked up in it.  The wrapper does
    not own the handle and never closes it (see CDLL for that)."""
    def __init__(self, handle):
        self.lib = handle
    def getpointer(self, name, argtypes, restype, flags=FUNCFLAG_CDECL):
        # these arguments are already casted to proper ffi
        # structures!
        return FuncPtr(name, argtypes, restype, dlsym(self.lib, name),
                       flags=flags, keepalive=self)
    def getrawpointer(self, name, argtypes, restype, flags=FUNCFLAG_CDECL):
        # these arguments are already casted to proper ffi
        # structures!
        return RawFuncPtr(name, argtypes, restype, dlsym(self.lib, name),
                          flags=flags, keepalive=self)
    def getrawpointer_byordinal(self, ordinal, argtypes, restype,
                                flags=FUNCFLAG_CDECL):
        # these arguments are already casted to proper ffi
        # structures!
        # BUG FIX: this method used the undefined variable 'name' for the
        # function's display name, raising NameError on every call; use a
        # string derived from the ordinal instead.
        return RawFuncPtr(str(ordinal), argtypes, restype,
                          dlsym_byordinal(self.lib, ordinal), flags=flags,
                          keepalive=self)
    def getaddressindll(self, name):
        # Raw address of a symbol, without wrapping it in a FuncPtr.
        return dlsym(self.lib, name)
class CDLL(RawCDLL):
    """RawCDLL that owns its handle: opens the library on construction
    and closes it again when the wrapper dies."""
    def __init__(self, libname, mode=-1):
        """Load the library, or raises DLOpenError."""
        # Start with the -1 sentinel so __del__ is safe even if dlopen fails.
        RawCDLL.__init__(self, rffi.cast(DLLHANDLE, -1))
        with rffi.scoped_str2charp(libname) as ll_libname:
            self.lib = dlopen(ll_libname, mode)
    def __del__(self):
        if self.lib != rffi.cast(DLLHANDLE, -1):
            dlclose(self.lib)
            self.lib = rffi.cast(DLLHANDLE, -1)
def adjust_return_size(memsize):
    """Return *memsize* rounded up to a minimum of 8 bytes.

    Workaround for a strange behavior of libffi: ffi_call() writes 8
    bytes into the result buffer even if the function's result type asks
    for less, so result buffers must always be at least that large.
    This strange behavior is documented.
    """
    return max(memsize, 8)
| 37.11236 | 102 | 0.62553 |
f7bb1fbf24203dab5b90d5f24d2bf96684f94102 | 921 | py | Python | examples/tut1.py | opetlund/TMM4135-CALFEM | e15621a6fec3bef7f07cfbc9abb80ad10551d6d0 | [
"MIT"
] | null | null | null | examples/tut1.py | opetlund/TMM4135-CALFEM | e15621a6fec3bef7f07cfbc9abb80ad10551d6d0 | [
"MIT"
] | null | null | null | examples/tut1.py | opetlund/TMM4135-CALFEM | e15621a6fec3bef7f07cfbc9abb80ad10551d6d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 3 22:08:29 2018
@author: Jonas Lindemann
"""
import calfem.geometry as cfg
import calfem.mesh as cfm
import calfem.vis as cfv
# ----- Define geometry
# Triangle-shaped domain: three corner points joined by three splines,
# closed into a single surface.  Markers (10, 20) tag boundary entities
# so their dofs can be looked up in 'bdofs' later.
g = cfg.Geometry()
g.point([0.0, 0.0]) # point 0
g.point([5.0, 0.0], marker=20) # point 1
g.point([2.5, 4.0]) # point 2
g.spline([0, 1]) # line 0
g.spline([1, 2]) # line 1
g.spline([2, 0], marker=10) # line 2
g.surface([0, 1, 2])
# ----- Create mesh
# NOTE(review): the three comments below were shifted by one line in the
# original; they are reassigned to the attributes they actually describe.
mesh = cfm.GmshMesh(g)
mesh.elType = 2 # Element type (presumably 2 = triangle; confirm in calfem docs).
mesh.dofsPerNode = 1 # Degrees of freedom per node.
mesh.elSizeFactor = 0.15 # Factor that changes element sizes.
coords, edof, dofs, bdofs, elementmarkers = mesh.create()
print(bdofs)
cfv.drawGeometry(g)
cfv.figure()
# ----- Draw the mesh.
cfv.drawMesh(
    coords=coords,
    edof=edof,
    dofsPerNode=mesh.dofsPerNode,
    elType=mesh.elType,
    filled=True,
    title="Example 01"
)
cfv.showAndWait()
82bfb2c406587cfbb226e7090f50fa6edbbe8fc6 | 1,838 | py | Python | Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/reason/test_reason_forms.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/reason/test_reason_forms.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/reason/test_reason_forms.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | from django.test import TestCase
from dfirtrack_main.forms import ReasonForm
class ReasonFormTestCase(TestCase):
""" reason form tests """
def test_reason_name_form_label(self):
""" test form label """
# get object
form = ReasonForm()
# compare
self.assertEqual(form.fields['reason_name'].label, 'Reason name (*)')
def test_reason_note_form_label(self):
""" test form label """
# get object
form = ReasonForm()
# compare
self.assertEqual(form.fields['reason_note'].label, 'Reason note')
def test_reason_form_empty(self):
""" test minimum form requirements / INVALID """
# get object
form = ReasonForm(data = {})
# compare
self.assertFalse(form.is_valid())
def test_reason_name_form_filled(self):
""" test minimum form requirements / VALID """
# get object
form = ReasonForm(data = {'reason_name': 'reason_1'})
# compare
self.assertTrue(form.is_valid())
def test_reason_note_form_filled(self):
""" test additional form content """
# get object
form = ReasonForm(data = {
'reason_name': 'reason_1',
'reason_note': 'lorem ipsum',
})
# compare
self.assertTrue(form.is_valid())
def test_reason_name_proper_chars(self):
""" test for max length """
# get object
form = ReasonForm(data = {'reason_name': 'rrrrrrrrrrrrrrrrrrrrrrrrrrrrrr'})
# compare
self.assertTrue(form.is_valid())
def test_reason_name_too_many_chars(self):
""" test for max length """
# get object
form = ReasonForm(data = {'reason_name': 'rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr'})
# compare
self.assertFalse(form.is_valid())
| 28.276923 | 84 | 0.603373 |
28d1a99688a1772d6c9882b99bb9d0d9dc870924 | 6,109 | py | Python | torchbenchmark/models/BERT_pytorch/bert_pytorch/trainer/pretrain.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 1 | 2021-07-30T08:47:09.000Z | 2021-07-30T08:47:09.000Z | torchbenchmark/models/BERT_pytorch/bert_pytorch/trainer/pretrain.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | null | null | null | torchbenchmark/models/BERT_pytorch/bert_pytorch/trainer/pretrain.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 2 | 2020-07-27T21:48:20.000Z | 2020-07-30T16:57:02.000Z | import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from ..model import BERTLM, BERT
from .optim_schedule import ScheduledOptim
import tqdm
class BERTTrainer:
    """
    BERTTrainer make the pretrained BERT model with two LM training method.
        1. Masked Language Model : 3.3.1 Task #1: Masked LM
        2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction
    please check the details on README.md with simple example.
    """
    def __init__(self, bert: BERT, vocab_size: int,
                 train_dataloader: DataLoader, test_dataloader: DataLoader = None,
                 lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,
                 with_cuda: bool = True, cuda_devices=None, log_freq: int = 10, debug: str = None):
        """
        :param bert: BERT model which you want to train
        :param vocab_size: total word vocab size
        :param train_dataloader: train dataset data loader
        :param test_dataloader: test dataset data loader [can be None]
        :param lr: learning rate of optimizer
        :param betas: Adam optimizer betas
        :param weight_decay: Adam optimizer weight decay param
        :param warmup_steps: warmup steps for the LR schedule
        :param with_cuda: training with cuda
        :param cuda_devices: device ids for nn.DataParallel (multi-GPU)
        :param log_freq: logging frequency of the batch iteration
        :param debug: if set, path to dump the first forward output to
        """
        # Setup cuda device for BERT training, argument -c, --cuda should be true
        cuda_condition = torch.cuda.is_available() and with_cuda
        self.device = torch.device("cuda:0" if cuda_condition else "cpu")
        # This BERT model will be saved every epoch
        self.bert = bert
        # Initialize the BERT Language Model, with BERT model
        self.model = BERTLM(bert, vocab_size).to(self.device)
        # Distributed GPU training if CUDA can detect more than 1 GPU
        if with_cuda and torch.cuda.device_count() > 1:
            print("Using %d GPUS for BERT" % torch.cuda.device_count())
            self.model = nn.DataParallel(self.model, device_ids=cuda_devices)
        # Setting the train and test data loader
        self.train_data = train_dataloader
        self.test_data = test_dataloader
        # Setting the Adam optimizer with hyper-param
        self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
        self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)
        # Using Negative Log Likelihood Loss function for predicting the masked_token
        # (ignore_index=0 skips targets equal to 0 when computing the loss)
        self.criterion = nn.NLLLoss(ignore_index=0)
        self.log_freq = log_freq
        self.debug = debug
        print("Total Parameters:", sum([p.nelement() for p in self.model.parameters()]))
    def train(self, epoch):
        # One pass over the training data with backprop enabled.
        self.iteration(epoch, self.train_data)
    def test(self, epoch):
        # One pass over the test data, forward-only.
        self.iteration(epoch, self.test_data, train=False)
    def iteration(self, epoch, data_loader, train=True):
        """
        loop over the data_loader for training or testing
        if on train status, backward operation is activated

        :param epoch: current epoch index
        :param data_loader: torch.utils.data.DataLoader for iteration
        :param train: boolean value of is train or test
        :return: None
        """
        str_code = "train" if train else "test"
        # Setting the tqdm progress bar
        data_iter = tqdm.tqdm(enumerate(data_loader),
                              desc="EP_%s:%d" % (str_code, epoch),
                              total=len(data_loader),
                              bar_format="{l_bar}{r_bar}")
        avg_loss = 0.0
        total_correct = 0
        total_element = 0
        for i, data in data_iter:
            # 0. batch_data will be sent into the device(GPU or cpu)
            data = {key: value.to(self.device) for key, value in data.items()}
            # 1. forward the next_sentence_prediction and masked_lm model
            next_sent_output, mask_lm_output = self.model.forward(data["bert_input"], data["segment_label"])
            # 2-1. NLL(negative log likelihood) loss of is_next classification result
            next_loss = self.criterion(next_sent_output, data["is_next"])
            # 2-2. NLLLoss of predicting masked token word
            mask_loss = self.criterion(mask_lm_output.transpose(1, 2), data["bert_label"])
            # 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
            loss = next_loss + mask_loss
            # 3. backward and optimization only in train
            if train:
                self.optim_schedule.zero_grad()
                loss.backward()
                self.optim_schedule.step_and_update_lr()
            # next sentence prediction accuracy
            correct = next_sent_output.argmax(dim=-1).eq(data["is_next"]).sum().item()
            avg_loss += loss.item()
            total_correct += correct
            total_element += data["is_next"].nelement()
            post_fix = {
                "epoch": epoch,
                "iter": i,
                "avg_loss": avg_loss / (i + 1),
                "avg_acc": total_correct / total_element * 100,
                "loss": loss.item()
            }
            if i % self.log_freq == 0:
                data_iter.write(str(post_fix))
            # Debug hook: dump the first batch's NSP output of epoch 1.
            if self.debug and epoch == 1 and i == 0:
                torch.save(next_sent_output, self.debug)
        print("EP%d_%s, avg_loss=" % (epoch, str_code), avg_loss / len(data_iter), "total_acc=",
              total_correct * 100.0 / total_element)
    def save(self, epoch, file_path="output/bert_trained.model"):
        """
        Saving the current BERT model on file_path

        NOTE(review): despite the name, nothing is written to disk here -
        the method only moves the model to the device, prints the path and
        returns it (presumably stripped down for benchmarking; confirm).

        :param epoch: current epoch number
        :param file_path: model output path which gonna be file_path+"ep%d" % epoch
        :return: final_output_path
        """
        output_path = file_path + ".ep%d" % epoch
        self.bert.to(self.device)
        print("EP:%d Model Saved on:" % epoch, output_path)
        return output_path
| 39.412903 | 108 | 0.62056 |
6e277314c57e29c1c8241f4c19b1225cf04458e7 | 32,257 | py | Python | bbfreeze/modulegraph/modulegraph.py | mccredie/bbfreeze | c74223318826ebd1a19aed15cba5641055ff0187 | [
"Zlib",
"MIT"
] | 33 | 2015-01-25T18:30:31.000Z | 2021-03-17T08:53:29.000Z | bbfreeze/modulegraph/modulegraph.py | mccredie/bbfreeze | c74223318826ebd1a19aed15cba5641055ff0187 | [
"Zlib",
"MIT"
] | 8 | 2015-10-30T07:07:41.000Z | 2017-06-30T16:55:40.000Z | bbfreeze/modulegraph/modulegraph.py | mccredie/bbfreeze | c74223318826ebd1a19aed15cba5641055ff0187 | [
"Zlib",
"MIT"
] | 17 | 2015-03-11T09:38:29.000Z | 2022-01-06T23:19:48.000Z | """
Find modules used by a script, using bytecode analysis.
Based on the stdlib modulefinder by Thomas Heller and Just van Rossum,
but uses a graph data structure and 2.3 features
"""
from pkg_resources import require
require("altgraph")
import dis
import imp
import marshal
import os
import sys
import new
import struct
import urllib
from itertools import ifilter, imap
from altgraph.Dot import Dot
from altgraph.ObjectGraph import ObjectGraph
from altgraph.GraphUtil import filter_stack
from altgraph.compat import *
READ_MODE = "U" # universal line endings
# Single-byte opcodes used when scanning compiled code objects for
# import-related bytecode.
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
# Modulegraph does a good job at simulating Python's, but it can not
# handle packagepath modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping from package names to lists of paths.
packagePathMap = {}
def moduleInfoForPath(path, suffixes=imp.get_suffixes()):
    """Return (module_name, read_mode, module_type) for *path*, or None.

    The extension of *path* is matched against *suffixes*, a sequence of
    (extension, read_mode, type) triples shaped like imp.get_suffixes().
    BUG FIX: the original ignored the 'suffixes' argument and always
    rescanned imp.get_suffixes() inside the loop.
    """
    for (ext, readmode, typ) in suffixes:
        if path.endswith(ext):
            return os.path.basename(path)[:-len(ext)], readmode, typ
    return None
# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra runtime search *path* for *packagename* in
    packagePathMap (honored later when the package is scanned)."""
    packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleGraph.
def ReplacePackage(oldname, newname):
    # Record that imports of 'oldname' should be treated as 'newname'.
    replacePackageMap[oldname] = newname
class Node(object):
    """A module/script vertex in the dependency graph.  Exposes a
    dict-like 'namespace' mapping submodule names to nodes; identity,
    ordering and hashing are all based on 'graphident'."""
    def __init__(self, identifier):
        self.graphident = identifier
        self.identifier = identifier
        self.namespace = {}
        self.filename = None
        self.packagepath = None
        self.code = None
        # The set of global names that are assigned to in the module.
        # This includes those names imported through starimports of
        # Python modules.
        self.globalnames = set()
        # The set of starimports this module did that could not be
        # resolved, ie. a starimport from a non-Python module.
        self.starimports = set()
    def __contains__(self, name):
        return name in self.namespace
    def __getitem__(self, name):
        return self.namespace[name]
    def __setitem__(self, name, value):
        self.namespace[name] = value
    def get(self, *args):
        # dict.get semantics on the namespace (optional default).
        return self.namespace.get(*args)
    def __cmp__(self, other):
        # Python 2 rich-comparison fallback; compares graph identifiers.
        return cmp(self.graphident, other.graphident)
    def __hash__(self):
        return hash(self.graphident)
    def infoTuple(self):
        # Data shown in __repr__; subclasses extend this.
        return (self.identifier,)
    def __repr__(self):
        return '%s%r' % (type(self).__name__, self.infoTuple())
class Alias(str):
    # Marker string type: a lazynodes value of this type means
    # "this name is an alias for the module named by the string".
    pass
class AliasNode(Node):
    """A node that makes one module name an alias for another node by
    copying over the aliased node's interesting state."""
    def __init__(self, name, node):
        super(AliasNode, self).__init__(name)
        # BUG FIX: 'startimports' was a typo for 'starimports'; the
        # misspelled key copied None into a bogus attribute while the
        # aliased node's real star-import set was never transferred.
        for k in ['identifier', 'packagepath', 'namespace', 'globalnames', 'starimports']:
            setattr(self, k, getattr(node, k, None))
    def infoTuple(self):
        # Show both the alias name and the target identifier.
        return (self.graphident, self.identifier)
class BadModule(Node):
    """Base class for modules that could not be imported."""
    pass
class ExcludedModule(BadModule):
    """A module explicitly excluded via the 'excludes' list."""
    pass
class MissingModule(BadModule):
    """A module that was imported somewhere but could not be found."""
    pass
class Script(Node):
    """A top-level Python script, identified by its file path rather
    than a module name."""
    def __init__(self, filename):
        super(Script, self).__init__(filename)
        self.filename = filename
    def infoTuple(self):
        return (self.filename,)
class BaseModule(Node):
    """Common base for all found modules: name plus optional filename
    and package search path."""
    def __init__(self, name, filename=None, path=None):
        super(BaseModule, self).__init__(name)
        self.filename = filename
        self.packagepath = path
    def infoTuple(self):
        # Drop the None entries so the repr stays compact.
        return tuple(filter(None, (self.identifier, self.filename, self.packagepath)))
class BuiltinModule(BaseModule):
    """A module compiled into the interpreter (no file)."""
    pass
class SourceModule(BaseModule):
    """A module loaded from a .py source file."""
    pass
class CompiledModule(BaseModule):
    """A module loaded from a .pyc/.pyo bytecode file."""
    pass
class Package(BaseModule):
    """A package (directory with __init__)."""
    pass
class FlatPackage(BaseModule):
    """A package whose contents are stored flat (e.g. in an archive)."""
    pass
class Extension(BaseModule):
    """A C extension module."""
    pass
class NamespaceModule(BaseModule):
    """A namespace package (no concrete __init__ file)."""
    pass
class ModuleGraph(ObjectGraph):
    def __init__(self, path=None, excludes=(), replace_paths=(), implies=(), graph=None, debug=0):
        """Create a module graph.

        path: module search path (defaults to sys.path);
        excludes: module names to treat as excluded;
        implies: mapping of module name -> implied dependencies (or Alias);
        replace_paths: (old, new) prefix pairs applied to code filenames.
        """
        super(ModuleGraph, self).__init__(graph=graph, debug=debug)
        if path is None:
            path = sys.path
        self.path = path
        self.lazynodes = {}
        # excludes is stronger than implies
        self.lazynodes.update(dict(implies))
        for m in excludes:
            self.lazynodes[m] = None
        self.replace_paths = replace_paths
    def implyNodeReference(self, node, other):
        """
        Imply that one node depends on another.
        other may be a module name or another node.
        For use by extension modules and tricky import code
        """
        if not isinstance(other, Node):
            # 'other' is a module name (or (name, caller) tuple): import it
            # and reference every resulting module.
            if not isinstance(other, tuple):
                other = (other, node)
            others = self.import_hook(*other)
            for other in others:
                self.createReference(node, other)
        elif isinstance(other, AliasNode):
            # alias nodes point back at the node that implied them
            self.addNode(other)
            other.connectTo(node)
        else:
            self.createReference(node, other)
    def createReference(self, fromnode, tonode, edge_data='direct'):
        # Thin override supplying a default edge annotation.
        return super(ModuleGraph, self).createReference(fromnode, tonode, edge_data=edge_data)
    def findNode(self, name):
        """
        Find a node by identifier. If a node by that identifier exists,
        it will be returned.
        If a lazy node exists by that identifier with no dependencies (excluded),
        it will be instantiated and returned.
        If a lazy node exists by that identifier with dependencies, it and its
        dependencies will be instantiated and scanned for additional dependencies.
        """
        data = super(ModuleGraph, self).findNode(name)
        if data is not None:
            return data
        if name in self.lazynodes:
            # pop so the lazy entry is materialized at most once
            deps = self.lazynodes.pop(name)
            if deps is None:
                # excluded module
                m = self.createNode(ExcludedModule, name)
            elif isinstance(deps, Alias):
                # alias: import the target and create an AliasNode for it
                other = self._safe_import_hook(deps, None, None).pop()
                m = self.createNode(AliasNode, name, other)
                self.implyNodeReference(m, other)
            else:
                # implied dependencies: import the module, then imply each dep
                m = self._safe_import_hook(name, None, None).pop()
                for dep in deps:
                    self.implyNodeReference(m, dep)
            return m
        return None
    def run_script(self, pathname, caller=None):
        """
        Create a node by path (not module name). It is expected to be a Python
        source file, and will be scanned for dependencies.
        """
        self.msg(2, "run_script", pathname)
        pathname = os.path.realpath(pathname)
        m = self.findNode(pathname)
        if m is not None:
            return m
        co = compile(file(pathname, READ_MODE).read()+'\n', pathname, 'exec')
        if self.replace_paths:
            co = self.replace_paths_in_code(co)
        m = self.createNode(Script, pathname)
        m.code = co
        self.createReference(caller, m)
        # recursively discover the script's imports
        self.scan_code(co, m)
        return m
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """
        Import a module

        Returns the set of module nodes the import brings in; every one of
        them gets a reference edge from 'caller'.
        """
        self.msg(3, "import_hook", name, caller, fromlist)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        modules = set([m])
        if fromlist and m.packagepath:
            modules.update(self.ensure_fromlist(m, fromlist))
        for m in modules:
            self.createReference(caller, m)
        return modules
    def determine_parent(self, caller, level=-1):
        """Return the package node the import is resolved relative to,
        or None for an absolute import.  'level' follows __import__
        semantics: 0 absolute, >=1 explicit relative, -1 implicit."""
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.identifier
        if level >= 1: # relative import
            if caller.packagepath:
                # caller is a package itself: one level less to strip
                level -= 1
            if level == 0:
                parent = self.findNode(pname)
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError, "relative importpath too deep"
            # strip 'level' trailing components to find the anchor package
            pname = ".".join(pname.split(".")[:-level])
            parent = self.findNode(pname)
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.packagepath:
            # implicit relative import from inside a package
            parent = self.findNode(pname)
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # caller is a plain module: its parent is the enclosing package
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.findNode(pname)
            if parent:
                assert parent.identifier == pname
                self.msgout(4, "determine_parent ->", parent)
                return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        """
        Given a calling parent package and an import name determine the containing
        package for the name
        """
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            head, tail = name.split('.', 1)
        else:
            head, tail = name, ''
        if parent:
            qname = parent.identifier + '.' + head
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # retry as an absolute import
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
    """Import each remaining dotted component of *tail* beneath package
    node *q* and return the node of the innermost module.
    Raises ImportError when a component cannot be imported.
    """
    self.msgin(4, "load_tail", q, tail)
    m = q
    while tail:
        # Peel off the next dotted component of the tail.
        i = tail.find('.')
        if i < 0: i = len(tail)
        head, tail = tail[:i], tail[i+1:]
        mname = "%s.%s" % (m.identifier, head)
        m = self.import_module(head, mname, m)
        if not m:
            self.msgout(4, "raise ImportError: No module named", mname)
            raise ImportError, "No module named " + mname
    self.msgout(4, "load_tail ->", m)
    return m
def ensure_fromlist(self, m, fromlist):
    """Yield the submodule nodes of *m* named in *fromlist* (the names
    of a 'from m import a, b' statement), importing them on demand.
    A '*' entry is expanded to all submodules found on disk.
    """
    fromlist = set(fromlist)
    self.msg(4, "ensure_fromlist", m, fromlist)
    if '*' in fromlist:
        fromlist.update(self.find_all_submodules(m))
        fromlist.remove('*')
    for sub in fromlist:
        # Names already present in the package namespace need no import.
        submod = m.get(sub)
        if submod is None:
            fullname = m.identifier + '.' + sub
            submod = self.import_module(sub, fullname, m)
            if submod is None:
                raise ImportError, "No module named " + fullname
        yield submod
def find_all_submodules(self, m):
    """Yield the names of all modules found in the package directories
    of *m*; used to expand 'from package import *'.
    """
    if not m.packagepath:
        return
    # A hardcoded suffix list ([".py", ".pyc", ".pyo"]) used to be built
    # here; moduleInfoForPath now decides what counts as a module
    # (sources, bytecode and extension modules alike), so the dead
    # 'suffixes' computation has been removed.
    for path in m.packagepath:
        try:
            names = os.listdir(path)
        except os.error:
            self.msg(2, "can't list directory", path)
            continue
        # moduleInfoForPath maps a filename to (modname, mode, type),
        # or None for files that are not importable modules.
        for (path, mode, typ) in ifilter(None, imap(moduleInfoForPath, names)):
            if path != '__init__':
                yield path
def import_module(self, partname, fqname, parent):
    """Import the single module *fqname* (*partname* relative to
    *parent*) and return its graph node, or None if it cannot be found.
    Records a reference edge from *parent* to the module.
    """
    self.msgin(3, "import_module", partname, fqname, parent)
    m = self.findNode(fqname)
    if m is not None:
        # Already in the graph: just record the reference.
        self.msgout(3, "import_module ->", m)
        if parent:
            self.createReference(m, parent)
        return m
    if parent and parent.packagepath is None:
        # Parent is not a package, so it cannot contain submodules.
        self.msgout(3, "import_module -> None")
        return None
    try:
        fp, pathname, stuff = self.find_module(partname,
            parent and parent.packagepath, parent)
    except ImportError:
        self.msgout(3, "import_module ->", None)
        return None
    m = self.load_module(fqname, fp, pathname, stuff)
    if parent:
        # Register both the graph edge and the namespace entry.
        self.createReference(m, parent)
        parent[partname] = m
    self.msgout(3, "import_module ->", m)
    return m
def load_module(self, fqname, fp, pathname, (suffix, mode, typ)):
    """Create a graph node of the appropriate class for module *fqname*
    found at *pathname*, scanning its code for imports when possible.

    (suffix, mode, typ) is the imp.find_module() description tuple;
    typ may also be the synthetic NamespaceModule marker produced by
    find_module() for pip-installed namespace packages.
    """
    self.msgin(2, "load_module", fqname, fp and "fp", pathname)
    packagepath = None
    if typ == imp.PKG_DIRECTORY:
        m = self.load_package(fqname, pathname)
        self.msgout(2, "load_module ->", m)
        return m
    if typ == imp.PY_SOURCE:
        co = compile(fp.read()+'\n', pathname, 'exec')
        cls = SourceModule
    elif typ == imp.PY_COMPILED:
        if fp.read(4) != imp.get_magic():
            self.msgout(2, "raise ImportError: Bad magic number", pathname)
            raise ImportError, "Bad magic number in %s" % pathname
        # Skip the 4-byte timestamp that precedes the marshalled code.
        fp.read(4)
        co = marshal.load(fp)
        cls = CompiledModule
    elif typ == imp.C_BUILTIN:
        cls = BuiltinModule
        co = None
    elif typ == NamespaceModule:
        # Namespace package: no code, take __path__ from the live module.
        cls = NamespaceModule
        co = None
        packagepath = sys.modules[fqname].__path__
    else:
        cls = Extension
        co = None
    m = self.createNode(cls, fqname)
    m.filename = pathname
    if co:
        if self.replace_paths:
            co = self.replace_paths_in_code(co)
        m.code = co
        # Walk the bytecode to discover this module's own imports.
        self.scan_code(co, m)
    if packagepath is not None:
        m.packagepath = packagepath
    self.msgout(2, "load_module ->", m)
    return m
def _safe_import_hook(self, name, caller, fromlist, level=-1):
    # wrapper for self.import_hook() that won't raise ImportError
    # Failed imports become MissingModule nodes so the graph records
    # what could not be found instead of aborting the scan.
    try:
        mods = self.import_hook(name, caller, level=level)
    except ImportError, msg:
        self.msg(2, "ImportError:", str(msg))
        m = self.createNode(MissingModule, name)
        self.createReference(caller, m)
    else:
        assert len(mods) == 1
        m = list(mods)[0]
    subs = set([m])
    for sub in (fromlist or ()):
        # If this name is in the module namespace already,
        # then add the entry to the list of substitutions
        if sub in m:
            sm = m[sub]
            if sm is not None:
                subs.add(sm)
                self.createReference(caller, sm)
            continue
        # See if we can load it
        fullname = name + '.' + sub
        sm = self.findNode(fullname)
        if sm is None:
            try:
                sm = self.import_hook(name, caller, [sub], level=level)
            except ImportError, msg:
                self.msg(2, "ImportError:", str(msg))
                sm = self.createNode(MissingModule, fullname)
            else:
                # import_hook returned the package; look up the submodule.
                sm = self.findNode(fullname)
        m[sub] = sm
        if sm is not None:
            self.createReference(sm, m)
            subs.add(sm)
    return subs
def scan_opcodes(self, co,
                 unpack = struct.unpack):
    # Scan the code, and yield 'interesting' opcode combinations
    # Version for Python 2.4 and older
    # Yields ("store", (name,)) for global assignments and
    # ("import", (fromlist, name)) for import statements.
    code = co.co_code
    names = co.co_names
    consts = co.co_consts
    while code:
        c = code[0]
        if c in STORE_OPS:
            # STORE_NAME / STORE_GLOBAL: a name bound at module level.
            oparg, = unpack('<H', code[1:3])
            yield "store", (names[oparg],)
            code = code[3:]
            continue
        if c == LOAD_CONST and code[3] == IMPORT_NAME:
            # LOAD_CONST fromlist immediately followed by IMPORT_NAME.
            oparg_1, oparg_2 = unpack('<xHxH', code[:6])
            yield "import", (consts[oparg_1], names[oparg_2])
            code = code[6:]
            continue
        # Skip any other opcode (3 bytes with argument, else 1 byte).
        if c >= HAVE_ARGUMENT:
            code = code[3:]
        else:
            code = code[1:]
def scan_opcodes_25(self, co,
                    unpack = struct.unpack):
    # Scan the code, and yield 'interesting' opcode combinations
    # Python 2.5 version (has absolute and relative imports)
    # Yields ("store", ...), ("import", ...), ("absolute_import", ...)
    # or ("relative_import", (level, fromlist, name)).
    code = co.co_code
    names = co.co_names
    consts = co.co_consts
    LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
    while code:
        c = code[0]
        if c in STORE_OPS:
            oparg, = unpack('<H', code[1:3])
            yield "store", (names[oparg],)
            code = code[3:]
            continue
        if code[:9:3] == LOAD_LOAD_AND_IMPORT:
            # LOAD_CONST level; LOAD_CONST fromlist; IMPORT_NAME name.
            oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
            level = consts[oparg_1]
            if level == -1: # normal import
                yield "import", (consts[oparg_2], names[oparg_3])
            elif level == 0: # absolute import
                yield "absolute_import", (consts[oparg_2], names[oparg_3])
            else: # relative import
                yield "relative_import", (level, consts[oparg_2], names[oparg_3])
            code = code[9:]
            continue
        # Skip any other opcode (3 bytes with argument, else 1 byte).
        if c >= HAVE_ARGUMENT:
            code = code[3:]
        else:
            code = code[1:]
def scan_code(self, co, m):
    """Scan code object *co* of module node *m*, record global names and
    follow every import found, recursing into nested code objects.
    """
    # NOTE(review): 'code' appears unused here; the scanners read co
    # themselves.
    code = co.co_code
    if sys.version_info >= (2, 5):
        scanner = self.scan_opcodes_25
    else:
        scanner = self.scan_opcodes
    for what, args in scanner(co):
        if what == "store":
            name, = args
            m.globalnames.add(name)
        elif what in ("import", "absolute_import"):
            fromlist, name = args
            have_star = 0
            if fromlist is not None:
                if "*" in fromlist:
                    have_star = 1
                fromlist = [f for f in fromlist if f != "*"]
            if what == "absolute_import": level = 0
            else: level = -1
            self._safe_import_hook(name, m, fromlist, level=level)
            if have_star:
                # We've encountered an "import *". If it is a Python module,
                # the code has already been parsed and we can suck out the
                # global names.
                mm = None
                if m.packagepath:
                    # At this point we don't know whether 'name' is a
                    # submodule of 'm' or a global module. Let's just try
                    # the full name first.
                    mm = self.findNode(m.identifier+ "." + name)
                if mm is None:
                    mm = self.findNode(name)
                if mm is not None:
                    m.globalnames.update(mm.globalnames)
                    m.starimports.update(mm.starimports)
                    if mm.code is None:
                        # No code to inspect (extension/builtin): names
                        # cannot be determined, keep the star import.
                        m.starimports.add(name)
                else:
                    m.starimports.add(name)
        elif what == "relative_import":
            level, fromlist, name = args
            if name:
                self._safe_import_hook(name, m, fromlist, level=level)
            else:
                # "from . import x": import from the parent package.
                parent = self.determine_parent(m, level=level)
                self._safe_import_hook(parent.identifier, None, fromlist, level=0)
        else:
            # We don't expect anything else from the generator.
            raise RuntimeError(what)
    # Recurse into nested code objects (functions, classes, lambdas...).
    for c in co.co_consts:
        if isinstance(c, type(co)):
            self.scan_code(c, m)
def load_package(self, fqname, pathname):
    """Create a Package node for *fqname* located at *pathname*, honour
    simulated runtime __path__ additions, and load its __init__ module.
    """
    self.msgin(2, "load_package", fqname, pathname)
    # Some packages are analyzed under a replacement name.
    newname = replacePackageMap.get(fqname)
    if newname:
        fqname = newname
    m = self.createNode(Package, fqname)
    m.filename = pathname
    # As per comment at top of file, simulate runtime packagepath additions.
    additions = packagePathMap.get(fqname, [])
    if pathname in additions:
        m.packagepath = additions
    else:
        m.packagepath = [pathname]+additions
    fp, buf, stuff = self.find_module("__init__", m.packagepath)
    self.load_module(fqname, fp, buf, stuff)
    self.msgout(2, "load_package ->", m)
    return m
def find_module(self, name, path, parent=None):
    """Locate module *name* on *path* and return an imp.find_module()
    style tuple (fp, pathname, description).

    Raises ImportError if the node is already in the graph or the
    module cannot be found.  Namespace packages installed without an
    __init__ are reported with the synthetic NamespaceModule type.
    """
    if parent is not None:
        # assert path is not None
        fullname = parent.identifier+'.'+name
    else:
        fullname = name
    node = self.findNode(fullname)
    if node is not None:
        self.msgout(3, "find_module -> already included?", node)
        raise ImportError, name
    if path is None:
        if name in sys.builtin_module_names:
            return (None, None, ("", "", imp.C_BUILTIN))
        path = self.path
    try:
        fp, buf, stuff = imp.find_module(name, path)
    except ImportError:
        # pip installed namespace packages without a __init__
        m = sys.modules.get(fullname)
        if m is None or getattr(m, "__file__", None) or not getattr(m, "__path__", None):
            raise
        return (None, None, ("", "", NamespaceModule))
    if buf:
        # Resolve symlinks so each module has a canonical location.
        buf = os.path.realpath(buf)
    return (fp, buf, stuff)
def create_xref(self, out=None):
    """Write an HTML cross-reference of all graph nodes to *out*
    (default sys.stdout), linking each module to what it imports and
    what imports it.  Scripts are listed before modules.
    """
    if out is None:
        out = sys.stdout
    scripts = []
    mods = []
    for mod in self.flatten():
        name = os.path.basename(mod.identifier)
        if isinstance(mod, Script):
            scripts.append((name, mod))
        else:
            mods.append((name, mod))
    scripts.sort()
    mods.sort()
    scriptnames = [name for name, m in scripts]
    # Concatenate: scripts first, then the sorted modules.
    scripts.extend(mods)
    mods = scripts
    title = "modulegraph cross reference for " + ', '.join(scriptnames)
    print >>out, """<html><head><title>%s</title></head>
<body><h1>%s</h1>""" % (title, title)
    def sorted_namelist(mods):
        # Sorted basenames of the given nodes; None entries are dropped.
        lst = [os.path.basename(mod.identifier) for mod in mods if mod]
        lst.sort()
        return lst
    for name, m in mods:
        if isinstance(m, BuiltinModule):
            print >>out, """<a name="%s" /><tt>%s</tt>
<i>(builtin module)</i> <br />""" % (name, name)
        elif isinstance(m, Extension):
            print >>out, """<a name="%s" /><tt>%s</tt> <tt>%s</tt></a>
<br />""" % (name, name, m.filename)
        else:
            # Ordinary source module: link to the source file itself.
            url = urllib.pathname2url(m.filename or "")
            print >>out, """<a name="%s" />
<a target="code" href="%s" type="text/plain"><tt>%s</tt></a>
<br />""" % (name, url, name)
        # Outgoing (imports) and incoming (imported by) edge names.
        oute, ince = map(sorted_namelist, self.get_edges(m))
        if oute:
            print >>out, 'imports:'
            for n in oute:
                print >>out, """<a href="#%s">%s</a>""" % (n, n)
            print >>out, '<br />'
        if ince:
            print >>out, 'imported by:'
            for n in ince:
                print >>out, """<a href="#%s">%s</a>""" % (n, n)
            print >>out, '<br />'
        print >>out, '<br/>'
    print >>out, '</body></html>'
def itergraphreport(self, name='G', flatpackages=()):
    """Yield the lines of a Graphviz .dot description of the module
    graph, clustering the modules of each package into a subgraph and
    rewriting cross-package edges onto package nodes.
    """
    nodes = map(self.graph.describe_node, self.graph.iterdfs(self))
    describe_edge = self.graph.describe_edge
    edges = deque()
    packagenodes = set()
    packageidents = {}
    nodetoident = {}
    inpackages = {}
    mainedges = set()

    # XXX - implement
    flatpackages = dict(flatpackages)

    def nodevisitor(node, data, outgoing, incoming):
        # Graphviz attributes for a single node (record shape).
        if not isinstance(data, Node):
            return {'label': str(node)}
        #if isinstance(d, (ExcludedModule, MissingModule, BadModule)):
        #    return None
        s = '<f0> ' + type(data).__name__
        for i,v in izip(count(1), data.infoTuple()[:1]):
            s += '| <f%d> %s' % (i,v)
        return {'label':s, 'shape':'record'}

    def edgevisitor(edge, data, head, tail):
        # Graphviz attributes for a single edge.
        if data == 'orphan':
            return {'style':'dashed'}
        elif data == 'pkgref':
            return {'style':'dotted'}
        return {}

    yield 'digraph %s {\n' % (name,)
    attr = dict(rankdir='LR', concentrate='true')
    cpatt = '%s="%s"'
    for item in attr.iteritems():
        yield '\t%s;\n' % (cpatt % item,)

    # find all packages (subgraphs)
    for (node, data, outgoing, incoming) in nodes:
        nodetoident[node] = getattr(data, 'identifier', None)
        if isinstance(data, Package):
            packageidents[data.identifier] = node
            inpackages[node] = set([node])
            packagenodes.add(node)

    # create sets for subgraph, write out descriptions
    for (node, data, outgoing, incoming) in nodes:
        # update edges
        for edge in imap(describe_edge, outgoing):
            edges.append(edge)

        # describe node
        yield '\t"%s" [%s];\n' % (
            node,
            ','.join([
                (cpatt % item) for item in
                nodevisitor(node, data, outgoing, incoming).iteritems()
            ]),
        )

        # Record which package cluster (if any) this node belongs to.
        inside = inpackages.get(node)
        if inside is None:
            inside = inpackages[node] = set()
        ident = nodetoident[node]
        if ident is None:
            continue
        pkgnode = packageidents.get(ident[:ident.rfind('.')])
        if pkgnode is not None:
            inside.add(pkgnode)

    graph = []
    subgraphs = {}
    for key in packagenodes:
        subgraphs[key] = []

    # Rewrite edges so inter-package references point at package nodes;
    # requeued edges are reprocessed until they settle.
    while edges:
        edge, data, head, tail = edges.popleft()
        if ((head, tail)) in mainedges:
            continue
        mainedges.add((head, tail))
        tailpkgs = inpackages[tail]
        common = inpackages[head] & tailpkgs
        if not common and tailpkgs:
            # Tail lives in a package the head does not share: route the
            # edge through the package node plus a dotted 'pkgref' edge.
            usepkgs = sorted(tailpkgs)
            if len(usepkgs) != 1 or usepkgs[0] != tail:
                edges.append((edge, data, head, usepkgs[0]))
                edges.append((edge, 'pkgref', usepkgs[-1], tail))
                continue
        if common:
            # Both endpoints share a package: keep the edge inside its
            # cluster, or re-root it on the package node.
            common = common.pop()
            if tail == common:
                edges.append((edge, data, tail, head))
            elif head == common:
                subgraphs[common].append((edge, 'pkgref', head, tail))
            else:
                edges.append((edge, data, common, head))
                edges.append((edge, data, common, tail))
        else:
            graph.append((edge, data, head, tail))

    def do_graph(edges, tabs):
        # Emit one dot edge statement per entry at the given indent.
        edgestr = tabs + '"%s" -> "%s" [%s];\n'
        # describe edge
        for (edge, data, head, tail) in edges:
            attribs = edgevisitor(edge, data, head, tail)
            yield edgestr % (
                head,
                tail,
                ','.join([(cpatt % item) for item in attribs.iteritems()]),
            )

    for g, edges in subgraphs.iteritems():
        yield '\tsubgraph "cluster_%s" {\n' % (g,)
        yield '\t\tlabel="%s";\n' % (nodetoident[g],)
        for s in do_graph(edges, '\t\t'):
            yield s
        yield '\t}\n'

    for s in do_graph(graph, '\t'):
        yield s

    yield '}\n'
def graphreport(self, fileobj=None, flatpackages=()):
    """Write the Graphviz dot report to *fileobj* (default sys.stdout)."""
    if fileobj is None:
        fileobj = sys.stdout
    fileobj.writelines(self.itergraphreport(flatpackages=flatpackages))
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print
print "%-15s %-25s %s" % ("Class", "Name", "File")
print "%-15s %-25s %s" % ("----", "----", "----")
# Print modules found
sorted = [(os.path.basename(mod.identifier), mod) for mod in self.flatten()]
sorted.sort()
for (name, m) in sorted:
print "%-15s %-25s %s" % (type(m).__name__, name, m.filename or "")
def replace_paths_in_code(self, co):
    """Return a copy of code object *co* with co_filename rewritten
    according to the (from, to) pairs in self.replace_paths; recurses
    into nested code objects.
    """
    new_filename = original_filename = os.path.normpath(co.co_filename)
    for f, r in self.replace_paths:
        # Normalize both sides to directory prefixes (trailing separator).
        f = os.path.join(f, '')
        r = os.path.join(r, '')
        if original_filename.startswith(f):
            new_filename = r + original_filename[len(f):]
            break
    consts = list(co.co_consts)
    for i in range(len(consts)):
        if isinstance(consts[i], type(co)):
            consts[i] = self.replace_paths_in_code(consts[i])
    # Rebuild the code object with the new filename and patched consts.
    return new.code(co.co_argcount, co.co_nlocals, co.co_stacksize,
                    co.co_flags, co.co_code, tuple(consts), co.co_names,
                    co.co_varnames, new_filename, co.co_name,
                    co.co_firstlineno, co.co_lnotab,
                    co.co_freevars, co.co_cellvars)
def main():
    """Command-line driver: build a ModuleGraph for the given script (or
    this file by default) and print a text or Graphviz report.

    Options: -d increase debug, -q quiet, -m treat following args as
    modules, -p add path entries, -x exclude a module, -g dot output.
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dgmp:qx:")
    except getopt.error, msg:
        print msg
        return

    # Process options
    debug = 1
    domods = 0
    dodot = False
    addpath = []
    excludes = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            excludes.append(a)
        if o == '-g':
            dodot = True

    # Provide default arguments
    if not args:
        script = __file__
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print " ", repr(item)

    # Create the module finder and turn its crank
    mf = ModuleGraph(path, excludes=excludes, debug=debug)
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            # A trailing '.*' means "import everything in the package".
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.run_script(arg)
    mf.run_script(script)
    if dodot:
        mf.graphreport()
    else:
        mf.report()
    return mf # for -i debugging
# Script entry point: run the CLI driver, swallowing Ctrl-C politely.
if __name__ == '__main__':
    try:
        mf = main()
    except KeyboardInterrupt:
        print "\n[interrupt]"
| 34.759698 | 98 | 0.533435 |
742b5246be64f9761939aefc4d01a265a26d2551 | 437 | py | Python | solowpy/__init__.py | davidrpugh/solowPy | 91577e04481cec80679ae571ec2bdaa5788151b4 | [
"MIT"
] | 31 | 2016-02-29T00:20:53.000Z | 2022-01-26T17:40:38.000Z | solowpy/__init__.py | rfonsek/solowPy | 91577e04481cec80679ae571ec2bdaa5788151b4 | [
"MIT"
] | 11 | 2015-04-04T20:01:35.000Z | 2017-02-20T05:42:49.000Z | solowpy/__init__.py | rfonsek/solowPy | 91577e04481cec80679ae571ec2bdaa5788151b4 | [
"MIT"
] | 20 | 2015-08-23T23:42:09.000Z | 2022-02-23T08:00:53.000Z | """
models directory imports
objects imported here will live in the `solowpy` namespace
"""
__all__ = ['model', 'Model', 'CobbDouglasModel', 'CESModel']
from . model import Model
from . import model
from . cobb_douglas import CobbDouglasModel
from . import cobb_douglas
from . ces import CESModel
from . import ces
# Add Version Attribute
from pkg_resources import get_distribution
__version__ = get_distribution('solowPy').version
| 21.85 | 60 | 0.775744 |
e9d8c071af79946867b0b3b63fd607b47bbe25d0 | 730 | py | Python | adapters/rgb_adapter.py | russdan/domoticz-zigbee2mqtt-plugin | d47895eab44bc87fc19ce151698d2afe9554fadc | [
"MIT"
] | 146 | 2018-09-19T11:38:48.000Z | 2022-03-21T11:54:12.000Z | adapters/rgb_adapter.py | russdan/domoticz-zigbee2mqtt-plugin | d47895eab44bc87fc19ce151698d2afe9554fadc | [
"MIT"
] | 783 | 2018-09-28T17:07:14.000Z | 2022-03-31T10:18:27.000Z | adapters/rgb_adapter.py | russdan/domoticz-zigbee2mqtt-plugin | d47895eab44bc87fc19ce151698d2afe9554fadc | [
"MIT"
] | 147 | 2018-09-25T18:39:51.000Z | 2022-03-01T19:31:27.000Z | from adapters.base_adapter import Adapter
from adapters.generic.mixins.rgb import RGBMixin
from devices.rgb_light import RGBLight
class RGBAdapter(Adapter, RGBMixin):
    """Adapter exposing an RGB-capable light as a single Domoticz
    RGBLight device, with color temperature rescaled to 0-255."""

    def __init__(self):
        super().__init__()
        self.dimmer = RGBLight('light', 'state_brightness_color')
        self.devices.append(self.dimmer)

    def convert_message(self, message):
        message = super().convert_message(message)
        raw = message.raw
        if 'color_temp' in raw:
            # Device reports mireds up to 500; rescale into 0-255.
            raw['color_temp'] = int(raw['color_temp'] * 255 / 500)
        return message

    def handle_command(self, alias, device, command, level, color):
        # Publish the color/level command on the device's set topic.
        return self.set_color(self.name + '/set', command, level, color)
ba85d0a93156c015345cddd54aff119481f6b0b8 | 6,157 | py | Python | powerspectrum_interface/pyhmx/hmx.py | tilmantroester/pyhmcode | 0c10c81b86de308f2c6af108b7d5691751889da2 | [
"MIT"
] | 2 | 2021-09-16T07:18:43.000Z | 2022-02-24T13:31:14.000Z | powerspectrum_interface/pyhmx/hmx.py | tilmantroester/pyhmcode | 0c10c81b86de308f2c6af108b7d5691751889da2 | [
"MIT"
] | 1 | 2021-07-28T11:53:09.000Z | 2021-07-28T12:47:25.000Z | powerspectrum_interface/pyhmx/hmx.py | tilmantroester/pyhmcode | 0c10c81b86de308f2c6af108b7d5691751889da2 | [
"MIT"
] | null | null | null | import os
import ctypes as ct
import numpy as np
def _array_ctype(ndim, dtype=np.float64, flags="F"):
return [ct.POINTER(ct.c_int)]*ndim + [np.ctypeslib.ndpointer(ndim=ndim, dtype=dtype, flags=flags)]
def _array_arg(a):
arr = a
return (*(ct.c_int(s) for s in arr.shape), arr)
def _load_lib(libname, path=None):
    """Load shared library *libname* from *path* (default: the directory
    containing this module) via ctypes and return the CDLL handle."""
    if path is None:
        path = os.path.dirname(__file__)
    libpath = os.path.abspath(os.path.join(path, libname))
    return ct.CDLL(libpath)

# Compiled Fortran wrapper shipped alongside this module; loaded once
# at import time and shared by all HMx instances below.
_hmx_lib = _load_lib("libhmx_wrapper.so")
class HMxConstants:
    """Mirror of the integer constants exported by the HMx library.

    Each listed name is read from the shared-library symbol
    'constant_<name>' (lowercased) and exposed as an int attribute,
    e.g. constants.HMCode2016 or constants.field_matter.
    """
    def __init__(self, lib):
        for c in ["HMCode2016", "HMCode2016_CAMB", "HMCode2020",
                  "HMx2020_matter_with_temperature_scaling", "HMx2020_matter_pressure_with_temperature_scaling",
                  "field_dmonly", "field_matter", "field_cdm", "field_gas", "field_stars", "field_electron_pressure"]:
            setattr(self, c, ct.c_int.in_dll(lib, f"constant_{c.lower()}").value)

# Module-level singleton; used for default arguments in HMx below.
constants = HMxConstants(_hmx_lib)
class HMx:
    """Thin ctypes front-end to the compiled HMx/HMCode halo-model
    power spectrum library (libhmx_wrapper.so)."""

    # gfortran module name used for non-C-bound symbol mangling.
    module_name = "HMx_wrapper"

    def __init__(self):
        self.lib = _hmx_lib

    def run_HMCode(self, cosmology=None, halo_model=None,
                   k=None, z=None,
                   pk_lin=None,
                   verbose=False):
        """Run HMCode2016 for the dark-matter-only field.

        cosmology/halo_model are dicts of parameter overrides; k, z and
        the linear spectrum pk_lin (shape (len(z), len(k))) are
        required.  Returns the non-linear spectrum, shape
        (len(z), len(k)) in CAMB ordering.
        """
        cosmology = cosmology or {}
        halo_model = halo_model or {}
        pofk = self._run_hmx(cosmology.get("Omega_m"), cosmology.get("Omega_b"), cosmology.get("Omega_v"),
                             cosmology.get("h"), cosmology.get("n_s"), cosmology.get("sigma_8"),
                             cosmology.get("m_nu", 0.06), cosmology.get("w", -1.0), cosmology.get("w_a", 0.0),
                             halo_model_mode=constants.HMCode2016, Theat=10**7.8,
                             eta0=halo_model.get("eta0", 0.603), As=halo_model.get("A", 3.13),
                             fields=np.array([constants.field_dmonly]),
                             k=k, z=z,
                             pk_lin=pk_lin,
                             verbose=verbose)
        # Only one field was requested: return its auto-spectrum.
        return pofk[0,0]

    def run_HMx(self, cosmology=None, halo_model=None, fields=None,
                mode=constants.HMx2020_matter_with_temperature_scaling,
                k=None, z=None, pk_lin=None,
                verbose=False):
        """Run HMx for a list of fields (default: matter only) and
        return all auto- and cross-spectra as a 4D array indexed
        [field_i, field_j, z, k]."""
        cosmology = cosmology or {}
        halo_model = halo_model or {}
        fields = fields or [constants.field_matter]
        fields = np.array(fields)
        pofk = self._run_hmx(cosmology.get("Omega_m"), cosmology.get("Omega_b"), cosmology.get("Omega_v"),
                             cosmology.get("h"), cosmology.get("n_s"), cosmology.get("sigma_8"),
                             cosmology.get("m_nu", 0.06), cosmology.get("w", -1.0), cosmology.get("w_a", 0.0),
                             halo_model_mode=mode, Theat=halo_model.get("Theat", 10**7.8),
                             eta0=0.603, As=3.13,
                             fields=fields,
                             k=k, z=z,
                             pk_lin=pk_lin,
                             verbose=verbose)
        return pofk

    def get_function(self, name, c_bind=True):
        """Look up symbol *name* in the shared library, either by its
        C-binding name or by the gfortran module-mangled name."""
        if c_bind:
            return getattr(self.lib, name)
        else:
            return getattr(self.lib, f"__{self.module_name}_MOD_{name}")

    def _run_hmx(self, omm, omb, omv, h, ns, sigma8, mnu, w, wa,
                 halo_model_mode, Theat, eta0, As,
                 fields=None,
                 k=None, z=None,
                 pk_lin=None,
                 verbose=True):
        """Low-level ctypes call into the library's run_HMx entry point.

        Arrays are passed Fortran-ordered; z must be increasing, and the
        a-grid and pk_lin are reversed/transposed to match the library's
        expectations.  Returns the spectra with axes restored to CAMB
        (field, field, z, k) order.  Raises ValueError on malformed
        input and RuntimeError on a nonzero library status.
        """
        f = self.get_function("run_HMx")
        f.restype = ct.c_int
        # Scalars are passed by reference; arrays as (shape..., data).
        f.argtypes = [ct.POINTER(ct.c_double), # omm
                      ct.POINTER(ct.c_double), # omb
                      ct.POINTER(ct.c_double), # omv
                      ct.POINTER(ct.c_double), # mnu
                      ct.POINTER(ct.c_double), # h
                      ct.POINTER(ct.c_double), # ns
                      ct.POINTER(ct.c_double), # sigma8
                      ct.POINTER(ct.c_double), # w
                      ct.POINTER(ct.c_double), # wa
                      ct.POINTER(ct.c_int), # halo_model_mode
                      ct.POINTER(ct.c_double), # Theat
                      ct.POINTER(ct.c_double), # eta0
                      ct.POINTER(ct.c_double), # As
                      *_array_ctype(ndim=1, dtype=np.int32), # fields
                      *_array_ctype(ndim=1, dtype=np.float64), # k
                      *_array_ctype(ndim=1, dtype=np.float64), # a
                      *_array_ctype(ndim=2, dtype=np.float64), # Pk_lin
                      *_array_ctype(ndim=4, dtype=np.float64), # Pk_hmx
                      ct.POINTER(ct.c_bool), # verbose
                      ]
        # Output buffer, Fortran-ordered for the library to fill in.
        # NOTE(review): len(k)/len(z) are evaluated before the None
        # check below, so missing k/z raise TypeError, not ValueError.
        Pk_hmx = np.zeros((len(fields), len(fields), len(k), len(z)), dtype=np.float64, order="F")
        if k is None or z is None or pk_lin is None:
            raise ValueError("k, z, and pk_lin need to be specified.")
        if (len(z), len(k)) != pk_lin.shape:
            raise ValueError("Shape of pk_lin does not match z and k arrays.")
        if len(z) > 1 and z[0] > z[1]:
            raise ValueError("Redshift needs to be increasing.")
        a = 1/(1+np.array(z))
        status = f(ct.c_double(omm), ct.c_double(omb), ct.c_double(omv), ct.c_double(mnu),
                   ct.c_double(h), ct.c_double(ns), ct.c_double(sigma8), ct.c_double(w), ct.c_double(wa),
                   ct.c_int(halo_model_mode), ct.c_double(Theat), ct.c_double(eta0), ct.c_double(As),
                   *_array_arg(np.ascontiguousarray(fields, dtype=np.int32)),
                   *_array_arg(np.ascontiguousarray(k, dtype=np.float64)),
                   *_array_arg(np.ascontiguousarray(a[::-1], dtype=np.float64)), # Reverse order for HMx
                   *_array_arg(np.asfortranarray(pk_lin[::-1].T, dtype=np.float64)), # Reverse order and transpose to (k, z) for HMx
                   *_array_arg(Pk_hmx),
                   ct.c_bool(verbose)
                   )
        if status != 0:
            raise RuntimeError("HMx failed.")
        # Restore CAMB order
        return np.swapaxes(Pk_hmx[...,::-1], 2, 3)
| 44.941606 | 124 | 0.529641 |
31d746d6db97ef618ceb59557ec4d5b91ac99494 | 2,009 | py | Python | conflowgen/api/truck_arrival_distribution_manager.py | 1kastner/conflowgen | 02f242517f1377ce45685099bf3196578321751a | [
"MIT"
] | 5 | 2022-02-16T11:44:42.000Z | 2022-02-24T20:02:17.000Z | conflowgen/api/truck_arrival_distribution_manager.py | 1kastner/conflowgen | 02f242517f1377ce45685099bf3196578321751a | [
"MIT"
] | 90 | 2021-12-08T14:05:44.000Z | 2022-03-24T08:53:31.000Z | conflowgen/api/truck_arrival_distribution_manager.py | 1kastner/conflowgen | 02f242517f1377ce45685099bf3196578321751a | [
"MIT"
] | 5 | 2021-12-07T16:05:15.000Z | 2022-02-16T08:24:07.000Z | from typing import Dict
from conflowgen.api import AbstractDistributionManager
from conflowgen.domain_models.distribution_repositories.truck_arrival_distribution_repository import \
TruckArrivalDistributionRepository
class TruckArrivalDistributionManager(AbstractDistributionManager):
    """
    This manager provides the interface to set and get the weekly arrival rates of trucks. When the truck arrival time
    is drawn from this distribution, first a slice for the minimum and maximum dwell time is created and the arrival
    time of the truck is drawn from that period.
    All other vehicles are created based on the schedule they adhere to with the help of the
    :class:`.PortCallManager`
    """

    def __init__(self):
        self.truck_arrival_distribution_repository = TruckArrivalDistributionRepository()

    def get_truck_arrival_distribution(self) -> Dict[int, float]:
        """
        Each key represents the hour in the week and each value represents the
        probability of a truck to arrive between that hour and the start of the next time slot (the successor
        is the nearest key larger than the current key).
        Returns:
            The truck arrival distribution.
        """
        repository = self.truck_arrival_distribution_repository
        return repository.get_distribution()

    def set_truck_arrival_distribution(self, distribution: Dict[int, float]) -> None:
        """
        Args:
            distribution: The truck arrival distribution.
                Each key represents the hour in the week and each value represents the
                probability of a truck to arrive between that hour and the start of the next time slot (the successor is
                the nearest key larger than the current key).
        """
        # Normalize/validate via the shared base-class helper, then persist.
        self.truck_arrival_distribution_repository.set_distribution(
            self._normalize_and_validate_distribution_without_dependent_variables(distribution, int)
        )
7a8881b50edf122022ff77e68ce51de385727175 | 873 | py | Python | tests/test_fixtures.py | hdsr-mid/path_finder | 4d74b07501d3676e6aabfccd7045ace48aa4a4cf | [
"MIT"
] | null | null | null | tests/test_fixtures.py | hdsr-mid/path_finder | 4d74b07501d3676e6aabfccd7045ace48aa4a4cf | [
"MIT"
] | null | null | null | tests/test_fixtures.py | hdsr-mid/path_finder | 4d74b07501d3676e6aabfccd7045ace48aa4a4cf | [
"MIT"
] | null | null | null | from tests.fixtures import temp_tree_structure1
# silence flake8
temp_tree_structure1 = temp_tree_structure1
def test_temp_tree_structure(temp_tree_structure1):
    """Ensure a certain dir+file structure for tests that use temp_tree_structure."""
    root = temp_tree_structure1
    pattern = "*.txt"
    assert len(list(root.glob(pattern))) == 1
    assert len(list(root.rglob(pattern))) == 3  # recursively
    assert len(list(root.glob("**/" + pattern))) == 3
    assert len(list(root.glob("**/**/" + pattern))) == 3  # same as above..
    pattern = "*.jpg"
    assert len(list(root.glob(pattern))) == 0
    assert len(list(root.rglob(pattern))) == 2  # recursively
    assert len(list(root.glob("**/" + pattern))) == 2
86648a324a1001ebcc9d595dff3f38cfd0abee17 | 12,182 | py | Python | BatchProcesses/merge_xrfs_into_ana_v7_2frame_from_J_ana.py | johnmgregoire/JCAPDataProcess | c8120e5b2f8fc840a6307b40293dccaf94bd8c2c | [
"BSD-3-Clause"
] | 5 | 2017-03-24T21:05:22.000Z | 2021-09-15T18:18:05.000Z | BatchProcesses/merge_xrfs_into_ana_v7_2frame_from_J_ana.py | johnmgregoire/JCAPDataProcess | c8120e5b2f8fc840a6307b40293dccaf94bd8c2c | [
"BSD-3-Clause"
] | null | null | null | BatchProcesses/merge_xrfs_into_ana_v7_2frame_from_J_ana.py | johnmgregoire/JCAPDataProcess | c8120e5b2f8fc840a6307b40293dccaf94bd8c2c | [
"BSD-3-Clause"
] | null | null | null | import numpy, copy, operator
if __name__ == "__main__":
import os, sys
#Needed for running line-by-line
#__file__=r'D:\Google Drive\Documents\PythonCode\JCAP\JCAPDataProcess\BatchProcesses\merge_xrfs_into_ana_v7_2frame_from_J_ana.py'
sys.path.append(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0])
sys.path.append(os.path.join(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0], 'AuxPrograms'))
sys.path.append(os.path.join(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0], 'AnalysisFunctions'))
#import matplotlib.pyplot as plt
from fcns_io import *
from fcns_ui import *
from CalcFOMApp import calcfomDialog
from Analysis_Master import Analysis_Master_nointer
from create_udi_standalone import append_udi_to_ana, append_resampled_merged_patterns_to_ana, smoothfcn
analysismasterclass=Analysis_Master_nointer()
processed_patterns=True
include_1st_frame_solo=False
merge_first=True
class MainMenu(QMainWindow):
    """Minimal headless main window whose only job is to host a
    calcfomDialog (guimode=False) used for the batch processing below."""
    def __init__(self, previousmm, execute=True):#, TreeWidg):
        super(MainMenu, self).__init__(None)
        self.calcui=calcfomDialog(self, title='Calculate FOM from EXP', guimode=False, modifyanainplace=False)
mainapp=QApplication(sys.argv)
form=MainMenu(None)
calcui=form.calcui
calcui.getplatemapCheckBox.setChecked(True)
serial_list='39248,39259,43760,39282,39349,35570,35873,35895,35479,32173,32061,32230,35558,35569,13891,48440,50296,22442,27829,35806,48473,48428,27818,35828,35503,35457,35794,27795,31071,31116'.split(',')
single_frame_pids=['3589','3557']
#plate_list='1389,4847,5037,5035,5036'.split(',')
plate_list=[s[:-1] for s in serial_list]
#plate_list=plate_list[plate_list.index('4847'):]
#plate_list=['3557']
#plate_list=plate_list[1:]
for pid in plate_list:
print 'STARTING ',pid
d=importinfo(pid)
if 0:
els='-'.join([el for el in getelements_plateidstr(pid) if not el in ['Ar']])
els+='/Pt' if True in ['Pt' in pd['elements'] for pd in d['prints'].values()] else ('/'+d['substrate'])
print d['serial_no'],'\t',els
if not 'analyses' in d:
continue
l=[]
for k,ad in d['analyses'].items():
if ad['type']=='xrds':
l+=[(float(os.path.split(ad['path'])[1][:15]),ad['path'])]
if len(l)==0: continue
# if pid=='3587':
# most_recent_xrds=sorted(l)[-2][1]
# else:
most_recent_xrds=sorted(l)[-1][1]#If phase mapping or othr analysis done for this plate then most recent probably isn't the desired one so TODO could be to check for the
l=[]
for k,ad in d['analyses'].items():
if ad['type']=='xrfs':
l+=[(float(os.path.split(ad['path'])[1][:15]),ad['path'])]
if len(l)==0: continue
most_recent_xrfs=sorted(l)[-1][1]
print most_recent_xrfs
p=buildanapath(most_recent_xrds)
#break#TEMP
#import to create tmep folder and delete anything past ana__4, which are the 4 ana created during external import
calcui.importana(p=p)
anakeys=sort_dict_keys_by_counter(calcui.anadict, keystartswith='ana__')
for anak in anakeys[4:][::-1]:
calcui.clearsingleanalysis(anak=anak)
#if ana__2 has no fom csv, make one
anak='ana__2'
if not ('files_multi_run' in calcui.anadict[anak].keys() and 'fom_files' in calcui.anadict[anak]['files_multi_run'].keys()):
calcui.create_default_fom_csv_from_runfiles(anak)
#import xrfs and merge with ana__2 to create ana__5
calcui.importauxexpana(buildanapath(most_recent_xrfs), exp=False)
for i in range(1, int(calcui.FOMProcessNamesComboBox.count())):
if (str(calcui.FOMProcessNamesComboBox.itemText(i)).partition('(')[0])=='Analysis__FOM_Interp_Merge_Ana':
calcui.FOMProcessNamesComboBox.setCurrentIndex(i)
calcui.getactiveanalysisclass()
calcui.processeditedparams()
break
#calcui.exec_()
c=calcui.analysisclass
c.params['select_ana']='ana__2'
c.params['select_aux_keys']='AtFrac'
c.params['aux_ana_ints']='2'
c.params['interp_is_comp']=1
c.processnewparams(calcFOMDialogclass=calcui, recalc_filedlist=True)
tempnum=len(sort_dict_keys_by_counter(calcui.anadict, keystartswith='ana__'))
calcui.analyzedata()
anakeys=sort_dict_keys_by_counter(calcui.anadict, keystartswith='ana__')
if len(anakeys)==tempnum:
print '***; %s; %s' %(buildanapath(most_recent_xrfs), pid)
continue
#calcui.exec_()#WILL STOP HERE IF ERROR IN XRFS MERGE
xrfsmergedanak=anakeys[-1]
#continue#this skips all file writing until the xrfs ana are fixed
newanasavefolder=calcui.saveana(dontclearyet=False, anatype='xrds', rundone='.run')
newanapath=buildanapath(newanasavefolder)
#now have core ana saved as .run and modify in place
num_ana_blocks=len(anakeys)
if pid in single_frame_pids:
#first create separate udi for the processed 1st frame - ana__6
q_key='q.nm_processed'
intensity_key='intensity.counts_processed'
anak_patterns='ana__1'
pattern_fn_search_str='1st_frame'
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_fn_search_str=pattern_fn_search_str, pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
#first create separate udi for the RAW 1st frame - ana__7
q_key='q.nm'
intensity_key='intensity.counts'
anak_patterns='ana__2'
pattern_fn_search_str='1st_frame'
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_fn_search_str=pattern_fn_search_str, pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
#log resam merge ana__1, which is bcknd-sub data, and then append udi - ana__8 and 9
q_key='q.nm_processed'
intensity_key='intensity.counts_processed'
append_resampled_merged_patterns_to_ana(l_anapath=[newanapath], l_anak_patterns=['ana__1'], l_pattern_fn_search_str=['1st_frame'], pattern_key='pattern_files', q_key=q_key,intensity_key=intensity_key, dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
num_ana_blocks+=1
q_key='q.nm_processed_resampled'
intensity_key='intensity.counts_processed_resampled'
anak_patterns='ana__%d' %(num_ana_blocks)
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
#log resam merge ana__2, which is raw, and then append udi - ana__10 and 11
q_key='q.nm'
intensity_key='intensity.counts'
append_resampled_merged_patterns_to_ana(l_anapath=[newanapath], l_anak_patterns=['ana__2'], l_pattern_fn_search_str=['1st_frame'], pattern_key='pattern_files', q_key=q_key,intensity_key=intensity_key, dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
num_ana_blocks+=1
q_key='q.nm_resampled'
intensity_key='intensity.counts_resampled'
anak_patterns='ana__%d' %(num_ana_blocks)
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
else:
#first create separate udi for the RAW 1st and 2nd frame - ana__6 and 7
q_key='q.nm'
intensity_key='intensity.counts'
anak_patterns='ana__2'
pattern_fn_search_str='1st_frame'
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_fn_search_str=pattern_fn_search_str, pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
pattern_fn_search_str='2nd_frame'
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_fn_search_str=pattern_fn_search_str, pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
#lin resam merge ana__1, which is bcknd-sub data, and then append udi - ana__8 and 9
q_key='q.nm_processed'
intensity_key='intensity.counts_processed'
append_resampled_merged_patterns_to_ana(l_anapath=[newanapath,newanapath], l_anak_patterns=['ana__1', 'ana__1'], l_pattern_fn_search_str=['1st_frame', '2nd_frame'], pattern_key='pattern_files', q_key=q_key,intensity_key=intensity_key, dq=None, q_log_space_coef=None, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
num_ana_blocks+=1
q_key='q.nm_processed_resampled'
intensity_key='intensity.counts_processed_resampled'
anak_patterns='ana__%d' %(num_ana_blocks)
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
#log resam merge ana__1, which is bcknd-sub data, and then append udi - ana__10 and 11
q_key='q.nm_processed'
intensity_key='intensity.counts_processed'
append_resampled_merged_patterns_to_ana(l_anapath=[newanapath,newanapath], l_anak_patterns=['ana__1', 'ana__1'], l_pattern_fn_search_str=['1st_frame', '2nd_frame'], pattern_key='pattern_files', q_key=q_key,intensity_key=intensity_key, dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
num_ana_blocks+=1
q_key='q.nm_processed_resampled'
intensity_key='intensity.counts_processed_resampled'
anak_patterns='ana__%d' %(num_ana_blocks)
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
#lin resam merge ana__2, which is raw, and then append udi - ana__12 and 13
q_key='q.nm'
intensity_key='intensity.counts'
append_resampled_merged_patterns_to_ana(l_anapath=[newanapath,newanapath], l_anak_patterns=['ana__2', 'ana__2'], l_pattern_fn_search_str=['1st_frame', '2nd_frame'], pattern_key='pattern_files', q_key=q_key,intensity_key=intensity_key, dq=None, q_log_space_coef=None, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
num_ana_blocks+=1
q_key='q.nm_resampled'
intensity_key='intensity.counts_resampled'
anak_patterns='ana__%d' %(num_ana_blocks)
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
#log resam merge ana__2, which is raw, and then append udi - ana__14 and 15
q_key='q.nm'
intensity_key='intensity.counts'
append_resampled_merged_patterns_to_ana(l_anapath=[newanapath,newanapath], l_anak_patterns=['ana__2', 'ana__2'], l_pattern_fn_search_str=['1st_frame', '2nd_frame'], pattern_key='pattern_files', q_key=q_key,intensity_key=intensity_key, dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
num_ana_blocks+=1
q_key='q.nm_resampled'
intensity_key='intensity.counts_resampled'
anak_patterns='ana__%d' %(num_ana_blocks)
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
num_ana_blocks+=1
print pid,',',num_ana_blocks,',',newanapath
| 53.902655 | 337 | 0.723362 |
a9aa01050da5966f0658348a92dce3f2d4c33761 | 3,953 | py | Python | awwards/tests.py | marykamau2/awwards | 0cb85991b31f8b3d2b4baf5eb985d8ee633ee4ff | [
"MIT"
] | null | null | null | awwards/tests.py | marykamau2/awwards | 0cb85991b31f8b3d2b4baf5eb985d8ee633ee4ff | [
"MIT"
] | null | null | null | awwards/tests.py | marykamau2/awwards | 0cb85991b31f8b3d2b4baf5eb985d8ee633ee4ff | [
"MIT"
] | null | null | null |
# from django.test import TestCase
# from .models import Project,Rating
# from django.contrib.auth.models import User
# # Create your tests here.
# class ProjectTestClass(TestCase):
# # Set up method
# def setUp(self):
# # Creating a new location and saving it
# self.new_user=User(username='mary',email='mmarynjerikamau@gmail.com',password='njeri2018')
# self.new_user.save()
# self.new_project= Project(user=self.new_user,title='Pizza Shop',url='https://localhost:8000',description='This ia a django test description',technologies='Django')
# self.new_project.save()
# # Tear Down method
# def tearDown(self):
# Project.objects.all().delete()
# User.objects.all().delete()
# # Testing instance
# def test_instance(self):
# self.assertTrue(isinstance(self.new_project,Project))
# # Testing Save Method
# def test_save_method(self):
# self.new_project1= Project(user=self.new_user,title='Pizza Shop',url='https://localhost:8000',description='This ia a django test description',technologies='Django')
# self.new_project1.save_project()
# projects = Project.objects.all()
# self.assertTrue(len(projects) == 2)
# # Testing get all images Method
# def test_get_all_projects_method(self):
# projects = Project.get_all_projects()
# self.assertTrue(len(projects) == 1)
# # Testing get all images Method
# def test_get_project_by_id_method(self):
# project = Project.get_project_by_id(self.new_project.id)
# self.assertEqual(project.id,self.new_project.id)
# # Testing delete method
# def test_delete_project(self):
# Project.delete_project(self.new_project.id)
# projects = Project.get_all_projects()
# self.assertTrue(len(projects) == 0)
# # Testing search project by title method
# def test_search_project(self):
# projects=Project.search_project('zza')
# projectss=Project.search_project('Taa')
# self.assertFalse(len(projectss) > 0)
# self.assertTrue(len(projects) > 0)
# # Testing filter by userid method
# def test_filter_by_userid(self):
# projects=Project.filter_by_userid(self.new_user.id)
# self.assertTrue(len(projects) > 0)
# class RatingTestClass(TestCase):
# # Set up method
# def setUp(self):
# # Creating a new location and saving it
# self.new_user=User(username='denno',email='a@gmail.com',password='qwerty1234')
# self.new_user.save()
# self.new_project= Project(user=self.new_user,title='Pizza Shop',url='https://localhost:8000',description='This ia a django test description',technologies='Django')
# self.new_project.save()
# self.new_rating=Rating(user=self.new_user,project=self.new_project,design=5,usability=8,content=7,score=6.67)
# self.new_rating.save()
# # Tear Down method
# def tearDown(self):
# Rating.objects.all().delete()
# Project.objects.all().delete()
# User.objects.all().delete()
# # Testing instance
# def test_instance(self):
# self.assertTrue(isinstance(self.new_rating,Rating))
# # Testing Save Method
# def test_save_method(self):
# self.new_rating1= Rating(user=self.new_user,project=self.new_project,design=5,usability=8,content=7,score=6.67)
# self.new_rating1.save_rating()
# ratings = Rating.objects.all()
# self.assertTrue(len(ratings) == 2)
# # Testing get_project_ratings Method
# def test_get_project_ratings_method(self):
# self.new_rating1= Rating(user=self.new_user,project=self.new_project,design=5,usability=8,content=7,score=6.67)
# self.new_rating1.save_rating()
# ratings = Rating.get_project_ratings(self.new_project.id)
# self.assertTrue(len(ratings) == 2) | 37.292453 | 174 | 0.659246 |
33e6935e71b35816a75d00d4c350408d6be482fb | 3,302 | py | Python | NextcordUtils/InviteTracker.py | amirdadfar9192/NextcordUtils | 83b2eb5d330d06383cd0f51688cdf95cdbbf09eb | [
"MIT"
] | null | null | null | NextcordUtils/InviteTracker.py | amirdadfar9192/NextcordUtils | 83b2eb5d330d06383cd0f51688cdf95cdbbf09eb | [
"MIT"
] | null | null | null | NextcordUtils/InviteTracker.py | amirdadfar9192/NextcordUtils | 83b2eb5d330d06383cd0f51688cdf95cdbbf09eb | [
"MIT"
] | null | null | null | from nextcord.errors import Forbidden
from nextcord import AuditLogAction
from datetime import datetime
from asyncio import sleep
class InviteTracker():
    """Tracks guild invites in an in-memory cache so that the inviter of a
    newly joined member can be resolved by diffing cached invite use counts
    against the live ones.

    Cache layout: ``self._cache[guild_id][invite_code] -> Invite``.
    """

    def __init__(self, bot):
        # Bot instance whose gateway events keep the cache in sync.
        self.bot = bot
        # Per-guild snapshot of known invites, keyed by guild id then code.
        self._cache = {}
        self.add_listeners()

    def add_listeners(self):
        # Register the cache-maintenance coroutines on the relevant events.
        self.bot.add_listener(self.cache_invites, "on_ready")
        self.bot.add_listener(self.update_invite_cache, "on_invite_create")
        self.bot.add_listener(self.remove_invite_cache, "on_invite_delete")
        self.bot.add_listener(self.add_guild_cache, "on_guild_join")
        self.bot.add_listener(self.remove_guild_cache, "on_guild_remove")

    async def cache_invites(self):
        """Snapshot every guild's invites once the bot is ready."""
        for guild in self.bot.guilds:
            try:
                self._cache[guild.id] = {}
                for invite in await guild.invites():
                    self._cache[guild.id][invite.code] = invite
            except Forbidden:
                # Bot lacks permission to list this guild's invites.
                continue

    async def update_invite_cache(self, invite):
        """Record a newly created invite in the cache."""
        if invite.guild.id not in self._cache.keys():
            self._cache[invite.guild.id] = {}
        self._cache[invite.guild.id][invite.code] = invite

    async def remove_invite_cache(self, invite):
        """Handle an invite-delete event.

        A delete event also fires when an invite reaches its use limit; in
        that case the cached copy is flagged ``revoked`` (so fetch_inviter
        can still attribute the final use) instead of being dropped.
        """
        if invite.guild.id not in self._cache.keys():
            return
        ref_invite = self._cache[invite.guild.id][invite.code]
        # Invite was still valid (not expired, or non-expiring) and was one
        # use away from its limit: assume it was consumed, not deleted.
        if (ref_invite.created_at.timestamp()+ref_invite.max_age > datetime.utcnow().timestamp() or ref_invite.max_age == 0) and ref_invite.max_uses > 0 and ref_invite.uses == ref_invite.max_uses-1:
            try:
                # NOTE(review): both branches below are identical, so this
                # audit-log lookup currently has no effect on the outcome —
                # confirm whether a manual delete should behave differently.
                async for entry in invite.guild.audit_logs(limit=1, action=AuditLogAction.invite_delete):
                    if entry.target.code != invite.code:
                        self._cache[invite.guild.id][ref_invite.code].revoked = True
                        return
                    else:
                        self._cache[invite.guild.id][ref_invite.code].revoked = True
                        return
            except Forbidden:
                # No audit-log access: still flag the invite as revoked.
                self._cache[invite.guild.id][ref_invite.code].revoked = True
                return
        else:
            self._cache[invite.guild.id].pop(invite.code)

    async def add_guild_cache(self, guild):
        """Start tracking invites for a guild the bot just joined."""
        self._cache[guild.id] = {}
        for invite in await guild.invites():
            self._cache[guild.id][invite.code] = invite

    async def remove_guild_cache(self, guild):
        """Drop the cache for a guild the bot left (no-op if absent)."""
        try:
            self._cache.pop(guild.id)
        except KeyError:
            return

    async def fetch_inviter(self, member):
        """Return the user whose invite *member* most likely used."""
        # Wait roughly one gateway round-trip so use counts are current.
        await sleep(self.bot.latency)
        for new_invite in await member.guild.invites():
            for cached_invite in self._cache[member.guild.id].values():
                # NOTE(review): ``and`` binds tighter than ``or``, so a
                # revoked cached invite passes this test even when its code
                # does not match new_invite — confirm this is intended.
                if new_invite.code == cached_invite.code and new_invite.uses - cached_invite.uses == 1 or cached_invite.revoked:
                    if cached_invite.revoked:
                        self._cache[member.guild.id].pop(cached_invite.code)
                    elif new_invite.inviter == cached_invite.inviter:
                        self._cache[member.guild.id][cached_invite.code] = new_invite
                    else:
                        self._cache[member.guild.id][cached_invite.code].uses += 1
                    return cached_invite.inviter
117cf5e4b476dea158c55faad3fece6cb28bbe6a | 14,628 | py | Python | test/jpypetest/test_jfloat.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | test/jpypetest/test_jfloat.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | test/jpypetest/test_jfloat.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | import sys
import jpype
import common
import random
import _jpype
import jpype
from jpype import java
from jpype.types import *
try:
import numpy as np
except ImportError:
pass
class JFloatTestCase(common.JPypeTestCase):
    """Tests for the JPype ``JFloat`` primitive: fault injection, widening
    conversions, fixture field/call round-trips, array slicing and numpy
    interoperability.
    """

    def setUp(self):
        common.JPypeTestCase.setUp(self)
        self.value = 1.0 + 1.0 / 65536
        self.cls = JClass("jpype.common.Fixture")
        self.fixture = self.cls()

    def compareFloatEqual(self, x, y, msg=None):
        """Equality helper accepting a relative error of 1e-7 (float32).

        Raises the usual test failure exception when x and y differ by more
        than the tolerance.
        """
        if x == y:
            return
        # Compare magnitudes: relative difference against the mean.
        if x < 0:
            x = -x
        if y < 0:
            y = -y
        a = (x + y) / 2
        b = (x - y)
        if b < 0:
            b = -b
        if b < a * 1e-7:
            return
        # BUG FIX: the original referenced undefined names (safe_repr,
        # first, second) here and raised NameError instead of a useful
        # assertion message.
        msg = self._formatMessage(msg, '%r == %r' % (x, y))
        raise self.failureException(msg)

    @common.requireInstrumentation
    def testJPNumberFloat_int(self):
        jd = JFloat(1)
        _jpype.fault("PyJPNumberFloat_int")
        with self.assertRaisesRegex(SystemError, "fault"):
            int(jd)
        _jpype.fault("PyJPModule_getContext")
        with self.assertRaisesRegex(SystemError, "fault"):
            int(jd)
        int(jd)

    @common.requireInstrumentation
    def testJPNumberFloat_float(self):
        jd = JFloat(1)
        _jpype.fault("PyJPNumberFloat_float")
        with self.assertRaisesRegex(SystemError, "fault"):
            float(jd)
        _jpype.fault("PyJPModule_getContext")
        with self.assertRaisesRegex(SystemError, "fault"):
            float(jd)
        float(jd)

    @common.requireInstrumentation
    def testJPNumberFloat_str(self):
        jd = JFloat(1)
        _jpype.fault("PyJPNumberFloat_str")
        with self.assertRaisesRegex(SystemError, "fault"):
            str(jd)
        _jpype.fault("PyJPModule_getContext")
        with self.assertRaisesRegex(SystemError, "fault"):
            str(jd)
        str(jd)

    @common.requireInstrumentation
    def testJPNumberFloat_repr(self):
        jd = JFloat(1)
        _jpype.fault("PyJPNumberFloat_repr")
        with self.assertRaisesRegex(SystemError, "fault"):
            repr(jd)
        _jpype.fault("PyJPModule_getContext")
        with self.assertRaisesRegex(SystemError, "fault"):
            repr(jd)
        repr(jd)

    @common.requireInstrumentation
    def testJPNumberFloat_compare(self):
        jd = JFloat(1)
        _jpype.fault("PyJPNumberFloat_compare")
        with self.assertRaisesRegex(SystemError, "fault"):
            jd == 1
        _jpype.fault("PyJPModule_getContext")
        with self.assertRaisesRegex(SystemError, "fault"):
            jd == 1
        jd == 1

    @common.requireInstrumentation
    def testJPNumberFloat_hash(self):
        jd = JFloat(1)
        _jpype.fault("PyJPNumberFloat_hash")
        with self.assertRaises(SystemError):
            hash(jd)
        _jpype.fault("PyJPModule_getContext")
        with self.assertRaises(SystemError):
            hash(jd)
        hash(jd)

    @common.requireInstrumentation
    def testFault(self):
        _jpype.fault("JPFloatType::findJavaConversion")
        with self.assertRaises(SystemError):
            JFloat(1.0)

    @common.requireInstrumentation
    def testConversionFault(self):
        _jpype.fault("JPFloatType::findJavaConversion")
        with self.assertRaisesRegex(SystemError, "fault"):
            JFloat._canConvertToJava(object())

    @common.requireInstrumentation
    def testArrayFault(self):
        ja = JArray(JFloat)(5)
        _jpype.fault("JPJavaFrame::NewFloatArray")
        with self.assertRaisesRegex(SystemError, "fault"):
            JArray(JFloat)(1)
        _jpype.fault("JPJavaFrame::SetFloatArrayRegion")
        with self.assertRaisesRegex(SystemError, "fault"):
            ja[0] = 0
        _jpype.fault("JPJavaFrame::GetFloatArrayRegion")
        with self.assertRaisesRegex(SystemError, "fault"):
            print(ja[0])
        _jpype.fault("JPJavaFrame::GetFloatArrayElements")
        # Special case, only BufferError is allowed from getBuffer
        with self.assertRaises(BufferError):
            memoryview(ja[0:3])
        _jpype.fault("JPJavaFrame::ReleaseFloatArrayElements")
        with self.assertRaisesRegex(SystemError, "fault"):
            ja[0:3] = bytes([1, 2, 3])
        _jpype.fault("JPJavaFrame::ReleaseFloatArrayElements")
        with self.assertRaisesRegex(SystemError, "fault"):
            jpype.JObject(ja[::2], jpype.JObject)
        _jpype.fault("JPJavaFrame::ReleaseFloatArrayElements")

        def f():
            # Special case no fault is allowed
            memoryview(ja[0:3])
        f()
        _jpype.fault("JPFloatType::setArrayRange")
        with self.assertRaisesRegex(SystemError, "fault"):
            ja[1:3] = [0, 0]

    def testFromJIntWiden(self):
        # Integer primitives widen losslessly into JFloat.
        self.assertEqual(JFloat(JByte(123)), 123)
        self.assertEqual(JFloat(JShort(12345)), 12345)
        self.assertEqual(JFloat(JInt(12345678)), 12345678)
        self.assertEqual(JFloat(JLong(12345678)), 12345678)

    def testFromJFloatWiden(self):
        self.assertEqual(JFloat(JDouble(12345678)), 12345678)

    def testFromNone(self):
        with self.assertRaises(TypeError):
            JFloat(None)
        self.assertEqual(JFloat._canConvertToJava(None), "none")

    def testFromJFloat(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.assertEqual(JFloat(JFloat(1.2345)), 1.2345)

    def testFromJDouble(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.assertEqual(JFloat(JDouble(1.2345)), 1.2345)

    def testUnBox(self):
        pass
        # with self.useEqualityFunc(self.foo):
        #     self.assertEqual(JFloat(java.lang.Double(1.2345)), 1.2345)

    def testFromFloat(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.assertEqual(JFloat(1.2345), 1.2345)
        self.assertEqual(JFloat._canConvertToJava(1.2345), "implicit")

    def testFromLong(self):
        self.assertEqual(JFloat(12345), 12345)
        self.assertEqual(JFloat._canConvertToJava(12345), "implicit")

    def testFromObject(self):
        # Arbitrary Python objects must be rejected everywhere.
        with self.assertRaises(TypeError):
            JFloat(object())
        with self.assertRaises(TypeError):
            JFloat(JObject())
        with self.assertRaises(TypeError):
            JFloat(JString("A"))
        self.assertEqual(JFloat._canConvertToJava(object()), "none")
        ja = JArray(JFloat)(5)
        with self.assertRaises(TypeError):
            ja[1] = object()
        jf = JClass("jpype.common.Fixture")
        with self.assertRaises(TypeError):
            jf.static_float_field = object()
        with self.assertRaises(TypeError):
            jf().float_field = object()

    def testCallFloatFromNone(self):
        with self.assertRaises(TypeError):
            self.fixture.callFloat(None)
        with self.assertRaises(TypeError):
            self.fixture.static_float_field = None
        with self.assertRaises(TypeError):
            self.fixture.float_field = None

    def checkType(self, q):
        """Round-trip *q* through fields, static fields and calls."""
        # Check field
        self.fixture.float_field = q
        self.assertEqual(self.fixture.float_field, q)
        self.assertEqual(self.fixture.getFloat(), q)
        # Check static field
        self.cls.static_float_field = q
        self.assertEqual(self.fixture.static_float_field, q)
        self.assertEqual(self.fixture.getStaticFloat(), q)
        self.assertEqual(self.cls.getStaticFloat(), q)
        # Check call
        self.assertEqual(self.fixture.callFloat(q), q)
        self.assertEqual(self.cls.callStaticFloat(q), q)
        # Check throw
        with self.assertRaises(JException):
            self.fixture.throwFloat()
        with self.assertRaises(JException):
            self.cls.throwStaticFloat()
        with self.assertRaises(JException):
            self.fixture.throwStaticFloat()

    def testCheckInt(self):
        self.checkType(1)

    def testCheckFloat(self):
        self.checkType(2.0)

    def testCheckRange(self):
        self.checkType(float(1e340))
        self.checkType(float(-1e340))

    def testCheckNaN(self):
        import math
        nan = float("nan")
        self.assertTrue(math.isnan(self.fixture.callFloat(nan)))
        self.fixture.static_float_field = nan
        self.assertTrue(math.isnan(self.fixture.static_float_field))
        self.fixture.float_field = nan
        self.assertTrue(math.isnan(self.fixture.float_field))

    def testCheckInf(self):
        import math
        inf = float("inf")
        self.assertTrue(math.isinf(self.fixture.callFloat(inf)))
        self.fixture.static_float_field = inf
        self.assertTrue(math.isinf(self.fixture.static_float_field))
        self.fixture.float_field = inf
        self.assertTrue(math.isinf(self.fixture.float_field))

    def testCheckBool(self):
        self.checkType(True)
        self.checkType(False)

    def testCheckJBoolean(self):
        # FIXME fails
        # self.checkType(JBoolean(True))
        # self.checkType(JBoolean(False))
        pass

    def testCheckJChar(self):
        self.checkType(JChar("A"))

    def testCheckJByte(self):
        self.checkType(JByte(-128))
        self.checkType(JByte(127))

    def testCheckJShort(self):
        self.checkType(JShort(-2**15))
        self.checkType(JShort(2**15 - 1))

    def testCheckJInt(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.checkType(JInt(-2**31 + 1))
            self.checkType(JInt(2**31 - 1))

    def testCheckJLong(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.checkType(JLong(-2**63 + 1))
            self.checkType(JLong(2**63 - 1))

    def testCheckJFloat(self):
        self.checkType(JFloat(1.515313))

    @common.requireNumpy
    def testCheckNumpyInt8(self):
        self.checkType(np.random.randint(-127, 128, dtype=np.int8))
        self.checkType(np.random.randint(0, 255, dtype=np.uint8))
        self.checkType(np.uint8(0))
        self.checkType(np.uint8(255))
        self.checkType(np.int8(-128))
        self.checkType(np.int8(127))

    @common.requireNumpy
    def testCheckNumpyInt16(self):
        self.checkType(np.random.randint(-2**15, 2**15 - 1, dtype=np.int16))
        self.checkType(np.random.randint(0, 2**16 - 1, dtype=np.uint16))
        self.checkType(np.uint16(0))
        self.checkType(np.uint16(2**16 - 1))
        self.checkType(np.int16(-2**15))
        self.checkType(np.int16(2**15 - 1))

    @common.requireNumpy
    def testCheckNumpyInt32(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.checkType(np.random.randint(-2**31, 2**31 - 1, dtype=np.int32))
            self.checkType(np.random.randint(0, 2**32 - 1, dtype=np.uint32))
            self.checkType(np.uint32(0))
            self.checkType(np.uint32(2**32 - 1))
            self.checkType(np.int32(-2**31))
            self.checkType(np.int32(2**31 - 1))

    @common.requireNumpy
    def testCheckNumpyInt64(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.checkType(np.random.randint(-2**63, 2**63 - 1, dtype=np.int64))
            self.checkType(
                np.uint64(np.random.randint(0, 2**64 - 1, dtype=np.uint64)))
            self.checkType(np.uint64(0))
            self.checkType(np.uint64(2**64 - 1))
            self.checkType(np.int64(-2**63))
            self.checkType(np.int64(2**63 - 1))

    @common.requireNumpy
    def testCheckNumpyFloat32(self):
        self.checkType(np.float32(np.random.rand()))

    @common.requireNumpy
    def testCheckNumpyFloat64(self):
        with self.useEqualityFunc(self.compareFloatEqual):
            self.checkType(np.float64(np.random.rand()))

    def testArrayConversionDouble(self):
        VALUES = [float(random.random()) for i in range(100)]
        jarr = JArray(JFloat)(VALUES)
        self.assertElementsAlmostEqual(VALUES, jarr)
        result = jarr[:]
        self.assertElementsAlmostEqual(VALUES, result)
        result = jarr[2:10]
        self.assertEqual(len(VALUES[2:10]), len(result))
        self.assertElementsAlmostEqual(VALUES[2:10], result)
        # empty slice
        result = jarr[-1:3]
        expected = VALUES[-1:3]
        self.assertElementsAlmostEqual(expected, result)
        result = jarr[3:-2]
        expected = VALUES[3:-2]
        self.assertElementsAlmostEqual(expected, result)

    @common.requireNumpy
    def testArraySetFromNPDouble(self):
        a = np.random.random(100).astype(np.float64)
        jarr = JArray(JFloat)(100)
        jarr[:] = a
        self.assertElementsAlmostEqual(a, jarr)

    @common.requireNumpy
    def testArrayInitFromNPFloat(self):
        # BUG FIX: ``np.float`` (deprecated NumPy 1.20, removed 1.24) was an
        # alias of the builtin float, i.e. float64 — use float64 explicitly.
        a = np.random.random(100).astype(np.float64)
        jarr = JArray(JFloat)(a)
        self.assertElementsAlmostEqual(a, jarr)

    @common.requireNumpy
    def testArrayInitFromNPFloat32(self):
        a = np.random.random(100).astype(np.float32)
        jarr = JArray(JFloat)(a)
        self.assertElementsAlmostEqual(a, jarr)

    @common.requireNumpy
    def testArrayInitFromNPFloat64(self):
        a = np.random.random(100).astype(np.float64)
        jarr = JArray(JFloat)(a)
        self.assertElementsAlmostEqual(a, jarr)

    def testArraySetRange(self):
        ja = JArray(JFloat)(3)
        ja[0:1] = [123]
        self.assertEqual(ja[0], 123)
        ja[0:1] = [-1]
        self.assertEqual(ja[0], -1)
        ja[0:1] = [java.lang.Double(321)]
        self.assertEqual(ja[0], 321)
        with self.assertRaises(TypeError):
            ja[0:1] = [object()]

    def testArrayHash(self):
        ja = JArray(JFloat)([1, 2, 3])
        self.assertIsInstance(hash(ja), int)

    @common.requireNumpy
    def testArrayBufferDims(self):
        ja = JArray(JFloat)(5)
        a = np.zeros((5, 2))
        with self.assertRaisesRegex(TypeError, "incorrect"):
            ja[:] = a

    def testArrayBadItem(self):
        class q(object):
            def __float__(self):
                raise SystemError("nope")
        ja = JArray(JFloat)(5)
        a = [1, -1, q(), 3, 4]
        with self.assertRaisesRegex(SystemError, "nope"):
            ja[:] = a

    def testArrayBadDims(self):
        class q(bytes):
            # Lie about our length
            def __len__(self):
                return 5
        a = q([1, 2, 3])
        ja = JArray(JFloat)(5)
        with self.assertRaisesRegex(ValueError, "Slice"):
            ja[:] = [1, 2, 3]
        with self.assertRaisesRegex(ValueError, "mismatch"):
            ja[:] = a
| 34.257611 | 80 | 0.615463 |
e618a19851dc52e6fff2cab183752ffa80ce4f67 | 3,548 | py | Python | tests/test_formatter.py | leplatrem/logging-color-formatter | 189b21a7bbff3b54091c3d0994553082de2b5460 | [
"Apache-2.0"
] | 1 | 2017-06-30T03:30:27.000Z | 2017-06-30T03:30:27.000Z | tests/test_formatter.py | leplatrem/logging-color-formatter | 189b21a7bbff3b54091c3d0994553082de2b5460 | [
"Apache-2.0"
] | null | null | null | tests/test_formatter.py | leplatrem/logging-color-formatter | 189b21a7bbff3b54091c3d0994553082de2b5460 | [
"Apache-2.0"
] | null | null | null | import logging
import re
import unittest
from io import StringIO
import mock
from logging_color_formatter import ColorFormatter
def strip_ansi(text):
    """Return *text* with ANSI escape (color) sequences removed.

    Source: http://stackoverflow.com/a/15780675
    """
    ansi_sequence = re.compile(r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?')
    return ansi_sequence.sub('', text)
class ColorFormatterTest(unittest.TestCase):
    """Unit tests for ColorFormatter rendering of individual log records."""

    def setUp(self):
        self.formatter = ColorFormatter()
        self.record = mock.MagicMock()
        # format() probes these; None means "no exception/stack attached".
        self.record.exc_info = self.record.stack_info = None

    def test_output_is_serialized_as_string(self):
        value = self.formatter.format(self.record)
        self.assertIsInstance(value, str)

    def test_output_is_simple_if_no_request_is_bound(self):
        value = self.formatter.format(self.record)
        self.assertNotIn('? ms', value)

    def test_values_are_defaulted_to_question_mark(self):
        self.record.path = '/'
        value = self.formatter.format(self.record)
        self.assertIn('? ms', value)

    def test_querystring_is_rendered_as_string(self):
        self.record.path = '/'
        self.record.querystring = {'param': 'val'}
        value = self.formatter.format(self.record)
        self.assertIn('/?param=val', value)

    def test_extra_event_infos_is_rendered_as_key_values(self):
        self.record.nb_records = 5
        value = self.formatter.format(self.record)
        self.assertIn('nb_records=5', strip_ansi(value))

    def test_every_event_dict_entry_appears_in_log_message(self):
        self.record.__dict__ = {
            'msg': 'Pouet',
            'method': 'GET',
            'path': '/v1/',
            'querystring': {'_sort': 'field'},
            'code': 200,
            't': 32,
            'event': 'app.event',
            'nb_records': 5,
            'exc_info': None,
            'stack_info': None,
        }
        value = self.formatter.format(self.record)
        self.assertEqual(('"GET /v1/?_sort=field" 200 (32 ms)'
                          ' Pouet event=app.event nb_records=5'), strip_ansi(value))

    def test_fields_values_support_unicode(self):
        self.record.value = '\u2014'
        value = self.formatter.format(self.record)
        self.assertIn('\u2014', value)

    # BUG FIX: this test was previously also named
    # test_extra_event_infos_is_rendered_as_key_values, silently shadowing
    # the earlier method of the same name so one of the two never ran.
    def test_msg_arguments_are_interpolated(self):
        self.record.msg = '%r login.'
        self.record.args = ('bob',)
        value = self.formatter.format(self.record)
        self.assertIn("'bob' login.", value)
class LoggerTest(unittest.TestCase):
    """Integration tests: ColorFormatter attached to a real logger."""

    def setUp(self):
        self.logger = logging.getLogger("test.module")
        self.logger.setLevel(logging.DEBUG)
        formatter = ColorFormatter()
        self.stream = StringIO()
        handler = logging.StreamHandler(self.stream)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        # BUG FIX: getLogger() returns a shared module-level logger, so
        # handlers used to accumulate across test methods and duplicate
        # output into stale streams. Detach the handler after each test.
        self.addCleanup(self.logger.removeHandler, handler)

    def test_advanced_logging_message(self):
        userid = 'bob'
        resource = '/file'
        self.logger.info("%r authorized on {resource}", userid,
                         extra=dict(userid=userid, resource=resource))
        self.assertEqual(strip_ansi(self.stream.getvalue()),
                         "'bob' authorized on /file resource=/file userid=bob\n")

    def test_exception_formatting(self):
        try:
            1 / 0
        except ZeroDivisionError:  # BUG FIX: was a bare ``except:`` clause
            self.logger.exception("Oups.")
        output = self.stream.getvalue()
        self.assertIn("Oups. \n", strip_ansi(output))
        self.assertIn("Traceback (most recent call last):", output)
| 33.158879 | 84 | 0.620913 |
9b098905ce3eef9497157a2a185b25e88182db94 | 1,627 | py | Python | 2020/day10.py | Bug38/AoC | 576ee0d3745242b71240a62c121c52bc92f7253e | [
"MIT"
] | null | null | null | 2020/day10.py | Bug38/AoC | 576ee0d3745242b71240a62c121c52bc92f7253e | [
"MIT"
] | null | null | null | 2020/day10.py | Bug38/AoC | 576ee0d3745242b71240a62c121c52bc92f7253e | [
"MIT"
] | null | null | null | import utils
# Load the adapter joltage ratings (AoC 2020 day 10 input).
adapters = utils.getIntsFromFile("day10.input")
# Sample inputs from the puzzle description, kept for quick testing:
# adapters = [16,10,15,5,1,11,7,19,6,12,4]
# adapters = [28,33,18,42,31,14,46,20,48,47,24,23,49,45,19,38,39,11,1,32,25,35,8,17,7,9,4,2,34,10,3]
# The charging outlet is 0 jolts; the device is rated 3 above the highest adapter.
outletJoltage = 0
deviceJoltage = max(adapters) + 3
# Include both endpoints in the chain, then sort to form the full sequence.
adapters.append(outletJoltage)
adapters.append(deviceJoltage)
adapters.sort()
def part1():
    """Count 1-jolt and 3-jolt gaps in the sorted adapter chain and
    return their product (day 10, part 1)."""
    gap_counts = {1: 0, 2: 0, 3: 0}
    for lower, upper in zip(adapters, adapters[1:]):
        gap_counts[upper - lower] += 1
    return gap_counts[1] * gap_counts[3]
# Run lengths seen so far (kept for debugging/inspection, as before).
multipliers = []
def getMultiplier(deltas):
    """Return the number of valid adapter arrangements for a run of
    ``len(deltas)`` consecutive 1-jolt gaps.

    GENERALIZATION/BUG FIX: the original hard-coded the table
    {1:1, 2:2, 3:4, 4:7, 5:13, 6:24} and silently returned 1 for longer
    runs. The counts follow the tribonacci recurrence
    f(n) = f(n-1) + f(n-2) + f(n-3) with f(0)=1, f(1)=1, f(2)=2, which is
    computed here for any run length. The side effect of appending the
    run length to the module-level ``multipliers`` list is preserved.
    """
    multipliers.append(len(deltas))
    n = len(deltas)
    if n == 0:
        return 1
    # f0, f1, f2 = arrangements for runs of length k-1, k, k+1.
    f0, f1, f2 = 1, 1, 2
    for _ in range(n - 1):
        f0, f1, f2 = f1, f2, f0 + f1 + f2
    return f1
def part2():
    """Multiply together the arrangement counts of each run of equal,
    non-3 joltage gaps (day 10, part 2)."""
    gaps = [adapters[k + 1] - adapters[k] for k in range(len(adapters) - 1)]
    total = 1
    pos = 0
    while pos < len(gaps) - 1:
        # Extend the run while the gap repeats (3-jolt gaps never group).
        run = 1
        while pos + run < len(gaps) - 1 and gaps[pos] == gaps[pos + run] and gaps[pos] != 3:
            run += 1
        total *= getMultiplier(gaps[pos:pos + run])
        pos += run
    return total
# Print the two puzzle answers.
print(f'Part1: {part1()}')
print(f'Part2: {part2()}')
# #4
# 1 1 1
# 2 1
# 1 2
# 3
# #7
# 1 1 1 1
# 2 1 1
# 1 2 1
# 1 1 2
# 3 1
# 1 3
# 2 2
# #10
# 1 1 1 1 1
# 2 1 1 1
# 1 2 1 1
# 1 1 2 1
# 1 1 1 2
# 2 2 1
# 2 1 2
# 1 2 2
# 3 1 1
# 1 3 1
# 1 1 3
# 3 2
# 2 3
# #24
# 1 1 1 1 1 1
# 2 1 1 1 1
# 1 2 1 1 1
# 1 1 2 1 1
# 1 1 1 2 1
# 1 1 1 1 2
# 2 2 1 1
# 2 1 2 1
# 2 1 1 2
# 1 2 2 1
# 1 2 1 2
# 1 1 2 2
# 2 2 2
# 3 1 1 1
# 1 3 1 1
# 1 1 3 1
# 1 1 1 3
# 1 2 3
# 1 3 2
# 2 1 3
# 2 3 1
# 3 1 2
# 3 2 1
# 3 3 | 15.064815 | 100 | 0.577136 |
1c7c3a354f1605af714fc55a36934c2d23960fba | 6,840 | py | Python | networkapi/api_deploy/facade.py | vinicius-marinho/GloboNetworkAPI | 94651d3b4dd180769bc40ec966814f3427ccfb5b | [
"Apache-2.0"
] | 73 | 2015-04-13T17:56:11.000Z | 2022-03-24T06:13:07.000Z | networkapi/api_deploy/facade.py | leopoldomauricio/GloboNetworkAPI | 3b5b2e336d9eb53b2c113977bfe466b23a50aa29 | [
"Apache-2.0"
] | 99 | 2015-04-03T01:04:46.000Z | 2021-10-03T23:24:48.000Z | networkapi/api_deploy/facade.py | leopoldomauricio/GloboNetworkAPI | 3b5b2e336d9eb53b2c113977bfe466b23a50aa29 | [
"Apache-2.0"
] | 64 | 2015-08-05T21:26:29.000Z | 2022-03-22T01:06:28.000Z | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from networkapi.api_deploy import exceptions
from networkapi.api_equipment.exceptions import AllEquipmentsAreInMaintenanceException
from networkapi.api_rest import exceptions as api_exceptions
from networkapi.distributedlock import distributedlock
from networkapi.equipamento.models import Equipamento
from networkapi.extra_logging import local
from networkapi.extra_logging import NO_REQUEST_ID
from networkapi.plugins.factory import PluginFactory
from networkapi.settings import CONFIG_FILES_PATH
from networkapi.settings import CONFIG_FILES_REL_PATH
from networkapi.settings import TFTP_SERVER_ADDR
from networkapi.settings import TFTPBOOT_FILES_PATH
# import pkgutil
# import re
# import sys
# import time
# import paramiko
# from networkapi.distributedlock import LOCK_VIP_IP_EQUIP
# from networkapi.equipamento.models import EquipamentoAcesso
# from networkapi.equipamento.models import EquipamentoRoteiro
# from networkapi.roteiro.models import TipoRoteiro
log = logging.getLogger(__name__)
def _applyconfig(equipment, filename, equipment_access=None, source_server=None, port=22):
    """Apply configuration file on equipment
    Args:
        equipment: networkapi.equipamento.Equipamento()
        filename: relative file path from TFTPBOOT_FILES_PATH to apply in equipment
        equipment_access: networkapi.equipamento.EquipamentoAcesso() to use
            -- NOTE(review): currently unused by this function; confirm intent
        source_server: source TFTP server address (defaults to TFTP_SERVER_ADDR)
        port: ssh tcp port -- NOTE(review): currently unused by this function
    Returns:
        equipment output (string returned by the plugin copy operation)
    Raises:
        whatever PluginFactory/plugin methods raise (see TODO below)
    """
    # Equipment in maintenance must never be touched; report and bail out.
    if equipment.maintenance is True:
        return 'Equipment is in maintenance mode. No action taken.'
    if source_server is None:
        source_server = TFTP_SERVER_ADDR
    # TODO: Handle exceptions from the following methods and generate response
    # for the caller
    equip_plugin = PluginFactory.factory(equipment)
    equip_plugin.connect()
    equip_plugin.ensure_privilege_level()
    # Use the VRF configured for this equipment's access (when one exists)
    # so the copy is sourced through the right routing table.
    vrf = equip_plugin.equipment_access.vrf.internal_name if equip_plugin.equipment_access.vrf else None
    equip_output = equip_plugin.copyScriptFileToConfig(filename, use_vrf=vrf)
    equip_plugin.close()
    return equip_output
def create_file_from_script(script, prefix_name=''):
    """Create a file holding *script* under CONFIG_FILES_PATH.

    Args:
        script: string with commands script
        prefix_name: prefix to use in filename (defaults to 'script_reqid_')

    Returns:
        file name created, relative path prefixed with CONFIG_FILES_REL_PATH

    Raises:
        exceptions.InvalidFilenameException: if prefix_name resolves outside
            CONFIG_FILES_PATH (path traversal attempt)
        IOError: if the file cannot be written
    """
    if prefix_name == '':
        prefix_name = 'script_reqid_'
    # validate filename: refuse prefixes that escape the config directory
    # (e.g. '../../etc'), which would allow writing arbitrary files.
    path = os.path.abspath(CONFIG_FILES_PATH + prefix_name)
    if not path.startswith(CONFIG_FILES_PATH):
        raise exceptions.InvalidFilenameException(prefix_name)
    # The per-request id makes the generated filename unique per API call.
    request_id = getattr(local, 'request_id', NO_REQUEST_ID)
    filename_out = prefix_name + str(request_id)
    filename_to_save = CONFIG_FILES_PATH + filename_out
    # Save new file; the context manager guarantees the handle is closed even
    # if the write fails. (`except IOError, e` was Python-2-only syntax and a
    # syntax error under Python 3 -- replaced with the portable `as` form.)
    try:
        with open(filename_to_save, 'w') as file_handle:
            file_handle.write(script)
    except IOError as e:
        log.error('Error writing to config file: %s' % filename_to_save)
        raise e
    return CONFIG_FILES_REL_PATH + filename_out
def deploy_config_in_equipment_synchronous(rel_filename, equipment, lockvar,
                                           tftpserver=None,
                                           equipment_access=None):
    """Apply a configuration file on equipment under a distributed lock.

    Args:
        rel_filename: relative file path from TFTPBOOT_FILES_PATH to apply
            in equipment
        equipment: networkapi.equipamento.Equipamento() or Equipamento().id
        lockvar: distributed lock variable to use when applying config to
            equipment
        tftpserver: source TFTP server address
        equipment_access: networkapi.equipamento.EquipamentoAcesso() to use

    Returns:
        equipment output

    Raises:
        exceptions.InvalidFilenameException: if rel_filename escapes
            TFTPBOOT_FILES_PATH (path traversal attempt)
        api_exceptions.NetworkAPIException: if equipment is neither an id
            nor an Equipamento instance
        AllEquipmentsAreInMaintenanceException: if equipment is in
            maintenance mode
    """
    # validate filename: refuse paths resolving outside the tftpboot dir
    path = os.path.abspath(TFTPBOOT_FILES_PATH + rel_filename)
    if not path.startswith(TFTPBOOT_FILES_PATH):
        raise exceptions.InvalidFilenameException(rel_filename)
    # Accept either a primary key or an Equipamento instance
    # (isinstance instead of `type(...) is` also accepts subclasses).
    if isinstance(equipment, int):
        equipment = Equipamento.get_by_pk(equipment)
    elif not isinstance(equipment, Equipamento):
        log.error('Invalid data for equipment')
        raise api_exceptions.NetworkAPIException()
    if equipment.maintenance:
        raise AllEquipmentsAreInMaintenanceException()
    # Serialize deploys to the same resource across workers.
    with distributedlock(lockvar):
        return _applyconfig(
            equipment, rel_filename, equipment_access, tftpserver)
def deploy_config_in_equipment(rel_filename, equipment, tftpserver=None,
                               equipment_access=None):
    """Apply a configuration file on equipment (no distributed lock).

    Args:
        rel_filename: relative file path from TFTPBOOT_FILES_PATH to apply
            in equipment
        equipment: networkapi.equipamento.Equipamento() or Equipamento().id
        tftpserver: source TFTP server address
        equipment_access: networkapi.equipamento.EquipamentoAcesso() to use

    Returns:
        equipment output

    Raises:
        exceptions.InvalidFilenameException: if rel_filename escapes
            TFTPBOOT_FILES_PATH (path traversal attempt)
        api_exceptions.NetworkAPIException: if equipment is neither an id
            nor an Equipamento instance
        AllEquipmentsAreInMaintenanceException: if equipment is in
            maintenance mode
    """
    # validate filename: refuse paths resolving outside the tftpboot dir
    path = os.path.abspath(TFTPBOOT_FILES_PATH + rel_filename)
    if not path.startswith(TFTPBOOT_FILES_PATH):
        raise exceptions.InvalidFilenameException(rel_filename)
    # Accept either a primary key or an Equipamento instance
    # (isinstance instead of `type(...) is` also accepts subclasses).
    if isinstance(equipment, int):
        equipment = Equipamento.get_by_pk(equipment)
    elif not isinstance(equipment, Equipamento):
        log.error('Invalid data for equipment')
        raise api_exceptions.NetworkAPIException()
    if equipment.maintenance:
        raise AllEquipmentsAreInMaintenanceException()
    return _applyconfig(
        equipment, rel_filename, equipment_access, tftpserver)
| 35.076923 | 104 | 0.715789 |
ddbac8c5780a4d0e4b8b6f3a7b0ed2cc864ca52e | 182 | py | Python | main.py | TheSynt4x/discord-bot | ddc7c6fb4f96ca7ba7fe4759d254234a84ae414d | [
"MIT"
] | null | null | null | main.py | TheSynt4x/discord-bot | ddc7c6fb4f96ca7ba7fe4759d254234a84ae414d | [
"MIT"
] | null | null | null | main.py | TheSynt4x/discord-bot | ddc7c6fb4f96ca7ba7fe4759d254234a84ae414d | [
"MIT"
] | null | null | null | from bot.core._config import settings
from bot.events import bot
for cog in settings.COGS:
bot.load_extension('bot.cogs.%s' % cog.get('name'))
bot.run(settings.TOKEN, bot=False)
| 22.75 | 53 | 0.747253 |
0b5b828b19b32d86e006ce35e0ade28db128adc0 | 1,304 | py | Python | praw_memories/cache/__init__.py | elnuno/praw_memories | dcab9cf795d8d9c34684fb1087c7907c56630cc2 | [
"Apache-2.0"
] | 1 | 2017-04-08T03:16:48.000Z | 2017-04-08T03:16:48.000Z | praw_memories/cache/__init__.py | elnuno/praw_memories | dcab9cf795d8d9c34684fb1087c7907c56630cc2 | [
"Apache-2.0"
] | null | null | null | praw_memories/cache/__init__.py | elnuno/praw_memories | dcab9cf795d8d9c34684fb1087c7907c56630cc2 | [
"Apache-2.0"
] | null | null | null | import praw
from packaging.version import Version
class LegacyCachingReddit(praw.Reddit):
    """praw.Reddit wrapper for pre-4.4 praw that injects a caching session.

    Older praw versions do not accept ``requestor_kwargs``, so the HTTP
    session is swapped in after construction by reaching into the private
    prawcore requestor.
    """
    def __init__(self, site_name=None, caching_session=None,
                 requestor_class=None, requestor_kwargs=None,
                 **config_settings):
        # requestor_class / requestor_kwargs are accepted only for signature
        # compatibility with ModernCachingReddit; they are ignored here.
        super().__init__(site_name=site_name, **config_settings)
        if caching_session:
            # Replace the private requestor session with the caching one.
            self._core._requestor._http = caching_session
    def _prepare_prawcore(self, *args, **kwargs):
        # Pass-through override -- NOTE(review): adds no behavior; presumably
        # kept as a hook point. Confirm whether it can be removed.
        super()._prepare_prawcore(*args, **kwargs)
class ModernCachingReddit(praw.Reddit):
    """praw.Reddit wrapper (praw >= 4.4) that installs a caching session.

    The session is handed to prawcore through ``requestor_kwargs``, the
    supported mechanism in modern praw.
    """
    def __init__(self, site_name=None, requestor_class=None,
                 requestor_kwargs=None, caching_session=None,
                 **config_settings):
        if requestor_kwargs is None:
            requestor_kwargs = {}
        if caching_session is not None:
            if requestor_kwargs.get('session') is not None:
                # Two different sessions were supplied; refuse to guess.
                raise ValueError('Cannot pass session both as caching_session '
                                 'and requestor_kwargs["session"].')
            # Bug fix: previously a caching_session was silently dropped when
            # requestor_kwargs was passed without a 'session' key.
            requestor_kwargs['session'] = caching_session
        super().__init__(site_name, requestor_class, requestor_kwargs,
                         **config_settings)
# Select the implementation matching the installed praw: 4.4.0 introduced
# requestor_kwargs, which ModernCachingReddit relies on.
_ver = Version(praw.__version__)
_minver = Version('4.4.0')
CachingReddit = ModernCachingReddit if _ver >= _minver else LegacyCachingReddit
| 37.257143 | 79 | 0.661043 |
03231229e7aa006ae857eab6f1a6caa1e2487b7d | 5,842 | py | Python | code/gpt_decoder.py | felixwzh/DialoGPT | 11db966dc85e1de5a623690b0e430ca3c95fef49 | [
"MIT"
] | null | null | null | code/gpt_decoder.py | felixwzh/DialoGPT | 11db966dc85e1de5a623690b0e430ca3c95fef49 | [
"MIT"
] | null | null | null | code/gpt_decoder.py | felixwzh/DialoGPT | 11db966dc85e1de5a623690b0e430ca3c95fef49 | [
"MIT"
] | null | null | null | import os
import torch
import torch.nn.functional as F
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import argparse
from tqdm.auto import tqdm
from pathlib import Path
def reinput(text):
    """Encode *text* with the module tokenizer and append the GPT-2
    end-of-text token id (50256) as the context separator."""
    return tokenizer.encode(text) + [50256]
def top_p_filtering(logits, top_p=0.9, filter_value=-float('Inf')):
    """Nucleus (top-p) filtering, applied in place.

    Keeps the smallest set of logits whose cumulative softmax probability
    exceeds *top_p*; every other entry is overwritten with *filter_value*.
    Credit: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

    Args:
        logits: 1-D tensor of raw scores for a single generation step.
        top_p: cumulative probability mass to keep.
        filter_value: value written over discarded logits.

    Returns:
        The same tensor object, modified in place.
    """
    assert logits.dim() == 1  # batch size 1 for single word generation
    ranked_logits, ranked_idx = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(F.softmax(ranked_logits, dim=-1), dim=-1)
    # Mark everything past the top_p mass for removal...
    drop_mask = cum_probs > top_p
    # ...then shift right so the first token crossing the threshold survives.
    drop_mask[..., 1:] = drop_mask[..., :-1].clone()
    drop_mask[..., 0] = 0
    logits[ranked_idx[drop_mask]] = filter_value
    return logits
def recalc(conditioned_tokens, generated_tokens):
    """Sample one token from the model given context + tokens generated so far.

    Appends the sampled token id to *generated_tokens* (in place) and returns
    a tuple of (sampled token id, conditioned_tokens, generated_tokens).
    """
    # Context and previously generated tokens are concatenated; both sets are
    # kept separate elsewhere for segment-display purposes.
    context = torch.tensor([conditioned_tokens + generated_tokens]).to('cuda')
    with torch.no_grad():
        predictions = model(context)[0]
    # Only the last position's logits matter for sampling the next token.
    last_logits = predictions[0, -1, :]
    probabilities = F.softmax(top_p_filtering(last_logits), dim=-1)
    sampled = torch.multinomial(probabilities, 1).item()
    generated_tokens.append(sampled)
    return sampled, conditioned_tokens, generated_tokens
def generate(conditioned_tokens, generated_tokens):
    """Sample tokens until end-of-text (50256) or over 256 tokens, then decode.

    The final token (the terminator, or the last sampled token when the
    length cap triggers) is dropped from the decoded string.
    """
    while True:
        token, conditioned_tokens, generated_tokens = recalc(
            conditioned_tokens, generated_tokens)
        # end-of-text : 50256 -- this special token splits segments
        if token == 50256 or len(generated_tokens) > 256:
            return tokenizer.decode(generated_tokens[:-1])
def generate_one_sent(input_sent):
    """Run the full decode pipeline for one input sentence and return the
    generated reply string."""
    context_tokens = reinput(input_sent)
    return generate(context_tokens, [])
# Command-line options: where to read the decode set, which fine-tuned
# checkpoint to load, where to write outputs, and an optional slice of the
# input file to decode (-1 means "not set" for the three decode_* options).
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder', type=str, default='data/testing',
                    help='the folder that contains your dataset and vocabulary file')
parser.add_argument('--decode_file', type=str, default='test.txt')
parser.add_argument('--model_folder', type=str, default='save/testing')
parser.add_argument('--model_name', type=str, default='model')
parser.add_argument('--output_folder', type=str, default='outputs')
parser.add_argument('--output_file', type=str, default='output.txt')
# Decode only the first N lines (-1 = all lines).
parser.add_argument('--decode_num', type=int, default=-1)
# Decode only lines [decode_start, decode_end); overrides decode_num.
parser.add_argument('--decode_start', type=int, default=-1)
parser.add_argument('--decode_end', type=int, default=-1)
args = parser.parse_args()
if __name__ == '__main__':
    # Resolve input/checkpoint/output paths from the CLI arguments.
    decode_file=os.path.join(args.data_folder,args.decode_file)
    model_file=os.path.join(args.model_folder,args.model_name)
    output_file=os.path.join(args.output_folder,'{}_{}'.format(args.decode_file,args.output_file))
    Path(args.output_folder).mkdir(parents=True, exist_ok=True)
    # Truncate any previous output so this run starts from an empty file.
    with open(output_file,'w') as fout:
        fout.write("")
    # load
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    weights = torch.load(model_file)
    medium_config = GPT2Config(n_embd=1024,n_layer=24,n_head=16)
    small_config= GPT2Config(n_embd=768,n_layer=12,n_head=12)
    # Pick the architecture by checkpoint filename convention
    # ('small'/'medium' must appear in the model file name).
    if 'small' in model_file:
        model = GPT2LMHeadModel(small_config)
    elif 'medium' in model_file:
        model = GPT2LMHeadModel(medium_config)
    # fix misused key value: the checkpoint stores the LM head under
    # lm_head.decoder.weight, but GPT2LMHeadModel expects lm_head.weight.
    weights["lm_head.weight"] = weights["lm_head.decoder.weight"]
    weights.pop("lm_head.decoder.weight",None)
    model.load_state_dict(weights)
    model.eval()
    model.to('cuda')
    # decode_size = len(open(decode_file,'rU').readlines())
    output_lines=[]
    with open(decode_file,'r') as fin:
        lines=fin.readlines()
    assert args.decode_num<=len(lines)
    # Choose which line indices to decode: the first decode_num lines,
    # or the explicit [decode_start, decode_end) window when given.
    if args.decode_num==-1:
        decode_size=len(lines)
    else:
        decode_size=args.decode_num
    decode_list=list(range(decode_size))
    if args.decode_start!=-1:
        decode_size=args.decode_end - args.decode_start
        decode_list=list(range(args.decode_start,args.decode_end,1))
    progress = tqdm(unit_scale=True, total=decode_size, desc="Decoding {}".format(args.decode_file))
    for i in decode_list:
        line=lines[i]
        progress.update(1)
        # for i in tqdm.tqdm(range(len(lines))):
        #     line=lines[i]
        # for line in tqdm.tqdm(lines):
        # Input format (source│target), e.g.:
        # 474 what are those weird lines one sees after rubbing their eyes ?│474 dream dust
        src,tgt=line.strip().split('│')
        # First token of the source is a post/pair id; the rest is the text.
        src_tokens=src.split(' ')
        src_sent=''
        for word in src_tokens[1:]:
            src_sent+=word
            src_sent+=' '
        src_sent=src_sent[:-1]
        src_pid=src_tokens[0]
        tgt_sent=tgt
        output_sent=generate_one_sent(src_sent)
        # Emit "source│<pid> <generated>" and append to the output file
        # immediately so partial results survive interruption.
        output_line=src+'│'+src_pid+' '+output_sent+'\n'
        output_lines.append(output_line)
        with open(output_file,'a') as fout:
            fout.write(output_line)
    # input_sent='what are some crazy animal facts that no one knows ?'
    # output_sent=generate_one_sent(input_sent)
    # print(output_sent)
    # """
    # CUDA_VISIBLE_DEVICES=0 python gpt_decoder.py --model_folder '../models/medium_10epochs_trial_2/GPT2.1e-05.20.1gpu.2020-04-15200256' \
    # --model_name GP2-pretrain-step-12500.pkl \
    # --data_folder ../data/src_data_full_feat_tf_resplited_review \
    # --decode_file val_full_ref.txt \
    # --output_folder ../outputs/medium_10epochs_trial_2 \
    # --output_file step-12500.txt
    # """
| 36.061728 | 136 | 0.75368 |
c180eb60d8f6c85256a71fc9c9d9c00c672e2b23 | 6,768 | py | Python | dein/.cache/init.vim/temp/19020/20170514021125/rplugin/python3/denite/prompt/key.py | riggtravis/nvim-config | b8d37c5c6471fd86e8e24aa564ac9852cae0ea36 | [
"MIT"
] | null | null | null | dein/.cache/init.vim/temp/19020/20170514021125/rplugin/python3/denite/prompt/key.py | riggtravis/nvim-config | b8d37c5c6471fd86e8e24aa564ac9852cae0ea36 | [
"MIT"
] | null | null | null | dein/.cache/init.vim/temp/19020/20170514021125/rplugin/python3/denite/prompt/key.py | riggtravis/nvim-config | b8d37c5c6471fd86e8e24aa564ac9852cae0ea36 | [
"MIT"
] | null | null | null | """Key module."""
from collections import namedtuple
from .util import ensure_bytes, ensure_str, int2char
ESCAPE_QUOTE = str.maketrans({
'"': '\\"',
})
CTRL_KEY = b'\x80\xfc\x04'
META_KEY = b'\x80\xfc\x08'
CTRL_SHIFT_KEY = b'\x80\xfc\x06'
# :help key-notation
SPECIAL_KEYS = {
'C-@': b'\x80\xffX', # Vim internally use <80><ff>X for <C-@>
'NUL': 10,
'BS': b'\x80kb',
'TAB': 9,
'S-TAB': b'\x80kB',
'NL': 10,
'FE': 12,
'CR': 13,
'ESC': 27,
'SPACE': 32,
'LT': 60,
'BSLASH': 92,
'BAR': 124,
'DEL': b'\x80kD',
'CSI': b'\x9B',
'XCSI': b'\x80\xfdP',
'UP': b'\x80ku',
'DOWN': b'\x80kd',
'LEFT': b'\x80kl',
'RIGHT': b'\x80kr',
'S-UP': b'\x80\xfd',
'S-DOWN': b'\x80\xfd',
'S-LEFT': b'\x80#4',
'S-RIGHT': b'\x80%i',
'C-LEFT': b'\x80\xfdT',
'C-RIGHT': b'\x80\xfdU',
'F1': b'\x80k1',
'F2': b'\x80k2',
'F3': b'\x80k3',
'F4': b'\x80k4',
'F5': b'\x80k5',
'F6': b'\x80k6',
'F7': b'\x80k7',
'F8': b'\x80k8',
'F9': b'\x80k9',
'F10': b'\x80k;',
'F11': b'\x80F1',
'F12': b'\x80F2',
'S-F1': b'\x80\xfd\x06',
'S-F2': b'\x80\xfd\x07',
'S-F3': b'\x80\xfd\x08',
'S-F4': b'\x80\xfd\x09',
'S-F5': b'\x80\xfd\x0A',
'S-F6': b'\x80\xfd\x0B',
'S-F7': b'\x80\xfd\x0C',
'S-F8': b'\x80\xfd\x0D',
'S-F9': b'\x80\xfd\x0E',
'S-F10': b'\x80\xfd\x0F',
'S-F11': b'\x80\xfd\x10',
'S-F12': b'\x80\xfd\x11',
'HELP': b'\x80%1',
'UNDO': b'\x80&8',
'INSERT': b'\x80kI',
'HOME': b'\x80kh',
'END': b'\x80@7',
'PAGEUP': b'\x80kP',
'PAGEDOWN': b'\x80kN',
'KHOME': b'\x80K1',
'KEND': b'\x80K4',
'KPAGEUP': b'\x80K3',
'KPAGEDOWN': b'\x80K5',
'KPLUS': b'\x80K6',
'KMINUS': b'\x80K7',
'KMULTIPLY': b'\x80K9',
'KDIVIDE': b'\x80K8',
'KENTER': b'\x80KA',
'KPOINT': b'\x80KB',
'K0': b'\x80KC',
'K1': b'\x80KD',
'K2': b'\x80KE',
'K3': b'\x80KF',
'K4': b'\x80KG',
'K5': b'\x80KH',
'K6': b'\x80KI',
'K7': b'\x80KJ',
'K8': b'\x80KK',
'K9': b'\x80KL',
}
SPECIAL_KEYS_REVRESE = {v: k for k, v in SPECIAL_KEYS.items()}
# Add aliases used in Vim. This requires to be AFTER making swap dictionary
SPECIAL_KEYS.update({
'NOP': SPECIAL_KEYS['NUL'],
'RETURN': SPECIAL_KEYS['CR'],
'ENTER': SPECIAL_KEYS['CR'],
'BACKSPACE': SPECIAL_KEYS['BS'],
'DELETE': SPECIAL_KEYS['DEL'],
'INS': SPECIAL_KEYS['INSERT'],
})
KeyBase = namedtuple('KeyBase', ['code', 'char'])
class Key(KeyBase):
    """Key class which indicates a single key.
    Attributes:
        code (int or bytes): A code of the key. A bytes is used when the key is
            a special key in Vim (a key which starts from 0x80 in getchar()).
        char (str): A printable representation of the key. It might be an empty
            string when the key is not printable.
    """
    __slots__ = ()
    # Per-expression instance cache; `parse` always returns the same object
    # for the same expression (see docstring below).
    __cached = {}
    def __str__(self):
        """Return string representation of the key."""
        return self.char
    @classmethod
    def represent(cls, nvim, code):
        """Return a string representation of a Keycode."""
        if isinstance(code, int):
            return int2char(nvim, code)
        # NOTE(review): module table is (mis)spelled SPECIAL_KEYS_REVRESE;
        # renaming it would have to be done together with its definition.
        if code in SPECIAL_KEYS_REVRESE:
            char = SPECIAL_KEYS_REVRESE.get(code)
            return '<%s>' % char
        else:
            return ensure_str(nvim, code)
    @classmethod
    def parse(cls, nvim, expr):
        r"""Parse a key expression and return a Key instance.
        It returns a Key instance of a key expression. The instance is cached
        per individual expression so that the instance is exactly equal when
        the same expression is specified.
        Args:
            expr (int, bytes, or str): A key expression.
        Example:
            >>> from unittest.mock import MagicMock
            >>> nvim = MagicMock()
            >>> nvim.options = {'encoding': 'utf-8'}
            >>> Key.parse(nvim, ord('a'))
            Key(code=97, char='a')
            >>> Key.parse(nvim, '<Insert>')
            Key(code=b'\x80kI', char='')
        Returns:
            Key: A Key instance.
        """
        if expr not in cls.__cached:
            code = _resolve(nvim, expr)
            # Derive the printable form: ints decode via Vim; plain byte
            # strings decode as text; Vim special sequences (0x80-prefixed)
            # have no printable form.
            if isinstance(code, int):
                char = int2char(nvim, code)
            elif not code.startswith(b'\x80'):
                char = ensure_str(nvim, code)
            else:
                char = ''
            cls.__cached[expr] = cls(code, char)
        return cls.__cached[expr]
def _resolve(nvim, expr):
if isinstance(expr, int):
return expr
elif isinstance(expr, str):
return _resolve(nvim, ensure_bytes(nvim, expr))
elif isinstance(expr, bytes):
if len(expr) == 1:
return ord(expr)
elif expr.startswith(b'\x80'):
return expr
else:
raise AttributeError((
'`expr` (%s) requires to be an instance of int|bytes|str but '
'"%s" has specified.'
) % (expr, type(expr)))
# Special key
if expr.startswith(b'<') or expr.endswith(b'>'):
inner = expr[1:-1]
code = _resolve_from_special_keys(nvim, inner)
if code != inner:
return code
return expr
def _resolve_from_special_keys(nvim, inner):
    """Resolve the inside of a <...> key notation into a keycode.

    Handles named special keys (SPECIAL_KEYS), modifier prefixes
    (C-S-/S-C-, C-, M-/A-) and <Leader>/<LocalLeader>. Returns *inner*
    unchanged when nothing matches (callers use that to detect failure).
    """
    inner_upper = inner.upper()
    inner_upper_str = ensure_str(nvim, inner_upper)
    if inner_upper_str in SPECIAL_KEYS:
        return SPECIAL_KEYS[inner_upper_str]
    elif inner_upper.startswith(b'C-S-') or inner_upper.startswith(b'S-C-'):
        # Ctrl+Shift combination: CTRL_SHIFT_KEY prefix + resolved key bytes.
        return b''.join([
            CTRL_SHIFT_KEY,
            _resolve_from_special_keys_inner(nvim, inner[4:]),
        ])
    elif inner_upper.startswith(b'C-'):
        if len(inner) == 3:
            # Single-character Ctrl mapping: ASCII control codes are the
            # character value with the high bits masked off (e.g. C-A -> 0x01).
            if inner_upper[-1] in b'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_?':
                return inner[-1] & 0x1f
        return b''.join([
            CTRL_KEY,
            _resolve_from_special_keys_inner(nvim, inner[2:]),
        ])
    elif inner_upper.startswith(b'M-') or inner_upper.startswith(b'A-'):
        # Meta/Alt combination: META_KEY prefix + resolved key bytes.
        return b''.join([
            META_KEY,
            _resolve_from_special_keys_inner(nvim, inner[2:]),
        ])
    elif inner_upper == b'LEADER':
        leader = nvim.vars['mapleader']
        leader = ensure_bytes(nvim, leader)
        return _resolve(nvim, leader)
    elif inner_upper == b'LOCALLEADER':
        leader = nvim.vars['maplocalleader']
        leader = ensure_bytes(nvim, leader)
        return _resolve(nvim, leader)
    return inner
def _resolve_from_special_keys_inner(nvim, inner):
    """Resolve *inner* and coerce the result to bytes (for b''.join use)."""
    code = _resolve_from_special_keys(nvim, inner)
    if isinstance(code, int):
        return ensure_bytes(nvim, int2char(nvim, code))
    return ensure_bytes(nvim, code)
| 28.677966 | 79 | 0.54669 |
be56e048c1ddc46a412eed19505b3a6eb6274938 | 3,957 | py | Python | object_track/object_tracker.py | returnfly/home_security_camera | 55f4ee7e6b715bd7547f5eda07ea942e90b5d593 | [
"MIT"
] | null | null | null | object_track/object_tracker.py | returnfly/home_security_camera | 55f4ee7e6b715bd7547f5eda07ea942e90b5d593 | [
"MIT"
] | null | null | null | object_track/object_tracker.py | returnfly/home_security_camera | 55f4ee7e6b715bd7547f5eda07ea942e90b5d593 | [
"MIT"
] | null | null | null | # USAGE
# python object_tracker.py --prototxt deploy.prototxt --model res10_300x300_ssd_iter_140000.caffemodel
# import the necessary packages
from pyimagesearch.centroidtracker import CentroidTracker
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
from pyimagesearch.dingding import Send_Message
# construct the argument parse and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-p", "--prototxt", required=True,
# help="path to Caffe 'deploy' prototxt file")
# ap.add_argument("-m", "--model", required=True,
# help="path to Caffe pre-trained model")
# ap.add_argument("-c", "--confidence", type=float, default=0.5,
# help="minimum probability to filter weak detections")
# args = vars(ap.parse_args())
# initialize our centroid tracker and frame dimensions
# ct = CentroidTracker()
# (H, W) = (None, None)
# load our serialized model from disk
# print("[INFO] loading model...")
# net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# # initialize the video stream and allow the camera sensor to warmup
# print("[INFO] starting video stream...")
# net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "res10_300x300_ssd_iter_140000.caffemodel")
# vs = VideoStream(src=0).start()
# time.sleep(2.0)
class Track() :
    """Webcam face tracker.

    Detects faces per frame with a res10 SSD Caffe model and assigns
    persistent object IDs via centroid tracking.
    """
    def __init__ (self) :
        self.ct = CentroidTracker()
        # Frame dimensions, lazily captured from the first frame read.
        (self.H, self.W) = (None, None)
        # Pre-trained face detector (files expected in the working directory).
        self.net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "res10_300x300_ssd_iter_140000.caffemodel")
        self.vs = VideoStream(src=0).start()
        # Give the camera sensor time to warm up.
        time.sleep(2.0)
    # loop over the frames from the video stream
    def get_frame(self) :
        # read the next frame from the video stream and resize it
        self.frame = self.vs.read()
        self.frame = imutils.resize(self.frame, width=600)
        # if the frame dimensions are None, grab them
        if self.W is None or self.H is None:
            (self.H, self.W) = self.frame.shape[:2]
        # construct a blob from the frame, pass it through the network,
        # obtain our output predictions, and initialize the list of
        # bounding box rectangles
        blob = cv2.dnn.blobFromImage(self.frame, 1.0, (self.W, self.H),
            (104.0, 177.0, 123.0))
        self.net.setInput(blob)
        detections = self.net.forward()
        rects = []
        # loop over the detections
        for i in range(0, detections.shape[2]):
            # filter out weak detections by ensuring the predicted
            # probability is greater than a minimum threshold
            # if detections[0, 0, i, 2] > args["confidence"]:
            if detections[0, 0, i, 2] > 0.5:
                # compute the (x, y)-coordinates of the bounding box for
                # the object, then update the bounding box rectangles list
                # (detections store box corners normalized to [0, 1]).
                box = detections[0, 0, i, 3:7] * np.array([self.W, self.H, self.W, self.H])
                rects.append(box.astype("int"))
                # draw a bounding box surrounding the object so we can
                # visualize it
                (startX, startY, endX, endY) = box.astype("int")
                cv2.rectangle(self.frame, (startX, startY), (endX, endY),
                    (0, 255, 0), 2)
        # update our centroid tracker using the computed set of bounding
        # box rectangles
        objects = self.ct.update(rects)
        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(self.frame, text, (centroid[0] - 10, centroid[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(self.frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        # Return the annotated frame for display by the caller.
        return self.frame
# # show the output frame
# cv2.imshow("Frame", frame)
# key = cv2.waitKey(1) & 0xFF
# # if the `q` key was pressed, break from the loop
# if key == ord("q"):
# break
# Example scene: display tracked frames until "q" is pressed to quit.
if __name__ == '__main__':
    track = Track()
    while True:
        frame = track.get_frame()
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # Quit on "q".
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    track.vs.stop()
b023405a31ea7fcdc54ee1b962a145abe1813de5 | 99 | py | Python | wemeet/globaltables/apps.py | Ketan-Suthar/wemeet | 964d933de0e40dbf11256c612889d1aa54fe8377 | [
"MIT"
] | 2 | 2021-05-08T08:35:20.000Z | 2021-05-09T05:14:53.000Z | wemeet/globaltables/apps.py | Ketan-Suthar/wemeet | 964d933de0e40dbf11256c612889d1aa54fe8377 | [
"MIT"
] | null | null | null | wemeet/globaltables/apps.py | Ketan-Suthar/wemeet | 964d933de0e40dbf11256c612889d1aa54fe8377 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class GlobaltablesConfig(AppConfig):
    """Django application configuration for the ``globaltables`` app."""
    name = 'globaltables'
| 16.5 | 36 | 0.777778 |
bbfd51c293e2484af01279ab86d376d9469b0117 | 719 | py | Python | regressor/regressor.py | gmaher/GenericML | 510140e298b9cd8a2e0499b0e460ef0b4f3489cd | [
"MIT"
] | null | null | null | regressor/regressor.py | gmaher/GenericML | 510140e298b9cd8a2e0499b0e460ef0b4f3489cd | [
"MIT"
] | null | null | null | regressor/regressor.py | gmaher/GenericML | 510140e298b9cd8a2e0499b0e460ef0b4f3489cd | [
"MIT"
] | null | null | null | class Regressor(object):
def __init__(self):
pass
def predict(X):
pass
def fit(X,Y):
pass
class TFRegressor(Regressor):
    """Regressor backed by TensorFlow ops run through a session.

    Args:
        x_plh: placeholder for inputs.
        y_plh: placeholder for targets.
        output_op: op producing predictions.
        train_op: op performing one optimization step.
        session: session object used to run the ops.
        copy_op: optional op copying weights (e.g. to a target network).
    """

    def __init__(self, x_plh, y_plh, output_op, train_op, session, copy_op=None):
        self.x = x_plh
        self.y = y_plh
        self.output_op = output_op
        self.train = train_op
        self.session = session
        self.copy_op = copy_op

    def predict(self, X):
        """Run the output op on X and return the predictions."""
        return self.session.run(self.output_op, {self.x: X})

    def fit(self, X, Y):
        """Run one training step on (X, Y)."""
        self.session.run(self.train, {self.x: X, self.y: Y})

    def copy(self):
        """Run the copy op; raise RuntimeError when none was configured."""
        # `is None` (identity) instead of `== None`: avoids invoking
        # overloaded equality operators on graph ops.
        if self.copy_op is None:
            raise RuntimeError('No copy op specified')
        self.session.run(self.copy_op)
| 27.653846 | 75 | 0.603616 |
9055606cea7abecc6bddcddc2a44dfbbdacdcb7e | 1,398 | py | Python | aula15/exercicio3.py | ArseniumGX/bluemer-modulo1-python | 2f7c69252a9a86cc573c192d1d9685b0c20466f8 | [
"MIT"
] | null | null | null | aula15/exercicio3.py | ArseniumGX/bluemer-modulo1-python | 2f7c69252a9a86cc573c192d1d9685b0c20466f8 | [
"MIT"
] | null | null | null | aula15/exercicio3.py | ArseniumGX/bluemer-modulo1-python | 2f7c69252a9a86cc573c192d1d9685b0c20466f8 | [
"MIT"
] | null | null | null | # 03 - Data com mês por extenso. Construa uma função que receba uma data no
# formato DD/MM/AAAA e devolva uma string no formato D de mesPorExtenso de
# AAAA. Opcionalmente, valide a data e retorne NULL caso a data seja inválida.
# Considere que Fevereiro tem 28 dias e que a cada 4 anos temos ano bisexto, sendo
# que nesses casos Fevereiro terá 29 dias
def mesLiteral(mes: int):
    """Return the Portuguese month name for month number *mes* (1-12)."""
    nomes = dict(enumerate(
        ['Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho',
         'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Dezembro'], start=1))
    return nomes[mes]
def dataPorExtenso(data: str):
    """Convert 'DD/MM/AAAA' into 'D de <month> de AAAA', or 'NULL' if invalid.

    February has 28 days, or 29 on leap years (divisible by 4 and not by
    100, or divisible by 400).

    Bug fixed: the original leap-year test lacked parentheses, so ANY date
    whose year was divisible by 400 (e.g. '32/01/2000') was accepted.
    """
    if len(data) != 10:
        return 'NULL'
    try:
        dia, mes, ano = (int(parte) for parte in data.split('/'))
    except ValueError:
        # Wrong number of fields or non-numeric parts: invalid date.
        return 'NULL'
    if mes in (1, 3, 5, 7, 8, 10, 12):
        max_dia = 31
    elif mes in (4, 6, 9, 11):
        max_dia = 30
    elif mes == 2:
        # Gregorian leap-year rule, correctly parenthesized.
        bissexto = (ano % 4 == 0 and ano % 100 != 0) or ano % 400 == 0
        max_dia = 29 if bissexto else 28
    else:
        return 'NULL'
    if 1 <= dia <= max_dia:
        return '{} de {} de {}'.format(dia, mesLiteral(mes), ano)
    return 'NULL'
# Read a date from stdin in DD/MM/AAAA form and print it spelled out.
data = str(input('Digite uma data \'DD/MM/AAAA\': '))
print(dataPorExtenso(data))
f837ad26c052e75691611f1480f7aaf8956124c6 | 1,512 | py | Python | multi_class_news_classification/train.py | prakharchoudhary/MLworld | eb7e15e67772dfa3f12b59164af0603a3f36bc7c | [
"MIT"
] | 7 | 2017-06-17T09:23:24.000Z | 2019-10-02T08:56:25.000Z | multi_class_news_classification/train.py | prakharchoudhary/MLworld | eb7e15e67772dfa3f12b59164af0603a3f36bc7c | [
"MIT"
] | null | null | null | multi_class_news_classification/train.py | prakharchoudhary/MLworld | eb7e15e67772dfa3f12b59164af0603a3f36bc7c | [
"MIT"
] | 1 | 2020-02-04T08:25:40.000Z | 2020-02-04T08:25:40.000Z | import numpy as np
from keras.datasets import reuters
import nnet
import pickle
# load the dataset and prepare train and test data
# (keep only the 10k most frequent words; rarer words become OOV).
(train_data, train_labels), (test_data, test_labels) = \
    reuters.load_data(num_words=10000)
# decoding newswires back to text
word_index = reuters.get_word_index()
reverse_word_index = dict([(value, word)
                           for word, value in word_index.items()])
# Indices 0-2 are reserved (padding/start/unknown), hence the `i - 3` shift;
# unmapped indices decode to '?'.
decoded_newswire = ' '.join(
    [reverse_word_index.get(i - 3, '?') for i in train_data[0]])
# vectorizing data
def vectorize_sequences(sequences, dimensions=10000):
    """Multi-hot encode each sequence of word indices into a float vector.

    Returns a (len(sequences), dimensions) array where row i has 1.0 at
    every index present in sequences[i] and 0.0 elsewhere.
    """
    encoded = np.zeros((len(sequences), dimensions))
    for row, word_indices in enumerate(sequences):
        # Fancy indexing sets all listed positions of this row at once.
        encoded[row, word_indices] = 1.
    return encoded
X_train = vectorize_sequences(train_data)
X_test = vectorize_sequences(test_data)
# test to check encoding of data: each row spans the full 10k-word vocabulary
assert len(X_train[0]) == 10000
# One-hot encoding the labels
def one_hot_encoding(labels, dims=46):
    """One-hot encode integer class labels into a (len(labels), dims) array."""
    encoded = np.zeros((len(labels), dims))
    for row, label in enumerate(labels):
        encoded[row, label] = 1.
    return encoded
one_hot_train_labels = one_hot_encoding(train_labels)
one_hot_test_labels = one_hot_encoding(test_labels)
# test to check encoding of labels: Reuters has 46 topic classes
assert len(one_hot_train_labels[0]) == 46
# Train the model
network = nnet.Model(X_train, one_hot_train_labels)
network.network()
network.train_model()
# evaluate and print results
results = network.model.evaluate(X_test, one_hot_test_labels)
print("The results are: ", str(results))
| 28 | 66 | 0.734788 |
89d5035ceb00096ea1a598f0c11d7a591247b712 | 11,373 | py | Python | moto/ec2/responses/vpc_peering_connections.py | oakbramble/moto | 6350d8ec4c59eaf12b83385b6acd386e5c2f5593 | [
"Apache-2.0"
] | null | null | null | moto/ec2/responses/vpc_peering_connections.py | oakbramble/moto | 6350d8ec4c59eaf12b83385b6acd386e5c2f5593 | [
"Apache-2.0"
] | 1 | 2021-09-13T04:39:03.000Z | 2021-09-13T04:39:03.000Z | moto/ec2/responses/vpc_peering_connections.py | oakbramble/moto | 6350d8ec4c59eaf12b83385b6acd386e5c2f5593 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.core import ACCOUNT_ID
class VPCPeeringConnections(BaseResponse):
    """moto EC2 API handlers for VPC peering connection actions.

    Each method reads query parameters from the request, delegates to the
    ec2 backend, and renders the matching XML response template.
    """
    def create_vpc_peering_connection(self):
        peer_region = self._get_param("PeerRegion")
        # TagSpecification arrives as a (possibly single-element) multi-param;
        # unwrap it down to a flat {key: value} dict.
        tags = self._get_multi_param("TagSpecification")
        tags = tags[0] if isinstance(tags, list) and len(tags) == 1 else tags
        tags = (tags or {}).get("Tag", [])
        tags = {t["Key"]: t["Value"] for t in tags}
        # Same-region (or unspecified-region) peering resolves the peer VPC
        # locally; cross-region goes through the peer region's backend.
        if peer_region == self.region or peer_region is None:
            peer_vpc = self.ec2_backend.get_vpc(self._get_param("PeerVpcId"))
        else:
            peer_vpc = self.ec2_backend.get_cross_vpc(
                self._get_param("PeerVpcId"), peer_region
            )
        vpc = self.ec2_backend.get_vpc(self._get_param("VpcId"))
        vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc, tags)
        template = self.response_template(CREATE_VPC_PEERING_CONNECTION_RESPONSE)
        return template.render(vpc_pcx=vpc_pcx)
    def delete_vpc_peering_connection(self):
        vpc_pcx_id = self._get_param("VpcPeeringConnectionId")
        vpc_pcx = self.ec2_backend.delete_vpc_peering_connection(vpc_pcx_id)
        template = self.response_template(DELETE_VPC_PEERING_CONNECTION_RESPONSE)
        return template.render(vpc_pcx=vpc_pcx)
    def describe_vpc_peering_connections(self):
        # Optional filter: restrict to the given peering connection ids.
        ids = self._get_multi_param("VpcPeeringConnectionId")
        vpc_pcxs = self.ec2_backend.describe_vpc_peering_connections(
            vpc_peering_ids=ids
        )
        template = self.response_template(DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE)
        return template.render(vpc_pcxs=vpc_pcxs)
    def accept_vpc_peering_connection(self):
        vpc_pcx_id = self._get_param("VpcPeeringConnectionId")
        vpc_pcx = self.ec2_backend.accept_vpc_peering_connection(vpc_pcx_id)
        template = self.response_template(ACCEPT_VPC_PEERING_CONNECTION_RESPONSE)
        return template.render(vpc_pcx=vpc_pcx)
    def reject_vpc_peering_connection(self):
        vpc_pcx_id = self._get_param("VpcPeeringConnectionId")
        self.ec2_backend.reject_vpc_peering_connection(vpc_pcx_id)
        template = self.response_template(REJECT_VPC_PEERING_CONNECTION_RESPONSE)
        return template.render()
    def modify_vpc_peering_connection_options(self):
        vpc_pcx_id = self._get_param("VpcPeeringConnectionId")
        # Either side's options may be absent; the backend handles None.
        accepter_options = self._get_multi_param_dict(
            "AccepterPeeringConnectionOptions"
        )
        requester_options = self._get_multi_param_dict(
            "RequesterPeeringConnectionOptions"
        )
        self.ec2_backend.modify_vpc_peering_connection_options(
            vpc_pcx_id, accepter_options, requester_options
        )
        template = self.response_template(MODIFY_VPC_PEERING_CONNECTION_RESPONSE)
        return template.render(
            accepter_options=accepter_options, requester_options=requester_options
        )
# We assume the owner id for the accepter and requester VPCs is the same,
# since we already checked for the VPCs' existence above.
# Jinja2 template for the CreateVpcPeeringConnection response; the owner id
# is spliced in via Python string concatenation, the rest is rendered with
# vpc_pcx bound to the new peering-connection object.
CREATE_VPC_PEERING_CONNECTION_RESPONSE = (
"""
<CreateVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnection>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>"""
+ ACCOUNT_ID
+ """</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>{{ vpc_pcx.requester_options.AllowEgressFromLocalClassicLinkToRemoteVpc or '' }}</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>{{ vpc_pcx.requester_options.AllowEgressFromLocalVpcToRemoteClassicLink or '' }}</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>{{ vpc_pcx.requester_options.AllowDnsResolutionFromRemoteVpc or '' }}</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>"""
+ ACCOUNT_ID
+ """</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>{{ vpc_pcx.accepter_options.AllowEgressFromLocalClassicLinkToRemoteVpc or '' }}</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>{{ vpc_pcx.accepter_options.AllowEgressFromLocalVpcToRemoteClassicLink or '' }}</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>{{ vpc_pcx.accepter_options.AllowDnsResolutionFromRemoteVpc or '' }}</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</accepterVpcInfo>
<status>
<code>initiating-request</code>
<message>Initiating Request to {accepter ID}</message>
</status>
<expirationTime>2014-02-18T14:37:25.000Z</expirationTime>
<tagSet>
{% for tag in vpc_pcx.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</vpcPeeringConnection>
</CreateVpcPeeringConnectionResponse>
"""
)
# Jinja2 template for DescribeVpcPeeringConnections; rendered with
# vpc_pcxs bound to the iterable of matching peering-connection objects.
DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = (
"""
<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnectionSet>
{% for vpc_pcx in vpc_pcxs %}
<item>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>"""
+ ACCOUNT_ID
+ """</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
<region>{{ vpc_pcx.vpc.ec2_backend.region_name }}</region>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>{{ vpc_pcx.requester_options.AllowEgressFromLocalClassicLinkToRemoteVpc or '' }}</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>{{ vpc_pcx.requester_options.AllowEgressFromLocalVpcToRemoteClassicLink or '' }}</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>{{ vpc_pcx.requester_options.AllowDnsResolutionFromRemoteVpc or '' }}</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>"""
+ ACCOUNT_ID
+ """</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
<region>{{ vpc_pcx.peer_vpc.ec2_backend.region_name }}</region>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>{{ vpc_pcx.accepter_options.AllowEgressFromLocalClassicLinkToRemoteVpc or '' }}</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>{{ vpc_pcx.accepter_options.AllowEgressFromLocalVpcToRemoteClassicLink or '' }}</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>{{ vpc_pcx.accepter_options.AllowDnsResolutionFromRemoteVpc or '' }}</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>
<message>{{ vpc_pcx._status.message }}</message>
</status>
<tagSet>
{% for tag in vpc_pcx.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</vpcPeeringConnectionSet>
</DescribeVpcPeeringConnectionsResponse>
"""
)
# Static success body for DeleteVpcPeeringConnection (no template variables).
DELETE_VPC_PEERING_CONNECTION_RESPONSE = """
<DeleteVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpcPeeringConnectionResponse>
"""

# Jinja2 template for AcceptVpcPeeringConnection; rendered with vpc_pcx
# bound to the accepted peering-connection object.
ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = (
"""
<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnection>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>"""
+ ACCOUNT_ID
+ """</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
<region>{{ vpc_pcx.vpc.ec2_backend.region_name }}</region>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>"""
+ ACCOUNT_ID
+ """</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
<peeringOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>{{ vpc_pcx.accepter_options.AllowEgressFromLocalClassicLinkToRemoteVpc or '' }}</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>{{ vpc_pcx.accepter_options.AllowEgressFromLocalVpcToRemoteClassicLink or '' }}</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>{{ vpc_pcx.accepter_options.AllowDnsResolutionFromRemoteVpc or '' }}</allowDnsResolutionFromRemoteVpc>
</peeringOptions>
<region>{{ vpc_pcx.peer_vpc.ec2_backend.region_name }}</region>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>
<message>{{ vpc_pcx._status.message }}</message>
</status>
<tagSet>
{% for tag in vpc_pcx.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</vpcPeeringConnection>
</AcceptVpcPeeringConnectionResponse>
"""
)

# Static success body for RejectVpcPeeringConnection (no template variables).
REJECT_VPC_PEERING_CONNECTION_RESPONSE = """
<RejectVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</RejectVpcPeeringConnectionResponse>
"""

# Jinja2 template for ModifyVpcPeeringConnectionOptions; each options block
# is emitted only when the corresponding dict was supplied in the request.
MODIFY_VPC_PEERING_CONNECTION_RESPONSE = """
<ModifyVpcPeeringConnectionOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>8d977c82-8aba-4cd1-81ca-example</requestId>
{% if requester_options %}
<requesterPeeringConnectionOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>{{ requester_options.AllowEgressFromLocalClassicLinkToRemoteVpc or '' }}</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>{{ requester_options.AllowEgressFromLocalVpcToRemoteClassicLink or '' }}</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>{{ requester_options.AllowDnsResolutionFromRemoteVpc or '' }}</allowDnsResolutionFromRemoteVpc>
</requesterPeeringConnectionOptions>
{% endif %}
{% if accepter_options %}
<accepterPeeringConnectionOptions>
<allowEgressFromLocalClassicLinkToRemoteVpc>{{ accepter_options.AllowEgressFromLocalClassicLinkToRemoteVpc or '' }}</allowEgressFromLocalClassicLinkToRemoteVpc>
<allowEgressFromLocalVpcToRemoteClassicLink>{{ accepter_options.AllowEgressFromLocalVpcToRemoteClassicLink or '' }}</allowEgressFromLocalVpcToRemoteClassicLink>
<allowDnsResolutionFromRemoteVpc>{{ accepter_options.AllowDnsResolutionFromRemoteVpc or '' }}</allowDnsResolutionFromRemoteVpc>
</accepterPeeringConnectionOptions>
{% endif %}
</ModifyVpcPeeringConnectionOptionsResponse>
"""
| 46.044534 | 176 | 0.736921 |
14c1673a04e2e60c13ba993fc3cd1eb14cd83bca | 2,900 | py | Python | app.py | Xeslley/Primeira_API_REST | 892d5aee6217dcabc28c4aded39cf3c3366c4b08 | [
"MIT"
] | null | null | null | app.py | Xeslley/Primeira_API_REST | 892d5aee6217dcabc28c4aded39cf3c3366c4b08 | [
"MIT"
] | null | null | null | app.py | Xeslley/Primeira_API_REST | 892d5aee6217dcabc28c4aded39cf3c3366c4b08 | [
"MIT"
] | null | null | null | #imports das libs padrao do python
import json
#imports de terceiros
from flask import Flask, request, jsonify
from loguru import logger
#imports do proprio prj
from statsapi import data_store, operation
app = Flask(__name__)
# POST /data — store the posted list under a fresh UUID.
@app.route("/data", methods=["POST"])
def save_data():
    """Persist the ``data`` list from the JSON body and return its UUID."""
    logger.info("Saving data...")
    body = request.get_json()
    new_uuid = data_store.save(body["data"])
    logger.info(f"Data saved with UUID '{new_uuid}' successfully")
    return jsonify(
        {"status": "success", "message": "data saved successfully", "uuid": new_uuid}
    )
def atempt_get_data(uuid):
    """Return the list stored under *uuid*, or ``[]`` if the UUID is unknown.

    Bug fix: the original returned a Flask ``jsonify`` Response object on
    failure.  A Response is truthy, so callers testing ``if not stored_data``
    could never detect the failure, and the Response object leaked into
    numeric operations / re-serialization and crashed.  Returning an empty
    list restores the falsy "no data" contract the callers expect.

    :param uuid: key previously returned by ``data_store.save``.
    :return: the stored list, or an empty list when retrieval fails.
    """
    try:
        return data_store.get(uuid)
    except KeyError:
        logger.warning(f"Cannot retrieve data associated with UUID '{uuid}'.")
        return []
@app.route("/data/<uuid>", methods=["GET"])
def retrieve_data(uuid):
    """GET /data/<uuid> — return the list previously stored under *uuid*."""
    logger.info(f"Retrieving data associated with UUID '{uuid}' ...")
    found = atempt_get_data(uuid)
    logger.info(f"Data associated with UUID '{uuid}' retrieved successfully")
    # NOTE(review): the "successfuly" spelling below is part of the public
    # API payload, so it is reproduced as-is here.
    return jsonify(
        {
            "status": "success",
            "message": "data retrieved successfuly.",
            "data": found,
        }
    )
@app.route("/data/<uuid>/<operation>", methods=["GET"])
def process_operation(uuid, operation):
    """GET /data/<uuid>/<operation> — apply a named statistic to stored data.

    Bug fix: the first log message read "Prossecing"; corrected to
    "Processing".

    :param uuid: key of a previously stored list.
    :param operation: name of the statistic ('min', 'max', 'mean', ...).
    :return: JSON with status/message/result.
    """
    # NOTE(review): the route argument ``operation`` shadows the imported
    # ``statsapi.operation`` module inside this function; the module is only
    # used by get_operation(), so this is harmless, but the name is confusing.
    logger.info(f"Processing operation '{operation}' on data associated with UUID '{uuid}'...")
    stored_data = atempt_get_data(uuid)
    if not stored_data:
        return jsonify(
            {"status": "failed", "message": "data cannot be retrieved.",
             "result": None})
    try:
        operation_func = get_operation(operation)
        logger.info(f"operation {operation} = {operation_func}")
    except NoSuchOperationError:
        logger.warning(f"Cannot find operation '{operation}'.")
        return jsonify({"status": "failed", "message": f"no such {operation}",
                        "result": None})
    result = operation_func(stored_data)
    logger.info(f"Operation '{operation}' on data associated with UUID '{uuid}' finished successfully!")
    return jsonify({"status": "success", "message": "result completed successfuly.",
                    "result": result})
class NoSuchOperationError(Exception):
    """Raised by get_operation() when no operation matches the given name."""
def get_operation(operation_name):
    """Map an operation name to its implementation in ``statsapi.operation``.

    Raises NoSuchOperationError for unknown names.
    """
    dispatch = {
        'min': operation.op_min,
        'max': operation.op_max,
        'mean': operation.op_mean,
        'median': operation.op_median,
        # 'mode': operation.op_mode,
        'range': operation.op_range,
    }
    if operation_name not in dispatch:
        raise NoSuchOperationError
    return dispatch[operation_name]
if __name__ == "__main__":
    # Development entry point only: debug=True enables the Werkzeug debugger
    # and host='0.0.0.0' binds all interfaces — do not use in production.
    app.run(host='0.0.0.0', debug=True)
ee2ebe73ee0dbe2c41f134b9a869b124610a97a1 | 5,057 | py | Python | sdk/python/pulumi_kubernetes/core/v1/PodTemplate.py | MatheusMiranda/pulumi-kubernetes | eecebd55fe96f63365194182a69d99eda625bb96 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/core/v1/PodTemplate.py | MatheusMiranda/pulumi-kubernetes | eecebd55fe96f63365194182a69d99eda625bb96 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/core/v1/PodTemplate.py | MatheusMiranda/pulumi-kubernetes | eecebd55fe96f63365194182a69d99eda625bb96 | [
"Apache-2.0"
] | null | null | null | # *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class PodTemplate(pulumi.CustomResource):
    """
    PodTemplate describes a template for creating copies of a predefined pod.
    """

    apiVersion: pulumi.Output[str]
    """
    APIVersion defines the versioned schema of this representation of an object. Servers should
    convert recognized schemas to the latest internal value, and may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
    """

    kind: pulumi.Output[str]
    """
    Kind is a string value representing the REST resource this object represents. Servers may infer
    this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
    info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
    """

    metadata: pulumi.Output[dict]
    """
    Standard object's metadata. More info:
    https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    """

    template: pulumi.Output[dict]
    """
    Template defines the pods that will be created from this pod template.
    https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    """

    def __init__(self, resource_name, opts=None, metadata=None, template=None, __name__=None, __opts__=None):
        """
        Create a PodTemplate resource with the given unique name, arguments, and options.

        :param str resource_name: The _unique_ name of the resource.
        :param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
        :param pulumi.Input[dict] metadata: Standard object's metadata. More info:
               https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        :param pulumi.Input[dict] template: Template defines the pods that will be created from this pod template.
               https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
        """
        # Backwards-compatibility shims for the deprecated __name__/__opts__ kwargs.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        # Resource input properties; apiVersion/kind are fixed for this type.
        __props__ = dict()

        __props__['apiVersion'] = 'v1'
        __props__['kind'] = 'PodTemplate'
        __props__['metadata'] = metadata
        __props__['template'] = template
        __props__['status'] = None

        additional_secret_outputs = [
        ]

        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
            version=version.get_version(), additional_secret_outputs=additional_secret_outputs))

        parent = opts.parent if opts and opts.parent else None
        aliases = [
        ]

        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
            version=version.get_version(), aliases=aliases))

        super(PodTemplate, self).__init__(
            "kubernetes:core/v1:PodTemplate",
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None):
        """
        Get the state of an existing `PodTemplate` resource, as identified by `id`.
        The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
        then (per Kubernetes convention) the ID becomes `default/[name]`.

        Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.

        :param str resource_name: _Unique_ name used to register this resource with Pulumi.
        :param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
               Takes the form `[namespace]/[name]` or `[name]`.
        :param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
               resource's behavior.
        """
        # NOTE: the `id` parameter shadows the builtin; kept for codegen compatibility.
        opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        return PodTemplate(resource_name, opts)

    def translate_output_property(self, prop: str) -> str:
        # Map engine-side property names to their Python (snake_case) form.
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        # Map Python (snake_case) property names back to their engine-side form.
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| 42.141667 | 114 | 0.684398 |
8d746b601bf247450d56c13014b506f85d3900a2 | 1,254 | py | Python | servers/python/v2/server.py | laser/polyglot-distributed-systems | 6532c52979f67c76f17e0d9ec384e0c34634478a | [
"MIT"
] | 3 | 2016-01-04T03:01:19.000Z | 2020-03-30T16:23:43.000Z | servers/python/v2/server.py | laser/polyglot-distributed-systems | 6532c52979f67c76f17e0d9ec384e0c34634478a | [
"MIT"
] | null | null | null | servers/python/v2/server.py | laser/polyglot-distributed-systems | 6532c52979f67c76f17e0d9ec384e0c34634478a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import barrister
from bottle import run, post, request
from store import Store, RecordNotFound, UserDataInvalid, MaxTodosExceeded
from functools import wraps
import sys
import code
class TodoManager:
    """CRUD handler backing the TodoManager RPC interface (v2 contract).

    Method names are camelCase to match the Barrister IDL. All persistence
    is delegated to the injected store.
    """

    def __init__(self, store):
        # `store` provides get_all/save/update/delete (see statsapi-style Store).
        self.store = store

    def readTodos(self):
        """Return every stored todo."""
        return self.store.get_all()

    def createTodo(self, properties):
        """Create a todo from `properties` and return the stored record."""
        return self.store.save(properties)

    def updateTodo(self, todo):
        """Replace the todo whose id matches `todo['id']`."""
        return self.store.update(todo['id'], todo)

    def deleteTodo(self, id):
        # NOTE: parameter name `id` shadows the builtin; kept to match the IDL.
        return self.store.delete(id)
class TodoManagerV1Adapter(TodoManager):
    """Adapts the v1 contract onto TodoManager.

    In v1, deleteTodo receives the whole todo dict; v2 takes only the id,
    so this adapter extracts it before delegating.
    """

    def deleteTodo(self, todo):
        return TodoManager.deleteTodo(self, todo['id'])
# Single shared store: both API versions operate on the same data.
store = Store()

# v1 endpoint uses the adapter so old clients keep their delete signature.
v1_contract = barrister.contract_from_file('../../todo_manager.v1.json')
v1_server = barrister.Server(v1_contract)
v1_server.add_handler('TodoManager', TodoManagerV1Adapter(store))

# v2 endpoint exposes the current TodoManager directly.
v2_contract = barrister.contract_from_file('../../todo_manager.v2.json')
v2_server = barrister.Server(v2_contract)
v2_server.add_handler('TodoManager', TodoManager(store))


@post('/v1/todos')
def todos_v1():
    # JSON-RPC over HTTP: the raw request body is the Barrister payload.
    return v1_server.call_json(request.body.read())


@post('/v2/todos')
def todos_v2():
    return v2_server.call_json(request.body.read())


run(host='localhost', port=3000)
| 24.588235 | 74 | 0.750399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.