hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71b39d53d82554ce904392600c340709e0534bb | 2,018 | py | Python | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-05-07T04:58:36.000Z | 2021-05-07T04:58:59.000Z | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-06-16T07:19:12.000Z | 2021-12-16T10:24:44.000Z | import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
import pandas.core.common as com
def test_detect_chained_assignment():
# Inplace ops, originally from:
# https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
a = [12, 23]
b = [123, None]
c = [1234, 2345]
d = [12345, 23456]
tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
events = {
("eyes", "left"): a,
("eyes", "right"): b,
("ears", "left"): c,
("ears", "right"): d,
}
multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
zed = DataFrame(events, index=["a", "b"], columns=multiind)
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
zed["eyes"]["right"].fillna(value=555, inplace=True)
def test_cache_updating():
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=["x", "y", "z"])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]["z"].iloc[0] = 1.0
result = df.loc[(0, 0), "z"]
assert result == 1
# correct setting
df.loc[(0, 0), "z"] = 2
result = df.loc[(0, 0), "z"]
assert result == 2
@pytest.mark.arm_slow
def test_indexer_caching():
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = (range(n), range(n))
index = MultiIndex.from_tuples(zip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
| 28.422535 | 118 | 0.606541 | import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
import pandas.core.common as com
def test_detect_chained_assignment():
a = [12, 23]
b = [123, None]
c = [1234, 2345]
d = [12345, 23456]
tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
events = {
("eyes", "left"): a,
("eyes", "right"): b,
("ears", "left"): c,
("ears", "right"): d,
}
multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
zed = DataFrame(events, index=["a", "b"], columns=multiind)
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
zed["eyes"]["right"].fillna(value=555, inplace=True)
def test_cache_updating():
a = np.random.rand(10, 3)
df = DataFrame(a, columns=["x", "y", "z"])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]["z"].iloc[0] = 1.0
result = df.loc[(0, 0), "z"]
assert result == 1
# correct setting
df.loc[(0, 0), "z"] = 2
result = df.loc[(0, 0), "z"]
assert result == 2
@pytest.mark.arm_slow
def test_indexer_caching():
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = (range(n), range(n))
index = MultiIndex.from_tuples(zip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
| true | true |
f71b3ac0d45395a6a7e2ff0955877634c8665bca | 3,872 | py | Python | gym-kinova-gripper/plotting_code/other_plots.py | OSUrobotics/KinovaGrasping | f22af60d3683fdc4ffecf49ccff179fbc6750748 | [
"Linux-OpenIB"
] | 16 | 2020-05-16T00:40:31.000Z | 2022-02-22T11:59:03.000Z | gym-kinova-gripper/plotting_code/other_plots.py | OSUrobotics/KinovaGrasping | f22af60d3683fdc4ffecf49ccff179fbc6750748 | [
"Linux-OpenIB"
] | 9 | 2020-08-10T08:33:55.000Z | 2021-08-17T02:10:50.000Z | gym-kinova-gripper/plotting_code/other_plots.py | OSUrobotics/KinovaGrasping | f22af60d3683fdc4ffecf49ccff179fbc6750748 | [
"Linux-OpenIB"
] | 7 | 2020-07-27T09:45:05.000Z | 2021-06-21T21:42:50.000Z | import matplotlib.pyplot as plt
import numpy as np
## Extra plotting functions that can be called for quick analysis
def plot_timestep_distribution(success_timesteps=None, fail_timesteps=None, all_timesteps=None, expert_saving_dir=None):
""" Plot the distribution of time steps over successful and failed episodes """
if all_timesteps is None:
success_timesteps = np.load(expert_saving_dir + "/success_timesteps.npy")
fail_timesteps = np.load(expert_saving_dir + "/fail_timesteps.npy")
all_timesteps = np.load(expert_saving_dir + "/all_timesteps.npy")
n_bins = 40
# We can set the number of bins with the `bins` kwarg
plt.hist(all_timesteps, bins=n_bins, color="g")
plt.title("Total time steps distribution for all episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.xlim(0, 800)
plt.savefig(expert_saving_dir + "/total_timestep_distribution")
plt.clf()
plt.hist(success_timesteps, bins=n_bins, color="b")
plt.title("Time steps distribution for Successful episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.savefig(expert_saving_dir + "/success_timestep_distribution")
plt.clf()
plt.hist(fail_timesteps, bins=n_bins, color="r")
plt.title("Time steps distribution for Failed episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.savefig(expert_saving_dir + "/fail_timestep_distribution")
plt.clf()
'''
# Plot the average velocity over an episode
def plot_average_velocity(replay_buffer,num_timesteps):
""" Plot the average velocity over a certain number of episodes """
velocity_dir = "./expert_average_velocity"
if not os.path.isdir(velocity_dir):
os.mkdir(velocity_dir)
#num_episodes = len(f1_vels)
#plt.plot(np.arrange(len(f1_vels)), f1_vels)
max_timesteps = 30
timestep_vel_count = np.zeros(max_timesteps)
wrist_avg_vels = np.zeros(max_timesteps)
f1_avg_vels = np.zeros(max_timesteps)
f2_avg_vels = np.zeros(max_timesteps)
f3_avg_vels = np.zeros(max_timesteps)
for episode_actions in replay_buffer.action:
for timestep_idx in range(len(episode_actions)):
timestep_vel_count[timestep_idx] += 1
wrist_avg_vels[timestep_idx] = (wrist_avg_vels[timestep_idx] + episode_actions[timestep_idx][0]) / timestep_vel_count[timestep_idx]
f1_avg_vels[timestep_idx] = (f1_avg_vels[timestep_idx] + episode_actions[timestep_idx][1]) / \
timestep_vel_count[timestep_idx]
f2_avg_vels[timestep_idx] = (f2_avg_vels[timestep_idx] + episode_actions[timestep_idx][2]) / \
timestep_vel_count[timestep_idx]
f3_avg_vels[timestep_idx] = (f3_avg_vels[timestep_idx] + episode_actions[timestep_idx][3]) / \
timestep_vel_count[timestep_idx]
num_episodes = len(replay_buffer.action)
print("replay_buffer.action: ",replay_buffer.action)
print("f1_avg_vels: ",f1_avg_vels)
plt.plot(np.arange(num_timesteps), f1_avg_vels, color="r", label="Finger1")
plt.plot(np.arange(num_timesteps), f2_avg_vels, color="b", label="Finger2")
plt.plot(np.arange(num_timesteps), f3_avg_vels, color="g", label="Finger3")
plt.plot(np.arange(num_timesteps), wrist_avg_vels, color="y", label="Wrist")
plt.legend()
plt.title("Average velocity over "+str(num_episodes)+" episodes", weight='bold')
plt.xlabel('Timestep within an episode')
plt.ylabel('Average Velocity at Timestep')
#plt.savefig(velocity_dir + "/velocity_plot")
#plt.clf()
plt.show()
''' | 46.650602 | 143 | 0.698089 | import matplotlib.pyplot as plt
import numpy as np
mesteps=None, all_timesteps=None, expert_saving_dir=None):
if all_timesteps is None:
success_timesteps = np.load(expert_saving_dir + "/success_timesteps.npy")
fail_timesteps = np.load(expert_saving_dir + "/fail_timesteps.npy")
all_timesteps = np.load(expert_saving_dir + "/all_timesteps.npy")
n_bins = 40
plt.hist(all_timesteps, bins=n_bins, color="g")
plt.title("Total time steps distribution for all episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.xlim(0, 800)
plt.savefig(expert_saving_dir + "/total_timestep_distribution")
plt.clf()
plt.hist(success_timesteps, bins=n_bins, color="b")
plt.title("Time steps distribution for Successful episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.savefig(expert_saving_dir + "/success_timestep_distribution")
plt.clf()
plt.hist(fail_timesteps, bins=n_bins, color="r")
plt.title("Time steps distribution for Failed episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.savefig(expert_saving_dir + "/fail_timestep_distribution")
plt.clf()
| true | true |
f71b3b475439ea9ed08d69fbc7b9ab409bb33d5a | 2,223 | py | Python | src/utils.py | wenyuC94/LogConcComp | b17d6ba6a102ba83a8415774b0e6da27a362bd5d | [
"MIT"
] | null | null | null | src/utils.py | wenyuC94/LogConcComp | b17d6ba6a102ba83a8415774b0e6da27a362bd5d | [
"MIT"
] | null | null | null | src/utils.py | wenyuC94/LogConcComp | b17d6ba6a102ba83a8415774b0e6da27a362bd5d | [
"MIT"
] | null | null | null | import os
import numpy as np
import numba as nb
def create_folder(storage_path):
if not os.path.isdir(storage_path):
os.makedirs(storage_path,exist_ok=True)
lsdir = os.listdir(storage_path)
for item in ["info","hist","soln","figs"]:
if item not in lsdir:
os.makedirs(storage_path+item+"/",exist_ok=True)
if item == "figs":
lsdir_figs = os.listdir(storage_path+item+"/")
for item1 in ["crop","raw"]:
if item1 not in lsdir_figs:
os.makedirs(storage_path+item+"/"+item1+"/",exist_ok=True)
def time_to_string(runtime):
seconds = runtime%60
runmins = (runtime-seconds)/60
mins = int(runmins%60)
runhrs = (runmins-mins)/60
hrs = int(runhrs)
return "%.2d:%.2d:%05.2f"%(hrs,mins,seconds)
def multivariate_laplace(n,d,rng=None, random_state=None):
rng = rng if rng is not None else np.random.RandomState(random_state)
X = rng.randn(n,d)
Z = rng.exponential(size=(n,1))
return X*np.sqrt(Z)
@nb.njit(cache=True)
def np_apply_along_axis(func1d, axis, arr):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@nb.njit(cache=True)
def np_apply_along_axis_kd(funckd, axis, arr, k = -1):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
k = k if k > 0 else arr.shape[0]
result = np.empty((k,arr.shape[1]))
for i in range(arr.shape[1]):
result[:, i] = funckd(arr[:, i])
else:
k = k if k > 0 else arr.shape[1]
result = np.empty((arr.shape[0],k))
for i in range(arr.shape[0]):
result[i, :] = funckd(arr[i, :])
return result
@nb.njit(cache=True)
def split(n, B):
sep = n//B
rem = n%B
indices = []
last = 0
cur = 0
for i in range(B):
cur = last + sep + (i < rem)
indices.append(cur)
last = cur
return indices
| 28.5 | 78 | 0.559154 | import os
import numpy as np
import numba as nb
def create_folder(storage_path):
if not os.path.isdir(storage_path):
os.makedirs(storage_path,exist_ok=True)
lsdir = os.listdir(storage_path)
for item in ["info","hist","soln","figs"]:
if item not in lsdir:
os.makedirs(storage_path+item+"/",exist_ok=True)
if item == "figs":
lsdir_figs = os.listdir(storage_path+item+"/")
for item1 in ["crop","raw"]:
if item1 not in lsdir_figs:
os.makedirs(storage_path+item+"/"+item1+"/",exist_ok=True)
def time_to_string(runtime):
seconds = runtime%60
runmins = (runtime-seconds)/60
mins = int(runmins%60)
runhrs = (runmins-mins)/60
hrs = int(runhrs)
return "%.2d:%.2d:%05.2f"%(hrs,mins,seconds)
def multivariate_laplace(n,d,rng=None, random_state=None):
rng = rng if rng is not None else np.random.RandomState(random_state)
X = rng.randn(n,d)
Z = rng.exponential(size=(n,1))
return X*np.sqrt(Z)
@nb.njit(cache=True)
def np_apply_along_axis(func1d, axis, arr):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@nb.njit(cache=True)
def np_apply_along_axis_kd(funckd, axis, arr, k = -1):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
k = k if k > 0 else arr.shape[0]
result = np.empty((k,arr.shape[1]))
for i in range(arr.shape[1]):
result[:, i] = funckd(arr[:, i])
else:
k = k if k > 0 else arr.shape[1]
result = np.empty((arr.shape[0],k))
for i in range(arr.shape[0]):
result[i, :] = funckd(arr[i, :])
return result
@nb.njit(cache=True)
def split(n, B):
sep = n//B
rem = n%B
indices = []
last = 0
cur = 0
for i in range(B):
cur = last + sep + (i < rem)
indices.append(cur)
last = cur
return indices
| true | true |
f71b3b5ffd9a0d39ef3e2a5f01e2965d34e8b74e | 1,448 | py | Python | make_string_alphabetic.py | Maffey/FunPythonScripts | 7682ab1f8dc7924eb4b3fb19d58ebbabe3901a22 | [
"Apache-2.0"
] | 1 | 2022-01-29T21:07:38.000Z | 2022-01-29T21:07:38.000Z | make_string_alphabetic.py | Maffey/fun-python-scripts | 7682ab1f8dc7924eb4b3fb19d58ebbabe3901a22 | [
"Apache-2.0"
] | 2 | 2019-05-15T13:48:58.000Z | 2019-05-29T18:33:34.000Z | make_string_alphabetic.py | Maffey/FunPythonScripts | 7682ab1f8dc7924eb4b3fb19d58ebbabe3901a22 | [
"Apache-2.0"
] | 1 | 2020-08-04T11:09:17.000Z | 2020-08-04T11:09:17.000Z | #! python
# Removes letter from word to make characters go alphabetically.
# It doesn't work all the time, but is efficient.
import unittest
class TestRemoveLettersAlphabet(unittest.TestCase):
def test_object1(self):
self.assertEqual(letters_to_remove('mateusz'), 3)
def test_object2(self):
self.assertEqual(letters_to_remove('cba'), 2)
def test_object3(self):
self.assertEqual(letters_to_remove('dirt'), 0)
def test_object4(self):
self.assertEqual(letters_to_remove('jablko'), 2)
def test_repeating_letters1(self):
self.assertEqual(letters_to_remove('gabriela'), 5)
def test_repeating_letters2(self):
self.assertEqual(letters_to_remove('banana'), 3)
def test_repeating_letters3(self):
self.assertEqual(letters_to_remove('apple'), 2)
def letters_to_remove(string: str) -> int:
string = list(string)
sorted_string = sorted(string)
letters_removed = 0
remaining_string = ""
for character in sorted_string:
index = string.index(character)
to_remove = string[:index]
letters_removed += len(to_remove)
for letter in to_remove:
string.remove(letter)
sorted_string.remove(letter)
remaining_string += character
string.remove(character)
print(f"[+] Remaining string: {remaining_string}")
return letters_removed
if __name__ == "__main__":
unittest.main()
| 27.320755 | 64 | 0.685083 |
import unittest
class TestRemoveLettersAlphabet(unittest.TestCase):
def test_object1(self):
self.assertEqual(letters_to_remove('mateusz'), 3)
def test_object2(self):
self.assertEqual(letters_to_remove('cba'), 2)
def test_object3(self):
self.assertEqual(letters_to_remove('dirt'), 0)
def test_object4(self):
self.assertEqual(letters_to_remove('jablko'), 2)
def test_repeating_letters1(self):
self.assertEqual(letters_to_remove('gabriela'), 5)
def test_repeating_letters2(self):
self.assertEqual(letters_to_remove('banana'), 3)
def test_repeating_letters3(self):
self.assertEqual(letters_to_remove('apple'), 2)
def letters_to_remove(string: str) -> int:
string = list(string)
sorted_string = sorted(string)
letters_removed = 0
remaining_string = ""
for character in sorted_string:
index = string.index(character)
to_remove = string[:index]
letters_removed += len(to_remove)
for letter in to_remove:
string.remove(letter)
sorted_string.remove(letter)
remaining_string += character
string.remove(character)
print(f"[+] Remaining string: {remaining_string}")
return letters_removed
if __name__ == "__main__":
unittest.main()
| true | true |
f71b3ba93636d762ed00bb7c81089b7edadd08c2 | 1,407 | py | Python | verifiers/statusCode.py | CalConnect/caldavtester | a17683554df8a9b80fceab91085de99945fefe48 | [
"Apache-2.0"
] | 2 | 2019-04-23T04:06:08.000Z | 2020-08-26T17:30:45.000Z | verifiers/statusCode.py | CalConnect/caldavtester | a17683554df8a9b80fceab91085de99945fefe48 | [
"Apache-2.0"
] | 3 | 2017-09-28T13:23:39.000Z | 2017-10-03T15:59:36.000Z | verifiers/statusCode.py | CalConnect/caldavtester | a17683554df8a9b80fceab91085de99945fefe48 | [
"Apache-2.0"
] | 2 | 2017-02-08T04:47:31.000Z | 2021-08-05T20:09:10.000Z | ##
# Copyright (c) 2006-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Verifier that chec ks the response status code for a specific value.
"""
class Verifier(object):
def verify(self, manager, uri, response, respdata, args): # @UnusedVariable
# If no status verification requested, then assume all 2xx codes are OK
teststatus = args.get("status", ["2xx"])
for test in teststatus:
if test[1:3] == "xx":
test = int(test[0])
else:
test = int(test)
if test < 100:
result = ((response.status / 100) == test)
else:
result = (response.status == test)
if result:
return True, ""
return False, " HTTP Status Code Wrong: %d expected one of %s" % (response.status, ", ".join(teststatus))
| 34.317073 | 120 | 0.628998 |
class Verifier(object):
def verify(self, manager, uri, response, respdata, args):
teststatus = args.get("status", ["2xx"])
for test in teststatus:
if test[1:3] == "xx":
test = int(test[0])
else:
test = int(test)
if test < 100:
result = ((response.status / 100) == test)
else:
result = (response.status == test)
if result:
return True, ""
return False, " HTTP Status Code Wrong: %d expected one of %s" % (response.status, ", ".join(teststatus))
| true | true |
f71b3d10ad093122f20e0962b3f6645057d8279b | 613 | py | Python | classifier_stgcn_real_only/utils/temp.py | 1suancaiyu/STEP | 54195112990feaee137f5137775c736d07c2d26f | [
"MIT"
] | 32 | 2020-02-21T16:12:13.000Z | 2022-03-11T09:00:47.000Z | classifier_stgcn_real_only/utils/temp.py | 1suancaiyu/STEP | 54195112990feaee137f5137775c736d07c2d26f | [
"MIT"
] | 12 | 2020-06-23T08:11:25.000Z | 2022-03-26T11:34:42.000Z | classifier_stgcn_real_only/utils/temp.py | 1suancaiyu/STEP | 54195112990feaee137f5137775c736d07c2d26f | [
"MIT"
] | 13 | 2020-04-01T16:51:50.000Z | 2022-03-03T10:15:10.000Z | import h5py
import os
import numpy as np
base_path = os.path.dirname(os.path.realpath(__file__))
feature_file = '/media/uttaran/FCE1-7BF3/Gamma/Gait/classifier_stgcn/model_classifier_stgcn/featuresCombineddeep_features.txt'
f = np.loadtxt(feature_file)
fCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/featuresCombined.h5', 'r')
fkeys = fCombined.keys()
dfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/deepFeaturesCombined.h5', 'w')
for i, fkey in enumerate(fkeys):
fname = [fkey][0]
feature = f[i, :]
dfCombined.create_dataset(fname, data=feature)
dfCombined.close()
| 38.3125 | 126 | 0.76509 | import h5py
import os
import numpy as np
base_path = os.path.dirname(os.path.realpath(__file__))
feature_file = '/media/uttaran/FCE1-7BF3/Gamma/Gait/classifier_stgcn/model_classifier_stgcn/featuresCombineddeep_features.txt'
f = np.loadtxt(feature_file)
fCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/featuresCombined.h5', 'r')
fkeys = fCombined.keys()
dfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/deepFeaturesCombined.h5', 'w')
for i, fkey in enumerate(fkeys):
fname = [fkey][0]
feature = f[i, :]
dfCombined.create_dataset(fname, data=feature)
dfCombined.close()
| true | true |
f71b3da82b76452958e6ff8037b2e5c369373cfd | 2,253 | py | Python | python/hostconfig/machines/alexf.py | stu-l/Chaste | 8efa8b440660553af66804067639f237c855f557 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | python/hostconfig/machines/alexf.py | stu-l/Chaste | 8efa8b440660553af66804067639f237c855f557 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | python/hostconfig/machines/alexf.py | stu-l/Chaste | 8efa8b440660553af66804067639f237c855f557 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Configuration for Finarfin
"""Copyright (c) 2005-2022, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
petsc_2_2_path = None
petsc_2_3_path = '/home/alex/petsc-2.3.2-p4/'
petsc_build_name = 'linux-gnu'
petsc_build_name_optimized = 'linux-gnu-opt'
dealii_path = None
metis_path = None
intel_path = None #'/opt/intel_cc_80'
#icpc = 'icpc'
other_includepaths = []
other_libpaths = ['/home/alex/hdf5/lib']
blas_lapack = ['lapack', 'blas']
other_libraries = ['boost_serialization', 'boost_filesystem', 'xerces-c']
tools = {'mpirun': '/home/alex/mpi/bin/mpirun',
'mpicxx': '/home/alex/mpi/bin/mpicxx'}
| 44.176471 | 79 | 0.782956 |
petsc_2_2_path = None
petsc_2_3_path = '/home/alex/petsc-2.3.2-p4/'
petsc_build_name = 'linux-gnu'
petsc_build_name_optimized = 'linux-gnu-opt'
dealii_path = None
metis_path = None
intel_path = None
other_includepaths = []
other_libpaths = ['/home/alex/hdf5/lib']
blas_lapack = ['lapack', 'blas']
other_libraries = ['boost_serialization', 'boost_filesystem', 'xerces-c']
tools = {'mpirun': '/home/alex/mpi/bin/mpirun',
'mpicxx': '/home/alex/mpi/bin/mpicxx'}
| true | true |
f71b400d9193c88784e05204664ee4a910ee628d | 1,831 | py | Python | awss3upload-capi/s3upload.py | krlex/aws-python-examples | 1310e82c748e3d0898809bbe909870fe4c7cb37b | [
"MIT"
] | 21 | 2021-02-13T04:11:01.000Z | 2022-03-28T09:13:53.000Z | awss3upload-capi/s3upload.py | krlex/aws-python-examples | 1310e82c748e3d0898809bbe909870fe4c7cb37b | [
"MIT"
] | null | null | null | awss3upload-capi/s3upload.py | krlex/aws-python-examples | 1310e82c748e3d0898809bbe909870fe4c7cb37b | [
"MIT"
] | 17 | 2019-07-26T06:02:27.000Z | 2022-03-23T00:06:12.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# s3upload.py
# It is an example that handles S3 buckets on AWS.
# It uses Client API (low-level) of Boto3.
# Upload a local file to a S3 bucket.
# You must provide 1 parameter:
# BUCKET_NAME = Name of the bucket
# OBJECT_NAME = Object file name in the bucket
# LOCAL_FILE_NAME = Local file name
import sys
import os
import boto3
import botocore
def main():
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if len(args) < 3:
print('Not enough parameters.\n'\
'Proper Usage is: python s3upload.py '\
'<BUCKET_NAME> <OBJECT_NAME> <LOCAL_FILE_NAME>')
sys.exit(1)
bucket_name = args[0]
key_name = args[1]
local_file_name = args[2]
print('Bucket: ' + bucket_name)
print('Object/Key: ' + key_name)
print('Local file: ' + local_file_name)
# Create an S3 Client
s3_client = boto3.client('s3')
if not os.path.isfile(local_file_name):
print("Error: File Not Found!!")
sys.exit(1)
# Upload object
try:
print('Uploading object ...')
s3_client.upload_file(local_file_name, bucket_name, key_name)
print('Uploaded')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "NoSuchBucket":
print("Error: Bucket does not exist!!")
elif e.response['Error']['Code'] == "InvalidBucketName":
print("Error: Invalid Bucket name!!")
elif e.response['Error']['Code'] == "AllAccessDisabled":
print("Error: You do not have access to the Bucket!!")
else:
raise
return
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| 28.609375 | 69 | 0.621518 |
import sys
import os
import boto3
import botocore
def main():
args = sys.argv[1:]
if len(args) < 3:
print('Not enough parameters.\n'\
'Proper Usage is: python s3upload.py '\
'<BUCKET_NAME> <OBJECT_NAME> <LOCAL_FILE_NAME>')
sys.exit(1)
bucket_name = args[0]
key_name = args[1]
local_file_name = args[2]
print('Bucket: ' + bucket_name)
print('Object/Key: ' + key_name)
print('Local file: ' + local_file_name)
s3_client = boto3.client('s3')
if not os.path.isfile(local_file_name):
print("Error: File Not Found!!")
sys.exit(1)
try:
print('Uploading object ...')
s3_client.upload_file(local_file_name, bucket_name, key_name)
print('Uploaded')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "NoSuchBucket":
print("Error: Bucket does not exist!!")
elif e.response['Error']['Code'] == "InvalidBucketName":
print("Error: Invalid Bucket name!!")
elif e.response['Error']['Code'] == "AllAccessDisabled":
print("Error: You do not have access to the Bucket!!")
else:
raise
return
if __name__ == '__main__':
main()
| true | true |
f71b4058fcf0bfe5202371ab731ffe619ab85852 | 827 | py | Python | src/lambda/face-detector-function/main/image_ops/resizer.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | src/lambda/face-detector-function/main/image_ops/resizer.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | src/lambda/face-detector-function/main/image_ops/resizer.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import timeit
import cv2
from PIL import Image
from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.tracing import Tracer
logger = Logger(service='face-detector', child=True)
tracer = Tracer(service='face-detector')
@tracer.capture_method(capture_response=False)
def resize_image(frame: np.ndarray, target_image_width: int, target_image_height: int) -> np.ndarray:
"""
Resize the drawn image and save to /tmp directory before uploading to S3
:param `frame`: frame data in numpy array
"""
start_time = timeit.default_timer()
new_frame: np.ndarray = cv2.resize(frame, dsize=(
target_image_width, target_image_height), interpolation=cv2.INTER_LINEAR)
logger.info(f'Resized frame after: {timeit.default_timer() - start_time}')
return new_frame
| 33.08 | 101 | 0.759371 | import numpy as np
import timeit
import cv2
from PIL import Image
from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.tracing import Tracer
logger = Logger(service='face-detector', child=True)
tracer = Tracer(service='face-detector')
@tracer.capture_method(capture_response=False)
def resize_image(frame: np.ndarray, target_image_width: int, target_image_height: int) -> np.ndarray:
start_time = timeit.default_timer()
new_frame: np.ndarray = cv2.resize(frame, dsize=(
target_image_width, target_image_height), interpolation=cv2.INTER_LINEAR)
logger.info(f'Resized frame after: {timeit.default_timer() - start_time}')
return new_frame
| true | true |
f71b4158a84075698aa6f4b4d391c6b10747b9c5 | 2,190 | py | Python | config/settings/local.py | suyash143/Base_todo | 3284f24f8b5c611088af6189a2f264a280fbbbd6 | [
"MIT"
] | 1 | 2022-03-16T10:22:34.000Z | 2022-03-16T10:22:34.000Z | config/settings/local.py | suyash143/Base_todo | 3284f24f8b5c611088af6189a2f264a280fbbbd6 | [
"MIT"
] | 1 | 2022-03-30T21:29:43.000Z | 2022-03-30T21:29:43.000Z | config/settings/local.py | suyash143/Base_todo | 3284f24f8b5c611088af6189a2f264a280fbbbd6 | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="00nep4XZom6FM9dVyJO6Y7kqt5JV8TN5GTNmcDnDhH0jTq3cDYEGLsyOsUYnOAsM",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| 39.818182 | 97 | 0.583105 | from .base import *
from .base import env
= True
= env(
"DJANGO_SECRET_KEY",
default="00nep4XZom6FM9dVyJO6Y7kqt5JV8TN5GTNmcDnDhH0jTq3cDYEGLsyOsUYnOAsM",
)
= ["localhost", "0.0.0.0", "127.0.0.1"]
= {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
= env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
S += ["debug_toolbar"]
+= ["debug_toolbar.middleware.DebugToolbarMiddleware"]
= {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
= ["127.0.0.1", "10.0.2.2"]
S += ["django_extensions"]
| true | true |
f71b41b1f6968010d18f796949243c59c3f77265 | 2,847 | py | Python | wagtail/hooks.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | 1 | 2022-02-09T05:25:30.000Z | 2022-02-09T05:25:30.000Z | wagtail/hooks.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | wagtail/hooks.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | from contextlib import ContextDecorator
from operator import itemgetter
from wagtail.utils.apps import get_app_submodules
_hooks = {}
def register(hook_name, fn=None, order=0):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn, order=order)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, order))
class TemporaryHook(ContextDecorator):
def __init__(self, hooks, order):
self.hooks = hooks
self.order = order
def __enter__(self):
for hook_name, fn in self.hooks:
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, self.order))
def __exit__(self, exc_type, exc_value, traceback):
for hook_name, fn in self.hooks:
_hooks[hook_name].remove((fn, self.order))
def register_temporarily(hook_name_or_hooks, fn=None, *, order=0):
"""
Register hook for ``hook_name`` temporarily. This is useful for testing hooks.
Can be used as a decorator::
def my_hook(...):
pass
class TestMyHook(Testcase):
@hooks.register_temporarily('hook_name', my_hook)
def test_my_hook(self):
pass
or as a context manager::
def my_hook(...):
pass
with hooks.register_temporarily('hook_name', my_hook):
# Hook is registered here
# Hook is unregistered here
To register multiple hooks at the same time, pass in a list of 2-tuples:
def my_hook(...):
pass
def my_other_hook(...):
pass
with hooks.register_temporarily([
('hook_name', my_hook),
('hook_name', my_other_hook),
]):
# Hooks are registered here
"""
if not isinstance(hook_name_or_hooks, list) and fn is not None:
hooks = [(hook_name_or_hooks, fn)]
else:
hooks = hook_name_or_hooks
return TemporaryHook(hooks, order)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
list(get_app_submodules("wagtail_hooks"))
_searched_for_hooks = True
def get_hooks(hook_name):
"""Return the hooks function sorted by their order."""
search_for_hooks()
hooks = _hooks.get(hook_name, [])
hooks = sorted(hooks, key=itemgetter(1))
return [hook[0] for hook in hooks]
| 24.543103 | 82 | 0.60555 | from contextlib import ContextDecorator
from operator import itemgetter
from wagtail.utils.apps import get_app_submodules
_hooks = {}
def register(hook_name, fn=None, order=0):
if fn is None:
def decorator(fn):
register(hook_name, fn, order=order)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, order))
class TemporaryHook(ContextDecorator):
def __init__(self, hooks, order):
self.hooks = hooks
self.order = order
def __enter__(self):
for hook_name, fn in self.hooks:
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, self.order))
def __exit__(self, exc_type, exc_value, traceback):
for hook_name, fn in self.hooks:
_hooks[hook_name].remove((fn, self.order))
def register_temporarily(hook_name_or_hooks, fn=None, *, order=0):
if not isinstance(hook_name_or_hooks, list) and fn is not None:
hooks = [(hook_name_or_hooks, fn)]
else:
hooks = hook_name_or_hooks
return TemporaryHook(hooks, order)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
list(get_app_submodules("wagtail_hooks"))
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
hooks = _hooks.get(hook_name, [])
hooks = sorted(hooks, key=itemgetter(1))
return [hook[0] for hook in hooks]
| true | true |
f71b41c7e4894d5f578583e52382342d197a0a53 | 373 | py | Python | tests/schema/mutation/snapshots/snap_test_maps_delete.py | TaiSakuma/acondbs | 990ab44ce4081cc0e04148a8375f7ce7081c2dee | [
"MIT"
] | null | null | null | tests/schema/mutation/snapshots/snap_test_maps_delete.py | TaiSakuma/acondbs | 990ab44ce4081cc0e04148a8375f7ce7081c2dee | [
"MIT"
] | null | null | null | tests/schema/mutation/snapshots/snap_test_maps_delete.py | TaiSakuma/acondbs | 990ab44ce4081cc0e04148a8375f7ce7081c2dee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_schema[deleteMap] 1'] = {
'data': {
'deleteMap': {
'ok': True
}
}
}
snapshots['test_schema[deleteMap] 2'] = {
'data': {
'map': None
}
}
| 16.217391 | 42 | 0.571046 |
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_schema[deleteMap] 1'] = {
'data': {
'deleteMap': {
'ok': True
}
}
}
snapshots['test_schema[deleteMap] 2'] = {
'data': {
'map': None
}
}
| true | true |
f71b4212c0a7a5b644b48292c8eaeacc6cbdda01 | 1,035 | py | Python | src/products/mixins.py | bopopescu/django-estore | c092ffa965b8ef68e71d27d34a17fde1beacd90e | [
"MIT"
] | null | null | null | src/products/mixins.py | bopopescu/django-estore | c092ffa965b8ef68e71d27d34a17fde1beacd90e | [
"MIT"
] | null | null | null | src/products/mixins.py | bopopescu/django-estore | c092ffa965b8ef68e71d27d34a17fde1beacd90e | [
"MIT"
] | 2 | 2019-04-29T14:16:10.000Z | 2020-07-23T12:04:17.000Z | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import Http404
class StaffRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(StaffRequiredMixin, self).as_view(*args, **kwargs)
return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if request.user.is_staff:
return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
else:
raise Http404
class LoginRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(LoginRequiredMixin, self).as_view(*args, **kwargs)
return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs) | 36.964286 | 85 | 0.713043 | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import Http404
class StaffRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(StaffRequiredMixin, self).as_view(*args, **kwargs)
return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if request.user.is_staff:
return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
else:
raise Http404
class LoginRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(LoginRequiredMixin, self).as_view(*args, **kwargs)
return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs) | true | true |
f71b4286b66f22ca7786f177364f1945a65ac3fc | 3,123 | py | Python | app/app/settings.py | M0narc/recipe-api | 3b6c204ca76d98310d26fcbeaa4537646a93e023 | [
"MIT"
] | 1 | 2022-03-11T20:38:42.000Z | 2022-03-11T20:38:42.000Z | app/app/settings.py | M0narc/recipe-api | 3b6c204ca76d98310d26fcbeaa4537646a93e023 | [
"MIT"
] | null | null | null | app/app/settings.py | M0narc/recipe-api | 3b6c204ca76d98310d26fcbeaa4537646a93e023 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p2-y-!@#t0dny#e+nx-txbsphwp(yt(9t939=o_*sf%&3z2_p%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| 25.185484 | 91 | 0.693244 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'p2-y-!@#t0dny#e+nx-txbsphwp(yt(9t939=o_*sf%&3z2_p%'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| true | true |
f71b43813e699367c1eaeee665cc3b3fd3d5c5d0 | 4,573 | py | Python | hgext/fsmonitor/state.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | hgext/fsmonitor/state.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | hgext/fsmonitor/state.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # state.py - fsmonitor persistent state
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import os
import socket
import struct
from mercurial.i18n import _
from mercurial import (
pathutil,
util,
)
_version = 4
_versionformat = ">I"
class state(object):
def __init__(self, repo):
self._vfs = repo.vfs
self._ui = repo.ui
self._rootdir = pathutil.normasprefix(repo.root)
self._lastclock = None
self._identity = util.filestat(None)
self.mode = self._ui.config('fsmonitor', 'mode', default='on')
self.walk_on_invalidate = self._ui.configbool(
'fsmonitor', 'walk_on_invalidate', False)
self.timeout = float(self._ui.config(
'fsmonitor', 'timeout', default='2'))
def get(self):
try:
file = self._vfs('fsmonitor.state', 'rb')
except IOError as inst:
self._identity = util.filestat(None)
if inst.errno != errno.ENOENT:
raise
return None, None, None
self._identity = util.filestat.fromfp(file)
versionbytes = file.read(4)
if len(versionbytes) < 4:
self._ui.log(
'fsmonitor', 'fsmonitor: state file only has %d bytes, '
'nuking state\n' % len(versionbytes))
self.invalidate()
return None, None, None
try:
diskversion = struct.unpack(_versionformat, versionbytes)[0]
if diskversion != _version:
# different version, nuke state and start over
self._ui.log(
'fsmonitor', 'fsmonitor: version switch from %d to '
'%d, nuking state\n' % (diskversion, _version))
self.invalidate()
return None, None, None
state = file.read().split('\0')
# state = hostname\0clock\0ignorehash\0 + list of files, each
# followed by a \0
if len(state) < 3:
self._ui.log(
'fsmonitor', 'fsmonitor: state file truncated (expected '
'3 chunks, found %d), nuking state\n', len(state))
self.invalidate()
return None, None, None
diskhostname = state[0]
hostname = socket.gethostname()
if diskhostname != hostname:
# file got moved to a different host
self._ui.log('fsmonitor', 'fsmonitor: stored hostname "%s" '
'different from current "%s", nuking state\n' %
(diskhostname, hostname))
self.invalidate()
return None, None, None
clock = state[1]
ignorehash = state[2]
# discard the value after the last \0
notefiles = state[3:-1]
finally:
file.close()
return clock, ignorehash, notefiles
def set(self, clock, ignorehash, notefiles):
if clock is None:
self.invalidate()
return
# Read the identity from the file on disk rather than from the open file
# pointer below, because the latter is actually a brand new file.
identity = util.filestat.frompath(self._vfs.join('fsmonitor.state'))
if identity != self._identity:
self._ui.debug('skip updating fsmonitor.state: identity mismatch\n')
return
try:
file = self._vfs('fsmonitor.state', 'wb', atomictemp=True,
checkambig=True)
except (IOError, OSError):
self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
return
with file:
file.write(struct.pack(_versionformat, _version))
file.write(socket.gethostname() + '\0')
file.write(clock + '\0')
file.write(ignorehash + '\0')
if notefiles:
file.write('\0'.join(notefiles))
file.write('\0')
def invalidate(self):
try:
os.unlink(os.path.join(self._rootdir, '.hg', 'fsmonitor.state'))
except OSError as inst:
if inst.errno != errno.ENOENT:
raise
self._identity = util.filestat(None)
def setlastclock(self, clock):
self._lastclock = clock
def getlastclock(self):
return self._lastclock
| 33.625 | 80 | 0.557839 |
from __future__ import absolute_import
import errno
import os
import socket
import struct
from mercurial.i18n import _
from mercurial import (
pathutil,
util,
)
_version = 4
_versionformat = ">I"
class state(object):
def __init__(self, repo):
self._vfs = repo.vfs
self._ui = repo.ui
self._rootdir = pathutil.normasprefix(repo.root)
self._lastclock = None
self._identity = util.filestat(None)
self.mode = self._ui.config('fsmonitor', 'mode', default='on')
self.walk_on_invalidate = self._ui.configbool(
'fsmonitor', 'walk_on_invalidate', False)
self.timeout = float(self._ui.config(
'fsmonitor', 'timeout', default='2'))
def get(self):
try:
file = self._vfs('fsmonitor.state', 'rb')
except IOError as inst:
self._identity = util.filestat(None)
if inst.errno != errno.ENOENT:
raise
return None, None, None
self._identity = util.filestat.fromfp(file)
versionbytes = file.read(4)
if len(versionbytes) < 4:
self._ui.log(
'fsmonitor', 'fsmonitor: state file only has %d bytes, '
'nuking state\n' % len(versionbytes))
self.invalidate()
return None, None, None
try:
diskversion = struct.unpack(_versionformat, versionbytes)[0]
if diskversion != _version:
self._ui.log(
'fsmonitor', 'fsmonitor: version switch from %d to '
'%d, nuking state\n' % (diskversion, _version))
self.invalidate()
return None, None, None
state = file.read().split('\0')
if len(state) < 3:
self._ui.log(
'fsmonitor', 'fsmonitor: state file truncated (expected '
'3 chunks, found %d), nuking state\n', len(state))
self.invalidate()
return None, None, None
diskhostname = state[0]
hostname = socket.gethostname()
if diskhostname != hostname:
self._ui.log('fsmonitor', 'fsmonitor: stored hostname "%s" '
'different from current "%s", nuking state\n' %
(diskhostname, hostname))
self.invalidate()
return None, None, None
clock = state[1]
ignorehash = state[2]
notefiles = state[3:-1]
finally:
file.close()
return clock, ignorehash, notefiles
def set(self, clock, ignorehash, notefiles):
if clock is None:
self.invalidate()
return
identity = util.filestat.frompath(self._vfs.join('fsmonitor.state'))
if identity != self._identity:
self._ui.debug('skip updating fsmonitor.state: identity mismatch\n')
return
try:
file = self._vfs('fsmonitor.state', 'wb', atomictemp=True,
checkambig=True)
except (IOError, OSError):
self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
return
with file:
file.write(struct.pack(_versionformat, _version))
file.write(socket.gethostname() + '\0')
file.write(clock + '\0')
file.write(ignorehash + '\0')
if notefiles:
file.write('\0'.join(notefiles))
file.write('\0')
def invalidate(self):
try:
os.unlink(os.path.join(self._rootdir, '.hg', 'fsmonitor.state'))
except OSError as inst:
if inst.errno != errno.ENOENT:
raise
self._identity = util.filestat(None)
def setlastclock(self, clock):
self._lastclock = clock
def getlastclock(self):
return self._lastclock
| true | true |
f71b440f7ca22eaf9b320a66190d76f7e190c156 | 39,690 | py | Python | galaxy_milkyway_files/tools/wohl-proteomics/ssl_converter/bak_ssl_converter.py | wohllab/milkyway_proteomics | 622969f7f4a5955ae2bff299ae7b08572d422814 | [
"MIT"
] | null | null | null | galaxy_milkyway_files/tools/wohl-proteomics/ssl_converter/bak_ssl_converter.py | wohllab/milkyway_proteomics | 622969f7f4a5955ae2bff299ae7b08572d422814 | [
"MIT"
] | null | null | null | galaxy_milkyway_files/tools/wohl-proteomics/ssl_converter/bak_ssl_converter.py | wohllab/milkyway_proteomics | 622969f7f4a5955ae2bff299ae7b08572d422814 | [
"MIT"
] | null | null | null | import os, sys, re
import shutil
import optparse
import shutil
import pandas
import numpy
import subprocess
import fnmatch
from joblib import Parallel, delayed
import multiprocessing
from Bio import SeqIO
import glob
#####################################
#This is the script to produce SSL file outputs for skyline spectral library construction
#
#Fraction parsing is taken from after the final "-" in the file name. For example, "2015-10-05-wb-HEK293-BioRep1-F1.mzML"
#would belong to fraction "F1"
#
#VERSION 1.7.5
version="1.7.5"  # script version, echoed in the startup banner below
#DATE: 3/03/2016
date="3/03/2016"  # release date, echoed in the startup banner below
#####################################
# Startup banner so the Galaxy job log records which converter version ran.
print "-----------------------------------------------------------------------"
print "Welcome to the SSL file converter for Galaxy, Wohlschlegel Lab UCLA"
print "Written by William Barshop"
print "Version: ",version
print "Date: ",date
def applyParallel(dfGrouped, func):
    """Apply func to every group of a pandas GroupBy in parallel (one joblib
    worker per CPU core) and return the concatenated result frame."""
    per_group_jobs = (delayed(func)(group) for name, group in dfGrouped)
    partial_frames = Parallel(n_jobs=multiprocessing.cpu_count())(per_group_jobs)
    return pandas.concat(partial_frames)
def applyParallelQuarter(dfGrouped, func):
    """Apply func to every group of a pandas GroupBy in parallel, using a
    quarter of the CPU cores, and return the concatenated result frame.

    Uses floor division and clamps to at least one worker: the original
    cpu_count()/4 passed n_jobs=0 to joblib on machines with fewer than
    four cores (invalid), and would pass a float under Python 3 division.
    """
    n_jobs = max(1, multiprocessing.cpu_count() // 4)
    retLst = Parallel(n_jobs=n_jobs)(delayed(func)(group) for name, group in dfGrouped)
    return pandas.concat(retLst)
def applyParallelHalf(dfGrouped, func):
    """Apply func to every group of a pandas GroupBy in parallel, using half
    of the CPU cores, and return the concatenated result frame.

    Uses floor division and clamps to at least one worker: the original
    cpu_count()/2 passed n_jobs=0 to joblib on a single-core machine
    (invalid), and would pass a float under Python 3 division.
    """
    n_jobs = max(1, multiprocessing.cpu_count() // 2)
    retLst = Parallel(n_jobs=n_jobs)(delayed(func)(group) for name, group in dfGrouped)
    return pandas.concat(retLst)
def fido_filter(unfiltered):
    """Prune each PSM's Fido protein assignments by protein-level q-value.

    For every row, the comma-joined 'protein id', 'protein q-values', and
    'peptide prot-indicies' columns are reduced to only those proteins whose
    q-value is at or below the module global gfido_q_threshold.  Rows with at
    least one surviving protein get their columns rewritten (kept indices go
    into an 'indicies' column) and 'fido_filter' set to 1; rows with none get
    'fido_filter' set to 0 so a later dataframe filter can drop them.

    Returns the mutated dataframe.
    """
    for row_idx, row in unfiltered.iterrows():
        assignments = zip(row['protein id'].split(','),
                          row['protein q-values'].split(','),
                          row['peptide prot-indicies'].split(','))
        kept = [triple for triple in assignments
                if float(triple[1]) <= gfido_q_threshold]
        if kept:
            unfiltered.loc[row_idx, 'protein q-values'] = ",".join(t[1] for t in kept)
            unfiltered.loc[row_idx, 'protein id'] = ",".join(t[0] for t in kept)
            unfiltered.loc[row_idx, 'indicies'] = ",".join(t[2] for t in kept)
            unfiltered.loc[row_idx, 'fido_filter'] = 1  # keep this PSM in the later filter
        else:
            unfiltered.loc[row_idx, 'fido_filter'] = 0  # no protein passed; mark for removal
    return unfiltered
def fido_filter_row(eachrow):
    """Prune one PSM row's Fido protein assignments by protein-level q-value.

    Drops every (protein id, q-value, prot-index) triple whose q-value exceeds
    the module global gfido_q_threshold, rewrites the comma-joined columns in
    place, and flags the row ('fido_filter' = 1 to keep, 0 to discard).

    Returns the modified row so the function works with
    DataFrame.apply(fido_filter_row, axis=1) — the original implementation
    mutated the row but implicitly returned None, which made apply() yield
    a useless column of Nones.
    """
    prot_q_vals = eachrow['protein q-values'].split(',')
    protein_ids = eachrow['protein id'].split(',')
    indicies = eachrow['peptide prot-indicies'].split(',')
    new_q_vals=[]
    new_prot_ids=[]
    new_indicies=[]
    for prot_id, q_val, prot_index in zip(protein_ids, prot_q_vals, indicies):
        if float(q_val) <= gfido_q_threshold:  # keep assignments at or below the cutoff
            new_q_vals.append(q_val)
            new_prot_ids.append(prot_id)
            new_indicies.append(prot_index)
    if len(new_prot_ids) >= 1:
        eachrow['protein q-values']=",".join(new_q_vals)
        eachrow['protein id']=",".join(new_prot_ids)
        eachrow['indicies']=",".join(new_indicies)
        eachrow['fido_filter']=1  # row survives the later dataframe filter
    else:
        eachrow['fido_filter']=0  # no proteins passed; mark row for removal
    return eachrow
####################################
#Argument parsing! So much fun!
#We'll use OptParse even though some
#people really rave about argparse...
#
#
# NB: With Optparse, if an option is
# not specified, it will take a
# value of None
####################################
fractions=False  # set True below when --fractions is passed
#print sys.argv,"THESE ARE THE ARGS"
parser = optparse.OptionParser()
# Input/output locations and filtering thresholds supplied by the Galaxy wrapper.
parser.add_option("--pout",action="store",type="string",dest="operation_folder")
parser.add_option("--qthresh",action="store", type="float", dest="q_threshold")
parser.add_option("--fidoq",action="store", type="float", dest="fido_q_threshold")
parser.add_option("--fido_q_raw",action="store", type="string", dest="fido_q_raw")
parser.add_option("--pRS_prob_thresh",action="store", type="float", dest="prs_prob_threshold")
parser.add_option("--ptmRS_prob_thresh",action="store", type="float", dest="ptmrs_prob_threshold")
parser.add_option("--FLRthresh",action="store", type="float", dest="flr_threshold")
parser.add_option("--LFLR",action="store_true", dest="local_flr")
parser.add_option("--mzml",action="store",type="string",dest="mzml_files")
parser.add_option("--mass_corrected_mzml",action="store",type="string",dest="mc_mzml_files")
parser.add_option("--blib",action="store_true",dest="blib")
parser.add_option("--fasta",action="store",type="string",dest="fasta")
parser.add_option("--ffasta",action="store",type="string",dest="ffasta") # THIS IS THE FILTERED FIDO OUTPUT
parser.add_option("--expgroups",action="store",type="string",dest="exp_group_file")
parser.add_option("--ssl",action="store",type="string",dest="ssl_output_folder")
parser.add_option("--fractions",action="store_true",dest="fractions")
parser.add_option("--OnePPS",action="store_true",dest="one_pps")
parser.add_option("--only_mod",action="store_true",dest="only_mod")
parser.add_option("--diaumpire",action="store_true",dest="diaumpire")
parser.add_option("--no_mzml",action="store_true",dest="no_mzml")
#parser.add_option("--saint",action="store",type="string",dest="saint_outputs")
(options,args) = parser.parse_args()
# Fractionated experiments group outputs per fractionation set instead of per run.
if options.fractions is True:
    fractions=True
else:
    print "We'll give outputs by acquisition (runs), not by experimental set."
#saint=False
#if options.saint_outputs is not None:
#    saint=True
#    saint_interact,saint_bait,saint_prey=options.saint_outputs.split(",")
#### Check for FIDO q-filter
# Each of the following flags enables an optional downstream filtering step;
# optparse leaves the option at None when it was not supplied.
fido_q=False
if options.fido_q_threshold is not None:
    fido_q=True
    global gfido_q_threshold #For parallel access via JobLib/Multiprocessing...
    gfido_q_threshold = options.fido_q_threshold
    print "We're going to filter by Fido Q-Value of ",options.fido_q_threshold
#### Check for LuciPHOr
luciphor=False
if options.flr_threshold is not None:
    luciphor=True
    print "We will filter by LuciPHOr FLR of ",options.flr_threshold
#### Check for PhosphoRS
phosphoRS=False
if options.prs_prob_threshold is not None:
    phosphoRS=True
    print "We will filter by a PhosphoRS probability of ",options.prs_prob_threshold
#### Check for ptmRS
ptmRS=False
if options.ptmrs_prob_threshold is not None:
ptmRS=True
print "We will filter by a ptmRS probability of ",options.prs_prob_threshold
# Collect every percolator target PSM table under the operation folder,
# skipping any "uncorrected" variants produced by mass correction.
psms_files=[]
for root, subFolders, files in os.walk(options.operation_folder):
    for eachfile in files:
        if 'target.psms.txt' in eachfile and 'uncorrected' not in eachfile:
            psms_files.append(str(os.path.join(root,eachfile)))
# Load each PSM table and stack them into one dataframe.
# NOTE(review): DataFrame.from_csv is deprecated in modern pandas; the
# equivalent call would be pandas.read_csv(eachfile, sep='\t') — verify
# against the pandas version pinned for this tool before changing.
dataframe_vector=[]
for eachfile in psms_files:
    newdf=pandas.DataFrame.from_csv(eachfile,sep='\t',index_col=False)
    dataframe_vector.append(newdf)
combined_results=pandas.concat(dataframe_vector)
del dataframe_vector
# Read the experimental-group description file and build lookup tables that
# map between crux file indices, mzML file names, and fractionation groups.
group_information = pandas.read_csv(options.exp_group_file,sep='\t',dtype={'Fractionation Group ID String': object,'Fractionation Group Name':object,'Biological Condition':object})
run_dict={} # Key is file_idx, value is file_name.mzML
rev_run_dict={} #Key is file_name.mzML, value is file_idx
group_to_run_dict={} # Key is group, value is [1, 2, 3, 4] list of file_idx belonging to runs in the group...
run_to_group_dict={} # Key is file_idx, value is group...
group_to_file_name={} # key is group, value is ["xxxx.mzML", "xxxx.mzML"]
if fractions:
    fractions_to_run_dict={} # Key is fraction tag (text after final "-"), value is list of file_idx
for index,row in group_information.iterrows():
    print row
    run_dict[str(row['Crux File Integer'])]=row['Original File Name']+".mzML"
    rev_run_dict[row['Original File Name']+".mzML"]=str(row['Crux File Integer'])
    if row['Fractionation Group ID String'] in group_to_run_dict:
        group_to_run_dict[row['Fractionation Group ID String']].append(str(row['Crux File Integer']))
    else:
        group_to_run_dict[row['Fractionation Group ID String']] = [str(row['Crux File Integer'])]
    # A run may only belong to one group; duplicates are warned about, not merged.
    if str(row['Crux File Integer']) in run_to_group_dict:
        #run_to_group_dict[str(row['Crux File Integer'])].append(row['Fractionation Group ID String'])
        print "YOU HAVE MULTIPLE COPIES OF A RUN IN THE EXPERIMENTAL INFORMATION FILE... WARNING!"
    else:
        run_to_group_dict[str(row['Crux File Integer'])]=row['Fractionation Group ID String']
    if row['Fractionation Group ID String'] in group_to_file_name:
        group_to_file_name[row['Fractionation Group ID String']].append(str(row['Original File Name'])+".mzML")
    else:
        group_to_file_name[row['Fractionation Group ID String']] = [str(row['Original File Name'])+".mzML"]
    if fractions:
        # Fraction tag is everything after the final "-" in the run name
        # (e.g. "F1" for "...-BioRep1-F1"), per the header comment above.
        fraction_tag=str(row['Original File Name'].rsplit("-",1)[1])
        if fraction_tag in fractions_to_run_dict:
            fractions_to_run_dict[fraction_tag].append(str(row['Crux File Integer']))#str(row['Original File Name'])+".mzML")
        else:
            fractions_to_run_dict[fraction_tag]=[str(row['Crux File Integer'])]
# Normalize file/file_idx columns to strings, then rewrite each row's 'file'
# column to the canonical <name>.mzML from the design file.
combined_results['file_idx']=combined_results['file_idx'].astype(str)
combined_results['file']=combined_results['file'].astype(str)
####################### We'll handle putting in the file names just to be sure this has been handled!
fix_combined=[]
print run_dict,"This is run dict"
print rev_run_dict,"this is rev dict"
for each_idx in run_dict:
    # NOTE(review): str.contains interprets the file name as a regex — a file
    # name containing regex metacharacters (e.g. '+') would mis-match; verify
    # upstream naming rules, or pass regex=False.
    mask = combined_results[(combined_results.file.str.contains(run_dict[each_idx]))] # MODIFIED....
    mask['file']=run_dict[each_idx]
    fix_combined.append(mask)
combined_results=pandas.concat(fix_combined)
if options.diaumpire:
    # DIA-Umpire pseudo-spectra are 1-based relative to the mzML index.
    print "DIAUmpire inputs: Decrementing scans to match mzML file index."
    combined_results['scan']=combined_results['scan']-1
# Normalize the localization-tool columns: fill NaNs first, then cast, so the
# integer casts cannot fail.  (astype(int) raises ValueError on NaN.)
if luciphor:
    # Collapsed the original two-step fillna/astype into the canonical order.
    combined_results['luci_numPPS']=combined_results['luci_numPPS'].fillna(0).astype(int)
    combined_results['luci_globalFLR']=combined_results['luci_globalFLR'].astype(float).fillna(0.0)
    combined_results['luci_localFLR']=combined_results['luci_localFLR'].astype(float).fillna(0.0)
if phosphoRS:
    combined_results['pRS_peptideLocalizationProbability']=combined_results['pRS_peptideLocalizationProbability'].astype(float).fillna(1.00)
    # BUGFIX: original was .astype(int).fillna(0), which raises on NaN and
    # never reaches the fillna; reordered to match the ptmRS branch below.
    combined_results['pRS_numPPS']=combined_results['pRS_numPPS'].fillna(0).astype(int)
if ptmRS:
    combined_results['ptmRS_peptideLocalizationProbability']=combined_results['ptmRS_peptideLocalizationProbability'].astype(float).fillna(1.00)
    combined_results['ptmRS_totalNumPPS']=combined_results['ptmRS_totalNumPPS'].fillna(0).astype(int)
#for i,each in combined_results.iterrows():
# Split the combined results two ways: per fractionation group and per run.
new_results={} # KEY IS GROUP NAME
results_per_run={} # KEY IS FILE_IDX
#print "tehse are items in group to run dict",group_to_run_dict
################################################## THIS BLOCK OF CODE BELOW DOES NOT WORK WELL... FILTERING BY "IN" and for FILE IDX IS A BAD IDEA SINCE "1" IS ALSO IN "10" and "11"... STUPID.
#for each_group in group_to_run_dict:
#    bool_filter=combined_results.copy(deep=True)
#    bool_filter['file_idx']=bool_filter['file_idx'].astype(str)
#    #mask8=combined_results[(bool_filter.file_idx == "8" )]
#    mask = combined_results[(combined_results.file_idx.str.contains('|'.join(group_to_run_dict[each_group])))] #############Was this inappropriate set to file instead of file_idx?
#    new_results[each_group]=mask # results by group
#    for each_file in set(mask['file_idx']):
#        each_file_mask=mask[(mask.file_idx == each_file)]
#        results_per_run[str(each_file)]=each_file_mask # results by run
# Matching on full .mzML file names (below) avoids the prefix-collision problem
# the commented-out file_idx version suffered from.
for each_group in group_to_file_name:
    bool_filter=combined_results.copy(deep=True)
    #bool_filter['file_idx']=bool_filter['file_idx'].astype(str)
    #mask8=combined_results[(bool_filter.file_idx == "8" )]
    mask = combined_results[(combined_results.file.str.contains('|'.join(group_to_file_name[each_group])))]
    new_results[each_group]=mask # results by group
    for each_file in set(mask['file']):
        each_file_mask=mask[(mask.file.str.contains(each_file))]
        results_per_run[str(rev_run_dict[each_file])]=each_file_mask # results by run
####################################################
if options.one_pps is True:
    print "Allowing Unambiguous Localization Peptides Through..."
basedir=os.getcwd()
# Non-fractionated path: write one .ssl per run into a single shared output
# folder, split well-localized PSMs (.ssl) from poorly localized ones
# (*_type2.ssl), then optionally build a BiblioSpec library.
if not fractions: #This is for all times when we have 1-D runs to compare.
    for eachgroup in set(group_information['Fractionation Group ID String']):
        os.chdir(basedir)
        os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
        #os.mkdir(basedir+"/"+options.ssl_output_folder+"/"+eachgroup)
        #if not options.no_mzml:
        for eachfile in group_to_file_name[eachgroup]:
            #shutil.copy(os.path.join(basedir,eachfile),basedir+"/"+options.ssl_output_folder+"/"+eachgroup+"/"+eachfile)
            shutil.copy(os.path.join(basedir,eachfile),basedir+"/"+options.ssl_output_folder+"/"+eachfile)
        # NOTE(review): this inner 'if not fractions' is always true here
        # (we are already inside the outer 'if not fractions' branch).
        if not fractions:
            for eachrun in group_to_run_dict[eachgroup]:
                this_run_results=results_per_run[eachrun]
                this_run_results['protein q-values']=this_run_results['protein q-values'].astype(str)
                if fido_q:
                    # Parallel FIDO protein-q filter; rows surviving get fido_filter==1.
                    this_run_results['fido_filter']=1
                    this_run_grouped=this_run_results.groupby(numpy.arange(len(this_run_results))//multiprocessing.cpu_count())
                    this_run_results=applyParallelHalf(this_run_grouped,fido_filter)
                    #print type(this_run_results),"and type"
                    this_run_results=this_run_results[this_run_results['fido_filter']==1]
                    this_run_results=this_run_results.drop('fido_filter',axis=1)
                    del this_run_grouped
                # mask = well-localized PSMs; type2_mask = poorly localized ones.
                if luciphor:
                    if options.local_flr is True:
                        if options.one_pps is True:
                            # one_pps: also admit peptides with zero ambiguity (numPPS == numRPS).
                            mask=this_run_results[numpy.logical_or(( this_run_results.luci_localFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
                            type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_localFLR > options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Unambiguous and poorly localized.
                        else:
                            mask=this_run_results[( this_run_results.luci_localFLR <= options.flr_threshold)]
                            type2_mask=this_run_results[( this_run_results.luci_localFLR > options.flr_threshold )] #Unambiguous and poorly localized.
                    else:
                        if options.one_pps is True:
                            mask=this_run_results[numpy.logical_or(( this_run_results.luci_globalFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
                            type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_globalFLR > options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Unambiguous and poorly localized.
                        else:
                            mask=this_run_results[( this_run_results.luci_globalFLR <= options.flr_threshold)]
                            type2_mask=this_run_results[( this_run_results.luci_globalFLR > options.flr_threshold )] #Unambiguous and poorly localized.
                elif phosphoRS:
                    if options.one_pps is True:
                        # NOTE(review): type2 here uses prob >= threshold (same direction
                        # as mask); looks suspicious compared with the else-branch below.
                        mask=this_run_results[numpy.logical_or(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ) , (this_run_results.pRS_numPPS - this_run_results.numModSites == 0))]
                        type2_mask=this_run_results[numpy.logical_and(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ), (this_run_results.pRS_numPPS - this_run_results.numModSites > 0))] #Unambiguous and poorly localized.
                    else:
                        mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold)]
                        type2_mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability < options.prs_prob_threshold )] #Unambiguous and poorly localized.
                elif ptmRS:
                    #if options.one_pps is True:
                    #    mask=this_run_results[numpy.logical_or(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ) , (this_run_results.ptmRS_numPPS - this_run_results.numModSites == 0))]
                    #    type2_mask=this_run_results[numpy.logical_and(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ), (this_run_results.ptmRS_numPPS - this_run_results.numModSites > 0))] #Unambiguous and poorly localized.
                    #else:
                    mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold)]
                    type2_mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability < options.ptmrs_prob_threshold )] #Unambiguous and poorly localized.
                else:
                    mask=this_run_results
                if options.only_mod is True:
                    # Keep only PSMs carrying at least one reported modification site.
                    if luciphor:
                        mask=mask[mask['luci_numRPS'] >= 1].copy(deep=True)
                    elif phosphoRS:
                        mask=mask[mask['numModSites'] >= 1].copy(deep=True)
                    elif ptmRS:
                        mask=mask[mask['ptmRS_numMods'] >= 1].copy(deep=True)
                # Pick the sequence column from whichever localizer is active.
                if luciphor:
                    ssl_df=mask[['file','scan','charge','luci_sequence','percolator q-value']]
                    ssl_df.rename(columns={'luci_sequence':'sequence'}, inplace=True)
                elif phosphoRS:
                    ssl_df=mask[['file','scan','charge','pRS_sequence','percolator q-value']]
                    ssl_df.rename(columns={'pRS_sequence':'sequence'}, inplace=True)
                elif ptmRS:
                    ssl_df=mask[['file','scan','charge','ptmRS_sequence','percolator q-value']]
                    ssl_df.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True)
                else:
                    ssl_df=mask[['file','scan','charge','sequence','percolator q-value']]
                ssl_df.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
                ssl_df_filtered=ssl_df[(ssl_df['percolator qvalue']<=options.q_threshold)]
                #with open(basedir+"/"+options.ssl_output_folder+"/"+eachgroup+"/"+run_dict[eachrun].replace(".mzML","")+".ssl",'wb') as ssl_writer:
                with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+".ssl",'wb') as ssl_writer:
                    ssl_df_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                    ssl_df_filtered['score-type']="PERCOLATOR QVALUE"
                    ssl_df_filtered.to_csv(path_or_buf=ssl_writer,sep="\t",index=False,header=True)
                # Poorly localized PSMs go to a parallel *_type2.ssl file.
                if luciphor:
                    with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
                        ssl_df_type2=type2_mask[['file','scan','charge','luci_sequence','percolator q-value']]
                        ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
                        ssl_df_type2.rename(columns={'luci_sequence':'sequence'}, inplace=True)
                        ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
                        ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                        ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
                        ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
                elif phosphoRS:
                    with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
                        ssl_df_type2=type2_mask[['file','scan','charge','pRS_sequence','percolator q-value']]
                        ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
                        ssl_df_type2.rename(columns={'pRS_sequence':'sequence'}, inplace=True)
                        ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
                        ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                        ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
                        ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
                elif ptmRS:
                    with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
                        ssl_df_type2=type2_mask[['file','scan','charge','ptmRS_sequence','percolator q-value']]
                        ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
                        ssl_df_type2.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True)
                        ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
                        ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                        ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
                        ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
    if options.blib:
        # Build one combined BiblioSpec library from all .ssl files, keeping
        # the *_type2.ssl files out by temporarily renaming them.
        print "Let's build some blib files..."
        os.chdir(basedir)
        os.chdir(basedir+"/"+options.ssl_output_folder)
        #cmd = '/galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy *.ssl combined_spectral_lib.blib'
        #cmd = 'ls *.ssl | /galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy -m 1000M combined_spectral_lib.blib'
        #cmd = 'wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibBuild.exe -a milkyway-galaxy -m 1000M *.ssl redundant_spectral_lib.blib'
        #print "running command ",cmd
        #subprocess.call(cmd,shell=True)
        #filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe redundant_spectral_lib.blib filtered_spectral_lib.blib'
        #subprocess.call(filtercmd,shell=True)
        for file in glob.glob("*type2.ssl"):
            os.rename(file,file.split(".")[0]+".not_ssl")
            print "protected",file,"from inclusion in spectral lib..."
        cmd = 'wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibBuild.exe -a milkyway-galaxy -m 1000M *.ssl combined_spectral_lib.blib'
        print "running command ",cmd
        subprocess.call(cmd,shell=True)
        for file in glob.glob("*.not_ssl"):
            os.rename(file,file.split(".")[0]+".ssl")
        filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe combined_spectral_lib.blib filtered.blib'
        subprocess.call(filtercmd,shell=True)
        if options.mc_mzml_files is not None: #and not options.no_mzml:
            # Swap plain mzML copies for their mass-corrected counterparts.
            for file in glob.glob("*.mzML"):
                os.remove(file)
                print "removing ",file
            os.chdir(basedir)
            for file in glob.glob("mc_*.mzML"):
                print "replacing ",file
                shutil.copy(file,basedir+"/"+options.ssl_output_folder+"/"+file.split("_",1)[1])
# Fractionated path: outputs are organized into one sub-folder per fraction
# tag; blib builds are batched and run concurrently afterwards.
else: #This is for when fractions is true... so we'll organize the output into fractions
    #fractions_to_run_dict[fraction_tag].append(str(row['Original File Name'])+".mzML")
    blib_cmds=[]
    filter_cmds=[]
    for eachfraction in fractions_to_run_dict:
        fraction_set=fractions_to_run_dict[eachfraction]
        for each_chrom_fraction in fraction_set:
            if not os.path.isdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"):
                os.mkdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/")
            #if not options.no_mzml:
            shutil.copy(os.path.join(basedir,run_dict[each_chrom_fraction]),basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[each_chrom_fraction])
    for eachfraction in fractions_to_run_dict:
        os.chdir(basedir)
        fraction_set=fractions_to_run_dict[eachfraction]
        for eachrun in fraction_set:
            this_run_results=results_per_run[eachrun]
            if fido_q:
                # NOTE(review): initialized to 0 here but to 1 in the
                # non-fraction branch — confirm applyParallelHalf sets it in
                # both directions.
                this_run_results['fido_filter']=0
                this_run_grouped=this_run_results.groupby(numpy.arange(len(this_run_results))//multiprocessing.cpu_count())
                this_run_results=applyParallelHalf(this_run_grouped,fido_filter)
                print this_run_results.columns,"this is columns..."
                this_run_results=this_run_results[this_run_results['fido_filter']==1]
                this_run_results=this_run_results.drop('fido_filter',axis=1)
                del this_run_grouped
            #print "------------------------------"
            #print "each run is ",eachrun
            #print set(this_run_results['file_idx']),"idx set..."
            #print set(this_run_results['file']),"file set..."
            #print "------------------------------"
            os.chdir(basedir)
            os.chdir(options.operation_folder+str(run_to_group_dict[eachrun])+".pin_out/crux-output/")
            # mask = well-localized PSMs; type2_mask = poorly localized ones.
            if luciphor:
                if options.local_flr is True:
                    if options.one_pps is True:
                        # NOTE(review): type2 uses '>=' here but '>' in the
                        # non-fraction branch — PSMs exactly at the threshold
                        # land in both masks. Confirm intended.
                        mask=this_run_results[numpy.logical_or(( this_run_results.luci_localFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
                        type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_localFLR >= options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Unambiguous and poorly localized.
                    else:
                        mask=this_run_results[( this_run_results.luci_localFLR <= options.flr_threshold)]
                        type2_mask=this_run_results[( this_run_results.luci_localFLR >= options.flr_threshold )] #Unambiguous and poorly localized.
                else:
                    if options.one_pps is True:
                        mask=this_run_results[numpy.logical_or(( this_run_results.luci_globalFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
                        type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_globalFLR >= options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Unambiguous and poorly localized.
                    else:
                        mask=this_run_results[( this_run_results.luci_globalFLR <= options.flr_threshold)]
                        type2_mask=this_run_results[( this_run_results.luci_globalFLR >= options.flr_threshold )] #Unambiguous and poorly localized.
            elif phosphoRS:
                if options.one_pps is True:
                    mask=this_run_results[numpy.logical_or(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ) , (this_run_results.pRS_numPPS - this_run_results.numModSites == 0))]
                    type2_mask=this_run_results[numpy.logical_and(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ), (this_run_results.pRS_numPPS - this_run_results.numModSites > 0))] #Unambiguous and poorly localized.
                else:
                    mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold)]
                    type2_mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability < options.prs_prob_threshold )] #Unambiguous and poorly localized.
            elif ptmRS:
                #if options.one_pps is True:
                #    mask=this_run_results[numpy.logical_or(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ) , (this_run_results.ptmRS_numPPS - this_run_results.ptmRS_numMods == 0))]
                #    type2_mask=this_run_results[numpy.logical_and(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ), (this_run_results.ptmRS_numPPS - this_run_results.ptmRS_numMods > 0))] #Unambiguous and poorly localized.
                mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold)]
                type2_mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability < options.ptmrs_prob_threshold )] #Unambiguous and poorly localized.
            else:
                mask=this_run_results
            if options.only_mod is True:
                # Keep only PSMs carrying at least one reported modification site.
                if luciphor:
                    mask=mask[mask['luci_numRPS'] >= 1].copy(deep=True)
                elif phosphoRS:
                    mask=mask[mask['numModSites'] >= 1].copy(deep=True)
                elif ptmRS:
                    mask=mask[mask['ptmRS_numMods'] >= 1].copy(deep=True)
            # Pick the sequence column from whichever localizer is active.
            if luciphor:
                ssl_df=mask[['file','scan','charge','luci_sequence','percolator q-value']]
                ssl_df.rename(columns={'luci_sequence':'sequence'}, inplace=True)
            elif phosphoRS:
                ssl_df=mask[['file','scan','charge','pRS_sequence','percolator q-value']]
                ssl_df.rename(columns={'pRS_sequence':'sequence'}, inplace=True)
            elif ptmRS:
                ssl_df=mask[['file','scan','charge','ptmRS_sequence','percolator q-value']]
                ssl_df.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True)
            else:
                ssl_df=mask[['file','scan','charge','sequence','percolator q-value']]
            ssl_df.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
            ssl_df_filtered=ssl_df[(ssl_df['percolator qvalue']<=options.q_threshold)]
            with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+".ssl",'wb') as ssl_writer:
                ssl_df_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                ssl_df_filtered['score-type']="PERCOLATOR QVALUE"
                ssl_df_filtered.to_csv(path_or_buf=ssl_writer,sep="\t",index=False,header=True)
            # Poorly localized PSMs go to a parallel *_type2.ssl file.
            if luciphor:
                with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
                    ssl_df_type2=type2_mask[['file','scan','charge','luci_sequence','percolator q-value']]
                    ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
                    ssl_df_type2.rename(columns={'luci_sequence':'sequence'}, inplace=True)
                    ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
                    ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                    ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
                    ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
            elif phosphoRS:
                with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
                    ssl_df_type2=type2_mask[['file','scan','charge','pRS_sequence','percolator q-value']]
                    ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
                    ssl_df_type2.rename(columns={'pRS_sequence':'sequence'}, inplace=True)
                    ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
                    ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                    ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
                    ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
            elif ptmRS:
                with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
                    ssl_df_type2=type2_mask[['file','scan','charge','ptmRS_sequence','percolator q-value']]
                    ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
                    ssl_df_type2.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True)
                    ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
                    ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
                    ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
                    ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
        if options.blib:
            # Queue one BlibBuild + BlibFilter command per fraction; they are
            # executed concurrently in batches after this loop.
            print "We're going to build blib files!"
            os.chdir(basedir)
            #os.chdir(basedir+"/"+options.ssl_output_folder)
            os.chdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/")
            #cmd = '/galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy *.ssl '+eachfraction.replace("-","")+".ssl" #combined_spectral_lib.blib
            for file in glob.glob("*type2.ssl"):
                os.rename(file,file.split(".")[0]+".not_ssl")
                print "protected",file,"from inclusion in spectral lib..."
            command_folder=basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"
            cmd = 'wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibBuild.exe -a milkyway-galaxy -m 1000M {0}*.ssl {0}combined_spectral_lib.blib'.format(command_folder)
            blib_cmds.append(cmd)
            print "storing command to run later",cmd
            #subprocess.call(cmd,shell=True)
            filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe {0} {1}'.format(command_folder+"combined_spectral_lib.blib",command_folder+"filtered_spectral_lib.blib")
            filter_cmds.append(filtercmd)
            print "storing command for filter later",filtercmd
            #subprocess.call(filtercmd,shell=True)
            #cmd = 'ls *.ssl | /galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy -m 1000M combined_spectral_lib.blib'
            #print "Running command ...",cmd
            #subprocess.call(cmd,shell=True)
# Run the queued blib builds in concurrent batches of chunk_size, builds
# first, then filters; each batch waits for all of its processes.
if fractions and options.blib:
    chunk_size=8 # Max concurrency...
    job_list=[blib_cmds[i:i + chunk_size] for i in range(0, len(blib_cmds), chunk_size)]
    for each_jobset in job_list:
        processes=[]
        for each_job in each_jobset:
            print "Running ...",each_job
            processes.append(subprocess.Popen(each_job,shell=True))
        for each_proc in processes:
            each_proc.wait()
    job_list=[filter_cmds[i:i + chunk_size] for i in range(0, len(filter_cmds), chunk_size)]
    for each_jobset in job_list:
        processes=[]
        for each_job in each_jobset:
            print "Running Filter...",each_job
            processes.append(subprocess.Popen(each_job,shell=True))
        for each_proc in processes:
            each_proc.wait()
    for eachfraction in fractions_to_run_dict:
        os.chdir(basedir)
        fraction_set=fractions_to_run_dict[eachfraction]
        for eachrun in fraction_set:
            os.chdir(basedir)
            os.chdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/")
            if options.mc_mzml_files is not None:
                # Swap plain mzML copies for their mass-corrected counterparts.
                file_list=[file for file in glob.glob("*.mzML")]
                for file in file_list:
                    os.remove(file)
                os.chdir(basedir)
                for file in glob.glob("mc_*.mzML"):
                    if file.split("_",1)[1] in file_list:
                        shutil.copy(file,basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+file.split("_",1)[1])
            # NOTE(review): when the mc_mzml branch ran, cwd is basedir here,
            # so this rename pass acts on basedir, not the fraction folder —
            # confirm intended.
            for file in glob.glob("*.not_ssl"):
                os.rename(file,file.split(".")[0]+".ssl")
os.chdir(basedir)
# If FIDO protein-level filtering is enabled, also emit a FASTA restricted to
# the proteins that survived the q-value cut; otherwise just copy the
# unfiltered FASTA into the output folder.
if options.fido_q_threshold is not None:
    os.chdir(basedir)
    #While we're here, let's go ahead and handle database filtering!
    fido_q_df = pandas.read_csv(options.fido_q_raw,sep="\t")
    fido_q_df=fido_q_df[fido_q_df['q-value']<=options.fido_q_threshold] #filter down...
    proteins_to_keep=fido_q_df['protein group'].unique().tolist()#this is the list of proteins we want to keep.
    with open(options.fasta,'rb') as fasta_file:
        fasta_dict=SeqIO.to_dict(SeqIO.parse(fasta_file,"fasta"))
    new_fasta=[]
    # Silently skips protein-group IDs that have no matching FASTA record;
    # presumably the IDs match FASTA headers exactly — TODO confirm.
    for eachprotein in proteins_to_keep:
        if eachprotein in fasta_dict:
            new_fasta.append(fasta_dict[eachprotein])
    #print new_fasta,len(new_fasta),len(proteins_to_keep)
    shutil.copy(options.fasta,basedir+"/"+options.ssl_output_folder+"/"+"UNFILTERED_"+options.fasta)
    with open(basedir+"/"+options.ssl_output_folder+"/"+"FIDO_FILTERED_"+options.fasta,'wb') as fasta_writer:
        SeqIO.write(new_fasta,fasta_writer,"fasta")
    print "FIDO filtered FASTA written!"
    if options.ffasta is not None:
        shutil.copy(basedir+"/"+options.ssl_output_folder+"/"+"FIDO_FILTERED_"+options.fasta,options.ffasta)
        print "Copied the filtered fasta to the output location specified..."
else:
    os.chdir(basedir)
    shutil.copy(options.fasta,basedir+"/"+options.ssl_output_folder+"/"+"UNFILTERED_"+options.fasta)
os.chdir(basedir)
# Optional cleanup: delete every mzML file under the working tree, then exit.
if options.no_mzml:
    mzml_files=[]
    for root, dirs, files in os.walk("."):
        for file_name in fnmatch.filter(files, '*.mzML'):
            mzml_files.append(os.path.join(root,file_name))
            #print "will have to remove ",file_name
    for each_file in mzml_files:
        os.remove(each_file)
print "All done!"
sys.exit(0)
| 57.772926 | 265 | 0.657798 | import os, sys, re
import shutil
import optparse
import shutil
import pandas
import numpy
import subprocess
import fnmatch
from joblib import Parallel, delayed
import multiprocessing
from Bio import SeqIO
import glob
pend(each_tuple[1])
new_prot_ids.append(each_tuple[0])
new_indicies.append(each_tuple[2])
else:
pass
if len(new_prot_ids) >= 1:
unfiltered.loc[index,'protein q-values']=",".join(new_q_vals)
unfiltered.loc[index,'protein id']=",".join(new_prot_ids)
unfiltered.loc[index,'indicies']=",".join(new_indicies)
unfiltered.loc[index,'fido_filter']=1
else:
unfiltered.loc[index,'fido_filter']=0
return unfiltered
def fido_filter_row(eachrow):
prot_q_vals = eachrow['protein q-values'].split(',')
protein_ids = eachrow['protein id'].split(',')
indicies = eachrow['peptide prot-indicies'].split(',')
new_q_vals=[]
new_prot_ids=[]
new_indicies=[]
for each_tuple in zip(protein_ids,prot_q_vals,indicies):
if float(each_tuple[1])<=gfido_q_threshold:
new_q_vals.append(each_tuple[1])
new_prot_ids.append(each_tuple[0])
new_indicies.append(each_tuple[2])
else:
pass
if len(new_prot_ids) >= 1:
eachrow['protein q-values']=",".join(new_q_vals)
eachrow['protein id']=",".join(new_prot_ids)
eachrow['indicies']=",".join(new_indicies)
eachrow['fido_filter']=1
else:
eachrow['fido_filter']=0
ction="store", type="float", dest="prs_prob_threshold")
parser.add_option("--ptmRS_prob_thresh",action="store", type="float", dest="ptmrs_prob_threshold")
parser.add_option("--FLRthresh",action="store", type="float", dest="flr_threshold")
parser.add_option("--LFLR",action="store_true", dest="local_flr")
parser.add_option("--mzml",action="store",type="string",dest="mzml_files")
parser.add_option("--mass_corrected_mzml",action="store",type="string",dest="mc_mzml_files")
parser.add_option("--blib",action="store_true",dest="blib")
parser.add_option("--fasta",action="store",type="string",dest="fasta")
parser.add_option("--ffasta",action="store",type="string",dest="ffasta") # THIS IS THE FILTERED FIDO OUTPUT
parser.add_option("--expgroups",action="store",type="string",dest="exp_group_file")
parser.add_option("--ssl",action="store",type="string",dest="ssl_output_folder")
parser.add_option("--fractions",action="store_true",dest="fractions")
parser.add_option("--OnePPS",action="store_true",dest="one_pps")
parser.add_option("--only_mod",action="store_true",dest="only_mod")
parser.add_option("--diaumpire",action="store_true",dest="diaumpire")
parser.add_option("--no_mzml",action="store_true",dest="no_mzml")
#parser.add_option("--saint",action="store",type="string",dest="saint_outputs")
(options,args) = parser.parse_args()
if options.fractions is True:
fractions=True
else:
print "We'll give outputs by acquisition (runs), not by experimental set."
bal gfido_q_threshold
gfido_q_threshold = options.fido_q_threshold
print "We're going to filter by Fido Q-Value of ",options.fido_q_threshold
#### Check for LuciPHOr
luciphor=False
if options.flr_threshold is not None:
luciphor=True
print "We will filter by LuciPHOr FLR of ",options.flr_threshold
#### Check for PhosphoRS
phosphoRS=False
if options.prs_prob_threshold is not None:
phosphoRS=True
print "We will filter by a PhosphoRS probability of ",options.prs_prob_threshold
#### Check for ptmRS
ptmRS=False
if options.ptmrs_prob_threshold is not None:
ptmRS=True
print "We will filter by a ptmRS probability of ",options.prs_prob_threshold
psms_files=[]
for root, subFolders, files in os.walk(options.operation_folder):
for eachfile in files:
if 'target.psms.txt' in eachfile and 'uncorrected' not in eachfile:
psms_files.append(str(os.path.join(root,eachfile)))
dataframe_vector=[]
for eachfile in psms_files:
newdf=pandas.DataFrame.from_csv(eachfile,sep='\t',index_col=False)
dataframe_vector.append(newdf)
combined_results=pandas.concat(dataframe_vector)
del dataframe_vector
group_information = pandas.read_csv(options.exp_group_file,sep='\t',dtype={'Fractionation Group ID String': object,'Fractionation Group Name':object,'Biological Condition':object})
run_dict={} # Key is file_idx, value is file_name.mzML
rev_run_dict={} #Key is file_nzme.mzML, value is file_idx
group_to_run_dict={} # Key is group, value is [1, 2, 3, 4] list of file_idx belonging to runs in the group...
run_to_group_dict={} # Key is file_idx, value is group...
group_to_file_name={} # key is group, value is ["xxxx.mzML", "xxxx.mzML"]
if fractions:
fractions_to_run_dict={}
for index,row in group_information.iterrows():
print row
run_dict[str(row['Crux File Integer'])]=row['Original File Name']+".mzML"
rev_run_dict[row['Original File Name']+".mzML"]=str(row['Crux File Integer'])
if row['Fractionation Group ID String'] in group_to_run_dict:
group_to_run_dict[row['Fractionation Group ID String']].append(str(row['Crux File Integer']))
else:
group_to_run_dict[row['Fractionation Group ID String']] = [str(row['Crux File Integer'])]
if str(row['Crux File Integer']) in run_to_group_dict:
#run_to_group_dict[str(row['Crux File Integer'])].append(row['Fractionation Group ID String'])
print "YOU HAVE MULTIPLE COPIES OF A RUN IN THE EXPERIMENTAL INFORMATION FILE... WARNING!"
else:
run_to_group_dict[str(row['Crux File Integer'])]=row['Fractionation Group ID String']
if row['Fractionation Group ID String'] in group_to_file_name:
group_to_file_name[row['Fractionation Group ID String']].append(str(row['Original File Name'])+".mzML")
else:
group_to_file_name[row['Fractionation Group ID String']] = [str(row['Original File Name'])+".mzML"]
if fractions:
fraction_tag=str(row['Original File Name'].rsplit("-",1)[1])
if fraction_tag in fractions_to_run_dict:
fractions_to_run_dict[fraction_tag].append(str(row['Crux File Integer']))#str(row['Original File Name'])+".mzML")
else:
fractions_to_run_dict[fraction_tag]=[str(row['Crux File Integer'])]
combined_results['file_idx']=combined_results['file_idx'].astype(str)
combined_results['file']=combined_results['file'].astype(str)
####################### We'll handle putting in the file names just to be sure this has been handled!
fix_combined=[]
print run_dict,"This is run dict"
print rev_run_dict,"this is rev dict"
for each_idx in run_dict:
mask = combined_results[(combined_results.file.str.contains(run_dict[each_idx]))]
mask['file']=run_dict[each_idx]
fix_combined.append(mask)
combined_results=pandas.concat(fix_combined)
if options.diaumpire:
print "DIAUmpire inputs: Decrementing scans to match mzML file index."
combined_results['scan']=combined_results['scan']-1
if luciphor:
combined_results['luci_numPPS']=combined_results['luci_numPPS'].fillna(0)
combined_results['luci_numPPS']=combined_results['luci_numPPS'].astype(int).fillna(0)
combined_results['luci_globalFLR']=combined_results['luci_globalFLR'].astype(float).fillna(0.0)
combined_results['luci_localFLR']=combined_results['luci_localFLR'].astype(float).fillna(0.0)
if phosphoRS:
combined_results['pRS_peptideLocalizationProbability']=combined_results['pRS_peptideLocalizationProbability'].astype(float).fillna(1.00)
combined_results['pRS_numPPS']=combined_results['pRS_numPPS'].astype(int).fillna(0)
if ptmRS:
combined_results['ptmRS_peptideLocalizationProbability']=combined_results['ptmRS_peptideLocalizationProbability'].astype(float).fillna(1.00)
combined_results['ptmRS_totalNumPPS']=combined_results['ptmRS_totalNumPPS'].fillna(0).astype(int)
new_results={}
results_per_run={}
):
os.rename(file,file.split(".")[0]+".ssl")
filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe combined_spectral_lib.blib filtered.blib'
subprocess.call(filtercmd,shell=True)
if options.mc_mzml_files is not None: #and not options.no_mzml:
for file in glob.glob("*.mzML"):
os.remove(file)
print "removing ",file
os.chdir(basedir)
for file in glob.glob("mc_*.mzML"):
print "replacing ",file
shutil.copy(file,basedir+"/"+options.ssl_output_folder+"/"+file.split("_",1)[1])
else: #This is for when fractions is true... so we'll organize the output into fractions
blib_cmds=[]
filter_cmds=[]
for eachfraction in fractions_to_run_dict:
fraction_set=fractions_to_run_dict[eachfraction]
for each_chrom_fraction in fraction_set:
if not os.path.isdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"):
os.mkdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/")
shutil.copy(os.path.join(basedir,run_dict[each_chrom_fraction]),basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[each_chrom_fraction])
for eachfraction in fractions_to_run_dict:
os.chdir(basedir)
fraction_set=fractions_to_run_dict[eachfraction]
for eachrun in fraction_set:
this_run_results=results_per_run[eachrun]
if fido_q:
this_run_results['fido_filter']=0
this_run_grouped=this_run_results.groupby(numpy.arange(len(this_run_results))//multiprocessing.cpu_count())
this_run_results=applyParallelHalf(this_run_grouped,fido_filter)
print this_run_results.columns,"this is columns..."
this_run_results=this_run_results[this_run_results['fido_filter']==1]
this_run_results=this_run_results.drop('fido_filter',axis=1)
del this_run_grouped
os.chdir(basedir)
os.chdir(options.operation_folder+str(run_to_group_dict[eachrun])+".pin_out/crux-output/")
if luciphor:
if options.local_flr is True:
if options.one_pps is True:
mask=this_run_results[numpy.logical_or(( this_run_results.luci_localFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_localFLR >= options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))]
else:
mask=this_run_results[( this_run_results.luci_localFLR <= options.flr_threshold)]
type2_mask=this_run_results[( this_run_results.luci_localFLR >= options.flr_threshold )]
else:
if options.one_pps is True:
mask=this_run_results[numpy.logical_or(( this_run_results.luci_globalFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_globalFLR >= options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))]
else:
mask=this_run_results[( this_run_results.luci_globalFLR <= options.flr_threshold)]
type2_mask=this_run_results[( this_run_results.luci_globalFLR >= options.flr_threshold )]
elif phosphoRS:
if options.one_pps is True:
mask=this_run_results[numpy.logical_or(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ) , (this_run_results.pRS_numPPS - this_run_results.numModSites == 0))]
type2_mask=this_run_results[numpy.logical_and(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ), (this_run_results.pRS_numPPS - this_run_results.numModSites > 0))]
else:
mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold)]
type2_mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability < options.prs_prob_threshold )]
elif ptmRS:
mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold)]
type2_mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability < options.ptmrs_prob_threshold )]
else:
mask=this_run_results
if options.only_mod is True:
if luciphor:
mask=mask[mask['luci_numRPS'] >= 1].copy(deep=True)
elif phosphoRS:
mask=mask[mask['numModSites'] >= 1].copy(deep=True)
elif ptmRS:
mask=mask[mask['ptmRS_numMods'] >= 1].copy(deep=True)
if luciphor:
ssl_df=mask[['file','scan','charge','luci_sequence','percolator q-value']]
ssl_df.rename(columns={'luci_sequence':'sequence'}, inplace=True)
elif phosphoRS:
ssl_df=mask[['file','scan','charge','pRS_sequence','percolator q-value']]
ssl_df.rename(columns={'pRS_sequence':'sequence'}, inplace=True)
elif ptmRS:
ssl_df=mask[['file','scan','charge','ptmRS_sequence','percolator q-value']]
ssl_df.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True)
else:
ssl_df=mask[['file','scan','charge','sequence','percolator q-value']]
ssl_df.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
ssl_df_filtered=ssl_df[(ssl_df['percolator qvalue']<=options.q_threshold)]
with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+".ssl",'wb') as ssl_writer:
ssl_df_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
ssl_df_filtered['score-type']="PERCOLATOR QVALUE"
ssl_df_filtered.to_csv(path_or_buf=ssl_writer,sep="\t",index=False,header=True)
if luciphor:
with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
ssl_df_type2=type2_mask[['file','scan','charge','luci_sequence','percolator q-value']]
ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
ssl_df_type2.rename(columns={'luci_sequence':'sequence'}, inplace=True)
ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
elif phosphoRS:
with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
ssl_df_type2=type2_mask[['file','scan','charge','pRS_sequence','percolator q-value']]
ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
ssl_df_type2.rename(columns={'pRS_sequence':'sequence'}, inplace=True)
ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
elif ptmRS:
with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer:
ssl_df_type2=type2_mask[['file','scan','charge','ptmRS_sequence','percolator q-value']]
ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True)
ssl_df_type2.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True)
ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)]
ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True)
ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE"
ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True)
if options.blib:
print "We're going to build blib files!"
os.chdir(basedir)
#os.chdir(basedir+"/"+options.ssl_output_folder)
os.chdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/")
#cmd = '/galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy *.ssl '+eachfraction.replace("-","")+".ssl" #combined_spectral_lib.blib
for file in glob.glob("*type2.ssl"):
os.rename(file,file.split(".")[0]+".not_ssl")
print "protected",file,"from inclusion in spectral lib..."
command_folder=basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"
cmd = 'wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibBuild.exe -a milkyway-galaxy -m 1000M {0}*.ssl {0}combined_spectral_lib.blib'.format(command_folder)
blib_cmds.append(cmd)
print "storing command to run later",cmd
#subprocess.call(cmd,shell=True)
filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe {0} {1}'.format(command_folder+"combined_spectral_lib.blib",command_folder+"filtered_spectral_lib.blib")
filter_cmds.append(filtercmd)
print "storing command for filter later",filtercmd
#subprocess.call(filtercmd,shell=True)
#cmd = 'ls *.ssl | /galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy -m 1000M combined_spectral_lib.blib'
#print "Running command ...",cmd
#subprocess.call(cmd,shell=True)
if fractions and options.blib:
chunk_size=8 # Max concurrency...
job_list=[blib_cmds[i:i + chunk_size] for i in range(0, len(blib_cmds), chunk_size)]
for each_jobset in job_list:
processes=[]
for each_job in each_jobset:
print "Running ...",each_job
processes.append(subprocess.Popen(each_job,shell=True))
for each_proc in processes:
each_proc.wait()
job_list=[filter_cmds[i:i + chunk_size] for i in range(0, len(filter_cmds), chunk_size)]
for each_jobset in job_list:
processes=[]
for each_job in each_jobset:
print "Running Filter...",each_job
processes.append(subprocess.Popen(each_job,shell=True))
for each_proc in processes:
each_proc.wait()
for eachfraction in fractions_to_run_dict:
os.chdir(basedir)
fraction_set=fractions_to_run_dict[eachfraction]
for eachrun in fraction_set:
os.chdir(basedir)
os.chdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/")
if options.mc_mzml_files is not None:
file_list=[file for file in glob.glob("*.mzML")]
for file in file_list:
os.remove(file)
os.chdir(basedir)
for file in glob.glob("mc_*.mzML"):
if file.split("_",1)[1] in file_list:
shutil.copy(file,basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+file.split("_",1)[1])
for file in glob.glob("*.not_ssl"):
os.rename(file,file.split(".")[0]+".ssl")
os.chdir(basedir)
if options.fido_q_threshold is not None:
os.chdir(basedir)
#While we're here, let's go ahead and handle database filtering!
fido_q_df = pandas.read_csv(options.fido_q_raw,sep="\t")
fido_q_df=fido_q_df[fido_q_df['q-value']<=options.fido_q_threshold] #filter down...
proteins_to_keep=fido_q_df['protein group'].unique().tolist()#this is the list of proteins we want to keep.
with open(options.fasta,'rb') as fasta_file:
fasta_dict=SeqIO.to_dict(SeqIO.parse(fasta_file,"fasta"))
new_fasta=[]
for eachprotein in proteins_to_keep:
if eachprotein in fasta_dict:
new_fasta.append(fasta_dict[eachprotein])
#print new_fasta,len(new_fasta),len(proteins_to_keep)
shutil.copy(options.fasta,basedir+"/"+options.ssl_output_folder+"/"+"UNFILTERED_"+options.fasta)
with open(basedir+"/"+options.ssl_output_folder+"/"+"FIDO_FILTERED_"+options.fasta,'wb') as fasta_writer:
SeqIO.write(new_fasta,fasta_writer,"fasta")
print "FIDO filtered FASTA written!"
if options.ffasta is not None:
shutil.copy(basedir+"/"+options.ssl_output_folder+"/"+"FIDO_FILTERED_"+options.fasta,options.ffasta)
print "Copied the filtered fasta to the output location specified..."
else:
os.chdir(basedir)
shutil.copy(options.fasta,basedir+"/"+options.ssl_output_folder+"/"+"UNFILTERED_"+options.fasta)
os.chdir(basedir)
if options.no_mzml:
mzml_files=[]
for root, dirs, files in os.walk("."):
for file_name in fnmatch.filter(files, '*.mzML'):
mzml_files.append(os.path.join(root,file_name))
#print "will have to remove ",file_name
for each_file in mzml_files:
os.remove(each_file)
print "All done!"
sys.exit(0)
| false | true |
f71b45087b55f236efe7a8b8b2c2a962a1bf0ef8 | 25,938 | py | Python | Src/StdLib/Lib/modulefinder.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,078 | 2016-07-19T02:48:30.000Z | 2022-03-30T21:22:34.000Z | Src/StdLib/Lib/modulefinder.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 576 | 2017-05-21T12:36:48.000Z | 2022-03-30T13:47:03.000Z | Src/StdLib/Lib/modulefinder.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 269 | 2017-05-21T04:44:47.000Z | 2022-03-31T16:18:13.000Z | """Find modules used by a script, using introspection."""
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
if hasattr(sys.__stdout__, "newlines"):
READ_MODE = "U" # universal line endings
else:
# Python < 2.3 compatibility, no longer strictly required
READ_MODE = "r"
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG
def _unpack_opargs(code):
    """Yield (offset, opcode, argument) triples for a Python 2 bytecode string.

    Iterates manually rather than via enumerate() because opcodes with
    arguments consume two extra bytes, and EXTENDED_ARG folds into the
    following instruction's argument.
    """
    pending_ext = 0
    pos = 0
    size = len(code)
    while pos < size:
        opcode = ord(code[pos])
        start = pos
        pos = pos + 1
        oparg = None
        if opcode >= HAVE_ARGUMENT:
            # Little-endian 16-bit argument plus any accumulated EXTENDED_ARG.
            oparg = ord(code[pos]) + ord(code[pos + 1]) * 256 + pending_ext
            pending_ext = 0
            pos = pos + 2
        if opcode == EXTENDED_ARG:
            pending_ext = oparg * 65536
        yield (start, opcode, oparg)
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.
# Note this is a mapping is lists of paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
    """Public hook: register an extra search directory for *packagename*.

    Compensates for packages that extend their __path__ at runtime,
    which the static scan cannot observe (see comment block above).
    """
    packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    # Record that imports of `oldname` should be resolved as `newname`
    # (e.g. ReplacePackage("_xmlplus", "xml"); see the comment block above).
    replacePackageMap[oldname] = newname
class Module:
    """Record of a module discovered during the scan.

    Mirrors the attributes a real module object would carry (__name__,
    __file__, __path__, __code__) plus bookkeeping used by ModuleFinder.
    """

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path
        self.__code__ = None
        # Global names assigned in the module, including names pulled in
        # through star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports this module did that could not be resolved, i.e.
        # a star-import from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        # Show only the attributes that were actually provided.
        pieces = [repr(self.__name__)]
        if self.__file__ is not None:
            pieces.append(repr(self.__file__))
        if self.__path__ is not None:
            pieces.append(repr(self.__path__))
        return "Module(%s)" % ", ".join(pieces)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
    def msg(self, level, str, *args):
        # Emit a debug message when `level` is within the configured
        # verbosity, indented to reflect the current recursion depth.
        # Extra args are shown with repr() for unambiguous output.
        if level <= self.debug:
            for i in range(self.indent):
                print " ",
            print str,
            for arg in args:
                print repr(arg),
            print
    def msgin(self, *args):
        # Log a message and increase the indent: entering a nested step.
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            self.msg(*args)
    def msgout(self, *args):
        # Log a message and decrease the indent: leaving a nested step.
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
with open(pathname, READ_MODE) as fp:
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
with open(pathname, READ_MODE) as fp:
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
    def determine_parent(self, caller, level=-1):
        # Determine the package that an import issued by `caller` is
        # relative to, following __import__'s `level` semantics:
        # level 0 = absolute import (no parent), level >= 1 = explicit
        # relative import, level -1 = classic implicit-relative lookup.
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # The caller is itself a package; it counts as one level.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError, "relative importpath too deep"
            # Strip `level` trailing components to find the anchor package.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            # Implicit relative import issued from a package __init__.
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # Implicit relative import from a submodule: parent is the
            # enclosing package.
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
    def find_head_package(self, parent, name):
        # Import the first dotted component of `name` and return
        # (head_module, remaining_tail).  Tries the name relative to
        # `parent` first, then falls back to a top-level import —
        # mirroring Python 2's implicit-relative import resolution.
        self.msgin(4, "find_head_package", parent, name)
        if '.' in name:
            i = name.find('.')
            head = name[:i]
            tail = name[i+1:]
        else:
            head = name
            tail = ""
        if parent:
            qname = "%s.%s" % (parent.__name__, head)
        else:
            qname = head
        q = self.import_module(head, qname, parent)
        if q:
            self.msgout(4, "find_head_package ->", (q, tail))
            return q, tail
        if parent:
            # Relative lookup failed: retry as an absolute import.
            qname = head
            parent = None
            q = self.import_module(head, qname, parent)
            if q:
                self.msgout(4, "find_head_package ->", (q, tail))
                return q, tail
        self.msgout(4, "raise ImportError: No module named", qname)
        raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError, "No module named " + mname
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
self.msg(4, "ensure_fromlist", m, fromlist, recursive)
for sub in fromlist:
if sub == "*":
if not recursive:
all = self.find_all_submodules(m)
if all:
self.ensure_fromlist(m, all, 1)
elif not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
# 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
suffixes = []
for triple in imp.get_suffixes():
suffixes.append(triple[0])
for dir in m.__path__:
try:
names = os.listdir(dir)
except os.error:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
    def import_module(self, partname, fqname, parent):
        # Import one module component.  Returns the Module object, or
        # None if the module cannot be found or is already known bad.
        # Results are cached in self.modules / self.badmodules.
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            # Already processed: return the cached record.
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is not a package, so it cannot have submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            # find_module may return an open file; always release it.
            if fp: fp.close()
        if parent:
            # Bind the submodule as an attribute of its parent package.
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, file_info):
        # Compile or unmarshal the module's code object and scan it for
        # imports.  `file_info` is the (suffix, mode, type) triple as
        # returned by imp.find_module().
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == imp.PKG_DIRECTORY:
            # Packages are handled by loading their __init__.
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == imp.PY_SOURCE:
            co = compile(fp.read()+'\n', pathname, 'exec')
        elif type == imp.PY_COMPILED:
            if fp.read(4) != imp.get_magic():
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError, "Bad magic number in %s" % pathname
            # Skip the 4-byte timestamp that follows the magic number.
            fp.read(4)
            co = marshal.load(fp)
        else:
            # Builtin or extension module: no code object to scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError;
        # failures are recorded in self.badmodules instead.
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError, msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                # Try each `from name import sub` target individually so
                # one bad submodule doesn't mask the others.
                for sub in fromlist:
                    if sub in self.badmodules:
                        self._add_badmodule(sub, caller)
                        continue
                    try:
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError, msg:
                        self.msg(2, "ImportError:", str(msg))
                        fullname = name + "." + sub
                        self._add_badmodule(fullname, caller)
    def scan_opcodes_cli(self, co):
        # IronPython ('cli') variant: code objects have no CPython
        # bytecode, so re-parse the source file with `ast` and emit the
        # same ("store", ...) / ("import", ...) / ("relative_import", ...)
        # events the bytecode scanners produce.
        import ast
        with open(co.co_filename, 'rU') as f:
            nodes = ast.parse(f.read(), co.co_filename)
        items = []
        class ModuleFinderVisitor(ast.NodeVisitor):
            def visit_Assign(self, node):
                # Record assigned names; subscript/attribute targets are
                # reported under their base name.
                for x in node.targets:
                    if isinstance(x, ast.Subscript):
                        if isinstance(x.value, ast.Name):
                            items.append(("store", (x.value.id, )))
                        elif isinstance(x.value, ast.Attribute):
                            items.append(("store", (x.value.attr, )))
                        else:
                            print 'Unknown in store: %s' % type(x.value).__name__
                    elif isinstance(x, ast.Name):
                        items.append(("store", (x.id, )))
            def visit_Import(self, node):
                items.extend([("import", (None, x.name)) for x in node.names])
            def visit_ImportFrom(self, node):
                # NOTE(review): only level == 1 is treated as relative;
                # deeper explicit-relative imports fall through to the
                # plain "import" event — confirm this matches IronPython's
                # needs.
                if node.level == 1:
                    items.append(("relative_import", (node.level, [x.name for x in node.names], node.module)))
                else:
                    items.extend([("import", ([x.name for x in node.names], node.module))])
        v = ModuleFinderVisitor()
        v.visit(nodes)
        for what, args in items:
            yield what, args
def scan_opcodes(self, co,
unpack = struct.unpack):
# Scan the code, and yield 'interesting' opcode combinations
# Version for Python 2.4 and older
code = co.co_code
names = co.co_names
consts = co.co_consts
opargs = [(op, arg) for _, op, arg in _unpack_opargs(code)
if op != EXTENDED_ARG]
for i, (op, oparg) in enumerate(opargs):
if c in STORE_OPS:
yield "store", (names[oparg],)
continue
if (op == IMPORT_NAME and i >= 1
and opargs[i-1][0] == LOAD_CONST):
fromlist = consts[opargs[i-1][1]]
yield "import", (fromlist, names[oparg])
continue
def scan_opcodes_25(self, co):
# Scan the code, and yield 'interesting' opcode combinations
code = co.co_code
names = co.co_names
consts = co.co_consts
opargs = [(op, arg) for _, op, arg in _unpack_opargs(code)
if op != EXTENDED_ARG]
for i, (op, oparg) in enumerate(opargs):
if op in STORE_OPS:
yield "store", (names[oparg],)
continue
if (op == IMPORT_NAME and i >= 2
and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
level = consts[opargs[i-2][1]]
fromlist = consts[opargs[i-1][1]]
if level == -1: # normal import
yield "import", (fromlist, names[oparg])
elif level == 0: # absolute import
yield "absolute_import", (fromlist, names[oparg])
else: # relative import
yield "relative_import", (level, fromlist, names[oparg])
continue
    def scan_code(self, co, m):
        # Walk code object `co`, feeding the events produced by the
        # platform-appropriate scanner into module record `m`, then
        # recurse into nested code objects (functions, classes, ...).
        code = co.co_code  # NOTE: unused here; scanners read co directly.
        if sys.platform == 'cli':
            scanner = self.scan_opcodes_cli
        elif sys.version_info >= (2, 5):
            scanner = self.scan_opcodes_25
        else:
            scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what in ("import", "absolute_import"):
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                if what == "absolute_import": level = 0
                else: level = -1
                self._safe_import_hook(name, m, fromlist, level=level)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            # Star-import from a non-Python module: names
                            # cannot be determined statically.
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    # `from . import x`: import from the parent package.
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
if fp:
fp.close()
return m
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
    def find_module(self, name, path, parent=None):
        # Locate module `name` on `path`, honoring the exclusion list.
        # Returns the (file, pathname, description) triple from
        # imp.find_module(), or a synthetic triple for builtins.
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError, name
        if path is None:
            if name in sys.builtin_module_names:
                # Builtins have no file; signal with a C_BUILTIN triple.
                return (None, None, ("", "", imp.C_BUILTIN))
            path = self.path
        return imp.find_module(name, path)
    def report(self):
        """Print a report to stdout, listing the found modules with their
        paths, as well as modules that are missing, or seem to be missing.
        """
        print
        print " %-25s %s" % ("Name", "File")
        print " %-25s %s" % ("----", "----")
        # Print modules found
        keys = self.modules.keys()
        keys.sort()
        for key in keys:
            m = self.modules[key]
            # "P" marks packages, "m" plain modules.
            if m.__path__:
                print "P",
            else:
                print "m",
            print "%-25s" % key, m.__file__ or ""
        # Print missing modules
        missing, maybe = self.any_missing_maybe()
        if missing:
            print
            print "Missing modules:"
            for name in missing:
                mods = self.badmodules[name].keys()
                mods.sort()
                print "?", name, "imported from", ', '.join(mods)
        # Print modules that may be missing, but then again, maybe not...
        if maybe:
            print
            print "Submodules that appear to be missing, but could also be",
            print "global names in the parent package:"
            for name in maybe:
                mods = self.badmodules[name].keys()
                mods.sort()
                print "?", name, "imported from", ', '.join(mods)
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.

        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                # Top-level name that failed to import: definitely missing.
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
    def replace_paths_in_code(self, co):
        # Return a copy of code object `co` with co_filename rewritten
        # according to self.replace_paths (first matching prefix wins),
        # recursing into nested code objects in co_consts.
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break
        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            self.processed_paths.append(original_filename)
        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                consts[i] = self.replace_paths_in_code(consts[i])
        # Rebuild the code object with the new filename and consts; all
        # other fields are copied verbatim.
        return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
                         co.co_flags, co.co_code, tuple(consts), co.co_names,
                         co.co_varnames, new_filename, co.co_name,
                         co.co_firstlineno, co.co_lnotab,
                         co.co_freevars, co.co_cellvars)
def test():
    """Command-line driver: report every module used by a script.

    Usage: prog [-d] [-m] [-p path] [-q] [-x exclude] [script modules...]
    Returns the ModuleFinder instance (handy under python -i).
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error, msg:
        print msg
        return
    # Process options
    debug = 1        # -d raises verbosity, -q silences it
    domods = 0       # -m: remaining args are module names, not files
    addpath = []     # -p: extra directories prepended to the search path
    exclude = []     # -x: module names to ignore entirely
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)
    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print "   ", repr(item)
    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                # "pkg.*" pulls in every submodule of the package.
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    try:
        mf = test()
    except KeyboardInterrupt:
        # Let Ctrl-C stop a long scan without dumping a traceback.
        print "\n[interrupt]"
| 36.175732 | 110 | 0.523441 | """Find modules used by a script, using introspection."""
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
# Open sources with universal-newline support when the interpreter has it;
# sys.__stdout__ only grows a "newlines" attribute in that case.
READ_MODE = "U" if hasattr(sys.__stdout__, "newlines") else "r"
# Opcode constants for the classic (pre-wordcode) bytecode layout used by
# the scanners below: opcodes >= HAVE_ARGUMENT carry a two-byte
# little-endian argument.
LOAD_CONST = dis.opmap['LOAD_CONST']
IMPORT_NAME = dis.opmap['IMPORT_NAME']
STORE_NAME = dis.opmap['STORE_NAME']
STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
STORE_OPS = STORE_NAME, STORE_GLOBAL
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG
def _unpack_opargs(code):
    """Yield (offset, op, arg) triples for a raw bytecode string.

    *arg* is None for argument-less opcodes.  An EXTENDED_ARG prefix is
    folded into the following instruction's argument; the EXTENDED_ARG
    instruction itself is still yielded (callers are expected to filter
    it out).
    """
    pending_extended = 0
    pos = 0
    size = len(code)
    while pos < size:
        offset = pos
        op = ord(code[pos])
        pos += 1
        if op < HAVE_ARGUMENT:
            arg = None
        else:
            arg = ord(code[pos]) + ord(code[pos + 1]) * 256 + pending_extended
            pos += 2
            pending_extended = arg * 65536 if op == EXTENDED_ARG else 0
        yield (offset, op, arg)
# Some packages extend their __path__ at runtime, which static scanning
# cannot see.  Callers may therefore pre-register extra search paths per
# package here; they are honored by the finder.
# Maps package name -> list of additional path entries.
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra directory to scan for *packagename* submodules."""
    packagePathMap.setdefault(packagename, []).append(path)
# Maps a package's real name to the name it injects itself under in
# sys.modules (e.g. _xmlplus registering itself as "xml").  Consulted by
# ModuleFinder.load_package; populate it *before* running the finder.
replacePackageMap = {}

def ReplacePackage(oldname, newname):
    """Record that package *oldname* should be tracked as *newname*."""
    replacePackageMap[oldname] = newname
class Module:
    """Record for one discovered module: its name, origin and name info."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path    # list of package dirs, or None for plain modules
        self.__code__ = None    # code object, filled in by the finder
        # Global names assigned in the module, including names pulled in
        # through star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports that could not be resolved (i.e. a star-import
        # from a non-Python module).
        self.starimports = {}

    def __repr__(self):
        parts = ["%r" % (self.__name__,)]
        if self.__file__ is not None:
            parts.append("%r" % (self.__file__,))
        if self.__path__ is not None:
            parts.append("%r" % (self.__path__,))
        return "Module(" + ", ".join(parts) + ")"
class ModuleFinder:
    def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
        """Create a finder.

        path: module search path (defaults to sys.path).
        debug: verbosity level for the msg*() helpers (0 = quiet).
        excludes: module names never to be searched or reported.
        replace_paths: (old_prefix, new_prefix) pairs applied to
            co_filename by replace_paths_in_code().
        NOTE(review): the mutable default arguments are only safe because
        they are never mutated by this class.
        """
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}        # fqname -> Module for everything found
        self.badmodules = {}     # failed name -> {caller name: 1}
        self.debug = debug
        self.indent = 0          # current msg() indentation depth
        self.excludes = excludes
        self.replace_paths = replace_paths
        self.processed_paths = []   # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print " ",
print str,
for arg in args:
print repr(arg),
print
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
with open(pathname, READ_MODE) as fp:
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
with open(pathname, READ_MODE) as fp:
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module an import in *caller* is relative to.

        *level* follows the __import__ convention: 0 = absolute import
        (no parent), >= 1 = explicit relative import climbing that many
        package levels, -1 = classic implicit-relative semantics.
        Returns None when the import has no parent package.
        """
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # A package counts as its own first level.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError, "relative importpath too deep"
            # Climb 'level' dotted components to find the anchor package.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            # Implicit relative import from a package: the package itself
            # is the parent.
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # Plain module inside a package: parent is the enclosing package.
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError, "No module named " + mname
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
self.msg(4, "ensure_fromlist", m, fromlist, recursive)
for sub in fromlist:
if sub == "*":
if not recursive:
all = self.find_all_submodules(m)
if all:
self.ensure_fromlist(m, all, 1)
elif not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
# 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
suffixes = []
for triple in imp.get_suffixes():
suffixes.append(triple[0])
for dir in m.__path__:
try:
names = os.listdir(dir)
except os.error:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
def import_module(self, partname, fqname, parent):
self.msgin(3, "import_module", partname, fqname, parent)
try:
m = self.modules[fqname]
except KeyError:
pass
else:
self.msgout(3, "import_module ->", m)
return m
if fqname in self.badmodules:
self.msgout(3, "import_module -> None")
return None
if parent and parent.__path__ is None:
self.msgout(3, "import_module -> None")
return None
try:
fp, pathname, stuff = self.find_module(partname,
parent and parent.__path__, parent)
except ImportError:
self.msgout(3, "import_module ->", None)
return None
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
if fp: fp.close()
if parent:
setattr(parent, partname, m)
self.msgout(3, "import_module ->", m)
return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Compile/unmarshal the module at *pathname* and scan its imports.

        *file_info* is a (suffix, mode, type) triple as produced by
        find_module(); packages recurse through load_package().  Returns
        the registered Module object.
        """
        suffix, mode, type = file_info   # NOTE: 'type' shadows the builtin
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == imp.PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == imp.PY_SOURCE:
            co = compile(fp.read()+'\n', pathname, 'exec')
        elif type == imp.PY_COMPILED:
            if fp.read(4) != imp.get_magic():
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError, "Bad magic number in %s" % pathname
            fp.read(4)   # skip the .pyc timestamp field
            co = marshal.load(fp)
        else:
            co = None    # extension/builtin module: nothing to scan
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
if name in self.badmodules:
self._add_badmodule(name, caller)
return
try:
self.import_hook(name, caller, level=level)
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
self._add_badmodule(name, caller)
else:
if fromlist:
for sub in fromlist:
if sub in self.badmodules:
self._add_badmodule(sub, caller)
continue
try:
self.import_hook(name, caller, [sub], level=level)
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
fullname = name + "." + sub
self._add_badmodule(fullname, caller)
def scan_opcodes_cli(self, co):
import ast
with open(co.co_filename, 'rU') as f:
nodes = ast.parse(f.read(), co.co_filename)
items = []
class ModuleFinderVisitor(ast.NodeVisitor):
def visit_Assign(self, node):
for x in node.targets:
if isinstance(x, ast.Subscript):
if isinstance(x.value, ast.Name):
items.append(("store", (x.value.id, )))
elif isinstance(x.value, ast.Attribute):
items.append(("store", (x.value.attr, )))
else:
print 'Unknown in store: %s' % type(x.value).__name__
elif isinstance(x, ast.Name):
items.append(("store", (x.id, )))
def visit_Import(self, node):
items.extend([("import", (None, x.name)) for x in node.names])
def visit_ImportFrom(self, node):
if node.level == 1:
items.append(("relative_import", (node.level, [x.name for x in node.names], node.module)))
else:
items.extend([("import", ([x.name for x in node.names], node.module))])
v = ModuleFinderVisitor()
v.visit(nodes)
for what, args in items:
yield what, args
def scan_opcodes(self, co,
unpack = struct.unpack):
# Scan the code, and yield 'interesting' opcode combinations
# Version for Python 2.4 and older
code = co.co_code
names = co.co_names
consts = co.co_consts
opargs = [(op, arg) for _, op, arg in _unpack_opargs(code)
if op != EXTENDED_ARG]
for i, (op, oparg) in enumerate(opargs):
if c in STORE_OPS:
yield "store", (names[oparg],)
continue
if (op == IMPORT_NAME and i >= 1
and opargs[i-1][0] == LOAD_CONST):
fromlist = consts[opargs[i-1][1]]
yield "import", (fromlist, names[oparg])
continue
def scan_opcodes_25(self, co):
# Scan the code, and yield 'interesting' opcode combinations
code = co.co_code
names = co.co_names
consts = co.co_consts
opargs = [(op, arg) for _, op, arg in _unpack_opargs(code)
if op != EXTENDED_ARG]
for i, (op, oparg) in enumerate(opargs):
if op in STORE_OPS:
yield "store", (names[oparg],)
continue
if (op == IMPORT_NAME and i >= 2
and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
level = consts[opargs[i-2][1]]
fromlist = consts[opargs[i-1][1]]
if level == -1: # normal import
yield "import", (fromlist, names[oparg])
elif level == 0: # absolute import
yield "absolute_import", (fromlist, names[oparg])
else: # relative import
yield "relative_import", (level, fromlist, names[oparg])
continue
def scan_code(self, co, m):
code = co.co_code
if sys.platform == 'cli':
scanner = self.scan_opcodes_cli
elif sys.version_info >= (2, 5):
scanner = self.scan_opcodes_25
else:
scanner = self.scan_opcodes
for what, args in scanner(co):
if what == "store":
name, = args
m.globalnames[name] = 1
elif what in ("import", "absolute_import"):
fromlist, name = args
have_star = 0
if fromlist is not None:
if "*" in fromlist:
have_star = 1
fromlist = [f for f in fromlist if f != "*"]
if what == "absolute_import": level = 0
else: level = -1
self._safe_import_hook(name, m, fromlist, level=level)
if have_star:
# We've encountered an "import *". If it is a Python module,
mm = None
if m.__path__:
# submodule of 'm' or a global module. Let's just try
mm = self.modules.get(m.__name__ + "." + name)
if mm is None:
mm = self.modules.get(name)
if mm is not None:
m.globalnames.update(mm.globalnames)
m.starimports.update(mm.starimports)
if mm.__code__ is None:
m.starimports[name] = 1
else:
m.starimports[name] = 1
elif what == "relative_import":
level, fromlist, name = args
if name:
self._safe_import_hook(name, m, fromlist, level=level)
else:
parent = self.determine_parent(m, level=level)
self._safe_import_hook(parent.__name__, None, fromlist, level=0)
else:
raise RuntimeError(what)
for c in co.co_consts:
if isinstance(c, type(co)):
self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
if fp:
fp.close()
return m
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
def find_module(self, name, path, parent=None):
if parent is not None:
# assert path is not None
fullname = parent.__name__+'.'+name
else:
fullname = name
if fullname in self.excludes:
self.msgout(3, "find_module -> Excluded", fullname)
raise ImportError, name
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = self.path
return imp.find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print
print " %-25s %s" % ("Name", "File")
print " %-25s %s" % ("----", "----")
# Print modules found
keys = self.modules.keys()
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print "P",
else:
print "m",
print "%-25s" % key, m.__file__ or ""
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print
print "Missing modules:"
for name in missing:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
# Print modules that may be missing, but then again, maybe not...
if maybe:
print
print "Submodules that appear to be missing, but could also be",
print "global names in the parent package:"
for name in maybe:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.
        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                continue
            i = name.rfind(".")
            if i < 0:
                # Top-level name that failed to import: plainly missing.
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # Not a global, and no funny star-imports: very likely
                    # missing.  The symbol could be inserted into the
                    # package from the outside, but since that's not good
                    # style we simply list it missing.
                    missing.append(name)
            else:
                # Parent package itself was never found.
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
def replace_paths_in_code(self, co):
new_filename = original_filename = os.path.normpath(co.co_filename)
for f, r in self.replace_paths:
if original_filename.startswith(f):
new_filename = r + original_filename[len(f):]
break
if self.debug and original_filename not in self.processed_paths:
if new_filename != original_filename:
self.msgout(2, "co_filename %r changed to %r" \
% (original_filename,new_filename,))
else:
self.msgout(2, "co_filename %r remains unchanged" \
% (original_filename,))
self.processed_paths.append(original_filename)
consts = list(co.co_consts)
for i in range(len(consts)):
if isinstance(consts[i], type(co)):
consts[i] = self.replace_paths_in_code(consts[i])
return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, tuple(consts), co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
def test():
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
except getopt.error, msg:
print msg
return
debug = 1
domods = 0
addpath = []
exclude = []
for o, a in opts:
if o == '-d':
debug = debug + 1
if o == '-m':
domods = 1
if o == '-p':
addpath = addpath + a.split(os.pathsep)
if o == '-q':
debug = 0
if o == '-x':
exclude.append(a)
if not args:
script = "hello.py"
else:
script = args[0]
path = sys.path[:]
path[0] = os.path.dirname(script)
path = addpath + path
if debug > 1:
print "path:"
for item in path:
print " ", repr(item)
mf = ModuleFinder(path, debug, exclude)
for arg in args[1:]:
if arg == '-m':
domods = 1
continue
if domods:
if arg[-2:] == '.*':
mf.import_hook(arg[:-2], None, ["*"])
else:
mf.import_hook(arg)
else:
mf.load_file(arg)
mf.run_script(script)
mf.report()
return mf
if __name__ == '__main__':
try:
mf = test()
except KeyboardInterrupt:
print "\n[interrupt]"
| false | true |
f71b45acabf22bbb1840898ea73829472d8e7060 | 731 | py | Python | tests/test_python3_dbb.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | tests/test_python3_dbb.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | tests/test_python3_dbb.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | import inspect
class _D:
    """Tiny fixture class whose only method is a no-op."""

    def _m(self):
        pass
class _C:
    """Second tiny fixture class, mirrors _D with a no-op method."""

    def _m(self):
        pass
_x = _C()                 # instances used below to probe inspect.isclass
_x2 = _D()
a=121111                  # scratch value for the debugger to inspect
r = input('hahah')        # NOTE: blocks waiting for interactive input
print(r)
# Deliberate unconditional raise -- presumably exercises the debugger's
# exception handling; everything below is unreachable.
# TODO(review): confirm this is intentional.
raise AttributeError('Provider test already registered')
print (type(_C),_x.__class__,dir(_x),"------------")
import types
###print (dir(types))
print (type(inspect))
###print type(inspect) is types.InstanceType,"============="
print (type(_x),type(type))
print (inspect.isclass(_x))
print (inspect.isclass(_x2))
print (inspect.isclass(_D))
print (inspect.isclass(_C))
print (inspect.ismodule(_C))
print (isinstance(inspect,object),"------------")
print (1)
###print (g)
print (2)
print (3)
print (4)
print (6)
print (7)
print (8)
print (9)
print (10)
print ("11111111111111") | 17.404762 | 60 | 0.642955 | import inspect
class _D:
def _m(self): pass
class _C:
def _m(self): pass
_x = _C()
_x2 = _D()
a=121111
r = input('hahah')
print(r)
raise AttributeError('Provider test already registered')
print (type(_C),_x.__class__,dir(_x),"------------")
import types
lass(_C))
print (inspect.ismodule(_C))
print (isinstance(inspect,object),"------------")
print (1)
print (4)
print (6)
print (7)
print (8)
print (9)
print (10)
print ("11111111111111") | true | true |
f71b45baa1e78f59775296f091528acf3ccf2008 | 718 | py | Python | meus_projetos/projetos_python/lista_de_tarefas/minhas_funcoes.py | SabinoEduardo/Python | f46e47f166150afdf0f4c4358b5848d52667a764 | [
"MIT"
] | null | null | null | meus_projetos/projetos_python/lista_de_tarefas/minhas_funcoes.py | SabinoEduardo/Python | f46e47f166150afdf0f4c4358b5848d52667a764 | [
"MIT"
] | null | null | null | meus_projetos/projetos_python/lista_de_tarefas/minhas_funcoes.py | SabinoEduardo/Python | f46e47f166150afdf0f4c4358b5848d52667a764 | [
"MIT"
] | null | null | null | def adicionar_tarefa(lista_tarefas, tarefas):
lista_tarefas.append(tarefas)
def deletar_tarefa(lista_tarefas, tarefas_deletadas):
    """
    Remove the last task from the list, archiving it so it can later be
    restored.  Prints a message and changes nothing when the list is empty.
    """
    if not lista_tarefas:
        print("Nada a deletar")
        return
    # pop() removes and returns the last task in a single step.
    tarefas_deletadas.append(lista_tarefas.pop())
def repor_tarefa(lista_tarefas, tarefas_deletadas):
    """
    Restore the most recently deleted task back onto the task list.
    Prints a message and changes nothing when there is nothing to restore.
    """
    if not tarefas_deletadas:
        print("Nada a repor")
        return
    # pop() removes and returns the last archived task in one step.
    lista_tarefas.append(tarefas_deletadas.pop())
| 27.615385 | 100 | 0.682451 | def adicionar_tarefa(lista_tarefas, tarefas):
lista_tarefas.append(tarefas)
def deletar_tarefa(lista_tarefas, tarefas_deletadas):
if not lista_tarefas:
print("Nada a deletar")
return
tarefas_deletadas.append(lista_tarefas[-1])
lista_tarefas.pop()
def repor_tarefa(lista_tarefas, tarefas_deletadas):
if not tarefas_deletadas:
print("Nada a repor")
return
else:
lista_tarefas.append(tarefas_deletadas[-1])
tarefas_deletadas.pop()
| true | true |
f71b47bfd1af85c0318ff27db55a1a089a9d0ee9 | 4,549 | py | Python | pyuvm/s09_phasing.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 4 | 2021-11-07T13:22:27.000Z | 2022-02-17T08:51:24.000Z | pyuvm/s09_phasing.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | null | null | null | pyuvm/s09_phasing.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | null | null | null | from pyuvm.s05_base_classes import uvm_object
import pyuvm.error_classes as error_classes
import cocotb
# 9.1
#
# This is a dramatically simplified version of UVM phasing. We don't have
# to deal with simulation time and we are not going to deal with a generalized
# phasing system.
#
# So this system simply traverses the common phases, calling the appropriate
# method in each component.
#
# Much of the work in the SV phasing code has to do with handling the passage
# of time. There is no timewheel in Python, so all of that code can go
# away.
#
# Also, the generalized phasing system is rarely used and so that
# is left as an exercise for future developers. Instead we have a simple
# topdown and bottom up traversal of calling methods in component
# classes based on the phase name.
#
# We're not doing schedules or domains. We're just creating a list of classes
# and traversing them in order. The order it dependent upon whether they
# are topdown or bottom up phases.
# 9.3.1.2 Class declaration
class uvm_phase(uvm_object):
    """Base phase: maps a phase class to a method on a component."""

    # Strips the "uvm_" from this class's name and uses the remainder
    # to get a function call out of the component and execute it.
    # 'uvm_run_phase' becomes 'run_phase' and is called as 'run_phase()'
    @classmethod
    def execute(cls, comp):
        """
        Call this phase's method on a component.

        :param comp: The component whose turn it is to execute
        :raises UVMBadPhase: if the component lacks the phase method
        """
        method_name = cls.__name__[4:]
        try:
            method = getattr(comp, method_name)
        except AttributeError:
            raise error_classes.UVMBadPhase(
                f"{comp.get_name()} is missing {method_name} function")
        method()

    def __str__(self):
        # BUG FIX: instances have no __name__ attribute (it lives on the
        # metaclass), so self.__name__ raised AttributeError.  Use the
        # class's name, mirroring execute()'s "uvm_" stripping.
        return type(self).__name__[4:]
class uvm_topdown_phase(uvm_phase):
    """Phase that visits each component before any of its children."""

    @classmethod
    def traverse(cls, comp):
        """
        Run this phase over *comp* and its subtree, parent first.

        :param comp: root component of the subtree to traverse
        """
        # The parent executes first, then each child subtree in turn.
        cls.execute(comp)
        for subtree_root in comp.get_children():
            cls.traverse(subtree_root)
class uvm_bottomup_phase(uvm_phase):
    """Phase that visits every child before its parent."""

    @classmethod
    def traverse(cls, comp):
        """
        Run this phase over *comp* and its subtree, children first.

        :param comp: root component of the subtree to traverse
        """
        for subtree_root in comp.get_children():
            cls.traverse(subtree_root)
        # All descendants are done; now this node executes.
        cls.execute(comp)
class uvm_threaded_execute_phase(uvm_phase):
    """
    Phase whose component method is a coroutine: instead of being called
    directly it is scheduled as a cocotb task, so the caller can later
    join all the launched tasks.
    """

    @classmethod
    def execute(cls, comp):
        """
        Launch *comp*'s phase coroutine as a cocotb task.

        :param comp: The component whose turn it is to execute
        :raises UVMBadPhase: if the component lacks the phase coroutine
        """
        phase_name = cls.__name__
        assert phase_name.startswith("uvm_"), \
            "We only support phases whose names start with uvm_"
        coro_name = phase_name[4:]
        try:
            phase_coro = getattr(comp, coro_name)
        except AttributeError:
            raise error_classes.UVMBadPhase(
                f"{comp.get_name()} is missing {coro_name} function")
        cocotb.start_soon(phase_coro())
# 9.8 Predefined Phases
# 9.8.1 Common Phases
# The common phases are described in the order of their execution.
# Each class's name minus "uvm_" is the component method it invokes
# (build_phase, connect_phase, ..., final_phase); the base class picks
# the traversal direction.
# 9.8.1.1
class uvm_build_phase(uvm_topdown_phase):
    ...
# 9.8.1.2
class uvm_connect_phase(uvm_bottomup_phase):
    ...
# 9.8.1.3
class uvm_end_of_elaboration_phase(uvm_topdown_phase):
    ...
# 9.8.1.4
class uvm_start_of_simulation_phase(uvm_topdown_phase):
    ...
# 9.8.1.5
# run_phase coroutines are launched as cocotb tasks (threaded execute)
# while traversal order is bottom-up.
class uvm_run_phase(uvm_threaded_execute_phase, uvm_bottomup_phase):
    ...
# 9.8.1.6
class uvm_extract_phase(uvm_topdown_phase):
    ...
# 9.8.1.7
class uvm_check_phase(uvm_topdown_phase):
    ...
# 9.8.1.8
class uvm_report_phase(uvm_topdown_phase):
    ...
# 9.8.1.9
class uvm_final_phase(uvm_topdown_phase):
    ...
# 9.8.2
# UVM run-time phases are left as an exercise for an enterprising soul
# I cannot imagine why anyone would implement this.
# One could add phases by simply extending uvm_topdown_phase
# or uvm_bottom_up phase with a new phase named 'uvm_my_phase' and adding
# the my_phase() method to a uvm component with setattr.
# Execution order used by the phasing driver.
uvm_common_phases = [uvm_build_phase,
                     uvm_connect_phase,
                     uvm_end_of_elaboration_phase,
                     uvm_start_of_simulation_phase,
                     uvm_run_phase,
                     uvm_extract_phase,
                     uvm_check_phase,
                     uvm_report_phase,
                     uvm_final_phase]
| 27.737805 | 78 | 0.667619 | from pyuvm.s05_base_classes import uvm_object
import pyuvm.error_classes as error_classes
import cocotb
# to deal with simulation time and we are not going to deal with a generalized
# phasing system.
#
# So this system simply traverses the common phases, calling the appropriate
# method in each component.
#
# Much of the work in the SV phasing code has to do with handling the passage
# of time. There is no timewheel in Python, so all of that code can go
# away.
#
# Also, the generalized phasing system is rarely used and so that
# is left as an exercise for future developers. Instead we have a simple
# topdown and bottom up traversal of calling methods in component
# classes based on the phase name.
#
# We're not doing schedules or domains. We're just creating a list of classes
# and traversing them in order. The order it dependent upon whether they
# are topdown or bottom up phases.
# 9.3.1.2 Class declaration
class uvm_phase(uvm_object):
# Strips the "uvm_" from this class's name and uses the remainder
@classmethod
def execute(cls, comp):
method_name = cls.__name__[4:]
try:
method = getattr(comp, method_name)
except AttributeError:
raise error_classes.UVMBadPhase(
f"{comp.get_name()} is missing {method_name} function")
method()
def __str__(self):
return self.__name__[4:]
class uvm_topdown_phase(uvm_phase):
@classmethod
def traverse(cls, comp):
cls.execute(comp)
for child in comp.get_children():
cls.traverse(child)
class uvm_bottomup_phase(uvm_phase):
@classmethod
def traverse(cls, comp):
for child in comp.get_children():
cls.traverse(child)
cls.execute(comp)
class uvm_threaded_execute_phase(uvm_phase):
@classmethod
def execute(cls, comp):
phase_name = cls.__name__
assert phase_name.startswith("uvm_"), \
"We only support phases whose names start with uvm_"
method_name = cls.__name__[4:]
try:
method = getattr(comp, method_name)
except AttributeError:
raise error_classes.UVMBadPhase(
f"{comp.get_name()} is missing {method_name} function")
cocotb.start_soon(method())
class uvm_build_phase(uvm_topdown_phase):
...
class uvm_connect_phase(uvm_bottomup_phase):
...
class uvm_end_of_elaboration_phase(uvm_topdown_phase):
...
class uvm_start_of_simulation_phase(uvm_topdown_phase):
...
class uvm_run_phase(uvm_threaded_execute_phase, uvm_bottomup_phase):
...
class uvm_extract_phase(uvm_topdown_phase):
...
class uvm_check_phase(uvm_topdown_phase):
...
class uvm_report_phase(uvm_topdown_phase):
...
class uvm_final_phase(uvm_topdown_phase):
...
uvm_common_phases = [uvm_build_phase,
uvm_connect_phase,
uvm_end_of_elaboration_phase,
uvm_start_of_simulation_phase,
uvm_run_phase,
uvm_extract_phase,
uvm_check_phase,
uvm_report_phase,
uvm_final_phase]
| true | true |
f71b486fd1687af447da51c84625b2c67f3d5401 | 3,199 | py | Python | homeassistant/components/harmony/subscriber.py | PiotrMachowski/core | b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b | [
"Apache-2.0"
] | 3 | 2020-11-27T06:26:27.000Z | 2020-12-09T14:55:16.000Z | homeassistant/components/harmony/subscriber.py | PiotrMachowski/core | b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b | [
"Apache-2.0"
] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/harmony/subscriber.py | PiotrMachowski/core | b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b | [
"Apache-2.0"
] | 3 | 2022-01-02T18:49:54.000Z | 2022-01-25T02:03:54.000Z | """Mixin class for handling harmony callback subscriptions."""
import asyncio
import logging
# pylint: disable-next=deprecated-typing-alias
# Issue with Python 3.9.0 and 3.9.1 with collections.abc.Callable
# https://bugs.python.org/issue42965
from typing import Any, Callable, NamedTuple, Optional
from homeassistant.core import callback
_LOGGER = logging.getLogger(__name__)
NoParamCallback = Optional[Callable[[object], Any]]
ActivityCallback = Optional[Callable[[object, tuple], Any]]
class HarmonyCallback(NamedTuple):
    """Callback type for Harmony Hub notifications."""
    connected: NoParamCallback         # hub connection established
    disconnected: NoParamCallback      # hub connection lost
    config_updated: NoParamCallback    # hub configuration changed
    activity_starting: ActivityCallback   # called with the activity_info tuple
    activity_started: ActivityCallback    # called with the activity_info tuple
class HarmonySubscriberMixin:
    """Base implementation for a subscriber."""
    def __init__(self, hass):
        """Initialize a subscriber."""
        super().__init__()
        self._hass = hass
        self._subscriptions: list = []
        # Serializes activity starts; released when the hub reports the
        # activity started or the connection state changes.
        self._activity_lock = asyncio.Lock()
    async def async_lock_start_activity(self):
        """Acquire the lock."""
        await self._activity_lock.acquire()
    @callback
    def async_unlock_start_activity(self):
        """Release the lock."""
        if self._activity_lock.locked():
            self._activity_lock.release()
    @callback
    def async_subscribe(self, update_callbacks: HarmonyCallback) -> Callable:
        """Add a callback subscriber.

        Returns a zero-argument function that removes the subscription.
        """
        self._subscriptions.append(update_callbacks)
        def _unsubscribe():
            self.async_unsubscribe(update_callbacks)
        return _unsubscribe
    @callback
    def async_unsubscribe(self, update_callback: HarmonyCallback) -> None:
        """Remove a callback subscriber."""
        self._subscriptions.remove(update_callback)
    def _config_updated(self, _=None) -> None:
        """Notify subscribers that the hub configuration changed."""
        _LOGGER.debug("config_updated")
        self._call_callbacks("config_updated")
    def _connected(self, _=None) -> None:
        """Mark the hub available and notify subscribers."""
        _LOGGER.debug("connected")
        self.async_unlock_start_activity()
        # NOTE(review): _available appears to be declared by the class
        # using this mixin -- confirm.
        self._available = True
        self._call_callbacks("connected")
    def _disconnected(self, _=None) -> None:
        """Mark the hub unavailable and notify subscribers."""
        _LOGGER.debug("disconnected")
        self.async_unlock_start_activity()
        self._available = False
        self._call_callbacks("disconnected")
    def _activity_starting(self, activity_info: tuple) -> None:
        """Notify subscribers that an activity is starting."""
        _LOGGER.debug("activity %s starting", activity_info)
        self._call_callbacks("activity_starting", activity_info)
    def _activity_started(self, activity_info: tuple) -> None:
        """Notify subscribers that an activity has started."""
        _LOGGER.debug("activity %s started", activity_info)
        self.async_unlock_start_activity()
        self._call_callbacks("activity_started", activity_info)
    def _call_callbacks(self, callback_func_name: str, argument: Optional[tuple] = None) -> None:
        # Fan the named callback out to every subscription that set it,
        # scheduling each via Home Assistant's job runner.
        for subscription in self._subscriptions:
            current_callback = getattr(subscription, callback_func_name)
            if current_callback:
                if argument:
                    self._hass.async_run_job(current_callback, argument)
                else:
                    self._hass.async_run_job(current_callback)
| 32.979381 | 79 | 0.689278 |
import asyncio
import logging
from typing import Any, Callable, NamedTuple, Optional
from homeassistant.core import callback
_LOGGER = logging.getLogger(__name__)
NoParamCallback = Optional[Callable[[object], Any]]
ActivityCallback = Optional[Callable[[object, tuple], Any]]
class HarmonyCallback(NamedTuple):
    """Callback type for Harmony Hub notifications."""
    # Fired on (re)connect / disconnect / config change; no payload.
    connected: NoParamCallback
    disconnected: NoParamCallback
    config_updated: NoParamCallback
    # Fired when an activity starts switching / has started; receives the activity tuple.
    activity_starting: ActivityCallback
    activity_started: ActivityCallback
class HarmonySubscriberMixin:
    """Base implementation for a subscriber: dispatches hub events to callbacks."""
    def __init__(self, hass):
        """Initialize a subscriber."""
        super().__init__()
        self._hass = hass
        self._subscriptions = []
        self._activity_lock = asyncio.Lock()
    async def async_lock_start_activity(self):
        """Acquire the activity lock."""
        await self._activity_lock.acquire()
    @callback
    def async_unlock_start_activity(self):
        """Release the activity lock if held."""
        if self._activity_lock.locked():
            self._activity_lock.release()
    @callback
    def async_subscribe(self, update_callbacks: HarmonyCallback) -> Callable:
        """Add a callback subscriber; return a function that removes it."""
        self._subscriptions.append(update_callbacks)
        def _unsubscribe():
            self.async_unsubscribe(update_callbacks)
        return _unsubscribe
    @callback
    def async_unsubscribe(self, update_callback: HarmonyCallback):
        """Remove a callback subscriber."""
        self._subscriptions.remove(update_callback)
    def _config_updated(self, _=None) -> None:
        """Notify subscribers that the hub configuration changed."""
        _LOGGER.debug("config_updated")
        self._call_callbacks("config_updated")
    def _connected(self, _=None) -> None:
        """Mark the hub available and notify subscribers."""
        _LOGGER.debug("connected")
        self.async_unlock_start_activity()
        self._available = True
        self._call_callbacks("connected")
    def _disconnected(self, _=None) -> None:
        """Mark the hub unavailable and notify subscribers."""
        _LOGGER.debug("disconnected")
        self.async_unlock_start_activity()
        self._available = False
        self._call_callbacks("disconnected")
    def _activity_starting(self, activity_info: tuple) -> None:
        """Notify subscribers that an activity switch is starting."""
        _LOGGER.debug("activity %s starting", activity_info)
        self._call_callbacks("activity_starting", activity_info)
    def _activity_started(self, activity_info: tuple) -> None:
        """Notify subscribers that an activity switch completed."""
        _LOGGER.debug("activity %s started", activity_info)
        self.async_unlock_start_activity()
        self._call_callbacks("activity_started", activity_info)
    def _call_callbacks(self, callback_func_name: str, argument: tuple = None):
        """Run the named callback of every subscription via hass.

        NOTE(review): a falsy (empty) tuple argument would be dropped by the
        truthiness check below — presumably activity_info is always non-empty;
        confirm before relying on it.
        """
        for subscription in self._subscriptions:
            current_callback = getattr(subscription, callback_func_name)
            if current_callback:
                if argument:
                    self._hass.async_run_job(current_callback, argument)
                else:
                    self._hass.async_run_job(current_callback)
| true | true |
f71b492ed7fe05c2dd8f787b6d743c15e42b6651 | 23,428 | py | Python | test/test_framework/util.py | vpubchain/Phore | 7819f046e629ccb5a00fa4f89a7399a7732b4113 | [
"MIT"
] | null | null | null | test/test_framework/util.py | vpubchain/Phore | 7819f046e629ccb5a00fa4f89a7399a7732b4113 | [
"MIT"
] | null | null | null | test/test_framework/util.py | vpubchain/Phore | 7819f046e629ccb5a00fa4f89a7399a7732b4113 | [
"MIT"
] | 1 | 2019-08-10T08:20:56.000Z | 2019-08-10T08:20:56.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
VPUBD_PROC_WAIT_TIMEOUT = 60
class PortSeed:
    """Per-process seed used by p2p_port/rpc_port to avoid port collisions."""
    # Must be initialized with a unique integer for each process
    n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    """Turn mocktime on, pinned to Jan 1, 2014 + 201 blocks * 10 minutes."""
    # For backward compatibility of the python scripts with previous
    # versions of the cache, set MOCKTIME to Jan 1, 2014 + (201 * 10 * 60)
    global MOCKTIME
    MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
    """Turn mocktime off (0 means real time is used)."""
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    """Return the module-wide mocktime (0 when disabled)."""
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing.

    dirname: directory where per-node RPC coverage logs will be written.
    """
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """Return a coverage-wrapped AuthServiceProxy for making RPC calls.

    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    kwargs = {}
    if timeout is not None:
        kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **kwargs)
    proxy.url = url  # remember the endpoint on the proxy for debugging/info
    if COVERAGE_DIR:
        logfile = coverage.get_filename(COVERAGE_DIR, node_number)
    else:
        logfile = None
    return coverage.AuthServiceProxyWrapper(proxy, logfile)
def p2p_port(n):
    """Return the p2p listen port for node *n* in this test process."""
    assert(n <= MAX_NODES)
    # Offset by the per-process seed so parallel test runs don't collide.
    seed_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + n + seed_offset
def rpc_port(n):
    """Return the RPC listen port for node *n* in this test process."""
    seed_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + PORT_RANGE + n + seed_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = float(Decimal("20000000.00000003"))
    round_tripped = json.loads(json.dumps(value))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by *hex_string* (ValueError if invalid)."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the ASCII hex representation of *byte_str*."""
    encoded = hexlify(byte_str)
    return encoded.decode('ascii')
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into raw bytes."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)
def str_to_b64str(string):
    """Return *string* UTF-8 encoded then base64 encoded, as an ASCII str."""
    raw = string.encode('utf-8')
    return b64encode(raw).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
    """Poll until every node reports the same best block hash.

    Sleeps *wait* seconds between polls; raises AssertionError after
    *timeout* seconds without convergence.
    """
    remaining = timeout
    while remaining > 0:
        best_hashes = [node.getbestblockhash() for node in rpc_connections]
        if best_hashes.count(best_hashes[0]) == len(best_hashes):
            return True
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
    """Poll until every node reports the same set of mempool transactions.

    Sleeps *wait* seconds between polls; raises AssertionError after
    *timeout* seconds without convergence.
    """
    remaining = timeout
    while remaining > 0:
        reference_pool = set(rpc_connections[0].getrawmempool())
        matching = 1  # node 0 trivially matches itself
        for node in rpc_connections[1:]:
            if set(node.getrawmempool()) == reference_pool:
                matching += 1
        if matching == len(rpc_connections):
            return True
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Mempool sync failed")
vpubd_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname*, write a regtest vpub.conf
    with its rpc credentials and ports, and return the datadir path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    config_lines = [
        "regtest=1",
        "server=1",
        "rpcuser=" + rpc_u,
        "rpcpassword=" + rpc_p,
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
        "listenonion=0",
    ]
    with open(os.path.join(datadir, "vpub.conf"), 'w', encoding='utf8') as f:
        f.write("\n".join(config_lines) + "\n")
    return datadir
def rpc_auth_pair(n):
    """Return the (rpcuser, rpcpassword) pair used for node *n*."""
    suffix = str(n)
    return 'rpcuser' + suffix, 'rpcpass' + suffix
def rpc_url(i, rpchost=None):
    """Build the authenticated RPC URL for node *i*.

    rpchost may be None (localhost, default port), "host", or "host:port".
    """
    rpc_u, rpc_p = rpc_auth_pair(i)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        pieces = rpchost.split(':')
        # Exactly "host:port" overrides both; anything else is host only.
        if len(pieces) == 2:
            host, port = pieces
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_vpubd_start(process, url, i):
    '''
    Wait for vpubd to start. This means that RPC is accessible and fully initialized.
    Raise an exception if vpubd exits during initialization.

    process: the subprocess.Popen handle of the daemon
    url: authenticated RPC URL of node i
    i: node number (used for coverage logging)
    '''
    while True:
        if process.poll() is not None:
            raise Exception('vpubd exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()
            break # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            # -28 is the "RPC in warmup" error code; anything else is fatal
            if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
    """
    Create a cache of a 200-block-long chain (with wallet) for MAX_NODES.
    Afterward, create num_nodes copies from the cache under test_dir.

    The cache is only (re)built when any cache/node<i> directory is missing.
    Blocks are generated under mocktime so the cached chain is reproducible.
    """
    assert num_nodes <= MAX_NODES
    create_cache = False
    for i in range(MAX_NODES):
        if not os.path.isdir(os.path.join('cache', 'node' + str(i))):
            create_cache = True
            break
    if create_cache:
        # find and delete old cache directories if any exist
        for i in range(MAX_NODES):
            if os.path.isdir(os.path.join("cache", "node" + str(i))):
                shutil.rmtree(os.path.join("cache", "node" + str(i)))
        # Create cache directories, run vpubds:
        for i in range(MAX_NODES):
            datadir = initialize_datadir("cache", i)
            args = [os.getenv("VPUBD", "vpubd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
            if i > 0:
                args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
            vpubd_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: vpubd started, waiting for RPC to come up")
            wait_for_vpubd_start(vpubd_processes[i], rpc_url(i), i)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: RPC succesfully started")
        rpcs = []
        for i in range(MAX_NODES):
            try:
                rpcs.append(get_rpc_proxy(rpc_url(i), i))
            except Exception:
                # BUG FIX: 'url' was undefined here (NameError on the error
                # path); report the URL we actually failed to connect to.
                # Also narrowed the bare except so SystemExit/KeyboardInterrupt
                # still propagate.
                sys.stderr.write("Error connecting to " + rpc_url(i) + "\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 first nodes
        # gets 25 mature blocks and 25 immature.
        # Note: To preserve compatibility with older versions of
        # initialize_chain, only 4 nodes will generate coins.
        #
        # blocks are created with timestamps 10 minutes apart
        # starting from 2010 minutes in the past
        enable_mocktime()
        block_time = get_mocktime() - (201 * 10 * 60)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10 * 60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        disable_mocktime()
        for i in range(MAX_NODES):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(num_nodes):
        from_dir = os.path.join("cache", "node" + str(i))
        to_dir = os.path.join(test_dir, "node" + str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i)  # Overwrite port/rpcport in vpub.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_id in range(num_nodes):
        initialize_datadir(test_dir, node_id)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a vpubd and return RPC connection to it.

    i: node number; dirname: parent directory of node datadirs.
    extra_args: additional command-line arguments for the daemon.
    rpchost: optional "host[:port]" override for the RPC endpoint.
    timewait: HTTP timeout for the returned proxy; binary: daemon path
    (defaults to the VPUBD environment variable, or "vpubd").
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("VPUBD", "vpubd")
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()), "-regtest", "-sporkkey=923EhWh2bJHynX6d4Tqt2Q75bhTDCT1b4kff3qzDKDZHZ6pkQs7"]
    if extra_args is not None: args.extend(extra_args)
    vpubd_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        # NOTE(review): "phroed" looks like a typo for "vpubd" in this message
        print("start_node: phroed started, waiting for RPC to come up")
    url = rpc_url(i, rpchost)
    wait_for_vpubd_start(vpubd_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: RPC succesfully started")
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start multiple vpubds and return their RPC connections.

    If any node fails to start, every node started so far is stopped
    before the exception is re-raised.
    """
    extra_args = extra_args if extra_args is not None else [None] * num_nodes
    binary = binary if binary is not None else [None] * num_nodes
    rpcs = []
    try:
        for node_id in range(num_nodes):
            rpcs.append(start_node(node_id, dirname, extra_args[node_id], rpchost,
                                   timewait=timewait, binary=binary[node_id]))
    except:  # If one node failed to start, stop the others
        stop_nodes(rpcs)
        raise
    return rpcs
def log_filename(dirname, n_node, logname):
    """Return the path of *logname* inside node *n_node*'s regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node *i*: issue the RPC stop command, wait for its process to
    exit (bounded by VPUBD_PROC_WAIT_TIMEOUT), and forget the Popen handle."""
    try:
        node.stop()
    except http.client.CannotSendRequest as e:
        # Connection already gone; the process wait below still applies.
        print("WARN: Unable to stop node: " + repr(e))
    vpubd_processes[i].wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
    del vpubd_processes[i]
def stop_nodes(nodes):
    """Ask every node to stop, drop the connections, and wait for all
    daemon processes to exit."""
    for node in nodes:
        try:
            node.stop()
        except http.client.CannotSendRequest as e:
            print("WARN: Unable to stop node: " + repr(e))
    del nodes[:] # Emptying array closes connections as a side effect
    wait_vpubds()
def set_node_times(nodes, t):
    """Set mocktime *t* on every node in *nodes*."""
    for rpc_node in nodes:
        rpc_node.setmocktime(t)
def wait_vpubds():
    """Wait for every spawned vpubd process to exit, then clear the table."""
    # Wait for all vpubds to cleanly exit
    for vpubd in vpubd_processes.values():
        vpubd.wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
    vpubd_processes.clear()
def connect_nodes(from_connection, node_num):
    """Ask *from_connection* to connect once to local node *node_num* and
    block until the version handshake with every peer has completed."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other in both directions."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount.
    Raises RuntimeError if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return (total_in, inputs): a random set of unspent txouts from
    *from_node* worth at least *amount_needed*.
    Raises RuntimeError when the wallet cannot cover the amount.
    """
    assert(confirmations_required >= 0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and utxo:
        txout = utxo.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"], "vout": txout["vout"], "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s) for a transaction and return them as a dict
    mapping fresh addresses (from *from_node*) to amounts.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding (down to satoshis):
        outputs[extra_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output.

    Note: the send-to-self pays amount+fee so the follow-up spend can pay
    *fee* again and still deliver *amount* to to_node.
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction between two random nodes.
    Fee is min_fee plus a random multiple (0..fee_variants) of fee_increment.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction between two random nodes.
    Fee is min_fee plus a random multiple (0..fee_variants) of fee_increment.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert the fee was in range for a transaction of *tx_size* bytes."""
    target_fee = tx_size * fee_per_kB / 1000
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
    """Raise AssertionError showing both values unless thing1 == thing2."""
    if thing1 != thing2:
        message = "%s != %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc* (message not checked)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises *exc*; if *message* is not None it
    must be a substring of the exception's .error['message'] (JSON-RPC
    style errors).  Any other exception, or no exception, fails."""
    try:
        fun(*args, **kwds)
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:"+e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Raise AssertionError unless *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as e:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless *string* is a lowercase hex hash of the
    given *length* (pass length=None/0 to skip the length check)."""
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array (and expected must be {}).
    """
    if should_not_find == True:
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        # Evaluate every to_match key (no short-circuit) so a missing key
        # raises KeyError just as the original elementwise comparison did.
        mismatched_keys = [key for key, value in to_match.items() if item[key] != value]
        if mismatched_keys:
            continue
        if should_not_find == True:
            num_matched = num_matched + 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched = num_matched + 1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s" % (str(to_match)))
def satoshi_round(amount):
    """Round *amount* down to eight decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Create at least *count* confirmed utxos on *node* by repeatedly
    splitting existing utxos in two; *fee* must cover relay and mining.
    Returns the resulting listunspent() array."""
    # Mine enough blocks to have spendable coins (plus 101 for maturity).
    node.setgenerate(True, int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        txid = node.sendrawtransaction(signed_tx)
    # Confirm everything we just broadcast.
    while (node.getmempoolinfo()['size'] > 0):
        node.setgenerate(True, 1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return the hex for 128 large OP_RETURN txouts that can be spliced
    into a transaction to inflate its size (see
    create_lots_of_big_transactions)."""
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 512, then 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # Each txout: 8-byte zero value, PUSHDATA2 script length (0x0204 = 516
    # little-endian), then the script itself.
    one_txout = "0000000000000000" + "fd0402" + script_pubkey
    # "81" is the txout-count prefix emitted before these outputs; the 128
    # big outputs are inserted ahead of the change output by the caller.
    return "81" + one_txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Build and sign a raw transaction spending output 0 of *coinbase*
    entirely to *to_address*; return the signed hex."""
    inputs = [{"txid": coinbase, "vout": 0}]
    outputs = {to_address: amount}
    raw = node.createrawtransaction(inputs, outputs)
    signed = node.signrawtransaction(raw)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
    """Spend each utxo in *utxos*, splicing the raw *txouts* hex (from
    gen_return_txouts) into each transaction to make it large.
    Returns the list of broadcast txids."""
    addr = node.getnewaddress()
    txids = []
    for i in range(len(utxos)):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr] = satoshi_round(send_value)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the big OP_RETURN outputs in right after the input section
        # (fixed offsets 92/94 rely on the 1-input raw tx layout above).
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry *key* from node's getblockchaininfo."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
| 35.713415 | 201 | 0.651742 |
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
MAX_NODES = 8
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
VPUBD_PROC_WAIT_TIMEOUT = 60
class PortSeed:
    """Per-process seed used by p2p_port/rpc_port to avoid port collisions."""
    # Must be initialized with a unique integer for each process
    n = None

# FIX: MOCKTIME and its comments had been accidentally indented into the
# PortSeed class body; they are module-level state (compare the first copy
# of this file) and are restored to column 0 here.
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    """Turn mocktime on, pinned to Jan 1, 2014 + 201 blocks * 10 minutes."""
    #For backwared compatibility of the python scripts
    #with previous versions of the cache, set MOCKTIME
    #to Jan 1, 2014 + (201 * 10 * 60)
    global MOCKTIME
    MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
    """Turn mocktime off (0 means real time is used)."""
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    """Return the module-wide mocktime (0 when disabled)."""
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing."""
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """Return a coverage-wrapped AuthServiceProxy for *url* (node_number is
    used only to name the coverage log; timeout is the HTTP timeout)."""
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url # store URL on proxy for info
    coverage_logfile = coverage.get_filename(
        COVERAGE_DIR, node_number) if COVERAGE_DIR else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Return the p2p listen port for node *n* in this test process."""
    assert(n <= MAX_NODES)
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """Return the RPC listen port for node *n* in this test process."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
    """Make sure the json library in use does not lose satoshi precision."""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by *hex_string*."""
    return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the ASCII hex representation of *byte_str*."""
    return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into raw bytes."""
    return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
    """Return *string* UTF-8 encoded then base64 encoded, as an ASCII str."""
    return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
    """Poll until every node reports the same best block hash, sleeping
    *wait* seconds between polls; AssertionError after *timeout* seconds."""
    while timeout > 0:
        tips = [ x.getbestblockhash() for x in rpc_connections ]
        if tips == [ tips[0] ]*len(tips):
            return True
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
    """Poll until every node reports the same mempool transaction set,
    sleeping *wait* seconds between polls; AssertionError after *timeout*."""
    while timeout > 0:
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match+1
        if num_match == len(rpc_connections):
            return True
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Mempool sync failed")
vpubd_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname*, write a regtest vpub.conf
    with its rpc credentials and ports, and return the datadir path."""
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    with open(os.path.join(datadir, "vpub.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        f.write("server=1\n")
        f.write("rpcuser=" + rpc_u + "\n")
        f.write("rpcpassword=" + rpc_p + "\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
def rpc_auth_pair(n):
    """Return the (rpcuser, rpcpassword) pair used for node *n*."""
    return 'rpcuser' + str(n), 'rpcpass' + str(n)
def rpc_url(i, rpchost=None):
    """Build the authenticated RPC URL for node *i*; rpchost may be None,
    "host", or "host:port"."""
    rpc_u, rpc_p = rpc_auth_pair(i)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_vpubd_start(process, url, i):
    """Wait until vpubd's RPC at *url* is accessible and fully initialized;
    raise if the *process* exits during initialization."""
    while True:
        if process.poll() is not None:
            raise Exception('vpubd exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()
            break # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            # -28 is the "RPC in warmup" error code; anything else is fatal
            if e.error['code'] != -28: # RPC in warmup?
                raise # unkown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
    """
    Create a cache of a 200-block-long chain (with wallet) for MAX_NODES.
    Afterward, create num_nodes copies from the cache under test_dir.
    """
    assert num_nodes <= MAX_NODES
    create_cache = False
    for i in range(MAX_NODES):
        if not os.path.isdir(os.path.join('cache', 'node' + str(i))):
            create_cache = True
            break
    if create_cache:
        #find and delete old cache directories if any exist
        for i in range(MAX_NODES):
            if os.path.isdir(os.path.join("cache", "node" + str(i))):
                shutil.rmtree(os.path.join("cache", "node" + str(i)))
        # Create cache directories, run vpubds:
        for i in range(MAX_NODES):
            datadir = initialize_datadir("cache", i)
            args = [os.getenv("VPUBD", "vpubd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
            if i > 0:
                args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
            vpubd_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: vpubd started, waiting for RPC to come up")
            wait_for_vpubd_start(vpubd_processes[i], rpc_url(i), i)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: RPC succesfully started")
        rpcs = []
        for i in range(MAX_NODES):
            try:
                rpcs.append(get_rpc_proxy(rpc_url(i), i))
            except Exception:
                # BUG FIX: 'url' was undefined here (NameError on the error
                # path); report the URL we actually failed to connect to.
                sys.stderr.write("Error connecting to " + rpc_url(i) + "\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 first nodes gets
        # 25 mature and 25 immature blocks, with timestamps 10 minutes
        # apart starting 2010 minutes in the past (under mocktime).
        enable_mocktime()
        block_time = get_mocktime() - (201 * 10 * 60)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10 * 60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        disable_mocktime()
        for i in range(MAX_NODES):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(num_nodes):
        from_dir = os.path.join("cache", "node" + str(i))
        to_dir = os.path.join(test_dir, "node" + str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i)  # Overwrite port/rpcport in vpub.conf
def initialize_chain_clean(test_dir, num_nodes):
    """Create an empty blockchain and num_nodes wallet datadirs."""
    for i in range(num_nodes):
        datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """Start a vpubd for node *i* (datadir under *dirname*) and return an
    RPC connection to it; binary defaults to $VPUBD or "vpubd"."""
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("VPUBD", "vpubd")
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()), "-regtest", "-sporkkey=923EhWh2bJHynX6d4Tqt2Q75bhTDCT1b4kff3qzDKDZHZ6pkQs7"]
    if extra_args is not None: args.extend(extra_args)
    vpubd_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        # NOTE(review): "phroed" looks like a typo for "vpubd" in this message
        print("start_node: phroed started, waiting for RPC to come up")
    url = rpc_url(i, rpchost)
    wait_for_vpubd_start(vpubd_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: RPC succesfully started")
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """Start num_nodes daemons and return a list of their RPC proxies.

    extra_args/binary may be per-node lists; None means defaults for all.
    """
    if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
    if binary is None: binary = [ None for _ in range(num_nodes) ]
    rpcs = []
    try:
        for i in range(num_nodes):
            rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
    except: # If one node failed to start, stop the others
        # Bare except on purpose: clean up even on KeyboardInterrupt, then re-raise.
        stop_nodes(rpcs)
        raise
    return rpcs
def log_filename(dirname, n_node, logname):
    """Path of log file `logname` inside node n_node's regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop one node via RPC and wait for daemon process i to exit."""
    try:
        node.stop()
    except http.client.CannotSendRequest as e:
        # Connection already unusable; the daemon is probably going down anyway.
        print("WARN: Unable to stop node: " + repr(e))
    vpubd_processes[i].wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
    del vpubd_processes[i]
def stop_nodes(nodes):
    """Send stop RPCs to every node, drop the proxies, then reap all daemons."""
    for node in nodes:
        try:
            node.stop()
        except http.client.CannotSendRequest as e:
            print("WARN: Unable to stop node: " + repr(e))
    del nodes[:] # Emptying array closes connections as a side effect
    wait_vpubds()
def set_node_times(nodes, t):
    """Set the same mock time t on every node in nodes."""
    for n in nodes:
        n.setmocktime(t)
def wait_vpubds():
    """Wait for every tracked vpubd process to exit, then forget them all."""
    # Wait for all vpubds to cleanly exit
    for vpubd in vpubd_processes.values():
        vpubd.wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
    vpubd_processes.clear()
def connect_nodes(from_connection, node_num):
    """Connect from_connection to node node_num and wait for the handshake."""
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """Return the vout index of the output of txid whose value equals amount.

    Raises RuntimeError when no output matches.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, output in enumerate(txdata["vout"]):
        if output["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """Collect UTXOs from from_node covering at least amount_needed.

    Spends confirmed outputs in random order; returns (total_in, inputs)
    where inputs is a list of txid/vout/address dicts. Raises RuntimeError
    when the wallet cannot cover the amount.
    """
    assert confirmations_required >= 0
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    total_in = Decimal("0.00000000")
    inputs = []
    while candidates and total_in < amount_needed:
        utxo = candidates.pop()
        total_in += utxo["amount"]
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """Build a change-output mapping for amount_in - (amount_out + fee).

    Large change (more than twice the spent amount) is split over two fresh
    addresses of from_node; returns a possibly-empty {address: amount} dict.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs;
        # round the first half down to satoshi precision.
        extra_address = from_node.getnewaddress()
        outputs[extra_address] = Decimal(change / 2).quantize(
            Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """Broadcast a 1-input/1-output tx whose input has zero confirmations
    (hence zero priority); return (txid, raw hex of the final tx)."""
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """Send a zero-priority tx between two random nodes with a randomized
    fee in [min_fee, min_fee + fee_increment*fee_variants]; return (txid, hex, fee)."""
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """Send a normal tx between two random nodes with a randomized fee;
    return (txid, raw hex, fee)."""
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Check that fee matches tx_size * fee_per_kB, with 2 bytes of slack."""
    target_fee = tx_size * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
    """Fail with a descriptive message when the two values differ."""
    if thing1 != thing2:
        raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    """Fail unless thing1 is strictly greater than thing2."""
    if thing1 <= thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises exc (any error message)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises exc whose RPC error message
    contains `message`; message=None skips the substring check."""
    try:
        fun(*args, **kwds)
    except exc as err:
        # Expected exception type; optionally check the JSON-RPC error text.
        if message is not None and message not in err.error['message']:
            raise AssertionError("Expected substring not found:"+err.error['message'])
    except Exception as err:
        raise AssertionError("Unexpected exception raised: "+type(err).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Assert that `string` parses as a base-16 number."""
    try:
        int(string, 16)
    except Exception as err:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err))
def assert_is_hash_string(string, length=64):
    """Assert `string` is a lowercase-hex hash of the given length
    (length=None/0 skips the length check)."""
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """Assert that the objects in object_array matching all key/value pairs
    of to_match also carry all key/value pairs of expected.

    With should_not_find=True, instead assert that NO object matches
    to_match (expected must then be the empty dict).
    """
    if should_not_find == True:
        # Nothing can be "expected" of objects that must not exist.
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        all_match = True
        for key,value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find == True:
            # A match here is a failure; count it for the final check below.
            num_matched = num_matched+1
        for key,value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
            num_matched = num_matched+1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Round `amount` down to eight decimal places (one satoshi)."""
    quantum = Decimal('0.00000001')
    return Decimal(amount).quantize(quantum, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Split node's coins until it owns at least `count` confirmed UTXOs;
    return the resulting listunspent output."""
    # Mine enough blocks to have mature coinbases available to split.
    node.setgenerate(True, int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        # Each pass spends one UTXO into two halves (minus the fee),
        # growing the UTXO count by one per transaction.
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        txid = node.sendrawtransaction(signed_tx)
    # Mine until the mempool is empty so every new UTXO is confirmed.
    while (node.getmempoolinfo()['size'] > 0):
        node.setgenerate(True, 1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 zero-value OP_RETURN outputs of ~516 bytes each."""
    # OP_RETURN (6a) OP_PUSHDATA2 (4d) 0x0200 followed by 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # One txout = 8-byte zero value, script length fd0402, then the script.
    txout = "0000000000000000" + "fd0402" + script_pubkey
    # 0x81 is the output-count varint this blob is spliced in behind.
    return "81" + txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Build and fully sign a tx spending output 0 of `coinbase` to
    to_address; return the signed raw hex."""
    inputs = [{ "txid" : coinbase, "vout" : 0}]
    outputs = { to_address : amount }
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx)
    # Signing must complete; a partial signature means the wallet
    # does not own the coinbase being spent.
    assert_equal(signresult["complete"], True)
    return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
    """Spend every utxo in its own oversized transaction; return the txids."""
    addr = node.getnewaddress()
    txids = []
    for i in range(len(utxos)):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr] = satoshi_round(send_value)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the OP_RETURN blob into the raw hex.
        # NOTE(review): offsets 92/94 assume a fixed single-input tx layout
        # (the byte at 92-94 is the output-count varint) -- confirm if the
        # serialization format changes.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # "NONE" sighash: outputs are not signed, so the splice stays valid.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry for `key` from node's chain info."""
    return node.getblockchaininfo()['bip9_softforks'][key]
| true | true |
f71b49b3ed0e3ba6b9a8f90ca5e35100450fb249 | 13,385 | py | Python | skimage/segmentation/tests/test_random_walker.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | 1 | 2019-01-12T13:17:32.000Z | 2019-01-12T13:17:32.000Z | skimage/segmentation/tests/test_random_walker.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | null | null | null | skimage/segmentation/tests/test_random_walker.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from skimage.segmentation import random_walker
from skimage.transform import resize
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
# older versions of scipy raise a warning with new NumPy because they use
# numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank.
# Raw strings: \A and \Z are regex anchors here, not string escapes
# (avoids invalid-escape-sequence warnings, W605). Values are unchanged.
SCIPY_EXPECTED = r'numpy.linalg.matrix_rank|\A\Z'
PYAMG_EXPECTED_WARNING = r'pyamg|\A\Z'
PYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING
def make_2d_syntheticdata(lx, ly=None):
    """Synthetic 2D image: noisy background, hollow bright square with a
    small gap in one edge, plus a seed array with labels 1 and 2."""
    if ly is None:
        ly = lx
    np.random.seed(1234)
    data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
    half = int(lx // 5)
    cx, cy = lx // 2, ly // 2
    # Bright square outline centred on (cx, cy)...
    data[cx - half:cx + half, cy - half:cy + half] = 1
    # ...with a noisy interior (drawn AFTER the background noise so the
    # RNG stream matches the original implementation exactly).
    data[cx - half + 1:cx + half - 1,
         cy - half + 1:cy + half - 1] = (
        0.1 * np.random.randn(2 * half - 2, 2 * half - 2))
    # Punch a small gap in one edge of the square.
    data[cx - half, cy - half // 8:cy + half // 8] = 0
    seeds = np.zeros_like(data)
    seeds[lx // 5, ly // 5] = 1           # background seed
    seeds[cx + half // 4, cy - half // 4] = 2  # seed inside the square
    return data, seeds
def make_3d_syntheticdata(lx, ly=None, lz=None):
    """Synthetic 3D volume: noisy background, hollow bright cube with a
    hole in one face, plus a seed array with labels 1 and 2."""
    if ly is None:
        ly = lx
    if lz is None:
        lz = lx
    np.random.seed(1234)
    data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
    small_l = int(lx // 5)
    # Bright cube shell centred in the volume.
    data[lx // 2 - small_l:lx // 2 + small_l,
         ly // 2 - small_l:ly // 2 + small_l,
         lz // 2 - small_l:lz // 2 + small_l] = 1
    data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
         ly // 2 - small_l + 1:ly // 2 + small_l - 1,
         lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0
    # make a hole
    hole_size = np.max([1, small_l // 8])
    data[lx // 2 - small_l,
         ly // 2 - hole_size:ly // 2 + hole_size,
         lz // 2 - hole_size:lz // 2 + hole_size] = 0
    seeds = np.zeros_like(data)
    # Label 1: background seed; label 2: seed inside the cube.
    seeds[lx // 5, ly // 5, lz // 5] = 1
    seeds[lx // 2 + small_l // 4,
          ly // 2 - small_l // 4,
          lz // 2 - small_l // 4] = 2
    return data, seeds
def test_2d_bf():
    """random_walker in brute-force mode on 2D data, with 2 then 3 labels."""
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    labels_bf = random_walker(data, labels, beta=90, mode='bf')
    assert (labels_bf[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
                                 return_full_prob=True)
    assert (full_prob_bf[1, 25:45, 40:60] >=
            full_prob_bf[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    # Now test with more than two labels
    labels[55, 80] = 3
    full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
                                 return_full_prob=True)
    assert (full_prob_bf[1, 25:45, 40:60] >=
            full_prob_bf[0, 25:45, 40:60]).all()
    assert len(full_prob_bf) == 3
    assert data.shape == labels.shape
def test_2d_cg():
    """random_walker in conjugate-gradient mode on 2D data."""
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        labels_cg = random_walker(data, labels, beta=90, mode='cg')
    assert (labels_cg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        full_prob = random_walker(data, labels, beta=90, mode='cg',
                                  return_full_prob=True)
    assert (full_prob[1, 25:45, 40:60] >=
            full_prob[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    return data, labels_cg
def test_2d_cg_mg():
    """random_walker in cg_mg (pyamg multigrid) mode on 2D data."""
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED
    with expected_warnings([expected]):
        labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
    assert (labels_cg_mg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings([expected]):
        full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
                                  return_full_prob=True)
    assert (full_prob[1, 25:45, 40:60] >=
            full_prob[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    return data, labels_cg_mg
def test_types():
    """random_walker accepts integer (uint8) input images."""
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    data = 255 * (data - data.min()) // (data.max() - data.min())
    data = data.astype(np.uint8)
    with expected_warnings([PYAMG_SCIPY_EXPECTED]):
        labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
    assert (labels_cg_mg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    return data, labels_cg_mg
def test_reorder_labels():
    """Non-consecutive label values are relabelled consecutively in output."""
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    labels[labels == 2] = 4
    labels_bf = random_walker(data, labels, beta=90, mode='bf')
    assert (labels_bf[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    return data, labels_bf
def test_2d_inactive():
    """Pixels marked with negative labels are excluded from segmentation."""
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    labels[10:20, 10:20] = -1
    labels[46:50, 33:38] = -2
    labels = random_walker(data, labels, beta=90)
    assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    return data, labels
def test_3d():
    """random_walker on a 3D volume (cg mode)."""
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        labels = random_walker(data, labels, mode='cg')
    assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data.shape == labels.shape
    return data, labels
def test_3d_inactive():
    """3D segmentation with a block of inactive (-1) voxels."""
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    old_labels = np.copy(labels)
    labels[5:25, 26:29, 26:29] = -1
    after_labels = np.copy(labels)
    with expected_warnings(['"cg" mode|CObject type' + '|' + SCIPY_EXPECTED]):
        labels = random_walker(data, labels, mode='cg')
    assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data.shape == labels.shape
    return data, labels, old_labels, after_labels
def test_multispectral_2d():
    """Duplicated channels give the same result as a single channel (2D)."""
    lx, ly = 70, 100
    data, labels = make_2d_syntheticdata(lx, ly)
    data = data[..., np.newaxis].repeat(2, axis=-1)  # Expect identical output
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        multi_labels = random_walker(data, labels, mode='cg',
                                     multichannel=True)
    assert data[..., 0].shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        single_labels = random_walker(data[..., 0], labels, mode='cg')
    assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
    assert data[..., 0].shape == labels.shape
    return data, multi_labels, single_labels, labels
def test_multispectral_3d():
    """Duplicated channels give the same result as a single channel (3D)."""
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    data = data[..., np.newaxis].repeat(2, axis=-1)  # Expect identical output
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        multi_labels = random_walker(data, labels, mode='cg',
                                     multichannel=True)
    assert data[..., 0].shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        single_labels = random_walker(data[..., 0], labels, mode='cg')
    assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
    assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data[..., 0].shape == labels.shape
    return data, multi_labels, single_labels, labels
def test_spacing_0():
    """Anisotropic voxel spacing along Z is honoured via the spacing kwarg."""
    n = 30
    lx, ly, lz = n, n, n
    data, _ = make_3d_syntheticdata(lx, ly, lz)
    # Rescale `data` along Z axis
    data_aniso = np.zeros((n, n, n // 2))
    for i, yz in enumerate(data):
        data_aniso[i, :, :] = resize(yz, (n, n // 2),
                                     mode='constant',
                                     anti_aliasing=False)
    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso = np.zeros_like(data_aniso)
    labels_aniso[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso[lx // 2 + small_l // 4,
                 ly // 2 - small_l // 4,
                 lz // 4 - small_l // 8] = 2
    # Test with `spacing` kwarg
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
                                     spacing=(1., 1., 0.5))
    assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()
def test_spacing_1():
    """Anisotropic voxel spacing along Y, then along X, is honoured."""
    n = 30
    lx, ly, lz = n, n, n
    data, _ = make_3d_syntheticdata(lx, ly, lz)
    # Rescale `data` along Y axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i, yz in enumerate(data):
        data_aniso[i, :, :] = resize(yz, (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)
    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso = np.zeros_like(data_aniso)
    labels_aniso[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso[lx // 2 + small_l // 4,
                 ly - small_l // 2,
                 lz // 2 - small_l // 4] = 2
    # Test with `spacing` kwarg
    # First, anisotropic along Y
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
                                     spacing=(1., 2., 1.))
    assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
    # Rescale `data` along X axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i in range(data.shape[1]):
        data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)
    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso2 = np.zeros_like(data_aniso)
    labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso2[lx - small_l // 2,
                  ly // 2 + small_l // 4,
                  lz // 2 - small_l // 4] = 2
    # Anisotropic along X
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        labels_aniso2 = random_walker(data_aniso,
                                      labels_aniso2,
                                      mode='cg', spacing=(2., 1., 1.))
    assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
def test_trivial_cases():
    """Fully-labelled inputs are returned unchanged (with a warning)."""
    # When all voxels are labeled
    img = np.ones((10, 10))
    labels = np.ones((10, 10))
    with expected_warnings(["Returning provided labels"]):
        pass_through = random_walker(img, labels)
    np.testing.assert_array_equal(pass_through, labels)
    # When all voxels are labeled AND return_full_prob is True
    labels[:, :5] = 3
    expected = np.concatenate(((labels == 1)[..., np.newaxis],
                               (labels == 3)[..., np.newaxis]), axis=2)
    with expected_warnings(["Returning provided labels"]):
        test = random_walker(img, labels, return_full_prob=True)
    np.testing.assert_array_equal(test, expected)
def test_length2_spacing():
    """A 2-element spacing tuple is accepted for 2D input."""
    # If this passes without raising an exception (warnings OK), the new
    # spacing code is working properly.
    np.random.seed(42)
    img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10), dtype=np.uint8)
    labels[2, 4] = 1
    labels[6, 8] = 4
    random_walker(img, labels, spacing=(1., 2.))
def test_bad_inputs():
    """Invalid dimensionality, spacing length, or mode raise ValueError."""
    # Too few dimensions
    img = np.ones(10)
    labels = np.arange(10)
    with testing.raises(ValueError):
        random_walker(img, labels)
    with testing.raises(ValueError):
        random_walker(img, labels, multichannel=True)
    # Too many dimensions
    np.random.seed(42)
    img = np.random.normal(size=(3, 3, 3, 3, 3))
    labels = np.arange(3 ** 5).reshape(img.shape)
    with testing.raises(ValueError):
        random_walker(img, labels)
    with testing.raises(ValueError):
        random_walker(img, labels, multichannel=True)
    # Spacing incorrect length
    img = np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10))
    labels[2, 4] = 2
    labels[6, 8] = 5
    with testing.raises(ValueError):
        random_walker(img, labels, spacing=(1,))
    # Invalid mode
    img = np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10))
    with testing.raises(ValueError):
        random_walker(img, labels, mode='bad')
def test_isolated_seeds():
    """Seeds disconnected from all unlabeled pixels keep their own label."""
    np.random.seed(0)
    a = np.random.random((7, 7))
    mask = - np.ones(a.shape)
    # This pixel is an isolated seed
    mask[1, 1] = 1
    # Unlabeled pixels
    mask[3:, 3:] = 0
    # Seeds connected to unlabeled pixels
    mask[4, 4] = 2
    mask[6, 6] = 1
    # Test that no error is raised, and that labels of isolated seeds are OK
    res = random_walker(a, mask)
    assert res[1, 1] == 1
    res = random_walker(a, mask, return_full_prob=True)
    assert res[0, 1, 1] == 1
    assert res[1, 1, 1] == 0
| 36.175676 | 80 | 0.582219 | import numpy as np
from skimage.segmentation import random_walker
from skimage.transform import resize
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
SCIPY_EXPECTED = 'numpy.linalg.matrix_rank|\A\Z'
PYAMG_EXPECTED_WARNING = 'pyamg|\A\Z'
PYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING
def make_2d_syntheticdata(lx, ly=None):
if ly is None:
ly = lx
np.random.seed(1234)
data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (
0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))
data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5] = 1
seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2
return data, seeds
def make_3d_syntheticdata(lx, ly=None, lz=None):
if ly is None:
ly = lx
if lz is None:
lz = lx
np.random.seed(1234)
data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l,
lz // 2 - small_l:lz // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1,
lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0
hole_size = np.max([1, small_l // 8])
data[lx // 2 - small_l,
ly // 2 - hole_size:ly // 2 + hole_size,
lz // 2 - hole_size:lz // 2 + hole_size] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5, lz // 5] = 1
seeds[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 2 - small_l // 4] = 2
return data, seeds
def test_2d_bf():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
labels[55, 80] = 3
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert len(full_prob_bf) == 3
assert data.shape == labels.shape
def test_2d_cg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_cg = random_walker(data, labels, beta=90, mode='cg')
assert (labels_cg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
full_prob = random_walker(data, labels, beta=90, mode='cg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg
def test_2d_cg_mg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED
with expected_warnings([expected]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings([expected]):
full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_types():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
data = 255 * (data - data.min()) // (data.max() - data.min())
data = data.astype(np.uint8)
with expected_warnings([PYAMG_SCIPY_EXPECTED]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_reorder_labels():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[labels == 2] = 4
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_bf
def test_2d_inactive():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[10:20, 10:20] = -1
labels[46:50, 33:38] = -2
labels = random_walker(data, labels, beta=90)
assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d_inactive():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
old_labels = np.copy(labels)
labels[5:25, 26:29, 26:29] = -1
after_labels = np.copy(labels)
with expected_warnings(['"cg" mode|CObject type' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels, old_labels, after_labels
def test_multispectral_2d():
lx, ly = 70, 100
data, labels = make_2d_syntheticdata(lx, ly)
data = data[..., np.newaxis].repeat(2, axis=-1)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_multispectral_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
data = data[..., np.newaxis].repeat(2, axis=-1)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_spacing_0():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
data_aniso = np.zeros((n, n, n // 2))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n, n // 2),
mode='constant',
anti_aliasing=False)
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 4 - small_l // 8] = 2
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 1., 0.5))
assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()
def test_spacing_1():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
data_aniso = np.zeros((n, n * 2, n))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n * 2, n),
mode='constant',
anti_aliasing=False)
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly - small_l // 2,
lz // 2 - small_l // 4] = 2
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 2., 1.))
assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
data_aniso = np.zeros((n, n * 2, n))
for i in range(data.shape[1]):
data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
mode='constant',
anti_aliasing=False)
small_l = int(lx // 5)
labels_aniso2 = np.zeros_like(data_aniso)
labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
labels_aniso2[lx - small_l // 2,
ly // 2 + small_l // 4,
lz // 2 - small_l // 4] = 2
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso2 = random_walker(data_aniso,
labels_aniso2,
mode='cg', spacing=(2., 1., 1.))
assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
def test_trivial_cases():
img = np.ones((10, 10))
labels = np.ones((10, 10))
with expected_warnings(["Returning provided labels"]):
pass_through = random_walker(img, labels)
np.testing.assert_array_equal(pass_through, labels)
labels[:, :5] = 3
expected = np.concatenate(((labels == 1)[..., np.newaxis],
(labels == 3)[..., np.newaxis]), axis=2)
with expected_warnings(["Returning provided labels"]):
test = random_walker(img, labels, return_full_prob=True)
np.testing.assert_array_equal(test, expected)
def test_length2_spacing():
np.random.seed(42)
img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))
labels = np.zeros((10, 10), dtype=np.uint8)
labels[2, 4] = 1
labels[6, 8] = 4
random_walker(img, labels, spacing=(1., 2.))
def test_bad_inputs():
img = np.ones(10)
labels = np.arange(10)
with testing.raises(ValueError):
random_walker(img, labels)
with testing.raises(ValueError):
random_walker(img, labels, multichannel=True)
np.random.seed(42)
img = np.random.normal(size=(3, 3, 3, 3, 3))
labels = np.arange(3 ** 5).reshape(img.shape)
with testing.raises(ValueError):
random_walker(img, labels)
with testing.raises(ValueError):
random_walker(img, labels, multichannel=True)
img = np.random.normal(size=(10, 10))
labels = np.zeros((10, 10))
labels[2, 4] = 2
labels[6, 8] = 5
with testing.raises(ValueError):
random_walker(img, labels, spacing=(1,))
img = np.random.normal(size=(10, 10))
labels = np.zeros((10, 10))
with testing.raises(ValueError):
random_walker(img, labels, mode='bad')
def test_isolated_seeds():
    """A seed cut off from every unlabelled pixel must keep its own label.

    In the label mask, -1 marks pixels excluded from the solve and 0 marks
    unlabelled pixels; the seed at (1, 1) is surrounded by excluded pixels.
    """
    np.random.seed(0)
    a = np.random.random((7, 7))
    mask = - np.ones(a.shape)
    # Isolated seed of label 1 inside the excluded (-1) region.
    mask[1, 1] = 1
    # A connected unlabelled region containing seeds of labels 2 and 1.
    mask[3:, 3:] = 0
    mask[4, 4] = 2
    mask[6, 6] = 1
    res = random_walker(a, mask)
    assert res[1, 1] == 1
    res = random_walker(a, mask, return_full_prob=True)
    # The isolated seed has probability 1 for its own label (plane 0)
    # and 0 for the other label (plane 1).
    assert res[0, 1, 1] == 1
    assert res[1, 1, 1] == 0
| true | true |
f71b4a1187468b74d940dc87ca1f6757b192c07f | 412 | py | Python | docs/core/howto/listings/pb/pb3server.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 3 | 2020-04-02T06:23:44.000Z | 2020-08-13T20:32:31.000Z | docs/core/howto/listings/pb/pb3server.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 1 | 2022-03-04T17:40:22.000Z | 2022-03-04T17:40:22.000Z | docs/core/howto/listings/pb/pb3server.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 1 | 2020-04-02T06:26:10.000Z | 2020-04-02T06:26:10.000Z | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
from twisted.internet import reactor
class One(pb.Root):
    """Server-side root object exposed over Twisted Perspective Broker.

    The client sends us a remote reference; we immediately invoke its
    remote ``print`` method with the value 12.
    """

    def remote_takeTwo(self, two):
        # `two` is a pb.RemoteReference to an object living in the client.
        # Converted from Python 2 print statements (a SyntaxError under
        # Python 3); the printed output is unchanged.
        print("received a Two called", two)
        print("telling it to print(12)")
        two.callRemote("print", 12)


# Listen for PB connections on TCP port 8800 and run the event loop.
reactor.listenTCP(8800, pb.PBServerFactory(One()))
reactor.run()
| 24.235294 | 50 | 0.68932 |
from twisted.spread import pb
from twisted.internet import reactor
class One(pb.Root):
    """Server-side root object exposed over Twisted Perspective Broker.

    The client sends us a remote reference; we immediately invoke its
    remote ``print`` method with the value 12.
    """

    def remote_takeTwo(self, two):
        # `two` is a pb.RemoteReference to an object living in the client.
        # Converted from Python 2 print statements (a SyntaxError under
        # Python 3); the printed output is unchanged.
        print("received a Two called", two)
        print("telling it to print(12)")
        two.callRemote("print", 12)


# Listen for PB connections on TCP port 8800 and run the event loop.
reactor.listenTCP(8800, pb.PBServerFactory(One()))
reactor.run()
| false | true |
f71b4ba03c952835c47e87d25a7a9beba942d977 | 8,309 | py | Python | mmdet/models/necks/mscatfpn.py | JHuang-CV/OD | 290bf90a5f210199b6a3750c88152f7dd2fbc276 | [
"Apache-2.0"
] | null | null | null | mmdet/models/necks/mscatfpn.py | JHuang-CV/OD | 290bf90a5f210199b6a3750c88152f7dd2fbc276 | [
"Apache-2.0"
] | null | null | null | mmdet/models/necks/mscatfpn.py | JHuang-CV/OD | 290bf90a5f210199b6a3750c88152f7dd2fbc276 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
from mmdet.ops.context_block import ContextBlock
from mmdet.models.plugins.squeeze_excitation import ChannelSELayer
@NECKS.register_module
class MSCATFPN(nn.Module):
    """Multi-Scale Concatenation FPN-style neck.

    For each output level, three feature paths are fused in ``forward``:

    * ``mcat`` -- every backbone level resized (x2 nearest upsampling /
      2x2 max pooling per step) to that level's resolution, concatenated
      along channels and fused by a 3x3 conv plus a global context block;
    * ``scat`` -- a single fused map built at the middle pyramid level and
      resized back out to every level;
    * the plain lateral (1x1 conv) features.

    They are combined as ``add_conv(sigmoid(mcat) * scat / 2 + lateral / 2)``.
    The constructor signature mirrors the standard mmdetection FPN neck.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 extra_convs_on_inputs=True,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 activation=None):
        super(MSCATFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.activation = activation
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        # NOTE(review): `epsilon`, `se` and the `c*_w` parameters below are
        # never used in forward() -- presumably left over from experiments.
        self.epsilon = 1e-4
        self.se = ChannelSELayer(768)
        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        self.extra_convs_on_inputs = extra_convs_on_inputs
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        self.cat_convs = nn.ModuleList()
        self.add_convs = nn.ModuleList()
        #self.gc_block = nn.ModuleList()
        self.relu = nn.ReLU()
        # Global context blocks for the mcat path (1) and the scat path (2).
        self.gc_block1 = ContextBlock(inplanes=256, ratio=1./4.)
        self.gc_block2 = ContextBlock(inplanes=256, ratio=1. / 4.)
        # Fuses the channel-concatenation of all levels at the middle scale.
        self.scat_conv = ConvModule(
            out_channels * (self.backbone_end_level-self.start_level),
            out_channels,
            3,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            activation=self.activation,
            inplace=False)
        self.c3_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.c4_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.c5_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        # Per level: 1x1 lateral conv, 3x3 conv fusing the multi-scale
        # concatenation, and 3x3 conv applied after the final combination.
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
                activation=self.activation,
                inplace=False)
            cat_conv = ConvModule(
                out_channels * (self.backbone_end_level-self.start_level),
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                activation=self.activation,
                inplace=False)
            add_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                activation=self.activation,
                inplace=False)
            self.cat_convs.append(cat_conv)
            self.lateral_convs.append(l_conv)
            self.add_convs.append(add_conv)
            #self.gc_block.append(ContextBlock(inplanes=256, ratio=1./4.))
        # add extra conv layers (e.g., RetinaNet)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        if add_extra_convs and extra_levels >= 1:
            for i in range(extra_levels):
                if i == 0 and self.extra_convs_on_inputs:
                    in_channels = self.in_channels[self.backbone_end_level - 1]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    activation=self.activation,
                    inplace=False)
                self.fpn_convs.append(extra_fpn_conv)
    # default init_weights for conv(msra) and norm in ConvModule
    def init_weights(self):
        """Xavier-initialise every Conv2d in the module."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
    @auto_fp16()
    def forward(self, inputs):
        """Fuse backbone features; returns a tuple of `num_outs` maps."""
        assert len(inputs) == len(self.in_channels)
        # build laterals
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        used_backbone_levels = len(laterals)
        # mcat path: for each target level i, resize every level to i's
        # resolution one factor-of-2 step at a time.
        mulscale_per_level = []
        for i in range(used_backbone_levels):
            level = []
            m = i - 0
            n = used_backbone_levels - 1 - i
            level.append(laterals[i])
            for x in range(m):
                level.insert(0, F.interpolate(level[0], scale_factor=2, mode='nearest'))
            for y in range(n):
                level.append(F.max_pool2d(level[-1], 2, stride=2))
            mulscale_per_level.append(level)
        # Transpose: group by target scale, then concat along channels.
        sglscale_per_level = list(zip(*mulscale_per_level))
        feat_cat = [torch.cat(scale, 1)for scale in sglscale_per_level]
        #channel_se = [self.se(cat_ft) for cat_ft in feat_cat]
        mcat = [cat_conv(feat_cat[i]) for i, cat_conv in enumerate(self.cat_convs)]
        #outs = [gc(outs[i]) for i, gc in enumerate(self.gc_block)]
        mcat = [self.gc_block1(ft) for ft in mcat]
        # scat path: bring every lateral to the middle level's resolution.
        # NOTE(review): each non-middle level is resized by a single factor
        # of 2 here, which only reaches the middle resolution when there are
        # at most 3 levels -- verify for deeper pyramids.
        single_list = []
        level = used_backbone_levels // 2
        for i in range(used_backbone_levels):
            if i < level:
                single_list.append(F.max_pool2d(laterals[i], 2, stride=2))
            elif i == level:
                single_list.append(laterals[i])
            else:
                single_list.append(F.interpolate(laterals[i], scale_factor=2, mode='nearest'))
        single_cat = torch.cat(single_list, 1)
        single_cat = self.scat_conv(single_cat)
        single_cat = self.gc_block2(single_cat)
        # Resize the fused middle-level map back out to every level.
        m = level - 0
        n = used_backbone_levels - 1 - level
        scat = [single_cat]
        for x in range(m):
            scat.insert(0, F.interpolate(scat[0], scale_factor=2, mode='nearest'))
        for y in range(n):
            scat.append(F.max_pool2d(scat[-1], 2, stride=2))
        # outs = [scat[i]+lateral for i, lateral in enumerate(laterals)]
        # outs = [add_conv(outs[i]) for i, add_conv in enumerate(self.add_convs)]
        # Final fusion: mcat acts as a gate (sigmoid) on scat, averaged
        # with the lateral features.
        outs = []
        for i, (m, s, l) in enumerate(zip(mcat, scat, laterals)):
            outs.append(
                self.add_convs[i](m.sigmoid()*s/2 + l / 2)
            )
        # Extra output levels: either plain stride-2 pooling or the
        # RetinaNet-style extra convs built in __init__.
        if self.num_outs > used_backbone_levels:
            if not self.add_extra_convs:
                for i in range(self.num_outs - used_backbone_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            else:
                if self.extra_convs_on_inputs:
                    orig = inputs[self.backbone_end_level - 1]
                    outs.append(self.fpn_convs[0](orig))
                else:
                    outs.append(self.fpn_convs[0](outs[-1]))
                for i in range(1, self.num_outs-used_backbone_levels):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                    else:
                        outs.append(self.fpn_convs[i](outs[-1]))
        return tuple(outs)
| 37.768182 | 94 | 0.564087 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
from mmdet.ops.context_block import ContextBlock
from mmdet.models.plugins.squeeze_excitation import ChannelSELayer
@NECKS.register_module
class MSCATFPN(nn.Module):
    """Multi-Scale Concatenation FPN-style neck.

    ``forward`` fuses, per output level: a multi-scale concatenation path
    (``mcat``), a middle-level single-scale fusion path (``scat``), and
    the plain lateral features, combined as
    ``add_conv(sigmoid(mcat) * scat / 2 + lateral / 2)``.
    Constructor signature mirrors the standard mmdetection FPN neck.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 extra_convs_on_inputs=True,
                 relu_before_extra_convs=False,
                 no_norm_on_lateral=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 activation=None):
        super(MSCATFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.activation = activation
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        # NOTE(review): `epsilon`, `se` and the `c*_w` parameters below are
        # never used in forward() -- presumably left over from experiments.
        self.epsilon = 1e-4
        self.se = ChannelSELayer(768)
        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        self.extra_convs_on_inputs = extra_convs_on_inputs
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        self.cat_convs = nn.ModuleList()
        self.add_convs = nn.ModuleList()
        self.relu = nn.ReLU()
        # Global context blocks for the mcat path (1) and the scat path (2).
        self.gc_block1 = ContextBlock(inplanes=256, ratio=1./4.)
        self.gc_block2 = ContextBlock(inplanes=256, ratio=1. / 4.)
        # Fuses the channel-concatenation of all levels at the middle scale.
        self.scat_conv = ConvModule(
            out_channels * (self.backbone_end_level-self.start_level),
            out_channels,
            3,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            activation=self.activation,
            inplace=False)
        self.c3_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.c4_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.c5_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        # Per level: 1x1 lateral conv, 3x3 conv fusing the multi-scale
        # concatenation, and 3x3 conv applied after the final combination.
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
                activation=self.activation,
                inplace=False)
            cat_conv = ConvModule(
                out_channels * (self.backbone_end_level-self.start_level),
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                activation=self.activation,
                inplace=False)
            add_conv = ConvModule(
                out_channels,
                out_channels,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                activation=self.activation,
                inplace=False)
            self.cat_convs.append(cat_conv)
            self.lateral_convs.append(l_conv)
            self.add_convs.append(add_conv)
        # Extra output levels (e.g. RetinaNet-style P6/P7).
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        if add_extra_convs and extra_levels >= 1:
            for i in range(extra_levels):
                if i == 0 and self.extra_convs_on_inputs:
                    in_channels = self.in_channels[self.backbone_end_level - 1]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    activation=self.activation,
                    inplace=False)
                self.fpn_convs.append(extra_fpn_conv)
    def init_weights(self):
        """Xavier-initialise every Conv2d in the module."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
    @auto_fp16()
    def forward(self, inputs):
        """Fuse backbone features; returns a tuple of `num_outs` maps."""
        assert len(inputs) == len(self.in_channels)
        laterals = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        used_backbone_levels = len(laterals)
        # mcat path: resize every level to each target level's resolution,
        # one factor-of-2 step at a time, then concat along channels.
        mulscale_per_level = []
        for i in range(used_backbone_levels):
            level = []
            m = i - 0
            n = used_backbone_levels - 1 - i
            level.append(laterals[i])
            for x in range(m):
                level.insert(0, F.interpolate(level[0], scale_factor=2, mode='nearest'))
            for y in range(n):
                level.append(F.max_pool2d(level[-1], 2, stride=2))
            mulscale_per_level.append(level)
        sglscale_per_level = list(zip(*mulscale_per_level))
        feat_cat = [torch.cat(scale, 1)for scale in sglscale_per_level]
        mcat = [cat_conv(feat_cat[i]) for i, cat_conv in enumerate(self.cat_convs)]
        mcat = [self.gc_block1(ft) for ft in mcat]
        # scat path: bring every lateral to the middle level's resolution.
        # NOTE(review): each non-middle level is resized by a single factor
        # of 2, which only reaches the middle resolution for <= 3 levels.
        single_list = []
        level = used_backbone_levels // 2
        for i in range(used_backbone_levels):
            if i < level:
                single_list.append(F.max_pool2d(laterals[i], 2, stride=2))
            elif i == level:
                single_list.append(laterals[i])
            else:
                single_list.append(F.interpolate(laterals[i], scale_factor=2, mode='nearest'))
        single_cat = torch.cat(single_list, 1)
        single_cat = self.scat_conv(single_cat)
        single_cat = self.gc_block2(single_cat)
        # Resize the fused middle-level map back out to every level.
        m = level - 0
        n = used_backbone_levels - 1 - level
        scat = [single_cat]
        for x in range(m):
            scat.insert(0, F.interpolate(scat[0], scale_factor=2, mode='nearest'))
        for y in range(n):
            scat.append(F.max_pool2d(scat[-1], 2, stride=2))
        # Final fusion: mcat gates scat (sigmoid), averaged with laterals.
        outs = []
        for i, (m, s, l) in enumerate(zip(mcat, scat, laterals)):
            outs.append(
                self.add_convs[i](m.sigmoid()*s/2 + l / 2)
            )
        if self.num_outs > used_backbone_levels:
            if not self.add_extra_convs:
                for i in range(self.num_outs - used_backbone_levels):
                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
            else:
                if self.extra_convs_on_inputs:
                    orig = inputs[self.backbone_end_level - 1]
                    outs.append(self.fpn_convs[0](orig))
                else:
                    outs.append(self.fpn_convs[0](outs[-1]))
                for i in range(1, self.num_outs-used_backbone_levels):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                    else:
                        outs.append(self.fpn_convs[i](outs[-1]))
        return tuple(outs)
| true | true |
f71b4bb600bb418ed1ef7e86a5615b6ad8bfabf3 | 2,753 | py | Python | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | 2 | 2020-07-24T19:26:51.000Z | 2021-08-21T21:04:11.000Z | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | null | null | null | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | 1 | 2021-03-09T12:42:46.000Z | 2021-03-09T12:42:46.000Z | """MXNet Module for Attention-based Graph Neural Network layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import mxnet as mx
from mxnet.gluon import nn
from .... import function as fn
from ..softmax import edge_softmax
from ..utils import normalize
from ....utils import expand_as_pair
class AGNNConv(nn.Block):
    r"""Attention-based Graph Neural Network layer from paper `Attention-based
    Graph Neural Network for Semi-Supervised Learning
    <https://arxiv.org/abs/1803.03735>`__.

    .. math::
        H^{l+1} = P H^{l}

    where :math:`P` is computed as:

    .. math::
        P_{ij} = \mathrm{softmax}_i ( \beta \cdot \cos(h_i^l, h_j^l))

    Parameters
    ----------
    init_beta : float, optional
        The :math:`\beta` in the formula.
    learn_beta : bool, optional
        If True, :math:`\beta` will be learnable parameter.
    """
    def __init__(self,
                 init_beta=1.,
                 learn_beta=True):
        super(AGNNConv, self).__init__()
        with self.name_scope():
            # grad_req='null' freezes beta when it is not learnable.
            self.beta = self.params.get('beta',
                                        shape=(1,),
                                        grad_req='write' if learn_beta else 'null',
                                        init=mx.init.Constant(init_beta))

    def forward(self, graph, feat):
        r"""Compute AGNN Layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : mxnet.NDArray
            The input feature of shape :math:`(N, *)` where :math:`N` is the
            number of nodes, and :math:`*` could be of any shape.
            If a pair of mxnet.NDArray is given, the pair must contain two
            tensors of shape :math:`(N_{in}, *)` and :math:`(N_{out}, *)`,
            and the :math:`*` in the latter tensor must equal the previous one.

        Returns
        -------
        mxnet.NDArray
            The output feature of shape :math:`(N, *)` where :math:`*`
            should be the same as input shape.
        """
        with graph.local_scope():
            feat_src, feat_dst = expand_as_pair(feat, graph)

            graph.srcdata['h'] = feat_src
            # L2-normalise features so that u . v below equals cos(u, v).
            graph.srcdata['norm_h'] = normalize(feat_src, p=2, axis=-1)
            if isinstance(feat, tuple) or graph.is_block:
                graph.dstdata['norm_h'] = normalize(feat_dst, p=2, axis=-1)
            # compute cosine distance
            graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))
            cos = graph.edata.pop('cos')
            e = self.beta.data(feat_src.context) * cos
            # Softmax over each destination node's incoming edges -> P_{ij}.
            graph.edata['p'] = edge_softmax(graph, e)
            # Aggregate: h_i <- sum_j P_{ij} h_j.
            graph.update_all(fn.u_mul_e('h', 'p', 'm'), fn.sum('m', 'h'))
            return graph.dstdata.pop('h')
| 36.706667 | 91 | 0.553578 |
import mxnet as mx
from mxnet.gluon import nn
from .... import function as fn
from ..softmax import edge_softmax
from ..utils import normalize
from ....utils import expand_as_pair
class AGNNConv(nn.Block):
    r"""Attention-based Graph Neural Network layer
    (https://arxiv.org/abs/1803.03735): :math:`H^{l+1} = P H^l` with
    :math:`P_{ij} = \mathrm{softmax}_i(\beta \cos(h_i^l, h_j^l))`.

    `init_beta` sets the initial :math:`\beta`; `learn_beta` makes it a
    trainable parameter.
    """
    def __init__(self,
                 init_beta=1.,
                 learn_beta=True):
        super(AGNNConv, self).__init__()
        with self.name_scope():
            # grad_req='null' freezes beta when it is not learnable.
            self.beta = self.params.get('beta',
                                        shape=(1,),
                                        grad_req='write' if learn_beta else 'null',
                                        init=mx.init.Constant(init_beta))
    def forward(self, graph, feat):
        """Apply the AGNN layer to `feat` on `graph`; returns the updated
        destination-node features (same trailing shape as the input)."""
        with graph.local_scope():
            feat_src, feat_dst = expand_as_pair(feat, graph)
            graph.srcdata['h'] = feat_src
            # L2-normalise so that u . v below equals cos(u, v).
            graph.srcdata['norm_h'] = normalize(feat_src, p=2, axis=-1)
            if isinstance(feat, tuple) or graph.is_block:
                graph.dstdata['norm_h'] = normalize(feat_dst, p=2, axis=-1)
            graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))
            cos = graph.edata.pop('cos')
            e = self.beta.data(feat_src.context) * cos
            # Softmax over incoming edges, then weighted-sum aggregation.
            graph.edata['p'] = edge_softmax(graph, e)
            graph.update_all(fn.u_mul_e('h', 'p', 'm'), fn.sum('m', 'h'))
            return graph.dstdata.pop('h')
| true | true |
f71b4c870143ba858ce8cb1244e06a703ac0343d | 2,539 | py | Python | Algorithms/MotorModule/MotorModule.py | TechMatt1337/Bastion | 1e89143905babc06ace4f0ae7ab59750427b0f03 | [
"BSD-3-Clause"
] | 1 | 2018-12-06T03:57:21.000Z | 2018-12-06T03:57:21.000Z | Algorithms/MotorModule/MotorModule.py | TechMatt1337/Bastion | 1e89143905babc06ace4f0ae7ab59750427b0f03 | [
"BSD-3-Clause"
] | null | null | null | Algorithms/MotorModule/MotorModule.py | TechMatt1337/Bastion | 1e89143905babc06ace4f0ae7ab59750427b0f03 | [
"BSD-3-Clause"
] | null | null | null | from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import time
import atexit
# Module-level Motor HAT handle; rebound by MotorController so that
# turnOffMotors() releases the same board.
MH = Adafruit_MotorHAT(0x60)
STEPRES = 1.8 # Step resolution in units of degree/step
# Human-readable direction names -> Adafruit direction constants.
DIRECTIONS = { "ccw":Adafruit_MotorHAT.FORWARD,
               "cw":Adafruit_MotorHAT.BACKWARD}
# Supported stepping strategies; keys are the values accepted by the
# `step` argument of MotorController.rotateMotor.
STEPTYPES = { "single":Adafruit_MotorHAT.SINGLE,
              "double":Adafruit_MotorHAT.DOUBLE,
              "interleave":Adafruit_MotorHAT.INTERLEAVE,
              "micro":Adafruit_MotorHAT.MICROSTEP}
# Motor HAT stepper port assignment for each mount axis.
MOTORS = {"horizontal":1,"vertical":2}
class MotorController:
    """Drives one stepper motor (one mount axis) through the Adafruit
    Motor HAT."""
    def __init__(self,motor,steps = 200,addr = 0x60):
        """
        Inputs: motor - axis name, "horizontal" or "vertical" (see MOTORS)
                steps - steps per revolution of the stepper (default 200)
                addr  - I2C address of the Motor HAT (default 0x60)
        """
        motorPort = MOTORS[motor]
        self.motorPort = motorPort
        self.steps = steps
        self.hatAddress = addr
        global MH
        # Rebind the module-level handle so turnOffMotors() releases the
        # board this controller actually drives.
        MH = Adafruit_MotorHAT(addr)
        self.stepperMotor = MH.getStepper(steps, motorPort)
        self.stepperMotor.setSpeed(180)  # speed in RPM
    def rotateMotor(self,degree,dir = "cw",step = "single"):
        """
        Rotate motor for a certain degree from where it is located
        at in a specified direction.
        Inputs: degree - Degrees to rotate
                dir - cw or ccw rotation
                step - Type of step motor should make for
                        rotation. By default it is set to 'single'.
                        'double' provides the highest torque that
                        the motor is able to provide.  Other step
                        types are 'interleave' and 'micro'
                        (see STEPTYPES).
        """
        # print("ROTATING MOTOR")
        x = 0
        # Interleaved stepping halves the step angle, so twice as many
        # steps are needed for the same rotation.
        if step == "interleave":
            x = int(degree/STEPRES)*2
        else:
            x = int(degree/STEPRES)
        self.stepperMotor.step(x,DIRECTIONS[dir],STEPTYPES[step])
def turnOffMotors():
    """
    Turn off all motors
    """
    # Release all four channels so the coils do not stay energised
    # (and heat up) after the program exits.
    MH.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
    MH.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
    MH.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
    MH.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
# recommended for auto-disabling motors on shutdown!
atexit.register(turnOffMotors)
if __name__ == '__main__':
    # Manual smoke test: rotate the vertical axis 1000 degrees clockwise.
    m = MotorController(motor="vertical")
    m.rotateMotor(degree=1000,step="double")
    # m.rotateMotor(degree=360,step="double",dir="ccw")
| 38.469697 | 89 | 0.569909 | from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import time
import atexit
# Module-level Motor HAT handle; rebound by MotorController so that
# turnOffMotors() releases the same board.
MH = Adafruit_MotorHAT(0x60)
STEPRES = 1.8  # step resolution, degrees per step
# Human-readable direction names -> Adafruit direction constants.
DIRECTIONS = { "ccw":Adafruit_MotorHAT.FORWARD,
               "cw":Adafruit_MotorHAT.BACKWARD}
# Stepping strategies accepted by MotorController.rotateMotor's `step`.
STEPTYPES = { "single":Adafruit_MotorHAT.SINGLE,
              "double":Adafruit_MotorHAT.DOUBLE,
              "interleave":Adafruit_MotorHAT.INTERLEAVE,
              "micro":Adafruit_MotorHAT.MICROSTEP}
# Motor HAT stepper port assignment for each mount axis.
MOTORS = {"horizontal":1,"vertical":2}
class MotorController:
    """Drives one stepper motor (one mount axis) through the Adafruit
    Motor HAT."""
    def __init__(self,motor,steps = 200,addr = 0x60):
        """
        Inputs: motor - axis name, "horizontal" or "vertical" (see MOTORS)
                steps - steps per revolution of the stepper (default 200)
                addr  - I2C address of the Motor HAT (default 0x60)
        """
        motorPort = MOTORS[motor]
        self.motorPort = motorPort
        self.steps = steps
        self.hatAddress = addr
        global MH
        # Rebind the module-level handle so turnOffMotors() releases the
        # board this controller actually drives.
        MH = Adafruit_MotorHAT(addr)
        self.stepperMotor = MH.getStepper(steps, motorPort)
        self.stepperMotor.setSpeed(180)  # speed in RPM
    def rotateMotor(self,degree,dir = "cw",step = "single"):
        """
        Rotate motor for a certain degree from where it is located
        at in a specified direction.
        Inputs: degree - Degrees to rotate
                dir - cw or ccw rotation
                step - Type of step motor should make for
                        rotation. By default it is set to 'single'.
                        'double' provides the highest torque that
                        the motor is able to provide.  Other step
                        types are 'interleave' and 'micro'
                        (see STEPTYPES).
        """
        x = 0
        # Interleaved stepping halves the step angle, so twice as many
        # steps are needed for the same rotation.
        if step == "interleave":
            x = int(degree/STEPRES)*2
        else:
            x = int(degree/STEPRES)
        self.stepperMotor.step(x,DIRECTIONS[dir],STEPTYPES[step])
def turnOffMotors():
    """
    Turn off all motors
    """
    # Release all four channels so the coils do not stay energised
    # (and heat up) after the program exits.
    MH.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
    MH.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
    MH.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
    MH.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
# Auto-disable motors on interpreter shutdown.
atexit.register(turnOffMotors)
if __name__ == '__main__':
    # Manual smoke test: rotate the vertical axis 1000 degrees clockwise.
    m = MotorController(motor="vertical")
    m.rotateMotor(degree=1000,step="double")
| false | true |
f71b4d5f7826768bee64b7feec106bd5368db512 | 1,099 | py | Python | test/test_arrow_result.py | mariusvniekerk/snowflake-connector-python | 4c6b728f9ca7ac9c8a318741924a963a5574e216 | [
"Apache-2.0"
] | null | null | null | test/test_arrow_result.py | mariusvniekerk/snowflake-connector-python | 4c6b728f9ca7ac9c8a318741924a963a5574e216 | [
"Apache-2.0"
] | null | null | null | test/test_arrow_result.py | mariusvniekerk/snowflake-connector-python | 4c6b728f9ca7ac9c8a318741924a963a5574e216 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import pytest
@pytest.mark.skip(
    reason="Cython is not enabled in build env")
def test_select_with_num(conn_cnx):
    """Compare the JSON and ARROW_FORCE result formats row by row.

    Runs the same deterministic 50k-row query (seeded random, ordered by
    c1) through two sessions, one per result format, and checks that both
    connectors yield identical rows.
    """
    with conn_cnx() as json_cnx:
        with conn_cnx() as arrow_cnx:
            row_count = 50000
            # random(12) seeds the generator so both sessions produce the
            # same data; ORDER BY c1 fixes the row order.
            sql_text = ("select seq4() as c1, uniform(1, 10, random(12)) as c2 from " +
                        "table(generator(rowcount=>50000)) order by c1")
            cursor_json = json_cnx.cursor()
            cursor_json.execute("alter session set query_result_format='JSON'")
            cursor_json.execute(sql_text)
            cursor_arrow = arrow_cnx.cursor()
            cursor_arrow.execute("alter session set query_result_format='ARROW_FORCE'")
            cursor_arrow.execute(sql_text)
            for i in range(0, row_count):
                (json_c1, json_c2) = cursor_json.fetchone()
                (arrow_c1, arrow_c2) = cursor_arrow.fetchone()
                assert json_c1 == arrow_c1
                assert json_c2 == arrow_c2
| 36.633333 | 87 | 0.610555 |
import pytest
@pytest.mark.skip(
    reason="Cython is not enabled in build env")
def test_select_with_num(conn_cnx):
    """Compare the JSON and ARROW_FORCE result formats row by row.

    Runs the same deterministic 50k-row query (seeded random, ordered by
    c1) through two sessions, one per result format, and checks that both
    connectors yield identical rows.
    """
    with conn_cnx() as json_cnx:
        with conn_cnx() as arrow_cnx:
            row_count = 50000
            # random(12) seeds the generator so both sessions produce the
            # same data; ORDER BY c1 fixes the row order.
            sql_text = ("select seq4() as c1, uniform(1, 10, random(12)) as c2 from " +
                        "table(generator(rowcount=>50000)) order by c1")
            cursor_json = json_cnx.cursor()
            cursor_json.execute("alter session set query_result_format='JSON'")
            cursor_json.execute(sql_text)
            cursor_arrow = arrow_cnx.cursor()
            cursor_arrow.execute("alter session set query_result_format='ARROW_FORCE'")
            cursor_arrow.execute(sql_text)
            for i in range(0, row_count):
                (json_c1, json_c2) = cursor_json.fetchone()
                (arrow_c1, arrow_c2) = cursor_arrow.fetchone()
                assert json_c1 == arrow_c1
                assert json_c2 == arrow_c2
| true | true |
f71b4eb0cc83cbb94a84bbec221dd9f3a3147026 | 25,712 | py | Python | draco/core/io.py | sjforeman/draco | b0ab40b6984637642b28a5485af1c09c9cf183f2 | [
"MIT"
] | null | null | null | draco/core/io.py | sjforeman/draco | b0ab40b6984637642b28a5485af1c09c9cf183f2 | [
"MIT"
] | null | null | null | draco/core/io.py | sjforeman/draco | b0ab40b6984637642b28a5485af1c09c9cf183f2 | [
"MIT"
] | null | null | null | """Tasks for reading and writing data.
Tasks
=====
.. autosummary::
:toctree:
LoadFiles
LoadMaps
LoadFilesFromParams
Save
Print
LoadBeamTransfer
File Groups
===========
Several tasks accept groups of files as arguments. These are specified in the YAML file as a dictionary like below.
.. code-block:: yaml
list_of_file_groups:
- tag: first_group # An optional tag naming the group
files:
- 'file1.h5'
- 'file[3-4].h5' # Globs are processed
- 'file7.h5'
- files: # No tag specified, implicitly gets the tag 'group_2'
- 'another_file1.h5'
- 'another_file2.h5'
single_group:
files: ['file1.h5', 'file2.h5']
"""
import os.path
import h5py
import numpy as np
from yaml import dump as yamldump
from caput import pipeline
from caput import config
from cora.util import units
from . import task
from ..util.truncate import bit_truncate_weights, bit_truncate_fixed
from .containers import SiderealStream, TimeStream, TrackBeam
# Per-container-type truncation parameters used when saving with bit
# truncation: `dataset` lists the datasets to truncate, `weight_dataset`
# names the inverse-variance weights used for each (None -> fall back to
# `fixed_precision`), and `variance_increase` bounds the fractional
# variance added by the truncation.
TRUNC_SPEC = {
    SiderealStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TimeStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TrackBeam: {
        "dataset": ["beam", "weight"],
        "weight_dataset": ["weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
}
def _list_of_filelists(files):
# Take in a list of lists/glob patterns of filenames
import glob
f2 = []
for filelist in files:
if isinstance(filelist, str):
filelist = glob.glob(filelist)
elif isinstance(filelist, list):
pass
else:
raise Exception("Must be list or glob pattern.")
f2.append(filelist)
return f2
def _list_or_glob(files):
# Take in a list of lists/glob patterns of filenames
import glob
if isinstance(files, str):
files = sorted(glob.glob(files))
elif isinstance(files, list):
pass
else:
raise ValueError("Argument must be list or glob pattern, got %s" % repr(files))
return files
def _list_of_filegroups(groups):
# Process a file group/groups
import glob
# Convert to list if the group was not included in a list
if not isinstance(groups, list):
groups = [groups]
# Iterate over groups, set the tag if needed, and process the file list
# through glob
for gi, group in enumerate(groups):
files = group["files"]
if "tag" not in group:
group["tag"] = "group_%i" % gi
flist = []
for fname in files:
flist += glob.glob(fname)
if not len(flist):
raise RuntimeError("No files in group exist (%s)." % files)
group["files"] = flist
return groups
class LoadMaps(task.MPILoggedTask):
    """Load a series of maps from files given in the tasks parameters.

    Maps are given as one, or a list of `File Groups` (see
    :mod:`draco.core.io`). Maps within the same group are added together
    before being passed on.

    Attributes
    ----------
    maps : list or dict
        A dictionary specifying a file group, or a list of them.
    """

    maps = config.Property(proptype=_list_of_filegroups)

    def next(self):
        """Load the groups of maps from disk and pass them on.

        Returns
        -------
        map : :class:`containers.Map`

        Raises
        ------
        RuntimeError
            If maps in a group have mismatched frequencies, polarisations
            or pixelisation.
        """

        from . import containers

        # Exit this task if we have eaten all the file groups
        if len(self.maps) == 0:
            raise pipeline.PipelineStopIteration

        group = self.maps.pop(0)

        map_stack = None

        # Iterate over all the files in the group, load them into a Map
        # container and add them all together
        for mfile in group["files"]:

            self.log.debug("Loading file %s", mfile)

            current_map = containers.Map.from_file(mfile, distributed=True)
            current_map.redistribute("freq")

            # Start the stack if needed
            if map_stack is None:
                map_stack = current_map

            # Otherwise, check that the new map has consistent frequencies,
            # pol and pixelisation and stack up.
            else:
                # BUGFIX: these checks previously used `.all()`, which only
                # raised when *every* entry disagreed; `.any()` rejects a
                # map as soon as any entry is inconsistent.
                if (current_map.freq != map_stack.freq).any():
                    raise RuntimeError("Maps do not have consistent frequencies.")

                if (current_map.index_map["pol"] != map_stack.index_map["pol"]).any():
                    raise RuntimeError("Maps do not have the same polarisations.")

                if (
                    current_map.index_map["pixel"] != map_stack.index_map["pixel"]
                ).any():
                    raise RuntimeError("Maps do not have the same pixelisation.")

                map_stack.map[:] += current_map.map[:]

        # Assign a tag to the stack of maps
        map_stack.attrs["tag"] = group["tag"]

        return map_stack
class LoadFITSCatalog(task.SingleTask):
    """Load an SDSS-style FITS source catalog.

    Catalogs are given as one, or a list of `File Groups` (see
    :mod:`draco.core.io`). Catalogs within the same group are combined together
    before being passed on.

    Attributes
    ----------
    catalogs : list or dict
        A dictionary specifying a file group, or a list of them.
    z_range : list, optional
        Select only sources with a redshift within the given range.
    freq_range : list, optional
        Select only sources with a 21cm line freq within the given range. Overrides
        `z_range`.
    """

    catalogs = config.Property(proptype=_list_of_filegroups)
    z_range = config.list_type(type_=float, length=2, default=None)
    freq_range = config.list_type(type_=float, length=2, default=None)

    def process(self):
        """Load the groups of catalogs from disk, concatenate them and pass them on.

        Returns
        -------
        catalog : :class:`containers.SpectroscopicCatalog`
        """
        from astropy.io import fits
        from . import containers

        # Exit this task if we have eaten all the file groups
        if len(self.catalogs) == 0:
            raise pipeline.PipelineStopIteration

        group = self.catalogs.pop(0)

        # Set the redshift selection
        if self.freq_range:
            # Map the 21 cm band into a redshift interval (z = nu21/nu - 1);
            # the band edges swap order under this map.
            zl = units.nu21 / self.freq_range[1] - 1
            zh = units.nu21 / self.freq_range[0] - 1
            self.z_range = (zl, zh)

        if self.z_range:
            zl, zh = self.z_range
            self.log.info(f"Applying redshift selection {zl:.2f} <= z <= {zh:.2f}")

        # Load the data only on rank=0 and then broadcast
        if self.comm.rank == 0:
            # Iterate over all the files in the group, read out the object
            # positions/redshifts and concatenate them all together
            catalog_stack = []

            for cfile in group["files"]:

                self.log.debug("Loading file %s", cfile)

                # TODO: read out the weights from the catalogs
                with fits.open(cfile, mode="readonly") as cat:
                    pos = np.array([cat[1].data[col] for col in ["RA", "DEC", "Z"]])

                # Apply any redshift selection to the objects
                if self.z_range:
                    zsel = (pos[2] >= self.z_range[0]) & (pos[2] <= self.z_range[1])
                    pos = pos[:, zsel]

                catalog_stack.append(pos)

            # NOTE: this one is tricky, for some reason the concatenate in here
            # produces a non C contiguous array, so we need to ensure that otherwise
            # the broadcasting will get very confused
            catalog_array = np.concatenate(catalog_stack, axis=-1).astype(np.float64)
            catalog_array = np.ascontiguousarray(catalog_array)
            num_objects = catalog_array.shape[-1]
        else:
            num_objects = None
            catalog_array = None

        # Broadcast the size of the catalog to all ranks, create the target array and
        # broadcast into it
        num_objects = self.comm.bcast(num_objects, root=0)
        self.log.debug(f"Constructing catalog with {num_objects} objects.")

        if self.comm.rank != 0:
            catalog_array = np.zeros((3, num_objects), dtype=np.float64)
        self.comm.Bcast(catalog_array, root=0)

        catalog = containers.SpectroscopicCatalog(object_id=num_objects)
        catalog["position"]["ra"] = catalog_array[0]
        catalog["position"]["dec"] = catalog_array[1]
        catalog["redshift"]["z"] = catalog_array[2]
        # No per-object redshift uncertainties are read from the files.
        catalog["redshift"]["z_error"] = 0

        # Assign a tag to the combined catalog
        catalog.attrs["tag"] = group["tag"]

        return catalog
class LoadFilesFromParams(task.SingleTask):
    """Load data from files given in the tasks parameters.

    Attributes
    ----------
    files : glob pattern, or list
        Can either be a glob pattern, or lists of actual files.
    distributed : bool, optional
        Whether the file should be loaded distributed across ranks.
    convert_strings : bool, optional
        Convert strings to unicode when loading.
    selections : dict, optional
        A dictionary of axis selections. See the section below for details.

    Selections
    ----------
    Selections can be given to limit the data read to specified subsets. They can be
    given for any named axis in the container.

    Selections can be given as a slice with an `<axis name>_range` key with either
    `[start, stop]` or `[start, stop, step]` as the value. Alternatively a list of
    explicit indices to extract can be given with the `<axis name>_index` key, and
    the value is a list of the indices. If both `<axis name>_range` and `<axis
    name>_index` keys are given the former will take precedence, but you should
    clearly avoid doing this.

    Additionally index based selections currently don't work for distributed reads.

    Here's an example in the YAML format that the pipeline uses:

    .. code-block:: yaml

        selections:
            freq_range: [256, 512, 4]  # A strided slice
            stack_index: [1, 2, 4, 9, 16, 25, 36, 49, 64]  # A sparse selection
            stack_range: [1, 14]  # Will override the selection above
    """

    files = config.Property(proptype=_list_or_glob)
    distributed = config.Property(proptype=bool, default=True)
    convert_strings = config.Property(proptype=bool, default=True)
    selections = config.Property(proptype=dict, default=None)

    def setup(self):
        """Resolve the selections into `_sel` for use by `process`."""
        self._sel = self._resolve_sel()

    def process(self):
        """Load the given files in turn and pass on.

        Returns
        -------
        cont : subclass of `memh5.BasicCont`
            The next file in the list, loaded into memory.
        """
        from caput import memh5

        # Garbage collect to workaround leaking memory from containers.
        # TODO: find actual source of leak
        import gc

        gc.collect()

        if len(self.files) == 0:
            raise pipeline.PipelineStopIteration

        # Fetch and remove the first item in the list
        file_ = self.files.pop(0)

        self.log.info(f"Loading file {file_}")
        self.log.debug(f"Reading with selections: {self._sel}")

        # If we are applying selections we need to dispatch the `from_file` via the
        # correct subclass, rather than relying on the internal detection of the
        # subclass. To minimise the number of files being opened this is only done on
        # rank=0 and is then broadcast
        if self._sel:
            if self.comm.rank == 0:
                with h5py.File(file_, "r") as fh:
                    clspath = memh5.MemDiskGroup._detect_subclass_path(fh)
            else:
                clspath = None
            clspath = self.comm.bcast(clspath, root=0)
            new_cls = memh5.MemDiskGroup._resolve_subclass(clspath)
        else:
            new_cls = memh5.BasicCont

        cont = new_cls.from_file(
            file_,
            distributed=self.distributed,
            comm=self.comm,
            convert_attribute_strings=self.convert_strings,
            convert_dataset_strings=self.convert_strings,
            **self._sel,
        )

        if "tag" not in cont.attrs:
            # Get the first part of the actual filename and use it as the tag
            tag = os.path.splitext(os.path.basename(file_))[0]
            cont.attrs["tag"] = tag

        return cont

    def _resolve_sel(self):
        # Turn the selection parameters into actual selectable types

        sel = {}

        sel_parsers = {"range": self._parse_range, "index": self._parse_index}

        # To enforce the precedence of range vs index selections, we rely on the fact
        # that a sort will place the axis_range keys after axis_index keys
        for k in sorted(self.selections or []):

            # Parse the key to get the axis name and type, accounting for the fact the
            # axis name may contain an underscore
            *axis, type_ = k.split("_")
            axis_name = "_".join(axis)

            if type_ not in sel_parsers:
                raise ValueError(
                    f'Unsupported selection type "{type_}", or invalid key "{k}"'
                )

            sel[f"{axis_name}_sel"] = sel_parsers[type_](self.selections[k])

        return sel

    def _parse_range(self, x):
        # Parse and validate a range type selection, returning a `slice`
        if not isinstance(x, (list, tuple)) or len(x) > 3 or len(x) < 2:
            raise ValueError(
                f"Range spec must be a length 2 or 3 list or tuple. Got {x}."
            )

        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of range spec must be ints. Got {x}")

        return slice(*x)

    def _parse_index(self, x):
        # Parse and validate an index type selection, returning a list of ints
        if not isinstance(x, (list, tuple)) or len(x) == 0:
            raise ValueError(f"Index spec must be a non-empty list or tuple. Got {x}.")

        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of index spec must be ints. Got {x}")

        return list(x)
# Backwards-compatible alias for code written against the old task name.
LoadBasicCont = LoadFilesFromParams
class FindFiles(pipeline.TaskBase):
    """Pass a configured list of files on to downstream tasks.

    The files are specified in the configuration either as an explicit list
    or as a glob pattern (expanded by the `files` property).

    Parameters
    ----------
    files : list or glob
    """

    files = config.Property(proptype=_list_or_glob)

    def setup(self):
        """Validate and return the configured file list."""
        if isinstance(self.files, (list, tuple)):
            return self.files
        raise RuntimeError("Argument must be list of files.")
class LoadFiles(LoadFilesFromParams):
    """Load data from files passed into the setup routine.

    Each file must be a serialised subclass of :class:`memh5.BasicCont`.
    """

    # Files come in via `setup`, not the config, so disable the config property.
    files = None

    def setup(self, files):
        """Set the list of files to load.

        Parameters
        ----------
        files : list
            Paths of the files to load, one per `process` call.
        """
        # Resolve any configured selections in the base class first.
        super().setup()

        if isinstance(files, (list, tuple)):
            self.files = files
        else:
            raise RuntimeError(f'Argument must be list of files. Got "{files}"')
class Save(pipeline.TaskBase):
    """Write the input to disk, then pass it on unchanged.

    Assumes that the input has a `to_hdf5` method. Appends a *tag* if there is
    a `tag` entry in the attributes, otherwise just uses a count.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    count = 0

    def next(self, data):
        """Write out the data file.

        Assumes it has an MPIDataset interface.

        Parameters
        ----------
        data : mpidataset.MPIDataset
            Data to write out.
        """
        if "tag" in data.attrs:
            tag = data.attrs["tag"]
        else:
            # No tag supplied: fall back to a running counter.
            tag = self.count
            self.count += 1

        data.to_hdf5(f"{self.root}_{tag}.h5")

        return data
class Print(pipeline.TaskBase):
    """Print any input it receives and pass it through unchanged.

    Useful for inspecting intermediate pipeline products while debugging.
    """

    def next(self, input_):
        # Echo the payload to stdout, then forward it untouched.
        print(input_)
        return input_
class LoadBeamTransfer(pipeline.TaskBase):
    """Load a beam transfer manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the saved Beam Transfer products.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the beam transfer matrices.

        Returns
        -------
        tel : TransitTelescope
            Object describing the telescope.
        bt : BeamTransfer
            BeamTransfer manager.
        feed_info : list, optional
            Optional list providing additional information about each feed.
        """
        import os

        from drift.core import beamtransfer

        if not os.path.exists(self.product_directory):
            raise RuntimeError("BeamTransfers do not exist.")

        transfer = beamtransfer.BeamTransfer(self.product_directory)
        telescope = transfer.telescope

        # Not every telescope object carries per-feed information.
        if hasattr(telescope, "feeds"):
            return telescope, transfer, telescope.feeds

        return telescope, transfer
class LoadProductManager(pipeline.TaskBase):
    """Load a driftscan product manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the root of the products. This is the same as the output
        directory used by ``drift-makeproducts``.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the product manager.

        Returns
        -------
        manager : ProductManager
            Object describing the telescope.
        """
        import os

        from drift.core import manager

        if not os.path.exists(self.product_directory):
            raise RuntimeError("Products do not exist.")

        # Load the ProductManager from its on-disk configuration.
        return manager.ProductManager.from_config(self.product_directory)
class Truncate(task.SingleTask):
    """Precision truncate data prior to saving with bitshuffle compression.

    If no configuration is provided, will look for preset values for the
    input container. Any properties defined in the config will override the
    presets.

    If available, each specified dataset will be truncated relative to a
    (specified) weight dataset with the truncation increasing the variance up
    to the specified maximum in `variance_increase`. If there is no specified
    weight dataset then the truncation falls back to using the
    `fixed_precision`.

    Attributes
    ----------
    dataset : list of str
        Datasets to truncate.
    weight_dataset : list of str
        Datasets to use as inverse variance for truncation precision.
    fixed_precision : float
        Relative precision to truncate to (default 1e-4).
    variance_increase : float
        Maximum fractional increase in variance from numerical truncation.
    """

    dataset = config.Property(proptype=list, default=None)
    weight_dataset = config.Property(proptype=list, default=None)
    fixed_precision = config.Property(proptype=float, default=None)
    variance_increase = config.Property(proptype=float, default=None)

    def _get_params(self, container):
        """Load truncation parameters from config or container defaults."""
        if container in TRUNC_SPEC:
            self.log.info("Truncating from preset for container {}".format(container))
            # NOTE(review): the resolved values are written back onto the task
            # attributes, so presets for the first container type processed stick
            # for all subsequent calls (including `variance_increase *= 3` below,
            # which compounds per call) — confirm this is intended.
            for key in [
                "dataset",
                "weight_dataset",
                "fixed_precision",
                "variance_increase",
            ]:
                attr = getattr(self, key)
                if attr is None:
                    setattr(self, key, TRUNC_SPEC[container][key])
                else:
                    self.log.info("Overriding container default for '{}'.".format(key))
        else:
            if (
                self.dataset is None
                or self.fixed_precision is None
                or self.variance_increase is None
            ):
                raise pipeline.PipelineConfigError(
                    "Container {} has no preset values. You must define all of 'dataset', "
                    "'fixed_precision', and 'variance_increase' properties.".format(
                        container
                    )
                )

        # Factor of 3 for variance over uniform distribution of truncation errors
        self.variance_increase *= 3

    def process(self, data):
        """Truncate the incoming data.

        The truncation is done *in place*.

        Parameters
        ----------
        data : containers.ContainerBase
            Data to truncate.

        Returns
        -------
        truncated_data : containers.ContainerBase
            Truncated data.
        """
        # get truncation parameters from config or container defaults
        self._get_params(type(data))

        if self.weight_dataset is None:
            self.weight_dataset = [None] * len(self.dataset)

        for dset, wgt in zip(self.dataset, self.weight_dataset):
            old_shape = data[dset].local_shape
            # Flatten the local data so the truncation routines see a 1D array.
            val = np.ndarray.reshape(data[dset][:], data[dset][:].size)
            if wgt is None:
                # No weights available: truncate to a fixed relative precision,
                # handling real and imaginary parts separately for complex data.
                if np.iscomplexobj(data[dset]):
                    data[dset][:].real = bit_truncate_fixed(
                        val.real, self.fixed_precision
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_fixed(
                        val.imag, self.fixed_precision
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_fixed(
                        val, self.fixed_precision
                    ).reshape(old_shape)
            else:
                if data[dset][:].shape != data[wgt][:].shape:
                    raise pipeline.PipelineRuntimeError(
                        "Dataset and weight arrays must have same shape ({} != {})".format(
                            data[dset].shape, data[wgt].shape
                        )
                    )
                invvar = np.ndarray.reshape(data[wgt][:], data[dset][:].size)
                if np.iscomplexobj(data[dset]):
                    # Factor of 2.0 on the weights for complex data — presumably
                    # because the variance budget is split between the real and
                    # imaginary parts; confirm against the truncation routines.
                    data[dset][:].real = bit_truncate_weights(
                        val.real,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_weights(
                        val.imag,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_weights(
                        val, invvar / self.variance_increase, self.fixed_precision
                    ).reshape(old_shape)

        return data
class SaveModuleVersions(task.SingleTask):
    """Write module versions to a YAML file.

    The list of modules should be added to the configuration under key 'save_versions'.
    The version strings are written to a YAML file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save module versions."""
        fname = "{}_versions.yml".format(self.root)

        # Use a context manager so the file is closed even if the dump fails.
        with open(fname, "w") as f:
            f.write(yamldump(self.versions))

        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
class SaveConfig(task.SingleTask):
    """Write pipeline config to a text file.

    Yaml configuration document is written to a text file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save the pipeline configuration."""
        fname = "{}_config.yml".format(self.root)

        # Use a context manager so the file is closed even if the dump fails.
        with open(fname, "w") as f:
            f.write(yamldump(self.pipeline_config))

        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
def get_telescope(obj):
    """Return a telescope object out of the input (either `ProductManager`,
    `BeamTransfer` or `TransitTelescope`).
    """
    from drift.core import telescope

    try:
        # Preferred path: pull the telescope off a BeamTransfer/ProductManager.
        bt = get_beamtransfer(obj)
    except RuntimeError:
        # Not a BeamTransfer-like object; accept a bare telescope instance.
        if isinstance(obj, telescope.TransitTelescope):
            return obj
        raise RuntimeError("Could not get telescope instance out of %s" % repr(obj))

    return bt.telescope
def get_beamtransfer(obj):
    """Return a BeamTransfer object out of the input (either `ProductManager`,
    `BeamTransfer`).
    """
    from drift.core import manager, beamtransfer

    if isinstance(obj, manager.ProductManager):
        return obj.beamtransfer

    if isinstance(obj, beamtransfer.BeamTransfer):
        return obj

    raise RuntimeError("Could not get BeamTransfer instance out of %s" % repr(obj))
| 30.285041 | 115 | 0.592253 |
import os.path
import h5py
import numpy as np
from yaml import dump as yamldump
from caput import pipeline
from caput import config
from cora.util import units
from . import task
from ..util.truncate import bit_truncate_weights, bit_truncate_fixed
from .containers import SiderealStream, TimeStream, TrackBeam
# Default truncation parameters for each supported container type, consumed by
# the `Truncate` task when the user config does not override them.
TRUNC_SPEC = {
    SiderealStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TimeStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TrackBeam: {
        "dataset": ["beam", "weight"],
        "weight_dataset": ["weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
}
def _list_of_filelists(files):
import glob
f2 = []
for filelist in files:
if isinstance(filelist, str):
filelist = glob.glob(filelist)
elif isinstance(filelist, list):
pass
else:
raise Exception("Must be list or glob pattern.")
f2.append(filelist)
return f2
def _list_or_glob(files):
import glob
if isinstance(files, str):
files = sorted(glob.glob(files))
elif isinstance(files, list):
pass
else:
raise ValueError("Argument must be list or glob pattern, got %s" % repr(files))
return files
def _list_of_filegroups(groups):
import glob
if not isinstance(groups, list):
groups = [groups]
for gi, group in enumerate(groups):
files = group["files"]
if "tag" not in group:
group["tag"] = "group_%i" % gi
flist = []
for fname in files:
flist += glob.glob(fname)
if not len(flist):
raise RuntimeError("No files in group exist (%s)." % files)
group["files"] = flist
return groups
class LoadMaps(task.MPILoggedTask):
    """Load groups of sky maps from disk and sum each group into one map.

    Attributes
    ----------
    maps : list or dict
        Groups of map files (each with "files" and optional "tag" entries),
        as accepted by `_list_of_filegroups`.
    """

    maps = config.Property(proptype=_list_of_filegroups)

    def next(self):
        """Load the next group of maps and add them together.

        Returns
        -------
        map_stack : containers.Map
            Sum of all maps in the group, with the group tag attached.
        """
        from . import containers

        # Stop the pipeline once all groups have been consumed.
        if len(self.maps) == 0:
            raise pipeline.PipelineStopIteration

        group = self.maps.pop(0)

        map_stack = None

        # Load each file in the group and accumulate into the stack.
        for mfile in group["files"]:
            self.log.debug("Loading file %s", mfile)
            current_map = containers.Map.from_file(mfile, distributed=True)
            current_map.redistribute("freq")
            if map_stack is None:
                map_stack = current_map
            else:
                # Consistency checks between the new map and the stack so far.
                # NOTE(review): `.all()` only raises if the axes differ
                # *everywhere*; `.any()` looks like the intent — confirm.
                if (current_map.freq != map_stack.freq).all():
                    raise RuntimeError("Maps do not have consistent frequencies.")
                if (current_map.index_map["pol"] != map_stack.index_map["pol"]).all():
                    raise RuntimeError("Maps do not have the same polarisations.")
                if (
                    current_map.index_map["pixel"] != map_stack.index_map["pixel"]
                ).all():
                    raise RuntimeError("Maps do not have the same pixelisation.")
                map_stack.map[:] += current_map.map[:]

        # Assign the group tag to the summed map.
        map_stack.attrs["tag"] = group["tag"]

        return map_stack
class LoadFITSCatalog(task.SingleTask):
    """Load spectroscopic catalogs from groups of FITS files.

    Reads the "RA", "DEC" and "Z" columns from each file, optionally applies a
    redshift selection, and combines all files in a group into a single
    :class:`containers.SpectroscopicCatalog`.

    Attributes
    ----------
    catalogs : list or dict
        Groups of catalog files (each with "files" and optional "tag" entries).
    z_range : pair of float, optional
        Redshift interval to select.
    freq_range : pair of float, optional
        21cm frequency interval; converted to a redshift range via `units.nu21`
        and overrides `z_range` if given.
    """

    catalogs = config.Property(proptype=_list_of_filegroups)
    z_range = config.list_type(type_=float, length=2, default=None)
    freq_range = config.list_type(type_=float, length=2, default=None)

    def process(self):
        """Load the next catalog group.

        Returns
        -------
        catalog : containers.SpectroscopicCatalog
        """
        from astropy.io import fits
        from . import containers

        if len(self.catalogs) == 0:
            raise pipeline.PipelineStopIteration
        group = self.catalogs.pop(0)

        # Set the redshift selection
        if self.freq_range:
            zl = units.nu21 / self.freq_range[1] - 1
            zh = units.nu21 / self.freq_range[0] - 1
            self.z_range = (zl, zh)
        if self.z_range:
            zl, zh = self.z_range
            self.log.info(f"Applying redshift selection {zl:.2f} <= z <= {zh:.2f}")

        # Load the data only on rank=0 and then broadcast
        if self.comm.rank == 0:
            # Iterate over all the files in the group, load them and stack them
            catalog_stack = []
            for cfile in group["files"]:
                self.log.debug("Loading file %s", cfile)
                with fits.open(cfile, mode="readonly") as cat:
                    pos = np.array([cat[1].data[col] for col in ["RA", "DEC", "Z"]])
                # Apply any redshift selection to the objects
                if self.z_range:
                    zsel = (pos[2] >= self.z_range[0]) & (pos[2] <= self.z_range[1])
                    pos = pos[:, zsel]
                catalog_stack.append(pos)
            # NOTE: the concatenate can produce a non C contiguous array, so
            # ensure contiguity or the broadcasting below gets very confused
            catalog_array = np.concatenate(catalog_stack, axis=-1).astype(np.float64)
            catalog_array = np.ascontiguousarray(catalog_array)
            num_objects = catalog_array.shape[-1]
        else:
            num_objects = None
            catalog_array = None

        # Broadcast the size of the catalog to all ranks, create the target
        # array and broadcast into it
        num_objects = self.comm.bcast(num_objects, root=0)
        self.log.debug(f"Constructing catalog with {num_objects} objects.")
        if self.comm.rank != 0:
            catalog_array = np.zeros((3, num_objects), dtype=np.float64)
        self.comm.Bcast(catalog_array, root=0)

        catalog = containers.SpectroscopicCatalog(object_id=num_objects)
        catalog["position"]["ra"] = catalog_array[0]
        catalog["position"]["dec"] = catalog_array[1]
        catalog["redshift"]["z"] = catalog_array[2]
        catalog["redshift"]["z_error"] = 0

        # Assign the group tag to the catalog
        catalog.attrs["tag"] = group["tag"]

        return catalog
class LoadFilesFromParams(task.SingleTask):
    """Load data from files given in the tasks parameters.

    Attributes
    ----------
    files : glob pattern, or list
        Can either be a glob pattern, or lists of actual files.
    distributed : bool, optional
        Whether the file should be loaded distributed across ranks.
    convert_strings : bool, optional
        Convert strings to unicode when loading.
    selections : dict, optional
        Axis selections: `<axis>_range` keys map to `[start, stop[, step]]`
        slices and `<axis>_index` keys to explicit index lists; range keys
        take precedence over index keys for the same axis.
    """

    files = config.Property(proptype=_list_or_glob)
    distributed = config.Property(proptype=bool, default=True)
    convert_strings = config.Property(proptype=bool, default=True)
    selections = config.Property(proptype=dict, default=None)

    def setup(self):
        """Resolve the selections into `_sel` for use by `process`."""
        self._sel = self._resolve_sel()

    def process(self):
        """Load the given files in turn and pass on.

        Returns
        -------
        cont : subclass of `memh5.BasicCont`
            The next file in the list, loaded into memory.
        """
        from caput import memh5

        # Garbage collect to workaround leaking memory from containers.
        import gc

        gc.collect()

        if len(self.files) == 0:
            raise pipeline.PipelineStopIteration

        # Fetch and remove the first item in the list
        file_ = self.files.pop(0)
        self.log.info(f"Loading file {file_}")
        self.log.debug(f"Reading with selections: {self._sel}")

        # With selections, dispatch `from_file` via the correct subclass rather
        # than relying on internal detection; detect on rank=0 and broadcast to
        # minimise the number of files being opened.
        if self._sel:
            if self.comm.rank == 0:
                with h5py.File(file_, "r") as fh:
                    clspath = memh5.MemDiskGroup._detect_subclass_path(fh)
            else:
                clspath = None
            clspath = self.comm.bcast(clspath, root=0)
            new_cls = memh5.MemDiskGroup._resolve_subclass(clspath)
        else:
            new_cls = memh5.BasicCont

        cont = new_cls.from_file(
            file_,
            distributed=self.distributed,
            comm=self.comm,
            convert_attribute_strings=self.convert_strings,
            convert_dataset_strings=self.convert_strings,
            **self._sel,
        )

        if "tag" not in cont.attrs:
            # Use the basename (without extension) as the tag
            tag = os.path.splitext(os.path.basename(file_))[0]
            cont.attrs["tag"] = tag

        return cont

    def _resolve_sel(self):
        # Turn the selection parameters into actual selectable types
        sel = {}
        sel_parsers = {"range": self._parse_range, "index": self._parse_index}
        # Sorting places axis_range keys after axis_index keys, enforcing the
        # precedence of range over index selections
        for k in sorted(self.selections or []):
            # The axis name itself may contain an underscore
            *axis, type_ = k.split("_")
            axis_name = "_".join(axis)
            if type_ not in sel_parsers:
                raise ValueError(
                    f'Unsupported selection type "{type_}", or invalid key "{k}"'
                )
            sel[f"{axis_name}_sel"] = sel_parsers[type_](self.selections[k])
        return sel

    def _parse_range(self, x):
        # Parse and validate a range type selection, returning a `slice`
        if not isinstance(x, (list, tuple)) or len(x) > 3 or len(x) < 2:
            raise ValueError(
                f"Range spec must be a length 2 or 3 list or tuple. Got {x}."
            )
        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of range spec must be ints. Got {x}")
        return slice(*x)

    def _parse_index(self, x):
        # Parse and validate an index type selection, returning a list of ints
        if not isinstance(x, (list, tuple)) or len(x) == 0:
            raise ValueError(f"Index spec must be a non-empty list or tuple. Got {x}.")
        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of index spec must be ints. Got {x}")
        return list(x)
LoadBasicCont = LoadFilesFromParams
class FindFiles(pipeline.TaskBase):
    """Take a glob or list of files specified as a parameter in the
    configuration file and pass on to other tasks.

    Parameters
    ----------
    files : list or glob
    """

    files = config.Property(proptype=_list_or_glob)

    def setup(self):
        """Return the list of files specified in the parameters."""
        if not isinstance(self.files, (list, tuple)):
            raise RuntimeError("Argument must be list of files.")
        return self.files
class LoadFiles(LoadFilesFromParams):
    """Load data from files passed into the setup routine.

    Each file must be a serialised subclass of :class:`memh5.BasicCont`.
    """

    # Files come in via `setup`, not the config, so disable the config property.
    files = None

    def setup(self, files):
        """Set the list of files to load.

        Parameters
        ----------
        files : list
            Paths of the files to load, one per `process` call.
        """
        # Resolve any configured selections in the base class first.
        super().setup()
        if not isinstance(files, (list, tuple)):
            raise RuntimeError(f'Argument must be list of files. Got "{files}"')
        self.files = files
class Save(pipeline.TaskBase):
    """Write the input to disk, then pass it on unchanged.

    Assumes that the input has a `to_hdf5` method. Appends a *tag* if there is
    a `tag` entry in the attributes, otherwise just uses a count.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    # Running counter used when the input carries no tag.
    count = 0

    def next(self, data):
        """Write out the data file.

        Parameters
        ----------
        data : mpidataset.MPIDataset
            Data to write out.
        """
        if "tag" not in data.attrs:
            tag = self.count
            self.count += 1
        else:
            tag = data.attrs["tag"]
        fname = "%s_%s.h5" % (self.root, str(tag))
        data.to_hdf5(fname)
        return data
class Print(pipeline.TaskBase):
    """Print any input it receives and pass it through unchanged.

    Useful for inspecting intermediate pipeline products while debugging.
    """

    def next(self, input_):
        # Echo the payload to stdout, then forward it untouched.
        print(input_)
        return input_
class LoadBeamTransfer(pipeline.TaskBase):
    """Load a beam transfer manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the saved Beam Transfer products.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the beam transfer matrices.

        Returns
        -------
        tel : TransitTelescope
            Object describing the telescope.
        bt : BeamTransfer
            BeamTransfer manager.
        feed_info : list, optional
            Optional list providing additional information about each feed.
        """
        import os
        from drift.core import beamtransfer

        if not os.path.exists(self.product_directory):
            raise RuntimeError("BeamTransfers do not exist.")
        bt = beamtransfer.BeamTransfer(self.product_directory)
        tel = bt.telescope
        # Not every telescope object carries per-feed information.
        try:
            return tel, bt, tel.feeds
        except AttributeError:
            return tel, bt
class LoadProductManager(pipeline.TaskBase):
    """Load a driftscan product manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the root of the products. This is the same as the output
        directory used by ``drift-makeproducts``.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the product manager.

        Returns
        -------
        manager : ProductManager
            Object describing the telescope.
        """
        import os
        from drift.core import manager

        if not os.path.exists(self.product_directory):
            raise RuntimeError("Products do not exist.")
        # Load the ProductManager from its on-disk configuration.
        pm = manager.ProductManager.from_config(self.product_directory)
        return pm
class Truncate(task.SingleTask):
    """Precision truncate data prior to saving with bitshuffle compression.

    If no configuration is provided, will look for preset values for the
    input container (`TRUNC_SPEC`). Any properties defined in the config
    override the presets. Where a weight dataset is given, truncation is
    relative to it, bounded by `variance_increase`; otherwise the fixed
    relative precision is used.

    Attributes
    ----------
    dataset : list of str
        Datasets to truncate.
    weight_dataset : list of str
        Datasets to use as inverse variance for truncation precision.
    fixed_precision : float
        Relative precision to truncate to.
    variance_increase : float
        Maximum fractional increase in variance from numerical truncation.
    """

    dataset = config.Property(proptype=list, default=None)
    weight_dataset = config.Property(proptype=list, default=None)
    fixed_precision = config.Property(proptype=float, default=None)
    variance_increase = config.Property(proptype=float, default=None)

    def _get_params(self, container):
        """Load truncation parameters from config or container defaults."""
        if container in TRUNC_SPEC:
            self.log.info("Truncating from preset for container {}".format(container))
            # NOTE(review): resolved values are written back onto the task
            # attributes, so presets for the first container type processed
            # stick for subsequent calls — confirm this is intended.
            for key in [
                "dataset",
                "weight_dataset",
                "fixed_precision",
                "variance_increase",
            ]:
                attr = getattr(self, key)
                if attr is None:
                    setattr(self, key, TRUNC_SPEC[container][key])
                else:
                    self.log.info("Overriding container default for '{}'.".format(key))
        else:
            if (
                self.dataset is None
                or self.fixed_precision is None
                or self.variance_increase is None
            ):
                raise pipeline.PipelineConfigError(
                    "Container {} has no preset values. You must define all of 'dataset', "
                    "'fixed_precision', and 'variance_increase' properties.".format(
                        container
                    )
                )
        # Factor of 3 for variance over uniform distribution of truncation errors
        self.variance_increase *= 3

    def process(self, data):
        """Truncate the incoming data *in place* and return it.

        Parameters
        ----------
        data : containers.ContainerBase
            Data to truncate.

        Returns
        -------
        truncated_data : containers.ContainerBase
            The same container, truncated.
        """
        # Get truncation parameters from config or container defaults
        self._get_params(type(data))
        if self.weight_dataset is None:
            self.weight_dataset = [None] * len(self.dataset)
        for dset, wgt in zip(self.dataset, self.weight_dataset):
            old_shape = data[dset].local_shape
            # Flatten the local data so the truncation routines see a 1D array.
            val = np.ndarray.reshape(data[dset][:], data[dset][:].size)
            if wgt is None:
                # No weights: truncate to a fixed relative precision, handling
                # real and imaginary parts separately for complex data.
                if np.iscomplexobj(data[dset]):
                    data[dset][:].real = bit_truncate_fixed(
                        val.real, self.fixed_precision
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_fixed(
                        val.imag, self.fixed_precision
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_fixed(
                        val, self.fixed_precision
                    ).reshape(old_shape)
            else:
                if data[dset][:].shape != data[wgt][:].shape:
                    raise pipeline.PipelineRuntimeError(
                        "Dataset and weight arrays must have same shape ({} != {})".format(
                            data[dset].shape, data[wgt].shape
                        )
                    )
                invvar = np.ndarray.reshape(data[wgt][:], data[dset][:].size)
                if np.iscomplexobj(data[dset]):
                    # Factor of 2.0 on the weights for complex data — presumably
                    # the variance budget is split between real and imaginary
                    # parts; confirm against the truncation routines.
                    data[dset][:].real = bit_truncate_weights(
                        val.real,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_weights(
                        val.imag,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_weights(
                        val, invvar / self.variance_increase, self.fixed_precision
                    ).reshape(old_shape)
        return data
class SaveModuleVersions(task.SingleTask):
    """Write module versions to a YAML file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save module versions."""
        fname = "{}_versions.yml".format(self.root)
        # Use a context manager so the file is closed even if the dump fails.
        with open(fname, "w") as f:
            f.write(yamldump(self.versions))
        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
class SaveConfig(task.SingleTask):
    """Write the pipeline config to a YAML text file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save the pipeline configuration."""
        fname = "{}_config.yml".format(self.root)
        # Use a context manager so the file is closed even if the dump fails.
        with open(fname, "w") as f:
            f.write(yamldump(self.pipeline_config))
        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
def get_telescope(obj):
    """Return a telescope object out of the input (either `ProductManager`,
    `BeamTransfer` or `TransitTelescope`).
    """
    from drift.core import telescope

    try:
        return get_beamtransfer(obj).telescope
    except RuntimeError:
        # Not a BeamTransfer/ProductManager; accept a bare telescope instance.
        if isinstance(obj, telescope.TransitTelescope):
            return obj
    raise RuntimeError("Could not get telescope instance out of %s" % repr(obj))
def get_beamtransfer(obj):
    """Return a BeamTransfer object out of the input (either `ProductManager`,
    `BeamTransfer`).
    """
    from drift.core import manager, beamtransfer

    if isinstance(obj, beamtransfer.BeamTransfer):
        return obj
    if isinstance(obj, manager.ProductManager):
        return obj.beamtransfer
    raise RuntimeError("Could not get BeamTransfer instance out of %s" % repr(obj))
| true | true |
f71b4f651dc252f16edc83bd218126c89ab19ffc | 2,507 | py | Python | scripts/remove_orphans.py | dbaio/portsfallout | 2512036a9983b833f4ece2a0801541dca4d8d58c | [
"BSD-2-Clause"
] | 6 | 2020-10-11T07:54:50.000Z | 2022-01-25T22:03:18.000Z | scripts/remove_orphans.py | dbaio/portsfallout | 2512036a9983b833f4ece2a0801541dca4d8d58c | [
"BSD-2-Clause"
] | null | null | null | scripts/remove_orphans.py | dbaio/portsfallout | 2512036a9983b833f4ece2a0801541dca4d8d58c | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2020-2021 Danilo G. Baio <dbaio@bsd.com.br>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import requests
import bz2
sys.path.insert(1, r'../')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portsfallout.settings')
import django
django.setup()
from ports.models import Port
def fetch_index():
    """Download the FreeBSD ports INDEX-13 file into the current directory."""
    url = "https://www.FreeBSD.org/ports/INDEX-13.bz2"
    # Use a timeout so a stalled connection cannot hang the script, and fail
    # loudly on an HTTP error instead of writing an error page to disk.
    r = requests.get(url, allow_redirects=True, timeout=60)
    r.raise_for_status()
    # Context manager guarantees the file handle is closed.
    with open('INDEX-13.bz2', 'wb') as fh:
        fh.write(r.content)
def populate_set():
    """Return the set of all port origins currently stored in the database."""
    rows = Port.objects.all().values('origin').order_by('origin')
    return {row['origin'] for row in rows}
def read_index(sPorts):
    """Drop from *sPorts* every origin listed in ./INDEX-13.bz2.

    Parameters
    ----------
    sPorts : set
        Set of port origins; mutated in place.

    Returns
    -------
    set
        The same set, now containing only origins absent from the index.
    """
    with bz2.open('INDEX-13.bz2', mode='rt') as index_file:
        for line in index_file:
            # The origin is the second |-separated field, stored as
            # "/usr/ports/<category>/<port>".
            origin = line.split("|")[1].replace("/usr/ports/", "")
            sPorts.discard(origin)
    return sPorts
def remove_orphans(sPortsOrp):
    """Delete every Port record whose origin appears in *sPortsOrp*."""
    for origin in sPortsOrp:
        print(f'Removing {origin}')
        Port.objects.filter(origin=origin).delete()
if __name__ == "__main__":
    # Download the current index, collect the origins stored in the database,
    # keep only those missing from the index, and delete them.
    fetch_index()
    sPorts = populate_set()
    sPortsOrp = read_index(sPorts)
    remove_orphans(sPortsOrp)
| 32.986842 | 81 | 0.723574 |
import os
import sys
import requests
import bz2
sys.path.insert(1, r'../')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portsfallout.settings')
import django
django.setup()
from ports.models import Port
def fetch_index():
    """Download the FreeBSD ports INDEX-13 file into the current directory."""
    url = "https://www.FreeBSD.org/ports/INDEX-13.bz2"
    # Use a timeout so a stalled connection cannot hang the script, and fail
    # loudly on an HTTP error instead of writing an error page to disk.
    r = requests.get(url, allow_redirects=True, timeout=60)
    r.raise_for_status()
    # Context manager guarantees the file handle is closed.
    with open('INDEX-13.bz2', 'wb') as fh:
        fh.write(r.content)
def populate_set():
    """Return the set of all port origins currently stored in the database."""
    Ports = Port.objects.all().values('origin').order_by('origin')
    sPorts = set()
    for port in Ports:
        sPorts.add(port['origin'])
    return sPorts
def read_index(sPorts):
    """Drop from *sPorts* every origin listed in ./INDEX-13.bz2 (mutates and
    returns the set)."""
    with bz2.open('INDEX-13.bz2', mode='rt') as index_file:
        for row in index_file:
            row_list = row.split("|")
            # The origin is the second |-separated field, stored as
            # "/usr/ports/<category>/<port>".
            p_origin = row_list[1].replace("/usr/ports/", "")
            if p_origin in sPorts:
                sPorts.remove(p_origin)
    return sPorts
def remove_orphans(sPortsOrp):
    """Delete every Port record whose origin appears in *sPortsOrp*."""
    for port in sPortsOrp:
        print('Removing {}'.format(port))
        Port.objects.filter(origin=port).delete()
if __name__ == "__main__":
    # Download the current index, collect the origins stored in the database,
    # keep only those missing from the index, and delete them.
    fetch_index()
    sPorts = populate_set()
    sPortsOrp = read_index(sPorts)
    remove_orphans(sPortsOrp)
| true | true |
f71b510ba2e775050928e7d131af51fdd10e9af6 | 4,481 | py | Python | scripts/mergemessages.py | burakozdemir32/django-internationalflavor | 4c5d29519050c929a608d2054c14faa44ee273c9 | [
"BSD-3-Clause"
] | null | null | null | scripts/mergemessages.py | burakozdemir32/django-internationalflavor | 4c5d29519050c929a608d2054c14faa44ee273c9 | [
"BSD-3-Clause"
] | null | null | null | scripts/mergemessages.py | burakozdemir32/django-internationalflavor | 4c5d29519050c929a608d2054c14faa44ee273c9 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import polib
import django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
# This is almost a management command, but we do not want it to be added to the django-admin namespace for the simple
# reason that it is not expected to be executed by package users, only by the package maintainers.
# We use a thin __main__ wrapper to make it work (ish) like a management command.
MODULE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'internationalflavor')
LOCALE_PATH = os.path.join(MODULE_PATH, 'locale')
def mark_entry(entry):
    """Drop the fuzzy flag from a PO *entry* and stamp it as CLDR-generated."""
    try:
        entry.flags.remove('fuzzy')
    except ValueError:
        # Entry was not flagged fuzzy; nothing to strip.
        pass
    entry.comment = "auto-generated from CLDR -- see docs before updating"
class Command(BaseCommand):
    """Maintainer command: merge CLDR-derived translations into the .po files.

    Deliberately not exposed as a django-admin management command; it is run
    via the ``__main__`` wrapper in this module.
    """

    help = 'Updates messages in the PO file with messages from the CLDR'

    def handle(self, *args, **options):
        """Merge `cldr.po` and `django_only.po` into `django.po` per language."""
        translation.deactivate_all()

        # Restrict to a single language if `-l` was given.
        if options['l']:
            languages = (options['l'], dict(settings.LANGUAGES)[options['l']]),
        else:
            languages = settings.LANGUAGES

        for lc, language in languages:
            try:
                self.stdout.write("Parsing language %s [%s]" % (language, lc))

                # Get some files ready
                # The pofile is our combined file
                pofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.po'))
                # The cldrfile contain only messages from CLDR
                cldrfile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'cldr.po'))
                # The djangofile will only contain messages not from CLDR
                try:
                    djangofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
                except IOError:
                    djangofile = polib.POFile()
                    djangofile.metadata = pofile.metadata
                    djangofile.header = pofile.header

                # Merge all non-django messages to the djangofile
                django_only_messages = polib.POFile()
                for entry in pofile:
                    if cldrfile.find(entry.msgid) is None and not entry.obsolete and not 'fuzzy' in entry.flags:
                        django_only_messages.append(entry)
                djangofile.merge(django_only_messages)
                djangofile.save(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))

                # Add all entries from the CLDR file to the combined file
                for entry in cldrfile:
                    e = pofile.find(entry.msgid)
                    if e is None:
                        e = polib.POEntry()
                        e.msgid = entry.msgid
                        pofile.append(e)
                    elif 'manual' in e.tcomment.lower():
                        # Entries marked 'manual' are maintained by hand; skip them.
                        self.stdout.write("-- Skipping %s of %s" % (e.msgid, language))
                        continue
                    e.obsolete = False
                    e.msgstr = entry.msgstr
                    e.comment = entry.comment
                    if 'fuzzy' in e.flags:
                        e.flags.remove('fuzzy')

                # Add entries from the Django file to the combined file
                for entry in djangofile:
                    e = pofile.find(entry.msgid)
                    # If not in main file, then skip
                    if e is None:
                        continue
                    e.obsolete = entry.obsolete
                    e.msgstr = entry.msgstr
                    e.comment = entry.comment
                    e.flags = entry.flags

                # We copy over the header and metadata from the djangofile.
                pofile.metadata = djangofile.metadata
                pofile.header = djangofile.header
                pofile.save()
                pofile.save_as_mofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.mo'))
            except IOError as e:
                self.stderr.write("Error while handling %s: %s (possibly no valid .po file)" % (language, e))
            except Exception as e:
                # NOTE(review): deliberately broad so one bad language does not
                # abort the whole run; the error is reported and we move on.
                self.stderr.write("Error while handling %s: %s" % (language, e))

    def add_arguments(self, parser):
        """Register the optional `-l <language code>` filter argument."""
        parser.add_argument('-l')
if __name__ == '__main__':
    # Allow running this module directly, outside of manage.py: bootstrap a
    # minimal Django environment, then dispatch to the command exactly as
    # django-admin would (forwarding any extra CLI arguments, e.g. -l).
    settings.configure()
    django.setup()
    Command().run_from_argv(["django-admin.py", "mergemessages"] + sys.argv[1:])
| 41.110092 | 117 | 0.567061 | import os
import sys
import polib
import django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
MODULE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'internationalflavor')
LOCALE_PATH = os.path.join(MODULE_PATH, 'locale')
def mark_entry(entry):
if 'fuzzy' in entry.flags:
entry.flags.remove('fuzzy')
entry.comment = "auto-generated from CLDR -- see docs before updating"
class Command(BaseCommand):
help = 'Updates messages in the PO file with messages from the CLDR'
def handle(self, *args, **options):
translation.deactivate_all()
if options['l']:
languages = (options['l'], dict(settings.LANGUAGES)[options['l']]),
else:
languages = settings.LANGUAGES
for lc, language in languages:
try:
self.stdout.write("Parsing language %s [%s]" % (language, lc))
pofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.po'))
cldrfile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'cldr.po'))
try:
djangofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
except IOError:
djangofile = polib.POFile()
djangofile.metadata = pofile.metadata
djangofile.header = pofile.header
django_only_messages = polib.POFile()
for entry in pofile:
if cldrfile.find(entry.msgid) is None and not entry.obsolete and not 'fuzzy' in entry.flags:
django_only_messages.append(entry)
djangofile.merge(django_only_messages)
djangofile.save(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
for entry in cldrfile:
e = pofile.find(entry.msgid)
if e is None:
e = polib.POEntry()
e.msgid = entry.msgid
pofile.append(e)
elif 'manual' in e.tcomment.lower():
self.stdout.write("-- Skipping %s of %s" % (e.msgid, language))
continue
e.obsolete = False
e.msgstr = entry.msgstr
e.comment = entry.comment
if 'fuzzy' in e.flags:
e.flags.remove('fuzzy')
for entry in djangofile:
e = pofile.find(entry.msgid)
if e is None:
continue
e.obsolete = entry.obsolete
e.msgstr = entry.msgstr
e.comment = entry.comment
e.flags = entry.flags
pofile.metadata = djangofile.metadata
pofile.header = djangofile.header
pofile.save()
pofile.save_as_mofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.mo'))
except IOError as e:
self.stderr.write("Error while handling %s: %s (possibly no valid .po file)" % (language, e))
except Exception as e:
self.stderr.write("Error while handling %s: %s" % (language, e))
def add_arguments(self, parser):
parser.add_argument('-l')
if __name__ == '__main__':
settings.configure()
django.setup()
Command().run_from_argv(["django-admin.py", "mergemessages"] + sys.argv[1:])
| true | true |
f71b518086dd6b504569820b3ca2a1c860242389 | 213 | py | Python | wadeem/wadeem/doctype/coordinators/test_coordinators.py | siddhantsinha-oodles/Wadeem-app | eee05dead7ccee8878cf3630d3cdf32adb155c7f | [
"MIT"
] | null | null | null | wadeem/wadeem/doctype/coordinators/test_coordinators.py | siddhantsinha-oodles/Wadeem-app | eee05dead7ccee8878cf3630d3cdf32adb155c7f | [
"MIT"
] | null | null | null | wadeem/wadeem/doctype/coordinators/test_coordinators.py | siddhantsinha-oodles/Wadeem-app | eee05dead7ccee8878cf3630d3cdf32adb155c7f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Siddhant and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestCoordinators(unittest.TestCase):
    """Placeholder test case for the Coordinators DocType; no tests yet."""
| 19.363636 | 47 | 0.765258 |
from __future__ import unicode_literals
import unittest
class TestCoordinators(unittest.TestCase):
pass
| true | true |
f71b52d065d31336559f6dfb1eeccf44823e1409 | 2,392 | py | Python | auto_nag/tests/test_agents.py | Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd | 28d999fcba9ad47d1dd0b2222880b71726ddd47c | [
"BSD-3-Clause"
] | null | null | null | auto_nag/tests/test_agents.py | Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd | 28d999fcba9ad47d1dd0b2222880b71726ddd47c | [
"BSD-3-Clause"
] | null | null | null | auto_nag/tests/test_agents.py | Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd | 28d999fcba9ad47d1dd0b2222880b71726ddd47c | [
"BSD-3-Clause"
] | null | null | null |
try:
# If relman-auto-nag is installed
from bugzilla.agents import BMOAgent
from bugzilla.utils import os
from bugzilla.utils import get_config_path
except:
# If relman-auto-nag not installed, add project root directory into
# PYTHONPATH
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from bugzilla.agents import BMOAgent
from bugzilla.utils import os
from bugzilla.utils import get_config_path
import json
from nose.tools import *
class TestAgent:
    """Tests for the Bugzilla BMO agent: bug queries and API-key handling."""

    @staticmethod
    def _query_options():
        """Return the REST API query options shared by the list tests."""
        return {
            'chfieldfrom': ['2012-12-24'],
            'chfieldto': ['2012-12-27'],
            'chfield': ['bug_status'],
            'chfieldvalue': ['RESOLVED'],
            'product': ['Firefox'],
            'resolution': ['FIXED'],
            'include_fields': ['attachments'],
        }

    def test_get_bug_list(self):
        """A valid query against BMO should return a non-empty bug list."""
        # Load our agent for BMO
        bmo = BMOAgent()
        # Get the bugs from the api
        buglist = bmo.get_bug_list(self._query_options())
        assert buglist != []

    def test_get_bug(self):
        """Fetching a known bug id should return a non-empty result."""
        bmo = BMOAgent()
        bug = bmo.get_bug(656222)
        assert bug != []

    @raises(Exception)
    def test_get_bug_list_wrng_api_k(self):
        """Wrong API Key, it should raise an Error"""
        bmo = BMOAgent('wrong_api_key_test')
        bmo.get_bug_list(self._query_options())

    @raises(Exception)
    def test_get_bug_wrng_api_k(self):
        """Wrong API Key, it should raise an Error"""
        bmo = BMOAgent('wrong_api_key_test')
        bug = bmo.get_bug(656222)
        # print() form is valid on both Python 2 and 3; the original
        # `print bug` statement was a SyntaxError under Python 3.
        print(bug)
| 30.278481 | 74 | 0.581522 |
try:
from bugzilla.agents import BMOAgent
from bugzilla.utils import os
from bugzilla.utils import get_config_path
except:
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from bugzilla.agents import BMOAgent
from bugzilla.utils import os
from bugzilla.utils import get_config_path
import json
from nose.tools import *
class TestAgent:
def test_get_bug_list(self):
options = {
'chfieldfrom': ['2012-12-24'],
'chfieldto': ['2012-12-27'],
'chfield': ['bug_status'],
'chfieldvalue': ['RESOLVED'],
'product': ['Firefox'],
'resolution': ['FIXED'],
'include_fields': ['attachments'],
}
bmo = BMOAgent()
buglist = bmo.get_bug_list(options)
assert buglist != []
def test_get_bug(self):
bmo = BMOAgent()
bug = bmo.get_bug(656222)
assert bug != []
@raises(Exception)
def test_get_bug_list_wrng_api_k(self):
""" Wrong API Key, it should raise an Error"""
options = {
'chfieldfrom': ['2012-12-24'],
'chfieldto': ['2012-12-27'],
'chfield': ['bug_status'],
'chfieldvalue': ['RESOLVED'],
'product': ['Firefox'],
'resolution': ['FIXED'],
'include_fields': ['attachments'],
}
bmo = BMOAgent('wrong_api_key_test')
bmo.get_bug_list(options)
@raises(Exception)
def test_get_bug_wrng_api_k(self):
""" Wrong API Key, it should raise an Error"""
bmo = BMOAgent('wrong_api_key_test')
bug = bmo.get_bug(656222)
print bug
| false | true |
f71b5347247e075de1cdb3afefef4dff57d2b7aa | 802 | py | Python | multiqc_npm/modules/npm_calculate_callability.py | c-BIG/MultiQC_NPM | c8dbf15c2b2ce03f2932db0bb50d49dcab865d75 | [
"MIT"
] | null | null | null | multiqc_npm/modules/npm_calculate_callability.py | c-BIG/MultiQC_NPM | c8dbf15c2b2ce03f2932db0bb50d49dcab865d75 | [
"MIT"
] | null | null | null | multiqc_npm/modules/npm_calculate_callability.py | c-BIG/MultiQC_NPM | c8dbf15c2b2ce03f2932db0bb50d49dcab865d75 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Parser for calculate_callability.py
"""
import logging
import json
from multiqc.utils import report
log = logging.getLogger(__name__)
def parse_reports(self):
# Set up vars
self.calculate_callability = dict()
# Collect metrics
for f in self.find_log_files('multiqc_npm/calculate_callability'):
parsed_data = json.loads(f["f"])
# Save results
s_name = f["s_name"]
self.calculate_callability[s_name] = parsed_data
# Write results
if len(self.calculate_callability) > 0:
# Write parsed data to a file
self.write_data_file(self.calculate_callability, 'multiqc_npm_calculate_callability')
# Return the number of detected samples to the parent module
return len(self.calculate_callability)
| 22.914286 | 93 | 0.706983 |
import logging
import json
from multiqc.utils import report
log = logging.getLogger(__name__)
def parse_reports(self):
self.calculate_callability = dict()
for f in self.find_log_files('multiqc_npm/calculate_callability'):
parsed_data = json.loads(f["f"])
s_name = f["s_name"]
self.calculate_callability[s_name] = parsed_data
if len(self.calculate_callability) > 0:
self.write_data_file(self.calculate_callability, 'multiqc_npm_calculate_callability')
return len(self.calculate_callability)
| true | true |
f71b54183262fd12198dc0255f4b1059568df90a | 14,252 | py | Python | airflow/models/connection.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | airflow/models/connection.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | 1 | 2019-05-14T14:32:40.000Z | 2019-05-14T14:32:40.000Z | airflow/models/connection.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlparse
from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from airflow.exceptions import AirflowException
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.log.logging_mixin import LoggingMixin
# Python automatically converts all letters to lowercase in hostname
# See: https://issues.apache.org/jira/browse/AIRFLOW-3615
def parse_netloc_to_hostname(uri_parts):
    """Extract the host name from a parsed URI.

    Works around ``urlparse`` quirks for hosts containing a percent-encoded
    slash: Python lower-cases and %-decodes ``.hostname`` automatically
    (see AIRFLOW-3615), so when a '/' shows up after decoding we fall back
    to dissecting the raw netloc ourselves.
    """
    host = unquote(uri_parts.hostname or '')
    if '/' not in host:
        return host
    # Decoded hostname contains a slash: start over from the raw netloc,
    # strip the credentials and port manually, then decode once.
    raw = uri_parts.netloc
    if "@" in raw:
        raw = raw.rsplit("@", 1)[1]
    if ":" in raw:
        raw = raw.split(":", 1)[0]
    return unquote(raw)
class Connection(Base, LoggingMixin):
    """
    Placeholder to store information about different database instances
    connection information. The idea here is that scripts use references to
    database instances (conn_id) instead of hard coding hostname, logins and
    passwords when using operators or hooks.
    """
    __tablename__ = "connection"

    id = Column(Integer(), primary_key=True)
    conn_id = Column(String(ID_LEN))
    conn_type = Column(String(500))
    host = Column(String(500))
    schema = Column(String(500))
    login = Column(String(500))
    # Backing column (exposed as the `password` synonym below). Holds either
    # plaintext or a Fernet-encrypted value, as indicated by `is_encrypted`.
    _password = Column('password', String(5000))
    port = Column(Integer())
    is_encrypted = Column(Boolean, unique=False, default=False)
    is_extra_encrypted = Column(Boolean, unique=False, default=False)
    # Backing column for the JSON "extra" blob; same encryption scheme as
    # `_password`, flagged by `is_extra_encrypted`.
    _extra = Column('extra', String(5000))

    # (conn_type value, human-readable label) pairs, e.g. for UI dropdowns.
    _types = [
        ('docker', 'Docker Registry'),
        ('fs', 'File (path)'),
        ('ftp', 'FTP'),
        ('google_cloud_platform', 'Google Cloud Platform'),
        ('hdfs', 'HDFS'),
        ('http', 'HTTP'),
        ('pig_cli', 'Pig Client Wrapper'),
        ('hive_cli', 'Hive Client Wrapper'),
        ('hive_metastore', 'Hive Metastore Thrift'),
        ('hiveserver2', 'Hive Server 2 Thrift'),
        ('jdbc', 'JDBC Connection'),
        ('odbc', 'ODBC Connection'),
        ('jenkins', 'Jenkins'),
        ('mysql', 'MySQL'),
        ('postgres', 'Postgres'),
        ('oracle', 'Oracle'),
        ('vertica', 'Vertica'),
        ('presto', 'Presto'),
        ('s3', 'S3'),
        ('samba', 'Samba'),
        ('sqlite', 'Sqlite'),
        ('ssh', 'SSH'),
        ('cloudant', 'IBM Cloudant'),
        ('mssql', 'Microsoft SQL Server'),
        ('mesos_framework-id', 'Mesos Framework ID'),
        ('jira', 'JIRA'),
        ('redis', 'Redis'),
        ('wasb', 'Azure Blob Storage'),
        ('databricks', 'Databricks'),
        ('aws', 'Amazon Web Services'),
        ('emr', 'Elastic MapReduce'),
        ('snowflake', 'Snowflake'),
        ('segment', 'Segment'),
        ('sqoop', 'Sqoop'),
        ('azure_data_lake', 'Azure Data Lake'),
        ('azure_container_instances', 'Azure Container Instances'),
        ('azure_cosmos', 'Azure CosmosDB'),
        ('cassandra', 'Cassandra'),
        ('qubole', 'Qubole'),
        ('mongo', 'MongoDB'),
        ('gcpcloudsql', 'Google Cloud SQL'),
        ('grpc', 'GRPC Connection'),
    ]

    def __init__(
            self, conn_id=None, conn_type=None,
            host=None, login=None, password=None,
            schema=None, port=None, extra=None,
            uri=None):
        """Build a connection either from discrete fields or from a URI.

        When ``uri`` is given it takes precedence: every field except
        ``conn_id`` is parsed out of it (see :meth:`parse_from_uri`).
        """
        self.conn_id = conn_id
        if uri:
            self.parse_from_uri(uri)
        else:
            self.conn_type = conn_type
            self.host = host
            self.login = login
            self.password = password
            self.schema = schema
            self.port = port
            self.extra = extra

    def parse_from_uri(self, uri):
        """Populate conn_type/host/schema/login/password/port/extra from a
        connection URI of the form ``type://login:password@host:port/schema?k=v``.
        """
        uri_parts = urlparse(uri)
        conn_type = uri_parts.scheme
        if conn_type == 'postgresql':
            conn_type = 'postgres'
        elif '-' in conn_type:
            # URI schemes use dashes; stored conn_type values use underscores.
            conn_type = conn_type.replace('-', '_')
        self.conn_type = conn_type
        self.host = parse_netloc_to_hostname(uri_parts)
        quoted_schema = uri_parts.path[1:]
        self.schema = unquote(quoted_schema) if quoted_schema else quoted_schema
        self.login = unquote(uri_parts.username) \
            if uri_parts.username else uri_parts.username
        self.password = unquote(uri_parts.password) \
            if uri_parts.password else uri_parts.password
        self.port = uri_parts.port
        if uri_parts.query:
            # Query parameters become the JSON "extra" payload.
            self.extra = json.dumps(dict(parse_qsl(uri_parts.query, keep_blank_values=True)))

    def get_uri(self) -> str:
        """Serialize this connection back into URI form (inverse of
        :meth:`parse_from_uri`); all components are percent-encoded.
        """
        uri = '{}://'.format(str(self.conn_type).lower().replace('_', '-'))

        authority_block = ''
        if self.login is not None:
            authority_block += quote(self.login, safe='')

        if self.password is not None:
            authority_block += ':' + quote(self.password, safe='')

        # NOTE: `> ''` is equivalent to a non-empty-string truthiness check.
        if authority_block > '':
            authority_block += '@'

            uri += authority_block

        host_block = ''
        if self.host:
            host_block += quote(self.host, safe='')

        if self.port:
            if host_block > '':
                host_block += ':{}'.format(self.port)
            else:
                # Port without a host still needs the authority separator.
                host_block += '@:{}'.format(self.port)

        if self.schema:
            host_block += '/{}'.format(quote(self.schema, safe=''))

        uri += host_block

        if self.extra_dejson:
            uri += '?{}'.format(urlencode(self.extra_dejson))

        return uri

    def get_password(self):
        """Return the plaintext password, decrypting it when needed.

        :raises AirflowException: if the stored value is encrypted but no
            valid Fernet key is configured.
        """
        if self._password and self.is_encrypted:
            fernet = get_fernet()
            if not fernet.is_encrypted:
                raise AirflowException(
                    "Can't decrypt encrypted password for login={}, \
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
        else:
            return self._password

    def set_password(self, value):
        """Encrypt (when a Fernet key is configured) and store the password."""
        if value:
            fernet = get_fernet()
            self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
            # Record whether the stored value is actually encrypted.
            self.is_encrypted = fernet.is_encrypted

    @declared_attr
    def password(cls):
        # SQLAlchemy synonym: maps the public `password` attribute onto the
        # `_password` column with transparent encrypt/decrypt accessors.
        return synonym('_password',
                       descriptor=property(cls.get_password, cls.set_password))

    def get_extra(self):
        """Return the raw "extra" string, decrypting it when needed.

        :raises AirflowException: if the stored value is encrypted but no
            valid Fernet key is configured.
        """
        if self._extra and self.is_extra_encrypted:
            fernet = get_fernet()
            if not fernet.is_encrypted:
                raise AirflowException(
                    "Can't decrypt `extra` params for login={},\
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
        else:
            return self._extra

    def set_extra(self, value):
        """Encrypt (when a Fernet key is configured) and store "extra"."""
        if value:
            fernet = get_fernet()
            self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
            self.is_extra_encrypted = fernet.is_encrypted
        else:
            self._extra = value
            self.is_extra_encrypted = False

    @declared_attr
    def extra(cls):
        # SQLAlchemy synonym: maps the public `extra` attribute onto the
        # `_extra` column with transparent encrypt/decrypt accessors.
        return synonym('_extra',
                       descriptor=property(cls.get_extra, cls.set_extra))

    def rotate_fernet_key(self):
        """Re-encrypt the encrypted fields with the newest Fernet key."""
        fernet = get_fernet()
        if self._password and self.is_encrypted:
            self._password = fernet.rotate(self._password.encode('utf-8')).decode()
        if self._extra and self.is_extra_encrypted:
            self._extra = fernet.rotate(self._extra.encode('utf-8')).decode()

    def get_hook(self):
        """Return the hook instance matching ``conn_type``.

        Hooks are imported lazily inside each branch to avoid pulling in every
        provider at module import time.

        :raises AirflowException: for an unknown ``conn_type``.
        """
        if self.conn_type == 'mysql':
            from airflow.providers.mysql.hooks.mysql import MySqlHook
            return MySqlHook(mysql_conn_id=self.conn_id)
        elif self.conn_type == 'google_cloud_platform':
            from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
            return BigQueryHook(bigquery_conn_id=self.conn_id)
        elif self.conn_type == 'postgres':
            from airflow.providers.postgres.hooks.postgres import PostgresHook
            return PostgresHook(postgres_conn_id=self.conn_id)
        elif self.conn_type == 'pig_cli':
            from airflow.providers.apache.pig.hooks.pig import PigCliHook
            return PigCliHook(pig_cli_conn_id=self.conn_id)
        elif self.conn_type == 'hive_cli':
            from airflow.providers.apache.hive.hooks.hive import HiveCliHook
            return HiveCliHook(hive_cli_conn_id=self.conn_id)
        elif self.conn_type == 'presto':
            from airflow.providers.presto.hooks.presto import PrestoHook
            return PrestoHook(presto_conn_id=self.conn_id)
        elif self.conn_type == 'hiveserver2':
            from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
            return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
        elif self.conn_type == 'sqlite':
            from airflow.providers.sqlite.hooks.sqlite import SqliteHook
            return SqliteHook(sqlite_conn_id=self.conn_id)
        elif self.conn_type == 'jdbc':
            from airflow.providers.jdbc.hooks.jdbc import JdbcHook
            return JdbcHook(jdbc_conn_id=self.conn_id)
        elif self.conn_type == 'mssql':
            from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
            return MsSqlHook(mssql_conn_id=self.conn_id)
        elif self.conn_type == 'odbc':
            from airflow.providers.odbc.hooks.odbc import OdbcHook
            return OdbcHook(odbc_conn_id=self.conn_id)
        elif self.conn_type == 'oracle':
            from airflow.providers.oracle.hooks.oracle import OracleHook
            return OracleHook(oracle_conn_id=self.conn_id)
        elif self.conn_type == 'vertica':
            from airflow.providers.vertica.hooks.vertica import VerticaHook
            return VerticaHook(vertica_conn_id=self.conn_id)
        elif self.conn_type == 'cloudant':
            from airflow.providers.cloudant.hooks.cloudant import CloudantHook
            return CloudantHook(cloudant_conn_id=self.conn_id)
        elif self.conn_type == 'jira':
            from airflow.providers.jira.hooks.jira import JiraHook
            return JiraHook(jira_conn_id=self.conn_id)
        elif self.conn_type == 'redis':
            from airflow.providers.redis.hooks.redis import RedisHook
            return RedisHook(redis_conn_id=self.conn_id)
        elif self.conn_type == 'wasb':
            from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
            return WasbHook(wasb_conn_id=self.conn_id)
        elif self.conn_type == 'docker':
            from airflow.providers.docker.hooks.docker import DockerHook
            return DockerHook(docker_conn_id=self.conn_id)
        elif self.conn_type == 'azure_data_lake':
            from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
            return AzureDataLakeHook(azure_data_lake_conn_id=self.conn_id)
        elif self.conn_type == 'azure_cosmos':
            from airflow.providers.microsoft.azure.hooks.azure_cosmos import AzureCosmosDBHook
            return AzureCosmosDBHook(azure_cosmos_conn_id=self.conn_id)
        elif self.conn_type == 'cassandra':
            from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
            return CassandraHook(cassandra_conn_id=self.conn_id)
        elif self.conn_type == 'mongo':
            from airflow.providers.mongo.hooks.mongo import MongoHook
            return MongoHook(conn_id=self.conn_id)
        elif self.conn_type == 'gcpcloudsql':
            from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLDatabaseHook
            return CloudSQLDatabaseHook(gcp_cloudsql_conn_id=self.conn_id)
        elif self.conn_type == 'grpc':
            from airflow.providers.grpc.hooks.grpc import GrpcHook
            return GrpcHook(grpc_conn_id=self.conn_id)
        raise AirflowException("Unknown hook type {}".format(self.conn_type))

    def __repr__(self):
        return self.conn_id

    def log_info(self):
        """Return a loggable one-line summary; password and extra are masked."""
        return ("id: {}. Host: {}, Port: {}, Schema: {}, "
                "Login: {}, Password: {}, extra: {}".
                format(self.conn_id,
                       self.host,
                       self.port,
                       self.schema,
                       self.login,
                       "XXXXXXXX" if self.password else None,
                       "XXXXXXXX" if self.extra_dejson else None))

    def debug_info(self):
        """Like :meth:`log_info`, but exposes the decoded extra payload
        (password remains masked) — for debugging only.
        """
        return ("id: {}. Host: {}, Port: {}, Schema: {}, "
                "Login: {}, Password: {}, extra: {}".
                format(self.conn_id,
                       self.host,
                       self.port,
                       self.schema,
                       self.login,
                       "XXXXXXXX" if self.password else None,
                       self.extra_dejson))

    @property
    def extra_dejson(self):
        """Returns the extra property by deserializing json."""
        obj = {}
        if self.extra:
            try:
                obj = json.loads(self.extra)
            except Exception as e:
                # Malformed JSON is logged and treated as an empty dict
                # rather than propagated to the caller.
                self.log.exception(e)
                self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
        return obj
| 40.72 | 97 | 0.613388 |
import json
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlparse
from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from airflow.exceptions import AirflowException
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.log.logging_mixin import LoggingMixin
def parse_netloc_to_hostname(uri_parts):
hostname = unquote(uri_parts.hostname or '')
if '/' in hostname:
hostname = uri_parts.netloc
if "@" in hostname:
hostname = hostname.rsplit("@", 1)[1]
if ":" in hostname:
hostname = hostname.split(":", 1)[0]
hostname = unquote(hostname)
return hostname
class Connection(Base, LoggingMixin):
__tablename__ = "connection"
id = Column(Integer(), primary_key=True)
conn_id = Column(String(ID_LEN))
conn_type = Column(String(500))
host = Column(String(500))
schema = Column(String(500))
login = Column(String(500))
_password = Column('password', String(5000))
port = Column(Integer())
is_encrypted = Column(Boolean, unique=False, default=False)
is_extra_encrypted = Column(Boolean, unique=False, default=False)
_extra = Column('extra', String(5000))
_types = [
('docker', 'Docker Registry'),
('fs', 'File (path)'),
('ftp', 'FTP'),
('google_cloud_platform', 'Google Cloud Platform'),
('hdfs', 'HDFS'),
('http', 'HTTP'),
('pig_cli', 'Pig Client Wrapper'),
('hive_cli', 'Hive Client Wrapper'),
('hive_metastore', 'Hive Metastore Thrift'),
('hiveserver2', 'Hive Server 2 Thrift'),
('jdbc', 'JDBC Connection'),
('odbc', 'ODBC Connection'),
('jenkins', 'Jenkins'),
('mysql', 'MySQL'),
('postgres', 'Postgres'),
('oracle', 'Oracle'),
('vertica', 'Vertica'),
('presto', 'Presto'),
('s3', 'S3'),
('samba', 'Samba'),
('sqlite', 'Sqlite'),
('ssh', 'SSH'),
('cloudant', 'IBM Cloudant'),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
('jira', 'JIRA'),
('redis', 'Redis'),
('wasb', 'Azure Blob Storage'),
('databricks', 'Databricks'),
('aws', 'Amazon Web Services'),
('emr', 'Elastic MapReduce'),
('snowflake', 'Snowflake'),
('segment', 'Segment'),
('sqoop', 'Sqoop'),
('azure_data_lake', 'Azure Data Lake'),
('azure_container_instances', 'Azure Container Instances'),
('azure_cosmos', 'Azure CosmosDB'),
('cassandra', 'Cassandra'),
('qubole', 'Qubole'),
('mongo', 'MongoDB'),
('gcpcloudsql', 'Google Cloud SQL'),
('grpc', 'GRPC Connection'),
]
def __init__(
self, conn_id=None, conn_type=None,
host=None, login=None, password=None,
schema=None, port=None, extra=None,
uri=None):
self.conn_id = conn_id
if uri:
self.parse_from_uri(uri)
else:
self.conn_type = conn_type
self.host = host
self.login = login
self.password = password
self.schema = schema
self.port = port
self.extra = extra
def parse_from_uri(self, uri):
uri_parts = urlparse(uri)
conn_type = uri_parts.scheme
if conn_type == 'postgresql':
conn_type = 'postgres'
elif '-' in conn_type:
conn_type = conn_type.replace('-', '_')
self.conn_type = conn_type
self.host = parse_netloc_to_hostname(uri_parts)
quoted_schema = uri_parts.path[1:]
self.schema = unquote(quoted_schema) if quoted_schema else quoted_schema
self.login = unquote(uri_parts.username) \
if uri_parts.username else uri_parts.username
self.password = unquote(uri_parts.password) \
if uri_parts.password else uri_parts.password
self.port = uri_parts.port
if uri_parts.query:
self.extra = json.dumps(dict(parse_qsl(uri_parts.query, keep_blank_values=True)))
def get_uri(self) -> str:
uri = '{}://'.format(str(self.conn_type).lower().replace('_', '-'))
authority_block = ''
if self.login is not None:
authority_block += quote(self.login, safe='')
if self.password is not None:
authority_block += ':' + quote(self.password, safe='')
if authority_block > '':
authority_block += '@'
uri += authority_block
host_block = ''
if self.host:
host_block += quote(self.host, safe='')
if self.port:
if host_block > '':
host_block += ':{}'.format(self.port)
else:
host_block += '@:{}'.format(self.port)
if self.schema:
host_block += '/{}'.format(quote(self.schema, safe=''))
uri += host_block
if self.extra_dejson:
uri += '?{}'.format(urlencode(self.extra_dejson))
return uri
def get_password(self):
if self._password and self.is_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
"Can't decrypt encrypted password for login={}, \
FERNET_KEY configuration is missing".format(self.login))
return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
else:
return self._password
def set_password(self, value):
if value:
fernet = get_fernet()
self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def password(cls):
return synonym('_password',
descriptor=property(cls.get_password, cls.set_password))
def get_extra(self):
if self._extra and self.is_extra_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
"Can't decrypt `extra` params for login={},\
FERNET_KEY configuration is missing".format(self.login))
return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
else:
return self._extra
def set_extra(self, value):
if value:
fernet = get_fernet()
self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_extra_encrypted = fernet.is_encrypted
else:
self._extra = value
self.is_extra_encrypted = False
@declared_attr
def extra(cls):
return synonym('_extra',
descriptor=property(cls.get_extra, cls.set_extra))
def rotate_fernet_key(self):
fernet = get_fernet()
if self._password and self.is_encrypted:
self._password = fernet.rotate(self._password.encode('utf-8')).decode()
if self._extra and self.is_extra_encrypted:
self._extra = fernet.rotate(self._extra.encode('utf-8')).decode()
def get_hook(self):
if self.conn_type == 'mysql':
from airflow.providers.mysql.hooks.mysql import MySqlHook
return MySqlHook(mysql_conn_id=self.conn_id)
elif self.conn_type == 'google_cloud_platform':
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
return BigQueryHook(bigquery_conn_id=self.conn_id)
elif self.conn_type == 'postgres':
from airflow.providers.postgres.hooks.postgres import PostgresHook
return PostgresHook(postgres_conn_id=self.conn_id)
elif self.conn_type == 'pig_cli':
from airflow.providers.apache.pig.hooks.pig import PigCliHook
return PigCliHook(pig_cli_conn_id=self.conn_id)
elif self.conn_type == 'hive_cli':
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
return HiveCliHook(hive_cli_conn_id=self.conn_id)
elif self.conn_type == 'presto':
from airflow.providers.presto.hooks.presto import PrestoHook
return PrestoHook(presto_conn_id=self.conn_id)
elif self.conn_type == 'hiveserver2':
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
elif self.conn_type == 'sqlite':
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
return SqliteHook(sqlite_conn_id=self.conn_id)
elif self.conn_type == 'jdbc':
from airflow.providers.jdbc.hooks.jdbc import JdbcHook
return JdbcHook(jdbc_conn_id=self.conn_id)
elif self.conn_type == 'mssql':
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
return MsSqlHook(mssql_conn_id=self.conn_id)
elif self.conn_type == 'odbc':
from airflow.providers.odbc.hooks.odbc import OdbcHook
return OdbcHook(odbc_conn_id=self.conn_id)
elif self.conn_type == 'oracle':
from airflow.providers.oracle.hooks.oracle import OracleHook
return OracleHook(oracle_conn_id=self.conn_id)
elif self.conn_type == 'vertica':
from airflow.providers.vertica.hooks.vertica import VerticaHook
return VerticaHook(vertica_conn_id=self.conn_id)
elif self.conn_type == 'cloudant':
from airflow.providers.cloudant.hooks.cloudant import CloudantHook
return CloudantHook(cloudant_conn_id=self.conn_id)
elif self.conn_type == 'jira':
from airflow.providers.jira.hooks.jira import JiraHook
return JiraHook(jira_conn_id=self.conn_id)
elif self.conn_type == 'redis':
from airflow.providers.redis.hooks.redis import RedisHook
return RedisHook(redis_conn_id=self.conn_id)
elif self.conn_type == 'wasb':
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
return WasbHook(wasb_conn_id=self.conn_id)
elif self.conn_type == 'docker':
from airflow.providers.docker.hooks.docker import DockerHook
return DockerHook(docker_conn_id=self.conn_id)
elif self.conn_type == 'azure_data_lake':
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
return AzureDataLakeHook(azure_data_lake_conn_id=self.conn_id)
elif self.conn_type == 'azure_cosmos':
from airflow.providers.microsoft.azure.hooks.azure_cosmos import AzureCosmosDBHook
return AzureCosmosDBHook(azure_cosmos_conn_id=self.conn_id)
elif self.conn_type == 'cassandra':
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
return CassandraHook(cassandra_conn_id=self.conn_id)
elif self.conn_type == 'mongo':
from airflow.providers.mongo.hooks.mongo import MongoHook
return MongoHook(conn_id=self.conn_id)
elif self.conn_type == 'gcpcloudsql':
from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLDatabaseHook
return CloudSQLDatabaseHook(gcp_cloudsql_conn_id=self.conn_id)
elif self.conn_type == 'grpc':
from airflow.providers.grpc.hooks.grpc import GrpcHook
return GrpcHook(grpc_conn_id=self.conn_id)
raise AirflowException("Unknown hook type {}".format(self.conn_type))
def __repr__(self):
return self.conn_id
def log_info(self):
return ("id: {}. Host: {}, Port: {}, Schema: {}, "
"Login: {}, Password: {}, extra: {}".
format(self.conn_id,
self.host,
self.port,
self.schema,
self.login,
"XXXXXXXX" if self.password else None,
"XXXXXXXX" if self.extra_dejson else None))
def debug_info(self):
return ("id: {}. Host: {}, Port: {}, Schema: {}, "
"Login: {}, Password: {}, extra: {}".
format(self.conn_id,
self.host,
self.port,
self.schema,
self.login,
"XXXXXXXX" if self.password else None,
self.extra_dejson))
@property
def extra_dejson(self):
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
self.log.exception(e)
self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
return obj
| true | true |
f71b5426abe6a21a2aafe3e9e5c4721319783d05 | 5,751 | py | Python | original_author_notes/yolo_video.py | adam-blinzler/simple-lane-detection | 8814e0aaf7ac56b7e5be59634e363ca17839effb | [
"MIT"
] | null | null | null | original_author_notes/yolo_video.py | adam-blinzler/simple-lane-detection | 8814e0aaf7ac56b7e5be59634e363ca17839effb | [
"MIT"
] | null | null | null | original_author_notes/yolo_video.py | adam-blinzler/simple-lane-detection | 8814e0aaf7ac56b7e5be59634e363ca17839effb | [
"MIT"
] | null | null | null | # USAGE
# python yolo_video.py --input videos/airport.mp4 --output output/airport_output.avi --object_detection object_detection-coco
# Runs YOLOv3 object detection on every frame of an input video and writes
# an annotated copy of the video to disk.

# import the necessary packages
import numpy as np
import argparse
import imutils
import time
import cv2
import os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
    help="path to input video")
ap.add_argument("-o", "--output", required=True,
    help="path to output video")
ap.add_argument("-y", "--object_detection", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
    help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["object_detection"], "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# one deterministic color per class label (seeded for reproducibility)
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["object_detection"], "yolov3.weights"])
configPath = os.path.sep.join([args["object_detection"], "yolov3.cfg"])

# load the YOLO object detector trained on the COCO dataset (80 classes)
# and determine only the *output* layer names that we need from YOLO
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns [[i], ...] on OpenCV 3.x but a flat
# [i, ...] on OpenCV >= 4.5.4; flattening handles both layouts
ln = [ln[int(i) - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)

# try to determine the total number of frames in the video file
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))

# the frame count is best-effort only; fall back to "unknown" on failure
# (explicit Exception instead of a bare except, which also swallowed
# KeyboardInterrupt/SystemExit)
except Exception:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1

# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()

    # if the frame was not grabbed, we have reached the end of the stream
    if not grabbed:
        break

    # if the frame dimensions are empty, grab them
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # detected bounding boxes, confidences, and class IDs for this frame
    boxes = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter out weak predictions
            if confidence > args["confidence"]:
                # scale the bounding box coordinates back relative to the
                # image size; YOLO returns the center (x, y)-coordinates
                # of the box followed by its width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # derive the top-left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # record the detection
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # apply non-maxima suppression to discard weak, overlapping boxes;
    # guard the empty case because some OpenCV builds raise on empty input
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
        args["threshold"]) if boxes else []

    # ensure at least one detection survived suppression
    if len(idxs) > 0:
        # np.array(...).flatten() copes with both the old [[i], ...]
        # and the new flat index layouts returned by NMSBoxes
        for i in np.array(idxs).flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            # draw a bounding box rectangle and label on the frame
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]],
                confidences[i])
            cv2.putText(frame, text, (x, y - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # lazily initialize the video writer on the first processed frame
    if writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)

        # print an ETA based on the timing of this first frame
        if total > 0:
            elap = (end - start)
            print("[INFO] single frame took {:.4f} seconds".format(elap))
            print("[INFO] estimated total time to finish: {:.4f}".format(
                elap * total))

    # write the output frame to disk
    writer.write(frame)

# release the file pointers
print("[INFO] cleaning up...")
if writer is not None:
    # the writer is never created when the input has no readable frames;
    # the unconditional release() used to crash with AttributeError
    writer.release()
vs.release()
# YOLOv3 video object detection: annotate every frame of an input video
# and write the result to disk.
import numpy as np
import argparse
import imutils
import time
import cv2
import os

# command-line interface
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
    help="path to input video")
ap.add_argument("-o", "--output", required=True,
    help="path to output video")
ap.add_argument("-y", "--object_detection", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
    help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())

# COCO class labels and a deterministic color per class
labelsPath = os.path.sep.join([args["object_detection"], "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# model files and network construction
weightsPath = os.path.sep.join([args["object_detection"], "yolov3.weights"])
configPath = os.path.sep.join([args["object_detection"], "yolov3.cfg"])
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns [[i], ...] on OpenCV 3.x but a flat
# [i, ...] on OpenCV >= 4.5.4; flattening handles both layouts
ln = [ln[int(i) - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

# video input, lazily-created output writer, frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)

# best-effort frame count (for the ETA printout only); explicit Exception
# instead of a bare except, which also swallowed KeyboardInterrupt
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))
except Exception:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1

# per-frame processing loop
while True:
    (grabbed, frame) = vs.read()

    # end of stream
    if not grabbed:
        break

    # cache frame dimensions on first use
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # forward pass of the detector on this frame
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # collect detections above the confidence threshold
    boxes = []
    confidences = []
    classIDs = []
    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            if confidence > args["confidence"]:
                # YOLO emits center x/y plus width/height, normalized;
                # scale back to pixels and derive the top-left corner
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # non-maxima suppression; guard the empty case because some OpenCV
    # builds raise on empty input
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
        args["threshold"]) if boxes else []

    if len(idxs) > 0:
        # np.array(...).flatten() copes with both old and new NMSBoxes
        # index layouts
        for i in np.array(idxs).flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]],
                confidences[i])
            cv2.putText(frame, text, (x, y - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # create the writer on the first frame and print a one-off ETA
    if writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)
        if total > 0:
            elap = (end - start)
            print("[INFO] single frame took {:.4f} seconds".format(elap))
            print("[INFO] estimated total time to finish: {:.4f}".format(
                elap * total))

    writer.write(frame)

# cleanup; the writer is never created when the input has no readable
# frames, so guard the release (the unconditional call used to crash)
print("[INFO] cleaning up...")
if writer is not None:
    writer.release()
vs.release()
f71b5468bb09f935a4b8dd8609a936248498eb63 | 7,498 | py | Python | tessia/server/api/views/auth.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | 5 | 2020-06-04T10:20:33.000Z | 2020-10-26T15:09:19.000Z | tessia/server/api/views/auth.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | tessia/server/api/views/auth.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Authentication routines
"""
#
# IMPORTS
#
from base64 import b64decode
from flask import g as flask_global
from flask import request as flask_request
from sqlalchemy.sql import func
from tessia.server import auth
from tessia.server.api.db import API_DB
from tessia.server.api.exceptions import UnauthorizedError
from tessia.server.config import CONF
from tessia.server.db.models import User
from tessia.server.db.models import UserKey
# use the exception directly so that potion custom error handler can catch
# it and convert to a valid json response
from werkzeug.exceptions import BadRequest
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class _LoginManager:
    """Authenticates API requests.

    Basic credentials are validated against the configured login manager
    (see the auth module); API keys are validated against the database.
    """

    # cached login manager instance, created on first use
    _manager = None

    @classmethod
    def get_login_manager(cls):
        """
        Return the login manager object, as defined by the auth module.
        """
        if cls._manager is None:
            cls._manager = auth.get_manager()
        return cls._manager
    # get_login_manager()

    @classmethod
    def authenticate_basic(cls, auth_value):
        """
        Basic authentication with username and password, validated against
        the login manager defined in the configuration file (usually LDAP).

        Args:
            auth_value (str): the value part of the Authorization header in
                              the form username:password, base64 encoded

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if the credentials are invalid

        Returns:
            User: instance of User's sqlalchemy model
        """
        try:
            # http headers are always ascii
            user, passwd = b64decode(auth_value).decode(
                'ascii').split(':', 1)
        except Exception:
            raise BadRequest()
        case_sensitive = CONF.get_config().get(
            'auth', {}).get('case_sensitive', False)
        if not case_sensitive:
            # logins should be case-insensitive
            user = user.lower()
        # user authentication with login provider failed: return unauthorized
        result = cls.get_login_manager().authenticate(user, passwd)
        if result is None:
            raise UnauthorizedError()
        # find user entry in database
        user_entry = User.query.filter_by(login=user).first()
        if user_entry is not None:
            # update db in case user information has changed upstream
            changed = False
            if user_entry.name != result['fullname']:
                changed = True
                user_entry.name = result['fullname']
            if user_entry.title != result.get('title', None):
                changed = True
                user_entry.title = result.get('title', None)
            if changed:
                API_DB.db.session.add(user_entry)
                API_DB.db.session.commit()
            return user_entry
        allow_auto_create = CONF.get_config().get(
            'auth', {}).get('allow_user_auto_create', False)
        # auto creation of users not allowed: report unauthorized
        if not allow_auto_create:
            raise UnauthorizedError(
                msg='User authenticated but not registered in database')
        # create user in database
        new_user = User()
        if case_sensitive:
            new_user.login = result['login']
        else:
            # save login as lowercase to avoid duplicates or user having to
            # worry about entering the right case
            new_user.login = result['login'].lower()
        new_user.name = result['fullname']
        # job title is optional
        new_user.title = result.get('title', None)
        new_user.restricted = False
        new_user.admin = False
        API_DB.db.session.add(new_user)
        API_DB.db.session.commit()
        return new_user
    # authenticate_basic()

    @classmethod
    def authenticate_key(cls, auth_value):
        """
        API key-based authentication.

        Args:
            auth_value (str): the value part of the Authorization header in
                              the form key_id:key_value

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if the credentials are invalid

        Returns:
            User: instance of User's sqlalchemy model
        """
        try:
            # http headers are always ascii
            key_id, key_secret = auth_value.split(':', 1)
        except Exception:
            raise BadRequest()
        key_entry = UserKey.query.filter_by(
            key_id=key_id, key_secret=key_secret).first()
        if key_entry is None:
            raise UnauthorizedError()
        # record usage time of the key
        key_entry.last_used = func.now()
        API_DB.db.session.add(key_entry)
        API_DB.db.session.commit()
        return key_entry.user_rel
    # authenticate_key()
# _LoginManager
def authorize(decorated_view):
    """
    A decorator view which implements the authorization routine.

    Args:
        decorated_view (method): the view function to be decorated

    Returns:
        method: the authenticate wrapper containing the original view
    """
    def authenticate(*args, **kwargs):
        """
        Take the authorization related actions, then execute the view.

        Raises:
            UnauthorizedError: if the auth header is missing or invalid
        """
        # No credentials provided: reply that authorization is needed.
        # The exception advertises the allowed scheme via the
        # WWW-Authenticate response header.
        header = flask_request.headers.get('Authorization', None)
        if not header:
            raise UnauthorizedError(auth_provided=False)
        try:
            scheme, value = header.split(None, 1)
        except ValueError:
            raise UnauthorizedError()
        scheme = scheme.lower()

        # dispatch on the (case-insensitive) authorization scheme
        handlers = {
            'basic': _LoginManager.authenticate_basic,
            'x-key': _LoginManager.authenticate_key,
        }
        try:
            handler = handlers[scheme]
        except KeyError:
            # scheme not supported
            raise UnauthorizedError()
        user_entry = handler(value)

        # expose the authenticated user and the method used as session
        # variables; the method matters for operations that require a
        # password-based (basic) login, e.g. api key management
        flask_global.auth_user = user_entry  # pylint: disable=assigning-non-slot
        flask_global.auth_method = scheme  # pylint: disable=assigning-non-slot
        return decorated_view(*args, **kwargs)
    # authenticate()

    return authenticate
# authorize()
| 31.771186 | 83 | 0.634169 |
from base64 import b64decode
from flask import g as flask_global
from flask import request as flask_request
from sqlalchemy.sql import func
from tessia.server import auth
from tessia.server.api.db import API_DB
from tessia.server.api.exceptions import UnauthorizedError
from tessia.server.config import CONF
from tessia.server.db.models import User
from tessia.server.db.models import UserKey
from werkzeug.exceptions import BadRequest
class _LoginManager:
    """Authenticates API requests.

    Basic credentials are validated against the configured login manager
    (see the auth module); API keys are validated against the database.
    """

    # cached login manager instance, created on first use
    _manager = None

    @classmethod
    def get_login_manager(cls):
        """Return the login manager object, as defined by the auth module."""
        if cls._manager is None:
            cls._manager = auth.get_manager()
        return cls._manager

    @classmethod
    def authenticate_basic(cls, auth_value):
        """
        Basic authentication with username and password, validated against
        the login manager defined in the configuration file (usually LDAP).

        Args:
            auth_value (str): the value part of the Authorization header in
                              the form username:password, base64 encoded

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if the credentials are invalid

        Returns:
            User: instance of User's sqlalchemy model
        """
        try:
            # http headers are always ascii
            user, passwd = b64decode(auth_value).decode(
                'ascii').split(':', 1)
        except Exception:
            raise BadRequest()
        case_sensitive = CONF.get_config().get(
            'auth', {}).get('case_sensitive', False)
        if not case_sensitive:
            # logins should be case-insensitive
            user = user.lower()
        # authentication with the login provider failed: unauthorized
        result = cls.get_login_manager().authenticate(user, passwd)
        if result is None:
            raise UnauthorizedError()
        # find user entry in database
        user_entry = User.query.filter_by(login=user).first()
        if user_entry is not None:
            # update db in case user information has changed upstream
            changed = False
            if user_entry.name != result['fullname']:
                changed = True
                user_entry.name = result['fullname']
            if user_entry.title != result.get('title', None):
                changed = True
                user_entry.title = result.get('title', None)
            if changed:
                API_DB.db.session.add(user_entry)
                API_DB.db.session.commit()
            return user_entry
        allow_auto_create = CONF.get_config().get(
            'auth', {}).get('allow_user_auto_create', False)
        # auto creation of users not allowed: report unauthorized
        if not allow_auto_create:
            raise UnauthorizedError(
                msg='User authenticated but not registered in database')
        # create the user in the database
        new_user = User()
        if case_sensitive:
            new_user.login = result['login']
        else:
            # save login as lowercase to avoid duplicates
            new_user.login = result['login'].lower()
        new_user.name = result['fullname']
        # job title is optional
        new_user.title = result.get('title', None)
        new_user.restricted = False
        new_user.admin = False
        API_DB.db.session.add(new_user)
        API_DB.db.session.commit()
        return new_user

    @classmethod
    def authenticate_key(cls, auth_value):
        """
        API key-based authentication.

        Args:
            auth_value (str): the value part of the Authorization header in
                              the form key_id:key_value

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if the credentials are invalid

        Returns:
            User: instance of User's sqlalchemy model
        """
        try:
            # http headers are always ascii
            key_id, key_secret = auth_value.split(':', 1)
        except Exception:
            raise BadRequest()
        key_entry = UserKey.query.filter_by(
            key_id=key_id, key_secret=key_secret).first()
        if key_entry is None:
            raise UnauthorizedError()
        # record usage time of the key
        key_entry.last_used = func.now()
        API_DB.db.session.add(key_entry)
        API_DB.db.session.commit()
        return key_entry.user_rel
def authorize(decorated_view):
    """
    A decorator view which implements the authorization routine.

    Args:
        decorated_view (method): the view function to be decorated

    Returns:
        method: the authenticate wrapper containing the original view
    """
    def authenticate(*args, **kwargs):
        """Authenticate the request, then execute the wrapped view.

        Raises:
            UnauthorizedError: if the auth header is missing or invalid
        """
        # no credentials provided: reply that authorization is needed
        auth_header = flask_request.headers.get('Authorization', None)
        if not auth_header:
            raise UnauthorizedError(auth_provided=False)
        try:
            auth_scheme, auth_value = auth_header.split(None, 1)
        except ValueError:
            raise UnauthorizedError()
        auth_scheme = auth_scheme.lower()
        # dispatch on the (case-insensitive) authorization scheme
        if auth_scheme == 'basic':
            user_entry = _LoginManager.authenticate_basic(auth_value)
        elif auth_scheme == 'x-key':
            user_entry = _LoginManager.authenticate_key(auth_value)
        else:
            # scheme not supported
            raise UnauthorizedError()
        # expose the authenticated user and the method used as session
        # variables; some operations require a password-based (basic) login
        flask_global.auth_user = user_entry
        flask_global.auth_method = auth_scheme
        return decorated_view(*args, **kwargs)
    return authenticate
| true | true |
f71b54b467c00b5f3318a9985c87b77ec8bf71f6 | 853 | py | Python | med2img/tests/test_med2img.py | mohitchandarana/pl-med2img | c8c9df64a5e244d57e2fdb9ffadeea3c455eab23 | [
"MIT"
] | 4 | 2017-07-06T20:32:08.000Z | 2021-09-18T02:46:55.000Z | med2img/tests/test_med2img.py | mohitchandarana/pl-med2img | c8c9df64a5e244d57e2fdb9ffadeea3c455eab23 | [
"MIT"
] | 1 | 2021-11-07T20:17:16.000Z | 2021-11-08T20:11:48.000Z | med2img/tests/test_med2img.py | mohitchandarana/pl-med2img | c8c9df64a5e244d57e2fdb9ffadeea3c455eab23 | [
"MIT"
] | 3 | 2020-09-01T07:13:19.000Z | 2021-03-25T16:06:39.000Z |
from unittest import TestCase
from unittest import mock
from med2img.med2img import Med2img
class Med2imgTests(TestCase):
    """Unit tests for the Med2img plugin app."""

    def setUp(self):
        """Create a fresh app instance for each test."""
        self.app = Med2img()

    def test_run(self):
        """Run the app end to end and check the parsed output directory."""
        cli_args = []
        if self.app.TYPE == 'ds':
            # DS-type plugins take positional input and output directories;
            # these are placeholder paths and can be adjusted as needed.
            cli_args.extend(['inputdir', 'outputdir'])
        # Further custom optional arguments could be appended here, e.g.
        # cli_args.extend(['--custom-int', 10]).
        options = self.app.parse_args(cli_args)
        self.app.run(options)
        # parse_args must have captured the positional outputdir
        self.assertEqual(options.outputdir, 'outputdir')
from unittest import TestCase
from unittest import mock
from med2img.med2img import Med2img
class Med2imgTests(TestCase):
    """Unit tests for the Med2img plugin app."""

    def setUp(self):
        # fresh app instance per test
        self.app = Med2img()

    def test_run(self):
        """Run the app end to end and check the parsed output directory."""
        args = []
        if self.app.TYPE == 'ds':
            # DS-type plugins take positional input and output directories
            args.append('inputdir')
            args.append('outputdir')
        options = self.app.parse_args(args)
        self.app.run(options)
        # parse_args must have captured the positional outputdir
        self.assertEqual(options.outputdir, 'outputdir')
| true | true |
f71b555a4703e5eb655f1c62d6c59060c0f772cf | 6,488 | py | Python | MPL/MPL_envs/reach/reach_v0.py | vikashplus/MPL | 4a784fd94dc7a5988a1eca85851ee546ca1992f9 | [
"Apache-2.0"
] | null | null | null | MPL/MPL_envs/reach/reach_v0.py | vikashplus/MPL | 4a784fd94dc7a5988a1eca85851ee546ca1992f9 | [
"Apache-2.0"
] | null | null | null | MPL/MPL_envs/reach/reach_v0.py | vikashplus/MPL | 4a784fd94dc7a5988a1eca85851ee546ca1992f9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from gym import utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from MPL.MPL_robot.robot import Robot
import os
# TODO: Action normalization is missing
class sallyReachEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Bimanual reaching task for the MPL robot.

    Each episode, the right and left grasp sites must be brought to their
    respective target sites; the reward is driven by the average of the
    two grasp-to-target distances, plus proximity bonuses.
    """

    def __init__(self, noise_scale=0.0):
        """Load the model, wire up the robot interface and cache site ids.

        Args:
            noise_scale (float): scale of the noise applied to sensor
                readings in get_obs().
        """
        # prep
        utils.EzPickle.__init__(self)
        self._noise_scale = noise_scale
        self.initializing = True
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        # placeholder site ids; resolved after the model is loaded below
        self.Rtarget = 0
        self.Ltarget = 0
        self.Rgrasp = 0
        self.Lgrasp = 0
        # acquire robot
        self.mpl = Robot(name='sallyReach', model_file=curr_dir+'/reach_v0.xml', config_file=curr_dir+'/reach_v0.config')
        # acquire env
        mujoco_env.MujocoEnv.__init__(self, curr_dir+'/reach_v0.xml', 20)
        self.Rtarget = self.sim.model.site_name2id('Rtarget')
        self.Ltarget = self.sim.model.site_name2id('Ltarget')
        self.Rgrasp = self.sim.model.site_name2id('Rgrasp')
        self.Lgrasp = self.sim.model.site_name2id('Lgrasp')
        # env ready
        self.initializing = False

    def step(self, a):
        """Advance the simulation one control step.

        Returns the standard gym tuple (obs, reward, done, info).
        """
        self.mpl.step(self, a, self.frame_skip*self.sim.model.opt.timestep)
        obs = self.get_obs()
        score, reward_dict, solved, done = self._get_score_reward_solved_done(self.obs_dict)
        # finalize step
        env_info = {
            'time': self.obs_dict['t'],
            'obs_dict': self.obs_dict,
            'rewards': reward_dict,
            'score': score,
            'solved': solved
        }
        return obs, reward_dict['total'], done, env_info

    # query robot and populate observations
    def get_obs(self):
        """Build the observation vector from the robot sensors.

        Also refreshes self.obs_dict with the raw sensor values and the
        right/left grasp-to-target error vectors.
        """
        # ask robot for sensor data
        sen = self.mpl.get_sensors(self, noise_scale=self._noise_scale)
        # parse sensor data into obs dict
        self.obs_dict = {}
        self.obs_dict['t'] = sen['time']
        self.obs_dict['Tmpl_pos'] = sen['Tmpl_pos']
        self.obs_dict['Rmpl_pos'] = sen['Rmpl_pos']
        self.obs_dict['Lmpl_pos'] = sen['Lmpl_pos']
        self.obs_dict['Tmpl_vel'] = sen['Tmpl_vel']
        self.obs_dict['Rmpl_vel'] = sen['Rmpl_vel']
        self.obs_dict['Lmpl_vel'] = sen['Lmpl_vel']
        self.obs_dict['Rerr'] = self.sim.data.site_xpos[self.Rtarget]-self.sim.data.site_xpos[self.Rgrasp]
        self.obs_dict['Lerr'] = self.sim.data.site_xpos[self.Ltarget]-self.sim.data.site_xpos[self.Lgrasp]
        # vectorize observations
        return np.concatenate([
            self.obs_dict['Tmpl_pos'],
            self.obs_dict['Rmpl_pos'],
            self.obs_dict['Lmpl_pos'],
            self.obs_dict['Tmpl_vel'],
            self.obs_dict['Rmpl_vel'],
            self.obs_dict['Lmpl_vel'],
            self.obs_dict['Lerr'],
            self.obs_dict['Rerr']])

    # evaluate observations
    def _get_score_reward_solved_done(self, obs, act=None):
        """Score an observation dict.

        Returns:
            tuple: (score, reward_dict, solved, done) — note FOUR values.
        """
        Ldist = np.linalg.norm(obs['Lerr'])
        Rdist = np.linalg.norm(obs['Rerr'])
        # terminate early if either hand strays too far from its target
        done = (bool( Ldist > 1.0) or bool(Rdist>1.0)) \
            if not self.initializing else False
        reward_dict = {}
        avg_dist = (Ldist+Rdist)/2.0
        score = -1.* avg_dist
        reward_dict["avg_dist"] = score
        reward_dict["small_bonus"] = 2.0*(Ldist<.1) + 2.0*(Rdist<.1)
        reward_dict["big_bonus"] = 2.0*(Ldist<.1) * 2.0*(Rdist<.1)
        reward_dict["total"] = reward_dict["avg_dist"] + reward_dict["small_bonus"] + reward_dict["big_bonus"] - 50.0 * int(done)
        solved = bool(avg_dist<0.100)
        return score, reward_dict, solved, done

    # reset model
    def reset_model(self):
        """Subclasses place the targets and reset the simulation state."""
        raise NotImplementedError # for child class to define

    # evaluate a path
    def compute_path_rewards(self, paths):
        """Recompute rewards for a batch of paths, in place.

        path["observations"] : (num_traj, horizon, obs_dim)
        path["rewards"] gets shape (num_traj, horizon)
        """
        obs = paths["observations"]
        # BUG FIX: the callee returns four values (score, rewards, solved,
        # done); the previous three-target unpack raised ValueError on
        # every call.
        score, reward_dict, solved, done = self._get_score_reward_solved_done(obs)
        # NOTE(review): 'total' holds the per-step reward; this assumes obs
        # behaves like a dict of stacked arrays — confirm with callers.
        rewards = reward_dict["total"]
        paths["rewards"] = rewards if rewards.shape[0] > 1 else rewards.ravel()

    # evaluate policy's success from a collection of paths
    def evaluate_success(self, paths, logger=None):
        """Percentage of paths that end solved (averaged over last 4 steps).

        Logs the value when a logger is given, otherwise returns it.
        """
        success = 0.0
        for p in paths:
            if np.mean(p['env_infos']['solved'][-4:]) > 0.0:
                success += 1.0
        success_rate = 100.0*success/len(paths)
        if logger is None:
            # nowhere to log so return the value
            return success_rate
        else:
            # log the success; multiple statistics could be logged here
            logger.log_kv('success_rate', success_rate)
            return None

    # --------------------------------
    # get and set states
    # --------------------------------
    def get_env_state(self):
        """Snapshot qpos/qvel for later restoration."""
        return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy())

    def set_env_state(self, state):
        """Restore a snapshot produced by get_env_state()."""
        self.sim.reset()
        qp = state['qp'].copy()
        qv = state['qv'].copy()
        self.set_state(qp, qv)
        self.sim.forward()

    # --------------------------------
    # utility functions
    # --------------------------------
    def get_env_infos(self):
        """Return auxiliary info (the full env state) for loggers."""
        return dict(state=self.get_env_state())

    def mj_viewer_setup(self):
        """Configure the interactive MuJoCo viewer camera."""
        self.viewer = MjViewer(self.sim)
        self.viewer.cam.azimuth = -90
        self.viewer.cam.distance = 2.5
        self.viewer.cam.elevation = -30
        self.sim.forward()

    def close_env(self):
        pass
# Reach at fixed targets
class sallyReachEnvFixed(sallyReachEnv):
    """Reach task variant with fixed target positions."""
    def __init__(self):
        super().__init__()
    def reset_model(self):
        # place the right/left hand targets at fixed workspace positions
        self.sim.model.site_pos[self.Rtarget] = np.array([0.15, 0.2, .6])
        self.sim.model.site_pos[self.Ltarget] = np.array([-0.15, 0.2, .3])
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
# Reach at random targets
class sallyReachEnvRandom(sallyReachEnv):
    """Reach task variant with targets sampled uniformly each reset."""
    def __init__(self):
        super().__init__()
    def reset_model(self):
        # sample each target inside its hand's workspace box
        self.sim.model.site_pos[self.Rtarget] = self.np_random.uniform(high=[0.5, .5, .6], low=[0, .1, .3])
        self.sim.model.site_pos[self.Ltarget] = self.np_random.uniform(high=[0, .5, .6], low=[-.5, .1, .3])
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
| 33.968586 | 130 | 0.594945 | import numpy as np
from gym import utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from MPL.MPL_robot.robot import Robot
import os
class sallyReachEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Bimanual reaching task for the MPL robot.

    Each episode, the right and left grasp sites must be brought to their
    respective target sites; the reward is driven by the average of the
    two grasp-to-target distances, plus proximity bonuses.
    """

    def __init__(self, noise_scale=0.0):
        """Load the model, wire up the robot interface and cache site ids.

        Args:
            noise_scale (float): scale of the noise applied to sensor
                readings in get_obs().
        """
        utils.EzPickle.__init__(self)
        self._noise_scale = noise_scale
        self.initializing = True
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        # placeholder site ids; resolved after the model is loaded below
        self.Rtarget = 0
        self.Ltarget = 0
        self.Rgrasp = 0
        self.Lgrasp = 0
        self.mpl = Robot(name='sallyReach', model_file=curr_dir+'/reach_v0.xml', config_file=curr_dir+'/reach_v0.config')
        mujoco_env.MujocoEnv.__init__(self, curr_dir+'/reach_v0.xml', 20)
        self.Rtarget = self.sim.model.site_name2id('Rtarget')
        self.Ltarget = self.sim.model.site_name2id('Ltarget')
        self.Rgrasp = self.sim.model.site_name2id('Rgrasp')
        self.Lgrasp = self.sim.model.site_name2id('Lgrasp')
        self.initializing = False

    def step(self, a):
        """Advance the simulation one control step; return the gym tuple."""
        self.mpl.step(self, a, self.frame_skip*self.sim.model.opt.timestep)
        obs = self.get_obs()
        score, reward_dict, solved, done = self._get_score_reward_solved_done(self.obs_dict)
        env_info = {
            'time': self.obs_dict['t'],
            'obs_dict': self.obs_dict,
            'rewards': reward_dict,
            'score': score,
            'solved': solved
        }
        return obs, reward_dict['total'], done, env_info

    def get_obs(self):
        """Build the observation vector from the robot sensors.

        Also refreshes self.obs_dict with the raw sensor values and the
        right/left grasp-to-target error vectors.
        """
        sen = self.mpl.get_sensors(self, noise_scale=self._noise_scale)
        self.obs_dict = {}
        self.obs_dict['t'] = sen['time']
        self.obs_dict['Tmpl_pos'] = sen['Tmpl_pos']
        self.obs_dict['Rmpl_pos'] = sen['Rmpl_pos']
        self.obs_dict['Lmpl_pos'] = sen['Lmpl_pos']
        self.obs_dict['Tmpl_vel'] = sen['Tmpl_vel']
        self.obs_dict['Rmpl_vel'] = sen['Rmpl_vel']
        self.obs_dict['Lmpl_vel'] = sen['Lmpl_vel']
        self.obs_dict['Rerr'] = self.sim.data.site_xpos[self.Rtarget]-self.sim.data.site_xpos[self.Rgrasp]
        self.obs_dict['Lerr'] = self.sim.data.site_xpos[self.Ltarget]-self.sim.data.site_xpos[self.Lgrasp]
        return np.concatenate([
            self.obs_dict['Tmpl_pos'],
            self.obs_dict['Rmpl_pos'],
            self.obs_dict['Lmpl_pos'],
            self.obs_dict['Tmpl_vel'],
            self.obs_dict['Rmpl_vel'],
            self.obs_dict['Lmpl_vel'],
            self.obs_dict['Lerr'],
            self.obs_dict['Rerr']])

    def _get_score_reward_solved_done(self, obs, act=None):
        """Score an observation dict.

        Returns:
            tuple: (score, reward_dict, solved, done) — note FOUR values.
        """
        Ldist = np.linalg.norm(obs['Lerr'])
        Rdist = np.linalg.norm(obs['Rerr'])
        # terminate early if either hand strays too far from its target
        done = (bool( Ldist > 1.0) or bool(Rdist>1.0)) \
            if not self.initializing else False
        reward_dict = {}
        avg_dist = (Ldist+Rdist)/2.0
        score = -1.* avg_dist
        reward_dict["avg_dist"] = score
        reward_dict["small_bonus"] = 2.0*(Ldist<.1) + 2.0*(Rdist<.1)
        reward_dict["big_bonus"] = 2.0*(Ldist<.1) * 2.0*(Rdist<.1)
        reward_dict["total"] = reward_dict["avg_dist"] + reward_dict["small_bonus"] + reward_dict["big_bonus"] - 50.0 * int(done)
        solved = bool(avg_dist<0.100)
        return score, reward_dict, solved, done

    def reset_model(self):
        """Subclasses place the targets and reset the simulation state."""
        raise NotImplementedError

    def compute_path_rewards(self, paths):
        """Recompute rewards for a batch of paths, in place.

        path["observations"] : (num_traj, horizon, obs_dim)
        path["rewards"] gets shape (num_traj, horizon)
        """
        obs = paths["observations"]
        # BUG FIX: the callee returns four values (score, rewards, solved,
        # done); the previous three-target unpack raised ValueError on
        # every call.
        score, reward_dict, solved, done = self._get_score_reward_solved_done(obs)
        # NOTE(review): 'total' holds the per-step reward; this assumes obs
        # behaves like a dict of stacked arrays — confirm with callers.
        rewards = reward_dict["total"]
        paths["rewards"] = rewards if rewards.shape[0] > 1 else rewards.ravel()

    def evaluate_success(self, paths, logger=None):
        """Percentage of paths that end solved (averaged over last 4 steps).

        Logs the value when a logger is given, otherwise returns it.
        """
        success = 0.0
        for p in paths:
            if np.mean(p['env_infos']['solved'][-4:]) > 0.0:
                success += 1.0
        success_rate = 100.0*success/len(paths)
        if logger is None:
            # nowhere to log so return the value
            return success_rate
        else:
            # log the success; multiple statistics could be logged here
            logger.log_kv('success_rate', success_rate)
            return None

    # --------------------------------
    # get and set states
    # --------------------------------
    def get_env_state(self):
        """Snapshot qpos/qvel for later restoration."""
        return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy())

    def set_env_state(self, state):
        """Restore a snapshot produced by get_env_state()."""
        self.sim.reset()
        qp = state['qp'].copy()
        qv = state['qv'].copy()
        self.set_state(qp, qv)
        self.sim.forward()

    # --------------------------------
    # utility functions
    # --------------------------------
    def get_env_infos(self):
        """Return auxiliary info (the full env state) for loggers."""
        return dict(state=self.get_env_state())

    def mj_viewer_setup(self):
        """Configure the interactive MuJoCo viewer camera."""
        self.viewer = MjViewer(self.sim)
        self.viewer.cam.azimuth = -90
        self.viewer.cam.distance = 2.5
        self.viewer.cam.elevation = -30
        self.sim.forward()

    def close_env(self):
        pass
# Reach at fixed targets
class sallyReachEnvFixed(sallyReachEnv):
    """Reach task variant with fixed target positions."""
    def __init__(self):
        super().__init__()
    def reset_model(self):
        # place the right/left hand targets at fixed workspace positions
        self.sim.model.site_pos[self.Rtarget] = np.array([0.15, 0.2, .6])
        self.sim.model.site_pos[self.Ltarget] = np.array([-0.15, 0.2, .3])
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
# Reach at random targets
class sallyReachEnvRandom(sallyReachEnv):
    """Reach task variant with targets sampled uniformly each reset."""
    def __init__(self):
        super().__init__()
    def reset_model(self):
        # sample each target inside its hand's workspace box
        self.sim.model.site_pos[self.Rtarget] = self.np_random.uniform(high=[0.5, .5, .6], low=[0, .1, .3])
        self.sim.model.site_pos[self.Ltarget] = self.np_random.uniform(high=[0, .5, .6], low=[-.5, .1, .3])
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
| true | true |
f71b560fdbc2811c07e38208065e693e4befa940 | 40,786 | py | Python | metaflow/plugins/aws/step_functions/step_functions.py | cclauss/metaflow | 5186c6c5bba36d9e77077413ee2495dc79da3dca | [
"Apache-2.0"
] | 1 | 2021-11-29T22:37:54.000Z | 2021-11-29T22:37:54.000Z | metaflow/plugins/aws/step_functions/step_functions.py | sappier/metaflow | dfe1a216e342555d5fb127607b35491a4ef11627 | [
"Apache-2.0"
] | null | null | null | metaflow/plugins/aws/step_functions/step_functions.py | sappier/metaflow | dfe1a216e342555d5fb127607b35491a4ef11627 | [
"Apache-2.0"
] | 1 | 2021-11-29T22:37:50.000Z | 2021-11-29T22:37:50.000Z | import os
from collections import defaultdict
import sys
import hashlib
import json
import time
import string
import random
import uuid
from metaflow.exception import MetaflowException, MetaflowInternalError
from metaflow.plugins import ResourcesDecorator, BatchDecorator, RetryDecorator
from metaflow.parameters import deploy_time_eval
from metaflow.decorators import flow_decorators
from metaflow.util import compress_list, dict_to_cli_options, to_pascalcase
from metaflow.metaflow_config import (
SFN_IAM_ROLE,
EVENTS_SFN_ACCESS_IAM_ROLE,
SFN_DYNAMO_DB_TABLE,
SFN_EXECUTION_LOG_GROUP_ARN,
)
from metaflow import R
from .step_functions_client import StepFunctionsClient
from .event_bridge_client import EventBridgeClient
from ..batch.batch import Batch
class StepFunctionsException(MetaflowException):
    """Raised when deploying, querying or triggering a workflow on AWS
    Step Functions fails."""

    headline = "AWS Step Functions error"
class StepFunctionsSchedulingException(MetaflowException):
    """Raised when attaching an AWS EventBridge schedule to the deployed
    state machine fails."""

    headline = "AWS Step Functions scheduling error"
class StepFunctions(object):
def __init__(
    self,
    name,
    graph,
    flow,
    code_package_sha,
    code_package_url,
    production_token,
    metadata,
    flow_datastore,
    environment,
    event_logger,
    monitor,
    tags=None,
    namespace=None,
    username=None,
    max_workers=None,
    workflow_timeout=None,
    is_project=False,
):
    """Compile the Metaflow *graph* into an AWS Step Functions workflow.

    All arguments are stored on the instance and the state machine
    definition is built eagerly (via ``_compile``) at construction time.
    ``is_project`` is accepted but not used in this body.
    """
    self.name = name
    self.graph = graph
    self.flow = flow
    self.code_package_sha = code_package_sha
    self.code_package_url = code_package_url
    self.production_token = production_token
    self.metadata = metadata
    self.flow_datastore = flow_datastore
    self.environment = environment
    self.event_logger = event_logger
    self.monitor = monitor
    self.tags = tags
    self.namespace = namespace
    self.username = username
    self.max_workers = max_workers
    self.workflow_timeout = workflow_timeout
    self._client = StepFunctionsClient()
    # ORDER MATTERS: `_compile` (which reaches `_process_parameters`, a
    # caller of `self._cron()`) must run while `self._cron` is still the
    # bound method. The next line rebinds `self._cron` to the method's
    # *result* (the cron expression or None), shadowing the method.
    self._workflow = self._compile()
    self._cron = self._cron()
    self._state_machine_arn = None
def to_json(self):
return self._workflow.to_json(pretty=True)
def trigger_explanation(self):
if self._cron:
# Sometime in the future, we should vendor (or write) a utility
# that can translate cron specifications into a human readable
# format and push to the user for a better UX, someday.
return (
"This workflow triggers automatically "
"via a cron schedule *%s* defined in AWS EventBridge."
% self.event_bridge_rule
)
else:
return "No triggers defined. " "You need to launch this workflow manually."
def deploy(self, log_execution_history):
    """Create or update the state machine on AWS Step Functions.

    Parameters
    ----------
    log_execution_history : bool
        When True, require METAFLOW_SFN_EXECUTION_LOG_GROUP_ARN and ship
        execution history to that CloudWatch Logs group.

    Raises
    ------
    StepFunctionsException
        If required configuration is missing or the push to AWS fails.
    """
    # Both guards validate configuration before any AWS call is made.
    if SFN_IAM_ROLE is None:
        raise StepFunctionsException(
            "No IAM role found for AWS Step "
            "Functions. You can create one "
            "following the instructions listed at "
            "*https://admin-docs.metaflow.org/meta"
            "flow-on-aws/deployment-guide/manual-d"
            "eployment#scheduling* and "
            "re-configure Metaflow using "
            "*metaflow configure aws* on your "
            "terminal."
        )
    if log_execution_history:
        if SFN_EXECUTION_LOG_GROUP_ARN is None:
            raise StepFunctionsException(
                "No AWS CloudWatch Logs log "
                "group ARN found for emitting "
                "state machine execution logs for "
                "your workflow. You can set it in "
                "your environment by using the "
                "METAFLOW_SFN_EXECUTION_LOG_GROUP_ARN "
                "environment variable."
            )
    try:
        # `push` creates or updates the state machine and returns its ARN,
        # which `schedule()` later consumes.
        self._state_machine_arn = self._client.push(
            name=self.name,
            definition=self.to_json(),
            role_arn=SFN_IAM_ROLE,
            log_execution_history=log_execution_history,
        )
    except Exception as e:
        # Re-wrap any boto/client failure in the plugin's exception type.
        raise StepFunctionsException(repr(e))
def schedule(self):
    """Attach the flow's cron schedule to the deployed state machine.

    Scheduling is currently enabled via AWS Event Bridge; requires that
    ``deploy()`` has already populated ``self._state_machine_arn``.

    Raises
    ------
    StepFunctionsSchedulingException
        If the EventBridge IAM role is not configured or the EventBridge
        call fails.
    """
    # Scheduling is currently enabled via AWS Event Bridge.
    if EVENTS_SFN_ACCESS_IAM_ROLE is None:
        raise StepFunctionsSchedulingException(
            "No IAM role found for AWS "
            "Events Bridge. You can "
            "create one following the "
            "instructions listed at "
            "*https://admin-docs.metaflo"
            "w.org/metaflow-on-aws/deplo"
            "yment-guide/manual-deployme"
            "nt#scheduling* and "
            "re-configure Metaflow "
            "using *metaflow configure "
            "aws* on your terminal."
        )
    try:
        # The builder chain creates (or updates) the EventBridge rule and
        # returns its identifier, used by `trigger_explanation`.
        self.event_bridge_rule = (
            EventBridgeClient(self.name)
            .cron(self._cron)
            .role_arn(EVENTS_SFN_ACCESS_IAM_ROLE)
            .state_machine_arn(self._state_machine_arn)
            .schedule()
        )
    except Exception as e:
        raise StepFunctionsSchedulingException(repr(e))
@classmethod
def trigger(cls, name, parameters):
    """Start an execution of the deployed workflow *name*.

    *parameters* (a dict of flow parameter name -> value) is serialized
    into the ``Parameters`` key of the state machine input. Returns the
    response of the StartExecution call.

    Raises
    ------
    StepFunctionsException
        If the workflow does not exist, the serialized parameters exceed
        the size budget, or any AWS call fails.
    """
    # Reuse one client for both the lookup and the trigger call.
    client = StepFunctionsClient()
    try:
        state_machine = client.get(name)
    except Exception as e:
        raise StepFunctionsException(repr(e))
    if state_machine is None:
        raise StepFunctionsException(
            "The workflow *%s* doesn't exist "
            "on AWS Step Functions. Please "
            "deploy your flow first." % name
        )
    # Dump parameters into `Parameters` input field. (Renamed from
    # `input` so the builtin is not shadowed.)
    payload = json.dumps({"Parameters": json.dumps(parameters)})
    # AWS Step Functions limits input to be 32KiB, but AWS Batch
    # has it's own limitation of 30KiB for job specification length.
    # Reserving 10KiB for rest of the job sprecification leaves 20KiB
    # for us, which should be enough for most use cases for now.
    if len(payload) > 20480:
        raise StepFunctionsException(
            "Length of parameter names and "
            "values shouldn't exceed 20480 as "
            "imposed by AWS Step Functions."
        )
    try:
        state_machine_arn = state_machine.get("stateMachineArn")
        return client.trigger(state_machine_arn, payload)
    except Exception as e:
        raise StepFunctionsException(repr(e))
@classmethod
def list(cls, name, states):
    """List executions of workflow *name* filtered to the given *states*.

    Returns whatever ``StepFunctionsClient.list_executions`` yields for
    the state machine's ARN. Raises StepFunctionsException if the
    workflow does not exist or an AWS call fails.
    """
    try:
        state_machine = StepFunctionsClient().get(name)
    except Exception as e:
        raise StepFunctionsException(repr(e))
    if state_machine is None:
        raise StepFunctionsException(
            "The workflow *%s* doesn't exist " "on AWS Step Functions." % name
        )
    try:
        state_machine_arn = state_machine.get("stateMachineArn")
        return StepFunctionsClient().list_executions(state_machine_arn, states)
    except Exception as e:
        raise StepFunctionsException(repr(e))
@classmethod
def get_existing_deployment(cls, name):
    """Return ``(owner, production_token)`` for an existing deployment.

    Returns None when no state machine named *name* exists. Raises
    StepFunctionsException when a state machine exists but its
    definition lacks the Metaflow-written `start` parameters (i.e. it
    was not created by Metaflow).
    """
    workflow = StepFunctionsClient().get(name)
    # Guard clause: nothing deployed under this name.
    if workflow is None:
        return None
    try:
        # Metaflow stashes owner/token in the `start` state's Parameters.
        start = json.loads(workflow["definition"])["States"]["start"]
        parameters = start["Parameters"]["Parameters"]
        return parameters.get("metaflow.owner"), parameters.get(
            "metaflow.production_token"
        )
    except KeyError as e:
        # Chain the original KeyError so the missing field stays visible
        # in the traceback (previously `e` was captured but unused).
        raise StepFunctionsException(
            "An existing non-metaflow "
            "workflow with the same name as "
            "*%s* already exists in AWS Step "
            "Functions. Please modify the "
            "name of this flow or delete your "
            "existing workflow on AWS Step "
            "Functions." % name
        ) from e
def _compile(self):
    """Compile the Metaflow graph into an ASL ``Workflow`` object.

    Depth-first traversal from ``start``: linear/join steps chain Task
    states, ``split-and`` becomes a Parallel state, and ``foreach``
    becomes a DynamoDB cardinality lookup followed by a Map state.
    """

    # Visit every node of the flow and recursively build the state machine.
    def _visit(node, workflow, exit_node=None):
        # Assign an AWS Batch job to the AWS Step Functions state
        # and pass the intermediate state by exposing `JobId` and
        # `Parameters` to the child job(s) as outputs. `Index` and
        # `SplitParentTaskId` are populated optionally, when available.
        # We can't modify the names of keys in AWS Step Functions aside
        # from a blessed few which are set as `Parameters` for the Map
        # state. That's why even though `JobId` refers to the parent task
        # id, we can't call it as such. Similar situation for `Parameters`.
        state = (
            State(node.name)
            .batch(self._batch(node))
            .output_path(
                "$.['JobId', " "'Parameters', " "'Index', " "'SplitParentTaskId']"
            )
        )
        # End the (sub)workflow if we have reached the end of the flow or
        # the parent step of matching_join of the sub workflow.
        if node.type == "end" or exit_node in node.out_funcs:
            workflow.add_state(state.end())
        # Continue linear assignment within the (sub)workflow if the node
        # doesn't branch or fork.
        elif node.type in ("linear", "join"):
            workflow.add_state(state.next(node.out_funcs[0]))
            _visit(self.graph[node.out_funcs[0]], workflow, exit_node)
        # Create a `Parallel` state and assign sub workflows if the node
        # branches out.
        elif node.type == "split-and":
            # Deterministic synthetic name for the Parallel state, derived
            # from the branch step names.
            branch_name = hashlib.sha224(
                "&".join(node.out_funcs).encode("utf-8")
            ).hexdigest()
            workflow.add_state(state.next(branch_name))
            branch = Parallel(branch_name).next(node.matching_join)
            # Generate as many sub workflows as branches and recurse.
            for n in node.out_funcs:
                branch.branch(
                    _visit(
                        self.graph[n], Workflow(n).start_at(n), node.matching_join
                    )
                )
            workflow.add_state(branch)
            # Continue the traversal from the matching_join.
            _visit(self.graph[node.matching_join], workflow, exit_node)
        # Create a `Map` state and assign sub workflow if the node forks.
        elif node.type == "foreach":
            # Fetch runtime cardinality via an AWS DynamoDb Get call before
            # configuring the node
            cardinality_state_name = "#%s" % node.out_funcs[0]
            workflow.add_state(state.next(cardinality_state_name))
            cardinality_state = (
                State(cardinality_state_name)
                .dynamo_db(SFN_DYNAMO_DB_TABLE, "$.JobId", "for_each_cardinality")
                .result_path("$.Result")
            )
            iterator_name = "*%s" % node.out_funcs[0]
            workflow.add_state(cardinality_state.next(iterator_name))
            workflow.add_state(
                Map(iterator_name)
                .items_path("$.Result.Item.for_each_cardinality.NS")
                .parameter("JobId.$", "$.JobId")
                .parameter("SplitParentTaskId.$", "$.JobId")
                .parameter("Parameters.$", "$.Parameters")
                .parameter("Index.$", "$$.Map.Item.Value")
                .next(node.matching_join)
                .iterator(
                    _visit(
                        self.graph[node.out_funcs[0]],
                        Workflow(node.out_funcs[0]).start_at(node.out_funcs[0]),
                        node.matching_join,
                    )
                )
                .max_concurrency(self.max_workers)
                .output_path("$.[0]")
            )
            # Continue the traversal from the matching_join.
            _visit(self.graph[node.matching_join], workflow, exit_node)
        # We shouldn't ideally ever get here.
        else:
            raise StepFunctionsException(
                "Node type *%s* for step *%s* "
                "is not currently supported by "
                "AWS Step Functions." % (node.type, node.name)
            )
        return workflow

    workflow = Workflow(self.name).start_at("start")
    if self.workflow_timeout:
        workflow.timeout_seconds(self.workflow_timeout)
    return _visit(self.graph["start"], workflow)
def _cron(self):
schedule = self.flow._flow_decorators.get("schedule")
if schedule:
return schedule.schedule
return None
def _process_parameters(self):
parameters = []
has_schedule = self._cron() is not None
seen = set()
for var, param in self.flow._get_parameters():
# Throw an exception if the parameter is specified twice.
norm = param.name.lower()
if norm in seen:
raise MetaflowException(
"Parameter *%s* is specified twice. "
"Note that parameter names are "
"case-insensitive." % param.name
)
seen.add(norm)
is_required = param.kwargs.get("required", False)
# Throw an exception if a schedule is set for a flow with required
# parameters with no defaults. We currently don't have any notion
# of data triggers in AWS Event Bridge.
if "default" not in param.kwargs and is_required and has_schedule:
raise MetaflowException(
"The parameter *%s* does not have a "
"default and is required. Scheduling "
"such parameters via AWS Event Bridge "
"is not currently supported." % param.name
)
value = deploy_time_eval(param.kwargs.get("default"))
parameters.append(dict(name=param.name, value=value))
return parameters
def _batch(self, node):
    """Build the AWS Batch job payload for *node*.

    Assembles the environment variables and the ``attrs`` blob (smuggled
    through AWS Batch job parameters) that carry Step Functions state
    between tasks, then returns a configured ``Batch`` job builder with
    the retry count applied.
    """
    attrs = {
        # metaflow.user is only used for setting the AWS Job Name.
        # Since job executions are no longer tied to a specific user
        # identity, we will just set their user to `SFN`. We still do need
        # access to the owner of the workflow for production tokens, which
        # we can stash in metaflow.owner.
        "metaflow.user": "SFN",
        "metaflow.owner": self.username,
        "metaflow.flow_name": self.flow.name,
        "metaflow.step_name": node.name,
        "metaflow.run_id.$": "$$.Execution.Name",
        # Unfortunately we can't set the task id here since AWS Step
        # Functions lacks any notion of run-scoped task identifiers. We
        # instead co-opt the AWS Batch job id as the task id. This also
        # means that the AWS Batch job name will have missing fields since
        # the job id is determined at job execution, but since the job id is
        # part of the job description payload, we don't lose much except for
        # a few ugly looking black fields in the AWS Batch UI.
        # Also, unfortunately we can't set the retry count since
        # `$$.State.RetryCount` resolves to an int dynamically and
        # AWS Batch job specification only accepts strings. We handle
        # retries/catch within AWS Batch to get around this limitation.
        "metaflow.version": self.environment.get_environment_info()[
            "metaflow_version"
        ],
        # We rely on step names and task ids of parent steps to construct
        # input paths for a task. Since the only information we can pass
        # between states (via `InputPath` and `ResultPath`) in AWS Step
        # Functions is the job description, we run the risk of exceeding
        # 32K state size limit rather quickly if we don't filter the job
        # description to a minimal set of fields. Unfortunately, the partial
        # `JsonPath` implementation within AWS Step Functions makes this
        # work a little non-trivial; it doesn't like dots in keys, so we
        # have to add the field again.
        # This pattern is repeated in a lot of other places, where we use
        # AWS Batch parameters to store AWS Step Functions state
        # information, since this field is the only field in the AWS Batch
        # specification that allows us to set key-values.
        "step_name": node.name,
    }
    # Store production token within the `start` step, so that subsequent
    # `step-functions create` calls can perform a rudimentary authorization
    # check.
    if node.name == "start":
        attrs["metaflow.production_token"] = self.production_token
    # Add env vars from the optional @environment decorator.
    env_deco = [deco for deco in node.decorators if deco.name == "environment"]
    env = {}
    if env_deco:
        env = env_deco[0].attributes["vars"]
    if node.name == "start":
        # Initialize parameters for the flow in the `start` step.
        parameters = self._process_parameters()
        if parameters:
            # Get user-defined parameters from State Machine Input.
            # Since AWS Step Functions doesn't allow for optional inputs
            # currently, we have to unfortunately place an artificial
            # constraint that every parameterized workflow needs to include
            # `Parameters` as a key in the input to the workflow.
            # `step-functions trigger` already takes care of this
            # requirement, but within the UI, the users will be required to
            # specify an input with key as `Parameters` and value as a
            # stringified json of the actual parameters -
            # {"Parameters": "{\"alpha\": \"beta\"}"}
            env["METAFLOW_PARAMETERS"] = "$.Parameters"
            default_parameters = {}
            for parameter in parameters:
                if parameter["value"] is not None:
                    default_parameters[parameter["name"]] = parameter["value"]
            # Dump the default values specified in the flow.
            env["METAFLOW_DEFAULT_PARAMETERS"] = json.dumps(default_parameters)
        # `start` step has no upstream input dependencies aside from
        # parameters.
        input_paths = None
    else:
        # We need to rely on the `InputPath` of the AWS Step Functions
        # specification to grab task ids and the step names of the parent
        # to properly construct input_paths at runtime. Thanks to the
        # JsonPath-foo embedded in the parent states, we have this
        # information easily available.
        # Handle foreach join.
        if (
            node.type == "join"
            and self.graph[node.split_parents[-1]].type == "foreach"
        ):
            input_paths = (
                "sfn-${METAFLOW_RUN_ID}/%s/:"
                "${METAFLOW_PARENT_TASK_IDS}" % node.in_funcs[0]
            )
            # Unfortunately, AWS Batch only allows strings as value types
            # in it's specification and we don't have any way to concatenate
            # the task ids array from the parent steps within AWS Step
            # Functions and pass it down to AWS Batch. We instead have to
            # rely on publishing the state to DynamoDb and fetching it back
            # in within the AWS Batch entry point to set
            # `METAFLOW_PARENT_TASK_IDS`. The state is scoped to the parent
            # foreach task `METAFLOW_SPLIT_PARENT_TASK_ID`. We decided on
            # AWS DynamoDb and not AWS Lambdas, because deploying and
            # debugging Lambdas would be a nightmare as far as OSS support
            # is concerned.
            env["METAFLOW_SPLIT_PARENT_TASK_ID"] = (
                "$.Parameters.split_parent_task_id_%s" % node.split_parents[-1]
            )
        else:
            # Set appropriate environment variables for runtime replacement.
            if len(node.in_funcs) == 1:
                input_paths = (
                    "sfn-${METAFLOW_RUN_ID}/%s/${METAFLOW_PARENT_TASK_ID}"
                    % node.in_funcs[0]
                )
                env["METAFLOW_PARENT_TASK_ID"] = "$.JobId"
            else:
                # Generate the input paths in a quasi-compressed format.
                # See util.decompress_list for why this is written the way
                # it is.
                input_paths = "sfn-${METAFLOW_RUN_ID}:" + ",".join(
                    "/${METAFLOW_PARENT_%s_STEP}/"
                    "${METAFLOW_PARENT_%s_TASK_ID}" % (idx, idx)
                    for idx, _ in enumerate(node.in_funcs)
                )
                for idx, _ in enumerate(node.in_funcs):
                    env["METAFLOW_PARENT_%s_TASK_ID" % idx] = "$.[%s].JobId" % idx
                    env["METAFLOW_PARENT_%s_STEP" % idx] = (
                        "$.[%s].Parameters.step_name" % idx
                    )
        env["METAFLOW_INPUT_PATHS"] = input_paths
        if node.is_inside_foreach:
            # Set the task id of the parent job of the foreach split in
            # our favorite dumping ground, the AWS Batch attrs. For
            # subsequent descendent tasks, this attrs blob becomes the
            # input to those descendent tasks. We set and propagate the
            # task ids pointing to split_parents through every state.
            if any(self.graph[n].type == "foreach" for n in node.in_funcs):
                attrs[
                    "split_parent_task_id_%s.$" % node.split_parents[-1]
                ] = "$.SplitParentTaskId"
                for parent in node.split_parents[:-1]:
                    if self.graph[parent].type == "foreach":
                        attrs["split_parent_task_id_%s.$" % parent] = (
                            "$.Parameters.split_parent_task_id_%s" % parent
                        )
            elif node.type == "join":
                if self.graph[node.split_parents[-1]].type == "foreach":
                    # A foreach join only gets one set of input from the
                    # parent tasks. We filter the Map state to only output
                    # `$.[0]`, since we don't need any of the other outputs,
                    # that information is available to us from AWS DynamoDB.
                    # This has a nice side-effect of making our foreach
                    # splits infinitely scalable because otherwise we would
                    # be bounded by the 32K state limit for the outputs. So,
                    # instead of referencing `Parameters` fields by index
                    # (like in `split-and`), we can just reference them
                    # directly.
                    attrs["split_parent_task_id_%s.$" % node.split_parents[-1]] = (
                        "$.Parameters.split_parent_task_id_%s"
                        % node.split_parents[-1]
                    )
                    for parent in node.split_parents[:-1]:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.Parameters.split_parent_task_id_%s" % parent
                            )
                else:
                    for parent in node.split_parents:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.[0].Parameters.split_parent_task_id_%s" % parent
                            )
            else:
                for parent in node.split_parents:
                    if self.graph[parent].type == "foreach":
                        attrs["split_parent_task_id_%s.$" % parent] = (
                            "$.Parameters.split_parent_task_id_%s" % parent
                        )
            # Set `METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN` if the
            # next transition is to a foreach join, so that the
            # stepfunctions decorator can write the mapping for input path
            # to DynamoDb.
            if any(
                self.graph[n].type == "join"
                and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                for n in node.out_funcs
            ):
                env["METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN"] = attrs[
                    "split_parent_task_id_%s.$"
                    % self.graph[node.out_funcs[0]].split_parents[-1]
                ]
            # Set ttl for the values we set in AWS DynamoDB.
            if node.type == "foreach":
                if self.workflow_timeout:
                    env["METAFLOW_SFN_WORKFLOW_TIMEOUT"] = self.workflow_timeout
        # Handle split index for for-each.
        if any(self.graph[n].type == "foreach" for n in node.in_funcs):
            env["METAFLOW_SPLIT_INDEX"] = "$.Index"
    env["METAFLOW_CODE_URL"] = self.code_package_url
    env["METAFLOW_FLOW_NAME"] = attrs["metaflow.flow_name"]
    env["METAFLOW_STEP_NAME"] = attrs["metaflow.step_name"]
    env["METAFLOW_RUN_ID"] = attrs["metaflow.run_id.$"]
    env["METAFLOW_PRODUCTION_TOKEN"] = self.production_token
    env["SFN_STATE_MACHINE"] = self.name
    env["METAFLOW_OWNER"] = attrs["metaflow.owner"]
    # Can't set `METAFLOW_TASK_ID` due to lack of run-scoped identifiers.
    # We will instead rely on `AWS_BATCH_JOB_ID` as the task identifier.
    # Can't set `METAFLOW_RETRY_COUNT` either due to integer casting issue.
    metadata_env = self.metadata.get_runtime_environment("step-functions")
    env.update(metadata_env)
    metaflow_version = self.environment.get_environment_info()
    metaflow_version["flow_name"] = self.graph.name
    metaflow_version["production_token"] = self.production_token
    env["METAFLOW_VERSION"] = json.dumps(metaflow_version)
    # Set AWS DynamoDb Table Name for state tracking for for-eaches.
    # There are three instances when metaflow runtime directly interacts
    # with AWS DynamoDB.
    #   1. To set the cardinality of foreaches (which are subsequently)
    #      read prior to the instantiation of the Map state by AWS Step
    #      Functions.
    #   2. To set the input paths from the parent steps of a foreach join.
    #   3. To read the input paths in a foreach join.
    if (
        node.type == "foreach"
        or (
            node.is_inside_foreach
            and any(
                self.graph[n].type == "join"
                and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                for n in node.out_funcs
            )
        )
        or (
            node.type == "join"
            and self.graph[node.split_parents[-1]].type == "foreach"
        )
    ):
        if SFN_DYNAMO_DB_TABLE is None:
            raise StepFunctionsException(
                "An AWS DynamoDB table is needed "
                "to support foreach in your flow. "
                "You can create one following the "
                "instructions listed at *https://a"
                "dmin-docs.metaflow.org/metaflow-o"
                "n-aws/deployment-guide/manual-dep"
                "loyment#scheduling* and "
                "re-configure Metaflow using "
                "*metaflow configure aws* on your "
                "terminal."
            )
        env["METAFLOW_SFN_DYNAMO_DB_TABLE"] = SFN_DYNAMO_DB_TABLE
    # Resolve AWS Batch resource requirements.
    batch_deco = [deco for deco in node.decorators if deco.name == "batch"][0]
    resources = batch_deco.attributes
    # Resolve retry strategy.
    user_code_retries, total_retries = self._get_retries(node)
    task_spec = {
        "flow_name": attrs["metaflow.flow_name"],
        "step_name": attrs["metaflow.step_name"],
        "run_id": "sfn-$METAFLOW_RUN_ID",
        # Use AWS Batch job identifier as the globally unique
        # task identifier.
        "task_id": "$AWS_BATCH_JOB_ID",
        # Since retries are handled by AWS Batch, we can rely on
        # AWS_BATCH_JOB_ATTEMPT as the job counter.
        "retry_count": "$((AWS_BATCH_JOB_ATTEMPT-1))",
    }
    return (
        Batch(self.metadata, self.environment)
        .create_job(
            step_name=node.name,
            step_cli=self._step_cli(
                node, input_paths, self.code_package_url, user_code_retries
            ),
            task_spec=task_spec,
            code_package_sha=self.code_package_sha,
            code_package_url=self.code_package_url,
            code_package_ds=self.flow_datastore.TYPE,
            image=resources["image"],
            queue=resources["queue"],
            iam_role=resources["iam_role"],
            execution_role=resources["execution_role"],
            cpu=resources["cpu"],
            gpu=resources["gpu"],
            memory=resources["memory"],
            run_time_limit=batch_deco.run_time_limit,
            shared_memory=resources["shared_memory"],
            max_swap=resources["max_swap"],
            swappiness=resources["swappiness"],
            env=env,
            attrs=attrs,
            host_volumes=resources["host_volumes"],
        )
        .attempts(total_retries + 1)
    )
def _get_retries(self, node):
max_user_code_retries = 0
max_error_retries = 0
# Different decorators may have different retrying strategies, so take
# the max of them.
for deco in node.decorators:
user_code_retries, error_retries = deco.step_task_retry_count()
max_user_code_retries = max(max_user_code_retries, user_code_retries)
max_error_retries = max(max_error_retries, error_retries)
return max_user_code_retries, max_user_code_retries + max_error_retries
def _step_cli(self, node, paths, code_package_url, user_code_retries):
    """Build the shell command line that runs *node* inside AWS Batch.

    For the `start` step this prepends a guarded `init` invocation that
    materializes flow parameters exactly once (idempotent across AWS
    Batch retries); for foreach joins it prepends the helper that
    exports parent task ids from DynamoDB. Returns the commands joined
    with ``&&``.
    """
    cmds = []
    script_name = os.path.basename(sys.argv[0])
    executable = self.environment.executable(node.name)
    if R.use_r():
        entrypoint = [R.entrypoint()]
    else:
        entrypoint = [executable, script_name]
    # Use AWS Batch job identifier as the globally unique task identifier.
    task_id = "${AWS_BATCH_JOB_ID}"
    # FlowDecorators can define their own top-level options. They are
    # responsible for adding their own top-level options and values through
    # the get_top_level_options() hook. See similar logic in runtime.py.
    top_opts_dict = {}
    for deco in flow_decorators():
        top_opts_dict.update(deco.get_top_level_options())
    top_opts = list(dict_to_cli_options(top_opts_dict))
    if node.name == "start":
        # We need a separate unique ID for the special _parameters task
        task_id_params = "%s-params" % task_id
        # Export user-defined parameters into runtime environment
        param_file = "".join(
            random.choice(string.ascii_lowercase) for _ in range(10)
        )
        export_params = (
            "python -m "
            "metaflow.plugins.aws.step_functions.set_batch_environment "
            "parameters %s && . `pwd`/%s" % (param_file, param_file)
        )
        # NOTE(review): this `init` invocation hard-codes `--datastore=s3`
        # while the `step` command below uses self.flow_datastore.TYPE —
        # confirm whether this is intentional.
        params = (
            entrypoint
            + top_opts
            + [
                "--quiet",
                "--metadata=%s" % self.metadata.TYPE,
                "--environment=%s" % self.environment.TYPE,
                "--datastore=s3",
                "--event-logger=%s" % self.event_logger.logger_type,
                "--monitor=%s" % self.monitor.monitor_type,
                "--no-pylint",
                "init",
                "--run-id sfn-$METAFLOW_RUN_ID",
                "--task-id %s" % task_id_params,
            ]
        )
        # Assign tags to run objects.
        if self.tags:
            params.extend("--tag %s" % tag for tag in self.tags)
        # If the start step gets retried, we must be careful not to
        # regenerate multiple parameters tasks. Hence we check first if
        # _parameters exists already.
        exists = entrypoint + [
            "dump",
            "--max-value-size=0",
            "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params),
        ]
        cmd = "if ! %s >/dev/null 2>/dev/null; then %s && %s; fi" % (
            " ".join(exists),
            export_params,
            " ".join(params),
        )
        cmds.append(cmd)
        paths = "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params)
    if node.type == "join" and self.graph[node.split_parents[-1]].type == "foreach":
        # Foreach joins must first recover their parents' task ids from
        # DynamoDB before the step command can resolve its input paths.
        parent_tasks_file = "".join(
            random.choice(string.ascii_lowercase) for _ in range(10)
        )
        export_parent_tasks = (
            "python -m "
            "metaflow.plugins.aws.step_functions.set_batch_environment "
            "parent_tasks %s && . `pwd`/%s" % (parent_tasks_file, parent_tasks_file)
        )
        cmds.append(export_parent_tasks)
    top_level = top_opts + [
        "--quiet",
        "--metadata=%s" % self.metadata.TYPE,
        "--environment=%s" % self.environment.TYPE,
        "--datastore=%s" % self.flow_datastore.TYPE,
        "--datastore-root=%s" % self.flow_datastore.datastore_root,
        "--event-logger=%s" % self.event_logger.logger_type,
        "--monitor=%s" % self.monitor.monitor_type,
        "--no-pylint",
        "--with=step_functions_internal",
    ]
    step = [
        "step",
        node.name,
        "--run-id sfn-$METAFLOW_RUN_ID",
        "--task-id %s" % task_id,
        # Since retries are handled by AWS Batch, we can rely on
        # AWS_BATCH_JOB_ATTEMPT as the job counter.
        "--retry-count $((AWS_BATCH_JOB_ATTEMPT-1))",
        "--max-user-code-retries %d" % user_code_retries,
        "--input-paths %s" % paths,
        # Set decorator to batch to execute `task_*` hooks for batch
        # decorator.
        "--with=batch",
    ]
    if any(self.graph[n].type == "foreach" for n in node.in_funcs):
        # We set the `METAFLOW_SPLIT_INDEX` through JSONPath-foo
        # to pass the state from the parent DynamoDb state for for-each.
        step.append("--split-index $METAFLOW_SPLIT_INDEX")
    if self.tags:
        step.extend("--tag %s" % tag for tag in self.tags)
    if self.namespace is not None:
        step.append("--namespace=%s" % self.namespace)
    cmds.append(" ".join(entrypoint + top_level + step))
    return " && ".join(cmds)
class Workflow(object):
    """Fluent builder for an Amazon States Language state machine."""

    def __init__(self, name):
        self.name = name

        # Infinitely-nested defaultdict: payload keys can be assigned at
        # any depth without pre-creating intermediate dicts.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()

    def start_at(self, start_at):
        self.payload["StartAt"] = start_at
        return self

    def add_state(self, state):
        self.payload["States"][state.name] = state.payload
        return self

    def timeout_seconds(self, timeout_seconds):
        self.payload["TimeoutSeconds"] = timeout_seconds
        return self

    def to_json(self, pretty=False):
        indent = 4 if pretty else None
        return json.dumps(self.payload, indent=indent)
class State(object):
    """Fluent builder for a single ASL ``Task`` state."""

    def __init__(self, name):
        self.name = name

        # Auto-vivifying nested dict for the state payload.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()
        self.payload["Type"] = "Task"

    def resource(self, resource):
        self.payload["Resource"] = resource
        return self

    def next(self, state):
        self.payload["Next"] = state
        return self

    def end(self):
        self.payload["End"] = True
        return self

    def parameter(self, name, value):
        self.payload["Parameters"][name] = value
        return self

    def output_path(self, output_path):
        self.payload["OutputPath"] = output_path
        return self

    def result_path(self, result_path):
        self.payload["ResultPath"] = result_path
        return self

    def _partition(self):
        # This is needed to support AWS Gov Cloud and AWS CN regions
        return SFN_IAM_ROLE.split(":")[1]

    def batch(self, job):
        # Configure this Task state as a synchronous AWS Batch submitJob.
        self.resource("arn:%s:states:::batch:submitJob.sync" % self._partition())
        for key, value in (
            ("JobDefinition", job.payload["jobDefinition"]),
            ("JobName", job.payload["jobName"]),
            ("JobQueue", job.payload["jobQueue"]),
            ("Parameters", job.payload["parameters"]),
            ("ContainerOverrides", to_pascalcase(job.payload["containerOverrides"])),
            ("RetryStrategy", to_pascalcase(job.payload["retryStrategy"])),
            ("Timeout", to_pascalcase(job.payload["timeout"])),
        ):
            self.parameter(key, value)
        # tags may not be present in all scenarios
        if "tags" in job.payload:
            self.parameter("Tags", job.payload["tags"])
        return self

    def dynamo_db(self, table_name, primary_key, values):
        # Configure this Task state as a consistent DynamoDB GetItem.
        self.resource("arn:%s:states:::dynamodb:getItem" % self._partition())
        for key, value in (
            ("TableName", table_name),
            ("Key", {"pathspec": {"S.$": primary_key}}),
            ("ConsistentRead", True),
            ("ProjectionExpression", values),
        ):
            self.parameter(key, value)
        return self
class Parallel(object):
    """Fluent builder for an ASL ``Parallel`` state (static fan-out)."""

    def __init__(self, name):
        self.name = name

        # Auto-vivifying nested dict for the state payload.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()
        self.payload["Type"] = "Parallel"

    def branch(self, workflow):
        # Use a plain list explicitly: letting the defaultdict auto-create
        # the value would produce a nested dict, not a JSON array.
        self.payload.setdefault("Branches", []).append(workflow.payload)
        return self

    def next(self, state):
        self.payload["Next"] = state
        return self

    def output_path(self, output_path):
        self.payload["OutputPath"] = output_path
        return self

    def result_path(self, result_path):
        self.payload["ResultPath"] = result_path
        return self
class Map(object):
    """Fluent builder for an ASL ``Map`` state (dynamic fan-out)."""

    def __init__(self, name):
        self.name = name

        # Auto-vivifying nested dict for the state payload.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()
        self.payload["Type"] = "Map"
        # 0 means "no concurrency limit" in Amazon States Language.
        self.payload["MaxConcurrency"] = 0

    def iterator(self, workflow):
        self.payload["Iterator"] = workflow.payload
        return self

    def next(self, state):
        self.payload["Next"] = state
        return self

    def items_path(self, items_path):
        self.payload["ItemsPath"] = items_path
        return self

    def parameter(self, name, value):
        self.payload["Parameters"][name] = value
        return self

    def max_concurrency(self, max_concurrency):
        self.payload["MaxConcurrency"] = max_concurrency
        return self

    def output_path(self, output_path):
        self.payload["OutputPath"] = output_path
        return self

    def result_path(self, result_path):
        self.payload["ResultPath"] = result_path
        return self
| 43.205508 | 88 | 0.558721 | import os
from collections import defaultdict
import sys
import hashlib
import json
import time
import string
import random
import uuid
from metaflow.exception import MetaflowException, MetaflowInternalError
from metaflow.plugins import ResourcesDecorator, BatchDecorator, RetryDecorator
from metaflow.parameters import deploy_time_eval
from metaflow.decorators import flow_decorators
from metaflow.util import compress_list, dict_to_cli_options, to_pascalcase
from metaflow.metaflow_config import (
SFN_IAM_ROLE,
EVENTS_SFN_ACCESS_IAM_ROLE,
SFN_DYNAMO_DB_TABLE,
SFN_EXECUTION_LOG_GROUP_ARN,
)
from metaflow import R
from .step_functions_client import StepFunctionsClient
from .event_bridge_client import EventBridgeClient
from ..batch.batch import Batch
class StepFunctionsException(MetaflowException):
    """Raised when deploying, querying or triggering a workflow on AWS
    Step Functions fails."""

    headline = "AWS Step Functions error"
class StepFunctionsSchedulingException(MetaflowException):
    """Raised when attaching an AWS EventBridge schedule to the deployed
    state machine fails."""

    headline = "AWS Step Functions scheduling error"
class StepFunctions(object):
def __init__(
self,
name,
graph,
flow,
code_package_sha,
code_package_url,
production_token,
metadata,
flow_datastore,
environment,
event_logger,
monitor,
tags=None,
namespace=None,
username=None,
max_workers=None,
workflow_timeout=None,
is_project=False,
):
self.name = name
self.graph = graph
self.flow = flow
self.code_package_sha = code_package_sha
self.code_package_url = code_package_url
self.production_token = production_token
self.metadata = metadata
self.flow_datastore = flow_datastore
self.environment = environment
self.event_logger = event_logger
self.monitor = monitor
self.tags = tags
self.namespace = namespace
self.username = username
self.max_workers = max_workers
self.workflow_timeout = workflow_timeout
self._client = StepFunctionsClient()
self._workflow = self._compile()
self._cron = self._cron()
self._state_machine_arn = None
def to_json(self):
return self._workflow.to_json(pretty=True)
def trigger_explanation(self):
if self._cron:
return (
"This workflow triggers automatically "
"via a cron schedule *%s* defined in AWS EventBridge."
% self.event_bridge_rule
)
else:
return "No triggers defined. " "You need to launch this workflow manually."
    def deploy(self, log_execution_history):
        """Push the compiled state machine to AWS Step Functions.

        Parameters
        ----------
        log_execution_history : bool
            When True, configure the state machine to ship execution logs to
            the CloudWatch log group named by SFN_EXECUTION_LOG_GROUP_ARN.

        Raises
        ------
        StepFunctionsException
            If mandatory configuration is missing or the AWS call fails.
        """
        # The state machine cannot be created without an IAM role to assume.
        if SFN_IAM_ROLE is None:
            raise StepFunctionsException(
                "No IAM role found for AWS Step "
                "Functions. You can create one "
                "following the instructions listed at "
                "*https://admin-docs.metaflow.org/meta"
                "flow-on-aws/deployment-guide/manual-d"
                "eployment#scheduling* and "
                "re-configure Metaflow using "
                "*metaflow configure aws* on your "
                "terminal."
            )
        if log_execution_history:
            if SFN_EXECUTION_LOG_GROUP_ARN is None:
                raise StepFunctionsException(
                    "No AWS CloudWatch Logs log "
                    "group ARN found for emitting "
                    "state machine execution logs for "
                    "your workflow. You can set it in "
                    "your environment by using the "
                    "METAFLOW_SFN_EXECUTION_LOG_GROUP_ARN "
                    "environment variable."
                )
        try:
            self._state_machine_arn = self._client.push(
                name=self.name,
                definition=self.to_json(),
                role_arn=SFN_IAM_ROLE,
                log_execution_history=log_execution_history,
            )
        except Exception as e:
            # Surface any client failure as a Metaflow-friendly error.
            raise StepFunctionsException(repr(e))
    def schedule(self):
        """Attach the workflow's cron trigger via AWS EventBridge.

        Creates (or updates) an EventBridge rule that starts the deployed
        state machine on the cron schedule stored in ``self._cron``. Stores
        the resulting rule on ``self.event_bridge_rule``.

        Raises
        ------
        StepFunctionsSchedulingException
            If the EventBridge IAM role is missing or the AWS call fails.
        """
        # EventBridge needs its own IAM role with permission to start
        # executions of the state machine.
        if EVENTS_SFN_ACCESS_IAM_ROLE is None:
            raise StepFunctionsSchedulingException(
                "No IAM role found for AWS "
                "Events Bridge. You can "
                "create one following the "
                "instructions listed at "
                "*https://admin-docs.metaflo"
                "w.org/metaflow-on-aws/deplo"
                "yment-guide/manual-deployme"
                "nt#scheduling* and "
                "re-configure Metaflow "
                "using *metaflow configure "
                "aws* on your terminal."
            )
        try:
            self.event_bridge_rule = (
                EventBridgeClient(self.name)
                .cron(self._cron)
                .role_arn(EVENTS_SFN_ACCESS_IAM_ROLE)
                .state_machine_arn(self._state_machine_arn)
                .schedule()
            )
        except Exception as e:
            raise StepFunctionsSchedulingException(repr(e))
    @classmethod
    def trigger(cls, name, parameters):
        """Start an execution of the already-deployed workflow *name*.

        Parameters
        ----------
        name : str
            Name of the deployed state machine.
        parameters : dict
            Flow parameter values, serialized into the execution input.

        Raises
        ------
        StepFunctionsException
            If the workflow doesn't exist, the input is too large, or the
            AWS call fails.
        """
        try:
            state_machine = StepFunctionsClient().get(name)
        except Exception as e:
            raise StepFunctionsException(repr(e))
        if state_machine is None:
            raise StepFunctionsException(
                "The workflow *%s* doesn't exist "
                "on AWS Step Functions. Please "
                "deploy your flow first." % name
            )
        # Dump parameters into `Parameters` input field.
        input = json.dumps({"Parameters": json.dumps(parameters)})
        # AWS Step Functions limits input to be 32KiB, but AWS Batch
        # has it's own limitation of 30KiB for job specification length.
        # NOTE(review): the limit enforced below is 20KiB (20480 bytes),
        # stricter than either quota above -- presumably to leave headroom;
        # confirm before loosening.
        if len(input) > 20480:
            raise StepFunctionsException(
                "Length of parameter names and "
                "values shouldn't exceed 20480 as "
                "imposed by AWS Step Functions."
            )
        try:
            state_machine_arn = state_machine.get("stateMachineArn")
            return StepFunctionsClient().trigger(state_machine_arn, input)
        except Exception as e:
            raise StepFunctionsException(repr(e))
    @classmethod
    def list(cls, name, states):
        """List executions of workflow *name*, filtered by execution *states*.

        Raises StepFunctionsException if the workflow does not exist or the
        AWS call fails.
        """
        try:
            state_machine = StepFunctionsClient().get(name)
        except Exception as e:
            raise StepFunctionsException(repr(e))
        if state_machine is None:
            raise StepFunctionsException(
                "The workflow *%s* doesn't exist " "on AWS Step Functions." % name
            )
        try:
            state_machine_arn = state_machine.get("stateMachineArn")
            return StepFunctionsClient().list_executions(state_machine_arn, states)
        except Exception as e:
            raise StepFunctionsException(repr(e))
    @classmethod
    def get_existing_deployment(cls, name):
        """Return ``(owner, production_token)`` for an existing deployment.

        Returns None when no state machine called *name* exists. Raises
        StepFunctionsException when one exists but lacks the Metaflow
        metadata (i.e. it was not deployed by Metaflow).
        """
        workflow = StepFunctionsClient().get(name)
        if workflow is not None:
            try:
                # Metaflow-deployed machines carry owner/token metadata in
                # the `start` state's nested Parameters block.
                start = json.loads(workflow["definition"])["States"]["start"]
                parameters = start["Parameters"]["Parameters"]
                return parameters.get("metaflow.owner"), parameters.get(
                    "metaflow.production_token"
                )
            except KeyError as e:
                raise StepFunctionsException(
                    "An existing non-metaflow "
                    "workflow with the same name as "
                    "*%s* already exists in AWS Step "
                    "Functions. Please modify the "
                    "name of this flow or delete your "
                    "existing workflow on AWS Step "
                    "Functions." % name
                )
        return None
    def _compile(self):
        """Translate the Metaflow graph into a Workflow object.

        Walks the graph recursively from `start`, mapping linear/join steps
        to chained Task states, split-and to a Parallel state, and foreach
        to a DynamoDB cardinality lookup followed by a Map state.
        """

        def _visit(node, workflow, exit_node=None):
            # from a blessed few which are set as `Parameters` for the Map
            # state. That's why even though `JobId` refers to the parent task
            state = (
                State(node.name)
                .batch(self._batch(node))
                .output_path(
                    "$.['JobId', " "'Parameters', " "'Index', " "'SplitParentTaskId']"
                )
            )
            # End the (sub)workflow if we have reached the end of the flow or
            # the parent step of matching_join of the sub workflow.
            if node.type == "end" or exit_node in node.out_funcs:
                workflow.add_state(state.end())
            # Continue linear assignment within the (sub)workflow if the node
            # doesn't branch or fork.
            elif node.type in ("linear", "join"):
                workflow.add_state(state.next(node.out_funcs[0]))
                _visit(self.graph[node.out_funcs[0]], workflow, exit_node)
            elif node.type == "split-and":
                # Deterministic synthetic name for the Parallel state, derived
                # from the branch step names.
                branch_name = hashlib.sha224(
                    "&".join(node.out_funcs).encode("utf-8")
                ).hexdigest()
                workflow.add_state(state.next(branch_name))
                branch = Parallel(branch_name).next(node.matching_join)
                for n in node.out_funcs:
                    branch.branch(
                        _visit(
                            self.graph[n], Workflow(n).start_at(n), node.matching_join
                        )
                    )
                workflow.add_state(branch)
                _visit(self.graph[node.matching_join], workflow, exit_node)
            elif node.type == "foreach":
                # The foreach cardinality is read back from DynamoDB before
                # the Map state fans out.
                cardinality_state_name = "#%s" % node.out_funcs[0]
                workflow.add_state(state.next(cardinality_state_name))
                cardinality_state = (
                    State(cardinality_state_name)
                    .dynamo_db(SFN_DYNAMO_DB_TABLE, "$.JobId", "for_each_cardinality")
                    .result_path("$.Result")
                )
                iterator_name = "*%s" % node.out_funcs[0]
                workflow.add_state(cardinality_state.next(iterator_name))
                workflow.add_state(
                    Map(iterator_name)
                    .items_path("$.Result.Item.for_each_cardinality.NS")
                    .parameter("JobId.$", "$.JobId")
                    .parameter("SplitParentTaskId.$", "$.JobId")
                    .parameter("Parameters.$", "$.Parameters")
                    .parameter("Index.$", "$$.Map.Item.Value")
                    .next(node.matching_join)
                    .iterator(
                        _visit(
                            self.graph[node.out_funcs[0]],
                            Workflow(node.out_funcs[0]).start_at(node.out_funcs[0]),
                            node.matching_join,
                        )
                    )
                    .max_concurrency(self.max_workers)
                    .output_path("$.[0]")
                )
                _visit(self.graph[node.matching_join], workflow, exit_node)
            else:
                raise StepFunctionsException(
                    "Node type *%s* for step *%s* "
                    "is not currently supported by "
                    "AWS Step Functions." % (node.type, node.name)
                )
            return workflow

        workflow = Workflow(self.name).start_at("start")
        if self.workflow_timeout:
            workflow.timeout_seconds(self.workflow_timeout)
        return _visit(self.graph["start"], workflow)
def _cron(self):
schedule = self.flow._flow_decorators.get("schedule")
if schedule:
return schedule.schedule
return None
    def _process_parameters(self):
        """Resolve flow parameters into ``[{'name': ..., 'value': ...}, ...]``.

        Validates that parameter names are unique case-insensitively and
        that a scheduled flow has no required parameter without a default.

        NOTE(review): this calls ``self._cron()``, which only resolves to
        the method while ``_compile()`` is running inside ``__init__`` --
        afterwards ``self._cron = self._cron()`` shadows it with its value.
        """
        parameters = []
        has_schedule = self._cron() is not None
        seen = set()
        for var, param in self.flow._get_parameters():
            # Throw an exception if the parameter is specified twice.
            norm = param.name.lower()
            if norm in seen:
                raise MetaflowException(
                    "Parameter *%s* is specified twice. "
                    "Note that parameter names are "
                    "case-insensitive." % param.name
                )
            seen.add(norm)
            is_required = param.kwargs.get("required", False)
            # Throw an exception if a schedule is set for a flow with required
            # parameters with no defaults. We currently don't have any notion
            if "default" not in param.kwargs and is_required and has_schedule:
                raise MetaflowException(
                    "The parameter *%s* does not have a "
                    "default and is required. Scheduling "
                    "such parameters via AWS Event Bridge "
                    "is not currently supported." % param.name
                )
            # Defaults may be deploy-time functions; evaluate them now.
            value = deploy_time_eval(param.kwargs.get("default"))
            parameters.append(dict(name=param.name, value=value))
        return parameters
    def _batch(self, node):
        """Build the AWS Batch job for *node* along with its env and attrs.

        Returns the Batch job object whose payload the enclosing Task state
        embeds. AWS Batch `parameters` double as the carrier for Step
        Functions state (split parent task ids, step names, ...) since they
        are the only free-form key-values in the Batch job specification.
        """
        attrs = {
            "metaflow.user": "SFN",
            "metaflow.owner": self.username,
            "metaflow.flow_name": self.flow.name,
            "metaflow.step_name": node.name,
            "metaflow.run_id.$": "$$.Execution.Name",
            # Functions lacks any notion of run-scoped task identifiers. We
            # instead co-opt the AWS Batch job id as the task id. This also
            # means that the AWS Batch job name will have missing fields since
            # the job id is determined at job execution, but since the job id is
            # part of the job description payload, we don't lose much except for
            # `$$.State.RetryCount` resolves to an int dynamically and
            # AWS Batch job specification only accepts strings. We handle
            # retries/catch within AWS Batch to get around this limitation.
            "metaflow.version": self.environment.get_environment_info()[
                "metaflow_version"
            ],
            # We rely on step names and task ids of parent steps to construct
            # input paths for a task. Since the only information we can pass
            # between states (via `InputPath` and `ResultPath`) in AWS Step
            # Functions is the job description, we run the risk of exceeding
            # 32K state size limit rather quickly if we don't filter the job
            # have to add the field again.
            # This pattern is repeated in a lot of other places, where we use
            # AWS Batch parameters to store AWS Step Functions state
            # information, since this field is the only field in the AWS Batch
            # specification that allows us to set key-values.
            "step_name": node.name,
        }
        # Store production token within the `start` step, so that subsequent
        # `step-functions create` calls can perform a rudimentary authorization
        # check.
        if node.name == "start":
            attrs["metaflow.production_token"] = self.production_token
        # Add env vars from the optional @environment decorator.
        env_deco = [deco for deco in node.decorators if deco.name == "environment"]
        env = {}
        if env_deco:
            env = env_deco[0].attributes["vars"]
        if node.name == "start":
            # Initialize parameters for the flow in the `start` step.
            parameters = self._process_parameters()
            if parameters:
                # Get user-defined parameters from State Machine Input.
                # Since AWS Step Functions doesn't allow for optional inputs
                env["METAFLOW_PARAMETERS"] = "$.Parameters"
                default_parameters = {}
                for parameter in parameters:
                    if parameter["value"] is not None:
                        default_parameters[parameter["name"]] = parameter["value"]
                env["METAFLOW_DEFAULT_PARAMETERS"] = json.dumps(default_parameters)
            # `start` has no parent tasks to read inputs from.
            input_paths = None
        else:
            # Foreach joins read their (many) parent task ids from DynamoDB
            # at runtime; everything else derives input paths statically.
            if (
                node.type == "join"
                and self.graph[node.split_parents[-1]].type == "foreach"
            ):
                input_paths = (
                    "sfn-${METAFLOW_RUN_ID}/%s/:"
                    "${METAFLOW_PARENT_TASK_IDS}" % node.in_funcs[0]
                )
                env["METAFLOW_SPLIT_PARENT_TASK_ID"] = (
                    "$.Parameters.split_parent_task_id_%s" % node.split_parents[-1]
                )
            else:
                if len(node.in_funcs) == 1:
                    input_paths = (
                        "sfn-${METAFLOW_RUN_ID}/%s/${METAFLOW_PARENT_TASK_ID}"
                        % node.in_funcs[0]
                    )
                    env["METAFLOW_PARENT_TASK_ID"] = "$.JobId"
                else:
                    # Static joins enumerate one step/task-id pair per parent.
                    input_paths = "sfn-${METAFLOW_RUN_ID}:" + ",".join(
                        "/${METAFLOW_PARENT_%s_STEP}/"
                        "${METAFLOW_PARENT_%s_TASK_ID}" % (idx, idx)
                        for idx, _ in enumerate(node.in_funcs)
                    )
                    for idx, _ in enumerate(node.in_funcs):
                        env["METAFLOW_PARENT_%s_TASK_ID" % idx] = "$.[%s].JobId" % idx
                        env["METAFLOW_PARENT_%s_STEP" % idx] = (
                            "$.[%s].Parameters.step_name" % idx
                        )
            env["METAFLOW_INPUT_PATHS"] = input_paths
            if node.is_inside_foreach:
                if any(self.graph[n].type == "foreach" for n in node.in_funcs):
                    attrs[
                        "split_parent_task_id_%s.$" % node.split_parents[-1]
                    ] = "$.SplitParentTaskId"
                    for parent in node.split_parents[:-1]:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.Parameters.split_parent_task_id_%s" % parent
                            )
                elif node.type == "join":
                    if self.graph[node.split_parents[-1]].type == "foreach":
                        # that information is available to us from AWS DynamoDB.
                        # This has a nice side-effect of making our foreach
                        # splits infinitely scalable because otherwise we would
                        # be bounded by the 32K state limit for the outputs. So,
                        # instead of referencing `Parameters` fields by index
                        # (like in `split-and`), we can just reference them
                        # directly.
                        attrs["split_parent_task_id_%s.$" % node.split_parents[-1]] = (
                            "$.Parameters.split_parent_task_id_%s"
                            % node.split_parents[-1]
                        )
                        for parent in node.split_parents[:-1]:
                            if self.graph[parent].type == "foreach":
                                attrs["split_parent_task_id_%s.$" % parent] = (
                                    "$.Parameters.split_parent_task_id_%s" % parent
                                )
                    else:
                        for parent in node.split_parents:
                            if self.graph[parent].type == "foreach":
                                attrs["split_parent_task_id_%s.$" % parent] = (
                                    "$.[0].Parameters.split_parent_task_id_%s" % parent
                                )
                else:
                    for parent in node.split_parents:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.Parameters.split_parent_task_id_%s" % parent
                            )
                # Set `METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN` if the
                # next transition is to a foreach join, so that the
                # stepfunctions decorator can write the mapping for input path
                # to DynamoDb.
                if any(
                    self.graph[n].type == "join"
                    and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                    for n in node.out_funcs
                ):
                    env["METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN"] = attrs[
                        "split_parent_task_id_%s.$"
                        % self.graph[node.out_funcs[0]].split_parents[-1]
                    ]
            # Set ttl for the values we set in AWS DynamoDB.
            if node.type == "foreach":
                if self.workflow_timeout:
                    env["METAFLOW_SFN_WORKFLOW_TIMEOUT"] = self.workflow_timeout
            # Handle split index for for-each.
            if any(self.graph[n].type == "foreach" for n in node.in_funcs):
                env["METAFLOW_SPLIT_INDEX"] = "$.Index"
        env["METAFLOW_CODE_URL"] = self.code_package_url
        env["METAFLOW_FLOW_NAME"] = attrs["metaflow.flow_name"]
        env["METAFLOW_STEP_NAME"] = attrs["metaflow.step_name"]
        env["METAFLOW_RUN_ID"] = attrs["metaflow.run_id.$"]
        env["METAFLOW_PRODUCTION_TOKEN"] = self.production_token
        env["SFN_STATE_MACHINE"] = self.name
        env["METAFLOW_OWNER"] = attrs["metaflow.owner"]
        # Can't set `METAFLOW_TASK_ID` due to lack of run-scoped identifiers.
        metadata_env = self.metadata.get_runtime_environment("step-functions")
        env.update(metadata_env)
        metaflow_version = self.environment.get_environment_info()
        metaflow_version["flow_name"] = self.graph.name
        metaflow_version["production_token"] = self.production_token
        env["METAFLOW_VERSION"] = json.dumps(metaflow_version)
        # Set AWS DynamoDb Table Name for state tracking for for-eaches.
        # There are three instances when metaflow runtime directly interacts
        # with AWS DynamoDB.
        # 1. To set the cardinality of foreaches (which are subsequently)
        # read prior to the instantiation of the Map state by AWS Step
        # Functions.
        # 2. To set the input paths from the parent steps of a foreach join.
        # 3. To read the input paths in a foreach join.
        if (
            node.type == "foreach"
            or (
                node.is_inside_foreach
                and any(
                    self.graph[n].type == "join"
                    and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                    for n in node.out_funcs
                )
            )
            or (
                node.type == "join"
                and self.graph[node.split_parents[-1]].type == "foreach"
            )
        ):
            if SFN_DYNAMO_DB_TABLE is None:
                raise StepFunctionsException(
                    "An AWS DynamoDB table is needed "
                    "to support foreach in your flow. "
                    "You can create one following the "
                    "instructions listed at *https://a"
                    "dmin-docs.metaflow.org/metaflow-o"
                    "n-aws/deployment-guide/manual-dep"
                    "loyment#scheduling* and "
                    "re-configure Metaflow using "
                    "*metaflow configure aws* on your "
                    "terminal."
                )
            env["METAFLOW_SFN_DYNAMO_DB_TABLE"] = SFN_DYNAMO_DB_TABLE
        # Resolve AWS Batch resource requirements.
        batch_deco = [deco for deco in node.decorators if deco.name == "batch"][0]
        resources = batch_deco.attributes
        # Resolve retry strategy.
        user_code_retries, total_retries = self._get_retries(node)
        task_spec = {
            "flow_name": attrs["metaflow.flow_name"],
            "step_name": attrs["metaflow.step_name"],
            "run_id": "sfn-$METAFLOW_RUN_ID",
            # Use AWS Batch job identifier as the globally unique
            # task identifier.
            "task_id": "$AWS_BATCH_JOB_ID",
            # Since retries are handled by AWS Batch, we can rely on
            # AWS_BATCH_JOB_ATTEMPT as the job counter.
            "retry_count": "$((AWS_BATCH_JOB_ATTEMPT-1))",
        }
        return (
            Batch(self.metadata, self.environment)
            .create_job(
                step_name=node.name,
                step_cli=self._step_cli(
                    node, input_paths, self.code_package_url, user_code_retries
                ),
                task_spec=task_spec,
                code_package_sha=self.code_package_sha,
                code_package_url=self.code_package_url,
                code_package_ds=self.flow_datastore.TYPE,
                image=resources["image"],
                queue=resources["queue"],
                iam_role=resources["iam_role"],
                execution_role=resources["execution_role"],
                cpu=resources["cpu"],
                gpu=resources["gpu"],
                memory=resources["memory"],
                run_time_limit=batch_deco.run_time_limit,
                shared_memory=resources["shared_memory"],
                max_swap=resources["max_swap"],
                swappiness=resources["swappiness"],
                env=env,
                attrs=attrs,
                host_volumes=resources["host_volumes"],
            )
            .attempts(total_retries + 1)
        )
def _get_retries(self, node):
max_user_code_retries = 0
max_error_retries = 0
# Different decorators may have different retrying strategies, so take
# the max of them.
for deco in node.decorators:
user_code_retries, error_retries = deco.step_task_retry_count()
max_user_code_retries = max(max_user_code_retries, user_code_retries)
max_error_retries = max(max_error_retries, error_retries)
return max_user_code_retries, max_user_code_retries + max_error_retries
    def _step_cli(self, node, paths, code_package_url, user_code_retries):
        """Assemble the shell command line that runs *node* inside AWS Batch.

        For the `start` step this also (idempotently) creates the special
        `_parameters` task; for foreach joins it first exports the parent
        task ids (read from DynamoDB) into the environment. Returns the
        individual commands joined with ``&&``.
        """
        cmds = []
        script_name = os.path.basename(sys.argv[0])
        executable = self.environment.executable(node.name)
        if R.use_r():
            entrypoint = [R.entrypoint()]
        else:
            entrypoint = [executable, script_name]
        # Use AWS Batch job identifier as the globally unique task identifier.
        task_id = "${AWS_BATCH_JOB_ID}"
        # FlowDecorators can define their own top-level options. They are
        # responsible for adding their own top-level options and values through
        # the get_top_level_options() hook. See similar logic in runtime.py.
        top_opts_dict = {}
        for deco in flow_decorators():
            top_opts_dict.update(deco.get_top_level_options())
        top_opts = list(dict_to_cli_options(top_opts_dict))
        if node.name == "start":
            # We need a separate unique ID for the special _parameters task
            task_id_params = "%s-params" % task_id
            # Export user-defined parameters into runtime environment
            param_file = "".join(
                random.choice(string.ascii_lowercase) for _ in range(10)
            )
            export_params = (
                "python -m "
                "metaflow.plugins.aws.step_functions.set_batch_environment "
                "parameters %s && . `pwd`/%s" % (param_file, param_file)
            )
            params = (
                entrypoint
                + top_opts
                + [
                    "--quiet",
                    "--metadata=%s" % self.metadata.TYPE,
                    "--environment=%s" % self.environment.TYPE,
                    "--datastore=s3",
                    "--event-logger=%s" % self.event_logger.logger_type,
                    "--monitor=%s" % self.monitor.monitor_type,
                    "--no-pylint",
                    "init",
                    "--run-id sfn-$METAFLOW_RUN_ID",
                    "--task-id %s" % task_id_params,
                ]
            )
            # Assign tags to run objects.
            if self.tags:
                params.extend("--tag %s" % tag for tag in self.tags)
            # If the start step gets retried, we must be careful not to
            # regenerate multiple parameters tasks. Hence we check first if
            # _parameters exists already.
            exists = entrypoint + [
                "dump",
                "--max-value-size=0",
                "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params),
            ]
            cmd = "if ! %s >/dev/null 2>/dev/null; then %s && %s; fi" % (
                " ".join(exists),
                export_params,
                " ".join(params),
            )
            cmds.append(cmd)
            paths = "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params)
        if node.type == "join" and self.graph[node.split_parents[-1]].type == "foreach":
            # Foreach joins source their parent task ids from DynamoDB via a
            # helper module before the step command runs.
            parent_tasks_file = "".join(
                random.choice(string.ascii_lowercase) for _ in range(10)
            )
            export_parent_tasks = (
                "python -m "
                "metaflow.plugins.aws.step_functions.set_batch_environment "
                "parent_tasks %s && . `pwd`/%s" % (parent_tasks_file, parent_tasks_file)
            )
            cmds.append(export_parent_tasks)
        top_level = top_opts + [
            "--quiet",
            "--metadata=%s" % self.metadata.TYPE,
            "--environment=%s" % self.environment.TYPE,
            "--datastore=%s" % self.flow_datastore.TYPE,
            "--datastore-root=%s" % self.flow_datastore.datastore_root,
            "--event-logger=%s" % self.event_logger.logger_type,
            "--monitor=%s" % self.monitor.monitor_type,
            "--no-pylint",
            "--with=step_functions_internal",
        ]
        step = [
            "step",
            node.name,
            "--run-id sfn-$METAFLOW_RUN_ID",
            "--task-id %s" % task_id,
            # Since retries are handled by AWS Batch, we can rely on
            # AWS_BATCH_JOB_ATTEMPT as the job counter.
            "--retry-count $((AWS_BATCH_JOB_ATTEMPT-1))",
            "--max-user-code-retries %d" % user_code_retries,
            "--input-paths %s" % paths,
            # Set decorator to batch to execute `task_*` hooks for batch
            # decorator.
            "--with=batch",
        ]
        if any(self.graph[n].type == "foreach" for n in node.in_funcs):
            # We set the `METAFLOW_SPLIT_INDEX` through JSONPath-foo
            # to pass the state from the parent DynamoDb state for for-each.
            step.append("--split-index $METAFLOW_SPLIT_INDEX")
        if self.tags:
            step.extend("--tag %s" % tag for tag in self.tags)
        if self.namespace is not None:
            step.append("--namespace=%s" % self.namespace)
        cmds.append(" ".join(entrypoint + top_level + step))
        return " && ".join(cmds)
class Workflow(object):
    """Fluent builder for a complete Step Functions state-machine document."""

    def __init__(self, name):
        self.name = name

        # Auto-vivifying nested dict so arbitrary payload paths can be set.
        def tree():
            return defaultdict(tree)

        self.payload = tree()

    def start_at(self, start_at):
        self.payload["StartAt"] = start_at
        return self

    def add_state(self, state):
        # States are keyed by their name under the "States" subtree.
        self.payload["States"][state.name] = state.payload
        return self

    def timeout_seconds(self, timeout_seconds):
        self.payload["TimeoutSeconds"] = timeout_seconds
        return self

    def to_json(self, pretty=False):
        indent = 4 if pretty else None
        return json.dumps(self.payload, indent=indent)
class State(object):
    """Fluent builder for a single Task state in the state machine."""

    def __init__(self, name):
        self.name = name

        # Auto-vivifying nested dict for the state payload.
        def tree():
            return defaultdict(tree)

        self.payload = tree()
        self.payload["Type"] = "Task"

    def _set(self, key, value):
        # Shared setter so every builder method returns self for chaining.
        self.payload[key] = value
        return self

    def resource(self, resource):
        return self._set("Resource", resource)

    def next(self, state):
        return self._set("Next", state)

    def end(self):
        return self._set("End", True)

    def parameter(self, name, value):
        self.payload["Parameters"][name] = value
        return self

    def output_path(self, output_path):
        return self._set("OutputPath", output_path)

    def result_path(self, result_path):
        return self._set("ResultPath", result_path)

    def _partition(self):
        # This is needed to support AWS Gov Cloud and AWS CN regions: the
        # partition (aws / aws-us-gov / aws-cn) is taken from the configured
        # IAM role ARN.
        return SFN_IAM_ROLE.split(":")[1]

    def batch(self, job):
        """Configure this state to submit *job* to AWS Batch synchronously."""
        self.resource("arn:%s:states:::batch:submitJob.sync" % self._partition())
        self.parameter("JobDefinition", job.payload["jobDefinition"])
        self.parameter("JobName", job.payload["jobName"])
        self.parameter("JobQueue", job.payload["jobQueue"])
        self.parameter("Parameters", job.payload["parameters"])
        self.parameter(
            "ContainerOverrides", to_pascalcase(job.payload["containerOverrides"])
        )
        self.parameter("RetryStrategy", to_pascalcase(job.payload["retryStrategy"]))
        self.parameter("Timeout", to_pascalcase(job.payload["timeout"]))
        # tags may not be present in all scenarios
        if "tags" in job.payload:
            self.parameter("Tags", job.payload["tags"])
        return self

    def dynamo_db(self, table_name, primary_key, values):
        """Configure this state as a consistent DynamoDB getItem call."""
        self.resource("arn:%s:states:::dynamodb:getItem" % self._partition())
        self.parameter("TableName", table_name)
        self.parameter("Key", {"pathspec": {"S.$": primary_key}})
        self.parameter("ConsistentRead", True)
        self.parameter("ProjectionExpression", values)
        return self
class Parallel(object):
    """Fluent builder for a Parallel state (static split-and branches)."""

    def __init__(self, name):
        self.name = name

        # Auto-vivifying nested dict for the state payload.
        def tree():
            return defaultdict(tree)

        self.payload = tree()
        self.payload["Type"] = "Parallel"

    def _set(self, key, value):
        # Shared setter so builder methods can be chained.
        self.payload[key] = value
        return self

    def branch(self, workflow):
        # Lazily create the branch list on first use.
        self.payload.setdefault("Branches", []).append(workflow.payload)
        return self

    def next(self, state):
        return self._set("Next", state)

    def output_path(self, output_path):
        return self._set("OutputPath", output_path)

    def result_path(self, result_path):
        return self._set("ResultPath", result_path)
class Map(object):
    """Fluent builder for a Map state -- the dynamic fan-out used by foreach."""

    def __init__(self, name):
        self.name = name

        # Auto-vivifying nested dict for the state payload.
        def tree():
            return defaultdict(tree)

        self.payload = tree()
        self.payload["Type"] = "Map"
        # 0 means unlimited concurrency unless max_concurrency() caps it.
        self.payload["MaxConcurrency"] = 0

    def _set(self, key, value):
        # Shared setter so builder methods can be chained.
        self.payload[key] = value
        return self

    def iterator(self, workflow):
        return self._set("Iterator", workflow.payload)

    def next(self, state):
        return self._set("Next", state)

    def items_path(self, items_path):
        return self._set("ItemsPath", items_path)

    def parameter(self, name, value):
        self.payload["Parameters"][name] = value
        return self

    def max_concurrency(self, max_concurrency):
        return self._set("MaxConcurrency", max_concurrency)

    def output_path(self, output_path):
        return self._set("OutputPath", output_path)

    def result_path(self, result_path):
        return self._set("ResultPath", result_path)
| true | true |
f71b5640c381e4a1a513cc6857ecd00c92aa7029 | 279 | py | Python | circle_core/writer/base.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | 3 | 2019-01-11T04:30:18.000Z | 2019-01-11T04:31:18.000Z | circle_core/writer/base.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | 16 | 2018-11-21T11:47:18.000Z | 2021-09-01T03:52:35.000Z | circle_core/writer/base.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | null | null | null | import abc
class DBWriter(metaclass=abc.ABCMeta):
    """Abstract interface for asynchronous database writers."""

    @abc.abstractmethod
    async def store(self, message_box, message) -> bool:
        """Persist *message* belonging to *message_box*.

        Returns a bool -- presumably True on success; confirm against
        concrete implementations.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def flush(self, flush_all=False) -> None:
        """Flush buffered writes; flush_all=True flushes everything pending."""
        raise NotImplementedError
| 21.461538 | 56 | 0.706093 | import abc
class DBWriter(metaclass=abc.ABCMeta):
    """Abstract base class that concrete async database writers implement."""

    @abc.abstractmethod
    async def store(self, message_box, message) -> bool:
        """Write one *message* for *message_box*; returns a success bool
        (exact semantics defined by implementations)."""
        raise NotImplementedError

    @abc.abstractmethod
    async def flush(self, flush_all=False) -> None:
        """Push buffered writes to the database; flush_all=True drains all."""
        raise NotImplementedError
| true | true |
f71b56535865b78456cbc8ac2192f63ee5287cfd | 588 | py | Python | src/UI_Code_Q2/Test_code_smaller_parts/PythonWriteUSB.py | KevinEwoudLee/HU3-UI | 16d63e0be8c515540daf4f9cfcff2d0a85c1cbab | [
"MIT"
] | 1 | 2019-12-11T15:27:53.000Z | 2019-12-11T15:27:53.000Z | src/UI_Code_Q2/Test_code_smaller_parts/PythonWriteUSB.py | KevinEwoudLee/HU3-UI | 16d63e0be8c515540daf4f9cfcff2d0a85c1cbab | [
"MIT"
] | null | null | null | src/UI_Code_Q2/Test_code_smaller_parts/PythonWriteUSB.py | KevinEwoudLee/HU3-UI | 16d63e0be8c515540daf4f9cfcff2d0a85c1cbab | [
"MIT"
] | 1 | 2019-12-11T15:23:56.000Z | 2019-12-11T15:23:56.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 09:36:21 2019
@author: kevin
"""
import os
import time
from time import sleep
from datetime import datetime
# Append to the CSV on the E: drive; the file is created if missing.
file = open("E:/test2.csv", "a")
i=0
# Write the header row only when the file is empty (first run).
if os.stat("E:/test2.csv").st_size == 0:
    file.write("Time,Sensor1,Sensor2,Sensor3,Sensor4,Sensor5\n")
# Emit one row per second, ten rows total.
while True:
    i=i+1
    now = datetime.now()
    # Columns: timestamp plus five dummy values derived from the counter.
    file.write(str(now)+","+str(i)+","+str(-i)+","+str(i-10)+","+str(i+5)+","+str(i*i)+"\n")
    # Flush so a concurrent reader sees each row immediately.
    file.flush()
    time.sleep(1)
    if (i>=10):
        break
file.close()
| 22.615385 | 97 | 0.52551 |
import os
import time
from time import sleep
from datetime import datetime
# Open the CSV in append mode (created on first run).
file = open("E:/test2.csv", "a")
i=0
# Header row is written only once, while the file is still empty.
if os.stat("E:/test2.csv").st_size == 0:
    file.write("Time,Sensor1,Sensor2,Sensor3,Sensor4,Sensor5\n")
# Write ten timestamped rows of counter-derived dummy sensor values,
# one per second.
while True:
    i=i+1
    now = datetime.now()
    file.write(str(now)+","+str(i)+","+str(-i)+","+str(i-10)+","+str(i+5)+","+str(i*i)+"\n")
    file.flush()
    time.sleep(1)
    if (i>=10):
        break
file.close()
| true | true |
f71b56ae1d0e79e35e8bd9e7c4c05e6ff33f45bf | 3,286 | py | Python | src/util/pos_util.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | src/util/pos_util.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | src/util/pos_util.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | import collections
from src.importer.known_jobs import KnownJobs
from src.preprocessing import preproc
from src.util import loe_util, jobtitle_util
mw_tokens = ['m/w', 'w/m', 'm/f', 'f/m',
'M/W', 'W/M', 'M/F', 'F/M']
def find_jobs(sentence):
    """Return ``[(job_name, source), ...]`` found in *sentence*.

    Sources: ``'known-job'`` (whitelist match), ``'mw'`` (m/w gender
    marker) and ``'loe'`` (level-of-employment percentage).
    """
    jobs = []
    # find known jobs
    for hit in find_job_by_keyword(sentence, KnownJobs()):
        jobs.append((hit, 'known-job'))
    # find by m/w patterns; percentages are stripped first so they don't
    # end up inside the expanded job name
    sentence_without_percentage = loe_util.remove_percentage(sentence)
    for hit in find_job_by_keyword(sentence_without_percentage, mw_tokens):
        jobs.append((hit, 'mw'))
    # find by percentages; m/w markers are stripped first
    sentence_without_mw = jobtitle_util.remove_mw(sentence)
    for hit in find_job_by_keyword(sentence_without_mw, loe_util.find_all_loe(sentence_without_mw)):
        jobs.append((hit, 'loe'))
    # find by gender forms (disabled)
    # sentence_without_mw_and_percentage = loe_util.remove_percentage(sentence_without_mw)
    # jobs += find_job_by_keyword(sentence_without_mw_and_percentage, ['/in', '/-in'])
    # search by keyword: gender
    # for match in jobtitle_util.find_all_genderized(sentence):
    #     gender_job = expand_left_right(sentence.split(match[0])[0], sentence)
    #     if gender_job:
    #         yield gender_job
    return jobs
def find_job_by_keyword(sentence, keywords):
    """Yield the expanded job name for every keyword occurring in *sentence*."""
    for candidate in keywords:
        if candidate not in sentence:
            continue
        expanded = expand_left_right(candidate, sentence)
        if expanded:
            yield expanded
def expand_left_right(token, sentence):
    """Expand *token* to the full job title surrounding it in *sentence*.

    POS-tags the sentence and greedily collects adjacent name parts (nouns
    and slashes) to the left and right of the token's position. The token
    itself is kept only when it is a real word, i.e. not an m/w marker and
    not a workload percentage.
    """
    if token not in sentence:
        return None
    job_name_tokens = preproc.to_words(token)
    # Parentheses are dropped so index calculations line up with POS tags.
    sentence_tokens = [word for word in preproc.to_words(sentence) if word not in ['(', ')']]
    ix_from, ix_to = calculate_positions(job_name_tokens, sentence_tokens)
    sentence_pos = preproc.pos_tag(sentence_tokens)
    left = sentence_pos[:ix_from]
    right = sentence_pos[ix_to:]
    initial_content = [token] if token not in mw_tokens and not loe_util.is_percentate(token) else []
    tokens = collections.deque(initial_content)
    # search_left/search_right mutate `tokens` in place.
    search_left(left, tokens)
    search_right(right, tokens)
    return ' '.join(tokens)
def search_left(pos_tagged_words, tokens=None):
    """Collect name parts right-to-left, prepending them to *tokens*.

    Walks *pos_tagged_words* from the end towards the start and prepends
    each word while it still qualifies as part of a job name; stops at the
    first non-name token. Mutates and returns *tokens*.

    Fix: the previous default ``tokens=collections.deque()`` was a mutable
    default argument shared across calls; a fresh deque is now created per
    call when the caller does not supply one.
    """
    if tokens is None:
        tokens = collections.deque()
    for word, pos_tag in reversed(pos_tagged_words):
        if not is_part_of_name(word, pos_tag):
            break
        tokens.appendleft(word)
    return tokens
def search_right(pos_tagged_words, tokens=None):
    """Collect name parts left-to-right, appending them to *tokens*.

    Walks *pos_tagged_words* from the start and appends each word while it
    still qualifies as part of a job name; stops at the first non-name
    token. Mutates and returns *tokens*.

    Fix: the previous default ``tokens=collections.deque()`` was a mutable
    default argument shared across calls; a fresh deque is now created per
    call when the caller does not supply one.
    """
    if tokens is None:
        tokens = collections.deque()
    for word, pos_tag in pos_tagged_words:
        if not is_part_of_name(word, pos_tag):
            break
        tokens.append(word)
    return tokens
def is_part_of_name(word, pos_tag):
    """True if the token can extend a job title: noun-like tag or a slash."""
    noun_like = is_noun(pos_tag)
    return noun_like or word == '/'
def is_noun(pos_tag):
    """True for noun-like tags: first character 'N' or 'F'."""
    first_char = pos_tag[0]
    return first_char == 'N' or first_char == 'F'
def is_punctuation(pos_tag):
    """True for punctuation tags, which begin with '$'."""
    return pos_tag[:1] == '$'
def calculate_positions(job_name_tokens, sentence_tokens):
    """Return ``(start, end)`` indices of the job-name span in the sentence.

    The start is the first sentence token that contains the first job-name
    token as a substring; the end is start + number of job-name tokens.
    """
    first = job_name_tokens[0]
    matches = [i for i, word in enumerate(sentence_tokens) if first in word]
    start = matches[0]
    return start, start + len(job_name_tokens)
| 30.71028 | 101 | 0.676811 | import collections
from src.importer.known_jobs import KnownJobs
from src.preprocessing import preproc
from src.util import loe_util, jobtitle_util
mw_tokens = ['m/w', 'w/m', 'm/f', 'f/m',
'M/W', 'W/M', 'M/F', 'F/M']
def find_jobs(sentence):
    """Return ``[(job_name, source_tag), ...]`` extracted from *sentence*.

    ``source_tag`` is ``'known-job'``, ``'mw'`` or ``'loe'`` depending on
    which heuristic produced the match.
    """
    jobs = []
    # Whitelist of known job titles.
    for hit in find_job_by_keyword(sentence, KnownJobs()):
        jobs.append((hit, 'known-job'))
    # Gender markers such as m/w; strip percentages first.
    sentence_without_percentage = loe_util.remove_percentage(sentence)
    for hit in find_job_by_keyword(sentence_without_percentage, mw_tokens):
        jobs.append((hit, 'mw'))
    # Level-of-employment percentages; strip m/w markers first.
    sentence_without_mw = jobtitle_util.remove_mw(sentence)
    for hit in find_job_by_keyword(sentence_without_mw, loe_util.find_all_loe(sentence_without_mw)):
        jobs.append((hit, 'loe'))
    return jobs
def find_job_by_keyword(sentence, keywords):
    """Yield expanded job names for each keyword found inside *sentence*."""
    for candidate in keywords:
        if candidate not in sentence:
            continue
        expanded = expand_left_right(candidate, sentence)
        if expanded:
            yield expanded
def expand_left_right(token, sentence):
    """Grow *token* into the surrounding job title within *sentence*.

    Collects adjacent nouns/slashes on both sides of the token's position
    in the POS-tagged sentence. The token itself is only included when it
    is neither an m/w marker nor a percentage.
    """
    if token not in sentence:
        return None
    job_name_tokens = preproc.to_words(token)
    # Drop parentheses so indices line up with the POS-tagged tokens.
    sentence_tokens = [word for word in preproc.to_words(sentence) if word not in ['(', ')']]
    ix_from, ix_to = calculate_positions(job_name_tokens, sentence_tokens)
    sentence_pos = preproc.pos_tag(sentence_tokens)
    left = sentence_pos[:ix_from]
    right = sentence_pos[ix_to:]
    initial_content = [token] if token not in mw_tokens and not loe_util.is_percentate(token) else []
    tokens = collections.deque(initial_content)
    # Both helpers mutate `tokens` in place.
    search_left(left, tokens)
    search_right(right, tokens)
    return ' '.join(tokens)
def search_left(pos_tagged_words, tokens=None):
    """Prepend consecutive name parts, scanning right-to-left.

    Stops at the first token that is not part of a job name. Mutates and
    returns *tokens*.

    Fix: replaces the shared mutable default ``collections.deque()`` with a
    per-call fresh deque.
    """
    if tokens is None:
        tokens = collections.deque()
    for word, pos_tag in reversed(pos_tagged_words):
        if not is_part_of_name(word, pos_tag):
            break
        tokens.appendleft(word)
    return tokens
def search_right(pos_tagged_words, tokens=None):
    """Append consecutive name parts, scanning left-to-right.

    Stops at the first token that is not part of a job name. Mutates and
    returns *tokens*.

    Fix: replaces the shared mutable default ``collections.deque()`` with a
    per-call fresh deque.
    """
    if tokens is None:
        tokens = collections.deque()
    for word, pos_tag in pos_tagged_words:
        if not is_part_of_name(word, pos_tag):
            break
        tokens.append(word)
    return tokens
def is_part_of_name(word, pos_tag):
    """A token continues a job title when noun-like or a literal slash."""
    noun_like = is_noun(pos_tag)
    return noun_like or word == '/'
def is_noun(pos_tag):
    """True when the tag's first character is 'N' or 'F' (noun-like)."""
    first_char = pos_tag[0]
    return first_char == 'N' or first_char == 'F'
def is_punctuation(pos_tag):
    """True for tags beginning with '$' (punctuation)."""
    return pos_tag[:1] == '$'
def calculate_positions(job_name_tokens, sentence_tokens):
    """Locate the job-name token span inside the tokenized sentence.

    The start index is the first sentence token that contains the first
    job-name token as a substring; the end index is start plus the number of
    job-name tokens (exclusive).  Raises IndexError when there is no match.
    """
    matches = [idx for idx, tok in enumerate(sentence_tokens) if job_name_tokens[0] in tok]
    start = matches[0]
    return start, start + len(job_name_tokens)
| true | true |
f71b56c4085612ca2aacc209126330286fb3b4f9 | 2,980 | py | Python | webvep/main/settings.py | IanVermes/vep_api | 9d9d31eddd969aad1c462278ea1f1fb09153e054 | [
"MIT"
] | null | null | null | webvep/main/settings.py | IanVermes/vep_api | 9d9d31eddd969aad1c462278ea1f1fb09153e054 | [
"MIT"
] | 1 | 2020-03-30T10:52:58.000Z | 2020-03-30T16:46:31.000Z | webvep/main/settings.py | IanVermes/vep_api | 9d9d31eddd969aad1c462278ea1f1fb09153e054 | [
"MIT"
] | null | null | null | """
Django settings for webvep project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "ec)mzu6ls4qaj!8)txrke(uxxtb1gmz^2a_^1$lqe9&ys17^!$"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "rest_framework",
    "webvep_frontend",
    "webvep_api",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# Root URL dispatcher for the project.
ROOT_URLCONF = "main.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "main.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# No DATABASES setting: this project deliberately runs without a database.
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
| 26.607143 | 91 | 0.711409 |
import os
# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "ec)mzu6ls4qaj!8)txrke(uxxtb1gmz^2a_^1$lqe9&ys17^!$"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "rest_framework",
    "webvep_frontend",
    "webvep_api",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# Root URL dispatcher for the project.
ROOT_URLCONF = "main.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "main.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# No DATABASES setting: this project deliberately runs without a database.
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
| true | true |
f71b57614025c4aecf90a7ea7cf4fe3d2b9c8499 | 77 | py | Python | zmon_aws_agent/__main__.py | jrake-revelant/zmon-aws-agent | 67ed3d0230f3bec9b3a3950c3eefa48404ee9d6b | [
"Apache-2.0"
] | 9 | 2016-07-28T09:28:20.000Z | 2022-02-27T23:40:09.000Z | zmon_aws_agent/__main__.py | jrake-revelant/zmon-aws-agent | 67ed3d0230f3bec9b3a3950c3eefa48404ee9d6b | [
"Apache-2.0"
] | 146 | 2016-05-23T13:43:42.000Z | 2020-03-10T09:47:07.000Z | zmon_aws_agent/__main__.py | jrake-revelant/zmon-aws-agent | 67ed3d0230f3bec9b3a3950c3eefa48404ee9d6b | [
"Apache-2.0"
] | 11 | 2017-02-18T12:46:27.000Z | 2020-01-22T13:13:36.000Z | from zmon_aws_agent.main import main
# Entry point when the package is executed directly (python -m zmon_aws_agent).
if __name__ == '__main__':
    main()
| 12.833333 | 36 | 0.701299 | from zmon_aws_agent.main import main
# Entry point when the package is executed directly (python -m zmon_aws_agent).
if __name__ == '__main__':
    main()
| true | true |
f71b584d40b3c272f7646b95950ee740aeb0fc1c | 5,834 | py | Python | tests/model_connectors/test_spawn_ets.py | macintoshpie/geojson-modelica-translator | 5ed02d53f06961b8d0f3705343368e4c920e7d7d | [
"BSD-3-Clause"
] | null | null | null | tests/model_connectors/test_spawn_ets.py | macintoshpie/geojson-modelica-translator | 5ed02d53f06961b8d0f3705343368e4c920e7d7d | [
"BSD-3-Clause"
] | null | null | null | tests/model_connectors/test_spawn_ets.py | macintoshpie/geojson-modelica-translator | 5ed02d53f06961b8d0f3705343368e4c920e7d7d | [
"BSD-3-Clause"
] | null | null | null | """
****************************************************************************************************
:copyright (c) 2019-2020 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import os
import shutil
import unittest
from pathlib import Path
from geojson_modelica_translator.geojson_modelica_translator import (
GeoJsonModelicaTranslator
)
from geojson_modelica_translator.model_connectors.spawnBui_ETS_Coupling import (
SpawnConnectorETS
)
from geojson_modelica_translator.modelica.modelica_runner import ModelicaRunner
from geojson_modelica_translator.system_parameters.system_parameters import (
SystemParameters
)
class SpawnModelConnectorSingleBuildingTest(unittest.TestCase):
    """Exercises SpawnConnectorETS on a GeoJSON with a single office building."""

    def setUp(self):
        here = os.path.dirname(__file__)
        self.data_dir = os.path.join(here, 'data')
        self.output_dir = os.path.join(here, 'output')
        project_name = "spawn_single"
        project_dir = os.path.join(self.output_dir, project_name)
        # Start from a clean scaffold for every run.
        if os.path.exists(project_dir):
            shutil.rmtree(project_dir)

        # Single-building GeoJSON -> translator -> scaffolded project tree.
        geojson_path = os.path.join(self.data_dir, "spawn_geojson_ex1.json")
        self.gj = GeoJsonModelicaTranslator.from_geojson(geojson_path)
        self.gj.scaffold_directory(self.output_dir, project_name)

        # Spawn/ETS connector wired up with the matching system parameters.
        sys_params_path = os.path.join(self.data_dir, "spawn_system_params_ex1.json")
        sys_params = SystemParameters(sys_params_path)
        self.spawn = SpawnConnectorETS(sys_params)
        for building in self.gj.buildings:
            self.spawn.add_building(building)

    def test_spawn_init(self):
        # The connector must exist and report the Spawn load model from the
        # system parameters.
        self.assertIsNotNone(self.spawn)
        self.assertEqual(self.spawn.system_parameters.get_param("buildings.custom")[0]["load_model"], "Spawn")

    def test_spawn_to_modelica_and_run(self):
        self.spawn.to_modelica(self.gj.scaffold)
        # The generated coupling model must simulate cleanly inside Docker.
        runner = ModelicaRunner()
        model_path = os.path.abspath(
            os.path.join(
                self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94020090', 'CouplingETS_SpawnBuilding.mo'
            )
        )
        run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
        exit_code = runner.run_in_docker(model_path, run_path=run_path)
        self.assertEqual(0, exit_code)
class SpawnModelConnectorTwoBuildingTest(unittest.TestCase):
    """Exercises SpawnConnectorETS on a GeoJSON containing two buildings."""

    def setUp(self):
        here = os.path.dirname(__file__)
        self.data_dir = os.path.join(here, 'data')
        self.output_dir = os.path.join(here, 'output')
        project_name = "spawn_two_building"
        project_dir = os.path.join(self.output_dir, project_name)
        # Start from a clean scaffold for every run.
        if os.path.exists(project_dir):
            shutil.rmtree(project_dir)

        # Two-building GeoJSON -> translator -> scaffolded project tree.
        geojson_path = os.path.join(self.data_dir, "spawn_geojson_ex2.json")
        self.gj = GeoJsonModelicaTranslator.from_geojson(geojson_path)
        self.gj.scaffold_directory(self.output_dir, project_name)

        # Spawn/ETS connector wired up with the matching system parameters.
        sys_params_path = os.path.join(self.data_dir, "spawn_system_params_ex2.json")
        sys_params = SystemParameters(sys_params_path)
        self.spawn = SpawnConnectorETS(sys_params)
        for building in self.gj.buildings:
            self.spawn.add_building(building)

    def test_spawn_to_modelica_and_run(self):
        self.spawn.to_modelica(self.gj.scaffold)
        # The generated coupling model must simulate cleanly inside Docker.
        runner = ModelicaRunner()
        model_path = os.path.abspath(
            os.path.join(
                self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94021950', 'CouplingETS_SpawnBuilding.mo'
            )
        )
        run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
        exit_code = runner.run_in_docker(model_path, run_path=run_path)
        self.assertEqual(0, exit_code)
| 44.876923 | 114 | 0.711176 |
import os
import shutil
import unittest
from pathlib import Path
from geojson_modelica_translator.geojson_modelica_translator import (
GeoJsonModelicaTranslator
)
from geojson_modelica_translator.model_connectors.spawnBui_ETS_Coupling import (
SpawnConnectorETS
)
from geojson_modelica_translator.modelica.modelica_runner import ModelicaRunner
from geojson_modelica_translator.system_parameters.system_parameters import (
SystemParameters
)
class SpawnModelConnectorSingleBuildingTest(unittest.TestCase):
    """Exercises SpawnConnectorETS on a GeoJSON with a single building."""
    def setUp(self):
        # Test fixtures live next to this file.
        self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.output_dir = os.path.join(os.path.dirname(__file__), 'output')
        project_name = "spawn_single"
        # Remove any scaffold left over from a previous run.
        if os.path.exists(os.path.join(self.output_dir, project_name)):
            shutil.rmtree(os.path.join(self.output_dir, project_name))
        filename = os.path.join(self.data_dir, "spawn_geojson_ex1.json")
        self.gj = GeoJsonModelicaTranslator.from_geojson(filename)
        self.gj.scaffold_directory(self.output_dir, project_name)
        filename = os.path.join(self.data_dir, "spawn_system_params_ex1.json")
        sys_params = SystemParameters(filename)
        # Connector under test, fed with every building from the GeoJSON.
        self.spawn = SpawnConnectorETS(sys_params)
        for b in self.gj.buildings:
            self.spawn.add_building(b)
    def test_spawn_init(self):
        # The connector reports the Spawn load model from the system parameters.
        self.assertIsNotNone(self.spawn)
        self.assertEqual(self.spawn.system_parameters.get_param("buildings.custom")[0]["load_model"], "Spawn")
    def test_spawn_to_modelica_and_run(self):
        self.spawn.to_modelica(self.gj.scaffold)
        # The generated model must simulate successfully via Docker (exit 0).
        mr = ModelicaRunner()
        file_to_run = os.path.abspath(
            os.path.join(
                self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94020090', 'CouplingETS_SpawnBuilding.mo'
            )
        )
        run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
        exitcode = mr.run_in_docker(file_to_run, run_path=run_path)
        self.assertEqual(0, exitcode)
class SpawnModelConnectorTwoBuildingTest(unittest.TestCase):
    """Exercises SpawnConnectorETS on a GeoJSON containing two buildings."""
    def setUp(self):
        # Test fixtures live next to this file.
        self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.output_dir = os.path.join(os.path.dirname(__file__), 'output')
        project_name = "spawn_two_building"
        # Remove any scaffold left over from a previous run.
        if os.path.exists(os.path.join(self.output_dir, project_name)):
            shutil.rmtree(os.path.join(self.output_dir, project_name))
        filename = os.path.join(self.data_dir, "spawn_geojson_ex2.json")
        self.gj = GeoJsonModelicaTranslator.from_geojson(filename)
        self.gj.scaffold_directory(self.output_dir, project_name)
        filename = os.path.join(self.data_dir, "spawn_system_params_ex2.json")
        sys_params = SystemParameters(filename)
        # Connector under test, fed with every building from the GeoJSON.
        self.spawn = SpawnConnectorETS(sys_params)
        for b in self.gj.buildings:
            self.spawn.add_building(b)
    def test_spawn_to_modelica_and_run(self):
        self.spawn.to_modelica(self.gj.scaffold)
        # The generated model must simulate successfully via Docker (exit 0).
        mr = ModelicaRunner()
        file_to_run = os.path.abspath(
            os.path.join(
                self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94021950', 'CouplingETS_SpawnBuilding.mo'
            )
        )
        run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
        exitcode = mr.run_in_docker(file_to_run, run_path=run_path)
        self.assertEqual(0, exitcode)
| true | true |
f71b58bc6969e1f4e15e0b32876c55ba66d9757e | 18,715 | py | Python | OLD/losses.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | 2 | 2017-11-17T06:55:44.000Z | 2019-06-11T13:07:05.000Z | OLD/losses.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | null | null | null | OLD/losses.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | null | null | null | import tensorflow as tf
import slim
FLAGS = tf.app.flags.FLAGS
def add_loss_summaries(total_loss):
  """Attach raw and moving-average scalar summaries to all collected losses.

  Args:
    total_loss: total loss tensor from loss().
  Returns:
    The op that updates the exponential moving averages of the losses.
  """
  ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = ema.apply(losses + [total_loss])
  for loss in losses + [total_loss]:
    # One summary for the raw value, one for its smoothed moving average.
    tf.summary.scalar(loss.op.name + ' (raw)', loss)
    tf.summary.scalar(loss.op.name, ema.average(loss))
  return loss_averages_op
def total_loss_sum(losses):
  """Sum the given losses together with all registered regularization losses."""
  reg_losses = tf.losses.get_regularization_losses()
  return tf.add_n(losses + reg_losses, name='total_loss')
def cross_entropy_loss(logits, labels):
  """Mean softmax cross-entropy over pixels that carry a valid label.

  Pixels whose label is >= FLAGS.num_classes are treated as ignore pixels and
  excluded before the loss is averaged.
  """
  print('loss: cross-entropy')
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    flat_labels = tf.reshape(labels, shape=[-1])
    flat_logits = tf.reshape(logits, [-1, FLAGS.num_classes])
    # Indices of pixels with a real class label (ignore id is out of range).
    valid_idx = tf.where(flat_labels < FLAGS.num_classes)
    # NOTE(review): the float round-trip before the gather is kept from the
    # original; it is a no-op for small integer label values.
    flat_labels = tf.to_float(flat_labels)
    flat_labels = tf.gather_nd(flat_labels, valid_idx)
    flat_labels = tf.to_int32(flat_labels)
    flat_logits = tf.gather_nd(flat_logits, valid_idx)
    onehot = tf.one_hot(flat_labels, FLAGS.num_classes)
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=onehot)
    return tf.reduce_mean(xent)
def weighted_cross_entropy_loss(logits, labels, class_hist=None, max_weight=1):
  """Cross-entropy averaged over labeled pixels with per-class weighting.

  In the current state the class-histogram balancing is commented out and the
  weights are effectively uniform (ones, with a trailing 0 for the ignore
  class), so only `max_weight == 1` is supported.

  Args:
    logits: unnormalized scores, reshapeable to [N, num_classes].
    labels: int class index per pixel; the ignore class maps to weight 0.
    class_hist: unused while balancing is disabled (kept for the interface).
    max_weight: must stay 1; larger values raise ValueError.
  """
  print('loss: cross-entropy')
  print('Using balanced loss with max weight = ', max_weight)
  num_pixels = -1
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[num_pixels])
    onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
    logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    #num_labels = tf.to_float(tf.reduce_sum(num_labels))
    #num_labels = tf.reduce_sum(tf.to_float(num_labels))
    #class_hist = tf.Print(class_hist, [class_hist], 'hist = ', summarize=30)
    # Ignore pixels produce an all-zero one-hot row, so this counts only
    # pixels with a valid label.
    num_labels = tf.reduce_sum(onehot_labels)
    #class_hist = tf.to_float(tf.reduce_sum(class_hist, axis=0))
    ##num_labels = tf.Print(num_labels, [num_labels, tf.reduce_sum(onehot_labels)], 'lab = ', summarize=30)
    #class_weights = num_labels / (class_hist + 1)
    ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)
    ## we need to append 0 here for ignore pixels
    #class_weights = tf.concat([class_weights, [0]], axis=0)
    ##class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)
    #class_weights = tf.minimum(tf.to_float(max_weight), class_weights)
    # Uniform class weights; the appended 0 zeroes out the ignore class.
    class_weights = tf.ones([FLAGS.num_classes])
    class_weights = tf.concat([class_weights, [0]], axis=0)
    #class_weights = tf.Print(class_weights, [class_weights], 'wgt hist = ', summarize=30)
    weights = tf.gather(class_weights, labels)
    if max_weight > 1:
      raise ValueError()
    wgt_sum = tf.reduce_sum(weights)
    norm_factor = num_labels / wgt_sum
    # weights need to sum to 1
    weights = tf.multiply(weights, norm_factor)
    xent = tf.multiply(weights, xent)
    #num_labels = tf.Print(num_labels, [num_labels, wgt_sum], 'num_labels = ')
    #xent = tf.Print(xent, [xent], 'num_labels = ')
    xent = tf.reduce_sum(xent) / num_labels
    return xent
def weighted_cross_entropy_loss_dense(logits, labels, weights=None,
                                      num_labels=None, max_weight=100):
  """Cross-entropy averaged over labeled pixels with dense per-pixel weights.

  The weights are clipped at *max_weight* and rescaled so that they sum to
  the number of labeled pixels before being applied to the per-pixel losses.
  """
  print('loss: cross-entropy')
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    flat_labels = tf.reshape(labels, shape=[-1])
    onehot = tf.one_hot(flat_labels, FLAGS.num_classes)
    flat_logits = tf.reshape(logits, [-1, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=onehot)
    if num_labels is None:
      # Ignore pixels have an all-zero one-hot row and are not counted.
      num_labels = tf.reduce_sum(onehot)
    else:
      num_labels = tf.reduce_sum(num_labels)
    print('Using balanced loss with max weight = ', max_weight)
    flat_weights = tf.reshape(weights, shape=[-1])
    flat_weights = tf.minimum(tf.to_float(max_weight), flat_weights)
    weight_sum = tf.reduce_sum(flat_weights)
    norm_factor = num_labels / weight_sum
    # Rescale so the weights sum to the number of labeled pixels.
    flat_weights = tf.multiply(flat_weights, norm_factor)
    xent = tf.multiply(flat_weights, xent)
    xent = tf.reduce_sum(xent) / num_labels
    print(xent)
    return xent
def cross_entropy_loss_old(logits, labels, weights, num_labels):
  """Legacy weighted cross-entropy: weights applied as-is, averaged by
  the externally supplied label count."""
  print('loss: cross-entropy')
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels, num_labels]):
    flat_labels = tf.reshape(labels, shape=[-1])
    onehot = tf.one_hot(flat_labels, FLAGS.num_classes)
    flat_logits = tf.reshape(logits, [-1, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=onehot)
    flat_weights = tf.reshape(weights, shape=[-1])
    xent = tf.multiply(flat_weights, xent)
    xent = tf.reduce_sum(xent) / tf.reduce_sum(num_labels)
    print(xent)
    return xent
def mse(yp, yt):
  """Mean squared error between predictions *yp* and targets *yt*."""
  num_values = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  with tf.name_scope('MeanSquareError'):
    flat_yt = tf.reshape(yt, shape=[num_values])
    flat_yp = tf.reshape(yp, shape=[num_values])
    return tf.reduce_mean(tf.square(flat_yt - flat_yp))
def weighted_cross_entropy_loss_deprecated(logits, labels, weights=None, max_weight=100):
  """Deprecated pixel-wise weighted cross-entropy loss.

  Args:
    logits: unnormalized class scores, reshapeable to [N, num_classes].
    labels: int class index per pixel, reshapeable to [N].
    weights: optional per-pixel weights, reshapeable to [N]; clipped at
      *max_weight* before being applied.
    max_weight: upper bound for any single pixel weight.
  Returns:
    Scalar loss: sum of (weighted) cross-entropies divided by label count.
  """
  print('loss: Weighted Cross Entropy Loss')
  shape = labels.get_shape().as_list()
  print(shape)
  num_examples = -1  # -1 lets reshape infer the flattened size
  with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[num_examples])
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
    num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    # log_softmax is numerically stable, unlike log(softmax(x)).
    log_softmax = tf.nn.log_softmax(logits_1d)
    xent = tf.reduce_sum(-tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
    # FIX: `weights != None` compared a tensor against None with `!=`;
    # identity checks against None must use `is not None`.
    if weights is not None:
      weights = tf.reshape(weights, shape=[num_examples])
      # FIX: tf.mul was removed in TF 1.0; the rest of this file uses tf.multiply.
      xent = tf.multiply(tf.minimum(tf.to_float(max_weight), weights), xent)
    total_loss = tf.div(tf.reduce_sum(xent), tf.to_float(num_labels), name='value')
    print(total_loss)
    return total_loss
def flip_xent_loss(logits, labels, weights, max_weight=10):
  """Weighted cross-entropy over a batch that holds an image and its mirror.

  Args:
    logits: class scores for both batch entries.
    labels: int labels, reshapeable to [batch * H * W].
    weights: per-pixel weights, clipped at *max_weight*.
    max_weight: upper bound for any single pixel weight.
  Returns:
    Scalar loss tensor named 'value'.
  """
  print('Loss: Weighted Cross Entropy Loss')
  # The batch is expected to contain exactly the image and its flipped copy.
  assert(FLAGS.batch_size == 2)
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  labels = tf.reshape(labels, shape=[num_examples])
  weights = tf.reshape(weights, shape=[num_examples])
  # FIX: tf.name_scope takes (name, default_name, values); the old call passed
  # the values list where default_name belongs.  Match the call style used by
  # the other loss functions in this file.
  with tf.name_scope(None, 'FlipXentLoss', [logits, labels]):
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
    num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    log_softmax = tf.nn.log_softmax(logits_1d)
    # FIX: tf.mul was removed in TF 1.0 -> tf.multiply.
    xent = tf.reduce_sum(tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
    weighted_xent = tf.multiply(tf.minimum(tf.to_float(max_weight), weights), xent)
    # Negated because xent above is sum(one_hot * log_softmax), which is <= 0.
    total_loss = - tf.div(tf.reduce_sum(weighted_xent), num_labels, name='value')
    return total_loss
def slim_cross_entropy_loss(logits, labels, num_labels):
  """Cross-entropy via slim's loss helper (registers in slim's collection)."""
  print('Loss: Cross Entropy Loss')
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  one_hot = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
  flat_logits = tf.reshape(logits, [num_examples, FLAGS.num_classes])
  return slim.losses.cross_entropy_loss(flat_logits, one_hot)
def softmax(logits):
  """Softmax over the class dimension, returned in [H, W, num_classes] layout."""
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  # FIX: tf.op_scope was removed in TF 1.0; use tf.name_scope with the
  # (name, default_name, values) argument order used elsewhere in this file.
  with tf.name_scope(None, 'Softmax', [logits]):
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    softmax_1d = tf.nn.softmax(logits_1d)
    softmax_2d = tf.reshape(softmax_1d, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_classes])
    return softmax_2d
def multiclass_hinge_loss(logits, labels, weights):
  """Weighted multiclass squared hinge loss over labeled pixels.

  Pixels with a negative label are treated as unlabeled and ignored.  For each
  remaining pixel the squared hinge max(0, s_j - s_y + 1)^2 is summed over the
  wrong classes j, weighted (clipped at 100), and averaged.
  """
  print('loss: Hinge loss')
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  num_classes = FLAGS.num_classes
  # FIX: tf.op_scope was removed in TF 1.0 -> tf.name_scope(name, default, values).
  with tf.name_scope(None, 'MulticlassHingeLoss', [logits, labels]):
    logits = tf.reshape(logits, [-1, num_classes])
    labels = tf.reshape(labels, [-1])
    weights = tf.reshape(weights, [-1])
    # Keep only labeled pixels (the ignore label is negative).
    select_mask = tf.greater_equal(labels, 0)
    logits = tf.boolean_mask(logits, select_mask)
    labels = tf.boolean_mask(labels, select_mask)
    weights = tf.boolean_mask(weights, select_mask)
    num_examples = tf.reduce_sum(tf.to_int32(select_mask))
    # Split each row into the ground-truth score (partition 1) and the
    # remaining class scores (partition 0).
    partitions = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0, dtype=tf.int32)
    num_partitions = 2
    scores, score_yt = tf.dynamic_partition(logits, partitions, num_partitions)
    scores = tf.reshape(scores, [-1, num_classes - 1])
    score_yt = tf.reshape(score_yt, [-1, 1])
    # Squared hinge with margin 1, summed over the wrong classes.
    hinge_loss = tf.square(tf.maximum(0.0, scores - score_yt + 1.0))
    hinge_loss = tf.reduce_sum(hinge_loss, 1)
    # FIX: tf.mul was removed in TF 1.0 -> tf.multiply.
    total_loss = tf.reduce_mean(tf.multiply(tf.minimum(100.0, weights), hinge_loss))
  return total_loss
def metric_hinge_loss(logits, labels, weights, num_labels):
  """Hinge loss on the L2 distance between l2-normalized logits and the
  one-hot target codes.

  NOTE(review): the original body contained `raise valueerror(1)` inside a
  loop over an undefined bare `num_classes`, so every call died with a
  NameError — the pairwise margin term was clearly never finished.  The
  unimplemented state is now reported explicitly; the remaining code is kept
  (with the removed tf.op_scope/tf.mul APIs modernized) for when it is.
  """
  print('loss: Hinge loss')
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  # FIX: tf.op_scope was removed in TF 1.0 -> tf.name_scope.
  with tf.name_scope(None, 'weightedhingeloss', [logits, labels]):
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    codes = tf.nn.l2_normalize(logits_1d, 1)
    l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))
    m = 0.2
    # FIX: was `for i in range(num_classes): for j in ...: raise valueerror(1)`
    # with both names undefined (NameError at call time).
    raise NotImplementedError(
        'metric_hinge_loss: pairwise margin term was never implemented')
    hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)
    # FIX: tf.mul was removed in TF 1.0 -> tf.multiply.
    total_loss = tf.reduce_sum(tf.multiply(weights, hinge_loss))
    total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')
    tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
    return total_loss
#def weighted_hinge_loss(logits, labels, weights, num_labels):
# print('Loss: Hinge Loss')
# num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
# with tf.op_scope([logits, labels], None, 'WeightedHingeLoss'):
# weights = tf.reshape(weights, shape=[num_examples])
# labels = tf.reshape(labels, shape=[num_examples])
# num_labels = tf.to_float(tf.reduce_sum(num_labels))
# one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
# one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
# logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
# #codes = tf.nn.softmax(logits_1d)
# codes = tf.nn.l2_normalize(logits_1d, 1)
# # works worse
# #l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))
# #m = 0.2
# l2_dist = tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1)
# m = 0.2 ** 2
# #m = 0.1 ** 2
# #m = 0.3 ** 2
# hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)
# total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))
#
# total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')
# tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
#
# #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)
# #tf.nn.l2_loss(t, name=None)
# return total_loss
def flip_xent_loss_symmetric(logits, labels, weights, num_labels):
  """Symmetric cross-entropy over an (image, flipped-image) pair.

  Combines the supervised cross-entropy on the first batch entry with a
  consistency term between the first entry's log-softmax and the softmax of
  the horizontally flipped second entry.  Registers the loss in slim's loss
  collection and returns it.
  """
  print('Loss: Weighted Cross Entropy Loss')
  num_examples = FLAGS.img_height * FLAGS.img_width
  # FIX: tf.op_scope was removed in TF 1.0 -> tf.name_scope(name, default, values).
  with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[2, num_examples])
    weights = tf.reshape(weights, shape=[2, num_examples])
    num_labels = tf.to_float(tf.reduce_sum(num_labels))
    # Entry 0 is the original image, entry 1 its horizontally flipped copy.
    logits_flip = logits[1,:,:,:]
    logits = logits[0,:,:,:]
    weights = weights[0,:]
    labels = labels[0,:]
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
    # Undo the flip so both predictions are in the same spatial frame.
    logits_flip = tf.image.flip_left_right(logits_flip)
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    logits_1d_flip = tf.reshape(logits_flip, [num_examples, FLAGS.num_classes])
    log_softmax = tf.nn.log_softmax(logits_1d)
    softmax_flip = tf.nn.softmax(logits_1d_flip)
    # FIX: tf.mul was removed in TF 1.0 -> tf.multiply (all occurrences).
    xent = tf.reduce_sum(tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
    weighted_xent = tf.multiply(tf.minimum(tf.to_float(100), weights), xent)
    # Consistency term: cross-entropy of the flipped prediction against the
    # original prediction (both <= 0, negated below together).
    xent_flip = tf.reduce_sum(tf.multiply(softmax_flip, log_softmax), 1)
    xent_flip = tf.multiply(tf.minimum(tf.to_float(100), weights), xent_flip)
    total_loss = - tf.div(tf.reduce_sum(weighted_xent) + tf.reduce_sum(xent_flip),
                          num_labels, name='value')
    tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
    return total_loss
| 42.924312 | 107 | 0.704996 | import tensorflow as tf
import slim
FLAGS = tf.app.flags.FLAGS
def add_loss_summaries(total_loss):
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
for l in losses + [total_loss]:
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def total_loss_sum(losses):
regularization_losses = tf.losses.get_regularization_losses()
total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
return total_loss
def cross_entropy_loss(logits, labels):
print('loss: cross-entropy')
num_pixels = -1
with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
labels = tf.reshape(labels, shape=[num_pixels])
logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
mask = labels < FLAGS.num_classes
idx = tf.where(mask)
= tf.gather_nd(labels, idx)
labels = tf.to_int32(labels)
logits = tf.gather_nd(logits, idx)
onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
= tf.one_hot(labels, FLAGS.num_classes)
logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
num_labels = tf.reduce_sum(onehot_labels)
sum
weights = tf.multiply(weights, norm_factor)
xent = tf.multiply(weights, xent)
xent = tf.reduce_sum(xent) / num_labels
return xent
def weighted_cross_entropy_loss_dense(logits, labels, weights=None,
num_labels=None, max_weight=100):
print('loss: cross-entropy')
num_pixels = -1
with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
labels = tf.reshape(labels, shape=[num_pixels])
onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
if num_labels is None:
num_labels = tf.reduce_sum(onehot_labels)
else:
num_labels = tf.reduce_sum(num_labels)
print('Using balanced loss with max weight = ', max_weight)
weights = tf.reshape(weights, shape=[num_pixels])
weights = tf.minimum(tf.to_float(max_weight), weights)
wgt_sum = tf.reduce_sum(weights)
norm_factor = num_labels / wgt_sum
weights = tf.multiply(weights, norm_factor)
xent = tf.multiply(weights, xent)
xent = tf.reduce_sum(xent) / num_labels
print(xent)
return xent
def cross_entropy_loss_old(logits, labels, weights, num_labels):
print('loss: cross-entropy')
num_pixels = -1
with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels, num_labels]):
labels = tf.reshape(labels, shape=[num_pixels])
onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
weights = tf.reshape(weights, shape=[num_pixels])
xent = tf.multiply(weights, xent)
xent = tf.reduce_sum(xent) / tf.reduce_sum(num_labels)
print(xent)
return xent
def mse(yp, yt):
num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
with tf.name_scope('MeanSquareError'):
yt = tf.reshape(yt, shape=[num_examples])
yp = tf.reshape(yp, shape=[num_examples])
return tf.reduce_mean(tf.square(yt - yp))
def weighted_cross_entropy_loss_deprecated(logits, labels, weights=None, max_weight=100):
print('loss: Weighted Cross Entropy Loss')
shape = labels.get_shape().as_list()
print(shape)
num_examples = -1
with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):
labels = tf.reshape(labels, shape=[num_examples])
one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))
logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
log_softmax = tf.nn.log_softmax(logits_1d)
xent = tf.reduce_sum(-tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
if weights != None:
weights = tf.reshape(weights, shape=[num_examples])
xent = tf.mul(tf.minimum(tf.to_float(max_weight), weights), xent)
total_loss = tf.div(tf.reduce_sum(xent), tf.to_float(num_labels), name='value')
print(total_loss)
return total_loss
def flip_xent_loss(logits, labels, weights, max_weight=10):
print('Loss: Weighted Cross Entropy Loss')
assert(FLAGS.batch_size == 2)
num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
labels = tf.reshape(labels, shape=[num_examples])
weights = tf.reshape(weights, shape=[num_examples])
with tf.name_scope('FlipXentLoss', [logits, labels]):
one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))
logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
log_softmax = tf.nn.log_softmax(logits_1d)
xent = tf.reduce_sum(tf.mul(tf.to_float(one_hot_labels), log_softmax), 1)
weighted_xent = tf.mul(tf.minimum(tf.to_float(max_weight), weights), xent)
total_loss = - tf.div(tf.reduce_sum(weighted_xent), num_labels, name='value')
return total_loss
def slim_cross_entropy_loss(logits, labels, num_labels):
print('Loss: Cross Entropy Loss')
num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
xent_loss = slim.losses.cross_entropy_loss(logits_1d, one_hot_labels)
return xent_loss
def softmax(logits):
num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
with tf.op_scope([logits], None, 'Softmax'):
logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
softmax_1d = tf.nn.softmax(logits_1d)
softmax_2d = tf.reshape(softmax_1d, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_classes])
return softmax_2d
def multiclass_hinge_loss(logits, labels, weights):
print('loss: Hinge loss')
num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
num_classes = FLAGS.num_classes
with tf.op_scope([logits, labels], None, 'MulticlassHingeLoss'):
logits = tf.reshape(logits, [-1, num_classes])
labels = tf.reshape(labels, [-1])
weights = tf.reshape(weights, [-1])
select_mask = tf.greater_equal(labels, 0)
logits = tf.boolean_mask(logits, select_mask)
labels = tf.boolean_mask(labels, select_mask)
weights = tf.boolean_mask(weights, select_mask)
num_examples = tf.reduce_sum(tf.to_int32(select_mask))
partitions = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0, dtype=tf.int32)
num_partitions = 2
scores, score_yt = tf.dynamic_partition(logits, partitions, num_partitions)
scores = tf.reshape(scores, [-1, num_classes - 1])
score_yt = tf.reshape(score_yt, [-1, 1])
hinge_loss = tf.square(tf.maximum(0.0, scores - score_yt + 1.0))
hinge_loss = tf.reduce_sum(hinge_loss, 1)
total_loss = tf.reduce_mean(tf.mul(tf.minimum(100.0, weights), hinge_loss))
return total_loss
def metric_hinge_loss(logits, labels, weights, num_labels):
print('loss: Hinge loss')
num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
with tf.op_scope([logits, labels], None, 'weightedhingeloss'):
one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
codes = tf.nn.l2_normalize(logits_1d, 1)
l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))
m = 0.2
for i in range(num_classes):
for j in range(num_classes):
raise valueerror(1)
hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)
total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))
total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')
tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
return total_loss
pyLoss'):
labels = tf.reshape(labels, shape=[2, num_examples])
weights = tf.reshape(weights, shape=[2, num_examples])
num_labels = tf.to_float(tf.reduce_sum(num_labels))
logits_flip = logits[1,:,:,:]
logits = logits[0,:,:,:]
weights = weights[0,:]
labels = labels[0,:]
one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
logits_flip = tf.image.flip_left_right(logits_flip)
logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
logits_1d_flip = tf.reshape(logits_flip, [num_examples, FLAGS.num_classes])
log_softmax = tf.nn.log_softmax(logits_1d)
softmax_flip = tf.nn.softmax(logits_1d_flip)
xent = tf.reduce_sum(tf.mul(tf.to_float(one_hot_labels), log_softmax), 1)
weighted_xent = tf.mul(tf.minimum(tf.to_float(100), weights), xent)
xent_flip = tf.reduce_sum(tf.mul(softmax_flip, log_softmax), 1)
xent_flip = tf.mul(tf.minimum(tf.to_float(100), weights), xent_flip)
total_loss = - tf.div(tf.reduce_sum(weighted_xent) + tf.reduce_sum(xent_flip),
num_labels, name='value')
tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
return total_loss
| true | true |
f71b599c49ef3382050c2d01eff0c192906c1d7b | 1,580 | py | Python | aiLogic/tankAI.py | JoelEager/pyTanks.Player | a35a653e9df2416c63204aba87a95f33e6815b63 | [
"MIT"
] | 2 | 2017-03-09T15:32:55.000Z | 2017-09-04T11:25:41.000Z | aiLogic/tankAI.py | JoelEager/pyTanks.Player | a35a653e9df2416c63204aba87a95f33e6815b63 | [
"MIT"
] | null | null | null | aiLogic/tankAI.py | JoelEager/pyTanks.Player | a35a653e9df2416c63204aba87a95f33e6815b63 | [
"MIT"
] | 4 | 2017-05-16T15:10:09.000Z | 2017-07-06T15:24:50.000Z | """
The player's AI code
Functions here are called by clock.py to run the AI code
"""
import random
import math
from clientLogic.logging import logPrint
from clientLogic import clientData, commands
def onConnect():
    """
    Hook run once when this player first connects to the server, before the
    tank's initial spawn.
    """
    # Publish a short description of this client for the server's status page.
    infoText = ("Python player instance running the example AI.\n"
                "Fork me at https://github.com/JoelEager/pyTanks.Player")
    commands.setInfo(infoText)
def onSpawn():
    """
    Hook invoked each time the tank spawns into a new game.

    This example implementation intentionally does nothing.
    """
    return None
def onTick(elapsedTime):
    """
    Called once every frame while the tank is alive

    Steering: if the tank is stopped (e.g. after a collision), turn to one of
    the 8 compass headings at random and start driving again.
    Shooting: with probability 1/5 per frame (when the gun is ready), aim at
    a randomly chosen living tank and fire.

    :param elapsedTime: The time elapsed, in seconds, since the last frame
        (not used by this example AI)
    """
    gs = clientData.gameState
    # Collided so try to get moving again
    if not gs.myTank.moving:
        # Random multiple of 45 degrees (pi / 4), i.e. one of 8 headings.
        commands.turn((math.pi / 4) * random.randint(0, 7))
        commands.go()
        logPrint("Turned and starting moving", 2)
    # Shooting logic
    if gs.myTank.canShoot and random.randint(0, 4) == 0:
        # Select a target
        random.shuffle(gs.tanks)
        for target in gs.tanks:
            if target.alive:
                # Do the math
                deltaX = abs(gs.myTank.x - target.x)
                # Guards the division below. NOTE(review): this returns from
                # onTick entirely instead of trying the next target -- confirm
                # whether `continue` was intended.
                if deltaX == 0: return
                deltaY = gs.myTank.y - target.y
                # atan over |deltaX| yields an angle in (-pi/2, pi/2); mirror
                # it when the target lies to the left to cover all quadrants.
                angle = math.atan(deltaY / deltaX)
                if target.x < gs.myTank.x:
                    angle = math.pi - angle
                commands.fire(angle)
                logPrint("Fired", 2)
                break
| 27.719298 | 92 | 0.588608 |
import random
import math
from clientLogic.logging import logPrint
from clientLogic import clientData, commands
def onConnect():
commands.setInfo("Python player instance running the example AI.\n" +
"Fork me at https://github.com/JoelEager/pyTanks.Player")
def onSpawn():
pass
def onTick(elapsedTime):
gs = clientData.gameState
if not gs.myTank.moving:
commands.turn((math.pi / 4) * random.randint(0, 7))
commands.go()
logPrint("Turned and starting moving", 2)
if gs.myTank.canShoot and random.randint(0, 4) == 0:
random.shuffle(gs.tanks)
for target in gs.tanks:
if target.alive:
deltaX = abs(gs.myTank.x - target.x)
if deltaX == 0: return
deltaY = gs.myTank.y - target.y
angle = math.atan(deltaY / deltaX)
if target.x < gs.myTank.x:
angle = math.pi - angle
commands.fire(angle)
logPrint("Fired", 2)
break
| true | true |
f71b5a856153c3564ce7371764011afc06ba93ae | 12,294 | py | Python | tests/unit/common_utils.py | dannielarriola/uai-coursebuilder | fbd440a8bfe1a928ac52985aea2949d5e91ad203 | [
"Apache-2.0"
] | null | null | null | tests/unit/common_utils.py | dannielarriola/uai-coursebuilder | fbd440a8bfe1a928ac52985aea2949d5e91ad203 | [
"Apache-2.0"
] | 27 | 2016-08-31T19:04:46.000Z | 2016-09-29T00:22:32.000Z | tests/unit/common_utils.py | dannielarriola/uai-coursebuilder | fbd440a8bfe1a928ac52985aea2949d5e91ad203 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common.utils."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
import os
import unittest
import appengine_config
from common import utils
class CommonUnitTests(unittest.TestCase):
    """Tests for the text<->list conversion helpers in common.utils.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced by ``assertEqual`` throughout.
    """
    # --------------------------- String-to-list.
    def test_list_parsing(self):
        """Tokens split on whitespace/commas, with optional [] bracketing."""
        self.assertListEqual(['foo'], utils.text_to_list('foo'))
        self.assertListEqual(['foo'], utils.text_to_list(' foo'))
        self.assertListEqual(['foo'], utils.text_to_list('foo '))
        self.assertListEqual(['foo'], utils.text_to_list(' foo '))
        self.assertListEqual(['foo'], utils.text_to_list('foo\t'))
        self.assertListEqual(['foo'], utils.text_to_list('\tfoo'))
        self.assertListEqual(['foo'], utils.text_to_list('\tfoo\t'))
        self.assertListEqual(['foo'], utils.text_to_list('foo   '))
        self.assertListEqual(['foo'], utils.text_to_list('   foo'))
        self.assertListEqual(['foo'], utils.text_to_list('   foo   '))
        self.assertListEqual(['foo'], utils.text_to_list('foo\n'))
        self.assertListEqual(['foo'], utils.text_to_list('\nfoo'))
        self.assertListEqual(['foo'], utils.text_to_list('\nfoo\n'))
        self.assertListEqual(['foo'], utils.text_to_list('foo,'))
        self.assertListEqual(['foo'], utils.text_to_list(',foo'))
        self.assertListEqual(['foo'], utils.text_to_list(',foo,'))
        self.assertListEqual(['foo'], utils.text_to_list(' foo ,\n'))
        self.assertListEqual(['foo'], utils.text_to_list('\tfoo,\t\n'))
        self.assertListEqual(['foo'], utils.text_to_list(',foo,\n'))
        self.assertListEqual(['foo'],
                             utils.text_to_list(
                                 '[foo]',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo'],
                             utils.text_to_list(
                                 '[foo],',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo'],
                             utils.text_to_list(
                                 '[foo], ', utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo'],
                             utils.text_to_list(
                                 '[foo],\n',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo'],
                             utils.text_to_list(
                                 '[foo], \n',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('foo bar'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('  foo   bar'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('foo   bar  '))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('foo\tbar'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('\tfoo\tbar'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('foo\tbar\t'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('foo\nbar\n'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('\nfoo\nbar\n'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list('\n foo\n bar\n'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list(' \n foo \n bar \n'))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list(
                                 '[foo][bar]',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list(
                                 ' [foo] [bar] ',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list(
                                 '\n[foo]\n[bar]\n',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
        self.assertListEqual(['foo', 'bar'],
                             utils.text_to_list(
                                 '\n,[foo],\n[bar],\n',
                                 utils.BACKWARD_COMPATIBLE_SPLITTER))
    def test_none_split(self):
        """None parses as the empty list."""
        self.assertListEqual([], utils.text_to_list(None))
    def test_empty_split(self):
        """The empty string parses as the empty list."""
        self.assertListEqual([], utils.text_to_list(''))
    def test_all_separators_split(self):
        """Separator-only input yields no tokens."""
        self.assertListEqual([], utils.text_to_list(' ,,, \t\t\n\t '))
    def test_one_item_split(self):
        self.assertListEqual(['x'], utils.text_to_list('x'))
    def test_join_none(self):
        """None joins to the empty string."""
        self.assertEqual('', utils.list_to_text(None))
    def test_join_empty(self):
        self.assertEqual('', utils.list_to_text([]))
    def test_join_one(self):
        self.assertEqual('x', utils.list_to_text(['x']))
    def test_join_two(self):
        self.assertEqual('x y', utils.list_to_text(['x', 'y']))
    def test_join_split(self):
        """list -> text -> list round-trips."""
        items = ['a', 'b', 'c']
        self.assertListEqual(items, utils.text_to_list(utils.list_to_text(items)))
    def test_split_join(self):
        """text -> list -> text round-trips for canonical input."""
        text = 'a b c'
        self.assertEqual(text, utils.list_to_text(utils.text_to_list(text)))
class ZipAwareOpenTests(unittest.TestCase):
    """Tests for utils.ZipAwareOpen, a context that lets open() see into zips.

    Fix: deprecated ``assertEquals`` (removed in Python 3.12) -> ``assertEqual``.
    """
    def test_find_in_lib_without_relative_path(self):
        """Every open() calling convention works inside the patched context."""
        path = os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'babel-0.9.6.zip',
            'babel', 'localedata', 'root.dat')
        with self.assertRaises(IOError):
            open(path)  # This fails.
        with utils.ZipAwareOpen():
            # Positional, positional-mode, and keyword forms must all be
            # supported by the patched open().
            data = open(path).read()
            self.assertEqual(17490, len(data))
            data = open(path, 'r').read()
            self.assertEqual(17490, len(data))
            data = open(path, mode='r').read()
            self.assertEqual(17490, len(data))
            data = open(name=path, mode='r').read()
            self.assertEqual(17490, len(data))
            data = open(name=path).read()
            self.assertEqual(17490, len(data))
        with self.assertRaises(IOError):
            open(path)  # This fails again; open has been reset to normal.
    def test_find_in_lib_with_relative_path(self):
        """Paths into a zip resolve while the patch is active."""
        path = os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'markdown-2.5.zip',
            'setup.cfg')
        with self.assertRaises(IOError):
            open(path)  # This fails.
        with utils.ZipAwareOpen():
            data = open(path).read()
            self.assertEqual(12, len(data))
class ParseTimedeltaTests(unittest.TestCase):
    """Tests for utils.parse_timedelta_string.

    Fix: deprecated ``assertEquals`` (removed in Python 3.12) -> ``assertEqual``.
    """
    def test_parse_empty_string(self):
        self.assertEqual(
            utils.parse_timedelta_string(''),
            datetime.timedelta())
    def test_parse_zero(self):
        self.assertEqual(
            utils.parse_timedelta_string('0'),
            datetime.timedelta())
    def test_parse_gibberish(self):
        """Unparseable input degrades to a zero timedelta."""
        self.assertEqual(
            utils.parse_timedelta_string('Amidst the mists and coldest frosts'),
            datetime.timedelta())
    def test_parse_leading_valid_partial_gibberish(self):
        """A valid leading quantity is honored; trailing junk is ignored."""
        self.assertEqual(
            utils.parse_timedelta_string(
                '5 days and a partridge in a pear tree'),
            datetime.timedelta(days=5))
    def test_parse_trailing_valid_partial_gibberish(self):
        """A valid trailing quantity is honored; leading junk is ignored."""
        self.assertEqual(
            utils.parse_timedelta_string('we will leave in 5 days'),
            datetime.timedelta(days=5))
    def test_parse_units(self):
        """Each supported unit parses in all its spellings."""
        for unit in ('week', 'day', 'hour', 'minute', 'second'):
            self._test_parse_units(unit)
    def _test_parse_units(self, unit):
        # Exercise abbreviated, singular, plural, and whitespace-laden forms.
        expected1 = datetime.timedelta(**{unit + 's': 1})
        expected2 = datetime.timedelta(**{unit + 's': 2})
        self.assertEqual(
            utils.parse_timedelta_string('1%s' % unit[0]), expected1)
        self.assertEqual(
            utils.parse_timedelta_string('1%s' % unit), expected1)
        self.assertEqual(
            utils.parse_timedelta_string('2%ss' % unit), expected2)
        self.assertEqual(
            utils.parse_timedelta_string('2 %s' % unit[0]), expected2)
        self.assertEqual(
            utils.parse_timedelta_string('1 %s' % unit), expected1)
        self.assertEqual(
            utils.parse_timedelta_string('2 %s' % unit), expected2)
        self.assertEqual(
            utils.parse_timedelta_string('2 \t\t\n %ss' % unit), expected2)
    def test_parse_out_of_bounds_handled_successfully(self):
        """Quantities larger than the next unit normalize correctly."""
        self.assertEqual(
            utils.parse_timedelta_string('86400s'),
            datetime.timedelta(days=1))
        self.assertEqual(
            utils.parse_timedelta_string('19d, 86400s'),
            datetime.timedelta(weeks=2, days=6))
    def test_parse_combinations(self):
        """Multiple units combine regardless of separator style."""
        self.assertEqual(
            utils.parse_timedelta_string('3w1d3m'),
            datetime.timedelta(weeks=3, days=1, minutes=3))
        self.assertEqual(
            utils.parse_timedelta_string('3w, 1d, 3m'),
            datetime.timedelta(weeks=3, days=1, minutes=3))
        self.assertEqual(
            utils.parse_timedelta_string('3 w 1 d 3 m'),
            datetime.timedelta(weeks=3, days=1, minutes=3))
        self.assertEqual(
            utils.parse_timedelta_string('3 weeks 1 day 3 minutes'),
            datetime.timedelta(weeks=3, days=1, minutes=3))
        self.assertEqual(
            utils.parse_timedelta_string('3 weeks, 1 day, 3 minutes'),
            datetime.timedelta(weeks=3, days=1, minutes=3))
class ValidateTimedeltaTests(unittest.TestCase):
    """Tests for utils.ValidateTimedelta.validate.

    Fix: deprecated ``assertEquals`` (removed in Python 3.12) -> ``assertEqual``.
    """
    def test_blank_is_allowed(self):
        errors = []
        utils.ValidateTimedelta.validate('', errors)
        self.assertEqual(0, len(errors))
    def test_none_is_allowed(self):
        errors = []
        utils.ValidateTimedelta.validate(None, errors)
        self.assertEqual(0, len(errors))
    def test_bare_numbers_not_allowed(self):
        """Numbers without a unit suffix must be rejected."""
        errors = []
        utils.ValidateTimedelta.validate('0', errors)
        self.assertEqual(1, len(errors))
        errors = []
        utils.ValidateTimedelta.validate('1', errors)
        self.assertEqual(1, len(errors))
        errors = []
        utils.ValidateTimedelta.validate('-1', errors)
        self.assertEqual(1, len(errors))
        errors = []
        utils.ValidateTimedelta.validate('100', errors)
        self.assertEqual(1, len(errors))
    def test_valid_items_allowed(self):
        """All supported unit suffixes and mixed casing validate cleanly."""
        errors = []
        utils.ValidateTimedelta.validate('1s', errors)
        utils.ValidateTimedelta.validate('2m', errors)
        utils.ValidateTimedelta.validate('3h', errors)
        utils.ValidateTimedelta.validate('4d', errors)
        utils.ValidateTimedelta.validate('5w', errors)
        utils.ValidateTimedelta.validate('5 Weeks, 1D,2HOURS 3 seconds',
                                         errors)
        self.assertEqual(0, len(errors))
    def test_invalid_items_disallowed(self):
        errors = []
        utils.ValidateTimedelta.validate('1t', errors)
        self.assertEqual(1, len(errors))
        errors = []
        utils.ValidateTimedelta.validate('1 year', errors)
        self.assertEqual(1, len(errors))
    def test_parse_months_gives_error(self):
        """'months' is intentionally unsupported (ambiguous length)."""
        errors = []
        utils.ValidateTimedelta.validate('3 months', errors)
        self.assertEqual(1, len(errors))
| 38.299065 | 80 | 0.577843 |
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
import os
import unittest
import appengine_config
from common import utils
class CommonUnitTests(unittest.TestCase):
def test_list_parsing(self):
self.assertListEqual(['foo'], utils.text_to_list('foo'))
self.assertListEqual(['foo'], utils.text_to_list(' foo'))
self.assertListEqual(['foo'], utils.text_to_list('foo '))
self.assertListEqual(['foo'], utils.text_to_list(' foo '))
self.assertListEqual(['foo'], utils.text_to_list('foo\t'))
self.assertListEqual(['foo'], utils.text_to_list('\tfoo'))
self.assertListEqual(['foo'], utils.text_to_list('\tfoo\t'))
self.assertListEqual(['foo'], utils.text_to_list('foo '))
self.assertListEqual(['foo'], utils.text_to_list(' foo'))
self.assertListEqual(['foo'], utils.text_to_list(' foo '))
self.assertListEqual(['foo'], utils.text_to_list('foo\n'))
self.assertListEqual(['foo'], utils.text_to_list('\nfoo'))
self.assertListEqual(['foo'], utils.text_to_list('\nfoo\n'))
self.assertListEqual(['foo'], utils.text_to_list('foo,'))
self.assertListEqual(['foo'], utils.text_to_list(',foo'))
self.assertListEqual(['foo'], utils.text_to_list(',foo,'))
self.assertListEqual(['foo'], utils.text_to_list(' foo ,\n'))
self.assertListEqual(['foo'], utils.text_to_list('\tfoo,\t\n'))
self.assertListEqual(['foo'], utils.text_to_list(',foo,\n'))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo]',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo],',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo], ', utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo],\n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo], \n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo bar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(' foo bar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo bar '))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo\tbar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('\tfoo\tbar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo\tbar\t'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo\nbar\n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('\nfoo\nbar\n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('\n foo\n bar\n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(' \n foo \n bar \n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
'[foo][bar]',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
' [foo] [bar] ',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
'\n[foo]\n[bar]\n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
'\n,[foo],\n[bar],\n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
def test_none_split(self):
self.assertListEqual([], utils.text_to_list(None))
def test_empty_split(self):
self.assertListEqual([], utils.text_to_list(''))
def test_all_separators_split(self):
self.assertListEqual([], utils.text_to_list(' ,,, \t\t\n\t '))
def test_one_item_split(self):
self.assertListEqual(['x'], utils.text_to_list('x'))
def test_join_none(self):
self.assertEquals('', utils.list_to_text(None))
def test_join_empty(self):
self.assertEquals('', utils.list_to_text([]))
def test_join_one(self):
self.assertEquals('x', utils.list_to_text(['x']))
def test_join_two(self):
self.assertEquals('x y', utils.list_to_text(['x', 'y']))
def test_join_split(self):
l = ['a', 'b', 'c']
self.assertListEqual(l, utils.text_to_list(utils.list_to_text(l)))
def test_split_join(self):
text = 'a b c'
self.assertEquals(text, utils.list_to_text(utils.text_to_list(text)))
class ZipAwareOpenTests(unittest.TestCase):
def test_find_in_lib_without_relative_path(self):
path = os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'babel-0.9.6.zip',
'babel', 'localedata', 'root.dat')
with self.assertRaises(IOError):
open(path)
with utils.ZipAwareOpen():
data = open(path).read()
self.assertEquals(17490, len(data))
data = open(path, 'r').read()
self.assertEquals(17490, len(data))
data = open(path, mode='r').read()
self.assertEquals(17490, len(data))
data = open(name=path, mode='r').read()
self.assertEquals(17490, len(data))
data = open(name=path).read()
self.assertEquals(17490, len(data))
with self.assertRaises(IOError):
open(path)
def test_find_in_lib_with_relative_path(self):
path = os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'markdown-2.5.zip',
'setup.cfg')
with self.assertRaises(IOError):
open(path)
with utils.ZipAwareOpen():
data = open(path).read()
self.assertEquals(12, len(data))
class ParseTimedeltaTests(unittest.TestCase):
def test_parse_empty_string(self):
self.assertEquals(
utils.parse_timedelta_string(''),
datetime.timedelta())
def test_parse_zero(self):
self.assertEquals(
utils.parse_timedelta_string('0'),
datetime.timedelta())
def test_parse_gibberish(self):
self.assertEquals(
utils.parse_timedelta_string('Amidst the mists and coldest frosts'),
datetime.timedelta())
def test_parse_leading_valid_partial_gibberish(self):
self.assertEquals(
utils.parse_timedelta_string(
'5 days and a partridge in a pear tree'),
datetime.timedelta(days=5))
def test_parse_trailing_valid_partial_gibberish(self):
self.assertEquals(
utils.parse_timedelta_string('we will leave in 5 days'),
datetime.timedelta(days=5))
def test_parse_units(self):
for unit in ('week', 'day', 'hour', 'minute', 'second'):
self._test_parse_units(unit)
def _test_parse_units(self, unit):
expected1 = datetime.timedelta(**{unit + 's': 1})
expected2 = datetime.timedelta(**{unit + 's': 2})
self.assertEquals(
utils.parse_timedelta_string('1%s' % unit[0]), expected1)
self.assertEquals(
utils.parse_timedelta_string('1%s' % unit), expected1)
self.assertEquals(
utils.parse_timedelta_string('2%ss' % unit), expected2)
self.assertEquals(
utils.parse_timedelta_string('2 %s' % unit[0]), expected2)
self.assertEquals(
utils.parse_timedelta_string('1 %s' % unit), expected1)
self.assertEquals(
utils.parse_timedelta_string('2 %s' % unit), expected2)
self.assertEquals(
utils.parse_timedelta_string('2 \t\t\n %ss' % unit), expected2)
def test_parse_out_of_bounds_handled_successfully(self):
self.assertEquals(
utils.parse_timedelta_string('86400s'),
datetime.timedelta(days=1))
self.assertEquals(
utils.parse_timedelta_string('19d, 86400s'),
datetime.timedelta(weeks=2, days=6))
def test_parse_combinations(self):
self.assertEquals(
utils.parse_timedelta_string('3w1d3m'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3w, 1d, 3m'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3 w 1 d 3 m'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3 weeks 1 day 3 minutes'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3 weeks, 1 day, 3 minutes'),
datetime.timedelta(weeks=3, days=1, minutes=3))
class ValidateTimedeltaTests(unittest.TestCase):
def test_blank_is_allowed(self):
errors = []
utils.ValidateTimedelta.validate('', errors)
self.assertEquals(0, len(errors))
def test_none_is_allowed(self):
errors = []
utils.ValidateTimedelta.validate(None, errors)
self.assertEquals(0, len(errors))
def test_bare_numbers_not_allowed(self):
errors = []
utils.ValidateTimedelta.validate('0', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('1', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('-1', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('100', errors)
self.assertEquals(1, len(errors))
def test_valid_items_allowed(self):
errors = []
utils.ValidateTimedelta.validate('1s', errors)
utils.ValidateTimedelta.validate('2m', errors)
utils.ValidateTimedelta.validate('3h', errors)
utils.ValidateTimedelta.validate('4d', errors)
utils.ValidateTimedelta.validate('5w', errors)
utils.ValidateTimedelta.validate('5 Weeks, 1D,2HOURS 3 seconds',
errors)
self.assertEquals(0, len(errors))
def test_invalid_items_disallowed(self):
errors = []
utils.ValidateTimedelta.validate('1t', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('1 year', errors)
self.assertEquals(1, len(errors))
def test_parse_months_gives_error(self):
errors = []
utils.ValidateTimedelta.validate('3 months', errors)
self.assertEquals(1, len(errors))
| true | true |
f71b5b1995d1ecc2a1ba880156e9343a02f0c212 | 578 | py | Python | bark_ml/commons/tracer.py | bark-simulator/rl | 84f9c74b60becbc4bc758e19b201d85a21880717 | [
"MIT"
] | null | null | null | bark_ml/commons/tracer.py | bark-simulator/rl | 84f9c74b60becbc4bc758e19b201d85a21880717 | [
"MIT"
] | null | null | null | bark_ml/commons/tracer.py | bark-simulator/rl | 84f9c74b60becbc4bc758e19b201d85a21880717 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 fortiss GmbH
#
# Authors: Patrick Hart
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import pickle
class Tracer:
  """The tracer can be used to log certain values during episodes.

  NOTE(review): Reset() (defined below this span) assigns
  self._trace_history = [], which clobbers the enable-flag rather than
  clearing self._states -- looks like a bug; confirm intent.
  """
  def __init__(self, states=None, trace_history=True):
    # NOTE(review): the `states` parameter is accepted but never used --
    # confirm whether pre-seeding self._states was intended.
    # Enable-flag: when falsy, Trace() records nothing.
    self._trace_history = trace_history
    # Chronological list of traced eval dicts.
    self._states = []
  def Trace(self, eval_dict):
    """Traces and stores a state."""
    # Only record while tracing is enabled.
    if self._trace_history:
      self._states.append(eval_dict)
def Reset(self):
self._trace_history = [] | 25.130435 | 69 | 0.698962 |
import pickle
class Tracer:
def __init__(self, states=None, trace_history=True):
self._trace_history = trace_history
self._states = []
def Trace(self, eval_dict):
if self._trace_history:
self._states.append(eval_dict)
def Reset(self):
self._trace_history = [] | true | true |
f71b5b6b59934a264b9a46047b70741641a2db51 | 10,504 | py | Python | tests/test_clone.py | mohammadroghani/django-clone | 603037194ae43f5e2eb96bd0aa159c1fbcf8c51c | [
"MIT"
] | null | null | null | tests/test_clone.py | mohammadroghani/django-clone | 603037194ae43f5e2eb96bd0aa159c1fbcf8c51c | [
"MIT"
] | null | null | null | tests/test_clone.py | mohammadroghani/django-clone | 603037194ae43f5e2eb96bd0aa159c1fbcf8c51c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Django Clone - https://github.com/mohammadroghani/django-clone
# Copyright © 2016 Mohammad Roghani <mohammadroghani43@gmail.com>
# Copyright © 2016 Amir Keivan Mohtashami <akmohtashami97@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django.test import TestCase
from django.utils import timezone
from django_clone.clone import Cloner
from tests.models import *
def get_information_list(object_list):
information_list = []
for object in object_list:
information_list.append((object.pk, object.__module__ + "." + object.__class__.__name__))
information_list.sort()
return information_list
class VersionControlTests(TestCase):
    """Exercises Cloner graph traversal and deep-copy behaviour.

    Fixtures are built from the test models (Question/Choice FK pair,
    Person M2M, Student/Group with a ``through`` Membership, and the
    A/B/C circular M2M triple) and compared via ``get_information_list``,
    which reduces an object set to sorted ``(pk, dotted-class)`` pairs.
    """

    def test_get_all_neighbor_objects(self):
        """Direct neighbours only: one hop along FK / M2M relations."""
        question = Question(question_text='question1', pub_date=timezone.now())
        question.save()
        choice = question.choice_set.create(choice_text='a', votes=0)
        c = Choice(question=question, choice_text='b', votes=0)
        c.save()
        choice.save()
        person = Person()
        person.save()
        person.questions.add(question)
        test_list = [(question.pk, question.__module__ + "." + question.__class__.__name__)]
        cloner = Cloner()
        # The person's only neighbour is the question; calling twice checks
        # the result is stable across repeated traversals of one Cloner.
        self.assertEqual(get_information_list(cloner.get_all_neighbor_objects(person)), test_list)
        self.assertEqual(get_information_list(cloner.get_all_neighbor_objects(person)), test_list)
        test_list.clear()
        # The question's neighbours are both choices plus the person.
        test_list = [(choice.pk, choice.__module__ + "." + choice.__class__.__name__), (c.pk, c.__module__ + "." + c.__class__.__name__), (person.pk, person.__module__ + "." + person.__class__.__name__)]
        test_list.sort()
        self.assertEqual(get_information_list(cloner.get_all_neighbor_objects(question)), test_list)

    def test_get_all_related_objects(self):
        """Transitive closure: every object in the same relation component."""
        question = Question(question_text='question1', pub_date=timezone.now())
        q1 = Question(question_text='q1', pub_date=timezone.now())
        q1.save()
        question.save()
        choice = question.choice_set.create(choice_text='a', votes=0)
        c = Choice(question=question, choice_text='b', votes=0)
        c.save()
        choice.save()
        c1 = q1.choice_set.create(choice_text='a', votes=0)
        c1.save()
        person = Person()
        person.save()
        person.questions.add(question)
        cloner = Cloner()
        # q1 and c1 form their own component, disjoint from question's.
        test_list = [(q1.pk, q1.__module__ + "." + q1.__class__.__name__), (c1.pk, c1.__module__ + "." + c1.__class__.__name__)]
        test_list.sort()
        self.assertEqual(get_information_list(cloner.get_all_related_object(q1)), test_list)
        self.assertEqual(get_information_list(cloner.get_all_related_object(c1)), test_list)
        test_list.clear()
        # question, both of its choices and the linked person all share one
        # component — the same set regardless of the entry object.
        test_list = [(question.pk, question.__module__ + "." + question.__class__.__name__), (c.pk, c.__module__ + "." + c.__class__.__name__), (choice.pk, choice.__module__ + "." + choice.__class__.__name__),
                     (person.pk, person.__module__ + "." + person.__class__.__name__)]
        test_list.sort()
        self.assertEqual(get_information_list(cloner.get_all_related_object(question)), test_list)
        self.assertEqual(get_information_list(cloner.get_all_related_object(c)), test_list)
        self.assertEqual(get_information_list(cloner.get_all_related_object(choice)), test_list)
        self.assertEqual(get_information_list(cloner.get_all_related_object(person)), test_list)
        # q1's own component must differ from question's component.
        self.assertNotEqual(get_information_list(Cloner().get_all_related_object(q1)), test_list)

    def test_get_all_related_objects_with_circular_relation(self):
        """A -> B -> C -> A cycle: traversal terminates and finds all three."""
        a_object = A()
        b_object = B()
        c_object = C()
        a_object.save()
        b_object.save()
        c_object.save()
        a_object.b.add(b_object)
        b_object.c.add(c_object)
        c_object.a.add(a_object)
        test_list = [(b_object.pk, b_object.__module__ + "." + b_object.__class__.__name__), (c_object.pk, c_object.__module__ + "." + c_object.__class__.__name__), (a_object.pk, a_object.__module__ + "." + a_object.__class__.__name__)]
        test_list.sort()
        cloner = Cloner()
        self.assertEqual(get_information_list(cloner.get_all_related_object(a_object)), test_list)
        self.assertEqual(get_information_list(cloner.get_all_related_object(b_object)), test_list)
        self.assertEqual(get_information_list(cloner.get_all_related_object(c_object)), test_list)

    def test_clone_with_one_object(self):
        """Cloning an isolated object copies fields but assigns a new pk."""
        question = Question(question_text='a', pub_date=timezone.now())
        question.save()
        q = Cloner().clone(question)
        self.assertNotEqual(q.pk, question.pk)
        self.assertEqual(q.question_text, question.question_text)
        self.assertEqual(q.pub_date, question.pub_date)

    def test_clone_with_foreign_key(self):
        """Cloning from either side of a FK duplicates the related object too."""
        question = Question(question_text='a', pub_date=timezone.now())
        question.save()
        choice = Choice(question=question, choice_text='c', votes=0)
        choice.save()
        cloner = Cloner()
        c = cloner.clone(choice)
        self.assertNotEqual(choice.id, c.id)
        # The clone points at a *new* question with the same content.
        self.assertNotEqual(choice.question.id, c.question.id)
        self.assertEqual(choice.question.question_text, c.question.question_text)
        q = cloner.clone(question)
        self.assertNotEqual(q.id, question.id)
        # The reverse relation is duplicated as well.
        self.assertNotEqual(question.choice_set.get(choice_text='c').pk, q.choice_set.get(choice_text='c').pk)

    def test_clone_with_ignore_list(self):
        """Models in ``ignored_models`` are shared, not duplicated."""
        question = Question(question_text='a', pub_date=timezone.now())
        question.save()
        choice = Choice(question=question, choice_text='c', votes=0)
        choice.save()
        c = Cloner(ignored_models=["tests.Question"]).clone(choice)
        self.assertNotEqual(choice.id, c.id)
        # The clone keeps a FK to the *original* question.
        self.assertEqual(choice.question.id, c.question.id)

    def test_clone_with_many_to_many_field(self):
        """M2M targets are duplicated along with the cloned owner."""
        question = Question(question_text='question1', pub_date=timezone.now())
        question.save()
        person = Person()
        person.save()
        person.questions.add(question)
        p = Cloner().clone(person)
        self.assertNotEqual(person.id, p.id)
        self.assertNotEqual(person.questions.get(question_text='question1').id,
                            p.questions.get(question_text='question1').id)

    def test_clone_many_to_many_field_with_repeated_instance(self):
        """Adding the same M2M target twice must not inflate the clone's count."""
        question = Question(question_text='question1', pub_date=timezone.now())
        question.save()
        person = Person()
        person.save()
        person.questions.add(question)
        person.questions.add(question)
        p = Cloner().clone(person)
        self.assertEqual(person.questions.all().count(), p.questions.all().count())

    def test_clone_with_through_field(self):
        """M2M with an explicit ``through`` model clones both directions."""
        student = Student(name='Ali')
        group = Group(name='ACM')
        student.save()
        group.save()
        membership = Membership(student=student, group=group)
        membership.save()
        g = Cloner().clone(group)
        self.assertNotEqual(g.id, group.id)
        self.assertNotEqual(group.members.get(name='Ali').id, g.members.get(name='Ali').id)
        s = Cloner().clone(student)
        self.assertNotEqual(s.id, student.id)
        self.assertNotEqual(student.group_set.get(name='ACM').id, s.group_set.get(name='ACM').id)

    def test_clone_many_to_many_field_with_through_field_and_repeated_instance(self):
        """Duplicate Membership rows are preserved exactly, from both sides."""
        student = Student(name='Ali')
        group = Group(name='ACM')
        student.save()
        group.save()
        membership1 = Membership(student=student, group=group)
        membership1.save()
        membership2 = Membership(student=student, group=group)
        membership2.save()
        g = Cloner().clone(group)
        self.assertEqual(g.members.all().count(), group.members.all().count())
        s = Cloner().clone(student)
        self.assertEqual(s.group_set.all().count(), student.group_set.all().count())

    def test_clone_subclass(self):
        """Cloning handles multi-table inheritance (BigChoice extends Choice)."""
        question = Question(question_text='a', pub_date=timezone.now())
        question.save()
        choice = BigChoice(question=question, choice_text='c', votes=0)
        choice.save()
        Cloner().clone(question)
        # One new Question and one new Choice row per clone.
        self.assertEqual(Question.objects.count(), 2)
        self.assertEqual(Choice.objects.count(), 2)
        Cloner().clone(choice)
        self.assertEqual(Question.objects.count(), 3)
        self.assertEqual(Choice.objects.count(), 3)

    def test_clone_subclass_explicit_relation(self):
        """Same as above with an explicit parent link (BigChoice2)."""
        question = Question(question_text='a', pub_date=timezone.now())
        question.save()
        choice = BigChoice2(question=question, choice_text='c', votes=0)
        choice.save()
        Cloner().clone(question)
        self.assertEqual(Question.objects.count(), 2)
        self.assertEqual(Choice.objects.count(), 2)
        Cloner().clone(choice)
        self.assertEqual(Question.objects.count(), 3)
        self.assertEqual(Choice.objects.count(), 3)

    def test_clone_unique(self):
        """A per-object editor callback can rewrite unique fields pre-save."""

        def unique_editor(obj):
            # Mutate the unique column so the clone does not collide with
            # the original's unique constraint.
            if isinstance(obj, BigChoice):
                obj.unique_value += "S"
            return obj

        question = Question(question_text='a', pub_date=timezone.now())
        question.save()
        choice = BigChoice(question=question, choice_text='c', votes=0, unique_value="S")
        choice.save()
        new_choice = Cloner().clone(choice, unique_editor)
        self.assertNotEqual(new_choice.pk, choice.pk)
| 47.315315 | 236 | 0.680217 |
from django.test import TestCase
from django.utils import timezone
from django_clone.clone import Cloner
from tests.models import *
def get_information_list(object_list):
information_list = []
for object in object_list:
information_list.append((object.pk, object.__module__ + "." + object.__class__.__name__))
information_list.sort()
return information_list
class VersionControlTests(TestCase):
def test_get_all_neighbor_objects(self):
question = Question(question_text='question1', pub_date=timezone.now())
question.save()
choice = question.choice_set.create(choice_text='a', votes=0)
c = Choice(question=question, choice_text='b', votes=0)
c.save()
choice.save()
person = Person()
person.save()
person.questions.add(question)
test_list = [(question.pk, question.__module__ + "." + question.__class__.__name__)]
cloner = Cloner()
self.assertEqual(get_information_list(cloner.get_all_neighbor_objects(person)), test_list)
self.assertEqual(get_information_list(cloner.get_all_neighbor_objects(person)), test_list)
test_list.clear()
test_list = [(choice.pk, choice.__module__ + "." + choice.__class__.__name__), (c.pk, c.__module__ + "." + c.__class__.__name__), (person.pk, person.__module__ + "." + person.__class__.__name__)]
test_list.sort()
self.assertEqual(get_information_list(cloner.get_all_neighbor_objects(question)), test_list)
def test_get_all_related_objects(self):
question = Question(question_text='question1', pub_date=timezone.now())
q1 = Question(question_text='q1', pub_date=timezone.now())
q1.save()
question.save()
choice = question.choice_set.create(choice_text='a', votes=0)
c = Choice(question=question, choice_text='b', votes=0)
c.save()
choice.save()
c1 = q1.choice_set.create(choice_text='a', votes=0)
c1.save()
person = Person()
person.save()
person.questions.add(question)
cloner = Cloner()
test_list = [(q1.pk, q1.__module__+ "." + q1.__class__.__name__), (c1.pk, c1.__module__ + "." + c1.__class__.__name__)]
test_list.sort()
self.assertEqual(get_information_list(cloner.get_all_related_object(q1)), test_list)
self.assertEqual(get_information_list(cloner.get_all_related_object(c1)), test_list)
test_list.clear()
test_list = [(question.pk, question.__module__ + "." + question.__class__.__name__), (c.pk, c.__module__ + "." + c.__class__.__name__), (choice.pk, choice.__module__ + "." + choice.__class__.__name__),
(person.pk, person.__module__ + "." + person.__class__.__name__)]
test_list.sort()
self.assertEqual(get_information_list(cloner.get_all_related_object(question)), test_list)
self.assertEqual(get_information_list(cloner.get_all_related_object(c)), test_list)
self.assertEqual(get_information_list(cloner.get_all_related_object(choice)), test_list)
self.assertEqual(get_information_list(cloner.get_all_related_object(person)), test_list)
self.assertNotEqual(get_information_list(Cloner().get_all_related_object(q1)), test_list)
def test_get_all_related_objects_with_circular_relation(self):
a_object = A()
b_object = B()
c_object = C()
a_object.save()
b_object.save()
c_object.save()
a_object.b.add(b_object)
b_object.c.add(c_object)
c_object.a.add(a_object)
test_list = [(b_object.pk, b_object.__module__ + "." + b_object.__class__.__name__), (c_object.pk, c_object.__module__ + "." + c_object.__class__.__name__), (a_object.pk, a_object.__module__ + "." + a_object.__class__.__name__)]
test_list.sort()
cloner = Cloner()
self.assertEqual(get_information_list(cloner.get_all_related_object(a_object)), test_list)
self.assertEqual(get_information_list(cloner.get_all_related_object(b_object)), test_list)
self.assertEqual(get_information_list(cloner.get_all_related_object(c_object)), test_list)
def test_clone_with_one_object(self):
question = Question(question_text='a', pub_date=timezone.now())
question.save()
q = Cloner().clone(question)
self.assertNotEqual(q.pk, question.pk)
self.assertEqual(q.question_text, question.question_text)
self.assertEqual(q.pub_date, question.pub_date)
def test_clone_with_foreign_key(self):
question = Question(question_text='a', pub_date=timezone.now())
question.save()
choice = Choice(question=question, choice_text='c', votes=0)
choice.save()
cloner = Cloner()
c = cloner.clone(choice)
self.assertNotEqual(choice.id, c.id)
self.assertNotEqual(choice.question.id, c.question.id)
self.assertEqual(choice.question.question_text, c.question.question_text)
q = cloner.clone(question)
self.assertNotEqual(q.id, question.id)
self.assertNotEqual(question.choice_set.get(choice_text='c').pk, q.choice_set.get(choice_text='c').pk)
def test_clone_with_ignore_list(self):
question = Question(question_text='a', pub_date=timezone.now())
question.save()
choice = Choice(question=question, choice_text='c', votes=0)
choice.save()
c = Cloner(ignored_models=["tests.Question"]).clone(choice)
self.assertNotEqual(choice.id, c.id)
self.assertEqual(choice.question.id, c.question.id)
def test_clone_with_many_to_many_field(self):
question = Question(question_text='question1', pub_date=timezone.now())
question.save()
person = Person()
person.save()
person.questions.add(question)
p = Cloner().clone(person)
self.assertNotEqual(person.id, p.id)
self.assertNotEqual(person.questions.get(question_text='question1').id,
p.questions.get(question_text='question1').id)
def test_clone_many_to_many_field_with_repeated_instance(self):
question = Question(question_text='question1', pub_date=timezone.now())
question.save()
person = Person()
person.save()
person.questions.add(question)
person.questions.add(question)
p = Cloner().clone(person)
self.assertEqual(person.questions.all().count(), p.questions.all().count())
def test_clone_with_through_field(self):
student = Student(name='Ali')
group = Group(name='ACM')
student.save()
group.save()
membership = Membership(student=student, group=group)
membership.save()
g = Cloner().clone(group)
self.assertNotEqual(g.id, group.id)
self.assertNotEqual(group.members.get(name='Ali').id, g.members.get(name='Ali').id)
s = Cloner().clone(student)
self.assertNotEqual(s.id, student.id)
self.assertNotEqual(student.group_set.get(name='ACM').id, s.group_set.get(name='ACM').id)
def test_clone_many_to_many_field_with_through_field_and_repeated_instance(self):
student = Student(name='Ali')
group = Group(name='ACM')
student.save()
group.save()
membership1 = Membership(student=student, group=group)
membership1.save()
membership2 = Membership(student=student, group=group)
membership2.save()
g = Cloner().clone(group)
self.assertEqual(g.members.all().count(), group.members.all().count())
s = Cloner().clone(student)
self.assertEqual(s.group_set.all().count(), student.group_set.all().count())
def test_clone_subclass(self):
question = Question(question_text='a', pub_date=timezone.now())
question.save()
choice = BigChoice(question=question, choice_text='c', votes=0)
choice.save()
Cloner().clone(question)
self.assertEqual(Question.objects.count(), 2)
self.assertEqual(Choice.objects.count(), 2)
Cloner().clone(choice)
self.assertEqual(Question.objects.count(), 3)
self.assertEqual(Choice.objects.count(), 3)
def test_clone_subclass_explicit_relation(self):
question = Question(question_text='a', pub_date=timezone.now())
question.save()
choice = BigChoice2(question=question, choice_text='c', votes=0)
choice.save()
Cloner().clone(question)
self.assertEqual(Question.objects.count(), 2)
self.assertEqual(Choice.objects.count(), 2)
Cloner().clone(choice)
self.assertEqual(Question.objects.count(), 3)
self.assertEqual(Choice.objects.count(), 3)
def test_clone_unique(self):
def unique_editor(obj):
if isinstance(obj, BigChoice):
obj.unique_value += "S"
return obj
question = Question(question_text='a', pub_date=timezone.now())
question.save()
choice = BigChoice(question=question, choice_text='c', votes=0, unique_value="S")
choice.save()
new_choice = Cloner().clone(choice, unique_editor)
self.assertNotEqual(new_choice.pk, choice.pk)
| true | true |
f71b5cd7fa3f30ff2ff0a5a2c5acbd05b042c711 | 497 | py | Python | examples/example_proj/dependency_app_o2o/migrations/0001_initial.py | philsupertramp/dj-migration-test | 97ec4513b9848d96436907de7940841866895e3c | [
"MIT"
] | 4 | 2019-07-05T19:32:07.000Z | 2020-02-07T00:47:15.000Z | examples/example_proj/dependency_app_o2o/migrations/0001_initial.py | philsupertramp/dj-migration-test | 97ec4513b9848d96436907de7940841866895e3c | [
"MIT"
] | 17 | 2019-08-23T07:21:23.000Z | 2021-09-22T18:44:26.000Z | examples/example_proj/dependency_app_o2o/migrations/0001_initial.py | philsupertramp/dj-migration-test | 97ec4513b9848d96436907de7940841866895e3c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-07-27 12:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for ``dependency_app_o2o``: creates the DepModO2O table."""

    # First migration of the app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DepModO2O',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): presumably a stand-in field for the example
                # project's migration tests — confirm against the model.
                ('placeholder', models.BooleanField(default=True)),
            ],
        ),
    ]
| 22.590909 | 114 | 0.583501 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DepModO2O',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('placeholder', models.BooleanField(default=True)),
],
),
]
| true | true |
f71b5d0be6cfd82d1e2beb6b6ec45e9a21282a6a | 1,163 | py | Python | src/bot.py | Shignum/ShiggyBot | 292d99300dea55848d1aa458c8b8893a8dd78fc2 | [
"MIT"
] | null | null | null | src/bot.py | Shignum/ShiggyBot | 292d99300dea55848d1aa458c8b8893a8dd78fc2 | [
"MIT"
] | null | null | null | src/bot.py | Shignum/ShiggyBot | 292d99300dea55848d1aa458c8b8893a8dd78fc2 | [
"MIT"
] | null | null | null | import os
from discord import Embed
from discord import Intents
from discord.ext import commands
from dotenv import load_dotenv
# Load configuration from the local .env file before reading any variables.
load_dotenv()

intents = Intents.default()
# Bug fix: ``intents`` was created but never passed to the Bot constructor,
# so the configured default intents had no effect. Pass them explicitly.
bot = commands.Bot(command_prefix=os.getenv('PREFIX'), intents=intents)
TOKEN = os.getenv('BOT_TOKEN')
@bot.event
async def on_ready():
    """Log once the gateway connection has been established."""
    print(f'{bot.user} has logged in.')

# Load every cog extension before the bot starts serving events.
# NOTE(review): placement reconstructed — assumed module level rather than
# inside on_ready (which would reload cogs on every reconnect); confirm.
initial_extensions = ['cogs.event', 'cogs.music', 'cogs.other', 'cogs.playlist']
for extension in initial_extensions:
    bot.load_extension(extension)
@bot.event
async def on_command_error(ctx, error):
    """Reply to common command errors with an embed; re-raise anything else."""
    # Resolve the user-facing message first, then send it in one place.
    if isinstance(error, commands.CommandNotFound):
        title = 'Command not found.'
    elif isinstance(error, commands.MissingRequiredArgument):
        title = 'Command needs an Argument.'
    elif isinstance(error, commands.CommandInvokeError):
        title = f'{error.original}'
    elif isinstance(error, commands.MissingPermissions):
        title = "You don't have the permission to use this command."
    else:
        # Unknown error type: propagate so the library logs it.
        raise error
    await ctx.send(embed=Embed(title=title))

bot.run(TOKEN)
| 29.075 | 95 | 0.715391 | import os
from discord import Embed
from discord import Intents
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
intents = Intents.default()
bot = commands.Bot(command_prefix=os.getenv('PREFIX'))
TOKEN = os.getenv('BOT_TOKEN')
@bot.event
async def on_ready():
print(f'{bot.user} has logged in.')
initial_extensions = ['cogs.event','cogs.music','cogs.other','cogs.playlist']
for extension in initial_extensions:
bot.load_extension(extension)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send(embed=Embed(title='Command not found.'))
return
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(embed=Embed(title='Command needs an Argument.'))
return
elif isinstance(error, commands.CommandInvokeError):
await ctx.send(embed=Embed(title=f'{error.original}'))
return
elif isinstance(error, commands.MissingPermissions):
await ctx.send(embed=Embed(title="You don't have the permission to use this command."))
return
raise error
bot.run(TOKEN)
| true | true |
f71b5d97598ff53100bfb2598cdb30dd30469fd8 | 10,221 | py | Python | client-py/iotdb/utils/IoTDBRpcDataSet.py | slawr/iotdb | 96b5269f0fc6e02927563d4481da3bfb310fc7b1 | [
"Apache-2.0"
] | null | null | null | client-py/iotdb/utils/IoTDBRpcDataSet.py | slawr/iotdb | 96b5269f0fc6e02927563d4481da3bfb310fc7b1 | [
"Apache-2.0"
] | 27 | 2021-10-19T09:41:40.000Z | 2022-03-30T16:22:17.000Z | client-py/iotdb/utils/IoTDBRpcDataSet.py | slawr/iotdb | 96b5269f0fc6e02927563d4481da3bfb310fc7b1 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# for package
import logging
from thrift.transport import TTransport
from iotdb.thrift.rpc.TSIService import TSFetchResultsReq, TSCloseOperationReq
from iotdb.utils.IoTDBConstants import TSDataType
logger = logging.getLogger("IoTDB")
class IoTDBRpcDataSet(object):
    """Client-side cursor over a Thrift query result set.

    Wraps the raw byte buffers returned by the IoTDB server
    (``TSQueryDataSet``) and deserialises one row per ``next()`` call,
    fetching further batches from the server on demand. Buffer consumption
    is simulated by re-slicing the ``bytes`` objects in place.
    """

    # Name of the synthetic time column prepended when timestamps are kept.
    TIMESTAMP_STR = "Time"
    # Column ordinal 1 is the time column; data columns start at 2.
    START_INDEX = 2
    # High bit mask used to probe the per-column null bitmap (one bit/row).
    FLAG = 0x80

    def __init__(
        self,
        sql,
        column_name_list,
        column_type_list,
        column_name_index,
        ignore_timestamp,
        query_id,
        client,
        session_id,
        query_data_set,
        fetch_size,
    ):
        """Build column bookkeeping and wrap the first result batch.

        ``column_name_index`` maps column name -> position in the
        deduplicated value buffers; when it is ``None``, positions are
        assigned in first-seen order instead.
        """
        self.__session_id = session_id
        self.__ignore_timestamp = ignore_timestamp
        self.__sql = sql
        self.__query_id = query_id
        self.__client = client
        self.__fetch_size = fetch_size
        self.__column_size = len(column_name_list)
        # Server-side timeout (ms) used by fetch_results.
        self.__default_time_out = 1000

        self.__column_name_list = []
        self.__column_type_list = []
        # Maps column name -> 1-based ordinal as seen by callers.
        self.__column_ordinal_dict = {}
        if not ignore_timestamp:
            self.__column_name_list.append(IoTDBRpcDataSet.TIMESTAMP_STR)
            self.__column_type_list.append(TSDataType.INT64)
            self.__column_ordinal_dict[IoTDBRpcDataSet.TIMESTAMP_STR] = 1

        if column_name_index is not None:
            # Server supplied buffer positions: duplicated columns share one
            # slot in the deduplicated type list.
            self.__column_type_deduplicated_list = [
                None for _ in range(len(column_name_index))
            ]
            for i in range(len(column_name_list)):
                name = column_name_list[i]
                self.__column_name_list.append(name)
                self.__column_type_list.append(TSDataType[column_type_list[i]])
                if name not in self.__column_ordinal_dict:
                    index = column_name_index[name]
                    self.__column_ordinal_dict[name] = (
                        index + IoTDBRpcDataSet.START_INDEX
                    )
                    self.__column_type_deduplicated_list[index] = TSDataType[
                        column_type_list[i]
                    ]
        else:
            # No index from the server: assign ordinals in first-seen order.
            index = IoTDBRpcDataSet.START_INDEX
            self.__column_type_deduplicated_list = []
            for i in range(len(column_name_list)):
                name = column_name_list[i]
                self.__column_name_list.append(name)
                self.__column_type_list.append(TSDataType[column_type_list[i]])
                if name not in self.__column_ordinal_dict:
                    self.__column_ordinal_dict[name] = index
                    index += 1
                    self.__column_type_deduplicated_list.append(
                        TSDataType[column_type_list[i]]
                    )

        # Per-row deserialisation state, filled by construct_one_row().
        self.__time_bytes = bytes(0)
        self.__current_bitmap = [
            bytes(0) for _ in range(len(self.__column_type_deduplicated_list))
        ]
        self.__value = [None for _ in range(len(self.__column_type_deduplicated_list))]
        self.__query_data_set = query_data_set
        self.__is_closed = False
        self.__empty_resultSet = False
        self.__has_cached_record = False
        # Row position within the current batch (drives bitmap byte stepping).
        self.__rows_index = 0

    def close(self):
        """Close the server-side query operation; idempotent.

        Raises RuntimeError if the Thrift call fails; marks the data set
        closed and drops the client reference either way only on success.
        """
        if self.__is_closed:
            return
        if self.__client is not None:
            try:
                status = self.__client.closeOperation(
                    TSCloseOperationReq(self.__session_id, self.__query_id)
                )
                logger.debug(
                    "close session {}, message: {}".format(
                        self.__session_id, status.message
                    )
                )
            except TTransport.TException as e:
                raise RuntimeError(
                    "close session {} failed because: ".format(self.__session_id), e
                )
        self.__is_closed = True
        self.__client = None

    def next(self):
        """Advance to the next row; return True if one is available."""
        if self.has_cached_result():
            self.construct_one_row()
            return True
        if self.__empty_resultSet:
            return False
        # Local cache exhausted: try to fetch another batch from the server.
        if self.fetch_results():
            self.construct_one_row()
            return True
        return False

    def has_cached_result(self):
        """Return True while undeserialised rows remain in the local batch."""
        return (self.__query_data_set is not None) and (
            len(self.__query_data_set.time) != 0
        )

    def construct_one_row(self):
        """Deserialise one row from the batch buffers into __time_bytes/__value."""
        # simulating buffer, read 8 bytes from data set and discard first 8 bytes which have been read.
        self.__time_bytes = self.__query_data_set.time[:8]
        self.__query_data_set.time = self.__query_data_set.time[8:]
        for i in range(len(self.__query_data_set.bitmapList)):
            bitmap_buffer = self.__query_data_set.bitmapList[i]

            # another 8 new rows, should move the bitmap buffer position to next byte
            if self.__rows_index % 8 == 0:
                self.__current_bitmap[i] = bitmap_buffer[0]
                self.__query_data_set.bitmapList[i] = bitmap_buffer[1:]
            # Null cells occupy no space in the value buffer; skip them.
            if not self.is_null(i, self.__rows_index):
                value_buffer = self.__query_data_set.valueList[i]
                data_type = self.__column_type_deduplicated_list[i]

                # simulating buffer: consume a fixed-width (or length-prefixed)
                # slice per type and keep the raw bytes for later decoding.
                if data_type == TSDataType.BOOLEAN:
                    self.__value[i] = value_buffer[:1]
                    self.__query_data_set.valueList[i] = value_buffer[1:]
                elif data_type == TSDataType.INT32:
                    self.__value[i] = value_buffer[:4]
                    self.__query_data_set.valueList[i] = value_buffer[4:]
                elif data_type == TSDataType.INT64:
                    self.__value[i] = value_buffer[:8]
                    self.__query_data_set.valueList[i] = value_buffer[8:]
                elif data_type == TSDataType.FLOAT:
                    self.__value[i] = value_buffer[:4]
                    self.__query_data_set.valueList[i] = value_buffer[4:]
                elif data_type == TSDataType.DOUBLE:
                    self.__value[i] = value_buffer[:8]
                    self.__query_data_set.valueList[i] = value_buffer[8:]
                elif data_type == TSDataType.TEXT:
                    # TEXT is a big-endian 4-byte length prefix plus payload.
                    length = int.from_bytes(
                        value_buffer[:4], byteorder="big", signed=False
                    )
                    self.__value[i] = value_buffer[4 : 4 + length]
                    self.__query_data_set.valueList[i] = value_buffer[4 + length :]
                else:
                    raise RuntimeError("unsupported data type {}.".format(data_type))
        self.__rows_index += 1
        self.__has_cached_record = True

    def fetch_results(self):
        """Request the next batch from the server; return True if rows exist.

        Resets the batch-local row index; raises RuntimeError on transport
        failure.
        """
        self.__rows_index = 0
        request = TSFetchResultsReq(
            self.__session_id,
            self.__sql,
            self.__fetch_size,
            self.__query_id,
            True,
            self.__default_time_out,
        )
        try:
            resp = self.__client.fetchResults(request)
            if not resp.hasResultSet:
                # Remember exhaustion so next() stops asking the server.
                self.__empty_resultSet = True
            else:
                self.__query_data_set = resp.queryDataSet
            return resp.hasResultSet
        except TTransport.TException as e:
            raise RuntimeError(
                "Cannot fetch result from server, because of network connection: ", e
            )

    def is_null(self, index, row_num):
        """Return True when cell (deduplicated column *index*, *row_num*) is null."""
        bitmap = self.__current_bitmap[index]
        shift = row_num % 8
        # A cleared bit in the bitmap byte marks a null cell.
        return ((IoTDBRpcDataSet.FLAG >> shift) & (bitmap & 0xFF)) == 0

    def is_null_by_index(self, column_index):
        """Return True if the current row's value at 1-based *column_index* is null."""
        index = (
            self.__column_ordinal_dict[self.find_column_name_by_index(column_index)]
            - IoTDBRpcDataSet.START_INDEX
        )
        # time column will never be None
        if index < 0:
            return True
        return self.is_null(index, self.__rows_index - 1)

    def is_null_by_name(self, column_name):
        """Return True if the current row's value for *column_name* is null."""
        index = self.__column_ordinal_dict[column_name] - IoTDBRpcDataSet.START_INDEX
        # time column will never be None
        if index < 0:
            return True
        return self.is_null(index, self.__rows_index - 1)

    def find_column_name_by_index(self, column_index):
        """Map a 1-based column index to its column name; raise if out of range."""
        if column_index <= 0:
            raise Exception("Column index should start from 1")
        if column_index > len(self.__column_name_list):
            raise Exception(
                "column index {} out of range {}".format(
                    column_index, self.__column_size
                )
            )
        return self.__column_name_list[column_index - 1]

    def get_fetch_size(self):
        """Return the number of rows requested per server fetch."""
        return self.__fetch_size

    def set_fetch_size(self, fetch_size):
        """Set the number of rows requested per server fetch."""
        self.__fetch_size = fetch_size

    def get_column_names(self):
        """Return the full (non-deduplicated) column name list."""
        return self.__column_name_list

    def get_column_types(self):
        """Return the TSDataType for each column, aligned with the names."""
        return self.__column_type_list

    def get_column_size(self):
        """Return the number of data columns (excluding the time column)."""
        return self.__column_size

    def get_ignore_timestamp(self):
        """Return True when the time column was excluded from the result."""
        return self.__ignore_timestamp

    def get_column_ordinal_dict(self):
        """Return the column name -> 1-based ordinal mapping."""
        return self.__column_ordinal_dict

    def get_column_type_deduplicated_list(self):
        """Return per-buffer data types (one entry per deduplicated column)."""
        return self.__column_type_deduplicated_list

    def get_values(self):
        """Return the raw byte slices of the current row's values."""
        return self.__value

    def get_time_bytes(self):
        """Return the current row's timestamp as its raw 8-byte slice."""
        return self.__time_bytes

    def get_has_cached_record(self):
        """Return True once at least one row has been deserialised."""
        return self.__has_cached_record
| 37.577206 | 103 | 0.604246 |
import logging
from thrift.transport import TTransport
from iotdb.thrift.rpc.TSIService import TSFetchResultsReq, TSCloseOperationReq
from iotdb.utils.IoTDBConstants import TSDataType
logger = logging.getLogger("IoTDB")
class IoTDBRpcDataSet(object):
TIMESTAMP_STR = "Time"
START_INDEX = 2
FLAG = 0x80
def __init__(
self,
sql,
column_name_list,
column_type_list,
column_name_index,
ignore_timestamp,
query_id,
client,
session_id,
query_data_set,
fetch_size,
):
self.__session_id = session_id
self.__ignore_timestamp = ignore_timestamp
self.__sql = sql
self.__query_id = query_id
self.__client = client
self.__fetch_size = fetch_size
self.__column_size = len(column_name_list)
self.__default_time_out = 1000
self.__column_name_list = []
self.__column_type_list = []
self.__column_ordinal_dict = {}
if not ignore_timestamp:
self.__column_name_list.append(IoTDBRpcDataSet.TIMESTAMP_STR)
self.__column_type_list.append(TSDataType.INT64)
self.__column_ordinal_dict[IoTDBRpcDataSet.TIMESTAMP_STR] = 1
if column_name_index is not None:
self.__column_type_deduplicated_list = [
None for _ in range(len(column_name_index))
]
for i in range(len(column_name_list)):
name = column_name_list[i]
self.__column_name_list.append(name)
self.__column_type_list.append(TSDataType[column_type_list[i]])
if name not in self.__column_ordinal_dict:
index = column_name_index[name]
self.__column_ordinal_dict[name] = (
index + IoTDBRpcDataSet.START_INDEX
)
self.__column_type_deduplicated_list[index] = TSDataType[
column_type_list[i]
]
else:
index = IoTDBRpcDataSet.START_INDEX
self.__column_type_deduplicated_list = []
for i in range(len(column_name_list)):
name = column_name_list[i]
self.__column_name_list.append(name)
self.__column_type_list.append(TSDataType[column_type_list[i]])
if name not in self.__column_ordinal_dict:
self.__column_ordinal_dict[name] = index
index += 1
self.__column_type_deduplicated_list.append(
TSDataType[column_type_list[i]]
)
self.__time_bytes = bytes(0)
self.__current_bitmap = [
bytes(0) for _ in range(len(self.__column_type_deduplicated_list))
]
self.__value = [None for _ in range(len(self.__column_type_deduplicated_list))]
self.__query_data_set = query_data_set
self.__is_closed = False
self.__empty_resultSet = False
self.__has_cached_record = False
self.__rows_index = 0
def close(self):
if self.__is_closed:
return
if self.__client is not None:
try:
status = self.__client.closeOperation(
TSCloseOperationReq(self.__session_id, self.__query_id)
)
logger.debug(
"close session {}, message: {}".format(
self.__session_id, status.message
)
)
except TTransport.TException as e:
raise RuntimeError(
"close session {} failed because: ".format(self.__session_id), e
)
self.__is_closed = True
self.__client = None
def next(self):
if self.has_cached_result():
self.construct_one_row()
return True
if self.__empty_resultSet:
return False
if self.fetch_results():
self.construct_one_row()
return True
return False
def has_cached_result(self):
return (self.__query_data_set is not None) and (
len(self.__query_data_set.time) != 0
)
    def construct_one_row(self):
        """Decode the next row from the current batch's raw byte buffers.

        Consumes 8 bytes of timestamp plus, for each non-null column, a
        type-dependent number of value bytes; one bitmap byte per column is
        consumed every 8 rows.
        """
        # Timestamp is the 8-byte prefix of the time buffer.
        self.__time_bytes = self.__query_data_set.time[:8]
        self.__query_data_set.time = self.__query_data_set.time[8:]
        for i in range(len(self.__query_data_set.bitmapList)):
            bitmap_buffer = self.__query_data_set.bitmapList[i]
            # One bitmap byte covers 8 rows; refresh it on every 8th row.
            if self.__rows_index % 8 == 0:
                self.__current_bitmap[i] = bitmap_buffer[0]
                self.__query_data_set.bitmapList[i] = bitmap_buffer[1:]
            if not self.is_null(i, self.__rows_index):
                value_buffer = self.__query_data_set.valueList[i]
                data_type = self.__column_type_deduplicated_list[i]
                # Slice the fixed-width raw value off the front of the buffer.
                if data_type == TSDataType.BOOLEAN:
                    self.__value[i] = value_buffer[:1]
                    self.__query_data_set.valueList[i] = value_buffer[1:]
                elif data_type == TSDataType.INT32:
                    self.__value[i] = value_buffer[:4]
                    self.__query_data_set.valueList[i] = value_buffer[4:]
                elif data_type == TSDataType.INT64:
                    self.__value[i] = value_buffer[:8]
                    self.__query_data_set.valueList[i] = value_buffer[8:]
                elif data_type == TSDataType.FLOAT:
                    self.__value[i] = value_buffer[:4]
                    self.__query_data_set.valueList[i] = value_buffer[4:]
                elif data_type == TSDataType.DOUBLE:
                    self.__value[i] = value_buffer[:8]
                    self.__query_data_set.valueList[i] = value_buffer[8:]
                elif data_type == TSDataType.TEXT:
                    # TEXT is length-prefixed: 4-byte unsigned big-endian
                    # length, then that many payload bytes.
                    length = int.from_bytes(
                        value_buffer[:4], byteorder="big", signed=False
                    )
                    self.__value[i] = value_buffer[4 : 4 + length]
                    self.__query_data_set.valueList[i] = value_buffer[4 + length :]
                else:
                    raise RuntimeError("unsupported data type {}.".format(data_type))
        self.__rows_index += 1
        self.__has_cached_record = True
def fetch_results(self):
self.__rows_index = 0
request = TSFetchResultsReq(
self.__session_id,
self.__sql,
self.__fetch_size,
self.__query_id,
True,
self.__default_time_out,
)
try:
resp = self.__client.fetchResults(request)
if not resp.hasResultSet:
self.__empty_resultSet = True
else:
self.__query_data_set = resp.queryDataSet
return resp.hasResultSet
except TTransport.TException as e:
raise RuntimeError(
"Cannot fetch result from server, because of network connection: ", e
)
def is_null(self, index, row_num):
bitmap = self.__current_bitmap[index]
shift = row_num % 8
return ((IoTDBRpcDataSet.FLAG >> shift) & (bitmap & 0xFF)) == 0
def is_null_by_index(self, column_index):
index = (
self.__column_ordinal_dict[self.find_column_name_by_index(column_index)]
- IoTDBRpcDataSet.START_INDEX
)
if index < 0:
return True
return self.is_null(index, self.__rows_index - 1)
def is_null_by_name(self, column_name):
index = self.__column_ordinal_dict[column_name] - IoTDBRpcDataSet.START_INDEX
if index < 0:
return True
return self.is_null(index, self.__rows_index - 1)
def find_column_name_by_index(self, column_index):
if column_index <= 0:
raise Exception("Column index should start from 1")
if column_index > len(self.__column_name_list):
raise Exception(
"column index {} out of range {}".format(
column_index, self.__column_size
)
)
return self.__column_name_list[column_index - 1]
    def get_fetch_size(self):
        """Number of rows requested from the server per fetch."""
        return self.__fetch_size

    def set_fetch_size(self, fetch_size):
        """Set the number of rows requested per fetch."""
        self.__fetch_size = fetch_size

    def get_column_names(self):
        """Output column names (including the timestamp column when exposed)."""
        return self.__column_name_list

    def get_column_types(self):
        """Output column types, parallel to ``get_column_names()``."""
        return self.__column_type_list

    def get_column_size(self):
        """Number of data columns (excludes the timestamp column)."""
        return self.__column_size

    def get_ignore_timestamp(self):
        """True when no timestamp column is exposed."""
        return self.__ignore_timestamp

    def get_column_ordinal_dict(self):
        """Mapping of column name -> 1-based output ordinal."""
        return self.__column_ordinal_dict

    def get_column_type_deduplicated_list(self):
        """Types of the deduplicated value buffers, in buffer order."""
        return self.__column_type_deduplicated_list

    def get_values(self):
        """Raw value byte slices of the current row, per deduplicated column."""
        return self.__value

    def get_time_bytes(self):
        """Raw 8-byte timestamp of the current row."""
        return self.__time_bytes

    def get_has_cached_record(self):
        """True once at least one row has been decoded."""
        return self.__has_cached_record
| true | true |
f71b5dbf84e94f967043c63798744db773956c70 | 2,822 | py | Python | awesome-bot.py | ksmirenko/awesome-irc-bot | 2d39da7efc3621d737bcec458fc0f50ee7189e05 | [
"MIT"
] | null | null | null | awesome-bot.py | ksmirenko/awesome-irc-bot | 2d39da7efc3621d737bcec458fc0f50ee7189e05 | [
"MIT"
] | null | null | null | awesome-bot.py | ksmirenko/awesome-irc-bot | 2d39da7efc3621d737bcec458fc0f50ee7189e05 | [
"MIT"
] | null | null | null | import re
import socket
import sys
import threading
from random import randint
host = 'irc.freenode.org'
port = 6667
nick = 'gabe_the_dog'
real_name = 'Gabe the dog'
channel = '#spbnet'
size = 2048
youtube_prefix = 'https://www.youtube.com/watch?v='
gabe_the_dog_sources = [
'i1H0leZhXcY',
'i11RMG_U3R4',
'xK6cUQQ9cJY',
'b2p8Zxmuq4g',
'iY4Ci0wg258',
'd6ysCgOu8N8',
'dvZGs9QRNIw',
'TsIZG5QbS1g',
'gwkRRED5WxY',
'oFRSLqpq9xk',
'h4-pHUVthf0',
'gIx6_Srsrog',
'eWu5eB62dT8',
'vwGnXKNGjT0',
'AeEH5ugJrUU',
'WCFnvj4Lztg',
'Gl1uq4tg7YU',
'rcIpIw4YtZk',
'9u9vlj8CgS0',
'gvOWADwCDNg',
'JtA_WnBP_Co',
'R78ZxZW_N-o',
'd1lth7uX02g',
'onZcB3y2RTM',
'j20cTvQYe6s',
'tVznLG3PAdM',
'muLAN-kP5pE',
'VJxNv2m7qns',
'y3PcelCeraw'
]
def send_cmd(sock, cmd):
    """Send a raw IRC command string over *sock*.

    Uses ``sendall`` so the whole command is transmitted even when the OS
    accepts only part of the buffer on the first write (plain ``send`` may
    return early), and encodes explicitly so the call also works on
    Python 3, where ``bytes(cmd)`` on a str raises TypeError.
    """
    sock.sendall(cmd.encode('utf-8'))
def connect():
    """Open a TCP connection to the configured IRC server and return it."""
    irc_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    irc_socket.connect((host, port))
    return irc_socket
def login(sock):
    """Identify with the server and join the configured channel."""
    for command in (
        "USER {0} * * :{1}\r\n".format(nick, real_name),
        "NICK {0}\r\n".format(nick),
        "JOIN {0}\r\n".format(channel),
    ):
        send_cmd(sock, command)
def send_msg(sock, msg):
    """Send *msg* as a PRIVMSG to the configured channel."""
    command = "PRIVMSG {} :{}\r\n".format(channel, msg)
    send_cmd(sock, command)
def magic(sock):
    """Post a random Gabe the Dog video link to the channel."""
    video_id = gabe_the_dog_sources[randint(0, len(gabe_the_dog_sources) - 1)]
    send_msg(sock, "Check this out: {}{}".format(youtube_prefix, video_id))
def send_routine(sock):
    """Read lines from stdin and relay them to the channel.

    A line starting with "/q" quits: it sends QUIT and closes the socket.
    """
    while True:
        msg = raw_input()
        if msg.startswith("/q"):
            # BUG FIX: IRC messages are CRLF-terminated; the bare "QUIT"
            # previously sat unterminated in the server's line buffer.
            send_cmd(sock, "QUIT\r\n")
            sock.close()
            return
        send_msg(sock, msg)
def receive_routine(sock):
    """Receive loop: answer PINGs, print traffic, run magic on request."""
    try:
        while True:
            text = str(sock.recv(size))
            if text.startswith("PING "):
                # Reply to keepalives so the server does not drop us.
                send_cmd(sock, "PONG {}".format(text[5:]))
                continue
            if len(text) > 1:
                print_message(text, "PRIVMSG" in text and channel in text)
                if "show some magic" in text and nick in text:
                    magic(sock)
    except Exception as e:
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt/SystemExit
        # and hid the actual failure; report the error alongside the old message.
        print("Disconnected!")
        print(e)
def print_message(msg, is_private):
    """Pretty-print a raw IRC line; unwrap nick/text for channel PRIVMSGs."""
    if not is_private:
        print(msg)
        return
    pattern = r":(.*)!.*PRIVMSG " + channel + r" :(.*)"
    sender_nick = re.sub(pattern, r"\1", msg)
    msg_text = re.sub(pattern, r"\2", msg)
    print("<{}>: {}".format(sender_nick[:-1], msg_text[:-1]))
def main():
    """Connect, log in, then run sender/receiver threads until both finish."""
    sock = connect()
    login(sock)
    print("Connected!")
    sender_thread = threading.Thread(target=send_routine, args=(sock,))
    receiver_thread = threading.Thread(target=receive_routine, args=(sock,))
    sender_thread.start()
    receiver_thread.start()
    # Block until both threads exit (i.e. the user quits or the socket drops).
    sender_thread.join()
    receiver_thread.join()
main()
| 22.576 | 84 | 0.592488 | import re
import socket
import sys
import threading
from random import randint
host = 'irc.freenode.org'
port = 6667
nick = 'gabe_the_dog'
real_name = 'Gabe the dog'
channel = '#spbnet'
size = 2048
youtube_prefix = 'https://www.youtube.com/watch?v='
gabe_the_dog_sources = [
'i1H0leZhXcY',
'i11RMG_U3R4',
'xK6cUQQ9cJY',
'b2p8Zxmuq4g',
'iY4Ci0wg258',
'd6ysCgOu8N8',
'dvZGs9QRNIw',
'TsIZG5QbS1g',
'gwkRRED5WxY',
'oFRSLqpq9xk',
'h4-pHUVthf0',
'gIx6_Srsrog',
'eWu5eB62dT8',
'vwGnXKNGjT0',
'AeEH5ugJrUU',
'WCFnvj4Lztg',
'Gl1uq4tg7YU',
'rcIpIw4YtZk',
'9u9vlj8CgS0',
'gvOWADwCDNg',
'JtA_WnBP_Co',
'R78ZxZW_N-o',
'd1lth7uX02g',
'onZcB3y2RTM',
'j20cTvQYe6s',
'tVznLG3PAdM',
'muLAN-kP5pE',
'VJxNv2m7qns',
'y3PcelCeraw'
]
def send_cmd(sock, cmd):
sock.send(bytes(cmd))
def connect():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return sock
def login(sock):
send_cmd(sock, "USER {0} * * :{1}\r\n".format(nick, real_name))
send_cmd(sock, "NICK {0}\r\n".format(nick))
send_cmd(sock, "JOIN {0}\r\n".format(channel))
def send_msg(sock, msg):
send_cmd(sock, "PRIVMSG {} :{}\r\n".format(channel, msg))
def magic(sock):
index = randint(0, len(gabe_the_dog_sources) - 1)
msg = "Check this out: {}{}".format(youtube_prefix, gabe_the_dog_sources[index])
send_msg(sock, msg)
def send_routine(sock):
while True:
msg = raw_input()
if msg.startswith("/q"):
send_cmd(sock, "QUIT")
sock.close()
return
send_msg(sock, msg)
def receive_routine(sock):
try:
while True:
text = str(sock.recv(size))
if text.startswith("PING "):
send_cmd(sock, "PONG {}".format(text[5:]))
continue
if len(text) > 1:
print_message(text, "PRIVMSG" in text and channel in text)
if "show some magic" in text and nick in text:
magic(sock)
except:
print("Disconnected!")
def print_message(msg, is_private):
if is_private:
sender_nick = re.sub(r":(.*)!.*PRIVMSG " + channel + r" :(.*)", r"\1", msg)
msg_text = re.sub(r":(.*)!.*PRIVMSG " + channel + r" :(.*)", r"\2", msg)
print("<{}>: {}".format(sender_nick[:-1], msg_text[:-1]))
else:
print(msg)
def main():
sock = connect()
login(sock)
print("Connected!")
sender_thread = threading.Thread(target=send_routine, args=(sock,))
receiver_thread = threading.Thread(target=receive_routine, args=(sock,))
sender_thread.start()
receiver_thread.start()
sender_thread.join()
receiver_thread.join()
main()
| true | true |
f71b5dcfe6e6dcab397ded91c0b2aed0f4eaaa39 | 3,380 | py | Python | drawSS.py | banroku/analySS | 15ba9e9216f86a1bf74062eae479a3ce1c9c5a11 | [
"MIT"
] | null | null | null | drawSS.py | banroku/analySS | 15ba9e9216f86a1bf74062eae479a3ce1c9c5a11 | [
"MIT"
] | null | null | null | drawSS.py | banroku/analySS | 15ba9e9216f86a1bf74062eae479a3ce1c9c5a11 | [
"MIT"
] | null | null | null | # coding=utf-8
def thinningSS(file, max_strain=10, interval=0.1):
    '''Thin out a stress-strain curve onto a regular strain grid.

    Converts the raw (mm, N) flexural-test curve of the first specimen to
    (%, MPa) and resamples it at strain values in ``[0, max_strain)`` with
    step ``interval`` (linear interpolation between measured points).
    Returns a pd.Series of stress indexed by strain, named after the sample.

    FILE should be passed as a dictionary containing:
        'name': name of sample like 'RL7785'
        'crv': path (relative) of the xxx_crv.csv file
        'rlt': path (relative) of the xxx_rlt.csv file
        'set': path (relative) of the xxx_set.csv file
    '''
    import pandas as pd
    import numpy as np

    # Read the curve and result-summary exports (Shift-JIS encoded).
    data = pd.read_csv(file['crv'], sep=',', encoding='shift_jis', skiprows=1, index_col=0)
    data_rlt = pd.read_csv(file['rlt'], sep=',', encoding='shift_jis')
    L = 64  # span [mm] of the 3-point bending fixture
    b = float(data_rlt.iloc[2, 3])  # width of first specimen [mm]
    h = float(data_rlt.iloc[2, 4])  # height of first specimen [mm]
    # Keep only the deflection [mm] and load [N] columns of the first specimen.
    col = ['mm', 'N']
    data = data.reindex(columns=col)
    data.dropna(subset=['mm'], inplace=True)

    # Convert (mm, N) to (%, MPa) with 3-point bending formulas:
    #   sigma = 3*F*L / (2*b*h^2)
    #   epsilon = 6*100*s*h / (L^2)
    # F: load, L: span = 64 mm, b: width, h: height, s: deflection in mm.
    data['strain'] = data['mm'] * 6 * 100 * h / L / L
    data['stress'] = data['N'] * 3 * L / (2 * b * h * h)

    # Data thinning: merge a regular strain grid into the measured curve,
    # interpolate stress at the grid points, then keep only grid rows.
    interval_steps = int(max_strain/interval)
    marker = pd.DataFrame({'strain': np.round(np.linspace(0, max_strain, interval_steps, endpoint=False), 2), 'marker': True})
    data_marked = pd.merge(data, marker, on='strain', how='outer')
    data_marked.rename(data_marked['strain'], inplace=True)
    data_marked.sort_values(by=['strain'], inplace=True)
    data_marked.interpolate(method='slinear', limit=1, inplace=True)
    # NOTE(review): fills with the *string* 'False' (which is truthy); it is
    # harmless because the filter below compares against True, but worth fixing.
    data_marked['marker'].fillna('False', inplace=True)
    data_skipped = data_marked[data_marked['marker']==True]
    thinnedSS = data_skipped['stress']
    thinnedSS.name = file['name']
    return thinnedSS
#%%
def parameters(file):
    """Pick summary parameters from a flexural-test result (_rlt) csv.

    Returns a pd.Series (named after the sample) with:
        parameters = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break',
                      'd_width', 'd_height', 'd_FM', 'd_FS_max', 'd_FS_break', 'd_FE_max', 'd_FE_break']
    where the plain names are the per-sample averages and the 'd_' names the
    standard deviations.

    FILE should be passed as a dictionary containing:
        'name': name of sample like 'RL7785'
        'crv': path (relative) of the xxx_crv.csv file
        'rlt': path (relative) of the xxx_rlt.csv file
        'set': path (relative) of the xxx_set.csv file
    """
    # BUG FIX: pandas was only imported inside thinningSS(), so this function
    # raised NameError on ``pd``; import it locally like its sibling does.
    import pandas as pd

    file_rlt = file['rlt']
    data_rlt = pd.read_csv(file_rlt, sep=',', skiprows=[1, 2], index_col=0, encoding='shift_jis')
    # Columns: width, height, flexural modulus, stress/strain at max and break.
    # The '.1' suffixes are pandas' mangling of the duplicated headers.
    source_columns = ['幅', '厚さ', '弾性率', '最大点', '破壊点', '最大点.1', '破壊点.1']
    data_rlt = data_rlt.loc[['単純平均', '標準偏差'], source_columns]
    data_rlt.index = ['average', 'stdev']
    data_rlt.columns = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break']
    # Flatten the 2x7 table row-major: averages first, then deviations.
    values = data_rlt.values
    flattened = [item for row in values for item in row]
    index_labels = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break',
                    'd_width', 'd_height', 'd_FM', 'd_FS_max', 'd_FS_break', 'd_FE_max', 'd_FE_break']
    result = pd.Series(flattened, index=index_labels)
    result.name = file['name']
    return result
def thinningSS(file, max_strain=10, interval=0.1):
import pandas as pd
import numpy as np
data = pd.read_csv(file['crv'], sep=',', encoding='shift_jis', skiprows=1, index_col=0)
data_rlt = pd.read_csv(file['rlt'], sep=',', encoding='shift_jis')
L = 64
b = float(data_rlt.iloc[2, 3])
h = float(data_rlt.iloc[2, 4])
= data.reindex(columns=col)
data.dropna(subset=['mm'], inplace=True)
data['strain'] = data['mm'] * 6 * 100 * h / L / L
data['stress'] = data['N'] * 3 * L / (2 * b * h * h)
interval_steps = int(max_strain/interval)
marker = pd.DataFrame({'strain': np.round(np.linspace(0, max_strain, interval_steps, endpoint=False), 2), 'marker': True})
data_marked = pd.merge(data, marker, on='strain', how='outer')
data_marked.rename(data_marked['strain'], inplace=True)
data_marked.sort_values(by=['strain'], inplace=True)
data_marked.interpolate(method='slinear', limit=1, inplace=True)
data_marked['marker'].fillna('False', inplace=True)
data_skipped = data_marked[data_marked['marker']==True]
thinnedSS = data_skipped['stress']
thinnedSS.name = file['name']
return thinnedSS
def parameters(file):
file_rlt = file['rlt']
data_rlt = pd.read_csv(file_rlt, sep=',', skiprows=[1,2], index_col=0, encoding='shift_jis')
parameters = ['幅', '厚さ', '弾性率', '最大点', '破壊点', '最大点.1', '破壊点.1']
data_rlt = data_rlt.loc[['単純平均', '標準偏差'], parameters]
data_rlt.index = ['average', 'stdev']
data_rlt.columns = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break']
data_rlt = data_rlt.values
data_flattened = [item for sublist in data_rlt for item in sublist]
parameters = ['width', 'height', 'FM', 'FS_max', 'FS_break', 'FE_max', 'FE_break',
'd_width', 'd_height', 'd_FM', 'd_FS_max', 'd_FS_break', 'd_FE_max', 'd_FE_break']
data_rlt = pd.Series(data_flattened, index=parameters)
data_rlt.name = file['name']
return data_rlt | true | true |
f71b5dd3b2f1f6ba21eafc9f59670a50d9efc222 | 207 | py | Python | sciencer/expanders/__init__.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | [
"MIT"
] | 2 | 2022-03-28T17:27:21.000Z | 2022-03-29T22:27:15.000Z | sciencer/expanders/__init__.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | [
"MIT"
] | null | null | null | sciencer/expanders/__init__.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | [
"MIT"
] | 1 | 2022-03-28T14:47:53.000Z | 2022-03-28T14:47:53.000Z | """Sciencer Expanders"""
from .expander import Expander
from .expand_by_authors import ExpandByAuthors
from .expand_by_citations import ExpandByCitations
from .expand_by_references import ExpandByReferences

# Declare the package's public API explicitly.
__all__ = [
    "Expander",
    "ExpandByAuthors",
    "ExpandByCitations",
    "ExpandByReferences",
]
| 34.5 | 52 | 0.864734 | from .expander import Expander
from .expand_by_authors import ExpandByAuthors
from .expand_by_references import ExpandByReferences
from .expand_by_citations import ExpandByCitations
| true | true |
f71b5e233cb62b6fa8ba747a25edcddd0d4c142f | 1,068 | py | Python | get-git-lfs.py | rcmurphy/pre-commit-hooks | 17fcaab5769b7628e872601d852d3dcf13c0930e | [
"MIT"
] | null | null | null | get-git-lfs.py | rcmurphy/pre-commit-hooks | 17fcaab5769b7628e872601d852d3dcf13c0930e | [
"MIT"
] | null | null | null | get-git-lfs.py | rcmurphy/pre-commit-hooks | 17fcaab5769b7628e872601d852d3dcf13c0930e | [
"MIT"
] | 1 | 2016-05-06T15:27:07.000Z | 2016-05-06T15:27:07.000Z | #!/usr/bin/env python3.4
"""This is a script to install git-lfs to a tempdir for use in tests"""
import io
import os.path
import shutil
import tarfile
from urllib.request import urlopen
DOWNLOAD_PATH = (
'https://github.com/github/git-lfs/releases/download/'
'v1.1.0/git-lfs-linux-amd64-1.1.0.tar.gz'
)
PATH_IN_TAR = 'git-lfs-1.1.0/git-lfs'
DEST_PATH = '/tmp/git-lfs/git-lfs'
DEST_DIR = os.path.dirname(DEST_PATH)
def main():
    """Download git-lfs into /tmp unless an executable copy is already there."""
    already_installed = (
        os.path.exists(DEST_PATH)
        and os.path.isfile(DEST_PATH)
        and os.access(DEST_PATH, os.X_OK)
    )
    if already_installed:
        print('Already installed!')
        return 0

    # Start from a clean destination directory.
    shutil.rmtree(DEST_DIR, ignore_errors=True)
    os.makedirs(DEST_DIR, exist_ok=True)

    archive = io.BytesIO(urlopen(DOWNLOAD_PATH).read())
    with tarfile.open(fileobj=archive) as tar:
        with tar.extractfile(PATH_IN_TAR) as src_file:
            with open(DEST_PATH, 'wb') as dest_file:
                shutil.copyfileobj(src_file, dest_file)
    os.chmod(DEST_PATH, 0o755)
if __name__ == '__main__':
exit(main())
| 27.384615 | 71 | 0.661985 |
import io
import os.path
import shutil
import tarfile
from urllib.request import urlopen
DOWNLOAD_PATH = (
'https://github.com/github/git-lfs/releases/download/'
'v1.1.0/git-lfs-linux-amd64-1.1.0.tar.gz'
)
PATH_IN_TAR = 'git-lfs-1.1.0/git-lfs'
DEST_PATH = '/tmp/git-lfs/git-lfs'
DEST_DIR = os.path.dirname(DEST_PATH)
def main():
if (
os.path.exists(DEST_PATH) and
os.path.isfile(DEST_PATH) and
os.access(DEST_PATH, os.X_OK)
):
print('Already installed!')
return 0
shutil.rmtree(DEST_DIR, ignore_errors=True)
os.makedirs(DEST_DIR, exist_ok=True)
contents = io.BytesIO(urlopen(DOWNLOAD_PATH).read())
with tarfile.open(fileobj=contents) as tar:
with tar.extractfile(PATH_IN_TAR) as src_file:
with open(DEST_PATH, 'wb') as dest_file:
shutil.copyfileobj(src_file, dest_file)
os.chmod(DEST_PATH, 0o755)
if __name__ == '__main__':
exit(main())
| true | true |
f71b5e5ba3ad4fa2190d7a089a3fbcdfd842d9d6 | 4,150 | py | Python | ptvs_virtualenv_proxy.py | SpaceTheArcher/test | 469ba40a6e3a5719e90f521d851252b1d5499dab | [
"Apache-2.0"
] | null | null | null | ptvs_virtualenv_proxy.py | SpaceTheArcher/test | 469ba40a6e3a5719e90f521d851252b1d5499dab | [
"Apache-2.0"
] | 2 | 2020-06-05T18:25:57.000Z | 2021-06-01T22:22:13.000Z | ptvs_virtualenv_proxy.py | bruno-zaccariello/test | 469ba40a6e3a5719e90f521d851252b1d5499dab | [
"Apache-2.0"
] | null | null | null | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import datetime
import os
import sys
if sys.version_info[0] == 3:
    def to_str(value):
        """Decode bytes to str using the filesystem encoding (Python 3)."""
        return value.decode(sys.getfilesystemencoding())

    def execfile(path, global_dict):
        """Execute a file (Python 3 replacement for the py2 builtin)."""
        with open(path, 'r') as f:
            code = f.read()
        # Normalize Windows line endings and ensure a trailing newline.
        code = code.replace('\r\n', '\n') + '\n'
        exec(code, global_dict)
else:
    def to_str(value):
        """Encode text to a byte str using the filesystem encoding (Python 2)."""
        return value.encode(sys.getfilesystemencoding())
def log(txt):
    """Append *txt*, prefixed with a timestamp, to the WSGI_LOG file (if set)."""
    log_file = os.environ.get('WSGI_LOG')
    if not log_file:
        return
    with open(log_file, 'a+') as f:
        f.write('%s: %s' % (datetime.datetime.now(), txt))
ptvsd_secret = os.getenv('WSGI_PTVSD_SECRET')
if ptvsd_secret:
log('Enabling ptvsd ...\n')
try:
import ptvsd
try:
ptvsd.enable_attach(ptvsd_secret)
log('ptvsd enabled.\n')
except:
log('ptvsd.enable_attach failed\n')
except ImportError:
log('error importing ptvsd.\n');
def get_wsgi_handler(handler_name):
    """Resolve a dotted "module.attr" (or "module.attr()") WSGI handler spec.

    A trailing "()" on any component means "call the resolved object". When
    the longest module path fails to import, components are shifted one by
    one from the module path onto the attribute chain and the import retried,
    so "pkg.mod.obj.attr" works whether ``obj`` is a submodule or an object.

    :raises Exception: when *handler_name* is empty.
    :raises ValueError: when no prefix of the name can be imported.
    """
    if not handler_name:
        raise Exception('WSGI_HANDLER env var must be set')

    if not isinstance(handler_name, str):
        handler_name = to_str(handler_name)

    module_name, _, callable_name = handler_name.rpartition('.')
    should_call = callable_name.endswith('()')
    callable_name = callable_name[:-2] if should_call else callable_name
    # name_list is the attribute chain to walk after a successful import,
    # innermost-first, as (attribute_name, call_it) pairs.
    name_list = [(callable_name, should_call)]
    handler = None

    while module_name:
        try:
            handler = __import__(module_name, fromlist=[name_list[0][0]])
            for name, should_call in name_list:
                handler = getattr(handler, name)
                if should_call:
                    handler = handler()
            break
        except ImportError:
            # Import failed: move the last module component onto the
            # attribute chain and retry with the shorter module path.
            module_name, _, callable_name = module_name.rpartition('.')
            should_call = callable_name.endswith('()')
            callable_name = callable_name[:-2] if should_call else callable_name
            name_list.insert(0, (callable_name, should_call))
            handler = None

    if handler is None:
        raise ValueError('"%s" could not be imported' % handler_name)

    return handler
activate_this = os.getenv('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS')
if not activate_this:
raise Exception('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS is not set')
def get_virtualenv_handler():
    """Activate the configured virtualenv, then resolve the WSGI handler."""
    log('Activating virtualenv with %s\n' % activate_this)
    execfile(activate_this, {'__file__': activate_this})

    handler_name = os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')
    log('Getting handler %s\n' % handler_name)
    handler = get_wsgi_handler(handler_name)
    log('Got handler: %r\n' % handler)
    return handler
def get_venv_handler():
    """Activate a python3-style venv, then resolve the WSGI handler.

    Unlike virtualenv there is no activate_this.py: the venv is entered by
    pointing ``sys.executable`` at its interpreter and letting
    ``site.main()`` rebuild ``sys.path`` from scratch.
    """
    log('Activating venv with executable at %s\n' % activate_this)
    import site
    sys.executable = activate_this
    # Rebuild sys.path for the venv interpreter, then restore any extra
    # entries that were on the original path.
    old_sys_path, sys.path = sys.path, []
    site.main()
    sys.path.insert(0, '')
    for item in old_sys_path:
        if item not in sys.path:
            sys.path.append(item)
    log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
    handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
    log('Got handler: %r\n' % handler)
    return handler
| 34.87395 | 98 | 0.608675 | true | true | |
f71b5f38bc0959d120c19af81b07d70402e40457 | 2,779 | py | Python | bifurcation-diagram/run.py | ExplosiveJam/fickettmodel-reproducibility | e47af1d3e2513d35dad65c16d4fd68c23e505f87 | [
"MIT"
] | 1 | 2019-06-08T20:06:33.000Z | 2019-06-08T20:06:33.000Z | bifurcation-diagram/run.py | ExplosiveJam/fickettmodel-reproducibility | e47af1d3e2513d35dad65c16d4fd68c23e505f87 | [
"MIT"
] | null | null | null | bifurcation-diagram/run.py | ExplosiveJam/fickettmodel-reproducibility | e47af1d3e2513d35dad65c16d4fd68c23e505f87 | [
"MIT"
] | 1 | 2019-06-24T13:00:02.000Z | 2019-06-24T13:00:02.000Z | #!/usr/bin/env python
r""" Run many simulations with varying :math:`\theta`.
The simulations are run.
Separate script should plot bifurcation diagram.
"""
import argparse
import os
import sys
import shutil
import numpy as np
from mpi4py import MPI
from saf.fm.nonlinear import Config
from saf.action import solve
from saf.util import reset_logging
TOTAL_THETAS = 251
FINAL_TIME = 1000
Q = 4
IO_FORMAT = 'numpy'
# Format for floating-point numbers.
FMT = '.3f'
def _worker(tasks, rank):
    """Run every theta value assigned to this MPI rank, one at a time."""
    for theta in tasks:
        _worker_single_task(theta, rank)
def _worker_single_task(task, rank):
    """Run one nonlinear solve for a single theta value.

    Creates (or recreates) an output directory named after theta and
    redirects this process's stdout/stderr into log files inside it.

    NOTE(review): on success stdout/stderr are left redirected -- each
    subsequent task simply opens fresh log files; only the solver-failure
    path restores ``sys.__stdout__``.
    """
    theta = task
    worker_name = rank
    try:
        outdir = 'theta={:{fmt}}'.format(theta, fmt=FMT)
        outdir = os.path.join(OUTPUT_DIR, outdir)

        # Start from a clean directory for this theta.
        if os.path.exists(outdir):
            shutil.rmtree(outdir)
        os.mkdir(outdir)

        outname = os.path.join(outdir, 'stdout.log')
        errname = os.path.join(outdir, 'stderr.log')
        sys.stdout = open(outname, 'w')
        sys.stderr = open(errname, 'w')

        msg = 'Worker {} | theta={:{fmt}}'.format(worker_name, theta, fmt=FMT)
        print(msg)
    except Exception as e:
        print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))
        return

    try:
        c = _get_config(theta)
        solve('nonlinear', c, outdir, log_to_file=False)
        reset_logging()
    except Exception as e:
        # Log to the redirected stdout, then restore the real stdout and
        # report there as well so the failure is visible on the console.
        print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))
        sys.stdout = sys.__stdout__
        print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))
def _get_config(theta):
    """Build the nonlinear-solver Config for activation energy *theta*."""
    c = Config()

    # Discretization, time stepping, and output settings.
    c.n12 = N12
    c.final_time = FINAL_TIME
    c.dt = 0.005
    c.approximator = 'godunov-minmod'
    c.time_integrator = 'dopri5'
    c.plot_time_step = 0
    c.io_format = IO_FORMAT
    c.play_animation = False

    # Physical / model parameters.
    c.lambda_tol = 1e-6
    c.q = Q
    c.theta = theta
    c.reaction_rate_version = 'v2'  # Expression exactly as in FariaEtAl2015.
    c.f = 1
    c.ic_amplitude = 0.0
    c.ic_type = 'gaussian'
    c.truncation_coef = 1e6

    return c
p = argparse.ArgumentParser()
p.add_argument('N12', help='Resolution', type=int)
args = p.parse_args()

N12 = args.N12
OUTPUT_DIR = os.path.join('_output', 'N12={:04d}'.format(N12))

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

all_tasks = []

# Build `all_tasks` in master process to distribute it to all processes.
if rank == 0:
    # Uniformly spaced values of :math:`\theta`.
    theta_values = np.linspace(0.90, 1.15, num=TOTAL_THETAS)

    for i in range(size):
        all_tasks.append([])

    # Round-robin assignment spreads the thetas evenly across ranks.
    for i in range(len(theta_values)):
        all_tasks[i % size].append(theta_values[i])

# Now distribute the tasks to each process.
tasks = comm.scatter(all_tasks, root=0)
_worker(tasks, rank)
| 23.956897 | 78 | 0.640158 |
import argparse
import os
import sys
import shutil
import numpy as np
from mpi4py import MPI
from saf.fm.nonlinear import Config
from saf.action import solve
from saf.util import reset_logging
TOTAL_THETAS = 251
FINAL_TIME = 1000
Q = 4
IO_FORMAT = 'numpy'
FMT = '.3f'
def _worker(tasks, rank):
for t in tasks:
_worker_single_task(t, rank)
def _worker_single_task(task, rank):
theta = task
worker_name = rank
try:
outdir = 'theta={:{fmt}}'.format(theta, fmt=FMT)
outdir = os.path.join(OUTPUT_DIR, outdir)
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
outname = os.path.join(outdir, 'stdout.log')
errname = os.path.join(outdir, 'stderr.log')
sys.stdout = open(outname, 'w')
sys.stderr = open(errname, 'w')
msg = 'Worker {} | theta={:{fmt}}'.format(worker_name, theta, fmt=FMT)
print(msg)
except Exception as e:
print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))
return
try:
c = _get_config(theta)
solve('nonlinear', c, outdir, log_to_file=False)
reset_logging()
except Exception as e:
print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))
sys.stdout = sys.__stdout__
print('theta={:{fmt}} | {}'.format(theta, str(e), fmt=FMT))
def _get_config(theta):
c = Config()
c.n12 = N12
c.final_time = FINAL_TIME
c.dt = 0.005
c.approximator = 'godunov-minmod'
c.time_integrator = 'dopri5'
c.plot_time_step = 0
c.io_format = IO_FORMAT
c.play_animation = False
c.lambda_tol = 1e-6
c.q = Q
c.theta = theta
c.reaction_rate_version = 'v2'
c.f = 1
c.ic_amplitude = 0.0
c.ic_type = 'gaussian'
c.truncation_coef = 1e6
return c
p = argparse.ArgumentParser()
p.add_argument('N12', help='Resolution', type=int)
args = p.parse_args()
N12 = args.N12
OUTPUT_DIR = os.path.join('_output', 'N12={:04d}'.format(N12))
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
all_tasks = []
if rank == 0:
theta_values = np.linspace(0.90, 1.15, num=TOTAL_THETAS)
for i in range(size):
all_tasks.append([])
for i in range(len(theta_values)):
all_tasks[i % size].append(theta_values[i])
tasks = comm.scatter(all_tasks, root=0)
_worker(tasks, rank)
| true | true |
f71b5f65fde60a4fce5bcdd06e514fa54d419c62 | 2,762 | py | Python | mwparserfromhell/nodes/wikilink.py | hperala/kontuwikibot | f409e6fb45adf4e553dc326d9fb3c0d29eda6373 | [
"MIT"
] | null | null | null | mwparserfromhell/nodes/wikilink.py | hperala/kontuwikibot | f409e6fb45adf4e553dc326d9fb3c0d29eda6373 | [
"MIT"
] | null | null | null | mwparserfromhell/nodes/wikilink.py | hperala/kontuwikibot | f409e6fb45adf4e553dc326d9fb3c0d29eda6373 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from . import Node
from ..compat import str
from ..utils import parse_anything
__all__ = ["Wikilink"]
class Wikilink(Node):
    """Represents an internal wikilink, like ``[[Foo|Bar]]``."""

    def __init__(self, title, text=None):
        super(Wikilink, self).__init__()
        self._title = title
        self._text = text

    def __unicode__(self):
        pieces = ["[[", str(self.title)]
        if self.text is not None:
            pieces.append("|")
            pieces.append(str(self.text))
        pieces.append("]]")
        return "".join(pieces)

    def __children__(self):
        yield self.title
        if self.text is not None:
            yield self.text

    def __strip__(self, normalize, collapse):
        # The display text, when present, is what the link renders as.
        target = self.title if self.text is None else self.text
        return target.strip_code(normalize, collapse)

    def __showtree__(self, write, get, mark):
        write("[[")
        get(self.title)
        if self.text is not None:
            write(" | ")
            mark()
            get(self.text)
        write("]]")

    @property
    def title(self):
        """The title of the linked page, as a :class:`.Wikicode` object."""
        return self._title

    @title.setter
    def title(self, value):
        self._title = parse_anything(value)

    @property
    def text(self):
        """The text to display (if any), as a :class:`.Wikicode` object."""
        return self._text

    @text.setter
    def text(self, value):
        self._text = None if value is None else parse_anything(value)
| 33.277108 | 79 | 0.654598 |
from __future__ import unicode_literals
from . import Node
from ..compat import str
from ..utils import parse_anything
__all__ = ["Wikilink"]
class Wikilink(Node):
def __init__(self, title, text=None):
super(Wikilink, self).__init__()
self._title = title
self._text = text
def __unicode__(self):
if self.text is not None:
return "[[" + str(self.title) + "|" + str(self.text) + "]]"
return "[[" + str(self.title) + "]]"
def __children__(self):
yield self.title
if self.text is not None:
yield self.text
def __strip__(self, normalize, collapse):
if self.text is not None:
return self.text.strip_code(normalize, collapse)
return self.title.strip_code(normalize, collapse)
def __showtree__(self, write, get, mark):
write("[[")
get(self.title)
if self.text is not None:
write(" | ")
mark()
get(self.text)
write("]]")
@property
def title(self):
return self._title
@property
def text(self):
return self._text
@title.setter
def title(self, value):
self._title = parse_anything(value)
@text.setter
def text(self, value):
if value is None:
self._text = None
else:
self._text = parse_anything(value)
| true | true |
f71b5f8ccdadb4be20d3cb2813522c3537586cb1 | 2,254 | py | Python | bcs-ui/backend/tests/container_service/observability/log_stream/test_log_stream.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/tests/container_service/observability/log_stream/test_log_stream.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/tests/container_service/observability/log_stream/test_log_stream.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from channels.testing import WebsocketCommunicator
from backend.accounts.middlewares import BCSChannelAuthMiddlewareStack
from backend.container_service.observability.log_stream.views import LogStreamHandler
@pytest.fixture
def session_id(api_client, project_id, cluster_id, namespace, pod_name, container_name):
    """Create a stdout-log session via the REST API and return its session_id."""
    response = api_client.post(
        f'/api/logs/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/pods/{pod_name}/stdlogs/sessions/', # noqa
        {"container_name": container_name},
    )
    result = response.json()
    # Response envelope: {"data": {"session_id": ...}, ...}
    return result['data']['session_id']
@pytest.mark.skip(reason='暂时跳过标准日志部分单元测试')
@pytest.mark.django_db
@pytest.mark.asyncio
async def test_log_stream(project_id, cluster_id, namespace, pod_name, session_id):
    """Happy-path websocket test: connect to the stdout-log stream with a
    valid session_id, send one text frame, then disconnect cleanly.

    (The skip reason above says the stdlog unit tests are temporarily
    disabled.)
    """
    app = BCSChannelAuthMiddlewareStack(LogStreamHandler.as_asgi())
    # Test a normal connection
    communicator = WebsocketCommunicator(
        app,
        f'/ws/logs/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/pods/{pod_name}/stdlogs/stream/?session_id={session_id}', # noqa
    )
    # The consumer reads its routing kwargs from the scope, so they are
    # injected by hand here instead of going through a URLRouter.
    communicator.scope['url_route'] = {
        'kwargs': {
            'project_id': project_id,
            'cluster_id': cluster_id,
            'namespace': namespace,
            'pod': pod_name,
        }
    }
    connected, _ = await communicator.connect()
    assert connected
    # Test sending text
    await communicator.send_to(text_data="hello")
    # Close out
    await communicator.disconnect()
| 35.777778 | 151 | 0.733807 |
import pytest
from channels.testing import WebsocketCommunicator
from backend.accounts.middlewares import BCSChannelAuthMiddlewareStack
from backend.container_service.observability.log_stream.views import LogStreamHandler
@pytest.fixture
def session_id(api_client, project_id, cluster_id, namespace, pod_name, container_name):
    """Create a stdout-log session via the REST API and return its session_id."""
    response = api_client.post(
        f'/api/logs/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/pods/{pod_name}/stdlogs/sessions/',
        {"container_name": container_name},
    )
    result = response.json()
    # Response envelope: {"data": {"session_id": ...}, ...}
    return result['data']['session_id']
@pytest.mark.skip(reason='暂时跳过标准日志部分单元测试')
@pytest.mark.django_db
@pytest.mark.asyncio
async def test_log_stream(project_id, cluster_id, namespace, pod_name, session_id):
    """Happy-path websocket test: connect with a valid session_id, send one
    text frame, then disconnect cleanly.

    (The skip reason above says the stdlog unit tests are temporarily
    disabled.)
    """
    app = BCSChannelAuthMiddlewareStack(LogStreamHandler.as_asgi())
    communicator = WebsocketCommunicator(
        app,
        f'/ws/logs/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/pods/{pod_name}/stdlogs/stream/?session_id={session_id}',
    )
    # The consumer reads its routing kwargs from the scope, so inject them
    # by hand instead of going through a URLRouter.
    communicator.scope['url_route'] = {
        'kwargs': {
            'project_id': project_id,
            'cluster_id': cluster_id,
            'namespace': namespace,
            'pod': pod_name,
        }
    }
    connected, _ = await communicator.connect()
    assert connected
    await communicator.send_to(text_data="hello")
    await communicator.disconnect()
| true | true |
f71b5fa3d07b50277b17d00725bcbd1f7fff771e | 6,977 | py | Python | tensorflow/contrib/cmake/tools/create_def_file.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/contrib/cmake/tools/create_def_file.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/tensorflow/contrib/cmake/tools/create_def_file.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import codecs
import os
import re
import subprocess
import sys
import tempfile
# External tools we use that come with visual studio sdk and
# we assume that the caller has the correct PATH to the sdk
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::|Internal|"
r"python_op_gen_internal|grappler")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::LogString|"
r"tensorflow::internal::CheckOpMessageBuilder|"
r"tensorflow::internal::PickUnusedPortOrDie|"
r"tensorflow::internal::ValidateDevice|"
r"tensorflow::ops::internal::Enter|"
r"tensorflow::strings::internal::AppendPieces|"
r"tensorflow::strings::internal::CatPieces|"
r"tensorflow::errors::Internal|"
r"tensorflow::Tensor::CopyFromInternal|"
r"tensorflow::kernel_factory::"
r"OpKernelRegistrar::InitInternal|"
r"tensorflow::io::internal::JoinPathImpl")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"^(TFE_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"\?nsync_|"
r"stream_executor::")
# We want to identify data members explicitly in the DEF file, so that no one
# can implicitly link against the DLL if they use one of the variables exported
# from the DLL and the header they use does not decorate the symbol with
# __declspec(dllimport). It is easier to detect what a data symbol does
# NOT look like, so doing it with the below regex.
DATA_EXCLUDE_RE = re.compile(r"[)(]|"
r"vftable|"
r"vbtable|"
r"vcall|"
r"RTTI|"
r"protobuf::internal::ExplicitlyConstructed")
def get_args():
  """Parse and return the command line arguments.

  Returns:
    argparse.Namespace with attributes: `input` (list of library paths,
    split on ';'), `output` (path of the .def file to write), `target`
    (name of the DLL) and `bitness` ("32" or "64").
  """
  # PEP 8 (E731): use a named def instead of assigning a lambda, so the
  # converter has a real name in tracebacks and argparse error messages.
  def filename_list(raw):
    # "a.lib;b.lib" -> ["a.lib", "b.lib"]
    return raw.split(";")

  parser = argparse.ArgumentParser()
  parser.add_argument("--input", type=filename_list,
                      help="paths to input libraries separated by semicolons",
                      required=True)
  parser.add_argument("--output", help="output deffile", required=True)
  parser.add_argument("--target", help="name of the target", required=True)
  parser.add_argument("--bitness", help="build target bitness", required=True)
  return parser.parse_args()
def main():
  """Generate the DEF file.

  Pipeline: run DUMPBIN over every input library to collect decorated
  (mangled) linkable symbols, run UNDNAME over the collected names to get
  readable undecorated forms, filter those through the include/exclude
  regexes defined above, and write the surviving decorated names into the
  output DEF file.

  Returns 0 on success, or the non-zero exit code of a failed child tool.
  """
  args = get_args()
  # Pipe dumpbin to extract all linkable symbols from libs.
  # Good symbols are collected in candidates and also written to
  # a temp file.
  candidates = []
  tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
  for lib_path in args.input:
    proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", lib_path],
                            stdout=subprocess.PIPE)
    for line in codecs.getreader("utf-8")(proc.stdout):
      cols = line.split()
      if len(cols) < 2:
        continue
      sym = cols[1]
      tmpfile.file.write(sym + "\n")
      candidates.append(sym)
    exit_code = proc.wait()
    if exit_code != 0:
      print("{} failed, exit={}".format(DUMPBIN, exit_code))
      return exit_code
  # Close (but keep) the temp file so UNDNAME below can read it by name.
  tmpfile.file.close()
  # Run the symbols through undname to get their undecorated name
  # so we can filter on something readable.
  with open(args.output, "w") as def_fp:
    # track dupes
    taken = set()
    # Header for the def file.
    def_fp.write("LIBRARY " + args.target + "\n")
    def_fp.write("EXPORTS\n")
    if args.bitness == "64":
      def_fp.write("\t??1OpDef@tensorflow@@UEAA@XZ\n")
    else:
      def_fp.write("\t??1OpDef@tensorflow@@UAE@XZ\n")
    # Each symbols returned by undname matches the same position in candidates.
    # We compare on undname but use the decorated name from candidates.
    dupes = 0
    proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
    for idx, line in enumerate(codecs.getreader("utf-8")(proc.stdout)):
      decorated = candidates[idx]
      if decorated in taken:
        # Symbol is already in output, done.
        dupes += 1
        continue
      if not INCLUDEPRE_RE.search(line):
        if EXCLUDE_RE.search(line):
          continue
        if not INCLUDE_RE.search(line):
          continue
      if "deleting destructor" in line:
        # Some of the symbols convered by INCLUDEPRE_RE export deleting
        # destructor symbols, which is a bad idea.
        # So we filter out such symbols here.
        continue
      # Symbols matching DATA_EXCLUDE_RE look like functions/vtables and are
      # exported plainly; everything else is assumed to be a data symbol and
      # marked DATA (see the comment above DATA_EXCLUDE_RE).
      if DATA_EXCLUDE_RE.search(line):
        def_fp.write("\t" + decorated + "\n")
      else:
        def_fp.write("\t" + decorated + " DATA\n")
      taken.add(decorated)
    exit_code = proc.wait()
    if exit_code != 0:
      print("{} failed, exit={}".format(UNDNAME, exit_code))
      return exit_code
  os.unlink(tmpfile.name)
  print("symbols={}, taken={}, dupes={}"
        .format(len(candidates), len(taken), dupes))
  return 0
if __name__ == "__main__":
sys.exit(main())
| 38.546961 | 81 | 0.611151 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import codecs
import os
import re
import subprocess
import sys
import tempfile
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::|Internal|"
r"python_op_gen_internal|grappler")
INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::LogString|"
r"tensorflow::internal::CheckOpMessageBuilder|"
r"tensorflow::internal::PickUnusedPortOrDie|"
r"tensorflow::internal::ValidateDevice|"
r"tensorflow::ops::internal::Enter|"
r"tensorflow::strings::internal::AppendPieces|"
r"tensorflow::strings::internal::CatPieces|"
r"tensorflow::errors::Internal|"
r"tensorflow::Tensor::CopyFromInternal|"
r"tensorflow::kernel_factory::"
r"OpKernelRegistrar::InitInternal|"
r"tensorflow::io::internal::JoinPathImpl")
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"^(TFE_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"\?nsync_|"
r"stream_executor::")
DATA_EXCLUDE_RE = re.compile(r"[)(]|"
r"vftable|"
r"vbtable|"
r"vcall|"
r"RTTI|"
r"protobuf::internal::ExplicitlyConstructed")
def get_args():
  """Parse and return the command line arguments.

  Returns:
    argparse.Namespace with attributes: `input` (list of library paths,
    split on ';'), `output` (path of the .def file to write), `target`
    (name of the DLL) and `bitness` ("32" or "64").
  """
  # PEP 8 (E731): use a named def instead of assigning a lambda, so the
  # converter has a real name in tracebacks and argparse error messages.
  def filename_list(raw):
    # "a.lib;b.lib" -> ["a.lib", "b.lib"]
    return raw.split(";")

  parser = argparse.ArgumentParser()
  parser.add_argument("--input", type=filename_list,
                      help="paths to input libraries separated by semicolons",
                      required=True)
  parser.add_argument("--output", help="output deffile", required=True)
  parser.add_argument("--target", help="name of the target", required=True)
  parser.add_argument("--bitness", help="build target bitness", required=True)
  return parser.parse_args()
def main():
  """Generate the DEF file.

  Pipeline: run DUMPBIN over every input library to collect decorated
  (mangled) linkable symbols, run UNDNAME over the collected names to get
  readable undecorated forms, filter those through the include/exclude
  regexes defined above, and write the surviving decorated names into the
  output DEF file.

  Returns 0 on success, or the non-zero exit code of a failed child tool.
  """
  args = get_args()
  # Collect linkable symbols from every input lib; good symbols go both
  # into `candidates` and into a temp file for the UNDNAME pass below.
  candidates = []
  tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
  for lib_path in args.input:
    proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", lib_path],
                            stdout=subprocess.PIPE)
    for line in codecs.getreader("utf-8")(proc.stdout):
      cols = line.split()
      if len(cols) < 2:
        continue
      sym = cols[1]
      tmpfile.file.write(sym + "\n")
      candidates.append(sym)
    exit_code = proc.wait()
    if exit_code != 0:
      print("{} failed, exit={}".format(DUMPBIN, exit_code))
      return exit_code
  # Close (but keep) the temp file so UNDNAME can read it by name.
  tmpfile.file.close()
  with open(args.output, "w") as def_fp:
    # `taken` tracks already-emitted decorated names (dupe detection).
    taken = set()
    # DEF file header.
    def_fp.write("LIBRARY " + args.target + "\n")
    def_fp.write("EXPORTS\n")
    if args.bitness == "64":
      def_fp.write("\t??1OpDef@tensorflow@@UEAA@XZ\n")
    else:
      def_fp.write("\t??1OpDef@tensorflow@@UAE@XZ\n")
    # UNDNAME's output lines match `candidates` positionally: we filter on
    # the undecorated form but emit the decorated name.
    dupes = 0
    proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
    for idx, line in enumerate(codecs.getreader("utf-8")(proc.stdout)):
      decorated = candidates[idx]
      if decorated in taken:
        dupes += 1
        continue
      if not INCLUDEPRE_RE.search(line):
        if EXCLUDE_RE.search(line):
          continue
        if not INCLUDE_RE.search(line):
          continue
      if "deleting destructor" in line:
        # Deleting-destructor symbols are never exported.
        continue
      # Function-like symbols are exported plainly; everything else is
      # assumed to be data and marked DATA (see DATA_EXCLUDE_RE).
      if DATA_EXCLUDE_RE.search(line):
        def_fp.write("\t" + decorated + "\n")
      else:
        def_fp.write("\t" + decorated + " DATA\n")
      taken.add(decorated)
    exit_code = proc.wait()
    if exit_code != 0:
      print("{} failed, exit={}".format(UNDNAME, exit_code))
      return exit_code
  os.unlink(tmpfile.name)
  print("symbols={}, taken={}, dupes={}"
        .format(len(candidates), len(taken), dupes))
  return 0
if __name__ == "__main__":
sys.exit(main())
| true | true |
f71b5fa9abf8cdc0cf3fabe615159d23770b9aaa | 4,790 | py | Python | flanker/mime/message/headers/encodedword.py | skshetry/flanker | 63d1cdf927777f49f97e8d7f01e105a3b0d25cd2 | [
"Apache-2.0"
] | 929 | 2015-01-01T11:14:21.000Z | 2022-03-28T23:47:40.000Z | flanker/mime/message/headers/encodedword.py | skshetry/flanker | 63d1cdf927777f49f97e8d7f01e105a3b0d25cd2 | [
"Apache-2.0"
] | 141 | 2015-01-10T19:02:03.000Z | 2021-07-26T18:04:14.000Z | flanker/mime/message/headers/encodedword.py | skshetry/flanker | 63d1cdf927777f49f97e8d7f01e105a3b0d25cd2 | [
"Apache-2.0"
] | 179 | 2015-01-01T18:42:46.000Z | 2022-02-16T21:57:14.000Z | # coding:utf-8
import logging
from base64 import b64encode
import regex as re
import six
from flanker import _email
from flanker.mime.message import charsets, errors
_log = logging.getLogger(__name__)
_RE_FOLDING_WHITE_SPACES = re.compile(r"(?:\n\r?|\r\n?)")
# This spec refers to http://tools.ietf.org/html/rfc2047
_RE_ENCODED_WORD = re.compile(r'''(?P<encodedWord>
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
)''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
def unfold(value):
    """
    Collapse a folded header value onto a single line.

    Folding line breaks — a lone CR or LF, or a CR/LF pair in either
    order — are removed outright so the header can be evaluated as one
    string for further syntactic and semantic processing.
    """
    return re.sub(r"(?:\n\r?|\r\n?)", "", value)
def decode(header):
    """Alias for :func:`mime_to_unicode`: decode a MIME header to unicode."""
    return mime_to_unicode(header)
def mime_to_unicode(header):
    """
    Takes a header value and returns a fully decoded unicode string.
    It differs from standard Python's mail.header.decode_header() because:
        - it is higher level, i.e. returns a unicode string instead of
          an array of tuples
        - it accepts Unicode and non-ASCII strings as well

    >>> mime_to_unicode("=?UTF-8?B?UmVbMl06INCX0LXQvNC70Y/QutC4?=")
    u"Re[2]: Земляки"
    >>> mime_to_unicode("hello")
    u"hello"

    Any decoding failure is swallowed: the original header value is
    returned unchanged and the failure is logged (base64-encoded so the
    raw bytes survive logging).
    """
    # Only string header values need to be converted.
    if not isinstance(header, six.string_types):
        return header
    try:
        header = unfold(header)
        decoded = []  # decoded parts
        while header:
            match = _RE_ENCODED_WORD.search(header)
            if not match:
                # Append the remainder of the string to the list of chunks.
                decoded.append((header, 'ascii'))
                break
            start = match.start()
            if start != 0:
                # decodes unencoded ascii part to unicode
                value = header[0:start]
                if value.strip():
                    decoded.append((value, 'ascii'))
            # decode a header =?...?= of encoding
            charset, value = _decode_part(match.group('charset').lower(),
                                          match.group('encoding').lower(),
                                          match.group('encoded'))
            if decoded and decoded[-1][1] == charset:
                # Consecutive parts with the same charset are concatenated
                # before conversion — presumably so multi-byte sequences
                # split across encoded-words still decode; TODO confirm.
                decoded[-1] = (decoded[-1][0]+value, charset)
            else:
                decoded.append((value, charset))
            header = header[match.end():]
        return u"".join(charsets.convert_to_unicode(c, v) for v, c in decoded)
    except Exception:
        try:
            logged_header = header
            if isinstance(logged_header, six.text_type):
                logged_header = logged_header.encode('utf-8')
            # encode header as utf-8 so all characters can be base64 encoded
            logged_header = b64encode(logged_header)
            _log.warning(
                u"HEADER-DECODE-FAIL: ({0}) - b64encoded".format(
                    logged_header))
        except Exception:
            _log.exception("Failed to log exception")
        return header
def _decode_part(charset, encoding, value):
    """
    Attempts to decode part, understands
    'q' - quoted encoding
    'b' - base64 mime encoding

    Returns (charset, decoded-string).

    An empty encoding passes *value* through untouched; any other
    encoding token raises :class:`errors.DecodingError`.
    """
    if encoding == 'q':
        return charset, _decode_quoted_printable(value)
    if encoding == 'b':
        # Postel's law: add missing padding
        paderr = len(value) % 4
        if paderr:
            value += '==='[:4 - paderr]
        return charset, _email.decode_base64(value)
    if not encoding:
        return charset, value
    raise errors.DecodingError('Unknown encoding: %s' % encoding)
def _decode_quoted_printable(qp):
    """Decode an RFC 2047 Q-encoded string and return the raw bytes.

    '_' maps to a space, '=XX' maps to the byte with hex value XX, and
    malformed escapes are kept verbatim instead of raising.
    """
    if six.PY2:
        # Python 2 path delegates to the C-backed helper.
        return _email.decode_quoted_printable(str(qp))
    buf = bytearray()
    size = len(qp)
    i = 0
    while i < size:
        ch = qp[i]
        i += 1
        if ch == '_':
            buf.append(ord(' '))
            continue
        if ch != '=':
            buf.append(ord(ch))
            continue
        # If there is no enough characters left, then treat them as is.
        if size - i < 2:
            buf.append(ord(ch))
            continue
        try:
            codepoint = int(qp[i:i + 2], 16)
        except ValueError:
            # Not a valid hex pair: keep the '=' literally and re-scan
            # the following characters as ordinary text.
            buf.append(ord(ch))
            continue
        buf.append(codepoint)
        i += 2
    return six.binary_type(buf)
| 30.125786 | 80 | 0.56618 |
import logging
from base64 import b64encode
import regex as re
import six
from flanker import _email
from flanker.mime.message import charsets, errors
_log = logging.getLogger(__name__)
_RE_FOLDING_WHITE_SPACES = re.compile(r"(?:\n\r?|\r\n?)")
_RE_ENCODED_WORD = re.compile(r'''(?P<encodedWord>
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
)''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
def unfold(value):
    """Remove folding line breaks (CR, LF, or CR/LF pairs) from *value*."""
    return _RE_FOLDING_WHITE_SPACES.sub('', value)
def decode(header):
    """Alias for :func:`mime_to_unicode`: decode a MIME header to unicode."""
    return mime_to_unicode(header)
def mime_to_unicode(header):
    """Decode a MIME header value into a fully decoded unicode string.

    Scans for RFC 2047 encoded-words (=?charset?q|b?...?=), decodes each,
    and converts all chunks to unicode.  Non-string input is returned
    as-is.  Any decoding failure is swallowed: the original header value
    is returned unchanged and the failure is logged base64-encoded.
    """
    if not isinstance(header, six.string_types):
        return header
    try:
        header = unfold(header)
        decoded = []  # list of (value, charset) chunks
        while header:
            match = _RE_ENCODED_WORD.search(header)
            if not match:
                # No more encoded-words: the remainder is plain ascii.
                decoded.append((header, 'ascii'))
                break
            start = match.start()
            if start != 0:
                # Unencoded text before the encoded-word.
                value = header[0:start]
                if value.strip():
                    decoded.append((value, 'ascii'))
            charset, value = _decode_part(match.group('charset').lower(),
                                          match.group('encoding').lower(),
                                          match.group('encoded'))
            if decoded and decoded[-1][1] == charset:
                # Merge consecutive same-charset chunks before conversion.
                decoded[-1] = (decoded[-1][0]+value, charset)
            else:
                decoded.append((value, charset))
            header = header[match.end():]
        return u"".join(charsets.convert_to_unicode(c, v) for v, c in decoded)
    except Exception:
        try:
            logged_header = header
            if isinstance(logged_header, six.text_type):
                logged_header = logged_header.encode('utf-8')
            # Base64-encode so arbitrary bytes survive the log pipeline.
            logged_header = b64encode(logged_header)
            _log.warning(
                u"HEADER-DECODE-FAIL: ({0}) - b64encoded".format(
                    logged_header))
        except Exception:
            _log.exception("Failed to log exception")
        return header
def _decode_part(charset, encoding, value):
    """Decode one encoded-word payload; returns (charset, decoded-string).

    'q' is quoted-printable, 'b' is base64 (missing padding is added),
    an empty encoding passes *value* through, anything else raises
    :class:`errors.DecodingError`.
    """
    if encoding == 'q':
        return charset, _decode_quoted_printable(value)
    if encoding == 'b':
        # Be liberal in what we accept: pad a short base64 payload.
        paderr = len(value) % 4
        if paderr:
            value += '==='[:4 - paderr]
        return charset, _email.decode_base64(value)
    if not encoding:
        return charset, value
    raise errors.DecodingError('Unknown encoding: %s' % encoding)
def _decode_quoted_printable(qp):
    """Decode an RFC 2047 Q-encoded string and return the raw bytes.

    '_' maps to a space, '=XX' maps to the byte with hex value XX, and
    malformed escapes are kept verbatim instead of raising.
    """
    if six.PY2:
        # Python 2 path delegates to the C-backed helper.
        return _email.decode_quoted_printable(str(qp))
    buf = bytearray()
    size = len(qp)
    i = 0
    while i < size:
        ch = qp[i]
        i += 1
        if ch == '_':
            buf.append(ord(' '))
            continue
        if ch != '=':
            buf.append(ord(ch))
            continue
        # If there is no enough characters left, then treat them as is.
        if size - i < 2:
            buf.append(ord(ch))
            continue
        try:
            codepoint = int(qp[i:i + 2], 16)
        except ValueError:
            # Not a valid hex pair: keep the '=' literally.
            buf.append(ord(ch))
            continue
        buf.append(codepoint)
        i += 2
    return six.binary_type(buf)
| true | true |
f71b5fdd3e686df0976041498cd2acf2ea0dd77c | 352 | py | Python | Exercicios/PythonExercicios/ex001 - 010/ex005.py | sggrilo/Curso-em-Video-Python | a0e6f3d80d89eb8709345a38e207d81a77891192 | [
"MIT"
] | null | null | null | Exercicios/PythonExercicios/ex001 - 010/ex005.py | sggrilo/Curso-em-Video-Python | a0e6f3d80d89eb8709345a38e207d81a77891192 | [
"MIT"
] | null | null | null | Exercicios/PythonExercicios/ex001 - 010/ex005.py | sggrilo/Curso-em-Video-Python | a0e6f3d80d89eb8709345a38e207d81a77891192 | [
"MIT"
] | null | null | null | # ANTECESSOR E SUCESSOR — Faça um programa que leia um número
# inteiro e mostre na tela o seu antecessor e o seu sucessor.
# Read an integer and report its predecessor and successor (ANSI-colored).
numero = int(input('Digite um número inteiro: '))
antecessor, sucessor = numero - 1, numero + 1
print('O antecessor de \033[4;33m{}\033[m equivale a \033[4;31m{}\033[m. '.format(numero, antecessor), end='')
print('Seu sucessor equivale a \033[4;32m{}\033[m.'.format(sucessor))
| 32 | 96 | 0.664773 |
# Read an integer and print its predecessor and successor with ANSI colors.
n = int(input('Digite um número inteiro: '))
a = n - 1  # predecessor
s = n + 1  # successor
print('O antecessor de \033[4;33m{}\033[m equivale a \033[4;31m{}\033[m. '.format(n, a), end='')
print('Seu sucessor equivale a \033[4;32m{}\033[m.'.format(s))
| true | true |
f71b5fe3a6cd69858a329449b4f2842d872d3cb0 | 27,953 | py | Python | canvasapi/user.py | onomou/canvasapi | 94d269e8e771bcf03fd57e235190aced3b5af87a | [
"MIT"
] | null | null | null | canvasapi/user.py | onomou/canvasapi | 94d269e8e771bcf03fd57e235190aced3b5af87a | [
"MIT"
] | null | null | null | canvasapi/user.py | onomou/canvasapi | 94d269e8e771bcf03fd57e235190aced3b5af87a | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
from six import python_2_unicode_compatible, string_types
import warnings
from canvasapi.calendar_event import CalendarEvent
from canvasapi.canvas_object import CanvasObject
from canvasapi.communication_channel import CommunicationChannel
from canvasapi.folder import Folder
from canvasapi.paginated_list import PaginatedList
from canvasapi.upload import Uploader
from canvasapi.util import combine_kwargs, obj_or_id
@python_2_unicode_compatible
class User(CanvasObject):
def __str__(self):
return "{} ({})".format(self.name, self.id)
def get_profile(self, **kwargs):
"""
Retrieve this user's profile.
:calls: `GET /api/v1/users/:user_id/profile \
<https://canvas.instructure.com/doc/api/users.html#method.profile.settings>`_
:rtype: dict
"""
response = self._requester.request(
'GET',
'users/{}/profile'.format(self.id)
)
return response.json()
def get_page_views(self, **kwargs):
"""
Retrieve this user's page views.
:calls: `GET /api/v1/users/:user_id/page_views \
<https://canvas.instructure.com/doc/api/users.html#method.page_views.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course.PageView`
"""
from canvasapi.page_view import PageView
return PaginatedList(
PageView,
self._requester,
'GET',
'users/{}/page_views'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def get_courses(self, **kwargs):
"""
Retrieve all courses this user is enrolled in.
:calls: `GET /api/v1/users/:user_id/courses \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.user_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course.Course`
"""
from canvasapi.course import Course
return PaginatedList(
Course,
self._requester,
'GET',
'users/{}/courses'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def get_missing_submissions(self):
"""
Retrieve all past-due assignments for which the student does not
have a submission.
:calls: `GET /api/v1/users/:user_id/missing_submissions \
<https://canvas.instructure.com/doc/api/users.html#method.users.missing_submissions>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.Assignment`
"""
from canvasapi.assignment import Assignment
return PaginatedList(
Assignment,
self._requester,
'GET',
'users/{}/missing_submissions'.format(self.id)
)
def update_settings(self, **kwargs):
"""
Update this user's settings.
:calls: `PUT /api/v1/users/:id/settings \
<https://canvas.instructure.com/doc/api/users.html#method.users.settings>`_
:rtype: dict
"""
response = self._requester.request(
'PUT',
'users/{}/settings'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
return response.json()
def get_color(self, asset_string):
"""
Return the custom colors that have been saved by this user for a given context.
The `asset_string` parameter should be in the format 'context_id', for example 'course_42'.
:calls: `GET /api/v1/users/:id/colors/:asset_string \
<https://canvas.instructure.com/doc/api/users.html#method.users.get_custom_color>`_
:param asset_string: The asset to retrieve the color from.
:type asset_string: str
:rtype: dict
"""
response = self._requester.request(
'GET',
'users/{}/colors/{}'.format(self.id, asset_string)
)
return response.json()
def get_colors(self):
"""
Return all custom colors that have been saved by this user.
:calls: `GET /api/v1/users/:id/colors \
<https://canvas.instructure.com/doc/api/users.html#method.users.get_custom_colors>`_
:rtype: dict
"""
response = self._requester.request(
'GET',
'users/{}/colors'.format(self.id)
)
return response.json()
def update_color(self, asset_string, hexcode):
"""
Update a custom color for this user for a given context.
This allows colors for the calendar and elsewhere to be customized on a user basis.
The `asset_string` parameter should be in the format 'context_id', for example 'course_42'.
The `hexcode` parameter need not include the '#'.
:calls: `PUT /api/v1/users/:id/colors/:asset_string \
<https://canvas.instructure.com/doc/api/users.html#method.users.set_custom_color>`_
:param asset_string: The asset to modify the color for.
:type asset_string: str
:param hexcode: The hexcode of the color to use.
:type hexcode: str
:rtype: dict
"""
response = self._requester.request(
'PUT',
'users/{}/colors/{}'.format(self.id, asset_string),
hexcode=hexcode
)
return response.json()
def edit(self, **kwargs):
"""
Modify this user's information.
:calls: `PUT /api/v1/users/:id \
<https://canvas.instructure.com/doc/api/users.html#method.users.update>`_
:rtype: :class:`canvasapi.user.User`
"""
response = self._requester.request(
'PUT',
'users/{}'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
super(User, self).set_attributes(response.json())
return self
def merge_into(self, destination_user):
"""
Merge this user into another user.
:calls: `PUT /api/v1/users/:id/merge_into/:destination_user_id \
<https://canvas.instructure.com/doc/api/users.html#method.users.merge_into>`_
:param destination_user: The object or ID of the user to merge into.
:type destination_user: :class:`canvasapi.user.User` or int
:rtype: :class:`canvasapi.user.User`
"""
dest_user_id = obj_or_id(destination_user, 'destination_user', (User, ))
response = self._requester.request(
'PUT',
'users/{}/merge_into/{}'.format(self.id, dest_user_id),
)
super(User, self).set_attributes(response.json())
return self
def get_avatars(self):
"""
Retrieve the possible user avatar options that can be set with the user update endpoint.
:calls: `GET /api/v1/users/:user_id/avatars \
<https://canvas.instructure.com/doc/api/users.html#method.profile.profile_pics>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.avatar.Avatar`
"""
from canvasapi.avatar import Avatar
return PaginatedList(
Avatar,
self._requester,
'GET',
'users/{}/avatars'.format(self.id)
)
def get_assignments(self, course, **kwargs):
"""
Return the list of assignments for this user if the current
user (the API key owner) has rights to view. See List assignments for valid arguments.
:calls: `GET /api/v1/users/:user_id/courses/:course_id/assignments \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.user_index>`_
:param course: The object or ID of the course to retrieve.
:type course: :class:`canvasapi.course.Course` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.Assignment`
"""
from canvasapi.assignment import Assignment
from canvasapi.course import Course
course_id = obj_or_id(course, "course", (Course,))
return PaginatedList(
Assignment,
self._requester,
'GET',
'users/{}/courses/{}/assignments'.format(self.id, course_id),
_kwargs=combine_kwargs(**kwargs)
)
def get_enrollments(self, **kwargs):
"""
List all of the enrollments for this user.
:calls: `GET /api/v1/users/:user_id/enrollments \
<https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.enrollment.Enrollment`
"""
from canvasapi.enrollment import Enrollment
return PaginatedList(
Enrollment,
self._requester,
'GET',
'users/{}/enrollments'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def upload(self, file, **kwargs):
"""
Upload a file for a user.
NOTE: You *must* have authenticated with this user's API key to
upload on their behalf no matter what permissions the issuer of the
request has.
:calls: `POST /api/v1/users/:user_id/files \
<https://canvas.instructure.com/doc/api/users.html#method.users.create_file>`_
:param file: The file or path of the file to upload.
:type file: file or str
:returns: True if the file uploaded successfully, False otherwise, \
and the JSON response from the API.
:rtype: tuple
"""
return Uploader(
self._requester,
'users/{}/files'.format(self.id),
file,
**kwargs
).start()
def list_calendar_events_for_user(self, **kwargs):
"""
List calendar events that the current user can view or manage.
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.user.User.get_calendar_events_for_user` instead.
:calls: `GET /api/v1/users/:user_id/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.user_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.calendar_event.CalendarEvent`
"""
warnings.warn(
"`list_calendar_events_for_user`"
" is being deprecated and will be removed in a future version."
" Use `get_calendar_events_for_user` instead",
DeprecationWarning
)
return self.get_calendar_events_for_user(**kwargs)
def get_calendar_events_for_user(self, **kwargs):
"""
List calendar events that the current user can view or manage.
:calls: `GET /api/v1/users/:user_id/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.user_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.calendar_event.CalendarEvent`
"""
return PaginatedList(
CalendarEvent,
self._requester,
'GET',
'users/{}/calendar_events'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def list_communication_channels(self, **kwargs):
"""
List communication channels for the specified user, sorted by
position.
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.user.User.get_communication_channels` instead.
:calls: `GET /api/v1/users/:user_id/communication_channels \
<https://canvas.instructure.com/doc/api/communication_channels.html#method.communication_channels.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.communication_channel.CommunicationChannel`
"""
warnings.warn(
"`list_communication_channels`"
" is being deprecated and will be removed in a future version."
" Use `get_communication_channels` instead",
DeprecationWarning
)
return self.get_communication_channels(**kwargs)
def get_communication_channels(self, **kwargs):
"""
List communication channels for the specified user, sorted by
position.
:calls: `GET /api/v1/users/:user_id/communication_channels \
<https://canvas.instructure.com/doc/api/communication_channels.html#method.communication_channels.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.communication_channel.CommunicationChannel`
"""
return PaginatedList(
CommunicationChannel,
self._requester,
'GET',
'users/{}/communication_channels'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def create_communication_channel(self, **kwargs):
"""
Create a communication channel for this user
:calls: `POST /api/v1/users/:user_id/communication_channels \
<https://canvas.instructure.com/doc/api/communication_channels.html#method.communication_channels.create>`_
:rtype: :class:`canvasapi.communication_channel.CommunicationChannel`
"""
response = self._requester.request(
'POST',
'users/{}/communication_channels'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
return CommunicationChannel(self._requester, response.json())
def list_files(self, **kwargs):
"""
Returns the paginated list of files for the user.
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.user.User.get_files` instead.
:calls: `GET /api/v1/users/:user_id/files \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.file.File`
"""
warnings.warn(
"`list_files` is being deprecated and will be removed in a future "
"version. Use `get_files` instead",
DeprecationWarning
)
return self.get_files(**kwargs)
def get_files(self, **kwargs):
    """
    Return the paginated list of files belonging to this user.

    :calls: `GET /api/v1/users/:user_id/files \
    <https://canvas.instructure.com/doc/api/files.html#method.files.api_index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.file.File`
    """
    # Imported locally to avoid a circular import at module load time.
    from canvasapi.file import File

    endpoint = 'users/{}/files'.format(self.id)
    return PaginatedList(
        File,
        self._requester,
        'GET',
        endpoint,
        _kwargs=combine_kwargs(**kwargs)
    )
def get_file(self, file, **kwargs):
"""
Return the standard attachment json object for a file.
:calls: `GET /api/v1/users/:user_id/files/:id \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_show>`_
:param file: The object or ID of the file to retrieve.
:type file: :class:`canvasapi.file.File` or int
:rtype: :class:`canvasapi.file.File`
"""
from canvasapi.file import File
file_id = obj_or_id(file, "file", (File,))
response = self._requester.request(
'GET',
'users/{}/files/{}'.format(self.id, file_id),
_kwargs=combine_kwargs(**kwargs)
)
return File(self._requester, response.json())
def get_folder(self, folder):
"""
Returns the details for a user's folder
:calls: `GET /api/v1/users/:user_id/folders/:id \
<https://canvas.instructure.com/doc/api/files.html#method.folders.show>`_
:param folder: The object or ID of the folder to retrieve.
:type folder: :class:`canvasapi.folder.Folder` or int
:rtype: :class:`canvasapi.folder.Folder`
"""
from canvasapi.folder import Folder
folder_id = obj_or_id(folder, "folder", (Folder,))
response = self._requester.request(
'GET',
'users/{}/folders/{}'.format(self.id, folder_id)
)
return Folder(self._requester, response.json())
def list_folders(self, **kwargs):
"""
Returns the paginated list of all folders for the given user. This will be returned as a
flat list containing all subfolders as well.
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.user.User.get_folders` instead.
:calls: `GET /api/v1/users/:user_id/folders \
<https://canvas.instructure.com/doc/api/files.html#method.folders.list_all_folders>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.folder.Folder`
"""
warnings.warn(
"`list_folders` is being deprecated and will be removed in a "
"future version. Use `get_folders` instead.",
DeprecationWarning
)
return self.get_folders(**kwargs)
def get_folders(self, **kwargs):
"""
Returns the paginated list of all folders for the given user. This will be returned as a
flat list containing all subfolders as well.
:calls: `GET /api/v1/users/:user_id/folders \
<https://canvas.instructure.com/doc/api/files.html#method.folders.list_all_folders>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.folder.Folder`
"""
return PaginatedList(
Folder,
self._requester,
'GET',
'users/{}/folders'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def create_folder(self, name, **kwargs):
"""
Creates a folder in this user.
:calls: `POST /api/v1/users/:user_id/folders \
<https://canvas.instructure.com/doc/api/files.html#method.folders.create>`_
:param name: The name of the folder.
:type name: str
:rtype: :class:`canvasapi.folder.Folder`
"""
response = self._requester.request(
'POST',
'users/{}/folders'.format(self.id),
name=name,
_kwargs=combine_kwargs(**kwargs)
)
return Folder(self._requester, response.json())
def list_user_logins(self, **kwargs):
    """
    Given a user ID, return that user's logins for the given account.

    .. warning::
        .. deprecated:: 0.10.0
            Use :func:`canvasapi.user.User.get_user_logins` instead.

    :calls: `GET /api/v1/users/:user_id/logins \
    <https://canvas.instructure.com/doc/api/logins.html#method.pseudonyms.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.login.Login`
    """
    warnings.warn(
        "`list_user_logins` is being deprecated and will be removed in a future version."
        " Use `get_user_logins` instead",
        DeprecationWarning
    )
    # Fixed the stray space in `self. get_user_logins(...)`; purely a
    # delegating deprecation shim, same contract as get_user_logins().
    return self.get_user_logins(**kwargs)
def get_user_logins(self, **kwargs):
"""
Given a user ID, return that user's logins for the given account.
:calls: `GET /api/v1/users/:user_id/logins \
<https://canvas.instructure.com/doc/api/logins.html#method.pseudonyms.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.login.Login`
"""
from canvasapi.login import Login
return PaginatedList(
Login,
self._requester,
'GET',
'users/{}/logins'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def list_observees(self, **kwargs):
"""
List the users that the given user is observing
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.user.User.get_observees` instead.
:calls: `GET /api/v1/users/:user_id/observees \
<https://canvas.instructure.com/doc/api/user_observees.html#method.user_observees.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.user.User`
"""
warnings.warn(
"`list_observees` is being deprecated and will be removed in a "
"future version. Use `get_observees` instead",
DeprecationWarning
)
return self.get_observees(**kwargs)
def get_observees(self, **kwargs):
"""
List the users that the given user is observing
:calls: `GET /api/v1/users/:user_id/observees \
<https://canvas.instructure.com/doc/api/user_observees.html#method.user_observees.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.user.User`
"""
return PaginatedList(
User,
self._requester,
'GET',
'users/{}/observees'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def add_observee_with_credentials(self, **kwargs):
"""
Register the given user to observe another user, given the observee's credentials.
:calls: `POST /api/v1/users/:user_id/observees \
<https://canvas.instructure.com/doc/api/user_observees.html#method.user_observees.create>`_
:rtype: :class:`canvasapi.user.User`
"""
response = self._requester.request(
'POST',
'users/{}/observees'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
return User(self._requester, response.json())
def show_observee(self, observee_id):
    """
    Gets information about an observed user.

    :calls: `GET /api/v1/users/:user_id/observees/:observee_id \
    <https://canvas.instructure.com/doc/api/user_observees.html#method.user_observees.show>`_

    :param observee_id: The login id for the user to observe.
    :type observee_id: int

    :rtype: :class:`canvasapi.user.User`
    """
    response = self._requester.request(
        'GET',
        'users/{}/observees/{}'.format(self.id, observee_id)
    )
    return User(self._requester, response.json())
def add_observee(self, observee_id):
"""
Registers a user as being observed by the given user.
:calls: `PUT /api/v1/users/:user_id/observees/:observee_id \
<https://canvas.instructure.com/doc/api/user_observees.html#method.user_observees.update>`_
:param observee_id: The login id for the user to observe.
:type observee_id: int
:rtype: :class: `canvasapi.user.User`
"""
response = self._requester.request(
'PUT',
'users/{}/observees/{}'.format(self.id, observee_id)
)
return User(self._requester, response.json())
def remove_observee(self, observee_id):
"""
Unregisters a user as being observed by the given user.
:calls: `DELETE /api/v1/users/:user_id/observees/:observee_id \
<https://canvas.instructure.com/doc/api/user_observees.html#method.user_observees.destroy>`_
:param observee_id: The login id for the user to observe.
:type observee_id: int
:rtype: :class: `canvasapi.user.User`
"""
response = self._requester.request(
'DELETE',
'users/{}/observees/{}'.format(self.id, observee_id)
)
return User(self._requester, response.json())
def create_content_migration(self, migration_type, **kwargs):
"""
Create a content migration.
:calls: `POST /api/v1/users/:user_id/content_migrations \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.create>`_
:param migration_type: The migrator type to use in this migration
:type migration_type: str or :class:`canvasapi.content_migration.Migrator`
:rtype: :class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration, Migrator
if isinstance(migration_type, Migrator):
kwargs['migration_type'] = migration_type.type
elif isinstance(migration_type, string_types):
kwargs['migration_type'] = migration_type
else:
raise TypeError('Parameter migration_type must be of type Migrator or str')
response = self._requester.request(
'POST',
'users/{}/content_migrations'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
response_json = response.json()
response_json.update({'user_id': self.id})
return ContentMigration(self._requester, response_json)
def get_content_migration(self, content_migration, **kwargs):
    """
    Retrieve a content migration by its ID.

    :calls: `GET /api/v1/users/:user_id/content_migrations/:id \
    <https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.show>`_

    :param content_migration: The object or ID of the content migration to retrieve.
    :type content_migration: int, str or :class:`canvasapi.content_migration.ContentMigration`

    :rtype: :class:`canvasapi.content_migration.ContentMigration`
    """
    # Imported locally to avoid a circular import at module load time.
    from canvasapi.content_migration import ContentMigration

    migration_id = obj_or_id(content_migration, "content_migration", (ContentMigration,))

    response = self._requester.request(
        'GET',
        'users/{}/content_migrations/{}'.format(self.id, migration_id),
        _kwargs=combine_kwargs(**kwargs)
    )
    response_json = response.json()
    # Attach the owning user's id so the returned object carries its
    # parent context alongside the API payload.
    response_json.update({'user_id': self.id})

    return ContentMigration(self._requester, response_json)
def get_content_migrations(self, **kwargs):
"""
List content migrations that the current account can view or manage.
:calls: `GET /api/v1/users/:user_id/content_migrations/ \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration
return PaginatedList(
ContentMigration,
self._requester,
'GET',
'users/{}/content_migrations'.format(self.id),
{'user_id': self.id},
_kwargs=combine_kwargs(**kwargs)
)
def get_migration_systems(self, **kwargs):
"""
Return a list of migration systems.
:calls: `GET /api/v1/users/:user_id/content_migrations/migrators \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.available_migrators>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_migration.Migrator`
"""
from canvasapi.content_migration import Migrator
return PaginatedList(
Migrator,
self._requester,
'GET',
'users/{}/content_migrations/migrators'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
@python_2_unicode_compatible
class UserDisplay(CanvasObject):
    """A minimal user record whose string form is its ``display_name``."""

    def __str__(self):
        return "{}".format(self.display_name)
| 34.595297 | 120 | 0.615676 | from __future__ import absolute_import, division, print_function, unicode_literals
from six import python_2_unicode_compatible, string_types
import warnings
from canvasapi.calendar_event import CalendarEvent
from canvasapi.canvas_object import CanvasObject
from canvasapi.communication_channel import CommunicationChannel
from canvasapi.folder import Folder
from canvasapi.paginated_list import PaginatedList
from canvasapi.upload import Uploader
from canvasapi.util import combine_kwargs, obj_or_id
@python_2_unicode_compatible
class User(CanvasObject):
def __str__(self):
return "{} ({})".format(self.name, self.id)
def get_profile(self, **kwargs):
response = self._requester.request(
'GET',
'users/{}/profile'.format(self.id)
)
return response.json()
def get_page_views(self, **kwargs):
from canvasapi.page_view import PageView
return PaginatedList(
PageView,
self._requester,
'GET',
'users/{}/page_views'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def get_courses(self, **kwargs):
from canvasapi.course import Course
return PaginatedList(
Course,
self._requester,
'GET',
'users/{}/courses'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def get_missing_submissions(self):
from canvasapi.assignment import Assignment
return PaginatedList(
Assignment,
self._requester,
'GET',
'users/{}/missing_submissions'.format(self.id)
)
def update_settings(self, **kwargs):
response = self._requester.request(
'PUT',
'users/{}/settings'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
return response.json()
def get_color(self, asset_string):
response = self._requester.request(
'GET',
'users/{}/colors/{}'.format(self.id, asset_string)
)
return response.json()
def get_colors(self):
response = self._requester.request(
'GET',
'users/{}/colors'.format(self.id)
)
return response.json()
def update_color(self, asset_string, hexcode):
response = self._requester.request(
'PUT',
'users/{}/colors/{}'.format(self.id, asset_string),
hexcode=hexcode
)
return response.json()
def edit(self, **kwargs):
response = self._requester.request(
'PUT',
'users/{}'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
super(User, self).set_attributes(response.json())
return self
def merge_into(self, destination_user):
dest_user_id = obj_or_id(destination_user, 'destination_user', (User, ))
response = self._requester.request(
'PUT',
'users/{}/merge_into/{}'.format(self.id, dest_user_id),
)
super(User, self).set_attributes(response.json())
return self
def get_avatars(self):
from canvasapi.avatar import Avatar
return PaginatedList(
Avatar,
self._requester,
'GET',
'users/{}/avatars'.format(self.id)
)
def get_assignments(self, course, **kwargs):
from canvasapi.assignment import Assignment
from canvasapi.course import Course
course_id = obj_or_id(course, "course", (Course,))
return PaginatedList(
Assignment,
self._requester,
'GET',
'users/{}/courses/{}/assignments'.format(self.id, course_id),
_kwargs=combine_kwargs(**kwargs)
)
def get_enrollments(self, **kwargs):
from canvasapi.enrollment import Enrollment
return PaginatedList(
Enrollment,
self._requester,
'GET',
'users/{}/enrollments'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def upload(self, file, **kwargs):
return Uploader(
self._requester,
'users/{}/files'.format(self.id),
file,
**kwargs
).start()
def list_calendar_events_for_user(self, **kwargs):
warnings.warn(
"`list_calendar_events_for_user`"
" is being deprecated and will be removed in a future version."
" Use `get_calendar_events_for_user` instead",
DeprecationWarning
)
return self.get_calendar_events_for_user(**kwargs)
def get_calendar_events_for_user(self, **kwargs):
return PaginatedList(
CalendarEvent,
self._requester,
'GET',
'users/{}/calendar_events'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def list_communication_channels(self, **kwargs):
warnings.warn(
"`list_communication_channels`"
" is being deprecated and will be removed in a future version."
" Use `get_communication_channels` instead",
DeprecationWarning
)
return self.get_communication_channels(**kwargs)
def get_communication_channels(self, **kwargs):
return PaginatedList(
CommunicationChannel,
self._requester,
'GET',
'users/{}/communication_channels'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def create_communication_channel(self, **kwargs):
response = self._requester.request(
'POST',
'users/{}/communication_channels'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
return CommunicationChannel(self._requester, response.json())
def list_files(self, **kwargs):
warnings.warn(
"`list_files` is being deprecated and will be removed in a future "
"version. Use `get_files` instead",
DeprecationWarning
)
return self.get_files(**kwargs)
def get_files(self, **kwargs):
from canvasapi.file import File
return PaginatedList(
File,
self._requester,
'GET',
'users/{}/files'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def get_file(self, file, **kwargs):
from canvasapi.file import File
file_id = obj_or_id(file, "file", (File,))
response = self._requester.request(
'GET',
'users/{}/files/{}'.format(self.id, file_id),
_kwargs=combine_kwargs(**kwargs)
)
return File(self._requester, response.json())
def get_folder(self, folder):
from canvasapi.folder import Folder
folder_id = obj_or_id(folder, "folder", (Folder,))
response = self._requester.request(
'GET',
'users/{}/folders/{}'.format(self.id, folder_id)
)
return Folder(self._requester, response.json())
def list_folders(self, **kwargs):
warnings.warn(
"`list_folders` is being deprecated and will be removed in a "
"future version. Use `get_folders` instead.",
DeprecationWarning
)
return self.get_folders(**kwargs)
def get_folders(self, **kwargs):
return PaginatedList(
Folder,
self._requester,
'GET',
'users/{}/folders'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def create_folder(self, name, **kwargs):
response = self._requester.request(
'POST',
'users/{}/folders'.format(self.id),
name=name,
_kwargs=combine_kwargs(**kwargs)
)
return Folder(self._requester, response.json())
def list_user_logins(self, **kwargs):
warnings.warn(
"`list_user_logins` is being deprecated and will be removed in a future version."
" Use `get_user_logins` instead",
DeprecationWarning
)
return self. get_user_logins(**kwargs)
def get_user_logins(self, **kwargs):
from canvasapi.login import Login
return PaginatedList(
Login,
self._requester,
'GET',
'users/{}/logins'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def list_observees(self, **kwargs):
warnings.warn(
"`list_observees` is being deprecated and will be removed in a "
"future version. Use `get_observees` instead",
DeprecationWarning
)
return self.get_observees(**kwargs)
def get_observees(self, **kwargs):
return PaginatedList(
User,
self._requester,
'GET',
'users/{}/observees'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
def add_observee_with_credentials(self, **kwargs):
response = self._requester.request(
'POST',
'users/{}/observees'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
return User(self._requester, response.json())
def show_observee(self, observee_id):
response = self._requester.request(
'GET',
'users/{}/observees/{}'.format(self.id, observee_id)
)
return User(self._requester, response.json())
def add_observee(self, observee_id):
response = self._requester.request(
'PUT',
'users/{}/observees/{}'.format(self.id, observee_id)
)
return User(self._requester, response.json())
def remove_observee(self, observee_id):
response = self._requester.request(
'DELETE',
'users/{}/observees/{}'.format(self.id, observee_id)
)
return User(self._requester, response.json())
def create_content_migration(self, migration_type, **kwargs):
from canvasapi.content_migration import ContentMigration, Migrator
if isinstance(migration_type, Migrator):
kwargs['migration_type'] = migration_type.type
elif isinstance(migration_type, string_types):
kwargs['migration_type'] = migration_type
else:
raise TypeError('Parameter migration_type must be of type Migrator or str')
response = self._requester.request(
'POST',
'users/{}/content_migrations'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
response_json = response.json()
response_json.update({'user_id': self.id})
return ContentMigration(self._requester, response_json)
def get_content_migration(self, content_migration, **kwargs):
from canvasapi.content_migration import ContentMigration
migration_id = obj_or_id(content_migration, "content_migration", (ContentMigration,))
response = self._requester.request(
'GET',
'users/{}/content_migrations/{}'.format(self.id, migration_id),
_kwargs=combine_kwargs(**kwargs)
)
response_json = response.json()
response_json.update({'user_id': self.id})
return ContentMigration(self._requester, response_json)
def get_content_migrations(self, **kwargs):
from canvasapi.content_migration import ContentMigration
return PaginatedList(
ContentMigration,
self._requester,
'GET',
'users/{}/content_migrations'.format(self.id),
{'user_id': self.id},
_kwargs=combine_kwargs(**kwargs)
)
def get_migration_systems(self, **kwargs):
from canvasapi.content_migration import Migrator
return PaginatedList(
Migrator,
self._requester,
'GET',
'users/{}/content_migrations/migrators'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
@python_2_unicode_compatible
class UserDisplay(CanvasObject):
def __str__(self):
return "{}".format(self.display_name)
| true | true |
f71b604290c4284cdb29c7ba708ed37267f359af | 3,054 | py | Python | copyright_updater/logger_factory.py | swasun/copyright-updater | 750ced32ee9738e4d65189bc0e917e0581a59668 | [
"MIT"
] | null | null | null | copyright_updater/logger_factory.py | swasun/copyright-updater | 750ced32ee9738e4d65189bc0e917e0581a59668 | [
"MIT"
] | null | null | null | copyright_updater/logger_factory.py | swasun/copyright-updater | 750ced32ee9738e4d65189bc0e917e0581a59668 | [
"MIT"
] | null | null | null | #####################################################################################
# MIT License #
# #
# Copyright (C) 2018 Charly Lamothe #
# #
# This file is part of copyright-updater. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
import logging
from logging.handlers import RotatingFileHandler
import os
import errno
class LoggerFactory:
    """Builds DEBUG-level loggers backed by a rotating log file."""

    @staticmethod
    def create(path, module_name):
        """
        Return a logger named *module_name* that writes to
        ``<path>/<module_name>.log`` with rotation (1 MB, 5 backups).

        The log directory is created if missing. Calling create() again
        with the same arguments returns the same logger without attaching
        a duplicate handler (previously each call added another handler,
        so every record was written multiple times).
        """
        logger = logging.getLogger(module_name)
        logger.setLevel(logging.DEBUG)

        # Create the log directory; tolerate it already existing.
        try:
            os.makedirs(path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        log_file = os.path.abspath(os.path.join(path, module_name + '.log'))

        # logging.getLogger() returns a process-wide singleton per name, so
        # guard against re-adding the same file handler on repeated calls.
        already_attached = any(
            isinstance(h, RotatingFileHandler)
            and getattr(h, 'baseFilename', None) == log_file
            for h in logger.handlers
        )
        if not already_attached:
            fh = RotatingFileHandler(log_file, maxBytes=1000000, backupCount=5)
            fh.setLevel(logging.DEBUG)
            formatter = logging.Formatter(
                '%(asctime)s - %(filename)s:%(lineno)s - %(name)s'
                ' - %(levelname)s - %(message)s'
            )
            fh.setFormatter(formatter)
            logger.addHandler(fh)

        return logger
return logger | 51.762712 | 119 | 0.47053 | true | true | |
f71b60c66400900beffb67846939db70bfb9249f | 4,381 | py | Python | avax/webdav/tests/benchmarks.py | eavatar/avax.webdav | e4d4915fd5af8878ba88e3641e624e64033ece96 | [
"MIT"
] | null | null | null | avax/webdav/tests/benchmarks.py | eavatar/avax.webdav | e4d4915fd5af8878ba88e3641e624e64033ece96 | [
"MIT"
] | null | null | null | avax/webdav/tests/benchmarks.py | eavatar/avax.webdav | e4d4915fd5af8878ba88e3641e624e64033ece96 | [
"MIT"
] | null | null | null | # -*- coding: iso-8859-1 -*-
# (c) 2009-2014 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Benchmark suite for WsgiDAV.
This test suite uses davclient to generate WebDAV requests.
A first collection of ideas
===========================
- The result is printable HTML, copy/pastable
- It also contains date, environment info (Hardware, package versions, ...)
- The suite can be run stand-alone against a running WsgiDAV server, just like
litmus.
- It uses `davclient` and generates an HTML file.
- There should be detailed results as well as a few summarizing numbers:
('Total time', 'Byte reads per second', 'Byte write per second', or something
like this), so one can compare benchmarks at a glance.
- Optional parameters allow to run only a single test
- Parameter allows to pass configuration infos that are dumped with the result:
benchEnviron = {
"comment": "Test with caching enabled",
"server_os": "Ubuntu 9.01",
"server_cpu": "Intel 3GHz",
"server_ram": "2GB",
"wsgidav_version": "0.4.b1"
"network_bandwidth": "100MBit",
>> these can be automatically set?:
"client_os": "Windows XP",
"client_cpu": "AMD 5000",
"date": now()
}
- Allow to print profiling info (from WsgiDAV server and from becnhmark client!)
- The result file could also contain the results of test suites ('PASSED'),
so we could use it as documentation for tests on different platforms/setups.
Questions
=========
- is lxml really faster?
- compare this to mod_dav's performance
Test cases
==========
- PUT 1 x 10 MB
- PUT 100 x 1 kB
- GET 1 x 10 MB
- GET 100 x 1 kB
- 100 x PROPFIND depth 0
- 1 x PROPFIND depth infinity
- COPY: big file, many small files, big tree
- MOVE: big file, many small files, big tree
- DELETE: big file, many small files, big tree
- LOCK
- UNLOCK
- Check if locked
- PROPPATCH
- PROPFIND: depth 0, many small files
depth infinity
- run litmus in a timed script
- Simulate typical Windows Client request sequences:
- dir browsing
- file reading
- file editing
- http://groups.google.com/group/paste-users/t/b2afc88a86caade1?hl=en
use httperf
http://www.hpl.hp.com/research/linux/httperf/httperf-man-0.9.txt
and openwebload
http://openwebload.sourceforge.net/index.html
- makeTree(rootFolderName="/bench", folderCount=10, subfolderCount=10, fileCount=10, fileSize=1024)
Big tree with 100 folders and 1000 files
bench/
folder1/
..
folder10/
subfolder10-1/
..
subfolder10-10/
file10-10-1.txt -> 1k
"""
import logging
_benchmarks = [#"proppatch_many",
#"proppatch_big",
#"proppatch_deep",
"test_scripted",
]
def _real_run_bench(bench, opts):
if bench == "*":
for bench in _benchmarks:
run_bench(bench, opts)
return
assert bench in _benchmarks
if bench == "test_scripted":
from avax.webdav.tests import test_scripted
test_scripted.main()
else:
raise ValueError()
def run_bench(bench, opts):
    """Run one benchmark, profiling it when listed in opts["profile_benchmarks"]."""
    profile_benchmarks = opts["profile_benchmarks"]
    if bench in profile_benchmarks:
        # http://docs.python.org/library/profile.html#module-cProfile
        import cProfile
        import pstats
        try:
            from StringIO import StringIO  # Python 2
        except ImportError:
            from io import StringIO  # Python 3 (original only worked on Py2)
        prof = cProfile.Profile()
        prof = prof.runctx("_real_run_bench(bench, opts)", globals(), locals())
        stream = StringIO()
        stats = pstats.Stats(prof, stream=stream)
        # stats.sort_stats("time")  # Or cumulative
        stats.sort_stats("cumulative")  # Or time
        stats.print_stats(80)  # 80 = how many entries to print
        # The rest is optional.
        # stats.print_callees()
        # stats.print_callers()
        # Lazy %-args: the message is only rendered if the record is emitted.
        logging.warning("Profile data for '%s':\n%s", bench, stream.getvalue())
    else:
        _real_run_bench(bench, opts)
def bench_all(opts):
    """Run every registered benchmark with the given options."""
    run_bench("*", opts)


def main():
    """Entry point: run the full benchmark suite with default options."""
    options = {
        "num": 10,
        "profile_benchmarks": ["*"],
    }
    bench_all(options)


if __name__ == "__main__":
    main()
| 31.070922 | 99 | 0.625428 |
import logging
_benchmarks = [
"test_scripted",
]
def _real_run_bench(bench, opts):
if bench == "*":
for bench in _benchmarks:
run_bench(bench, opts)
return
assert bench in _benchmarks
if bench == "test_scripted":
from avax.webdav.tests import test_scripted
test_scripted.main()
else:
raise ValueError()
def run_bench(bench, opts):
profile_benchmarks = opts["profile_benchmarks"]
if bench in profile_benchmarks:
Profile, pstats, StringIO
prof = cProfile.Profile()
prof = prof.runctx("_real_run_bench(bench, opts)", globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
ort_stats("cumulative")
stats.print_stats(80)
logging.warning("Profile data for '%s':\n%s" % (bench, stream.getvalue()))
else:
_real_run_bench(bench, opts)
def bench_all(opts):
run_bench("*", opts)
def main():
opts = {"num": 10,
"profile_benchmarks": ["*"],
}
bench_all(opts)
if __name__ == "__main__":
main()
| true | true |
f71b6170ec1ea5471b4314a0e09ef42e3e38daff | 1,001 | py | Python | project/urls.py | tgavankar/PlaydohSlideSync | 5718d661e78d361a0dcda908b63c736bab886bb4 | [
"BSD-3-Clause"
] | null | null | null | project/urls.py | tgavankar/PlaydohSlideSync | 5718d661e78d361a0dcda908b63c736bab886bb4 | [
"BSD-3-Clause"
] | null | null | null | project/urls.py | tgavankar/PlaydohSlideSync | 5718d661e78d361a0dcda908b63c736bab886bb4 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.conf.urls.defaults import patterns, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from .examples import urls
from funfactory.monkeypatches import patch
patch()
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    (r'', include(urls)),

    # Generate a robots.txt. The conditional must be parenthesized: '%'
    # binds tighter than 'if/else', so without the parentheses a falsy
    # settings.ENGAGE_ROBOTS made the whole expression evaluate to the
    # bare string 'Disallow' instead of a valid robots.txt body.
    (r'^robots\.txt$',
        lambda r: HttpResponse(
            "User-agent: *\n%s: /" % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
            mimetype="text/plain"
        )
    )

    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
)

## In DEBUG mode, serve media files through Django.
if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
| 27.805556 | 88 | 0.685315 | from django.conf import settings
from django.conf.urls.defaults import patterns, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from .examples import urls
from funfactory.monkeypatches import patch
patch()
urlpatterns = patterns('',
(r'', include(urls)),
(r'^robots\.txt$',
lambda r: HttpResponse(
"User-agent: *\n%s: /" % 'Allow' if settings.ENGAGE_ROBOTS else 'Disallow' ,
mimetype="text/plain"
)
)
)
_urlpatterns()
| true | true |
f71b63178ebdc11ae83bec5b2f2f47ff8b336dd6 | 1,011 | py | Python | src/sellers/migrations/0004_alter_seller_logo_url.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | [
"MIT"
] | 2 | 2021-08-30T22:58:32.000Z | 2021-12-12T10:47:52.000Z | src/sellers/migrations/0004_alter_seller_logo_url.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | [
"MIT"
] | null | null | null | src/sellers/migrations/0004_alter_seller_logo_url.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | [
"MIT"
] | 1 | 2021-08-22T19:12:44.000Z | 2021-08-22T19:12:44.000Z | # Generated by Django 3.2.7 on 2021-10-27 07:05
import django.core.validators
from django.db import migrations, models
from django.db.transaction import atomic
from sellers.models import Seller
"""
Note: this migration includes a data migration that sets logo_url to null for
all currently existing records, because the logo_url field's type is changed
to FileField in this migration.
"""
def clear_seller_logo_url(apps, schema_editor):
    """
    Set logo_url to null on every existing Seller row.

    The field's type is changed to FileField later in this migration, so
    previously stored values are cleared first.
    """
    # NOTE(review): this uses the live ``Seller`` model rather than the
    # historical ``apps.get_model('sellers', 'Seller')``; kept to match the
    # original, but verify it stays safe across future schema changes.
    with atomic():
        # One bulk UPDATE instead of a save() per row (O(1) queries vs O(n)).
        # update() bypasses save()/signals, which is acceptable here since
        # the original per-row save() carried no custom side effects visible
        # in this migration.
        Seller.objects.update(logo_url=None)
class Migration(migrations.Migration):
    # Applies after the seller schema migration 0003.

    dependencies = [
        ('sellers', '0003_auto_20211012_1929'),
    ]

    operations = [
        # Data migration runs first so the field change below starts from
        # null values; the reverse direction is a no-op.
        migrations.RunPython(clear_seller_logo_url, migrations.RunPython.noop),
        # logo_url's type changes to FileField here (see module docstring).
        migrations.AlterField(
            model_name='seller',
            name='logo_url',
            field=models.FileField(blank=True, help_text='Logo', max_length=1000, null=True, upload_to='', verbose_name='Logo'),
        ),
    ]
| 26.605263 | 128 | 0.68546 |
import django.core.validators
from django.db import migrations, models
from django.db.transaction import atomic
from sellers.models import Seller
def clear_seller_logo_url(apps, schema_editor):
with atomic():
for seller in Seller.objects.all():
seller.logo_url = None
seller.save()
class Migration(migrations.Migration):
dependencies = [
('sellers', '0003_auto_20211012_1929'),
]
operations = [
migrations.RunPython(clear_seller_logo_url, migrations.RunPython.noop),
migrations.AlterField(
model_name='seller',
name='logo_url',
field=models.FileField(blank=True, help_text='Logo', max_length=1000, null=True, upload_to='', verbose_name='Logo'),
),
]
| true | true |
f71b632bb314545ed7732ce47684f88d027b19e7 | 78 | py | Python | xpring/proto/__init__.py | mvadari/xpring-py | b837420127d1c1e5051ed305ed4f19fe9910a4f6 | [
"0BSD"
] | 6 | 2019-12-11T00:54:56.000Z | 2021-03-11T19:44:44.000Z | xpring/proto/__init__.py | mvadari/xpring-py | b837420127d1c1e5051ed305ed4f19fe9910a4f6 | [
"0BSD"
] | null | null | null | xpring/proto/__init__.py | mvadari/xpring-py | b837420127d1c1e5051ed305ed4f19fe9910a4f6 | [
"0BSD"
] | 9 | 2020-02-28T18:40:46.000Z | 2022-02-28T23:01:09.000Z | # The rest of this package, but not this __init__.py, is generated by protoc.
| 39 | 77 | 0.75641 | true | true | |
f71b6354cd7ddb3ba58cc906feac3f6233ca894c | 537 | py | Python | ImgVidProcessing/Exercise1/solution1.py | SystemNinja/MyPythonPrograms | 6bdebb5017994c3431aea769319f702075fff9b9 | [
"MIT"
] | null | null | null | ImgVidProcessing/Exercise1/solution1.py | SystemNinja/MyPythonPrograms | 6bdebb5017994c3431aea769319f702075fff9b9 | [
"MIT"
] | null | null | null | ImgVidProcessing/Exercise1/solution1.py | SystemNinja/MyPythonPrograms | 6bdebb5017994c3431aea769319f702075fff9b9 | [
"MIT"
] | null | null | null | """
This solution implements glob library in order to 'automatize' task.
Instead of manually processing each image
Reference: https://pymotw.com/2/glob/
"""
import cv2
import glob2
images=glob2.glob("*.jpg")
#images=glob2.glob("Exercise1\*.jpg")
for image in images:
img=cv2.imread(image, 0)
re=cv2.resize(img,(100,100))
cv2.imshow("Resized image", re)
cv2.waitKey(500)
cv2.destroyAllWindows()
cv2.imwrite(image+"_resized.jpg", re)
#cv2.imwrite("Exercise1\\"+image+"_resized.jpg", re) | 25.571429 | 69 | 0.670391 |
import cv2
import glob2
images=glob2.glob("*.jpg")
for image in images:
img=cv2.imread(image, 0)
re=cv2.resize(img,(100,100))
cv2.imshow("Resized image", re)
cv2.waitKey(500)
cv2.destroyAllWindows()
cv2.imwrite(image+"_resized.jpg", re)
| true | true |
f71b63892ebbad403e4916b665b53156a244c0fa | 87 | py | Python | Python/100Excersises/.history/51 to 75/69/69_20201119121845.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | Python/100Excersises/.history/51 to 75/69/69_20201119121845.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | 1 | 2021-07-24T03:18:30.000Z | 2021-07-24T12:45:07.000Z | Python/100Excersises/.history/51 to 75/69/69_20201119121845.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | import requests
sugam = requests.get("http://www.pythonhow.com")
print(rp.text[:100])
| 17.4 | 48 | 0.724138 | import requests
sugam = requests.get("http://www.pythonhow.com")
print(rp.text[:100])
| true | true |
f71b64529d9237153dd6f12a58b0280dfcb69bfe | 454 | py | Python | .history/ClassFiles/WorkingWithExternalFiles/FileHandling_20210107190119.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | .history/ClassFiles/WorkingWithExternalFiles/FileHandling_20210107190119.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | .history/ClassFiles/WorkingWithExternalFiles/FileHandling_20210107190119.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | """ Opening and Reading Files
Syntax to open file.
f = open("Myfile.txt") # assigned to the variable f.
f = open("Myfile.txt","rt") # if in the same directory.
f = open("c:\\MyFolders\\Myfile.txt") # if not in the same directory.
"""
f = open("Quotes.txt")
# print(f.readable())
# print(f.read())
# f.close()
# print(f.readable())
print(f.read(11))
print(f.readlines())
for quote in f:
print(quote)
print("HI")
| 15.655172 | 69 | 0.594714 | f = open("Quotes.txt")
print(f.read(11))
print(f.readlines())
for quote in f:
print(quote)
print("HI")
| true | true |
f71b64ca7afadced29875edb91d99ac16e7d6ba0 | 11,148 | py | Python | src/main/app-resources/notebook/libexec/helpers.py | ec-better/ewf-ethz-03-01-01 | 5ca616e5c25bbba29013a7de248af4b69757921b | [
"Apache-2.0"
] | 1 | 2021-09-23T02:20:11.000Z | 2021-09-23T02:20:11.000Z | src/main/app-resources/notebook/libexec/helpers.py | ec-better/ewf-ethz-03-01-01 | 5ca616e5c25bbba29013a7de248af4b69757921b | [
"Apache-2.0"
] | null | null | null | src/main/app-resources/notebook/libexec/helpers.py | ec-better/ewf-ethz-03-01-01 | 5ca616e5c25bbba29013a7de248af4b69757921b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import re
from shapely import wkt
from shapely.geometry import box, Polygon
import pandas as pd
import geopandas as gpd
from osgeo import gdal, gdalnumeric, osr, ogr
def ensure_dir(file_path):
    """Create the parent directory of ``file_path`` if it does not exist.

    :param file_path: path to a file whose containing directory should exist.
    """
    directory = os.path.dirname(file_path)
    # Guard against a bare filename: os.path.dirname("f.txt") == "" and
    # os.makedirs("") raises FileNotFoundError. exist_ok=True also removes
    # the check-then-create race of the original exists()/makedirs() pair.
    if directory:
        os.makedirs(directory, exist_ok=True)
def getResolution(demFolder, return_full_paths = False):
    """List the DEM rasters named 'DEM_<res>.tif' directly inside ``demFolder``.

    :param demFolder: directory to scan (non-recursive, files only).
    :param return_full_paths: if True return '<demFolder>/<name>' paths sorted
        in reverse lexicographic order; otherwise return the integer
        resolutions parsed from the file names (listing order).
    """
    entries = [name for name in os.listdir(demFolder)
               if os.path.isfile(os.path.join(demFolder, name))]
    dem_names = [name for name in entries
                 if name.startswith('DEM_') and name.endswith('.tif')]
    if return_full_paths:
        paths = [demFolder + '/' + name for name in dem_names]
        paths.sort(reverse=True)
        return paths
    return [int(name[4:-4]) for name in dem_names]
def readGDAL2numpy(rasterPath, return_geoInformation = False):
    """Load a raster into a numpy array via GDAL.

    For floating point rasters, cells equal to the band's no-data value are
    replaced with NaN.

    :param rasterPath: path to a GDAL-readable raster file.
    :param return_geoInformation: if True also return (geoTransform, projection).
    :return: data array, optionally followed by geo transform and projection.
    """
    try:
        ds = gdal.Open(rasterPath)
    except RuntimeError:
        import sys  # fixed: `sys` was used here without ever being imported
        print('Unable to open input file')
        sys.exit(1)

    data = gdalnumeric.LoadFile(rasterPath, False)
    noDataVal = ds.GetRasterBand(1).GetNoDataValue()
    try:
        if data.dtype in ['float16', 'float32', 'float64'] and noDataVal is not None:
            data[data == noDataVal] = np.NaN
    except Exception:  # narrowed from bare except; keep the best-effort behaviour
        print("Issue in no data value")

    if not return_geoInformation:
        return data
    geoTransform = ds.GetGeoTransform()
    projection = ds.GetProjection()
    return data, geoTransform, projection
def writeNumpyArr2Geotiff(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):
    """Write a 2-D numpy array to a single-band GeoTIFF.

    NaNs in ``data`` are replaced (in place) by ``noDataValue`` when one is
    given, and that value is registered on the output band.

    :param outputPath: destination file path.
    :param data: 2-D array of shape (rows, cols).
    :param geoTransform: optional GDAL geo transform tuple to set.
    :param projection: optional projection (WKT string) to set.
    :param GDAL_dtype: GDAL data type of the output band.
    :param noDataValue: value stored for missing data.
    """
    nscn, npix = data.shape
    # Check noDataValue first so np.isnan is only evaluated when needed.
    if noDataValue is not None and np.isnan(data).any():
        data[np.isnan(data)] = noDataValue

    ds_new = gdal.GetDriverByName('GTiff').Create(outputPath, npix, nscn, 1, GDAL_dtype)
    if geoTransform is not None:  # idiom fix: was `!= None`
        ds_new.SetGeoTransform(geoTransform)
    if projection is not None:
        ds_new.SetProjection(projection)
    outBand = ds_new.GetRasterBand(1)
    outBand.WriteArray(data)
    if noDataValue is not None:
        ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)

    # Flush and drop references so GDAL finalises the file on disk.
    ds_new.FlushCache()
    ds_new = None
    outBand = None
def writeNumpyArr2Saga(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):
    """Write a 2-D numpy array to a single-band SAGA grid.

    NaNs in ``data`` are replaced (in place) by ``noDataValue`` when one is
    given, and that value is registered on the output band.

    :param outputPath: destination file path.
    :param data: 2-D array of shape (rows, cols).
    :param geoTransform: accepted for signature parity with
        writeNumpyArr2Geotiff but never applied here — NOTE(review): confirm
        whether the SAGA driver should receive it too.
    :param projection: optional projection (WKT string) to set.
    :param GDAL_dtype: GDAL data type of the output band.
    :param noDataValue: value stored for missing data.
    """
    nscn, npix = data.shape
    if noDataValue is not None and np.isnan(data).any():
        data[np.isnan(data)] = noDataValue

    ds_new = gdal.GetDriverByName('SAGA').Create(outputPath, npix, nscn, 1, GDAL_dtype)
    outBand = ds_new.GetRasterBand(1)
    outBand.WriteArray(data)
    if noDataValue is not None:  # idiom fix: was `!= None`
        ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)
    if projection is not None:
        ds_new.SetProjection(projection)

    # Flush and drop references so GDAL finalises the file on disk.
    ds_new.FlushCache()
    ds_new = None
    outBand = None
def wkt2bbox(wkt_input):
    """Return the axis-aligned bounding box of a WKT geometry.

    :param wkt_input: geometry as a WKT string.
    :return: list of [x, y] corner points (closed ring, 5 points).
    """
    geometry = wkt.loads(wkt_input)
    bounding_box = box(*geometry.bounds)  # bounds = (minx, miny, maxx, maxy)
    return [[x, y] for x, y in bounding_box.exterior.coords]
def wkt2shp(wkt_input, target_epsg, dst_file, bbox=False):
    """Write a WKT geometry (or its bounding box) to a shapefile.

    :param wkt_input: geometry as a WKT string.
    :param target_epsg: EPSG code used as the CRS of the output file.
    :param dst_file: destination shapefile path; parent dirs are created.
    :param bbox: if True write the geometry's bounding box instead.
    """
    ensure_dir(dst_file)
    geometry = Polygon(wkt2bbox(wkt_input)) if bbox else wkt.loads(wkt_input)
    frame = gpd.GeoDataFrame(pd.DataFrame(['p1'], columns=['geom']),
                             crs={'init': 'epsg:' + str(target_epsg)},
                             geometry=[geometry])
    frame.to_file(dst_file)
def rescaleDEM(image, noData = None, maxVal = 255):
    """Linearly rescale elevation values into the integer range [1, maxVal].

    Cells equal to ``noData`` are excluded from the min/max and mapped to 0
    in the output (instead of casting NaN to uint8, which is undefined
    behaviour in numpy).

    :param image: elevation array.
    :param noData: value marking missing cells (may legitimately be 0).
    :param maxVal: upper bound of the output range.
    :return: uint8 array, 0 for no-data cells, data mapped to [1, maxVal].
    """
    # Fixed: was `if noData:`, which silently ignored a no-data value of 0.
    if noData is not None:
        image = np.float32(image)
        image[image == noData] = np.nan
    minElev = np.nanmin(image)
    maxElev = np.nanmax(image)
    rescaled = (((image - minElev) / (maxElev - minElev)) * (maxVal - 1)) + 1
    # No-data cells propagated as NaN through the arithmetic; send them to 0.
    rescaled = np.nan_to_num(rescaled, nan=0.0)
    return np.uint8(rescaled)
def joinStrArg(str1, str2, str3 = None):
    """Join two or three values into one space-separated string.

    :param str3: optional third value; included whenever it is not None
        (falsy values such as 0 or "" are still included).
    """
    parts = [str1, str2] if str3 is None else [str1, str2, str3]
    return " ".join(str(part) for part in parts)
def wkt2EPSG(wkt, epsg='/usr/local/share/proj/epsg', forceProj4=False):
    '''
    Transform a WKT string to an EPSG code.

    NOTE(review): the parameter name `wkt` shadows the shapely `wkt` module
    imported at file level; harmless inside this function but confusing.

    Arguments
    ---------
    wkt: WKT definition
    epsg: the proj.4 epsg file (defaults to '/usr/local/share/proj/epsg')
    forceProj4: whether to perform brute force proj4 epsg file check (last resort)

    Returns: EPSG code as 'AUTH:CODE' / 'EPSG:CODE', a WKT or proj4 string for
    local definitions / forced proj4, or None when no match is found.
    '''
    code = None
    p_in = osr.SpatialReference()
    s = p_in.ImportFromWkt(wkt)
    if s == 5:  # invalid WKT
        return None
    if p_in.IsLocal() == 1:  # this is a local definition
        return p_in.ExportToWkt()
    if p_in.IsGeographic() == 1:  # this is a geographic srs
        cstype = 'GEOGCS'
    else:  # this is a projected srs
        cstype = 'PROJCS'
    an = p_in.GetAuthorityName(cstype)
    ac = p_in.GetAuthorityCode(cstype)
    if an is not None and ac is not None:  # return the EPSG code
        return '%s:%s' % \
            (p_in.GetAuthorityName(cstype), p_in.GetAuthorityCode(cstype))
    else:  # try brute force approach by grokking proj epsg definition file
        p_out = p_in.ExportToProj4()
        if p_out:
            if forceProj4 is True:
                return p_out
            # NOTE(review): file handle is never closed; consider `with open(...)`.
            f = open(epsg)
            for line in f:
                if line.find(p_out) != -1:
                    m = re.search('<(\\d+)>', line)  # epsg file stores codes as '<NNNN>'
                    if m:
                        code = m.group(1)
                        break
            if code:  # match
                return 'EPSG:%s' % code
            else:  # no match
                return None
        else:
            return None
def getCornerCoordinates(gdal_dataSet, target_srs = False):
    """
    Return the four corner coordinates of a raster, optionally reprojected.

    :param gdal_dataSet: /path/to/file OR gdal dataset
    :param target_srs: False for output coordinates in same coordinate system OR 'wgs84' for lat long values OR custom osr.SpatialReference() object
    :return: list of corner coordinates

    --0--------3--
    |            |
    |            |  <--- Index of coordinates returned in list
    |            |
    --1--------2--
    """
    # Accept either a path or an already-open dataset.
    if type(gdal_dataSet) is str:
        gdal_dataSet = gdal.Open(gdal_dataSet)

    gt=gdal_dataSet.GetGeoTransform() # gt = [ulx, xres, xskew, uly, yskew, yres]
    cols = gdal_dataSet.RasterXSize
    rows = gdal_dataSet.RasterYSize

    def GetExtent(gt,cols,rows):
        ''' Return list of corner coordinates from a geotransform.

            @type gt: C{tuple/list}
            @param gt: geotransform
            @type cols: C{int}
            @param cols: number of columns in the dataset
            @type rows: C{int}
            @param rows: number of rows in the dataset
            @rtype: C{[float,...,float]}
            @return: coordinates of each corner
        '''
        ext=[]
        xarr=[0,cols]
        yarr=[0,rows]
        # yarr is reversed after each column so corners come out in the
        # 0-1-2-3 order shown in the diagram above.
        for px in xarr:
            for py in yarr:
                x=gt[0]+(px*gt[1])+(py*gt[2])
                y=gt[3]+(px*gt[4])+(py*gt[5])
                ext.append([x,y])
                #print(x,y)
            yarr.reverse()
        return ext

    def ReprojectCoords(coords,src_srs,tgt_srs):
        ''' Reproject a list of x,y coordinates.

            @type coords: C{tuple/list}
            @param coords: List of [[x,y],...[x,y]] coordinates
            @type src_srs: C{osr.SpatialReference}
            @param src_srs: OSR SpatialReference object
            @type tgt_srs: C{osr.SpatialReference}
            @param tgt_srs: OSR SpatialReference object
            @rtype: C{tuple/list}
            @return: List of transformed [[x,y],...[x,y]] coordinates
        '''
        trans_coords=[]
        transform = osr.CoordinateTransformation( src_srs, tgt_srs)
        for x,y in coords:
            # TransformPoint returns (x, y, z); z is discarded.
            x,y,z = transform.TransformPoint(x,y)
            trans_coords.append([x,y])
        return trans_coords

    ext = GetExtent(gt,cols,rows)

    src_srs=osr.SpatialReference()
    src_srs.ImportFromWkt(gdal_dataSet.GetProjection())

    if target_srs == False:
        # No reprojection requested: return native-CRS corners.
        return ext
    elif target_srs == 'wgs84':
        #target_srs = src_srs.CloneGeogCS()
        #
        target_srs=osr.SpatialReference()
        target_srs.ImportFromEPSG(4326)

    return ReprojectCoords(ext,src_srs,target_srs)
def resizeToDEM(imPath, sizeDEM = None, geoTransform = None, projection = None, noData = None):
    """Resize the raster at ``imPath`` in place to match the DEM grid.

    :param imPath: path of the raster; overwritten when a resize is needed.
    :param sizeDEM: target (rows, cols); when None the function is a no-op.
    :param geoTransform: geo transform written to the resized raster.
    :param projection: projection written to the resized raster.
    :param noData: no-data value, masked to NaN before cubic interpolation.
    """
    imDS = gdal.Open(imPath, gdal.GA_ReadOnly)
    imPix = imDS.RasterXSize
    imScn = imDS.RasterYSize

    if sizeDEM is not None:
        # Fixed: the original unpacked sizeDEM before this guard and crashed
        # with TypeError whenever sizeDEM was left as None.
        nscn, npix = sizeDEM
        if nscn != imScn or npix != imPix:
            print("Size Mismatch")
            image = imDS.ReadAsArray()
            if noData is not None:
                image = np.float32(image)
                image[image == noData] = np.nan
            imNew = cv2.resize(image, (npix, nscn), interpolation=cv2.INTER_CUBIC)
            writeNumpyArr2Geotiff(imPath, imNew, geoTransform=geoTransform,
                                  projection=projection,
                                  GDAL_dtype=gdal.GDT_UInt16, noDataValue=noData)
def map_uint16_to_uint8(img, lower_bound=None, upper_bound=None):
    '''
    Map a 16-bit image through a lookup table to convert it to 8-bit.

    :param img: uint16 array.
    :param lower_bound: values at or below this map to 0 (defaults to img min).
    :param upper_bound: values at or above this map to 255 (defaults to img max).
    :return: uint8 array of the same shape.
    :raises ValueError: on out-of-range or inverted bounds.
    '''
    # Validate only when a bound is explicitly given. The original evaluated
    # `0 <= lower_bound` before the None check, so calling with the default
    # arguments raised TypeError.
    if lower_bound is not None and not (0 <= lower_bound < 2**16):
        raise ValueError(
            '"lower_bound" must be in the range [0, 65535]')
    if upper_bound is not None and not (0 <= upper_bound < 2**16):
        raise ValueError(
            '"upper_bound" must be in the range [0, 65535]')
    if lower_bound is None:
        lower_bound = np.min(img)
    if upper_bound is None:
        upper_bound = np.max(img)
    if lower_bound >= upper_bound:
        raise ValueError(
            '"lower_bound" must be smaller than "upper_bound"')
    # Piecewise-linear lookup table over the full uint16 domain:
    # 0 below lower_bound, linear ramp in between, 255 above upper_bound.
    lut = np.concatenate([
        np.zeros(lower_bound, dtype=np.uint16),
        np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),
        np.ones(2**16 - upper_bound, dtype=np.uint16) * 255
    ])
    return lut[img].astype(np.uint8)
def closeCV(mask, kernelSize = 11):
    """Apply morphological closing (dilation then erosion) with a square
    all-ones kernel of side ``kernelSize``."""
    structuring_element = np.ones((kernelSize, kernelSize), np.uint8)
    return cv2.morphologyEx(mask, cv2.MORPH_CLOSE, structuring_element)
def newGeoTransform(geoTransform, maskBounds):
    """Shift a GDAL geo transform so its origin matches the pixel offsets
    (xMin, yMin) in ``maskBounds``; pixel sizes and rotation are kept.

    :param geoTransform: (ulx, xres, xskew, uly, yskew, yres) tuple.
    :param maskBounds: dict with 'xMin' and 'yMin' pixel offsets.
    """
    origin_x = geoTransform[0] + maskBounds['xMin'] * geoTransform[1]
    origin_y = geoTransform[3] + maskBounds['yMin'] * geoTransform[5]
    return (origin_x, geoTransform[1], geoTransform[2],
            origin_y, geoTransform[4], geoTransform[5])
def shrinkGeoTransform(geoTransform, factor):
    """Divide the pixel sizes (x and y resolution) of a GDAL geo transform by
    ``factor``; origin and rotation terms are unchanged."""
    scaled = list(geoTransform)
    scaled[1] = geoTransform[1] / factor
    scaled[5] = geoTransform[5] / factor
    return tuple(scaled)
| 33.884498 | 157 | 0.591137 |
import os
import numpy as np
import cv2
import re
from shapely import wkt
from shapely.geometry import box, Polygon
import pandas as pd
import geopandas as gpd
from osgeo import gdal, gdalnumeric, osr, ogr
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def getResolution(demFolder, return_full_paths = False):
rasterFilePaths = [f for f in os.listdir(demFolder) if os.path.isfile(os.path.join(demFolder, f))]
if return_full_paths:
rasterFilePaths = [demFolder + '/' + f for f in rasterFilePaths if f[:4] == 'DEM_' and f[-4:] == '.tif']
rasterFilePaths.sort(reverse=True)
else:
rasterFilePaths = [int(f[4:-4]) for f in rasterFilePaths if f[:4] == 'DEM_' and f[-4:] == '.tif']
return rasterFilePaths
def readGDAL2numpy(rasterPath, return_geoInformation = False):
try:
ds = gdal.Open(rasterPath)
except RuntimeError:
print('Unable to open input file')
sys.exit(1)
data = gdalnumeric.LoadFile(rasterPath, False)
noDataVal = ds.GetRasterBand(1).GetNoDataValue()
try:
if data.dtype in ['float16', 'float32', 'float64'] and noDataVal is not None:
data[data == noDataVal] = np.NaN
except:
print("Issue in no data value")
if return_geoInformation == False:
return data
else:
geoTransform = ds.GetGeoTransform()
projection = ds.GetProjection()
return data, geoTransform, projection
def writeNumpyArr2Geotiff(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):
nscn, npix = data.shape
if np.isnan(data).any() and noDataValue is not None:
data[np.isnan(data)] = noDataValue
ds_new = gdal.GetDriverByName('GTiff').Create(outputPath, npix, nscn, 1, GDAL_dtype)
if geoTransform != None:
ds_new.SetGeoTransform(geoTransform)
if projection != None:
ds_new.SetProjection(projection)
outBand = ds_new.GetRasterBand(1)
outBand.WriteArray(data)
if noDataValue != None:
ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)
ds_new.FlushCache()
ds_new = None
outBand = None
def writeNumpyArr2Saga(outputPath, data, geoTransform = None, projection = None, GDAL_dtype = gdal.GDT_Byte, noDataValue = None):
nscn, npix = data.shape
if np.isnan(data).any() and noDataValue is not None:
data[np.isnan(data)] = noDataValue
ds_new = gdal.GetDriverByName('SAGA').Create(outputPath, npix, nscn, 1, GDAL_dtype)
outBand = ds_new.GetRasterBand(1)
outBand.WriteArray(data)
if noDataValue != None:
ds_new.GetRasterBand(1).SetNoDataValue(noDataValue)
if projection != None:
ds_new.SetProjection(projection)
ds_new.FlushCache()
ds_new = None
outBand = None
def wkt2bbox(wkt_input):
wkt_geometry = wkt.loads(wkt_input)
minx, miny, maxx, maxy = wkt_geometry.bounds
b = box(minx, miny, maxx, maxy)
bbox_tuple = list(b.exterior.coords)
bbox = []
for point in bbox_tuple:
bbox.append([point[0],point[1]])
return bbox
def wkt2shp(wkt_input, target_epsg, dst_file, bbox=False):
ensure_dir(dst_file)
if bbox:
polygon = Polygon(wkt2bbox(wkt_input))
else:
polygon = wkt.loads(wkt_input)
gpd.GeoDataFrame(pd.DataFrame(['p1'], columns = ['geom']),
crs = {'init':'epsg:' + str(target_epsg)},
geometry = [polygon]).to_file(dst_file)
def rescaleDEM(image, noData = None, maxVal = 255):
if noData:
image = np.float32(image)
image[image == noData] = np.nan
minElev = np.nanmin(image)
maxElev = np.nanmax(image)
rescaled = ( ((image - minElev)/(maxElev- minElev)) * (maxVal - 1) ) + 1
return np.uint8(rescaled)
def joinStrArg(str1, str2, str3 = None):
if str3 is not None:
return str(str1) + ' ' + str(str2) + ' ' + str(str3)
else:
return str(str1) + ' ' + str(str2)
def wkt2EPSG(wkt, epsg='/usr/local/share/proj/epsg', forceProj4=False):
code = None
p_in = osr.SpatialReference()
s = p_in.ImportFromWkt(wkt)
if s == 5:
return None
if p_in.IsLocal() == 1:
return p_in.ExportToWkt()
if p_in.IsGeographic() == 1:
cstype = 'GEOGCS'
else:
cstype = 'PROJCS'
an = p_in.GetAuthorityName(cstype)
ac = p_in.GetAuthorityCode(cstype)
if an is not None and ac is not None:
return '%s:%s' % \
(p_in.GetAuthorityName(cstype), p_in.GetAuthorityCode(cstype))
else:
p_out = p_in.ExportToProj4()
if p_out:
if forceProj4 is True:
return p_out
f = open(epsg)
for line in f:
if line.find(p_out) != -1:
m = re.search('<(\\d+)>', line)
if m:
code = m.group(1)
break
if code:
return 'EPSG:%s' % code
else:
return None
else:
return None
def getCornerCoordinates(gdal_dataSet, target_srs = False):
if type(gdal_dataSet) is str:
gdal_dataSet = gdal.Open(gdal_dataSet)
gt=gdal_dataSet.GetGeoTransform()
cols = gdal_dataSet.RasterXSize
rows = gdal_dataSet.RasterYSize
def GetExtent(gt,cols,rows):
ext=[]
xarr=[0,cols]
yarr=[0,rows]
for px in xarr:
for py in yarr:
x=gt[0]+(px*gt[1])+(py*gt[2])
y=gt[3]+(px*gt[4])+(py*gt[5])
ext.append([x,y])
yarr.reverse()
return ext
def ReprojectCoords(coords,src_srs,tgt_srs):
trans_coords=[]
transform = osr.CoordinateTransformation( src_srs, tgt_srs)
for x,y in coords:
x,y,z = transform.TransformPoint(x,y)
trans_coords.append([x,y])
return trans_coords
ext = GetExtent(gt,cols,rows)
src_srs=osr.SpatialReference()
src_srs.ImportFromWkt(gdal_dataSet.GetProjection())
if target_srs == False:
return ext
elif target_srs == 'wgs84':
target_srs=osr.SpatialReference()
target_srs.ImportFromEPSG(4326)
return ReprojectCoords(ext,src_srs,target_srs)
def resizeToDEM(imPath, sizeDEM = None, geoTransform = None, projection = None, noData = None):
imDS = gdal.Open(imPath, gdal.GA_ReadOnly)
imPix = imDS.RasterXSize
imScn = imDS.RasterYSize
nscn, npix = sizeDEM
if sizeDEM is not None:
if nscn != imScn or npix != imPix:
print("Size Mismatch")
image = imDS.ReadAsArray()
if noData is not None:
image = np.float32(image)
image[image == noData] = np.nan
imNew = cv2.resize(image, (npix, nscn), interpolation=cv2.INTER_CUBIC)
writeNumpyArr2Geotiff(imPath, imNew, geoTransform = geoTransform, projection = projection, GDAL_dtype = gdal.GDT_UInt16, noDataValue = noData)
def map_uint16_to_uint8(img, lower_bound=None, upper_bound=None):
if not(0 <= lower_bound < 2**16) and lower_bound is not None:
raise ValueError(
'"lower_bound" must be in the range [0, 65535]')
if not(0 <= upper_bound < 2**16) and upper_bound is not None:
raise ValueError(
'"upper_bound" must be in the range [0, 65535]')
if lower_bound is None:
lower_bound = np.min(img)
if upper_bound is None:
upper_bound = np.max(img)
if lower_bound >= upper_bound:
raise ValueError(
'"lower_bound" must be smaller than "upper_bound"')
lut = np.concatenate([
np.zeros(lower_bound, dtype=np.uint16),
np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),
np.ones(2**16 - upper_bound, dtype=np.uint16) * 255
])
return lut[img].astype(np.uint8)
def closeCV(mask, kernelSize = 11):
kernel = np.ones((kernelSize, kernelSize),np.uint8)
return cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
def newGeoTransform(geoTransform, maskBounds):
newGeoTransform = (geoTransform[0]+ maskBounds['xMin'] * geoTransform[1],
geoTransform[1],
geoTransform[2],
geoTransform[3] + maskBounds['yMin'] * geoTransform[5],
geoTransform[4],
geoTransform[5])
return newGeoTransform
def shrinkGeoTransform(geoTransform, factor):
newGeoTransform = (geoTransform[0],
geoTransform[1] / factor,
geoTransform[2],
geoTransform[3],
geoTransform[4],
geoTransform[5] / factor)
return newGeoTransform
| true | true |
f71b651c18866a5ae438540b4c87f225edab7b8a | 21,388 | py | Python | gym_let_mpc/let_mpc.py | eivindeb/gym-letMPC | 7041aa56a25aa9a1c749088f2b370c910d21fe75 | [
"MIT"
] | 6 | 2020-12-04T18:15:29.000Z | 2022-02-26T11:01:31.000Z | gym_let_mpc/let_mpc.py | eivindeb/gym-letMPC | 7041aa56a25aa9a1c749088f2b370c910d21fe75 | [
"MIT"
] | null | null | null | gym_let_mpc/let_mpc.py | eivindeb/gym-letMPC | 7041aa56a25aa9a1c749088f2b370c910d21fe75 | [
"MIT"
] | 5 | 2021-03-08T06:00:27.000Z | 2021-11-22T08:14:12.000Z | import gym
from gym.utils import seeding
import numpy as np
import json
from gym_let_mpc.simulator import ControlSystem
from gym_let_mpc.controllers import ETMPC, AHMPC
import collections.abc
import matplotlib.pyplot as plt
from gym_let_mpc.utils import str_replace_whole_words
import copy
class LetMPCEnv(gym.Env):
    def __init__(self, config_path):
        """Build the environment from a JSON configuration file.

        The config holds "plant", "mpc", "lqr" and "environment" sections;
        model sections may reference each other by the literal strings
        "plant"/"mpc", which are resolved to deep copies here.

        :param config_path: path to the JSON configuration file.
        """
        with open(config_path) as file_object:
            config = json.load(file_object)
        # Resolve "model": "plant" / "parameters": "plant" indirections for the MPC.
        if config["mpc"]["model"] == "plant":
            config["mpc"]["model"] = copy.deepcopy(config["plant"]["model"])
        elif config["mpc"]["model"].get("parameters", None) == "plant":
            config["mpc"]["model"]["parameters"] = copy.deepcopy(config["plant"]["model"]["parameters"])
        # Same for the LQR, which may additionally reference the MPC model.
        if config["lqr"]["model"] == "plant":
            config["lqr"]["model"] = copy.deepcopy(config["plant"]["model"])
        elif config["lqr"]["model"] == "mpc":
            config["lqr"]["model"] = copy.deepcopy(config["mpc"]["model"])
        elif config["lqr"]["model"].get("parameters", None) == "plant":
            config["lqr"]["model"]["parameters"] = copy.deepcopy(config["plant"]["model"]["parameters"])
        elif config["lqr"]["model"].get("parameters", None) == "mpc":
            config["lqr"]["model"]["parameters"] = copy.deepcopy(config["mpc"]["model"]["parameters"])
        self.config = config
        # Required environment settings.
        assert "max_steps" in self.config["environment"]
        self.max_steps = self.config["environment"]["max_steps"]
        assert "randomize" in self.config["environment"]
        assert "state" in self.config["environment"]["randomize"] and "reference" in self.config["environment"]["randomize"]
        assert "render" in self.config["environment"]
        # Controller type determines the RL action space:
        # ETMPC -> binary "recompute MPC or not"; AHMPC -> continuous horizon length.
        if config["mpc"]["type"] == "ETMPC":
            assert len(config["environment"]["action"]["variables"]) == 1 and \
                   config["environment"]["action"]["variables"][0]["name"] == "mpc_compute"
            controller = ETMPC(config["mpc"], config["lqr"])
            self.action_space = gym.spaces.Discrete(2)
        elif config["mpc"]["type"] == "AHMPC":
            assert len(config["environment"]["action"]["variables"]) == 1 and \
                   config["environment"]["action"]["variables"][0]["name"] == "mpc_horizon"
            controller = AHMPC(config["mpc"])
            self.action_space = gym.spaces.Box(low=np.array([1]), high=np.array([50]), dtype=np.float32)
        else:
            raise ValueError
        self.control_system = ControlSystem(config["plant"], controller=controller)
        self.history = None
        self.steps_count = None
        self.np_random = None
        self.min_constraint_delta = 0.25  # TODO: how and where to set
        # Build observation-space bounds: one entry per (variable, transform)
        # pair, with limits adjusted for the transform (abs/square force a
        # non-negative lower bound; square squares the upper limit).
        obs_high = []
        obs_low = []
        for obs_var in self.config["environment"]["observation"]["variables"]:
            for var_transform in obs_var.get("transform", ["none"]):
                for lim_i, lim in enumerate(obs_var.get("limits", [None, None])):
                    if lim is None:
                        if lim_i == 0:
                            obs_low.append(-np.finfo(np.float32).max)
                        else:
                            obs_high.append(np.finfo(np.float32).max)
                    else:
                        if var_transform == "none":
                            if lim_i == 0:
                                obs_low.append(lim)
                            else:
                                obs_high.append(lim)
                        elif var_transform == "absolute":
                            if lim_i == 0:
                                obs_low.append(0)
                            else:
                                obs_high.append(lim)
                        elif var_transform == "square":
                            if lim_i == 0:
                                obs_low.append(0)
                            else:
                                obs_high.append(lim ** 2)
                        else:
                            raise NotImplementedError
        self.observation_space = gym.spaces.Box(low=np.array(obs_low, dtype=np.float32),
                                                high=np.array(obs_high, dtype=np.float32),
                                                dtype=np.float32)
        self.value_function_is_set = False
        self.viewer = None
def seed(self, seed=None):
"""
Seed the random number generator of the control system.
:param seed: (int) seed for random state
"""
self.np_random, seed = gym.utils.seeding.np_random(seed)
self.control_system.seed(seed)
return [seed]
    def reset(self, state=None, reference=None, constraint=None, model=None, process_noise=None, tvp=None):
        """
        Reset state of environment. Note that the simulator is reset, the MPC solution is computed and the first
        MPC action is applied to the plant.

        :param state: (dict) initial conditions (value) for state name (key).
        :param reference: (dict) reference value (value) for reference name (key).
        :param constraint: (dict) constraint values (value) for constraint names (key).
        :param model: (dict) dictionary of dictionary where first key is model that it applies to ["plant", "mpc", "lqr"],
        first value is dictionary of model parameters where second value is the specified model parameter value.
        :param process_noise: (dict) process noise values (value) as ndarray for state name (key). The process noise at
        each time step loops through the provided array.
        :param tvp: (dict) values of time-varying parameters. New values are generated if values arent specified
        for all time steps elapsed.
        :return: ([float]) observation vector
        """
        def update_dict_recursively(d, u):
            # Deep-merge mapping u into d (nested dicts merged, leaves overwritten).
            for k, v in u.items():
                if isinstance(v, collections.abc.Mapping):
                    d[k] = update_dict_recursively(d.get(k, {}), v)
                else:
                    d[k] = v
            return d
        # Sample random initial conditions per the "randomize" config, then let
        # any explicitly supplied arguments override the sampled values. An
        # empty sample with no override becomes None ("use defaults") downstream.
        sampled_state = self.sample_state()
        sampled_reference = self.sample_reference()
        sampled_constraint = self.sample_constraints()
        sampled_model = self.sample_model()
        if state is not None:
            sampled_state.update(state)
        elif len(sampled_state) == 0:
            sampled_state = None
        if reference is not None:
            sampled_reference.update(reference)
        elif len(sampled_reference) == 0:
            sampled_reference = None
        if constraint is not None:
            sampled_constraint.update(constraint)
        elif len(sampled_constraint) == 0:
            sampled_constraint = None
        if model is not None:
            sampled_model = update_dict_recursively(sampled_model, model)
        elif len(sampled_model) == 0:
            sampled_model = None
        self.control_system.reset(state=sampled_state, reference=sampled_reference, constraint=sampled_constraint,
                                  model=sampled_model, process_noise=process_noise, tvp=tvp)
        # For event-triggered MPC, compute and apply the first MPC action now.
        if self.config["mpc"]["type"] == "ETMPC":
            self.control_system.step(action=np.array([1]))
        obs = self.get_observation()
        self.history = {"obs": [obs], "actions": [], "rewards": []}
        self.steps_count = 0
        return obs
    def step(self, action):
        """Advance the control system one step with the agent's action.

        :param action: array-like, one entry per configured action variable
            (currently a single "mpc_horizon" value for AHMPC).
        :return: (obs, reward, done, info) gym tuple.
        """
        # Map the flat action array onto named action variables from the config.
        a_dict = {a_props["name"]: action[a_i]
                  for a_i, a_props in enumerate(self.config["environment"]["action"]["variables"])}
        self.control_system.step(np.round(a_dict["mpc_horizon"]).astype(np.int32))#np.atleast_1d(int(a_dict["mpc_compute"])))
        self.history["actions"].append(a_dict)
        self.steps_count += 1
        info = {}
        obs = self.get_observation()
        # Episode ends on the step budget, or optionally on violation of a
        # configured constraint (constraint names are "<prefix>-<name>-...").
        done = False
        if self.steps_count >= self.max_steps:
            done = True
            info["termination"] = "steps"
        elif len(self.config["environment"].get("end_on_constraint_violation", [])) > 0:
            for c_name, c_d in self.control_system.get_constraint_distances().items():
                if c_name.split("-")[1] in self.config["environment"]["end_on_constraint_violation"] and c_d > 0:
                    done = True
                    info["termination"] = "constraint"
                    break
        rew = self.get_reward(done=done)
        # Optional extra reward expressions evaluated for logging only.
        for category, v in self.config["environment"].get("info", {}).items():
            if category == "reward":
                for rew_name, rew_expr in v.items():
                    info["reward/{}".format(rew_name)] = self.get_reward(rew_expr, done=done)
            else:
                raise NotImplementedError
        # When a value function is attached, log MPC cost decomposition data:
        # predicted terminal state, discounted terminal value and stage cost.
        if self.value_function_is_set:
            step_vf_data = {"mpc_state": self.control_system.get_state_vector(self.control_system.history["state"][-2]),
                            "mpc_next_state": self.control_system.controller.mpc_state_preds[:, -1, -1]}
            step_vf_data["mpc_n_horizon"] = self.control_system.controller.history["mpc_horizon"][-1]
            info["mpc_value_fn"] = (self.control_system.controller.value_function.eval([step_vf_data["mpc_next_state"].reshape(1, -1)])[0][0, 0]).astype(np.float64)
            step_vf_data["mpc_rewards"] = self.control_system.controller.mpc.opt_f_num.toarray()[0, 0] - \
                                            self.config["mpc"]["objective"].get("discount_factor") ** (step_vf_data["mpc_n_horizon"] + 1) * info["mpc_value_fn"]
            info["mpc_computation_time"] = sum([v for k, v in self.control_system.controller.mpc.solver_stats.items() if k.startswith("t_proc")])
            info["data"] = step_vf_data
            info["mpc_avg_stage_cost"] = step_vf_data["mpc_rewards"] / step_vf_data["mpc_n_horizon"]
        # Expose the applied action values (as plain floats) in info.
        info.update({k: v.astype(np.float64) if hasattr(v, "dtype") else v for k, v in a_dict.items()})
        self.history["obs"].append(obs)
        self.history["rewards"].append(rew)
        return obs, rew, done, info
    def render(self, mode='human', save_path=None):  # TODO: add env renders
        """Render the control system plus environment-level plots
        (actions/rewards), either to screen or to a PNG at ``save_path``.

        :param mode: gym render mode (only 'human'-style plotting is done).
        :param save_path: when given, save the figure there and close it.
        """
        figure, axes = None, None
        if self.viewer is None:
            # First call: create extra axes for each enabled environment plot
            # below the control-system axes, and remember them by plot name.
            env_plots = [plot_name for plot_name, make_plot in self.config["environment"]["render"].items() if make_plot]
            if len(env_plots) > 0:
                figure, axes = plt.subplots(self.control_system.render_n_axes + len(env_plots), sharex=True,
                                            figsize=(9, 16))
            self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)
            for i, plot in enumerate(env_plots):
                self.viewer["axes"][plot] = axes[-(i + 1)]
        else:
            self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)
        # Redraw the environment-level plots from the episode history.
        for plot_name, make_plot in self.config["environment"]["render"].items():
            if make_plot:
                self.viewer["axes"][plot_name].set_ylabel("-".join(plot_name.split("_")[1:]))
                x_data = np.array(range(self.steps_count)) * self.control_system.config["params"]["t_step"]
                self.viewer["axes"][plot_name].clear()
                if plot_name == "plot_action":
                    for a_var in self.config["environment"]["action"]["variables"]:
                        y_data = [step_a[a_var["name"]] for step_a in self.history["actions"]]
                        self.viewer["axes"][plot_name].plot(x_data, y_data, label=a_var["name"], drawstyle="steps")
                elif plot_name == "plot_reward":
                    self.viewer["axes"][plot_name].plot(x_data, self.history["rewards"], label="reward")
                    # Annotate the cumulative reward just past the last sample.
                    self.viewer["axes"][plot_name].text(max(x_data) + self.control_system.config["params"]["t_step"],
                                                        self.history["rewards"][-1],
                                                        "{:.3f}".format(np.sum(self.history["rewards"])))
                else:
                    raise ValueError
        for axis in self.viewer["axes"].values():
            axis.legend()
        if save_path is not None:
            self.viewer["figure"].savefig(save_path, bbox_inches="tight", format="png")
            plt.close(self.viewer["figure"])
        else:
            self.viewer["figure"].show()
def get_observation(self):
obs = []
for var in self.config["environment"]["observation"]["variables"]:
var_val = self._get_variable_value(var)
for transform in var.get("transform", ["none"]):
if transform == "none":
obs.append(var_val)
elif transform == "absolute":
obs.append(abs(var_val))
elif transform == "square":
obs.append(var_val ** 2)
else:
raise ValueError
return np.array(obs)
    def get_reward(self, rew_expr=None, done=False):
        """Evaluate a reward expression by substituting variable values.

        NOTE: uses ``eval`` on the configured expression — the config file is
        therefore trusted input.

        :param rew_expr: expression string; defaults to the configured one.
        :param done: substituted (as 0/1) for the word "done" in the expression.
        :return: evaluated reward value.
        """
        if rew_expr is None:
            rew_expr = self.config["environment"]["reward"]["expression"]
        rew_expr = str_replace_whole_words(rew_expr, "done", int(done))
        # NOTE(review): key=lambda x: len(x) sorts by the number of keys in each
        # variable dict, not by name length — presumably len(x["name"]) was
        # intended; harmless here since replacement is whole-word. Confirm.
        for var in sorted(self.config["environment"]["reward"]["variables"], key=lambda x: len(x), reverse=True):
            var_val = self._get_variable_value(var)
            if isinstance(var_val, list) or isinstance(var_val, np.ndarray):  # TODO: needs to be better way to do this
                var_val = var_val[0]
            rew_expr = str_replace_whole_words(rew_expr, var["name"], var_val)
        return eval(rew_expr)
    def _get_variable_value(self, var):
        """Resolve the current scalar value of a config-declared variable.

        :param var: variable description dict with at least "type" and "name";
            optional keys: "value_type" (per-type sub-mode) and "limits"
            ([low, high] clipping range).
        :return: scalar value (first element if the underlying value is an array).
        :raises ValueError: for unknown "type"/"value_type" combinations.
        """
        if var["type"] == "state":
            val = self.control_system.current_state[var["name"]]
        elif var["type"] == "input":
            if var.get("value_type", "absolute") == "absolute":
                val = self.control_system.controller.current_input[var["name"]]
            elif var.get("value_type") == "delta":
                # Change of the input between the two most recent steps.
                val = self.control_system.controller.history["inputs"][-2][var["name"]] - \
                        self.control_system.controller.current_input[var["name"]]
            else:
                raise ValueError
        elif var["type"] == "reference":
            val = self.control_system.controller.current_reference[var["name"]]
        elif var["type"] == "tvp":
            # Time-varying parameter evaluated at the current step index.
            val = self.control_system.tvps[var["name"]].get_values(self.steps_count)
        elif var["type"] == "error":
            val = self.control_system.controller.history["errors"][-1][var["name"]]
            # NaN entries (presumably before the first computation) count as zero.
            if np.isnan(val):
                val = 0
        elif var["type"] == "epsilon":
            val = self.control_system.controller.history["epsilons"][-1][var["name"]]
            if np.isnan(val):
                val = 0
        elif var["type"] == "constraint":
            if var.get("value_type") == "distance":
                # Signed distance of the current state to the named constraint.
                val = self.control_system.get_constraint_distances((var["name"],))[var["name"]]
            else:
                raise ValueError
        elif var["type"] == "action":
            if var.get("value_type", "agent") == "agent":
                # Last action issued by the RL agent.
                val = self.history["actions"][-1][var["name"]]
            elif var.get("value_type") == "controller":
                # Last value recorded by the controller itself.
                val = self.control_system.controller.history[var["name"]][-1]
            else:
                raise ValueError
        elif var["type"] == "time":
            if var.get("value_type") == "fraction":
                # Fraction of the MPC horizon elapsed since the last MPC solve.
                val = self.control_system.controller.steps_since_mpc_computation / self.control_system.controller.mpc.n_horizon
            elif var.get("value_type") == "absolute":
                val = self.control_system.controller.steps_since_mpc_computation
            else:
                raise ValueError
        elif var["type"] == "parameter":
            if var["value_type"] in ["plant", "mpc", "lqr"]:
                val = self.config[var["value_type"]]["model"]["parameters"][var["name"]]
            else:
                raise ValueError
        else:
            raise ValueError
        if isinstance(val, np.ndarray):
            # Reduce single-element arrays to their scalar content.
            val = val[0]
        if "limits" in var:
            val = np.clip(val, var["limits"][0], var["limits"][1])
        return val
def sample_constraints(self):
constraints = {}
for c_name, c_props in self.config["environment"].get("randomize", {}).get("constraints", {}).items():
constraint_val = getattr(self.np_random, c_props["type"])(**c_props["kw"])
if c_name.split("-")[1] in [k.split("-")[1] for k in constraints.keys()]:
other_bound_type = "u" if c_name.split("-")[2] == "l" else "l"
other_bound_val = constraints[c_name[:-1] + other_bound_type]
if other_bound_type == "u":
constraint_val = min(other_bound_val - self.min_constraint_delta, constraint_val)
else:
constraint_val = max(other_bound_val + self.min_constraint_delta, constraint_val)
constraints[c_name] = constraint_val
return constraints
def sample_state(self):
state = {}
for s_name, s_props in self.config["environment"].get("randomize", {}).get("state", {}).items():
state[s_name] = getattr(self.np_random, s_props["type"])(**s_props["kw"])
return state
def sample_reference(self):
reference = {}
for r_name, r_props in self.config["environment"].get("randomize", {}).get("reference", {}).items():
reference[r_name] = getattr(self.np_random, r_props["type"])(**r_props["kw"])
return reference
def sample_model(self):
model = {}
for s_name, s_props in self.config["environment"].get("randomize", {}).get("model", {}).get("states", {}).items():
model["states"] = {s_name: {}}
for component_name, component_props in s_props.items():
model["states"][s_name][component_name] = \
{comp_v_name: getattr(self.np_random, v_prop["type"])(**v_prop["kw"])
for comp_v_name, v_prop in component_props.items()}
model = {dest: model for dest in self.config["environment"].get("randomize", {}).get("model", {}).get("apply", [])}
return model
    def stop(self):
        """No-op shutdown hook; nothing is implemented to release here."""
        pass
    def create_dataset(self, n_scenarios):
        """Pre-sample a reproducible evaluation dataset of episode scenarios.

        For each scenario, samples initial state, reference, constraints and
        model perturbations, plus per-step process-noise and TVP trajectories
        of length ``self.max_steps``.

        :param n_scenarios: number of episode scenarios to generate.
        :return: list of dicts, each usable as keyword arguments to reset().
        """
        dataset = []
        self.reset()
        for i in range(n_scenarios):
            # One process-noise vector per step for the whole episode.
            process_noise = np.array([self.control_system._get_process_noise() for i in range(self.max_steps)])
            ep_dict = {"state": self.sample_state(), "reference": self.sample_reference(),
                       "constraint": self.sample_constraints(), "model": self.sample_model(),
                       "process_noise": {}, "tvp": {}}
            # Map noise columns to the plant states that declare a noise term "W".
            s_i = 0
            for s_name, s_props in self.config["plant"]["model"]["states"].items():
                if "W" in s_props:
                    ep_dict["process_noise"][s_name] = process_noise[:, s_i]
                    s_i += 1
            for tvp_name, tvp_obj in self.control_system.tvps.items():
                tvp_obj.generate_values(self.max_steps)
                ep_dict["tvp"][tvp_name] = tvp_obj.values
            dataset.append(ep_dict)
            self.reset()
        return dataset
    def set_value_function(self, input_ph, output_ph, tf_session):
        """Attach a value-function network to the underlying controller.

        :param input_ph: input placeholder/tensor of the value network.
        :param output_ph: output tensor of the value network.
        :param tf_session: session used to evaluate the network.
        """
        self.control_system.controller.set_value_function(input_ph, output_ph, tf_session)
        self.value_function_is_set = True
def set_learning_status(self, status):
if self.value_function_is_set:
self.control_system.controller.value_function.set_enabled(status)
if __name__ == "__main__":  # TODO: constraints on pendulum and end episode if constraints violated
    # Demo/benchmark driver: replays one pre-sampled scenario with a fixed
    # MPC horizon and prints per-component rewards and timing.
    env = LetMPCEnv("configs/cart_pendulum_horizon.json")
    env.seed(0)
    """
    from tensorflow_casadi import TensorFlowEvaluator, MLP
    import tensorflow as tf
    a = tf.placeholder(shape=(None, 4), dtype=tf.float32)
    mlp = MLP(a)
    sess = tf.Session()
    val_fun = TensorFlowEvaluator([mlp.input_ph], [mlp.output], sess)
    env.set_value_function(mlp.input_ph, mlp.output, sess)
    """
    import pickle
    # NOTE(security): pickle.load on an external file — only run on trusted datasets.
    with open("../../lmpc-horizon/datasets/cart_pendulum_10.pkl", "rb") as f:
        test_set = pickle.load(f)
    rews = {}
    for i in range(1):
        import time
        # Scenario 5 of the test set is replayed deterministically.
        obs = env.reset(**test_set[5])
        done = False
        t_before = time.process_time()
        horizon = 10
        while not done:
            t_step = time.process_time()
            # Dead branch ("and False"): horizon toggling is disabled.
            if env.steps_count % 1 == 0 and False:
                horizon = 25 if horizon == 50 else 50
            obs, rew, done, info = env.step([horizon])#[np.random.randint(1, 10)])
            # Accumulate the per-component reward breakdown exposed via info.
            for rew_comp, v in info.items():
                if rew_comp.startswith("reward/"):
                    if rew_comp not in rews:
                        rews[rew_comp] = []
                    rews[rew_comp].append(v)
            # Log solver stats for unusually slow (>1 s) steps.
            if time.process_time() - t_step > 1:
                print(env.control_system.controller.mpc.solver_stats)
                print(env.steps_count)
        for k, v in rews.items():
            print("{}: {}".format(k, sum(v)))
        print("Elapsed time {}".format(time.process_time() - t_before))
        env.render()
| 47.423503 | 164 | 0.565738 | import gym
from gym.utils import seeding
import numpy as np
import json
from gym_let_mpc.simulator import ControlSystem
from gym_let_mpc.controllers import ETMPC, AHMPC
import collections.abc
import matplotlib.pyplot as plt
from gym_let_mpc.utils import str_replace_whole_words
import copy
class LetMPCEnv(gym.Env):
def __init__(self, config_path):
with open(config_path) as file_object:
config = json.load(file_object)
if config["mpc"]["model"] == "plant":
config["mpc"]["model"] = copy.deepcopy(config["plant"]["model"])
elif config["mpc"]["model"].get("parameters", None) == "plant":
config["mpc"]["model"]["parameters"] = copy.deepcopy(config["plant"]["model"]["parameters"])
if config["lqr"]["model"] == "plant":
config["lqr"]["model"] = copy.deepcopy(config["plant"]["model"])
elif config["lqr"]["model"] == "mpc":
config["lqr"]["model"] = copy.deepcopy(config["mpc"]["model"])
elif config["lqr"]["model"].get("parameters", None) == "plant":
config["lqr"]["model"]["parameters"] = copy.deepcopy(config["plant"]["model"]["parameters"])
elif config["lqr"]["model"].get("parameters", None) == "mpc":
config["lqr"]["model"]["parameters"] = copy.deepcopy(config["mpc"]["model"]["parameters"])
self.config = config
assert "max_steps" in self.config["environment"]
self.max_steps = self.config["environment"]["max_steps"]
assert "randomize" in self.config["environment"]
assert "state" in self.config["environment"]["randomize"] and "reference" in self.config["environment"]["randomize"]
assert "render" in self.config["environment"]
if config["mpc"]["type"] == "ETMPC":
assert len(config["environment"]["action"]["variables"]) == 1 and \
config["environment"]["action"]["variables"][0]["name"] == "mpc_compute"
controller = ETMPC(config["mpc"], config["lqr"])
self.action_space = gym.spaces.Discrete(2)
elif config["mpc"]["type"] == "AHMPC":
assert len(config["environment"]["action"]["variables"]) == 1 and \
config["environment"]["action"]["variables"][0]["name"] == "mpc_horizon"
controller = AHMPC(config["mpc"])
self.action_space = gym.spaces.Box(low=np.array([1]), high=np.array([50]), dtype=np.float32)
else:
raise ValueError
self.control_system = ControlSystem(config["plant"], controller=controller)
self.history = None
self.steps_count = None
self.np_random = None
self.min_constraint_delta = 0.25
obs_high = []
obs_low = []
for obs_var in self.config["environment"]["observation"]["variables"]:
for var_transform in obs_var.get("transform", ["none"]):
for lim_i, lim in enumerate(obs_var.get("limits", [None, None])):
if lim is None:
if lim_i == 0:
obs_low.append(-np.finfo(np.float32).max)
else:
obs_high.append(np.finfo(np.float32).max)
else:
if var_transform == "none":
if lim_i == 0:
obs_low.append(lim)
else:
obs_high.append(lim)
elif var_transform == "absolute":
if lim_i == 0:
obs_low.append(0)
else:
obs_high.append(lim)
elif var_transform == "square":
if lim_i == 0:
obs_low.append(0)
else:
obs_high.append(lim ** 2)
else:
raise NotImplementedError
self.observation_space = gym.spaces.Box(low=np.array(obs_low, dtype=np.float32),
high=np.array(obs_high, dtype=np.float32),
dtype=np.float32)
self.value_function_is_set = False
self.viewer = None
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
self.control_system.seed(seed)
return [seed]
def reset(self, state=None, reference=None, constraint=None, model=None, process_noise=None, tvp=None):
def update_dict_recursively(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = update_dict_recursively(d.get(k, {}), v)
else:
d[k] = v
return d
sampled_state = self.sample_state()
sampled_reference = self.sample_reference()
sampled_constraint = self.sample_constraints()
sampled_model = self.sample_model()
if state is not None:
sampled_state.update(state)
elif len(sampled_state) == 0:
sampled_state = None
if reference is not None:
sampled_reference.update(reference)
elif len(sampled_reference) == 0:
sampled_reference = None
if constraint is not None:
sampled_constraint.update(constraint)
elif len(sampled_constraint) == 0:
sampled_constraint = None
if model is not None:
sampled_model = update_dict_recursively(sampled_model, model)
elif len(sampled_model) == 0:
sampled_model = None
self.control_system.reset(state=sampled_state, reference=sampled_reference, constraint=sampled_constraint,
model=sampled_model, process_noise=process_noise, tvp=tvp)
if self.config["mpc"]["type"] == "ETMPC":
self.control_system.step(action=np.array([1]))
obs = self.get_observation()
self.history = {"obs": [obs], "actions": [], "rewards": []}
self.steps_count = 0
return obs
def step(self, action):
a_dict = {a_props["name"]: action[a_i]
for a_i, a_props in enumerate(self.config["environment"]["action"]["variables"])}
self.control_system.step(np.round(a_dict["mpc_horizon"]).astype(np.int32))
self.history["actions"].append(a_dict)
self.steps_count += 1
info = {}
obs = self.get_observation()
done = False
if self.steps_count >= self.max_steps:
done = True
info["termination"] = "steps"
elif len(self.config["environment"].get("end_on_constraint_violation", [])) > 0:
for c_name, c_d in self.control_system.get_constraint_distances().items():
if c_name.split("-")[1] in self.config["environment"]["end_on_constraint_violation"] and c_d > 0:
done = True
info["termination"] = "constraint"
break
rew = self.get_reward(done=done)
for category, v in self.config["environment"].get("info", {}).items():
if category == "reward":
for rew_name, rew_expr in v.items():
info["reward/{}".format(rew_name)] = self.get_reward(rew_expr, done=done)
else:
raise NotImplementedError
if self.value_function_is_set:
step_vf_data = {"mpc_state": self.control_system.get_state_vector(self.control_system.history["state"][-2]),
"mpc_next_state": self.control_system.controller.mpc_state_preds[:, -1, -1]}
step_vf_data["mpc_n_horizon"] = self.control_system.controller.history["mpc_horizon"][-1]
info["mpc_value_fn"] = (self.control_system.controller.value_function.eval([step_vf_data["mpc_next_state"].reshape(1, -1)])[0][0, 0]).astype(np.float64)
step_vf_data["mpc_rewards"] = self.control_system.controller.mpc.opt_f_num.toarray()[0, 0] - \
self.config["mpc"]["objective"].get("discount_factor") ** (step_vf_data["mpc_n_horizon"] + 1) * info["mpc_value_fn"]
info["mpc_computation_time"] = sum([v for k, v in self.control_system.controller.mpc.solver_stats.items() if k.startswith("t_proc")])
info["data"] = step_vf_data
info["mpc_avg_stage_cost"] = step_vf_data["mpc_rewards"] / step_vf_data["mpc_n_horizon"]
info.update({k: v.astype(np.float64) if hasattr(v, "dtype") else v for k, v in a_dict.items()})
self.history["obs"].append(obs)
self.history["rewards"].append(rew)
return obs, rew, done, info
def render(self, mode='human', save_path=None):
figure, axes = None, None
if self.viewer is None:
env_plots = [plot_name for plot_name, make_plot in self.config["environment"]["render"].items() if make_plot]
if len(env_plots) > 0:
figure, axes = plt.subplots(self.control_system.render_n_axes + len(env_plots), sharex=True,
figsize=(9, 16))
self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)
for i, plot in enumerate(env_plots):
self.viewer["axes"][plot] = axes[-(i + 1)]
else:
self.viewer = self.control_system.render(figure=figure, axes=axes, return_viewer=True)
for plot_name, make_plot in self.config["environment"]["render"].items():
if make_plot:
self.viewer["axes"][plot_name].set_ylabel("-".join(plot_name.split("_")[1:]))
x_data = np.array(range(self.steps_count)) * self.control_system.config["params"]["t_step"]
self.viewer["axes"][plot_name].clear()
if plot_name == "plot_action":
for a_var in self.config["environment"]["action"]["variables"]:
y_data = [step_a[a_var["name"]] for step_a in self.history["actions"]]
self.viewer["axes"][plot_name].plot(x_data, y_data, label=a_var["name"], drawstyle="steps")
elif plot_name == "plot_reward":
self.viewer["axes"][plot_name].plot(x_data, self.history["rewards"], label="reward")
self.viewer["axes"][plot_name].text(max(x_data) + self.control_system.config["params"]["t_step"],
self.history["rewards"][-1],
"{:.3f}".format(np.sum(self.history["rewards"])))
else:
raise ValueError
for axis in self.viewer["axes"].values():
axis.legend()
if save_path is not None:
self.viewer["figure"].savefig(save_path, bbox_inches="tight", format="png")
plt.close(self.viewer["figure"])
else:
self.viewer["figure"].show()
def get_observation(self):
obs = []
for var in self.config["environment"]["observation"]["variables"]:
var_val = self._get_variable_value(var)
for transform in var.get("transform", ["none"]):
if transform == "none":
obs.append(var_val)
elif transform == "absolute":
obs.append(abs(var_val))
elif transform == "square":
obs.append(var_val ** 2)
else:
raise ValueError
return np.array(obs)
def get_reward(self, rew_expr=None, done=False):
if rew_expr is None:
rew_expr = self.config["environment"]["reward"]["expression"]
rew_expr = str_replace_whole_words(rew_expr, "done", int(done))
for var in sorted(self.config["environment"]["reward"]["variables"], key=lambda x: len(x), reverse=True):
var_val = self._get_variable_value(var)
if isinstance(var_val, list) or isinstance(var_val, np.ndarray):
var_val = var_val[0]
rew_expr = str_replace_whole_words(rew_expr, var["name"], var_val)
return eval(rew_expr)
def _get_variable_value(self, var):
if var["type"] == "state":
val = self.control_system.current_state[var["name"]]
elif var["type"] == "input":
if var.get("value_type", "absolute") == "absolute":
val = self.control_system.controller.current_input[var["name"]]
elif var.get("value_type") == "delta":
val = self.control_system.controller.history["inputs"][-2][var["name"]] - \
self.control_system.controller.current_input[var["name"]]
else:
raise ValueError
elif var["type"] == "reference":
val = self.control_system.controller.current_reference[var["name"]]
elif var["type"] == "tvp":
val = self.control_system.tvps[var["name"]].get_values(self.steps_count)
elif var["type"] == "error":
val = self.control_system.controller.history["errors"][-1][var["name"]]
if np.isnan(val):
val = 0
elif var["type"] == "epsilon":
val = self.control_system.controller.history["epsilons"][-1][var["name"]]
if np.isnan(val):
val = 0
elif var["type"] == "constraint":
if var.get("value_type") == "distance":
val = self.control_system.get_constraint_distances((var["name"],))[var["name"]]
else:
raise ValueError
elif var["type"] == "action":
if var.get("value_type", "agent") == "agent":
val = self.history["actions"][-1][var["name"]]
elif var.get("value_type") == "controller":
val = self.control_system.controller.history[var["name"]][-1]
else:
raise ValueError
elif var["type"] == "time":
if var.get("value_type") == "fraction":
val = self.control_system.controller.steps_since_mpc_computation / self.control_system.controller.mpc.n_horizon
elif var.get("value_type") == "absolute":
val = self.control_system.controller.steps_since_mpc_computation
else:
raise ValueError
elif var["type"] == "parameter":
if var["value_type"] in ["plant", "mpc", "lqr"]:
val = self.config[var["value_type"]]["model"]["parameters"][var["name"]]
else:
raise ValueError
else:
raise ValueError
if isinstance(val, np.ndarray):
val = val[0]
if "limits" in var:
val = np.clip(val, var["limits"][0], var["limits"][1])
return val
def sample_constraints(self):
constraints = {}
for c_name, c_props in self.config["environment"].get("randomize", {}).get("constraints", {}).items():
constraint_val = getattr(self.np_random, c_props["type"])(**c_props["kw"])
if c_name.split("-")[1] in [k.split("-")[1] for k in constraints.keys()]:
other_bound_type = "u" if c_name.split("-")[2] == "l" else "l"
other_bound_val = constraints[c_name[:-1] + other_bound_type]
if other_bound_type == "u":
constraint_val = min(other_bound_val - self.min_constraint_delta, constraint_val)
else:
constraint_val = max(other_bound_val + self.min_constraint_delta, constraint_val)
constraints[c_name] = constraint_val
return constraints
def sample_state(self):
state = {}
for s_name, s_props in self.config["environment"].get("randomize", {}).get("state", {}).items():
state[s_name] = getattr(self.np_random, s_props["type"])(**s_props["kw"])
return state
def sample_reference(self):
reference = {}
for r_name, r_props in self.config["environment"].get("randomize", {}).get("reference", {}).items():
reference[r_name] = getattr(self.np_random, r_props["type"])(**r_props["kw"])
return reference
def sample_model(self):
model = {}
for s_name, s_props in self.config["environment"].get("randomize", {}).get("model", {}).get("states", {}).items():
model["states"] = {s_name: {}}
for component_name, component_props in s_props.items():
model["states"][s_name][component_name] = \
{comp_v_name: getattr(self.np_random, v_prop["type"])(**v_prop["kw"])
for comp_v_name, v_prop in component_props.items()}
model = {dest: model for dest in self.config["environment"].get("randomize", {}).get("model", {}).get("apply", [])}
return model
def stop(self):
pass
def create_dataset(self, n_scenarios):
dataset = []
self.reset()
for i in range(n_scenarios):
process_noise = np.array([self.control_system._get_process_noise() for i in range(self.max_steps)])
ep_dict = {"state": self.sample_state(), "reference": self.sample_reference(),
"constraint": self.sample_constraints(), "model": self.sample_model(),
"process_noise": {}, "tvp": {}}
s_i = 0
for s_name, s_props in self.config["plant"]["model"]["states"].items():
if "W" in s_props:
ep_dict["process_noise"][s_name] = process_noise[:, s_i]
s_i += 1
for tvp_name, tvp_obj in self.control_system.tvps.items():
tvp_obj.generate_values(self.max_steps)
ep_dict["tvp"][tvp_name] = tvp_obj.values
dataset.append(ep_dict)
self.reset()
return dataset
def set_value_function(self, input_ph, output_ph, tf_session):
self.control_system.controller.set_value_function(input_ph, output_ph, tf_session)
self.value_function_is_set = True
def set_learning_status(self, status):
if self.value_function_is_set:
self.control_system.controller.value_function.set_enabled(status)
if __name__ == "__main__":
env = LetMPCEnv("configs/cart_pendulum_horizon.json")
env.seed(0)
import pickle
with open("../../lmpc-horizon/datasets/cart_pendulum_10.pkl", "rb") as f:
test_set = pickle.load(f)
rews = {}
for i in range(1):
import time
obs = env.reset(**test_set[5])
done = False
t_before = time.process_time()
horizon = 10
while not done:
t_step = time.process_time()
if env.steps_count % 1 == 0 and False:
horizon = 25 if horizon == 50 else 50
obs, rew, done, info = env.step([horizon])
for rew_comp, v in info.items():
if rew_comp.startswith("reward/"):
if rew_comp not in rews:
rews[rew_comp] = []
rews[rew_comp].append(v)
if time.process_time() - t_step > 1:
print(env.control_system.controller.mpc.solver_stats)
print(env.steps_count)
for k, v in rews.items():
print("{}: {}".format(k, sum(v)))
print("Elapsed time {}".format(time.process_time() - t_before))
env.render()
| true | true |
f71b65b3b003148f57d2ed310d5f76f0d067c474 | 933 | py | Python | violas_client/canoser/bool_t.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | null | null | null | violas_client/canoser/bool_t.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | null | null | null | violas_client/canoser/bool_t.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | 1 | 2022-01-05T06:49:42.000Z | 2022-01-05T06:49:42.000Z | from violas_client.canoser.base import Base
class BoolT(Base):
    """Serde helper for canoser booleans: one byte, b'\\x01' (True) or b'\\x00' (False)."""

    @classmethod
    def encode(cls, value):
        """Serialize *value* into a single byte (b'\\x01' for truthy, b'\\x00' otherwise)."""
        # IDIOM FIX: these classmethods previously named their first parameter
        # "self" although it is bound to the class; renamed to "cls".
        return b'\1' if value else b'\0'

    @classmethod
    def decode_bytes(cls, value):
        """Deserialize a single byte into a bool.

        :raises TypeError: if *value* is neither b'\\x00' nor b'\\x01'.
        """
        if value == b'\0':
            return False
        elif value == b'\1':
            return True
        else:
            raise TypeError("bool should be 0 or 1.")

    @classmethod
    def decode(cls, cursor):
        """Read one byte from *cursor* and decode it into a bool."""
        return cls.decode_bytes(cursor.read_bytes(1))

    @classmethod
    def from_value(cls, value):
        """Coerce *value* to a plain bool."""
        return bool(value)

    @classmethod
    def check_value(cls, value):
        """Raise TypeError unless *value* is exactly a bool."""
        if not isinstance(value, bool):
            raise TypeError('value {} is not bool'.format(value))

    @classmethod
    def to_json_serializable(cls, value):
        """Booleans are natively JSON serializable; return unchanged."""
        return value
| 23.923077 | 66 | 0.543408 | from violas_client.canoser.base import Base
class BoolT(Base):
@classmethod
def encode(self, value):
if value:
return b'\1'
else:
return b'\0'
@classmethod
def decode_bytes(self, value):
if value == b'\0':
return False
elif value == b'\1':
return True
else:
raise TypeError("bool should be 0 or 1.")
@classmethod
def decode(self, cursor):
value = cursor.read_bytes(1)
return self.decode_bytes(value)
@classmethod
def from_value(cls, value):
if value:
return True
return False
@classmethod
def check_value(self, value):
if not isinstance(value, bool):
raise TypeError('value {} is not bool'.format(value))
@classmethod
def to_json_serializable(cls, value):
return value
| true | true |
f71b6618acab7a74ff8f4e811e451717d08dc511 | 1,097 | py | Python | 4.conditionals/challenge3_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | 4.conditionals/challenge3_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | 4.conditionals/challenge3_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | print("\tWelcome to the Voter Registration App")
# Ask for name and age in order to register the vote.
name = input("\nPlease enter your name: ").title()
age = int(input("Please enter your age: "))
partidos = ['Republican', 'Democratic', 'Independent', 'Libertarian', 'Green']
# Only adults (18+) may register.
if age >= 18:
    # Print a message depending on the chosen party.
    print("\nCongratulations ", name, "! You are old enough to register to vote.")
    print("\nHere is a list of political parties to join.")
    for i in partidos:
        print("-", i)
    p = input("\nWhat party would you like to join: ").capitalize()
    # BUGFIX: membership must be tested against the actual party names. The
    # original `p in 'Republican,Democratic'` was a substring test that also
    # matched partial inputs such as "Rep" or even a single letter.
    if p in ('Republican', 'Democratic'):
        print("Congratulations ", name, "! You have joined the ", p, " party!")
        print("That is a major party!")
    elif p == 'Independent':
        print("Congratulations ", name, "! You have joined the ", p, " party!")
        print("You are an independent person!")
    else:
        print("That is not a major party")
# Under 18: registration refused.
else:
    print("You are not old enough to register to vote.")
| 43.88 | 80 | 0.65907 | print("\tWelcome to the Voter Registration App")
name = input("\nPlease enter your name: ").title()
age = int(input("Please enter your age: "))
partidos = ['Republican','Democratic','Independent','Libertarian','Green']
if age >= 18:
print("\nCongratulations ",name,"! You are old enough to register to vote.")
print("\nHere is a list of political parties to join.")
for i in partidos:
print("-",i)
p = input("\nWhat party would you like to join: ").capitalize()
if p in 'Republican,Democratic':
print("Congratulations ",name,"! You have joined the ",p," party!")
print("That is a major party!")
elif p == 'Independent':
print("Congratulations ",name,"! You have joined the ",p," party!")
print("You are an independent person!")
else:
print("That is not a major party")
elif age < 18:
print("You are not old enough to register to vote.")
| true | true |
f71b6745cb39d3ccd6a45e1c0ecd693cdffb6acf | 2,559 | py | Python | etc/mtrace/parse_mtrace.py | diamantopoulos/memluv | f3a283d65f07b19d48589e02ac484563e12e22e8 | [
"Apache-2.0"
] | 9 | 2015-12-16T08:05:06.000Z | 2022-02-25T08:29:30.000Z | etc/mtrace/parse_mtrace.py | diamantopoulos/memluv | f3a283d65f07b19d48589e02ac484563e12e22e8 | [
"Apache-2.0"
] | 1 | 2022-02-26T07:40:23.000Z | 2022-03-15T03:27:59.000Z | etc/mtrace/parse_mtrace.py | diamantopoulos/memluv | f3a283d65f07b19d48589e02ac484563e12e22e8 | [
"Apache-2.0"
] | null | null | null | """
Parsing a mtrace log file and append to timeline-footprint format
"""
from numpy import *
import numpy as np
import glob
import os
import linecache
import csv
# 1038 bytes is the size of a heap log file with no heap activity (only heap info)
def ValidHeapFile(fpath):
    """Return True if *fpath* is an existing heap log with actual activity.

    A log containing only the header line carries no heap events and is
    treated as invalid.

    BUGFIX: the original opened the file before checking os.path.isfile, so a
    missing path raised FileNotFoundError instead of returning False.

    :param fpath: path of the heap log file to check.
    :return: True when the file exists and has more than one line.
    """
    header_lines = 1
    if not os.path.isfile(fpath):
        return False
    with open(fpath) as f:
        line_count = sum(1 for _ in f)
    return line_count > header_lines
# Parse every mtrace log under /tmp and emit a cumulative-footprint timeline
# (index, event_time, total_bytes_allocated) to /tmp/mtrace.out.
print ("INFO: --------------------- \nINFO: Parsing mtrace logs \nINFO: ---------------------")
mtrace_files = glob.glob("/tmp/mtrace*.txt")
mtraces=len(mtrace_files)
print ("INFO: Total mtrace logs found:", mtraces)
colours=['b','g','r','c','m','y','k']
# Total wall-clock span distributed uniformly over the log's events.
elapsed_time=208000
#with plt.xkcd():
total_bytes_allocated=0
index=0
fout = open("/tmp/mtrace.out",'w')
lines_parsed=0
event_time=0
#Heaps log parsing
for cur_mtrace in sorted(mtrace_files):
    if ValidHeapFile(cur_mtrace):
        fin = open(cur_mtrace,'r')
        total_lines = len(fin.readlines())
        # One time tick per event line (3 lines are header/footer).
        tic=elapsed_time/(total_lines-3)
        print ("total_lines = ", total_lines, "tic = ", tic)
        fin.close()
        fin = open(cur_mtrace,'r')
        for line in fin:
            line = line.rstrip().split(' ')
            #print ("length(line) = ", len(line), "index=", index)
            # Skip the header (first two lines) and the trailing summary line.
            if lines_parsed>=2 and lines_parsed<total_lines-1:
                sign = line[2]
                if sign == '+':
                    cur_bytes = line[4]
                    cur_bytes_dec = int(cur_bytes, 16)
                    total_bytes_allocated = total_bytes_allocated + cur_bytes_dec
                    #print ("INFO: Adding ", cur_bytes_dec, "bytes", "total_bytes_allocated=", total_bytes_allocated)
                elif sign == '-':
                    # NOTE(review): mtrace '-' records carry only an address, so
                    # this reuses the size of the MOST RECENT '+' record; frees
                    # of any other block subtract the wrong size. An
                    # address->size dict would fix this — confirm intent.
                    total_bytes_allocated = total_bytes_allocated - cur_bytes_dec
                    #print ("INFO: Subtracting ", cur_bytes_dec, "bytes", "total_bytes_allocated=", total_bytes_allocated)
                else:
                    print ("ERROR: Unknown sign", sign, "Aborting...")
                    # NOTE(review): bare "__exit__" is a no-op name expression
                    # (NameError at runtime) — presumably sys.exit() was meant.
                    __exit__
                event_time=event_time+tic
                fout.write(str(index)+" "+str(event_time)+" "+str(total_bytes_allocated)+"\n")
                index=index+1
            else:
                print ("WARNING: Ignoring this line", line)
            lines_parsed=lines_parsed+1
    else:
        print ("INFO: Current mtrace path :", cur_mtrace, "-> Skipping empty file")
fin.close()
fout.close()
| 34.581081 | 126 | 0.569754 | from numpy import *
import numpy as np
import glob
import os
import linecache
import csv
def ValidHeapFile(fpath):
header_lines=1
with open(fpath) as f:
lines = len(list(f))
return True if os.path.isfile(fpath) and lines > header_lines else False
print ("INFO: --------------------- \nINFO: Parsing mtrace logs \nINFO: ---------------------")
mtrace_files = glob.glob("/tmp/mtrace*.txt")
mtraces=len(mtrace_files)
print ("INFO: Total mtrace logs found:", mtraces)
colours=['b','g','r','c','m','y','k']
elapsed_time=208000
total_bytes_allocated=0
index=0
fout = open("/tmp/mtrace.out",'w')
lines_parsed=0
event_time=0
for cur_mtrace in sorted(mtrace_files):
if ValidHeapFile(cur_mtrace):
fin = open(cur_mtrace,'r')
total_lines = len(fin.readlines())
tic=elapsed_time/(total_lines-3)
print ("total_lines = ", total_lines, "tic = ", tic)
fin.close()
fin = open(cur_mtrace,'r')
for line in fin:
line = line.rstrip().split(' ')
if lines_parsed>=2 and lines_parsed<total_lines-1:
sign = line[2]
if sign == '+':
cur_bytes = line[4]
cur_bytes_dec = int(cur_bytes, 16)
total_bytes_allocated = total_bytes_allocated + cur_bytes_dec
elif sign == '-':
total_bytes_allocated = total_bytes_allocated - cur_bytes_dec
else:
print ("ERROR: Unknown sign", sign, "Aborting...")
__exit__
event_time=event_time+tic
fout.write(str(index)+" "+str(event_time)+" "+str(total_bytes_allocated)+"\n")
index=index+1
else:
print ("WARNING: Ignoring this line", line)
lines_parsed=lines_parsed+1
else:
print ("INFO: Current mtrace path :", cur_mtrace, "-> Skipping empty file")
fin.close()
fout.close()
| true | true |
f71b675d58f0489d8b6561c581bfe700396f87fb | 965 | py | Python | python/day12-2.py | Aerdan/adventcode-2020 | 83120aa8c7fc9d1f2d34780610401e3c6d4f583b | [
"BSD-1-Clause"
] | null | null | null | python/day12-2.py | Aerdan/adventcode-2020 | 83120aa8c7fc9d1f2d34780610401e3c6d4f583b | [
"BSD-1-Clause"
] | null | null | null | python/day12-2.py | Aerdan/adventcode-2020 | 83120aa8c7fc9d1f2d34780610401e3c6d4f583b | [
"BSD-1-Clause"
] | null | null | null | #!/usr/bin/env python3
from math import sin, cos, radians
# Advent of Code 2020, day 12 part 2: navigate a ship via a movable waypoint.
data = []
with open('input12.txt') as f:
    for line in f:
        data.append(line.strip())
# Waypoint position relative to the ship (starts 10 east, 1 north).
x, y = 10, 1
# Ship position.
sx, sy = 0, 0
# CLEANUP: removed unused leftovers d = 'E' and c = 'NESW' from part 1.
for line in data:
    insn = line[0]
    dist = int(line[1:])
    if insn == 'F':
        # Move toward the waypoint `dist` times; for integers a single
        # multiplication is equivalent to the original repeat-add loop.
        sx += x * dist
        sy += y * dist
    elif insn == 'N':
        y += dist
    elif insn == 'E':
        x += dist
    elif insn == 'S':
        y -= dist
    elif insn == 'W':
        x -= dist
    elif insn == 'L':
        # Rotate the waypoint counter-clockwise about the ship.
        ang = radians(dist)
        nx = x * cos(ang) - y * sin(ang)
        ny = y * cos(ang) + x * sin(ang)
        x = round(nx)
        y = round(ny)
    elif insn == 'R':
        # Clockwise rotation expressed as the complementary CCW angle.
        ang = radians(360 - dist)
        nx = x * cos(ang) - y * sin(ang)
        ny = y * cos(ang) + x * sin(ang)
        x = round(nx)
        y = round(ny)
# Answer: Manhattan distance of the ship from the origin.
md = abs(sx) + abs(sy)
print(sx, sy, md)
| 19.693878 | 42 | 0.449741 |
from math import sin, cos, radians
data = []
with open('input12.txt') as f:
for line in f:
data.append(line.strip())
x, y = 10, 1
sx, sy = 0, 0
d = 'E'
c = 'NESW'
for line in data:
insn = line[0]
dist = int(line[1:])
if insn == 'F':
for i in range(dist):
sx += x
sy += y
elif insn == 'N':
y += dist
elif insn == 'E':
x += dist
elif insn == 'S':
y -= dist
elif insn == 'W':
x -= dist
elif insn == 'L':
dist = radians(dist)
nx = x * cos(dist) - y * sin(dist)
ny = y * cos(dist) + x * sin(dist)
x = round(nx)
y = round(ny)
elif insn == 'R':
dist = radians(360 - dist)
nx = x * cos(dist) - y * sin(dist)
ny = y * cos(dist) + x * sin(dist)
x = round(nx)
y = round(ny)
md = abs(sx) + abs(sy)
print(sx, sy, md)
| true | true |
f71b693c8f73a9ec5102fb39ced2b8f6a4ea8b4b | 511 | py | Python | tcfcli/cmds/local/libs/local/debug_context.py | tencentyun/scfcli | ef15508ad34a851cf0d2750dfaa5202f6a600887 | [
"Apache-2.0"
] | 103 | 2019-06-11T06:09:56.000Z | 2021-12-18T22:48:59.000Z | tcfcli/cmds/local/libs/local/debug_context.py | TencentCloud/Serverless-cli | 57f98b24cfd10712770a4806212cfb69d981a11a | [
"Apache-2.0"
] | 8 | 2019-07-12T12:08:40.000Z | 2020-10-20T07:18:17.000Z | tcfcli/cmds/local/libs/local/debug_context.py | TencentCloud/Serverless-cli | 57f98b24cfd10712770a4806212cfb69d981a11a | [
"Apache-2.0"
] | 49 | 2019-06-11T06:26:05.000Z | 2020-02-19T08:13:36.000Z | # -*- coding: utf-8 -*-
import os
class DebugContext(object):
def __init__(self,
debug_port=None,
debugger_path=None,
debug_args=None):
self.debug_port = debug_port
self.debugger_path = debugger_path
self.debug_args = debug_args
if self.debug_port:
os.environ["PYTHONUNBUFFERED"] = "1"
def __bool__(self):
return bool(self.debug_port)
def __nonzero__(self):
return self.__bool__()
| 21.291667 | 48 | 0.579256 |
import os
class DebugContext(object):
def __init__(self,
debug_port=None,
debugger_path=None,
debug_args=None):
self.debug_port = debug_port
self.debugger_path = debugger_path
self.debug_args = debug_args
if self.debug_port:
os.environ["PYTHONUNBUFFERED"] = "1"
def __bool__(self):
return bool(self.debug_port)
def __nonzero__(self):
return self.__bool__()
| true | true |
f71b6aed9afed4cf56533fb2127e350f2b0dc11b | 289 | py | Python | tests/integration/test_notes.py | mhk001/python-alerta-client | 6e02f8a2245cef223df3048d445921e1ba90ad1c | [
"Apache-2.0"
] | 20 | 2017-04-14T08:05:48.000Z | 2022-01-11T06:26:17.000Z | tests/integration/test_notes.py | mhk001/python-alerta-client | 6e02f8a2245cef223df3048d445921e1ba90ad1c | [
"Apache-2.0"
] | 99 | 2016-09-30T20:53:05.000Z | 2022-03-14T10:00:59.000Z | tests/integration/test_notes.py | mhk001/python-alerta-client | 6e02f8a2245cef223df3048d445921e1ba90ad1c | [
"Apache-2.0"
] | 33 | 2016-10-04T20:44:58.000Z | 2022-03-04T21:35:49.000Z | import unittest
from alertaclient.api import Client
class AlertTestCase(unittest.TestCase):
def setUp(self):
self.client = Client(endpoint='http://api:8080', key='demo-key')
def test_notes(self):
# add tests here when /notes endpoints are created
pass
| 20.642857 | 72 | 0.681661 | import unittest
from alertaclient.api import Client
class AlertTestCase(unittest.TestCase):
def setUp(self):
self.client = Client(endpoint='http://api:8080', key='demo-key')
def test_notes(self):
pass
| true | true |
f71b6b5b67e80a03f5062113889382389fc8dc72 | 29,281 | py | Python | resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/fixtures_rsa.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/fixtures_rsa.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/fixtures_rsa.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateNumbers, RSAPublicNumbers
)
RSA_KEY_512 = RSAPrivateNumbers(
p=int(
"d57846898d5c0de249c08467586cb458fa9bc417cdf297f73cfc52281b787cd9", 16
),
q=int(
"d10f71229e87e010eb363db6a85fd07df72d985b73c42786191f2ce9134afb2d", 16
),
d=int(
"272869352cacf9c866c4e107acc95d4c608ca91460a93d28588d51cfccc07f449"
"18bbe7660f9f16adc2b4ed36ca310ef3d63b79bd447456e3505736a45a6ed21", 16
),
dmp1=int(
"addff2ec7564c6b64bc670d250b6f24b0b8db6b2810099813b7e7658cecf5c39", 16
),
dmq1=int(
"463ae9c6b77aedcac1397781e50e4afc060d4b216dc2778494ebe42a6850c81", 16
),
iqmp=int(
"54deef8548f65cad1d411527a32dcb8e712d3e128e4e0ff118663fae82a758f4", 16
),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"ae5411f963c50e3267fafcf76381c8b1e5f7b741fdb2a544bcf48bd607b10c991"
"90caeb8011dc22cf83d921da55ec32bd05cac3ee02ca5e1dbef93952850b525",
16
),
)
)
RSA_KEY_512_ALT = RSAPrivateNumbers(
p=int(
"febe19c29a0b50fefa4f7b1832f84df1caf9be8242da25c9d689e18226e67ce5",
16),
q=int(
"eb616c639dd999feda26517e1c77b6878f363fe828c4e6670ec1787f28b1e731",
16),
d=int(
"80edecfde704a806445a4cc782b85d3f36f17558f385654ea767f006470fdfcbda5e2"
"206839289d3f419b4e4fb8e1acee1b4fb9c591f69b64ec83937f5829241", 16),
dmp1=int(
"7f4fa06e2a3077a54691cc5216bf13ad40a4b9fa3dd0ea4bca259487484baea5",
16),
dmq1=int(
"35eaa70d5a8711c352ed1c15ab27b0e3f46614d575214535ae279b166597fac1",
16),
iqmp=int(
"cc1f272de6846851ec80cb89a02dbac78f44b47bc08f53b67b4651a3acde8b19",
16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"ea397388b999ef0f7e7416fa000367efd9a0ba0deddd3f8160d1c36d62267f210"
"fbd9c97abeb6654450ff03e7601b8caa6c6f4cba18f0b52c179d17e8f258ad5",
16),
)
)
RSA_KEY_522 = RSAPrivateNumbers(
p=int(
"1a8aab9a069f92b52fdf05824f2846223dc27adfc806716a247a77d4c36885e4bf",
16),
q=int(
"19e8d620d177ec54cdb733bb1915e72ef644b1202b889ceb524613efa49c07eb4f",
16),
d=int(
"10b8a7c0a92c1ae2d678097d69db3bfa966b541fb857468291d48d1b52397ea2bac0d"
"4370c159015c7219e3806a01bbafaffdd46f86e3da1e2d1fe80a0369ccd745", 16),
dmp1=int(
"3eb6277f66e6e2dcf89f1b8529431f730839dbd9a3e49555159bc8470eee886e5",
16),
dmq1=int(
"184b4d74aa54c361e51eb23fee4eae5e4786b37b11b6e0447af9c0b9c4e4953c5b",
16),
iqmp=int(
"f80e9ab4fa7b35d0d232ef51c4736d1f2dcf2c7b1dd8716211b1bf1337e74f8ae",
16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"2afaea0e0bb6fca037da7d190b5270a6c665bc18e7a456f7e69beaac4433db748"
"ba99acdd14697e453bca596eb35b47f2d48f1f85ef08ce5109dad557a9cf85ebf"
"1", 16),
),
)
RSA_KEY_599 = RSAPrivateNumbers(
p=int(
"cf95d20be0c7af69f4b3d909f65d858c26d1a7ef34da8e3977f4fa230580e58814b54"
"24be99", 16),
q=int(
"6052be4b28debd4265fe12ace5aa4a0c4eb8d63ff8853c66824b35622161eb48a3bc8"
"c3ada5", 16),
d=int(
"69d9adc465e61585d3142d7cc8dd30605e8d1cbbf31009bc2cd5538dc40528d5d68ee"
"fe6a42d23674b6ec76e192351bf368c8968f0392110bf1c2825dbcff071270b80adcc"
"fa1d19d00a1", 16),
dmp1=int(
"a86d10edde456687fba968b1f298d2e07226adb1221b2a466a93f3d83280f0bb46c20"
"2b6811", 16),
dmq1=int(
"40d570e08611e6b1da94b95d46f8e7fe80be48f7a5ff8838375b08039514a399b11c2"
"80735", 16),
iqmp=int(
"cd051cb0ea68b88765c041262ace2ec4db11dab14afd192742e34d5da3328637fabdf"
"bae26e", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"4e1b470fe00642426f3808e74c959632dd67855a4c503c5b7876ccf4dc7f6a1a4"
"9107b90d26daf0a7879a6858218345fbc6e59f01cd095ca5647c27c25265e6c47"
"4fea89537191c7073d9d", 16),
)
)
RSA_KEY_745 = RSAPrivateNumbers(
p=int(
"1c5a0cfe9a86debd19eca33ba961f15bc598aa7983a545ce775b933afc89eb51bcf90"
"836257fdd060d4b383240241d", 16
),
q=int(
"fb2634f657f82ee6b70553382c4e2ed26b947c97ce2f0016f1b282cf2998184ad0527"
"a9eead826dd95fe06b57a025", 16
),
d=int(
"402f30f976bc07d15ff0779abff127b20a8b6b1d0024cc2ad8b6762d38f174f81e792"
"3b49d80bdbdd80d9675cbc7b2793ec199a0430eb5c84604dacfdb29259ae6a1a44676"
"22f0b23d4cb0f5cb1db4b8173c8d9d3e57a74dbd200d2141", 16),
dmp1=int(
"e5e95b7751a6649f199be21bef7a51c9e49821d945b6fc5f538b4a670d8762c375b00"
"8e70f31d52b3ea2bd14c3101", 16),
dmq1=int(
"12b85d5843645f72990fcf8d2f58408b34b3a3b9d9078dd527fceb5d2fb7839008092"
"dd4aca2a1fb00542801dcef5", 16),
iqmp=int(
"5672740d947f621fc7969e3a44ec26736f3f819863d330e63e9409e139d20753551ac"
"c16544dd2bdadb9dee917440", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"1bd085f92237774d34013b477ceebbb2f2feca71118db9b7429341477947e7b1d"
"04e8c43ede3c52bb25781af58d4ff81289f301eac62dc3bcd7dafd7a4d5304e9f"
"308e766952fbf2b62373e66611fa53189987dbef9f7243dcbbeb25831", 16),
)
)
RSA_KEY_768 = RSAPrivateNumbers(
p=int(
"f80c0061b607f93206b68e208906498d68c6e396faf457150cf975c8f849848465869"
"7ecd402313397088044c4c2071b", 16),
q=int(
"e5b5dbecc93c6d306fc14e6aa9737f9be2728bc1a326a8713d2849b34c1cb54c63468"
"3a68abb1d345dbf15a3c492cf55", 16),
d=int(
"d44601442255ffa331212c60385b5e898555c75c0272632ff42d57c4b16ca97dbca9f"
"d6d99cd2c9fd298df155ed5141b4be06c651934076133331d4564d73faed7ce98e283"
"2f7ce3949bc183be7e7ca34f6dd04a9098b6c73649394b0a76c541", 16),
dmp1=int(
"a5763406fa0b65929661ce7b2b8c73220e43a5ebbfe99ff15ddf464fd238105ad4f2a"
"c83818518d70627d8908703bb03", 16),
dmq1=int(
"cb467a9ef899a39a685aecd4d0ad27b0bfdc53b68075363c373d8eb2bed8eccaf3533"
"42f4db735a9e087b7539c21ba9d", 16),
iqmp=int(
"5fe86bd3aee0c4d09ef11e0530a78a4534c9b833422813b5c934a450c8e564d8097a0"
"6fd74f1ebe2d5573782093f587a", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"de92f1eb5f4abf426b6cac9dd1e9bf57132a4988b4ed3f8aecc15e251028bd6df"
"46eb97c711624af7db15e6430894d1b640c13929329241ee094f5a4fe1a20bc9b"
"75232320a72bc567207ec54d6b48dccb19737cf63acc1021abb337f19130f7",
16),
)
)
RSA_KEY_1024 = RSAPrivateNumbers(
p=int(
"ea4d9d9a1a068be44b9a5f8f6de0512b2c5ba1fb804a4655babba688e6e890b347c1a"
"7426685a929337f513ae4256f0b7e5022d642237f960c5b24b96bee8e51", 16),
q=int(
"cffb33e400d6f08b410d69deb18a85cf0ed88fcca9f32d6f2f66c62143d49aff92c11"
"4de937d4f1f62d4635ee89af99ce86d38a2b05310f3857c7b5d586ac8f9", 16),
d=int(
"3d12d46d04ce942fb99be7bf30587b8cd3e21d75a2720e7bda1b867f1d418d91d8b9f"
"e1c00181fdde94f2faf33b4e6f800a1b3ae3b972ccb6d5079dcb6c794070ac8306d59"
"c00b58b7a9a81122a6b055832de7c72334a07494d8e7c9fbeed2cc37e011d9e6bfc6e"
"9bcddbef7f0f5771d9cf82cd4b268c97ec684575c24b6c881", 16),
dmp1=int(
"470f2b11257b7ec9ca34136f487f939e6861920ad8a9ae132a02e74af5dceaa5b4c98"
"2949ccb44b67e2bcad2f58674db237fe250e0d62b47b28fa1dfaa603b41", 16),
dmq1=int(
"c616e8317d6b3ae8272973709b80e8397256697ff14ea03389de454f619f99915a617"
"45319fefbe154ec1d49441a772c2f63f7d15c478199afc60469bfd0d561", 16),
iqmp=int(
"d15e7c9ad357dfcd5dbdc8427680daf1006761bcfba93a7f86589ad88832a8d564b1c"
"d4291a658c96fbaea7ca588795820902d85caebd49c2d731e3fe0243130", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"be5aac07456d990133ebce69c06b48845b972ab1ad9f134bc5683c6b5489b5119"
"ede07be3bed0e355d48e0dfab1e4fb5187adf42d7d3fb0401c082acb8481bf17f"
"0e871f8877be04c3a1197d40aa260e2e0c48ed3fd2b93dc3fc0867591f67f3cd6"
"0a77adee1d68a8c3730a5702485f6ac9ede7f0fd2918e037ee4cc1fc1b4c9",
16),
)
)
RSA_KEY_1025 = RSAPrivateNumbers(
p=int(
"18e9bfb7071725da04d31c103fa3563648c69def43a204989214eb57b0c8b299f9ef3"
"5dda79a62d8d67fd2a9b69fbd8d0490aa2edc1e111a2b8eb7c737bb691a5", 16),
q=int(
"d8eccaeeb95815f3079d13685f3f72ca2bf2550b349518049421375df88ca9bbb4ba8"
"cb0e3502203c9eeae174112509153445d251313e4711a102818c66fcbb7", 16),
d=int(
"fe9ac54910b8b1bc948a03511c54cab206a1d36d50d591124109a48abb7480977ccb0"
"47b4d4f1ce7b0805df2d4fa3fe425f49b78535a11f4b87a4eba0638b3340c23d4e6b2"
"1ecebe9d5364ea6ead2d47b27836019e6ecb407000a50dc95a8614c9d0031a6e3a524"
"d2345cfb76e15c1f69d5ba35bdfb6ec63bcb115a757ef79d9", 16),
dmp1=int(
"18537e81006a68ea76d590cc88e73bd26bc38d09c977959748e5265c0ce21c0b5fd26"
"53d975f97ef759b809f791487a8fff1264bf561627fb4527a3f0bbb72c85", 16),
dmq1=int(
"c807eac5a1f1e1239f04b04dd16eff9a00565127a91046fa89e1eb5d6301cace85447"
"4d1f47b0332bd35b4214b66e9166953241538f761f30d969272ee214f17", 16),
iqmp=int(
"133aa74dd41fe70fa244f07d0c4091a22f8c8f0134fe6aea9ec8b55383b758fefe358"
"2beec36eca91715eee7d21931f24fa9e97e8e3a50f9cd0f731574a5eafcc", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"151c44fed756370fb2d4a0e6ec7dcac84068ca459b6aaf22daf902dca72c77563"
"bf276fe3523f38f5ddaf3ea9aa88486a9d8760ff732489075862bee0e599de5c5"
"f509b4519f4f446521bad15cd279a498fe1e89107ce0d237e3103d7c5eb801666"
"42e2924b152aebff97b71fdd2d68ebb45034cc784e2e822ff6d1edf98af3f3",
16),
)
)
RSA_KEY_1026 = RSAPrivateNumbers(
p=int(
"1fcbfb8719c5bdb5fe3eb0937c76bb096e750b9442dfe31d6a877a13aed2a6a4e9f79"
"40f815f1c307dd6bc2b4b207bb6fe5be3a15bd2875a957492ce197cdedb1", 16),
q=int(
"1f704a0f6b8966dd52582fdc08227dd3dbaeaa781918b41144b692711091b4ca4eb62"
"985c3513853828ce8739001dfba9a9a7f1a23cbcaf74280be925e2e7b50d", 16),
d=int(
"c67975e35a1d0d0b3ebfca736262cf91990cb31cf4ac473c0c816f3bc2720bcba2475"
"e8d0de8535d257816c0fc53afc1b597eada8b229069d6ef2792fc23f59ffb4dc6c3d9"
"0a3c462082025a4cba7561296dd3d8870c4440d779406f00879afe2c681e7f5ee055e"
"ff829e6e55883ec20830c72300762e6e3a333d94b4dbe4501", 16),
dmp1=int(
"314730ca7066c55d086a9fbdf3670ef7cef816b9efea8b514b882ae9d647217cf41d7"
"e9989269dc9893d02e315cb81f058c49043c2cac47adea58bdf5e20e841", 16),
dmq1=int(
"1da28a9d687ff7cfeebc2439240de7505a8796376968c8ec723a2b669af8ce53d9c88"
"af18540bd78b2da429014923fa435f22697ac60812d7ca9c17a557f394cd", 16),
iqmp=int(
"727947b57b8a36acd85180522f1b381bce5fdbd962743b3b14af98a36771a80f58ddd"
"62675d72a5935190da9ddc6fd6d6d5e9e9f805a2e92ab8d56b820493cdf", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"3e7a5e6483e55eb8b723f9c46732d21b0af9e06a4a1099962d67a35ee3f62e312"
"9cfae6ab0446da18e26f33e1d753bc1cc03585c100cf0ab5ef056695706fc8b0c"
"9c710cd73fe6e5beda70f515a96fabd3cc5ac49efcb2594b220ff3b603fcd927f"
"6a0838ef04bf52f3ed9eab801f09e5aed1613ddeb946ed0fbb02060b3a36fd",
16),
)
)
RSA_KEY_1027 = RSAPrivateNumbers(
p=int(
"30135e54cfb072c3d3eaf2000f3ed92ceafc85efc867b9d4bf5612f2978c432040093"
"4829f741c0f002b54af2a4433ff872b6321ef00ff1e72cba4e0ced937c7d", 16),
q=int(
"1d01a8aead6f86b78c875f18edd74214e06535d65da054aeb8e1851d6f3319b4fb6d8"
"6b01e07d19f8261a1ded7dc08116345509ab9790e3f13e65c037e5bb7e27", 16),
d=int(
"21cf4477df79561c7818731da9b9c88cd793f1b4b8e175bd0bfb9c0941a4dc648ecf1"
"6d96b35166c9ea116f4c2eb33ce1c231e641a37c25e54c17027bdec08ddafcb83642e"
"795a0dd133155ccc5eed03b6e745930d9ac7cfe91f9045149f33295af03a2198c660f"
"08d8150d13ce0e2eb02f21ac75d63b55822f77bd5be8d07619", 16),
dmp1=int(
"173fb695931e845179511c18b546b265cb79b517c135902377281bdf9f34205e1f399"
"4603ad63e9f6e7885ea73a929f03fa0d6bed943051ce76cddde2d89d434d", 16),
dmq1=int(
"10956b387b2621327da0c3c8ffea2af8be967ee25163222746c28115a406e632a7f12"
"5a9397224f1fa5c116cd3a313e5c508d31db2deb83b6e082d213e33f7fcf", 16),
iqmp=int(
"234f833949f2c0d797bc6a0e906331e17394fa8fbc8449395766d3a8d222cf6167c48"
"8e7fe1fe9721d3e3b699a595c8e6f063d92bd840dbc84d763b2b37002109", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"57281707d7f9b1369c117911758980e32c05b133ac52c225bcf68b79157ff47ea"
"0a5ae9f579ef1fd7e42937f921eb3123c4a045cc47a2159fbbf904783e654954c"
"42294c30a95c15db7c7b91f136244e548f62474b137087346c5522e54f226f49d"
"6c93bc58cb39972e41bde452bb3ae9d60eb93e5e1ce91d222138d9890c7d0b",
16),
)
)
RSA_KEY_1028 = RSAPrivateNumbers(
p=int(
"359d17378fae8e9160097daee78a206bd52efe1b757c12a6da8026cc4fc4bb2620f12"
"b8254f4db6aed8228be8ee3e5a27ec7d31048602f01edb00befd209e8c75", 16),
q=int(
"33a2e70b93d397c46e63b273dcd3dcfa64291342a6ce896e1ec8f1c0edc44106550f3"
"c06e7d3ca6ea29eccf3f6ab5ac6235c265313d6ea8e8767e6a343f616581", 16),
d=int(
"880640088d331aa5c0f4cf2887809a420a2bc086e671e6ffe4e47a8c80792c038a314"
"9a8e45ef9a72816ab45b36e3af6800351067a6b2751843d4232413146bb575491463a"
"8addd06ce3d1bcf7028ec6c5d938c545a20f0a40214b5c574ca7e840062b2b5f8ed49"
"4b144bb2113677c4b10519177fee1d4f5fb8a1c159b0b47c01", 16),
dmp1=int(
"75f8c52dad2c1cea26b8bba63236ee4059489e3d2db766136098bcc6b67fde8f77cd3"
"640035107bfb1ffc6480983cfb84fe0c3be008424ebc968a7db7e01f005", 16),
dmq1=int(
"3893c59469e4ede5cd0e6ff9837ca023ba9b46ff40c60ccf1bec10f7d38db5b1ba817"
"6c41a3f750ec4203b711455aca06d1e0adffc5cffa42bb92c7cb77a6c01", 16),
iqmp=int(
"ad32aafae3c962ac25459856dc8ef1f733c3df697eced29773677f435d186cf759d1a"
"5563dd421ec47b4d7e7f12f29647c615166d9c43fc49001b29089344f65", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"ad0696bef71597eb3a88e135d83c596930cac73868fbd7e6b2d64f34eea5c28cc"
"e3510c68073954d3ba4deb38643e7a820a4cf06e75f7f82eca545d412bd637819"
"45c28d406e95a6cced5ae924a8bfa4f3def3e0250d91246c269ec40c89c93a85a"
"cd3770ba4d2e774732f43abe94394de43fb57f93ca25f7a59d75d400a3eff5",
16),
)
)
RSA_KEY_1029 = RSAPrivateNumbers(
p=int(
"66f33e513c0b6b6adbf041d037d9b1f0ebf8de52812a3ac397a963d3f71ba64b3ad04"
"e4d4b5e377e6fa22febcac292c907dc8dcfe64c807fd9a7e3a698850d983", 16),
q=int(
"3b47a89a19022461dcc2d3c05b501ee76955e8ce3cf821beb4afa85a21a26fd7203db"
"deb8941f1c60ada39fd6799f6c07eb8554113f1020460ec40e93cd5f6b21", 16),
d=int(
"280c42af8b1c719821f2f6e2bf5f3dd53c81b1f3e1e7cc4fce6e2f830132da0665bde"
"bc1e307106b112b52ad5754867dddd028116cf4471bc14a58696b99524b1ad8f05b31"
"cf47256e54ab4399b6a073b2c0452441438dfddf47f3334c13c5ec86ece4d33409056"
"139328fafa992fb5f5156f25f9b21d3e1c37f156d963d97e41", 16),
dmp1=int(
"198c7402a4ec10944c50ab8488d7b5991c767e75eb2817bd427dff10335ae141fa2e8"
"7c016dc22d975cac229b9ffdf7d943ddfd3a04b8bf82e83c3b32c5698b11", 16),
dmq1=int(
"15fd30c7687b68ef7c2a30cdeb913ec56c4757c218cf9a04d995470797ee5f3a17558"
"fbb6d00af245d2631d893b382da48a72bc8a613024289895952ab245b0c1", 16),
iqmp=int(
"4f8fde17e84557a3f4e242d889e898545ab55a1a8e075c9bb0220173ccffe84659abe"
"a235104f82e32750309389d4a52af57dbb6e48d831917b6efeb190176570", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"17d6e0a09aa5b2d003e51f43b9c37ffde74688f5e3b709fd02ef375cb6b8d15e2"
"99a9f74981c3eeaaf947d5c2d64a1a80f5c5108a49a715c3f7be95a016b8d3300"
"965ead4a4df76e642d761526803e9434d4ec61b10cb50526d4dcaef02593085de"
"d8c331c1b27b200a45628403065efcb2c0a0ca1f75d648d40a007fbfbf2cae3",
16),
)
)
RSA_KEY_1030 = RSAPrivateNumbers(
p=int(
"6f4ac8a8172ef1154cf7f80b5e91de723c35a4c512860bfdbafcc3b994a2384bf7796"
"3a2dd0480c7e04d5d418629651a0de8979add6f47b23da14c27a682b69c9", 16),
q=int(
"65a9f83e07dea5b633e036a9dccfb32c46bf53c81040a19c574c3680838fc6d28bde9"
"55c0ff18b30481d4ab52a9f5e9f835459b1348bbb563ad90b15a682fadb3", 16),
d=int(
"290db707b3e1a96445ae8ea93af55a9f211a54ebe52995c2eb28085d1e3f09c986e73"
"a00010c8e4785786eaaa5c85b98444bd93b585d0c24363ccc22c482e150a3fd900176"
"86968e4fa20423ae72823b0049defceccb39bb34aa4ef64e6b14463b76d6a871c859e"
"37285455b94b8e1527d1525b1682ac6f7c8fd79d576c55318c1", 16),
dmp1=int(
"23f7fa84010225dea98297032dac5d45745a2e07976605681acfe87e0920a8ab3caf5"
"9d9602f3d63dc0584f75161fd8fff20c626c21c5e02a85282276a74628a9", 16),
dmq1=int(
"18ebb657765464a8aa44bf019a882b72a2110a77934c54915f70e6375088b10331982"
"962bce1c7edd8ef9d3d95aa2566d2a99da6ebab890b95375919408d00f33", 16),
iqmp=int(
"3d59d208743c74054151002d77dcdfc55af3d41357e89af88d7eef2767be54c290255"
"9258d85cf2a1083c035a33e65a1ca46dc8b706847c1c6434cef7b71a9dae", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"2c326574320818a6a8cb6b3328e2d6c1ba2a3f09b6eb2bc543c03ab18eb5efdaa"
"8fcdbb6b4e12168304f587999f9d96a421fc80cb933a490df85d25883e6a88750"
"d6bd8b3d4117251eee8f45e70e6daac7dbbd92a9103c623a09355cf00e3f16168"
"e38b9c4cb5b368deabbed8df466bc6835eaba959bc1c2f4ec32a09840becc8b",
16),
)
)
RSA_KEY_1031 = RSAPrivateNumbers(
p=int(
"c0958c08e50137db989fb7cc93abf1984543e2f955d4f43fb2967f40105e79274c852"
"293fa06ce63ca8436155e475ed6d1f73fea4c8e2516cc79153e3dc83e897", 16),
q=int(
"78cae354ea5d6862e5d71d20273b7cddb8cdfab25478fe865180676b04250685c4d03"
"30c216574f7876a7b12dfe69f1661d3b0cea6c2c0dcfb84050f817afc28d", 16),
d=int(
"1d55cc02b17a5d25bfb39f2bc58389004d0d7255051507f75ef347cdf5519d1a00f4b"
"d235ce4171bfab7bdb7a6dcfae1cf41433fb7da5923cc84f15a675c0b83492c95dd99"
"a9fc157aea352ffdcbb5d59dbc3662171d5838d69f130678ee27841a79ef64f679ce9"
"3821fa69c03f502244c04b737edad8967def8022a144feaab29", 16),
dmp1=int(
"5b1c2504ec3a984f86b4414342b5bcf59a0754f13adf25b2a0edbc43f5ba8c3cc061d"
"80b03e5866d059968f0d10a98deaeb4f7830436d76b22cf41f2914e13eff", 16),
dmq1=int(
"6c361e1819691ab5d67fb2a8f65c958d301cdf24d90617c68ec7005edfb4a7b638cde"
"79d4b61cfba5c86e8c0ccf296bc7f611cb8d4ae0e072a0f68552ec2d5995", 16),
iqmp=int(
"b7d61945fdc8b92e075b15554bab507fa8a18edd0a18da373ec6c766c71eece61136a"
"84b90b6d01741d40458bfad17a9bee9d4a8ed2f6e270782dc3bf5d58b56e", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"5adebaa926ea11fb635879487fdd53dcfbb391a11ac7279bb3b4877c9b811370a"
"9f73da0690581691626d8a7cf5d972cced9c2091ccf999024b23b4e6dc6d99f80"
"a454737dec0caffaebe4a3fac250ed02079267c8f39620b5ae3e125ca35338522"
"dc9353ecac19cb2fe3b9e3a9291619dbb1ea3a7c388e9ee6469fbf5fb22892b",
16),
)
)
RSA_KEY_1536 = RSAPrivateNumbers(
p=int(
"f1a65fa4e2aa6e7e2b560251e8a4cd65b625ad9f04f6571785782d1c213d91c961637"
"0c572f2783caf2899f7fb690cf99a0184257fbd4b071b212c88fb348279a5387e61f1"
"17e9c62980c45ea863fa9292087c0f66ecdcde6443d5a37268bf71", 16),
q=int(
"e54c2cbc3839b1da6ae6fea45038d986d6f523a3ae76051ba20583aab711ea5965cf5"
"3cf54128cc9573f7460bba0fd6758a57aaf240c391790fb38ab473d83ef735510c53d"
"1d10c31782e8fd7da42615e33565745c30a5e6ceb2a3ae0666cc35", 16),
d=int(
"7bcad87e23da2cb2a8c328883fabce06e1f8e9b776c8bf253ad9884e6200e3bd9bd3b"
"a2cbe87d3854527bf005ba5d878c5b0fa20cfb0a2a42884ae95ca12bf7304285e9214"
"5e992f7006c7c0ae839ad550da495b143bec0f4806c7f44caed45f3ccc6dc44cfaf30"
"7abdb757e3d28e41c2d21366835c0a41e50a95af490ac03af061d2feb36ac0afb87be"
"a13fb0f0c5a410727ebedb286c77f9469473fae27ef2c836da6071ef7efc1647f1233"
"4009a89eecb09a8287abc8c2afd1ddd9a1b0641", 16),
dmp1=int(
"a845366cd6f9df1f34861bef7594ed025aa83a12759e245f58adaa9bdff9c3befb760"
"75d3701e90038e888eec9bf092df63400152cb25fc07effc6c74c45f0654ccbde15cd"
"90dd5504298a946fa5cf22a956072da27a6602e6c6e5c97f2db9c1", 16),
dmq1=int(
"28b0c1e78cdac03310717992d321a3888830ec6829978c048156152d805b4f8919c61"
"70b5dd204e5ddf3c6c53bc6aff15d0bd09faff7f351b94abb9db980b31f150a6d7573"
"08eb66938f89a5225cb4dd817a824c89e7a0293b58fc2eefb7e259", 16),
iqmp=int(
"6c1536c0e16e42a094b6caaf50231ba81916871497d73dcbbbd4bdeb9e60cae0413b3"
"8143b5d680275b29ed7769fe5577e4f9b3647ddb064941120914526d64d80016d2eb7"
"dc362da7c569623157f3d7cff8347f11494bf5c048d77e28d3f515", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"d871bb2d27672e54fc62c4680148cbdf848438da804e2c48b5a9c9f9daf6cc6e8"
"ea7d2296f25064537a9a542aef3dd449ea75774238d4da02c353d1bee70013dcc"
"c248ceef4050160705c188043c8559bf6dbfb6c4bb382eda4e9547575a8227d5b"
"3c0a7088391364cf9f018d8bea053b226ec65e8cdbeaf48a071d0074860a734b1"
"cb7d2146d43014b20776dea42f7853a54690e6cbbf3331a9f43763cfe2a51c329"
"3bea3b2eebec0d8e43eb317a443afe541107d886e5243c096091543ae65", 16),
)
)
RSA_KEY_2048 = RSAPrivateNumbers(
p=int(
"e14202e58c5f7446648d75e5dc465781f661f6b73000c080368afcfb21377f4ef19da"
"845d4ef9bc6b151f6d9f34629103f2e57615f9ba0a3a2fbb035069e1d63b4bb0e78ad"
"dad1ec3c6f87e25c877a1c4c1972098e09158ef7b9bc163852a18d44a70b7b31a03dc"
"2614fd9ab7bf002cba79054544af3bfbdb6aed06c7b24e6ab", 16),
q=int(
"dbe2bea1ff92599bd19f9d045d6ce62250c05cfeac5117f3cf3e626cb696e3d886379"
"557d5a57b7476f9cf886accfd40508a805fe3b45a78e1a8a125e516cda91640ee6398"
"ec5a39d3e6b177ef12ab00d07907a17640e4ca454fd8487da3c4ffa0d5c2a5edb1221"
"1c8e33c7ee9fa6753771fd111ec04b8317f86693eb2928c89", 16),
d=int(
"aef17f80f2653bc30539f26dd4c82ed6abc1d1b53bc0abcdbee47e9a8ab433abde865"
"9fcfae1244d22de6ad333c95aee7d47f30b6815065ac3322744d3ea75058002cd1b29"
"3141ee2a6dc682342432707080071bd2131d6262cab07871c28aa5238b87173fb78c3"
"7f9c7bcd18c12e8971bb77fd9fa3e0792fec18d8d9bed0b03ba02b263606f24dbace1"
"c8263ce2802a769a090e993fd49abc50c3d3c78c29bee2de0c98055d2f102f1c5684b"
"8dddee611d5205392d8e8dd61a15bf44680972a87f040a611a149271eeb2573f8bf6f"
"627dfa70e77def2ee6584914fa0290e041349ea0999cdff3e493365885b906cbcf195"
"843345809a85098cca90fea014a21", 16),
dmp1=int(
"9ba56522ffcfa5244eae805c87cc0303461f82be29691b9a7c15a5a050df6c143c575"
"7c288d3d7ab7f32c782e9d9fcddc10a604e6425c0e5d0e46069035d95a923646d276d"
"d9d95b8696fa29ab0de18e53f6f119310f8dd9efca62f0679291166fed8cbd5f18fe1"
"3a5f1ead1d71d8c90f40382818c18c8d069be793dbc094f69", 16),
dmq1=int(
"a8d4a0aaa2212ccc875796a81353da1fdf00d46676c88d2b96a4bfcdd924622d8e607"
"f3ac1c01dda7ebfb0a97dd7875c2a7b2db6728fb827b89c519f5716fb3228f4121647"
"04b30253c17de2289e9cce3343baa82eb404f789e094a094577a9b0c5314f1725fdf5"
"8e87611ad20da331bd30b8aebc7dc97d0e9a9ba8579772c9", 16),
iqmp=int(
"17bd5ef638c49440d1853acb3fa63a5aca28cb7f94ed350db7001c8445da8943866a7"
"0936e1ee2716c98b484e357cc054d82fbbd98d42f880695d38a1dd4eb096f629b9417"
"aca47e6de5da9f34e60e8a0ffd7e35be74deeef67298d94b3e0db73fc4b7a4cb360c8"
"9d2117a0bfd9434d37dc7c027d6b01e5295c875015510917d", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"c17afc7e77474caa5aa83036158a3ffbf7b5216851ba2230e5d6abfcc1c6cfef5"
"9e923ea1330bc593b73802ab608a6e4a3306523a3116ba5aa3966145174e13b6c"
"49e9b78062e449d72efb10fd49e91fa08b96d051e782e9f5abc5b5a6f7984827a"
"db8e73da00f22b2efdcdb76eab46edad98ed65662743fdc6c0e336a5d0cdbaa7d"
"c29e53635e24c87a5b2c4215968063cdeb68a972babbc1e3cff00fb9a80e372a4"
"d0c2c920d1e8cee333ce470dc2e8145adb05bf29aee1d24f141e8cc784989c587"
"fc6fbacd979f3f2163c1d7299b365bc72ffe2848e967aed1e48dcc515b3a50ed4"
"de04fd053846ca10a223b10cc841cc80fdebee44f3114c13e886af583", 16),
)
)
RSA_KEY_2048_ALT = RSAPrivateNumbers(
d=int(
"7522768467449591813737881904131688860626637897199391200040629"
"8641018746450502628484395471408986929218353894683769457466923"
"3079369551423094451013669595729568593462009746342148367797495"
"5529909313614750246672441810743580455199636293179539903480635"
"3091286716112931976896334411287175213124504134181121011488550"
"5290054443979198998564749640800633368957384058700741073997703"
"8877364695937023906368630297588990131009278072614118207348356"
"4640244134189285070202534488517371577359510236833464698189075"
"5160693085297816063285814039518178249628112908466649245545732"
"5791532385553960363601827996980725025898649392004494256400884"
"092073"
),
dmp1=int(
"5847872614112935747739644055317429405973942336206460017493394"
"9737607778799766591021036792892472774720417920838206576785118"
"8889624058962939702950175807073343659386156232294197300491647"
"1029508414050591959344812347424476498076532682798598325230069"
"0925827594762920534235575029199380552228825468180187156871965"
"973"
),
dmq1=int(
"2949536259161239302081155875068405238857801001054083407704879"
"8210876832264504685327766351157044892283801611558399025326793"
"4131638001934454489864437565651739832511702151461257267169691"
"6611992398459006200708626815153304591390855807749769768978152"
"9854112656599931724820610358669306523835327459478374630794532"
"167"
),
iqmp=int(
"7331180989818931535458916053540252830484856703208982675535284"
"4613815808798190559315018094080936347757336989616401164752221"
"8101156529898067044923499386460167055405998646366011838018441"
"3678947694258190172377716154009305082091341215866326061721180"
"3836418654472188816187630316821692982783286322262994892003058"
"782"
),
p=int(
"1460007723851883695617573533155574746587863843382715314919865"
"2434108956187429726002840717317310431378483921058946835896252"
"7109559207437158778332364464259678946305487699031865937075508"
"8616612925453842458055546540240601585731206561647892336916583"
"0023641764106581040198845259766246869529221084602380669333021"
"0819"
),
q=int(
"1433897765867889178402883410610177836503402597775250087462018"
"4617952933433119527945447840336616357136736935069377619782227"
"2822380830300262175671282877680573202309319960687756231128996"
"9764855320953993690199846269451095044922353809602378616938811"
"7513900906279873343591486841303392490561500301994171338761080"
"4439"
),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"209350181338107812610165420955871971489973659392253291327"
"839812910252466502190690572476688311285621239204212139711"
"207388949164851984253143698667018532039612470954223918242"
"145976986600705122576087630525229796950722166468064721258"
"490916138706756006902066136471049807637157890128560592039"
"941717275079733754782848729566190631725183735944031456237"
"089928120178187552521649483240599003240074352860189285952"
"078970127554801074176375499583703254849309993132931268013"
"715070507278514207864914944621214574162116786377990456375"
"964817771730371110612100247262908550409785456157505694419"
"00451152778245269283276012328748538414051025541"
)
)
)
| 48.478477 | 80 | 0.765787 |
from __future__ import absolute_import, division, print_function
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateNumbers, RSAPublicNumbers
)
RSA_KEY_512 = RSAPrivateNumbers(
p=int(
"d57846898d5c0de249c08467586cb458fa9bc417cdf297f73cfc52281b787cd9", 16
),
q=int(
"d10f71229e87e010eb363db6a85fd07df72d985b73c42786191f2ce9134afb2d", 16
),
d=int(
"272869352cacf9c866c4e107acc95d4c608ca91460a93d28588d51cfccc07f449"
"18bbe7660f9f16adc2b4ed36ca310ef3d63b79bd447456e3505736a45a6ed21", 16
),
dmp1=int(
"addff2ec7564c6b64bc670d250b6f24b0b8db6b2810099813b7e7658cecf5c39", 16
),
dmq1=int(
"463ae9c6b77aedcac1397781e50e4afc060d4b216dc2778494ebe42a6850c81", 16
),
iqmp=int(
"54deef8548f65cad1d411527a32dcb8e712d3e128e4e0ff118663fae82a758f4", 16
),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"ae5411f963c50e3267fafcf76381c8b1e5f7b741fdb2a544bcf48bd607b10c991"
"90caeb8011dc22cf83d921da55ec32bd05cac3ee02ca5e1dbef93952850b525",
16
),
)
)
RSA_KEY_512_ALT = RSAPrivateNumbers(
p=int(
"febe19c29a0b50fefa4f7b1832f84df1caf9be8242da25c9d689e18226e67ce5",
16),
q=int(
"eb616c639dd999feda26517e1c77b6878f363fe828c4e6670ec1787f28b1e731",
16),
d=int(
"80edecfde704a806445a4cc782b85d3f36f17558f385654ea767f006470fdfcbda5e2"
"206839289d3f419b4e4fb8e1acee1b4fb9c591f69b64ec83937f5829241", 16),
dmp1=int(
"7f4fa06e2a3077a54691cc5216bf13ad40a4b9fa3dd0ea4bca259487484baea5",
16),
dmq1=int(
"35eaa70d5a8711c352ed1c15ab27b0e3f46614d575214535ae279b166597fac1",
16),
iqmp=int(
"cc1f272de6846851ec80cb89a02dbac78f44b47bc08f53b67b4651a3acde8b19",
16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"ea397388b999ef0f7e7416fa000367efd9a0ba0deddd3f8160d1c36d62267f210"
"fbd9c97abeb6654450ff03e7601b8caa6c6f4cba18f0b52c179d17e8f258ad5",
16),
)
)
RSA_KEY_522 = RSAPrivateNumbers(
p=int(
"1a8aab9a069f92b52fdf05824f2846223dc27adfc806716a247a77d4c36885e4bf",
16),
q=int(
"19e8d620d177ec54cdb733bb1915e72ef644b1202b889ceb524613efa49c07eb4f",
16),
d=int(
"10b8a7c0a92c1ae2d678097d69db3bfa966b541fb857468291d48d1b52397ea2bac0d"
"4370c159015c7219e3806a01bbafaffdd46f86e3da1e2d1fe80a0369ccd745", 16),
dmp1=int(
"3eb6277f66e6e2dcf89f1b8529431f730839dbd9a3e49555159bc8470eee886e5",
16),
dmq1=int(
"184b4d74aa54c361e51eb23fee4eae5e4786b37b11b6e0447af9c0b9c4e4953c5b",
16),
iqmp=int(
"f80e9ab4fa7b35d0d232ef51c4736d1f2dcf2c7b1dd8716211b1bf1337e74f8ae",
16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"2afaea0e0bb6fca037da7d190b5270a6c665bc18e7a456f7e69beaac4433db748"
"ba99acdd14697e453bca596eb35b47f2d48f1f85ef08ce5109dad557a9cf85ebf"
"1", 16),
),
)
RSA_KEY_599 = RSAPrivateNumbers(
p=int(
"cf95d20be0c7af69f4b3d909f65d858c26d1a7ef34da8e3977f4fa230580e58814b54"
"24be99", 16),
q=int(
"6052be4b28debd4265fe12ace5aa4a0c4eb8d63ff8853c66824b35622161eb48a3bc8"
"c3ada5", 16),
d=int(
"69d9adc465e61585d3142d7cc8dd30605e8d1cbbf31009bc2cd5538dc40528d5d68ee"
"fe6a42d23674b6ec76e192351bf368c8968f0392110bf1c2825dbcff071270b80adcc"
"fa1d19d00a1", 16),
dmp1=int(
"a86d10edde456687fba968b1f298d2e07226adb1221b2a466a93f3d83280f0bb46c20"
"2b6811", 16),
dmq1=int(
"40d570e08611e6b1da94b95d46f8e7fe80be48f7a5ff8838375b08039514a399b11c2"
"80735", 16),
iqmp=int(
"cd051cb0ea68b88765c041262ace2ec4db11dab14afd192742e34d5da3328637fabdf"
"bae26e", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"4e1b470fe00642426f3808e74c959632dd67855a4c503c5b7876ccf4dc7f6a1a4"
"9107b90d26daf0a7879a6858218345fbc6e59f01cd095ca5647c27c25265e6c47"
"4fea89537191c7073d9d", 16),
)
)
RSA_KEY_745 = RSAPrivateNumbers(
p=int(
"1c5a0cfe9a86debd19eca33ba961f15bc598aa7983a545ce775b933afc89eb51bcf90"
"836257fdd060d4b383240241d", 16
),
q=int(
"fb2634f657f82ee6b70553382c4e2ed26b947c97ce2f0016f1b282cf2998184ad0527"
"a9eead826dd95fe06b57a025", 16
),
d=int(
"402f30f976bc07d15ff0779abff127b20a8b6b1d0024cc2ad8b6762d38f174f81e792"
"3b49d80bdbdd80d9675cbc7b2793ec199a0430eb5c84604dacfdb29259ae6a1a44676"
"22f0b23d4cb0f5cb1db4b8173c8d9d3e57a74dbd200d2141", 16),
dmp1=int(
"e5e95b7751a6649f199be21bef7a51c9e49821d945b6fc5f538b4a670d8762c375b00"
"8e70f31d52b3ea2bd14c3101", 16),
dmq1=int(
"12b85d5843645f72990fcf8d2f58408b34b3a3b9d9078dd527fceb5d2fb7839008092"
"dd4aca2a1fb00542801dcef5", 16),
iqmp=int(
"5672740d947f621fc7969e3a44ec26736f3f819863d330e63e9409e139d20753551ac"
"c16544dd2bdadb9dee917440", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"1bd085f92237774d34013b477ceebbb2f2feca71118db9b7429341477947e7b1d"
"04e8c43ede3c52bb25781af58d4ff81289f301eac62dc3bcd7dafd7a4d5304e9f"
"308e766952fbf2b62373e66611fa53189987dbef9f7243dcbbeb25831", 16),
)
)
RSA_KEY_768 = RSAPrivateNumbers(
p=int(
"f80c0061b607f93206b68e208906498d68c6e396faf457150cf975c8f849848465869"
"7ecd402313397088044c4c2071b", 16),
q=int(
"e5b5dbecc93c6d306fc14e6aa9737f9be2728bc1a326a8713d2849b34c1cb54c63468"
"3a68abb1d345dbf15a3c492cf55", 16),
d=int(
"d44601442255ffa331212c60385b5e898555c75c0272632ff42d57c4b16ca97dbca9f"
"d6d99cd2c9fd298df155ed5141b4be06c651934076133331d4564d73faed7ce98e283"
"2f7ce3949bc183be7e7ca34f6dd04a9098b6c73649394b0a76c541", 16),
dmp1=int(
"a5763406fa0b65929661ce7b2b8c73220e43a5ebbfe99ff15ddf464fd238105ad4f2a"
"c83818518d70627d8908703bb03", 16),
dmq1=int(
"cb467a9ef899a39a685aecd4d0ad27b0bfdc53b68075363c373d8eb2bed8eccaf3533"
"42f4db735a9e087b7539c21ba9d", 16),
iqmp=int(
"5fe86bd3aee0c4d09ef11e0530a78a4534c9b833422813b5c934a450c8e564d8097a0"
"6fd74f1ebe2d5573782093f587a", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"de92f1eb5f4abf426b6cac9dd1e9bf57132a4988b4ed3f8aecc15e251028bd6df"
"46eb97c711624af7db15e6430894d1b640c13929329241ee094f5a4fe1a20bc9b"
"75232320a72bc567207ec54d6b48dccb19737cf63acc1021abb337f19130f7",
16),
)
)
RSA_KEY_1024 = RSAPrivateNumbers(
p=int(
"ea4d9d9a1a068be44b9a5f8f6de0512b2c5ba1fb804a4655babba688e6e890b347c1a"
"7426685a929337f513ae4256f0b7e5022d642237f960c5b24b96bee8e51", 16),
q=int(
"cffb33e400d6f08b410d69deb18a85cf0ed88fcca9f32d6f2f66c62143d49aff92c11"
"4de937d4f1f62d4635ee89af99ce86d38a2b05310f3857c7b5d586ac8f9", 16),
d=int(
"3d12d46d04ce942fb99be7bf30587b8cd3e21d75a2720e7bda1b867f1d418d91d8b9f"
"e1c00181fdde94f2faf33b4e6f800a1b3ae3b972ccb6d5079dcb6c794070ac8306d59"
"c00b58b7a9a81122a6b055832de7c72334a07494d8e7c9fbeed2cc37e011d9e6bfc6e"
"9bcddbef7f0f5771d9cf82cd4b268c97ec684575c24b6c881", 16),
dmp1=int(
"470f2b11257b7ec9ca34136f487f939e6861920ad8a9ae132a02e74af5dceaa5b4c98"
"2949ccb44b67e2bcad2f58674db237fe250e0d62b47b28fa1dfaa603b41", 16),
dmq1=int(
"c616e8317d6b3ae8272973709b80e8397256697ff14ea03389de454f619f99915a617"
"45319fefbe154ec1d49441a772c2f63f7d15c478199afc60469bfd0d561", 16),
iqmp=int(
"d15e7c9ad357dfcd5dbdc8427680daf1006761bcfba93a7f86589ad88832a8d564b1c"
"d4291a658c96fbaea7ca588795820902d85caebd49c2d731e3fe0243130", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"be5aac07456d990133ebce69c06b48845b972ab1ad9f134bc5683c6b5489b5119"
"ede07be3bed0e355d48e0dfab1e4fb5187adf42d7d3fb0401c082acb8481bf17f"
"0e871f8877be04c3a1197d40aa260e2e0c48ed3fd2b93dc3fc0867591f67f3cd6"
"0a77adee1d68a8c3730a5702485f6ac9ede7f0fd2918e037ee4cc1fc1b4c9",
16),
)
)
RSA_KEY_1025 = RSAPrivateNumbers(
p=int(
"18e9bfb7071725da04d31c103fa3563648c69def43a204989214eb57b0c8b299f9ef3"
"5dda79a62d8d67fd2a9b69fbd8d0490aa2edc1e111a2b8eb7c737bb691a5", 16),
q=int(
"d8eccaeeb95815f3079d13685f3f72ca2bf2550b349518049421375df88ca9bbb4ba8"
"cb0e3502203c9eeae174112509153445d251313e4711a102818c66fcbb7", 16),
d=int(
"fe9ac54910b8b1bc948a03511c54cab206a1d36d50d591124109a48abb7480977ccb0"
"47b4d4f1ce7b0805df2d4fa3fe425f49b78535a11f4b87a4eba0638b3340c23d4e6b2"
"1ecebe9d5364ea6ead2d47b27836019e6ecb407000a50dc95a8614c9d0031a6e3a524"
"d2345cfb76e15c1f69d5ba35bdfb6ec63bcb115a757ef79d9", 16),
dmp1=int(
"18537e81006a68ea76d590cc88e73bd26bc38d09c977959748e5265c0ce21c0b5fd26"
"53d975f97ef759b809f791487a8fff1264bf561627fb4527a3f0bbb72c85", 16),
dmq1=int(
"c807eac5a1f1e1239f04b04dd16eff9a00565127a91046fa89e1eb5d6301cace85447"
"4d1f47b0332bd35b4214b66e9166953241538f761f30d969272ee214f17", 16),
iqmp=int(
"133aa74dd41fe70fa244f07d0c4091a22f8c8f0134fe6aea9ec8b55383b758fefe358"
"2beec36eca91715eee7d21931f24fa9e97e8e3a50f9cd0f731574a5eafcc", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"151c44fed756370fb2d4a0e6ec7dcac84068ca459b6aaf22daf902dca72c77563"
"bf276fe3523f38f5ddaf3ea9aa88486a9d8760ff732489075862bee0e599de5c5"
"f509b4519f4f446521bad15cd279a498fe1e89107ce0d237e3103d7c5eb801666"
"42e2924b152aebff97b71fdd2d68ebb45034cc784e2e822ff6d1edf98af3f3",
16),
)
)
RSA_KEY_1026 = RSAPrivateNumbers(
p=int(
"1fcbfb8719c5bdb5fe3eb0937c76bb096e750b9442dfe31d6a877a13aed2a6a4e9f79"
"40f815f1c307dd6bc2b4b207bb6fe5be3a15bd2875a957492ce197cdedb1", 16),
q=int(
"1f704a0f6b8966dd52582fdc08227dd3dbaeaa781918b41144b692711091b4ca4eb62"
"985c3513853828ce8739001dfba9a9a7f1a23cbcaf74280be925e2e7b50d", 16),
d=int(
"c67975e35a1d0d0b3ebfca736262cf91990cb31cf4ac473c0c816f3bc2720bcba2475"
"e8d0de8535d257816c0fc53afc1b597eada8b229069d6ef2792fc23f59ffb4dc6c3d9"
"0a3c462082025a4cba7561296dd3d8870c4440d779406f00879afe2c681e7f5ee055e"
"ff829e6e55883ec20830c72300762e6e3a333d94b4dbe4501", 16),
dmp1=int(
"314730ca7066c55d086a9fbdf3670ef7cef816b9efea8b514b882ae9d647217cf41d7"
"e9989269dc9893d02e315cb81f058c49043c2cac47adea58bdf5e20e841", 16),
dmq1=int(
"1da28a9d687ff7cfeebc2439240de7505a8796376968c8ec723a2b669af8ce53d9c88"
"af18540bd78b2da429014923fa435f22697ac60812d7ca9c17a557f394cd", 16),
iqmp=int(
"727947b57b8a36acd85180522f1b381bce5fdbd962743b3b14af98a36771a80f58ddd"
"62675d72a5935190da9ddc6fd6d6d5e9e9f805a2e92ab8d56b820493cdf", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"3e7a5e6483e55eb8b723f9c46732d21b0af9e06a4a1099962d67a35ee3f62e312"
"9cfae6ab0446da18e26f33e1d753bc1cc03585c100cf0ab5ef056695706fc8b0c"
"9c710cd73fe6e5beda70f515a96fabd3cc5ac49efcb2594b220ff3b603fcd927f"
"6a0838ef04bf52f3ed9eab801f09e5aed1613ddeb946ed0fbb02060b3a36fd",
16),
)
)
RSA_KEY_1027 = RSAPrivateNumbers(
p=int(
"30135e54cfb072c3d3eaf2000f3ed92ceafc85efc867b9d4bf5612f2978c432040093"
"4829f741c0f002b54af2a4433ff872b6321ef00ff1e72cba4e0ced937c7d", 16),
q=int(
"1d01a8aead6f86b78c875f18edd74214e06535d65da054aeb8e1851d6f3319b4fb6d8"
"6b01e07d19f8261a1ded7dc08116345509ab9790e3f13e65c037e5bb7e27", 16),
d=int(
"21cf4477df79561c7818731da9b9c88cd793f1b4b8e175bd0bfb9c0941a4dc648ecf1"
"6d96b35166c9ea116f4c2eb33ce1c231e641a37c25e54c17027bdec08ddafcb83642e"
"795a0dd133155ccc5eed03b6e745930d9ac7cfe91f9045149f33295af03a2198c660f"
"08d8150d13ce0e2eb02f21ac75d63b55822f77bd5be8d07619", 16),
dmp1=int(
"173fb695931e845179511c18b546b265cb79b517c135902377281bdf9f34205e1f399"
"4603ad63e9f6e7885ea73a929f03fa0d6bed943051ce76cddde2d89d434d", 16),
dmq1=int(
"10956b387b2621327da0c3c8ffea2af8be967ee25163222746c28115a406e632a7f12"
"5a9397224f1fa5c116cd3a313e5c508d31db2deb83b6e082d213e33f7fcf", 16),
iqmp=int(
"234f833949f2c0d797bc6a0e906331e17394fa8fbc8449395766d3a8d222cf6167c48"
"8e7fe1fe9721d3e3b699a595c8e6f063d92bd840dbc84d763b2b37002109", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"57281707d7f9b1369c117911758980e32c05b133ac52c225bcf68b79157ff47ea"
"0a5ae9f579ef1fd7e42937f921eb3123c4a045cc47a2159fbbf904783e654954c"
"42294c30a95c15db7c7b91f136244e548f62474b137087346c5522e54f226f49d"
"6c93bc58cb39972e41bde452bb3ae9d60eb93e5e1ce91d222138d9890c7d0b",
16),
)
)
RSA_KEY_1028 = RSAPrivateNumbers(
p=int(
"359d17378fae8e9160097daee78a206bd52efe1b757c12a6da8026cc4fc4bb2620f12"
"b8254f4db6aed8228be8ee3e5a27ec7d31048602f01edb00befd209e8c75", 16),
q=int(
"33a2e70b93d397c46e63b273dcd3dcfa64291342a6ce896e1ec8f1c0edc44106550f3"
"c06e7d3ca6ea29eccf3f6ab5ac6235c265313d6ea8e8767e6a343f616581", 16),
d=int(
"880640088d331aa5c0f4cf2887809a420a2bc086e671e6ffe4e47a8c80792c038a314"
"9a8e45ef9a72816ab45b36e3af6800351067a6b2751843d4232413146bb575491463a"
"8addd06ce3d1bcf7028ec6c5d938c545a20f0a40214b5c574ca7e840062b2b5f8ed49"
"4b144bb2113677c4b10519177fee1d4f5fb8a1c159b0b47c01", 16),
dmp1=int(
"75f8c52dad2c1cea26b8bba63236ee4059489e3d2db766136098bcc6b67fde8f77cd3"
"640035107bfb1ffc6480983cfb84fe0c3be008424ebc968a7db7e01f005", 16),
dmq1=int(
"3893c59469e4ede5cd0e6ff9837ca023ba9b46ff40c60ccf1bec10f7d38db5b1ba817"
"6c41a3f750ec4203b711455aca06d1e0adffc5cffa42bb92c7cb77a6c01", 16),
iqmp=int(
"ad32aafae3c962ac25459856dc8ef1f733c3df697eced29773677f435d186cf759d1a"
"5563dd421ec47b4d7e7f12f29647c615166d9c43fc49001b29089344f65", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"ad0696bef71597eb3a88e135d83c596930cac73868fbd7e6b2d64f34eea5c28cc"
"e3510c68073954d3ba4deb38643e7a820a4cf06e75f7f82eca545d412bd637819"
"45c28d406e95a6cced5ae924a8bfa4f3def3e0250d91246c269ec40c89c93a85a"
"cd3770ba4d2e774732f43abe94394de43fb57f93ca25f7a59d75d400a3eff5",
16),
)
)
RSA_KEY_1029 = RSAPrivateNumbers(
p=int(
"66f33e513c0b6b6adbf041d037d9b1f0ebf8de52812a3ac397a963d3f71ba64b3ad04"
"e4d4b5e377e6fa22febcac292c907dc8dcfe64c807fd9a7e3a698850d983", 16),
q=int(
"3b47a89a19022461dcc2d3c05b501ee76955e8ce3cf821beb4afa85a21a26fd7203db"
"deb8941f1c60ada39fd6799f6c07eb8554113f1020460ec40e93cd5f6b21", 16),
d=int(
"280c42af8b1c719821f2f6e2bf5f3dd53c81b1f3e1e7cc4fce6e2f830132da0665bde"
"bc1e307106b112b52ad5754867dddd028116cf4471bc14a58696b99524b1ad8f05b31"
"cf47256e54ab4399b6a073b2c0452441438dfddf47f3334c13c5ec86ece4d33409056"
"139328fafa992fb5f5156f25f9b21d3e1c37f156d963d97e41", 16),
dmp1=int(
"198c7402a4ec10944c50ab8488d7b5991c767e75eb2817bd427dff10335ae141fa2e8"
"7c016dc22d975cac229b9ffdf7d943ddfd3a04b8bf82e83c3b32c5698b11", 16),
dmq1=int(
"15fd30c7687b68ef7c2a30cdeb913ec56c4757c218cf9a04d995470797ee5f3a17558"
"fbb6d00af245d2631d893b382da48a72bc8a613024289895952ab245b0c1", 16),
iqmp=int(
"4f8fde17e84557a3f4e242d889e898545ab55a1a8e075c9bb0220173ccffe84659abe"
"a235104f82e32750309389d4a52af57dbb6e48d831917b6efeb190176570", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"17d6e0a09aa5b2d003e51f43b9c37ffde74688f5e3b709fd02ef375cb6b8d15e2"
"99a9f74981c3eeaaf947d5c2d64a1a80f5c5108a49a715c3f7be95a016b8d3300"
"965ead4a4df76e642d761526803e9434d4ec61b10cb50526d4dcaef02593085de"
"d8c331c1b27b200a45628403065efcb2c0a0ca1f75d648d40a007fbfbf2cae3",
16),
)
)
RSA_KEY_1030 = RSAPrivateNumbers(
p=int(
"6f4ac8a8172ef1154cf7f80b5e91de723c35a4c512860bfdbafcc3b994a2384bf7796"
"3a2dd0480c7e04d5d418629651a0de8979add6f47b23da14c27a682b69c9", 16),
q=int(
"65a9f83e07dea5b633e036a9dccfb32c46bf53c81040a19c574c3680838fc6d28bde9"
"55c0ff18b30481d4ab52a9f5e9f835459b1348bbb563ad90b15a682fadb3", 16),
d=int(
"290db707b3e1a96445ae8ea93af55a9f211a54ebe52995c2eb28085d1e3f09c986e73"
"a00010c8e4785786eaaa5c85b98444bd93b585d0c24363ccc22c482e150a3fd900176"
"86968e4fa20423ae72823b0049defceccb39bb34aa4ef64e6b14463b76d6a871c859e"
"37285455b94b8e1527d1525b1682ac6f7c8fd79d576c55318c1", 16),
dmp1=int(
"23f7fa84010225dea98297032dac5d45745a2e07976605681acfe87e0920a8ab3caf5"
"9d9602f3d63dc0584f75161fd8fff20c626c21c5e02a85282276a74628a9", 16),
dmq1=int(
"18ebb657765464a8aa44bf019a882b72a2110a77934c54915f70e6375088b10331982"
"962bce1c7edd8ef9d3d95aa2566d2a99da6ebab890b95375919408d00f33", 16),
iqmp=int(
"3d59d208743c74054151002d77dcdfc55af3d41357e89af88d7eef2767be54c290255"
"9258d85cf2a1083c035a33e65a1ca46dc8b706847c1c6434cef7b71a9dae", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"2c326574320818a6a8cb6b3328e2d6c1ba2a3f09b6eb2bc543c03ab18eb5efdaa"
"8fcdbb6b4e12168304f587999f9d96a421fc80cb933a490df85d25883e6a88750"
"d6bd8b3d4117251eee8f45e70e6daac7dbbd92a9103c623a09355cf00e3f16168"
"e38b9c4cb5b368deabbed8df466bc6835eaba959bc1c2f4ec32a09840becc8b",
16),
)
)
RSA_KEY_1031 = RSAPrivateNumbers(
p=int(
"c0958c08e50137db989fb7cc93abf1984543e2f955d4f43fb2967f40105e79274c852"
"293fa06ce63ca8436155e475ed6d1f73fea4c8e2516cc79153e3dc83e897", 16),
q=int(
"78cae354ea5d6862e5d71d20273b7cddb8cdfab25478fe865180676b04250685c4d03"
"30c216574f7876a7b12dfe69f1661d3b0cea6c2c0dcfb84050f817afc28d", 16),
d=int(
"1d55cc02b17a5d25bfb39f2bc58389004d0d7255051507f75ef347cdf5519d1a00f4b"
"d235ce4171bfab7bdb7a6dcfae1cf41433fb7da5923cc84f15a675c0b83492c95dd99"
"a9fc157aea352ffdcbb5d59dbc3662171d5838d69f130678ee27841a79ef64f679ce9"
"3821fa69c03f502244c04b737edad8967def8022a144feaab29", 16),
dmp1=int(
"5b1c2504ec3a984f86b4414342b5bcf59a0754f13adf25b2a0edbc43f5ba8c3cc061d"
"80b03e5866d059968f0d10a98deaeb4f7830436d76b22cf41f2914e13eff", 16),
dmq1=int(
"6c361e1819691ab5d67fb2a8f65c958d301cdf24d90617c68ec7005edfb4a7b638cde"
"79d4b61cfba5c86e8c0ccf296bc7f611cb8d4ae0e072a0f68552ec2d5995", 16),
iqmp=int(
"b7d61945fdc8b92e075b15554bab507fa8a18edd0a18da373ec6c766c71eece61136a"
"84b90b6d01741d40458bfad17a9bee9d4a8ed2f6e270782dc3bf5d58b56e", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"5adebaa926ea11fb635879487fdd53dcfbb391a11ac7279bb3b4877c9b811370a"
"9f73da0690581691626d8a7cf5d972cced9c2091ccf999024b23b4e6dc6d99f80"
"a454737dec0caffaebe4a3fac250ed02079267c8f39620b5ae3e125ca35338522"
"dc9353ecac19cb2fe3b9e3a9291619dbb1ea3a7c388e9ee6469fbf5fb22892b",
16),
)
)
RSA_KEY_1536 = RSAPrivateNumbers(
p=int(
"f1a65fa4e2aa6e7e2b560251e8a4cd65b625ad9f04f6571785782d1c213d91c961637"
"0c572f2783caf2899f7fb690cf99a0184257fbd4b071b212c88fb348279a5387e61f1"
"17e9c62980c45ea863fa9292087c0f66ecdcde6443d5a37268bf71", 16),
q=int(
"e54c2cbc3839b1da6ae6fea45038d986d6f523a3ae76051ba20583aab711ea5965cf5"
"3cf54128cc9573f7460bba0fd6758a57aaf240c391790fb38ab473d83ef735510c53d"
"1d10c31782e8fd7da42615e33565745c30a5e6ceb2a3ae0666cc35", 16),
d=int(
"7bcad87e23da2cb2a8c328883fabce06e1f8e9b776c8bf253ad9884e6200e3bd9bd3b"
"a2cbe87d3854527bf005ba5d878c5b0fa20cfb0a2a42884ae95ca12bf7304285e9214"
"5e992f7006c7c0ae839ad550da495b143bec0f4806c7f44caed45f3ccc6dc44cfaf30"
"7abdb757e3d28e41c2d21366835c0a41e50a95af490ac03af061d2feb36ac0afb87be"
"a13fb0f0c5a410727ebedb286c77f9469473fae27ef2c836da6071ef7efc1647f1233"
"4009a89eecb09a8287abc8c2afd1ddd9a1b0641", 16),
dmp1=int(
"a845366cd6f9df1f34861bef7594ed025aa83a12759e245f58adaa9bdff9c3befb760"
"75d3701e90038e888eec9bf092df63400152cb25fc07effc6c74c45f0654ccbde15cd"
"90dd5504298a946fa5cf22a956072da27a6602e6c6e5c97f2db9c1", 16),
dmq1=int(
"28b0c1e78cdac03310717992d321a3888830ec6829978c048156152d805b4f8919c61"
"70b5dd204e5ddf3c6c53bc6aff15d0bd09faff7f351b94abb9db980b31f150a6d7573"
"08eb66938f89a5225cb4dd817a824c89e7a0293b58fc2eefb7e259", 16),
iqmp=int(
"6c1536c0e16e42a094b6caaf50231ba81916871497d73dcbbbd4bdeb9e60cae0413b3"
"8143b5d680275b29ed7769fe5577e4f9b3647ddb064941120914526d64d80016d2eb7"
"dc362da7c569623157f3d7cff8347f11494bf5c048d77e28d3f515", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"d871bb2d27672e54fc62c4680148cbdf848438da804e2c48b5a9c9f9daf6cc6e8"
"ea7d2296f25064537a9a542aef3dd449ea75774238d4da02c353d1bee70013dcc"
"c248ceef4050160705c188043c8559bf6dbfb6c4bb382eda4e9547575a8227d5b"
"3c0a7088391364cf9f018d8bea053b226ec65e8cdbeaf48a071d0074860a734b1"
"cb7d2146d43014b20776dea42f7853a54690e6cbbf3331a9f43763cfe2a51c329"
"3bea3b2eebec0d8e43eb317a443afe541107d886e5243c096091543ae65", 16),
)
)
RSA_KEY_2048 = RSAPrivateNumbers(
p=int(
"e14202e58c5f7446648d75e5dc465781f661f6b73000c080368afcfb21377f4ef19da"
"845d4ef9bc6b151f6d9f34629103f2e57615f9ba0a3a2fbb035069e1d63b4bb0e78ad"
"dad1ec3c6f87e25c877a1c4c1972098e09158ef7b9bc163852a18d44a70b7b31a03dc"
"2614fd9ab7bf002cba79054544af3bfbdb6aed06c7b24e6ab", 16),
q=int(
"dbe2bea1ff92599bd19f9d045d6ce62250c05cfeac5117f3cf3e626cb696e3d886379"
"557d5a57b7476f9cf886accfd40508a805fe3b45a78e1a8a125e516cda91640ee6398"
"ec5a39d3e6b177ef12ab00d07907a17640e4ca454fd8487da3c4ffa0d5c2a5edb1221"
"1c8e33c7ee9fa6753771fd111ec04b8317f86693eb2928c89", 16),
d=int(
"aef17f80f2653bc30539f26dd4c82ed6abc1d1b53bc0abcdbee47e9a8ab433abde865"
"9fcfae1244d22de6ad333c95aee7d47f30b6815065ac3322744d3ea75058002cd1b29"
"3141ee2a6dc682342432707080071bd2131d6262cab07871c28aa5238b87173fb78c3"
"7f9c7bcd18c12e8971bb77fd9fa3e0792fec18d8d9bed0b03ba02b263606f24dbace1"
"c8263ce2802a769a090e993fd49abc50c3d3c78c29bee2de0c98055d2f102f1c5684b"
"8dddee611d5205392d8e8dd61a15bf44680972a87f040a611a149271eeb2573f8bf6f"
"627dfa70e77def2ee6584914fa0290e041349ea0999cdff3e493365885b906cbcf195"
"843345809a85098cca90fea014a21", 16),
dmp1=int(
"9ba56522ffcfa5244eae805c87cc0303461f82be29691b9a7c15a5a050df6c143c575"
"7c288d3d7ab7f32c782e9d9fcddc10a604e6425c0e5d0e46069035d95a923646d276d"
"d9d95b8696fa29ab0de18e53f6f119310f8dd9efca62f0679291166fed8cbd5f18fe1"
"3a5f1ead1d71d8c90f40382818c18c8d069be793dbc094f69", 16),
dmq1=int(
"a8d4a0aaa2212ccc875796a81353da1fdf00d46676c88d2b96a4bfcdd924622d8e607"
"f3ac1c01dda7ebfb0a97dd7875c2a7b2db6728fb827b89c519f5716fb3228f4121647"
"04b30253c17de2289e9cce3343baa82eb404f789e094a094577a9b0c5314f1725fdf5"
"8e87611ad20da331bd30b8aebc7dc97d0e9a9ba8579772c9", 16),
iqmp=int(
"17bd5ef638c49440d1853acb3fa63a5aca28cb7f94ed350db7001c8445da8943866a7"
"0936e1ee2716c98b484e357cc054d82fbbd98d42f880695d38a1dd4eb096f629b9417"
"aca47e6de5da9f34e60e8a0ffd7e35be74deeef67298d94b3e0db73fc4b7a4cb360c8"
"9d2117a0bfd9434d37dc7c027d6b01e5295c875015510917d", 16),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"c17afc7e77474caa5aa83036158a3ffbf7b5216851ba2230e5d6abfcc1c6cfef5"
"9e923ea1330bc593b73802ab608a6e4a3306523a3116ba5aa3966145174e13b6c"
"49e9b78062e449d72efb10fd49e91fa08b96d051e782e9f5abc5b5a6f7984827a"
"db8e73da00f22b2efdcdb76eab46edad98ed65662743fdc6c0e336a5d0cdbaa7d"
"c29e53635e24c87a5b2c4215968063cdeb68a972babbc1e3cff00fb9a80e372a4"
"d0c2c920d1e8cee333ce470dc2e8145adb05bf29aee1d24f141e8cc784989c587"
"fc6fbacd979f3f2163c1d7299b365bc72ffe2848e967aed1e48dcc515b3a50ed4"
"de04fd053846ca10a223b10cc841cc80fdebee44f3114c13e886af583", 16),
)
)
RSA_KEY_2048_ALT = RSAPrivateNumbers(
d=int(
"7522768467449591813737881904131688860626637897199391200040629"
"8641018746450502628484395471408986929218353894683769457466923"
"3079369551423094451013669595729568593462009746342148367797495"
"5529909313614750246672441810743580455199636293179539903480635"
"3091286716112931976896334411287175213124504134181121011488550"
"5290054443979198998564749640800633368957384058700741073997703"
"8877364695937023906368630297588990131009278072614118207348356"
"4640244134189285070202534488517371577359510236833464698189075"
"5160693085297816063285814039518178249628112908466649245545732"
"5791532385553960363601827996980725025898649392004494256400884"
"092073"
),
dmp1=int(
"5847872614112935747739644055317429405973942336206460017493394"
"9737607778799766591021036792892472774720417920838206576785118"
"8889624058962939702950175807073343659386156232294197300491647"
"1029508414050591959344812347424476498076532682798598325230069"
"0925827594762920534235575029199380552228825468180187156871965"
"973"
),
dmq1=int(
"2949536259161239302081155875068405238857801001054083407704879"
"8210876832264504685327766351157044892283801611558399025326793"
"4131638001934454489864437565651739832511702151461257267169691"
"6611992398459006200708626815153304591390855807749769768978152"
"9854112656599931724820610358669306523835327459478374630794532"
"167"
),
iqmp=int(
"7331180989818931535458916053540252830484856703208982675535284"
"4613815808798190559315018094080936347757336989616401164752221"
"8101156529898067044923499386460167055405998646366011838018441"
"3678947694258190172377716154009305082091341215866326061721180"
"3836418654472188816187630316821692982783286322262994892003058"
"782"
),
p=int(
"1460007723851883695617573533155574746587863843382715314919865"
"2434108956187429726002840717317310431378483921058946835896252"
"7109559207437158778332364464259678946305487699031865937075508"
"8616612925453842458055546540240601585731206561647892336916583"
"0023641764106581040198845259766246869529221084602380669333021"
"0819"
),
q=int(
"1433897765867889178402883410610177836503402597775250087462018"
"4617952933433119527945447840336616357136736935069377619782227"
"2822380830300262175671282877680573202309319960687756231128996"
"9764855320953993690199846269451095044922353809602378616938811"
"7513900906279873343591486841303392490561500301994171338761080"
"4439"
),
public_numbers=RSAPublicNumbers(
e=65537,
n=int(
"209350181338107812610165420955871971489973659392253291327"
"839812910252466502190690572476688311285621239204212139711"
"207388949164851984253143698667018532039612470954223918242"
"145976986600705122576087630525229796950722166468064721258"
"490916138706756006902066136471049807637157890128560592039"
"941717275079733754782848729566190631725183735944031456237"
"089928120178187552521649483240599003240074352860189285952"
"078970127554801074176375499583703254849309993132931268013"
"715070507278514207864914944621214574162116786377990456375"
"964817771730371110612100247262908550409785456157505694419"
"00451152778245269283276012328748538414051025541"
)
)
)
| true | true |
f71b6b65aa6aa47c57fda3ac6483ee6b1a2be140 | 239 | py | Python | more-python-for-beginners/03 - Classes/basic_class.py | CloudBreadPaPa/c9-python-getting-started | c49580be5e7e88a480d05596a7a53c89d0be7dd3 | [
"MIT"
] | null | null | null | more-python-for-beginners/03 - Classes/basic_class.py | CloudBreadPaPa/c9-python-getting-started | c49580be5e7e88a480d05596a7a53c89d0be7dd3 | [
"MIT"
] | null | null | null | more-python-for-beginners/03 - Classes/basic_class.py | CloudBreadPaPa/c9-python-getting-started | c49580be5e7e88a480d05596a7a53c89d0be7dd3 | [
"MIT"
] | 1 | 2021-09-12T15:34:13.000Z | 2021-09-12T15:34:13.000Z | class Presenter():
    def __init__(self, name):
        # Constructor: store the presenter's display name.
        self.name = name
    def say_hello(self):
        # Method: greet using the presenter's current name.
        print('Hello, ' + self.name)
presenter = Presenter('Chris')
# Attributes can be reassigned after construction.
presenter.name = 'Christopher'
presenter.say_hello()  # prints "Hello, Christopher"
def __init__(self, name):
self.name = name
def say_hello(self):
print('Hello, ' + self.name)
presenter = Presenter('Chris')
presenter.name = 'Christopher'
presenter.say_hello() | true | true |
f71b6c3e8f25504c53f9b02239b585cd06f3f509 | 1,080 | py | Python | posts/views.py | hamzabell/hackernews_mvp | 54beff25f6d23f42b39a13dfe0c289768faa4c3d | [
"MIT"
] | null | null | null | posts/views.py | hamzabell/hackernews_mvp | 54beff25f6d23f42b39a13dfe0c289768faa4c3d | [
"MIT"
] | null | null | null | posts/views.py | hamzabell/hackernews_mvp | 54beff25f6d23f42b39a13dfe0c289768faa4c3d | [
"MIT"
] | null | null | null | from django.core.checks import messages
from rest_framework import generics
from rest_framework.response import Response
from posts.models import Post
from .serializers import PostSerializer, UpVoteSerializer
class PostList(generics.ListCreateAPIView):
    """List all posts (GET) or create a new post (POST)."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class PostDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve (GET), update (PUT/PATCH), or delete (DELETE) a single post."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class UpVoteAPIView(generics.GenericAPIView):
    """Increment the upvote counter of a single post.

    Expects a POST body of ``{"post_id": <int>}``, validated by
    ``UpVoteSerializer``.
    """

    serializer_class = UpVoteSerializer

    def post(self, request, format=None):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        post_id = serializer.data['post_id']
        post = Post.objects.filter(pk=post_id).first()
        if post is None:
            # Bug fix: a missing post previously returned HTTP 200; signal
            # the error explicitly so clients can tell the outcomes apart.
            return Response({
                "message": "Post does not exist"
            }, status=404)
        # NOTE(review): read-modify-write is racy under concurrent upvotes;
        # consider ``F("upvotes_count") + 1`` — confirm requirements.
        post.upvotes_count += 1
        post.save()
        return Response({
            'message': 'Post has been successfully upvoted'
        })
| 26.341463 | 62 | 0.682407 | from django.core.checks import messages
from rest_framework import generics
from rest_framework.response import Response
from posts.models import Post
from .serializers import PostSerializer, UpVoteSerializer
class PostList(generics.ListCreateAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
class PostDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
class UpVoteAPIView(generics.GenericAPIView):
serializer_class = UpVoteSerializer
def post(self, request, format=None):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
post_id = serializer.data['post_id']
post= Post.objects.filter(pk=post_id).first()
if post:
post.upvotes_count += 1
post.save()
return Response({
'message': 'Post has been sucessfully upvoted'
})
return Response({
"message": "Post does not exist"
})
| true | true |
f71b6d9c04fe09d52e0af50a82d2a1e90ad0f9f1 | 8,352 | py | Python | estimagic/tests/differentiation/test_derivatives.py | vishalbelsare/estimagic | afae1be3a1566056d11962c495b67e64bc4a0822 | [
"BSD-3-Clause"
] | null | null | null | estimagic/tests/differentiation/test_derivatives.py | vishalbelsare/estimagic | afae1be3a1566056d11962c495b67e64bc4a0822 | [
"BSD-3-Clause"
] | null | null | null | estimagic/tests/differentiation/test_derivatives.py | vishalbelsare/estimagic | afae1be3a1566056d11962c495b67e64bc4a0822 | [
"BSD-3-Clause"
] | null | null | null | from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae
from pandas.testing import assert_frame_equal
from scipy.optimize._numdiff import approx_derivative
from estimagic.differentiation.derivatives import _consolidate_one_step_derivatives
from estimagic.differentiation.derivatives import _convert_evaluation_data_to_frame
from estimagic.differentiation.derivatives import (
_convert_richardson_candidates_to_frame,
)
from estimagic.differentiation.derivatives import _nan_skipping_batch_evaluator
from estimagic.differentiation.derivatives import _select_minimizer_along_axis
from estimagic.differentiation.derivatives import first_derivative
from estimagic.examples.numdiff_functions import logit_loglike
from estimagic.examples.numdiff_functions import logit_loglike_gradient
from estimagic.examples.numdiff_functions import logit_loglikeobs
from estimagic.examples.numdiff_functions import logit_loglikeobs_jacobian
from estimagic.utilities import namedtuple_from_kwargs
@pytest.fixture
def binary_choice_inputs():
    """Load the pickled fixtures for the binary choice (logit) example."""
    pickle_path = Path(__file__).resolve().parent / "binary_choice_inputs.pickle"
    return pd.read_pickle(pickle_path)
# Finite difference schemes supported by ``first_derivative``; each
# parametrized test below runs once per scheme.
methods = ["forward", "backward", "central"]
@pytest.mark.parametrize("method", methods)
def test_first_derivative_jacobian(binary_choice_inputs, method):
    """Numerical jacobian of the logit loglikeobs matches the analytical one."""
    inputs = binary_choice_inputs
    params = inputs["params_np"]
    loglikeobs = partial(logit_loglikeobs, y=inputs["y"], x=inputs["x"])
    result = first_derivative(
        func=loglikeobs,
        method=method,
        params=params,
        n_steps=1,
        base_steps=None,
        lower_bounds=np.full(params.shape, -np.inf),
        upper_bounds=np.full(params.shape, np.inf),
        min_steps=1e-8,
        step_ratio=2.0,
        f0=loglikeobs(params),
        n_cores=1,
    )
    analytical = logit_loglikeobs_jacobian(params, inputs["y"], inputs["x"])
    aaae(result["derivative"], analytical, decimal=6)
def test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):
    """``first_derivative`` with default options reproduces the jacobian."""
    inputs = binary_choice_inputs
    params = inputs["params_np"]
    loglikeobs = partial(logit_loglikeobs, y=inputs["y"], x=inputs["x"])
    result = first_derivative(func=loglikeobs, params=params, n_cores=1)
    analytical = logit_loglikeobs_jacobian(params, inputs["y"], inputs["x"])
    aaae(result["derivative"], analytical, decimal=6)
@pytest.mark.parametrize("method", methods)
def test_first_derivative_gradient(binary_choice_inputs, method):
    """Numerical gradient of the logit loglikelihood matches the analytical one."""
    inputs = binary_choice_inputs
    params = inputs["params_np"]
    loglike = partial(logit_loglike, y=inputs["y"], x=inputs["x"])
    result = first_derivative(
        func=loglike,
        method=method,
        params=params,
        n_steps=1,
        f0=loglike(params),
        n_cores=1,
    )
    analytical = logit_loglike_gradient(params, inputs["y"], inputs["x"])
    aaae(result["derivative"], analytical, decimal=4)
@pytest.mark.parametrize("method", methods)
def test_first_derivative_scalar(method):
    """The derivative of x ** 2 at x = 3 is 6 for every difference method."""

    def f(x):
        return x ** 2

    # Bug fix: ``method`` was parametrized but never forwarded, so all three
    # parametrizations silently exercised only the default method.
    calculated = first_derivative(f, 3.0, method=method, n_cores=1)
    expected = 6.0
    assert calculated["derivative"] == expected
@pytest.mark.parametrize("method", methods)
def test_first_derivative_scalar_with_return_func_value(method):
    """``return_func_value=True`` yields both the derivative and f(x)."""

    def f(x):
        return x ** 2

    # Bug fix: ``method`` was parametrized but never forwarded, so all three
    # parametrizations silently exercised only the default method.
    calculated = first_derivative(
        f, 3.0, method=method, return_func_value=True, return_info=False, n_cores=1
    )
    expected = {"derivative": 6.0, "func_value": 9.0}
    assert calculated == expected
def test_nan_skipping_batch_evaluator():
    """NaN inputs yield NaN outputs while valid inputs are still evaluated."""
    arguments = [np.nan, np.ones(2), np.array([3, 4]), np.nan, np.array([1, 2])]
    expected_results = [
        np.full(2, np.nan),
        np.ones(2),
        np.array([9, 16]),
        np.full(2, np.nan),
        np.array([1, 4]),
    ]
    results = _nan_skipping_batch_evaluator(
        func=lambda x: x ** 2,
        arguments=arguments,
        n_cores=1,
        error_handling="continue",
        batch_evaluator="joblib",
    )
    for got, wanted in zip(results, expected_results):
        if np.isnan(wanted).all():
            assert np.isnan(got).all()
        else:
            aaae(got, wanted)
def test_consolidate_one_step_derivatives():
    """NaN forward-difference entries are filled from the backward estimates."""
    forward = np.ones((1, 4, 3))
    forward[:, :, 0] = np.nan
    backward = np.zeros_like(forward)
    consolidated = _consolidate_one_step_derivatives(
        {"forward": forward, "backward": backward}, ["forward", "backward"]
    )
    aaae(consolidated, np.array([[0, 1, 1]] * 4))
@pytest.fixture()
def example_function_gradient_fixtures():
    """Provide a scalar function f:R^3 -> R together with its analytical gradient."""

    def f(x):
        """f:R^3 -> R"""
        a, b, c = x[0], x[1], x[2]
        # c - c keeps the third coordinate in the expression with zero effect
        return np.sin(a) + np.cos(b) + c - c

    def fprime(x):
        """Gradient(f)(x):R^3 -> R^3"""
        a, b, c = x[0], x[1], x[2]
        return np.array([np.cos(a), -np.sin(b), c - c])

    return {"func": f, "func_prime": fprime}
@pytest.fixture()
def example_function_jacobian_fixtures():
    """Provide a vector-valued function f:R^3 -> R^2 and its analytical Jacobian."""
    def f(x):
        """f:R^3 -> R^2"""
        x1, x2, x3 = x[0], x[1], x[2]
        y1, y2 = np.sin(x1) + np.cos(x2), np.exp(x3)
        return np.array([y1, y2])
    def fprime(x):
        """Jacobian(f)(x):R^3 -> R^(2x3)"""
        x1, x2, x3 = x[0], x[1], x[2]
        jac = np.array([[np.cos(x1), -np.sin(x2), 0], [0, 0, np.exp(x3)]])
        return jac
    return {"func": f, "func_prime": fprime}
def test_first_derivative_gradient_richardson(example_function_gradient_fixtures):
    """Richardson-extrapolated gradient agrees with scipy and the closed form."""
    f = example_function_gradient_fixtures["func"]
    fprime = example_function_gradient_fixtures["func_prime"]
    true_fprime = fprime(np.ones(3))
    scipy_fprime = approx_derivative(f, np.ones(3))
    our_fprime = first_derivative(f, np.ones(3), n_steps=3, method="central", n_cores=1)
    aaae(scipy_fprime, our_fprime["derivative"])
    aaae(true_fprime, our_fprime["derivative"])
def test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):
    """Richardson-extrapolated Jacobian agrees with scipy and the closed form."""
    f = example_function_jacobian_fixtures["func"]
    fprime = example_function_jacobian_fixtures["func_prime"]
    true_fprime = fprime(np.ones(3))
    scipy_fprime = approx_derivative(f, np.ones(3))
    our_fprime = first_derivative(f, np.ones(3), n_steps=3, method="central", n_cores=1)
    aaae(scipy_fprime, our_fprime["derivative"])
    aaae(true_fprime, our_fprime["derivative"])
def test_convert_evaluation_data_to_frame():
    """Step/evaluation arrays are flattened into the expected long-format frame."""
    arr = np.arange(4).reshape(2, 2)
    arr2 = arr.reshape(2, 1, 2)
    steps = namedtuple_from_kwargs(pos=arr, neg=-arr)
    evals = namedtuple_from_kwargs(pos=arr2, neg=-arr2)
    # rows: sign, step_number, dim_x, dim_f, step, eval
    expected = [
        [1, 0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1, 1],
        [1, 1, 0, 0, 2, 2],
        [1, 1, 1, 0, 3, 3],
        [-1, 0, 0, 0, 0, 0],
        [-1, 0, 1, 0, 1, -1],
        [-1, 1, 0, 0, 2, -2],
        [-1, 1, 1, 0, 3, -3],
    ]
    expected = pd.DataFrame(
        expected, columns=["sign", "step_number", "dim_x", "dim_f", "step", "eval"]
    )
    got = _convert_evaluation_data_to_frame(steps, evals)
    assert_frame_equal(expected, got.reset_index(), check_dtype=False)
def test__convert_richardson_candidates_to_frame():
    """Candidate dicts keyed by 'method+num_term' become a multi-indexed frame."""
    jac = {
        "forward1": np.array([[0, 1], [2, 3]]),
        "forward2": np.array([[0.5, 1], [2, 3]]),
    }
    err = {
        "forward1": np.array([[0, 0], [0, 1]]),
        "forward2": np.array([[1, 0], [0, 0]]),
    }
    # rows: method, num_term, dim_x, dim_f, der, err
    expected = [
        ["forward", 1, 0, 0, 0, 0],
        ["forward", 1, 1, 0, 1, 0],
        ["forward", 1, 0, 1, 2, 0],
        ["forward", 1, 1, 1, 3, 1],
        ["forward", 2, 0, 0, 0.5, 1],
        ["forward", 2, 1, 0, 1, 0],
        ["forward", 2, 0, 1, 2, 0],
        ["forward", 2, 1, 1, 3, 0],
    ]
    expected = pd.DataFrame(
        expected, columns=["method", "num_term", "dim_x", "dim_f", "der", "err"]
    )
    expected = expected.set_index(["method", "num_term", "dim_x", "dim_f"])
    got = _convert_richardson_candidates_to_frame(jac, err)
    assert_frame_equal(got, expected, check_dtype=False)
def test__select_minimizer_along_axis():
    """The candidate with the smaller error estimate is chosen along axis 0."""
    candidates = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
    candidate_errors = np.array([[[0, 1], [1, 0]], [[1, 0], [0, 1]]])
    minimal = _select_minimizer_along_axis(candidates, candidate_errors)
    expected = (np.array([[0, 5], [6, 3]]), np.array([[0, 0], [0, 0]]))
    aaae(expected, minimal)
| 32.498054 | 88 | 0.639727 | from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae
from pandas.testing import assert_frame_equal
from scipy.optimize._numdiff import approx_derivative
from estimagic.differentiation.derivatives import _consolidate_one_step_derivatives
from estimagic.differentiation.derivatives import _convert_evaluation_data_to_frame
from estimagic.differentiation.derivatives import (
_convert_richardson_candidates_to_frame,
)
from estimagic.differentiation.derivatives import _nan_skipping_batch_evaluator
from estimagic.differentiation.derivatives import _select_minimizer_along_axis
from estimagic.differentiation.derivatives import first_derivative
from estimagic.examples.numdiff_functions import logit_loglike
from estimagic.examples.numdiff_functions import logit_loglike_gradient
from estimagic.examples.numdiff_functions import logit_loglikeobs
from estimagic.examples.numdiff_functions import logit_loglikeobs_jacobian
from estimagic.utilities import namedtuple_from_kwargs
@pytest.fixture
def binary_choice_inputs():
fix_path = Path(__file__).resolve().parent / "binary_choice_inputs.pickle"
inputs = pd.read_pickle(fix_path)
return inputs
methods = ["forward", "backward", "central"]
@pytest.mark.parametrize("method", methods)
def test_first_derivative_jacobian(binary_choice_inputs, method):
fix = binary_choice_inputs
func = partial(logit_loglikeobs, y=fix["y"], x=fix["x"])
calculated = first_derivative(
func=func,
method=method,
params=fix["params_np"],
n_steps=1,
base_steps=None,
lower_bounds=np.full(fix["params_np"].shape, -np.inf),
upper_bounds=np.full(fix["params_np"].shape, np.inf),
min_steps=1e-8,
step_ratio=2.0,
f0=func(fix["params_np"]),
n_cores=1,
)
expected = logit_loglikeobs_jacobian(fix["params_np"], fix["y"], fix["x"])
aaae(calculated["derivative"], expected, decimal=6)
def test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):
fix = binary_choice_inputs
func = partial(logit_loglikeobs, y=fix["y"], x=fix["x"])
calculated = first_derivative(func=func, params=fix["params_np"], n_cores=1)
expected = logit_loglikeobs_jacobian(fix["params_np"], fix["y"], fix["x"])
aaae(calculated["derivative"], expected, decimal=6)
@pytest.mark.parametrize("method", methods)
def test_first_derivative_gradient(binary_choice_inputs, method):
fix = binary_choice_inputs
func = partial(logit_loglike, y=fix["y"], x=fix["x"])
calculated = first_derivative(
func=func,
method=method,
params=fix["params_np"],
n_steps=1,
f0=func(fix["params_np"]),
n_cores=1,
)
expected = logit_loglike_gradient(fix["params_np"], fix["y"], fix["x"])
aaae(calculated["derivative"], expected, decimal=4)
@pytest.mark.parametrize("method", methods)
def test_first_derivative_scalar(method):
def f(x):
return x ** 2
calculated = first_derivative(f, 3.0, n_cores=1)
expected = 6.0
assert calculated["derivative"] == expected
@pytest.mark.parametrize("method", methods)
def test_first_derivative_scalar_with_return_func_value(method):
def f(x):
return x ** 2
calculated = first_derivative(
f, 3.0, return_func_value=True, return_info=False, n_cores=1
)
expected = {"derivative": 6.0, "func_value": 9.0}
assert calculated == expected
def test_nan_skipping_batch_evaluator():
arglist = [np.nan, np.ones(2), np.array([3, 4]), np.nan, np.array([1, 2])]
expected = [
np.full(2, np.nan),
np.ones(2),
np.array([9, 16]),
np.full(2, np.nan),
np.array([1, 4]),
]
calculated = _nan_skipping_batch_evaluator(
func=lambda x: x ** 2,
arguments=arglist,
n_cores=1,
error_handling="continue",
batch_evaluator="joblib",
)
for arr_calc, arr_exp in zip(calculated, expected):
if np.isnan(arr_exp).all():
assert np.isnan(arr_calc).all()
else:
aaae(arr_calc, arr_exp)
def test_consolidate_one_step_derivatives():
forward = np.ones((1, 4, 3))
forward[:, :, 0] = np.nan
backward = np.zeros_like(forward)
calculated = _consolidate_one_step_derivatives(
{"forward": forward, "backward": backward}, ["forward", "backward"]
)
expected = np.array([[0, 1, 1]] * 4)
aaae(calculated, expected)
@pytest.fixture()
def example_function_gradient_fixtures():
def f(x):
x1, x2, x3 = x[0], x[1], x[2]
y1 = np.sin(x1) + np.cos(x2) + x3 - x3
return y1
def fprime(x):
x1, x2, x3 = x[0], x[1], x[2]
grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])
return grad
return {"func": f, "func_prime": fprime}
@pytest.fixture()
def example_function_jacobian_fixtures():
def f(x):
x1, x2, x3 = x[0], x[1], x[2]
y1, y2 = np.sin(x1) + np.cos(x2), np.exp(x3)
return np.array([y1, y2])
def fprime(x):
x1, x2, x3 = x[0], x[1], x[2]
jac = np.array([[np.cos(x1), -np.sin(x2), 0], [0, 0, np.exp(x3)]])
return jac
return {"func": f, "func_prime": fprime}
def test_first_derivative_gradient_richardson(example_function_gradient_fixtures):
f = example_function_gradient_fixtures["func"]
fprime = example_function_gradient_fixtures["func_prime"]
true_fprime = fprime(np.ones(3))
scipy_fprime = approx_derivative(f, np.ones(3))
our_fprime = first_derivative(f, np.ones(3), n_steps=3, method="central", n_cores=1)
aaae(scipy_fprime, our_fprime["derivative"])
aaae(true_fprime, our_fprime["derivative"])
def test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):
f = example_function_jacobian_fixtures["func"]
fprime = example_function_jacobian_fixtures["func_prime"]
true_fprime = fprime(np.ones(3))
scipy_fprime = approx_derivative(f, np.ones(3))
our_fprime = first_derivative(f, np.ones(3), n_steps=3, method="central", n_cores=1)
aaae(scipy_fprime, our_fprime["derivative"])
aaae(true_fprime, our_fprime["derivative"])
def test_convert_evaluation_data_to_frame():
arr = np.arange(4).reshape(2, 2)
arr2 = arr.reshape(2, 1, 2)
steps = namedtuple_from_kwargs(pos=arr, neg=-arr)
evals = namedtuple_from_kwargs(pos=arr2, neg=-arr2)
expected = [
[1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 1],
[1, 1, 0, 0, 2, 2],
[1, 1, 1, 0, 3, 3],
[-1, 0, 0, 0, 0, 0],
[-1, 0, 1, 0, 1, -1],
[-1, 1, 0, 0, 2, -2],
[-1, 1, 1, 0, 3, -3],
]
expected = pd.DataFrame(
expected, columns=["sign", "step_number", "dim_x", "dim_f", "step", "eval"]
)
got = _convert_evaluation_data_to_frame(steps, evals)
assert_frame_equal(expected, got.reset_index(), check_dtype=False)
def test__convert_richardson_candidates_to_frame():
jac = {
"forward1": np.array([[0, 1], [2, 3]]),
"forward2": np.array([[0.5, 1], [2, 3]]),
}
err = {
"forward1": np.array([[0, 0], [0, 1]]),
"forward2": np.array([[1, 0], [0, 0]]),
}
expected = [
["forward", 1, 0, 0, 0, 0],
["forward", 1, 1, 0, 1, 0],
["forward", 1, 0, 1, 2, 0],
["forward", 1, 1, 1, 3, 1],
["forward", 2, 0, 0, 0.5, 1],
["forward", 2, 1, 0, 1, 0],
["forward", 2, 0, 1, 2, 0],
["forward", 2, 1, 1, 3, 0],
]
expected = pd.DataFrame(
expected, columns=["method", "num_term", "dim_x", "dim_f", "der", "err"]
)
expected = expected.set_index(["method", "num_term", "dim_x", "dim_f"])
got = _convert_richardson_candidates_to_frame(jac, err)
assert_frame_equal(got, expected, check_dtype=False)
def test__select_minimizer_along_axis():
der = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
err = np.array([[[0, 1], [1, 0]], [[1, 0], [0, 1]]])
expected = (np.array([[0, 5], [6, 3]]), np.array([[0, 0], [0, 0]]))
got = _select_minimizer_along_axis(der, err)
aaae(expected, got)
| true | true |
f71b6dab14627d4699e80c6acbce3ef420b0a543 | 35 | py | Python | ciphey/basemods/Searchers/__init__.py | paramint/ciphey | 26195dfe1f216c3d43d07b50279b64eb026f0c13 | [
"MIT"
] | 1 | 2021-05-30T19:55:00.000Z | 2021-05-30T19:55:00.000Z | ciphey/basemods/Searchers/__init__.py | usama7628674/Ciphey | e18801c506e93e7e9377d0bbc6870ecd84ae2f61 | [
"MIT"
] | 4 | 2020-11-13T19:01:56.000Z | 2022-02-10T02:14:00.000Z | ciphey/basemods/Searchers/__init__.py | usama7628674/Ciphey | e18801c506e93e7e9377d0bbc6870ecd84ae2f61 | [
"MIT"
] | null | null | null | from . import ausearch, perfection
| 17.5 | 34 | 0.8 | from . import ausearch, perfection
| true | true |
f71b6e4295ed13a2ac4d43cdf95ee46cabd50a60 | 18,334 | py | Python | python/helpers/pycharm/teamcity/pytest_plugin.py | janchochol/intellij-community | fce543ac6018b411e519fe01ddc71a8c1bbd138b | [
"Apache-2.0"
] | null | null | null | python/helpers/pycharm/teamcity/pytest_plugin.py | janchochol/intellij-community | fce543ac6018b411e519fe01ddc71a8c1bbd138b | [
"Apache-2.0"
] | null | null | null | python/helpers/pycharm/teamcity/pytest_plugin.py | janchochol/intellij-community | fce543ac6018b411e519fe01ddc71a8c1bbd138b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
Aaron Buchanan
Nov. 2012
Plug-in for py.test for reporting to TeamCity server
Report results to TeamCity during test execution for immediate reporting
when using TeamCity.
This should be installed as a py.test plugin and will be automatically enabled by running
tests under TeamCity build.
"""
import os
import pprint
import sys
import re
import traceback
from datetime import timedelta
from teamcity.messages import TeamcityServiceMessages
from teamcity.common import convert_error_to_string, dump_test_stderr, dump_test_stdout
from teamcity import is_running_under_teamcity
from teamcity import diff_tools
diff_tools.patch_unittest_diff()
def unformat_pytest_explanation(s):
    """
    Undo _pytest.assertion.util.format_explanation

    Pytest escapes real newlines as the two-character sequence backslash-n
    when formatting assert explanations; turn them back into newlines.
    """
    return "\n".join(s.split("\\n"))
def fetch_diff_error_from_message(err_message, swap_diff):
    """Try to extract an expected/actual pair from a pytest assertion message.

    Returns a ``diff_tools.EqualsAssertionError`` when the message contains
    exactly one ``==`` comparison, otherwise ``None``.  ``swap_diff`` swaps
    which side is treated as "expected".
    """
    message_lines = err_message.split("\n")
    header = None
    comparison = None
    if err_message.startswith("AssertionError: assert"):
        # Everything in one line
        comparison = message_lines[0][len("AssertionError: assert "):]
    elif len(message_lines) > 1:
        # First line is the custom message, second is the "assert ..." detail
        comparison = message_lines[1][len("assert "):]
        header = message_lines[0]
    if not comparison or comparison.count("==") != 1:
        return None
    sides = [part.strip() for part in comparison.split("==")]
    # Strip surrounding quotes from string reprs
    sides = [
        side[1:-1] if side.startswith("'") or side.startswith('"') else side
        for side in sides
    ]
    # Pytest cuts too long lines, no need to check is_too_big
    expected, actual = sides[1], sides[0]
    if swap_diff:
        expected, actual = actual, expected
    return diff_tools.EqualsAssertionError(
        unformat_pytest_explanation(expected),
        unformat_pytest_explanation(actual),
        header,
    )
def _is_bool_supported():
"""
Type "bool" is not supported before 2.9
"""
try:
from pytest import __version__
from distutils import version
return version.LooseVersion(str(__version__)) >= version.LooseVersion("2.9")
except ImportError:
return False
def pytest_addoption(parser):
    """Register the --teamcity/--no-teamcity flags and the plugin ini options."""
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    group._addoption(
        '--teamcity',
        action="count",
        dest="teamcity",
        default=0,
        help="force output of JetBrains TeamCity service messages",
    )
    group._addoption(
        '--no-teamcity',
        action="count",
        dest="no_teamcity",
        default=0,
        help="disable output of JetBrains TeamCity service messages",
    )
    ini_kwargs = {"help": "skip output of passed tests for JetBrains TeamCity service messages"}
    if _is_bool_supported():
        # the "bool" ini type only exists on pytest >= 2.9
        ini_kwargs["type"] = "bool"
    parser.addini("skippassedoutput", **ini_kwargs)
    parser.addini("swapdiff", **ini_kwargs)
def pytest_configure(config):
    """Install the TeamCity reporter when enabled.

    ``--no-teamcity`` wins over ``--teamcity``; with neither flag the
    decision falls back to ``is_running_under_teamcity()``.
    """
    if config.option.no_teamcity >= 1:
        enabled = False
    elif config.option.teamcity >= 1:
        enabled = True
    else:
        enabled = is_running_under_teamcity()
    if enabled:
        # pytest captures output itself unless --capture=no was given
        output_capture_enabled = getattr(config.option, 'capture', 'fd') != 'no'
        coverage_controller = _get_coverage_controller(config)
        skip_passed_output = bool(config.getini('skippassedoutput'))
        config.option.verbose = 2  # don't truncate assert explanations
        config._teamcityReporting = EchoTeamCityMessages(
            output_capture_enabled,
            coverage_controller,
            skip_passed_output,
            bool(config.getini('swapdiff'))
        )
        config.pluginmanager.register(config._teamcityReporting)
def pytest_unconfigure(config):
    """Detach and unregister the TeamCity reporter, if one was installed."""
    reporter = getattr(config, '_teamcityReporting', None)
    if not reporter:
        return
    del config._teamcityReporting
    config.pluginmanager.unregister(reporter)
def _get_coverage_controller(config):
cov_plugin = config.pluginmanager.getplugin('_cov')
if not cov_plugin:
return None
return cov_plugin.cov_controller
class EchoTeamCityMessages(object):
    def __init__(self, output_capture_enabled, coverage_controller, skip_passed_output, swap_diff):
        """Store the reporting options and create the TeamCity message writer."""
        self.coverage_controller = coverage_controller
        self.output_capture_enabled = output_capture_enabled
        self.skip_passed_output = skip_passed_output
        self.teamcity = TeamcityServiceMessages()
        # test ids for which testStarted was emitted but not yet testFinished
        self.test_start_reported_mark = set()
        self.max_reported_output_size = 1 * 1024 * 1024
        self.reported_output_chunk_size = 50000
        self.swap_diff = swap_diff
def get_id_from_location(self, location):
if type(location) is not tuple or len(location) != 3 or not hasattr(location[2], "startswith"):
return None
def convert_file_to_id(filename):
filename = re.sub(r"\.pyc?$", "", filename)
return filename.replace(os.sep, ".").replace("/", ".")
def add_prefix_to_filename_id(filename_id, prefix):
dot_location = filename_id.rfind('.')
if dot_location <= 0 or dot_location >= len(filename_id) - 1:
return None
return filename_id[:dot_location + 1] + prefix + filename_id[dot_location + 1:]
pylint_prefix = '[pylint] '
if location[2].startswith(pylint_prefix):
id_from_file = convert_file_to_id(location[2][len(pylint_prefix):])
return id_from_file + ".Pylint"
if location[2] == "PEP8-check":
id_from_file = convert_file_to_id(location[0])
return id_from_file + ".PEP8"
return None
def format_test_id(self, nodeid, location):
id_from_location = self.get_id_from_location(location)
if id_from_location is not None:
return id_from_location
test_id = nodeid
if test_id:
if test_id.find("::") < 0:
test_id += "::top_level"
else:
test_id = "top_level"
first_bracket = test_id.find("[")
if first_bracket > 0:
# [] -> (), make it look like nose parameterized tests
params = "(" + test_id[first_bracket + 1:]
if params.endswith("]"):
params = params[:-1] + ")"
test_id = test_id[:first_bracket]
if test_id.endswith("::"):
test_id = test_id[:-2]
else:
params = ""
test_id = test_id.replace("::()::", "::")
test_id = re.sub(r"\.pyc?::", r"::", test_id)
test_id = test_id.replace(".", "_").replace(os.sep, ".").replace("/", ".").replace('::', '.')
if params:
params = params.replace(".", "_")
test_id += params
return test_id
def format_location(self, location):
if type(location) is tuple and len(location) == 3:
return "%s:%s (%s)" % (str(location[0]), str(location[1]), str(location[2]))
return str(location)
    def pytest_collection_finish(self, session):
        """Report the total number of collected tests to TeamCity."""
        self.teamcity.testCount(len(session.items))
    def pytest_runtest_logstart(self, nodeid, location):
        """Emit testStarted as soon as pytest begins running the item."""
        # test name fetched from location passed as metainfo to PyCharm
        # it will be used to run specific test
        # See IDEA-176950, PY-31836
        test_name = location[2]
        if test_name:
            # keep only the last dotted component (the bare test/method name)
            test_name = str(test_name).split(".")[-1]
        self.ensure_test_start_reported(self.format_test_id(nodeid, location), test_name)
def ensure_test_start_reported(self, test_id, metainfo=None):
if test_id not in self.test_start_reported_mark:
if self.output_capture_enabled:
capture_standard_output = "false"
else:
capture_standard_output = "true"
self.teamcity.testStarted(test_id, flowId=test_id, captureStandardOutput=capture_standard_output, metainfo=metainfo)
self.test_start_reported_mark.add(test_id)
def report_has_output(self, report):
for (secname, data) in report.sections:
if report.when in secname and ('stdout' in secname or 'stderr' in secname):
return True
return False
    def report_test_output(self, report, test_id):
        """Replay captured stdout/stderr sections as TeamCity test output."""
        for (secname, data) in report.sections:
            # https://github.com/JetBrains/teamcity-messages/issues/112
            # CollectReport didn't have 'when' property, but now it has.
            # But we still need output on 'collect' state
            if hasattr(report, "when") and report.when not in secname and report.when != 'collect':
                continue
            if not data:
                continue
            if 'stdout' in secname:
                dump_test_stdout(self.teamcity, test_id, test_id, data)
            elif 'stderr' in secname:
                dump_test_stderr(self.teamcity, test_id, test_id, data)
    def report_test_finished(self, test_id, duration=None):
        """Emit testFinished and forget the id so it may be started again later."""
        self.teamcity.testFinished(test_id, testDuration=duration, flowId=test_id)
        self.test_start_reported_mark.remove(test_id)
    def report_test_failure(self, test_id, report, message=None, report_output=True):
        """Report a failed test (or stage), attaching a structured diff if one
        can be recovered from the assertion message or the local exception store.
        """
        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None
        if message is None:
            message = self.format_location(report.location)
        self.ensure_test_start_reported(test_id)
        if report_output:
            self.report_test_output(report, test_id)
        diff_error = None
        # Best-effort diff extraction: any failure here falls through to the
        # plain testFailed path below.
        try:
            err_message = str(report.longrepr.reprcrash.message)
            diff_name = diff_tools.EqualsAssertionError.__name__
            # There is a string like "foo.bar.DiffError: [serialized_data]"
            if diff_name in err_message:
                serialized_data = err_message[err_message.index(diff_name) + len(diff_name) + 1:]
                diff_error = diff_tools.deserialize_error(serialized_data)
            # AssertionError is patched in py.test, we can try to fetch diff from it
            # In general case message starts with "AssertionError: ", but can also starts with "assert" for top-level
            # function. To support both cases we unify them
            if err_message.startswith("assert"):
                err_message = "AssertionError: " + err_message
            if err_message.startswith("AssertionError:"):
                diff_error = fetch_diff_error_from_message(err_message, self.swap_diff)
        except Exception:
            pass
        if not diff_error:
            from .jb_local_exc_store import get_exception
            diff_error = get_exception()
        if diff_error:
            # Cut everything after postfix: it is internal view of DiffError
            strace = str(report.longrepr)
            data_postfix = "_ _ _ _ _"
            if data_postfix in strace:
                strace = strace[0:strace.index(data_postfix)]
            self.teamcity.testFailed(test_id, diff_error.msg if diff_error.msg else message, strace,
                                     flowId=test_id,
                                     comparison_failure=diff_error
                                     )
        else:
            self.teamcity.testFailed(test_id, message, str(report.longrepr), flowId=test_id)
        self.report_test_finished(test_id, duration)
    def report_test_skip(self, test_id, report):
        """Report a skipped test: replay output, emit testIgnored with reason, finish."""
        # longrepr is a (file, line, reason) triple for pytest.skip-style skips
        if type(report.longrepr) is tuple and len(report.longrepr) == 3:
            reason = report.longrepr[2]
        else:
            reason = str(report.longrepr)
        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None
        self.ensure_test_start_reported(test_id)
        self.report_test_output(report, test_id)
        self.teamcity.testIgnored(test_id, reason, flowId=test_id)
        self.report_test_finished(test_id, duration)
def pytest_assertrepr_compare(self, config, op, left, right):
if op in ('==', '!='):
return ['{0} {1} {2}'.format(pprint.pformat(left), op, pprint.pformat(right))]
    def pytest_runtest_logreport(self, report):
        """Translate a per-phase (setup/call/teardown) report into TeamCity messages.

        :type report: _pytest.runner.TestReport
        """
        test_id = self.format_test_id(report.nodeid, report.location)
        duration = timedelta(seconds=report.duration)
        if report.passed:
            # Do not report passed setup/teardown if no output
            if report.when == 'call':
                self.ensure_test_start_reported(test_id)
                if not self.skip_passed_output:
                    self.report_test_output(report, test_id)
                self.report_test_finished(test_id, duration)
            else:
                if self.report_has_output(report) and not self.skip_passed_output:
                    block_name = "test " + report.when
                    self.teamcity.blockOpened(block_name, flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed(block_name, flowId=test_id)
        elif report.failed:
            if report.when == 'call':
                self.report_test_failure(test_id, report)
            elif report.when == 'setup':
                if self.report_has_output(report):
                    self.teamcity.blockOpened("test setup", flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed("test setup", flowId=test_id)
                self.report_test_failure(test_id, report, message="test setup failed", report_output=False)
            elif report.when == 'teardown':
                # Report failed teardown as a separate test as original test is already finished
                self.report_test_failure(test_id + "_teardown", report)
        elif report.skipped:
            self.report_test_skip(test_id, report)
    def pytest_collectreport(self, report):
        """Report collection errors/skips as synthetic "<id>_collect" tests."""
        test_id = self.format_test_id(report.nodeid, report.location) + "_collect"
        if report.failed:
            self.report_test_failure(test_id, report)
        elif report.skipped:
            self.report_test_skip(test_id, report)
    def pytest_terminal_summary(self):
        """After the run, push coverage statistics when pytest-cov is active."""
        if self.coverage_controller is not None:
            try:
                self._report_coverage()
            except Exception:
                # Coverage reporting must never fail the build; surface the
                # traceback as a custom TeamCity message instead.
                tb = traceback.format_exc()
                self.teamcity.customMessage("Coverage statistics reporting failed", "ERROR", errorDetails=tb)
    def _report_coverage(self):
        """Push line-coverage statistics gathered by pytest-cov to TeamCity.

        Wraps coverage.py's reporter machinery behind a small adapter so both
        the pre-5.0 ``Reporter`` API and the >= 5.0.1
        ``get_analysis_to_report`` API work.  Files whose analysis fails are
        reported as failed pseudo-tests named after the file.
        """
        from coverage.misc import NotPython
        from coverage.results import Numbers
        # Adapter hiding the coverage.py version differences behind one API.
        class _Reporter(object):
            def __init__(self, coverage, config):
                try:
                    from coverage.report import Reporter
                except ImportError:
                    # Support for coverage >= 5.0.1.
                    from coverage.report import get_analysis_to_report
                    class Reporter(object):
                        def __init__(self, coverage, config):
                            self.coverage = coverage
                            self.config = config
                            self._file_reporters = []
                        def find_file_reporters(self, morfs):
                            return [fr for fr, _ in get_analysis_to_report(self.coverage, morfs)]
                self._reporter = Reporter(coverage, config)
            def find_file_reporters(self, morfs):
                self.file_reporters = self._reporter.find_file_reporters(morfs)
            def __getattr__(self, name):
                # delegate everything else to the wrapped reporter
                return getattr(self._reporter, name)
        class _CoverageReporter(_Reporter):
            def __init__(self, coverage, config, messages):
                super(_CoverageReporter, self).__init__(coverage, config)
                # branch coverage flag lives in different places across versions
                if hasattr(coverage, 'data'):
                    self.branches = coverage.data.has_arcs()
                else:
                    self.branches = coverage.get_data().has_arcs()
                self.messages = messages
            def report(self, morfs, outfile=None):
                # old API uses code_units/find_code_units, new one file_reporters
                if hasattr(self, 'find_code_units'):
                    self.find_code_units(morfs)
                else:
                    self.find_file_reporters(morfs)
                total = Numbers()
                if hasattr(self, 'code_units'):
                    units = self.code_units
                else:
                    units = self.file_reporters
                for cu in units:
                    try:
                        analysis = self.coverage._analyze(cu)
                        nums = analysis.numbers
                        total += nums
                    except KeyboardInterrupt:
                        raise
                    except Exception:
                        if self.config.ignore_errors:
                            continue
                        err = sys.exc_info()
                        typ, msg = err[:2]
                        if typ is NotPython and not cu.should_be_python():
                            continue
                        # surface the broken file as a failed pseudo-test
                        test_id = cu.name
                        details = convert_error_to_string(err)
                        self.messages.testStarted(test_id, flowId=test_id)
                        self.messages.testFailed(test_id, message="Coverage analysis failed", details=details, flowId=test_id)
                        self.messages.testFinished(test_id, flowId=test_id)
                if total.n_files > 0:
                    covered = total.n_executed
                    total_statements = total.n_statements
                    if self.branches:
                        covered += total.n_executed_branches
                        total_statements += total.n_branches
                    self.messages.buildStatisticLinesCovered(covered)
                    self.messages.buildStatisticTotalLines(total_statements)
                    self.messages.buildStatisticLinesUncovered(total_statements - covered)
        reporter = _CoverageReporter(
            self.coverage_controller.cov,
            self.coverage_controller.cov.config,
            self.teamcity,
        )
        reporter.report(None)
| 38.761099 | 128 | 0.608432 |
import os
import pprint
import sys
import re
import traceback
from datetime import timedelta
from teamcity.messages import TeamcityServiceMessages
from teamcity.common import convert_error_to_string, dump_test_stderr, dump_test_stdout
from teamcity import is_running_under_teamcity
from teamcity import diff_tools
diff_tools.patch_unittest_diff()
def unformat_pytest_explanation(s):
return s.replace("\\n", "\n")
def fetch_diff_error_from_message(err_message, swap_diff):
line_with_diff = None
diff_error_message = None
lines = err_message.split("\n")
if err_message.startswith("AssertionError: assert"):
line_with_diff = lines[0][len("AssertionError: assert "):]
elif len(err_message.split("\n")) > 1:
err_line = lines[1]
line_with_diff = err_line[len("assert "):]
diff_error_message = lines[0]
if line_with_diff and line_with_diff.count("==") == 1:
parts = [x.strip() for x in line_with_diff.split("==")]
parts = [s[1:-1] if s.startswith("'") or s.startswith('"') else s for s in parts]
# Pytest cuts too long lines, no need to check is_too_big
expected, actual = parts[1], parts[0]
if swap_diff:
expected, actual = actual, expected
expected = unformat_pytest_explanation(expected)
actual = unformat_pytest_explanation(actual)
return diff_tools.EqualsAssertionError(expected, actual, diff_error_message)
else:
return None
def _is_bool_supported():
try:
from pytest import __version__
from distutils import version
return version.LooseVersion(str(__version__)) >= version.LooseVersion("2.9")
except ImportError:
return False
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('--teamcity', action="count",
dest="teamcity", default=0, help="force output of JetBrains TeamCity service messages")
group._addoption('--no-teamcity', action="count",
dest="no_teamcity", default=0, help="disable output of JetBrains TeamCity service messages")
kwargs = {"help": "skip output of passed tests for JetBrains TeamCity service messages"}
if _is_bool_supported():
kwargs.update({"type": "bool"})
parser.addini("skippassedoutput", **kwargs)
parser.addini("swapdiff", **kwargs)
def pytest_configure(config):
if config.option.no_teamcity >= 1:
enabled = False
elif config.option.teamcity >= 1:
enabled = True
else:
enabled = is_running_under_teamcity()
if enabled:
output_capture_enabled = getattr(config.option, 'capture', 'fd') != 'no'
coverage_controller = _get_coverage_controller(config)
skip_passed_output = bool(config.getini('skippassedoutput'))
config.option.verbose = 2 # don't truncate assert explanations
config._teamcityReporting = EchoTeamCityMessages(
output_capture_enabled,
coverage_controller,
skip_passed_output,
bool(config.getini('swapdiff'))
)
config.pluginmanager.register(config._teamcityReporting)
def pytest_unconfigure(config):
teamcity_reporting = getattr(config, '_teamcityReporting', None)
if teamcity_reporting:
del config._teamcityReporting
config.pluginmanager.unregister(teamcity_reporting)
def _get_coverage_controller(config):
cov_plugin = config.pluginmanager.getplugin('_cov')
if not cov_plugin:
return None
return cov_plugin.cov_controller
class EchoTeamCityMessages(object):
def __init__(self, output_capture_enabled, coverage_controller, skip_passed_output, swap_diff):
self.coverage_controller = coverage_controller
self.output_capture_enabled = output_capture_enabled
self.skip_passed_output = skip_passed_output
self.teamcity = TeamcityServiceMessages()
self.test_start_reported_mark = set()
self.max_reported_output_size = 1 * 1024 * 1024
self.reported_output_chunk_size = 50000
self.swap_diff = swap_diff
def get_id_from_location(self, location):
if type(location) is not tuple or len(location) != 3 or not hasattr(location[2], "startswith"):
return None
def convert_file_to_id(filename):
filename = re.sub(r"\.pyc?$", "", filename)
return filename.replace(os.sep, ".").replace("/", ".")
def add_prefix_to_filename_id(filename_id, prefix):
dot_location = filename_id.rfind('.')
if dot_location <= 0 or dot_location >= len(filename_id) - 1:
return None
return filename_id[:dot_location + 1] + prefix + filename_id[dot_location + 1:]
pylint_prefix = '[pylint] '
if location[2].startswith(pylint_prefix):
id_from_file = convert_file_to_id(location[2][len(pylint_prefix):])
return id_from_file + ".Pylint"
if location[2] == "PEP8-check":
id_from_file = convert_file_to_id(location[0])
return id_from_file + ".PEP8"
return None
def format_test_id(self, nodeid, location):
id_from_location = self.get_id_from_location(location)
if id_from_location is not None:
return id_from_location
test_id = nodeid
if test_id:
if test_id.find("::") < 0:
test_id += "::top_level"
else:
test_id = "top_level"
first_bracket = test_id.find("[")
if first_bracket > 0:
# [] -> (), make it look like nose parameterized tests
params = "(" + test_id[first_bracket + 1:]
if params.endswith("]"):
params = params[:-1] + ")"
test_id = test_id[:first_bracket]
if test_id.endswith("::"):
test_id = test_id[:-2]
else:
params = ""
test_id = test_id.replace("::()::", "::")
test_id = re.sub(r"\.pyc?::", r"::", test_id)
test_id = test_id.replace(".", "_").replace(os.sep, ".").replace("/", ".").replace('::', '.')
if params:
params = params.replace(".", "_")
test_id += params
return test_id
def format_location(self, location):
if type(location) is tuple and len(location) == 3:
return "%s:%s (%s)" % (str(location[0]), str(location[1]), str(location[2]))
return str(location)
def pytest_collection_finish(self, session):
self.teamcity.testCount(len(session.items))
def pytest_runtest_logstart(self, nodeid, location):
# test name fetched from location passed as metainfo to PyCharm
# it will be used to run specific test
# See IDEA-176950, PY-31836
test_name = location[2]
if test_name:
test_name = str(test_name).split(".")[-1]
self.ensure_test_start_reported(self.format_test_id(nodeid, location), test_name)
def ensure_test_start_reported(self, test_id, metainfo=None):
    """Emit ``testStarted`` for *test_id* exactly once.

    Repeated calls for an id that was already announced are no-ops; the set
    of announced ids lives in ``self.test_start_reported_mark``.
    """
    if test_id in self.test_start_reported_mark:
        return
    # When our own output capture is active, tell TeamCity not to capture too.
    capture_standard_output = "false" if self.output_capture_enabled else "true"
    self.teamcity.testStarted(
        test_id,
        flowId=test_id,
        captureStandardOutput=capture_standard_output,
        metainfo=metainfo,
    )
    self.test_start_reported_mark.add(test_id)
def report_has_output(self, report):
    """Return True if *report* captured stdout/stderr for its own phase."""
    return any(
        report.when in secname and ("stdout" in secname or "stderr" in secname)
        for secname, _data in report.sections
    )
def report_test_output(self, report, test_id):
    """Forward the captured stdout/stderr sections of *report* to TeamCity."""
    for (secname, data) in report.sections:
        # https://github.com/JetBrains/teamcity-messages/issues/112
        # CollectReport didn't have 'when' property, but now it has.
        # But we still need output on 'collect' state
        if hasattr(report, "when") and report.when not in secname and report.when != 'collect':
            continue
        if not data:
            continue
        if 'stdout' in secname:
            dump_test_stdout(self.teamcity, test_id, test_id, data)
        elif 'stderr' in secname:
            dump_test_stderr(self.teamcity, test_id, test_id, data)
def report_test_finished(self, test_id, duration=None):
    """Emit ``testFinished`` and forget that *test_id* was started.

    duration: a ``datetime.timedelta`` or None when unknown.
    """
    self.teamcity.testFinished(test_id, testDuration=duration, flowId=test_id)
    # A later report for the same id must trigger a fresh testStarted.
    self.test_start_reported_mark.remove(test_id)
def report_test_failure(self, test_id, report, message=None, report_output=True):
    """Report a failed test, attaching a structured diff when one is available.

    message defaults to the formatted report location; report_output controls
    whether captured stdout/stderr sections are forwarded as well.
    """
    if hasattr(report, 'duration'):
        duration = timedelta(seconds=report.duration)
    else:
        duration = None
    if message is None:
        message = self.format_location(report.location)
    self.ensure_test_start_reported(test_id)
    if report_output:
        self.report_test_output(report, test_id)
    # Best effort: recover an expected/actual comparison failure from the
    # crash message so the IDE can show its diff viewer.
    diff_error = None
    try:
        err_message = str(report.longrepr.reprcrash.message)
        diff_name = diff_tools.EqualsAssertionError.__name__
        # There is a string like "foo.bar.DiffError: [serialized_data]"
        if diff_name in err_message:
            serialized_data = err_message[err_message.index(diff_name) + len(diff_name) + 1:]
            diff_error = diff_tools.deserialize_error(serialized_data)
        # AssertionError is patched in py.test, we can try to fetch diff from it
        # In general case message starts with "AssertionError: ", but can also starts with "assert" for top-level
        # function. To support both cases we unify them
        if err_message.startswith("assert"):
            err_message = "AssertionError: " + err_message
        if err_message.startswith("AssertionError:"):
            diff_error = fetch_diff_error_from_message(err_message, self.swap_diff)
    except Exception:
        # Any parsing problem just means we fall back to a plain failure below.
        pass
    if not diff_error:
        from .jb_local_exc_store import get_exception
        diff_error = get_exception()
    if diff_error:
        # Cut everything after postfix: it is internal view of DiffError
        strace = str(report.longrepr)
        data_postfix = "_ _ _ _ _"
        if data_postfix in strace:
            strace = strace[0:strace.index(data_postfix)]
        self.teamcity.testFailed(test_id, diff_error.msg if diff_error.msg else message, strace,
                                 flowId=test_id,
                                 comparison_failure=diff_error
                                 )
    else:
        self.teamcity.testFailed(test_id, message, str(report.longrepr), flowId=test_id)
    self.report_test_finished(test_id, duration)
def report_test_skip(self, test_id, report):
    """Report a skipped test as ``testIgnored`` with its reason and duration."""
    if type(report.longrepr) is tuple and len(report.longrepr) == 3:
        # For skips, longrepr is a (file, line, reason) triple.
        reason = report.longrepr[2]
    else:
        reason = str(report.longrepr)
    if hasattr(report, 'duration'):
        duration = timedelta(seconds=report.duration)
    else:
        duration = None
    self.ensure_test_start_reported(test_id)
    self.report_test_output(report, test_id)
    self.teamcity.testIgnored(test_id, reason, flowId=test_id)
    self.report_test_finished(test_id, duration)
def pytest_assertrepr_compare(self, config, op, left, right):
    """Pretty-print ``<left> <op> <right>`` for equality/inequality asserts.

    Returns None for other operators so pytest keeps its default explanation.
    """
    if op not in ('==', '!='):
        return None
    return ['%s %s %s' % (pprint.pformat(left), op, pprint.pformat(right))]
def pytest_runtest_logreport(self, report):
    """Dispatch one phase report (setup/call/teardown) to TeamCity messages."""
    test_id = self.format_test_id(report.nodeid, report.location)
    duration = timedelta(seconds=report.duration)
    if report.passed:
        # Do not report passed setup/teardown if no output
        if report.when == 'call':
            self.ensure_test_start_reported(test_id)
            if not self.skip_passed_output:
                self.report_test_output(report, test_id)
            self.report_test_finished(test_id, duration)
        else:
            # Passed setup/teardown: only surface its captured output, inside
            # a named block so it is attributable to the right phase.
            if self.report_has_output(report) and not self.skip_passed_output:
                block_name = "test " + report.when
                self.teamcity.blockOpened(block_name, flowId=test_id)
                self.report_test_output(report, test_id)
                self.teamcity.blockClosed(block_name, flowId=test_id)
    elif report.failed:
        if report.when == 'call':
            self.report_test_failure(test_id, report)
        elif report.when == 'setup':
            if self.report_has_output(report):
                self.teamcity.blockOpened("test setup", flowId=test_id)
                self.report_test_output(report, test_id)
                self.teamcity.blockClosed("test setup", flowId=test_id)
            self.report_test_failure(test_id, report, message="test setup failed", report_output=False)
        elif report.when == 'teardown':
            # Report failed teardown as a separate test as original test is already finished
            self.report_test_failure(test_id + "_teardown", report)
    elif report.skipped:
        self.report_test_skip(test_id, report)
def pytest_collectreport(self, report):
    """Report collection errors/skips as a synthetic "<id>_collect" test."""
    test_id = self.format_test_id(report.nodeid, report.location) + "_collect"
    if report.failed:
        self.report_test_failure(test_id, report)
    elif report.skipped:
        self.report_test_skip(test_id, report)
def pytest_terminal_summary(self):
    """After the run, publish coverage statistics if coverage was collecting."""
    if self.coverage_controller is not None:
        try:
            self._report_coverage()
        except Exception:
            # Never fail the build because statistics reporting broke.
            tb = traceback.format_exc()
            self.teamcity.customMessage("Coverage statistics reporting failed", "ERROR", errorDetails=tb)
def _report_coverage(self):
    """Compute coverage totals and publish them as TeamCity build statistics.

    Bridges several coverage.py generations: the legacy ``Reporter`` class,
    the ``get_analysis_to_report`` API (coverage >= 5.0.1), and both the
    ``code_units`` / ``file_reporters`` attribute spellings.
    """
    from coverage.misc import NotPython
    from coverage.results import Numbers

    class _Reporter(object):
        # Thin adapter hiding coverage.py API differences behind one shape.
        def __init__(self, coverage, config):
            try:
                from coverage.report import Reporter
            except ImportError:
                # Support for coverage >= 5.0.1.
                from coverage.report import get_analysis_to_report

                class Reporter(object):
                    def __init__(self, coverage, config):
                        self.coverage = coverage
                        self.config = config
                        self._file_reporters = []

                    def find_file_reporters(self, morfs):
                        return [fr for fr, _ in get_analysis_to_report(self.coverage, morfs)]
            self._reporter = Reporter(coverage, config)

        def find_file_reporters(self, morfs):
            self.file_reporters = self._reporter.find_file_reporters(morfs)

        def __getattr__(self, name):
            # Everything not defined here is delegated to the wrapped reporter.
            return getattr(self._reporter, name)

    class _CoverageReporter(_Reporter):
        def __init__(self, coverage, config, messages):
            super(_CoverageReporter, self).__init__(coverage, config)
            # Branch coverage data moved from .data to get_data() in newer APIs.
            if hasattr(coverage, 'data'):
                self.branches = coverage.data.has_arcs()
            else:
                self.branches = coverage.get_data().has_arcs()
            self.messages = messages

        def report(self, morfs, outfile=None):
            if hasattr(self, 'find_code_units'):
                self.find_code_units(morfs)
            else:
                self.find_file_reporters(morfs)
            total = Numbers()
            if hasattr(self, 'code_units'):
                units = self.code_units
            else:
                units = self.file_reporters
            for cu in units:
                try:
                    analysis = self.coverage._analyze(cu)
                    nums = analysis.numbers
                    total += nums
                except KeyboardInterrupt:
                    raise
                except Exception:
                    if self.config.ignore_errors:
                        continue
                    err = sys.exc_info()
                    typ, msg = err[:2]
                    if typ is NotPython and not cu.should_be_python():
                        continue
                    # Surface the analysis failure as a failed pseudo-test.
                    test_id = cu.name
                    details = convert_error_to_string(err)
                    self.messages.testStarted(test_id, flowId=test_id)
                    self.messages.testFailed(test_id, message="Coverage analysis failed", details=details, flowId=test_id)
                    self.messages.testFinished(test_id, flowId=test_id)
            if total.n_files > 0:
                covered = total.n_executed
                total_statements = total.n_statements
                if self.branches:
                    # With branch coverage, branches count toward the totals too.
                    covered += total.n_executed_branches
                    total_statements += total.n_branches
                self.messages.buildStatisticLinesCovered(covered)
                self.messages.buildStatisticTotalLines(total_statements)
                self.messages.buildStatisticLinesUncovered(total_statements - covered)

    reporter = _CoverageReporter(
        self.coverage_controller.cov,
        self.coverage_controller.cov.config,
        self.teamcity,
    )
    reporter.report(None)
| true | true |
f71b6ec1ac6a3e138fec3e28c7e2f2eda3b7aa07 | 2,948 | py | Python | mayan/apps/mayan_statistics/views.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | null | null | null | mayan/apps/mayan_statistics/views.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | 10 | 2021-03-19T23:48:12.000Z | 2022-03-12T00:41:49.000Z | mayan/apps/mayan_statistics/views.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | 1 | 2020-12-17T02:35:09.000Z | 2020-12-17T02:35:09.000Z | from django.contrib import messages
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.generics import ConfirmView, SimpleView, SingleObjectListView
from .classes import Statistic, StatisticNamespace
from .permissions import permission_statistics_view
from .tasks import task_execute_statistic
class NamespaceListView(SingleObjectListView):
    """List every registered statistics namespace."""
    extra_context = {
        'hide_link': True,
        'title': _('Statistics namespaces'),
    }
    template_name = 'appearance/generic_list.html'
    view_permission = permission_statistics_view

    def get_source_queryset(self):
        # Namespaces are in-memory registrations, not database rows.
        return StatisticNamespace.get_all()
class NamespaceDetailView(SingleObjectListView):
    """List the statistics that belong to one namespace (selected by slug)."""
    view_permission = permission_statistics_view

    def get_extra_context(self):
        return {
            'hide_link': True,
            'object': self.get_namespace(),
            'title': _('Namespace details for: %s') % self.get_namespace(),
        }

    def get_namespace(self):
        # Resolved from the URL keyword argument on each call.
        return StatisticNamespace.get(slug=self.kwargs['slug'])

    def get_source_queryset(self):
        return self.get_namespace().statistics
class StatisticDetailView(SimpleView):
    """Render the chart (or "no data" state) for a single statistic."""
    view_permission = permission_statistics_view

    def get_extra_context(self):
        obj = self.get_object()
        return {
            'chart_data': obj.get_chart_data(),
            'namespace': obj.namespace,
            'navigation_object_list': ('namespace', 'object'),
            # True when the statistic has not produced any result series yet.
            'no_data': not obj.get_results_data()['series'],
            'object': obj,
            'title': _('Results for: %s') % obj,
        }

    def get_object(self):
        try:
            return Statistic.get(self.kwargs['slug'])
        except KeyError:
            raise Http404(_('Statistic "%s" not found.') % self.kwargs['slug'])

    def get_template_names(self):
        # Each renderer supplies its own chart template.
        return (self.get_object().renderer.template_name,)
class StatisticQueueView(ConfirmView):
    """Confirmation view that queues a statistic for background recalculation."""
    view_permission = permission_statistics_view

    def get_extra_context(self):
        obj = self.get_object()
        return {
            'namespace': obj.namespace,
            'object': obj,
            # Translators: This text is asking users if they want to queue
            # (to send to the queue) a statistic for it to be update ahead
            # of schedule
            'title': _(
                'Queue statistic "%s" to be updated?'
            ) % obj,
        }

    def get_object(self):
        try:
            return Statistic.get(slug=self.kwargs['slug'])
        except KeyError:
            raise Http404(_('Statistic "%s" not found.') % self.kwargs['slug'])

    def view_action(self):
        # Resolve the statistic once instead of hitting the registry twice
        # (the original called self.get_object() both for the slug and the
        # label of the success message).
        statistic = self.get_object()
        task_execute_statistic.delay(slug=statistic.slug)
        messages.success(
            message=_(
                'Statistic "%s" queued successfully for update.'
            ) % statistic.label, request=self.request
        )
| 31.031579 | 84 | 0.636364 | from django.contrib import messages
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.generics import ConfirmView, SimpleView, SingleObjectListView
from .classes import Statistic, StatisticNamespace
from .permissions import permission_statistics_view
from .tasks import task_execute_statistic
class NamespaceListView(SingleObjectListView):
extra_context = {
'hide_link': True,
'title': _('Statistics namespaces'),
}
template_name = 'appearance/generic_list.html'
view_permission = permission_statistics_view
def get_source_queryset(self):
return StatisticNamespace.get_all()
class NamespaceDetailView(SingleObjectListView):
view_permission = permission_statistics_view
def get_extra_context(self):
return {
'hide_link': True,
'object': self.get_namespace(),
'title': _('Namespace details for: %s') % self.get_namespace(),
}
def get_namespace(self):
return StatisticNamespace.get(slug=self.kwargs['slug'])
def get_source_queryset(self):
return self.get_namespace().statistics
class StatisticDetailView(SimpleView):
view_permission = permission_statistics_view
def get_extra_context(self):
obj = self.get_object()
return {
'chart_data': obj.get_chart_data(),
'namespace': obj.namespace,
'navigation_object_list': ('namespace', 'object'),
'no_data': not obj.get_results_data()['series'],
'object': obj,
'title': _('Results for: %s') % obj,
}
def get_object(self):
try:
return Statistic.get(self.kwargs['slug'])
except KeyError:
raise Http404(_('Statistic "%s" not found.') % self.kwargs['slug'])
def get_template_names(self):
return (self.get_object().renderer.template_name,)
class StatisticQueueView(ConfirmView):
view_permission = permission_statistics_view
def get_extra_context(self):
obj = self.get_object()
return {
'namespace': obj.namespace,
'object': obj,
'title': _(
'Queue statistic "%s" to be updated?'
) % obj,
}
def get_object(self):
try:
return Statistic.get(slug=self.kwargs['slug'])
except KeyError:
raise Http404(_('Statistic "%s" not found.') % self.kwargs['slug'])
def view_action(self):
task_execute_statistic.delay(slug=self.get_object().slug)
messages.success(
message=_(
'Statistic "%s" queued successfully for update.'
) % self.get_object().label, request=self.request
)
| true | true |
f71b6ef7a59d7d39b2bcee735e05d0bb4fe7d665 | 2,447 | py | Python | display_recognized_faces.py | theTechie/face-recognition | 4236405914971fa971eb8dab7f31022f154ac10b | [
"MIT"
] | null | null | null | display_recognized_faces.py | theTechie/face-recognition | 4236405914971fa971eb8dab7f31022f154ac10b | [
"MIT"
] | null | null | null | display_recognized_faces.py | theTechie/face-recognition | 4236405914971fa971eb8dab7f31022f154ac10b | [
"MIT"
] | null | null | null | import face_recognition
from PIL import Image, ImageDraw
from pathlib import Path

import recognize_face

# Reference photos: one face per image, with the file stem as the person name.
known_path = Path("data/sample-2/jpeg/picked/known")
known_images = list(known_path.glob('*.jpeg'))
known_faces = [recognize_face.image_to_known_face(str(image_path), image_path.stem) for image_path in known_images]
print('I just learned to recognize %d persons... \n' % len(known_images))

unknown_path = Path("data/sample-4/unknown")
unknown_images = list(unknown_path.glob('**/*.jpeg'))
print('I am starting to identify %d unknown persons; lets see how many i know !! \n' % len(unknown_images))

output_path = Path("data/sample-4/output")

# Box colours, hoisted out of the loop: green = recognized, red = unknown.
known_color = (0, 255, 0)
unknown_color = (255, 0, 0)

for image_to_identify in unknown_images:
    unknown_image = face_recognition.load_image_file(str(image_to_identify))
    detected_faces = recognize_face.recognize(known_faces, unknown_image)

    # Convert to a PIL image so the detections can be drawn on top of it.
    pil_image = Image.fromarray(unknown_image)
    draw = ImageDraw.Draw(pil_image)

    for name, (top, right, bottom, left), distance in detected_faces:
        color = unknown_color if name == 'Unknown' else known_color

        # Box around the face.
        draw.rectangle(((left, top), (right, bottom)), outline=color)

        # Label bar below the face: "<name> - <match distance>".
        label = name + ' - ' + str("{0:.2f}".format(distance))
        text_width, text_height = draw.textsize(label)
        draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=color, outline=color)
        draw.text((left + 6, bottom - text_height - 5), label, fill=(255, 0, 0, 255))

    # Remove the drawing library from memory as per the Pillow docs.
    del draw

    pil_image.save(output_path / image_to_identify.name)
| 38.84127 | 115 | 0.707805 | import face_recognition
from PIL import Image, ImageDraw
from pathlib import Path
import recognize_face
known_path = Path("data/sample-2/jpeg/picked/known")
known_images = list(known_path.glob('*.jpeg'))
known_face_encodings = []
known_face_names = []
known_faces = [recognize_face.image_to_known_face(str(image_path), image_path.stem) for image_path in known_images]
print('I just learned to recognize %d persons... \n' % len(known_images))
unknown_path = Path("data/sample-4/unknown")
unknown_images = list(unknown_path.glob('**/*.jpeg'))
print('I am starting to identify %d unknown persons; lets see how many i know !! \n' % len(unknown_images))
output_path = Path("data/sample-4/output")
for image_to_identify in unknown_images:
unknown_image = face_recognition.load_image_file(str(image_to_identify))
detected_faces = recognize_face.recognize(known_faces, unknown_image)
pil_image = Image.fromarray(unknown_image)
draw = ImageDraw.Draw(pil_image)
known_color = (0, 255, 0)
unknown_color = (255, 0, 0)
for name, (top, right, bottom, left), distance in detected_faces:
if name == 'Unknown':
color = unknown_color
else:
color = known_color
draw.rectangle(((left, top), (right, bottom)), outline=color)
label = name + ' - ' + str("{0:.2f}".format(distance))
text_width, text_height = draw.textsize(label)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=color, outline=color)
draw.text((left + 6, bottom - text_height - 5), label, fill=(255, 0, 0, 255))
del draw
pil_image.save(output_path/image_to_identify.name)
| true | true |
f71b6fb24f25ebf3dab99241ef1f473abaf2eb72 | 4,448 | py | Python | releaseNewVersion.py | r-owen/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2015-04-29T20:28:20.000Z | 2015-04-29T20:28:20.000Z | releaseNewVersion.py | ApachePointObservatory/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2017-06-05T22:53:58.000Z | 2017-06-05T22:53:58.000Z | releaseNewVersion.py | r-owen/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2020-01-28T06:28:02.000Z | 2020-01-28T06:28:02.000Z | #!/usr/bin/env python
"""A script to release a new version of TUI from a git working copy
If run on a unix box it exports the current version of TUI and then zips that.
If run on MacOS X it also tries to build the Mac binary.
Not intended for use on Windows.
To use:
./releaseNewVersion.py
"""
from __future__ import with_statement
import os
import re
import shutil
import sys
import subprocess
roPath = os.environ.get("RO_DIR")
if not roPath:
print "RO not setup"
sys.exit(1)
else:
print "RO found at", roPath
PkgName = "TUI"
import TUI.Version
versionName = TUI.Version.VersionName
fullVersStr = "%s %s" % (versionName, TUI.Version.VersionDate)
queryStr = "Version in Version.py = %s; is this OK? (y/[n]) " % (fullVersStr,)
getOK = raw_input(queryStr)
if not getOK.lower().startswith("y"):
sys.exit(0)
versRegEx = re.compile(r"<h3>(\d.*\s\d\d\d\d-\d\d-\d\d)</h3>")
with file(os.path.join("TUI", "Help", "VersionHistory.html")) as vhist:
for line in vhist:
versMatch = versRegEx.match(line)
if versMatch:
histVersStr = versMatch.groups()[0]
if histVersStr == fullVersStr:
print "Version in VersionHistory.html matches"
break
else:
print "Error: version in VersionHistory.html = %s != %s" % (histVersStr, fullVersStr)
sys.exit(0)
print "Status of git repository:"
subprocess.call(["git", "status"])
getOK = raw_input("Is the git repository up to date? (y/[n]) ")
if not getOK.lower().startswith("y"):
sys.exit(0)
exportRoot = os.environ["HOME"]
exportFileName = "%s_%s_Source" % (PkgName, versionName)
exportPath = os.path.abspath(os.path.join(exportRoot, exportFileName))
zipFileName = "%s.zip" % (exportFileName,)
zipFilePath = os.path.abspath(os.path.join(exportRoot, zipFileName))
if os.path.exists(exportPath):
print "Export directory %r already exists" % (exportPath,)
getOK = raw_input("Should I delete the old %r? (yes/[n]) " % (exportPath,))
if not getOK.lower() == "yes":
sys.exit(0)
print "Deleting %r" % (exportPath,)
shutil.rmtree(exportPath)
if os.path.exists(zipFilePath):
getOK = raw_input("File %r already exists! Should I delete it? (yes/[n]) " % (zipFilePath,))
if not getOK.lower() == "yes":
sys.exit(0)
print "Deleting %r" % (zipFilePath,)
os.remove(zipFilePath)
print "Copying %s repository to %r" % (PkgName, exportPath)
# to write directly to a .zip file (but it won't include the RO package):
# git archive --prefix=<exportFileName>/ -o <zipFilePath> HEAD
#status = subprocess.call(["git", "archive", "--prefix=%s/" % (exportFileName,), "-o", zipFilePath, "HEAD"])
# to write to a directory, tar and untar in one command:
# git archive --format=tar --prefix=<exportFileName>/ HEAD | (cd <exportRoot> && tar xf -)
cmdStr = "git archive --format=tar --prefix=%s/ HEAD | (cd %s && tar xf -)" % \
(exportFileName, exportRoot)
status = subprocess.call(cmdStr, shell=True)
if status != 0:
print "git archive failed!"
sys.exit(1)
print "Copying RO repository"
roTempName = "ROTemp"
roTempDir = os.path.join(exportRoot, roTempName)
cmdStr = "git archive --format=tar --prefix=%s/ HEAD python/RO | (cd %s && tar xf -)" % \
(roTempName, exportRoot,)
status = subprocess.call(cmdStr, shell=True, cwd=roPath)
if status != 0:
print "git archive failed!"
sys.exit(1)
# copy RO source into the output repo and delete the empty extra crap
shutil.move(os.path.join(roTempDir, "python", "RO"), exportPath)
shutil.rmtree(os.path.join(roTempDir))
print "Zipping %r" % (exportPath,)
status = subprocess.call(["zip", "-r", "-q", zipFileName, exportFileName], cwd=exportRoot)
if status != 0:
print "Zip failed!"
else:
print "Source zipped"
if sys.platform == "darwin":
# open the directory in Finder, as a convenience for the user
status = subprocess.call(["open", exportRoot])
print "Building Mac version"
macBuildDir = os.path.join(exportPath, "BuildForMac")
status = subprocess.call(["python", "setup.py", "-q", "py2app"], cwd=macBuildDir)
if status != 0:
print "Mac build failed!"
else:
print "Mac build finished!"
status = subprocess.call(["open", os.path.join(macBuildDir, "dist")])
print "TUI releases: <http://www.apo.nmsu.edu/35m_operations/TUI-images/>"
print "TUI betas: <http://www.apo.nmsu.edu/35m_operations/TUI-images/files/>"
| 35.870968 | 108 | 0.665243 |
"""A script to release a new version of TUI from a git working copy
If run on a unix box it exports the current version of TUI and then zips that.
If run on MacOS X it also tries to build the Mac binary.
Not intended for use on Windows.
To use:
./releaseNewVersion.py
"""
from __future__ import with_statement
import os
import re
import shutil
import sys
import subprocess
roPath = os.environ.get("RO_DIR")
if not roPath:
print "RO not setup"
sys.exit(1)
else:
print "RO found at", roPath
PkgName = "TUI"
import TUI.Version
versionName = TUI.Version.VersionName
fullVersStr = "%s %s" % (versionName, TUI.Version.VersionDate)
queryStr = "Version in Version.py = %s; is this OK? (y/[n]) " % (fullVersStr,)
getOK = raw_input(queryStr)
if not getOK.lower().startswith("y"):
sys.exit(0)
versRegEx = re.compile(r"<h3>(\d.*\s\d\d\d\d-\d\d-\d\d)</h3>")
with file(os.path.join("TUI", "Help", "VersionHistory.html")) as vhist:
for line in vhist:
versMatch = versRegEx.match(line)
if versMatch:
histVersStr = versMatch.groups()[0]
if histVersStr == fullVersStr:
print "Version in VersionHistory.html matches"
break
else:
print "Error: version in VersionHistory.html = %s != %s" % (histVersStr, fullVersStr)
sys.exit(0)
print "Status of git repository:"
subprocess.call(["git", "status"])
getOK = raw_input("Is the git repository up to date? (y/[n]) ")
if not getOK.lower().startswith("y"):
sys.exit(0)
exportRoot = os.environ["HOME"]
exportFileName = "%s_%s_Source" % (PkgName, versionName)
exportPath = os.path.abspath(os.path.join(exportRoot, exportFileName))
zipFileName = "%s.zip" % (exportFileName,)
zipFilePath = os.path.abspath(os.path.join(exportRoot, zipFileName))
if os.path.exists(exportPath):
print "Export directory %r already exists" % (exportPath,)
getOK = raw_input("Should I delete the old %r? (yes/[n]) " % (exportPath,))
if not getOK.lower() == "yes":
sys.exit(0)
print "Deleting %r" % (exportPath,)
shutil.rmtree(exportPath)
if os.path.exists(zipFilePath):
getOK = raw_input("File %r already exists! Should I delete it? (yes/[n]) " % (zipFilePath,))
if not getOK.lower() == "yes":
sys.exit(0)
print "Deleting %r" % (zipFilePath,)
os.remove(zipFilePath)
print "Copying %s repository to %r" % (PkgName, exportPath)
# git archive --prefix=<exportFileName>/ -o <zipFilePath> HEAD
#status = subprocess.call(["git", "archive", "--prefix=%s/" % (exportFileName,), "-o", zipFilePath, "HEAD"])
# to write to a directory, tar and untar in one command:
# git archive --format=tar --prefix=<exportFileName>/ HEAD | (cd <exportRoot> && tar xf -)
cmdStr = "git archive --format=tar --prefix=%s/ HEAD | (cd %s && tar xf -)" % \
(exportFileName, exportRoot)
status = subprocess.call(cmdStr, shell=True)
if status != 0:
print "git archive failed!"
sys.exit(1)
print "Copying RO repository"
roTempName = "ROTemp"
roTempDir = os.path.join(exportRoot, roTempName)
cmdStr = "git archive --format=tar --prefix=%s/ HEAD python/RO | (cd %s && tar xf -)" % \
(roTempName, exportRoot,)
status = subprocess.call(cmdStr, shell=True, cwd=roPath)
if status != 0:
print "git archive failed!"
sys.exit(1)
# copy RO source into the output repo and delete the empty extra crap
shutil.move(os.path.join(roTempDir, "python", "RO"), exportPath)
shutil.rmtree(os.path.join(roTempDir))
print "Zipping %r" % (exportPath,)
status = subprocess.call(["zip", "-r", "-q", zipFileName, exportFileName], cwd=exportRoot)
if status != 0:
print "Zip failed!"
else:
print "Source zipped"
if sys.platform == "darwin":
# open the directory in Finder, as a convenience for the user
status = subprocess.call(["open", exportRoot])
print "Building Mac version"
macBuildDir = os.path.join(exportPath, "BuildForMac")
status = subprocess.call(["python", "setup.py", "-q", "py2app"], cwd=macBuildDir)
if status != 0:
print "Mac build failed!"
else:
print "Mac build finished!"
status = subprocess.call(["open", os.path.join(macBuildDir, "dist")])
print "TUI releases: <http://www.apo.nmsu.edu/35m_operations/TUI-images/>"
print "TUI betas: <http://www.apo.nmsu.edu/35m_operations/TUI-images/files/>"
| false | true |
f71b6fe68084b084c7f741b11c1012ffaf12dd0a | 3,230 | py | Python | srv/fluffi/data/fluffiweb/app/utils/ftp.py | sears-s/fluffi | 5f2f6d019041a6268199b69bf2f34487b18b84fe | [
"MIT"
] | 96 | 2019-09-19T10:28:05.000Z | 2022-02-28T11:53:06.000Z | srv/fluffi/data/fluffiweb/app/utils/ftp.py | sears-s/fluffi | 5f2f6d019041a6268199b69bf2f34487b18b84fe | [
"MIT"
] | 123 | 2019-11-19T09:47:14.000Z | 2021-10-19T03:10:51.000Z | srv/fluffi/data/fluffiweb/app/utils/ftp.py | sears-s/fluffi | 5f2f6d019041a6268199b69bf2f34487b18b84fe | [
"MIT"
] | 23 | 2019-11-11T06:04:56.000Z | 2022-02-11T15:35:26.000Z | # Copyright 2017-2020 Siemens AG
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Author(s): Michael Kraus, Junes Najah, Fabian Russwurm, Thomas Riedmaier
import io

from ftplib import FTP
class FTPConnector:
    """Small helper around ftplib for the FLUFFI FTP server.

    Every method opens a fresh anonymous session, performs one operation
    and disconnects again.
    """

    def __init__(self, ftpURL):
        # Host of the FTP server; the connection itself is opened per call.
        self.ftpURL = ftpURL
        self.ftpClient = FTP()

    def getListOfFilesOnFTPServer(self, path):
        """Return (name, name) pairs for every entry of *path* on the server."""
        self.ftpClient.connect(self.ftpURL)
        self.ftpClient.login()
        self.ftpClient.cwd(path)
        ls = self.ftpClient.nlst()
        # Each name is duplicated so the result can feed a (value, label) choice list.
        tupelsOfLS = zip(ls, ls)
        self.ftpClient.quit()
        return tupelsOfLS

    def getListOfArchitecturesOnFTPServer(self, path, group):
        """Like getListOfFilesOnFTPServer, but prefixes every name with "<group>-"."""
        self.ftpClient.connect(self.ftpURL)
        self.ftpClient.login()
        self.ftpClient.cwd(path)
        ls = [group + "-" + entry for entry in self.ftpClient.nlst()]
        tupelsOfLS = zip(ls, ls)
        self.ftpClient.quit()
        return tupelsOfLS

    def saveTargetFileOnFTPServer(self, targetFileData, name):
        """Upload raw target bytes as /SUT/<name without extension>.zip.

        FTP.storbinary only needs a binary file-like object, so the data is
        streamed straight from memory. The previous implementation wrote a
        tmp.zip workaround file to disk that was never deleted, and opened a
        read handle on it that was never closed.
        """
        self.ftpClient.connect(self.ftpURL)
        self.ftpClient.login()
        self.ftpClient.storbinary(
            "STOR /SUT/" + name.split('.', 1)[0] + ".zip",
            io.BytesIO(targetFileData))
        self.ftpClient.quit()

    def saveArchivedProjectOnFTPServer(self, fileName):
        """Upload a local archive as /archive/<fileName>; return True on success.

        Returns False (and prints an error) when the local file is missing.
        The original "File not found" branch was unreachable because open()
        raises instead of returning a falsy value.
        """
        try:
            myFile = open(fileName, 'rb')
        except IOError:
            print("Error: File not found")
            return False
        try:
            self.ftpClient.connect(self.ftpURL)
            self.ftpClient.login()
            self.ftpClient.storbinary("STOR /archive/" + fileName, myFile)
            self.ftpClient.quit()
        finally:
            # Close the local file even if the upload fails.
            myFile.close()
        return True
| 35.108696 | 107 | 0.653251 |
from ftplib import FTP
class FTPConnector:
def __init__(self, ftpURL):
self.ftpURL = ftpURL
self.ftpClient = FTP()
def getListOfFilesOnFTPServer(self, path):
self.ftpClient.connect(self.ftpURL)
self.ftpClient.login()
self.ftpClient.cwd(path)
ls = []
ls = self.ftpClient.nlst()
tupelsOfLS = zip(ls, ls)
self.ftpClient.quit()
return tupelsOfLS
def getListOfArchitecturesOnFTPServer(self, path, group):
self.ftpClient.connect(self.ftpURL)
self.ftpClient.login()
self.ftpClient.cwd(path)
ls = []
ls = self.ftpClient.nlst()
for i, w in enumerate(ls):
ls[i] = group + "-" + w
tupelsOfLS = zip(ls, ls)
self.ftpClient.quit()
return tupelsOfLS
def saveTargetFileOnFTPServer(self, targetFileData, name):
path = 'tmp.zip'
target = open(path, 'wb')
target.write(targetFileData)
target.close()
self.ftpClient.connect(self.ftpURL)
self.ftpClient.login()
f = open('tmp.zip', 'rb')
self.ftpClient.storbinary("STOR /SUT/" + name.split('.', 1)[0] + ".zip", f)
self.ftpClient.quit()
def saveArchivedProjectOnFTPServer(self, fileName):
self.ftpClient.connect(self.ftpURL)
self.ftpClient.login()
myFile = open(fileName, 'rb')
if myFile:
self.ftpClient.storbinary("STOR /archive/" + fileName, myFile)
self.ftpClient.quit()
myFile.close()
return True
print("Error: File not found")
self.ftpClient.quit()
myFile.close()
return False
| true | true |
f71b701cb0a9f7edf9be18a1b9115d0dbedac0c4 | 17,383 | py | Python | examples/language_model/bert/run_glue.py | weiwei1115/PaddleNLP | dd98f7f8b25b41d39228ba8a958b11a6212709a3 | [
"Apache-2.0"
] | 1 | 2021-02-24T14:03:55.000Z | 2021-02-24T14:03:55.000Z | examples/language_model/bert/run_glue.py | weiwei1115/PaddleNLP | dd98f7f8b25b41d39228ba8a958b11a6212709a3 | [
"Apache-2.0"
] | null | null | null | examples/language_model/bert/run_glue.py | weiwei1115/PaddleNLP | dd98f7f8b25b41d39228ba8a958b11a6212709a3 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import random
import time
import math
import distutils.util
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.datasets import GlueCoLA, GlueSST2, GlueMRPC, GlueSTSB, GlueQQP, GlueMNLI, GlueQNLI, GlueRTE
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
from paddlenlp.transformers import ElectraForSequenceClassification, ElectraTokenizer
from paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman
# Module-wide logging: timestamped, level-tagged messages at INFO and above.
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
# Maps each GLUE task name (the --task_name CLI value, lower-cased) to the
# (dataset class, evaluation metric class) pair used for training/eval.
TASK_CLASSES = {
    "cola": (GlueCoLA, Mcc),
    "sst-2": (GlueSST2, Accuracy),
    "mrpc": (GlueMRPC, AccuracyAndF1),
    "sts-b": (GlueSTSB, PearsonAndSpearman),
    "qqp": (GlueQQP, AccuracyAndF1),
    "mnli": (GlueMNLI, Accuracy),
    "qnli": (GlueQNLI, Accuracy),
    "rte": (GlueRTE, Accuracy),
}
# Maps the --model_type CLI value to the (model class, tokenizer class) pair.
MODEL_CLASSES = {
    "bert": (BertForSequenceClassification, BertTokenizer),
    "ernie": (ErnieForSequenceClassification, ErnieTokenizer)
}
def parse_args():
    """Build and parse the command-line arguments for GLUE fine-tuning.

    Returns:
        argparse.Namespace: all parsed training options.
    """
    def _strtobool(value):
        # Drop-in replacement for distutils.util.strtobool: distutils is
        # deprecated (PEP 632) and removed in Python 3.12. Same contract:
        # return 1 for truthy strings, 0 for falsy ones, ValueError otherwise.
        value = value.lower()
        if value in ("y", "yes", "t", "true", "on", "1"):
            return 1
        if value in ("n", "no", "f", "false", "off", "0"):
            return 0
        raise ValueError("invalid truth value %r" % (value,))
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " +
        ", ".join(TASK_CLASSES.keys()), )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()), )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(
            sum([
                list(classes[-1].pretrained_init_configuration.keys())
                for classes in MODEL_CLASSES.values()
            ], [])), )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Optional training hyper-parameters.
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.", )
    parser.add_argument(
        "--learning_rate",
        default=1e-4,
        type=float,
        help="The initial learning rate for Adam.")
    parser.add_argument(
        "--num_train_epochs",
        default=3,
        type=int,
        help="Total number of training epochs to perform.", )
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=100,
        help="Log every X updates steps.")
    parser.add_argument(
        "--save_steps",
        type=int,
        default=100,
        help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--batch_size",
        default=32,
        type=int,
        help="Batch size per GPU/CPU for training.", )
    parser.add_argument(
        "--weight_decay",
        default=0.0,
        type=float,
        help="Weight decay if we apply some.")
    parser.add_argument(
        "--warmup_steps",
        default=0,
        type=int,
        help="Linear warmup over warmup_steps. If > 0: Override warmup_proportion"
    )
    parser.add_argument(
        "--warmup_proportion",
        default=0.,
        type=float,
        help="Linear warmup proportion over total steps.")
    parser.add_argument(
        "--adam_epsilon",
        default=1e-6,
        type=float,
        help="Epsilon for Adam optimizer.")
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--seed", default=42, type=int, help="random seed for initialization")
    # Device / distributed / mixed-precision options.
    parser.add_argument(
        "--n_cards",
        default=1,
        type=int,
        help="Number cards for the training, only support multi cards in the gpu."
    )
    parser.add_argument(
        "--select_device",
        type=str,
        default="gpu",
        help="Device for selecting for the training.")
    parser.add_argument(
        "--use_amp",
        type=_strtobool,
        default=False,
        help="Enable mixed precision training.")
    parser.add_argument(
        "--scale_loss",
        type=float,
        default=2**15,
        help="The value of scale_loss for fp16.")
    args = parser.parse_args()
    return args
def set_seed(args):
    """Seed the Python, NumPy and Paddle RNGs from ``args.seed``.

    Every process uses the identical data seed so that shuffling stays
    consistent across shards. (Per-process op seeds for dropout, e.g.
    ``paddle.seed(args.seed + paddle.distributed.get_rank())``, could be
    used instead to diversify dropout masks.)
    """
    seed_value = args.seed
    for seeder in (random.seed, np.random.seed, paddle.seed):
        seeder(seed_value)
def evaluate(model, loss_fct, metric, data_loader):
    """Run one evaluation pass and log loss plus metric results.

    Switches the model to eval mode, accumulates ``metric`` over every batch
    of ``data_loader``, logs a metric-specific summary (the reported loss is
    that of the final batch), then restores train mode.
    """
    model.eval()
    metric.reset()
    for input_ids, segment_ids, labels in data_loader:
        logits = model(input_ids, segment_ids)
        loss = loss_fct(logits, labels)
        metric.update(metric.compute(logits, labels))
        res = metric.accumulate()
    # Pick the log format matching the metric type of the current task.
    if isinstance(metric, AccuracyAndF1):
        logger.info(
            "eval loss: %f, acc: %s, precision: %s, recall: %s, f1: %s, acc and f1: %s."
            % (loss.numpy(), res[0], res[1], res[2], res[3], res[4]))
    elif isinstance(metric, Mcc):
        logger.info("eval loss: %f, mcc: %s." % (loss.numpy(), res[0]))
    elif isinstance(metric, PearsonAndSpearman):
        logger.info(
            "eval loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s."
            % (loss.numpy(), res[0], res[1], res[2]))
    else:
        logger.info("eval loss: %f, acc: %s." % (loss.numpy(), res))
    model.train()
def convert_example(example,
                    tokenizer,
                    label_list,
                    max_seq_length=512,
                    is_test=False):
    """Turn one GLUE example into model-ready features.

    Tokenizes the one or two text segments of ``example``, truncates them to
    fit ``max_seq_length`` (reserving room for the special tokens), prepends
    [CLS], appends [SEP] after each segment, and maps tokens to ids.

    :param example: sequence whose last element is the label (unless
        ``is_test``) and whose remaining elements are the raw text segments.
    :param tokenizer: callable tokenizer exposing ``cls_token``,
        ``sep_token`` and ``convert_tokens_to_ids``.
    :param label_list: possible labels for classification; falsy for
        regression (labels then stay float32).
    :param max_seq_length: hard cap on the number of output token ids.
    :param is_test: when True, ``example`` carries no label and none is
        returned.
    :return: ``(input_ids, segment_ids, valid_length[, label])``.
    """

    def _clip_to_budget(seqs, limit):
        # Trim token lists in place so the special tokens still fit.
        if len(seqs) == 1:  # single sentence: room for [CLS] and [SEP]
            seqs[0] = seqs[0][:limit - 2]
        else:  # sentence pair: room for [CLS] and two [SEP]s
            first, second = seqs
            budget = limit - 3
            # Longest-first truncation: repeatedly drop from the longer one.
            while len(first) + len(second) > budget:
                (first if len(first) > len(second) else second).pop()
        return seqs

    def _join_with_separators(seqs, separators, seq_mask=0, separator_mask=1):
        # Interleave each segment with its separator while recording which
        # segment every output position belongs to.
        concat = []
        segment_ids = []
        for seg_idx, (sep, seq) in enumerate(zip(separators, seqs)):
            concat.extend(seq)
            concat.extend(sep)
            segment_ids.extend([seg_idx] * (len(seq) + len(sep)))
        if isinstance(seq_mask, int):
            seq_mask = [[seq_mask] * len(seq) for seq in seqs]
        if isinstance(separator_mask, int):
            separator_mask = [[separator_mask] * len(sep) for sep in separators]
        p_mask = []
        for s_mask, mask in zip(seq_mask, separator_mask):
            p_mask.extend(s_mask)
            p_mask.extend(mask)
        return concat, segment_ids, p_mask

    if not is_test:
        # Regression tasks pass a falsy label_list and keep float labels.
        label_dtype = "int64" if label_list else "float32"
        label = example[-1]
        example = example[:-1]
        if label_list:
            label = {lab: idx for idx, lab in enumerate(label_list)}[label]
        label = np.array([label], dtype=label_dtype)
    tokens_raw = [tokenizer(segment) for segment in example]
    tokens_trun = _clip_to_budget(tokens_raw, max_seq_length)
    tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]
    tokens, segment_ids, _ = _join_with_separators(
        tokens_trun, [[tokenizer.sep_token]] * len(tokens_trun))
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    valid_length = len(input_ids)
    if is_test:
        return input_ids, segment_ids, valid_length
    return input_ids, segment_ids, valid_length, label
def do_train(args):
    """Fine-tune a pretrained model on one GLUE task and evaluate/save periodically.

    Builds the dataset/tokenizer/model from the CLI args, trains with AdamW
    under a linear warmup-decay schedule (optionally with AMP), evaluates on
    the dev split(s) every ``args.save_steps`` steps, and saves checkpoints
    under ``args.output_dir``.
    """
    # Select the compute device and join the collective if multi-process.
    paddle.set_device(args.select_device)
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()
    set_seed(args)
    # Resolve task -> (dataset, metric) and model type -> (model, tokenizer).
    args.task_name = args.task_name.lower()
    dataset_class, metric_class = TASK_CLASSES[args.task_name]
    args.model_type = args.model_type.lower()
    model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    train_dataset = dataset_class.get_datasets(["train"])
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    # Feature conversion applied lazily to each raw example.
    trans_func = partial(
        convert_example,
        tokenizer=tokenizer,
        label_list=train_dataset.get_labels(),
        max_seq_length=args.max_seq_length)
    train_dataset = train_dataset.apply(trans_func, lazy=True)
    train_batch_sampler = paddle.io.DistributedBatchSampler(
        train_dataset, batch_size=args.batch_size, shuffle=True)
    # Collate: pad ids/segments, stack lengths and labels, then drop the
    # length field (index 2) since the model does not consume it.
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment
        Stack(),  # length
        Stack(dtype="int64" if train_dataset.get_labels() else "float32")  # label
    ): [data for i, data in enumerate(fn(samples)) if i != 2]
    train_data_loader = DataLoader(
        dataset=train_dataset,
        batch_sampler=train_batch_sampler,
        collate_fn=batchify_fn,
        num_workers=0,
        return_list=True)
    # MNLI ships two dev splits (matched/mismatched); other tasks have one.
    if args.task_name == "mnli":
        dev_dataset_matched, dev_dataset_mismatched = dataset_class.get_datasets(
            ["dev_matched", "dev_mismatched"])
        dev_dataset_matched = dev_dataset_matched.apply(trans_func, lazy=True)
        dev_dataset_mismatched = dev_dataset_mismatched.apply(
            trans_func, lazy=True)
        dev_batch_sampler_matched = paddle.io.BatchSampler(
            dev_dataset_matched, batch_size=args.batch_size, shuffle=False)
        dev_data_loader_matched = DataLoader(
            dataset=dev_dataset_matched,
            batch_sampler=dev_batch_sampler_matched,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=True)
        dev_batch_sampler_mismatched = paddle.io.BatchSampler(
            dev_dataset_mismatched, batch_size=args.batch_size, shuffle=False)
        dev_data_loader_mismatched = DataLoader(
            dataset=dev_dataset_mismatched,
            batch_sampler=dev_batch_sampler_mismatched,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=True)
    else:
        dev_dataset = dataset_class.get_datasets(["dev"])
        dev_dataset = dev_dataset.apply(trans_func, lazy=True)
        dev_batch_sampler = paddle.io.BatchSampler(
            dev_dataset, batch_size=args.batch_size, shuffle=False)
        dev_data_loader = DataLoader(
            dataset=dev_dataset,
            batch_sampler=dev_batch_sampler,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=True)
    # No label list means a regression task with a single output.
    num_classes = 1 if train_dataset.get_labels() == None else len(
        train_dataset.get_labels())
    model = model_class.from_pretrained(
        args.model_name_or_path, num_classes=num_classes)
    if paddle.distributed.get_world_size() > 1:
        model = paddle.DataParallel(model)
    # Explicit --max_steps overrides epochs * steps-per-epoch.
    num_training_steps = args.max_steps if args.max_steps > 0 else (
        len(train_data_loader) * args.num_train_epochs)
    # warmup_steps (absolute) takes priority over warmup_proportion.
    warmup = args.warmup_steps if args.warmup_steps > 0 else args.warmup_proportion
    lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
                                         warmup)
    # Weight decay is skipped for bias and normalization parameters.
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        beta1=0.9,
        beta2=0.999,
        epsilon=args.adam_epsilon,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        apply_decay_param_fun=lambda x: x in [
            p.name for n, p in model.named_parameters()
            if not any(nd in n for nd in ["bias", "norm"])
        ])
    # Cross-entropy for classification, MSE for regression (no label list).
    loss_fct = paddle.nn.loss.CrossEntropyLoss() if train_dataset.get_labels(
    ) else paddle.nn.loss.MSELoss()
    metric = metric_class()
    if args.use_amp:
        scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)
    global_step = 0
    tic_train = time.time()
    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_data_loader):
            global_step += 1
            input_ids, segment_ids, labels = batch
            # Forward pass runs under AMP autocast only when --use_amp is set.
            with paddle.amp.auto_cast(
                    args.use_amp,
                    custom_white_list=["layer_norm", "softmax", "gelu"]):
                logits = model(input_ids, segment_ids)
                loss = loss_fct(logits, labels)
            if args.use_amp:
                scaler.scale(loss).backward()
                scaler.minimize(optimizer, loss)
            else:
                loss.backward()
                optimizer.step()
            lr_scheduler.step()
            optimizer.clear_gradients()
            if global_step % args.logging_steps == 0:
                logger.info(
                    "global step %d/%d, epoch: %d, batch: %d, rank_id: %s, loss: %f, lr: %.10f, speed: %.4f step/s"
                    % (global_step, num_training_steps, epoch, step,
                       paddle.distributed.get_rank(), loss, optimizer.get_lr(),
                       args.logging_steps / (time.time() - tic_train)))
                tic_train = time.time()
            # Periodic evaluation + checkpointing (rank 0 only saves when
            # running multi-card).
            if global_step % args.save_steps == 0:
                tic_eval = time.time()
                if args.task_name == "mnli":
                    evaluate(model, loss_fct, metric, dev_data_loader_matched)
                    evaluate(model, loss_fct, metric,
                             dev_data_loader_mismatched)
                    logger.info("eval done total : %s s" %
                                (time.time() - tic_eval))
                else:
                    evaluate(model, loss_fct, metric, dev_data_loader)
                    logger.info("eval done total : %s s" %
                                (time.time() - tic_eval))
                if (not args.n_cards > 1) or paddle.distributed.get_rank() == 0:
                    output_dir = os.path.join(args.output_dir,
                                              "%s_ft_model_%d.pdparams" %
                                              (args.task_name, global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    # Need better way to get inner model of DataParallel
                    model_to_save = model._layers if isinstance(
                        model, paddle.DataParallel) else model
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
def print_arguments(args):
    """Pretty-print every parsed command-line argument, sorted by name."""
    print('----------- Configuration Arguments -----------')
    for name in sorted(vars(args)):
        print('%s: %s' % (name, getattr(args, name)))
    print('------------------------------------------------')
if __name__ == "__main__":
    args = parse_args()
    print_arguments(args)
    # Spawn one training process per card when multiple GPUs are requested;
    # otherwise run a single training process in-place.
    if args.n_cards > 1 and args.select_device == "gpu":
        paddle.distributed.spawn(do_train, args=(args, ), nprocs=args.n_cards)
    else:
        do_train(args)
| 38.037199 | 115 | 0.611805 |
import argparse
import logging
import os
import sys
import random
import time
import math
import distutils.util
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.datasets import GlueCoLA, GlueSST2, GlueMRPC, GlueSTSB, GlueQQP, GlueMNLI, GlueQNLI, GlueRTE
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
from paddlenlp.transformers import ElectraForSequenceClassification, ElectraTokenizer
from paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
TASK_CLASSES = {
"cola": (GlueCoLA, Mcc),
"sst-2": (GlueSST2, Accuracy),
"mrpc": (GlueMRPC, AccuracyAndF1),
"sts-b": (GlueSTSB, PearsonAndSpearman),
"qqp": (GlueQQP, AccuracyAndF1),
"mnli": (GlueMNLI, Accuracy),
"qnli": (GlueQNLI, Accuracy),
"rte": (GlueRTE, Accuracy),
}
MODEL_CLASSES = {
"bert": (BertForSequenceClassification, BertTokenizer),
"ernie": (ErnieForSequenceClassification, ErnieTokenizer)
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " +
", ".join(TASK_CLASSES.keys()), )
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(MODEL_CLASSES.keys()), )
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(
sum([
list(classes[-1].pretrained_init_configuration.keys())
for classes in MODEL_CLASSES.values()
], [])), )
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.", )
parser.add_argument(
"--learning_rate",
default=1e-4,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument(
"--num_train_epochs",
default=3,
type=int,
help="Total number of training epochs to perform.", )
parser.add_argument(
"--logging_steps",
type=int,
default=100,
help="Log every X updates steps.")
parser.add_argument(
"--save_steps",
type=int,
default=100,
help="Save checkpoint every X updates steps.")
parser.add_argument(
"--batch_size",
default=32,
type=int,
help="Batch size per GPU/CPU for training.", )
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.")
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps. If > 0: Override warmup_proportion"
)
parser.add_argument(
"--warmup_proportion",
default=0.,
type=float,
help="Linear warmup proportion over total steps.")
parser.add_argument(
"--adam_epsilon",
default=1e-6,
type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--seed", default=42, type=int, help="random seed for initialization")
parser.add_argument(
"--n_cards",
default=1,
type=int,
help="Number cards for the training, only support multi cards in the gpu."
)
parser.add_argument(
"--select_device",
type=str,
default="gpu",
help="Device for selecting for the training.")
parser.add_argument(
"--use_amp",
type=distutils.util.strtobool,
default=False,
help="Enable mixed precision training.")
parser.add_argument(
"--scale_loss",
type=float,
default=2**15,
help="The value of scale_loss for fp16.")
args = parser.parse_args()
return args
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
paddle.seed(args.seed)
def evaluate(model, loss_fct, metric, data_loader):
model.eval()
metric.reset()
for batch in data_loader:
input_ids, segment_ids, labels = batch
logits = model(input_ids, segment_ids)
loss = loss_fct(logits, labels)
correct = metric.compute(logits, labels)
metric.update(correct)
res = metric.accumulate()
if isinstance(metric, AccuracyAndF1):
logger.info(
"eval loss: %f, acc: %s, precision: %s, recall: %s, f1: %s, acc and f1: %s."
% (loss.numpy(), res[0], res[1], res[2], res[3], res[4]))
elif isinstance(metric, Mcc):
logger.info("eval loss: %f, mcc: %s." % (loss.numpy(), res[0]))
elif isinstance(metric, PearsonAndSpearman):
logger.info(
"eval loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s."
% (loss.numpy(), res[0], res[1], res[2]))
else:
logger.info("eval loss: %f, acc: %s." % (loss.numpy(), res))
model.train()
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
def _truncate_seqs(seqs, max_seq_length):
if len(seqs) == 1:
seqs[0] = seqs[0][0:(max_seq_length - 2)]
else:
tokens_a, tokens_b = seqs
max_seq_length -= 3
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_seq_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
return seqs
def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):
concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])
segment_ids = sum(
([i] * (len(seq) + len(sep))
for i, (sep, seq) in enumerate(zip(separators, seqs))), [])
if isinstance(seq_mask, int):
seq_mask = [[seq_mask] * len(seq) for seq in seqs]
if isinstance(separator_mask, int):
separator_mask = [[separator_mask] * len(sep) for sep in separators]
p_mask = sum((s_mask + mask
for sep, seq, s_mask, mask in zip(
separators, seqs, seq_mask, separator_mask)), [])
return concat, segment_ids, p_mask
if not is_test:
label_dtype = "int64" if label_list else "float32"
label = example[-1]
example = example[:-1]
if label_list:
label_map = {}
for (i, l) in enumerate(label_list):
label_map[l] = i
label = label_map[label]
label = np.array([label], dtype=label_dtype)
tokens_raw = [tokenizer(l) for l in example]
tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)
tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]
tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *
len(tokens_trun))
input_ids = tokenizer.convert_tokens_to_ids(tokens)
valid_length = len(input_ids)
if not is_test:
return input_ids, segment_ids, valid_length, label
else:
return input_ids, segment_ids, valid_length
def do_train(args):
paddle.set_device(args.select_device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
set_seed(args)
args.task_name = args.task_name.lower()
dataset_class, metric_class = TASK_CLASSES[args.task_name]
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
train_dataset = dataset_class.get_datasets(["train"])
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_dataset.get_labels(),
max_seq_length=args.max_seq_length)
train_dataset = train_dataset.apply(trans_func, lazy=True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_dataset, batch_size=args.batch_size, shuffle=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id),
Pad(axis=0, pad_val=tokenizer.pad_token_id),
Stack(),
Stack(dtype="int64" if train_dataset.get_labels() else "float32")
): [data for i, data in enumerate(fn(samples)) if i != 2]
train_data_loader = DataLoader(
dataset=train_dataset,
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
if args.task_name == "mnli":
dev_dataset_matched, dev_dataset_mismatched = dataset_class.get_datasets(
["dev_matched", "dev_mismatched"])
dev_dataset_matched = dev_dataset_matched.apply(trans_func, lazy=True)
dev_dataset_mismatched = dev_dataset_mismatched.apply(
trans_func, lazy=True)
dev_batch_sampler_matched = paddle.io.BatchSampler(
dev_dataset_matched, batch_size=args.batch_size, shuffle=False)
dev_data_loader_matched = DataLoader(
dataset=dev_dataset_matched,
batch_sampler=dev_batch_sampler_matched,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
dev_batch_sampler_mismatched = paddle.io.BatchSampler(
dev_dataset_mismatched, batch_size=args.batch_size, shuffle=False)
dev_data_loader_mismatched = DataLoader(
dataset=dev_dataset_mismatched,
batch_sampler=dev_batch_sampler_mismatched,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
else:
dev_dataset = dataset_class.get_datasets(["dev"])
dev_dataset = dev_dataset.apply(trans_func, lazy=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_dataset, batch_size=args.batch_size, shuffle=False)
dev_data_loader = DataLoader(
dataset=dev_dataset,
batch_sampler=dev_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
num_classes = 1 if train_dataset.get_labels() == None else len(
train_dataset.get_labels())
model = model_class.from_pretrained(
args.model_name_or_path, num_classes=num_classes)
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
num_training_steps = args.max_steps if args.max_steps > 0 else (
len(train_data_loader) * args.num_train_epochs)
warmup = args.warmup_steps if args.warmup_steps > 0 else args.warmup_proportion
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
warmup)
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
beta1=0.9,
beta2=0.999,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
])
loss_fct = paddle.nn.loss.CrossEntropyLoss() if train_dataset.get_labels(
) else paddle.nn.loss.MSELoss()
metric = metric_class()
if args.use_amp:
scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)
global_step = 0
tic_train = time.time()
for epoch in range(args.num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
input_ids, segment_ids, labels = batch
with paddle.amp.auto_cast(
args.use_amp,
custom_white_list=["layer_norm", "softmax", "gelu"]):
logits = model(input_ids, segment_ids)
loss = loss_fct(logits, labels)
if args.use_amp:
scaler.scale(loss).backward()
scaler.minimize(optimizer, loss)
else:
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_gradients()
if global_step % args.logging_steps == 0:
logger.info(
"global step %d/%d, epoch: %d, batch: %d, rank_id: %s, loss: %f, lr: %.10f, speed: %.4f step/s"
% (global_step, num_training_steps, epoch, step,
paddle.distributed.get_rank(), loss, optimizer.get_lr(),
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
if global_step % args.save_steps == 0:
tic_eval = time.time()
if args.task_name == "mnli":
evaluate(model, loss_fct, metric, dev_data_loader_matched)
evaluate(model, loss_fct, metric,
dev_data_loader_mismatched)
logger.info("eval done total : %s s" %
(time.time() - tic_eval))
else:
evaluate(model, loss_fct, metric, dev_data_loader)
logger.info("eval done total : %s s" %
(time.time() - tic_eval))
if (not args.n_cards > 1) or paddle.distributed.get_rank() == 0:
output_dir = os.path.join(args.output_dir,
"%s_ft_model_%d.pdparams" %
(args.task_name, global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model._layers if isinstance(
model, paddle.DataParallel) else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
def print_arguments(args):
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).items()):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
if __name__ == "__main__":
args = parse_args()
print_arguments(args)
if args.n_cards > 1 and args.select_device == "gpu":
paddle.distributed.spawn(do_train, args=(args, ), nprocs=args.n_cards)
else:
do_train(args)
| true | true |
f71b70742d77f2a612297f4412d6829e00b6cebd | 21,406 | py | Python | pypureclient/flasharray/FA_2_13/api/file_systems_api.py | ashahid-ps/py-pure-client | 2e3565d37b2a41db69308769f6f485d08a7c46c3 | [
"BSD-2-Clause"
] | null | null | null | pypureclient/flasharray/FA_2_13/api/file_systems_api.py | ashahid-ps/py-pure-client | 2e3565d37b2a41db69308769f6f485d08a7c46c3 | [
"BSD-2-Clause"
] | null | null | null | pypureclient/flasharray/FA_2_13/api/file_systems_api.py | ashahid-ps/py-pure-client | 2e3565d37b2a41db69308769f6f485d08a7c46c3 | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class FileSystemsApi(object):
    def __init__(self, api_client):
        """Store the low-level API client used to issue HTTP requests.

        :param api_client: client object providing ``select_header_accept``,
            ``select_header_content_type`` and ``call_api`` (see the request
            methods below).
        """
        self.api_client = api_client
def api213_file_systems_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Delete file system
Deletes a file system that has been destroyed and is pending eradication. Eradicated file systems cannot be recovered. File systems are destroyed using the PATCH method.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api213_file_systems_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.13/file-systems', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
    def api213_file_systems_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        continuation_token=None,  # type: str
        destroyed=None,  # type: bool
        filter=None,  # type: str
        ids=None,  # type: List[str]
        limit=None,  # type: int
        names=None,  # type: List[str]
        offset=None,  # type: int
        sort=None,  # type: List[str]
        total_item_count=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.FileSystemGetResponse
        """List file systems
        Displays a list of file systems, including those pending eradication.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api213_file_systems_get_with_http_info(async_req=True)
        >>> result = thread.get()
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
        :param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                It can also be a tuple of (connection time, read time) timeouts.
        :return: FileSystemGetResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Callers may pass a bare scalar for list-typed parameters; wrap it.
        if ids is not None:
            if not isinstance(ids, list):
                ids = [ids]
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        if sort is not None:
            if not isinstance(sort, list):
                sort = [sort]
        # Snapshot every non-None argument as a request parameter.  NOTE:
        # this relies on locals(), so it must run before any other local
        # variables are introduced in this function.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # Client-side validation of the pagination bounds before any I/O.
        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api213_file_systems_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api213_file_systems_get`, must be a value greater than or equal to `0`")
        collection_formats = {}
        path_params = {}
        query_params = []
        # Map each supplied argument onto its query parameter; list-valued
        # parameters are serialized as comma-separated values ('csv').
        if 'continuation_token' in params:
            query_params.append(('continuation_token', params['continuation_token']))
        if 'destroyed' in params:
            query_params.append(('destroyed', params['destroyed']))
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'ids' in params:
            query_params.append(('ids', params['ids']))
            collection_formats['ids'] = 'csv'
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and async dispatch to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/api/2.13/file-systems', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileSystemGetResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
    def api213_file_systems_patch_with_http_info(
        self,
        file_system=None,  # type: models.FileSystemPatch
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        ids=None,  # type: List[str]
        names=None,  # type: List[str]
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.FileSystemResponse
        """Modify a file system
        Modifies a file system. You can rename, destroy, move, or recover a file system. To rename a file system, set `name` to the new name. To destroy a file system, set `destroyed=true`. To move a file system, set 'pod' to the destination pod reference. To recover a file system that has been destroyed and is pending eradication, set `destroyed=false`.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api213_file_systems_patch_with_http_info(file_system, async_req=True)
        >>> result = thread.get()
        :param FileSystemPatch file_system: (required)
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                It can also be a tuple of (connection time, read time) timeouts.
        :return: FileSystemResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Callers may pass a bare scalar for list-typed parameters; wrap it.
        if ids is not None:
            if not isinstance(ids, list):
                ids = [ids]
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        # Snapshot every non-None argument as a request parameter.  NOTE:
        # this relies on locals(), so it must stay ahead of any other local
        # variables introduced below.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # verify the required parameter 'file_system' is set
        if file_system is None:
            raise TypeError("Missing the required parameter `file_system` when calling `api213_file_systems_patch`")
        collection_formats = {}
        path_params = {}
        query_params = []
        # List-valued query parameters are serialized comma-separated ('csv').
        if 'ids' in params:
            query_params.append(('ids', params['ids']))
            collection_formats['ids'] = 'csv'
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # The FileSystemPatch model is sent as the JSON request body.
        if 'file_system' in params:
            body_params = params['file_system']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and async dispatch to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/api/2.13/file-systems', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileSystemResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
    def api213_file_systems_post_with_http_info(
        self,
        names=None,  # type: List[str]
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.FileSystemResponse
        """Create file system
        Creates one or more file systems.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api213_file_systems_post_with_http_info(names, async_req=True)
        >>> result = thread.get()
        :param list[str] names: Performs the operation on the unique name specified. For example, `name01`. Enter multiple names in comma-separated format. (required)
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                It can also be a tuple of (connection time, read time) timeouts.
        :return: FileSystemResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Callers may pass a bare scalar for the list-typed parameter; wrap it.
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        # Snapshot every non-None argument as a request parameter.  NOTE:
        # this relies on locals(), so it must stay ahead of any other local
        # variables introduced below.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # verify the required parameter 'names' is set
        if names is None:
            raise TypeError("Missing the required parameter `names` when calling `api213_file_systems_post`")
        collection_formats = {}
        path_params = {}
        query_params = []
        # The names list is serialized comma-separated ('csv').
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and async dispatch to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/api/2.13/file-systems', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileSystemResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
| 47.568889 | 671 | 0.640989 |
from __future__ import absolute_import
import re
import six
from typing import List, Optional
from .. import models
class FileSystemsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api213_file_systems_delete_with_http_info(
self,
authorization=None,
x_request_id=None,
ids=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/file-systems', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_file_systems_get_with_http_info(
self,
authorization=None,
x_request_id=None,
continuation_token=None,
destroyed=None,
filter=None,
ids=None,
limit=None,
names=None,
offset=None,
sort=None,
total_item_count=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api213_file_systems_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api213_file_systems_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'destroyed' in params:
query_params.append(('destroyed', params['destroyed']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/file-systems', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_file_systems_patch_with_http_info(
self,
file_system=None,
authorization=None,
x_request_id=None,
ids=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if file_system is None:
raise TypeError("Missing the required parameter `file_system` when calling `api213_file_systems_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'file_system' in params:
body_params = params['file_system']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/file-systems', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_file_systems_post_with_http_info(
self,
names=None,
authorization=None,
x_request_id=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if names is None:
raise TypeError("Missing the required parameter `names` when calling `api213_file_systems_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/file-systems', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
| true | true |
f71b714d70924c77c72ecc7b8ec3e29d445e7a15 | 1,829 | py | Python | tests/test_servicer.py | yangtt0509/sea | f62bcdff00ef71e8c5b92bd5fc5f63d41b753ce2 | [
"MIT"
] | null | null | null | tests/test_servicer.py | yangtt0509/sea | f62bcdff00ef71e8c5b92bd5fc5f63d41b753ce2 | [
"MIT"
] | null | null | null | tests/test_servicer.py | yangtt0509/sea | f62bcdff00ef71e8c5b92bd5fc5f63d41b753ce2 | [
"MIT"
] | null | null | null | import grpc
from sea.servicer import ServicerMeta, msg2dict, stream2dict
from sea import exceptions
from sea.pb2 import default_pb2
from tests.wd.protos import helloworld_pb2
def test_meta_servicer(app, logstream):
    """ServicerMeta-wrapped handlers translate exceptions into gRPC status
    codes and log every call through the ``logstream`` fixture."""
    # Minimal stand-in for a grpc.ServicerContext: just records the status
    # code and details that the servicer sets on it.
    class HelloContext():
        def __init__(self):
            self.code = None
            self.details = None
        def set_code(self, code):
            self.code = code
        def set_details(self, details):
            self.details = details
    class HelloServicer(metaclass=ServicerMeta):
        def return_error(self, request, context):
            raise exceptions.BadRequestException('error')
        def return_normal(self, request, context):
            return 'Got it!'
    # Reset the captured log so positions below start from zero.
    logstream.truncate(0)
    logstream.seek(0)
    servicer = HelloServicer()
    context = HelloContext()
    ret = servicer.return_error(None, context)
    # The raised BadRequestException is converted, not propagated: the
    # wrapper returns an Empty message and sets code/details on the context.
    assert isinstance(ret, default_pb2.Empty)
    assert context.code is grpc.StatusCode.INVALID_ARGUMENT
    assert context.details == 'error'
    # Something must have been logged for the failing call.
    p = logstream.tell()
    assert p > 0
    content = logstream.getvalue()
    assert 'HelloServicer.return_error' in content
    ret = servicer.return_normal(None, context)
    assert ret == 'Got it!'
    # The successful call is logged too (stream advanced past p).
    assert logstream.tell() > p
def test_msg2dict(app):
    """msg2dict extracts the requested attributes from a plain object and
    converts a protobuf message into a dict of its set fields."""
    # Attribute-based extraction: explicit key list, including an attribute
    # ('tz') that already exists on the app fixture.
    app.name = 'v-name'
    app.msg = 'v-msg'
    extracted = msg2dict(app, ['name', 'msg', 'tz'])
    assert extracted == {'name': 'v-name', 'msg': 'v-msg', 'tz': 'Asia/Shanghai'}
    # Protobuf message: fields are discovered automatically.
    message = helloworld_pb2.HelloRequest(name="value")
    assert msg2dict(message) == {"name": "value"}
def test_stream2dict():
    """stream2dict lazily converts each protobuf message of a request
    stream into a dict, preserving order."""
    def make_requests():
        # Yield five messages whose name field encodes their index.
        for idx in range(5):
            yield helloworld_pb2.HelloRequest(name=str(idx))

    converted = stream2dict(make_requests())
    for idx, item in enumerate(converted):
        assert item == {"name": str(idx)}
| 25.760563 | 75 | 0.645708 | import grpc
from sea.servicer import ServicerMeta, msg2dict, stream2dict
from sea import exceptions
from sea.pb2 import default_pb2
from tests.wd.protos import helloworld_pb2
def test_meta_servicer(app, logstream):
class HelloContext():
def __init__(self):
self.code = None
self.details = None
def set_code(self, code):
self.code = code
def set_details(self, details):
self.details = details
class HelloServicer(metaclass=ServicerMeta):
def return_error(self, request, context):
raise exceptions.BadRequestException('error')
def return_normal(self, request, context):
return 'Got it!'
logstream.truncate(0)
logstream.seek(0)
servicer = HelloServicer()
context = HelloContext()
ret = servicer.return_error(None, context)
assert isinstance(ret, default_pb2.Empty)
assert context.code is grpc.StatusCode.INVALID_ARGUMENT
assert context.details == 'error'
p = logstream.tell()
assert p > 0
content = logstream.getvalue()
assert 'HelloServicer.return_error' in content
ret = servicer.return_normal(None, context)
assert ret == 'Got it!'
assert logstream.tell() > p
def test_msg2dict(app):
app.name = 'v-name'
app.msg = 'v-msg'
ret = msg2dict(app, ['name', 'msg', 'tz'])
assert ret == {'name': 'v-name', 'msg': 'v-msg', 'tz': 'Asia/Shanghai'}
request = helloworld_pb2.HelloRequest(name="value")
ret = msg2dict(request)
assert ret == {"name": "value"}
def test_stream2dict():
def stream_generator():
for i in range(5):
yield helloworld_pb2.HelloRequest(name=str(i))
ret = stream2dict(stream_generator())
for i, part in enumerate(ret):
assert part == {"name": str(i)}
| true | true |
f71b721716046fe128e3f99bbf0b9f20f56d1f2c | 22,880 | py | Python | venv/Lib/site-packages/sklearn/linear_model/_base.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 7 | 2021-01-30T17:42:00.000Z | 2022-01-09T08:08:48.000Z | venv/Lib/site-packages/sklearn/linear_model/_base.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 25 | 2020-11-16T15:36:41.000Z | 2021-06-01T05:15:31.000Z | venv/Lib/site-packages/sklearn/linear_model/_base.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 2 | 2021-09-13T17:20:56.000Z | 2021-11-21T16:05:16.000Z | """
Generalized Linear Models.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Vincent Michel <vincent.michel@inria.fr>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Maryan Morel <maryan.morel@polytechnique.edu>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import optimize
from scipy import sparse
from scipy.special import expit
from joblib import Parallel
from ..base import (BaseEstimator, ClassifierMixin, RegressorMixin,
MultiOutputMixin)
from ..utils import check_array
from ..utils.validation import FLOAT_DTYPES
from ..utils.validation import _deprecate_positional_args
from ..utils import check_random_state
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils._seq_dataset import ArrayDataset32, CSRDataset32
from ..utils._seq_dataset import ArrayDataset64, CSRDataset64
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.fixes import delayed
from ..preprocessing import normalize as f_normalize
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
SPARSE_INTERCEPT_DECAY = 0.01
# For sparse data intercept updates are scaled by this decay factor to avoid
# intercept oscillation.
def make_dataset(X, y, sample_weight, random_state=None):
    """Create ``Dataset`` abstraction for sparse and dense inputs.

    This also returns the ``intercept_decay``, which differs for sparse
    datasets.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data

    y : array-like, shape (n_samples, )
        Target values.

    sample_weight : numpy array of shape (n_samples,)
        The weight of each sample

    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    dataset
        The ``Dataset`` abstraction
    intercept_decay
        The intercept decay
    """
    rand = check_random_state(random_state)
    # seed should never be 0 in SequentialDataset64
    seed = rand.randint(1, np.iinfo(np.int32).max)

    # Select the dataset implementations matching the input precision.
    if X.dtype == np.float32:
        csr_cls, array_cls = CSRDataset32, ArrayDataset32
    else:
        csr_cls, array_cls = CSRDataset64, ArrayDataset64

    if sp.issparse(X):
        dataset = csr_cls(X.data, X.indptr, X.indices, y, sample_weight,
                          seed=seed)
        # Sparse data scales intercept updates by this decay factor to
        # avoid intercept oscillation.
        return dataset, SPARSE_INTERCEPT_DECAY

    dataset = array_cls(np.ascontiguousarray(X), y, sample_weight, seed=seed)
    return dataset, 1.0
def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
                     sample_weight=None, return_mean=False, check_input=True):
    """Center and scale data.
    Centers data to have mean zero along axis 0. If fit_intercept=False or if
    the X is a sparse matrix, no centering is done, but normalization can still
    be applied. The function returns the statistics necessary to reconstruct
    the input data, which are X_offset, y_offset, X_scale, such that the output
        X = (X - X_offset) / X_scale
    X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
    then the weighted mean of X and y is zero, and not the mean itself. If
    return_mean=True, the mean, eventually weighted, is returned, independently
    of whether X was centered (option used for optimization with sparse data in
    coordinate_descend).
    This is here because nearly all linear models will want their data to be
    centered. This function also systematically makes y consistent with X.dtype
    """
    # A scalar sample_weight is a uniform weighting: equivalent to no
    # weighting at all for the purpose of computing means.
    if isinstance(sample_weight, numbers.Number):
        sample_weight = None
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
    if check_input:
        # check_array makes the copy when copy=True, so the manual copy
        # branch below is only needed when validation is skipped.
        X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],
                        dtype=FLOAT_DTYPES)
    elif copy:
        if sp.issparse(X):
            X = X.copy()
        else:
            # order='K' preserves the memory layout of the input.
            X = X.copy(order='K')
    y = np.asarray(y, dtype=X.dtype)
    if fit_intercept:
        if sp.issparse(X):
            # Sparse X is never centered (that would densify it); only the
            # statistics are computed, and optionally the norm scaling is
            # applied in place.
            X_offset, X_var = mean_variance_axis(X, axis=0)
            if not return_mean:
                X_offset[:] = X.dtype.type(0)
            if normalize:
                # TODO: f_normalize could be used here as well but the function
                # inplace_csr_row_normalize_l2 must be changed such that it
                # can return also the norms computed internally
                # transform variance to norm in-place
                X_var *= X.shape[0]
                X_scale = np.sqrt(X_var, X_var)
                del X_var
                # Guard against division by zero for constant features.
                X_scale[X_scale == 0] = 1
                inplace_column_scale(X, 1. / X_scale)
            else:
                X_scale = np.ones(X.shape[1], dtype=X.dtype)

        else:
            # Dense X: center in place (possibly weighted), then optionally
            # scale each column to unit L2 norm.
            X_offset = np.average(X, axis=0, weights=sample_weight)
            X -= X_offset
            if normalize:
                X, X_scale = f_normalize(X, axis=0, copy=False,
                                         return_norm=True)
            else:
                X_scale = np.ones(X.shape[1], dtype=X.dtype)
        y_offset = np.average(y, axis=0, weights=sample_weight)
        y = y - y_offset
    else:
        # No intercept: identity transformation statistics.
        X_offset = np.zeros(X.shape[1], dtype=X.dtype)
        X_scale = np.ones(X.shape[1], dtype=X.dtype)
        if y.ndim == 1:
            y_offset = X.dtype.type(0)
        else:
            y_offset = np.zeros(y.shape[1], dtype=X.dtype)

    return X, y, X_offset, y_offset, X_scale
# TODO: _rescale_data should be factored into _preprocess_data.
# Currently, the fact that sag implements its own way to deal with
# sample_weight makes the refactoring tricky.
def _rescale_data(X, y, sample_weight):
    """Rescale data sample-wise by square root of sample_weight.
    For many linear models, this enables easy support for sample_weight.
    Returns
    -------
    X_rescaled : {array-like, sparse matrix}
    y_rescaled : {array-like, sparse matrix}
    """
    n_samples = X.shape[0]
    weights = np.asarray(sample_weight)
    if weights.ndim == 0:
        # Scalar weight: broadcast to one entry per sample.
        weights = np.full(n_samples, weights, dtype=weights.dtype)
    sqrt_weights = np.sqrt(weights)
    # Diagonal matrix of sqrt(weights); multiplying from the left rescales
    # each row, and works for both dense and sparse X/y.
    scaling = sparse.dia_matrix((sqrt_weights, 0),
                                shape=(n_samples, n_samples))
    return safe_sparse_dot(scaling, X), safe_sparse_dot(scaling, y)
class LinearModel(BaseEstimator, metaclass=ABCMeta):
    """Base class for Linear Models"""
    @abstractmethod
    def fit(self, X, y):
        """Fit model."""
    def _decision_function(self, X):
        # Linear predictor X @ coef_.T + intercept_; safe_sparse_dot keeps
        # this working for dense and sparse inputs alike.
        check_is_fitted(self)
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
        return safe_sparse_dot(X, self.coef_.T,
                               dense_output=True) + self.intercept_
    def predict(self, X):
        """
        Predict using the linear model.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.
        Returns
        -------
        C : array, shape (n_samples,)
            Returns predicted values.
        """
        return self._decision_function(X)
    # Expose the module-level helper on the class so subclasses can call
    # self._preprocess_data(...); staticmethod() prevents it from being
    # bound as an instance method.
    _preprocess_data = staticmethod(_preprocess_data)
    def _set_intercept(self, X_offset, y_offset, X_scale):
        """Set the intercept_
        """
        if self.fit_intercept:
            # Undo the column scaling applied during preprocessing so that
            # coef_ refers to the original (unscaled) feature space.
            self.coef_ = self.coef_ / X_scale
            self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
        else:
            self.intercept_ = 0.
    def _more_tags(self):
        return {'requires_y': True}
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
    """Mixin for linear classifiers.
    Handles prediction for sparse and dense X.
    """
    def decision_function(self, X):
        """
        Predict confidence scores for samples.
        The confidence score for a sample is proportional to the signed
        distance of that sample to the hyperplane.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.
        Returns
        -------
        array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
            Confidence scores per (sample, class) combination. In the binary
            case, confidence score for self.classes_[1] where >0 means this
            class would be predicted.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')
        expected_features = self.coef_.shape[1]
        if X.shape[1] != expected_features:
            raise ValueError("X has %d features per sample; expecting %d"
                             % (X.shape[1], expected_features))
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        if scores.shape[1] == 1:
            # Binary problem: return a flat vector of scores.
            return scores.ravel()
        return scores
    def predict(self, X):
        """
        Predict class labels for samples in X.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.
        Returns
        -------
        C : array, shape [n_samples]
            Predicted class label per sample.
        """
        scores = self.decision_function(X)
        if scores.ndim == 1:
            # Binary case: a positive score selects classes_[1].
            label_positions = (scores > 0).astype(int)
        else:
            label_positions = np.argmax(scores, axis=1)
        return self.classes_[label_positions]
    def _predict_proba_lr(self, X):
        """Probability estimation for OvR logistic regression.
        Positive class probabilities are computed as
        1. / (1. + np.exp(-self.decision_function(X)));
        multiclass is handled by normalizing that over all classes.
        """
        prob = self.decision_function(X)
        expit(prob, out=prob)
        if prob.ndim != 1:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
        return np.vstack([1 - prob, prob]).T
class SparseCoefMixin:
    """Mixin for converting coef_ to and from CSR format.
    L1-regularizing estimators should inherit this.
    """
    def densify(self):
        """
        Convert coefficient matrix to dense array format.
        Turns the ``coef_`` member back into a ``numpy.ndarray``. Dense is
        the default representation of ``coef_`` and is required for fitting,
        so this call is only needed on models that were previously
        sparsified; on an already-dense model it is a no-op.
        Returns
        -------
        self
            Fitted estimator.
        """
        msg = "Estimator, %(name)s, must be fitted before densifying."
        check_is_fitted(self, msg=msg)
        coef = self.coef_
        if sp.issparse(coef):
            self.coef_ = coef.toarray()
        return self
    def sparsify(self):
        """
        Convert coefficient matrix to sparse format.
        Stores the ``coef_`` member as a ``scipy.sparse`` CSR matrix, which
        for L1-regularized models can be far more memory- and
        storage-efficient than the usual ``numpy.ndarray`` representation.
        The ``intercept_`` member is left untouched.
        Returns
        -------
        self
            Fitted estimator.
        Notes
        -----
        For models that are not actually sparse, i.e. when few entries of
        ``coef_`` are zero, this may *increase* memory usage; as a rule of
        thumb sparsifying pays off once the number of zeros, computable with
        ``(coef_ == 0).sum()``, exceeds 50% of the entries.
        After calling this method, further fitting with the partial_fit
        method (if any) will not work until you call densify.
        """
        msg = "Estimator, %(name)s, must be fitted before sparsifying."
        check_is_fitted(self, msg=msg)
        self.coef_ = sp.csr_matrix(self.coef_)
        return self
class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
    """
    Ordinary least squares Linear Regression.
    LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
    to minimize the residual sum of squares between the observed targets in
    the dataset, and the targets predicted by the linear approximation.
    Parameters
    ----------
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be centered).
    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : bool, default=True
        If True, X will be copied; else, it may be overwritten.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This will only provide
        speedup for n_targets > 1 and sufficient large problems.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    positive : bool, default=False
        When set to ``True``, forces the coefficients to be positive. This
        option is only supported for dense arrays.
        .. versionadded:: 0.24
    Attributes
    ----------
    coef_ : array of shape (n_features, ) or (n_targets, n_features)
        Estimated coefficients for the linear regression problem.
        If multiple targets are passed during the fit (y 2D), this
        is a 2D array of shape (n_targets, n_features), while if only
        one target is passed, this is a 1D array of length n_features.
    rank_ : int
        Rank of matrix `X`. Only available when `X` is dense.
    singular_ : array of shape (min(X, y),)
        Singular values of `X`. Only available when `X` is dense.
    intercept_ : float or array of shape (n_targets,)
        Independent term in the linear model. Set to 0.0 if
        `fit_intercept = False`.
    See Also
    --------
    Ridge : Ridge regression addresses some of the
        problems of Ordinary Least Squares by imposing a penalty on the
        size of the coefficients with l2 regularization.
    Lasso : The Lasso is a linear model that estimates
        sparse coefficients with l1 regularization.
    ElasticNet : Elastic-Net is a linear regression
        model trained with both l1 and l2 -norm regularization of the
        coefficients.
    Notes
    -----
    From the implementation point of view, this is just plain Ordinary
    Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares
    (scipy.optimize.nnls) wrapped as a predictor object.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LinearRegression
    >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
    >>> # y = 1 * x_0 + 2 * x_1 + 3
    >>> y = np.dot(X, np.array([1, 2])) + 3
    >>> reg = LinearRegression().fit(X, y)
    >>> reg.score(X, y)
    1.0
    >>> reg.coef_
    array([1., 2.])
    >>> reg.intercept_
    3.0...
    >>> reg.predict(np.array([[3, 5]]))
    array([16.])
    """
    @_deprecate_positional_args
    def __init__(self, *, fit_intercept=True, normalize=False, copy_X=True,
                 n_jobs=None, positive=False):
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.n_jobs = n_jobs
        self.positive = positive
    def fit(self, X, y, sample_weight=None):
        """
        Fit linear model.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data
        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary
        sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample
            .. versionadded:: 0.17
               parameter *sample_weight* support to LinearRegression.
        Returns
        -------
        self : returns an instance of self.
        """
        n_jobs_ = self.n_jobs
        # The non-negative solver (optimize.nnls) only supports dense input.
        accept_sparse = False if self.positive else ['csr', 'csc', 'coo']
        X, y = self._validate_data(X, y, accept_sparse=accept_sparse,
                                   y_numeric=True, multi_output=True)
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X,
                                                 dtype=X.dtype)
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
            copy=self.copy_X, sample_weight=sample_weight,
            return_mean=True)
        if sample_weight is not None:
            # Sample weight can be implemented via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)
        if self.positive:
            if y.ndim < 2:
                self.coef_, self._residues = optimize.nnls(X, y)
            else:
                # scipy.optimize.nnls cannot handle y with shape (M, K)
                # -> solve each target column independently, in parallel.
                outs = Parallel(n_jobs=n_jobs_)(
                        delayed(optimize.nnls)(X, y[:, j])
                        for j in range(y.shape[1]))
                self.coef_, self._residues = map(np.vstack, zip(*outs))
        elif sp.issparse(X):
            # Sparse X was not actually centered by _preprocess_data; apply
            # the centering implicitly through a LinearOperator so that the
            # sparsity of X is preserved.
            X_offset_scale = X_offset / X_scale
            def matvec(b):
                return X.dot(b) - b.dot(X_offset_scale)
            def rmatvec(b):
                return X.T.dot(b) - X_offset_scale * np.sum(b)
            X_centered = sparse.linalg.LinearOperator(shape=X.shape,
                                                      matvec=matvec,
                                                      rmatvec=rmatvec)
            if y.ndim < 2:
                out = sparse_lsqr(X_centered, y)
                self.coef_ = out[0]
                self._residues = out[3]
            else:
                # sparse_lstsq cannot handle y with shape (M, K)
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(sparse_lsqr)(X_centered, y[:, j].ravel())
                    for j in range(y.shape[1]))
                self.coef_ = np.vstack([out[0] for out in outs])
                self._residues = np.vstack([out[3] for out in outs])
        else:
            # Dense, unconstrained case: plain least squares via LAPACK.
            self.coef_, self._residues, self.rank_, self.singular_ = \
                linalg.lstsq(X, y)
            self.coef_ = self.coef_.T
        if y.ndim == 1:
            self.coef_ = np.ravel(self.coef_)
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy,
             check_input=True, sample_weight=None):
    """Aux function used at beginning of fit in linear models
    Centers/normalizes X and y, applies sample weights, and resolves the
    ``precompute`` Gram matrix (X.T @ X) and the ``Xy`` product consumed
    by coordinate-descent style solvers.
    Parameters
    ----------
    Xy : ndarray or None
        Precomputed X.T @ y; invalidated whenever the Gram matrix has to
        be recomputed.
    precompute : bool, 'auto' or array-like
        Either a precomputed Gram matrix, or a directive to compute one
        ('auto' means: only when n_samples > n_features).
    check_input : bool, default=True
        Whether X should be validated inside _preprocess_data.
    sample_weight : array-like of shape (n_samples,) or None
        Per-sample weights, applied by square-root rescaling of X and y.
    Returns
    -------
    X, y, X_offset, y_offset, X_scale, precompute, Xy
    """
    n_samples, n_features = X.shape
    if sparse.isspmatrix(X):
        # copy is not needed here as X is not modified inplace when X is sparse
        precompute = False
        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X, y, fit_intercept=fit_intercept, normalize=normalize,
            copy=False, return_mean=True, check_input=check_input)
    else:
        # copy was done in fit if necessary
        X, y, X_offset, y_offset, X_scale = _preprocess_data(
            X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy,
            check_input=check_input, sample_weight=sample_weight)
    if sample_weight is not None:
        X, y = _rescale_data(X, y, sample_weight=sample_weight)
    # A user-supplied Gram matrix no longer matches X once X has been
    # centered or normalized: discard it and recompute.
    if hasattr(precompute, '__array__') and (
            fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or
            normalize and not np.allclose(X_scale, np.ones(n_features))):
        warnings.warn("Gram matrix was provided but X was centered"
                      " to fit intercept, "
                      "or X was normalized : recomputing Gram matrix.",
                      UserWarning)
        # recompute Gram
        precompute = 'auto'
        Xy = None
    # precompute if n_samples > n_features
    if isinstance(precompute, str) and precompute == 'auto':
        precompute = (n_samples > n_features)
    if precompute is True:
        # make sure that the 'precompute' array is contiguous.
        precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,
                              order='C')
        np.dot(X.T, X, out=precompute)
    if not hasattr(precompute, '__array__'):
        Xy = None  # cannot use Xy if precompute is not Gram
    if hasattr(precompute, '__array__') and Xy is None:
        # NOTE(review): np.find_common_type is deprecated in NumPy >= 1.25;
        # np.result_type is the suggested replacement — confirm before porting.
        common_dtype = np.find_common_type([X.dtype, y.dtype], [])
        if y.ndim == 1:
            # Xy is 1d, make sure it is contiguous.
            Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
            np.dot(X.T, y, out=Xy)
        else:
            # Make sure that Xy is always F contiguous even if X or y are not
            # contiguous: the goal is to make it fast to extract the data for a
            # specific target.
            n_targets = y.shape[1]
            Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,
                          order='F')
            np.dot(y.T, X, out=Xy.T)
    return X, y, X_offset, y_offset, X_scale, precompute, Xy
| 35.583204 | 79 | 0.612981 |
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import optimize
from scipy import sparse
from scipy.special import expit
from joblib import Parallel
from ..base import (BaseEstimator, ClassifierMixin, RegressorMixin,
MultiOutputMixin)
from ..utils import check_array
from ..utils.validation import FLOAT_DTYPES
from ..utils.validation import _deprecate_positional_args
from ..utils import check_random_state
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils._seq_dataset import ArrayDataset32, CSRDataset32
from ..utils._seq_dataset import ArrayDataset64, CSRDataset64
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.fixes import delayed
from ..preprocessing import normalize as f_normalize
SPARSE_INTERCEPT_DECAY = 0.01
def make_dataset(X, y, sample_weight, random_state=None):
    """Wrap (X, y, sample_weight) in a ``_seq_dataset`` backend.
    Selects the 32- or 64-bit dataset class to match ``X.dtype`` and the
    CSR- or array-backed variant depending on sparsity.
    Returns
    -------
    dataset
        The wrapped data.
    intercept_decay : float
        Decay applied to intercept updates (SPARSE_INTERCEPT_DECAY, i.e.
        smaller, for sparse input; 1.0 for dense input).
    """
    rng = check_random_state(random_state)
    # Seed for the dataset's internal RNG; drawn in [1, int32 max), so 0
    # is never used.
    seed = rng.randint(1, np.iinfo(np.int32).max)
    if X.dtype == np.float32:
        CSRData = CSRDataset32
        ArrayData = ArrayDataset32
    else:
        CSRData = CSRDataset64
        ArrayData = ArrayDataset64
    if sp.issparse(X):
        dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight,
                          seed=seed)
        intercept_decay = SPARSE_INTERCEPT_DECAY
    else:
        # The array backend requires C-contiguous data.
        X = np.ascontiguousarray(X)
        dataset = ArrayData(X, y, sample_weight, seed=seed)
        intercept_decay = 1.0
    return dataset, intercept_decay
def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
                     sample_weight=None, return_mean=False, check_input=True):
    """Center and scale data.
    Centers data to have mean zero along axis 0. If fit_intercept=False or
    X is sparse, no centering is done, but normalization can still be
    applied. Returns the statistics (X_offset, y_offset, X_scale) needed to
    reconstruct the input; X_scale is the L2 norm of X - X_offset. If
    sample_weight is given, weighted means are used. If return_mean=True,
    the (possibly weighted) mean of X is returned even when sparse X is not
    actually centered (option used for optimization with sparse data in
    coordinate descent).
    """
    # A scalar weight rescales every sample equally and does not change the
    # weighted averages below, so it can simply be dropped.
    if isinstance(sample_weight, numbers.Number):
        sample_weight = None
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
    if check_input:
        X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],
                        dtype=FLOAT_DTYPES)
    elif copy:
        if sp.issparse(X):
            X = X.copy()
        else:
            X = X.copy(order='K')
    y = np.asarray(y, dtype=X.dtype)
    if fit_intercept:
        if sp.issparse(X):
            # Sparse X is never centered in place (that would densify it);
            # only the offset is reported, and only when return_mean=True.
            X_offset, X_var = mean_variance_axis(X, axis=0)
            if not return_mean:
                X_offset[:] = X.dtype.type(0)
            if normalize:
                # Transform the per-column variance into an L2 norm,
                # in place (np.sqrt with out=X_var).
                X_var *= X.shape[0]
                X_scale = np.sqrt(X_var, X_var)
                del X_var
                # Constant columns get scale 1 to avoid division by zero.
                X_scale[X_scale == 0] = 1
                inplace_column_scale(X, 1. / X_scale)
            else:
                X_scale = np.ones(X.shape[1], dtype=X.dtype)
        else:
            X_offset = np.average(X, axis=0, weights=sample_weight)
            X -= X_offset
            if normalize:
                X, X_scale = f_normalize(X, axis=0, copy=False,
                                         return_norm=True)
            else:
                X_scale = np.ones(X.shape[1], dtype=X.dtype)
        y_offset = np.average(y, axis=0, weights=sample_weight)
        y = y - y_offset
    else:
        # No centering requested: report neutral statistics.
        X_offset = np.zeros(X.shape[1], dtype=X.dtype)
        X_scale = np.ones(X.shape[1], dtype=X.dtype)
        if y.ndim == 1:
            y_offset = X.dtype.type(0)
        else:
            y_offset = np.zeros(y.shape[1], dtype=X.dtype)
    return X, y, X_offset, y_offset, X_scale
def _rescale_data(X, y, sample_weight):
n_samples = X.shape[0]
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 0:
sample_weight = np.full(n_samples, sample_weight,
dtype=sample_weight.dtype)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
class LinearModel(BaseEstimator, metaclass=ABCMeta):
@abstractmethod
def fit(self, X, y):
def _decision_function(self, X):
check_is_fitted(self)
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
return self._decision_function(X)
_preprocess_data = staticmethod(_preprocess_data)
def _set_intercept(self, X_offset, y_offset, X_scale):
if self.fit_intercept:
self.coef_ = self.coef_ / X_scale
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
else:
self.intercept_ = 0.
def _more_tags(self):
return {'requires_y': True}
class LinearClassifierMixin(ClassifierMixin):
def decision_function(self, X):
check_is_fitted(self)
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_lr(self, X):
prob = self.decision_function(X)
expit(prob, out=prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
class SparseCoefMixin:
def densify(self):
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, msg=msg)
if sp.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, msg=msg)
self.coef_ = sp.csr_matrix(self.coef_)
return self
class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, normalize=False, copy_X=True,
n_jobs=None, positive=False):
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.n_jobs = n_jobs
self.positive = positive
def fit(self, X, y, sample_weight=None):
n_jobs_ = self.n_jobs
accept_sparse = False if self.positive else ['csr', 'csc', 'coo']
X, y = self._validate_data(X, y, accept_sparse=accept_sparse,
y_numeric=True, multi_output=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
copy=self.copy_X, sample_weight=sample_weight,
return_mean=True)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if self.positive:
if y.ndim < 2:
self.coef_, self._residues = optimize.nnls(X, y)
else:
# scipy.optimize.nnls cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(optimize.nnls)(X, y[:, j])
for j in range(y.shape[1]))
self.coef_, self._residues = map(np.vstack, zip(*outs))
elif sp.issparse(X):
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * np.sum(b)
X_centered = sparse.linalg.LinearOperator(shape=X.shape,
matvec=matvec,
rmatvec=rmatvec)
if y.ndim < 2:
out = sparse_lsqr(X_centered, y)
self.coef_ = out[0]
self._residues = out[3]
else:
# sparse_lstsq cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X_centered, y[:, j].ravel())
for j in range(y.shape[1]))
self.coef_ = np.vstack([out[0] for out in outs])
self._residues = np.vstack([out[3] for out in outs])
else:
self.coef_, self._residues, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy,
check_input=True, sample_weight=None):
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
# copy is not needed here as X is not modified inplace when X is sparse
precompute = False
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=False, return_mean=True, check_input=check_input)
else:
# copy was done in fit if necessary
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy,
check_input=check_input, sample_weight=sample_weight)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight=sample_weight)
if hasattr(precompute, '__array__') and (
fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or
normalize and not np.allclose(X_scale, np.ones(n_features))):
warnings.warn("Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
UserWarning)
# recompute Gram
precompute = 'auto'
Xy = None
# precompute if n_samples > n_features
if isinstance(precompute, str) and precompute == 'auto':
precompute = (n_samples > n_features)
if precompute is True:
# make sure that the 'precompute' array is contiguous.
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,
order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None # cannot use Xy if precompute is not Gram
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.find_common_type([X.dtype, y.dtype], [])
if y.ndim == 1:
# Xy is 1d, make sure it is contiguous.
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
# Make sure that Xy is always F contiguous even if X or y are not
# contiguous: the goal is to make it fast to extract the data for a
# specific target.
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,
order='F')
np.dot(y.T, X, out=Xy.T)
return X, y, X_offset, y_offset, X_scale, precompute, Xy
| true | true |
f71b728f6b7ee0bb3e520d6e3e1bb4a53edb161c | 595 | py | Python | website/system.py | timlyo/timlyo.github.io | fb3e3b65822351e49e3ba4ee17ba4ed5151c969a | [
"Apache-2.0"
] | 1 | 2016-01-14T13:52:25.000Z | 2016-01-14T13:52:25.000Z | website/system.py | timlyo/personalWebsite | fb3e3b65822351e49e3ba4ee17ba4ed5151c969a | [
"Apache-2.0"
] | null | null | null | website/system.py | timlyo/personalWebsite | fb3e3b65822351e49e3ba4ee17ba4ed5151c969a | [
"Apache-2.0"
] | null | null | null | import os
import psutil
COEFFICIENT = 2 ** 20
def get_other_ram() -> int:
    """RAM consumed by every process except the current one."""
    total_used = get_ram_used()
    own = get_process_ram()
    return total_used - own
def get_total_ram() -> int:
    """Total physical RAM, in MiB."""
    # .total is the first field of the virtual_memory() named tuple,
    # i.e. identical to indexing it with [0].
    return psutil.virtual_memory().total / COEFFICIENT
def get_process_ram() -> int:
    """Resident memory of the current process, in MiB."""
    current = psutil.Process(os.getpid())
    # .rss is the first field of memory_info(), equivalent to [0].
    return current.memory_info().rss / COEFFICIENT
def get_ram_used() -> float:
    """RAM used by all processes, in MiB.
    Bug fix: ``psutil.virtual_memory()`` fields are ordered
    (total, available, percent, used, free, ...), so the previous
    ``mem[4]`` read the *free* field, not *used* — contradicting both this
    docstring and ``get_other_ram``. The named ``used`` attribute is used
    instead, which is also robust to field reordering. The return
    annotation is corrected to ``float`` (true division).
    """
    mem = psutil.virtual_memory()
    return mem.used / COEFFICIENT
def get_cpu() -> float:
    """Return system-wide CPU utilisation as a percentage.
    ``psutil.cpu_percent()`` (without ``percpu=True``) returns a single
    float, not a per-core list, so the previous ``-> list`` annotation and
    the "all cpu core usage" wording were wrong. Behavior is unchanged;
    pass ``percpu=True`` if per-core values are ever needed.
    """
    return psutil.cpu_percent()
| 18.030303 | 46 | 0.697479 | import os
import psutil
COEFFICIENT = 2 ** 20
def get_other_ram() -> int:
return get_ram_used() - get_process_ram()
def get_total_ram() -> int:
mem = psutil.virtual_memory()
return mem[0] / COEFFICIENT
def get_process_ram() -> int:
process = psutil.Process(os.getpid())
return process.memory_info()[0] / COEFFICIENT
def get_ram_used() -> int:
mem = psutil.virtual_memory()
return mem[4] / COEFFICIENT
def get_cpu() -> list:
percentage = psutil.cpu_percent()
return percentage
| true | true |
f71b72b888e77e3334994d29892f03d292b9f189 | 1,820 | py | Python | libweasyl/libweasyl/configuration.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | 1 | 2019-02-15T04:21:48.000Z | 2019-02-15T04:21:48.000Z | libweasyl/libweasyl/configuration.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | 254 | 2017-12-23T19:36:43.000Z | 2020-04-14T21:46:13.000Z | libweasyl/libweasyl/configuration.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | 1 | 2017-12-23T18:42:16.000Z | 2017-12-23T18:42:16.000Z | """
Configuration of libweasyl.
libweasyl depends on some global state to be set up in order for e.g. database
access to work correctly. This might be nicer if python had a way of
parameterizing modules, but we can't, so this is what we have. It does mean
that only one libweasyl configuration can exist in a running python process.
"""
from libweasyl.models.media import DiskMediaItem, MediaItem
from libweasyl.models.meta import BaseQuery, _configure_dbsession
from libweasyl.staff import _init_staff
def configure_libweasyl(
        dbsession, not_found_exception, base_file_path,
        staff_config_dict, media_link_formatter_callback):
    """
    Configure libweasyl for the current application. This sets up some
    global state around libweasyl.
    This function can be called multiple times without issues; each call will
    replace the values set by the previous call.
    Parameters:
        dbsession: A SQLAlchemy ``scoped_session`` instance configured for the
            application's database usage.
        not_found_exception: An exception to be raised on the ``*_or_404``
            methods of queries.
        base_file_path: The path to where static content lives on disk.
        staff_config_dict: A dictionary of staff levels and user IDs.
        media_link_formatter_callback: A callback to format the URL for a media
            link. The callback will be called as ``callback(media_item, link)``
            and is expected to return a URL or ``None`` to use the default.
    """
    # Bind the scoped_session used for all model queries.
    _configure_dbsession(dbsession)
    # staticmethod() wrapping keeps these callables from being bound as
    # instance methods when looked up on the class.
    BaseQuery._not_found_exception = staticmethod(not_found_exception)
    DiskMediaItem._base_file_path = staticmethod(base_file_path)
    _init_staff(**staff_config_dict)
    MediaItem._media_link_formatter_callback = staticmethod(media_link_formatter_callback)
| 44.390244 | 90 | 0.752198 |
from libweasyl.models.media import DiskMediaItem, MediaItem
from libweasyl.models.meta import BaseQuery, _configure_dbsession
from libweasyl.staff import _init_staff
def configure_libweasyl(
dbsession, not_found_exception, base_file_path,
staff_config_dict, media_link_formatter_callback):
_configure_dbsession(dbsession)
BaseQuery._not_found_exception = staticmethod(not_found_exception)
DiskMediaItem._base_file_path = staticmethod(base_file_path)
_init_staff(**staff_config_dict)
MediaItem._media_link_formatter_callback = staticmethod(media_link_formatter_callback)
| true | true |
f71b731df78a211a9b978d951f533de530a3905f | 3,840 | py | Python | tensorforce/core/memories/latest.py | zysilence/tensorforce | 7539e5dde66f3a93b881006f9b7f38c926ced21b | [
"Apache-2.0"
] | 2 | 2021-11-14T12:28:24.000Z | 2022-02-14T19:23:51.000Z | tensorforce/core/memories/latest.py | zysilence/tensorforce | 7539e5dde66f3a93b881006f9b7f38c926ced21b | [
"Apache-2.0"
] | null | null | null | tensorforce/core/memories/latest.py | zysilence/tensorforce | 7539e5dde66f3a93b881006f9b7f38c926ced21b | [
"Apache-2.0"
] | 3 | 2021-03-04T17:26:43.000Z | 2021-03-04T17:27:10.000Z | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce.core.memories import Queue
class Latest(Queue):
    """
    Memory which always retrieves most recent experiences.
    """
    def __init__(self, states, internals, actions, include_next_states, capacity, scope='latest', summary_labels=None):
        """
        Latest memory.
        Args:
            states: States specification.
            internals: Internal states specification.
            actions: Actions specification.
            include_next_states: Include subsequent state if true.
            capacity: Memory capacity.
        """
        super(Latest, self).__init__(
            states=states,
            internals=internals,
            actions=actions,
            include_next_states=include_next_states,
            capacity=capacity,
            scope=scope,
            summary_labels=summary_labels
        )
    def tf_retrieve_timesteps(self, n):
        # The memory is a circular buffer of size `capacity`, hence the
        # modulo arithmetic on all index computations below.
        num_timesteps = (self.memory_index - self.episode_indices[-1] - 2) % self.capacity + 1
        # Never request more timesteps than are currently stored.
        n = tf.minimum(x=n, y=num_timesteps)
        indices = tf.range(
            start=(self.memory_index - n),
            limit=self.memory_index
        ) % self.capacity
        return self.retrieve_indices(indices=indices)
    def tf_retrieve_episodes(self, n):
        n = tf.minimum(x=n, y=self.episode_count)
        # Start just after the episode preceding the n most recent ones,
        # end just after the most recent episode boundary.
        start = self.episode_indices[self.episode_count - n - 1] + 1
        limit = self.episode_indices[self.episode_count - 1] + 1
        # Unwrap the range when it crosses the circular-buffer boundary.
        limit += tf.where(condition=(start < limit), x=0, y=self.capacity)
        indices = tf.range(start=start, limit=limit) % self.capacity
        return self.retrieve_indices(indices=indices)
    def tf_retrieve_sequences(self, n, sequence_length):
        # Remove once #128 is resolved
        tf.logging.warn("Sampling sequences is not validated yet. Use timesteps or episodes instead.")
        num_sequences = (self.memory_index - self.episode_indices[-1] - 2 - sequence_length + 1) % self.capacity + 1
        n = tf.minimum(x=n, y=num_sequences)
        indices = tf.range(
            start=(self.memory_index - n - sequence_length),  # or '- 1' implied in sequence length?
            limit=self.memory_index
        ) % self.capacity
        # sequence_indices = [tf.range(start=indices[n], limit=(indices[n] + sequence_length)) for k in range(n)]
        # sequence_indices = [indices[k: k + sequence_length] for k in tf.unstack(value=tf.range(start=0, limit=n), num=n)]
        # Build an (n, sequence_length) index grid of consecutive offsets,
        # then flatten it to n * sequence_length indices.
        sequence_indices = tf.expand_dims(input=tf.range(start=0, limit=n), axis=1) + tf.expand_dims(input=tf.constant(value=list(range(sequence_length))), axis=0)
        sequence_indices = tf.reshape(tensor=sequence_indices, shape=(n * sequence_length,))
        # sequence_indices = tf.concat(values=sequence_indices, axis=0)  # tf.stack !!!!!
        # NOTE(review): `terminal` is gathered at `indices` (length ~n +
        # sequence_length) while `sequence_indices` has n * sequence_length
        # elements — the boolean_mask shapes look inconsistent, which matches
        # the "not validated yet" warning above. Confirm before relying on it.
        terminal = tf.gather(params=self.terminal_memory, indices=indices)
        sequence_indices = tf.boolean_mask(tensor=sequence_indices, mask=tf.logical_not(x=terminal))
        return self.retrieve_indices(indices=sequence_indices)
| 45.176471 | 163 | 0.663542 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce.core.memories import Queue
class Latest(Queue):
    """Memory that always retrieves the most recent experiences.

    Thin specialization of the circular-buffer ``Queue`` memory: timesteps,
    episodes and sequences are always sampled from the end of the buffer.
    """

    def __init__(self, states, internals, actions, include_next_states, capacity, scope='latest', summary_labels=None):
        """
        Latest memory.

        Args:
            states: States specification.
            internals: Internal states specification.
            actions: Actions specification.
            include_next_states: Whether to include the subsequent state.
            capacity: Memory capacity in timesteps.
            scope: TensorFlow variable scope.
            summary_labels: Labels for TensorBoard summaries.
        """
        super(Latest, self).__init__(
            states=states,
            internals=internals,
            actions=actions,
            include_next_states=include_next_states,
            capacity=capacity,
            scope=scope,
            summary_labels=summary_labels
        )

    def tf_retrieve_timesteps(self, n):
        # Timesteps recorded since the end of the last finished episode.
        num_timesteps = (self.memory_index - self.episode_indices[-1] - 2) % self.capacity + 1
        # Never request more timesteps than are actually available.
        n = tf.minimum(x=n, y=num_timesteps)
        # The n most recent buffer slots, wrapped modulo capacity.
        indices = tf.range(
            start=(self.memory_index - n),
            limit=self.memory_index
        ) % self.capacity
        return self.retrieve_indices(indices=indices)

    def tf_retrieve_episodes(self, n):
        # Never request more episodes than have been recorded.
        n = tf.minimum(x=n, y=self.episode_count)
        # Slot range covering the n most recent complete episodes.
        start = self.episode_indices[self.episode_count - n - 1] + 1
        limit = self.episode_indices[self.episode_count - 1] + 1
        # If the range wraps around the circular buffer, unwrap it.
        limit += tf.where(condition=(start < limit), x=0, y=self.capacity)
        indices = tf.range(start=start, limit=limit) % self.capacity
        return self.retrieve_indices(indices=indices)

    def tf_retrieve_sequences(self, n, sequence_length):
        # Fixed: this line was corrupted to "ing.warn(...)"; restore warnings.warn.
        warnings.warn("Sampling sequences is not validated yet. Use timesteps or episodes instead.")
        # Sequences of the given length since the end of the last episode.
        num_sequences = (self.memory_index - self.episode_indices[-1] - 2 - sequence_length + 1) % self.capacity + 1
        n = tf.minimum(x=n, y=num_sequences)
        indices = tf.range(
            start=(self.memory_index - n - sequence_length),
            limit=self.memory_index
        ) % self.capacity
        # Expand each start index into sequence_length consecutive indices.
        sequence_indices = tf.expand_dims(input=tf.range(start=0, limit=n), axis=1) + tf.expand_dims(input=tf.constant(value=list(range(sequence_length))), axis=0)
        sequence_indices = tf.reshape(tensor=sequence_indices, shape=(n * sequence_length,))
        # Fixed: this line was corrupted to "l = tf.gather(...)" leaving `terminal`
        # undefined below (original name confirmed by the commented source).
        terminal = tf.gather(params=self.terminal_memory, indices=indices)
        # Drop indices at terminal states so sequences do not cross episodes.
        sequence_indices = tf.boolean_mask(tensor=sequence_indices, mask=tf.logical_not(x=terminal))
        return self.retrieve_indices(indices=sequence_indices)
| true | true |
f71b73477d2f539f36cc389b2a439621a3f79453 | 18,562 | py | Python | osa/scripts/provprocess.py | gae-ucm/LSTOSA | d44df4dc1daa87f57d95272014f05908d2c9a211 | [
"BSD-3-Clause"
] | 2 | 2022-02-21T17:45:38.000Z | 2022-03-25T11:48:52.000Z | osa/scripts/provprocess.py | gae-ucm/LSTOSA | d44df4dc1daa87f57d95272014f05908d2c9a211 | [
"BSD-3-Clause"
] | 79 | 2021-12-02T10:37:42.000Z | 2022-03-29T23:56:44.000Z | osa/scripts/provprocess.py | cta-observatory/lstosa | dd7a3a4967f265217929a1271c3f9be559a122ac | [
"BSD-3-Clause"
] | 1 | 2021-11-25T09:56:12.000Z | 2021-11-25T09:56:12.000Z | #!/usr/bin/env python
"""Provenance post processing script for OSA pipeline."""
import copy
import logging
import shutil
import sys
from pathlib import Path, PurePath
import yaml
from osa.configs import options
from osa.configs.config import cfg
from osa.provenance.capture import get_activity_id, get_file_hash
from osa.provenance.io import provdoc2graph, provdoc2json, provlist2provdoc, read_prov
from osa.provenance.utils import get_log_config
from osa.utils.cliopts import provprocessparsing
from osa.utils.logging import myLogger
__all__ = ["copy_used_file", "parse_lines_log", "parse_lines_run", "produce_provenance"]
# Shared module-level logger used by every helper in this script.
log = myLogger(logging.getLogger())
# Provenance logging configuration (YAML) — drives log location and line format.
provconfig = yaml.safe_load(get_log_config())
# Global prov capture log produced by the pipeline.
LOG_FILENAME = provconfig["handlers"]["provHandler"]["filename"]
# Separator token splitting each prov log line into three fields.
PROV_PREFIX = provconfig["PREFIX"]
# Base directories for DL1/DL2 data products, from the OSA configuration.
PATH_DL1 = cfg.get("LST1", "DL1_DIR")
PATH_DL2 = cfg.get("LST1", "DL2_DIR")
def copy_used_file(src, outdir):
    """
    Copy a file used in a process into the provenance output folder.

    Copying is best effort: failures are only logged as warnings. If a
    file with the same name but different content already exists at the
    destination, the copy is stored under a trailing-underscore name.

    Parameters
    ----------
    src : str
        Path of the file to copy.
    outdir : str or Path
        Destination folder.
    """
    # check src file exists — nothing to hash or copy otherwise
    # (the original fell through and get_file_hash would fail on a missing file)
    if not Path(src).is_file():
        log.warning(f"{src} file cannot be accessed")
        return
    hash_src = get_file_hash(src, buffer="content")
    filename = PurePath(src).name
    destpath = Path(outdir) / filename
    hash_out = ""
    # get hash and new name
    if destpath.exists():
        hash_out = get_file_hash(str(destpath), buffer="content")
        filename = filename + "_"
        destpath = Path(outdir) / filename
    # try copy file; skip if identical content is already in place
    if hash_src != hash_out:
        try:
            shutil.copyfile(src, str(destpath))
            log.info(f"copying {destpath}")
        except Exception as ex:
            log.warning(f"could not copy {src} file into {destpath}: {ex}")
def parse_lines_log(filter_cut, calib_runs, run_number):
    """
    Filter content in the global prov log file to produce a run/process
    wise session log.

    Parameters
    ----------
    filter_cut : str or None
        Granularity filter ("calibration", "r0_to_dl1", "dl1_to_dl2");
        any falsy value selects everything ("all").
    calib_runs : str
        Calibration run identifiers ("<drs4>-<pedcal>") to keep.
    run_number : str
        Observation run number to keep.

    Returns
    -------
    filtered : list
        Raw log lines belonging to the requested run/process.
    """
    filtered = []
    if not filter_cut:
        filter_cut = "all"
    # activities that belong to each granularity
    cuts = {
        "calibration": ["drs4_pedestal", "calibrate_charge"],
        "r0_to_dl1": ["r0_to_dl1", "dl1ab"],
        "dl1_to_dl2": ["dl1_datacheck", "dl1_to_dl2"],
    }
    cuts["all"] = cuts["calibration"] + cuts["r0_to_dl1"] + cuts["dl1_to_dl2"]
    with open(LOG_FILENAME, "r") as f:
        # iterate lazily instead of loading the whole log into memory
        for line in f:
            ll = line.split(PROV_PREFIX)
            if len(ll) != 3:
                log.warning(
                    f"format {PROV_PREFIX} mismatch in log file {LOG_FILENAME}\n{line}"
                )
                continue
            prov_str = ll.pop()
            prov_dict = yaml.safe_load(prov_str)
            keep = False
            session_tag = prov_dict.get("session_tag", "0:0")
            session_id = prov_dict.get("session_id", False)
            tag_activity, tag_run = session_tag.split(":")
            # filter by run and calib runs
            if tag_run in [run_number, calib_runs]:
                keep = True
            # filter by activity
            if tag_activity not in cuts[filter_cut]:
                keep = False
            # only keep first session start
            if session_id and (tag_run in [run_number, calib_runs]):
                keep = True
            # make session starts with calibration
            if session_id and filter_cut == "all" and not filtered:
                prov_dict["session_id"] = f"{options.date}{run_number}"
                prov_dict["name"] = run_number
                prov_dict["observation_run"] = run_number
                line = f"{ll[0]}{PROV_PREFIX}{ll[1]}{PROV_PREFIX}{prov_dict}\n"
            # remove parallel sessions
            if session_id and filtered:
                keep = False
            if keep:
                filtered.append(line)
    return filtered
def parse_lines_run(filter_step, prov_lines, out):
    """
    Process provenance info to reduce session at run/process wise scope.

    Collapses per-subrun provenance entries into run-wise entries, deduplicates
    repeated entities/roles, copies used files into *out*, and appends
    run-level "Collection" entities for the datasets touched by *filter_step*.

    Parameters
    ----------
    filter_step : str
        Activity to keep (e.g. "drs4_pedestal", "calibrate_charge",
        "r0_to_dl1", "dl1ab", "dl1_datacheck", "dl1_to_dl2").
    prov_lines : list
        Provenance dictionaries, one per parsed log line.
    out : str
        Folder where used files are copied.

    Returns
    -------
    working_lines : list
        Run-wise provenance lines; empty if no activity end time was seen.
    """
    size = 0
    # container acts as a "seen" set for names/ids/roles to deduplicate entries
    container = {}
    working_lines = []
    r0filepath_str = ""
    dl1filepath_str = ""
    dl2filepath_str = ""
    mufilepath_str = ""
    ckfilepath_str = ""
    id_activity_run = ""
    end_time_line = ""
    osa_config_copied = False
    for line in prov_lines:
        # get info
        remove = False
        endTime = line.get("endTime", "")
        session_id = line.get("session_id", "")
        activity_id = line.get("activity_id", "")
        filepath = line.get("filepath", "")
        used_role = line.get("used_role", "")
        generated_role = line.get("generated_role", "")
        parameters = line.get("parameters", "")
        name = line.get("name", "")
        content_type = line.get("contentType", "")
        used_id = line.get("used_id", "")
        osa_cfg = line.get("config_file", "")
        # filter grain
        session_tag = line.get("session_tag", "0:0")
        tag_activity, _ = session_tag.split(":")
        if tag_activity != filter_step and not session_id:
            continue
        # remove subruns info (remember last-seen filepath per dataset type)
        if name == "DL1CheckSubrunDataset":
            ckfilepath_str = filepath
        elif name == "DL1SubrunDataset":
            dl1filepath_str = filepath
        elif name == "DL2SubrunDataset":
            dl2filepath_str = filepath
        elif name == "MuonsSubrunDataset":
            mufilepath_str = filepath
        elif name == "R0SubrunDataset":
            r0filepath_str = filepath
        if "Subrun" in name or "subrun" in used_role or "subrun" in generated_role:
            remove = True
        if parameters and "ObservationSubRun" in parameters:
            del line["parameters"]["ObservationSubRun"]
        # remove sub-runs activities and info
        # first occurrence of the filtered activity defines the run-wise id
        if name == filter_step and not id_activity_run:
            id_activity_run = get_activity_id()
        if name in container or used_id in container:
            remove = True
        if parameters and "parameters" in container:
            remove = True
        if name:
            container[name] = True
        if used_id:
            container[used_id] = True
        if parameters:
            container["parameters"] = True
        if endTime:
            remove = True
            end_time_line = line
            # size counts processed subruns (one endTime per subrun activity)
            size += 1
        # remove duplicated produced files
        if generated_role in container:
            remove = True
        if name == "DL2MergedFile":
            container[name] = True
        if "merged" in generated_role:
            container[generated_role] = True
        if name == "DL1CheckHDF5File":
            container[name] = True
        if "DL1Check HDF5 file" in generated_role:
            container[generated_role] = True
        if name == "DL1CheckPDFFile":
            container[name] = True
        if "DL1Check PDF file" in generated_role:
            container[generated_role] = True
        # replace with new run-wise activity_id
        if activity_id:
            line["activity_id"] = id_activity_run
        # copy used files not subruns not RFs not mergedDL2
        if (
            filepath
            and content_type != "application/x-spss-sav"
            and name != "DL2MergedFile"
            and not name.startswith("DL1Check")
            and not remove
        ):
            copy_used_file(filepath, out)
        if session_id and osa_cfg and not osa_config_copied:
            copy_used_file(osa_cfg, out)
            osa_config_copied = True
        if not remove:
            working_lines.append(line)
    # append collections used and generated at endtime line of last activity
    if end_time_line:
        working_lines.append(end_time_line)
        if r0filepath_str and filter_step == "r0_to_dl1":
            r0_entity_id = get_file_hash(r0filepath_str + "r0", buffer="path")
            r0filepath_str = r0filepath_str.replace(PurePath(r0filepath_str).name, "")
            used = {"entity_id": r0_entity_id}
            used.update({"name": "R0Collection"})
            used.update({"type": "SetCollection"})
            used.update({"size": size})
            used.update({"filepath": r0filepath_str})
            working_lines.append(used)
            used = {"activity_id": id_activity_run}
            used.update({"used_id": r0_entity_id})
            used.update({"used_role": "R0 Collection"})
            working_lines.append(used)
        if dl1filepath_str:
            dl1filepath_str = dl1filepath_str.replace(PurePath(dl1filepath_str).name, "")
            dl1_entity_id = get_file_hash(dl1filepath_str + "dl1", buffer="path")
            dl1 = {"entity_id": dl1_entity_id}
            dl1.update({"name": "DL1Collection"})
            dl1.update({"type": "SetCollection"})
            dl1.update({"size": size})
            dl1.update({"filepath": dl1filepath_str})
            working_lines.append(dl1)
        if mufilepath_str:
            mufilepath_str = mufilepath_str.replace(PurePath(mufilepath_str).name, "")
            mu_entity_id = get_file_hash(mufilepath_str + "muons", buffer="path")
            muons = {"entity_id": mu_entity_id}
            muons.update({"name": "MuonsCollection"})
            muons.update({"type": "SetCollection"})
            muons.update({"size": size})
            muons.update({"filepath": mufilepath_str})
            working_lines.append(muons)
        if mufilepath_str and filter_step == "r0_to_dl1":
            generated = {"activity_id": id_activity_run}
            generated.update({"generated_id": mu_entity_id})
            generated.update({"generated_role": "Muons Collection"})
            working_lines.append(generated)
        if dl1filepath_str and filter_step in ["r0_to_dl1", "dl1ab"]:
            generated = {"activity_id": id_activity_run}
            generated.update({"generated_id": dl1_entity_id})
            generated.update({"generated_role": "DL1 Collection"})
            working_lines.append(generated)
        if dl1filepath_str and filter_step in ["dl1_to_dl2", "dl1ab"]:
            used = {"activity_id": id_activity_run}
            used.update({"used_id": dl1_entity_id})
            used.update({"used_role": "DL1 Collection"})
            working_lines.append(used)
        if dl1filepath_str and filter_step == "dl1_datacheck":
            used = {"activity_id": id_activity_run}
            used.update({"used_id": dl1_entity_id})
            used.update({"used_role": "DL1 Collection"})
            working_lines.append(used)
        if mufilepath_str and filter_step == "dl1_datacheck":
            used = {"activity_id": id_activity_run}
            used.update({"used_id": mu_entity_id})
            used.update({"used_role": "Muons Collection"})
            working_lines.append(used)
        if ckfilepath_str and filter_step == "dl1_datacheck":
            ckfilepath_str = ckfilepath_str.replace(PurePath(ckfilepath_str).name, "")
            chk_entity_id = get_file_hash(ckfilepath_str + "check", buffer="path")
            dl1check = {"entity_id": chk_entity_id}
            dl1check.update({"name": "DL1CheckCollection"})
            dl1check.update({"type": "SetCollection"})
            dl1check.update({"size": size})
            dl1check.update({"filepath": ckfilepath_str})
            working_lines.append(dl1check)
            generated = {"activity_id": id_activity_run}
            generated.update({"generated_id": chk_entity_id})
            generated.update({"generated_role": "DL1Checks Collection"})
            working_lines.append(generated)
        if dl2filepath_str and filter_step == "dl1_to_dl2":
            dl2_entity_id = get_file_hash(dl2filepath_str + "dl2", buffer="path")
            dl2filepath_str = dl2filepath_str.replace(PurePath(dl2filepath_str).name, "")
            used = {"entity_id": dl2_entity_id}
            used.update({"name": "DL2Collection"})
            used.update({"type": "SetCollection"})
            used.update({"size": size})
            used.update({"filepath": dl2filepath_str})
            working_lines.append(used)
            used = {"activity_id": id_activity_run}
            used.update({"generated_id": dl2_entity_id})
            used.update({"generated_role": "DL2 Collection"})
            working_lines.append(used)
    # no activity end time seen: the run is incomplete, discard everything
    else:
        working_lines = []

    return working_lines
def define_paths(grain, start_path, end_path, base_filename):
    """Build the output folder and provenance product paths for a granularity."""
    step_path = Path(start_path) / options.date / options.prod_id / end_path
    # complain (but keep going) if the destination folder is missing
    if not step_path.exists():
        log.error(f"Path {step_path} does not exist")
    # provenance products live under a log/ subfolder, created on demand
    out_path = step_path / "log"
    out_path.mkdir(parents=True, exist_ok=True)
    stem = f"{grain}_{base_filename}"
    return {
        "out_path": out_path,
        "log_path": out_path / f"{stem}.log",
        "json_filepath": out_path / f"{stem}.json",
        "graph_filepath": out_path / f"{stem}.pdf",
    }
def produce_provenance_files(processed_lines, paths):
    """Create provenance products: plain-text log, JSON serialization and PDF graph."""
    # write the processed session log, one provenance record per line
    with open(paths["log_path"], "w") as session_log:
        session_log.writelines(f"{line}\n" for line in processed_lines)
    log.info(f"creating {paths['log_path']}")

    provdoc = provlist2provdoc(processed_lines)

    # serialize the provenance document to JSON (best effort)
    try:
        provdoc2json(provdoc, str(paths["json_filepath"]))
        log.info(f"creating {paths['json_filepath']}")
    except Exception as ex:
        log.exception(f"problem while creating json: {ex}")

    # render the provenance graph as a PDF (best effort)
    try:
        provdoc2graph(provdoc, str(paths["graph_filepath"]), "pdf")
        log.info(f"creating {paths['graph_filepath']}")
    except Exception as ex:
        log.exception(f"problem while creating graph: {ex}")
def produce_provenance(session_log_filename, base_filename):
    """
    Create run-wise provenance products as JSON logs
    and graphs according to granularity.

    When ``options.filter`` is unset, every granularity is processed and the
    combined calibration_to_dl1 / calibration_to_dl2 products are produced.
    The ``[1:]`` slices merge step outputs while dropping the leading
    session-start line already present in the first list.
    """
    if options.filter == "calibration" or not options.filter:
        paths_calibration = define_paths(
            "calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
        )
        plines_drs4 = parse_lines_run(
            "drs4_pedestal",
            read_prov(filename=session_log_filename),
            str(paths_calibration["out_path"]),
        )
        plines_calib = parse_lines_run(
            "calibrate_charge",
            read_prov(filename=session_log_filename),
            str(paths_calibration["out_path"]),
        )
        calibration_lines = plines_drs4 + plines_calib[1:]

        # TODO
        # create calibration prov files only if filtering
        # NOTE(review): intentionally a no-op for now — calibration-only
        # products are never written.
        if options.filter == "calibration":
            pass

    if options.filter == "r0_to_dl1" or not options.filter:
        paths_r0_dl1 = define_paths(
            "r0_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
        )
        plines_r0 = parse_lines_run(
            "r0_to_dl1",
            read_prov(filename=session_log_filename),
            str(paths_r0_dl1["out_path"]),
        )
        plines_ab = parse_lines_run(
            "dl1ab",
            read_prov(filename=session_log_filename),
            str(paths_r0_dl1["out_path"]),
        )
        dl1_lines = plines_r0 + plines_ab[1:]

        # create r0_to_dl1 prov files only if filtering
        if options.filter == "r0_to_dl1":
            produce_provenance_files(plines_r0 + plines_ab[1:], paths_r0_dl1)

    if options.filter == "dl1_to_dl2" or not options.filter:
        paths_dl1_dl2 = define_paths(
            "dl1_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
        )
        plines_check = parse_lines_run(
            "dl1_datacheck",
            read_prov(filename=session_log_filename),
            str(paths_dl1_dl2["out_path"]),
        )
        plines_dl2 = parse_lines_run(
            "dl1_to_dl2",
            read_prov(filename=session_log_filename),
            str(paths_dl1_dl2["out_path"]),
        )
        dl1_dl2_lines = plines_check + plines_dl2[1:]

        # create dl1_to_dl2 prov files only if filtering
        if options.filter == "dl1_to_dl2":
            produce_provenance_files(plines_check + plines_dl2[1:], paths_dl1_dl2)

    # create calibration_to_dl1 and calibration_to_dl2 prov files
    if not options.filter:
        calibration_to_dl1 = define_paths(
            "calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
        )
        calibration_to_dl2 = define_paths(
            "calibration_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
        )
        calibration_to_dl1_lines = calibration_lines + dl1_lines[1:]
        # deep copies: produce_provenance_files consumes the lists and the
        # two products must not share mutable records
        lines_dl1 = copy.deepcopy(calibration_to_dl1_lines)
        calibration_to_dl2_lines = calibration_to_dl1_lines + dl1_dl2_lines[1:]
        lines_dl2 = copy.deepcopy(calibration_to_dl2_lines)
        produce_provenance_files(lines_dl1, calibration_to_dl1)
        produce_provenance_files(lines_dl2, calibration_to_dl2)
def main():
    """Extract the provenance information for the requested run."""
    provprocessparsing()

    # Logging verbosity
    if options.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    # check LOG_FILENAME exists — exit cleanly instead of letting the
    # stat() call below raise FileNotFoundError (original fell through)
    if not Path(LOG_FILENAME).exists():
        log.error(f"file {LOG_FILENAME} does not exist")
        sys.exit(1)

    # check LOG_FILENAME is not empty
    if not Path(LOG_FILENAME).stat().st_size:
        log.warning(f"file {LOG_FILENAME} is empty")
        sys.exit(1)

    # build base_filename
    base_filename = f"{options.run}_prov"
    session_log_filename = f"{base_filename}.log"

    # parse LOG_FILENAME content for a specific run / process
    calib_runs = f"{options.drs4_pedestal_run_id}-{options.pedcal_run_id}"
    parsed_content = parse_lines_log(options.filter, calib_runs, options.run)

    # create temporal session log file
    with open(session_log_filename, "w") as f:
        f.writelines(parsed_content)

    try:
        # create run-wise JSON logs and graphs for each granularity
        produce_provenance(session_log_filename, base_filename)
    finally:
        # remove temporal session log file
        Path(session_log_filename).unlink()

    # remove LOG_FILENAME
    if options.quit:
        Path(LOG_FILENAME).unlink()
if __name__ == "__main__":
main()
| 36.396078 | 89 | 0.619815 |
import copy
import logging
import shutil
import sys
from pathlib import Path, PurePath
import yaml
from osa.configs import options
from osa.configs.config import cfg
from osa.provenance.capture import get_activity_id, get_file_hash
from osa.provenance.io import provdoc2graph, provdoc2json, provlist2provdoc, read_prov
from osa.provenance.utils import get_log_config
from osa.utils.cliopts import provprocessparsing
from osa.utils.logging import myLogger
__all__ = ["copy_used_file", "parse_lines_log", "parse_lines_run", "produce_provenance"]
# Shared module-level logger used by every helper in this script.
log = myLogger(logging.getLogger())
# Provenance logging configuration (YAML) — drives log location and line format.
provconfig = yaml.safe_load(get_log_config())
# Global prov capture log produced by the pipeline.
LOG_FILENAME = provconfig["handlers"]["provHandler"]["filename"]
# Separator token splitting each prov log line into three fields.
PROV_PREFIX = provconfig["PREFIX"]
# Base directories for DL1/DL2 data products, from the OSA configuration.
PATH_DL1 = cfg.get("LST1", "DL1_DIR")
PATH_DL2 = cfg.get("LST1", "DL2_DIR")
def copy_used_file(src, outdir):
    """
    Copy a file used in a process into the provenance output folder.

    Copying is best effort: failures are only logged as warnings. If a
    file with the same name but different content already exists at the
    destination, the copy is stored under a trailing-underscore name.

    Parameters
    ----------
    src : str
        Path of the file to copy.
    outdir : str or Path
        Destination folder.
    """
    # nothing to hash or copy if the source is missing
    # (the original fell through and get_file_hash would fail)
    if not Path(src).is_file():
        log.warning(f"{src} file cannot be accessed")
        return
    hash_src = get_file_hash(src, buffer="content")
    filename = PurePath(src).name
    destpath = Path(outdir) / filename
    hash_out = ""
    # rename on collision with different content
    if destpath.exists():
        hash_out = get_file_hash(str(destpath), buffer="content")
        filename = filename + "_"
        destpath = Path(outdir) / filename
    # skip if identical content is already in place
    if hash_src != hash_out:
        try:
            shutil.copyfile(src, str(destpath))
            log.info(f"copying {destpath}")
        except Exception as ex:
            log.warning(f"could not copy {src} file into {destpath}: {ex}")
def parse_lines_log(filter_cut, calib_runs, run_number):
    """
    Filter content in the prov log file to produce a run/process wise
    session log.

    Parameters
    ----------
    filter_cut : str or None
        Granularity filter ("calibration", "r0_to_dl1", "dl1_to_dl2");
        any falsy value selects everything ("all").
    calib_runs : str
        Calibration run identifiers to keep.
    run_number : str
        Observation run number to keep.

    Returns
    -------
    filtered : list
        Raw log lines belonging to the requested run/process.
    """
    filtered = []
    if not filter_cut:
        filter_cut = "all"
    # activities that belong to each granularity
    cuts = {
        "calibration": ["drs4_pedestal", "calibrate_charge"],
        "r0_to_dl1": ["r0_to_dl1", "dl1ab"],
        "dl1_to_dl2": ["dl1_datacheck", "dl1_to_dl2"],
    }
    cuts["all"] = cuts["calibration"] + cuts["r0_to_dl1"] + cuts["dl1_to_dl2"]
    with open(LOG_FILENAME, "r") as f:
        for line in f.readlines():
            # each line has three PROV_PREFIX-separated fields
            ll = line.split(PROV_PREFIX)
            if len(ll) != 3:
                log.warning(
                    f"format {PROV_PREFIX} mismatch in log file {LOG_FILENAME}\n{line}"
                )
                continue
            prov_str = ll.pop()
            prov_dict = yaml.safe_load(prov_str)
            keep = False
            session_tag = prov_dict.get("session_tag", "0:0")
            session_id = prov_dict.get("session_id", False)
            tag_activity, tag_run = session_tag.split(":")
            # filter by run and calib runs
            if tag_run in [run_number, calib_runs]:
                keep = True
            # filter by activity
            if tag_activity not in cuts[filter_cut]:
                keep = False
            # only keep first session start
            if session_id and (tag_run in [run_number, calib_runs]):
                keep = True
            # make session start with calibration: rewrite the first session line
            if session_id and filter_cut == "all" and not filtered:
                prov_dict["session_id"] = f"{options.date}{run_number}"
                prov_dict["name"] = run_number
                prov_dict["observation_run"] = run_number
                line = f"{ll[0]}{PROV_PREFIX}{ll[1]}{PROV_PREFIX}{prov_dict}\n"
            # remove parallel sessions
            if session_id and filtered:
                keep = False
            if keep:
                filtered.append(line)
    return filtered
def parse_lines_run(filter_step, prov_lines, out):
    """
    Process provenance info to reduce session at run/process wise scope.

    Collapses per-subrun provenance entries into run-wise entries, deduplicates
    repeated entities/roles, copies used files into *out*, and appends
    run-level "Collection" entities for the datasets touched by *filter_step*.

    Parameters
    ----------
    filter_step : str
        Activity to keep (e.g. "drs4_pedestal", "calibrate_charge",
        "r0_to_dl1", "dl1ab", "dl1_datacheck", "dl1_to_dl2").
    prov_lines : list
        Provenance dictionaries, one per parsed log line.
    out : str
        Folder where used files are copied.

    Returns
    -------
    working_lines : list
        Run-wise provenance lines; empty if no activity end time was seen.
    """
    size = 0
    # container acts as a "seen" set for names/ids/roles to deduplicate entries
    container = {}
    working_lines = []
    r0filepath_str = ""
    dl1filepath_str = ""
    dl2filepath_str = ""
    mufilepath_str = ""
    ckfilepath_str = ""
    id_activity_run = ""
    end_time_line = ""
    osa_config_copied = False
    for line in prov_lines:
        remove = False
        endTime = line.get("endTime", "")
        session_id = line.get("session_id", "")
        activity_id = line.get("activity_id", "")
        filepath = line.get("filepath", "")
        used_role = line.get("used_role", "")
        generated_role = line.get("generated_role", "")
        parameters = line.get("parameters", "")
        name = line.get("name", "")
        content_type = line.get("contentType", "")
        used_id = line.get("used_id", "")
        osa_cfg = line.get("config_file", "")
        # filter by activity grain (session lines are always inspected)
        session_tag = line.get("session_tag", "0:0")
        tag_activity, _ = session_tag.split(":")
        if tag_activity != filter_step and not session_id:
            continue
        # remember last-seen subrun filepath per dataset type
        if name == "DL1CheckSubrunDataset":
            ckfilepath_str = filepath
        elif name == "DL1SubrunDataset":
            dl1filepath_str = filepath
        elif name == "DL2SubrunDataset":
            dl2filepath_str = filepath
        elif name == "MuonsSubrunDataset":
            mufilepath_str = filepath
        elif name == "R0SubrunDataset":
            r0filepath_str = filepath
        # drop subrun-level entries from the run-wise log
        if "Subrun" in name or "subrun" in used_role or "subrun" in generated_role:
            remove = True
        if parameters and "ObservationSubRun" in parameters:
            del line["parameters"]["ObservationSubRun"]
        # first occurrence of the filtered activity defines the run-wise id
        if name == filter_step and not id_activity_run:
            id_activity_run = get_activity_id()
        # deduplicate repeated entities/parameters
        if name in container or used_id in container:
            remove = True
        if parameters and "parameters" in container:
            remove = True
        if name:
            container[name] = True
        if used_id:
            container[used_id] = True
        if parameters:
            container["parameters"] = True
        # keep only the last endTime line; size counts processed subruns
        if endTime:
            remove = True
            end_time_line = line
            size += 1
        # remove duplicated produced files
        if generated_role in container:
            remove = True
        if name == "DL2MergedFile":
            container[name] = True
        if "merged" in generated_role:
            container[generated_role] = True
        if name == "DL1CheckHDF5File":
            container[name] = True
        if "DL1Check HDF5 file" in generated_role:
            container[generated_role] = True
        if name == "DL1CheckPDFFile":
            container[name] = True
        if "DL1Check PDF file" in generated_role:
            container[generated_role] = True
        # replace with the new run-wise activity_id
        if activity_id:
            line["activity_id"] = id_activity_run
        # copy used files: not subruns, not RFs, not merged DL2 / DL1Check
        if (
            filepath
            and content_type != "application/x-spss-sav"
            and name != "DL2MergedFile"
            and not name.startswith("DL1Check")
            and not remove
        ):
            copy_used_file(filepath, out)
        if session_id and osa_cfg and not osa_config_copied:
            copy_used_file(osa_cfg, out)
            osa_config_copied = True
        if not remove:
            working_lines.append(line)
    # append collections used and generated at the endTime line of the last activity
    if end_time_line:
        working_lines.append(end_time_line)
        if r0filepath_str and filter_step == "r0_to_dl1":
            r0_entity_id = get_file_hash(r0filepath_str + "r0", buffer="path")
            r0filepath_str = r0filepath_str.replace(PurePath(r0filepath_str).name, "")
            used = {"entity_id": r0_entity_id}
            used.update({"name": "R0Collection"})
            used.update({"type": "SetCollection"})
            used.update({"size": size})
            used.update({"filepath": r0filepath_str})
            working_lines.append(used)
            used = {"activity_id": id_activity_run}
            used.update({"used_id": r0_entity_id})
            used.update({"used_role": "R0 Collection"})
            working_lines.append(used)
        if dl1filepath_str:
            dl1filepath_str = dl1filepath_str.replace(PurePath(dl1filepath_str).name, "")
            dl1_entity_id = get_file_hash(dl1filepath_str + "dl1", buffer="path")
            dl1 = {"entity_id": dl1_entity_id}
            dl1.update({"name": "DL1Collection"})
            dl1.update({"type": "SetCollection"})
            dl1.update({"size": size})
            dl1.update({"filepath": dl1filepath_str})
            working_lines.append(dl1)
        if mufilepath_str:
            mufilepath_str = mufilepath_str.replace(PurePath(mufilepath_str).name, "")
            mu_entity_id = get_file_hash(mufilepath_str + "muons", buffer="path")
            muons = {"entity_id": mu_entity_id}
            muons.update({"name": "MuonsCollection"})
            muons.update({"type": "SetCollection"})
            muons.update({"size": size})
            muons.update({"filepath": mufilepath_str})
            working_lines.append(muons)
        if mufilepath_str and filter_step == "r0_to_dl1":
            generated = {"activity_id": id_activity_run}
            generated.update({"generated_id": mu_entity_id})
            generated.update({"generated_role": "Muons Collection"})
            working_lines.append(generated)
        if dl1filepath_str and filter_step in ["r0_to_dl1", "dl1ab"]:
            generated = {"activity_id": id_activity_run}
            generated.update({"generated_id": dl1_entity_id})
            generated.update({"generated_role": "DL1 Collection"})
            working_lines.append(generated)
        if dl1filepath_str and filter_step in ["dl1_to_dl2", "dl1ab"]:
            used = {"activity_id": id_activity_run}
            used.update({"used_id": dl1_entity_id})
            used.update({"used_role": "DL1 Collection"})
            working_lines.append(used)
        if dl1filepath_str and filter_step == "dl1_datacheck":
            used = {"activity_id": id_activity_run}
            used.update({"used_id": dl1_entity_id})
            used.update({"used_role": "DL1 Collection"})
            working_lines.append(used)
        if mufilepath_str and filter_step == "dl1_datacheck":
            used = {"activity_id": id_activity_run}
            used.update({"used_id": mu_entity_id})
            used.update({"used_role": "Muons Collection"})
            working_lines.append(used)
        if ckfilepath_str and filter_step == "dl1_datacheck":
            ckfilepath_str = ckfilepath_str.replace(PurePath(ckfilepath_str).name, "")
            chk_entity_id = get_file_hash(ckfilepath_str + "check", buffer="path")
            dl1check = {"entity_id": chk_entity_id}
            dl1check.update({"name": "DL1CheckCollection"})
            dl1check.update({"type": "SetCollection"})
            dl1check.update({"size": size})
            dl1check.update({"filepath": ckfilepath_str})
            working_lines.append(dl1check)
            generated = {"activity_id": id_activity_run}
            generated.update({"generated_id": chk_entity_id})
            generated.update({"generated_role": "DL1Checks Collection"})
            working_lines.append(generated)
        if dl2filepath_str and filter_step == "dl1_to_dl2":
            dl2_entity_id = get_file_hash(dl2filepath_str + "dl2", buffer="path")
            dl2filepath_str = dl2filepath_str.replace(PurePath(dl2filepath_str).name, "")
            used = {"entity_id": dl2_entity_id}
            used.update({"name": "DL2Collection"})
            used.update({"type": "SetCollection"})
            used.update({"size": size})
            used.update({"filepath": dl2filepath_str})
            working_lines.append(used)
            used = {"activity_id": id_activity_run}
            used.update({"generated_id": dl2_entity_id})
            used.update({"generated_role": "DL2 Collection"})
            working_lines.append(used)
    # no activity end time seen: the run is incomplete, discard everything
    else:
        working_lines = []
    return working_lines
def define_paths(grain, start_path, end_path, base_filename):
    """Define target folders and provenance product paths for a granularity."""
    paths = {}
    # check destination folder exists (log an error but keep going)
    step_path = Path(start_path) / options.date / options.prod_id / end_path
    if not step_path.exists():
        log.error(f"Path {step_path} does not exist")
    # provenance products live under a log/ subfolder, created on demand
    paths["out_path"] = step_path / "log"
    paths["out_path"].mkdir(parents=True, exist_ok=True)
    # paths for the prov products: session log, JSON serialization, PDF graph
    paths["log_path"] = paths["out_path"] / f"{grain}_{base_filename}.log"
    paths["json_filepath"] = paths["out_path"] / f"{grain}_{base_filename}.json"
    paths["graph_filepath"] = paths["out_path"] / f"{grain}_{base_filename}.pdf"
    return paths
def produce_provenance_files(processed_lines, paths):
    """Create provenance products: plain-text log, JSON serialization and PDF graph."""
    with open(paths["log_path"], "w") as f:
        for line in processed_lines:
            f.write(f"{line}\n")
    log.info(f"creating {paths['log_path']}")
    provdoc = provlist2provdoc(processed_lines)
    # serialize to JSON (best effort)
    try:
        provdoc2json(provdoc, str(paths["json_filepath"]))
        log.info(f"creating {paths['json_filepath']}")
    except Exception as ex:
        log.exception(f"problem while creating json: {ex}")
    # render the provenance graph as PDF (best effort)
    try:
        provdoc2graph(provdoc, str(paths["graph_filepath"]), "pdf")
        log.info(f"creating {paths['graph_filepath']}")
    except Exception as ex:
        log.exception(f"problem while creating graph: {ex}")
def produce_provenance(session_log_filename, base_filename):
    """
    Create run-wise provenance products as JSON logs and graphs
    according to granularity.

    When ``options.filter`` is unset, every granularity is processed and the
    combined calibration_to_dl1 / calibration_to_dl2 products are produced.
    The ``[1:]`` slices merge step outputs while dropping the leading
    session-start line already present in the first list.
    """
    if options.filter == "calibration" or not options.filter:
        paths_calibration = define_paths(
            "calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
        )
        plines_drs4 = parse_lines_run(
            "drs4_pedestal",
            read_prov(filename=session_log_filename),
            str(paths_calibration["out_path"]),
        )
        plines_calib = parse_lines_run(
            "calibrate_charge",
            read_prov(filename=session_log_filename),
            str(paths_calibration["out_path"]),
        )
        calibration_lines = plines_drs4 + plines_calib[1:]
        # NOTE(review): intentionally a no-op — calibration-only products
        # are never written (pending TODO in the original source).
        if options.filter == "calibration":
            pass
    if options.filter == "r0_to_dl1" or not options.filter:
        paths_r0_dl1 = define_paths(
            "r0_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
        )
        plines_r0 = parse_lines_run(
            "r0_to_dl1",
            read_prov(filename=session_log_filename),
            str(paths_r0_dl1["out_path"]),
        )
        plines_ab = parse_lines_run(
            "dl1ab",
            read_prov(filename=session_log_filename),
            str(paths_r0_dl1["out_path"]),
        )
        dl1_lines = plines_r0 + plines_ab[1:]
        # create r0_to_dl1 prov files only if filtering
        if options.filter == "r0_to_dl1":
            produce_provenance_files(plines_r0 + plines_ab[1:], paths_r0_dl1)
    if options.filter == "dl1_to_dl2" or not options.filter:
        paths_dl1_dl2 = define_paths(
            "dl1_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
        )
        plines_check = parse_lines_run(
            "dl1_datacheck",
            read_prov(filename=session_log_filename),
            str(paths_dl1_dl2["out_path"]),
        )
        plines_dl2 = parse_lines_run(
            "dl1_to_dl2",
            read_prov(filename=session_log_filename),
            str(paths_dl1_dl2["out_path"]),
        )
        dl1_dl2_lines = plines_check + plines_dl2[1:]
        # create dl1_to_dl2 prov files only if filtering
        if options.filter == "dl1_to_dl2":
            produce_provenance_files(plines_check + plines_dl2[1:], paths_dl1_dl2)
    # no filter: produce the combined calibration_to_dl1/dl2 products
    if not options.filter:
        calibration_to_dl1 = define_paths(
            "calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
        )
        calibration_to_dl2 = define_paths(
            "calibration_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
        )
        calibration_to_dl1_lines = calibration_lines + dl1_lines[1:]
        # deep copies: the two products must not share mutable records
        lines_dl1 = copy.deepcopy(calibration_to_dl1_lines)
        calibration_to_dl2_lines = calibration_to_dl1_lines + dl1_dl2_lines[1:]
        lines_dl2 = copy.deepcopy(calibration_to_dl2_lines)
        produce_provenance_files(lines_dl1, calibration_to_dl1)
        produce_provenance_files(lines_dl2, calibration_to_dl2)
def main():
    """Extract the provenance information for the requested run."""
    provprocessparsing()

    # Logging verbosity
    if options.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    # exit cleanly if the prov log is missing — the stat() call below
    # would otherwise raise FileNotFoundError (original fell through)
    if not Path(LOG_FILENAME).exists():
        log.error(f"file {LOG_FILENAME} does not exist")
        sys.exit(1)

    # an empty prov log means nothing to process
    if not Path(LOG_FILENAME).stat().st_size:
        log.warning(f"file {LOG_FILENAME} is empty")
        sys.exit(1)

    # names of the temporal run-wise session log
    base_filename = f"{options.run}_prov"
    session_log_filename = f"{base_filename}.log"

    # filter the global prov log down to this run / process
    calib_runs = f"{options.drs4_pedestal_run_id}-{options.pedcal_run_id}"
    parsed_content = parse_lines_log(options.filter, calib_runs, options.run)

    # create the temporal session log file
    with open(session_log_filename, "w") as f:
        f.writelines(parsed_content)

    try:
        # create run-wise JSON logs and graphs for each granularity
        produce_provenance(session_log_filename, base_filename)
    finally:
        # always remove the temporal session log file
        Path(session_log_filename).unlink()

    # optionally consume (delete) the global prov log
    if options.quit:
        Path(LOG_FILENAME).unlink()
if __name__ == "__main__":
main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.