code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import numpy as np
import scipy.stats
from matplotlib import pyplot as plt
def signal_autocor(signal, lag=None, demean=True, method="fft", show=False):
"""**Autocorrelation (ACF)**
Compute the autocorrelation of a signal.
Parameters
-----------
signal : Union[list, np.array, pd.Series]
Vector of values.
lag : int
Time lag. If specified, one value of autocorrelation between signal with its lag self will
be returned.
demean : bool
If ``True``, the mean of the signal will be subtracted from the signal before ACF
computation.
method : str
Can be ``"correlation"`` (using :func:`.np.correlate`) or ``"fft"`` (Fast Fourier Transform;
default).
show : bool
If ``True``, plot the autocorrelation at all values of lag.
Returns
-------
r : float
The cross-correlation of the signal with itself at different time lags. Minimum time lag is
0, maximum time lag is the length of the signal. Or a correlation value at a specific lag
if lag is not ``None``.
info : dict
A dictionary containing additional information, such as the confidence interval.
Examples
--------
.. ipython:: python
import neurokit2 as nk
# Example 1: Using 'Correlation' Method
signal = [1, 2, 3, 4, 5]
@savefig p_signal_autocor1.png scale=100%
r, info = nk.signal_autocor(signal, show=True, method='correlate')
@suppress
plt.close()
.. ipython:: python
# Example 2: Using 'FFT' Method
signal = nk.signal_simulate(duration=5, sampling_rate=100, frequency=[5, 6], noise=0.5)
@savefig p_signal_autocor2.png scale=100%
r, info = nk.signal_autocor(signal, lag=2, method='fft', show=True)
@suppress
plt.close()
"""
n = len(signal)
# Demean
if demean:
signal = np.asarray(signal) - np.nanmean(signal)
# Run autocor
method = method.lower()
if method in ["cor", "correlation", "correlate"]:
acov = np.correlate(signal, signal, mode="full")
acov = acov[n - 1 :] # Min time lag is 0
elif method == "fft":
a = np.concatenate((signal, np.zeros(n - 1))) # added zeros to your signal
A = np.fft.fft(a)
S = np.conj(A) * A
c_fourier = np.fft.ifft(S)
acov = c_fourier[: (c_fourier.size // 2) + 1].real
else:
raise ValueError("Method must be 'correlation' or 'fft'.")
# Normalize
r = acov / acov[0]
# Confidence interval
varacf = 1.0 / n
interval = scipy.stats.norm.ppf(1 - 0.05 / 2.0) * np.sqrt(varacf)
ci_low, ci_high = r - interval, r + interval
# Plot
if show:
plt.axhline(y=0, color="grey", linestyle="--")
plt.plot(np.arange(1, len(r) + 1), r, lw=2)
plt.ylabel("Autocorrelation r")
plt.xlabel("Lag")
plt.ylim(-1, 1)
if lag is not None:
if lag > n:
raise ValueError(
"NeuroKit error: signal_autocor(): The time lag exceeds the duration of the signal. "
)
else:
r = r[lag]
return r, {"CI_low": ci_low, "CI_high": ci_high, "Method": method, "ACov": acov} | neurokit2/signal/signal_autocor.py | import numpy as np
import scipy.stats
from matplotlib import pyplot as plt
def signal_autocor(signal, lag=None, demean=True, method="fft", show=False):
"""**Autocorrelation (ACF)**
Compute the autocorrelation of a signal.
Parameters
-----------
signal : Union[list, np.array, pd.Series]
Vector of values.
lag : int
Time lag. If specified, one value of autocorrelation between signal with its lag self will
be returned.
demean : bool
If ``True``, the mean of the signal will be subtracted from the signal before ACF
computation.
method : str
Can be ``"correlation"`` (using :func:`.np.correlate`) or ``"fft"`` (Fast Fourier Transform;
default).
show : bool
If ``True``, plot the autocorrelation at all values of lag.
Returns
-------
r : float
The cross-correlation of the signal with itself at different time lags. Minimum time lag is
0, maximum time lag is the length of the signal. Or a correlation value at a specific lag
if lag is not ``None``.
info : dict
A dictionary containing additional information, such as the confidence interval.
Examples
--------
.. ipython:: python
import neurokit2 as nk
# Example 1: Using 'Correlation' Method
signal = [1, 2, 3, 4, 5]
@savefig p_signal_autocor1.png scale=100%
r, info = nk.signal_autocor(signal, show=True, method='correlate')
@suppress
plt.close()
.. ipython:: python
# Example 2: Using 'FFT' Method
signal = nk.signal_simulate(duration=5, sampling_rate=100, frequency=[5, 6], noise=0.5)
@savefig p_signal_autocor2.png scale=100%
r, info = nk.signal_autocor(signal, lag=2, method='fft', show=True)
@suppress
plt.close()
"""
n = len(signal)
# Demean
if demean:
signal = np.asarray(signal) - np.nanmean(signal)
# Run autocor
method = method.lower()
if method in ["cor", "correlation", "correlate"]:
acov = np.correlate(signal, signal, mode="full")
acov = acov[n - 1 :] # Min time lag is 0
elif method == "fft":
a = np.concatenate((signal, np.zeros(n - 1))) # added zeros to your signal
A = np.fft.fft(a)
S = np.conj(A) * A
c_fourier = np.fft.ifft(S)
acov = c_fourier[: (c_fourier.size // 2) + 1].real
else:
raise ValueError("Method must be 'correlation' or 'fft'.")
# Normalize
r = acov / acov[0]
# Confidence interval
varacf = 1.0 / n
interval = scipy.stats.norm.ppf(1 - 0.05 / 2.0) * np.sqrt(varacf)
ci_low, ci_high = r - interval, r + interval
# Plot
if show:
plt.axhline(y=0, color="grey", linestyle="--")
plt.plot(np.arange(1, len(r) + 1), r, lw=2)
plt.ylabel("Autocorrelation r")
plt.xlabel("Lag")
plt.ylim(-1, 1)
if lag is not None:
if lag > n:
raise ValueError(
"NeuroKit error: signal_autocor(): The time lag exceeds the duration of the signal. "
)
else:
r = r[lag]
return r, {"CI_low": ci_low, "CI_high": ci_high, "Method": method, "ACov": acov} | 0.924925 | 0.754599 |
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
from importlib import reload
import collections
from pandas import json_normalize
import json
import argparse
import sys
from sqlalchemy import create_engine
import sqlite3
from importlib import reload
import os
def search_bypfam(dataset_path):
engine = create_engine(dataset_path)
CHEMBL_VERSION = 27
find_molbypfam = ('''SELECT td.chembl_id as target_chemblid, a2.pchembl_value,
a2.activity_comment, md.chembl_id as compound_chemblid, source_domain_id,
dm.mec_id, max_phase
FROM drug_mechanism dm
JOIN binding_sites bs on bs.tid = dm.tid
JOIN target_dictionary td ON td.tid = bs.tid
JOIN site_components sc ON sc.site_id =bs.site_id
JOIN domains d2 ON d2.domain_id = sc.domain_id
JOIN activities a2 ON dm.molregno = a2.molregno
JOIN molecule_dictionary md ON md.molregno = dm.molregno
JOIN compound_properties cp ON cp.molregno = md.molregno
JOIN compound_records cr ON cr.molregno = cp.molregno
WHERE dm.mec_id IS NOT NULL
AND a2.activity_comment LIKE 'Active'
UNION
SELECT td.chembl_id as target_chemblid, a2.pchembl_value, a2.activity_comment,
md.chembl_id as compound_chemblid, source_domain_id, dm.mec_id, max_phase
FROM drug_mechanism dm
JOIN binding_sites bs on bs.tid = dm.tid
JOIN target_dictionary td ON td.tid = bs.tid
JOIN site_components sc ON sc.site_id =bs.site_id
JOIN domains d2 ON d2.domain_id = sc.domain_id
JOIN activities a2 ON dm.molregno = a2.molregno
JOIN molecule_dictionary md ON md.molregno = dm.molregno
JOIN compound_properties cp ON cp.molregno = md.molregno
JOIN compound_records cr ON cr.molregno = cp.molregno
WHERE a2.src_id = 15
AND dm.mec_id IS NOT NULL
AND a2.pchembl_value >= 6.0
AND cp.PSA IS NOT NULL
UNION
SELECT td.chembl_id as target_chemblid, a2.pchembl_value, a2.activity_comment,
md.chembl_id as compound_chemblid, source_domain_id, dm.mec_id, max_phase
FROM drug_mechanism dm
JOIN binding_sites bs on bs.tid = dm.tid
JOIN target_dictionary td ON td.tid = bs.tid
JOIN site_components sc ON sc.site_id =bs.site_id
JOIN domains d2 ON d2.domain_id = sc.domain_id
JOIN activities a2 ON dm.molregno = a2.molregno
JOIN molecule_dictionary md ON md.molregno = dm.molregno
JOIN compound_properties cp ON cp.molregno = md.molregno
JOIN compound_records cr ON cr.molregno = cp.molregno
WHERE md.max_phase >= 3.0
AND dm.mec_id IS NOT NULL; ''')
df_mol = pd.read_sql(find_molbypfam, engine)
return df_mol
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("-db", "--dataset", help="Path to the directory of ChEMBL DB",
default= "chembl_27.db")
parser.add_argument('-o','--output', help='Output result must be .csv file',
type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
db='sqlite:///'+ os.path.abspath(args.dataset)
if args.dataset:
print('Database is being generated. This may take 5-10min', file=sys.stderr)
df_targets=search_bypfam(db)
df_targets.to_csv("/tmp/mech.csv")
if len(df_targets):
df_drop=df_targets.drop_duplicates(subset= ['target_chemblid', 'source_domain_id', 'compound_chemblid'])
grouped_df = df_drop.groupby(["target_chemblid", "compound_chemblid"])
grouped_pfam = grouped_df.agg({'source_domain_id':list, 'max_phase':'max'})
grouped_pfam = grouped_pfam.reset_index()
grouped_pfam['Domain_key']=['|'.join(sorted(set(x))) for x in grouped_pfam.source_domain_id]
result = grouped_pfam
result.to_csv(args.output) ;
else:
print('No database', file=sys.stderr)
return 0
if __name__=='__main__':
Main() | patho_chembl/chembldb_pfam_mech.py | from sqlalchemy import create_engine
import pandas as pd
import numpy as np
from importlib import reload
import collections
from pandas import json_normalize
import json
import argparse
import sys
from sqlalchemy import create_engine
import sqlite3
from importlib import reload
import os
def search_bypfam(dataset_path):
engine = create_engine(dataset_path)
CHEMBL_VERSION = 27
find_molbypfam = ('''SELECT td.chembl_id as target_chemblid, a2.pchembl_value,
a2.activity_comment, md.chembl_id as compound_chemblid, source_domain_id,
dm.mec_id, max_phase
FROM drug_mechanism dm
JOIN binding_sites bs on bs.tid = dm.tid
JOIN target_dictionary td ON td.tid = bs.tid
JOIN site_components sc ON sc.site_id =bs.site_id
JOIN domains d2 ON d2.domain_id = sc.domain_id
JOIN activities a2 ON dm.molregno = a2.molregno
JOIN molecule_dictionary md ON md.molregno = dm.molregno
JOIN compound_properties cp ON cp.molregno = md.molregno
JOIN compound_records cr ON cr.molregno = cp.molregno
WHERE dm.mec_id IS NOT NULL
AND a2.activity_comment LIKE 'Active'
UNION
SELECT td.chembl_id as target_chemblid, a2.pchembl_value, a2.activity_comment,
md.chembl_id as compound_chemblid, source_domain_id, dm.mec_id, max_phase
FROM drug_mechanism dm
JOIN binding_sites bs on bs.tid = dm.tid
JOIN target_dictionary td ON td.tid = bs.tid
JOIN site_components sc ON sc.site_id =bs.site_id
JOIN domains d2 ON d2.domain_id = sc.domain_id
JOIN activities a2 ON dm.molregno = a2.molregno
JOIN molecule_dictionary md ON md.molregno = dm.molregno
JOIN compound_properties cp ON cp.molregno = md.molregno
JOIN compound_records cr ON cr.molregno = cp.molregno
WHERE a2.src_id = 15
AND dm.mec_id IS NOT NULL
AND a2.pchembl_value >= 6.0
AND cp.PSA IS NOT NULL
UNION
SELECT td.chembl_id as target_chemblid, a2.pchembl_value, a2.activity_comment,
md.chembl_id as compound_chemblid, source_domain_id, dm.mec_id, max_phase
FROM drug_mechanism dm
JOIN binding_sites bs on bs.tid = dm.tid
JOIN target_dictionary td ON td.tid = bs.tid
JOIN site_components sc ON sc.site_id =bs.site_id
JOIN domains d2 ON d2.domain_id = sc.domain_id
JOIN activities a2 ON dm.molregno = a2.molregno
JOIN molecule_dictionary md ON md.molregno = dm.molregno
JOIN compound_properties cp ON cp.molregno = md.molregno
JOIN compound_records cr ON cr.molregno = cp.molregno
WHERE md.max_phase >= 3.0
AND dm.mec_id IS NOT NULL; ''')
df_mol = pd.read_sql(find_molbypfam, engine)
return df_mol
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("-db", "--dataset", help="Path to the directory of ChEMBL DB",
default= "chembl_27.db")
parser.add_argument('-o','--output', help='Output result must be .csv file',
type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
db='sqlite:///'+ os.path.abspath(args.dataset)
if args.dataset:
print('Database is being generated. This may take 5-10min', file=sys.stderr)
df_targets=search_bypfam(db)
df_targets.to_csv("/tmp/mech.csv")
if len(df_targets):
df_drop=df_targets.drop_duplicates(subset= ['target_chemblid', 'source_domain_id', 'compound_chemblid'])
grouped_df = df_drop.groupby(["target_chemblid", "compound_chemblid"])
grouped_pfam = grouped_df.agg({'source_domain_id':list, 'max_phase':'max'})
grouped_pfam = grouped_pfam.reset_index()
grouped_pfam['Domain_key']=['|'.join(sorted(set(x))) for x in grouped_pfam.source_domain_id]
result = grouped_pfam
result.to_csv(args.output) ;
else:
print('No database', file=sys.stderr)
return 0
if __name__=='__main__':
Main() | 0.233706 | 0.096663 |
import numpy as np
import sys
import random
import itertools
from datetime import datetime
def has_converged(found, current, duration):
assert current - found > 0
assert duration > 0
if current - found > 2000:
return True
if duration > 3600:
return True
return False
def print_now():
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
class SetCover:
'''
Represents an instance of the Minimum Set Cover problem
'''
def __init__(self, problem_instance):
'''
:param problem_instance: two dimensional numpy array. The rows represent the available
sets and the columns the possible elements. If problem_instance[i][j] is one then i-th
set has the j-th element. If it is set to 0 then the set does not have this element.
'''
self.problem_instance = problem_instance
self.is_solveable()
def is_solveable(self):
'''
Checks if there exists a set cover at all. Raises an Exception if not.
'''
all_sets = np.ones(self.problem_instance.shape[0])
solution = Solution(self, all_sets)
if not solution.is_feasible_solution():
raise Exception("Set cover instance cannot be solved")
class TightSetCover():
def __init__(self, min_k, max_k, max_n):
self.n = max_n
self.ks = []
self.number_ele = []
self.number_sets = []
for _ in range(0, self.n):
k = random.randint(min_k, max_k)
self.ks.append(k)
self.number_ele.append(2**(k + 1) - 2)
self.number_sets.append(k + 2)
shape = (sum(self.number_sets), sum(self.number_ele))
self.problem_instance = np.zeros(shape)
set_index_offset = 0
ele_index_offset = 0
for i in range(0, self.n):
tight_offset = 0
for k in range(0, self.ks[i]):
# initialize S_k
row = set_index_offset + k
for l in range(0, 2**(k + 1)):
index = ele_index_offset + l + tight_offset
self.problem_instance[row][index] = 1
tight_offset += 2**(k + 1)
# T_is...
t_index = set_index_offset + self.number_sets[i] - 2
for k in range(0, self.number_ele[i]):
if k % 2 == 0:
self.problem_instance[t_index][k + ele_index_offset] = 1
else:
self.problem_instance[t_index + 1][k + ele_index_offset] = 1
ele_index_offset += self.number_ele[i]
set_index_offset += self.number_sets[i]
self.is_solveable()
def is_solveable(self):
'''
Checks if there exists a set cover at all. Raises an Exception if not.
'''
all_sets = np.ones(self.problem_instance.shape[0])
solution = Solution(self, all_sets)
if not solution.is_feasible_solution():
raise Exception("Set cover instance cannot be solved")
class SteinerTriple:
TRIPLES_27 = [
(2, 3, 4),
(1, 3, 5),
(1, 2, 6),
(5, 6, 7),
(4, 6, 8),
(4, 5, 9),
(1, 8, 9),
(2, 7, 9),
(3, 7, 8),
(1, 4, 7),
(2, 5, 8),
(3, 6, 9),
(11, 12, 13),
(10, 12, 14),
(10, 11, 15),
(14, 15, 16),
(13, 15, 17),
(13, 14, 18),
(10, 17, 18),
(11, 16, 18),
(12, 16, 17),
(10, 13, 16),
(11, 14, 17),
(12, 15, 18),
(20, 21, 22),
(19, 21, 23),
(19, 20, 24),
(23, 24, 25),
(22, 24, 26),
(22, 23, 27),
(19, 26, 27),
(20, 25, 27),
(21, 25, 26),
(19, 22, 25),
(20, 23, 26),
(21, 24, 27),
(1, 10, 19),
(1, 11, 24),
(1, 12, 23),
(1, 13, 25),
(1, 14, 21),
(1, 15, 20),
(1, 16, 22),
(1, 17, 27),
(1, 18, 26),
(2, 10, 24),
(2, 11, 20),
(2, 12, 22),
(2, 13, 21),
(2, 14, 26),
(2, 15, 19),
(2, 16, 27),
(2, 17, 23),
(2, 18, 25),
(3, 10, 23),
(3, 11, 22),
(3, 12, 21),
(3, 13, 20),
(3, 14, 19),
(3, 15, 27),
(3, 16, 26),
(3, 17, 25),
(3, 18, 24),
(4, 10, 25),
(4, 11, 21),
(4, 12, 20),
(4, 13, 22),
(4, 14, 27),
(4, 15, 26),
(4, 16, 19),
(4, 17, 24),
(4, 18, 23),
(5, 10, 21),
(5, 11, 26),
(5, 12, 19),
(5, 13, 27),
(5, 14, 23),
(5, 15, 25),
(5, 16, 24),
(5, 17, 20),
(5, 18, 22),
(6, 10, 20),
(6, 11, 19),
(6, 12, 27),
(6, 13, 26),
(6, 14, 25),
(6, 15, 24),
(6, 16, 23),
(6, 17, 22),
(6, 18, 21),
(7, 10, 22),
(7, 11, 27),
(7, 12, 26),
(7, 13, 19),
(7, 14, 24),
(7, 15, 23),
(7, 16, 25),
(7, 17, 21),
(7, 18, 20),
(8, 10, 27),
(8, 11, 23),
(8, 12, 25),
(8, 13, 24),
(8, 14, 20),
(8, 15, 22),
(8, 16, 21),
(8, 17, 26),
(8, 18, 19),
(9, 10, 26),
(9, 11, 25),
(9, 12, 24),
(9, 13, 23),
(9, 14, 22),
(9, 15, 21),
(9, 16, 20),
(9, 17, 19),
(9, 18, 27),
]
TRIPLES_45 = [
(3,4,6),
(4,5,7),
(1,5,8),
(1,2,9),
(2,3,10),
(2,5,6),
(1,3,7),
(2,4,8),
(3,5,9),
(1,4,10),
(8,9,11),
(9,10,12),
(6,10,13),
(6,7,14),
(7,8,15),
(7,10,11),
(6,8,12),
(7,9,13),
(8,10,14),
(6,9,15),
(1,13,14),
(2,14,15),
(3,11,15),
(4,11,12),
(5,12,13),
(1,12,15),
(2,11,13),
(3,12,14),
(4,13,15),
(5,11,14),
(1,6,11),
(2,7,12),
(3,8,13),
(4,9,14),
(5,10,15),
(18,19,21),
(19,20,22),
(16,20,23),
(16,17,24),
(17,18,25),
(17,20,21),
(16,18,22),
(17,19,23),
(18,20,24),
(16,19,25),
(23,24,26),
(24,25,27),
(21,25,28),
(21,22,29),
(22,23,30),
(22,25,26),
(21,23,27),
(22,24,28),
(23,25,29),
(21,24,30),
(16,28,29),
(17,29,30),
(18,26,30),
(19,26,27),
(20,27,28),
(16,27,30),
(17,26,28),
(18,27,29),
(19,28,30),
(20,26,29),
(16,21,26),
(17,22,27),
(18,23,28),
(19,24,29),
(20,25,30),
(33,34,36),
(34,35,37),
(31,35,38),
(31,32,39),
(32,33,40),
(32,35,36),
(31,33,37),
(32,34,38),
(33,35,39),
(31,34,40),
(38,39,41),
(39,40,42),
(36,40,43),
(36,37,44),
(37,38,45),
(37,40,41),
(36,38,42),
(37,39,43),
(38,40,44),
(36,39,45),
(31,43,44),
(32,44,45),
(33,41,45),
(34,41,42),
(35,42,43),
(31,42,45),
(32,41,43),
(33,42,44),
(34,43,45),
(35,41,44),
(31,36,41),
(32,37,42),
(33,38,43),
(34,39,44),
(35,40,45),
(1,16,31),
(1,17,39),
(1,18,37),
(1,19,40),
(1,20,38),
(1,21,41),
(1,22,33),
(1,23,35),
(1,24,32),
(1,25,34),
(1,26,36),
(1,27,45),
(1,28,44),
(1,29,43),
(1,30,42),
(2,16,39),
(2,17,32),
(2,18,40),
(2,19,38),
(2,20,36),
(2,21,35),
(2,22,42),
(2,23,34),
(2,24,31),
(2,25,33),
(2,26,43),
(2,27,37),
(2,28,41),
(2,29,45),
(2,30,44),
(3,16,37),
(3,17,40),
(3,18,33),
(3,19,36),
(3,20,39),
(3,21,34),
(3,22,31),
(3,23,43),
(3,24,35),
(3,25,32),
(3,26,45),
(3,27,44),
(3,28,38),
(3,29,42),
(3,30,41),
(4,16,40),
(4,17,38),
(4,18,36),
(4,19,34),
(4,20,37),
(4,21,33),
(4,22,35),
(4,23,32),
(4,24,44),
(4,25,31),
(4,26,42),
(4,27,41),
(4,28,45),
(4,29,39),
(4,30,43),
(5,16,38),
(5,17,36),
(5,18,39),
(5,19,37),
(5,20,35),
(5,21,32),
(5,22,34),
(5,23,31),
(5,24,33),
(5,25,45),
(5,26,44),
(5,27,43),
(5,28,42),
(5,29,41),
(5,30,40),
(6,16,41),
(6,17,35),
(6,18,34),
(6,19,33),
(6,20,32),
(6,21,36),
(6,22,44),
(6,23,42),
(6,24,45),
(6,25,43),
(6,26,31),
(6,27,38),
(6,28,40),
(6,29,37),
(6,30,39),
(7,16,33),
(7,17,42),
(7,18,31),
(7,19,35),
(7,20,34),
(7,21,44),
(7,22,37),
(7,23,45),
(7,24,43),
(7,25,41),
(7,26,40),
(7,27,32),
(7,28,39),
(7,29,36),
(7,30,38),
(8,16,35),
(8,17,34),
(8,18,43),
(8,19,32),
(8,20,31),
(8,21,42),
(8,22,45),
(8,23,38),
(8,24,41),
(8,25,44),
(8,26,39),
(8,27,36),
(8,28,33),
(8,29,40),
(8,30,37),
(9,16,32),
(9,17,31),
(9,18,35),
(9,19,44),
(9,20,33),
(9,21,45),
(9,22,43),
(9,23,41),
(9,24,39),
(9,25,42),
(9,26,38),
(9,27,40),
(9,28,37),
(9,29,34),
(9,30,36),
(10,16,34),
(10,17,33),
(10,18,32),
(10,19,31),
(10,20,45),
(10,21,43),
(10,22,41),
(10,23,44),
(10,24,42),
(10,25,40),
(10,26,37),
(10,27,39),
(10,28,36),
(10,29,38),
(10,30,35),
(11,16,36),
(11,17,43),
(11,18,45),
(11,19,42),
(11,20,44),
(11,21,31),
(11,22,40),
(11,23,39),
(11,24,38),
(11,25,37),
(11,26,41),
(11,27,34),
(11,28,32),
(11,29,35),
(11,30,33),
(12,16,45),
(12,17,37),
(12,18,44),
(12,19,41),
(12,20,43),
(12,21,38),
(12,22,32),
(12,23,36),
(12,24,40),
(12,25,39),
(12,26,34),
(12,27,42),
(12,28,35),
(12,29,33),
(12,30,31),
(13,16,44),
(13,17,41),
(13,18,38),
(13,19,45),
(13,20,42),
(13,21,40),
(13,22,39),
(13,23,33),
(13,24,37),
(13,25,36),
(13,26,32),
(13,27,35),
(13,28,43),
(13,29,31),
(13,30,34),
(14,16,43),
(14,17,45),
(14,18,42),
(14,19,39),
(14,20,41),
(14,21,37),
(14,22,36),
(14,23,40),
(14,24,34),
(14,25,38),
(14,26,35),
(14,27,33),
(14,28,31),
(14,29,44),
(14,30,32),
(15,16,42),
(15,17,44),
(15,18,41),
(15,19,43),
(15,20,40),
(15,21,39),
(15,22,38),
(15,23,37),
(15,24,36),
(15,25,35),
(15,26,33),
(15,27,31),
(15,28,34),
(15,29,32),
(15,30,45)]
def __init__(self, T27=True):
if T27:
TRIPLES = SteinerTriple.TRIPLES_27
else:
TRIPLES = SteinerTriple.TRIPLES_45
rows = len(TRIPLES)
cols = max(list(map(lambda x: max(x), TRIPLES)))
problem_matrix = np.zeros((rows, cols))
counter = 0
for triple in TRIPLES:
for ele in triple:
problem_matrix[counter][ele - 1] = 1
counter += 1
self.problem_instance = problem_matrix
self.is_solveable()
def is_solveable(self):
'''
Checks if there exists a set cover at all. Raises an Exception if not.
'''
all_sets = np.ones(self.problem_instance.shape[0])
solution = Solution(self, all_sets)
if not solution.is_feasible_solution():
raise Exception("Set cover instance cannot be solved")
class Solution:
'''
Represents a possible infeasible solution of the Set Cover problem
'''
def __init__(self, set_cover_instance, set_vector, is_feasible=None, cost=None):
'''
:param set_cover_instance: instance of SetCover
:param set_vector: numpy vector indicating the sets that the solution holds.
The i-th entry of set_vector corresponds to the i-th row of the set cover
table.
:param is_feasible: indicates if the solution is a possible set cover.
:param cost: number of sets in the cover.
'''
self.set_cover_instance = set_cover_instance
self.set_vector = set_vector
self.is_feasible = is_feasible
self.cost = cost
self.is_feasible_solution()
def equals_other_sol(self, other_sol):
for i in range(0, len(self.set_vector)):
if self.set_vector[i] != other_sol.set_vector[i]:
return False
return True
def add_set(self, index):
'''
Adds the set of the given index to the solution. Afterwards the cost is updated
and is checked if the solution becomes feasible.
:param index: index in the set cover table of the set.
'''
if self.set_vector[index] == 1:
return False
self.set_vector[index] = 1
self.cost += 1
self.covered_elements += self.set_cover_instance.problem_instance[index]
self.covered_elements = [1 if ele > 0 else 0 for ele in self.covered_elements]
if sum(self.covered_elements) == self.set_cover_instance.problem_instance.shape[1]:
self.is_feasible = True
self.covered = sum(self.covered_elements)
return True
def get_cost(self):
if self.is_feasible():
return self.cost
else:
return sys.maxsize
def is_feasible_solution(self):
'''
Also retrieves the covered elements and calculates the solutions cost.
'''
if self.is_feasible is not None:
return self.is_feasible
available_elements = np.zeros(len(self.set_cover_instance.problem_instance[0]))
cost = 0
for i in range(0, len(self.set_vector)):
if self.set_vector[i] == 1:
cost += 1
available_elements += self.set_cover_instance.problem_instance[i]
self.covered_elements = [1 if ele > 0 else 0 for ele in available_elements]
self.covered = sum(self.covered_elements)
if len(available_elements[0 in available_elements]) == 0:
self.cost = cost
self.is_feasible = True
return self.is_feasible
self.cost = cost
self.is_feasible = False
return self.is_feasible | source/set_cover.py | import numpy as np
import sys
import random
import itertools
from datetime import datetime
def has_converged(found, current, duration):
assert current - found > 0
assert duration > 0
if current - found > 2000:
return True
if duration > 3600:
return True
return False
def print_now():
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
class SetCover:
'''
Represents an instance of the Minimum Set Cover problem
'''
def __init__(self, problem_instance):
'''
:param problem_instance: two dimensional numpy array. The rows represent the available
sets and the columns the possible elements. If problem_instance[i][j] is one then i-th
set has the j-th element. If it is set to 0 then the set does not have this element.
'''
self.problem_instance = problem_instance
self.is_solveable()
def is_solveable(self):
'''
Checks if there exists a set cover at all. Raises an Exception if not.
'''
all_sets = np.ones(self.problem_instance.shape[0])
solution = Solution(self, all_sets)
if not solution.is_feasible_solution():
raise Exception("Set cover instance cannot be solved")
class TightSetCover():
def __init__(self, min_k, max_k, max_n):
self.n = max_n
self.ks = []
self.number_ele = []
self.number_sets = []
for _ in range(0, self.n):
k = random.randint(min_k, max_k)
self.ks.append(k)
self.number_ele.append(2**(k + 1) - 2)
self.number_sets.append(k + 2)
shape = (sum(self.number_sets), sum(self.number_ele))
self.problem_instance = np.zeros(shape)
set_index_offset = 0
ele_index_offset = 0
for i in range(0, self.n):
tight_offset = 0
for k in range(0, self.ks[i]):
# initialize S_k
row = set_index_offset + k
for l in range(0, 2**(k + 1)):
index = ele_index_offset + l + tight_offset
self.problem_instance[row][index] = 1
tight_offset += 2**(k + 1)
# T_is...
t_index = set_index_offset + self.number_sets[i] - 2
for k in range(0, self.number_ele[i]):
if k % 2 == 0:
self.problem_instance[t_index][k + ele_index_offset] = 1
else:
self.problem_instance[t_index + 1][k + ele_index_offset] = 1
ele_index_offset += self.number_ele[i]
set_index_offset += self.number_sets[i]
self.is_solveable()
def is_solveable(self):
'''
Checks if there exists a set cover at all. Raises an Exception if not.
'''
all_sets = np.ones(self.problem_instance.shape[0])
solution = Solution(self, all_sets)
if not solution.is_feasible_solution():
raise Exception("Set cover instance cannot be solved")
class SteinerTriple:
TRIPLES_27 = [
(2, 3, 4),
(1, 3, 5),
(1, 2, 6),
(5, 6, 7),
(4, 6, 8),
(4, 5, 9),
(1, 8, 9),
(2, 7, 9),
(3, 7, 8),
(1, 4, 7),
(2, 5, 8),
(3, 6, 9),
(11, 12, 13),
(10, 12, 14),
(10, 11, 15),
(14, 15, 16),
(13, 15, 17),
(13, 14, 18),
(10, 17, 18),
(11, 16, 18),
(12, 16, 17),
(10, 13, 16),
(11, 14, 17),
(12, 15, 18),
(20, 21, 22),
(19, 21, 23),
(19, 20, 24),
(23, 24, 25),
(22, 24, 26),
(22, 23, 27),
(19, 26, 27),
(20, 25, 27),
(21, 25, 26),
(19, 22, 25),
(20, 23, 26),
(21, 24, 27),
(1, 10, 19),
(1, 11, 24),
(1, 12, 23),
(1, 13, 25),
(1, 14, 21),
(1, 15, 20),
(1, 16, 22),
(1, 17, 27),
(1, 18, 26),
(2, 10, 24),
(2, 11, 20),
(2, 12, 22),
(2, 13, 21),
(2, 14, 26),
(2, 15, 19),
(2, 16, 27),
(2, 17, 23),
(2, 18, 25),
(3, 10, 23),
(3, 11, 22),
(3, 12, 21),
(3, 13, 20),
(3, 14, 19),
(3, 15, 27),
(3, 16, 26),
(3, 17, 25),
(3, 18, 24),
(4, 10, 25),
(4, 11, 21),
(4, 12, 20),
(4, 13, 22),
(4, 14, 27),
(4, 15, 26),
(4, 16, 19),
(4, 17, 24),
(4, 18, 23),
(5, 10, 21),
(5, 11, 26),
(5, 12, 19),
(5, 13, 27),
(5, 14, 23),
(5, 15, 25),
(5, 16, 24),
(5, 17, 20),
(5, 18, 22),
(6, 10, 20),
(6, 11, 19),
(6, 12, 27),
(6, 13, 26),
(6, 14, 25),
(6, 15, 24),
(6, 16, 23),
(6, 17, 22),
(6, 18, 21),
(7, 10, 22),
(7, 11, 27),
(7, 12, 26),
(7, 13, 19),
(7, 14, 24),
(7, 15, 23),
(7, 16, 25),
(7, 17, 21),
(7, 18, 20),
(8, 10, 27),
(8, 11, 23),
(8, 12, 25),
(8, 13, 24),
(8, 14, 20),
(8, 15, 22),
(8, 16, 21),
(8, 17, 26),
(8, 18, 19),
(9, 10, 26),
(9, 11, 25),
(9, 12, 24),
(9, 13, 23),
(9, 14, 22),
(9, 15, 21),
(9, 16, 20),
(9, 17, 19),
(9, 18, 27),
]
TRIPLES_45 = [
(3,4,6),
(4,5,7),
(1,5,8),
(1,2,9),
(2,3,10),
(2,5,6),
(1,3,7),
(2,4,8),
(3,5,9),
(1,4,10),
(8,9,11),
(9,10,12),
(6,10,13),
(6,7,14),
(7,8,15),
(7,10,11),
(6,8,12),
(7,9,13),
(8,10,14),
(6,9,15),
(1,13,14),
(2,14,15),
(3,11,15),
(4,11,12),
(5,12,13),
(1,12,15),
(2,11,13),
(3,12,14),
(4,13,15),
(5,11,14),
(1,6,11),
(2,7,12),
(3,8,13),
(4,9,14),
(5,10,15),
(18,19,21),
(19,20,22),
(16,20,23),
(16,17,24),
(17,18,25),
(17,20,21),
(16,18,22),
(17,19,23),
(18,20,24),
(16,19,25),
(23,24,26),
(24,25,27),
(21,25,28),
(21,22,29),
(22,23,30),
(22,25,26),
(21,23,27),
(22,24,28),
(23,25,29),
(21,24,30),
(16,28,29),
(17,29,30),
(18,26,30),
(19,26,27),
(20,27,28),
(16,27,30),
(17,26,28),
(18,27,29),
(19,28,30),
(20,26,29),
(16,21,26),
(17,22,27),
(18,23,28),
(19,24,29),
(20,25,30),
(33,34,36),
(34,35,37),
(31,35,38),
(31,32,39),
(32,33,40),
(32,35,36),
(31,33,37),
(32,34,38),
(33,35,39),
(31,34,40),
(38,39,41),
(39,40,42),
(36,40,43),
(36,37,44),
(37,38,45),
(37,40,41),
(36,38,42),
(37,39,43),
(38,40,44),
(36,39,45),
(31,43,44),
(32,44,45),
(33,41,45),
(34,41,42),
(35,42,43),
(31,42,45),
(32,41,43),
(33,42,44),
(34,43,45),
(35,41,44),
(31,36,41),
(32,37,42),
(33,38,43),
(34,39,44),
(35,40,45),
(1,16,31),
(1,17,39),
(1,18,37),
(1,19,40),
(1,20,38),
(1,21,41),
(1,22,33),
(1,23,35),
(1,24,32),
(1,25,34),
(1,26,36),
(1,27,45),
(1,28,44),
(1,29,43),
(1,30,42),
(2,16,39),
(2,17,32),
(2,18,40),
(2,19,38),
(2,20,36),
(2,21,35),
(2,22,42),
(2,23,34),
(2,24,31),
(2,25,33),
(2,26,43),
(2,27,37),
(2,28,41),
(2,29,45),
(2,30,44),
(3,16,37),
(3,17,40),
(3,18,33),
(3,19,36),
(3,20,39),
(3,21,34),
(3,22,31),
(3,23,43),
(3,24,35),
(3,25,32),
(3,26,45),
(3,27,44),
(3,28,38),
(3,29,42),
(3,30,41),
(4,16,40),
(4,17,38),
(4,18,36),
(4,19,34),
(4,20,37),
(4,21,33),
(4,22,35),
(4,23,32),
(4,24,44),
(4,25,31),
(4,26,42),
(4,27,41),
(4,28,45),
(4,29,39),
(4,30,43),
(5,16,38),
(5,17,36),
(5,18,39),
(5,19,37),
(5,20,35),
(5,21,32),
(5,22,34),
(5,23,31),
(5,24,33),
(5,25,45),
(5,26,44),
(5,27,43),
(5,28,42),
(5,29,41),
(5,30,40),
(6,16,41),
(6,17,35),
(6,18,34),
(6,19,33),
(6,20,32),
(6,21,36),
(6,22,44),
(6,23,42),
(6,24,45),
(6,25,43),
(6,26,31),
(6,27,38),
(6,28,40),
(6,29,37),
(6,30,39),
(7,16,33),
(7,17,42),
(7,18,31),
(7,19,35),
(7,20,34),
(7,21,44),
(7,22,37),
(7,23,45),
(7,24,43),
(7,25,41),
(7,26,40),
(7,27,32),
(7,28,39),
(7,29,36),
(7,30,38),
(8,16,35),
(8,17,34),
(8,18,43),
(8,19,32),
(8,20,31),
(8,21,42),
(8,22,45),
(8,23,38),
(8,24,41),
(8,25,44),
(8,26,39),
(8,27,36),
(8,28,33),
(8,29,40),
(8,30,37),
(9,16,32),
(9,17,31),
(9,18,35),
(9,19,44),
(9,20,33),
(9,21,45),
(9,22,43),
(9,23,41),
(9,24,39),
(9,25,42),
(9,26,38),
(9,27,40),
(9,28,37),
(9,29,34),
(9,30,36),
(10,16,34),
(10,17,33),
(10,18,32),
(10,19,31),
(10,20,45),
(10,21,43),
(10,22,41),
(10,23,44),
(10,24,42),
(10,25,40),
(10,26,37),
(10,27,39),
(10,28,36),
(10,29,38),
(10,30,35),
(11,16,36),
(11,17,43),
(11,18,45),
(11,19,42),
(11,20,44),
(11,21,31),
(11,22,40),
(11,23,39),
(11,24,38),
(11,25,37),
(11,26,41),
(11,27,34),
(11,28,32),
(11,29,35),
(11,30,33),
(12,16,45),
(12,17,37),
(12,18,44),
(12,19,41),
(12,20,43),
(12,21,38),
(12,22,32),
(12,23,36),
(12,24,40),
(12,25,39),
(12,26,34),
(12,27,42),
(12,28,35),
(12,29,33),
(12,30,31),
(13,16,44),
(13,17,41),
(13,18,38),
(13,19,45),
(13,20,42),
(13,21,40),
(13,22,39),
(13,23,33),
(13,24,37),
(13,25,36),
(13,26,32),
(13,27,35),
(13,28,43),
(13,29,31),
(13,30,34),
(14,16,43),
(14,17,45),
(14,18,42),
(14,19,39),
(14,20,41),
(14,21,37),
(14,22,36),
(14,23,40),
(14,24,34),
(14,25,38),
(14,26,35),
(14,27,33),
(14,28,31),
(14,29,44),
(14,30,32),
(15,16,42),
(15,17,44),
(15,18,41),
(15,19,43),
(15,20,40),
(15,21,39),
(15,22,38),
(15,23,37),
(15,24,36),
(15,25,35),
(15,26,33),
(15,27,31),
(15,28,34),
(15,29,32),
(15,30,45)]
def __init__(self, T27=True):
if T27:
TRIPLES = SteinerTriple.TRIPLES_27
else:
TRIPLES = SteinerTriple.TRIPLES_45
rows = len(TRIPLES)
cols = max(list(map(lambda x: max(x), TRIPLES)))
problem_matrix = np.zeros((rows, cols))
counter = 0
for triple in TRIPLES:
for ele in triple:
problem_matrix[counter][ele - 1] = 1
counter += 1
self.problem_instance = problem_matrix
self.is_solveable()
def is_solveable(self):
'''
Checks if there exists a set cover at all. Raises an Exception if not.
'''
all_sets = np.ones(self.problem_instance.shape[0])
solution = Solution(self, all_sets)
if not solution.is_feasible_solution():
raise Exception("Set cover instance cannot be solved")
class Solution:
    '''
    Represents a possible infeasible solution of the Set Cover problem.

    Holds the selection vector of sets, the derived coverage indicator
    vector (``covered_elements``), the number of covered elements
    (``covered``) and the cost (number of selected sets).
    '''
    def __init__(self, set_cover_instance, set_vector, is_feasible=None, cost=None):
        '''
        :param set_cover_instance: instance of SetCover
        :param set_vector: numpy vector indicating the sets that the solution holds.
            The i-th entry of set_vector corresponds to the i-th row of the set cover
            table.
        :param is_feasible: indicates if the solution is a possible set cover.
        :param cost: number of sets in the cover.
        '''
        self.set_cover_instance = set_cover_instance
        self.set_vector = set_vector
        self.is_feasible = is_feasible
        self.cost = cost
        # Eagerly compute feasibility, the coverage vector and the cost.
        self.is_feasible_solution()
    def equals_other_sol(self, other_sol):
        """Return True when both solutions select exactly the same sets."""
        for i in range(0, len(self.set_vector)):
            if self.set_vector[i] != other_sol.set_vector[i]:
                return False
        return True
    def add_set(self, index):
        '''
        Adds the set of the given index to the solution. Afterwards the cost
        is updated and it is checked whether the solution became feasible.

        :param index: index in the set cover table of the set.
        :returns: True if the set was added, False if it was already selected.
        '''
        if self.set_vector[index] == 1:
            return False
        self.set_vector[index] = 1
        self.cost += 1
        # NOTE(review): ``covered_elements`` only exists after
        # is_feasible_solution() ran its full computation; constructing a
        # Solution with an explicit ``is_feasible`` skips that step --
        # confirm callers never combine the two.
        self.covered_elements += self.set_cover_instance.problem_instance[index]
        self.covered_elements = [1 if ele > 0 else 0 for ele in self.covered_elements]
        if sum(self.covered_elements) == self.set_cover_instance.problem_instance.shape[1]:
            self.is_feasible = True
        self.covered = sum(self.covered_elements)
        return True
    def get_cost(self):
        """Return the number of selected sets, or sys.maxsize when infeasible."""
        # BUG FIX: ``is_feasible`` is a boolean attribute, not a method; the
        # original ``self.is_feasible()`` raised TypeError on every call.
        if self.is_feasible:
            return self.cost
        else:
            return sys.maxsize
    def is_feasible_solution(self):
        '''
        Determine whether the selected sets cover every element.

        Also retrieves the covered elements and calculates the solution's cost.
        '''
        if self.is_feasible is not None:
            return self.is_feasible
        available_elements = np.zeros(len(self.set_cover_instance.problem_instance[0]))
        cost = 0
        for i in range(0, len(self.set_vector)):
            if self.set_vector[i] == 1:
                cost += 1
                available_elements += self.set_cover_instance.problem_instance[i]
        self.covered_elements = [1 if ele > 0 else 0 for ele in available_elements]
        self.covered = sum(self.covered_elements)
        self.cost = cost
        # Feasible iff no element is left uncovered. This replaces the
        # obscure boolean-scalar indexing trick
        # ``len(available_elements[0 in available_elements]) == 0``.
        self.is_feasible = bool(not np.any(available_elements == 0))
        return self.is_feasible
import signal
from unittest import mock
import pytest
from coveralls.api import CoverallsException
import entrypoint
def patch_os_envirion(environ):
    """Replace ``os.environ`` with exactly *environ* (cleared first).

    NOTE(review): the name misspells "environ" -- kept as-is because every
    test in this module calls it by this name.
    """
    return mock.patch.dict("os.environ", environ, clear=True)
def patch_coveralls_wear():
    """Patch ``Coveralls.wear`` as referenced from the entrypoint module."""
    return mock.patch("entrypoint.Coveralls.wear")
def patch_log():
    """Patch the module-level ``log`` object of the entrypoint module."""
    return mock.patch("entrypoint.log")
def patch_sys_argv(argv):
    """Substitute ``sys.argv`` with *argv* while the patch is active."""
    return mock.patch("sys.argv", argv)
def patch_requests_post(json_response=None):
    """Return a patcher that swaps entrypoint's ``requests.post`` for a mock.

    When *json_response* is given, the mock is pre-configured so that
    ``post(...).json()`` returns it.
    """
    post_mock = mock.Mock()
    if json_response:
        post_mock.configure_mock(
            **{"return_value.json.return_value": json_response}
        )
    return mock.patch("entrypoint.requests.post", post_mock)
class TestEntryPoint:
    """Tests for the coveralls GitHub-action entrypoint module.

    All external effects (Coveralls API, HTTP webhook, ``sys.argv``,
    ``os.environ``, process exit) are mocked; each test asserts the exact
    calls the entrypoint performs.
    """

    def test_main_no_token(self):
        """Argument `--github-token` is required."""
        argv = ["src/entrypoint.py"]
        with patch_sys_argv(argv), pytest.raises(SystemExit) as ex_info:
            entrypoint.main()
        assert ex_info.value.args == (signal.SIGINT.value,)

    def test_main(self):
        argv = ["src/entrypoint.py", "--github-token", "TOKEN"]
        with patch_sys_argv(argv), mock.patch(
            "entrypoint.run_coveralls"
        ) as m_run_coveralls:
            entrypoint.main()
        assert m_run_coveralls.call_args_list == [
            mock.call("TOKEN", False, False, False)
        ]

    def test_main_flag_name(self):
        argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--flag-name", "FLAG"]
        with patch_sys_argv(argv), mock.patch(
            "entrypoint.run_coveralls"
        ) as m_run_coveralls:
            entrypoint.main()
        assert m_run_coveralls.call_args_list == [
            mock.call("TOKEN", False, "FLAG", False)
        ]

    def test_main_base_path(self):
        argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--base-path", "SRC"]
        with patch_sys_argv(argv), mock.patch(
            "entrypoint.run_coveralls"
        ) as m_run_coveralls:
            entrypoint.main()
        assert m_run_coveralls.call_args_list == [
            mock.call("TOKEN", False, False, "SRC")
        ]

    def test_main_parallel_finished(self):
        argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--parallel-finished"]
        with patch_sys_argv(argv), mock.patch(
            "entrypoint.post_webhook"
        ) as m_post_webhook:
            entrypoint.main()
        assert m_post_webhook.call_args_list == [mock.call("TOKEN")]

    def test_try_main(self):
        with mock.patch(
            "entrypoint.main", side_effect=Exception
        ) as m_main, pytest.raises(SystemExit) as ex_info:
            entrypoint.try_main()
        assert m_main.call_args_list == [mock.call()]
        assert ex_info.value.args == (entrypoint.ExitCode.FAILURE,)

    def test_run_coveralls_github_token(self):
        """Simple case when Coveralls.wear() returns some results."""
        url = "https://coveralls.io/jobs/1234"
        with patch_coveralls_wear() as m_wear, patch_log() as m_log:
            m_wear.return_value = {
                "message": "Job ##12.34",
                "url": url,
            }
            entrypoint.run_coveralls(repo_token="TOKEN")
        assert m_wear.call_args_list == [mock.call()]
        assert m_log.method_calls == [
            mock.call.info("Trying submitting coverage with service_name: github..."),
            mock.call.debug(
                "Patching os.environ with: "
                "{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
            ),
            mock.call.debug(m_wear.return_value),
            mock.call.info(url),
        ]

    def test_run_coveralls_wear_error_once(self):
        """On Coveralls.wear() error we should try another `service_name`."""
        url = "https://coveralls.io/jobs/1234"
        side_effect = (
            CoverallsException("Error"),
            {"message": "Job ##12.34", "url": url},
        )
        with patch_coveralls_wear() as m_wear, patch_log() as m_log:
            m_wear.side_effect = side_effect
            entrypoint.run_coveralls(repo_token="TOKEN")
        assert m_wear.call_args_list == [mock.call(), mock.call()]
        assert m_log.method_calls == [
            mock.call.info("Trying submitting coverage with service_name: github..."),
            mock.call.debug(
                "Patching os.environ with: "
                "{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
            ),
            mock.call.warning("Failed submitting coverage with service_name: github"),
            mock.call.warning(side_effect[0]),
            mock.call.info(
                "Trying submitting coverage with service_name: github-actions..."
            ),
            mock.call.debug(
                "Patching os.environ with: "
                "{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
            ),
            mock.call.debug(side_effect[1]),
            mock.call.info(url),
        ]

    def test_run_coveralls_wear_error_twice(self):
        """Exits with error code if Coveralls.wear() fails twice."""
        side_effect = (
            CoverallsException("Error 1"),
            CoverallsException("Error 2"),
        )
        with patch_coveralls_wear() as m_wear, pytest.raises(SystemExit) as ex_info:
            m_wear.side_effect = side_effect
            entrypoint.run_coveralls(repo_token="TOKEN")
        assert ex_info.value.args == (entrypoint.ExitCode.FAILURE,)

    def test_post_webhook(self):
        """
        Tests different uses cases:
        1) default, no environment variable
        2) `GITHUB_RUN_ID` is set
        """
        repo_token = "TOKEN"
        json_response = {"done": True}
        # 1) default, no environment variable
        environ = {}
        with patch_requests_post(json_response) as m_post, patch_os_envirion(environ):
            entrypoint.post_webhook(repo_token)
        assert m_post.call_args_list == [
            mock.call(
                "https://coveralls.io/webhook",
                json={
                    "repo_token": "TOKEN",
                    "repo_name": None,
                    "payload": {"build_num": None, "status": "done"},
                },
            )
        ]
        # 2) `GITHUB_RUN_ID` and `GITHUB_REPOSITORY` are set
        environ = {
            "GITHUB_RUN_ID": "845347868344",
            "GITHUB_REPOSITORY": "AndreMiras/coveralls-python-action",
        }
        with patch_requests_post(json_response) as m_post, patch_os_envirion(environ):
            entrypoint.post_webhook(repo_token)
        assert m_post.call_args_list == [
            mock.call(
                "https://coveralls.io/webhook",
                json={
                    "repo_token": "TOKEN",
                    "repo_name": "AndreMiras/coveralls-python-action",
                    "payload": {
                        "build_num": "845347868344",
                        "status": "done",
                    },
                },
            )
        ]

    def test_post_webhook_error(self):
        """Coveralls.io json error response should raise an exception."""
        repo_token = "TOKEN"
        json_response = {"error": "Invalid repo token"}
        # 1) default, no environment variable
        environ = {}
        with patch_requests_post(json_response) as m_post, patch_os_envirion(
            environ
        ), pytest.raises(AssertionError) as ex_info:
            entrypoint.post_webhook(repo_token)
        assert m_post.call_args_list == [
            mock.call(
                "https://coveralls.io/webhook",
                json={
                    "repo_token": "TOKEN",
                    "repo_name": None,
                    "payload": {"build_num": None, "status": "done"},
                },
            )
        ]
        assert ex_info.value.args == (json_response,)

    @pytest.mark.parametrize(
        "value,expected",
        [
            (False, False),
            ("false", False),
            ("f", False),
            ("0", False),
            ("no", False),
            ("n", False),
            (True, True),
            ("true", True),
            ("t", True),
            ("1", True),
            ("yes", True),
            ("y", True),
        ],
    )
    def test_str_to_bool(self, value, expected):
        """Possible recognised values."""
        assert entrypoint.str_to_bool(value) is expected

    @pytest.mark.parametrize("value", ["", "yesn't"])
    def test_str_to_bool_value_error(self, value):
        """Other unrecognised string values raise a `ValueError`."""
        with pytest.raises(ValueError) as ex_info:
            entrypoint.str_to_bool(value)
        assert ex_info.value.args == (f"{value} is not a valid boolean value",)

    @pytest.mark.parametrize("value", [None, 0])
    def test_str_to_bool_attribute_error(self, value):
        """Other unrecognised non-string values raise an `AttributeError`."""
        with pytest.raises(AttributeError) as ex_info:
            entrypoint.str_to_bool(value)
        assert ex_info.value.args[0].endswith(" object has no attribute 'lower'")
from unittest import mock
import pytest
from coveralls.api import CoverallsException
import entrypoint
def patch_os_envirion(environ):
return mock.patch.dict("os.environ", environ, clear=True)
def patch_coveralls_wear():
return mock.patch("entrypoint.Coveralls.wear")
def patch_log():
return mock.patch("entrypoint.log")
def patch_sys_argv(argv):
return mock.patch("sys.argv", argv)
def patch_requests_post(json_response=None):
new_mock = mock.Mock()
if json_response:
new_mock.return_value.json.return_value = json_response
return mock.patch("entrypoint.requests.post", new_mock)
class TestEntryPoint:
def test_main_no_token(self):
"""Argument `--github-token` is required."""
argv = ["src/entrypoint.py"]
with patch_sys_argv(argv), pytest.raises(SystemExit) as ex_info:
entrypoint.main()
assert ex_info.value.args == (signal.SIGINT.value,)
def test_main(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.run_coveralls"
) as m_run_coveralls:
entrypoint.main()
assert m_run_coveralls.call_args_list == [
mock.call("TOKEN", False, False, False)
]
def test_main_flag_name(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--flag-name", "FLAG"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.run_coveralls"
) as m_run_coveralls:
entrypoint.main()
assert m_run_coveralls.call_args_list == [
mock.call("TOKEN", False, "FLAG", False)
]
def test_main_base_path(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--base-path", "SRC"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.run_coveralls"
) as m_run_coveralls:
entrypoint.main()
assert m_run_coveralls.call_args_list == [
mock.call("TOKEN", False, False, "SRC")
]
def test_main_parallel_finished(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--parallel-finished"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.post_webhook"
) as m_post_webhook:
entrypoint.main()
assert m_post_webhook.call_args_list == [mock.call("TOKEN")]
def test_try_main(self):
with mock.patch(
"entrypoint.main", side_effect=Exception
) as m_main, pytest.raises(SystemExit) as ex_info:
entrypoint.try_main()
assert m_main.call_args_list == [mock.call()]
assert ex_info.value.args == (entrypoint.ExitCode.FAILURE,)
def test_run_coveralls_github_token(self):
"""Simple case when Coveralls.wear() returns some results."""
url = "https://coveralls.io/jobs/1234"
with patch_coveralls_wear() as m_wear, patch_log() as m_log:
m_wear.return_value = {
"message": "Job ##12.34",
"url": url,
}
entrypoint.run_coveralls(repo_token="TOKEN")
assert m_wear.call_args_list == [mock.call()]
assert m_log.method_calls == [
mock.call.info("Trying submitting coverage with service_name: github..."),
mock.call.debug(
"Patching os.environ with: "
"{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
),
mock.call.debug(m_wear.return_value),
mock.call.info(url),
]
def test_run_coveralls_wear_error_once(self):
"""On Coveralls.wear() error we should try another `service_name`."""
url = "https://coveralls.io/jobs/1234"
side_effect = (
CoverallsException("Error"),
{"message": "Job ##12.34", "url": url},
)
with patch_coveralls_wear() as m_wear, patch_log() as m_log:
m_wear.side_effect = side_effect
entrypoint.run_coveralls(repo_token="TOKEN")
assert m_wear.call_args_list == [mock.call(), mock.call()]
assert m_log.method_calls == [
mock.call.info("Trying submitting coverage with service_name: github..."),
mock.call.debug(
"Patching os.environ with: "
"{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
),
mock.call.warning("Failed submitting coverage with service_name: github"),
mock.call.warning(side_effect[0]),
mock.call.info(
"Trying submitting coverage with service_name: github-actions..."
),
mock.call.debug(
"Patching os.environ with: "
"{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
),
mock.call.debug(side_effect[1]),
mock.call.info(url),
]
def test_run_coveralls_wear_error_twice(self):
"""Exits with error code if Coveralls.wear() fails twice."""
side_effect = (
CoverallsException("Error 1"),
CoverallsException("Error 2"),
)
with patch_coveralls_wear() as m_wear, pytest.raises(SystemExit) as ex_info:
m_wear.side_effect = side_effect
entrypoint.run_coveralls(repo_token="TOKEN")
assert ex_info.value.args == (entrypoint.ExitCode.FAILURE,)
def test_post_webhook(self):
"""
Tests different uses cases:
1) default, no environment variable
2) `GITHUB_RUN_ID` is set
"""
repo_token = "TOKEN"
json_response = {"done": True}
# 1) default, no environment variable
environ = {}
with patch_requests_post(json_response) as m_post, patch_os_envirion(environ):
entrypoint.post_webhook(repo_token)
assert m_post.call_args_list == [
mock.call(
"https://coveralls.io/webhook",
json={
"repo_token": "TOKEN",
"repo_name": None,
"payload": {"build_num": None, "status": "done"},
},
)
]
# 2) `GITHUB_RUN_ID` and `GITHUB_REPOSITORY` are set
environ = {
"GITHUB_RUN_ID": "845347868344",
"GITHUB_REPOSITORY": "AndreMiras/coveralls-python-action",
}
with patch_requests_post(json_response) as m_post, patch_os_envirion(environ):
entrypoint.post_webhook(repo_token)
assert m_post.call_args_list == [
mock.call(
"https://coveralls.io/webhook",
json={
"repo_token": "TOKEN",
"repo_name": "AndreMiras/coveralls-python-action",
"payload": {
"build_num": "845347868344",
"status": "done",
},
},
)
]
def test_post_webhook_error(self):
"""Coveralls.io json error response should raise an exception."""
repo_token = "TOKEN"
json_response = {"error": "Invalid repo token"}
# 1) default, no environment variable
environ = {}
with patch_requests_post(json_response) as m_post, patch_os_envirion(
environ
), pytest.raises(AssertionError) as ex_info:
entrypoint.post_webhook(repo_token)
assert m_post.call_args_list == [
mock.call(
"https://coveralls.io/webhook",
json={
"repo_token": "TOKEN",
"repo_name": None,
"payload": {"build_num": None, "status": "done"},
},
)
]
assert ex_info.value.args == (json_response,)
@pytest.mark.parametrize(
"value,expected",
[
(False, False),
("false", False),
("f", False),
("0", False),
("no", False),
("n", False),
(True, True),
("true", True),
("t", True),
("1", True),
("yes", True),
("y", True),
],
)
def test_str_to_bool(self, value, expected):
"""Possible recognised values."""
assert entrypoint.str_to_bool(value) is expected
@pytest.mark.parametrize("value", ["", "yesn't"])
def test_str_to_bool_value_error(self, value):
"""Other unrecognised string values raise a `ValueError`."""
with pytest.raises(ValueError) as ex_info:
entrypoint.str_to_bool(value)
assert ex_info.value.args == (f"{value} is not a valid boolean value",)
@pytest.mark.parametrize("value", [None, 0])
def test_str_to_bool_attribute_error(self, value):
"""Other unrecognised non-string values raise an `AttributeError`."""
with pytest.raises(AttributeError) as ex_info:
entrypoint.str_to_bool(value)
assert ex_info.value.args[0].endswith(" object has no attribute 'lower'") | 0.619356 | 0.311348 |
import torch
from torch import cos, sin, sign
from .template import ControlledSystemTemplate
class CartPoleGymVersion(ControlledSystemTemplate):
    '''Continuous version of the OpenAI Gym cartpole
    Inspired by: https://gist.github.com/iandanforth/e3ffb67cf3623153e968f2afdfb01dc8'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Physical parameters of the cart-pole (same numeric values as the
        # OpenAI Gym implementation referenced above).
        self.gravity = 9.8
        self.masscart = 1.0
        self.masspole = 0.1
        self.total_mass = (self.masspole + self.masscart)
        self.length = 0.5  # NOTE(review): presumably the pole *half*-length, as in Gym -- confirm
        self.polemass_length = (self.masspole * self.length)
    def _dynamics(self, t, x_):
        """Evaluate the controlled vector field at time ``t`` and state ``x_``.

        The last dimension of ``x_`` is laid out as ``[x, dx, θ, dθ]``
        (cart position/velocity, pole angle/angular velocity), as the
        slicing below shows.
        """
        self.nfe += 1 # increment number of function evaluations
        u = self._evaluate_controller(t, x_) # controller
        # States
        x = x_[..., 0:1]
        dx = x_[..., 1:2]
        θ = x_[..., 2:3]
        dθ = x_[..., 3:4]
        # Auxiliary variables
        cosθ, sinθ = cos(θ), sin(θ)
        temp = (u + self.polemass_length * dθ**2 * sinθ) / self.total_mass
        # Differential Equations
        ddθ = (self.gravity * sinθ - cosθ * temp) / \
            (self.length * (4.0/3.0 - self.masspole * cosθ**2 / self.total_mass))
        ddx = temp - self.polemass_length * ddθ * cosθ / self.total_mass
        # The last evaluated derivative is cached on the instance.
        self.cur_f = torch.cat([dx, ddx, dθ, ddθ], -1)
        return self.cur_f
    def render(self):
        raise NotImplementedError("TODO: add the rendering from OpenAI Gym")
class CartPole(ControlledSystemTemplate):
    """
    Realistic, continuous version of a cart and pole system. This version considers friction for the cart and the pole.
    We do not consider the case in which the normal force can be negative: reasonably, the cart should not try to "jump off" the track.
    This also allows us not needing to consider the previous step's sign.
    References:
    - http://coneural.org/florian/papers/05_cart_pole.pdf
    - https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-832-underactuated-robotics-spring-2009/readings/MIT6_832s09_read_ch03.pdf
    - https://gist.github.com/iandanforth/e3ffb67cf3623153e968f2afdfb01dc8
    - https://github.com/AadityaPatanjali/gym-cartpolemod
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.gravity = 9.8
        self.masscart = 1.0
        self.masspole = 0.1
        self.total_mass = (self.masspole + self.masscart)
        self.length = 0.5
        self.polemass_length = (self.masspole * self.length)
        # Friction is currently disabled (coefficients set to 0); the
        # commented values are the alternatives left by the original author.
        self.frictioncart = 0  # 5e-4
        self.frictionpole = 0  # 2e-6
    def _dynamics(self, t, x_):
        """Vector field of the friction-aware cart-pole.

        State layout along the last dimension: ``[x, dx, θ, dθ]``.
        """
        self.nfe += 1  # increment number of function evaluations
        u = self._evaluate_controller(t, x_)  # controller
        # States
        x, dx, θ, dθ = self._divide_states(x_)
        # Auxiliary variables
        cosθ, sinθ = cos(θ), sin(θ)
        temp = (u + self.polemass_length * dθ**2 * sinθ) / self.total_mass
        signed_μc = self.frictioncart * sign(dx)
        μp = self.frictionpole
        # Differential Equations
        nom_ddθ = self.gravity * sinθ - (μp * dθ) / (self.masspole * self.length) - \
            cosθ * (temp + (self.masspole * self.length * dθ**2 * signed_μc * cosθ) / self.total_mass - signed_μc * self.gravity)  # numerator ddθ
        den_ddθ = self.length * (4/3 - self.masspole * cosθ * (cosθ - signed_μc) / self.total_mass)  # denominator ddθ
        ddθ = nom_ddθ / den_ddθ  # angular acceleration of the pole
        nc = (self.masscart + self.masspole) * self.gravity - self.masspole * self.length * (ddθ * sinθ + dθ**2 * cosθ)  # normal force cart
        ddx = temp + (- self.polemass_length * ddθ * cosθ - signed_μc * nc) / self.total_mass  # acceleration of the track
        self.cur_f = torch.cat([dx, ddx, dθ, ddθ], -1)
        return self.cur_f
    def _divide_states(self, x_):
        """Split the packed state tensor into ``(x, dx, θ, dθ)`` slices."""
        x = x_[..., 0:1]
        dx = x_[..., 1:2]
        θ = x_[..., 2:3]
        dθ = x_[..., 3:4]
        return x, dx, θ, dθ
    def kinetic_energy(self, x_):
        """Total kinetic energy of cart + pole for state ``x_``."""
        x, dx, θ, dθ = self._divide_states(x_)
        return 1/2 * (self.masscart + self.masspole) * dx**2 + self.masspole * dx * dθ * self.length * cos(θ) + 1/2 * self.masspole * self.length**2 * dθ**2
    def potential_energy(self, x_):
        """Gravitational potential energy of the pole for state ``x_``."""
        x, _, θ, _ = self._divide_states(x_)
        return self.masspole * self.gravity * self.length * cos(θ)
    def render(self):
        raise NotImplementedError("TODO: add the rendering from OpenAI Gym")
from torch import cos, sin, sign
from .template import ControlledSystemTemplate
class CartPoleGymVersion(ControlledSystemTemplate):
'''Continuous version of the OpenAI Gym cartpole
Inspired by: https://gist.github.com/iandanforth/e3ffb67cf3623153e968f2afdfb01dc8'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5
self.polemass_length = (self.masspole * self.length)
def _dynamics(self, t, x_):
self.nfe += 1 # increment number of function evaluations
u = self._evaluate_controller(t, x_) # controller
# States
x = x_[..., 0:1]
dx = x_[..., 1:2]
θ = x_[..., 2:3]
dθ = x_[..., 3:4]
# Auxiliary variables
cosθ, sinθ = cos(θ), sin(θ)
temp = (u + self.polemass_length * dθ**2 * sinθ) / self.total_mass
# Differential Equations
ddθ = (self.gravity * sinθ - cosθ * temp) / \
(self.length * (4.0/3.0 - self.masspole * cosθ**2 / self.total_mass))
ddx = temp - self.polemass_length * ddθ * cosθ / self.total_mass
self.cur_f = torch.cat([dx, ddx, dθ, ddθ], -1)
return self.cur_f
def render(self):
raise NotImplementedError("TODO: add the rendering from OpenAI Gym")
class CartPole(ControlledSystemTemplate):
"""
Realistic, continuous version of a cart and pole system. This version considers friction for the cart and the pole.
We do not consider the case in which the normal force can be negative: reasonably, the cart should not try to "jump off" the track.
This also allows us not needing to consider the previous step's sign.
References:
- http://coneural.org/florian/papers/05_cart_pole.pdf
- https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-832-underactuated-robotics-spring-2009/readings/MIT6_832s09_read_ch03.pdf
- https://gist.github.com/iandanforth/e3ffb67cf3623153e968f2afdfb01dc8
- https://github.com/AadityaPatanjali/gym-cartpolemod
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5
self.polemass_length = (self.masspole * self.length)
self.frictioncart = 0 # 5e-4
self.frictionpole = 0 # 2e-6
def _dynamics(self, t, x_):
self.nfe += 1 # increment number of function evaluations
u = self._evaluate_controller(t, x_) # controller
# States
x, dx, θ, dθ = self._divide_states(x_)
# Auxiliary variables
cosθ, sinθ = cos(θ), sin(θ)
temp = (u + self.polemass_length * dθ**2 * sinθ) / self.total_mass
signed_μc = self.frictioncart * sign(dx)
μp = self.frictionpole
# Differential Equations
nom_ddθ = self.gravity * sinθ - (μp * dθ) / (self.masspole * self.length) - \
cosθ * (temp + (self.masspole * self.length * dθ**2 * signed_μc * cosθ) / self.total_mass - signed_μc * self.gravity) # nominator ddθ
den_ddθ = self.length * (4/3 - self.masspole * cosθ * (cosθ - signed_μc) / self.total_mass) # denominator ddθ
ddθ = nom_ddθ / den_ddθ # angular acceleration of the pole
nc = (self.masscart + self.masspole) * self.gravity - self.masspole * self.length * (ddθ * sinθ + dθ**2 * cosθ) # normal force cart
ddx = temp + (- self.polemass_length * ddθ * cosθ - signed_μc * nc) / self.total_mass # acceleration of the track
self.cur_f = torch.cat([dx, ddx, dθ, ddθ], -1)
return self.cur_f
def _divide_states(self, x_):
x = x_[..., 0:1]
dx = x_[..., 1:2]
θ = x_[..., 2:3]
dθ = x_[..., 3:4]
return x, dx, θ, dθ
def kinetic_energy(self, x_):
x, dx, θ, dθ = self._divide_states(x_)
return 1/2 * (self.masscart + self.masspole) * dx**2 + self.masspole * dx * dθ * self.length * cos(θ) + 1/2 * self.masspole * self.length**2 * dθ**2
def potential_energy(self, x_):
x, _, θ, _ = self._divide_states(x_)
return self.masspole * self.gravity * self.length * cos(θ)
def render(self):
raise NotImplementedError("TODO: add the rendering from OpenAI Gym") | 0.776835 | 0.434401 |
from proliantutils.redfish.resources.system import constants as sys_cons
from proliantutils.redfish.resources.system import mappings as sys_map
from sushy.resources import base
class HealthStatusField(base.CompositeField):
    # Maps the Redfish "Status" complex type; raw JSON values are translated
    # through the proliantutils mapping tables.
    state = base.MappedField(
        'State', sys_map.HEALTH_STATE_VALUE_MAP)
    health = base.MappedField('Health', sys_map.HEALTH_VALUE_MAP)
class EthernetInterface(base.ResourceBase):
    """This class represents the EthernetInterfaces resource"""
    # Field attributes are populated from the Redfish JSON payload by the
    # sushy resource machinery.
    identity = base.Field('Id', required=True)
    """The Ethernet Interface identity string"""
    name = base.Field('Name')
    """The name of the resource or array element"""
    description = base.Field('Description')
    """Description"""
    permanent_mac_address = base.Field('PermanentMACAddress')
    """This is the permanent MAC address assigned to this interface (port) """
    mac_address = base.Field('MACAddress')
    """This is the currently configured MAC address of the interface."""
    speed_mbps = base.Field('SpeedMbps')
    """This is the current speed in Mbps of this interface."""
    status = HealthStatusField("Status")
class EthernetInterfaceCollection(base.ResourceCollectionBase):
    """Collection of EthernetInterface resources."""

    _summary = None

    @property
    def _resource_type(self):
        return EthernetInterface

    @property
    def summary(self):
        """property to return the summary MAC addresses and state

        Only MACs whose health is OK and whose state is 'Enabled' are
        returned, keyed as {<port_id>: <mac_address>}.
        This is because RIBCL returns the data in format
        {'Port 1': 'aa:bb:cc:dd:ee:ff'} and ironic ilo drivers inspection
        consumes the data in this format.
        Note: 'Id' is referred to as "Port number".
        """
        if self._summary is None:
            healthy_macs = {}
            for nic in self.get_members():
                if nic.mac_address is None:
                    continue
                status = nic.status
                if (status is not None
                        and status.health == sys_cons.HEALTH_OK
                        and status.state == sys_cons.HEALTH_STATE_ENABLED):
                    healthy_macs['Port ' + nic.identity] = nic.mac_address
            self._summary = healthy_macs
        return self._summary

    def _do_refresh(self, force):
        """Do custom resource specific refresh activities

        On refresh, all sub-resources are marked as stale, i.e.
        greedy-refresh not done for them unless forced by ``force``
        argument.
        """
        self._summary = None
from proliantutils.redfish.resources.system import constants as sys_cons
from proliantutils.redfish.resources.system import mappings as sys_map
from sushy.resources import base
class HealthStatusField(base.CompositeField):
state = base.MappedField(
'State', sys_map.HEALTH_STATE_VALUE_MAP)
health = base.MappedField('Health', sys_map.HEALTH_VALUE_MAP)
class EthernetInterface(base.ResourceBase):
"""This class represents the EthernetInterfaces resource"""
identity = base.Field('Id', required=True)
"""The Ethernet Interface identity string"""
name = base.Field('Name')
"""The name of the resource or array element"""
description = base.Field('Description')
"""Description"""
permanent_mac_address = base.Field('PermanentMACAddress')
"""This is the permanent MAC address assigned to this interface (port) """
mac_address = base.Field('MACAddress')
"""This is the currently configured MAC address of the interface."""
speed_mbps = base.Field('SpeedMbps')
"""This is the current speed in Mbps of this interface."""
status = HealthStatusField("Status")
class EthernetInterfaceCollection(base.ResourceCollectionBase):
_summary = None
@property
def _resource_type(self):
return EthernetInterface
@property
def summary(self):
"""property to return the summary MAC addresses and state
This filters the MACs whose health is OK,
and in 'Enabled' State would be returned.
The returned format will be {<port_id>: <mac_address>}.
This is because RIBCL returns the data in format
{'Port 1': 'aa:bb:cc:dd:ee:ff'} and ironic ilo drivers inspection
consumes the data in this format.
Note: 'Id' is referred to as "Port number".
"""
if self._summary is None:
mac_dict = {}
for eth in self.get_members():
if eth.mac_address is not None:
if (eth.status is not None and
eth.status.health == sys_cons.HEALTH_OK
and eth.status.state ==
sys_cons.HEALTH_STATE_ENABLED):
mac_dict.update(
{'Port ' + eth.identity: eth.mac_address})
self._summary = mac_dict
return self._summary
def _do_refresh(self, force):
"""Do custom resource specific refresh activities
On refresh, all sub-resources are marked as stale, i.e.
greedy-refresh not done for them unless forced by ``force``
argument.
"""
self._summary = None | 0.608594 | 0.177063 |
from ... pyaz_utils import _call_az
def list(account_name, profile_name):
    '''
    List the instructions by billing profile id.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - profile_name -- The ID that uniquely identifies a billing profile.
    '''
    # The name intentionally shadows the ``list`` builtin: the module mirrors
    # the ``az billing instruction`` sub-commands one function per command.
    # ``locals()`` forwards exactly the declared parameters as CLI arguments,
    # so no local variables may be introduced before this call.
    return _call_az("az billing instruction list", locals())
def show(account_name, name, profile_name):
    '''
    Show the instruction by name. These are custom billing instructions and are only applicable for certain customers.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - name -- Instruction Name.
    - profile_name -- The ID that uniquely identifies a billing profile.
    '''
    # ``locals()`` must contain only the declared parameters -- do not add
    # local variables to this function.
    return _call_az("az billing instruction show", locals())
def create(account_name, name, profile_name, amount=None, creation_date=None, end_date=None, start_date=None):
    '''
    Create an instruction. These are custom billing instructions and are only applicable for certain customers.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - name -- Instruction Name.
    - profile_name -- The ID that uniquely identifies a billing profile.
    Optional Parameters:
    - amount -- The amount budgeted for this billing instruction.
    - creation_date -- The date this billing instruction was created.
    - end_date -- The date this billing instruction is no longer in effect.
    - start_date -- The date this billing instruction goes into effect.
    '''
    # ``locals()`` must contain only the declared parameters -- do not add
    # local variables to this function.
    return _call_az("az billing instruction create", locals())
def update(account_name, name, profile_name, add=None, amount=None, creation_date=None, end_date=None, force_string=None, remove=None, set=None, start_date=None):
    '''
    Update an instruction. These are custom billing instructions and are only applicable for certain customers.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - name -- Instruction Name.
    - profile_name -- The ID that uniquely identifies a billing profile.
    Optional Parameters:
    - add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
    - amount -- The amount budgeted for this billing instruction.
    - creation_date -- The date this billing instruction was created.
    - end_date -- The date this billing instruction is no longer in effect.
    - force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
    - remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
    - set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
    - start_date -- The date this billing instruction goes into effect.
    '''
    # ``set`` intentionally shadows the builtin to match the CLI flag name.
    # ``locals()`` must contain only the declared parameters -- do not add
    # local variables to this function.
    return _call_az("az billing instruction update", locals())
def list(account_name, profile_name):
'''
List the instructions by billing profile id.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- profile_name -- The ID that uniquely identifies a billing profile.
'''
return _call_az("az billing instruction list", locals())
def show(account_name, name, profile_name):
'''
Show the instruction by name. These are custom billing instructions and are only applicable for certain customers.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- name -- Instruction Name.
- profile_name -- The ID that uniquely identifies a billing profile.
'''
return _call_az("az billing instruction show", locals())
def create(account_name, name, profile_name, amount=None, creation_date=None, end_date=None, start_date=None):
'''
Create an instruction. These are custom billing instructions and are only applicable for certain customers.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- name -- Instruction Name.
- profile_name -- The ID that uniquely identifies a billing profile.
Optional Parameters:
- amount -- The amount budgeted for this billing instruction.
- creation_date -- The date this billing instruction was created.
- end_date -- The date this billing instruction is no longer in effect.
- start_date -- The date this billing instruction goes into effect.
'''
return _call_az("az billing instruction create", locals())
def update(account_name, name, profile_name, add=None, amount=None, creation_date=None, end_date=None, force_string=None, remove=None, set=None, start_date=None):
'''
Update an instruction. These are custom billing instructions and are only applicable for certain customers.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- name -- Instruction Name.
- profile_name -- The ID that uniquely identifies a billing profile.
Optional Parameters:
- add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
- amount -- The amount budgeted for this billing instruction.
- creation_date -- The date this billing instruction was created.
- end_date -- The date this billing instruction is no longer in effect.
- force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
- remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
- set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
- start_date -- The date this billing instruction goes into effect.
'''
return _call_az("az billing instruction update", locals()) | 0.780997 | 0.180179 |
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import filters, viewsets
from rest_framework import generics
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from .serializers import RecordWorkProgramSerializer, RecordAcademicPlanSerializer, WorkProgramSerializerForStatistic, \
WorkProgramInFieldOfStudySerializerForStatistic, StructuralUnitWithWpSerializer
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from django.db.models.aggregates import Count
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from dataprocessing.models import User
from workprogramsapp.expertise.models import Expertise
from workprogramsapp.models import WorkProgram, EducationalProgram, WorkProgramInFieldOfStudy, AcademicPlan, \
DisciplineBlock, \
DisciplineBlockModule, WorkProgramChangeInDisciplineBlockModule, ImplementationAcademicPlan, FieldOfStudy, \
СertificationEvaluationTool
from .serializers import WorkProgramInFieldOfStudySerializerForStatistic, \
WorkProgramSerializerForStatistic, SuperShortWorkProgramSerializer, WorkProgramSerializerForStatisticExtended, \
ShortStructuralUnitSerializer, ShortAcademicPlan, AcademicPlansDescriptionWpSerializer
from workprogramsapp.workprogram_additions.models import StructuralUnit
@api_view(['GET'])
@permission_classes((AllowAny,))
def StructuralUnits(request):
"""
API-запрос на просмотр структурных подразделений
"""
su = StructuralUnit.objects.all()
results = []
for i in su:
results.append({'value': i.id, 'label': i.title})
return Response(results)
@api_view(['GET'])
@permission_classes((AllowAny,))
def AcademicPlans(request):
"""
API-запрос на просмотр УП
"""
ap = AcademicPlan.objects.all()
results = []
for i in ap:
results.append({'value': i.id, 'label': i.educational_profile + " " + i.year})
return Response(results)
class OneAcademicPlanWithDescriptionWp(generics.RetrieveAPIView):
"""
Получение конкретного учебного плана по его id со всеми описаниями РПД
"""
queryset = AcademicPlan.objects.all()
serializer_class = AcademicPlansDescriptionWpSerializer
permission_classes = [AllowAny]
class RecordOfWorkProgram(APIView):
# Количество рабочих программ по квалификации
permission_classes = [AllowAny]
def get(self, request, qualification):
queryset = WorkProgram.objects.all()
if qualification != 'all_q':
queryset = queryset.filter(qualification=qualification)
return Response({"quantity": len(queryset)})
class RecordOfWorkProgramQuality(APIView):
"""
Сколько РПД имеют редакторов, в скольких РПД заполнены разделы, сколько РПД без пререквизитов.
Сколько РПД не привязаны к учебному плану, не указан язык реализации, структурное подразделение
"""
permission_classes = [AllowAny]
def get(self, request):
queryset = WorkProgram.objects.all()
without_language = queryset.filter(language=None)
without_editors = queryset.filter(editors=None)
without_structural_unit = queryset.filter(structural_unit=None)
without_prerequisites = queryset.filter(prerequisites=None)
without_discipline_sections = queryset.filter(discipline_sections=None)
without_academic_plan = queryset.filter(work_program_in_change_block=None)
without_outcomes = queryset.filter(outcomes=None)
# serializer = RecordWorkProgramSerializer(queryset, many=True)
return Response({"all": len(queryset),
"without_language": len(without_language),
"without_editors": len(without_editors),
"without_structural_unit": len(without_structural_unit),
"without_prerequisites": len(without_prerequisites),
"without_discipline_sections": len(without_discipline_sections),
"without_academic_plan": len(without_academic_plan),
"without_outcomes": len(without_outcomes)})
class RecordOfAcademicPlan(APIView):
# Количество учебных планов по квалификации и году
permission_classes = [AllowAny]
def get(self, request, qualification, year):
queryset = AcademicPlan.objects.all()
if qualification != 'all_q':
queryset = queryset.filter(qualification=qualification)
if year != 'all_years':
queryset = queryset.filter(year=year)
return Response({"quantity": len(queryset)})
@api_view(['GET'])
@permission_classes((AllowAny,))
def EmptyStringWp(request):
"""
API-запрос на просмотр РПД, без id строки
"""
empty_wp = (WorkProgramInFieldOfStudy.objects.filter(work_program__editors__isnull=False,
id_str_up__isnull=True)).distinct()
serializer = WorkProgramInFieldOfStudySerializerForStatistic(empty_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def WpWithoutAP(request):
"""
API-запрос на просмотр РПД, которых нету в УП
"""
empty_wp = (WorkProgram.objects.filter(zuns_for_wp=None,
editors__isnull=False)).distinct()
serializer = WorkProgramSerializerForStatistic(empty_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def WpWithSimilarCode(request):
"""
API-запрос на просмотр РПД с одинаковым дисциплин кодом
"""
wp_counter_code = WorkProgram.objects.all().values('discipline_code').annotate(
total=Count('discipline_code')).filter(total__gt=1)
print(wp_counter_code)
similar_codes = []
for wp in wp_counter_code:
similar_codes.append(wp['discipline_code'])
similar_wp = WorkProgram.objects.filter(discipline_code__in=similar_codes).order_by("discipline_code")
serializer = WorkProgramSerializerForStatistic(similar_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def SimpleStatistic(request):
"""
API-запрос на просмотр различной статистики по РПД и пользователям
"""
registered_users = User.objects.count()
rpd_users = User.objects.filter(editors__isnull=False).distinct().count()
on_expertise = Expertise.objects.filter(expertise_status="EX").count()
approved = Expertise.objects.filter(expertise_status="AC").count()
in_work = Expertise.objects.filter(expertise_status="WK").count() + WorkProgram.objects.filter(
expertise_with_rpd__isnull=True).distinct().count()
editors_rpd = WorkProgram.objects.filter(editors__isnull=False).count()
return Response(
{
"registered_users": registered_users,
"users_in_rpd": rpd_users,
"rpd_with_editors": editors_rpd,
"rpd_on_expertise": on_expertise,
"rpd_approved": approved,
"rpd_in_work": in_work
}
)
@api_view(['GET'])
@permission_classes((AllowAny,))
def WpWithoutStructuralUnit(request):
"""
API-запрос на на просмотр РПД без структурного подразделения
"""
wp_without_unit = WorkProgram.objects.filter(structural_unit__isnull=True)
serializer = WorkProgramSerializerForStatistic(wp_without_unit, many=True)
# print(serializer.data)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def StructuralUnitWp(request):
"""
API-запрос на просмотр РПД в структурныхх подразделениях; Можно фильтровать посредством параметров в адресной строке
Поле филтрации: status - статус РПД
Параметры: EX - на экспертизе, AC - одобрена, WK - в работе
Пример запроса:
https://op.itmo.ru/api/statistic/structural/workprogram?status=EX - Все РПД из структруных подразделений на экспертизе
"""
try:
status_filter = request.query_params["status"]
except KeyError:
status_filter = ""
print(status_filter)
units = StructuralUnit.objects.all()
result = []
for unit in units:
if status_filter == "WK":
needed_wp = (WorkProgram.objects.filter(expertise_with_rpd__isnull=True,
structural_unit=unit) | WorkProgram.objects.filter(
expertise_with_rpd__expertise_status__contains=status_filter,
structural_unit=unit)).distinct()
elif status_filter == "":
needed_wp = WorkProgram.objects.filter(structural_unit=unit).distinct()
else:
needed_wp = WorkProgram.objects.filter(expertise_with_rpd__expertise_status__contains=status_filter,
structural_unit=unit).distinct()
serializer = WorkProgramSerializerForStatistic(needed_wp, many=True)
result.append({"id": unit.id,
"title": unit.title,
"work_programs": serializer.data})
return Response(result)
class WorkProgramDetailsWithApAndSemesters1(generics.ListAPIView):
"""
Запрос с филтрами для рпд в структурных подразделениях
-----------------------------------------------------
Обязательные параметры:
structural_unit_id - id структрных подразделений, для которых надо получить РПД, может быть несколько
Необязательные параметры:
year - Год учбеного плана в котором реализуется РПД, может быть несколько
semester - Семетр в котором реализуется РПД
status - Тип статуса РПД (EX - на экспертизе, AC - одобрена, WK - в работе), только в одном экземпляре
Пример запроса:
http://127.0.0.1:8000/api/statistic/structural/workprogram_extend?structural_unit_id=5&semester=5&year=2020&year=2019
Все РПД из структурного подразделения с ID 5, реализующиеся в 5 семестре, для УП 2020 и 2019 года
"""
queryset = WorkProgram.objects.all()
serializer_class = WorkProgramSerializerForStatisticExtended
permission_classes = [AllowAny]
@api_view(['GET'])
@permission_classes((AllowAny,))
def WorkProgramDetailsWithApAndSemesters(request):
print(request.query_params)
status_filter = request.query_params["status"] if "status" in request.query_params else ""
structural_unit_id = request.query_params.getlist(
"structural_unit_id") if "structural_unit_id" in request.query_params else []
year = request.query_params.getlist("year") if "year" in request.query_params \
else [x for x in range(2000, 2050)]
semester = request.query_params.getlist("semester") if "semester" in request.query_params else [-1]
cred_regex = r""
structural_unit_id = [int(x) for x in structural_unit_id]
print(structural_unit_id)
for i in range(12):
if str(i + 1) in semester:
cred_regex += "[^0],"
else:
cred_regex += "[0-9\-],"
cred_regex = cred_regex[:-1]
print(cred_regex)
if status_filter == "WK":
needed_wp = (WorkProgram.objects.filter(expertise_with_rpd__isnull=True,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex) |
WorkProgram.objects.filter(
expertise_with_rpd__expertise_status__contains=status_filter,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex)).distinct()
elif status_filter == "":
needed_wp = WorkProgram.objects.filter(structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex).distinct()
else:
needed_wp = WorkProgram.objects.filter(expertise_with_rpd__expertise_status__contains=status_filter,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex).distinct()
print(len(WorkProgram.objects.filter(structural_unit=6)))
serializer = WorkProgramSerializerForStatisticExtended(needed_wp, many=True)
return Response(serializer.data) | application/records/views.py | from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import filters, viewsets
from rest_framework import generics
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from .serializers import RecordWorkProgramSerializer, RecordAcademicPlanSerializer, WorkProgramSerializerForStatistic, \
WorkProgramInFieldOfStudySerializerForStatistic, StructuralUnitWithWpSerializer
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from django.db.models.aggregates import Count
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from dataprocessing.models import User
from workprogramsapp.expertise.models import Expertise
from workprogramsapp.models import WorkProgram, EducationalProgram, WorkProgramInFieldOfStudy, AcademicPlan, \
DisciplineBlock, \
DisciplineBlockModule, WorkProgramChangeInDisciplineBlockModule, ImplementationAcademicPlan, FieldOfStudy, \
СertificationEvaluationTool
from .serializers import WorkProgramInFieldOfStudySerializerForStatistic, \
WorkProgramSerializerForStatistic, SuperShortWorkProgramSerializer, WorkProgramSerializerForStatisticExtended, \
ShortStructuralUnitSerializer, ShortAcademicPlan, AcademicPlansDescriptionWpSerializer
from workprogramsapp.workprogram_additions.models import StructuralUnit
@api_view(['GET'])
@permission_classes((AllowAny,))
def StructuralUnits(request):
"""
API-запрос на просмотр структурных подразделений
"""
su = StructuralUnit.objects.all()
results = []
for i in su:
results.append({'value': i.id, 'label': i.title})
return Response(results)
@api_view(['GET'])
@permission_classes((AllowAny,))
def AcademicPlans(request):
"""
API-запрос на просмотр УП
"""
ap = AcademicPlan.objects.all()
results = []
for i in ap:
results.append({'value': i.id, 'label': i.educational_profile + " " + i.year})
return Response(results)
class OneAcademicPlanWithDescriptionWp(generics.RetrieveAPIView):
"""
Получение конкретного учебного плана по его id со всеми описаниями РПД
"""
queryset = AcademicPlan.objects.all()
serializer_class = AcademicPlansDescriptionWpSerializer
permission_classes = [AllowAny]
class RecordOfWorkProgram(APIView):
# Количество рабочих программ по квалификации
permission_classes = [AllowAny]
def get(self, request, qualification):
queryset = WorkProgram.objects.all()
if qualification != 'all_q':
queryset = queryset.filter(qualification=qualification)
return Response({"quantity": len(queryset)})
class RecordOfWorkProgramQuality(APIView):
"""
Сколько РПД имеют редакторов, в скольких РПД заполнены разделы, сколько РПД без пререквизитов.
Сколько РПД не привязаны к учебному плану, не указан язык реализации, структурное подразделение
"""
permission_classes = [AllowAny]
def get(self, request):
queryset = WorkProgram.objects.all()
without_language = queryset.filter(language=None)
without_editors = queryset.filter(editors=None)
without_structural_unit = queryset.filter(structural_unit=None)
without_prerequisites = queryset.filter(prerequisites=None)
without_discipline_sections = queryset.filter(discipline_sections=None)
without_academic_plan = queryset.filter(work_program_in_change_block=None)
without_outcomes = queryset.filter(outcomes=None)
# serializer = RecordWorkProgramSerializer(queryset, many=True)
return Response({"all": len(queryset),
"without_language": len(without_language),
"without_editors": len(without_editors),
"without_structural_unit": len(without_structural_unit),
"without_prerequisites": len(without_prerequisites),
"without_discipline_sections": len(without_discipline_sections),
"without_academic_plan": len(without_academic_plan),
"without_outcomes": len(without_outcomes)})
class RecordOfAcademicPlan(APIView):
# Количество учебных планов по квалификации и году
permission_classes = [AllowAny]
def get(self, request, qualification, year):
queryset = AcademicPlan.objects.all()
if qualification != 'all_q':
queryset = queryset.filter(qualification=qualification)
if year != 'all_years':
queryset = queryset.filter(year=year)
return Response({"quantity": len(queryset)})
@api_view(['GET'])
@permission_classes((AllowAny,))
def EmptyStringWp(request):
"""
API-запрос на просмотр РПД, без id строки
"""
empty_wp = (WorkProgramInFieldOfStudy.objects.filter(work_program__editors__isnull=False,
id_str_up__isnull=True)).distinct()
serializer = WorkProgramInFieldOfStudySerializerForStatistic(empty_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def WpWithoutAP(request):
"""
API-запрос на просмотр РПД, которых нету в УП
"""
empty_wp = (WorkProgram.objects.filter(zuns_for_wp=None,
editors__isnull=False)).distinct()
serializer = WorkProgramSerializerForStatistic(empty_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def WpWithSimilarCode(request):
"""
API-запрос на просмотр РПД с одинаковым дисциплин кодом
"""
wp_counter_code = WorkProgram.objects.all().values('discipline_code').annotate(
total=Count('discipline_code')).filter(total__gt=1)
print(wp_counter_code)
similar_codes = []
for wp in wp_counter_code:
similar_codes.append(wp['discipline_code'])
similar_wp = WorkProgram.objects.filter(discipline_code__in=similar_codes).order_by("discipline_code")
serializer = WorkProgramSerializerForStatistic(similar_wp, many=True)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def SimpleStatistic(request):
"""
API-запрос на просмотр различной статистики по РПД и пользователям
"""
registered_users = User.objects.count()
rpd_users = User.objects.filter(editors__isnull=False).distinct().count()
on_expertise = Expertise.objects.filter(expertise_status="EX").count()
approved = Expertise.objects.filter(expertise_status="AC").count()
in_work = Expertise.objects.filter(expertise_status="WK").count() + WorkProgram.objects.filter(
expertise_with_rpd__isnull=True).distinct().count()
editors_rpd = WorkProgram.objects.filter(editors__isnull=False).count()
return Response(
{
"registered_users": registered_users,
"users_in_rpd": rpd_users,
"rpd_with_editors": editors_rpd,
"rpd_on_expertise": on_expertise,
"rpd_approved": approved,
"rpd_in_work": in_work
}
)
@api_view(['GET'])
@permission_classes((AllowAny,))
def WpWithoutStructuralUnit(request):
"""
API-запрос на на просмотр РПД без структурного подразделения
"""
wp_without_unit = WorkProgram.objects.filter(structural_unit__isnull=True)
serializer = WorkProgramSerializerForStatistic(wp_without_unit, many=True)
# print(serializer.data)
return Response(serializer.data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def StructuralUnitWp(request):
"""
API-запрос на просмотр РПД в структурныхх подразделениях; Можно фильтровать посредством параметров в адресной строке
Поле филтрации: status - статус РПД
Параметры: EX - на экспертизе, AC - одобрена, WK - в работе
Пример запроса:
https://op.itmo.ru/api/statistic/structural/workprogram?status=EX - Все РПД из структруных подразделений на экспертизе
"""
try:
status_filter = request.query_params["status"]
except KeyError:
status_filter = ""
print(status_filter)
units = StructuralUnit.objects.all()
result = []
for unit in units:
if status_filter == "WK":
needed_wp = (WorkProgram.objects.filter(expertise_with_rpd__isnull=True,
structural_unit=unit) | WorkProgram.objects.filter(
expertise_with_rpd__expertise_status__contains=status_filter,
structural_unit=unit)).distinct()
elif status_filter == "":
needed_wp = WorkProgram.objects.filter(structural_unit=unit).distinct()
else:
needed_wp = WorkProgram.objects.filter(expertise_with_rpd__expertise_status__contains=status_filter,
structural_unit=unit).distinct()
serializer = WorkProgramSerializerForStatistic(needed_wp, many=True)
result.append({"id": unit.id,
"title": unit.title,
"work_programs": serializer.data})
return Response(result)
class WorkProgramDetailsWithApAndSemesters1(generics.ListAPIView):
"""
Запрос с филтрами для рпд в структурных подразделениях
-----------------------------------------------------
Обязательные параметры:
structural_unit_id - id структрных подразделений, для которых надо получить РПД, может быть несколько
Необязательные параметры:
year - Год учбеного плана в котором реализуется РПД, может быть несколько
semester - Семетр в котором реализуется РПД
status - Тип статуса РПД (EX - на экспертизе, AC - одобрена, WK - в работе), только в одном экземпляре
Пример запроса:
http://127.0.0.1:8000/api/statistic/structural/workprogram_extend?structural_unit_id=5&semester=5&year=2020&year=2019
Все РПД из структурного подразделения с ID 5, реализующиеся в 5 семестре, для УП 2020 и 2019 года
"""
queryset = WorkProgram.objects.all()
serializer_class = WorkProgramSerializerForStatisticExtended
permission_classes = [AllowAny]
@api_view(['GET'])
@permission_classes((AllowAny,))
def WorkProgramDetailsWithApAndSemesters(request):
print(request.query_params)
status_filter = request.query_params["status"] if "status" in request.query_params else ""
structural_unit_id = request.query_params.getlist(
"structural_unit_id") if "structural_unit_id" in request.query_params else []
year = request.query_params.getlist("year") if "year" in request.query_params \
else [x for x in range(2000, 2050)]
semester = request.query_params.getlist("semester") if "semester" in request.query_params else [-1]
cred_regex = r""
structural_unit_id = [int(x) for x in structural_unit_id]
print(structural_unit_id)
for i in range(12):
if str(i + 1) in semester:
cred_regex += "[^0],"
else:
cred_regex += "[0-9\-],"
cred_regex = cred_regex[:-1]
print(cred_regex)
if status_filter == "WK":
needed_wp = (WorkProgram.objects.filter(expertise_with_rpd__isnull=True,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex) |
WorkProgram.objects.filter(
expertise_with_rpd__expertise_status__contains=status_filter,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex)).distinct()
elif status_filter == "":
needed_wp = WorkProgram.objects.filter(structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex).distinct()
else:
needed_wp = WorkProgram.objects.filter(expertise_with_rpd__expertise_status__contains=status_filter,
zuns_for_wp__work_program_change_in_discipline_block_module__discipline_block_module__descipline_block__academic_plan__year__in=year,
structural_unit__in=structural_unit_id,
zuns_for_wp__work_program_change_in_discipline_block_module__credit_units__iregex=cred_regex).distinct()
print(len(WorkProgram.objects.filter(structural_unit=6)))
serializer = WorkProgramSerializerForStatisticExtended(needed_wp, many=True)
return Response(serializer.data) | 0.511961 | 0.232757 |
from common import *
import datetime
import argparse
import time
# Absolute directory containing this script; all relative paths below
# are resolved against it.
here = os.path.abspath(os.path.dirname(__file__))
# Directory holding the PyG multi-GPU application scripts that each
# test configuration is launched from (passed as `appdir` to run()).
app_dir = os.path.join(here, '../pyg/multi_gpu')
# NOTE: the free-floating note below refers to the `log_folder`
# argument of the test functions: when a folder name is supplied, the
# runs are skipped and only the existing logs are parsed.
"""
if log_dir is not None, it will only parse logs
"""
def overall_perf_test(log_folder=None, mock=False):
    """Run (or re-parse) the PyG overall performance test.

    Collects sample/copy/train time from non-pipelined runs (columns
    0-2) and total epoch time from pipelined runs (column 3), for GCN
    and GraphSAGE over four datasets on 8 GPUs.

    Args:
        log_folder: If given, no jobs are launched; only the existing
            logs under ``run-logs/<log_folder>`` are parsed.
        mock: Passed through to ``ConfigList.run`` to do a dry run.
    """
    tic = time.time()

    if log_folder:
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
    else:
        log_dir = os.path.join(
            here, f'run-logs/logs_pyg_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')

    # One table row per (app, dataset) pair, in this fixed order.
    row_configs = [
        (App.gcn, Dataset.products),
        (App.gcn, Dataset.twitter),
        (App.gcn, Dataset.papers100M),
        (App.gcn, Dataset.uk_2006_05),
        (App.graphsage, Dataset.products),
        (App.graphsage, Dataset.twitter),
        (App.graphsage, Dataset.papers100M),
        (App.graphsage, Dataset.uk_2006_05),
    ]
    col_definitions = ['sample_time', 'copy_time', 'train_time', 'epoch_time']

    log_table = LogTable(num_row=len(row_configs), num_col=len(col_definitions))
    for col_id, definition in enumerate(col_definitions):
        log_table = log_table.update_col_definition(
            col_id=col_id, definition=definition)
    # Breakdown columns (0-2) are read from the non-pipelined runs.
    for row_id, (app, dataset) in enumerate(row_configs):
        log_table = log_table.update_row_definition(
            row_id=row_id,
            col_range=[0, 2],
            app=app,
            dataset=dataset,
            BOOL_pipelining='no_pipelining')
    # The epoch-time column (3) is read from the pipelined runs.
    for row_id, (app, dataset) in enumerate(row_configs):
        log_table = log_table.update_row_definition(
            row_id=row_id,
            col_range=[3, 3],
            app=app,
            dataset=dataset,
            BOOL_pipelining='pipelining')
    log_table = log_table.create()

    ConfigList(
        test_group_name='PyG overall performance test'
    ).select(
        'app',
        [App.gcn, App.graphsage]
    ).override(
        'num_epoch',
        [10]
    ).override(
        'BOOL_pipelining',
        ['pipelining', 'no_pipelining']
    ).override(
        'num_sampling_worker',
        [40],
    ).override(
        'devices',
        ['0 1 2 3 4 5 6 7']
    # ).override(
    #     'BOOL_validate_configs',
    #     ['validate_configs']
    ).run(
        appdir=app_dir,
        logdir=log_dir,
        mock=mock
    ).parse_logs(
        logtable=log_table,
        logdir=log_dir
    )

    toc = time.time()
    print(
        'PyG overall performance test uses {:.4f} secs'.format(toc - tic))
def breakdown_test(log_folder=None, mock=False):
    """Run (or re-parse) the PyG time-breakdown test.

    Collects sample/copy/train/epoch time (columns 0-3) from
    single-GPU, non-pipelined runs of GCN and GraphSAGE over four
    datasets.

    Args:
        log_folder: If given, no jobs are launched; only the existing
            logs under ``run-logs/<log_folder>`` are parsed.
        mock: Passed through to ``ConfigList.run`` to do a dry run.
    """
    tic = time.time()

    if log_folder:
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
    else:
        log_dir = os.path.join(
            here, f'run-logs/logs_pyg_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')

    # One table row per (app, dataset) pair, in this fixed order.
    row_configs = [
        (App.gcn, Dataset.products),
        (App.gcn, Dataset.twitter),
        (App.gcn, Dataset.papers100M),
        (App.gcn, Dataset.uk_2006_05),
        (App.graphsage, Dataset.products),
        (App.graphsage, Dataset.twitter),
        (App.graphsage, Dataset.papers100M),
        (App.graphsage, Dataset.uk_2006_05),
    ]
    col_definitions = ['sample_time', 'copy_time', 'train_time', 'epoch_time']

    log_table = LogTable(num_row=len(row_configs), num_col=len(col_definitions))
    for col_id, definition in enumerate(col_definitions):
        log_table = log_table.update_col_definition(
            col_id=col_id, definition=definition)
    # All four columns come from the non-pipelined runs here.
    for row_id, (app, dataset) in enumerate(row_configs):
        log_table = log_table.update_row_definition(
            row_id=row_id,
            col_range=[0, 3],
            app=app,
            dataset=dataset,
            BOOL_pipelining='no_pipelining')
    log_table = log_table.create()

    ConfigList(
        test_group_name='PyG breakdown test'
    ).select(
        'app',
        [App.gcn, App.graphsage]
    ).override(
        'num_epoch',
        [10]
    ).override(
        'BOOL_pipelining',
        ['no_pipelining']
    ).override(
        'num_sampling_worker',
        [40],
    ).override(
        'devices',
        ['0']
    # ).override(
    #     'BOOL_validate_configs',
    #     ['validate_configs']
    ).run(
        appdir=app_dir,
        logdir=log_dir,
        mock=mock
    ).parse_logs(
        logtable=log_table,
        logdir=log_dir
    )

    toc = time.time()
    print(
        'PyG breakdown test uses {:.4f} secs'.format(toc - tic))
if __name__ == '__main__':
    # Fix: the parser was named "SGNN DGL runner" — a copy-paste from
    # the DGL runner script; this file drives the PyG tests.
    argparser = argparse.ArgumentParser("SGNN PyG runner")
    argparser.add_argument('-l', '--log-folder', default=None,
                           help='parse existing logs in this folder instead of running')
    argparser.add_argument('-m', '--mock', action='store_true', default=False,
                           help='dry run: print commands without executing them')
    args = argparser.parse_args()

    overall_perf_test(args.log_folder, args.mock)
    breakdown_test(args.log_folder, args.mock)
import datetime
import argparse
import time
here = os.path.abspath(os.path.dirname(__file__))
app_dir = os.path.join(here, '../pyg/multi_gpu')
"""
if log_dir is not None, it will only parse logs
"""
def overall_perf_test(log_folder=None, mock=False):
    """Run (or re-parse) the PyG overall performance test.

    Builds an 8x4 log table — one row per (app, dataset) pair, columns
    sample/copy/train time taken from the no-pipelining runs and epoch time
    from the pipelining runs — then launches every config combination.

    Args:
        log_folder: if given, parse existing logs under run-logs/<log_folder>
            instead of creating a fresh timestamped log directory.
        mock: if True, dry-run without actually executing the apps.
    """
    tic = time.time()
    if log_folder:
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
    else:
        log_dir = os.path.join(
            here, f'run-logs/logs_pyg_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')

    # The 16 repetitive update_row_definition calls are generated from the
    # (app, dataset) cross product; row_id follows enumeration order, which
    # reproduces the original hand-written row ordering exactly.
    apps = [App.gcn, App.graphsage]
    datasets = [Dataset.products, Dataset.twitter,
                Dataset.papers100M, Dataset.uk_2006_05]
    row_specs = [(app, ds) for app in apps for ds in datasets]

    # Reassign on every call so this works whether the builder mutates in
    # place or returns a new instance.
    log_table = LogTable(num_row=8, num_col=4)
    for col_id, definition in enumerate(
            ['sample_time', 'copy_time', 'train_time', 'epoch_time']):
        log_table = log_table.update_col_definition(
            col_id=col_id, definition=definition)
    # Columns 0-2 come from the no-pipelining runs ...
    for row_id, (app, dataset) in enumerate(row_specs):
        log_table = log_table.update_row_definition(
            row_id=row_id, col_range=[0, 2], app=app, dataset=dataset,
            BOOL_pipelining='no_pipelining')
    # ... column 3 (epoch_time) from the pipelining runs.
    for row_id, (app, dataset) in enumerate(row_specs):
        log_table = log_table.update_row_definition(
            row_id=row_id, col_range=[3, 3], app=app, dataset=dataset,
            BOOL_pipelining='pipelining')
    log_table = log_table.create()

    ConfigList(
        test_group_name='PyG overall performance test'
    ).select(
        'app', [App.gcn, App.graphsage]
    ).override(
        'num_epoch', [10]
    ).override(
        'BOOL_pipelining', ['pipelining', 'no_pipelining']
    ).override(
        'num_sampling_worker', [40],
    ).override(
        'devices', ['0 1 2 3 4 5 6 7']
    # ).override(
    #     'BOOL_validate_configs', ['validate_configs']
    ).run(
        appdir=app_dir, logdir=log_dir, mock=mock
    ).parse_logs(
        logtable=log_table, logdir=log_dir
    )
    toc = time.time()
    print('PyG overall performance test uses {:.4f} secs'.format(toc - tic))
def breakdown_test(log_folder=None, mock=False):
    """Run (or re-parse) the PyG breakdown test (single GPU, no pipelining).

    Builds an 8x4 log table — one row per (app, dataset) pair, all four time
    columns taken from the no-pipelining run — then launches the configs.

    Args:
        log_folder: if given, parse existing logs under run-logs/<log_folder>
            instead of creating a fresh timestamped log directory.
        mock: if True, dry-run without actually executing the apps.
    """
    tic = time.time()
    if log_folder:
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
    else:
        log_dir = os.path.join(
            here, f'run-logs/logs_pyg_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')

    # Rows are the (app, dataset) cross product, same ordering as the original
    # hand-written chain of update_row_definition calls.
    apps = [App.gcn, App.graphsage]
    datasets = [Dataset.products, Dataset.twitter,
                Dataset.papers100M, Dataset.uk_2006_05]

    # Reassign on every call so this works whether the builder mutates in
    # place or returns a new instance.
    log_table = LogTable(num_row=8, num_col=4)
    for col_id, definition in enumerate(
            ['sample_time', 'copy_time', 'train_time', 'epoch_time']):
        log_table = log_table.update_col_definition(
            col_id=col_id, definition=definition)
    row_id = 0
    for app in apps:
        for dataset in datasets:
            log_table = log_table.update_row_definition(
                row_id=row_id, col_range=[0, 3], app=app, dataset=dataset,
                BOOL_pipelining='no_pipelining')
            row_id += 1
    log_table = log_table.create()

    ConfigList(
        test_group_name='PyG breakdown test'
    ).select(
        'app', [App.gcn, App.graphsage]
    ).override(
        'num_epoch', [10]
    ).override(
        'BOOL_pipelining', ['no_pipelining']
    ).override(
        'num_sampling_worker', [40],
    ).override(
        'devices', ['0']
    # ).override(
    #     'BOOL_validate_configs', ['validate_configs']
    ).run(
        appdir=app_dir, logdir=log_dir, mock=mock
    ).parse_logs(
        logtable=log_table, logdir=log_dir
    )
    toc = time.time()
    print('PyG breakdown test uses {:.4f} secs'.format(toc - tic))
if __name__ == '__main__':
    # FIX: the parser description said "SGNN DGL runner", but this file is the
    # PyG runner (all test-group names/prints say "PyG") — copy-paste error.
    argparser = argparse.ArgumentParser("SGNN PyG runner")
    argparser.add_argument('-l', '--log-folder', default=None)
    argparser.add_argument('-m', '--mock', action='store_true', default=False)
    args = argparser.parse_args()
    overall_perf_test(args.log_folder, args.mock)
    breakdown_test(args.log_folder, args.mock)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Tests for results_lib."""
import contextlib
import os
import shutil
import tempfile
from six.moves import xrange
import tensorflow as tf
from single_task import results_lib # brain coder
@contextlib.contextmanager
def temporary_directory(suffix='', prefix='tmp', base_path=None):
  """A context manager to create a temporary directory and clean up on exit.

  The parameters are the same ones expected by tempfile.mkdtemp.
  The directory will be securely and atomically created.
  Everything under it will be removed when exiting the context.

  Args:
    suffix: optional suffix.
    prefix: optional prefix.
    base_path: the base path under which to create the temporary directory.

  Yields:
    The absolute path of the new temporary directory.
  """
  temp_dir_path = tempfile.mkdtemp(suffix, prefix, base_path)
  try:
    yield temp_dir_path
  finally:
    try:
      shutil.rmtree(temp_dir_path)
    except OSError as e:
      # FIX: exceptions have no `.message` attribute on Python 3 (Python 2
      # only), so `e.message == ...` raised AttributeError instead of
      # handling the error. Compare against str(e) instead.
      if str(e) == 'Cannot call rmtree on a symbolic link':
        # Interesting synthetic exception made up by shutil.rmtree.
        # Means we received a symlink from mkdtemp.
        # Also means must clean up the symlink instead.
        os.unlink(temp_dir_path)
      else:
        raise
def freeze(dictionary):
  """Convert dict to a hashable frozenset of (key, value) pairs.

  FIX: dict.iteritems() was removed in Python 3; items() works everywhere.
  """
  return frozenset(dictionary.items())
class ResultsLibTest(tf.test.TestCase):
  """Behavioral tests for results_lib.Results (single-shard and sharded)."""

  def testResults(self):
    # A fresh Results object starts empty; appended dicts are read back from
    # the same shard in insertion order.
    with temporary_directory() as logdir:
      results_obj = results_lib.Results(logdir)
      self.assertEqual(results_obj.read_this_shard(), [])
      results_obj.append(
          {'foo': 1.5, 'bar': 2.5, 'baz': 0})
      results_obj.append(
          {'foo': 5.5, 'bar': -1, 'baz': 2})
      self.assertEqual(
          results_obj.read_this_shard(),
          [{'foo': 1.5, 'bar': 2.5, 'baz': 0},
           {'foo': 5.5, 'bar': -1, 'baz': 2}])

  def testShardedResults(self):
    # read_all() aggregates entries from every shard writer; cross-shard
    # order is unspecified, so compare as sets of frozen dicts.
    with temporary_directory() as logdir:
      n = 4  # Number of shards.
      results_objs = [
          results_lib.Results(logdir, shard_id=i) for i in xrange(n)]
      for i, robj in enumerate(results_objs):
        robj.append({'foo': i, 'bar': 1 + i * 2})
      results_list, _ = results_objs[0].read_all()
      # Check results. Order does not matter here.
      self.assertEqual(
          set(freeze(r) for r in results_list),
          set(freeze({'foo': i, 'bar': 1 + i * 2}) for i in xrange(n)))
if __name__ == '__main__':
tf.test.main() | research/brain_coder/single_task/results_lib_test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Tests for results_lib."""
import contextlib
import os
import shutil
import tempfile
from six.moves import xrange
import tensorflow as tf
from single_task import results_lib # brain coder
@contextlib.contextmanager
def temporary_directory(suffix='', prefix='tmp', base_path=None):
  """A context manager to create a temporary directory and clean up on exit.

  The parameters are the same ones expected by tempfile.mkdtemp.
  The directory will be securely and atomically created.
  Everything under it will be removed when exiting the context.

  Args:
    suffix: optional suffix.
    prefix: optional prefix.
    base_path: the base path under which to create the temporary directory.

  Yields:
    The absolute path of the new temporary directory.
  """
  temp_dir_path = tempfile.mkdtemp(suffix, prefix, base_path)
  try:
    yield temp_dir_path
  finally:
    try:
      shutil.rmtree(temp_dir_path)
    except OSError as e:
      # FIX: exceptions have no `.message` attribute on Python 3 (Python 2
      # only), so `e.message == ...` raised AttributeError instead of
      # handling the error. Compare against str(e) instead.
      if str(e) == 'Cannot call rmtree on a symbolic link':
        # Interesting synthetic exception made up by shutil.rmtree.
        # Means we received a symlink from mkdtemp.
        # Also means must clean up the symlink instead.
        os.unlink(temp_dir_path)
      else:
        raise
def freeze(dictionary):
  """Convert dict to a hashable frozenset of (key, value) pairs.

  FIX: dict.iteritems() was removed in Python 3; items() works everywhere.
  """
  return frozenset(dictionary.items())
class ResultsLibTest(tf.test.TestCase):
  """Behavioral tests for results_lib.Results (single-shard and sharded)."""

  def testResults(self):
    # A fresh Results object starts empty; appended dicts are read back from
    # the same shard in insertion order.
    with temporary_directory() as logdir:
      results_obj = results_lib.Results(logdir)
      self.assertEqual(results_obj.read_this_shard(), [])
      results_obj.append(
          {'foo': 1.5, 'bar': 2.5, 'baz': 0})
      results_obj.append(
          {'foo': 5.5, 'bar': -1, 'baz': 2})
      self.assertEqual(
          results_obj.read_this_shard(),
          [{'foo': 1.5, 'bar': 2.5, 'baz': 0},
           {'foo': 5.5, 'bar': -1, 'baz': 2}])

  def testShardedResults(self):
    # read_all() aggregates entries from every shard writer; cross-shard
    # order is unspecified, so compare as sets of frozen dicts.
    with temporary_directory() as logdir:
      n = 4  # Number of shards.
      results_objs = [
          results_lib.Results(logdir, shard_id=i) for i in xrange(n)]
      for i, robj in enumerate(results_objs):
        robj.append({'foo': i, 'bar': 1 + i * 2})
      results_list, _ = results_objs[0].read_all()
      # Check results. Order does not matter here.
      self.assertEqual(
          set(freeze(r) for r in results_list),
          set(freeze({'foo': i, 'bar': 1 + i * 2}) for i in xrange(n)))
if __name__ == '__main__':
  # Delegate discovery and running of the test* methods to the TF harness.
  tf.test.main()
import constant_parameters as c # pylint: disable=unused-wildcard-import
import button_listener as bl # pylint: disable=unused-wildcard-import
import session_manager as sm # pylint: disable=unused-wildcard-import
import bedtime_protocol as bp # pylint: disable=unused-wildcard-import
import light_effects as leff
# Module state: True while the demo light effect is running (red press exits).
inDemoMode = False
def green_button_clicked(timeHeld):
    """Green click: start a standard sprint and close any open interruption.

    Does nothing while the bedtime wake-up period is active.
    """
    if not bp.isInWakeUpPeriod:
        sm.start_sprint(c.STANDARD_SPRINT_LENGTH, timeHeld)
        if sm.inInterruption:
            sm.end_interruption()
def yellow_button_clicked():
    """Yellow click: start a standard-length break, but only mid-session."""
    if not sm.inSession:
        return
    sm.start_break(c.STANDARD_BREAK_LENGTH)
def red_button_clicked():
    # Intentionally a no-op: red only has press/hold semantics (see
    # red_button_just_pressed and update_button_logic).
    pass
def green_button_just_pressed():
    """Green press: acknowledge wake-up if the wake-up period is running."""
    if not bp.isInWakeUpPeriod:
        return
    bp.woke_up()
def yellow_button_just_pressed():
    # No press-time action for yellow; its click/hold gestures are handled in
    # yellow_button_clicked and update_button_logic.
    pass
def red_button_just_pressed():
    """Red press: interrupt a running session, and/or leave demo mode."""
    global inDemoMode
    if sm.inSession:
        sm.start_interruption() # no hold gesture exists mid-session, so react immediately on press
    if inDemoMode and bl.isOneButtonPressed():
        leff.kill_effect()
        inDemoMode = False
# Module state: latch that suppresses hold gestures after one fires, until
# update_button_logic sees all buttons released again.
isButtonHoldsEnabled = True
def disable_all_button_holds_until_all_released():
    """Latch hold gestures off; re-enabled once every button is released."""
    global isButtonHoldsEnabled
    isButtonHoldsEnabled = False
def update_button_logic():
    """Poll held-button state and fire hold gestures; call once per tick.

    Each triggered gesture immediately latches further hold gestures off via
    disable_all_button_holds_until_all_released(), so only one fires per
    press cycle.
    """
    global isButtonHoldsEnabled, inDemoMode
    # Re-arm hold gestures once all three buttons are up.
    if not bl.isGreenPressed and not bl.isYellowPressed and not bl.isRedPressed:
        isButtonHoldsEnabled = True
    if isButtonHoldsEnabled:
        # Green held alone: quick sprint (available in and out of a session).
        if bl.greenTimeHeld > c.QUICK_SPRINT_HOLD_TIME and bl.isOneButtonPressed():
            sm.start_sprint(c.QUICK_SPRINT_LENGTH)
            disable_all_button_holds_until_all_released()
        if sm.inSession:
            # In-session gestures: long break, end session.
            if bl.yellowTimeHeld > c.LONG_BREAK_HOLD_TIME and bl.isOneButtonPressed():
                sm.start_break(c.LONG_BREAK_LENGTH)
                disable_all_button_holds_until_all_released()
            if bl.greenTimeHeld > c.END_SESSION_HOLD_TIME and bl.yellowTimeHeld > c.END_SESSION_HOLD_TIME:
                sm.end_session()
                disable_all_button_holds_until_all_released()
        else:
            # Out-of-session gestures: bedtime, abort bedtime, demo mode.
            if bl.yellowTimeHeld > c.BEDTIME_SHUTDOWN_HOLD_TIME and bl.isOneButtonPressed():
                bp.bedtime()
                disable_all_button_holds_until_all_released()
            if bl.redTimeHeld > c.ABORT_BEDTIME_PROTOCOL_HOLD_TIME and bl.isOneButtonPressed():
                bp.abort_bedtime_protocol()
                disable_all_button_holds_until_all_released()
            # All three held together: toggle the demo light effect on.
            if bl.greenTimeHeld > c.DEMO_MODE_HOLD_TIME and bl.yellowTimeHeld > c.DEMO_MODE_HOLD_TIME and bl.redTimeHeld > c.DEMO_MODE_HOLD_TIME:
                leff.start(leff.DEMO_MODE)
                inDemoMode = True
                print("DEMO MODE")
disable_all_button_holds_until_all_released() | button_logic.py | import constant_parameters as c # pylint: disable=unused-wildcard-import
import button_listener as bl # pylint: disable=unused-wildcard-import
import session_manager as sm # pylint: disable=unused-wildcard-import
import bedtime_protocol as bp # pylint: disable=unused-wildcard-import
import light_effects as leff
# Module state: True while the demo light effect is running (red press exits).
inDemoMode = False
def green_button_clicked(timeHeld):
    """Green click: start a standard sprint and close any open interruption.

    Does nothing while the bedtime wake-up period is active.
    """
    if not bp.isInWakeUpPeriod:
        sm.start_sprint(c.STANDARD_SPRINT_LENGTH, timeHeld)
        if sm.inInterruption:
            sm.end_interruption()
def yellow_button_clicked():
    """Yellow click: start a standard-length break, but only mid-session."""
    if not sm.inSession:
        return
    sm.start_break(c.STANDARD_BREAK_LENGTH)
def red_button_clicked():
    # Intentionally a no-op: red only has press/hold semantics (see
    # red_button_just_pressed and update_button_logic).
    pass
def green_button_just_pressed():
    """Green press: acknowledge wake-up if the wake-up period is running."""
    if not bp.isInWakeUpPeriod:
        return
    bp.woke_up()
def yellow_button_just_pressed():
    # No press-time action for yellow; its click/hold gestures are handled in
    # yellow_button_clicked and update_button_logic.
    pass
def red_button_just_pressed():
    """Red press: interrupt a running session, and/or leave demo mode."""
    global inDemoMode
    if sm.inSession:
        sm.start_interruption() # no hold gesture exists mid-session, so react immediately on press
    if inDemoMode and bl.isOneButtonPressed():
        leff.kill_effect()
        inDemoMode = False
# Module state: latch that suppresses hold gestures after one fires, until
# update_button_logic sees all buttons released again.
isButtonHoldsEnabled = True
def disable_all_button_holds_until_all_released():
    """Latch hold gestures off; re-enabled once every button is released."""
    global isButtonHoldsEnabled
    isButtonHoldsEnabled = False
def update_button_logic():
    """Poll held-button state and fire hold gestures; call once per tick.

    Each triggered gesture immediately latches further hold gestures off via
    disable_all_button_holds_until_all_released(), so only one fires per
    press cycle.
    """
    global isButtonHoldsEnabled, inDemoMode
    # Re-arm hold gestures once all three buttons are up.
    if not bl.isGreenPressed and not bl.isYellowPressed and not bl.isRedPressed:
        isButtonHoldsEnabled = True
    if isButtonHoldsEnabled:
        # Green held alone: quick sprint (available in and out of a session).
        if bl.greenTimeHeld > c.QUICK_SPRINT_HOLD_TIME and bl.isOneButtonPressed():
            sm.start_sprint(c.QUICK_SPRINT_LENGTH)
            disable_all_button_holds_until_all_released()
        if sm.inSession:
            # In-session gestures: long break, end session.
            if bl.yellowTimeHeld > c.LONG_BREAK_HOLD_TIME and bl.isOneButtonPressed():
                sm.start_break(c.LONG_BREAK_LENGTH)
                disable_all_button_holds_until_all_released()
            if bl.greenTimeHeld > c.END_SESSION_HOLD_TIME and bl.yellowTimeHeld > c.END_SESSION_HOLD_TIME:
                sm.end_session()
                disable_all_button_holds_until_all_released()
        else:
            # Out-of-session gestures: bedtime, abort bedtime, demo mode.
            if bl.yellowTimeHeld > c.BEDTIME_SHUTDOWN_HOLD_TIME and bl.isOneButtonPressed():
                bp.bedtime()
                disable_all_button_holds_until_all_released()
            if bl.redTimeHeld > c.ABORT_BEDTIME_PROTOCOL_HOLD_TIME and bl.isOneButtonPressed():
                bp.abort_bedtime_protocol()
                disable_all_button_holds_until_all_released()
            # All three held together: toggle the demo light effect on.
            if bl.greenTimeHeld > c.DEMO_MODE_HOLD_TIME and bl.yellowTimeHeld > c.DEMO_MODE_HOLD_TIME and bl.redTimeHeld > c.DEMO_MODE_HOLD_TIME:
                leff.start(leff.DEMO_MODE)
                inDemoMode = True
                print("DEMO MODE")
                disable_all_button_holds_until_all_released()
import pytest
from sphinx.testing.util import etree_parse
@pytest.mark.sphinx('qthelp', testroot='basic')
def test_qthelp_basic(app, status, warning):
    """A basic build emits the expected entries in the .qhp and .qhcp files."""
    app.builder.build_all()

    qhp = (app.outdir / 'Python.qhp').text()
    for fragment in (
            '<customFilter name="Python ">',
            '<filterAttribute>Python</filterAttribute>',
            '<filterAttribute></filterAttribute>',
            '<section title="Python documentation" ref="index.html">',
            '<file>genindex.html</file>',
            '<file>index.html</file>',
            '<file>_static/basic.css</file>',
            '<file>_static/down.png</file>',
    ):
        assert fragment in qhp

    qhcp = (app.outdir / 'Python.qhcp').text()
    for fragment in (
            '<title>Python documentation</title>',
            '<homePage>qthelp://org.sphinx.python/doc/index.html</homePage>',
            '<startPage>qthelp://org.sphinx.python/doc/index.html</startPage>',
            '<input>Python.qhp</input>',
            '<output>Python.qch</output>',
            '<file>Python.qch</file>',
    ):
        assert fragment in qhcp
@pytest.mark.sphinx('qthelp', testroot='need-escaped')
def test_qthelp_escaped(app, status, warning):
    """Markup and quotes in project names/titles must be XML-escaped in qhp."""
    app.builder.build_all()
    et = etree_parse(app.outdir / 'needbescapedbproject.qhp')
    # The custom filter keeps the raw (markup-bearing) project name; it is
    # entity-encoded in the XML and parsed back here.
    customFilter = et.find('.//customFilter')
    assert len(customFilter) == 2
    assert customFilter.attrib == {'name': 'need <b>"escaped"</b> project '}
    assert customFilter[0].text == 'needbescapedbproject'
    assert customFilter[1].text is None
    # TOC mirrors the doctree; titles keep their escaped markup verbatim.
    toc = et.find('.//toc')
    assert len(toc) == 1
    assert toc[0].attrib == {'title': 'need <b>"escaped"</b> project documentation',
                             'ref': 'index.html'}
    assert len(toc[0]) == 4
    assert toc[0][0].attrib == {'title': '<foo>', 'ref': 'foo.html'}
    assert toc[0][0][0].attrib == {'title': 'quux', 'ref': 'quux.html'}
    assert toc[0][0][1].attrib == {'title': 'foo "1"', 'ref': 'foo.html#foo-1'}
    assert toc[0][0][1][0].attrib == {'title': 'foo.1-1', 'ref': 'foo.html#foo-1-1'}
    assert toc[0][0][2].attrib == {'title': 'foo.2', 'ref': 'foo.html#foo-2'}
    assert toc[0][1].attrib == {'title': 'bar', 'ref': 'bar.html'}
    assert toc[0][2].attrib == {'title': 'http://sphinx-doc.org/',
                                'ref': 'http://sphinx-doc.org/'}
    assert toc[0][3].attrib == {'title': 'baz', 'ref': 'baz.html'}
    # Index keywords with markup/quotes in their names are also escaped.
    keywords = et.find('.//keywords')
    assert len(keywords) == 2
    assert keywords[0].attrib == {'name': '<subsection>', 'ref': 'index.html#index-0'}
    assert keywords[1].attrib == {'name': '"subsection"', 'ref': 'index.html#index-0'}
@pytest.mark.sphinx('qthelp', testroot='basic')
def test_qthelp_namespace(app, status, warning):
    """qthelp_namespace drives <namespace> and the qthelp:// page URLs."""
    def check(ns):
        app.builder.build_all()
        qhp = (app.outdir / 'Python.qhp').text()
        assert '<namespace>%s</namespace>' % ns in qhp
        qhcp = (app.outdir / 'Python.qhcp').text()
        assert '<homePage>qthelp://%s/doc/index.html</homePage>' % ns in qhcp
        assert '<startPage>qthelp://%s/doc/index.html</startPage>' % ns in qhcp

    # Default namespace.
    check('org.sphinx.python')
    # Explicitly configured namespace.
    app.config.qthelp_namespace = 'org.sphinx-doc.sphinx'
    check('org.sphinx-doc.sphinx')
@pytest.mark.sphinx('qthelp', testroot='basic')
def test_qthelp_title(app, status, warning):
    """html_title/html_short_title feed the qhp section and qhcp title."""
    # default title
    app.builder.build_all()
    qhp = (app.outdir / 'Python.qhp').text()
    assert '<section title="Python documentation" ref="index.html">' in qhp
    qhcp = (app.outdir / 'Python.qhcp').text()
    assert '<title>Python documentation</title>' in qhcp
    # give a title (markup must come out XML-escaped)
    app.config.html_title = 'Sphinx <b>"full"</b> title'
    app.config.html_short_title = 'Sphinx <b>"short"</b> title'
    app.builder.build_all()
    qhp = (app.outdir / 'Python.qhp').text()
    assert ('<section title="Sphinx <b>"full"</b> title" ref="index.html">'
            in qhp)
    qhcp = (app.outdir / 'Python.qhcp').text()
assert '<title>Sphinx <b>"short"</b> title</title>' in qhcp | tests/test_build_qthelp.py | import pytest
from sphinx.testing.util import etree_parse
@pytest.mark.sphinx('qthelp', testroot='basic')
def test_qthelp_basic(app, status, warning):
    """A basic build emits the expected entries in the .qhp and .qhcp files."""
    app.builder.build_all()

    qhp = (app.outdir / 'Python.qhp').text()
    for fragment in (
            '<customFilter name="Python ">',
            '<filterAttribute>Python</filterAttribute>',
            '<filterAttribute></filterAttribute>',
            '<section title="Python documentation" ref="index.html">',
            '<file>genindex.html</file>',
            '<file>index.html</file>',
            '<file>_static/basic.css</file>',
            '<file>_static/down.png</file>',
    ):
        assert fragment in qhp

    qhcp = (app.outdir / 'Python.qhcp').text()
    for fragment in (
            '<title>Python documentation</title>',
            '<homePage>qthelp://org.sphinx.python/doc/index.html</homePage>',
            '<startPage>qthelp://org.sphinx.python/doc/index.html</startPage>',
            '<input>Python.qhp</input>',
            '<output>Python.qch</output>',
            '<file>Python.qch</file>',
    ):
        assert fragment in qhcp
@pytest.mark.sphinx('qthelp', testroot='need-escaped')
def test_qthelp_escaped(app, status, warning):
    """Markup and quotes in project names/titles must be XML-escaped in qhp."""
    app.builder.build_all()
    et = etree_parse(app.outdir / 'needbescapedbproject.qhp')
    # The custom filter keeps the raw (markup-bearing) project name; it is
    # entity-encoded in the XML and parsed back here.
    customFilter = et.find('.//customFilter')
    assert len(customFilter) == 2
    assert customFilter.attrib == {'name': 'need <b>"escaped"</b> project '}
    assert customFilter[0].text == 'needbescapedbproject'
    assert customFilter[1].text is None
    # TOC mirrors the doctree; titles keep their escaped markup verbatim.
    toc = et.find('.//toc')
    assert len(toc) == 1
    assert toc[0].attrib == {'title': 'need <b>"escaped"</b> project documentation',
                             'ref': 'index.html'}
    assert len(toc[0]) == 4
    assert toc[0][0].attrib == {'title': '<foo>', 'ref': 'foo.html'}
    assert toc[0][0][0].attrib == {'title': 'quux', 'ref': 'quux.html'}
    assert toc[0][0][1].attrib == {'title': 'foo "1"', 'ref': 'foo.html#foo-1'}
    assert toc[0][0][1][0].attrib == {'title': 'foo.1-1', 'ref': 'foo.html#foo-1-1'}
    assert toc[0][0][2].attrib == {'title': 'foo.2', 'ref': 'foo.html#foo-2'}
    assert toc[0][1].attrib == {'title': 'bar', 'ref': 'bar.html'}
    assert toc[0][2].attrib == {'title': 'http://sphinx-doc.org/',
                                'ref': 'http://sphinx-doc.org/'}
    assert toc[0][3].attrib == {'title': 'baz', 'ref': 'baz.html'}
    # Index keywords with markup/quotes in their names are also escaped.
    keywords = et.find('.//keywords')
    assert len(keywords) == 2
    assert keywords[0].attrib == {'name': '<subsection>', 'ref': 'index.html#index-0'}
    assert keywords[1].attrib == {'name': '"subsection"', 'ref': 'index.html#index-0'}
@pytest.mark.sphinx('qthelp', testroot='basic')
def test_qthelp_namespace(app, status, warning):
    """qthelp_namespace drives <namespace> and the qthelp:// page URLs."""
    def check(ns):
        app.builder.build_all()
        qhp = (app.outdir / 'Python.qhp').text()
        assert '<namespace>%s</namespace>' % ns in qhp
        qhcp = (app.outdir / 'Python.qhcp').text()
        assert '<homePage>qthelp://%s/doc/index.html</homePage>' % ns in qhcp
        assert '<startPage>qthelp://%s/doc/index.html</startPage>' % ns in qhcp

    # Default namespace.
    check('org.sphinx.python')
    # Explicitly configured namespace.
    app.config.qthelp_namespace = 'org.sphinx-doc.sphinx'
    check('org.sphinx-doc.sphinx')
@pytest.mark.sphinx('qthelp', testroot='basic')
def test_qthelp_title(app, status, warning):
    """html_title/html_short_title feed the qhp section and qhcp title."""
    # Default titles.
    app.builder.build_all()
    assert ('<section title="Python documentation" ref="index.html">'
            in (app.outdir / 'Python.qhp').text())
    assert ('<title>Python documentation</title>'
            in (app.outdir / 'Python.qhcp').text())

    # Custom, markup-bearing titles must come out XML-escaped.
    app.config.html_title = 'Sphinx <b>"full"</b> title'
    app.config.html_short_title = 'Sphinx <b>"short"</b> title'
    app.builder.build_all()
    assert ('<section title="Sphinx <b>"full"</b> title" ref="index.html">'
            in (app.outdir / 'Python.qhp').text())
    assert ('<title>Sphinx <b>"short"</b> title</title>'
            in (app.outdir / 'Python.qhcp').text())
import re
from pathlib import Path
from subprocess import check_output
import click
import git
from typing import Optional
import neovim
# Matches rebase-todo lines like "pick 1a2b3c message"; group(1) is the sha.
LINE_RE = re.compile(r'^(?:\w+)\s+([0-9a-f]+)\s')
@click.command()
@click.argument('bufnr')
@click.argument('commitsha')
def main(bufnr, commitsha):
    """Highlight commits related to *commitsha* in a git-rebase-todo buffer.

    Attaches back to the running nvim over stdio, then colors every other sha
    in buffer *bufnr* by how much it overlaps with *commitsha*: same file set,
    common files, or merely common packages.
    """
    nvim = neovim.attach('stdio')
    buf = nvim.buffers[int(bufnr)]
    repo = git.Repo(buf.name, search_parent_directories=True)
    files = get_files_from_ref(repo, commitsha)
    packages = get_packages_from_files(files)
    # add highlighting for the current commit
    hl_init(nvim)
    hl_add(nvim, 'gitrebaseCurrent', commitsha)
    # get all other refs from the buffer
    refs = set()
    for line in buf:
        m = LINE_RE.match(line)
        if m:
            refs.add(m.group(1))
    for ref in refs:
        if ref != commitsha:
            otherfiles = get_files_from_ref(repo, ref)
            if otherfiles == files:
                hl_add(nvim, 'gitrebaseSameFiles', ref)
            elif otherfiles & files:
                hl_add(nvim, 'gitrebaseCommonFiles', ref)
            else:
                # No direct file overlap — fall back to package-level overlap.
                otherpackages = get_packages_from_files(otherfiles)
                if otherpackages & packages:
                    hl_add(nvim, 'gitrebaseCommonPackages', ref)
def get_files_from_ref(repo, ref):
    """Return the set of file paths touched by commit *ref*.

    NOTE(review): *repo* is currently unused — `git diff-tree` runs in the
    process's working directory; kept for interface compatibility.
    """
    output = check_output([
        'git',
        'diff-tree',
        '--no-commit-id',
        '--name-only',
        '-r',
        ref,
    ])
    return set(output.decode('utf-8').splitlines())
def get_packages_from_files(files):
    """Return the set of package directories (as strings) containing *files*."""
    candidates = (get_package_from_path(Path(f)) for f in files)
    return {str(pkg) for pkg in candidates if pkg}
def get_package_from_path(examine: Path) -> Optional[Path]:
    """Walk upwards from *examine* to the nearest directory that contains a
    Python packaging marker (setup.py / setup.cfg / flit.ini).

    Returns None when no marker is found. FIX: the old guard
    `while len(examine.parts)` never terminated on a miss, because both
    Path('/') and Path('.') are their own parent and keep a non-empty
    `parts` tuple; we now stop once `.parent` makes no progress.
    """
    while True:
        for marker in ('setup.py', 'setup.cfg', 'flit.ini'):
            if (examine / marker).exists():
                return examine
        parent = examine.parent
        if parent == examine:
            # Reached the filesystem root (or bare '.') without a marker.
            return None
        examine = parent
def hl_init(nvim):
    """Link the rebase highlight groups to theme groups, then reset them."""
    groups = {
        'gitrebaseSameFiles': 'Typedef',
        'gitrebaseCommonFiles': 'Operator',
        'gitrebaseCommonPackages': 'Macro',
        'gitrebaseCurrent': 'Function',
    }
    # link highlight groups
    for group, target in groups.items():
        nvim.command('hi! link {} {}'.format(group, target))
    # clear highlight groups
    for group in groups:
        nvim.command('syn clear {}'.format(group))
def hl_add(nvim, group, ref):
    """Register *ref* as a keyword of highlight *group* within gitrebaseCommit."""
    command = 'syn keyword {} {} containedin=gitrebaseCommit'.format(group, ref)
    nvim.command(command)
if __name__ == '__main__':
    # Entry point: the ftplugin invokes this script with <bufnr> <commitsha>.
main() | vim/after/ftplugin/gitrebase.py | import re
from pathlib import Path
from subprocess import check_output
import click
import git
from typing import Optional
import neovim
# Matches rebase-todo lines like "pick 1a2b3c message"; group(1) is the sha.
LINE_RE = re.compile(r'^(?:\w+)\s+([0-9a-f]+)\s')
@click.command()
@click.argument('bufnr')
@click.argument('commitsha')
def main(bufnr, commitsha):
    """Highlight commits related to *commitsha* in a git-rebase-todo buffer.

    Attaches back to the running nvim over stdio, then colors every other sha
    in buffer *bufnr* by how much it overlaps with *commitsha*: same file set,
    common files, or merely common packages.
    """
    nvim = neovim.attach('stdio')
    buf = nvim.buffers[int(bufnr)]
    repo = git.Repo(buf.name, search_parent_directories=True)
    files = get_files_from_ref(repo, commitsha)
    packages = get_packages_from_files(files)
    # add highlighting for the current commit
    hl_init(nvim)
    hl_add(nvim, 'gitrebaseCurrent', commitsha)
    # get all other refs from the buffer
    refs = set()
    for line in buf:
        m = LINE_RE.match(line)
        if m:
            refs.add(m.group(1))
    for ref in refs:
        if ref != commitsha:
            otherfiles = get_files_from_ref(repo, ref)
            if otherfiles == files:
                hl_add(nvim, 'gitrebaseSameFiles', ref)
            elif otherfiles & files:
                hl_add(nvim, 'gitrebaseCommonFiles', ref)
            else:
                # No direct file overlap — fall back to package-level overlap.
                otherpackages = get_packages_from_files(otherfiles)
                if otherpackages & packages:
                    hl_add(nvim, 'gitrebaseCommonPackages', ref)
def get_files_from_ref(repo, ref):
    """Return the set of file paths touched by commit *ref*.

    NOTE(review): *repo* is currently unused — `git diff-tree` runs in the
    process's working directory; kept for interface compatibility.
    """
    output = check_output([
        'git',
        'diff-tree',
        '--no-commit-id',
        '--name-only',
        '-r',
        ref,
    ])
    return set(output.decode('utf-8').splitlines())
def get_packages_from_files(files):
    """Return the set of package directories (as strings) containing *files*."""
    candidates = (get_package_from_path(Path(f)) for f in files)
    return {str(pkg) for pkg in candidates if pkg}
def get_package_from_path(examine: Path) -> Optional[Path]:
    """Walk upwards from *examine* to the nearest directory that contains a
    Python packaging marker (setup.py / setup.cfg / flit.ini).

    Returns None when no marker is found. FIX: the old guard
    `while len(examine.parts)` never terminated on a miss, because both
    Path('/') and Path('.') are their own parent and keep a non-empty
    `parts` tuple; we now stop once `.parent` makes no progress.
    """
    while True:
        for marker in ('setup.py', 'setup.cfg', 'flit.ini'):
            if (examine / marker).exists():
                return examine
        parent = examine.parent
        if parent == examine:
            # Reached the filesystem root (or bare '.') without a marker.
            return None
        examine = parent
def hl_init(nvim):
    """Link the rebase highlight groups to theme groups, then reset them."""
    groups = {
        'gitrebaseSameFiles': 'Typedef',
        'gitrebaseCommonFiles': 'Operator',
        'gitrebaseCommonPackages': 'Macro',
        'gitrebaseCurrent': 'Function',
    }
    # link highlight groups
    for group, target in groups.items():
        nvim.command('hi! link {} {}'.format(group, target))
    # clear highlight groups
    for group in groups:
        nvim.command('syn clear {}'.format(group))
def hl_add(nvim, group, ref):
    """Register *ref* as a keyword of highlight *group* within gitrebaseCommit."""
    command = 'syn keyword {} {} containedin=gitrebaseCommit'.format(group, ref)
    nvim.command(command)
if __name__ == '__main__':
    # Entry point: the ftplugin invokes this script with <bufnr> <commitsha>.
    main()
import argparse
import pathlib
import sqlite3
from ddht._utils import humanize_bytes
from ddht.app import BaseApplication
from ddht.boot_info import BootInfo
from ddht.v5_1.alexandria.abc import AdvertisementDatabaseAPI, ContentStorageAPI
from ddht.v5_1.alexandria.advertisement_db import AdvertisementDatabase
from ddht.v5_1.alexandria.boot_info import AlexandriaBootInfo
from ddht.v5_1.alexandria.broadcast_log import BroadcastLog
from ddht.v5_1.alexandria.content_storage import (
FileSystemContentStorage,
MemoryContentStorage,
)
from ddht.v5_1.alexandria.network import AlexandriaNetwork
from ddht.v5_1.alexandria.rpc_handlers import get_alexandria_rpc_handlers
from ddht.v5_1.alexandria.xdg import get_xdg_alexandria_root
from ddht.v5_1.app import Application
class AlexandriaApplication(BaseApplication):
base_protocol_app: Application
    def __init__(self, args: argparse.Namespace, boot_info: BootInfo) -> None:
        """Build the Alexandria app on top of the base discovery-v5.1 app.

        Parses the Alexandria-specific CLI namespace into AlexandriaBootInfo
        and instantiates (but does not start) the base protocol Application.
        """
        super().__init__(args, boot_info)
        self._alexandria_boot_info = AlexandriaBootInfo.from_namespace(self._args)
        self.base_protocol_app = Application(self._args, self._boot_info)
async def run(self) -> None:
self.manager.run_daemon_child_service(self.base_protocol_app)
await self.base_protocol_app.wait_ready()
xdg_alexandria_root = get_xdg_alexandria_root()
xdg_alexandria_root.mkdir(parents=True, exist_ok=True)
max_advertisement_count = self._alexandria_boot_info.max_advertisement_count
commons_content_storage_max_size = (
self._alexandria_boot_info.commons_storage_size
)
commons_content_storage: ContentStorageAPI
commons_storage_display: str
if self._alexandria_boot_info.commons_storage == ":memory:":
commons_content_storage = MemoryContentStorage()
commons_storage_display = "<memory>"
elif self._alexandria_boot_info.commons_storage is None:
commons_content_storage_path = xdg_alexandria_root / "content" / "commons"
commons_content_storage_path.mkdir(parents=True, exist_ok=True)
commons_content_storage = FileSystemContentStorage(
commons_content_storage_path
)
commons_storage_display = str(commons_content_storage_path)
elif isinstance(self._alexandria_boot_info.commons_storage, pathlib.Path):
commons_content_storage = FileSystemContentStorage(
self._alexandria_boot_info.commons_storage,
)
commons_storage_display = str(self._alexandria_boot_info.commons_storage)
else:
raise Exception(
f"Unsupported value: "
f"commons_storage={self._alexandria_boot_info.commons_storage}"
)
pinned_content_storage: ContentStorageAPI
pinned_storage_display: str
if self._alexandria_boot_info.pinned_storage == ":memory:":
pinned_content_storage = MemoryContentStorage()
pinned_storage_display = "<memory>"
elif self._alexandria_boot_info.pinned_storage is None:
pinned_content_storage_path = xdg_alexandria_root / "content" / "pinned"
pinned_content_storage_path.mkdir(parents=True, exist_ok=True)
pinned_content_storage = FileSystemContentStorage(
pinned_content_storage_path
)
pinned_storage_display = str(pinned_content_storage_path)
elif isinstance(self._alexandria_boot_info.pinned_storage, pathlib.Path):
pinned_content_storage = FileSystemContentStorage(
self._alexandria_boot_info.pinned_storage,
)
pinned_storage_display = str(self._alexandria_boot_info.pinned_storage)
else:
raise Exception(
f"Unsupported value: "
f"pinned_storage={self._alexandria_boot_info.pinned_storage}"
)
local_advertisement_db_path = (
xdg_alexandria_root / "advertisements.local.sqlite3"
)
local_advertisement_db: AdvertisementDatabaseAPI = AdvertisementDatabase(
sqlite3.connect(str(local_advertisement_db_path)),
)
remote_advertisement_db_path = (
xdg_alexandria_root / "advertisements.remote.sqlite3"
)
remote_advertisement_db: AdvertisementDatabaseAPI = AdvertisementDatabase(
sqlite3.connect(str(remote_advertisement_db_path)),
)
broadcast_log_db_path = xdg_alexandria_root / "broadcast_log.sqlite3"
broadcast_log = BroadcastLog(sqlite3.connect(str(broadcast_log_db_path)))
alexandria_network = AlexandriaNetwork(
network=self.base_protocol_app.network,
bootnodes=self._alexandria_boot_info.bootnodes,
commons_content_storage=commons_content_storage,
pinned_content_storage=pinned_content_storage,
local_advertisement_db=local_advertisement_db,
remote_advertisement_db=remote_advertisement_db,
broadcast_log=broadcast_log,
commons_content_storage_max_size=commons_content_storage_max_size,
max_advertisement_count=max_advertisement_count,
)
self.manager.run_daemon_child_service(alexandria_network)
self.logger.info("Starting Alexandria...")
self.logger.info("Root Directory : %s", xdg_alexandria_root)
self.logger.info(
"ContentStorage[Commons]: storage=%s items=%d size=%s max_size=%s",
commons_storage_display,
len(commons_content_storage),
humanize_bytes(commons_content_storage.total_size()),
humanize_bytes(commons_content_storage_max_size),
)
self.logger.info(
"ContentStorage[Pinned] : storage=%s items=%d size=%s",
pinned_storage_display,
len(pinned_content_storage),
humanize_bytes(pinned_content_storage.total_size()),
)
self.logger.info(
"AdvertisementDB[local] : storage=%s total=%d",
local_advertisement_db_path,
local_advertisement_db.count(),
)
self.logger.info(
"AdvertisementDB[remote]: storage=%s total=%d max=%d",
remote_advertisement_db_path,
remote_advertisement_db.count(),
max_advertisement_count,
)
self.logger.info(
"BroadcastLog: storage=%s total=%d max=%d",
broadcast_log_db_path,
broadcast_log.count,
broadcast_log.cache_size,
)
await alexandria_network.ready()
if self._boot_info.is_rpc_enabled:
self.base_protocol_app.rpc_server.add_handers(
get_alexandria_rpc_handlers(alexandria_network)
)
await self.manager.wait_finished() | ddht/v5_1/alexandria/app.py | import argparse
import pathlib
import sqlite3
from ddht._utils import humanize_bytes
from ddht.app import BaseApplication
from ddht.boot_info import BootInfo
from ddht.v5_1.alexandria.abc import AdvertisementDatabaseAPI, ContentStorageAPI
from ddht.v5_1.alexandria.advertisement_db import AdvertisementDatabase
from ddht.v5_1.alexandria.boot_info import AlexandriaBootInfo
from ddht.v5_1.alexandria.broadcast_log import BroadcastLog
from ddht.v5_1.alexandria.content_storage import (
FileSystemContentStorage,
MemoryContentStorage,
)
from ddht.v5_1.alexandria.network import AlexandriaNetwork
from ddht.v5_1.alexandria.rpc_handlers import get_alexandria_rpc_handlers
from ddht.v5_1.alexandria.xdg import get_xdg_alexandria_root
from ddht.v5_1.app import Application
class AlexandriaApplication(BaseApplication):
base_protocol_app: Application
def __init__(self, args: argparse.Namespace, boot_info: BootInfo) -> None:
super().__init__(args, boot_info)
self._alexandria_boot_info = AlexandriaBootInfo.from_namespace(self._args)
self.base_protocol_app = Application(self._args, self._boot_info)
async def run(self) -> None:
self.manager.run_daemon_child_service(self.base_protocol_app)
await self.base_protocol_app.wait_ready()
xdg_alexandria_root = get_xdg_alexandria_root()
xdg_alexandria_root.mkdir(parents=True, exist_ok=True)
max_advertisement_count = self._alexandria_boot_info.max_advertisement_count
commons_content_storage_max_size = (
self._alexandria_boot_info.commons_storage_size
)
commons_content_storage: ContentStorageAPI
commons_storage_display: str
if self._alexandria_boot_info.commons_storage == ":memory:":
commons_content_storage = MemoryContentStorage()
commons_storage_display = "<memory>"
elif self._alexandria_boot_info.commons_storage is None:
commons_content_storage_path = xdg_alexandria_root / "content" / "commons"
commons_content_storage_path.mkdir(parents=True, exist_ok=True)
commons_content_storage = FileSystemContentStorage(
commons_content_storage_path
)
commons_storage_display = str(commons_content_storage_path)
elif isinstance(self._alexandria_boot_info.commons_storage, pathlib.Path):
commons_content_storage = FileSystemContentStorage(
self._alexandria_boot_info.commons_storage,
)
commons_storage_display = str(self._alexandria_boot_info.commons_storage)
else:
raise Exception(
f"Unsupported value: "
f"commons_storage={self._alexandria_boot_info.commons_storage}"
)
pinned_content_storage: ContentStorageAPI
pinned_storage_display: str
if self._alexandria_boot_info.pinned_storage == ":memory:":
pinned_content_storage = MemoryContentStorage()
pinned_storage_display = "<memory>"
elif self._alexandria_boot_info.pinned_storage is None:
pinned_content_storage_path = xdg_alexandria_root / "content" / "pinned"
pinned_content_storage_path.mkdir(parents=True, exist_ok=True)
pinned_content_storage = FileSystemContentStorage(
pinned_content_storage_path
)
pinned_storage_display = str(pinned_content_storage_path)
elif isinstance(self._alexandria_boot_info.pinned_storage, pathlib.Path):
pinned_content_storage = FileSystemContentStorage(
self._alexandria_boot_info.pinned_storage,
)
pinned_storage_display = str(self._alexandria_boot_info.pinned_storage)
else:
raise Exception(
f"Unsupported value: "
f"pinned_storage={self._alexandria_boot_info.pinned_storage}"
)
local_advertisement_db_path = (
xdg_alexandria_root / "advertisements.local.sqlite3"
)
local_advertisement_db: AdvertisementDatabaseAPI = AdvertisementDatabase(
sqlite3.connect(str(local_advertisement_db_path)),
)
remote_advertisement_db_path = (
xdg_alexandria_root / "advertisements.remote.sqlite3"
)
remote_advertisement_db: AdvertisementDatabaseAPI = AdvertisementDatabase(
sqlite3.connect(str(remote_advertisement_db_path)),
)
broadcast_log_db_path = xdg_alexandria_root / "broadcast_log.sqlite3"
broadcast_log = BroadcastLog(sqlite3.connect(str(broadcast_log_db_path)))
alexandria_network = AlexandriaNetwork(
network=self.base_protocol_app.network,
bootnodes=self._alexandria_boot_info.bootnodes,
commons_content_storage=commons_content_storage,
pinned_content_storage=pinned_content_storage,
local_advertisement_db=local_advertisement_db,
remote_advertisement_db=remote_advertisement_db,
broadcast_log=broadcast_log,
commons_content_storage_max_size=commons_content_storage_max_size,
max_advertisement_count=max_advertisement_count,
)
self.manager.run_daemon_child_service(alexandria_network)
self.logger.info("Starting Alexandria...")
self.logger.info("Root Directory : %s", xdg_alexandria_root)
self.logger.info(
"ContentStorage[Commons]: storage=%s items=%d size=%s max_size=%s",
commons_storage_display,
len(commons_content_storage),
humanize_bytes(commons_content_storage.total_size()),
humanize_bytes(commons_content_storage_max_size),
)
self.logger.info(
"ContentStorage[Pinned] : storage=%s items=%d size=%s",
pinned_storage_display,
len(pinned_content_storage),
humanize_bytes(pinned_content_storage.total_size()),
)
self.logger.info(
"AdvertisementDB[local] : storage=%s total=%d",
local_advertisement_db_path,
local_advertisement_db.count(),
)
self.logger.info(
"AdvertisementDB[remote]: storage=%s total=%d max=%d",
remote_advertisement_db_path,
remote_advertisement_db.count(),
max_advertisement_count,
)
self.logger.info(
"BroadcastLog: storage=%s total=%d max=%d",
broadcast_log_db_path,
broadcast_log.count,
broadcast_log.cache_size,
)
await alexandria_network.ready()
if self._boot_info.is_rpc_enabled:
self.base_protocol_app.rpc_server.add_handers(
get_alexandria_rpc_handlers(alexandria_network)
)
await self.manager.wait_finished() | 0.316581 | 0.143158 |
from fastapi import APIRouter, Depends, WebSocket
from fastapi_websocket_pubsub import PubSubEndpoint
from opal_common.confi.confi import load_conf_if_none
from opal_common.config import opal_common_config
from opal_common.logger import logger
from opal_common.authentication.signer import JWTSigner
from opal_common.authentication.deps import WebsocketJWTAuthenticator
from opal_server.config import opal_server_config
class PubSub:
"""
Warpper for the Pub/Sub channel used for both policy and data updates
"""
def __init__(self, signer: JWTSigner, broadcaster_uri:str=None):
"""
Args:
broadcaster_uri (str, optional): Which server/medium should the PubSub use for broadcasting. Defaults to BROADCAST_URI.
None means no broadcasting.
"""
broadcaster_uri = load_conf_if_none(broadcaster_uri, opal_server_config.BROADCAST_URI)
self.router = APIRouter()
self.endpoint = PubSubEndpoint(broadcaster=broadcaster_uri, rpc_channel_get_remote_id=opal_common_config.STATISTICS_ENABLED)
authenticator = WebsocketJWTAuthenticator(signer)
@self.router.websocket("/ws")
async def websocket_rpc_endpoint(websocket: WebSocket, logged_in: bool = Depends(authenticator)):
"""
this is the main websocket endpoint the sidecar uses to register on policy updates.
as you can see, this endpoint is protected by an HTTP Authorization Bearer token.
"""
if not logged_in:
logger.info("Closing connection, remote address: {remote_address}", remote_address=websocket.client, reason="Authentication failed")
await websocket.close()
return
# Init PubSub main-loop with or without broadcasting
if broadcaster_uri is not None:
async with self.endpoint.broadcaster:
await self.endpoint.main_loop(websocket)
else:
await self.endpoint.main_loop(websocket) | opal_server/pubsub.py | from fastapi import APIRouter, Depends, WebSocket
from fastapi_websocket_pubsub import PubSubEndpoint
from opal_common.confi.confi import load_conf_if_none
from opal_common.config import opal_common_config
from opal_common.logger import logger
from opal_common.authentication.signer import JWTSigner
from opal_common.authentication.deps import WebsocketJWTAuthenticator
from opal_server.config import opal_server_config
class PubSub:
"""
Warpper for the Pub/Sub channel used for both policy and data updates
"""
def __init__(self, signer: JWTSigner, broadcaster_uri:str=None):
"""
Args:
broadcaster_uri (str, optional): Which server/medium should the PubSub use for broadcasting. Defaults to BROADCAST_URI.
None means no broadcasting.
"""
broadcaster_uri = load_conf_if_none(broadcaster_uri, opal_server_config.BROADCAST_URI)
self.router = APIRouter()
self.endpoint = PubSubEndpoint(broadcaster=broadcaster_uri, rpc_channel_get_remote_id=opal_common_config.STATISTICS_ENABLED)
authenticator = WebsocketJWTAuthenticator(signer)
@self.router.websocket("/ws")
async def websocket_rpc_endpoint(websocket: WebSocket, logged_in: bool = Depends(authenticator)):
"""
this is the main websocket endpoint the sidecar uses to register on policy updates.
as you can see, this endpoint is protected by an HTTP Authorization Bearer token.
"""
if not logged_in:
logger.info("Closing connection, remote address: {remote_address}", remote_address=websocket.client, reason="Authentication failed")
await websocket.close()
return
# Init PubSub main-loop with or without broadcasting
if broadcaster_uri is not None:
async with self.endpoint.broadcaster:
await self.endpoint.main_loop(websocket)
else:
await self.endpoint.main_loop(websocket) | 0.573559 | 0.085556 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import saved_model_utils
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
class _SeedRunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
class TimeSeriesRegressorTest(test.TestCase):
def _fit_restore_fit_test_template(self, estimator_fn, dtype):
"""Tests restoring previously fit models."""
model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
first_estimator = estimator_fn(model_dir)
times = numpy.arange(20, dtype=numpy.int64)
values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
features = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
batch_size=16, window_size=16)
eval_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
batch_size=16, window_size=16)
first_estimator.train(input_fn=train_input_fn, steps=5)
first_loss_before_fit = first_estimator.evaluate(
input_fn=eval_input_fn, steps=1)["loss"]
first_estimator.train(input_fn=train_input_fn, steps=50)
first_loss_after_fit = first_estimator.evaluate(
input_fn=eval_input_fn, steps=1)["loss"]
self.assertLess(first_loss_after_fit, first_loss_before_fit)
second_estimator = estimator_fn(model_dir)
second_estimator.train(input_fn=train_input_fn, steps=2)
whole_dataset_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(features))
whole_dataset_evaluation = second_estimator.evaluate(
input_fn=whole_dataset_input_fn, steps=1)
predict_input_fn = input_pipeline.predict_continuation_input_fn(
evaluation=whole_dataset_evaluation,
steps=10)
# Also tests that limit_epochs in predict_continuation_input_fn prevents
# infinite iteration
(estimator_predictions,
) = list(second_estimator.predict(input_fn=predict_input_fn))
self.assertAllEqual([10, 1], estimator_predictions["mean"].shape)
input_receiver_fn = first_estimator.build_raw_serving_input_receiver_fn()
export_location = first_estimator.export_savedmodel(self.get_temp_dir(),
input_receiver_fn)
with ops.Graph().as_default():
with session.Session() as sess:
signatures = loader.load(sess, [tag_constants.SERVING], export_location)
# Test that prediction and filtering can continue from evaluation output
saved_prediction = saved_model_utils.predict_continuation(
continue_from=whole_dataset_evaluation,
steps=10,
signatures=signatures,
session=sess)
# Saved model predictions should be the same as Estimator predictions
# starting from the same evaluation.
for prediction_key, prediction_value in estimator_predictions.items():
self.assertAllClose(prediction_value,
numpy.squeeze(
saved_prediction[prediction_key], axis=0))
first_filtering = saved_model_utils.filter_continuation(
continue_from=whole_dataset_evaluation,
features={
feature_keys.FilteringFeatures.TIMES: times[None, -1] + 2,
feature_keys.FilteringFeatures.VALUES: values[None, -1] + 2.
},
signatures=signatures,
session=sess)
# Test that prediction and filtering can continue from filtering output
second_saved_prediction = saved_model_utils.predict_continuation(
continue_from=first_filtering,
steps=1,
signatures=signatures,
session=sess)
self.assertEqual(
times[-1] + 3,
numpy.squeeze(
second_saved_prediction[feature_keys.PredictionResults.TIMES]))
saved_model_utils.filter_continuation(
continue_from=first_filtering,
features={
feature_keys.FilteringFeatures.TIMES: times[-1] + 3,
feature_keys.FilteringFeatures.VALUES: values[-1] + 3.
},
signatures=signatures,
session=sess)
def test_fit_restore_fit_ar_regressor(self):
def _estimator_fn(model_dir):
return estimators.ARRegressor(
periodicities=10, input_window_size=10, output_window_size=6,
num_features=1, model_dir=model_dir, config=_SeedRunConfig(),
# This test is flaky with normal likelihood loss (could add more
# training iterations instead).
loss=ar_model.ARModel.SQUARED_LOSS)
self._fit_restore_fit_test_template(_estimator_fn, dtype=dtypes.float32)
def test_fit_restore_fit_structural_ensemble_regressor(self):
dtype = dtypes.float32
def _estimator_fn(model_dir):
return estimators.StructuralEnsembleRegressor(
num_features=1, periodicities=10, model_dir=model_dir, dtype=dtype,
config=_SeedRunConfig())
self._fit_restore_fit_test_template(_estimator_fn, dtype=dtype)
if __name__ == "__main__":
test.main() | tensorflow/contrib/timeseries/python/timeseries/estimators_test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import saved_model_utils
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
class _SeedRunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
class TimeSeriesRegressorTest(test.TestCase):
def _fit_restore_fit_test_template(self, estimator_fn, dtype):
"""Tests restoring previously fit models."""
model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
first_estimator = estimator_fn(model_dir)
times = numpy.arange(20, dtype=numpy.int64)
values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
features = {
feature_keys.TrainEvalFeatures.TIMES: times,
feature_keys.TrainEvalFeatures.VALUES: values
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
batch_size=16, window_size=16)
eval_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
batch_size=16, window_size=16)
first_estimator.train(input_fn=train_input_fn, steps=5)
first_loss_before_fit = first_estimator.evaluate(
input_fn=eval_input_fn, steps=1)["loss"]
first_estimator.train(input_fn=train_input_fn, steps=50)
first_loss_after_fit = first_estimator.evaluate(
input_fn=eval_input_fn, steps=1)["loss"]
self.assertLess(first_loss_after_fit, first_loss_before_fit)
second_estimator = estimator_fn(model_dir)
second_estimator.train(input_fn=train_input_fn, steps=2)
whole_dataset_input_fn = input_pipeline.WholeDatasetInputFn(
input_pipeline.NumpyReader(features))
whole_dataset_evaluation = second_estimator.evaluate(
input_fn=whole_dataset_input_fn, steps=1)
predict_input_fn = input_pipeline.predict_continuation_input_fn(
evaluation=whole_dataset_evaluation,
steps=10)
# Also tests that limit_epochs in predict_continuation_input_fn prevents
# infinite iteration
(estimator_predictions,
) = list(second_estimator.predict(input_fn=predict_input_fn))
self.assertAllEqual([10, 1], estimator_predictions["mean"].shape)
input_receiver_fn = first_estimator.build_raw_serving_input_receiver_fn()
export_location = first_estimator.export_savedmodel(self.get_temp_dir(),
input_receiver_fn)
with ops.Graph().as_default():
with session.Session() as sess:
signatures = loader.load(sess, [tag_constants.SERVING], export_location)
# Test that prediction and filtering can continue from evaluation output
saved_prediction = saved_model_utils.predict_continuation(
continue_from=whole_dataset_evaluation,
steps=10,
signatures=signatures,
session=sess)
# Saved model predictions should be the same as Estimator predictions
# starting from the same evaluation.
for prediction_key, prediction_value in estimator_predictions.items():
self.assertAllClose(prediction_value,
numpy.squeeze(
saved_prediction[prediction_key], axis=0))
first_filtering = saved_model_utils.filter_continuation(
continue_from=whole_dataset_evaluation,
features={
feature_keys.FilteringFeatures.TIMES: times[None, -1] + 2,
feature_keys.FilteringFeatures.VALUES: values[None, -1] + 2.
},
signatures=signatures,
session=sess)
# Test that prediction and filtering can continue from filtering output
second_saved_prediction = saved_model_utils.predict_continuation(
continue_from=first_filtering,
steps=1,
signatures=signatures,
session=sess)
self.assertEqual(
times[-1] + 3,
numpy.squeeze(
second_saved_prediction[feature_keys.PredictionResults.TIMES]))
saved_model_utils.filter_continuation(
continue_from=first_filtering,
features={
feature_keys.FilteringFeatures.TIMES: times[-1] + 3,
feature_keys.FilteringFeatures.VALUES: values[-1] + 3.
},
signatures=signatures,
session=sess)
def test_fit_restore_fit_ar_regressor(self):
def _estimator_fn(model_dir):
return estimators.ARRegressor(
periodicities=10, input_window_size=10, output_window_size=6,
num_features=1, model_dir=model_dir, config=_SeedRunConfig(),
# This test is flaky with normal likelihood loss (could add more
# training iterations instead).
loss=ar_model.ARModel.SQUARED_LOSS)
self._fit_restore_fit_test_template(_estimator_fn, dtype=dtypes.float32)
def test_fit_restore_fit_structural_ensemble_regressor(self):
dtype = dtypes.float32
def _estimator_fn(model_dir):
return estimators.StructuralEnsembleRegressor(
num_features=1, periodicities=10, model_dir=model_dir, dtype=dtype,
config=_SeedRunConfig())
self._fit_restore_fit_test_template(_estimator_fn, dtype=dtype)
if __name__ == "__main__":
test.main() | 0.800263 | 0.331525 |
from typing import Any, Generator, Union, Iterable, Tuple, List, Dict, Callable
def is_prime(n: int) -> bool:
"""Returns whether n is a prime number
Args:
n (int): The number to check
Returns:
bool: Whether n is a prime number
"""
if n <= 3:
return n > 1
if n % 2 == 0 or n % 3 == 0:
return False
i = 5
while i**2 <= n:
if n % i == 0 or n % (i + 2) == 0:
return False
i += 6
return True
def prev_prime(n: int) -> int:
"""Returns the previous prime number before n
Args:
n (int): The number to find the previous prime of
Returns:
int: The previous prime number before n
"""
if n <= 1:
return 2
n -= 1
while not is_prime(n):
n -= 1
if n < 2:
return None
return n
def next_prime(n: int) -> int:
"""Returns the next prime number after n
Args:
n (int): The number to find the next prime of
Returns:
int: The next prime number after n
"""
n += 1
while not is_prime(n):
n += 1
return n
def nth_prime(n: int) -> int:
"""Returns the nth prime number
Args:
n (int): Which prime number to find
Returns:
int: The nth prime number
"""
primes = [2]
i = 3
while len(primes) < n:
if is_prime(i):
primes.append(i)
i += 2
return primes[-1]
def fibn(endn: int) -> List[int]:
"""Given an end number, returns the fibonacci numbers up to that number
Args:
endi (int): The end number
Returns:
list: The fibonacci numbers up to that number
"""
fibs = [0, 1]
while fibs[-1] < endn:
fibs.append(fibs[-2] + fibs[-1])
return fibs[:-1]
def fibi(endi: int) -> List[int]:
"""Given an end number, returns the number fibonacci numbers up to that number
Args:
endn (int): The end number
Returns:
list: The fibonacci numbers up to that number
"""
fibs = [0, 1]
for _ in range(endi - 2):
fibs.append(fibs[-2] + fibs[-1])
return fibs
def fib() -> Generator[int, None, None]:
"""Returns the fibonacci numbers generator
Yields:
int: The next fibonacci number
"""
a, b = 0, 1
while True:
yield a
a, b = b, a + b
def zeros_after(n: int) -> int:
"""Counts the number of trailing zeros in n
Args:
n (int): The number to count the zeros of
Returns:
int: The number of trailing zeros in n
"""
zeros = 0
for i in str(n)[::-1]:
if i != "0":
break
zeros += 1
return zeros
def counter(dic: Dict, ns: Iterable) -> Dict:
"""Counts the number of times each element in ns appears and adds it to dic
Args:
dic (dict): The dictionary to add the counts to
ns (iterable): The elements to count
Returns:
dict: The dictionary with the counts added
"""
for n in ns:
dic[n] = dic.get(n, 0) + 1
return dic
def diviz(n: int) -> list:
"""Returns all the numbers that divide n
Args:
n (int): The number to divide
Returns:
list: The numbers that divide n
"""
if is_prime(n):
return [1, n]
if n in (0, 1):
return [1] * n
a = [1]
for i in range(2, n - 1):
if n % i == 0:
a.append(i)
return a + [n]
def diviz_fl(n: int) -> list[int]:
"""Returns the first and last numbers that divide n
Args:
n (int): The number to divide
Returns:
list: The first and last numbers that divide n
"""
if is_prime(n) or n in (0, 1):
return [1, n]
for i in range(2, n - 1):
if n % i == 0:
return [i, int(n / i)]
def largest_prime_div(n: int) -> int:
"""Returns the largest prime divisor of n
Args:
n (int): The number to divide
Returns:
int: The largest prime divisor of n
"""
i = 2
while i * i <= n:
if n % i:
i += 1
else:
n //= i
return n
def prime_div(n: int) -> list:
"""Returns the prime divisors of n
Args:
n (int): The number to divide
Returns:
list: The prime divisors of n
"""
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
def bin_search_min(func: Callable) -> int:
"""Finds the minimum value where the function returns True
Args:
func (Callable): The function to test
Returns:
int: The minimum value where the function returns True
"""
i, j = -10, -10
while func(i):
i *= 10
i //= 10
j = i
while j != 0:
if func(i):
i += j
else:
i -= j
j = -(-j // 10)
return i
def bin_search(func: Callable, mi: int, ma: int) -> int:
"""Find the value where the function returns 0. The function should return -1 if the value is too low, 1 if the value is too high, and 0 if the value is correct.
Args:
func (Callable): The function to test
mi (int): The minimum value to test
ma (int): The maximum value to test
Returns:
int: The value where the function returns 0
"""
pi = i = (mi + ma) // 2
while True:
if (j := func(i)) == -1:
mi = i - 1
pi, i = i, (ma + i) // 2
if pi == i:
raise ValueError("Value not found in given range")
elif j == 0:
return i
elif j == 1:
ma = i + 1
i = (mi + i) // 2
def bin_search_max(func):
"""Finds the maximum value where the function returns True
Args:
func (Callable): The function to test
Returns:
int: The maximum value where the function returns True
"""
i, j = 10, 10
while func(i):
i *= 10
i //= 10
j = i
while j != 0:
if func(i):
i += j
else:
i -= j
j //= 10
return i
def teef(
tr: Callable,
exces: Union[Tuple[BaseException], BaseException] = (),
exc: Callable = lambda a, e: None,
els: Callable = lambda a: None,
fin: Callable = lambda a: None,
) -> Tuple[Tuple[Any, Any, Any, Any], Dict]:
"""teef: Try Except Else Finally. All functions get called with a dictionary for talking between them.
And the except function also gets the exception as an argument.
Args:
tr (Callable): try function
exces (Union[Tuple[BaseException], BaseException], optional): Exceptions. Defaults to ().
exc (Callable, optional): except function. Defaults to lambda a, e: None.
els (Callable, optional): else function. Defaults to lambda a: None.
fin (Callable, optional): finally function. Defaults to lambda a: None.
Returns:
Tuple[Tuple[Any, Any, Any, Any], Dict]: The results of the functions and the dictionary
"""
ret = [None, None, None, None]
gls = {}
try:
ret[0] = tr(gls)
except exces as e:
ret[1] = exc(gls, e)
else:
ret[2] = els(gls)
finally:
ret[3] = fin(gls)
return tuple(ret), gls
def loop(setup: Callable, cond: Callable, func: Callable) -> Dict:
"""A for loop like in C, C++, Rust, etc. Every function gets a dictionary for talking between them.
Args:
setup (Callable): The function that gets called before the loop
cond (Callable): The condition function returns True or False
func (Callable): The function to call for each iteration
Returns:
Dict: The dictionary
"""
gls = {}
setup(gls)
while cond(gls):
func(gls)
return gls | BlockOL/__init__.py |
from typing import Any, Generator, Union, Iterable, Tuple, List, Dict, Callable
def is_prime(n: int) -> bool:
"""Returns whether n is a prime number
Args:
n (int): The number to check
Returns:
bool: Whether n is a prime number
"""
if n <= 3:
return n > 1
if n % 2 == 0 or n % 3 == 0:
return False
i = 5
while i**2 <= n:
if n % i == 0 or n % (i + 2) == 0:
return False
i += 6
return True
def prev_prime(n: int) -> int:
"""Returns the previous prime number before n
Args:
n (int): The number to find the previous prime of
Returns:
int: The previous prime number before n
"""
if n <= 1:
return 2
n -= 1
while not is_prime(n):
n -= 1
if n < 2:
return None
return n
def next_prime(n: int) -> int:
"""Returns the next prime number after n
Args:
n (int): The number to find the next prime of
Returns:
int: The next prime number after n
"""
n += 1
while not is_prime(n):
n += 1
return n
def nth_prime(n: int) -> int:
"""Returns the nth prime number
Args:
n (int): Which prime number to find
Returns:
int: The nth prime number
"""
primes = [2]
i = 3
while len(primes) < n:
if is_prime(i):
primes.append(i)
i += 2
return primes[-1]
def fibn(endn: int) -> List[int]:
"""Given an end number, returns the fibonacci numbers up to that number
Args:
endi (int): The end number
Returns:
list: The fibonacci numbers up to that number
"""
fibs = [0, 1]
while fibs[-1] < endn:
fibs.append(fibs[-2] + fibs[-1])
return fibs[:-1]
def fibi(endi: int) -> List[int]:
"""Given an end number, returns the number fibonacci numbers up to that number
Args:
endn (int): The end number
Returns:
list: The fibonacci numbers up to that number
"""
fibs = [0, 1]
for _ in range(endi - 2):
fibs.append(fibs[-2] + fibs[-1])
return fibs
def fib() -> Generator[int, None, None]:
"""Returns the fibonacci numbers generator
Yields:
int: The next fibonacci number
"""
a, b = 0, 1
while True:
yield a
a, b = b, a + b
def zeros_after(n: int) -> int:
"""Counts the number of trailing zeros in n
Args:
n (int): The number to count the zeros of
Returns:
int: The number of trailing zeros in n
"""
zeros = 0
for i in str(n)[::-1]:
if i != "0":
break
zeros += 1
return zeros
def counter(dic: Dict, ns: Iterable) -> Dict:
"""Counts the number of times each element in ns appears and adds it to dic
Args:
dic (dict): The dictionary to add the counts to
ns (iterable): The elements to count
Returns:
dict: The dictionary with the counts added
"""
for n in ns:
dic[n] = dic.get(n, 0) + 1
return dic
def diviz(n: int) -> list:
"""Returns all the numbers that divide n
Args:
n (int): The number to divide
Returns:
list: The numbers that divide n
"""
if is_prime(n):
return [1, n]
if n in (0, 1):
return [1] * n
a = [1]
for i in range(2, n - 1):
if n % i == 0:
a.append(i)
return a + [n]
def diviz_fl(n: int) -> list[int]:
"""Returns the first and last numbers that divide n
Args:
n (int): The number to divide
Returns:
list: The first and last numbers that divide n
"""
if is_prime(n) or n in (0, 1):
return [1, n]
for i in range(2, n - 1):
if n % i == 0:
return [i, int(n / i)]
def largest_prime_div(n: int) -> int:
"""Returns the largest prime divisor of n
Args:
n (int): The number to divide
Returns:
int: The largest prime divisor of n
"""
i = 2
while i * i <= n:
if n % i:
i += 1
else:
n //= i
return n
def prime_div(n: int) -> list:
"""Returns the prime divisors of n
Args:
n (int): The number to divide
Returns:
list: The prime divisors of n
"""
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
def bin_search_min(func: Callable) -> int:
"""Finds the minimum value where the function returns True
Args:
func (Callable): The function to test
Returns:
int: The minimum value where the function returns True
"""
i, j = -10, -10
while func(i):
i *= 10
i //= 10
j = i
while j != 0:
if func(i):
i += j
else:
i -= j
j = -(-j // 10)
return i
def bin_search(func: Callable, mi: int, ma: int) -> int:
"""Find the value where the function returns 0. The function should return -1 if the value is too low, 1 if the value is too high, and 0 if the value is correct.
Args:
func (Callable): The function to test
mi (int): The minimum value to test
ma (int): The maximum value to test
Returns:
int: The value where the function returns 0
"""
pi = i = (mi + ma) // 2
while True:
if (j := func(i)) == -1:
mi = i - 1
pi, i = i, (ma + i) // 2
if pi == i:
raise ValueError("Value not found in given range")
elif j == 0:
return i
elif j == 1:
ma = i + 1
i = (mi + i) // 2
def bin_search_max(func):
"""Finds the maximum value where the function returns True
Args:
func (Callable): The function to test
Returns:
int: The maximum value where the function returns True
"""
i, j = 10, 10
while func(i):
i *= 10
i //= 10
j = i
while j != 0:
if func(i):
i += j
else:
i -= j
j //= 10
return i
def teef(
tr: Callable,
exces: Union[Tuple[BaseException], BaseException] = (),
exc: Callable = lambda a, e: None,
els: Callable = lambda a: None,
fin: Callable = lambda a: None,
) -> Tuple[Tuple[Any, Any, Any, Any], Dict]:
"""teef: Try Except Else Finally. All functions get called with a dictionary for talking between them.
And the except function also gets the exception as an argument.
Args:
tr (Callable): try function
exces (Union[Tuple[BaseException], BaseException], optional): Exceptions. Defaults to ().
exc (Callable, optional): except function. Defaults to lambda a, e: None.
els (Callable, optional): else function. Defaults to lambda a: None.
fin (Callable, optional): finally function. Defaults to lambda a: None.
Returns:
Tuple[Tuple[Any, Any, Any, Any], Dict]: The results of the functions and the dictionary
"""
ret = [None, None, None, None]
gls = {}
try:
ret[0] = tr(gls)
except exces as e:
ret[1] = exc(gls, e)
else:
ret[2] = els(gls)
finally:
ret[3] = fin(gls)
return tuple(ret), gls
def loop(setup: Callable, cond: Callable, func: Callable) -> Dict:
"""A for loop like in C, C++, Rust, etc. Every function gets a dictionary for talking between them.
Args:
setup (Callable): The function that gets called before the loop
cond (Callable): The condition function returns True or False
func (Callable): The function to call for each iteration
Returns:
Dict: The dictionary
"""
gls = {}
setup(gls)
while cond(gls):
func(gls)
return gls | 0.932898 | 0.718681 |
# Commented out IPython magic to ensure Python compatibility.
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
    # %tensorflow_version only exists in Colab. The magic line below was
    # commented out by the notebook export, which left the try suite empty —
    # a SyntaxError in plain Python. `pass` keeps the block valid.
    # %tensorflow_version 2.x
    pass
except Exception:
    pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
"""## Parameters
- Define the batch size
- Define the class (category) names
"""
# NOTE(review): BATCH_SIZE is never used below — model.fit/evaluate/predict
# all hard-code batch_size=64. Consider unifying.
BATCH_SIZE = 32
# CIFAR-10 class names, indexed by integer label (0-9).
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
"""Define some functions that will help you to create some visualizations. (These will be used later)"""
#@title Visualization Utilities[RUN ME]
#Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
def display_images(digits, predictions, labels, title):
    """Show a row of `n` randomly chosen images with their predicted class names.

    Args:
        digits: Array of images, indexable by an integer array.
        predictions: Array of predicted class indices, parallel to `digits`.
        labels: Ground-truth labels, parallel to `digits`. Currently not
            rendered; kept for interface compatibility with callers.
        title: Figure title.
    """
    n = 10
    # Sample n positions (with replacement) to display.
    indexes = np.random.choice(len(predictions), size=n)
    n_digits = digits[indexes]
    n_predictions = predictions[indexes].reshape((n,))
    fig = plt.figure(figsize=(20, 4))
    plt.title(title)
    plt.yticks([])
    plt.xticks([])
    for i in range(n):
        # add_subplot also makes the new axes current for the plt.* calls below;
        # the returned handle (previously bound to an unused `ax`) is not needed.
        fig.add_subplot(1, n, i + 1)
        class_index = n_predictions[i]
        plt.xlabel(classes[class_index])
        plt.xticks([])
        plt.yticks([])
        plt.imshow(n_digits[i])
# utility to display training and validation curves
def plot_metrics(metric_name, title, ylim=5):
    """Plot a training metric (blue) and its validation counterpart (green).

    Relies on the module-level `history` object produced later by `model.fit`.

    Args:
        metric_name: Key into `history.history`, e.g. "loss" or "accuracy";
            the validation series is looked up as "val_" + metric_name.
        title: Plot title.
        ylim: Upper y-axis limit (lower limit is 0).
    """
    plt.title(title)
    plt.ylim(0,ylim)
    plt.plot(history.history[metric_name],color='blue',label=metric_name)
    plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
"""## Loading and Preprocessing Data
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. You will load the dataset from Keras.
"""
(training_images, training_labels) , (validation_images, validation_labels) = tf.keras.datasets.cifar10.load_data()
"""### Visualize Dataset
Use `display_images` to view some of the images and their class labels.
"""
display_images(training_images, training_labels, training_labels, "Training Data" )
# Fix: this row shows the validation split, so label it accordingly
# (it was previously mislabeled "Training Data").
display_images(validation_images, validation_labels, validation_labels, "Validation Data" )
"""### Preprocess Dataset
Here, you'll perform normalization on images in training and validation set.
- You'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
"""
def preprocess_image_input(input_images):
    """Cast images to float32 and apply ResNet50's ImageNet preprocessing."""
    as_floats = input_images.astype('float32')
    return tf.keras.applications.resnet50.preprocess_input(as_floats)
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
"""## Define the Network
You will be performing transfer learning on **ResNet50** available in Keras.
- You'll load pre-trained **imagenet weights** to the model.
- You'll choose to retain all layers of **ResNet50** along with the final classification layers.
"""
'''
Feature Extraction is performed by ResNet50 pretrained on imagenet weights.
Input size is 224 x 224.
'''
def feature_extractor(inputs):
    """Run `inputs` through ResNet50 (ImageNet weights, no top) and return the feature maps.

    Expects 224 x 224 x 3 inputs.
    """
    # Renamed the local result so it no longer shadows the function's own name.
    features = tf.keras.applications.resnet.ResNet50(input_shape=(224, 224, 3),
                                                     include_top=False,
                                                     weights='imagenet')(inputs)
    return features
'''
Defines final dense layers and subsequent softmax layer for classification.
'''
def classifier(inputs):
    """Classification head: global average pooling, two dense ReLU layers, 10-way softmax."""
    pooled = tf.keras.layers.GlobalAveragePooling2D()(inputs)
    flat = tf.keras.layers.Flatten()(pooled)
    hidden1 = tf.keras.layers.Dense(1024, activation="relu")(flat)
    hidden2 = tf.keras.layers.Dense(512, activation="relu")(hidden1)
    return tf.keras.layers.Dense(10, activation="softmax", name="classification")(hidden2)
'''
Since input image size is (32 x 32), first upsample the image by factor of (7x7) to transform it to (224 x 224)
Connect the feature extraction and "classifier" layers to build the model.
'''
def final_model(inputs):
    """Upsample 32x32 inputs by 7x to 224x224, extract ResNet features, then classify."""
    upsampled = tf.keras.layers.UpSampling2D(size=(7, 7))(inputs)
    features = feature_extractor(upsampled)
    return classifier(features)
'''
Define the model and compile it.
Use Stochastic Gradient Descent as the optimizer.
Use Sparse Categorical CrossEntropy as the loss function.
'''
def define_compile_model():
    """Build and compile the full model: SGD optimizer, sparse categorical cross-entropy."""
    inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    outputs = final_model(inputs)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='SGD',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model = define_compile_model()
model.summary()
"""## Train the model"""
# this will take around 20 minutes to complete
EPOCHS = 4
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data = (valid_X, validation_labels), batch_size=64)
"""## Evaluate the Model
Calculate the loss and accuracy metrics using the model's `.evaluate` function.
"""
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
"""### Plot Loss and Accuracy Curves
Plot the loss (in blue) and validation loss (in green).
"""
plot_metrics("loss", "Loss")
"""Plot the training accuracy (blue) as well as the validation accuracy (green)."""
plot_metrics("accuracy", "Accuracy")
"""### Visualize predictions
You can take a look at the predictions on the validation set.
"""
# Convert the per-class probability rows into hard class predictions.
probabilities = model.predict(valid_X, batch_size=64)
probabilities = np.argmax(probabilities, axis = 1)
display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.")
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
    # %tensorflow_version only exists in Colab. The magic line below was
    # commented out by the notebook export, which left the try suite empty —
    # a SyntaxError in plain Python. `pass` keeps the block valid.
    # %tensorflow_version 2.x
    pass
except Exception:
    pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
"""## Parameters
- Define the batch size
- Define the class (category) names
"""
BATCH_SIZE = 32
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
"""Define some functions that will help you to create some visualizations. (These will be used later)"""
#@title Visualization Utilities[RUN ME]
#Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
def display_images(digits, predictions, labels, title):
n = 10
indexes = np.random.choice(len(predictions), size=n)
n_digits = digits[indexes]
n_predictions = predictions[indexes]
n_predictions = n_predictions.reshape((n,))
n_labels = labels[indexes]
fig = plt.figure(figsize=(20, 4))
plt.title(title)
plt.yticks([])
plt.xticks([])
for i in range(10):
ax = fig.add_subplot(1, 10, i+1)
class_index = n_predictions[i]
plt.xlabel(classes[class_index])
plt.xticks([])
plt.yticks([])
plt.imshow(n_digits[i])
# utility to display training and validation curves
def plot_metrics(metric_name, title, ylim=5):
plt.title(title)
plt.ylim(0,ylim)
plt.plot(history.history[metric_name],color='blue',label=metric_name)
plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
"""## Loading and Preprocessing Data
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. You will load the dataset from Keras.
"""
(training_images, training_labels) , (validation_images, validation_labels) = tf.keras.datasets.cifar10.load_data()
"""### Visualize Dataset
Use the `display_image` to view some of the images and their class labels.
"""
display_images(training_images, training_labels, training_labels, "Training Data" )
# Fix: this row shows the validation split, so label it accordingly
# (it was previously mislabeled "Training Data").
display_images(validation_images, validation_labels, validation_labels, "Validation Data" )
"""### Preprocess Dataset
Here, you'll perform normalization on images in training and validation set.
- You'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
"""
def preprocess_image_input(input_images):
input_images = input_images.astype('float32')
output_ims = tf.keras.applications.resnet50.preprocess_input(input_images)
return output_ims
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
"""## Define the Network
You will be performing transfer learning on **ResNet50** available in Keras.
- You'll load pre-trained **imagenet weights** to the model.
- You'll choose to retain all layers of **ResNet50** along with the final classification layers.
"""
'''
Feature Extraction is performed by ResNet50 pretrained on imagenet weights.
Input size is 224 x 224.
'''
def feature_extractor(inputs):
feature_extractor = tf.keras.applications.resnet.ResNet50(input_shape=(224, 224, 3),
include_top=False,
weights='imagenet')(inputs)
return feature_extractor
'''
Defines final dense layers and subsequent softmax layer for classification.
'''
def classifier(inputs):
x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dense(10, activation="softmax", name="classification")(x)
return x
'''
Since input image size is (32 x 32), first upsample the image by factor of (7x7) to transform it to (224 x 224)
Connect the feature extraction and "classifier" layers to build the model.
'''
def final_model(inputs):
resize = tf.keras.layers.UpSampling2D(size=(7,7))(inputs)
resnet_feature_extractor = feature_extractor(resize)
classification_output = classifier(resnet_feature_extractor)
return classification_output
'''
Define the model and compile it.
Use Stochastic Gradient Descent as the optimizer.
Use Sparse Categorical CrossEntropy as the loss function.
'''
def define_compile_model():
inputs = tf.keras.layers.Input(shape=(32,32,3))
classification_output = final_model(inputs)
model = tf.keras.Model(inputs=inputs, outputs = classification_output)
model.compile(optimizer='SGD',
loss='sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model
model = define_compile_model()
model.summary()
"""## Train the model"""
# this will take around 20 minutes to complete
EPOCHS = 4
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data = (valid_X, validation_labels), batch_size=64)
"""## Evaluate the Model
Calculate the loss and accuracy metrics using the model's `.evaluate` function.
"""
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
"""### Plot Loss and Accuracy Curves
Plot the loss (in blue) and validation loss (in green).
"""
plot_metrics("loss", "Loss")
"""Plot the training accuracy (blue) as well as the validation accuracy (green)."""
plot_metrics("accuracy", "Accuracy")
"""### Visualize predictions
You can take a look at the predictions on the validation set.
"""
probabilities = model.predict(valid_X, batch_size=64)
probabilities = np.argmax(probabilities, axis = 1)
display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.") | 0.824356 | 0.620392 |
import itertools
import os
import sys
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
from twitter.common.collections.orderedset import OrderedSet
from twitter.pants.cache import create_artifact_cache
from twitter.pants.base.hash_utils import hash_file
from twitter.pants.base.build_invalidator import CacheKeyGenerator
from twitter.pants.reporting.reporting_utils import items_to_report_element
from twitter.pants.tasks.cache_manager import CacheManager, InvalidationCheck
class TaskError(Exception):
    """Raised to indicate a task has failed.

    The base Task.execute() raises this when a subclass has not overridden it.
    """
class Task(object):
    """Base class for pants tasks.

    Owns per-target invalidation bookkeeping and optional artifact caching;
    concrete tasks implement execute(). (Python 2 code: note sys.maxint.)
    """
    @classmethod
    def setup_parser(cls, option_group, args, mkflag):
        """Set up the cmd-line parser.
        Subclasses can add flags to the pants command line using the given option group.
        Flag names should be created with mkflag([name]) to ensure flags are properly namespaced
        amongst other tasks.
        """
    def __init__(self, context):
        self.context = context
        # --dry-run is honored only when the concrete task declares support
        # via can_dry_run().
        self.dry_run = self.can_dry_run() and self.context.options.dry_run
        self._cache_key_generator = CacheKeyGenerator()
        self._artifact_cache = None
        # Keyed by product_type(), so tasks sharing a product type also share
        # invalidation state.
        self._build_invalidator_dir = os.path.join(context.config.get('tasks', 'build_invalidator'),
                                                   self.product_type())
    def setup_artifact_cache(self, spec):
        """Subclasses can call this in their __init__() to set up artifact caching for that task type.
        spec should be a list of urls/file path prefixes, which are used in that order.
        By default, no artifact caching is used.
        """
        if len(spec) > 0:
            pants_workdir = self.context.config.getdefault('pants_workdir')
            my_name = self.__class__.__name__
            self._artifact_cache = create_artifact_cache(self.context.log, pants_workdir, spec, my_name)
    def product_type(self):
        """Set the product type for this task.
        By default, each task is considered as creating a unique product type.
        Subclasses can override this to specify a shared product type, e.g., 'classes'.
        Tasks with the same product type can invalidate each other's targets, e.g., if a ScalaLibrary
        depends on a JavaLibrary, a change to the JavaLibrary will invalidate the ScalaLibrary because
        they both have the same product type.
        """
        return self.__class__.__name__
    def can_dry_run(self):
        """Subclasses can override this to indicate that they respect the --dry-run flag.
        It's the subclass task's responsibility to do the right thing if this flag is set.
        Note that tasks such as codegen and ivy resolution cannot dry-run, because subsequent
        cache key computation will fail on missing sources/external deps.
        """
        return False
    def execute(self, targets):
        """Executes this task against targets, which may be a subset of the current context targets."""
        raise TaskError('execute() not implemented')
    def invalidate_for(self):
        """Provides extra objects that participate in invalidation.
        Subclasses can override and return an object that should be checked for changes when
        managing target invalidation. If the pickled form of returned object changes
        between runs all targets will be invalidated.
        """
        return None
    def invalidate_for_files(self):
        """Provides extra files that participate in invalidation.
        Subclasses can override and return a list of full paths to extra, non-source files that should
        be checked for changes when managing target invalidation. This is useful for tracking
        changes to pre-built build tools, e.g., the thrift compiler.
        """
        return []
    @contextmanager
    def invalidated(self, targets, only_buildfiles=False, invalidate_dependents=False,
                    partition_size_hint=sys.maxint):
        """Checks targets for invalidation, first checking the artifact cache.
        Subclasses call this to figure out what to work on.
        targets: The targets to check for changes.
        only_buildfiles: If True, then only the target's BUILD files are checked for changes, not
        its sources.
        invalidate_dependents: If True then any targets depending on changed targets are invalidated.
        partition_size_hint: Each VersionedTargetSet in the yielded list will represent targets
        containing roughly this number of source files, if possible. Set to
        sys.maxint for a single VersionedTargetSet. Set to 0 for one
        VersionedTargetSet per target. It is up to the caller to do the right
        thing with whatever partitioning it asks for.
        Yields an InvalidationCheck object reflecting the (partitioned) targets.
        If no exceptions are thrown by work in the block, the build cache is updated for the targets.
        Note: the artifact cache is not updated. That must be done manually.
        """
        with self.context.new_workunit('invalidation'):
            # Extra invalidation inputs (subclass-provided objects plus file
            # hashes) are folded into the cache key.
            extra_data = []
            extra_data.append(self.invalidate_for())
            for f in self.invalidate_for_files():
                extra_data.append(hash_file(f))
            cache_manager = CacheManager(self._cache_key_generator,
                                         self._build_invalidator_dir,
                                         invalidate_dependents,
                                         extra_data,
                                         only_externaldeps=only_buildfiles)
            invalidation_check = cache_manager.check(targets, partition_size_hint)
            # See if we have entire partitions cached.
            if invalidation_check.invalid_vts and self._artifact_cache and \
               self.context.options.read_from_artifact_cache:
                with self.context.new_workunit('cache'):
                    all_cached_targets = []
                    partitions_to_check = \
                        [vt for vt in invalidation_check.all_vts_partitioned if not vt.valid]
                    cached_partitions, uncached_partitions = self.check_artifact_cache(partitions_to_check)
                    for vt in cached_partitions:
                        for t in vt.targets:
                            all_cached_targets.append(t)
                    # See if we have any individual targets from the uncached partitions.
                    vts_to_check = [vt for vt in itertools.chain.from_iterable(
                        [x.versioned_targets for x in uncached_partitions]) if not vt.valid]
                    cached_targets, uncached_targets = self.check_artifact_cache(vts_to_check)
                    for vt in cached_targets:
                        all_cached_targets.append(vt.target)
                    if all_cached_targets:
                        # Do some reporting.
                        for t in all_cached_targets:
                            self.context.run_tracker.artifact_cache_stats.add_hit('default', t)
                        self._report_targets('Using cached artifacts for ', all_cached_targets, '.')
                    # Now that we've checked the cache, re-partition whatever is still invalid.
                    if uncached_targets:
                        for vts in uncached_targets:
                            self.context.run_tracker.artifact_cache_stats.add_miss('default', vts.target)
                        self._report_targets('No cached artifacts for ',
                                             [vt.target for vt in uncached_targets], '.')
                    invalidation_check = \
                        InvalidationCheck(invalidation_check.all_vts, uncached_targets, partition_size_hint)
            # Do some reporting.
            targets = []
            sources = []
            num_invalid_partitions = len(invalidation_check.invalid_vts_partitioned)
            for vt in invalidation_check.invalid_vts_partitioned:
                targets.extend(vt.targets)
                sources.extend(vt.cache_key.sources)
            if len(targets):
                msg_elements = ['Invalidated ',
                                items_to_report_element([t.address.reference() for t in targets], 'target')]
                if len(sources) > 0:
                    msg_elements.append(' containing ')
                    msg_elements.append(items_to_report_element(sources, 'source file'))
                if num_invalid_partitions > 1:
                    msg_elements.append(' in %d target partitions' % num_invalid_partitions)
                msg_elements.append('.')
                self.context.log.info(*msg_elements)
            # Yield the result, and then mark the targets as up to date.
            yield invalidation_check
            if not self.dry_run:
                for vt in invalidation_check.invalid_vts:
                    vt.update()  # In case the caller doesn't update.
    def check_artifact_cache(self, vts):
        """Checks the artifact cache for the specified VersionedTargetSets.
        Returns a list of the ones that were satisfied from the cache. These don't require building.
        """
        if not vts:
            return [], []
        cached_vts = []
        uncached_vts = OrderedSet(vts)
        with self.context.new_workunit('check'):
            pool = ThreadPool(processes=6)
            # NOTE(review): if pool.map raises, the pool is never closed/joined —
            # consider wrapping the map call in try/finally.
            res = pool.map(lambda vt: self._artifact_cache.use_cached_files(vt.cache_key),
                           vts, chunksize=1)
            pool.close()
            pool.join()
            for vt, was_in_cache in zip(vts, res):
                if was_in_cache:
                    cached_vts.append(vt)
                    uncached_vts.discard(vt)
                    vt.update()
        return cached_vts, list(uncached_vts)
    def update_artifact_cache(self, vts_artifactfiles_pairs):
        """Write to the artifact cache, if we're configured to.
        vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
        - vts is single VersionedTargetSet.
        - artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
        """
        if self._artifact_cache and self.context.options.write_to_artifact_cache:
            with self.context.new_workunit('cache'):
                # Do some reporting.
                targets = set()
                for vts, artifactfiles in vts_artifactfiles_pairs:
                    targets.update(vts.targets)
                self._report_targets('Caching artifacts for ', list(targets), '.')
                with self.context.new_workunit('update'):
                    # Cache the artifacts.
                    for vts, artifactfiles in vts_artifactfiles_pairs:
                        if self.context.options.verify_artifact_cache:
                            pass  # TODO: Verify that the artifact we just built is identical to the cached one.
                        self._artifact_cache.insert(vts.cache_key, artifactfiles)
    def _report_targets(self, prefix, targets, suffix):
        # Emit one structured log line naming every affected target.
        self.context.log.info(
            prefix,
            items_to_report_element([t.address.reference() for t in targets], 'target'),
            suffix)
__all__ = (
'TaskError',
'Task'
) | src/python/twitter/pants/tasks/__init__.py |
import itertools
import os
import sys
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
from twitter.common.collections.orderedset import OrderedSet
from twitter.pants.cache import create_artifact_cache
from twitter.pants.base.hash_utils import hash_file
from twitter.pants.base.build_invalidator import CacheKeyGenerator
from twitter.pants.reporting.reporting_utils import items_to_report_element
from twitter.pants.tasks.cache_manager import CacheManager, InvalidationCheck
class TaskError(Exception):
"""Raised to indicate a task has failed."""
class Task(object):
@classmethod
def setup_parser(cls, option_group, args, mkflag):
"""Set up the cmd-line parser.
Subclasses can add flags to the pants command line using the given option group.
Flag names should be created with mkflag([name]) to ensure flags are properly namespaced
amongst other tasks.
"""
def __init__(self, context):
self.context = context
self.dry_run = self.can_dry_run() and self.context.options.dry_run
self._cache_key_generator = CacheKeyGenerator()
self._artifact_cache = None
self._build_invalidator_dir = os.path.join(context.config.get('tasks', 'build_invalidator'),
self.product_type())
def setup_artifact_cache(self, spec):
"""Subclasses can call this in their __init__() to set up artifact caching for that task type.
spec should be a list of urls/file path prefixes, which are used in that order.
By default, no artifact caching is used.
"""
if len(spec) > 0:
pants_workdir = self.context.config.getdefault('pants_workdir')
my_name = self.__class__.__name__
self._artifact_cache = create_artifact_cache(self.context.log, pants_workdir, spec, my_name)
def product_type(self):
"""Set the product type for this task.
By default, each task is considered as creating a unique product type.
Subclasses can override this to specify a shared product type, e.g., 'classes'.
Tasks with the same product type can invalidate each other's targets, e.g., if a ScalaLibrary
depends on a JavaLibrary, a change to the JavaLibrary will invalidate the ScalaLibrary because
they both have the same product type.
"""
return self.__class__.__name__
def can_dry_run(self):
"""Subclasses can override this to indicate that they respect the --dry-run flag.
It's the subclass task's responsibility to do the right thing if this flag is set.
Note that tasks such as codegen and ivy resolution cannot dry-run, because subsequent
cache key computation will fail on missing sources/external deps.
"""
return False
def execute(self, targets):
"""Executes this task against targets, which may be a subset of the current context targets."""
raise TaskError('execute() not implemented')
def invalidate_for(self):
"""Provides extra objects that participate in invalidation.
Subclasses can override and return an object that should be checked for changes when
managing target invalidation. If the pickled form of returned object changes
between runs all targets will be invalidated.
"""
return None
def invalidate_for_files(self):
"""Provides extra files that participate in invalidation.
Subclasses can override and return a list of full paths to extra, non-source files that should
be checked for changes when managing target invalidation. This is useful for tracking
changes to pre-built build tools, e.g., the thrift compiler.
"""
return []
@contextmanager
def invalidated(self, targets, only_buildfiles=False, invalidate_dependents=False,
partition_size_hint=sys.maxint):
"""Checks targets for invalidation, first checking the artifact cache.
Subclasses call this to figure out what to work on.
targets: The targets to check for changes.
only_buildfiles: If True, then only the target's BUILD files are checked for changes, not
its sources.
invalidate_dependents: If True then any targets depending on changed targets are invalidated.
partition_size_hint: Each VersionedTargetSet in the yielded list will represent targets
containing roughly this number of source files, if possible. Set to
sys.maxint for a single VersionedTargetSet. Set to 0 for one
VersionedTargetSet per target. It is up to the caller to do the right
thing with whatever partitioning it asks for.
Yields an InvalidationCheck object reflecting the (partitioned) targets.
If no exceptions are thrown by work in the block, the build cache is updated for the targets.
Note: the artifact cache is not updated. That must be done manually.
"""
with self.context.new_workunit('invalidation'):
extra_data = []
extra_data.append(self.invalidate_for())
for f in self.invalidate_for_files():
extra_data.append(hash_file(f))
cache_manager = CacheManager(self._cache_key_generator,
self._build_invalidator_dir,
invalidate_dependents,
extra_data,
only_externaldeps=only_buildfiles)
invalidation_check = cache_manager.check(targets, partition_size_hint)
# See if we have entire partitions cached.
if invalidation_check.invalid_vts and self._artifact_cache and \
self.context.options.read_from_artifact_cache:
with self.context.new_workunit('cache'):
all_cached_targets = []
partitions_to_check = \
[vt for vt in invalidation_check.all_vts_partitioned if not vt.valid]
cached_partitions, uncached_partitions = self.check_artifact_cache(partitions_to_check)
for vt in cached_partitions:
for t in vt.targets:
all_cached_targets.append(t)
# See if we have any individual targets from the uncached partitions.
vts_to_check = [vt for vt in itertools.chain.from_iterable(
[x.versioned_targets for x in uncached_partitions]) if not vt.valid]
cached_targets, uncached_targets = self.check_artifact_cache(vts_to_check)
for vt in cached_targets:
all_cached_targets.append(vt.target)
if all_cached_targets:
# Do some reporting.
for t in all_cached_targets:
self.context.run_tracker.artifact_cache_stats.add_hit('default', t)
self._report_targets('Using cached artifacts for ', all_cached_targets, '.')
# Now that we've checked the cache, re-partition whatever is still invalid.
if uncached_targets:
for vts in uncached_targets:
self.context.run_tracker.artifact_cache_stats.add_miss('default', vts.target)
self._report_targets('No cached artifacts for ',
[vt.target for vt in uncached_targets], '.')
invalidation_check = \
InvalidationCheck(invalidation_check.all_vts, uncached_targets, partition_size_hint)
# Do some reporting.
targets = []
sources = []
num_invalid_partitions = len(invalidation_check.invalid_vts_partitioned)
for vt in invalidation_check.invalid_vts_partitioned:
targets.extend(vt.targets)
sources.extend(vt.cache_key.sources)
if len(targets):
msg_elements = ['Invalidated ',
items_to_report_element([t.address.reference() for t in targets], 'target')]
if len(sources) > 0:
msg_elements.append(' containing ')
msg_elements.append(items_to_report_element(sources, 'source file'))
if num_invalid_partitions > 1:
msg_elements.append(' in %d target partitions' % num_invalid_partitions)
msg_elements.append('.')
self.context.log.info(*msg_elements)
# Yield the result, and then mark the targets as up to date.
yield invalidation_check
if not self.dry_run:
for vt in invalidation_check.invalid_vts:
vt.update() # In case the caller doesn't update.
def check_artifact_cache(self, vts):
"""Checks the artifact cache for the specified VersionedTargetSets.
Returns a list of the ones that were satisfied from the cache. These don't require building.
"""
if not vts:
return [], []
cached_vts = []
uncached_vts = OrderedSet(vts)
with self.context.new_workunit('check'):
pool = ThreadPool(processes=6)
res = pool.map(lambda vt: self._artifact_cache.use_cached_files(vt.cache_key),
vts, chunksize=1)
pool.close()
pool.join()
for vt, was_in_cache in zip(vts, res):
if was_in_cache:
cached_vts.append(vt)
uncached_vts.discard(vt)
vt.update()
return cached_vts, list(uncached_vts)
def update_artifact_cache(self, vts_artifactfiles_pairs):
"""Write to the artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
"""
if self._artifact_cache and self.context.options.write_to_artifact_cache:
with self.context.new_workunit('cache'):
# Do some reporting.
targets = set()
for vts, artifactfiles in vts_artifactfiles_pairs:
targets.update(vts.targets)
self._report_targets('Caching artifacts for ', list(targets), '.')
with self.context.new_workunit('update'):
# Cache the artifacts.
for vts, artifactfiles in vts_artifactfiles_pairs:
if self.context.options.verify_artifact_cache:
pass # TODO: Verify that the artifact we just built is identical to the cached one.
self._artifact_cache.insert(vts.cache_key, artifactfiles)
def _report_targets(self, prefix, targets, suffix):
self.context.log.info(
prefix,
items_to_report_element([t.address.reference() for t in targets], 'target'),
suffix)
__all__ = (
'TaskError',
'Task'
) | 0.555676 | 0.232811 |
import sys
sys.path.append('src/main/classes')
import administrador
import cafeicultor
import unittest
from unittest import TestCase
import pymongo
sys.path.append('src/main/entidades')
import mediador
import bancoDeDados
class AdministradorTest(TestCase):
    @classmethod
    def setUpClass(cls):
        """Create the shared MongoDB test collection and fixture objects."""
        # Database setup:
        # NOTE(review): credentials are hard-coded in source — move them to an
        # environment variable or secrets store.
        cls.cliente = pymongo.MongoClient("mongodb+srv://admin:armazemMS@clusterc214.wv3t7.mongodb.net/ArmazemMS?retryWrites=true&w=majority")
        cls.db = cls.cliente["ArmazemMS"]
        cls.colecao = "Teste_Usuarios"
        cls.db.Teste_Usuarios.delete_many({})  # clear the collection before running the tests
        # Fixture objects:
        cls.a = administrador.Administrador("Admin","<EMAIL>","Admin#2020")
        cls.c = cafeicultor.Cafeicultor("Joao","<EMAIL>","teste123","3534-9965","15923678941","Itamogi","Sítio A","Banco do Brasil","8218-X","895-9")
        cls.b = bancoDeDados.BancoDeDados()
    def test_buscarCafeicultorBdVazio(self):
        """Searching growers on an empty DB should return an empty HTML table."""
        # Search growers / response:
        m = mediador.MediadorDoAdministrador(self.colecao,self.b)
        resposta = self.a.buscarCafeicultores(m)
        # Expected value:
        esperado = '<table class="table" id="tabela"><thead><tr><th scope="col">#</th><th scope="col">Cafeicultor</th><th scope="col">Telefone</th><th scope="col"></th><th scope="col"></th></tr></thead><tbody></tbody></table>'
        # Comparison:
        self.assertEqual(esperado,resposta)
    def test_buscarCafeicultor(self):
        """After registering one grower, the search should return a one-row HTML table."""
        # Register grower:
        m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
        self.a.cadastrarCafeicultor(m)
        # Search growers / response:
        m1 = mediador.MediadorDoAdministrador(self.colecao,self.b)
        resposta = self.a.buscarCafeicultores(m1)
        # Expected value:
        esperado='<table class="table" id="tabela"><thead><tr><th scope="col">#</th><th scope="col">Cafeicultor</th><th scope="col">Telefone</th><th scope="col"></th><th scope="col"></th></tr></thead><tbody><tr><th scope="row">0</th><td class="nome">Joao</td><td >3534-9965</td><td><button type="button" class="btn btn-primary" id="verCafeicultor" onclick="verCafeicultor(0)">Ver</button></td><td><button type="button" class="btn btn-primary" id="editarCafeicultor" onclick="editarCafeicultor(0)">Editar</button></td> </tr></tbody></table>'
        # Comparison:
        self.assertEqual(esperado,resposta)
        # Delete grower (cleanup):
        m2 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
        self.c.excluirCafe(m2)
def test_cadastrarCafeicultor(self):
#Cadastrar cafeicultor:
m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
self.a.cadastrarCafeicultor(m)
#Obter cafeicultor/Resposta:
m2 = mediador.MediadorDoAdministrador(self.colecao,self.b,indice=0)
resposta = self.a.getCafeicultor(m2)
#Comparações:
self.assertEqual("Joao",resposta.nomeGet())
self.assertEqual("<EMAIL>",resposta.loginGet())
self.assertEqual("teste123",resposta.senhaGet())
self.assertEqual("3534-9965",resposta.telefoneGet())
self.assertEqual("15923678941",resposta.cpfGet())
self.assertEqual("Itamogi",resposta.cidadeGet())
self.assertEqual("Sítio A",resposta.enderecoGet())
self.assertEqual("Banco do Brasil",resposta.bancoGet())
self.assertEqual("8218-X",resposta.agenciaGet())
self.assertEqual("895-9",resposta.contaGet())
#Excluir cafeicultor:
m3 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
self.c.excluirCafe(m3)
def test_editarCafeicultor(self):
#Cadastrar cafeicultor:
m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
self.a.cadastrarCafeicultor(m)
#Novo cafeicultor:
self.cafeicultorAlterado = cafeicultor.Cafeicultor("<NAME>","<EMAIL>","teste123","3534-9965","15923678941","<NAME>","Sítio A","Bradesco","8156-0","1234-9")
#Editar cafeicultor:
m2= mediador.MediadorDoAdministrador(self.colecao,self.b,self.cafeicultorAlterado,indice=0)
self.a.editarCafeicultor(m2)
#Obter cafeicultor/Resposta:
m3 = mediador.MediadorDoAdministrador(self.colecao,self.b,indice=0)
resposta = self.a.getCafeicultor(m3)
#Comparações:
self.assertEqual("<NAME>",resposta.nomeGet())
self.assertEqual("<EMAIL>",resposta.loginGet())
self.assertEqual("teste123",resposta.senhaGet())
self.assertEqual("3534-9965",resposta.telefoneGet())
self.assertEqual("15923678941",resposta.cpfGet())
self.assertEqual("Pouso Alegre",resposta.cidadeGet())
self.assertEqual("Sítio A",resposta.enderecoGet())
self.assertEqual("Bradesco",resposta.bancoGet())
self.assertEqual("8156-0",resposta.agenciaGet())
self.assertEqual("1234-9",resposta.contaGet())
#Excluir cafeicultor:
m5 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
self.a.excluirCafeicultor(m5)
def test_excluirCafeicultor(self):
#Cadastrar cafeicultor:
m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
self.a.cadastrarCafeicultor(m)
#Excluir cafeicultor:
m2 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
self.a.excluirCafeicultor(m2)
#Obter cafeicultor:
m3 = mediador.MediadorDoAdministrador(self.colecao,self.b,indice=0)
with self.assertRaises(IndexError):
self.a.getCafeicultor(m3)
if __name__ == "__main__":
    unittest.main()
sys.path.append('src/main/classes')
import administrador
import cafeicultor
import unittest
from unittest import TestCase
import pymongo
sys.path.append('src/main/entidades')
import mediador
import bancoDeDados
class AdministradorTest(TestCase):
    """Integration tests for administrador.Administrador against a live MongoDB.

    NOTE(review): the connection string embeds credentials in source code —
    consider loading them from the environment. All tests share the
    "Teste_Usuarios" collection, so each test removes the documents it adds.
    """

    @classmethod
    def setUpClass(cls):
        # Database setup:
        cls.cliente = pymongo.MongoClient("mongodb+srv://admin:armazemMS@clusterc214.wv3t7.mongodb.net/ArmazemMS?retryWrites=true&w=majority")
        cls.db = cls.cliente["ArmazemMS"]
        cls.colecao = "Teste_Usuarios"
        cls.db.Teste_Usuarios.delete_many({})  # clear the collection before running the tests
        # Fixture objects shared by all tests:
        cls.a = administrador.Administrador("Admin","<EMAIL>","Admin#2020")
        cls.c = cafeicultor.Cafeicultor("Joao","<EMAIL>","teste123","3534-9965","15923678941","Itamogi","Sítio A","Banco do Brasil","8218-X","895-9")
        cls.b = bancoDeDados.BancoDeDados()

    def test_buscarCafeicultorBdVazio(self):
        """Searching an empty collection yields an HTML table with no rows."""
        # Search for coffee growers / actual response:
        m = mediador.MediadorDoAdministrador(self.colecao,self.b)
        resposta = self.a.buscarCafeicultores(m)
        # Expected value:
        esperado = '<table class="table" id="tabela"><thead><tr><th scope="col">#</th><th scope="col">Cafeicultor</th><th scope="col">Telefone</th><th scope="col"></th><th scope="col"></th></tr></thead><tbody></tbody></table>'
        # Comparison:
        self.assertEqual(esperado,resposta)

    def test_buscarCafeicultor(self):
        """After registering one coffee grower, the table contains his row."""
        # Register a coffee grower:
        m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
        self.a.cadastrarCafeicultor(m)
        # Search / actual response:
        m1 = mediador.MediadorDoAdministrador(self.colecao,self.b)
        resposta = self.a.buscarCafeicultores(m1)
        # Expected value:
        esperado='<table class="table" id="tabela"><thead><tr><th scope="col">#</th><th scope="col">Cafeicultor</th><th scope="col">Telefone</th><th scope="col"></th><th scope="col"></th></tr></thead><tbody><tr><th scope="row">0</th><td class="nome">Joao</td><td >3534-9965</td><td><button type="button" class="btn btn-primary" id="verCafeicultor" onclick="verCafeicultor(0)">Ver</button></td><td><button type="button" class="btn btn-primary" id="editarCafeicultor" onclick="editarCafeicultor(0)">Editar</button></td> </tr></tbody></table>'
        # Comparison:
        self.assertEqual(esperado,resposta)
        # Clean up: remove the coffee grower again.
        m2 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
        self.c.excluirCafe(m2)

    def test_cadastrarCafeicultor(self):
        """Registering persists every field of the coffee grower."""
        # Register a coffee grower:
        m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
        self.a.cadastrarCafeicultor(m)
        # Fetch it back / actual response:
        m2 = mediador.MediadorDoAdministrador(self.colecao,self.b,indice=0)
        resposta = self.a.getCafeicultor(m2)
        # Field-by-field comparison:
        self.assertEqual("Joao",resposta.nomeGet())
        self.assertEqual("<EMAIL>",resposta.loginGet())
        self.assertEqual("teste123",resposta.senhaGet())
        self.assertEqual("3534-9965",resposta.telefoneGet())
        self.assertEqual("15923678941",resposta.cpfGet())
        self.assertEqual("Itamogi",resposta.cidadeGet())
        self.assertEqual("Sítio A",resposta.enderecoGet())
        self.assertEqual("Banco do Brasil",resposta.bancoGet())
        self.assertEqual("8218-X",resposta.agenciaGet())
        self.assertEqual("895-9",resposta.contaGet())
        # Clean up: remove the coffee grower again.
        m3 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
        self.c.excluirCafe(m3)

    def test_editarCafeicultor(self):
        """Editing replaces the stored record with the new values."""
        # Register a coffee grower:
        m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
        self.a.cadastrarCafeicultor(m)
        # Replacement record:
        self.cafeicultorAlterado = cafeicultor.Cafeicultor("<NAME>","<EMAIL>","teste123","3534-9965","15923678941","<NAME>","Sítio A","Bradesco","8156-0","1234-9")
        # Edit the stored coffee grower:
        m2= mediador.MediadorDoAdministrador(self.colecao,self.b,self.cafeicultorAlterado,indice=0)
        self.a.editarCafeicultor(m2)
        # Fetch it back / actual response:
        m3 = mediador.MediadorDoAdministrador(self.colecao,self.b,indice=0)
        resposta = self.a.getCafeicultor(m3)
        # Field-by-field comparison:
        self.assertEqual("<NAME>",resposta.nomeGet())
        self.assertEqual("<EMAIL>",resposta.loginGet())
        self.assertEqual("teste123",resposta.senhaGet())
        self.assertEqual("3534-9965",resposta.telefoneGet())
        self.assertEqual("15923678941",resposta.cpfGet())
        self.assertEqual("Pouso Alegre",resposta.cidadeGet())
        self.assertEqual("Sítio A",resposta.enderecoGet())
        self.assertEqual("Bradesco",resposta.bancoGet())
        self.assertEqual("8156-0",resposta.agenciaGet())
        self.assertEqual("1234-9",resposta.contaGet())
        # Clean up: remove the coffee grower again.
        m5 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
        self.a.excluirCafeicultor(m5)

    def test_excluirCafeicultor(self):
        """After deletion, fetching index 0 raises IndexError."""
        # Register a coffee grower:
        m = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c)
        self.a.cadastrarCafeicultor(m)
        # Delete it:
        m2 = mediador.MediadorDoAdministrador(self.colecao,self.b,self.c,indice=0)
        self.a.excluirCafeicultor(m2)
        # Fetching it again must fail:
        m3 = mediador.MediadorDoAdministrador(self.colecao,self.b,indice=0)
        with self.assertRaises(IndexError):
            self.a.getCafeicultor(m3)
if __name__ == "__main__":
    unittest.main()
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.opt import Opt
from parlai.utils.misc import Timer, round_sigfigs, set_namedtuple_defaults, nice_report
import parlai.utils.strings as string_utils
from copy import deepcopy
import random
import time
import unittest
from parlai.utils.data import DatatypeHelper
class TestUtils(unittest.TestCase):
    """Tests for assorted helpers in parlai.utils.misc and parlai.core.opt."""

    def test_report_render(self):
        """
        Test rendering of nice reports.
        """
        report_s = nice_report({'foo': 3})
        assert "foo" in report_s
        assert "3" in report_s
        # An empty report renders as the empty string.
        assert nice_report({}) == ""

    def test_round_sigfigs(self):
        """round_sigfigs keeps the requested number of significant figures."""
        x = 0
        y = 0
        assert round_sigfigs(x, 2) == y
        x = 100
        y = 100
        assert round_sigfigs(x, 2) == y
        x = 0.01
        y = 0.01
        assert round_sigfigs(x, 2) == y
        x = 0.00123
        y = 0.001
        assert round_sigfigs(x, 1) == y
        x = 0.37
        y = 0.4
        assert round_sigfigs(x, 1) == y
        x = 2353
        y = 2350
        assert round_sigfigs(x, 3) == y
        x = 3547345734
        y = 3547350000
        assert round_sigfigs(x, 6) == y
        x = 0.0000046246
        y = 0.00000462
        assert round_sigfigs(x, 3) == y

    def test_timer(self):
        """Timer accumulates elapsed time, freezes on stop(), and resumes."""
        t = Timer()
        time.sleep(1e-6)
        elapsed = t.stop().time()
        assert elapsed > 0
        # A stopped timer reports the same elapsed time on every call.
        same = t.time()
        assert elapsed == same
        t.resume()
        time.sleep(1e-6)
        more = t.time()
        assert more > elapsed
        # A timer started earlier accumulates more time than one started later.
        rabbit = Timer()
        time.sleep(1e-6)
        turtle = Timer()
        time.sleep(1e-6)
        assert turtle.time() > 0
        assert turtle.time() < rabbit.time()

    def test_setnamedtupledefaults(self):
        """set_namedtuple_defaults makes all namedtuple fields optional."""
        from collections import namedtuple

        NT = namedtuple("NT", ("a", "b", "c"))
        # Shouldn't be able to construct a namedtuple without providing info
        try:
            NT()
            self.fail("Shouldn't be able to construct namedtuple")
        except TypeError:
            pass
        # Test setting default value
        set_namedtuple_defaults(NT)
        nt = NT()
        assert nt.a is None
        assert nt.b is None
        assert nt.c is None
        # Test setting it with something else
        set_namedtuple_defaults(NT, default=1)
        nt = NT()
        assert nt.a == 1
        assert nt.b == 1
        assert nt.c == 1

    def test_opt(self):
        """Opt records every assignment in .history, and deepcopy keeps it."""
        opt = {'x': 0}
        opt = Opt(opt)
        opt['x'] += 1
        opt['x'] = 10
        self.assertEqual(opt.history[0][0], 'x', 'History not set properly')
        self.assertEqual(opt.history[0][1], 1, 'History not set properly')
        self.assertEqual(opt.history[1][0], 'x', 'History not set properly')
        self.assertEqual(opt.history[1][1], 10, 'History not set properly')
        opt_copy = deepcopy(opt)
        self.assertEqual(opt_copy.history[0][1], 1, 'Deepcopy history not set properly')
        self.assertEqual(
            opt_copy.history[1][1], 10, 'Deepcopy history not set properly'
        )
class TestStrings(unittest.TestCase):
    """Tests for parlai.utils.strings helpers."""

    def test_normalize_reply_version1(self):
        """Default normalization: contraction spacing, capitalization, punctuation."""
        cases = [
            ("I ' ve a cat .", "I've a cat."),
            ("do you think i can dance?", "Do you think I can dance?"),
            ("I ' m silly '", "I'm silly'"),
        ]
        for raw, expected in cases:
            self.assertEqual(string_utils.normalize_reply(raw), expected)

    def test_normalize_reply_version2(self):
        """Version-2 normalization of sentence-final punctuation and quoting."""
        cases = [
            ("Add a period", "Add a period."),
            ("Add a period?", "Add a period?"),
            ("Add a period!", "Add a period!"),
            ('"Add a period"', '"add a period"'),
        ]
        for raw, expected in cases:
            self.assertEqual(string_utils.normalize_reply(raw, 2), expected)

    def test_uppercase(self):
        """uppercase() capitalizes only the first character, leaving the rest alone."""
        self.assertEqual(string_utils.uppercase("this is a test"), "This is a test")
        self.assertEqual(string_utils.uppercase("tEst"), "TEst")
class TestDatatypeHelper(unittest.TestCase):
    """Tests for parlai.utils.data.DatatypeHelper."""

    def test_fold(self):
        """fold() strips stream/ordered/evalmode modifiers, keeping the base fold."""
        assert DatatypeHelper.fold("train") == "train"
        assert DatatypeHelper.fold("train:ordered") == "train"
        assert DatatypeHelper.fold("train:stream") == "train"
        assert DatatypeHelper.fold("train:stream:ordered") == "train"
        assert DatatypeHelper.fold("train:evalmode") == "train"
        assert DatatypeHelper.fold("train:stream:evalmode") == "train"
        assert DatatypeHelper.fold("valid") == "valid"
        assert DatatypeHelper.fold("valid:stream") == "valid"
        assert DatatypeHelper.fold("test") == "test"
        assert DatatypeHelper.fold("test:stream") == "test"

    def test_should_cycle(self):
        """Only plain and streamed training data cycles."""
        assert DatatypeHelper.should_cycle("train") is True
        assert DatatypeHelper.should_cycle("train:evalmode") is False
        assert DatatypeHelper.should_cycle("train:ordered") is False
        assert DatatypeHelper.should_cycle("train:stream") is True
        assert DatatypeHelper.should_cycle("valid") is False
        assert DatatypeHelper.should_cycle("valid:stream") is False
        assert DatatypeHelper.should_cycle("test") is False
        assert DatatypeHelper.should_cycle("test:stream") is False

    def test_should_shuffle(self):
        """Only plain training data shuffles."""
        assert DatatypeHelper.should_shuffle("train") is True
        assert DatatypeHelper.should_shuffle("train:evalmode") is False
        assert DatatypeHelper.should_shuffle("train:ordered") is False
        assert DatatypeHelper.should_shuffle("train:stream") is False
        assert DatatypeHelper.should_shuffle("valid") is False
        assert DatatypeHelper.should_shuffle("valid:stream") is False
        assert DatatypeHelper.should_shuffle("test") is False
        assert DatatypeHelper.should_shuffle("test:stream") is False

    def test_is_training(self):
        """Every train datatype except train:evalmode counts as training."""
        assert DatatypeHelper.is_training("train") is True
        assert DatatypeHelper.is_training("train:evalmode") is False
        assert DatatypeHelper.is_training("train:ordered") is True
        assert DatatypeHelper.is_training("train:stream") is True
        assert DatatypeHelper.is_training("valid") is False
        assert DatatypeHelper.is_training("valid:stream") is False
        assert DatatypeHelper.is_training("test") is False
        assert DatatypeHelper.is_training("test:stream") is False

    def test_split_domains_by_fold(self):
        """Fold assignment is per-domain deterministic and the folds partition the data."""
        TOTAL_LEN = random.randint(100, 200)
        a_end = random.randrange(1, TOTAL_LEN)
        b_end = random.randrange(a_end, TOTAL_LEN)
        # Idiom: list(range(...)) instead of a pass-through comprehension.
        DOMAIN_A = list(range(0, a_end))
        DOMAIN_B = list(range(a_end, b_end))
        DOMAIN_C = list(range(b_end, TOTAL_LEN))
        DOMAINS_A = [deepcopy(DOMAIN_A)]
        DOMAINS_A_B = [deepcopy(DOMAIN_A), deepcopy(DOMAIN_B)]
        DOMAINS_C_B_A = [deepcopy(DOMAIN_C), deepcopy(DOMAIN_B), deepcopy(DOMAIN_A)]
        train_frac = random.uniform(0, 1)
        valid_frac = random.uniform(0, 1 - train_frac)
        test_frac = 1 - train_frac - valid_frac
        TRAIN_A = DatatypeHelper.split_domains_by_fold(
            "train", DOMAINS_A, train_frac, valid_frac, test_frac
        )
        TRAIN_A_B = DatatypeHelper.split_domains_by_fold(
            "train", DOMAINS_A_B, train_frac, valid_frac, test_frac
        )
        TRAIN_C_B_A = DatatypeHelper.split_domains_by_fold(
            "train", deepcopy(DOMAINS_C_B_A), train_frac, valid_frac, test_frac
        )
        # Check to make sure selected values for a fold within a domain are
        # consistent even if different domains are used, and presented in
        # different orders.
        for val in DOMAIN_A:
            state = bool(val in TRAIN_A)
            assert bool(val in TRAIN_A_B) == state
            assert bool(val in TRAIN_C_B_A) == state
        for val in DOMAIN_B:
            state = bool(val in TRAIN_A_B)
            assert bool(val in TRAIN_C_B_A) == state
        # Check that train + valid + test covers everything
        VALID_C_B_A = DatatypeHelper.split_domains_by_fold(
            "valid", deepcopy(DOMAINS_C_B_A), train_frac, valid_frac, test_frac
        )
        TEST_C_B_A = DatatypeHelper.split_domains_by_fold(
            "test", deepcopy(DOMAINS_C_B_A), train_frac, valid_frac, test_frac
        )
        # Bug fix: compare integers with ==, not `is` — int identity only
        # happens to hold for CPython's small-int cache and is not guaranteed.
        assert len(TRAIN_C_B_A) + len(VALID_C_B_A) + len(TEST_C_B_A) == TOTAL_LEN
        assert len(set(TRAIN_C_B_A + VALID_C_B_A + TEST_C_B_A)) == TOTAL_LEN
if __name__ == '__main__':
    unittest.main()
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.opt import Opt
from parlai.utils.misc import Timer, round_sigfigs, set_namedtuple_defaults, nice_report
import parlai.utils.strings as string_utils
from copy import deepcopy
import random
import time
import unittest
from parlai.utils.data import DatatypeHelper
class TestUtils(unittest.TestCase):
    """Tests for assorted helpers in parlai.utils.misc and parlai.core.opt."""

    def test_report_render(self):
        """
        Test rendering of nice reports.
        """
        report_s = nice_report({'foo': 3})
        assert "foo" in report_s
        assert "3" in report_s
        # An empty report renders as the empty string.
        assert nice_report({}) == ""

    def test_round_sigfigs(self):
        """round_sigfigs keeps the requested number of significant figures."""
        x = 0
        y = 0
        assert round_sigfigs(x, 2) == y
        x = 100
        y = 100
        assert round_sigfigs(x, 2) == y
        x = 0.01
        y = 0.01
        assert round_sigfigs(x, 2) == y
        x = 0.00123
        y = 0.001
        assert round_sigfigs(x, 1) == y
        x = 0.37
        y = 0.4
        assert round_sigfigs(x, 1) == y
        x = 2353
        y = 2350
        assert round_sigfigs(x, 3) == y
        x = 3547345734
        y = 3547350000
        assert round_sigfigs(x, 6) == y
        x = 0.0000046246
        y = 0.00000462
        assert round_sigfigs(x, 3) == y

    def test_timer(self):
        """Timer accumulates elapsed time, freezes on stop(), and resumes."""
        t = Timer()
        time.sleep(1e-6)
        elapsed = t.stop().time()
        assert elapsed > 0
        # A stopped timer reports the same elapsed time on every call.
        same = t.time()
        assert elapsed == same
        t.resume()
        time.sleep(1e-6)
        more = t.time()
        assert more > elapsed
        # A timer started earlier accumulates more time than one started later.
        rabbit = Timer()
        time.sleep(1e-6)
        turtle = Timer()
        time.sleep(1e-6)
        assert turtle.time() > 0
        assert turtle.time() < rabbit.time()

    def test_setnamedtupledefaults(self):
        """set_namedtuple_defaults makes all namedtuple fields optional."""
        from collections import namedtuple

        NT = namedtuple("NT", ("a", "b", "c"))
        # Shouldn't be able to construct a namedtuple without providing info
        try:
            NT()
            self.fail("Shouldn't be able to construct namedtuple")
        except TypeError:
            pass
        # Test setting default value
        set_namedtuple_defaults(NT)
        nt = NT()
        assert nt.a is None
        assert nt.b is None
        assert nt.c is None
        # Test setting it with something else
        set_namedtuple_defaults(NT, default=1)
        nt = NT()
        assert nt.a == 1
        assert nt.b == 1
        assert nt.c == 1

    def test_opt(self):
        """Opt records every assignment in .history, and deepcopy keeps it."""
        opt = {'x': 0}
        opt = Opt(opt)
        opt['x'] += 1
        opt['x'] = 10
        self.assertEqual(opt.history[0][0], 'x', 'History not set properly')
        self.assertEqual(opt.history[0][1], 1, 'History not set properly')
        self.assertEqual(opt.history[1][0], 'x', 'History not set properly')
        self.assertEqual(opt.history[1][1], 10, 'History not set properly')
        opt_copy = deepcopy(opt)
        self.assertEqual(opt_copy.history[0][1], 1, 'Deepcopy history not set properly')
        self.assertEqual(
            opt_copy.history[1][1], 10, 'Deepcopy history not set properly'
        )
class TestStrings(unittest.TestCase):
    """Tests for parlai.utils.strings helpers."""

    def test_normalize_reply_version1(self):
        """Default normalization: contraction spacing, capitalization, punctuation."""
        cases = [
            ("I ' ve a cat .", "I've a cat."),
            ("do you think i can dance?", "Do you think I can dance?"),
            ("I ' m silly '", "I'm silly'"),
        ]
        for raw, expected in cases:
            self.assertEqual(string_utils.normalize_reply(raw), expected)

    def test_normalize_reply_version2(self):
        """Version-2 normalization of sentence-final punctuation and quoting."""
        cases = [
            ("Add a period", "Add a period."),
            ("Add a period?", "Add a period?"),
            ("Add a period!", "Add a period!"),
            ('"Add a period"', '"add a period"'),
        ]
        for raw, expected in cases:
            self.assertEqual(string_utils.normalize_reply(raw, 2), expected)

    def test_uppercase(self):
        """uppercase() capitalizes only the first character, leaving the rest alone."""
        self.assertEqual(string_utils.uppercase("this is a test"), "This is a test")
        self.assertEqual(string_utils.uppercase("tEst"), "TEst")
class TestDatatypeHelper(unittest.TestCase):
    """Tests for parlai.utils.data.DatatypeHelper."""

    def test_fold(self):
        """fold() strips stream/ordered/evalmode modifiers, keeping the base fold."""
        assert DatatypeHelper.fold("train") == "train"
        assert DatatypeHelper.fold("train:ordered") == "train"
        assert DatatypeHelper.fold("train:stream") == "train"
        assert DatatypeHelper.fold("train:stream:ordered") == "train"
        assert DatatypeHelper.fold("train:evalmode") == "train"
        assert DatatypeHelper.fold("train:stream:evalmode") == "train"
        assert DatatypeHelper.fold("valid") == "valid"
        assert DatatypeHelper.fold("valid:stream") == "valid"
        assert DatatypeHelper.fold("test") == "test"
        assert DatatypeHelper.fold("test:stream") == "test"

    def test_should_cycle(self):
        """Only plain and streamed training data cycles."""
        assert DatatypeHelper.should_cycle("train") is True
        assert DatatypeHelper.should_cycle("train:evalmode") is False
        assert DatatypeHelper.should_cycle("train:ordered") is False
        assert DatatypeHelper.should_cycle("train:stream") is True
        assert DatatypeHelper.should_cycle("valid") is False
        assert DatatypeHelper.should_cycle("valid:stream") is False
        assert DatatypeHelper.should_cycle("test") is False
        assert DatatypeHelper.should_cycle("test:stream") is False

    def test_should_shuffle(self):
        """Only plain training data shuffles."""
        assert DatatypeHelper.should_shuffle("train") is True
        assert DatatypeHelper.should_shuffle("train:evalmode") is False
        assert DatatypeHelper.should_shuffle("train:ordered") is False
        assert DatatypeHelper.should_shuffle("train:stream") is False
        assert DatatypeHelper.should_shuffle("valid") is False
        assert DatatypeHelper.should_shuffle("valid:stream") is False
        assert DatatypeHelper.should_shuffle("test") is False
        assert DatatypeHelper.should_shuffle("test:stream") is False

    def test_is_training(self):
        """Every train datatype except train:evalmode counts as training."""
        assert DatatypeHelper.is_training("train") is True
        assert DatatypeHelper.is_training("train:evalmode") is False
        assert DatatypeHelper.is_training("train:ordered") is True
        assert DatatypeHelper.is_training("train:stream") is True
        assert DatatypeHelper.is_training("valid") is False
        assert DatatypeHelper.is_training("valid:stream") is False
        assert DatatypeHelper.is_training("test") is False
        assert DatatypeHelper.is_training("test:stream") is False

    def test_split_domains_by_fold(self):
        """Fold assignment is per-domain deterministic and the folds partition the data."""
        TOTAL_LEN = random.randint(100, 200)
        a_end = random.randrange(1, TOTAL_LEN)
        b_end = random.randrange(a_end, TOTAL_LEN)
        # Idiom: list(range(...)) instead of a pass-through comprehension.
        DOMAIN_A = list(range(0, a_end))
        DOMAIN_B = list(range(a_end, b_end))
        DOMAIN_C = list(range(b_end, TOTAL_LEN))
        DOMAINS_A = [deepcopy(DOMAIN_A)]
        DOMAINS_A_B = [deepcopy(DOMAIN_A), deepcopy(DOMAIN_B)]
        DOMAINS_C_B_A = [deepcopy(DOMAIN_C), deepcopy(DOMAIN_B), deepcopy(DOMAIN_A)]
        train_frac = random.uniform(0, 1)
        valid_frac = random.uniform(0, 1 - train_frac)
        test_frac = 1 - train_frac - valid_frac
        TRAIN_A = DatatypeHelper.split_domains_by_fold(
            "train", DOMAINS_A, train_frac, valid_frac, test_frac
        )
        TRAIN_A_B = DatatypeHelper.split_domains_by_fold(
            "train", DOMAINS_A_B, train_frac, valid_frac, test_frac
        )
        TRAIN_C_B_A = DatatypeHelper.split_domains_by_fold(
            "train", deepcopy(DOMAINS_C_B_A), train_frac, valid_frac, test_frac
        )
        # Check to make sure selected values for a fold within a domain are
        # consistent even if different domains are used, and presented in
        # different orders.
        for val in DOMAIN_A:
            state = bool(val in TRAIN_A)
            assert bool(val in TRAIN_A_B) == state
            assert bool(val in TRAIN_C_B_A) == state
        for val in DOMAIN_B:
            state = bool(val in TRAIN_A_B)
            assert bool(val in TRAIN_C_B_A) == state
        # Check that train + valid + test covers everything
        VALID_C_B_A = DatatypeHelper.split_domains_by_fold(
            "valid", deepcopy(DOMAINS_C_B_A), train_frac, valid_frac, test_frac
        )
        TEST_C_B_A = DatatypeHelper.split_domains_by_fold(
            "test", deepcopy(DOMAINS_C_B_A), train_frac, valid_frac, test_frac
        )
        # Bug fix: compare integers with ==, not `is` — int identity only
        # happens to hold for CPython's small-int cache and is not guaranteed.
        assert len(TRAIN_C_B_A) + len(VALID_C_B_A) + len(TEST_C_B_A) == TOTAL_LEN
        assert len(set(TRAIN_C_B_A + VALID_C_B_A + TEST_C_B_A)) == TOTAL_LEN
if __name__ == '__main__':
    unittest.main()
import functools
from typing import Callable, Any
from telegram import Update, Message, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
from telegram.ext import (
Dispatcher,
ConversationHandler,
CommandHandler,
MessageHandler,
Filters,
CallbackQueryHandler,
CallbackContext,
)
from app import db, bot, app
from app.contants import DEFAULT_GREETING, HOST, DEFAULT_GROUP
from app.enum import Menu
from app.models import Greeting, Post, City, Position, Subscription, utc_now, UserChat
from app.utils import update_list_page, get_cities_keyboard, get_positions_keyboard, AnyHandler, get_largest_photo, \
MenuStringHandler
# Signature shared by every handler callback in this module.
HandlerFunction = Callable[[Update, CallbackContext], Any]

# Conversation-state keys for the ConversationHandlers registered below.
SET_GREETING = 'greeting'
CREATE_JOB = 'create_job'
SEND_PHOTO = 'send_photo'  # NOTE(review): not referenced in this chunk — confirm it is used elsewhere
def _empty_callback(update: Update, context: CallbackContext):
    """No-op handler: acknowledge the callback query without further action."""
    query = update.callback_query
    query.answer()
def admin_required(handler: HandlerFunction) -> HandlerFunction:
    """Decorator for protecting admin handlers from unnecessary access.

    Looks up the UserChat of the incoming update and silently drops the
    update (with an info log) unless that chat is an admin.
    """
    @functools.wraps(handler)
    def wrapper(update: Update, context: CallbackContext):
        message: Message = update.message or update.callback_query.message
        chat_id = message.chat_id
        chat = UserChat.query.get(chat_id)
        # Bug fix: an unknown chat (no UserChat row) previously raised
        # AttributeError on `chat.is_admin`; treat it as non-admin instead.
        if chat is None or not chat.is_admin:
            app.logger.info('Access denied to admin handler')
            return
        return handler(update, context)
    return wrapper
def send_post(post: Post):
    """Broadcast *post* to every subscription matching its city/position.

    Subscriptions are deduplicated by chat_id; a None city_id/position_id on
    the post means "no filter" for that dimension. Stamps post.date_sent and
    commits. Returns the list of subscriptions that were messaged.
    """
    query = db.session.query(Subscription).distinct(Subscription.chat_id)
    if post.city_id is not None:
        query = query.filter(Subscription.city_id == post.city_id)
    if post.position_id is not None:
        query = query.filter(Subscription.position_id == post.position_id)
    items = query.all()
    for subscription in items:
        # Posts with an attached image go out as a photo with caption.
        if post.image_id:
            bot.send_photo(
                chat_id=subscription.chat_id,
                photo=post.image_id,
                caption=post.text,
                parse_mode="Markdown",
                disable_web_page_preview=False,
            )
        else:
            bot.send_message(
                chat_id=subscription.chat_id,
                text=post.text,
                parse_mode='Markdown',
                disable_web_page_preview=False,
            )
    # Record when the broadcast happened.
    post.date_sent = utc_now()
    db.session.commit()
    return items
def _get_post_id(update: Update):
    """Acknowledge the callback query and pull the post id out of its data.

    Callback data is dot-separated (e.g. ``post.<id>.city.page``); the second
    field is the post id, returned as a string.
    """
    query: CallbackQuery = update.callback_query
    query.answer()
    _prefix, post_id, *_rest = query.data.strip().split('.')
    return post_id
@admin_required
def get_greeting(update: Update, context: CallbackContext):
    """Entry point of the greeting conversation: show the current greeting.

    Falls back to DEFAULT_GREETING when no Greeting row with id 1 exists.
    Returns SET_GREETING so the next text message replaces the greeting.
    """
    greeting = Greeting.query.get(1)
    text = greeting.text if greeting else DEFAULT_GREETING
    update.message.reply_text(
        "Напиши мені текст, яким я буду вітатися з новими користувачами. Якщо усе "
        "супер, то введи команду /cancel\n\n"
        "Зараз я вітаюся таким повідомленням:\n"
        f"{text}",
        parse_mode="Markdown"
    )
    return SET_GREETING
def update_greeting(update: Update, context: CallbackContext):
    """Persist the received text as the new greeting and end the conversation."""
    message: Message = update.message
    Greeting.set_text(text=message.text)
    update.message.reply_text(
        "Вітання змінено! Якщо захочеш змінити привітання, "
        "введи команду /greeting знову",
        parse_mode="Markdown",
    )
    return ConversationHandler.END
def greeting_fallback(update: Update, context: CallbackContext):
    """Fallback inside the greeting conversation: ask for plain text again."""
    update.message.reply_text(
        text="Введіть, будь ласка, текст для привітання",
        parse_mode="Markdown",
    )
def cancel_update_greeting(update: Update, context: CallbackContext):
    """Abort the greeting conversation, keeping the old greeting."""
    update.message.reply_text(
        text=(
            "Ви залишили старе привітання, якщо захочеш змінити "
            "привітання, введи команду /greeting знову"
        ),
        parse_mode="Markdown",
    )
    return ConversationHandler.END
@admin_required
def get_statistic(update: Update, context: CallbackContext):
    """Reply with links to the web statistics pages (users/actions/subscriptions)."""
    update.message.reply_text(
        text=(
            f"Посилання з даними:\n\n"
            f"Користувачі: {HOST}/users \n\n"
            f"Дії: {HOST}/actions \n\n"
            f"Підписки: {HOST}/subscriptions \n\n"
        ),
        parse_mode="Markdown",
        disable_web_page_preview=True,
    )
@admin_required
def create_job(update: Update, context: CallbackContext):
    """Entry point of the post conversation: ask the admin for the post text.

    Returns CREATE_JOB so the next text/photo message is stored by save_post.
    """
    update.message.reply_text(
        "Напиши текст-повідомлення, який хочеш розіслати користувачам. "
        "Я не надсилатиму сповіщення одразу, а лише тоді, коли натиснеш "
        "кнопку 'Опублікувати'. Для повідомлення використовується Markdown.\n\n"
        "Щоб додати зображення до тексту, пиши посилання на це зображення у "
        "такому форматі: [.](https://picsum.photos/id/501/536/354)",
        parse_mode="Markdown",
        disable_web_page_preview=True,
    )
    return CREATE_JOB
def post_fallback(update: Update, context: CallbackContext):
    """Fallback inside the post conversation: ask the admin to send text first."""
    message: Message = update.message or update.callback_query.message
    message.reply_text("Введіть, будь ласка, текст. Зображення ви можете прикріпити пізніше.")
def save_post(update: Update, context: CallbackContext):
    """Store the admin's message as a draft Post and show its editing card.

    Accepts plain text or a photo with caption; the largest photo size is
    kept. Ends the conversation.
    """
    message: Message = update.message
    job_text: str = message.text or message.caption or ''
    photo_id = get_largest_photo(message.photo)
    # Insert raw post to database
    post = Post(text=job_text, image_id=photo_id)
    db.session.add(post)
    db.session.commit()
    _send_job_post(post, update)
    return ConversationHandler.END
def _send_job_post(post: Post, update: Update):
    """Render (or re-render) the admin editing card for *post*.

    The card shows the post text/photo plus inline buttons to pick a city,
    pick a position, publish, or delete. Once the post has been sent, the
    buttons collapse into a single inert status button. When invoked from a
    callback query the existing message is edited in place; otherwise a new
    reply message is sent.
    """
    # build reply text
    buttons = []
    city = City.query.get(post.city_id) if post.city_id is not None else None
    position = Position.query.get(post.position_id) if post.position_id is not None else None
    city_text = f'Змінити місто ({city.name})' if city else 'Додати місто 🏙️'
    button = InlineKeyboardButton(text=city_text, callback_data=f'post.{post.id}.city.page')
    buttons.append([button])
    position_text = f'Змінити категорію ({position.name})' if position else 'Додати категорію 🤖'
    button = InlineKeyboardButton(text=position_text, callback_data=f'post.{post.id}.position.page')
    buttons.append([button])
    button = InlineKeyboardButton(text='Опублікувати 📨️', callback_data=f'post.{post.id}.publish')
    buttons.append([button])
    button = InlineKeyboardButton(text='Видалити ❌', callback_data=f'post.{post.id}.delete')
    buttons.append([button])
    message_text = post.text
    if post.is_sent:
        # Already published: replace all controls with one no-op status button.
        values = '/'.join(i.name for i in [city, position] if i)
        text = 'Відправлено 🎉 {}'.format(f'({values})' if values else '')
        button = InlineKeyboardButton(text=text, callback_data=f'post.{post.id}.none')
        buttons = [[button]]
    markup = InlineKeyboardMarkup(buttons, resize_keyboard=True)
    message: Message = update.message or update.callback_query.message
    if update.callback_query:
        # Editing an existing card: photo posts carry text in the caption.
        if post.image_id:
            message.edit_caption(
                caption=message_text,
                reply_markup=markup,
                parse_mode="Markdown",
            )
        else:
            message.edit_text(
                text=message_text,
                reply_markup=markup,
                parse_mode="Markdown",
            )
        return
    if post.image_id is not None:
        message.reply_photo(
            photo=post.image_id,
            caption=message_text,
            parse_mode="Markdown",
            reply_markup=markup,
        )
    else:
        message.reply_text(
            text=message_text,
            parse_mode="Markdown",
            reply_markup=markup,
        )
def city_page(update: Update, context: CallbackContext):
    """Show the first page of the city picker on the post's card."""
    post_id = _get_post_id(update)
    post = Post.query.get(post_id)
    text = (
        "Вкажіть місто публікації вакансії, для цього оберіть один і "
        "з варіантів зі списку нижче. Використовуйте кнопки ⬅️️ та ➡️ для "
        "навігації між сторінками списку"
    )
    reply_markup = get_cities_keyboard(prefix=f'post.{post_id}.city')
    # Photo posts carry their text in the caption, so edit that instead.
    if post.image_id:
        update.callback_query.message.edit_caption(caption=text, reply_markup=reply_markup)
    else:
        update.callback_query.message.edit_text(text=text, reply_markup=reply_markup)
def city_navigate(update: Update, context: CallbackContext):
    """Flip the city picker to the previous/next page for this post."""
    prefix = 'post.{}.city'.format(_get_post_id(update))
    return update_list_page(update, prefix=prefix, func=get_cities_keyboard)
def city_choose(update: Update, context: CallbackContext):
    """Persist the city picked on the post's card, then re-render the card."""
    query: CallbackQuery = update.callback_query
    query.answer()
    _, post_id, _, picked_city = query.data.strip().split('.')
    post = Post.query.get(post_id)
    post.city_id = picked_city
    db.session.commit()
    _send_job_post(post, update)
def position_page(update: Update, context: CallbackContext):
    """Show the first page of the position picker on the post's card."""
    post_id = _get_post_id(update)
    post = Post.query.get(post_id)
    text = (
        "Оберіть одну з категорій для насилання вакансії, для цього оберіть "
        "один із варіантів зі списку нижче. Використовуйте кнопки ⬅️️ та ➡️ для "
        "навігації між сторінками списку."
    )
    reply_markup = get_positions_keyboard(prefix=f'post.{post_id}.position')
    # Photo posts carry their text in the caption, so edit that instead.
    if post.image_id:
        update.callback_query.message.edit_caption(caption=text, reply_markup=reply_markup)
    else:
        update.callback_query.message.edit_text(text=text, reply_markup=reply_markup)
def position_navigate(update: Update, context: CallbackContext):
    """Flip the position picker to the previous/next page for this post."""
    prefix = 'post.{}.position'.format(_get_post_id(update))
    return update_list_page(update, prefix=prefix, func=get_positions_keyboard)
def position_choose(update: Update, context: CallbackContext):
    """Persist the position picked on the post's card, then re-render the card."""
    query: CallbackQuery = update.callback_query
    query.answer()
    _, post_id, _, picked_position = query.data.strip().split('.')
    post = Post.query.get(post_id)
    post.position_id = picked_position
    db.session.commit()
    _send_job_post(post, update)
def delete_post(update: Update, context: CallbackContext):
    """Remove the draft post from the database and delete its card message."""
    doomed = Post.query.get(_get_post_id(update))
    db.session.delete(doomed)
    db.session.commit()
    update.callback_query.message.delete()
def publish_post(update: Update, context: CallbackContext):
    """Broadcast the post to subscribers, showing progress, then re-render the card."""
    post_id = _get_post_id(update)
    post = Post.query.get(post_id)
    text = 'Відправляємо повідомлення ⌛'
    # Temporary "sending…" state while the broadcast runs.
    if post.image_id:
        update.callback_query.message.edit_caption(caption=text)
    else:
        update.callback_query.edit_message_text(text)
    send_post(post)
    _send_job_post(post, update)
def cancel_create_post(update: Update, context: CallbackContext):
    """Abort the post-creation conversation."""
    update.message.reply_text("Гаразд, ви відхили створення повідомлення")
    return ConversationHandler.END
def print_bad_query(update: Update, context: CallbackContext):
    """Acknowledge an otherwise-unhandled callback query.

    NOTE(review): duplicates _empty_callback and is not registered in
    add_admin_handlers — confirm external callers before removing.
    """
    update.callback_query.answer()
def add_admin_handlers(dp: Dispatcher):
dp.add_handler(
ConversationHandler(
entry_points=[
MenuStringHandler(Menu.greeting, get_greeting),
CommandHandler('greeting', get_greeting)
],
states={
SET_GREETING: [
MenuStringHandler(Menu, cancel_update_greeting),
CommandHandler('cancel', cancel_update_greeting),
MessageHandler(Filters.command, cancel_update_greeting),
MessageHandler(Filters.text, update_greeting)
],
},
fallbacks=[AnyHandler(greeting_fallback)],
allow_reentry=True,
),
group=1,
)
dp.add_handler(MenuStringHandler(Menu.stat, get_statistic), group=DEFAULT_GROUP)
dp.add_handler(CommandHandler('stat', get_statistic), group=DEFAULT_GROUP)
dp.add_handler(
ConversationHandler(
entry_points=[
MenuStringHandler(Menu.post, create_job),
CommandHandler('post', create_job),
],
states={
CREATE_JOB: [
MenuStringHandler(Menu, cancel_create_post),
CommandHandler('cancel', cancel_create_post),
MessageHandler(Filters.command, cancel_create_post),
MessageHandler(Filters.text | Filters.photo, save_post),
],
},
fallbacks=[AnyHandler(post_fallback)],
allow_reentry=True,
),
group=2,
)
dp.add_handler(CallbackQueryHandler(city_page, pattern=r'post\.\d+\.city\.page'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(city_navigate, pattern=r'post\.\d+\.city\.(prev|next)\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(city_choose, pattern=r'post\.\d+\.city\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(position_page, pattern=r'post\.\d+\.position\.page'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(position_navigate, pattern=r'post\.\d+\.position\.(prev|next)\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(position_choose, pattern=r'post\.\d+\.position\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(delete_post, pattern=r'post\.\d+\.delete'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(publish_post, pattern=r'post\.\d+\.publish'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(_empty_callback, pattern=r'post\.\d+\.none'), group=DEFAULT_GROUP) | app/handlers/admin.py | import functools
from typing import Callable, Any
from telegram import Update, Message, InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
from telegram.ext import (
Dispatcher,
ConversationHandler,
CommandHandler,
MessageHandler,
Filters,
CallbackQueryHandler,
CallbackContext,
)
from app import db, bot, app
from app.contants import DEFAULT_GREETING, HOST, DEFAULT_GROUP
from app.enum import Menu
from app.models import Greeting, Post, City, Position, Subscription, utc_now, UserChat
from app.utils import update_list_page, get_cities_keyboard, get_positions_keyboard, AnyHandler, get_largest_photo, \
MenuStringHandler
HandlerFunction = Callable[[Update, CallbackContext], Any]
SET_GREETING = 'greeting'
CREATE_JOB = 'create_job'
SEND_PHOTO = 'send_photo'
def _empty_callback(update: Update, context: CallbackContext):
update.callback_query.answer()
def admin_required(handler: HandlerFunction) -> HandlerFunction:
""" Decorator for protecting admin handlers from unnecessary access """
@functools.wraps(handler)
def wrapper(update: Update, context: CallbackContext):
message: Message = update.message or update.callback_query.message
chat_id = message.chat_id
chat = UserChat.query.get(chat_id)
if not chat.is_admin:
app.logger.info('Access denied to admin handler')
return
return handler(update, context)
return wrapper
def send_post(post: Post):
query = db.session.query(Subscription).distinct(Subscription.chat_id)
if post.city_id is not None:
query = query.filter(Subscription.city_id == post.city_id)
if post.position_id is not None:
query = query.filter(Subscription.position_id == post.position_id)
items = query.all()
for subscription in items:
if post.image_id:
bot.send_photo(
chat_id=subscription.chat_id,
photo=post.image_id,
caption=post.text,
parse_mode="Markdown",
disable_web_page_preview=False,
)
else:
bot.send_message(
chat_id=subscription.chat_id,
text=post.text,
parse_mode='Markdown',
disable_web_page_preview=False,
)
post.date_sent = utc_now()
db.session.commit()
return items
def _get_post_id(update: Update):
callback_query: CallbackQuery = update.callback_query
callback_query.answer()
_, post_id, *_ = callback_query.data.strip().split('.')
return post_id
@admin_required
def get_greeting(update: Update, context: CallbackContext):
greeting = Greeting.query.get(1)
text = greeting.text if greeting else DEFAULT_GREETING
update.message.reply_text(
"Напиши мені текст, яким я буду вітатися з новими користувачами. Якщо усе "
"супер, то введи команду /cancel\n\n"
"Зараз я вітаюся таким повідомленням:\n"
f"{text}",
parse_mode="Markdown"
)
return SET_GREETING
def update_greeting(update: Update, context: CallbackContext):
message: Message = update.message
Greeting.set_text(text=message.text)
update.message.reply_text(
"Вітання змінено! Якщо захочеш змінити привітання, "
"введи команду /greeting знову",
parse_mode="Markdown",
)
return ConversationHandler.END
def greeting_fallback(update: Update, context: CallbackContext):
update.message.reply_text(
text="Введіть, будь ласка, текст для привітання",
parse_mode="Markdown",
)
def cancel_update_greeting(update: Update, context: CallbackContext):
update.message.reply_text(
text=(
"Ви залишили старе привітання, якщо захочеш змінити "
"привітання, введи команду /greeting знову"
),
parse_mode="Markdown",
)
return ConversationHandler.END
@admin_required
def get_statistic(update: Update, context: CallbackContext):
update.message.reply_text(
text=(
f"Посилання з даними:\n\n"
f"Користувачі: {HOST}/users \n\n"
f"Дії: {HOST}/actions \n\n"
f"Підписки: {HOST}/subscriptions \n\n"
),
parse_mode="Markdown",
disable_web_page_preview=True,
)
@admin_required
def create_job(update: Update, context: CallbackContext):
update.message.reply_text(
"Напиши текст-повідомлення, який хочеш розіслати користувачам. "
"Я не надсилатиму сповіщення одразу, а лише тоді, коли натиснеш "
"кнопку 'Опублікувати'. Для повідомлення використовується Markdown.\n\n"
"Щоб додати зображення до тексту, пиши посилання на це зображення у "
"такому форматі: [.](https://picsum.photos/id/501/536/354)",
parse_mode="Markdown",
disable_web_page_preview=True,
)
return CREATE_JOB
def post_fallback(update: Update, context: CallbackContext):
message: Message = update.message or update.callback_query.message
message.reply_text("Введіть, будь ласка, текст. Зображення ви можете прикріпити пізніше.")
def save_post(update: Update, context: CallbackContext):
message: Message = update.message
job_text: str = message.text or message.caption or ''
photo_id = get_largest_photo(message.photo)
# Insert raw post to database
post = Post(text=job_text, image_id=photo_id)
db.session.add(post)
db.session.commit()
_send_job_post(post, update)
return ConversationHandler.END
def _send_job_post(post: Post, update: Update):
# build reply text
buttons = []
city = City.query.get(post.city_id) if post.city_id is not None else None
position = Position.query.get(post.position_id) if post.position_id is not None else None
city_text = f'Змінити місто ({city.name})' if city else 'Додати місто 🏙️'
button = InlineKeyboardButton(text=city_text, callback_data=f'post.{post.id}.city.page')
buttons.append([button])
position_text = f'Змінити категорію ({position.name})' if position else 'Додати категорію 🤖'
button = InlineKeyboardButton(text=position_text, callback_data=f'post.{post.id}.position.page')
buttons.append([button])
button = InlineKeyboardButton(text='Опублікувати 📨️', callback_data=f'post.{post.id}.publish')
buttons.append([button])
button = InlineKeyboardButton(text='Видалити ❌', callback_data=f'post.{post.id}.delete')
buttons.append([button])
message_text = post.text
if post.is_sent:
values = '/'.join(i.name for i in [city, position] if i)
text = 'Відправлено 🎉 {}'.format(f'({values})' if values else '')
button = InlineKeyboardButton(text=text, callback_data=f'post.{post.id}.none')
buttons = [[button]]
markup = InlineKeyboardMarkup(buttons, resize_keyboard=True)
message: Message = update.message or update.callback_query.message
if update.callback_query:
if post.image_id:
message.edit_caption(
caption=message_text,
reply_markup=markup,
parse_mode="Markdown",
)
else:
message.edit_text(
text=message_text,
reply_markup=markup,
parse_mode="Markdown",
)
return
if post.image_id is not None:
message.reply_photo(
photo=post.image_id,
caption=message_text,
parse_mode="Markdown",
reply_markup=markup,
)
else:
message.reply_text(
text=message_text,
parse_mode="Markdown",
reply_markup=markup,
)
def city_page(update: Update, context: CallbackContext):
post_id = _get_post_id(update)
post = Post.query.get(post_id)
text = (
"Вкажіть місто публікації вакансії, для цього оберіть один і "
"з варіантів зі списку нижче. Використовуйте кнопки ⬅️️ та ➡️ для "
"навігації між сторінками списку"
)
reply_markup = get_cities_keyboard(prefix=f'post.{post_id}.city')
if post.image_id:
update.callback_query.message.edit_caption(caption=text, reply_markup=reply_markup)
else:
update.callback_query.message.edit_text(text=text, reply_markup=reply_markup)
def city_navigate(update: Update, context: CallbackContext):
post_id = _get_post_id(update)
return update_list_page(update, prefix=f'post.{post_id}.city', func=get_cities_keyboard)
def city_choose(update: Update, context: CallbackContext):
callback_query: CallbackQuery = update.callback_query
callback_query.answer()
_, post_id, _, city_id = callback_query.data.strip().split('.')
# Save changes
post = Post.query.get(post_id)
post.city_id = city_id
db.session.commit()
_send_job_post(post, update)
def position_page(update: Update, context: CallbackContext):
post_id = _get_post_id(update)
post = Post.query.get(post_id)
text = (
"Оберіть одну з категорій для насилання вакансії, для цього оберіть "
"один із варіантів зі списку нижче. Використовуйте кнопки ⬅️️ та ➡️ для "
"навігації між сторінками списку."
)
reply_markup = get_positions_keyboard(prefix=f'post.{post_id}.position')
if post.image_id:
update.callback_query.message.edit_caption(caption=text, reply_markup=reply_markup)
else:
update.callback_query.message.edit_text(text=text, reply_markup=reply_markup)
def position_navigate(update: Update, context: CallbackContext):
post_id = _get_post_id(update)
return update_list_page(update, prefix=f'post.{post_id}.position', func=get_positions_keyboard)
def position_choose(update: Update, context: CallbackContext):
callback_query: CallbackQuery = update.callback_query
callback_query.answer()
_, post_id, _, position_id = callback_query.data.strip().split('.')
# Save changes
post = Post.query.get(post_id)
post.position_id = position_id
db.session.commit()
_send_job_post(post, update)
def delete_post(update: Update, context: CallbackContext):
post_id = _get_post_id(update)
post = Post.query.get(post_id)
db.session.delete(post)
db.session.commit()
update.callback_query.message.delete()
def publish_post(update: Update, context: CallbackContext):
post_id = _get_post_id(update)
post = Post.query.get(post_id)
text = 'Відправляємо повідомлення ⌛'
if post.image_id:
update.callback_query.message.edit_caption(caption=text)
else:
update.callback_query.edit_message_text(text)
send_post(post)
_send_job_post(post, update)
def cancel_create_post(update: Update, context: CallbackContext):
update.message.reply_text("Гаразд, ви відхили створення повідомлення")
return ConversationHandler.END
def print_bad_query(update: Update, context: CallbackContext):
update.callback_query.answer()
def add_admin_handlers(dp: Dispatcher):
dp.add_handler(
ConversationHandler(
entry_points=[
MenuStringHandler(Menu.greeting, get_greeting),
CommandHandler('greeting', get_greeting)
],
states={
SET_GREETING: [
MenuStringHandler(Menu, cancel_update_greeting),
CommandHandler('cancel', cancel_update_greeting),
MessageHandler(Filters.command, cancel_update_greeting),
MessageHandler(Filters.text, update_greeting)
],
},
fallbacks=[AnyHandler(greeting_fallback)],
allow_reentry=True,
),
group=1,
)
dp.add_handler(MenuStringHandler(Menu.stat, get_statistic), group=DEFAULT_GROUP)
dp.add_handler(CommandHandler('stat', get_statistic), group=DEFAULT_GROUP)
dp.add_handler(
ConversationHandler(
entry_points=[
MenuStringHandler(Menu.post, create_job),
CommandHandler('post', create_job),
],
states={
CREATE_JOB: [
MenuStringHandler(Menu, cancel_create_post),
CommandHandler('cancel', cancel_create_post),
MessageHandler(Filters.command, cancel_create_post),
MessageHandler(Filters.text | Filters.photo, save_post),
],
},
fallbacks=[AnyHandler(post_fallback)],
allow_reentry=True,
),
group=2,
)
dp.add_handler(CallbackQueryHandler(city_page, pattern=r'post\.\d+\.city\.page'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(city_navigate, pattern=r'post\.\d+\.city\.(prev|next)\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(city_choose, pattern=r'post\.\d+\.city\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(position_page, pattern=r'post\.\d+\.position\.page'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(position_navigate, pattern=r'post\.\d+\.position\.(prev|next)\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(position_choose, pattern=r'post\.\d+\.position\.\d+'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(delete_post, pattern=r'post\.\d+\.delete'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(publish_post, pattern=r'post\.\d+\.publish'), group=DEFAULT_GROUP)
dp.add_handler(CallbackQueryHandler(_empty_callback, pattern=r'post\.\d+\.none'), group=DEFAULT_GROUP) | 0.563018 | 0.05902 |
from __future__ import absolute_import
from sentry.models import Environment, OrganizationMember, OrganizationMemberTeam, Project, Release, ReleaseProject, ReleaseProjectEnvironment, Rule
from sentry.testutils import TestCase
class ProjectTest(TestCase):
def test_member_set_simple(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(teams=[team])
member = OrganizationMember.objects.get(
user=user,
organization=org,
)
OrganizationMemberTeam.objects.create(
organizationmember=member,
team=team,
)
assert list(project.member_set.all()) == [member]
def test_inactive_global_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(teams=[team])
OrganizationMember.objects.get(
user=user,
organization=org,
)
assert list(project.member_set.all()) == []
def test_transfer_to(self):
from_org = self.create_organization()
from_team = self.create_team(organization=from_org)
to_org = self.create_organization()
to_team = self.create_team(organization=to_org)
project = self.create_project(teams=[from_team])
rule = Rule.objects.create(
project=project,
environment_id=Environment.get_or_create(project, 'production').id,
label='Golden Rule',
data={},
)
project.transfer_to(to_team)
project = Project.objects.get(id=project.id)
assert project.teams.count() == 1
assert project.teams.first() == to_team
assert project.organization_id == to_org.id
updated_rule = project.rule_set.get(label='Golden Rule')
assert updated_rule.id == rule.id
assert updated_rule.environment_id != rule.environment_id
assert updated_rule.environment_id == Environment.get_or_create(project, 'production').id
def test_transfer_to_slug_collision(self):
from_org = self.create_organization()
from_team = self.create_team(organization=from_org)
project = self.create_project(teams=[from_team], slug='matt')
to_org = self.create_organization()
to_team = self.create_team(organization=to_org)
# conflicting project slug
self.create_project(teams=[to_team], slug='matt')
assert Project.objects.filter(organization=to_org).count() == 1
project.transfer_to(to_team)
project = Project.objects.get(id=project.id)
assert project.teams.count() == 1
assert project.teams.first() == to_team
assert project.organization_id == to_org.id
assert project.slug != 'matt'
assert Project.objects.filter(organization=to_org).count() == 2
assert Project.objects.filter(organization=from_org).count() == 0
def test_transfer_to_releases(self):
from_org = self.create_organization()
from_team = self.create_team(organization=from_org)
to_org = self.create_organization()
to_team = self.create_team(organization=to_org)
project = self.create_project(teams=[from_team])
environment = Environment.get_or_create(project, 'production')
release = Release.get_or_create(project=project, version='1.0')
ReleaseProjectEnvironment.objects.create(
project=project,
release=release,
environment=environment,
)
assert ReleaseProjectEnvironment.objects.filter(
project=project,
release=release,
environment=environment,
).exists()
assert ReleaseProject.objects.filter(
project=project,
release=release,
).exists()
project.transfer_to(to_team)
project = Project.objects.get(id=project.id)
assert project.teams.count() == 1
assert project.teams.first() == to_team
assert project.organization_id == to_org.id
assert not ReleaseProjectEnvironment.objects.filter(
project=project,
release=release,
environment=environment,
).exists()
assert not ReleaseProject.objects.filter(
project=project,
release=release,
).exists() | tests/sentry/models/test_project.py |
from __future__ import absolute_import
from sentry.models import Environment, OrganizationMember, OrganizationMemberTeam, Project, Release, ReleaseProject, ReleaseProjectEnvironment, Rule
from sentry.testutils import TestCase
class ProjectTest(TestCase):
def test_member_set_simple(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(teams=[team])
member = OrganizationMember.objects.get(
user=user,
organization=org,
)
OrganizationMemberTeam.objects.create(
organizationmember=member,
team=team,
)
assert list(project.member_set.all()) == [member]
def test_inactive_global_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(teams=[team])
OrganizationMember.objects.get(
user=user,
organization=org,
)
assert list(project.member_set.all()) == []
def test_transfer_to(self):
from_org = self.create_organization()
from_team = self.create_team(organization=from_org)
to_org = self.create_organization()
to_team = self.create_team(organization=to_org)
project = self.create_project(teams=[from_team])
rule = Rule.objects.create(
project=project,
environment_id=Environment.get_or_create(project, 'production').id,
label='Golden Rule',
data={},
)
project.transfer_to(to_team)
project = Project.objects.get(id=project.id)
assert project.teams.count() == 1
assert project.teams.first() == to_team
assert project.organization_id == to_org.id
updated_rule = project.rule_set.get(label='Golden Rule')
assert updated_rule.id == rule.id
assert updated_rule.environment_id != rule.environment_id
assert updated_rule.environment_id == Environment.get_or_create(project, 'production').id
def test_transfer_to_slug_collision(self):
from_org = self.create_organization()
from_team = self.create_team(organization=from_org)
project = self.create_project(teams=[from_team], slug='matt')
to_org = self.create_organization()
to_team = self.create_team(organization=to_org)
# conflicting project slug
self.create_project(teams=[to_team], slug='matt')
assert Project.objects.filter(organization=to_org).count() == 1
project.transfer_to(to_team)
project = Project.objects.get(id=project.id)
assert project.teams.count() == 1
assert project.teams.first() == to_team
assert project.organization_id == to_org.id
assert project.slug != 'matt'
assert Project.objects.filter(organization=to_org).count() == 2
assert Project.objects.filter(organization=from_org).count() == 0
def test_transfer_to_releases(self):
from_org = self.create_organization()
from_team = self.create_team(organization=from_org)
to_org = self.create_organization()
to_team = self.create_team(organization=to_org)
project = self.create_project(teams=[from_team])
environment = Environment.get_or_create(project, 'production')
release = Release.get_or_create(project=project, version='1.0')
ReleaseProjectEnvironment.objects.create(
project=project,
release=release,
environment=environment,
)
assert ReleaseProjectEnvironment.objects.filter(
project=project,
release=release,
environment=environment,
).exists()
assert ReleaseProject.objects.filter(
project=project,
release=release,
).exists()
project.transfer_to(to_team)
project = Project.objects.get(id=project.id)
assert project.teams.count() == 1
assert project.teams.first() == to_team
assert project.organization_id == to_org.id
assert not ReleaseProjectEnvironment.objects.filter(
project=project,
release=release,
environment=environment,
).exists()
assert not ReleaseProject.objects.filter(
project=project,
release=release,
).exists() | 0.746971 | 0.390418 |
import collections
import contextlib
import typing
from typing import Any, Union, Tuple, List
from typo.utils import type_name
class Codegen:
_v_cache_seq = {list: True, tuple: True, str: True, bytes: True,
bytearray: True, memoryview: True}
_v_cache_mut_seq = {list: True}
def __init__(self, typevars=None):
# TODO: accept list of handlers, build the set of typevars here
self.lines = []
self.indent_level = 0
self.next_var_id = 0
self.next_type_id = 0
self.types = {}
self.typevars = sorted(typevars or [], key=str)
# TODO: all names injected through context should start with underscore
self.context = {
'collections': collections,
'typing': typing,
'rt_fail': self.rt_fail,
'rt_type_fail': self.rt_type_fail,
'rt_fail_msg': self.rt_fail_msg,
'v_cache_seq': self._v_cache_seq,
'v_cache_mut_seq': self._v_cache_mut_seq,
}
for i, tv in enumerate(self.typevars):
if tv.__constraints__:
self.context['constraints_{}'.format(i)] = tv.__constraints__
def typevar_id(self, typevar):
return self.typevars.index(typevar)
def init_typevars(self):
self.write_line('tv = [{!r}]'.format([None] * len(self.typevars)))
def compile(self, name):
context = self.context.copy()
exec(str(self), context)
return context[name]
@staticmethod
def rt_fail(desc: str, expected: str, var: Any, got: str, **kwargs):
raise TypeError('invalid {}: expected {}, got {}'
.format(desc.format(**kwargs), expected, got.format(**kwargs)))
@staticmethod
def rt_type_fail(desc: str, expected: str, var: Any, **kwargs):
raise TypeError('invalid {}: expected {}, got {}'
.format(desc.format(**kwargs), expected, type_name(type(var))))
@staticmethod
def rt_fail_msg(desc: str, msg: str, var: Any, **kwargs):
raise TypeError('invalid {}: {}'.format(desc.format(**kwargs),
msg.format(tp=type_name(type(var)), **kwargs)))
def write_line(self, line):
self.lines.append(' ' * self.indent_level * 4 + line)
@contextlib.contextmanager
def indent(self):
self.indent_level += 1
yield
self.indent_level -= 1
def new_var(self):
varname = 'v_{:03d}'.format(self.next_var_id)
self.next_var_id += 1
return varname
def new_vars(self, n):
return tuple(self.new_var() for _ in range(n))
def ref_type(self, tp):
if tp.__module__ == 'builtins':
return tp.__name__
elif tp.__module__ == 'collections.abc':
return 'collections.' + tp.__name__
elif tp.__module__ == 'typing':
return 'typing.' + tp.__name__
elif tp not in self.types:
varname = 'T_{}'.format(self.next_type_id)
self.next_type_id += 1
self.types[tp] = varname
self.context[varname] = tp
return self.types[tp]
def fail(self, desc: str, expected: str, varname: str, got: str=None):
if desc is None:
self.write_line('raise TypeError')
elif got is None:
self.write_line('rt_type_fail("{}", "{}", {}, **locals())'
.format(desc, expected, varname))
else:
self.write_line('rt_fail("{}", "{}", {}, "{}", **locals())'
.format(desc, expected, varname, got))
def fail_msg(self, desc: str, msg: str, varname: str):
if desc is None:
self.write_line('raise TypeError')
else:
self.write_line('rt_fail_msg("{}", "{}", {}, **locals())'
.format(desc, msg, varname))
def if_not_isinstance(self, varname: str, tp: Union[type, Tuple[type, ...]]) -> None:
if isinstance(tp, tuple):
if len(tp) == 1:
tp = self.ref_type(tp[0])
else:
tp = '({})'.format(', '.join(map(self.ref_type, tp)))
else:
tp = self.ref_type(tp)
self.write_line('if not isinstance({}, {}):'.format(varname, tp))
def check_type(self, varname: str, desc: str, tp: Union[Tuple[type, ...], type]):
if isinstance(tp, tuple):
if len(tp) == 1:
expected = type_name(tp)
else:
expected = ' or '.join(map(type_name, tp))
else:
expected = type_name(tp)
self.if_not_isinstance(varname, tp)
with self.indent():
self.fail(desc, expected, varname)
def iter_and_check(self, varname: str, desc: str,
handler: 'typo.handlers.Handler') -> None:
var_v = self.new_var()
self.write_line('for {} in {}:'.format(var_v, varname))
with self.indent():
handler(self, var_v, None if desc is None else
'item of {}'.format(desc))
def enumerate_and_check(self, varname: str, desc: str,
handler: 'typo.handlers.Handler') -> None:
var_i, var_v = self.new_var(), self.new_var()
self.write_line('for {}, {} in enumerate({}):'.format(var_i, var_v, varname))
with self.indent():
handler(self, var_v, None if desc is None else
'item #{{{}}} of {}'.format(var_i, desc))
def check_attrs_cached(self, varname: str, desc: str, expected: str,
cache: str, attrs: List[str]) -> None:
var_t = self.new_var()
self.write_line('{} = type({})'.format(var_t, varname))
self.write_line('if {} in {}:'.format(var_t, cache))
var_a = self.new_var()
with self.indent():
self.write_line('{} = {}[{}]'.format(var_a, cache, var_t))
self.write_line('else:')
with self.indent():
conds = ['hasattr({}, "{}")'.format(varname, attr) for attr in attrs]
self.write_line('{} = {}'.format(var_a, ' and '.join(conds)))
self.write_line('{}[{}] = {}'.format(cache, var_t, var_a))
self.write_line('if not {}:'.format(var_a))
with self.indent():
self.fail(desc, expected, varname)
def __str__(self):
return '\n'.join(self.lines) + '\n' | typo/codegen.py |
import collections
import contextlib
import typing
from typing import Any, Union, Tuple, List
from typo.utils import type_name
class Codegen:
_v_cache_seq = {list: True, tuple: True, str: True, bytes: True,
bytearray: True, memoryview: True}
_v_cache_mut_seq = {list: True}
def __init__(self, typevars=None):
# TODO: accept list of handlers, build the set of typevars here
self.lines = []
self.indent_level = 0
self.next_var_id = 0
self.next_type_id = 0
self.types = {}
self.typevars = sorted(typevars or [], key=str)
# TODO: all names injected through context should start with underscore
self.context = {
'collections': collections,
'typing': typing,
'rt_fail': self.rt_fail,
'rt_type_fail': self.rt_type_fail,
'rt_fail_msg': self.rt_fail_msg,
'v_cache_seq': self._v_cache_seq,
'v_cache_mut_seq': self._v_cache_mut_seq,
}
for i, tv in enumerate(self.typevars):
if tv.__constraints__:
self.context['constraints_{}'.format(i)] = tv.__constraints__
def typevar_id(self, typevar):
return self.typevars.index(typevar)
def init_typevars(self):
self.write_line('tv = [{!r}]'.format([None] * len(self.typevars)))
def compile(self, name):
context = self.context.copy()
exec(str(self), context)
return context[name]
@staticmethod
def rt_fail(desc: str, expected: str, var: Any, got: str, **kwargs):
raise TypeError('invalid {}: expected {}, got {}'
.format(desc.format(**kwargs), expected, got.format(**kwargs)))
@staticmethod
def rt_type_fail(desc: str, expected: str, var: Any, **kwargs):
raise TypeError('invalid {}: expected {}, got {}'
.format(desc.format(**kwargs), expected, type_name(type(var))))
@staticmethod
def rt_fail_msg(desc: str, msg: str, var: Any, **kwargs):
raise TypeError('invalid {}: {}'.format(desc.format(**kwargs),
msg.format(tp=type_name(type(var)), **kwargs)))
def write_line(self, line):
self.lines.append(' ' * self.indent_level * 4 + line)
@contextlib.contextmanager
def indent(self):
self.indent_level += 1
yield
self.indent_level -= 1
def new_var(self):
varname = 'v_{:03d}'.format(self.next_var_id)
self.next_var_id += 1
return varname
def new_vars(self, n):
return tuple(self.new_var() for _ in range(n))
def ref_type(self, tp):
if tp.__module__ == 'builtins':
return tp.__name__
elif tp.__module__ == 'collections.abc':
return 'collections.' + tp.__name__
elif tp.__module__ == 'typing':
return 'typing.' + tp.__name__
elif tp not in self.types:
varname = 'T_{}'.format(self.next_type_id)
self.next_type_id += 1
self.types[tp] = varname
self.context[varname] = tp
return self.types[tp]
def fail(self, desc: str, expected: str, varname: str, got: str=None):
if desc is None:
self.write_line('raise TypeError')
elif got is None:
self.write_line('rt_type_fail("{}", "{}", {}, **locals())'
.format(desc, expected, varname))
else:
self.write_line('rt_fail("{}", "{}", {}, "{}", **locals())'
.format(desc, expected, varname, got))
def fail_msg(self, desc: str, msg: str, varname: str):
if desc is None:
self.write_line('raise TypeError')
else:
self.write_line('rt_fail_msg("{}", "{}", {}, **locals())'
.format(desc, msg, varname))
def if_not_isinstance(self, varname: str, tp: Union[type, Tuple[type, ...]]) -> None:
if isinstance(tp, tuple):
if len(tp) == 1:
tp = self.ref_type(tp[0])
else:
tp = '({})'.format(', '.join(map(self.ref_type, tp)))
else:
tp = self.ref_type(tp)
self.write_line('if not isinstance({}, {}):'.format(varname, tp))
def check_type(self, varname: str, desc: str, tp: Union[Tuple[type, ...], type]):
if isinstance(tp, tuple):
if len(tp) == 1:
expected = type_name(tp)
else:
expected = ' or '.join(map(type_name, tp))
else:
expected = type_name(tp)
self.if_not_isinstance(varname, tp)
with self.indent():
self.fail(desc, expected, varname)
def iter_and_check(self, varname: str, desc: str,
handler: 'typo.handlers.Handler') -> None:
var_v = self.new_var()
self.write_line('for {} in {}:'.format(var_v, varname))
with self.indent():
handler(self, var_v, None if desc is None else
'item of {}'.format(desc))
def enumerate_and_check(self, varname: str, desc: str,
handler: 'typo.handlers.Handler') -> None:
var_i, var_v = self.new_var(), self.new_var()
self.write_line('for {}, {} in enumerate({}):'.format(var_i, var_v, varname))
with self.indent():
handler(self, var_v, None if desc is None else
'item #{{{}}} of {}'.format(var_i, desc))
def check_attrs_cached(self, varname: str, desc: str, expected: str,
cache: str, attrs: List[str]) -> None:
var_t = self.new_var()
self.write_line('{} = type({})'.format(var_t, varname))
self.write_line('if {} in {}:'.format(var_t, cache))
var_a = self.new_var()
with self.indent():
self.write_line('{} = {}[{}]'.format(var_a, cache, var_t))
self.write_line('else:')
with self.indent():
conds = ['hasattr({}, "{}")'.format(varname, attr) for attr in attrs]
self.write_line('{} = {}'.format(var_a, ' and '.join(conds)))
self.write_line('{}[{}] = {}'.format(cache, var_t, var_a))
self.write_line('if not {}:'.format(var_a))
with self.indent():
self.fail(desc, expected, varname)
def __str__(self):
return '\n'.join(self.lines) + '\n' | 0.460774 | 0.129375 |
from __future__ import print_function, division
from sympy.matrices.dense import MutableDenseMatrix
from sympy.utilities.iterables import flatten, numbered_symbols
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy import S
class NewMatrix(MutableDenseMatrix):
"""
Supports elements which can't be Sympified.
See docstrings in sympy/matrices/matrices.py
"""
@staticmethod
def _sympify(a):
return a
def row_join(self, rhs):
from sympy.matrices import MutableMatrix
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(rhs)
if self.rows != rhs.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
newmat = NewMatrix.zeros(self.rows, self.cols + rhs.cols)
newmat[:, :self.cols] = self
newmat[:, self.cols:] = rhs
return type(self)(newmat)
def col_join(self, bott):
from sympy.matrices import MutableMatrix
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(bott)
if self.cols != bott.cols:
raise ShapeError(
"`self` and `bott` must have the same number of columns.")
newmat = NewMatrix.zeros(self.rows + bott.rows, self.cols)
newmat[:self.rows, :] = self
newmat[self.rows:, :] = bott
return type(self)(newmat)
def gauss_jordan_solve(self, b, freevar=False):
from sympy.matrices import Matrix, zeros
aug = self.hstack(self.copy(), b.copy())
row, col = aug[:, :-1].shape
# solve by reduced row echelon form
A, pivots = aug.rref()
A, v = A[:, :-1], A[:, -1]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Bring to block form
permutation = Matrix(range(col)).T
A = A.vstack(A, permutation)
for i, c in enumerate(pivots):
A.col_swap(i, c)
A, permutation = A[:-1, :], A[-1, :]
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, 0].is_zero:
raise ValueError("Linear system has no solution")
# Get index of free symbols (free parameters)
free_var_index = permutation[len(pivots):] # non-pivots columns are free variables
# Free parameters
tau = NewMatrix([S(1) for k in range(col - rank)]).reshape(col - rank, 1)
# Full parametric solution
V = A[:rank, rank:]
vt = v[:rank, 0]
free_sol = tau.vstack(vt - V*tau, tau)
# Undo permutation
sol = NewMatrix.zeros(col, 1)
for k, v in enumerate(free_sol):
sol[permutation[k], 0] = v
if freevar:
return sol, tau, free_var_index
else:
return sol, tau | sympy/holonomic/linearsolver.py |
from __future__ import print_function, division
from sympy.matrices.dense import MutableDenseMatrix
from sympy.utilities.iterables import flatten, numbered_symbols
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy import S
class NewMatrix(MutableDenseMatrix):
"""
Supports elements which can't be Sympified.
See docstrings in sympy/matrices/matrices.py
"""
@staticmethod
def _sympify(a):
return a
def row_join(self, rhs):
from sympy.matrices import MutableMatrix
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(rhs)
if self.rows != rhs.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
newmat = NewMatrix.zeros(self.rows, self.cols + rhs.cols)
newmat[:, :self.cols] = self
newmat[:, self.cols:] = rhs
return type(self)(newmat)
def col_join(self, bott):
from sympy.matrices import MutableMatrix
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(bott)
if self.cols != bott.cols:
raise ShapeError(
"`self` and `bott` must have the same number of columns.")
newmat = NewMatrix.zeros(self.rows + bott.rows, self.cols)
newmat[:self.rows, :] = self
newmat[self.rows:, :] = bott
return type(self)(newmat)
def gauss_jordan_solve(self, b, freevar=False):
from sympy.matrices import Matrix, zeros
aug = self.hstack(self.copy(), b.copy())
row, col = aug[:, :-1].shape
# solve by reduced row echelon form
A, pivots = aug.rref()
A, v = A[:, :-1], A[:, -1]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Bring to block form
permutation = Matrix(range(col)).T
A = A.vstack(A, permutation)
for i, c in enumerate(pivots):
A.col_swap(i, c)
A, permutation = A[:-1, :], A[-1, :]
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, 0].is_zero:
raise ValueError("Linear system has no solution")
# Get index of free symbols (free parameters)
free_var_index = permutation[len(pivots):] # non-pivots columns are free variables
# Free parameters
tau = NewMatrix([S(1) for k in range(col - rank)]).reshape(col - rank, 1)
# Full parametric solution
V = A[:rank, rank:]
vt = v[:rank, 0]
free_sol = tau.vstack(vt - V*tau, tau)
# Undo permutation
sol = NewMatrix.zeros(col, 1)
for k, v in enumerate(free_sol):
sol[permutation[k], 0] = v
if freevar:
return sol, tau, free_var_index
else:
return sol, tau | 0.83545 | 0.464598 |
import csv
import gc
import numpy as np
from pathlib import Path
from torch.utils.data import Dataset
from pytorch_pretrained_bert.tokenization import BertTokenizer
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeature(object):
def __init__(self,input_ids,input_mask,segment_ids,label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class CreateDataset(Dataset):
def __init__(self,data,max_seq_len,tokenizer,example_type,seed):
self.seed = seed
self.data = data
self.max_seq_len = max_seq_len
self.example_type = example_type
self.tokenizer = tokenizer
self.build_examples()
def read_data(self,data_path,quotechar = None):
lines = []
with open(data_path,'r',encoding='utf-8') as fr:
reader = csv.reader(fr,delimiter = '\t',quotechar = quotechar)
for line in reader:
lines.append(line)
return lines
def build_examples(self):
if isinstance(self.data,Path):
lines = self.read_data(data_path=self.data)
else:
lines = self.data
self.examples = []
for i,line in enumerate(lines):
guid = '%s-%d'%(self.example_type,i)
text_a = line[0]
label = line[1]
if isinstance(label,str):
label = [np.float32(x) for x in label.split(",")]
else:
label = [np.float32(x) for x in list(label)]
text_b = None
example = InputExample(guid = guid,text_a = text_a,text_b=text_b,label= label)
self.examples.append(example)
del lines
del self.data
gc.collect()
def truncate_seq_pair(self,tokens_a,tokens_b,max_length):
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def build_features(self,example):
'''
# E.g.1
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# E.g.2
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
'''
tokens_a = self.tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = self.tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
self.truncate_seq_pair(tokens_a,tokens_b,max_length = self.max_seq_len - 3)
else:
# Account for [CLS] and [SEP] with '-2'
if len(tokens_a) > self.max_seq_len - 2:
tokens_a = tokens_a[:self.max_seq_len - 2]
tokens = ['[CLS]'] + tokens_a + ['[SEP]']
segment_ids = [0] * len(tokens) # 对应type_ids
if tokens_b:
tokens += tokens_b + ['[SEP]']
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# zero padding
padding = [0] * (self.max_seq_len - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == self.max_seq_len
assert len(input_mask) == self.max_seq_len
assert len(segment_ids) == self.max_seq_len
# label
label_id = example.label
feature = InputFeature(input_ids = input_ids,input_mask = input_mask,
segment_ids = segment_ids,label_id = label_id)
return feature
def preprocess(self,index):
example = self.examples[index]
feature = self.build_features(example)
return np.array(feature.input_ids),np.array(feature.input_mask),\
np.array(feature.segment_ids),np.array(feature.label_id)
def __getitem__(self, index):
return self.preprocess(index)
def __len__(self):
return len(self.examples) | Code/HypoBertClas/pybert/io/dataset.py | import csv
import gc
import numpy as np
from pathlib import Path
from torch.utils.data import Dataset
from pytorch_pretrained_bert.tokenization import BertTokenizer
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeature(object):
def __init__(self,input_ids,input_mask,segment_ids,label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class CreateDataset(Dataset):
def __init__(self,data,max_seq_len,tokenizer,example_type,seed):
self.seed = seed
self.data = data
self.max_seq_len = max_seq_len
self.example_type = example_type
self.tokenizer = tokenizer
self.build_examples()
def read_data(self,data_path,quotechar = None):
lines = []
with open(data_path,'r',encoding='utf-8') as fr:
reader = csv.reader(fr,delimiter = '\t',quotechar = quotechar)
for line in reader:
lines.append(line)
return lines
def build_examples(self):
if isinstance(self.data,Path):
lines = self.read_data(data_path=self.data)
else:
lines = self.data
self.examples = []
for i,line in enumerate(lines):
guid = '%s-%d'%(self.example_type,i)
text_a = line[0]
label = line[1]
if isinstance(label,str):
label = [np.float32(x) for x in label.split(",")]
else:
label = [np.float32(x) for x in list(label)]
text_b = None
example = InputExample(guid = guid,text_a = text_a,text_b=text_b,label= label)
self.examples.append(example)
del lines
del self.data
gc.collect()
def truncate_seq_pair(self,tokens_a,tokens_b,max_length):
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def build_features(self,example):
'''
# E.g.1
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# E.g.2
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
'''
tokens_a = self.tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = self.tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
self.truncate_seq_pair(tokens_a,tokens_b,max_length = self.max_seq_len - 3)
else:
# Account for [CLS] and [SEP] with '-2'
if len(tokens_a) > self.max_seq_len - 2:
tokens_a = tokens_a[:self.max_seq_len - 2]
tokens = ['[CLS]'] + tokens_a + ['[SEP]']
segment_ids = [0] * len(tokens) # 对应type_ids
if tokens_b:
tokens += tokens_b + ['[SEP]']
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# zero padding
padding = [0] * (self.max_seq_len - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == self.max_seq_len
assert len(input_mask) == self.max_seq_len
assert len(segment_ids) == self.max_seq_len
# label
label_id = example.label
feature = InputFeature(input_ids = input_ids,input_mask = input_mask,
segment_ids = segment_ids,label_id = label_id)
return feature
def preprocess(self,index):
example = self.examples[index]
feature = self.build_features(example)
return np.array(feature.input_ids),np.array(feature.input_mask),\
np.array(feature.segment_ids),np.array(feature.label_id)
def __getitem__(self, index):
return self.preprocess(index)
def __len__(self):
return len(self.examples) | 0.550849 | 0.222742 |
import media
import fresh_tomatoes
# Movies to be shown in the page
ironman = media.Movie("Ironman",
"After being held captive in an Afghan cave, billionaire engineer <NAME> creates a unique weaponized suit of armor to fight evil.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTczNTI2ODUwOF5BMl5BanBnXkFtZTcwMTU0NTIzMw@@._V1_SY1000_CR0,0,674,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=8hYlB38asDY&ab_channel=TheMovieChanneI") # noqa
limitless = media.Movie("Limitless",
"With the help of a mysterious pill that enables the user to access 100 percent of his brain abilities, a struggling writer becomes a financial wizard, but it also puts him in a new world with lots of dangers.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BYmViZGM0MGItZTdiYi00ZDU4LWIxNDYtNTc1NWQ5Njc2N2YwXkEyXkFqcGdeQXVyNDk3NzU2MTQ@._V1_SY1000_CR0,0,692,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=vUkAfjfWh5g&ab_channel=eOnefilms") # noqa
lego_movie = media.Movie("The Lego Movie",
"An ordinary Lego construction worker, thought to be the prophesied 'Special', is recruited to join a quest to stop an evil tyrant from gluing the Lego universe into eternal stasis.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTg4MDk1ODExN15BMl5BanBnXkFtZTgwNzIyNjg3MDE@._V1_SY1000_CR0,0,674,1000_AL_.jpg", # noqa
"hhttps://www.youtube.com/watch?v=fZ_JOBCLF-I&ab_channel=WarnerBros.Pictures") # noqa
summer_wars = media.Movie("Summer Wars",
"A student tries to fix a problem he accidentally caused in OZ, a digital world, while pretending to be the fiance of his friend at her grandmother's 90th birthday.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTYyOTk3OTQzM15BMl5BanBnXkFtZTcwMjU4NDYyNA@@._V1_SY1000_CR0,0,681,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=HjLE8BmWfKA&ab_channel=AwesomenessDreams") # noqa
oceans_eleven = media.Movie("Ocean's Eleven",
"<NAME> and his eleven accomplices plan to rob three Las Vegas casinos simultaneously.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BYzVmYzVkMmUtOGRhMi00MTNmLThlMmUtZTljYjlkMjNkMjJkXkEyXkFqcGdeQXVyNDk3NzU2MTQ@._V1_SY1000_CR0,0,675,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=imm6OR605UI&ab_channel=MovieStation") # noqa
scott_pilgrim = media.Movie("Scott Pilgrim vs. the World",
"<NAME> must defeat his new girlfriend's seven evil exes in order to win her heart.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTkwNTczNTMyOF5BMl5BanBnXkFtZTcwNzUxOTUyMw@@._V1_SY1000_CR0,0,675,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=7wd5KEaOtm4&ab_channel=UniversalPictures") # noqa
# Putting the movies to an array so fresh_tomatoes can display them
movies = [ironman,
limitless,
lego_movie,
summer_wars,
oceans_eleven,
scott_pilgrim]
# Create and open the movie page through fresh_tomatoes
fresh_tomatoes.open_movies_page(movies) | source/trailer_website.py | import media
import fresh_tomatoes
# Movies to be shown in the page
ironman = media.Movie("Ironman",
"After being held captive in an Afghan cave, billionaire engineer <NAME> creates a unique weaponized suit of armor to fight evil.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTczNTI2ODUwOF5BMl5BanBnXkFtZTcwMTU0NTIzMw@@._V1_SY1000_CR0,0,674,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=8hYlB38asDY&ab_channel=TheMovieChanneI") # noqa
limitless = media.Movie("Limitless",
"With the help of a mysterious pill that enables the user to access 100 percent of his brain abilities, a struggling writer becomes a financial wizard, but it also puts him in a new world with lots of dangers.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BYmViZGM0MGItZTdiYi00ZDU4LWIxNDYtNTc1NWQ5Njc2N2YwXkEyXkFqcGdeQXVyNDk3NzU2MTQ@._V1_SY1000_CR0,0,692,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=vUkAfjfWh5g&ab_channel=eOnefilms") # noqa
lego_movie = media.Movie("The Lego Movie",
"An ordinary Lego construction worker, thought to be the prophesied 'Special', is recruited to join a quest to stop an evil tyrant from gluing the Lego universe into eternal stasis.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTg4MDk1ODExN15BMl5BanBnXkFtZTgwNzIyNjg3MDE@._V1_SY1000_CR0,0,674,1000_AL_.jpg", # noqa
"hhttps://www.youtube.com/watch?v=fZ_JOBCLF-I&ab_channel=WarnerBros.Pictures") # noqa
summer_wars = media.Movie("Summer Wars",
"A student tries to fix a problem he accidentally caused in OZ, a digital world, while pretending to be the fiance of his friend at her grandmother's 90th birthday.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTYyOTk3OTQzM15BMl5BanBnXkFtZTcwMjU4NDYyNA@@._V1_SY1000_CR0,0,681,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=HjLE8BmWfKA&ab_channel=AwesomenessDreams") # noqa
oceans_eleven = media.Movie("Ocean's Eleven",
"<NAME> and his eleven accomplices plan to rob three Las Vegas casinos simultaneously.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BYzVmYzVkMmUtOGRhMi00MTNmLThlMmUtZTljYjlkMjNkMjJkXkEyXkFqcGdeQXVyNDk3NzU2MTQ@._V1_SY1000_CR0,0,675,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=imm6OR605UI&ab_channel=MovieStation") # noqa
scott_pilgrim = media.Movie("Scott Pilgrim vs. the World",
"<NAME> must defeat his new girlfriend's seven evil exes in order to win her heart.", # noqa
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTkwNTczNTMyOF5BMl5BanBnXkFtZTcwNzUxOTUyMw@@._V1_SY1000_CR0,0,675,1000_AL_.jpg", # noqa
"https://www.youtube.com/watch?v=7wd5KEaOtm4&ab_channel=UniversalPictures") # noqa
# Putting the movies to an array so fresh_tomatoes can display them
movies = [ironman,
limitless,
lego_movie,
summer_wars,
oceans_eleven,
scott_pilgrim]
# Create and open the movie page through fresh_tomatoes
fresh_tomatoes.open_movies_page(movies) | 0.276691 | 0.330147 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_healthchecks_http_probe_result_facts
short_description: Fetches details about one or multiple HttpProbeResult resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple HttpProbeResult resources in Oracle Cloud Infrastructure
- Gets the HTTP probe results for the specified probe or monitor, where
the `probeConfigurationId` is the OCID of either a monitor or an
on-demand probe.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
probe_configuration_id:
description:
- The OCID of a monitor or on-demand probe.
type: str
required: true
start_time_greater_than_or_equal_to:
description:
- Returns results with a `startTime` equal to or greater than the specified value.
type: float
start_time_less_than_or_equal_to:
description:
- Returns results with a `startTime` equal to or less than the specified value.
type: float
sort_order:
description:
- Controls the sort order of results.
type: str
choices:
- "ASC"
- "DESC"
target:
description:
- Filters results that match the `target`.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List http_probe_results
oci_healthchecks_http_probe_result_facts:
# required
probe_configuration_id: "ocid1.probeconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
# optional
start_time_greater_than_or_equal_to: 3.4
start_time_less_than_or_equal_to: 3.4
sort_order: ASC
target: target_example
"""
RETURN = """
http_probe_results:
description:
- List of HttpProbeResult resources
returned: on success
type: complex
contains:
key:
description:
- A value identifying this specific probe result. The key is only unique within
the results of its probe configuration. The key may be reused after 90 days.
returned: on success
type: str
sample: key_example
probe_configuration_id:
description:
- The OCID of the monitor or on-demand probe responsible for creating this result.
returned: on success
type: str
sample: "ocid1.probeconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
start_time:
description:
- The date and time the probe was executed, expressed in milliseconds since the
POSIX epoch. This field is defined by the PerformanceResourceTiming interface
of the W3C Resource Timing specification. For more information, see
L(Resource Timing,https://w3c.github.io/resource-timing/#sec-resource-timing).
returned: on success
type: float
sample: 1.2
target:
description:
- The target hostname or IP address of the probe.
returned: on success
type: str
sample: target_example
vantage_point_name:
description:
- The name of the vantage point that executed the probe.
returned: on success
type: str
sample: vantage_point_name_example
is_timed_out:
description:
- True if the probe did not complete before the configured `timeoutInSeconds` value.
returned: on success
type: bool
sample: true
is_healthy:
description:
- True if the probe result is determined to be healthy based on probe
type-specific criteria. For HTTP probes, a probe result is considered
healthy if the HTTP response code is greater than or equal to 200 and
less than 300.
returned: on success
type: bool
sample: true
error_category:
description:
- "The category of error if an error occurs executing the probe.
The `errorMessage` field provides a message with the error details.
* NONE - No error
* DNS - DNS errors
* TRANSPORT - Transport-related errors, for example a \\"TLS certificate expired\\" error.
* NETWORK - Network-related errors, for example a \\"network unreachable\\" error.
* SYSTEM - Internal system errors."
returned: on success
type: str
sample: NONE
error_message:
description:
- The error information indicating why a probe execution failed.
returned: on success
type: str
sample: error_message_example
protocol:
description:
- ""
returned: on success
type: str
sample: HTTP
connection:
description:
- ""
returned: on success
type: complex
contains:
address:
description:
- The connection IP address.
returned: on success
type: str
sample: address_example
port:
description:
- The port.
returned: on success
type: int
sample: 56
connect_duration:
description:
- Total connect duration, calculated using `connectEnd` minus `connectStart`.
returned: on success
type: float
sample: 1.2
secure_connect_duration:
description:
- The duration to secure the connection. This value will be zero for
insecure connections. Calculated using `connectEnd` minus `secureConnectionStart`.
returned: on success
type: float
sample: 1.2
dns:
description:
- ""
returned: on success
type: complex
contains:
domain_lookup_duration:
description:
- Total DNS resolution duration, in milliseconds. Calculated using `domainLookupEnd`
minus `domainLookupStart`.
returned: on success
type: float
sample: 1.2
addresses:
description:
- The addresses returned by DNS resolution.
returned: on success
type: list
sample: []
status_code:
description:
- The HTTP response status code.
returned: on success
type: int
sample: 56
domain_lookup_start:
description:
- The time immediately before the vantage point starts the domain name lookup for
the resource.
returned: on success
type: float
sample: 1.2
domain_lookup_end:
description:
- The time immediately before the vantage point finishes the domain name lookup for
the resource.
returned: on success
type: float
sample: 1.2
connect_start:
description:
- The time immediately before the vantage point starts establishing the connection
to the server to retrieve the resource.
returned: on success
type: float
sample: 1.2
secure_connection_start:
description:
- The time immediately before the vantage point starts the handshake process to
secure the current connection.
returned: on success
type: float
sample: 1.2
connect_end:
description:
- The time immediately after the vantage point finishes establishing the connection
to the server to retrieve the resource.
returned: on success
type: float
sample: 1.2
fetch_start:
description:
- The time immediately before the vantage point starts to fetch the resource.
returned: on success
type: float
sample: 1.2
request_start:
description:
- The time immediately before the vantage point starts requesting the resource from
the server.
returned: on success
type: float
sample: 1.2
response_start:
description:
- The time immediately after the vantage point's HTTP parser receives the first byte
of the response.
returned: on success
type: float
sample: 1.2
response_end:
description:
- The time immediately after the vantage point receives the last byte of the response
or immediately before the transport connection is closed, whichever comes first.
returned: on success
type: float
sample: 1.2
duration:
description:
- The total duration from start of request until response is fully consumed or the
connection is closed.
returned: on success
type: float
sample: 1.2
encoded_body_size:
description:
- The size, in octets, of the payload body prior to removing any applied
content-codings.
returned: on success
type: int
sample: 56
sample: [{
"key": "key_example",
"probe_configuration_id": "ocid1.probeconfiguration.oc1..xxxxxxEXAMPLExxxxxx",
"start_time": 1.2,
"target": "target_example",
"vantage_point_name": "vantage_point_name_example",
"is_timed_out": true,
"is_healthy": true,
"error_category": "NONE",
"error_message": "error_message_example",
"protocol": "HTTP",
"connection": {
"address": "address_example",
"port": 56,
"connect_duration": 1.2,
"secure_connect_duration": 1.2
},
"dns": {
"domain_lookup_duration": 1.2,
"addresses": []
},
"status_code": 56,
"domain_lookup_start": 1.2,
"domain_lookup_end": 1.2,
"connect_start": 1.2,
"secure_connection_start": 1.2,
"connect_end": 1.2,
"fetch_start": 1.2,
"request_start": 1.2,
"response_start": 1.2,
"response_end": 1.2,
"duration": 1.2,
"encoded_body_size": 56
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.healthchecks import HealthChecksClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class HttpProbeResultFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"probe_configuration_id",
]
def list_resources(self):
optional_list_method_params = [
"start_time_greater_than_or_equal_to",
"start_time_less_than_or_equal_to",
"sort_order",
"target",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_http_probe_results,
probe_configuration_id=self.module.params.get("probe_configuration_id"),
**optional_kwargs
)
HttpProbeResultFactsHelperCustom = get_custom_class("HttpProbeResultFactsHelperCustom")
class ResourceFactsHelper(
HttpProbeResultFactsHelperCustom, HttpProbeResultFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
probe_configuration_id=dict(type="str", required=True),
start_time_greater_than_or_equal_to=dict(type="float"),
start_time_less_than_or_equal_to=dict(type="float"),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
target=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="http_probe_result",
service_client_class=HealthChecksClient,
namespace="healthchecks",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(http_probe_results=result)
if __name__ == "__main__":
main() | plugins/modules/oci_healthchecks_http_probe_result_facts.py |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_healthchecks_http_probe_result_facts
short_description: Fetches details about one or multiple HttpProbeResult resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple HttpProbeResult resources in Oracle Cloud Infrastructure
- Gets the HTTP probe results for the specified probe or monitor, where
the `probeConfigurationId` is the OCID of either a monitor or an
on-demand probe.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
probe_configuration_id:
description:
- The OCID of a monitor or on-demand probe.
type: str
required: true
start_time_greater_than_or_equal_to:
description:
- Returns results with a `startTime` equal to or greater than the specified value.
type: float
start_time_less_than_or_equal_to:
description:
- Returns results with a `startTime` equal to or less than the specified value.
type: float
sort_order:
description:
- Controls the sort order of results.
type: str
choices:
- "ASC"
- "DESC"
target:
description:
- Filters results that match the `target`.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List http_probe_results
oci_healthchecks_http_probe_result_facts:
# required
probe_configuration_id: "ocid1.probeconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
# optional
start_time_greater_than_or_equal_to: 3.4
start_time_less_than_or_equal_to: 3.4
sort_order: ASC
target: target_example
"""
RETURN = """
http_probe_results:
description:
- List of HttpProbeResult resources
returned: on success
type: complex
contains:
key:
description:
- A value identifying this specific probe result. The key is only unique within
the results of its probe configuration. The key may be reused after 90 days.
returned: on success
type: str
sample: key_example
probe_configuration_id:
description:
- The OCID of the monitor or on-demand probe responsible for creating this result.
returned: on success
type: str
sample: "ocid1.probeconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
start_time:
description:
- The date and time the probe was executed, expressed in milliseconds since the
POSIX epoch. This field is defined by the PerformanceResourceTiming interface
of the W3C Resource Timing specification. For more information, see
L(Resource Timing,https://w3c.github.io/resource-timing/#sec-resource-timing).
returned: on success
type: float
sample: 1.2
target:
description:
- The target hostname or IP address of the probe.
returned: on success
type: str
sample: target_example
vantage_point_name:
description:
- The name of the vantage point that executed the probe.
returned: on success
type: str
sample: vantage_point_name_example
is_timed_out:
description:
- True if the probe did not complete before the configured `timeoutInSeconds` value.
returned: on success
type: bool
sample: true
is_healthy:
description:
- True if the probe result is determined to be healthy based on probe
type-specific criteria. For HTTP probes, a probe result is considered
healthy if the HTTP response code is greater than or equal to 200 and
less than 300.
returned: on success
type: bool
sample: true
error_category:
description:
- "The category of error if an error occurs executing the probe.
The `errorMessage` field provides a message with the error details.
* NONE - No error
* DNS - DNS errors
* TRANSPORT - Transport-related errors, for example a \\"TLS certificate expired\\" error.
* NETWORK - Network-related errors, for example a \\"network unreachable\\" error.
* SYSTEM - Internal system errors."
returned: on success
type: str
sample: NONE
error_message:
description:
- The error information indicating why a probe execution failed.
returned: on success
type: str
sample: error_message_example
protocol:
description:
- ""
returned: on success
type: str
sample: HTTP
connection:
description:
- ""
returned: on success
type: complex
contains:
address:
description:
- The connection IP address.
returned: on success
type: str
sample: address_example
port:
description:
- The port.
returned: on success
type: int
sample: 56
connect_duration:
description:
- Total connect duration, calculated using `connectEnd` minus `connectStart`.
returned: on success
type: float
sample: 1.2
secure_connect_duration:
description:
- The duration to secure the connection. This value will be zero for
insecure connections. Calculated using `connectEnd` minus `secureConnectionStart`.
returned: on success
type: float
sample: 1.2
dns:
description:
- ""
returned: on success
type: complex
contains:
domain_lookup_duration:
description:
- Total DNS resolution duration, in milliseconds. Calculated using `domainLookupEnd`
minus `domainLookupStart`.
returned: on success
type: float
sample: 1.2
addresses:
description:
- The addresses returned by DNS resolution.
returned: on success
type: list
sample: []
status_code:
description:
- The HTTP response status code.
returned: on success
type: int
sample: 56
domain_lookup_start:
description:
- The time immediately before the vantage point starts the domain name lookup for
the resource.
returned: on success
type: float
sample: 1.2
domain_lookup_end:
description:
- The time immediately before the vantage point finishes the domain name lookup for
the resource.
returned: on success
type: float
sample: 1.2
connect_start:
description:
- The time immediately before the vantage point starts establishing the connection
to the server to retrieve the resource.
returned: on success
type: float
sample: 1.2
secure_connection_start:
description:
- The time immediately before the vantage point starts the handshake process to
secure the current connection.
returned: on success
type: float
sample: 1.2
connect_end:
description:
- The time immediately after the vantage point finishes establishing the connection
to the server to retrieve the resource.
returned: on success
type: float
sample: 1.2
fetch_start:
description:
- The time immediately before the vantage point starts to fetch the resource.
returned: on success
type: float
sample: 1.2
request_start:
description:
- The time immediately before the vantage point starts requesting the resource from
the server.
returned: on success
type: float
sample: 1.2
response_start:
description:
- The time immediately after the vantage point's HTTP parser receives the first byte
of the response.
returned: on success
type: float
sample: 1.2
response_end:
description:
- The time immediately after the vantage point receives the last byte of the response
or immediately before the transport connection is closed, whichever comes first.
returned: on success
type: float
sample: 1.2
duration:
description:
- The total duration from start of request until response is fully consumed or the
connection is closed.
returned: on success
type: float
sample: 1.2
encoded_body_size:
description:
- The size, in octets, of the payload body prior to removing any applied
content-codings.
returned: on success
type: int
sample: 56
sample: [{
"key": "key_example",
"probe_configuration_id": "ocid1.probeconfiguration.oc1..xxxxxxEXAMPLExxxxxx",
"start_time": 1.2,
"target": "target_example",
"vantage_point_name": "vantage_point_name_example",
"is_timed_out": true,
"is_healthy": true,
"error_category": "NONE",
"error_message": "error_message_example",
"protocol": "HTTP",
"connection": {
"address": "address_example",
"port": 56,
"connect_duration": 1.2,
"secure_connect_duration": 1.2
},
"dns": {
"domain_lookup_duration": 1.2,
"addresses": []
},
"status_code": 56,
"domain_lookup_start": 1.2,
"domain_lookup_end": 1.2,
"connect_start": 1.2,
"secure_connection_start": 1.2,
"connect_end": 1.2,
"fetch_start": 1.2,
"request_start": 1.2,
"response_start": 1.2,
"response_end": 1.2,
"duration": 1.2,
"encoded_body_size": 56
}]
"""
# Ansible/OCI plumbing shared by all generated oracle.oci collection modules.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
    OCIResourceFactsHelperBase,
    get_custom_class,
)

# The OCI Python SDK is an optional dependency: record its availability here
# and report a clear failure from main() instead of crashing at import time.
try:
    from oci.healthchecks import HealthChecksClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class HttpProbeResultFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: list"""

    def get_required_params_for_list(self):
        # The probe configuration OCID is the only mandatory argument for
        # listing HTTP probe results.
        return [
            "probe_configuration_id",
        ]

    def list_resources(self):
        # Forward only the optional filters the user actually supplied;
        # absent parameters must not reach the SDK call.
        optional_list_method_params = [
            "start_time_greater_than_or_equal_to",
            "start_time_less_than_or_equal_to",
            "sort_order",
            "target",
        ]
        optional_kwargs = {
            name: self.module.params[name]
            for name in optional_list_method_params
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.list_all_resources(
            self.client.list_http_probe_results,
            probe_configuration_id=self.module.params.get("probe_configuration_id"),
            **optional_kwargs
        )
# Allow user-provided customizations to override the generated helper.
HttpProbeResultFactsHelperCustom = get_custom_class("HttpProbeResultFactsHelperCustom")


class ResourceFactsHelper(
    HttpProbeResultFactsHelperCustom, HttpProbeResultFactsHelperGen
):
    # MRO lists the custom class first so its overrides win over the
    # generated implementation.
    pass
def main():
    """Module entry point: parse arguments, list HTTP probe results, exit."""
    module_args = oci_common_utils.get_common_arg_spec()
    # Module-specific arguments layered on top of the common OCI spec.
    extra_args = dict(
        probe_configuration_id=dict(type="str", required=True),
        start_time_greater_than_or_equal_to=dict(type="float"),
        start_time_less_than_or_equal_to=dict(type="float"),
        sort_order=dict(type="str", choices=["ASC", "DESC"]),
        target=dict(type="str"),
    )
    module_args.update(extra_args)

    module = AnsibleModule(argument_spec=module_args)

    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="http_probe_result",
        service_client_class=HealthChecksClient,
        namespace="healthchecks",
    )

    if not resource_facts_helper.is_list():
        resource_facts_helper.fail()
        result = []
    else:
        result = resource_facts_helper.list()

    module.exit_json(http_probe_results=result)
if __name__ == "__main__":
main() | 0.92763 | 0.358887 |
import os
import argparse
import numpy as np
from comet_ml import Experiment
import keras.backend as K
from keras.callbacks import ModelCheckpoint, TerminateOnNaN
from keras.optimizers import Adam
from model import create_model
from losses.perceptual_loss import perceptual_loss
from dataset import TrainDatasetSequence, TestDatasetSequence
from callbacks.log_images import LogImages
from utils import relative_path, file_listing
def psnr_metric(max_pixel=1.0):
    '''
    Creates a Keras-compatible PSNR (peak signal-to-noise ratio) metric.

    Args:
        max_pixel: Max value a pixel can take on. We scale inputs to (0, 1),
            hence the default of 1.0.

    Returns:
        A psnr(y_true, y_pred) function suitable for model.compile(metrics=...).

    Note: PSNR = 10 * log10(max_pixel^2 / MSE). The Keras backend only exposes
    a natural logarithm, so the result is divided by ln(10) to convert to
    log10 (previously hard-coded as the rounded approximation 2.303).
    '''
    ln10 = np.log(10.0)  # exact conversion factor instead of 2.303

    def psnr(y_true, y_pred):
        mse = K.mean(K.square(y_pred - y_true), axis=-1)
        return (10.0 * K.log((max_pixel ** 2) / mse)) / ln10

    return psnr
def train(model, args, experiment):
    """Compiles the model and runs training with checkpointing and image logging.

    Args:
        model: Keras model to train.
        args: Parsed command-line arguments (dataset paths, sizes, epochs, ...).
        experiment: comet_ml Experiment used by the image-logging callback.
    """
    hr_shape = (args.input_h * args.scale, args.input_w * args.scale, 3)
    lr_size = (args.input_w, args.input_h)

    training_data = TrainDatasetSequence(args.train_dataset,
                                         batch_size=args.batch_size,
                                         input_size=lr_size,
                                         scale=args.scale)
    validation_data = TestDatasetSequence(args.test_dataset,
                                          batch_size=args.batch_size,
                                          input_size=lr_size,
                                          scale=args.scale)

    model.compile(optimizer=Adam(lr=3e-4),
                  loss=perceptual_loss(hr_shape),
                  metrics=['mse', psnr_metric()])
    model.summary()

    # Optionally resume from previously saved weights.
    if args.weights:
        model.load_weights(args.weights)

    checkpoint = ModelCheckpoint(
        args.model_save_path + 'sr_{epoch:02d}_{val_loss:.3f}.h5',
        save_weights_only=True,
        verbose=1)
    image_logger = LogImages(
        experiment,
        paths=file_listing(args.validation_path),
        input_size=lr_size,
        scale=args.scale,
        log_iters=500)

    model.fit_generator(
        training_data,
        epochs=args.epochs,
        validation_data=validation_data,
        use_multiprocessing=True,
        workers=8,
        callbacks=[checkpoint, TerminateOnNaN(), image_logger])
if __name__ == '__main__':
    # Command line arguments parsing
    # NOTE(review): the description says 'colorization' but this script trains
    # a super-resolution model — likely copied from a sibling project; confirm
    # before changing the user-facing text.
    parser = argparse.ArgumentParser(description='Train a colorization deep learning model')
    parser.add_argument('--train-dataset',
                        type=str,
                        default=relative_path('../data/imagenet-sample/train/'),
                        help='Train dataset base path. Folder should contain subfolder for each class.')
    parser.add_argument('--test-dataset',
                        type=str,
                        default=relative_path('../data/imagenet-sample/test/'),
                        help='Test dataset base path. Folder should contain images directly.')
    parser.add_argument('--validation-path',
                        type=str,
                        default=relative_path('../data/imagenet-sample/val/'),
                        help='Path to directory with validation images that will be uploaded to comet after each epoch')
    parser.add_argument('--batch-size',
                        type=int,
                        default=32,
                        help='Batch size used during training')
    parser.add_argument('--input-w',
                        type=int,
                        default=128,
                        help='Image width')
    parser.add_argument('--input-h',
                        type=int,
                        default=128,
                        help='Image height')
    parser.add_argument('--scale',
                        type=int,
                        default=3,
                        help='Target img size / input img size scale factor')
    parser.add_argument('--weights',
                        type=str,
                        help='Model weights')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        help='Number of epochs')
    parser.add_argument('--model-save-path',
                        type=str,
                        default=relative_path('../model/'),
                        help='Base directory to save model during training')
    args = parser.parse_args()
    # CometML experiment; credentials are taken from environment variables.
    experiment = Experiment(api_key=os.getenv('COMET_API_KEY'),
                            project_name=os.getenv('COMET_PROJECTNAME'),
                            workspace=os.getenv('COMET_WORKSPACE'))
    # Train; note the (height, width, channels) ordering of the input shape.
    model = create_model((args.input_h, args.input_w, 3), args.scale)
train(model, args, experiment) | src/train.py | import os
import argparse
import numpy as np
from comet_ml import Experiment
import keras.backend as K
from keras.callbacks import ModelCheckpoint, TerminateOnNaN
from keras.optimizers import Adam
from model import create_model
from losses.perceptual_loss import perceptual_loss
from dataset import TrainDatasetSequence, TestDatasetSequence
from callbacks.log_images import LogImages
from utils import relative_path, file_listing
def psnr_metric(max_pixel=1.0):
    '''
    Creates a Keras-compatible PSNR (peak signal-to-noise ratio) metric.

    Args:
        max_pixel: Max value a pixel can take on. We scale inputs to (0, 1),
            hence the default of 1.0.

    Returns:
        A psnr(y_true, y_pred) function suitable for model.compile(metrics=...).

    Note: PSNR = 10 * log10(max_pixel^2 / MSE). The Keras backend only exposes
    a natural logarithm, so the result is divided by ln(10) to convert to
    log10 (previously hard-coded as the rounded approximation 2.303).
    '''
    ln10 = np.log(10.0)  # exact conversion factor instead of 2.303

    def psnr(y_true, y_pred):
        mse = K.mean(K.square(y_pred - y_true), axis=-1)
        return (10.0 * K.log((max_pixel ** 2) / mse)) / ln10

    return psnr
def train(model, args, experiment):
    """Compiles the model and runs training with checkpointing and image logging.

    Args:
        model: Keras model to train.
        args: Parsed command-line arguments (dataset paths, sizes, epochs, ...).
        experiment: comet_ml Experiment used by the image-logging callback.
    """
    hr_shape = (args.input_h * args.scale, args.input_w * args.scale, 3)
    lr_size = (args.input_w, args.input_h)

    training_data = TrainDatasetSequence(args.train_dataset,
                                         batch_size=args.batch_size,
                                         input_size=lr_size,
                                         scale=args.scale)
    validation_data = TestDatasetSequence(args.test_dataset,
                                          batch_size=args.batch_size,
                                          input_size=lr_size,
                                          scale=args.scale)

    model.compile(optimizer=Adam(lr=3e-4),
                  loss=perceptual_loss(hr_shape),
                  metrics=['mse', psnr_metric()])
    model.summary()

    # Optionally resume from previously saved weights.
    if args.weights:
        model.load_weights(args.weights)

    checkpoint = ModelCheckpoint(
        args.model_save_path + 'sr_{epoch:02d}_{val_loss:.3f}.h5',
        save_weights_only=True,
        verbose=1)
    image_logger = LogImages(
        experiment,
        paths=file_listing(args.validation_path),
        input_size=lr_size,
        scale=args.scale,
        log_iters=500)

    model.fit_generator(
        training_data,
        epochs=args.epochs,
        validation_data=validation_data,
        use_multiprocessing=True,
        workers=8,
        callbacks=[checkpoint, TerminateOnNaN(), image_logger])
if __name__ == '__main__':
    # Command line arguments parsing
    # NOTE(review): the description says 'colorization' but this script trains
    # a super-resolution model — likely copied from a sibling project; confirm
    # before changing the user-facing text.
    parser = argparse.ArgumentParser(description='Train a colorization deep learning model')
    parser.add_argument('--train-dataset',
                        type=str,
                        default=relative_path('../data/imagenet-sample/train/'),
                        help='Train dataset base path. Folder should contain subfolder for each class.')
    parser.add_argument('--test-dataset',
                        type=str,
                        default=relative_path('../data/imagenet-sample/test/'),
                        help='Test dataset base path. Folder should contain images directly.')
    parser.add_argument('--validation-path',
                        type=str,
                        default=relative_path('../data/imagenet-sample/val/'),
                        help='Path to directory with validation images that will be uploaded to comet after each epoch')
    parser.add_argument('--batch-size',
                        type=int,
                        default=32,
                        help='Batch size used during training')
    parser.add_argument('--input-w',
                        type=int,
                        default=128,
                        help='Image width')
    parser.add_argument('--input-h',
                        type=int,
                        default=128,
                        help='Image height')
    parser.add_argument('--scale',
                        type=int,
                        default=3,
                        help='Target img size / input img size scale factor')
    parser.add_argument('--weights',
                        type=str,
                        help='Model weights')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        help='Number of epochs')
    parser.add_argument('--model-save-path',
                        type=str,
                        default=relative_path('../model/'),
                        help='Base directory to save model during training')
    args = parser.parse_args()
    # CometML experiment; credentials are taken from environment variables.
    experiment = Experiment(api_key=os.getenv('COMET_API_KEY'),
                            project_name=os.getenv('COMET_PROJECTNAME'),
                            workspace=os.getenv('COMET_WORKSPACE'))
    # Train; note the (height, width, channels) ordering of the input shape.
    model = create_model((args.input_h, args.input_w, 3), args.scale)
train(model, args, experiment) | 0.803367 | 0.221256 |
import time
import xpedite.util
import logging
from xpedite.report.env import EnvReportBuilder
from xpedite.report.reportbuilder import ReportBuilder
LOGGER = logging.getLogger(__name__)
class Report(object):
  """Aggregates all artifacts produced by a single profiling session"""

  class Markup(object):
    """A named chunk of report markup (detailed latency statistics)"""

    def __init__(self, name, title, description, content):
      """Stores markup content alongside its identifying metadata"""
      self.name, self.title = name, title
      self.description, self.content = description, content

  class Category(object):
    """Latency histogram plus per-route markups for one category of txns"""

    def __init__(self, name, histogram):
      """Creates an empty category wrapping the given histogram"""
      self.name, self.histogram, self.routes = name, histogram, []

    def addRoute(self, name, title, description, content):
      """Appends a Markup holding detailed latency statistics for a route"""
      markup = Report.Markup(name, title, description, content)
      self.routes.append(markup)

  def __init__(self, app, profiles, envReport, categories):
    """Holds profile data and generated reports for one session"""
    self.app, self.profiles = app, profiles
    self.envReport, self.categories = envReport, categories

  @property
  def runId(self):
    """Unique run id for this report"""
    return self.app.runId

  def makeBenchmark(self, path):
    """
    Persists samples for current run in the given path for future benchmarking

    :param path: Path to persist profiles for the current session
    """
    return self.profiles.makeBenchmark(path)
def generateEnvironmentReport(app, repo, resultOrder, classifier, txnFilter, benchmarkPaths):
  """
  Generates report with environment details

  :param app: an instance of xpedite app, to interact with target application
  :param repo: Repository of loaded transactions
  :param resultOrder: Sort order of transactions in latency constituent reports
  :param classifier: Predicate to classify transactions into different categories
  :param txnFilter: Lambda to filter transactions prior to report generation
  :param benchmarkPaths: List of stored reports from previous runs, for benchmarking

  :return: a Report.Markup wrapping the environment markup, or None when no
           environment report could be built
  """
  markup = EnvReportBuilder().buildEnvironmentReportFile(
    app, repo, resultOrder, classifier, txnFilter, benchmarkPaths
  )
  if not markup:
    return None
  title = 'Test Environment Report'
  description = """
    Test environment report (cpu clock frequency, kernel configuration etc.)
    """
  return Report.Markup(title, title, description, markup)
def generate(app, profiles, histograms, resultOrder, classifier, txnFilter, benchmarkPaths, reportThreshold):
  """
  Generates latency breakup reports for a list of profiles

  :param app: an instance of xpedite app, to interact with target application
  :param profiles: Profile data for the current profile session
  :param histograms: Latency distribuion histograms for each category/route combination
  :param resultOrder: Sort order of transactions in latency constituent reports
  :param classifier: Predicate to classify transactions into different categories
  :param txnFilter: Lambda to filter transactions prior to report generation
  :param benchmarkPaths: List of stored reports from previous runs, for benchmarking
  :param reportThreshold: Threshold for number of transactions rendered in html reports.
  """
  envReport = generateEnvironmentReport(app, profiles.transactionRepo, resultOrder, classifier,
                                        txnFilter, benchmarkPaths)
  # NOTE(review): dict.iteritems() is Python-2-only; under Python 3 this must be .items()
  categories = {name : Report.Category(name, histogram) for name, histogram in histograms.iteritems()}
  for profile in profiles:
    # Profiles whose category produced no histogram are silently skipped
    category = categories.get(profile.category, None)
    if category:
      begin = time.time()
      title = '{} latency statistics [{} transactions]'.format(profile.name, len(profile.current))
      LOGGER.info('generating report %s -> ', title)
      markup = ReportBuilder().buildReport(profile.current, profile.benchmarks, profile.reportProbes,
                                           profile.name, resultOrder, reportThreshold)
      markupSize = xpedite.util.formatHumanReadable(len(markup))
      title = '{} - ({})'.format(title, markupSize)
      description = '\n\t{}\n\t'.format(title)
      elapsed = time.time() - begin
      # NOTE(review): LOGGER.completed looks like a custom xpedite log level — assumes
      # xpedite's logging initialization installed it; confirm before reuse elsewhere
      LOGGER.completed('completed %s in %0.2f sec.', markupSize, elapsed)
      category.addRoute(profile.name, title, description, markup)
return Report(app, profiles, envReport, categories) | scripts/lib/xpedite/report/__init__.py | import time
import xpedite.util
import logging
from xpedite.report.env import EnvReportBuilder
from xpedite.report.reportbuilder import ReportBuilder
LOGGER = logging.getLogger(__name__)
class Report(object):
  """Aggregates all artifacts produced by a single profiling session"""

  class Markup(object):
    """A named chunk of report markup (detailed latency statistics)"""

    def __init__(self, name, title, description, content):
      """Stores markup content alongside its identifying metadata"""
      self.name, self.title = name, title
      self.description, self.content = description, content

  class Category(object):
    """Latency histogram plus per-route markups for one category of txns"""

    def __init__(self, name, histogram):
      """Creates an empty category wrapping the given histogram"""
      self.name, self.histogram, self.routes = name, histogram, []

    def addRoute(self, name, title, description, content):
      """Appends a Markup holding detailed latency statistics for a route"""
      markup = Report.Markup(name, title, description, content)
      self.routes.append(markup)

  def __init__(self, app, profiles, envReport, categories):
    """Holds profile data and generated reports for one session"""
    self.app, self.profiles = app, profiles
    self.envReport, self.categories = envReport, categories

  @property
  def runId(self):
    """Unique run id for this report"""
    return self.app.runId

  def makeBenchmark(self, path):
    """
    Persists samples for current run in the given path for future benchmarking

    :param path: Path to persist profiles for the current session
    """
    return self.profiles.makeBenchmark(path)
def generateEnvironmentReport(app, repo, resultOrder, classifier, txnFilter, benchmarkPaths):
  """
  Generates report with environment details

  :param app: an instance of xpedite app, to interact with target application
  :param repo: Repository of loaded transactions
  :param resultOrder: Sort order of transactions in latency constituent reports
  :param classifier: Predicate to classify transactions into different categories
  :param txnFilter: Lambda to filter transactions prior to report generation
  :param benchmarkPaths: List of stored reports from previous runs, for benchmarking

  :return: a Report.Markup wrapping the environment markup, or None when no
           environment report could be built
  """
  markup = EnvReportBuilder().buildEnvironmentReportFile(
    app, repo, resultOrder, classifier, txnFilter, benchmarkPaths
  )
  if not markup:
    return None
  title = 'Test Environment Report'
  description = """
    Test environment report (cpu clock frequency, kernel configuration etc.)
    """
  return Report.Markup(title, title, description, markup)
def generate(app, profiles, histograms, resultOrder, classifier, txnFilter, benchmarkPaths, reportThreshold):
  """
  Generates latency breakup reports for a list of profiles

  :param app: an instance of xpedite app, to interact with target application
  :param profiles: Profile data for the current profile session
  :param histograms: Latency distribuion histograms for each category/route combination
  :param resultOrder: Sort order of transactions in latency constituent reports
  :param classifier: Predicate to classify transactions into different categories
  :param txnFilter: Lambda to filter transactions prior to report generation
  :param benchmarkPaths: List of stored reports from previous runs, for benchmarking
  :param reportThreshold: Threshold for number of transactions rendered in html reports.
  """
  envReport = generateEnvironmentReport(app, profiles.transactionRepo, resultOrder, classifier,
                                        txnFilter, benchmarkPaths)
  # NOTE(review): dict.iteritems() is Python-2-only; under Python 3 this must be .items()
  categories = {name : Report.Category(name, histogram) for name, histogram in histograms.iteritems()}
  for profile in profiles:
    # Profiles whose category produced no histogram are silently skipped
    category = categories.get(profile.category, None)
    if category:
      begin = time.time()
      title = '{} latency statistics [{} transactions]'.format(profile.name, len(profile.current))
      LOGGER.info('generating report %s -> ', title)
      markup = ReportBuilder().buildReport(profile.current, profile.benchmarks, profile.reportProbes,
                                           profile.name, resultOrder, reportThreshold)
      markupSize = xpedite.util.formatHumanReadable(len(markup))
      title = '{} - ({})'.format(title, markupSize)
      description = '\n\t{}\n\t'.format(title)
      elapsed = time.time() - begin
      # NOTE(review): LOGGER.completed looks like a custom xpedite log level — assumes
      # xpedite's logging initialization installed it; confirm before reuse elsewhere
      LOGGER.completed('completed %s in %0.2f sec.', markupSize, elapsed)
      category.addRoute(profile.name, title, description, markup)
return Report(app, profiles, envReport, categories) | 0.867247 | 0.203906 |
from uuid import uuid4
import requests
from contextlib import ExitStack
from gzip import GzipFile
from io import TextIOWrapper
import json
import flux
import pytest
from .utils import raises_not_found
def test_add_error(error_container, error_data, webapp):
    """Adding an error with an explicit timestamp stores all fields, and the
    traceback is served from a dedicated URL rather than embedded inline."""
    timestamp = flux.current_timeline.time()
    error_container.add_error(error_data['exception'],
                              error_data['exception_type'],
                              error_data['traceback'],
                              timestamp=timestamp)
    error_container.refresh()
    [first_error] = error_container.query_errors()
    assert first_error.message == error_data['exception']
    assert first_error.exception_type == error_data['exception_type']
    assert first_error.timestamp == timestamp
    # The traceback itself is not inlined on the error object
    assert first_error.traceback is None
    resp = requests.get(webapp.url.add_path(first_error.traceback_url))
    resp.raise_for_status()
    assert resp.json()['traceback'] == error_data['traceback']
def test_add_error_just_msg(error_container):
    """An error added with only a message is stored and is not a failure."""
    error_container.add_error('msg')
    [err] = error_container.refresh().query_errors()
    assert err.message == 'msg'
    assert not err.is_failure
def test_add_failure_just_msg(error_container):
    """A failure added with only a message is stored and flagged as failure."""
    error_container.add_failure('msg')
    [err] = error_container.refresh().query_errors()
    assert err.message == 'msg'
    assert err.is_failure
def test_add_failure_num_failures(error_container):
    """Failures increment num_failures but not num_errors."""
    error_container.add_failure('F')
    error_container.report_end()
    assert error_container.refresh().num_failures == 1
    assert error_container.refresh().num_errors == 0
def test_add_failure_status(error_container):
    """A container that saw a failure ends with FAILURE status."""
    error_container.add_failure('F')
    error_container.report_end()
    assert error_container.refresh().status == 'FAILURE'
def test_add_error_num_errors(error_container):
    """Errors increment num_errors but not num_failures."""
    error_container.add_error('E')
    error_container.report_end()
    assert error_container.refresh().num_failures == 0
    assert error_container.refresh().num_errors == 1
def test_add_error_status(error_container):
    """A container that saw an error ends with ERROR status."""
    error_container.add_error('E')
    error_container.report_end()
    assert error_container.refresh().status == 'ERROR'
def test_add_error_no_timestamp(error_container, error_data, webapp):
    """When no timestamp is given, the server stamps the error with 'now'."""
    error_container.add_error(error_data['exception'],
                              error_data['exception_type'],
                              error_data['traceback'])
    error_container.refresh()
    [first_error] = error_container.query_errors()
    assert first_error.message == error_data['exception']
    assert first_error.exception_type == error_data['exception_type']
    # flux's controlled timeline makes 'now' deterministic for this comparison
    assert first_error.timestamp == flux.current_timeline.time()
    assert requests.get(webapp.url.add_path(first_error.traceback_url)).json()['traceback'] == error_data['traceback']
def test_add_error_nonexistent(nonexistent_error_container, error_data):
    """Adding an error to a container that does not exist yields a not-found error."""
    with raises_not_found():
        nonexistent_error_container.add_error(error_data['exception'],
                                              error_data['exception_type'],
                                              error_data['traceback'])
def test_add_error_stream_upload_traceback(error_container, traceback_file, error_data, compress_traceback_file):
    """A traceback can be streamed (plain or gzipped) to the error's
    traceback endpoint and read back verbatim."""
    error = error_container.add_error(error_data['exception'], error_data['exception_type'], traceback=error_data['traceback'])
    assert error.id
    error_container.client.api.session.put(error.api_url.add_path('traceback'), data=traceback_file)
    url = error.refresh().traceback_url
    url = error_container.client.api.url.add_path(url)
    traceback_file.seek(0)
    got_contents = error_container.client.api.session.get(url).content
    if compress_traceback_file:
        # The fixture wrote gzipped data, so compare against the decompressed payload
        assert got_contents == GzipFile(fileobj=traceback_file).read()
    else:
        assert got_contents == traceback_file.read()
def test_add_error_upload_not_allowed_twice(error_container, error_data, traceback_file):
    """Re-uploading a traceback for an error that already has one is rejected
    with 409 Conflict."""
    error = error_container.add_error(error_data['exception'], error_data['exception_type'], error_data['traceback'])
    assert error.traceback_url
    traceback_url = error.api_url.add_path('traceback')
    resp = error_container.client.api.session.put(traceback_url, data=traceback_file)
    assert resp.status_code == requests.codes.conflict
@pytest.fixture
def traceback_file(error_data, tmpdir, request, compress_traceback_file):
    """Returns an open binary file containing the traceback payload,
    optionally gzip-compressed (parametrized via compress_traceback_file)."""
    assert error_data['traceback']
    path = tmpdir.join(str(uuid4()))
    with ExitStack() as stack:
        f = stack.enter_context(path.open('wb'))
        if compress_traceback_file:
            # Wrap the raw file so the JSON below gets written gzip-compressed
            f = stack.enter_context(GzipFile(fileobj=f))
        f = stack.enter_context(TextIOWrapper(f))
        json.dump({'traceback': error_data['traceback'], 'exception': {'attributes': None}}, f)
    returned = path.open('rb')
    # Close the read handle when the test using the fixture finishes
    request.addfinalizer(returned.close)
    return returned
@pytest.fixture(params=[True, False])
def compress_traceback_file(request):
return request.param | tests/test_add_errors.py | from uuid import uuid4
import requests
from contextlib import ExitStack
from gzip import GzipFile
from io import TextIOWrapper
import json
import flux
import pytest
from .utils import raises_not_found
def test_add_error(error_container, error_data, webapp):
    """Adding an error with an explicit timestamp stores all fields, and the
    traceback is served from a dedicated URL rather than embedded inline."""
    timestamp = flux.current_timeline.time()
    error_container.add_error(error_data['exception'],
                              error_data['exception_type'],
                              error_data['traceback'],
                              timestamp=timestamp)
    error_container.refresh()
    [first_error] = error_container.query_errors()
    assert first_error.message == error_data['exception']
    assert first_error.exception_type == error_data['exception_type']
    assert first_error.timestamp == timestamp
    # The traceback itself is not inlined on the error object
    assert first_error.traceback is None
    resp = requests.get(webapp.url.add_path(first_error.traceback_url))
    resp.raise_for_status()
    assert resp.json()['traceback'] == error_data['traceback']
def test_add_error_just_msg(error_container):
    """An error added with only a message is stored and is not a failure."""
    error_container.add_error('msg')
    [err] = error_container.refresh().query_errors()
    assert err.message == 'msg'
    assert not err.is_failure
def test_add_failure_just_msg(error_container):
    """A failure added with only a message is stored and flagged as failure."""
    error_container.add_failure('msg')
    [err] = error_container.refresh().query_errors()
    assert err.message == 'msg'
    assert err.is_failure
def test_add_failure_num_failures(error_container):
    """Failures increment num_failures but not num_errors."""
    error_container.add_failure('F')
    error_container.report_end()
    assert error_container.refresh().num_failures == 1
    assert error_container.refresh().num_errors == 0
def test_add_failure_status(error_container):
    """A container that saw a failure ends with FAILURE status."""
    error_container.add_failure('F')
    error_container.report_end()
    assert error_container.refresh().status == 'FAILURE'
def test_add_error_num_errors(error_container):
    """Errors increment num_errors but not num_failures."""
    error_container.add_error('E')
    error_container.report_end()
    assert error_container.refresh().num_failures == 0
    assert error_container.refresh().num_errors == 1
def test_add_error_status(error_container):
    """A container that saw an error ends with ERROR status."""
    error_container.add_error('E')
    error_container.report_end()
    assert error_container.refresh().status == 'ERROR'
def test_add_error_no_timestamp(error_container, error_data, webapp):
    """When no timestamp is given, the server stamps the error with 'now'."""
    error_container.add_error(error_data['exception'],
                              error_data['exception_type'],
                              error_data['traceback'])
    error_container.refresh()
    [first_error] = error_container.query_errors()
    assert first_error.message == error_data['exception']
    assert first_error.exception_type == error_data['exception_type']
    # flux's controlled timeline makes 'now' deterministic for this comparison
    assert first_error.timestamp == flux.current_timeline.time()
    assert requests.get(webapp.url.add_path(first_error.traceback_url)).json()['traceback'] == error_data['traceback']
def test_add_error_nonexistent(nonexistent_error_container, error_data):
    """Adding an error to a container that does not exist yields a not-found error."""
    with raises_not_found():
        nonexistent_error_container.add_error(error_data['exception'],
                                              error_data['exception_type'],
                                              error_data['traceback'])
def test_add_error_stream_upload_traceback(error_container, traceback_file, error_data, compress_traceback_file):
    """A traceback can be streamed (plain or gzipped) to the error's
    traceback endpoint and read back verbatim."""
    error = error_container.add_error(error_data['exception'], error_data['exception_type'], traceback=error_data['traceback'])
    assert error.id
    error_container.client.api.session.put(error.api_url.add_path('traceback'), data=traceback_file)
    url = error.refresh().traceback_url
    url = error_container.client.api.url.add_path(url)
    traceback_file.seek(0)
    got_contents = error_container.client.api.session.get(url).content
    if compress_traceback_file:
        # The fixture wrote gzipped data, so compare against the decompressed payload
        assert got_contents == GzipFile(fileobj=traceback_file).read()
    else:
        assert got_contents == traceback_file.read()
def test_add_error_upload_not_allowed_twice(error_container, error_data, traceback_file):
    """Re-uploading a traceback for an error that already has one is rejected
    with 409 Conflict."""
    error = error_container.add_error(error_data['exception'], error_data['exception_type'], error_data['traceback'])
    assert error.traceback_url
    traceback_url = error.api_url.add_path('traceback')
    resp = error_container.client.api.session.put(traceback_url, data=traceback_file)
    assert resp.status_code == requests.codes.conflict
@pytest.fixture
def traceback_file(error_data, tmpdir, request, compress_traceback_file):
    """Returns an open binary file containing the traceback payload,
    optionally gzip-compressed (parametrized via compress_traceback_file)."""
    assert error_data['traceback']
    path = tmpdir.join(str(uuid4()))
    with ExitStack() as stack:
        f = stack.enter_context(path.open('wb'))
        if compress_traceback_file:
            # Wrap the raw file so the JSON below gets written gzip-compressed
            f = stack.enter_context(GzipFile(fileobj=f))
        f = stack.enter_context(TextIOWrapper(f))
        json.dump({'traceback': error_data['traceback'], 'exception': {'attributes': None}}, f)
    returned = path.open('rb')
    # Close the read handle when the test using the fixture finishes
    request.addfinalizer(returned.close)
    return returned
@pytest.fixture(params=[True, False])
def compress_traceback_file(request):
return request.param | 0.304042 | 0.249893 |
from math import copysign
import pxng
from pxng.colors import *
from pxng.keys import *
def update(window: pxng.Window):
    """Per-frame handler: processes input, then draws a scrolling hex dump of
    the bitmap font data plus help text and status lines."""
    handle_input(window)
    window.draw_grid(tint=DARK_GREY)
    window.draw_text(10, 10, 'Text Rendering', tint=LIGHT_BLUE)
    # Use the bitmap font as something interesting to scroll through
    font: pxng.Font = window.context['font']
    font_data = font.data
    page_width = font.glyph_width * 2  # a character is 8 bytes and we can show 16 bytes
    page_height = font.glyph_height * font.grid_height
    page_count = font.grid_width // 2
    line = window.context['line']
    row_count = 0x20  # 32
    column_count = 0x10  # 16
    window.tint = LIGHT_GREEN
    for row in range(row_count):
        # 16 bytes per row; the displayed address wraps around at 0x10000
        addr = line * column_count + row * column_count
        addr %= 0x10000
        row_txt = f'${addr:04X}'
        for col in range(column_count):
            # since we only show 16 columns we divide the bitmap into 8 pages
            page = ((row + line) // page_height) % page_count
            v = font_data[(row + line) % page_height, (col + page * page_width)]
            row_txt += f' {v & 0xFF:02X}'
        window.draw_text(15, 25 + row * 6, row_txt, scale=0.5)
    text = 'Text Rotated 90 degrees'
    window.draw_text(5, 215, text, scale=0.5, tint=LIGHT_ORANGE, angle=-90)
    # Key-binding help panel
    window.tint = LIGHT_AZURE
    window.draw_text(230, 25, 'SPACE : toggle auto', scale=0.5)
    window.draw_text(230, 31, 'UP : 1 line up', scale=0.5)
    window.draw_text(230, 37, 'DOWN : 1 line down', scale=0.5)
    window.draw_text(230, 43, 'PAGE UP : 1 page up', scale=0.5)
    window.draw_text(230, 49, 'PAGE DOWN: 1 page down', scale=0.5)
    window.draw_text(230, 55, 'HOME : go to top', scale=0.5)
    window.draw_text(230, 61, 'END : go to end', scale=0.5)
    text = f'Frames rendered: {window.context["frame"]}'
    window.draw_text(10, 226, text, tint=LIGHT_YELLOW, scale=0.5)
    text = 'Auto scrolling' if not window.context['paused'] else 'Manual scrolling'
    window.draw_text(10, 232, text, tint=LIGHT_YELLOW, scale=0.5)
    # While not paused, advance the frame counter and auto-scroll one line
    if not window.context['paused']:
        window.context['frame'] += 1
        window.context['line'] += 1
def handle_input(window):
    """Applies keyboard and mouse-wheel input to the hex-view state.

    Relative keys move by single lines or whole pages (0x20 lines), HOME/END
    jump to the extremes, and the wheel scrolls at least one line per notch.
    The current line is finally clamped to the valid range [0, 0xFE0].
    """
    ctx = window.context
    if window.key_state(KEY_SPACE).pressed:
        ctx['paused'] = not ctx['paused']
    if window.key_state(KEY_Q).pressed:
        window.close_window()
    # Relative movement: key -> signed line delta.
    for key, delta in ((KEY_UP, -1), (KEY_DOWN, 1),
                       (KEY_PAGE_UP, -0x20), (KEY_PAGE_DOWN, 0x20)):
        if window.key_state(key).pressed:
            ctx['line'] += delta
    # Absolute jumps to the first and the last page.
    if window.key_state(KEY_HOME).pressed:
        ctx['line'] = 0x000
    if window.key_state(KEY_END).pressed:
        ctx['line'] = 0xFE0
    # Mouse wheel: scroll by at least one full line per notch.
    scroll = window.mouse.scroll_dy
    if window.mouse.hover and scroll != 0:
        ctx['line'] -= int(copysign(max(1.0, abs(scroll)), scroll))
    # Clamp to the addressable range.
    ctx['line'] = min(max(ctx['line'], 0), 0xFE0)
if __name__ == "__main__":
window = pxng.Window(640, 480, 'PixelEngine', scale=2)
window.context['line'] = 0
window.context['frame'] = 0
window.context['paused'] = False
window.context['font'] = window.create_default_font()
window.set_update_handler(update)
window.start_event_loop() | examples/text_rendering.py | from math import copysign
import pxng
from pxng.colors import *
from pxng.keys import *
def update(window: pxng.Window):
    """Per-frame handler: processes input, then draws a scrolling hex dump of
    the bitmap font data plus help text and status lines."""
    handle_input(window)
    window.draw_grid(tint=DARK_GREY)
    window.draw_text(10, 10, 'Text Rendering', tint=LIGHT_BLUE)
    # Use the bitmap font as something interesting to scroll through
    font: pxng.Font = window.context['font']
    font_data = font.data
    page_width = font.glyph_width * 2  # a character is 8 bytes and we can show 16 bytes
    page_height = font.glyph_height * font.grid_height
    page_count = font.grid_width // 2
    line = window.context['line']
    row_count = 0x20  # 32
    column_count = 0x10  # 16
    window.tint = LIGHT_GREEN
    for row in range(row_count):
        # 16 bytes per row; the displayed address wraps around at 0x10000
        addr = line * column_count + row * column_count
        addr %= 0x10000
        row_txt = f'${addr:04X}'
        for col in range(column_count):
            # since we only show 16 columns we divide the bitmap into 8 pages
            page = ((row + line) // page_height) % page_count
            v = font_data[(row + line) % page_height, (col + page * page_width)]
            row_txt += f' {v & 0xFF:02X}'
        window.draw_text(15, 25 + row * 6, row_txt, scale=0.5)
    text = 'Text Rotated 90 degrees'
    window.draw_text(5, 215, text, scale=0.5, tint=LIGHT_ORANGE, angle=-90)
    # Key-binding help panel
    window.tint = LIGHT_AZURE
    window.draw_text(230, 25, 'SPACE : toggle auto', scale=0.5)
    window.draw_text(230, 31, 'UP : 1 line up', scale=0.5)
    window.draw_text(230, 37, 'DOWN : 1 line down', scale=0.5)
    window.draw_text(230, 43, 'PAGE UP : 1 page up', scale=0.5)
    window.draw_text(230, 49, 'PAGE DOWN: 1 page down', scale=0.5)
    window.draw_text(230, 55, 'HOME : go to top', scale=0.5)
    window.draw_text(230, 61, 'END : go to end', scale=0.5)
    text = f'Frames rendered: {window.context["frame"]}'
    window.draw_text(10, 226, text, tint=LIGHT_YELLOW, scale=0.5)
    text = 'Auto scrolling' if not window.context['paused'] else 'Manual scrolling'
    window.draw_text(10, 232, text, tint=LIGHT_YELLOW, scale=0.5)
    # While not paused, advance the frame counter and auto-scroll one line
    if not window.context['paused']:
        window.context['frame'] += 1
        window.context['line'] += 1
def handle_input(window):
    """Applies keyboard and mouse-wheel input to the hex-view state.

    Relative keys move by single lines or whole pages (0x20 lines), HOME/END
    jump to the extremes, and the wheel scrolls at least one line per notch.
    The current line is finally clamped to the valid range [0, 0xFE0].
    """
    ctx = window.context
    if window.key_state(KEY_SPACE).pressed:
        ctx['paused'] = not ctx['paused']
    if window.key_state(KEY_Q).pressed:
        window.close_window()
    # Relative movement: key -> signed line delta.
    for key, delta in ((KEY_UP, -1), (KEY_DOWN, 1),
                       (KEY_PAGE_UP, -0x20), (KEY_PAGE_DOWN, 0x20)):
        if window.key_state(key).pressed:
            ctx['line'] += delta
    # Absolute jumps to the first and the last page.
    if window.key_state(KEY_HOME).pressed:
        ctx['line'] = 0x000
    if window.key_state(KEY_END).pressed:
        ctx['line'] = 0xFE0
    # Mouse wheel: scroll by at least one full line per notch.
    scroll = window.mouse.scroll_dy
    if window.mouse.hover and scroll != 0:
        ctx['line'] -= int(copysign(max(1.0, abs(scroll)), scroll))
    # Clamp to the addressable range.
    ctx['line'] = min(max(ctx['line'], 0), 0xFE0)
if __name__ == "__main__":
window = pxng.Window(640, 480, 'PixelEngine', scale=2)
window.context['line'] = 0
window.context['frame'] = 0
window.context['paused'] = False
window.context['font'] = window.create_default_font()
window.set_update_handler(update)
window.start_event_loop() | 0.617282 | 0.186706 |
# Выполнение задания Ultra Lite:
#1. Создадим два родительских класса: один класс - это 2D-классические геометрические фигуры, из которого затем,
# применив принцип полиморфизма, создадим два других класса - расчет периметра и расчет площади,
# второй родительский класс - это любое натуральное число, для которого осуществляется проверка является или оно простым,
# затем, применив принцип наследования, создадим новый класс, который проверяет является ли какое-либо расположение цифр в заданном числе простым числом.
class Formulas_2D: # создание класса Formulas_2D
'''
Домументрирование класса.
Класс Formulas_2D выводит по одному примеру из основных формул плоской геометрической фигуры,
объект класса - это фигура.
Для создания объекта класса Formulas_2D и вывода формул его периметра и площади необходимо указать один аргумент:
тип фигуры и применить к объекту метод вывода формул: .formulas()
Тип фигуры (аргумент type_figure) может быть один из: quadrat (квадрат), rectangle (прямоугольник),
parallelogram (параллелограмм), rhombus (ромб), trapezoid (трапеция), triangle (треугольник), circle (окружность).
'''
def __init__(self, type_figure): # встроенный метод класса, инициализация
self.figure = type_figure # аргумент, тип фигуры
if self.figure not in ['circle', 'quadrat', 'rectangle', 'parallelogram', 'triangle', 'trapezoid', 'rhombus']:
raise Exception ('Ошибка ввода аргументов') # если уловие True, то принудительно запустить исключение
def __str__(self): # встроенный метод класса, что выводить на экран когда для вывода подается сам объект
return f'Фигура {self.figure}'
def formulas(self): #метод класса, вывод формул площади и периметра фигуры
dict_formul = {'quadrat': 'P = 4 * a, S = a * a, где a - длина стороны, P - периметр, S - площадь',
'rectangle': 'P = 2*(a+b), S = a * b, где a,b - длины сторон, P - периметр, S - площадь',
'parallelogram': 'P = 2*(a+b), S = a(b) * h, где a,b - длины сторон, h - длина высоты опущенной на эту сторону, P - периметр, S - площадь',
'rhombus': 'P = 4 * a, S = a * h, где a - длина стороны, h - длина высоты опущенной на эту сторону, P - периметр, S - площадь',
'trapezoid': 'P = a+b+с+d, S = h*(a+b)/2, где a,b - длины оснований, h - длина высоты, P - периметр, S - площадь',
'triangle': 'P = a+b+с, S = (a * h)/2, где a - длина стороны, h - длина высоты опущенной на эту сторону, P - периметр, S - площадь',
'circle': 'P = 2*pi*R, S = pi * R^2, где R - радиус окружности, P - длина окружности, S - площадь'}
return dict_formul[self.figure]
# проверка откуда запускается наш код, если True, то как отдельный скрипт и выполняется все, что ниже, если False, то
# импоритируется из файла .py как отдельный модуль, полностью или частично по обращению к его методам
if __name__ == '__main__':
try: # далее код программы, который может вызвать исключение:
# help(Formulas_2D) # вызов справки по созданному классу с его документацией
figure = Formulas_2D('triangle') # определеяем объект класса
#figure.figure = 'quadrat' # после применения инкапсуляции доступ к эти аргументам извне будет недоступен
#figure._Formulas_2D__figure = 'quadrat' #если не применить инкапсуляцию к этим аргументам, то их можно изменить после определения объекта
print(figure, figure.formulas()) #и тогда результат будет неверным, относительно исхоных входных параметров при определении объекта
except: # если поймано исключение, то выдать следующее сообщение:
print('Ошибка ввода аргумента, несоответсвующий тип фигуры, см help(Formulas_2D)')
class Prime_numbers: # создание класса Prime_numbers
'''
Домументрирование класса.
Класс Prime_numbers проверяет является ли введеное число простым, объект класса - это число.
Для создания объекта класса Prime_numbers необходимо указать один аргумент: натуральное (целое положительное) число.
Справка:
Простое число — натуральное (целое положительное) число, имеющее ровно два различных натуральных делителя — единицу
и самого себя. Другими словами, число x является простым, если оно больше 1 и при этом делится без остатка только на 1 и на x.
'''
def __init__(self, n): # встроенный метод класса, инициализация
self.__number = n
if type(self.__number) != int or self.__number < 2:
raise Exception ('Ошибка, введеное число не натуральное') # если уловие True, то принудительно запустить исключение
def __str__(self): # встроенный метод класса, что выводить на экран когда для вывода подается сам объект
return f'Натуральное число {self.__number}'
def prime_number(self): #создаем функцию(метод класса) с именем prime_number
if self.__number == 2 or self.__number == 3: #если число равно 2 или 3, то оно простое, тогда функция возвращает True
return True #возврат значения True
else:
for i in range(2, int(self.__number/2)+1): #проверяем существует ли для данного числа целочисленный делитель от 2 до int(number/2), если делитель встретился, то сразу возврат значения False
if self.__number % i == 0:
return False
return True #если целочисленный делитель не обнаружен, то возвращается значение True (число простое)
if __name__ == '__main__':
try: # далее код программы, который может вызвать исключение:
#help(Prime_numbers) # вызов справки по созданному классу с его документацией
num = Prime_numbers(10) # определеяем объект класса
num.__number = 11 # после применения инкапсуляции доступ к эти аргументам извне будет недоступен
# num.number = 11 #если не применить инкапсуляцию к этим аргументам, то их можно изменить после определения объекта
print('\n', num, ' является простым? - ', num.prime_number(), sep = '') #и тогда результат будет неверным, относительно исхоных входных параметров при определении объекта
except: # если поймано исключение, то выдать следующее сообщение:
print('\nОшибка, введеное число не натуральное или меньше 2, см help(Prime_numbers)') | Ultra_lite_parental_class.py |
# Выполнение задания Ultra Lite:
#1. Создадим два родительских класса: один класс - это 2D-классические геометрические фигуры, из которого затем,
# применив принцип полиморфизма, создадим два других класса - расчет периметра и расчет площади,
# второй родительский класс - это любое натуральное число, для которого осуществляется проверка является или оно простым,
# затем, применив принцип наследования, создадим новый класс, который проверяет является ли какое-либо расположение цифр в заданном числе простым числом.
class Formulas_2D: # создание класса Formulas_2D
'''
Домументрирование класса.
Класс Formulas_2D выводит по одному примеру из основных формул плоской геометрической фигуры,
объект класса - это фигура.
Для создания объекта класса Formulas_2D и вывода формул его периметра и площади необходимо указать один аргумент:
тип фигуры и применить к объекту метод вывода формул: .formulas()
Тип фигуры (аргумент type_figure) может быть один из: quadrat (квадрат), rectangle (прямоугольник),
parallelogram (параллелограмм), rhombus (ромб), trapezoid (трапеция), triangle (треугольник), circle (окружность).
'''
def __init__(self, type_figure): # встроенный метод класса, инициализация
self.figure = type_figure # аргумент, тип фигуры
if self.figure not in ['circle', 'quadrat', 'rectangle', 'parallelogram', 'triangle', 'trapezoid', 'rhombus']:
raise Exception ('Ошибка ввода аргументов') # если уловие True, то принудительно запустить исключение
def __str__(self): # встроенный метод класса, что выводить на экран когда для вывода подается сам объект
return f'Фигура {self.figure}'
def formulas(self): #метод класса, вывод формул площади и периметра фигуры
dict_formul = {'quadrat': 'P = 4 * a, S = a * a, где a - длина стороны, P - периметр, S - площадь',
'rectangle': 'P = 2*(a+b), S = a * b, где a,b - длины сторон, P - периметр, S - площадь',
'parallelogram': 'P = 2*(a+b), S = a(b) * h, где a,b - длины сторон, h - длина высоты опущенной на эту сторону, P - периметр, S - площадь',
'rhombus': 'P = 4 * a, S = a * h, где a - длина стороны, h - длина высоты опущенной на эту сторону, P - периметр, S - площадь',
'trapezoid': 'P = a+b+с+d, S = h*(a+b)/2, где a,b - длины оснований, h - длина высоты, P - периметр, S - площадь',
'triangle': 'P = a+b+с, S = (a * h)/2, где a - длина стороны, h - длина высоты опущенной на эту сторону, P - периметр, S - площадь',
'circle': 'P = 2*pi*R, S = pi * R^2, где R - радиус окружности, P - длина окружности, S - площадь'}
return dict_formul[self.figure]
# проверка откуда запускается наш код, если True, то как отдельный скрипт и выполняется все, что ниже, если False, то
# импоритируется из файла .py как отдельный модуль, полностью или частично по обращению к его методам
if __name__ == '__main__':
try: # далее код программы, который может вызвать исключение:
# help(Formulas_2D) # вызов справки по созданному классу с его документацией
figure = Formulas_2D('triangle') # определеяем объект класса
#figure.figure = 'quadrat' # после применения инкапсуляции доступ к эти аргументам извне будет недоступен
#figure._Formulas_2D__figure = 'quadrat' #если не применить инкапсуляцию к этим аргументам, то их можно изменить после определения объекта
print(figure, figure.formulas()) #и тогда результат будет неверным, относительно исхоных входных параметров при определении объекта
except: # если поймано исключение, то выдать следующее сообщение:
print('Ошибка ввода аргумента, несоответсвующий тип фигуры, см help(Formulas_2D)')
class Prime_numbers: # создание класса Prime_numbers
'''
Домументрирование класса.
Класс Prime_numbers проверяет является ли введеное число простым, объект класса - это число.
Для создания объекта класса Prime_numbers необходимо указать один аргумент: натуральное (целое положительное) число.
Справка:
Простое число — натуральное (целое положительное) число, имеющее ровно два различных натуральных делителя — единицу
и самого себя. Другими словами, число x является простым, если оно больше 1 и при этом делится без остатка только на 1 и на x.
'''
def __init__(self, n): # встроенный метод класса, инициализация
self.__number = n
if type(self.__number) != int or self.__number < 2:
raise Exception ('Ошибка, введеное число не натуральное') # если уловие True, то принудительно запустить исключение
def __str__(self): # встроенный метод класса, что выводить на экран когда для вывода подается сам объект
return f'Натуральное число {self.__number}'
def prime_number(self): #создаем функцию(метод класса) с именем prime_number
if self.__number == 2 or self.__number == 3: #если число равно 2 или 3, то оно простое, тогда функция возвращает True
return True #возврат значения True
else:
for i in range(2, int(self.__number/2)+1): #проверяем существует ли для данного числа целочисленный делитель от 2 до int(number/2), если делитель встретился, то сразу возврат значения False
if self.__number % i == 0:
return False
return True #если целочисленный делитель не обнаружен, то возвращается значение True (число простое)
if __name__ == '__main__':
try: # далее код программы, который может вызвать исключение:
#help(Prime_numbers) # вызов справки по созданному классу с его документацией
num = Prime_numbers(10) # определеяем объект класса
num.__number = 11 # после применения инкапсуляции доступ к эти аргументам извне будет недоступен
# num.number = 11 #если не применить инкапсуляцию к этим аргументам, то их можно изменить после определения объекта
print('\n', num, ' является простым? - ', num.prime_number(), sep = '') #и тогда результат будет неверным, относительно исхоных входных параметров при определении объекта
except: # если поймано исключение, то выдать следующее сообщение:
print('\nОшибка, введеное число не натуральное или меньше 2, см help(Prime_numbers)') | 0.193948 | 0.803174 |
import sys
import matplotlib.pyplot as plt
import os.path
outputFileBaseName = 'scatter'
#plot spec file column names
colNames = ['Gene name', 'Plot or not?', 'Color', 'Marker', 'Size', 'Alpha', 'Layer', 'Label or not?',
'Label font size','Legend group'] # note gene name needs to be first entry in this list or code breaks
if len(sys.argv)<3:
print('\n' + sys.argv[0] + ' requires at least two inputs. These are gene-level log-fold changes or CRISPR scores (CS) and are provided as follows:'
+ '\npython ' + sys.argv[0] + ' [CS treatment A vs initial] [CS treatment B vs initial] [plot spec file]'
+ '\n\nThe output is two image files called scatter.svg, scatter.png (dpi=500). To modify these output formats, edit the code at the very bottom of the script.'
+ '\n\nFormat of the first two input arguments (data files) should be tab or comma separated columns consisting of :'
+ '\nGene name \t CRISPR score '
'\n\nAny gene name that is not present in both files will be ignored. '
'The header of the second column will be the axis label for those data.'
+ '\n\nThe third (optional) argument provides specifications for how all or some data points should be plotted.'
+ ' The plot spec file is also tab or comma delimited, ' + str(len(colNames)) + ' columns in total:'
+ '\n' + '\t'.join(colNames)
+ '\n\nThe plot spec file needs to have column headers (in any order) exactly matching these column names, '
'but can have additional columns (e.g. notes about genes, other data), which will be ignored. '
'\nLikewise in the CRISPR score files, any columns beyond the first two will be ignored.'
'\n\nIf value in "Plot or not?" column = 1, then data for that gene will be plotted. '
'Any value other than 1 will be treated as false. '
'Likewise value in "Label or not?" = 1 means text of gene name will be overlayed on the data point.'
'\n\nLayer should be a number and points with higher value layer are plotted on top. If no layer specified, default is bottom layer.'
'\n\nThe permitted values and meanings for columns Color, Marker, Size, and Alpha can be found in the matplotlib/pyplot documentation:'
'\n https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html'
+ '\n\nThis code was written and tested for python 2.7, might not work with other versions.\n'
)
sys.exit()
fileAvI, fileBvI = sys.argv[1], sys.argv[2]
fileGOI = ''
if len(sys.argv)>3:
fileGOI = sys.argv[3]
def getVals(fileXvY):
CS_XvY = {}
with open(fileXvY,'r') as f:
line = f.readline()
delim = '\t'
if len(line.strip().split('\t'))==1:
delim = ','
label = line.split(delim)[1]
for line in f:
line = line.strip().split(delim)
CS_XvY[line[0]] = float(line[1])
return CS_XvY, label
#load score values
CS_A, xlabel = getVals(fileAvI)
CS_B, ylabel = getVals(fileBvI)
geneList = [g for g in CS_A if g in CS_B]
#load plot specs
GOIs = {}
layers = [-float('inf')]
layerLists = {} #layers[x] = [list of genes to be plotted as layer x]
layerSpecified = [] #list of genes with layer specified
if len(fileGOI)>0:
with open(fileGOI,'r') as f:
#tab or comma delimeter?
header = f.readline().strip()
delim = '\t'
if len(header.strip().split('\t'))==1:
delim = ','
header = header.split(delim)
#find index of relevant columns
colInds = {x:i for i,x in enumerate(header) if x.strip() in colNames}
for x in colNames:
error = False
if x not in colInds:
print('Error: cannot find column `' + x + '` in ' + fileGOI)
error = True
if error : sys.exit()
for line in f:
line = [x.strip() for x in line.split(delim)]
GOIs[line[colInds['Gene name']]] = {x:line[colInds[x]] for x in colNames[1:]}
try:
if int(line[colInds['Layer']]) not in layers:
layers.append(int(line[colInds['Layer']]))
layerLists[int(line[colInds['Layer']])] = []
layerLists[int(line[colInds['Layer']])].append(line[colInds['Gene name']])
layerSpecified.append(line[colInds['Gene name']])
except ValueError:
print('Error: Layer column contains non-integer value in ' + fileGOI + ' for gene ' + line[colInds['Gene name']])
sys.exit()
layers = sorted(layers)
layerLists[-float('inf')] = [g for g in geneList if g not in layerSpecified]
###plot
fig=plt.figure()
ax = plt.subplot()
#determine axes bounds
marginFactor = 1.05
xlim = [marginFactor*min([CS_A[g] for g in geneList] + [0]), marginFactor*max([CS_A[g] for g in geneList])]
ylim = [marginFactor*min([CS_B[g] for g in geneList] + [0]), marginFactor*max([CS_B[g] for g in geneList])]
###MANUALLY SET AXIS BOUNDS HERE
#xlim = [-3, 3]
#ylime = [-3, 3]
ax.plot(xlim,[0, 0],'--',linewidth=1,color='silver',zorder=0)
ax.plot([0, 0],ylim,'--',linewidth=1,color='silver',zorder=0)
legendHandles = []
legendSymbols = []
legendLabels = []
numPtsPlotted = 0
for layerInd, layerKey in enumerate(layers):
for g in layerLists[layerKey]:
#coordinates
x, y = CS_A[g], CS_B[g]
#get plot specs
if g in GOIs: #custom specs
plotOrNot = GOIs[g]['Plot or not?'] == '1'
if plotOrNot:
alpha = float(GOIs[g]['Alpha'])
markerColor = GOIs[g]['Color']
markerShape = GOIs[g]['Marker']
if markerShape == '0': #LibreOffice Calc converts periods into zeros, which aren't valid plot shape
markerShape = '.'
markerSize = float(GOIs[g]['Size'])
legendGroup = GOIs[g]['Legend group']
labelOrNot = GOIs[g]['Label or not?'] == '1'
labelFont = float(GOIs[g]['Label font size'])
else: #default specs
plotOrNot = True
alpha = 1
markerColor = 'b'
markerShape = '.'
markerSize = 10
legendGroup = ''
labelOrNot = False
labelFont = 0
#add point to figure
if plotOrNot:
ax.scatter(x, y,
color=markerColor,
marker=markerShape,
alpha=alpha,
s=markerSize,
zorder=layerInd)
numPtsPlotted += 1
if numPtsPlotted%100==0:
print(str(numPtsPlotted) + ' data points plotted')
# assign to legend group?
if legendGroup != '' and legendGroup not in legendLabels:
legendSymbols.append(markerShape + markerColor)
legendHandles.append(plt.scatter(-1000, -1000, color=markerColor, marker=markerShape, alpha=alpha,
s=markerSize*2, zorder=0))
legendLabels.append(legendGroup)
#overlay gene name?
if labelOrNot:
ax.text(x, y, g, fontsize=labelFont, zorder=max(layers) + 1)
#add legend
if len(legendHandles) > 0:
ax.legend(tuple(legendHandles), tuple(legendLabels), fontsize=6) # ,location=outside)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim(xlim)
plt.ylim(ylim)
ax.set_aspect(aspect='equal')
plt.tight_layout()
#save plot to png, svg files
if os.path.isfile(outputFileBaseName + '.png') or os.path.isfile(outputFileBaseName + '.svg'):
fileInd = 0
while True:
newOutputFileBaseName = outputFileBaseName + '_' + str(fileInd)
if os.path.isfile(newOutputFileBaseName + '.png') or os.path.isfile(newOutputFileBaseName + '.svg'):
fileInd += 1
newOutputFileBaseName = outputFileBaseName + '_' + str(fileInd)
else:
plt.savefig(newOutputFileBaseName + '.svg')
plt.savefig(newOutputFileBaseName + '.png',dpi=500)
break
else:
plt.savefig(outputFileBaseName + '.svg')
plt.savefig(outputFileBaseName+ '.png', dpi=500) | plotScatter.py |
import sys
import matplotlib.pyplot as plt
import os.path
outputFileBaseName = 'scatter'
#plot spec file column names
colNames = ['Gene name', 'Plot or not?', 'Color', 'Marker', 'Size', 'Alpha', 'Layer', 'Label or not?',
'Label font size','Legend group'] # note gene name needs to be first entry in this list or code breaks
if len(sys.argv)<3:
print('\n' + sys.argv[0] + ' requires at least two inputs. These are gene-level log-fold changes or CRISPR scores (CS) and are provided as follows:'
+ '\npython ' + sys.argv[0] + ' [CS treatment A vs initial] [CS treatment B vs initial] [plot spec file]'
+ '\n\nThe output is two image files called scatter.svg, scatter.png (dpi=500). To modify these output formats, edit the code at the very bottom of the script.'
+ '\n\nFormat of the first two input arguments (data files) should be tab or comma separated columns consisting of :'
+ '\nGene name \t CRISPR score '
'\n\nAny gene name that is not present in both files will be ignored. '
'The header of the second column will be the axis label for those data.'
+ '\n\nThe third (optional) argument provides specifications for how all or some data points should be plotted.'
+ ' The plot spec file is also tab or comma delimited, ' + str(len(colNames)) + ' columns in total:'
+ '\n' + '\t'.join(colNames)
+ '\n\nThe plot spec file needs to have column headers (in any order) exactly matching these column names, '
'but can have additional columns (e.g. notes about genes, other data), which will be ignored. '
'\nLikewise in the CRISPR score files, any columns beyond the first two will be ignored.'
'\n\nIf value in "Plot or not?" column = 1, then data for that gene will be plotted. '
'Any value other than 1 will be treated as false. '
'Likewise value in "Label or not?" = 1 means text of gene name will be overlayed on the data point.'
'\n\nLayer should be a number and points with higher value layer are plotted on top. If no layer specified, default is bottom layer.'
'\n\nThe permitted values and meanings for columns Color, Marker, Size, and Alpha can be found in the matplotlib/pyplot documentation:'
'\n https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html'
+ '\n\nThis code was written and tested for python 2.7, might not work with other versions.\n'
)
sys.exit()
fileAvI, fileBvI = sys.argv[1], sys.argv[2]
fileGOI = ''
if len(sys.argv)>3:
fileGOI = sys.argv[3]
def getVals(fileXvY):
CS_XvY = {}
with open(fileXvY,'r') as f:
line = f.readline()
delim = '\t'
if len(line.strip().split('\t'))==1:
delim = ','
label = line.split(delim)[1]
for line in f:
line = line.strip().split(delim)
CS_XvY[line[0]] = float(line[1])
return CS_XvY, label
#load score values
CS_A, xlabel = getVals(fileAvI)
CS_B, ylabel = getVals(fileBvI)
geneList = [g for g in CS_A if g in CS_B]
#load plot specs
GOIs = {}
layers = [-float('inf')]
layerLists = {} #layers[x] = [list of genes to be plotted as layer x]
layerSpecified = [] #list of genes with layer specified
if len(fileGOI)>0:
with open(fileGOI,'r') as f:
#tab or comma delimeter?
header = f.readline().strip()
delim = '\t'
if len(header.strip().split('\t'))==1:
delim = ','
header = header.split(delim)
#find index of relevant columns
colInds = {x:i for i,x in enumerate(header) if x.strip() in colNames}
for x in colNames:
error = False
if x not in colInds:
print('Error: cannot find column `' + x + '` in ' + fileGOI)
error = True
if error : sys.exit()
for line in f:
line = [x.strip() for x in line.split(delim)]
GOIs[line[colInds['Gene name']]] = {x:line[colInds[x]] for x in colNames[1:]}
try:
if int(line[colInds['Layer']]) not in layers:
layers.append(int(line[colInds['Layer']]))
layerLists[int(line[colInds['Layer']])] = []
layerLists[int(line[colInds['Layer']])].append(line[colInds['Gene name']])
layerSpecified.append(line[colInds['Gene name']])
except ValueError:
print('Error: Layer column contains non-integer value in ' + fileGOI + ' for gene ' + line[colInds['Gene name']])
sys.exit()
layers = sorted(layers)
layerLists[-float('inf')] = [g for g in geneList if g not in layerSpecified]
###plot
fig=plt.figure()
ax = plt.subplot()
#determine axes bounds
marginFactor = 1.05
xlim = [marginFactor*min([CS_A[g] for g in geneList] + [0]), marginFactor*max([CS_A[g] for g in geneList])]
ylim = [marginFactor*min([CS_B[g] for g in geneList] + [0]), marginFactor*max([CS_B[g] for g in geneList])]
###MANUALLY SET AXIS BOUNDS HERE
#xlim = [-3, 3]
#ylime = [-3, 3]
ax.plot(xlim,[0, 0],'--',linewidth=1,color='silver',zorder=0)
ax.plot([0, 0],ylim,'--',linewidth=1,color='silver',zorder=0)
legendHandles = []
legendSymbols = []
legendLabels = []
numPtsPlotted = 0
for layerInd, layerKey in enumerate(layers):
for g in layerLists[layerKey]:
#coordinates
x, y = CS_A[g], CS_B[g]
#get plot specs
if g in GOIs: #custom specs
plotOrNot = GOIs[g]['Plot or not?'] == '1'
if plotOrNot:
alpha = float(GOIs[g]['Alpha'])
markerColor = GOIs[g]['Color']
markerShape = GOIs[g]['Marker']
if markerShape == '0': #LibreOffice Calc converts periods into zeros, which aren't valid plot shape
markerShape = '.'
markerSize = float(GOIs[g]['Size'])
legendGroup = GOIs[g]['Legend group']
labelOrNot = GOIs[g]['Label or not?'] == '1'
labelFont = float(GOIs[g]['Label font size'])
else: #default specs
plotOrNot = True
alpha = 1
markerColor = 'b'
markerShape = '.'
markerSize = 10
legendGroup = ''
labelOrNot = False
labelFont = 0
#add point to figure
if plotOrNot:
ax.scatter(x, y,
color=markerColor,
marker=markerShape,
alpha=alpha,
s=markerSize,
zorder=layerInd)
numPtsPlotted += 1
if numPtsPlotted%100==0:
print(str(numPtsPlotted) + ' data points plotted')
# assign to legend group?
if legendGroup != '' and legendGroup not in legendLabels:
legendSymbols.append(markerShape + markerColor)
legendHandles.append(plt.scatter(-1000, -1000, color=markerColor, marker=markerShape, alpha=alpha,
s=markerSize*2, zorder=0))
legendLabels.append(legendGroup)
#overlay gene name?
if labelOrNot:
ax.text(x, y, g, fontsize=labelFont, zorder=max(layers) + 1)
#add legend
if len(legendHandles) > 0:
ax.legend(tuple(legendHandles), tuple(legendLabels), fontsize=6) # ,location=outside)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim(xlim)
plt.ylim(ylim)
ax.set_aspect(aspect='equal')
plt.tight_layout()
#save plot to png, svg files
if os.path.isfile(outputFileBaseName + '.png') or os.path.isfile(outputFileBaseName + '.svg'):
fileInd = 0
while True:
newOutputFileBaseName = outputFileBaseName + '_' + str(fileInd)
if os.path.isfile(newOutputFileBaseName + '.png') or os.path.isfile(newOutputFileBaseName + '.svg'):
fileInd += 1
newOutputFileBaseName = outputFileBaseName + '_' + str(fileInd)
else:
plt.savefig(newOutputFileBaseName + '.svg')
plt.savefig(newOutputFileBaseName + '.png',dpi=500)
break
else:
plt.savefig(outputFileBaseName + '.svg')
plt.savefig(outputFileBaseName+ '.png', dpi=500) | 0.379263 | 0.445891 |
from modules.file_utils import *
def get_top_level_report(coin_overall: dict, coin_details: dict, coin_report: dict) -> None:
coin_report["name"] = coin_overall["name"]
coin_report["symbol"] = coin_overall["symbol"]
coin_report["current_price"] = coin_overall["current_price"]
coin_report["market_cap"] = coin_overall["market_cap"]
coin_report["volume"] = coin_overall["total_volume"]
coin_report["genesis_date"] = coin_details["genesis_date"]
coin_report["description"] = coin_details["description"]["en"]
return None
def get_link_report(coin_details: dict, coin_report: dict) -> None:
coin_report["links"]["homepage"] = [page for page in coin_details["links"]["homepage"]]
coin_report["links"]["news"] = [page for page in coin_details["links"]["announcement_url"]]
coin_report["links"]["twitter"] = f"https://twitter.com/{coin_details['links']['twitter_screen_name']}"
coin_report["links"]["facebook"] = f"https://www.facebook.com/{coin_details['links']['facebook_username']}"
coin_report["links"]["telegram"] = f"https://t.me/{coin_details['links']['telegram_channel_identifier']}"
coin_report["links"]["subreddit"] = coin_details['links']['subreddit_url']
coin_report["links"]["github"] = [page for page in coin_details["links"]["repos_url"]["github"]]
return None
def get_general_report(coin_overall: dict, coin_details: dict, coin_report: dict) -> None:
coin_report["general"]["sentiment_votes_up_percentage"] = coin_details["sentiment_votes_up_percentage"]
coin_report["general"]["sentiment_votes_down_percentage"] = coin_details["sentiment_votes_down_percentage"]
coin_report["general"]["24h_high"] = coin_overall["high_24h"]
coin_report["general"]["24h_low"] = coin_overall["low_24h"]
coin_report["general"]["24h_price_change_percentage"] = coin_overall["price_change_percentage_24h"]
coin_report["general"]["all_time_high"] = coin_overall["ath"]
coin_report["general"]["all_time_high_date"] = coin_overall["ath_date"]
coin_report["general"]["all_time_low"] = coin_overall["atl"]
coin_report["general"]["all_time_low_date"] = coin_overall["atl_date"]
coin_report["general"]["circulating_supply"] = coin_overall["circulating_supply"]
coin_report["general"]["total_supply"] = coin_overall["total_supply"]
coin_report["general"]["max_supply"] = coin_overall["max_supply"]
return None
def get_exchange_report(criteria: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
for pair in coin_details["tickers"]:
if pair['market']['name'] not in coin_report["exchanges"]:
coin_report["exchanges"][f"{pair['market']['name']}"] = []
coin_report["exchanges"][f"{pair['market']['name']}"].append(load_data("templates/report_exchange.json", "json"))
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["base"] = pair["base"]
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["target"] = pair["target"]
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["last_price"] = pair["last"]
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["bid_ask_spread"] = pair["bid_ask_spread_percentage"]
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["is_anomaly"] = pair["is_anomaly"]
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["last_traded_at"] = pair["last_traded_at"]
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["trade_url"] = pair["trade_url"]
exchange_rating = list(filter(lambda x: x["id"] == pair["market"]["identifier"], exchange_list))[0]["trust_score"]
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["exchange_rating"] = exchange_rating
coin_report["exchanges"][f"{pair['market']['name']}"][-1]["coingecko_top_8"] = pair["market"]["identifier"] in criteria["top_8_exchange_id_list"]
return None
def get_developer_report(coin_details: dict, coin_report: dict) -> None:
if coin_details["developer_data"] is None:
return None
if "message" in coin_details["developer_data"] and coin_details["developer_data"]["message"] == "Missing Authentication Token":
return None
coin_report["developer_data"]["repo_name"] = coin_details["developer_data"]["name"]
coin_report["developer_data"]["created_at"] = coin_details["developer_data"]["general"]["data"]["created_at"]
coin_report["developer_data"]["description"] = coin_details["developer_data"]["general"]["data"]["description"]
coin_report["developer_data"]["community"] = {
"contributor_count": coin_details["developer_data"]["contributors"]["total"],
"star_count": coin_details["developer_data"]["general"]["data"]["stargazers_count"],
"subscriber_count": coin_details["developer_data"]["general"]["data"]["subscribers_count"],
"fork_count": coin_details["developer_data"]["general"]["data"]["forks"]
}
if coin_details["developer_data"]["issues"]["total"] == 0:
open_issue_percentage = 0 if coin_details["developer_data"]["issues"]["open"] == 0 else 100
else:
open_issue_percentage = round(coin_details["developer_data"]["issues"]["open"] / coin_details["developer_data"]["issues"]["total"], 2) * 100
coin_report["developer_data"]["issues"] = {
"open": coin_details["developer_data"]["issues"]["open"],
"total": coin_details["developer_data"]["issues"]["total"],
"open_percentage": open_issue_percentage
}
if coin_details["developer_data"]["pulls"]["total"] == 0:
open_pull_request_percentage = 0 if coin_details["developer_data"]["pulls"]["open"] == 0 else 100
else:
open_pull_request_percentage = round(coin_details["developer_data"]["pulls"]["open"] / coin_details["developer_data"]["pulls"]["total"], 2) * 100
coin_report["developer_data"]["pull_requests"] = {
"open": coin_details["developer_data"]["pulls"]["open"],
"total": coin_details["developer_data"]["pulls"]["total"],
"open_percentage": open_pull_request_percentage
}
annual_commit_count = sum([week["total"] for week in coin_details["developer_data"]["activity"]["data"]])
week_count = len(coin_details["developer_data"]["activity"]["data"])
coin_report["developer_data"]["commits"] = {
"total": coin_details["developer_data"]["commits"]["total"],
"average_per_week": round(annual_commit_count / week_count, 2),
"last_commit_date": coin_details["developer_data"]["commits"]["lastCommitData"]["commit"]["author"]["date"]
}
line_count = sum(coin_details["developer_data"]["languages"]["data"][language] for language in coin_details["developer_data"]["languages"]["data"])
programming_languages_distribution_percentage = {}
for language in coin_details["developer_data"]["languages"]["data"]:
programming_languages_distribution_percentage[language] = \
round(coin_details["developer_data"]["languages"]["data"][language] / line_count, 2) * 100
coin_report["developer_data"]["technical"] = {
"line_count": line_count,
"programming_languages_distribution": coin_details["developer_data"]["languages"]["data"],
"programming_languages_distribution_percentage": programming_languages_distribution_percentage
}
if "data" in coin_details["developer_data"]["license"]:
coin_report["developer_data"]["license"] = {
"name": coin_details["developer_data"]["license"]["data"]["name"],
"conditions": coin_details["developer_data"]["license"]["data"]["conditions"],
"limitations": coin_details["developer_data"]["license"]["data"]["limitations"],
"permissions": coin_details["developer_data"]["license"]["data"]["permissions"]
}
else:
coin_report["developer_data"]["license"] = {}
return None
def get_social_report(coin_details: dict, coin_report: dict) -> None:
coin_report["social_data"]["facebook_likes"] = coin_details["community_data"]["facebook_likes"]
coin_report["social_data"]["twitter_followers"] = coin_details["community_data"]["twitter_followers"]
coin_report["social_data"]["reddit_average_posts_48h"] = coin_details["community_data"]["reddit_average_posts_48h"]
coin_report["social_data"]["reddit_average_comments_48h"] = coin_details["community_data"]["reddit_average_comments_48h"]
coin_report["social_data"]["reddit_subscribers"] = coin_details["community_data"]["reddit_subscribers"]
coin_report["social_data"]["reddit_accounts_active_48h"] = coin_details["community_data"]["reddit_accounts_active_48h"]
coin_report["social_data"]["telegram_channel_user_count"] = coin_details["community_data"]["telegram_channel_user_count"]
return None
# TODO - Most likely possible to find an api to fetch data about wallets/addresses and their usage
def get_on_chain_report(coin_overall: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
pass
# TODO - Most likely possible only via manual labor
def get_team_quality_report(coin_overall: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
pass
# TODO - Most likely possible only via manual labor
def get_coin_uniqueness_report(coin_overall: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
pass
def load_report_template() -> dict:
coin_report = load_data("templates/report_overall.json" , "json")
coin_report["links"] = load_data("templates/report_links.json", "json")
coin_report["general"] = load_data("templates/report_general.json", "json")
coin_report["developer_data"] = load_data("templates/report_developer_data.json", "json")
coin_report["onchain_data"] = load_data("templates/report_onchain_data.json", "json")
coin_report["social_data"] = load_data("templates/report_social_data.json", "json")
return coin_report
def report() -> None:
coin_list = list(load_data("cache/coin_list.json", "json"))
coins_details = list(load_data("cache/coins_details_data.json", "json"))
exchange_list = list(load_data("cache/exchanges_data.json", "json"))
criteria = load_data("config/criteria.yaml", "yaml")
coin_report_template = load_report_template()
coin_report_list = []
coin_report_index = 0
for coin_details in coins_details:
print(f"Generating report data for {coin_details['id']} coin.")
coin_report_list.append(dict(coin_report_template))
coin_overall = list(filter(lambda x: x["id"] == coin_details["id"], coin_list))[0]
get_top_level_report(coin_overall, coin_details, coin_report_list[-1])
get_link_report(coin_details, coin_report_list[-1])
get_general_report(coin_overall, coin_details, coin_report_list[coin_report_index])
get_exchange_report(criteria, coin_details, exchange_list, coin_report_list[coin_report_index])
get_developer_report(coin_details, coin_report_list[coin_report_index])
get_social_report(coin_details, coin_report_list[coin_report_index])
save_data(coin_report_list[coin_report_index], f"reports/{coin_details['id']}_report.json", "json")
coin_report_index += 1
return None
if __name__ == '__main__':
report() | report.py | from modules.file_utils import *
def get_top_level_report(coin_overall: dict, coin_details: dict, coin_report: dict) -> None:
    """Copy headline market fields and coin metadata into the report.

    Mutates ``coin_report`` in place; always returns None.
    """
    # Fields sourced from the market-overview record (report key, source key).
    for report_key, overall_key in (
            ("name", "name"),
            ("symbol", "symbol"),
            ("current_price", "current_price"),
            ("market_cap", "market_cap"),
            ("volume", "total_volume")):
        coin_report[report_key] = coin_overall[overall_key]
    # Fields sourced from the detailed coin record.
    coin_report["genesis_date"] = coin_details["genesis_date"]
    coin_report["description"] = coin_details["description"]["en"]
    return None
def get_link_report(coin_details: dict, coin_report: dict) -> None:
    """Populate coin_report['links'] with the coin's web/social links.

    Mutates ``coin_report`` in place; always returns None.
    """
    links = coin_details["links"]
    # list() copies so the report does not alias the source lists
    # (the original used identity comprehensions for the same purpose).
    coin_report["links"]["homepage"] = list(links["homepage"])
    coin_report["links"]["news"] = list(links["announcement_url"])
    coin_report["links"]["twitter"] = f"https://twitter.com/{links['twitter_screen_name']}"
    coin_report["links"]["facebook"] = f"https://www.facebook.com/{links['facebook_username']}"
    coin_report["links"]["telegram"] = f"https://t.me/{links['telegram_channel_identifier']}"
    coin_report["links"]["subreddit"] = links['subreddit_url']
    coin_report["links"]["github"] = list(links["repos_url"]["github"])
    return None
def get_general_report(coin_overall: dict, coin_details: dict, coin_report: dict) -> None:
    """Fill coin_report['general'] with sentiment, price and supply stats."""
    general = coin_report["general"]
    general["sentiment_votes_up_percentage"] = coin_details["sentiment_votes_up_percentage"]
    general["sentiment_votes_down_percentage"] = coin_details["sentiment_votes_down_percentage"]
    # Remaining fields come straight from the market-overview record,
    # some stored under a different key in the report.
    for report_key, overall_key in (
            ("24h_high", "high_24h"),
            ("24h_low", "low_24h"),
            ("24h_price_change_percentage", "price_change_percentage_24h"),
            ("all_time_high", "ath"),
            ("all_time_high_date", "ath_date"),
            ("all_time_low", "atl"),
            ("all_time_low_date", "atl_date"),
            ("circulating_supply", "circulating_supply"),
            ("total_supply", "total_supply"),
            ("max_supply", "max_supply")):
        general[report_key] = coin_overall[overall_key]
    return None
def get_exchange_report(criteria: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
    """Record every exchange/pair the coin trades on in coin_report['exchanges'].

    For each ticker a fresh exchange-entry template is loaded, filled with
    the pair's market data, the exchange's trust score, and whether the
    exchange is in the criteria's top-8 list. Mutates coin_report in place.
    """
    top_8_ids = criteria["top_8_exchange_id_list"]
    for pair in coin_details["tickers"]:
        market_name = pair['market']['name']
        market_id = pair['market']['identifier']
        # One list of entries per exchange; a coin may trade several pairs there.
        entries = coin_report["exchanges"].setdefault(market_name, [])
        # Fresh template per pair so entries never share state.
        entry = load_data("templates/report_exchange.json", "json")
        entry["base"] = pair["base"]
        entry["target"] = pair["target"]
        entry["last_price"] = pair["last"]
        entry["bid_ask_spread"] = pair["bid_ask_spread_percentage"]
        entry["is_anomaly"] = pair["is_anomaly"]
        entry["last_traded_at"] = pair["last_traded_at"]
        entry["trade_url"] = pair["trade_url"]
        # Raises IndexError (as the original did) if the exchange is not cached.
        matching = [exchange for exchange in exchange_list if exchange["id"] == market_id]
        entry["exchange_rating"] = matching[0]["trust_score"]
        entry["coingecko_top_8"] = market_id in top_8_ids
        entries.append(entry)
    return None
def _percentage(part, whole) -> float:
    """Return part/whole as a percentage rounded to 2 decimals.

    A zero whole with a non-zero part counts as 100%; zero/zero is 0%.
    """
    if whole == 0:
        return 0 if part == 0 else 100
    # Round AFTER scaling: the original round(ratio, 2) * 100 only kept
    # whole-percent granularity and added float noise (e.g. 12.000000000000002).
    return round(part / whole * 100, 2)


def get_developer_report(coin_details: dict, coin_report: dict) -> None:
    """Fill coin_report['developer_data'] from the coin's repository stats.

    Silently skips coins whose developer data is missing or whose fetch
    failed with "Missing Authentication Token". Mutates coin_report in
    place; always returns None.
    """
    dev = coin_details["developer_data"]
    if dev is None:
        return None
    # A failed (unauthenticated) fetch leaves an error payload here.
    if dev.get("message") == "Missing Authentication Token":
        return None
    report = coin_report["developer_data"]
    general = dev["general"]["data"]
    report["repo_name"] = dev["name"]
    report["created_at"] = general["created_at"]
    report["description"] = general["description"]
    report["community"] = {
        "contributor_count": dev["contributors"]["total"],
        "star_count": general["stargazers_count"],
        "subscriber_count": general["subscribers_count"],
        "fork_count": general["forks"],
    }
    report["issues"] = {
        "open": dev["issues"]["open"],
        "total": dev["issues"]["total"],
        "open_percentage": _percentage(dev["issues"]["open"], dev["issues"]["total"]),
    }
    report["pull_requests"] = {
        "open": dev["pulls"]["open"],
        "total": dev["pulls"]["total"],
        "open_percentage": _percentage(dev["pulls"]["open"], dev["pulls"]["total"]),
    }
    weekly_totals = [week["total"] for week in dev["activity"]["data"]]
    # Guard against an empty activity window (original divided by zero).
    average_per_week = round(sum(weekly_totals) / len(weekly_totals), 2) if weekly_totals else 0
    report["commits"] = {
        "total": dev["commits"]["total"],
        "average_per_week": average_per_week,
        "last_commit_date": dev["commits"]["lastCommitData"]["commit"]["author"]["date"],
    }
    languages = dev["languages"]["data"]
    line_count = sum(languages.values())
    report["technical"] = {
        "line_count": line_count,
        "programming_languages_distribution": languages,
        "programming_languages_distribution_percentage": {
            language: _percentage(lines, line_count)
            for language, lines in languages.items()
        },
    }
    if "data" in dev["license"]:
        license_data = dev["license"]["data"]
        report["license"] = {
            "name": license_data["name"],
            "conditions": license_data["conditions"],
            "limitations": license_data["limitations"],
            "permissions": license_data["permissions"],
        }
    else:
        report["license"] = {}
    return None
def get_social_report(coin_details: dict, coin_report: dict) -> None:
    """Copy community/social metrics into coin_report['social_data']."""
    community = coin_details["community_data"]
    # Same key on both sides, so a simple field list suffices.
    for field in ("facebook_likes",
                  "twitter_followers",
                  "reddit_average_posts_48h",
                  "reddit_average_comments_48h",
                  "reddit_subscribers",
                  "reddit_accounts_active_48h",
                  "telegram_channel_user_count"):
        coin_report["social_data"][field] = community[field]
    return None
# TODO - Most likely possible to find an api to fetch data about wallets/addresses and their usage
def get_on_chain_report(coin_overall: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
    """Placeholder for on-chain metrics (wallet/address usage); not implemented yet."""
    pass
# TODO - Most likely possible only via manual labor
def get_team_quality_report(coin_overall: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
    """Placeholder for a team-quality assessment; not implemented yet."""
    pass
# TODO - Most likely possible only via manual labor
def get_coin_uniqueness_report(coin_overall: dict, coin_details: dict, exchange_list: list, coin_report: dict) -> None:
    """Placeholder for a coin-uniqueness assessment; not implemented yet."""
    pass
def load_report_template() -> dict:
    """Assemble the empty report skeleton from the JSON template files.

    Returns the top-level template dict with one nested template dict
    per report section.
    """
    coin_report = load_data("templates/report_overall.json", "json")
    # Each section lives in its own templates/report_<section>.json file.
    for section in ("links", "general", "developer_data", "onchain_data", "social_data"):
        coin_report[section] = load_data(f"templates/report_{section}.json", "json")
    return coin_report
def report() -> None:
    """Generate and save a JSON report for every cached coin.

    Reads cached market data plus the screening criteria, builds one
    report per coin from the JSON templates, and writes each to
    reports/<coin_id>_report.json.
    """
    import copy  # local import keeps the file's single star-import header intact

    coin_list = list(load_data("cache/coin_list.json", "json"))
    coins_details = list(load_data("cache/coins_details_data.json", "json"))
    exchange_list = list(load_data("cache/exchanges_data.json", "json"))
    criteria = load_data("config/criteria.yaml", "yaml")
    coin_report_template = load_report_template()
    for coin_details in coins_details:
        print(f"Generating report data for {coin_details['id']} coin.")
        # deepcopy: the original's shallow dict() copy shared the nested
        # template sections between iterations, so every coin mutated the
        # same 'links'/'general'/... dicts (latent aliasing bug).
        coin_report = copy.deepcopy(coin_report_template)
        coin_overall = next(coin for coin in coin_list if coin["id"] == coin_details["id"])
        get_top_level_report(coin_overall, coin_details, coin_report)
        get_link_report(coin_details, coin_report)
        get_general_report(coin_overall, coin_details, coin_report)
        get_exchange_report(criteria, coin_details, exchange_list, coin_report)
        get_developer_report(coin_details, coin_report)
        get_social_report(coin_details, coin_report)
        save_data(coin_report, f"reports/{coin_details['id']}_report.json", "json")
    return None
# Script entry point. (Stray dataset-delimiter residue that was fused onto
# the final line has been removed; it was not valid Python.)
if __name__ == '__main__':
    report()
import datetime
import re
from cifparser.errors import ConversionError
def str_to_stripped(s):
    """Return *s* with leading and trailing whitespace removed."""
    stripped = s.strip()
    return stripped
def str_to_flattened(s):
    """Collapse every run of whitespace in *s* to a single space."""
    words = s.split()
    return ' '.join(words)
def str_to_int(s):
    """Convert *s* to an int, raising ConversionError on failure."""
    try:
        return int(s)
    except (ValueError, TypeError):
        # Narrow catch: the original bare except also swallowed
        # KeyboardInterrupt/SystemExit. Message also said "float" (copy-paste).
        raise ConversionError("failed to convert {0} to int".format(s))
def str_to_bool(s):
    """Convert a yes/no style string to a bool; raise ConversionError otherwise."""
    lowered = s.lower()
    if lowered in ('true', 'yes', '1'):
        return True
    elif lowered in ('false', 'no', '0'):
        return False
    else:
        raise ConversionError("failed to convert {0} to bool".format(s))
def str_to_float(s):
    """Convert *s* to a float, raising ConversionError on failure."""
    try:
        return float(s)
    except (ValueError, TypeError):
        # Narrow catch: the original bare except also swallowed
        # KeyboardInterrupt/SystemExit.
        raise ConversionError("failed to convert {0} to float".format(s))
def str_to_timedelta(s):
    """Parse a duration like '5 minutes' or '3h' into a datetime.timedelta.

    The value must be a positive integer; recognized units range from
    microseconds to weeks.

    Raises:
        ConversionError: if the string does not parse or the units are
            unknown (the original silently returned None for unknown units).
    """
    s = s.strip()
    try:
        m = re.match(r'([1-9]\d*)\s*(.*)', s)
        if m is None:
            raise ValueError("{0} did not match regex".format(s))
        value = int(m.group(1))
        units = m.group(2).lower().strip()
        if units in ('us', 'micro', 'micros', 'microsecond', 'microseconds'):
            return datetime.timedelta(microseconds=value)
        if units in ('ms', 'milli', 'millis', 'millisecond', 'milliseconds'):
            return datetime.timedelta(milliseconds=value)
        if units in ('s', 'second', 'seconds'):
            return datetime.timedelta(seconds=value)
        if units in ('m', 'minute', 'minutes'):
            return datetime.timedelta(minutes=value)
        if units in ('h', 'hour', 'hours'):
            return datetime.timedelta(hours=value)
        if units in ('d', 'day', 'days'):
            return datetime.timedelta(days=value)
        if units in ('w', 'week', 'weeks'):
            return datetime.timedelta(weeks=value)
        # Unknown units: fail loudly instead of falling off the end
        # and returning None.
        raise ValueError("{0} has unrecognized units".format(s))
    except ValueError:
        raise ConversionError("failed to convert {0} to timedelta".format(s))
def str_to_size(s):
    """Parse a size like '10 kb' into a number of bytes (1024-based multiples).

    Raises:
        ConversionError: if the string does not parse or the units are
            unknown (the original silently returned None for unknown units).
    """
    s = s.strip()
    try:
        m = re.match(r'(0|[1-9]\d*)\s*(.*)', s)
        if m is None:
            raise ValueError("{0} did not match regex".format(s))
        value = int(m.group(1))
        units = m.group(2).lower().strip()
        # Binary multipliers, smallest to largest: bytes * 1024**power.
        for power, names in enumerate((
                ('b', 'byte', 'bytes'),
                ('kb', 'kilo', 'kilobyte', 'kilobytes'),
                ('mb', 'mega', 'megabyte', 'megabytes'),
                ('gb', 'giga', 'gigabyte', 'gigabytes'),
                ('tb', 'tera', 'terabyte', 'terabytes'),
                ('pb', 'peta', 'petabyte', 'petabytes'))):
            if units in names:
                return value * 1024 ** power
        # Unknown units: fail loudly instead of returning None.
        raise ValueError("{0} has unrecognized units".format(s))
    except ValueError:
        raise ConversionError("failed to convert {0} to size in bytes".format(s))
def str_to_percentage(s):
    """Convert a percentage string like '12.5%' into a fraction (0.125)."""
    s = s.strip()
    try:
        match = re.match(r'(0?\.\d+|[1-9]\d*\.\d+|\d+)\s*%', s)
        if match is None:
            raise Exception("{0} did not match regex".format(s))
        number = float(match.group(1))
        return number / 100.0
    except Exception as e:
        raise ConversionError("failed to convert {0} to percentage".format(s))
def str_to_throughput(s):
    """Parse a throughput like '1.5 Mbps' into bytes per second.

    NOTE(review): the unit table treats 'Kbps'/'Mbps'/... as *byte*
    multiples (1024-based) even though those abbreviations usually mean
    bits -- kept as-is for backward compatibility; confirm with callers.

    Raises:
        ConversionError: if the string does not parse or the units are
            unknown (the original silently returned None for unknown units;
            its error message also wrongly said "size in bytes").
    """
    s = s.strip()
    try:
        m = re.match(r'(0?\.\d+|[1-9]\d*\.\d+|\d+)\s*(.*)', s)
        if m is None:
            raise ValueError("{0} did not match regex".format(s))
        value = float(m.group(1))
        units = m.group(2).strip()
        # Multiplier is 1024.0**power for the matching row, smallest first.
        for power, names in enumerate((
                ('bps', 'Bps', 'bytes/s', 'bytes/sec', 'bytes/second'),
                ('Kbps', 'kilobytes/s', 'kilobytes/sec', 'kilobytes/second'),
                ('Mbps', 'megabytes/s', 'megabytes/sec', 'megabytes/second'),
                ('Gbps', 'gigabytes/s', 'gigabytes/sec', 'gigabytes/second'),
                ('Tbps', 'terabytes/s', 'terabytes/sec', 'terabytes/second'),
                ('Pbps', 'petabytes/s', 'petabytes/sec', 'petabytes/second'))):
            if units in names:
                return value * 1024.0 ** power
        # Unknown units: fail loudly instead of returning None.
        raise ValueError("{0} has unrecognized units".format(s))
    except ValueError:
        raise ConversionError("failed to convert {0} to throughput".format(s))
import datetime
import re
from cifparser.errors import ConversionError
def str_to_stripped(s):
return s.strip()
def str_to_flattened(s):
return ' '.join(s.split())
def str_to_int(s):
try:
return int(s)
except:
raise ConversionError("failed to convert {0} to float".format(s))
def str_to_bool(s):
s = s.lower()
if s in ('true', 'yes', '1'):
return True
if s in ('false', 'no', '0'):
return False
raise ConversionError("failed to convert {0} to bool".format(s))
def str_to_float(s):
try:
return float(s)
except:
raise ConversionError("failed to convert {0} to float".format(s))
def str_to_timedelta(s):
s = s.strip()
try:
m = re.match(r'([1-9]\d*)\s*(.*)', s)
if m is None:
raise Exception("{0} did not match regex".format(s))
value = int(m.group(1))
units = m.group(2).lower().strip()
if units in ('us', 'micro', 'micros', 'microsecond', 'microseconds'):
return datetime.timedelta(microseconds=value)
if units in ('ms', 'milli', 'millis', 'millisecond', 'milliseconds'):
return datetime.timedelta(milliseconds=value)
if units in ('s', 'second', 'seconds'):
return datetime.timedelta(seconds=value)
if units in ('m', 'minute', 'minutes'):
return datetime.timedelta(minutes=value)
if units in ('h', 'hour', 'hours'):
return datetime.timedelta(hours=value)
if units in ('d', 'day', 'days'):
return datetime.timedelta(days=value)
if units in ('w', 'week', 'weeks'):
return datetime.timedelta(weeks=value)
except Exception as e:
raise ConversionError("failed to convert {0} to timedelta".format(s))
def str_to_size(s):
s = s.strip()
try:
m = re.match(r'(0|[1-9]\d*)\s*(.*)', s)
if m is None:
raise Exception("{0} did not match regex".format(s))
value = int(m.group(1))
units = m.group(2).lower().strip()
if units in ('b', 'byte', 'bytes'):
return value
if units in ('kb', 'kilo', 'kilobyte', 'kilobytes'):
return value * 1024
if units in ('mb', 'mega', 'megabyte', 'megabytes'):
return value * 1024 * 1024
if units in ('gb', 'giga', 'gigabyte', 'gigabytes'):
return value * 1024 * 1024 * 1024
if units in ('tb', 'tera', 'terabyte', 'terabytes'):
return value * 1024 * 1024 * 1024 * 1024
if units in ('pb', 'peta', 'petabyte', 'petabytes'):
return value * 1024 * 1024 * 1024 * 1024 * 1024
except Exception as e:
raise ConversionError("failed to convert {0} to size in bytes".format(s))
def str_to_percentage(s):
s = s.strip()
try:
m = re.match(r'(0?\.\d+|[1-9]\d*\.\d+|\d+)\s*%', s)
if m is None:
raise Exception("{0} did not match regex".format(s))
return float(m.group(1)) / 100.0
except Exception as e:
raise ConversionError("failed to convert {0} to percentage".format(s))
def str_to_throughput(s):
s = s.strip()
try:
m = re.match(r'(0?\.\d+|[1-9]\d*\.\d+|\d+)\s*(.*)', s)
if m is None:
raise Exception("{0} did not match regex".format(s))
value = float(m.group(1))
units = m.group(2).strip()
if units in ('bps', 'Bps', 'bytes/s', 'bytes/sec', 'bytes/second'):
return value
if units in ('Kbps', 'kilobytes/s', 'kilobytes/sec', 'kilobytes/second'):
return value * 1024.0
if units in ('Mbps', 'megabytes/s', 'megabytes/sec', 'megabytes/second'):
return value * 1024.0 * 1024.0
if units in ('Gbps', 'gigabytes/s', 'gigabytes/sec', 'gigabytes/second'):
return value * 1024.0 * 1024.0 * 1024.0
if units in ('Tbps', 'terabytes/s', 'terabytes/sec', 'terabytes/second'):
return value * 1024.0 * 1024.0 * 1024.0 * 1024.0
if units in ('Pbps', 'petabytes/s', 'petabytes/sec', 'petabytes/second'):
return value * 1024.0 * 1024.0 * 1024.0 * 1024.0 * 1024.0
except Exception as e:
raise ConversionError("failed to convert {0} to size in bytes".format(s)) | 0.345547 | 0.215289 |
from __future__ import unicode_literals
from pynodebb.api import Resource
class User(Resource):
    """Operations against NodeBB's user API (create/update/delete/fetch)."""

    def create(self, username, **kwargs):
        """Creates a new NodeBB user.

        Args:
            username (str): A unique string used to identify the new user.
                If the username already exists, NodeBB will automatically
                append random numbers after `username` to ensure uniqueness.
            **kwargs: All other accepted user properties. You can find out
                what they are by referring to `updateProfile`.

        Returns:
            tuple: Tuple in the form (response_code, json_response)
        """
        kwargs.update({'username': username})
        return self.client.post('/api/v1/users', **kwargs)

    def _update(self, uid, endpoint, **kwargs):
        # Internal helper: PUT `kwargs` to `endpoint` on behalf of `uid`.
        kwargs.update({'_uid': uid})
        return self.client.put(endpoint, **kwargs)

    def update(self, uid, **kwargs):
        """Updates the user's NodeBB user properties.

        Accepted user properties can be found by referring to `updateProfile`.
        For a quick reference these are the accepted fields:
            username, email, fullname, website, location, birthday, signature

        Args:
            uid (str): The NodeBB uid for the user we are updating.
            **kwargs: A dictionary of user properties we are updating.

        Returns:
            tuple: Tuple in the form (response_code, json_response)
        """
        return self._update(uid, '/api/v1/users/%s' % uid, **kwargs)

    def update_settings(self, uid, **kwargs):
        """Updates the user's NodeBB settings.

        All settings can be updated. They can be found here:
        https://github.com/NodeBB/NodeBB/blob/master/src/user/settings.js#L102-L118

        Args:
            uid (str): The NodeBB uid for the user we are updating.
            **kwargs: A dictionary of settings we are updating.

        Returns:
            tuple: Tuple in the form (response_code, json_response)
        """
        return self._update(uid, '/api/v1/users/%s/settings' % uid, **{
            # NodeBB stores booleans as integers in their Redis instance.
            # .items() (not the Python-2-only .iteritems(), which raised
            # AttributeError on Python 3) works on both major versions.
            k: (int(v) if isinstance(v, bool) else v) for k, v in kwargs.items()
        })

    def delete(self, uid):
        """Removes the associated NodeBB user.

        Warning! This operation is irreversible. Note that if `uid` is None
        then, no requests will be made and a 404 will be returned.

        Args:
            uid (str): The NodeBB uid for the user we are deleting

        Returns:
            tuple: Tuple in the form (response_code, json_response)
        """
        return self.client.delete('/api/v1/users/%s' % uid, **{'_uid': uid})

    def change_password(self, uid, new, current=None):
        """Changes the user's password from `current` to `new`.

        If a `master_token` was generated then `current=None` is accepted.
        However if not, the `current` password is required.

        Args:
            uid (str): The NodeBB uid for the user we are changing the pw for.
            new (str): The new password we want to change to.
            current (Optional[str]): The current password we're changing from.

        Returns:
            int: The HTTP response code only (the json body is discarded;
                the original docstring wrongly claimed a tuple was returned).
        """
        payload = {'new': new, 'current': current, '_uid': uid}
        return self.client.put('/api/v1/users/%s/password' % uid, **payload)[0]

    def get(self, id_, is_username=False):
        """Retrieves the NodeBB user given the user's `id_`.

        Fetches for the entire NodeBB user object (only user properties)
        given the `id_`. The `id_` can be the user's uid or username. If
        the `id_` is expected to be a username, `is_username` must be set
        to `True`.

        Args:
            id_ (str): The NodeBB user's email or username.
            is_username (Optional[bool]): Whether or not the first argument
                is the user's username or not. Defaults to False.

        Returns:
            tuple: Tuple in the form (response_code, json_response)
        """
        return self.client.get(
            ('/api/user/%s' if is_username else '/api/user/uid/%s') % id_
        ) if id_ else (404, 'Not Found')
from pynodebb.api import Resource
class User(Resource):
def create(self, username, **kwargs):
"""Creates a new NodeBB user.
Args:
username (str): A unique string used to identify the new user.
If the username already exists, NodeBB will automatically
append random numbers after `username` to ensure uniqueness.
**kwargs: All other accepted user properties. You can find out
what they are by referring to `updateProfile`.
Returns:
tuple: Tuple in the form (response_code, json_response)
"""
kwargs.update({'username': username})
return self.client.post('/api/v1/users', **kwargs)
def _update(self, uid, endpoint, **kwargs):
kwargs.update({'_uid': uid})
return self.client.put(endpoint, **kwargs)
def update(self, uid, **kwargs):
"""Updates the user's NodeBB user properties.
Accepted user properties can be found by referring to `updateProfile`.
For a quick reference these are the accepted fields:
username, email, fullname, website, location, birthday, signature
Args:
uid (str): The NodeBB uid for the user we are updating.
**kwargs: A dictionary of user properties we are updating.
Returns:
tuple: Tuple in the form (response_code, json_response)
"""
return self._update(uid, '/api/v1/users/%s' % uid, **kwargs)
def update_settings(self, uid, **kwargs):
"""Updates the user's NodeBB settings.
All settings can be updated. They can be found here:
https://github.com/NodeBB/NodeBB/blob/master/src/user/settings.js#L102-L118
Args:
uid (str): The NodeBB uid for the user we are updating.
**kwargs: A dictionary of settings we are updating.
Returns:
tuple: Tuple in the form (response_code, json_response)
"""
return self._update(uid, '/api/v1/users/%s/settings' % uid, **{
# NodeBB stores booleans as integers in their Redis instance.
k: (int(v) if isinstance(v, bool) else v) for k, v in kwargs.iteritems()
})
def delete(self, uid):
"""Removes the associated NodeBB user.
Warning! This operation is irreversible. Note that if `uid` is None
then, no requests will be made and a 404 will be returned.
Args:
uid (str): The NodeBB uid for the user we are deleting
Returns:
tuple: Tuple in the form (response_code, json_response)
"""
return self.client.delete('/api/v1/users/%s' % uid, **{'_uid': uid})
def change_password(self, uid, new, current=None):
"""Changes the user's password from `current` to `new`.
If a `master_token` was generated then `current=None` is accepted. However
if not, the `current` password is required.
Args:
uid (str): The NodeBB uid for the user we are changing the pw for.
new (str): The new password we want to change to.
current (Optional[str]): The current password we're changing from.
Returns:
tuple: Tuple in the form (response_code, json_response)
"""
payload = {'new': new, 'current': current, '_uid': uid}
return self.client.put('/api/v1/users/%s/password' % uid, **payload)[0]
def get(self, id_, is_username=False):
"""Retrieves the NodeBB user given the user's `id_`.
Fetches for the entire NodeBB user object (only user properties) given the
`id_`. The `id_` can be the user's uid or username. If the `id_` is
expected to be a username, `is_username` must be set to `True`.
Args:
id_ (str): The NodeBB user's email or username.
is_username (Optional[bool]): Whether or not the first argument
is the user's username or not. Defaults to False.
Returns:
tuple: Tuple in the form (response_code, json_response)
"""
return self.client.get(
('/api/user/%s' if is_username else '/api/user/uid/%s') % id_
) if id_ else (404, 'Not Found') | 0.886592 | 0.273047 |
import re
import logging
import urllib.parse
import requests
from ox_herd.core.plugins import base
from ox_herd.core import ox_tasks
class SimpleTaskResult:
    """Container for a task's outcome.

    Clarifying the expected return values of a task makes it easier to
    display and inspect task results with other tools.
    """

    def __init__(self, return_value: str, full_text: str = None,
                 status_code: int = 0, reason: str = 'OK',
                 extras: dict = None):
        """Initializer.

        :param return_value: short string shown as the task status.

        :param full_text: full text of the result/response (typically the
            full HTTP response). Pass '' to save space; None reuses
            ``return_value``.

        :param status_code: HTTP status code; 0 for non-HTTP tasks.

        :param reason: HTTP reason string; 'NA' for non-HTTP tasks.

        :param extras: optional dict of additional data.
        """
        self.return_value = return_value
        if full_text is None:
            full_text = return_value
        self.full_text = full_text
        self.status_code = status_code
        self.reason = reason
        self.extras = extras or {}

    @classmethod
    def fields(cls) -> list:
        """Names of the main fields in an instance.

        Sub-classes can override to surface additional fields in to_dict.
        """
        _ = cls
        return ['return_value', 'full_text', 'status_code',
                'reason', 'extras']

    def to_dict(self) -> dict:
        """Return a dict mapping each name in self.fields() to its value."""
        result = {}
        for field_name in self.fields():
            result[field_name] = getattr(self, field_name)
        return result
class SimpleWebTask(ox_tasks.OxHerdTask, base.OxPluginComponent):
"""Generic command that can be sub-classed for automation.
"""
    def __init__(self, *args, base_url=None, base_port='', **kwargs):
        """Initializer.

        :arg base_url=None: String url for where your server lives (or
                            where the version you want to use lives).

        :arg base_port='': Optional port when it is not part of base_url.

        Remaining args/kwargs are forwarded to ox_tasks.OxHerdTask; when no
        task name is given, the class name is used as the default.
        """
        if not args and 'name' not in kwargs:
            kwargs['name'] = self.__class__.__name__
        ox_tasks.OxHerdTask.__init__(self, *args, **kwargs)
        # Never use base_url/base_port directly. Instead call
        # set_base_url and make_url so we handle ports correctly.
        self.__base_url = ''
        self.__base_port = None
        self.set_base_url(base_url, base_port)
def set_base_url(self, base_url: str, base_port: str = None):
"""Set the base_url (and optinally base_port).
:param base_url: String like http://foo or http://bar:9999
:param base_port=None: Optional base_port if not in base_url.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Set base URL and port to use in connections.
"""
self.__base_url = base_url if base_url else self.make_base_url()
if not self.__base_url:
return
my_base, my_port = urllib.parse.splitport(self.__base_url)
if my_port:
self.__base_url = my_base # take off port and store in base_port
if base_port:
raise ValueError('base_url=%s has port and base_port=%s' % (
self.__base_url, base_port))
base_port = my_port
self.__base_port = base_port
def make_url(self, path: str = '') -> str:
"""Make url with given path.
:param path='': Optional relative path.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: Full string URL.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Combine base URL and port set with set_base_url with path.
"""
result = self.__base_url
if not result:
logging.error('self.__dict__: %s', str(self.__dict__))
raise ValueError('Must have base_url set to call make_url')
if self.__base_port:
result += ':' + str(self.__base_port)
result += '/' + path.lstrip('/')
return result
    @classmethod
    def make_base_url(cls) -> str:
        """Return string indicating base URL or None if not known.

        Sub-classes can override if they know a good default. Otherwise,
        callers should use set_base_url when they have the url to use.
        """
        return None
def get_login_info(self) -> (str, str):
"""Return string login field and login value
Sub-classes may want to override.
"""
return 'username', getattr(self, 'username', 'test_user')
    def get_password(self, login_name: str) -> str:
        """Return the password for `login_name` via get_secret('test_passwords')."""
        return self.get_secret(login_name, 'test_passwords')
    @classmethod
    def get_login_route(cls) -> str:
        "Return relative path to the login route (combined via make_url)."
        _ = cls
        return '/login'
@classmethod
def get_secret(cls, name: str, category: str = 'root') -> str:
"""Lookup a secret like a password or something.
:param name: Name of secret to lookup
:param category='root': Category of secret.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: A string reprsenting the secret you want to get.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Provide a way to lookup secrets like passwords, etc.
"""
raise NotImplementedError
def setup_session(self):
"""Setup a requests.session and return it.
This sets up a session so we are logged in to whatever we get
from self.make_url(self.get_login_route()) using the login from
get_login_info().
"""
session = requests.session()
my_url = self.make_url(self.get_login_route())
csrf_field, csrf = None, None
try:
csrf_field, csrf = self.get_csrf_from_form(session, my_url)
except Exception as problem:
logging.error('Failed to get csrf due to exception: %s', str(
problem))
logging.error('Maybe override get_csrf_from_form?')
raise
login_field, login_name = self.get_login_info()
password = <PASSWORD>(login_name)
data = {login_field: login_name, 'password': password}
if csrf:
data[csrf_field] = csrf
post_resp = session.post(my_url, data, verify=False)
if post_resp.status_code != 200:
raise ValueError('Got unexpected status/reason: %s/%s in login' % (
post_resp.status_code, post_resp.reason))
return session
def do_main(self, session) -> SimpleTaskResult:
"""Sub-classes should override this to do the main work.
Sub-classes should override this to return an instance of SimpleTaskResult
describing the result of running the task. See docs for SimpleTaskResult
for more details.
"""
raise NotImplementedError
@classmethod
def main_call(cls, ox_herd_task):
logging.info('Starting main_call for %s', cls.__name__)
session = ox_herd_task.setup_session()
result = ox_herd_task.do_main(session)
msg = 'Go return_value %s' % (result.return_value)
cls.note_comment(ox_herd_task, msg)
return result
@classmethod
def note_comment(cls, ox_herd_task: ox_tasks.OxHerdTask,
comment: str):
"""Note a comment for a task we ran.
:param ox_herd_task: Task we ran.
:param comment: Comment to note.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Mainly a placeholder in case sub-classes want
to do some kind of logging for comments.
"""
_ = cls
logging.info('Comment for task %s: %s', ox_herd_task.name,
comment)
@staticmethod
def get_csrf_from_form(
session, url: str, csrf_field: str = 'csrf_token',
csrf_re: str = None) -> (str, str):
"""Do a get request for the given url and extract CSRF token.
:param session: Session we have to the web site.
:param url: String URL for form with CSRF token.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: String name for CSRF field and string for CSRF token.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Flask WTForms provides a CSRF token to prevent CSRF
attacks. We need to hit the url with a GET request to
get the csrf_token and include that as a parameter of
our POST request. This function gets the csrf_token.
"""
result = session.get(url)
csrf_re = csrf_re if csrf_re else ' *'.join([
'id="%s"' % csrf_field, 'name="%s"' % csrf_field,
'type="hidden"', 'value="(?P<csrf>[^"]*)">'])
if isinstance(csrf_re, str):
csrf_re = re.compile(csrf_re)
match = csrf_re.search(result.text)
if not match:
raise ValueError('Could not extract csrf from url "%s"' % url)
return csrf_field, match.group('csrf')
def raise_on_bad_status(self, result):
"""Raise ValueError if http response looks like an error.
:param result: Response from an HTTP command such as what
do_main might report.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: False if no problems, otherwise raises an exception.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Raise an exception if result looks like an error.
"""
if result.status_code != 200:
msg = 'Problem in task %s with reason: "%s"' % (
self.__class__.__name__, result.reason)
logging.error(msg)
raise ValueError(msg)
if ' error' in result.text.lower():
self.note_comment(self, 'Saw error in result: ' + str(result))
if isinstance(result.reason, str):
my_reason = result.reason
else:
my_reason = result.reason.text
msg = 'Saw "error" in result for task %s: "%s"' % (
self.__class__.__name__, my_reason)
logging.error(msg)
raise ValueError(msg)
return False | ox_herd/ui/flask_web_ui/ox_herd/web_tasks.py | import re
import logging
import urllib.parse
import requests
from ox_herd.core.plugins import base
from ox_herd.core import ox_tasks
class SimpleTaskResult:
"""Class to hold task result.
Clairfying the expected return values for a task makes it easier to display
and inspect task results with other tools.
"""
def __init__(self, return_value: str, full_text: str = None,
status_code: int = 0, reason: str = 'OK',
extras: dict = None):
"""Initializer.
:param return_value:str: String return value to display as task
status.
:param full_text:str: Full text of result/response. This is typically
the full response to an HTTP request. You can
provide '' if you want to save space. If you
provide None, then we use return_value.
:param status_code:int: Status code from an HTTP request. Use 0 if
was not an HTTP request.
:param reason:str='OK': String reason provided for HTTP response. Use
'NA' if not an HTTP response.
:param extras=None: Optional dict of additional data.
"""
self.return_value = return_value
self.full_text = full_text if full_text is not None else return_value
self.status_code = status_code
self.reason = reason
self.extras = extras if extras else {}
@classmethod
def fields(cls) -> list:
"""Return list of strings describing main fields in self.
Sub-classes can override if they want additional fields to showup
in to_dict.
"""
_ = cls
return ['return_value', 'full_text', 'status_code',
'reason', 'extras']
def to_dict(self) -> dict:
"Return dict with data in self.fields()"
return {n: getattr(self, n) for n in self.fields()}
class SimpleWebTask(ox_tasks.OxHerdTask, base.OxPluginComponent):
"""Generic command that can be sub-classed for automation.
"""
def __init__(self, *args, base_url=None, base_port='', **kwargs):
"""Initializer.
:arg base_url=None: String url for where your server lives (or
where the version you want to use lives).
"""
if not args and 'name' not in kwargs:
kwargs['name'] = self.__class__.__name__
ox_tasks.OxHerdTask.__init__(self, *args, **kwargs)
# Never use base_url/base_port directly. Instead call
# set_base_url and make_url so we handle ports correctly.
self.__base_url = ''
self.__base_port = None
self.set_base_url(base_url, base_port)
def set_base_url(self, base_url: str, base_port: str = None):
"""Set the base_url (and optinally base_port).
:param base_url: String like http://foo or http://bar:9999
:param base_port=None: Optional base_port if not in base_url.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Set base URL and port to use in connections.
"""
self.__base_url = base_url if base_url else self.make_base_url()
if not self.__base_url:
return
my_base, my_port = urllib.parse.splitport(self.__base_url)
if my_port:
self.__base_url = my_base # take off port and store in base_port
if base_port:
raise ValueError('base_url=%s has port and base_port=%s' % (
self.__base_url, base_port))
base_port = my_port
self.__base_port = base_port
def make_url(self, path: str = '') -> str:
"""Make url with given path.
:param path='': Optional relative path.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: Full string URL.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Combine base URL and port set with set_base_url with path.
"""
result = self.__base_url
if not result:
logging.error('self.__dict__: %s', str(self.__dict__))
raise ValueError('Must have base_url set to call make_url')
if self.__base_port:
result += ':' + str(self.__base_port)
result += '/' + path.lstrip('/')
return result
@classmethod
def make_base_url(cls) -> str:
"""Return string indicating base URL or None if not known.
Sub-classes can override if they know a good default. Otherwise,
should call set_base_url when they have the url to use.
"""
return None
def get_login_info(self) -> (str, str):
"""Return string login field and login value
Sub-classes may want to override.
"""
return 'username', getattr(self, 'username', 'test_user')
def get_password(self, login_name: str) -> str:
"Get password for given login name"
return self.get_secret(login_name, 'test_passwords')
@classmethod
def get_login_route(cls) -> str:
"Return path to login route."
_ = cls
return '/login'
@classmethod
def get_secret(cls, name: str, category: str = 'root') -> str:
"""Lookup a secret like a password or something.
:param name: Name of secret to lookup
:param category='root': Category of secret.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: A string reprsenting the secret you want to get.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Provide a way to lookup secrets like passwords, etc.
"""
raise NotImplementedError
def setup_session(self):
"""Setup a requests.session and return it.
This sets up a session so we are logged in to whatever we get
from self.make_url(self.get_login_route()) using the login from
get_login_info().
"""
session = requests.session()
my_url = self.make_url(self.get_login_route())
csrf_field, csrf = None, None
try:
csrf_field, csrf = self.get_csrf_from_form(session, my_url)
except Exception as problem:
logging.error('Failed to get csrf due to exception: %s', str(
problem))
logging.error('Maybe override get_csrf_from_form?')
raise
login_field, login_name = self.get_login_info()
password = <PASSWORD>(login_name)
data = {login_field: login_name, 'password': password}
if csrf:
data[csrf_field] = csrf
post_resp = session.post(my_url, data, verify=False)
if post_resp.status_code != 200:
raise ValueError('Got unexpected status/reason: %s/%s in login' % (
post_resp.status_code, post_resp.reason))
return session
def do_main(self, session) -> SimpleTaskResult:
"""Sub-classes should override this to do the main work.
Sub-classes should override this to return an instance of SimpleTaskResult
describing the result of running the task. See docs for SimpleTaskResult
for more details.
"""
raise NotImplementedError
@classmethod
def main_call(cls, ox_herd_task):
logging.info('Starting main_call for %s', cls.__name__)
session = ox_herd_task.setup_session()
result = ox_herd_task.do_main(session)
msg = 'Go return_value %s' % (result.return_value)
cls.note_comment(ox_herd_task, msg)
return result
@classmethod
def note_comment(cls, ox_herd_task: ox_tasks.OxHerdTask,
comment: str):
"""Note a comment for a task we ran.
:param ox_herd_task: Task we ran.
:param comment: Comment to note.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Mainly a placeholder in case sub-classes want
to do some kind of logging for comments.
"""
_ = cls
logging.info('Comment for task %s: %s', ox_herd_task.name,
comment)
@staticmethod
def get_csrf_from_form(
session, url: str, csrf_field: str = 'csrf_token',
csrf_re: str = None) -> (str, str):
"""Do a get request for the given url and extract CSRF token.
:param session: Session we have to the web site.
:param url: String URL for form with CSRF token.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: String name for CSRF field and string for CSRF token.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Flask WTForms provides a CSRF token to prevent CSRF
attacks. We need to hit the url with a GET request to
get the csrf_token and include that as a parameter of
our POST request. This function gets the csrf_token.
"""
result = session.get(url)
csrf_re = csrf_re if csrf_re else ' *'.join([
'id="%s"' % csrf_field, 'name="%s"' % csrf_field,
'type="hidden"', 'value="(?P<csrf>[^"]*)">'])
if isinstance(csrf_re, str):
csrf_re = re.compile(csrf_re)
match = csrf_re.search(result.text)
if not match:
raise ValueError('Could not extract csrf from url "%s"' % url)
return csrf_field, match.group('csrf')
def raise_on_bad_status(self, result):
"""Raise ValueError if http response looks like an error.
:param result: Response from an HTTP command such as what
do_main might report.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: False if no problems, otherwise raises an exception.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Raise an exception if result looks like an error.
"""
if result.status_code != 200:
msg = 'Problem in task %s with reason: "%s"' % (
self.__class__.__name__, result.reason)
logging.error(msg)
raise ValueError(msg)
if ' error' in result.text.lower():
self.note_comment(self, 'Saw error in result: ' + str(result))
if isinstance(result.reason, str):
my_reason = result.reason
else:
my_reason = result.reason.text
msg = 'Saw "error" in result for task %s: "%s"' % (
self.__class__.__name__, my_reason)
logging.error(msg)
raise ValueError(msg)
return False | 0.733833 | 0.171789 |
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class NatData(Entity):
    """
    NAT statistics
    .. attribute:: ip_nat_statistics
    Global NAT statistics
    **type**\: :py:class:`IpNatStatistics <ydk.models.cisco_ios_xe.Cisco_IOS_XE_nat_oper.NatData.IpNatStatistics>`
    **presence node**\: True
    .. attribute:: ip_nat_translation
    IP NAT translations
    **type**\: list of :py:class:`IpNatTranslation <ydk.models.cisco_ios_xe.Cisco_IOS_XE_nat_oper.NatData.IpNatTranslation>`
    """
    # NOTE: auto-generated YDK binding for the Cisco-IOS-XE-nat-oper YANG
    # model; the string literals below are model identifiers and must not
    # be edited by hand.
    _prefix = 'nat-ios-xe-oper'      # YANG module prefix
    _revision = '2017-11-01'         # YANG model revision date
    def __init__(self):
        """Initialize the top-level nat-data entity and register its children."""
        super(NatData, self).__init__()
        self._top_entity = None
        self.yang_name = "nat-data"
        self.yang_parent_name = "Cisco-IOS-XE-nat-oper"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # Map YANG child names to (python attribute name, binding class).
        self._child_container_classes = OrderedDict([("ip-nat-statistics", ("ip_nat_statistics", NatData.IpNatStatistics))])
        self._child_list_classes = OrderedDict([("ip-nat-translation", ("ip_nat_translation", NatData.IpNatTranslation))])
        self._leafs = OrderedDict()  # nat-data has no direct leaf nodes
        self.ip_nat_statistics = None  # presence container (see class docstring)
        self._children_name_map["ip_nat_statistics"] = "ip-nat-statistics"
        self._children_yang_names.add("ip-nat-statistics")
        self.ip_nat_translation = YList(self)
        self._segment_path = lambda: "Cisco-IOS-XE-nat-oper:nat-data"
    def __setattr__(self, name, value):
        # Delegate attribute assignment to the YDK runtime validator.
        self._perform_setattr(NatData, [], name, value)
class IpNatStatistics(Entity):
    """
    Global NAT statistics
    .. attribute:: initialized
    Indicates if the NAT feature has been initialized
    **type**\: bool
    .. attribute:: entries
    Total translations
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: statics
    Total static translations
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: statics_sorted
    Sorted static translations by domain
    **type**\: list of int
    **range:** 0..18446744073709551615
    .. attribute:: flows
    Total flows
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: insides
    Number of inside interfaces
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: outsides
    Number of outside interfaces
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: entry_timeouts
    Number of entries which timed out
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: hits
    Successful searches with matching NAT session
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: misses
    Unsuccessful searches without matching NAT session
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: interrupt_switched
    Translated in interrupt switching
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: packets_punted
    Packets punted to process
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: frag_pak_count
    Counter for saved fragment packets
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: pool_stats_drop
    Dropped pool stats from platform
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: mapping_stats_drop
    Dropped mapping stats from platform
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: portlist_req_fail
    Counter for port block alloc req fails
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: ipalias_add_fail
    Counter for add ipalias fails drops
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: limit_entry_add_fail
    Counter for add limit\_entry fails drops
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: in2out_drops
    Counter for NAT inside\->outside drops
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: out2in_drops
    Counter for NAT outside\->inside drops
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: mib_addr_binds
    MIB counter for address binds
    **type**\: int
    **range:** 0..4294967295
    .. attribute:: mib_addport_binds
    MIB counter for address port binds
    **type**\: int
    **range:** 0..4294967295
    This class is a :ref:`presence class<presence-class>`
    """
    # NOTE: auto-generated YDK binding class; do not edit literals by hand.
    _prefix = 'nat-ios-xe-oper'      # YANG module prefix
    _revision = '2017-11-01'         # YANG model revision date
    def __init__(self):
        """Initialize the ip-nat-statistics presence container and its leafs."""
        super(NatData.IpNatStatistics, self).__init__()
        self.yang_name = "ip-nat-statistics"
        self.yang_parent_name = "nat-data"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])  # no child containers
        self._child_list_classes = OrderedDict([])       # no child lists
        self.is_presence_container = True
        # Leaf registry: python attribute name -> typed YANG leaf descriptor.
        self._leafs = OrderedDict([
            ('initialized', YLeaf(YType.boolean, 'initialized')),
            ('entries', YLeaf(YType.uint64, 'entries')),
            ('statics', YLeaf(YType.uint64, 'statics')),
            ('statics_sorted', YLeafList(YType.uint64, 'statics-sorted')),
            ('flows', YLeaf(YType.uint64, 'flows')),
            ('insides', YLeaf(YType.uint64, 'insides')),
            ('outsides', YLeaf(YType.uint64, 'outsides')),
            ('entry_timeouts', YLeaf(YType.uint64, 'entry-timeouts')),
            ('hits', YLeaf(YType.uint64, 'hits')),
            ('misses', YLeaf(YType.uint64, 'misses')),
            ('interrupt_switched', YLeaf(YType.uint64, 'interrupt-switched')),
            ('packets_punted', YLeaf(YType.uint64, 'packets-punted')),
            ('frag_pak_count', YLeaf(YType.uint64, 'frag-pak-count')),
            ('pool_stats_drop', YLeaf(YType.uint64, 'pool-stats-drop')),
            ('mapping_stats_drop', YLeaf(YType.uint64, 'mapping-stats-drop')),
            ('portlist_req_fail', YLeaf(YType.uint64, 'portlist-req-fail')),
            ('ipalias_add_fail', YLeaf(YType.uint64, 'ipalias-add-fail')),
            ('limit_entry_add_fail', YLeaf(YType.uint64, 'limit-entry-add-fail')),
            ('in2out_drops', YLeaf(YType.uint64, 'in2out-drops')),
            ('out2in_drops', YLeaf(YType.uint64, 'out2in-drops')),
            ('mib_addr_binds', YLeaf(YType.uint32, 'mib-addr-binds')),
            ('mib_addport_binds', YLeaf(YType.uint32, 'mib-addport-binds')),
        ])
        # Leaf values start unset (None); see class docstring for semantics.
        self.initialized = None
        self.entries = None
        self.statics = None
        self.statics_sorted = []
        self.flows = None
        self.insides = None
        self.outsides = None
        self.entry_timeouts = None
        self.hits = None
        self.misses = None
        self.interrupt_switched = None
        self.packets_punted = None
        self.frag_pak_count = None
        self.pool_stats_drop = None
        self.mapping_stats_drop = None
        self.portlist_req_fail = None
        self.ipalias_add_fail = None
        self.limit_entry_add_fail = None
        self.in2out_drops = None
        self.out2in_drops = None
        self.mib_addr_binds = None
        self.mib_addport_binds = None
        self._segment_path = lambda: "ip-nat-statistics"
        self._absolute_path = lambda: "Cisco-IOS-XE-nat-oper:nat-data/%s" % self._segment_path()
    def __setattr__(self, name, value):
        # Delegate attribute assignment to the YDK runtime validator.
        self._perform_setattr(NatData.IpNatStatistics, ['initialized', 'entries', 'statics', 'statics_sorted', 'flows', 'insides', 'outsides', 'entry_timeouts', 'hits', 'misses', 'interrupt_switched', 'packets_punted', 'frag_pak_count', 'pool_stats_drop', 'mapping_stats_drop', 'portlist_req_fail', 'ipalias_add_fail', 'limit_entry_add_fail', 'in2out_drops', 'out2in_drops', 'mib_addr_binds', 'mib_addport_binds'], name, value)
class IpNatTranslation(Entity):
    """
    IP NAT translations
    .. attribute:: inside_local_addr (key)
    Inside local address
    **type**\: str
    **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
    .. attribute:: outside_local_addr (key)
    Outside local address
    **type**\: str
    **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
    .. attribute:: inside_local_port (key)
    Inside local port
    **type**\: int
    **range:** 0..65535
    .. attribute:: outside_local_port (key)
    Outside local port
    **type**\: int
    **range:** 0..65535
    .. attribute:: vrfid (key)
    VRF ID
    **type**\: int
    **range:** 0..65535
    .. attribute:: protocol (key)
    Protocol
    **type**\: int
    **range:** 0..255
    .. attribute:: inside_global_addr
    Inside global address
    **type**\: str
    **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
    .. attribute:: outside_global_addr
    Outside global address
    **type**\: str
    **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
    .. attribute:: inside_global_port
    Inside global port
    **type**\: int
    **range:** 0..65535
    .. attribute:: outside_global_port
    Outside global port
    **type**\: int
    **range:** 0..65535
    .. attribute:: flags
    Translation flags
    **type**\: int
    **range:** 0..4294967295
    .. attribute:: application_type
    Application type
    **type**\: int
    **range:** 0..255
    """
    # NOTE: auto-generated YDK binding class; do not edit literals by hand.
    _prefix = 'nat-ios-xe-oper'      # YANG module prefix
    _revision = '2017-11-01'         # YANG model revision date
    def __init__(self):
        """Initialize an ip-nat-translation list entry and its leafs."""
        super(NatData.IpNatTranslation, self).__init__()
        self.yang_name = "ip-nat-translation"
        self.yang_parent_name = "nat-data"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        # YANG list keys, in model order; they form the segment-path predicate.
        self.ylist_key_names = ['inside_local_addr','outside_local_addr','inside_local_port','outside_local_port','vrfid','protocol']
        self._child_container_classes = OrderedDict([])  # no child containers
        self._child_list_classes = OrderedDict([])       # no child lists
        # Leaf registry: python attribute name -> typed YANG leaf descriptor.
        self._leafs = OrderedDict([
            ('inside_local_addr', YLeaf(YType.str, 'inside-local-addr')),
            ('outside_local_addr', YLeaf(YType.str, 'outside-local-addr')),
            ('inside_local_port', YLeaf(YType.uint16, 'inside-local-port')),
            ('outside_local_port', YLeaf(YType.uint16, 'outside-local-port')),
            ('vrfid', YLeaf(YType.uint16, 'vrfid')),
            ('protocol', YLeaf(YType.uint8, 'protocol')),
            ('inside_global_addr', YLeaf(YType.str, 'inside-global-addr')),
            ('outside_global_addr', YLeaf(YType.str, 'outside-global-addr')),
            ('inside_global_port', YLeaf(YType.uint16, 'inside-global-port')),
            ('outside_global_port', YLeaf(YType.uint16, 'outside-global-port')),
            ('flags', YLeaf(YType.uint32, 'flags')),
            ('application_type', YLeaf(YType.uint8, 'application-type')),
        ])
        # Leaf values start unset (None); see class docstring for semantics.
        self.inside_local_addr = None
        self.outside_local_addr = None
        self.inside_local_port = None
        self.outside_local_port = None
        self.vrfid = None
        self.protocol = None
        self.inside_global_addr = None
        self.outside_global_addr = None
        self.inside_global_port = None
        self.outside_global_port = None
        self.flags = None
        self.application_type = None
        # Segment path embeds all six key leafs as XPath-style predicates.
        self._segment_path = lambda: "ip-nat-translation" + "[inside-local-addr='" + str(self.inside_local_addr) + "']" + "[outside-local-addr='" + str(self.outside_local_addr) + "']" + "[inside-local-port='" + str(self.inside_local_port) + "']" + "[outside-local-port='" + str(self.outside_local_port) + "']" + "[vrfid='" + str(self.vrfid) + "']" + "[protocol='" + str(self.protocol) + "']"
        self._absolute_path = lambda: "Cisco-IOS-XE-nat-oper:nat-data/%s" % self._segment_path()
    def __setattr__(self, name, value):
        # Delegate attribute assignment to the YDK runtime validator.
        self._perform_setattr(NatData.IpNatTranslation, ['inside_local_addr', 'outside_local_addr', 'inside_local_port', 'outside_local_port', 'vrfid', 'protocol', 'inside_global_addr', 'outside_global_addr', 'inside_global_port', 'outside_global_port', 'flags', 'application_type'], name, value)
def clone_ptr(self):
    """Create a fresh top-level ``NatData`` entity, cache it, and return it.

    :return: A new ``NatData`` instance stored in ``self._top_entity``.

    NOTE(fix): the original generated line was corrupted by extraction
    residue appended after ``return self._top_entity`` (a repo path and a
    stray import fused onto the same line), which made the statement a
    syntax error; that residue is removed here.
    """
    self._top_entity = NatData()
    return self._top_entity
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class NatData(Entity):
"""
NAT statistics
.. attribute:: ip_nat_statistics
Global NAT statistics
**type**\: :py:class:`IpNatStatistics <ydk.models.cisco_ios_xe.Cisco_IOS_XE_nat_oper.NatData.IpNatStatistics>`
**presence node**\: True
.. attribute:: ip_nat_translation
IP NAT translations
**type**\: list of :py:class:`IpNatTranslation <ydk.models.cisco_ios_xe.Cisco_IOS_XE_nat_oper.NatData.IpNatTranslation>`
"""
_prefix = 'nat-ios-xe-oper'
_revision = '2017-11-01'
def __init__(self):
super(NatData, self).__init__()
self._top_entity = None
self.yang_name = "nat-data"
self.yang_parent_name = "Cisco-IOS-XE-nat-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("ip-nat-statistics", ("ip_nat_statistics", NatData.IpNatStatistics))])
self._child_list_classes = OrderedDict([("ip-nat-translation", ("ip_nat_translation", NatData.IpNatTranslation))])
self._leafs = OrderedDict()
self.ip_nat_statistics = None
self._children_name_map["ip_nat_statistics"] = "ip-nat-statistics"
self._children_yang_names.add("ip-nat-statistics")
self.ip_nat_translation = YList(self)
self._segment_path = lambda: "Cisco-IOS-XE-nat-oper:nat-data"
def __setattr__(self, name, value):
self._perform_setattr(NatData, [], name, value)
class IpNatStatistics(Entity):
"""
Global NAT statistics
.. attribute:: initialized
Indicates if the NAT feature has been initialized
**type**\: bool
.. attribute:: entries
Total translations
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: statics
Total static translations
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: statics_sorted
Sorted static translations by domain
**type**\: list of int
**range:** 0..18446744073709551615
.. attribute:: flows
Total flows
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: insides
Number of inside interfaces
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: outsides
Number of outside interfaces
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: entry_timeouts
Number of entries which timed out
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: hits
Successful searches with matching NAT session
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: misses
Unsuccessful searches without matching NAT session
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: interrupt_switched
Translated in interrupt switching
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: packets_punted
Packets punted to process
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: frag_pak_count
Counter for saved fragment packets
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: pool_stats_drop
Dropped pool stats from platform
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: mapping_stats_drop
Dropped mapping stats from platform
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: portlist_req_fail
Counter for port block alloc req fails
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: ipalias_add_fail
Counter for add ipalias fails drops
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: limit_entry_add_fail
Counter for add limit\_entry fails drops
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: in2out_drops
Counter for NAT inside\->outside drops
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: out2in_drops
Counter for NAT outside\->inside drops
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: mib_addr_binds
MIB counter for address binds
**type**\: int
**range:** 0..4294967295
.. attribute:: mib_addport_binds
MIB counter for address port binds
**type**\: int
**range:** 0..4294967295
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'nat-ios-xe-oper'
_revision = '2017-11-01'
def __init__(self):
super(NatData.IpNatStatistics, self).__init__()
self.yang_name = "ip-nat-statistics"
self.yang_parent_name = "nat-data"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self.is_presence_container = True
self._leafs = OrderedDict([
('initialized', YLeaf(YType.boolean, 'initialized')),
('entries', YLeaf(YType.uint64, 'entries')),
('statics', YLeaf(YType.uint64, 'statics')),
('statics_sorted', YLeafList(YType.uint64, 'statics-sorted')),
('flows', YLeaf(YType.uint64, 'flows')),
('insides', YLeaf(YType.uint64, 'insides')),
('outsides', YLeaf(YType.uint64, 'outsides')),
('entry_timeouts', YLeaf(YType.uint64, 'entry-timeouts')),
('hits', YLeaf(YType.uint64, 'hits')),
('misses', YLeaf(YType.uint64, 'misses')),
('interrupt_switched', YLeaf(YType.uint64, 'interrupt-switched')),
('packets_punted', YLeaf(YType.uint64, 'packets-punted')),
('frag_pak_count', YLeaf(YType.uint64, 'frag-pak-count')),
('pool_stats_drop', YLeaf(YType.uint64, 'pool-stats-drop')),
('mapping_stats_drop', YLeaf(YType.uint64, 'mapping-stats-drop')),
('portlist_req_fail', YLeaf(YType.uint64, 'portlist-req-fail')),
('ipalias_add_fail', YLeaf(YType.uint64, 'ipalias-add-fail')),
('limit_entry_add_fail', YLeaf(YType.uint64, 'limit-entry-add-fail')),
('in2out_drops', YLeaf(YType.uint64, 'in2out-drops')),
('out2in_drops', YLeaf(YType.uint64, 'out2in-drops')),
('mib_addr_binds', YLeaf(YType.uint32, 'mib-addr-binds')),
('mib_addport_binds', YLeaf(YType.uint32, 'mib-addport-binds')),
])
self.initialized = None
self.entries = None
self.statics = None
self.statics_sorted = []
self.flows = None
self.insides = None
self.outsides = None
self.entry_timeouts = None
self.hits = None
self.misses = None
self.interrupt_switched = None
self.packets_punted = None
self.frag_pak_count = None
self.pool_stats_drop = None
self.mapping_stats_drop = None
self.portlist_req_fail = None
self.ipalias_add_fail = None
self.limit_entry_add_fail = None
self.in2out_drops = None
self.out2in_drops = None
self.mib_addr_binds = None
self.mib_addport_binds = None
self._segment_path = lambda: "ip-nat-statistics"
self._absolute_path = lambda: "Cisco-IOS-XE-nat-oper:nat-data/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(NatData.IpNatStatistics, ['initialized', 'entries', 'statics', 'statics_sorted', 'flows', 'insides', 'outsides', 'entry_timeouts', 'hits', 'misses', 'interrupt_switched', 'packets_punted', 'frag_pak_count', 'pool_stats_drop', 'mapping_stats_drop', 'portlist_req_fail', 'ipalias_add_fail', 'limit_entry_add_fail', 'in2out_drops', 'out2in_drops', 'mib_addr_binds', 'mib_addport_binds'], name, value)
class IpNatTranslation(Entity):
"""
IP NAT translations
.. attribute:: inside_local_addr (key)
Inside local address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: outside_local_addr (key)
Outside local address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: inside_local_port (key)
Inside local port
**type**\: int
**range:** 0..65535
.. attribute:: outside_local_port (key)
Outside local port
**type**\: int
**range:** 0..65535
.. attribute:: vrfid (key)
VRF ID
**type**\: int
**range:** 0..65535
.. attribute:: protocol (key)
Protocol
**type**\: int
**range:** 0..255
.. attribute:: inside_global_addr
Inside global address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: outside_global_addr
Outside global address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: inside_global_port
Inside global port
**type**\: int
**range:** 0..65535
.. attribute:: outside_global_port
Outside global port
**type**\: int
**range:** 0..65535
.. attribute:: flags
Translation flags
**type**\: int
**range:** 0..4294967295
.. attribute:: application_type
Application type
**type**\: int
**range:** 0..255
"""
_prefix = 'nat-ios-xe-oper'
_revision = '2017-11-01'
def __init__(self):
super(NatData.IpNatTranslation, self).__init__()
self.yang_name = "ip-nat-translation"
self.yang_parent_name = "nat-data"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['inside_local_addr','outside_local_addr','inside_local_port','outside_local_port','vrfid','protocol']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('inside_local_addr', YLeaf(YType.str, 'inside-local-addr')),
('outside_local_addr', YLeaf(YType.str, 'outside-local-addr')),
('inside_local_port', YLeaf(YType.uint16, 'inside-local-port')),
('outside_local_port', YLeaf(YType.uint16, 'outside-local-port')),
('vrfid', YLeaf(YType.uint16, 'vrfid')),
('protocol', YLeaf(YType.uint8, 'protocol')),
('inside_global_addr', YLeaf(YType.str, 'inside-global-addr')),
('outside_global_addr', YLeaf(YType.str, 'outside-global-addr')),
('inside_global_port', YLeaf(YType.uint16, 'inside-global-port')),
('outside_global_port', YLeaf(YType.uint16, 'outside-global-port')),
('flags', YLeaf(YType.uint32, 'flags')),
('application_type', YLeaf(YType.uint8, 'application-type')),
])
self.inside_local_addr = None
self.outside_local_addr = None
self.inside_local_port = None
self.outside_local_port = None
self.vrfid = None
self.protocol = None
self.inside_global_addr = None
self.outside_global_addr = None
self.inside_global_port = None
self.outside_global_port = None
self.flags = None
self.application_type = None
self._segment_path = lambda: "ip-nat-translation" + "[inside-local-addr='" + str(self.inside_local_addr) + "']" + "[outside-local-addr='" + str(self.outside_local_addr) + "']" + "[inside-local-port='" + str(self.inside_local_port) + "']" + "[outside-local-port='" + str(self.outside_local_port) + "']" + "[vrfid='" + str(self.vrfid) + "']" + "[protocol='" + str(self.protocol) + "']"
self._absolute_path = lambda: "Cisco-IOS-XE-nat-oper:nat-data/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(NatData.IpNatTranslation, ['inside_local_addr', 'outside_local_addr', 'inside_local_port', 'outside_local_port', 'vrfid', 'protocol', 'inside_global_addr', 'outside_global_addr', 'inside_global_port', 'outside_global_port', 'flags', 'application_type'], name, value)
def clone_ptr(self):
self._top_entity = NatData()
return self._top_entity | 0.689306 | 0.12075 |
__version__ = "v1.0"
__copyright__ = "Copyright 2022"
__license__ = "MIT"
__author__ = "<NAME>"
class single(object):
def __init__(self, ):
pass
def trim(action):
def tube(deal):
def wrapper(self, **kwargs):
if action == 'leave_one_out':
res = deal(self, kwargs['ele_loo'])
res.remove(kwargs['ele_loo'])
return res
elif action == 'leave_list_out':
import numpy as np
res = deal(self, kwargs['lis_loo'])
ref = np.arange(len(res))
c = list(set(ref).difference(set(kwargs['lis_loo'])))
# print(c)
res_ = np.array(res)
res_ = res_[c]
return res_
else:
res = deal(self)
return res
return wrapper
return tube
def _get(self, gap=False, universal=False):
if universal:
if gap:
return ['A', 'C', 'G', 'T', '-']
else:
return ['A', 'C', 'G', 'T']
else:
if gap:
return ['A', 'T', 'C', 'G', '-']
else:
return ['A', 'T', 'C', 'G']
@trim(action='normal')
def get(self, gap=False, universal=False):
return self._get(gap=gap, universal=universal)
@trim(action='leave_one_out')
def getEleTrimmed(self, ele_loo, gap=False, universal=False):
return self._get(gap=gap, universal=universal)
@trim(action='leave_list_out')
def getLisTrimmed(self, lis_loo=[], gap=False, universal=False):
return self._get(gap=gap, universal=universal)
def todict(self, nucleotides, reverse=False):
aa_dict = {}
for k, v in enumerate(nucleotides):
aa_dict[v] = k
if reverse:
aa_dict = {v: k for k, v in aa_dict.items()}
return aa_dict
trim = staticmethod(trim)
if __name__ == "__main__":
p = single()
# bs = p.get()
bs = p.getEleTrimmed(ele_loo='A')
# bs = p.getLisTrimmed(lis_loo=[0, 1])
print(bs)
print(p.todict(bs, reverse=True)) | resimpy/util/sequence/symbol/Single.py | __version__ = "v1.0"
__copyright__ = "Copyright 2022"
__license__ = "MIT"
__author__ = "<NAME>"
class single(object):
def __init__(self, ):
pass
def trim(action):
def tube(deal):
def wrapper(self, **kwargs):
if action == 'leave_one_out':
res = deal(self, kwargs['ele_loo'])
res.remove(kwargs['ele_loo'])
return res
elif action == 'leave_list_out':
import numpy as np
res = deal(self, kwargs['lis_loo'])
ref = np.arange(len(res))
c = list(set(ref).difference(set(kwargs['lis_loo'])))
# print(c)
res_ = np.array(res)
res_ = res_[c]
return res_
else:
res = deal(self)
return res
return wrapper
return tube
def _get(self, gap=False, universal=False):
if universal:
if gap:
return ['A', 'C', 'G', 'T', '-']
else:
return ['A', 'C', 'G', 'T']
else:
if gap:
return ['A', 'T', 'C', 'G', '-']
else:
return ['A', 'T', 'C', 'G']
@trim(action='normal')
def get(self, gap=False, universal=False):
return self._get(gap=gap, universal=universal)
@trim(action='leave_one_out')
def getEleTrimmed(self, ele_loo, gap=False, universal=False):
return self._get(gap=gap, universal=universal)
@trim(action='leave_list_out')
def getLisTrimmed(self, lis_loo=[], gap=False, universal=False):
return self._get(gap=gap, universal=universal)
def todict(self, nucleotides, reverse=False):
aa_dict = {}
for k, v in enumerate(nucleotides):
aa_dict[v] = k
if reverse:
aa_dict = {v: k for k, v in aa_dict.items()}
return aa_dict
trim = staticmethod(trim)
if __name__ == "__main__":
p = single()
# bs = p.get()
bs = p.getEleTrimmed(ele_loo='A')
# bs = p.getLisTrimmed(lis_loo=[0, 1])
print(bs)
print(p.todict(bs, reverse=True)) | 0.378804 | 0.118589 |
class JSONBrute:
def info(self, message):
print(f"\033[94m[*]\033[0m {message}")
def success(self, message):
print(f"\033[92m[+]\033[0m {message}")
def warning(self, message):
print(f"\033[93m[!]\033[0m {message}")
def error(self, message):
print(f"\033[91m[-]\033[0m {message}")
def parse_arguments(self):
from argparse import ArgumentParser
parser = ArgumentParser(description="A simple JSON bruteforce tool for penetration testers or hobbyists")
parser.add_argument("--url", type=str, required=True, help="The URL to post the data to.")
parser.add_argument("--wordlist", type=str, required=True, help="The wordlist to use to fuzz with.")
parser.add_argument("--data", type=str, required=True, help="The JSON data to post.")
parser.add_argument("--verbose", nargs="?", const="false", help="Print every request.")
parser.add_argument("--code", type=int, nargs="?", const="201", help="The response code for a successful request (default 201).")
return parser.parse_args()
def parse_wordlist(self, file):
with open(file, mode="r", encoding="iso-8859-1") as data:
wordlist = data.read().splitlines()
return wordlist
def parse_json(self, data):
json = data.split(",")
json = [pair.strip().split("=") for pair in json]
json = {key: value for [key, value] in json}
return json
def parse_fuzzed_parameter(self, json):
fuzzed = list(json.keys())[list(json.values()).index("FUZZ")]
return fuzzed
def find(self, args, wordlist):
from json import loads
import requests
for entry in wordlist:
try:
headers = {
"Content-Type": "application/json"
}
json = self.parse_json(args.data)
fuzzed = self.parse_fuzzed_parameter(json)
json = str(json)
json = json.replace("FUZZ", entry)
json = json.replace("'", "\"")
json = loads(json)
request = requests.post(args.url, headers=headers, json=json)
# --code default value = 201
if not args.code:
args.code = 201
if request.status_code == args.code:
self.success(f"Found \"{fuzzed}\": {json[fuzzed]}")
break
else:
if args.verbose:
self.warning(f"Incorrect \"{fuzzed}\": {json[fuzzed]}")
except requests.ConnectionError:
self.error(f"Failed to connect to {args.url}")
raise SystemExit()
except KeyboardInterrupt:
self.error("Exiting...")
raise SystemExit()
except Exception as err:
self.error(f"Unknown error, please create an issue on github explaining what you did with this error: {err}")
else:
self.warning(f"\"{fuzzed}\" not found")
def run(self):
args = self.parse_arguments()
wordlist = self.parse_wordlist(args.wordlist)
self.info(f"Starting JSONBrute on {args.url}")
self.find(args, wordlist)
JSONBrute().run() | src/jsonbrute.py | class JSONBrute:
def info(self, message):
print(f"\033[94m[*]\033[0m {message}")
def success(self, message):
print(f"\033[92m[+]\033[0m {message}")
def warning(self, message):
print(f"\033[93m[!]\033[0m {message}")
def error(self, message):
print(f"\033[91m[-]\033[0m {message}")
def parse_arguments(self):
from argparse import ArgumentParser
parser = ArgumentParser(description="A simple JSON bruteforce tool for penetration testers or hobbyists")
parser.add_argument("--url", type=str, required=True, help="The URL to post the data to.")
parser.add_argument("--wordlist", type=str, required=True, help="The wordlist to use to fuzz with.")
parser.add_argument("--data", type=str, required=True, help="The JSON data to post.")
parser.add_argument("--verbose", nargs="?", const="false", help="Print every request.")
parser.add_argument("--code", type=int, nargs="?", const="201", help="The response code for a successful request (default 201).")
return parser.parse_args()
def parse_wordlist(self, file):
with open(file, mode="r", encoding="iso-8859-1") as data:
wordlist = data.read().splitlines()
return wordlist
def parse_json(self, data):
json = data.split(",")
json = [pair.strip().split("=") for pair in json]
json = {key: value for [key, value] in json}
return json
def parse_fuzzed_parameter(self, json):
fuzzed = list(json.keys())[list(json.values()).index("FUZZ")]
return fuzzed
def find(self, args, wordlist):
from json import loads
import requests
for entry in wordlist:
try:
headers = {
"Content-Type": "application/json"
}
json = self.parse_json(args.data)
fuzzed = self.parse_fuzzed_parameter(json)
json = str(json)
json = json.replace("FUZZ", entry)
json = json.replace("'", "\"")
json = loads(json)
request = requests.post(args.url, headers=headers, json=json)
# --code default value = 201
if not args.code:
args.code = 201
if request.status_code == args.code:
self.success(f"Found \"{fuzzed}\": {json[fuzzed]}")
break
else:
if args.verbose:
self.warning(f"Incorrect \"{fuzzed}\": {json[fuzzed]}")
except requests.ConnectionError:
self.error(f"Failed to connect to {args.url}")
raise SystemExit()
except KeyboardInterrupt:
self.error("Exiting...")
raise SystemExit()
except Exception as err:
self.error(f"Unknown error, please create an issue on github explaining what you did with this error: {err}")
else:
self.warning(f"\"{fuzzed}\" not found")
def run(self):
args = self.parse_arguments()
wordlist = self.parse_wordlist(args.wordlist)
self.info(f"Starting JSONBrute on {args.url}")
self.find(args, wordlist)
JSONBrute().run() | 0.478041 | 0.240697 |
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.set import TemplateSetNode
from programy.parser.exceptions import ParserException
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphSetTests(TemplateGraphTestClient):
def test_set_template_predicate_as_attrib(self):
template = ET.fromstring("""
<template>
<set name="somepred">Value1</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value1")
def test_set_template_multi_word_predicate_as_attrib(self):
template = ET.fromstring("""
<template>
<set name="somepred other">Value1</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred other")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value1")
def test_set_template_predicate_nested(self):
template = ET.fromstring("""
<template>
Some text here
<set name="somepred">Value1</set>
Some text there
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 7)
set_node = ast.children[3]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value1")
def test_set_template_local_as_attrib(self):
template = ET.fromstring("""
<template>
<set var="somevar">Value2</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somevar")
self.assertTrue(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value2")
def test_set_template_predicate_as_child(self):
template = ET.fromstring("""
<template>
<set><name>somepred</name>Value3</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value3")
def test_set_template_local_as_child(self):
template = ET.fromstring("""
<template>
<set><var>somepred</var>Value4</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertTrue(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value4")
def test_set_name_and_var(self):
template = ET.fromstring("""
<template>
<set name="somepred" var="somevar">Value1</set>
</template>
""")
with self.assertRaises(ParserException):
ast = self.parser.parse_template_expression(template)
def test_set_no_name_or_var(self):
template = ET.fromstring("""
<template>
<set>Value1</set>
</template>
""")
with self.assertRaises(ParserException):
ast = self.parser.parse_template_expression(template) | test/programytest/parser/template/graph_tests/test_set.py | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.set import TemplateSetNode
from programy.parser.exceptions import ParserException
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphSetTests(TemplateGraphTestClient):
def test_set_template_predicate_as_attrib(self):
template = ET.fromstring("""
<template>
<set name="somepred">Value1</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value1")
def test_set_template_multi_word_predicate_as_attrib(self):
template = ET.fromstring("""
<template>
<set name="somepred other">Value1</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred other")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value1")
def test_set_template_predicate_nested(self):
template = ET.fromstring("""
<template>
Some text here
<set name="somepred">Value1</set>
Some text there
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 7)
set_node = ast.children[3]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value1")
def test_set_template_local_as_attrib(self):
template = ET.fromstring("""
<template>
<set var="somevar">Value2</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somevar")
self.assertTrue(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value2")
def test_set_template_predicate_as_child(self):
template = ET.fromstring("""
<template>
<set><name>somepred</name>Value3</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertFalse(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value3")
def test_set_template_local_as_child(self):
template = ET.fromstring("""
<template>
<set><var>somepred</var>Value4</set>
</template>
""")
ast = self.parser.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateSetNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(None, None), "somepred")
self.assertTrue(set_node.local)
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(None, None), "Value4")
def test_set_name_and_var(self):
template = ET.fromstring("""
<template>
<set name="somepred" var="somevar">Value1</set>
</template>
""")
with self.assertRaises(ParserException):
ast = self.parser.parse_template_expression(template)
def test_set_no_name_or_var(self):
template = ET.fromstring("""
<template>
<set>Value1</set>
</template>
""")
with self.assertRaises(ParserException):
ast = self.parser.parse_template_expression(template) | 0.464173 | 0.419291 |
from typing import Any, List, Optional
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
T = []
Li = []
Lb = []
C = []
B = []
x = 0
rows_length = []
for n in rows[0]:
rows_items = []
p = 0
for items in rows:
p = p+1
rows_items.append(str(items[x]))
longest_string = max(rows_items, key=len)
rows_length.append(longest_string)
x = x+1
if labels != None:
y = 0
for n in labels:
if len(str(n)) > len(rows_length[y]):
rows_length[y] = n
y = y+1
mal = []
for n in rows_length:
mal.append(len(str(n)))
for maxL in mal[0:(len(mal)-1)]:
t = "─"+"─"*maxL+"─"+"┬"
l = "─"+"─"*maxL+"─"+"┼"
b = "─"+"─"*maxL+"─"+"┴"
T.append(t)
B.append(b)
if labels!= None and len(labels) > 0:
Lb.append(l)
for maxL in mal[(len(mal)-1):]:
t = "─"+"─"*maxL+"─"
l = "─"+"─"*maxL+"─"
b = "─"+"─"*maxL+"─"
T.append(t)
B.append(b)
if labels != None and len(labels) > 0:
Lb.append(l)
if labels != None and len(labels)>0 and centered:
p = 0
for n in labels:
maxL = mal[p]
if (maxL%2) == 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
elif (maxL%2) == 0 and len(str(n))%2 != 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
elif (maxL%2) != 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
else:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
cc = f"│ {spaces}{n}{spaces}{e}"
crc = f"│ {spaces}{n}{spaces}{e}│\n"
if n==labels[-1]:
Li.append(crc)
else:
Li.append(cc)
p = p+1
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
if (maxL%2) == 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
elif (maxL%2) == 0 and len(str(n))%2 != 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
elif (maxL%2) != 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
else:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
cc = f"│ {spaces}{n}{spaces}{e}"
crc = f"│ {spaces}{n}{spaces}{e}│\n"
if n == rows[r][-1]:
C.append(crc)
else:
C.append(cc)
p = p+1
r = r+1
elif labels!=None and len(labels)>0:
p = 0
for n in labels:
maxL = mal[p]
spaces = " "*(maxL - len(str(n)))
c = f"│ {n}{spaces} "
cr = f"│ {n}{spaces} │\n"
if n==labels[-1]:
Li.append(cr)
else:
Li.append(c)
p = p+1
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
spaces = " "*(maxL - len(str(n)))
c = f"│ {n}{spaces} "
cr = f"│ {n}{spaces} │\n"
if n == rows[r][-1]:
C.append(cr)
else:
C.append(c)
p = p+1
r = r+1
elif centered:
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
if (maxL%2) == 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
elif (maxL%2) == 0 and len(str(n))%2 != 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
elif (maxL%2) != 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
else:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
cc = f"│ {spaces}{n}{spaces}{e}"
crc = f"│ {spaces}{n}{spaces}{e}│\n"
if n == rows[r][-1]:
C.append(crc)
else:
C.append(cc)
p = p+1
r = r+1
else:
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
spaces = " "*(maxL - len(str(n)))
c = f"│ {n}{spaces} "
cr = f"│ {n}{spaces} │\n"
if n == rows[r][-1]:
C.append(cr)
else:
C.append(c)
p=p+1
r = r+1
if labels == None:
return "┌" +"".join(T) + "┐"+"\n"+"".join(C)+"└"+"".join(B) + "┘"
else:
return "┌" +"".join(T) + "┐"+"\n" + "".join(Li)+"├"+"".join(Lb)+"┤"+"\n"+"".join(C)+"└"+"".join(B) + "┘" | qualifier/qualifier.py | from typing import Any, List, Optional
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
T = []
Li = []
Lb = []
C = []
B = []
x = 0
rows_length = []
for n in rows[0]:
rows_items = []
p = 0
for items in rows:
p = p+1
rows_items.append(str(items[x]))
longest_string = max(rows_items, key=len)
rows_length.append(longest_string)
x = x+1
if labels != None:
y = 0
for n in labels:
if len(str(n)) > len(rows_length[y]):
rows_length[y] = n
y = y+1
mal = []
for n in rows_length:
mal.append(len(str(n)))
for maxL in mal[0:(len(mal)-1)]:
t = "─"+"─"*maxL+"─"+"┬"
l = "─"+"─"*maxL+"─"+"┼"
b = "─"+"─"*maxL+"─"+"┴"
T.append(t)
B.append(b)
if labels!= None and len(labels) > 0:
Lb.append(l)
for maxL in mal[(len(mal)-1):]:
t = "─"+"─"*maxL+"─"
l = "─"+"─"*maxL+"─"
b = "─"+"─"*maxL+"─"
T.append(t)
B.append(b)
if labels != None and len(labels) > 0:
Lb.append(l)
if labels != None and len(labels)>0 and centered:
p = 0
for n in labels:
maxL = mal[p]
if (maxL%2) == 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
elif (maxL%2) == 0 and len(str(n))%2 != 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
elif (maxL%2) != 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
else:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
cc = f"│ {spaces}{n}{spaces}{e}"
crc = f"│ {spaces}{n}{spaces}{e}│\n"
if n==labels[-1]:
Li.append(crc)
else:
Li.append(cc)
p = p+1
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
if (maxL%2) == 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
elif (maxL%2) == 0 and len(str(n))%2 != 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
elif (maxL%2) != 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
else:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
cc = f"│ {spaces}{n}{spaces}{e}"
crc = f"│ {spaces}{n}{spaces}{e}│\n"
if n == rows[r][-1]:
C.append(crc)
else:
C.append(cc)
p = p+1
r = r+1
elif labels!=None and len(labels)>0:
p = 0
for n in labels:
maxL = mal[p]
spaces = " "*(maxL - len(str(n)))
c = f"│ {n}{spaces} "
cr = f"│ {n}{spaces} │\n"
if n==labels[-1]:
Li.append(cr)
else:
Li.append(c)
p = p+1
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
spaces = " "*(maxL - len(str(n)))
c = f"│ {n}{spaces} "
cr = f"│ {n}{spaces} │\n"
if n == rows[r][-1]:
C.append(cr)
else:
C.append(c)
p = p+1
r = r+1
elif centered:
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
if (maxL%2) == 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
elif (maxL%2) == 0 and len(str(n))%2 != 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
elif (maxL%2) != 0 and len(str(n))%2 == 0:
spaces = " "*int(((maxL - len(str(n))))/2 + 0.5)
e = ""
else:
spaces = " "*int(((maxL - len(str(n))))/2)
e = " "
cc = f"│ {spaces}{n}{spaces}{e}"
crc = f"│ {spaces}{n}{spaces}{e}│\n"
if n == rows[r][-1]:
C.append(crc)
else:
C.append(cc)
p = p+1
r = r+1
else:
r = 0
for items in rows:
p = 0
for n in items:
maxL = mal[p]
spaces = " "*(maxL - len(str(n)))
c = f"│ {n}{spaces} "
cr = f"│ {n}{spaces} │\n"
if n == rows[r][-1]:
C.append(cr)
else:
C.append(c)
p=p+1
r = r+1
if labels == None:
return "┌" +"".join(T) + "┐"+"\n"+"".join(C)+"└"+"".join(B) + "┘"
else:
return "┌" +"".join(T) + "┐"+"\n" + "".join(Li)+"├"+"".join(Lb)+"┤"+"\n"+"".join(C)+"└"+"".join(B) + "┘" | 0.292797 | 0.428532 |
from nmigen import *
from nmigen.hdl.rec import *
from .crc import CRC
from .defs import *
from .endpoint import *
from .mux import *
__all__ = ["Device"]
class Device(Elaboratable):
"""USB 2.0 device controller.
An USB 2.0 device controller, managing transactions between its endpoints and the host.
Attributes
----------
rx.stb : Signal, in
Receive strobe. Asserted by the underlying PHY when it has data to send.
rx.lst : Signal, in
Receive last. Asserted when `rx.data` holds the last byte of a packet.
rx.data : Signal, in
Receive data.
rx.rdy : Signal, out
Receive ready. Asserted when the device is able to receive data.
tx.stb : Signal, out
Transmit strobe. Asserted by the device when it has data to send.
tx.lst : Signal, out
Transmit last. Asserted when `tx.data` holds the last byte of a packet.
tx.data : Signal, out
Transmit data.
tx.rdy : Signal, in
Transmit ready. Asserted when the underlying PHY is able to receive data.
addr : Signal, in
Device address. Provided by the logic controlling endpoint 0.
"""
def __init__(self):
self.rx = Record([
("stb", 1, DIR_FANIN),
("lst", 1, DIR_FANIN),
("data", 8, DIR_FANIN),
("rdy", 1, DIR_FANOUT),
])
self.tx = Record([
("stb", 1, DIR_FANOUT),
("lst", 1, DIR_FANOUT),
("data", 8, DIR_FANOUT),
("rdy", 1, DIR_FANIN),
])
self.addr = Signal(7)
self._mux_in = InputMultiplexer()
self._mux_out = OutputMultiplexer()
def add_endpoint(self, ep, *, addr, buffered=False):
"""
Add an endpoint to the USB device.
Parameters
----------
ep : :class:`endpoint.InputEndpoint` or :class:`endpoint.OutputEndpoint`
Endpoint interface.
addr : int
Endpoint address.
buffered : bool
Endpoint buffering. Optional. If true, a double buffer is provided between the
the endpoint and the device controller.
"""
if isinstance(ep, InputEndpoint):
self._mux_in .add_endpoint(ep, addr=addr, buffered=buffered)
elif isinstance(ep, OutputEndpoint):
self._mux_out.add_endpoint(ep, addr=addr, buffered=buffered)
else:
raise TypeError("Endpoint must be an InputEndpoint or an OutputEndpoint, not {!r}"
.format(ep))
def elaborate(self, platform):
m = Module()
m.submodules.mux_in = mux_in = self._mux_in
m.submodules.mux_out = mux_out = self._mux_out
m.submodules.token_sink = token_sink = _TokenSink()
m.submodules.data_sink = data_sink = _DataSink()
m.submodules.data_source = data_source = _DataSource()
rx_pid = Signal(4)
rx_pid_r = Signal.like(rx_pid)
m.d.comb += rx_pid.eq(self.rx.data[:4])
# PIDs are followed by a 4-bit field equal to their one's complement.
rx_pid_valid = Signal()
m.d.comb += rx_pid_valid.eq((rx_pid ^ self.rx.data[4:]).all())
# DATA0 and DATA1 PIDs are alternated between non-isochronous transactions to let the
# recipient know if it missed a data packet.
# The `rx_seq[ep]` bit tracks the expected PID for host->device transactions, and
# the `tx_seq[ep]` bit tracks the expected PID for device->host transactions.
# See section 8.6 of the USB 2.0 specification for details.
rx_seq = Array(Signal(16, name="rx_seq"))
tx_seq = Array(Signal(16, name="tx_seq"))
token_dev = Signal(7)
token_ep = Signal(4)
token_setup = Signal()
expect_handshake = Signal()
with m.FSM() as fsm:
with m.State("IDLE"):
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
with m.If(rx_pid_valid):
m.d.sync += rx_pid_r.eq(rx_pid)
with m.If(self.rx.lst):
with m.If(PacketID.is_handshake(rx_pid)
# Ignore handshake packets if we are not expecting one.
& expect_handshake):
m.next = "RECV-HANDSHAKE"
with m.Else():
with m.If(PacketID.is_token(rx_pid)
# PING packets use the same encoding as token packets.
| (rx_pid == PacketID.PING)):
m.next = "RECV-TOKEN-0"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Elif(~self.rx.lst):
m.next = "FLUSH-PACKET"
with m.State("RECV-HANDSHAKE"):
with m.If(rx_pid_r == PacketID.ACK):
m.d.comb += [
mux_in.pkt.rdy.eq(1),
mux_in.pkt.ack.eq(1),
]
# Toggle the transmitter-side sequence bit upon receipt of an ACK.
m.d.sync += tx_seq[token_ep].eq(~tx_seq[token_ep])
m.d.sync += expect_handshake.eq(0)
m.next = "IDLE"
with m.State("RECV-TOKEN-0"):
m.d.comb += [
token_sink.rx.stb .eq(self.rx.stb),
token_sink.rx.lst .eq(self.rx.lst),
token_sink.rx.data.eq(self.rx.data),
self.rx.rdy.eq(token_sink.rx.rdy),
]
with m.If(token_sink.stb):
with m.If(self.rx.lst):
m.d.sync += [
token_dev.eq(token_sink.dev),
token_ep .eq(token_sink.ep),
]
m.next = "RECV-TOKEN-1"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Elif(self.rx.stb & self.rx.lst):
m.next = "IDLE"
with m.State("RECV-TOKEN-1"):
with m.If(rx_pid_r == PacketID.SOF):
# When a new (micro)frame starts, assume any ongoing transaction has timed out.
m.d.sync += expect_handshake.eq(0)
m.d.comb += [
mux_out.sof.eq(1),
mux_in .sof.eq(1),
]
m.next = "IDLE"
with m.Elif(token_dev == self.addr):
with m.Switch(rx_pid_r):
with m.Case(PacketID.PING):
m.d.sync += mux_out.sel.addr.eq(token_ep)
m.next = "SEND-PONG"
with m.Case(PacketID.SETUP):
m.d.sync += token_setup.eq(1)
# Upon receipt of a SETUP token, we set the receiver-side sequence bit
# to 0, and the transmitter-side sequence bit to 1. This guarantees
# that at the end of the transaction (after receipt of a DATA0 packet),
# both sequence bits will be equal to 1.
m.d.sync += [
rx_seq[token_ep].eq(0),
tx_seq[token_ep].eq(1),
]
m.d.sync += mux_out.sel.addr.eq(token_ep)
m.next = "RECV-DATA-0"
with m.Case(PacketID.OUT):
m.d.sync += mux_out.sel.addr.eq(token_ep)
m.next = "RECV-DATA-0"
with m.Case(PacketID.IN):
m.d.sync += mux_in .sel.addr.eq(token_ep)
m.next = "SEND-DATA-0"
with m.Default():
# Unknown/unsupported token.
m.next = "IDLE"
with m.Else():
# We are not the recipient of this token.
m.next = "IDLE"
with m.State("SEND-PONG"):
with m.If(mux_out.sel.err):
# Invalid endpoint. Abort transaction.
m.next = "IDLE"
with m.Elif(mux_out.pkt.rdy):
m.next = "SEND-ACK"
with m.Else():
m.next = "SEND-NAK"
with m.State("RECV-DATA-0"):
expected_pid = Signal.like(rx_pid)
m.d.comb += expected_pid.eq(Mux(rx_seq[token_ep], PacketID.DATA1, PacketID.DATA0))
with m.If(mux_out.sel.err):
# Invalid endpoint. Abort transaction.
m.next = "FLUSH-PACKET"
with m.Else():
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
with m.If(self.rx.lst):
m.next = "IDLE"
with m.Elif(rx_pid_valid):
with m.If(mux_out.sel.xfer != Transfer.ISOCHRONOUS):
with m.If(rx_pid == expected_pid):
m.next = "RECV-DATA-1"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Else():
# FIXME: Data PID sequencing for isochronous transfers (section
# 5.9.2 of the USB 2.0 specification) isn't implemented.
# Be lenient and accept any data PID.
with m.If(PacketID.is_data(rx_pid)):
m.next = "RECV-DATA-1"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Else():
m.next = "FLUSH-PACKET"
with m.State("RECV-DATA-1"):
ep_busy = Signal()
m.d.comb += [
data_sink.rx.stb .eq(self.rx.stb),
data_sink.rx.lst .eq(self.rx.lst),
data_sink.rx.data.eq(self.rx.data),
self.rx.rdy.eq(data_sink.rx.rdy),
mux_out.pkt.stb .eq(data_sink.stb),
mux_out.pkt.lst .eq(data_sink.lst),
mux_out.pkt.data .eq(data_sink.data),
mux_out.pkt.zlp .eq(data_sink.zlp),
mux_out.pkt.drop .eq(data_sink.drop),
mux_out.pkt.setup.eq(token_setup),
]
with m.If(mux_out.pkt.stb):
with m.If(~mux_out.pkt.rdy):
m.d.sync += ep_busy.eq(1)
with m.If(mux_out.pkt.lst):
m.d.sync += ep_busy.eq(0)
m.d.sync += token_setup.eq(0)
with m.If(mux_out.pkt.drop):
# CRC check failed. Ignore packet.
m.next = "IDLE"
with m.Elif(mux_out.sel.xfer == Transfer.ISOCHRONOUS):
# Isochronous transactions do not include a handshake.
m.next = "IDLE"
with m.Elif(~mux_out.pkt.rdy | ep_busy):
# Endpoint wasn't able to receive the whole payload.
m.next = "SEND-NAK"
with m.Else():
# Toggle the receiver-side sequence bit upon receipt of a valid data
# packet.
m.d.sync += rx_seq[token_ep].eq(~rx_seq[token_ep])
m.next = "SEND-ACK"
with m.State("SEND-DATA-0"):
with m.If(mux_in.sel.err):
# Invalid endpoint. Abort transaction.
m.next = "IDLE"
with m.Elif(mux_in.pkt.stb):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(0),
]
with m.If(tx_seq[token_ep]):
m.d.comb += [
self.tx.data[:4].eq( PacketID.DATA1),
self.tx.data[4:].eq(~PacketID.DATA1),
]
with m.Else():
m.d.comb += [
self.tx.data[:4].eq( PacketID.DATA0),
self.tx.data[4:].eq(~PacketID.DATA0),
]
with m.If(self.tx.rdy):
m.next = "SEND-DATA-1"
with m.Else():
# Endpoint is not ready to send a payload.
with m.If(mux_in.sel.xfer == Transfer.ISOCHRONOUS):
m.next = "IDLE"
with m.Else():
m.next = "SEND-NAK"
with m.State("SEND-DATA-1"):
m.d.comb += [
data_source.tx.connect(self.tx),
data_source.stb .eq(mux_in.pkt.stb),
data_source.lst .eq(mux_in.pkt.lst),
data_source.data.eq(mux_in.pkt.data),
data_source.zlp .eq(mux_in.pkt.zlp),
mux_in.pkt.rdy.eq(data_source.rdy),
]
with m.If(self.tx.stb & self.tx.lst & self.tx.rdy):
m.d.sync += expect_handshake.eq(1)
m.next = "IDLE"
with m.State("SEND-ACK"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data[:4].eq( PacketID.ACK),
self.tx.data[4:].eq(~PacketID.ACK),
]
with m.If(self.tx.rdy):
m.next = "IDLE"
with m.State("SEND-NAK"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data[:4].eq( PacketID.NAK),
self.tx.data[4:].eq(~PacketID.NAK),
]
with m.If(self.tx.rdy):
m.next = "IDLE"
with m.State("FLUSH-PACKET"):
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb & self.rx.lst):
m.next = "IDLE"
return m
class _TokenSink(Elaboratable):
def __init__(self):
self.rx = Record([
("stb", 1, DIR_FANIN),
("lst", 1, DIR_FANIN),
("data", 8, DIR_FANIN),
("rdy", 1, DIR_FANOUT),
])
self.stb = Signal()
self.dev = Signal(7)
self.ep = Signal(4)
self.crc = Signal(5)
def elaborate(self, platform):
m = Module()
m.submodules.crc = crc = CRC(poly=0b00101, size=5, dw=11, init=0x1f)
ep_lsb = Signal()
ep_msb = Signal(3)
m.d.comb += self.ep.eq(Cat(ep_lsb, ep_msb))
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
token_msb = Signal()
with m.If(~token_msb):
m.d.sync += Cat(self.dev, ep_lsb).eq(self.rx.data)
m.d.sync += token_msb.eq(1)
with m.Else():
m.d.comb += Cat(ep_msb, self.crc).eq(self.rx.data)
m.d.comb += [
crc.val.eq(Cat(self.dev, self.ep)),
self.stb.eq(crc.res == self.crc),
]
m.d.sync += token_msb.eq(0)
return m
class _DataSink(Elaboratable):
def __init__(self):
self.rx = Record([
("stb", 1, DIR_FANIN),
("lst", 1, DIR_FANIN),
("data", 8, DIR_FANIN),
("rdy", 1, DIR_FANOUT),
])
self.stb = Signal()
self.lst = Signal()
self.data = Signal(8)
self.zlp = Signal()
self.drop = Signal()
def elaborate(self, platform):
m = Module()
buf_0 = Record([("stb", 1), ("lst", 1), ("data", 8)])
buf_1 = Record.like(buf_0)
m.submodules.crc = crc = CRC(poly=0b11000000000000101, size=16, dw=8, init=0xffff)
with m.If(self.stb & self.lst):
m.d.sync += [
buf_0.stb.eq(0),
buf_1.stb.eq(0),
]
with m.Else():
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
m.d.sync += [
buf_0.stb.eq(1),
buf_0.lst.eq(self.rx.lst),
buf_0.data.eq(self.rx.data),
buf_1.eq(buf_0),
]
m.d.comb += [
crc.en.eq(self.rx.stb & buf_1.stb),
crc.val.eq(buf_1.data),
crc.clr.eq(self.stb & self.lst),
]
with m.If(buf_1.stb):
with m.If(buf_0.lst):
# We received a zero-length packet. (no data bytes, CRC field is 0)
m.d.comb += [
self.stb.eq(1),
self.lst.eq(1),
self.zlp.eq(1),
self.drop.eq(Cat(buf_1.data, buf_0.data).any()),
]
with m.Else():
m.d.comb += [
self.stb.eq(self.rx.stb),
self.lst.eq(self.rx.lst),
self.data.eq(buf_1.data),
self.drop.eq(crc.res != Cat(buf_0.data, self.rx.data)),
]
return m
class _DataSource(Elaboratable):
def __init__(self):
self.tx = Record([
("stb", 1, DIR_FANOUT),
("lst", 1, DIR_FANOUT),
("data", 8, DIR_FANOUT),
("rdy", 1, DIR_FANIN),
])
self.stb = Signal()
self.lst = Signal()
self.data = Signal(8)
self.zlp = Signal()
self.rdy = Signal()
def elaborate(self, platform):
m = Module()
m.submodules.crc = crc = CRC(poly=0b11000000000000101, size=16, dw=8, init=0xffff)
crc_res_r = Signal.like(crc.res)
with m.FSM():
with m.State("DATA"):
m.d.comb += [
self.rdy.eq(self.tx.rdy),
self.tx.stb.eq(self.stb),
self.tx.data.eq(Mux(self.zlp, 0, self.data)),
crc.en.eq(self.stb & self.rdy),
crc.val.eq(self.data),
]
with m.If(self.stb & self.rdy & self.lst):
m.d.sync += crc_res_r.eq(crc.res)
m.d.comb += crc.clr.eq(1)
with m.If(self.zlp):
m.next = "ZLP"
with m.Else():
m.next = "CRC-0"
with m.State("ZLP"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data.eq(0),
]
with m.If(self.tx.rdy):
m.next = "DATA"
with m.State("CRC-0"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.data.eq(crc_res_r[:8]),
]
with m.If(self.tx.rdy):
m.next = "CRC-1"
with m.State("CRC-1"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data.eq(crc_res_r[8:]),
]
with m.If(self.tx.rdy):
m.next = "DATA"
return m | lambdausb/usb/device.py | from nmigen import *
from nmigen.hdl.rec import *
from .crc import CRC
from .defs import *
from .endpoint import *
from .mux import *
__all__ = ["Device"]
class Device(Elaboratable):
"""USB 2.0 device controller.
An USB 2.0 device controller, managing transactions between its endpoints and the host.
Attributes
----------
rx.stb : Signal, in
Receive strobe. Asserted by the underlying PHY when it has data to send.
rx.lst : Signal, in
Receive last. Asserted when `rx.data` holds the last byte of a packet.
rx.data : Signal, in
Receive data.
rx.rdy : Signal, out
Receive ready. Asserted when the device is able to receive data.
tx.stb : Signal, out
Transmit strobe. Asserted by the device when it has data to send.
tx.lst : Signal, out
Transmit last. Asserted when `tx.data` holds the last byte of a packet.
tx.data : Signal, out
Transmit data.
tx.rdy : Signal, in
Transmit ready. Asserted when the underlying PHY is able to receive data.
addr : Signal, in
Device address. Provided by the logic controlling endpoint 0.
"""
def __init__(self):
self.rx = Record([
("stb", 1, DIR_FANIN),
("lst", 1, DIR_FANIN),
("data", 8, DIR_FANIN),
("rdy", 1, DIR_FANOUT),
])
self.tx = Record([
("stb", 1, DIR_FANOUT),
("lst", 1, DIR_FANOUT),
("data", 8, DIR_FANOUT),
("rdy", 1, DIR_FANIN),
])
self.addr = Signal(7)
self._mux_in = InputMultiplexer()
self._mux_out = OutputMultiplexer()
def add_endpoint(self, ep, *, addr, buffered=False):
"""
Add an endpoint to the USB device.
Parameters
----------
ep : :class:`endpoint.InputEndpoint` or :class:`endpoint.OutputEndpoint`
Endpoint interface.
addr : int
Endpoint address.
buffered : bool
Endpoint buffering. Optional. If true, a double buffer is provided between the
the endpoint and the device controller.
"""
if isinstance(ep, InputEndpoint):
self._mux_in .add_endpoint(ep, addr=addr, buffered=buffered)
elif isinstance(ep, OutputEndpoint):
self._mux_out.add_endpoint(ep, addr=addr, buffered=buffered)
else:
raise TypeError("Endpoint must be an InputEndpoint or an OutputEndpoint, not {!r}"
.format(ep))
def elaborate(self, platform):
m = Module()
m.submodules.mux_in = mux_in = self._mux_in
m.submodules.mux_out = mux_out = self._mux_out
m.submodules.token_sink = token_sink = _TokenSink()
m.submodules.data_sink = data_sink = _DataSink()
m.submodules.data_source = data_source = _DataSource()
rx_pid = Signal(4)
rx_pid_r = Signal.like(rx_pid)
m.d.comb += rx_pid.eq(self.rx.data[:4])
# PIDs are followed by a 4-bit field equal to their one's complement.
rx_pid_valid = Signal()
m.d.comb += rx_pid_valid.eq((rx_pid ^ self.rx.data[4:]).all())
# DATA0 and DATA1 PIDs are alternated between non-isochronous transactions to let the
# recipient know if it missed a data packet.
# The `rx_seq[ep]` bit tracks the expected PID for host->device transactions, and
# the `tx_seq[ep]` bit tracks the expected PID for device->host transactions.
# See section 8.6 of the USB 2.0 specification for details.
rx_seq = Array(Signal(16, name="rx_seq"))
tx_seq = Array(Signal(16, name="tx_seq"))
token_dev = Signal(7)
token_ep = Signal(4)
token_setup = Signal()
expect_handshake = Signal()
with m.FSM() as fsm:
with m.State("IDLE"):
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
with m.If(rx_pid_valid):
m.d.sync += rx_pid_r.eq(rx_pid)
with m.If(self.rx.lst):
with m.If(PacketID.is_handshake(rx_pid)
# Ignore handshake packets if we are not expecting one.
& expect_handshake):
m.next = "RECV-HANDSHAKE"
with m.Else():
with m.If(PacketID.is_token(rx_pid)
# PING packets use the same encoding as token packets.
| (rx_pid == PacketID.PING)):
m.next = "RECV-TOKEN-0"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Elif(~self.rx.lst):
m.next = "FLUSH-PACKET"
with m.State("RECV-HANDSHAKE"):
with m.If(rx_pid_r == PacketID.ACK):
m.d.comb += [
mux_in.pkt.rdy.eq(1),
mux_in.pkt.ack.eq(1),
]
# Toggle the transmitter-side sequence bit upon receipt of an ACK.
m.d.sync += tx_seq[token_ep].eq(~tx_seq[token_ep])
m.d.sync += expect_handshake.eq(0)
m.next = "IDLE"
with m.State("RECV-TOKEN-0"):
m.d.comb += [
token_sink.rx.stb .eq(self.rx.stb),
token_sink.rx.lst .eq(self.rx.lst),
token_sink.rx.data.eq(self.rx.data),
self.rx.rdy.eq(token_sink.rx.rdy),
]
with m.If(token_sink.stb):
with m.If(self.rx.lst):
m.d.sync += [
token_dev.eq(token_sink.dev),
token_ep .eq(token_sink.ep),
]
m.next = "RECV-TOKEN-1"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Elif(self.rx.stb & self.rx.lst):
m.next = "IDLE"
with m.State("RECV-TOKEN-1"):
with m.If(rx_pid_r == PacketID.SOF):
# When a new (micro)frame starts, assume any ongoing transaction has timed out.
m.d.sync += expect_handshake.eq(0)
m.d.comb += [
mux_out.sof.eq(1),
mux_in .sof.eq(1),
]
m.next = "IDLE"
with m.Elif(token_dev == self.addr):
with m.Switch(rx_pid_r):
with m.Case(PacketID.PING):
m.d.sync += mux_out.sel.addr.eq(token_ep)
m.next = "SEND-PONG"
with m.Case(PacketID.SETUP):
m.d.sync += token_setup.eq(1)
# Upon receipt of a SETUP token, we set the receiver-side sequence bit
# to 0, and the transmitter-side sequence bit to 1. This guarantees
# that at the end of the transaction (after receipt of a DATA0 packet),
# both sequence bits will be equal to 1.
m.d.sync += [
rx_seq[token_ep].eq(0),
tx_seq[token_ep].eq(1),
]
m.d.sync += mux_out.sel.addr.eq(token_ep)
m.next = "RECV-DATA-0"
with m.Case(PacketID.OUT):
m.d.sync += mux_out.sel.addr.eq(token_ep)
m.next = "RECV-DATA-0"
with m.Case(PacketID.IN):
m.d.sync += mux_in .sel.addr.eq(token_ep)
m.next = "SEND-DATA-0"
with m.Default():
# Unknown/unsupported token.
m.next = "IDLE"
with m.Else():
# We are not the recipient of this token.
m.next = "IDLE"
with m.State("SEND-PONG"):
with m.If(mux_out.sel.err):
# Invalid endpoint. Abort transaction.
m.next = "IDLE"
with m.Elif(mux_out.pkt.rdy):
m.next = "SEND-ACK"
with m.Else():
m.next = "SEND-NAK"
with m.State("RECV-DATA-0"):
expected_pid = Signal.like(rx_pid)
m.d.comb += expected_pid.eq(Mux(rx_seq[token_ep], PacketID.DATA1, PacketID.DATA0))
with m.If(mux_out.sel.err):
# Invalid endpoint. Abort transaction.
m.next = "FLUSH-PACKET"
with m.Else():
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
with m.If(self.rx.lst):
m.next = "IDLE"
with m.Elif(rx_pid_valid):
with m.If(mux_out.sel.xfer != Transfer.ISOCHRONOUS):
with m.If(rx_pid == expected_pid):
m.next = "RECV-DATA-1"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Else():
# FIXME: Data PID sequencing for isochronous transfers (section
# 5.9.2 of the USB 2.0 specification) isn't implemented.
# Be lenient and accept any data PID.
with m.If(PacketID.is_data(rx_pid)):
m.next = "RECV-DATA-1"
with m.Else():
m.next = "FLUSH-PACKET"
with m.Else():
m.next = "FLUSH-PACKET"
with m.State("RECV-DATA-1"):
ep_busy = Signal()
m.d.comb += [
data_sink.rx.stb .eq(self.rx.stb),
data_sink.rx.lst .eq(self.rx.lst),
data_sink.rx.data.eq(self.rx.data),
self.rx.rdy.eq(data_sink.rx.rdy),
mux_out.pkt.stb .eq(data_sink.stb),
mux_out.pkt.lst .eq(data_sink.lst),
mux_out.pkt.data .eq(data_sink.data),
mux_out.pkt.zlp .eq(data_sink.zlp),
mux_out.pkt.drop .eq(data_sink.drop),
mux_out.pkt.setup.eq(token_setup),
]
with m.If(mux_out.pkt.stb):
with m.If(~mux_out.pkt.rdy):
m.d.sync += ep_busy.eq(1)
with m.If(mux_out.pkt.lst):
m.d.sync += ep_busy.eq(0)
m.d.sync += token_setup.eq(0)
with m.If(mux_out.pkt.drop):
# CRC check failed. Ignore packet.
m.next = "IDLE"
with m.Elif(mux_out.sel.xfer == Transfer.ISOCHRONOUS):
# Isochronous transactions do not include a handshake.
m.next = "IDLE"
with m.Elif(~mux_out.pkt.rdy | ep_busy):
# Endpoint wasn't able to receive the whole payload.
m.next = "SEND-NAK"
with m.Else():
# Toggle the receiver-side sequence bit upon receipt of a valid data
# packet.
m.d.sync += rx_seq[token_ep].eq(~rx_seq[token_ep])
m.next = "SEND-ACK"
with m.State("SEND-DATA-0"):
with m.If(mux_in.sel.err):
# Invalid endpoint. Abort transaction.
m.next = "IDLE"
with m.Elif(mux_in.pkt.stb):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(0),
]
with m.If(tx_seq[token_ep]):
m.d.comb += [
self.tx.data[:4].eq( PacketID.DATA1),
self.tx.data[4:].eq(~PacketID.DATA1),
]
with m.Else():
m.d.comb += [
self.tx.data[:4].eq( PacketID.DATA0),
self.tx.data[4:].eq(~PacketID.DATA0),
]
with m.If(self.tx.rdy):
m.next = "SEND-DATA-1"
with m.Else():
# Endpoint is not ready to send a payload.
with m.If(mux_in.sel.xfer == Transfer.ISOCHRONOUS):
m.next = "IDLE"
with m.Else():
m.next = "SEND-NAK"
with m.State("SEND-DATA-1"):
m.d.comb += [
data_source.tx.connect(self.tx),
data_source.stb .eq(mux_in.pkt.stb),
data_source.lst .eq(mux_in.pkt.lst),
data_source.data.eq(mux_in.pkt.data),
data_source.zlp .eq(mux_in.pkt.zlp),
mux_in.pkt.rdy.eq(data_source.rdy),
]
with m.If(self.tx.stb & self.tx.lst & self.tx.rdy):
m.d.sync += expect_handshake.eq(1)
m.next = "IDLE"
with m.State("SEND-ACK"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data[:4].eq( PacketID.ACK),
self.tx.data[4:].eq(~PacketID.ACK),
]
with m.If(self.tx.rdy):
m.next = "IDLE"
with m.State("SEND-NAK"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data[:4].eq( PacketID.NAK),
self.tx.data[4:].eq(~PacketID.NAK),
]
with m.If(self.tx.rdy):
m.next = "IDLE"
with m.State("FLUSH-PACKET"):
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb & self.rx.lst):
m.next = "IDLE"
return m
class _TokenSink(Elaboratable):
def __init__(self):
self.rx = Record([
("stb", 1, DIR_FANIN),
("lst", 1, DIR_FANIN),
("data", 8, DIR_FANIN),
("rdy", 1, DIR_FANOUT),
])
self.stb = Signal()
self.dev = Signal(7)
self.ep = Signal(4)
self.crc = Signal(5)
def elaborate(self, platform):
m = Module()
m.submodules.crc = crc = CRC(poly=0b00101, size=5, dw=11, init=0x1f)
ep_lsb = Signal()
ep_msb = Signal(3)
m.d.comb += self.ep.eq(Cat(ep_lsb, ep_msb))
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
token_msb = Signal()
with m.If(~token_msb):
m.d.sync += Cat(self.dev, ep_lsb).eq(self.rx.data)
m.d.sync += token_msb.eq(1)
with m.Else():
m.d.comb += Cat(ep_msb, self.crc).eq(self.rx.data)
m.d.comb += [
crc.val.eq(Cat(self.dev, self.ep)),
self.stb.eq(crc.res == self.crc),
]
m.d.sync += token_msb.eq(0)
return m
class _DataSink(Elaboratable):
def __init__(self):
self.rx = Record([
("stb", 1, DIR_FANIN),
("lst", 1, DIR_FANIN),
("data", 8, DIR_FANIN),
("rdy", 1, DIR_FANOUT),
])
self.stb = Signal()
self.lst = Signal()
self.data = Signal(8)
self.zlp = Signal()
self.drop = Signal()
def elaborate(self, platform):
m = Module()
buf_0 = Record([("stb", 1), ("lst", 1), ("data", 8)])
buf_1 = Record.like(buf_0)
m.submodules.crc = crc = CRC(poly=0b11000000000000101, size=16, dw=8, init=0xffff)
with m.If(self.stb & self.lst):
m.d.sync += [
buf_0.stb.eq(0),
buf_1.stb.eq(0),
]
with m.Else():
m.d.comb += self.rx.rdy.eq(1)
with m.If(self.rx.stb):
m.d.sync += [
buf_0.stb.eq(1),
buf_0.lst.eq(self.rx.lst),
buf_0.data.eq(self.rx.data),
buf_1.eq(buf_0),
]
m.d.comb += [
crc.en.eq(self.rx.stb & buf_1.stb),
crc.val.eq(buf_1.data),
crc.clr.eq(self.stb & self.lst),
]
with m.If(buf_1.stb):
with m.If(buf_0.lst):
# We received a zero-length packet. (no data bytes, CRC field is 0)
m.d.comb += [
self.stb.eq(1),
self.lst.eq(1),
self.zlp.eq(1),
self.drop.eq(Cat(buf_1.data, buf_0.data).any()),
]
with m.Else():
m.d.comb += [
self.stb.eq(self.rx.stb),
self.lst.eq(self.rx.lst),
self.data.eq(buf_1.data),
self.drop.eq(crc.res != Cat(buf_0.data, self.rx.data)),
]
return m
class _DataSource(Elaboratable):
def __init__(self):
self.tx = Record([
("stb", 1, DIR_FANOUT),
("lst", 1, DIR_FANOUT),
("data", 8, DIR_FANOUT),
("rdy", 1, DIR_FANIN),
])
self.stb = Signal()
self.lst = Signal()
self.data = Signal(8)
self.zlp = Signal()
self.rdy = Signal()
def elaborate(self, platform):
m = Module()
m.submodules.crc = crc = CRC(poly=0b11000000000000101, size=16, dw=8, init=0xffff)
crc_res_r = Signal.like(crc.res)
with m.FSM():
with m.State("DATA"):
m.d.comb += [
self.rdy.eq(self.tx.rdy),
self.tx.stb.eq(self.stb),
self.tx.data.eq(Mux(self.zlp, 0, self.data)),
crc.en.eq(self.stb & self.rdy),
crc.val.eq(self.data),
]
with m.If(self.stb & self.rdy & self.lst):
m.d.sync += crc_res_r.eq(crc.res)
m.d.comb += crc.clr.eq(1)
with m.If(self.zlp):
m.next = "ZLP"
with m.Else():
m.next = "CRC-0"
with m.State("ZLP"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data.eq(0),
]
with m.If(self.tx.rdy):
m.next = "DATA"
with m.State("CRC-0"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.data.eq(crc_res_r[:8]),
]
with m.If(self.tx.rdy):
m.next = "CRC-1"
with m.State("CRC-1"):
m.d.comb += [
self.tx.stb.eq(1),
self.tx.lst.eq(1),
self.tx.data.eq(crc_res_r[8:]),
]
with m.If(self.tx.rdy):
m.next = "DATA"
return m | 0.795261 | 0.28508 |
import io
import logging
import mistletoe
import pygments
import pygments.formatters.html
import pygments.lexers
import pygments.util
from ._misc import parameters
_logger = logging.getLogger("holocron")
def _pygmentize(code, language):
try:
formatter = _pygmentize.formatter
except AttributeError:
HtmlFormatter = pygments.formatters.html.HtmlFormatter
formatter = _pygmentize.formatter = HtmlFormatter(wrapcode=True)
lexer = pygments.lexers.get_lexer_by_name(language)
return pygments.highlight(code, lexer, formatter)
class _HTMLRenderer(mistletoe.HTMLRenderer):
def __init__(self, *extras, pygmentize):
super(_HTMLRenderer, self).__init__(*extras)
self._pygmentize = pygmentize
self._extract_title = True
self.extracted = {}
def render_document(self, token):
if self._extract_title and token.children:
node = token.children[0]
is_heading = node.__class__.__name__ in (
"Heading",
"SetextHeading",
)
if is_heading and node.level == 1:
self.extracted["title"] = self.render_inner(node)
token.children.pop(0)
return super(_HTMLRenderer, self).render_document(token)
def render_block_code(self, token):
if token.language and self._pygmentize:
try:
code = token.children[0].content
return self._pygmentize(code, token.language)
except pygments.util.ClassNotFound:
_logger.warning("pygmentize: no such langauge: '%s'", token.language)
return super(_HTMLRenderer, self).render_block_code(token)
@parameters(
jsonschema={
"type": "object",
"properties": {"pygmentize": {"type": "boolean"}},
}
)
def process(app, stream, *, pygmentize=False):
pygmentize = pygmentize and _pygmentize
for item in stream:
with _HTMLRenderer(pygmentize=pygmentize) as renderer:
item["content"] = renderer.render(
mistletoe.Document(io.StringIO(item["content"]))
).strip()
if "title" in renderer.extracted:
item["title"] = item.get("title", renderer.extracted["title"])
item["destination"] = item["destination"].with_suffix(".html")
yield item | src/holocron/_processors/commonmark.py |
import io
import logging
import mistletoe
import pygments
import pygments.formatters.html
import pygments.lexers
import pygments.util
from ._misc import parameters
_logger = logging.getLogger("holocron")
def _pygmentize(code, language):
try:
formatter = _pygmentize.formatter
except AttributeError:
HtmlFormatter = pygments.formatters.html.HtmlFormatter
formatter = _pygmentize.formatter = HtmlFormatter(wrapcode=True)
lexer = pygments.lexers.get_lexer_by_name(language)
return pygments.highlight(code, lexer, formatter)
class _HTMLRenderer(mistletoe.HTMLRenderer):
def __init__(self, *extras, pygmentize):
super(_HTMLRenderer, self).__init__(*extras)
self._pygmentize = pygmentize
self._extract_title = True
self.extracted = {}
def render_document(self, token):
if self._extract_title and token.children:
node = token.children[0]
is_heading = node.__class__.__name__ in (
"Heading",
"SetextHeading",
)
if is_heading and node.level == 1:
self.extracted["title"] = self.render_inner(node)
token.children.pop(0)
return super(_HTMLRenderer, self).render_document(token)
def render_block_code(self, token):
if token.language and self._pygmentize:
try:
code = token.children[0].content
return self._pygmentize(code, token.language)
except pygments.util.ClassNotFound:
_logger.warning("pygmentize: no such langauge: '%s'", token.language)
return super(_HTMLRenderer, self).render_block_code(token)
@parameters(
jsonschema={
"type": "object",
"properties": {"pygmentize": {"type": "boolean"}},
}
)
def process(app, stream, *, pygmentize=False):
pygmentize = pygmentize and _pygmentize
for item in stream:
with _HTMLRenderer(pygmentize=pygmentize) as renderer:
item["content"] = renderer.render(
mistletoe.Document(io.StringIO(item["content"]))
).strip()
if "title" in renderer.extracted:
item["title"] = item.get("title", renderer.extracted["title"])
item["destination"] = item["destination"].with_suffix(".html")
yield item | 0.405331 | 0.151247 |
import sys
import os
from models import *
class StoreApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getInventory(self, **kwargs):
"""Returns pet inventories by status
Args:
Returns: map(String, int)
"""
allParams = []
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getInventory" % key)
params[key] = val
del params['kwargs']
resourcePath = '/store/inventory'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json,application/xml';
headerParams['Content-Type'] = '';
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'map(String, int)')
return responseObject
def placeOrder(self, **kwargs):
"""Place an order for a pet
Args:
body, Order: order placed for purchasing the pet (required)
Returns: Order
"""
allParams = ['body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method placeOrder" % key)
params[key] = val
del params['kwargs']
resourcePath = '/store/order'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json,application/xml';
headerParams['Content-Type'] = '';
if ('body' in params):
bodyParam = params['body']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Order')
return responseObject
def getOrderById(self, **kwargs):
"""Find purchase order by ID
Args:
orderId, str: ID of pet that needs to be fetched (required)
Returns: Order
"""
allParams = ['orderId']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getOrderById" % key)
params[key] = val
del params['kwargs']
resourcePath = '/store/order/{orderId}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json,application/xml';
headerParams['Content-Type'] = '';
if ('orderId' in params):
replacement = str(self.apiClient.toPathValue(params['orderId']))
resourcePath = resourcePath.replace('{' + 'orderId' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Order')
return responseObject
def deleteOrder(self, **kwargs):
"""Delete purchase order by ID
Args:
orderId, str: ID of the order that needs to be deleted (required)
Returns:
"""
allParams = ['orderId']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteOrder" % key)
params[key] = val
del params['kwargs']
resourcePath = '/store/order/{orderId}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json,application/xml';
headerParams['Content-Type'] = '';
if ('orderId' in params):
replacement = str(self.apiClient.toPathValue(params['orderId']))
resourcePath = resourcePath.replace('{' + 'orderId' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files) | samples/client/petstore/python/client/StoreApi.py | import sys
import os
from models import *
class StoreApi(object):
    """Client for the petstore ``/store`` endpoints.

    NOTE: originally auto generated by the swagger code generator.  Updated
    by hand: the Python-2-only ``dict.iteritems()`` calls (which raise
    ``AttributeError`` on Python 3) were replaced with ``dict.items()``,
    and the identical per-method kwargs validation was factored into
    ``_validate_kwargs``.
    """

    def __init__(self, apiClient):
        # Transport object; expected to provide toPathValue / callAPI /
        # deserialize (see the generated ApiClient).
        self.apiClient = apiClient

    def _validate_kwargs(self, kwargs, allParams, methodName):
        """Return ``kwargs`` as a plain dict, rejecting unknown keyword names.

        Args:
            kwargs: the keyword arguments the caller passed in.
            allParams: list of keyword names the method accepts.
            methodName: name used in the error message.
        Raises:
            TypeError: if a key is not listed in ``allParams``.
        """
        params = {}
        for key, val in kwargs.items():  # .items() works on Python 2 and 3
            if key not in allParams:
                raise TypeError(
                    "Got an unexpected keyword argument '%s' to method %s"
                    % (key, methodName))
            params[key] = val
        return params

    def getInventory(self, **kwargs):
        """Returns pet inventories by status
        Args:
        Returns: map(String, int)
        """
        self._validate_kwargs(kwargs, [], 'getInventory')
        resourcePath = '/store/inventory'.replace('{format}', 'json')
        headerParams = {'Accept': 'application/json,application/xml',
                        'Content-Type': ''}
        # formParams is always empty here, so postData falls through to the
        # (absent) body parameter, i.e. None.
        response = self.apiClient.callAPI(resourcePath, 'GET', {},
                                          None, headerParams, files={})
        if not response:
            return None
        return self.apiClient.deserialize(response, 'map(String, int)')

    def placeOrder(self, **kwargs):
        """Place an order for a pet
        Args:
            body, Order: order placed for purchasing the pet (required)
        Returns: Order
        """
        params = self._validate_kwargs(kwargs, ['body'], 'placeOrder')
        resourcePath = '/store/order'.replace('{format}', 'json')
        headerParams = {'Accept': 'application/json,application/xml',
                        'Content-Type': ''}
        bodyParam = params.get('body')  # None when the caller omitted it
        response = self.apiClient.callAPI(resourcePath, 'POST', {},
                                          bodyParam, headerParams, files={})
        if not response:
            return None
        return self.apiClient.deserialize(response, 'Order')

    def getOrderById(self, **kwargs):
        """Find purchase order by ID
        Args:
            orderId, str: ID of pet that needs to be fetched (required)
        Returns: Order
        """
        params = self._validate_kwargs(kwargs, ['orderId'], 'getOrderById')
        resourcePath = '/store/order/{orderId}'.replace('{format}', 'json')
        if 'orderId' in params:
            replacement = str(self.apiClient.toPathValue(params['orderId']))
            resourcePath = resourcePath.replace('{orderId}', replacement)
        headerParams = {'Accept': 'application/json,application/xml',
                        'Content-Type': ''}
        response = self.apiClient.callAPI(resourcePath, 'GET', {},
                                          None, headerParams, files={})
        if not response:
            return None
        return self.apiClient.deserialize(response, 'Order')

    def deleteOrder(self, **kwargs):
        """Delete purchase order by ID
        Args:
            orderId, str: ID of the order that needs to be deleted (required)
        Returns: None (the API sends no response body)
        """
        params = self._validate_kwargs(kwargs, ['orderId'], 'deleteOrder')
        resourcePath = '/store/order/{orderId}'.replace('{format}', 'json')
        if 'orderId' in params:
            replacement = str(self.apiClient.toPathValue(params['orderId']))
            resourcePath = resourcePath.replace('{orderId}', replacement)
        headerParams = {'Accept': 'application/json,application/xml',
                        'Content-Type': ''}
        self.apiClient.callAPI(resourcePath, 'DELETE', {},
                               None, headerParams, files={})
import pprint
import re # noqa: F401
import six
from nucleus_api.configuration import Configuration
class Order(object):
    """Swagger ``Order`` model (auto generated by swagger-codegen, restyled).

    The per-attribute getter/setter boilerplate is produced by a small
    factory instead of being written out longhand; observable behaviour
    (validated required fields, ``to_dict`` conversion, equality) is the
    same as the generated original.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the definition.
    """

    swagger_types = {
        'account_id': 'str',
        'amount': 'float',
        'commission': 'float',
        'create_date': 'datetime',
        '_date': 'date',
        'id': 'str',
        'is_read': 'bool',
        'metadata': 'dict(str, str)',
        'model_id': 'str',
        'order_bulk_id': 'str',
        'order_ticket_id': 'str',
        'order_type': 'str',
        'portfolio_id': 'str',
        'price': 'float',
        'quantity': 'float',
        'secondary_id': 'str',
        'security_id': 'str',
        'tmp_tracker_id': 'str',
        'transaction_code_id': 'str',
        'update_date': 'datetime'
    }

    attribute_map = {
        'account_id': 'account_id',
        'amount': 'amount',
        'commission': 'commission',
        'create_date': 'create_date',
        '_date': 'date',
        'id': 'id',
        'is_read': 'is_read',
        'metadata': 'metadata',
        'model_id': 'model_id',
        'order_bulk_id': 'order_bulk_id',
        'order_ticket_id': 'order_ticket_id',
        'order_type': 'order_type',
        'portfolio_id': 'portfolio_id',
        'price': 'price',
        'quantity': 'quantity',
        'secondary_id': 'secondary_id',
        'security_id': 'security_id',
        'tmp_tracker_id': 'tmp_tracker_id',
        'transaction_code_id': 'transaction_code_id',
        'update_date': 'update_date'
    }

    # Attributes whose setters reject ``None`` while client-side validation
    # is enabled (required by the API definition).
    _REQUIRED = frozenset(
        ['_date', 'order_ticket_id', 'security_id', 'transaction_code_id'])

    def __init__(self, account_id=None, amount=None, commission=None, create_date=None, _date=None, id=None, is_read=None, metadata=None, model_id=None, order_bulk_id=None, order_ticket_id=None, order_type=None, portfolio_id=None, price=None, quantity=None, secondary_id=None, security_id=None, tmp_tracker_id=None, transaction_code_id=None, update_date=None, _configuration=None):  # noqa: E501
        """Order - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Start every backing slot at None, then route the constructor
        # arguments through the validating properties below.
        for _name in self.swagger_types:
            setattr(self, '_' + _name, None)
        self.discriminator = None
        _given = dict(
            account_id=account_id, amount=amount, commission=commission,
            create_date=create_date, _date=_date, id=id, is_read=is_read,
            metadata=metadata, model_id=model_id, order_bulk_id=order_bulk_id,
            order_ticket_id=order_ticket_id, order_type=order_type,
            portfolio_id=portfolio_id, price=price, quantity=quantity,
            secondary_id=secondary_id, security_id=security_id,
            tmp_tracker_id=tmp_tracker_id,
            transaction_code_id=transaction_code_id, update_date=update_date)
        for _name, _value in _given.items():
            # Required attributes are always assigned (so a missing one is
            # rejected by its setter); optional ones only when provided.
            if _name in self._REQUIRED or _value is not None:
                setattr(self, _name, _value)

    def _swagger_property(_name, _required):
        """Build the standard get/set property pair for one attribute."""
        _slot = '_' + _name

        def _get(self):
            return getattr(self, _slot)

        def _set(self, value):
            if (_required and self._configuration.client_side_validation
                    and value is None):
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % _name)  # noqa: E501
            setattr(self, _slot, value)

        _get.__doc__ = "Gets the %s of this Order." % _name
        return property(_get, _set)

    account_id = _swagger_property('account_id', False)
    amount = _swagger_property('amount', False)
    commission = _swagger_property('commission', False)
    create_date = _swagger_property('create_date', False)
    _date = _swagger_property('_date', True)
    id = _swagger_property('id', False)
    is_read = _swagger_property('is_read', False)
    metadata = _swagger_property('metadata', False)
    model_id = _swagger_property('model_id', False)
    order_bulk_id = _swagger_property('order_bulk_id', False)
    order_ticket_id = _swagger_property('order_ticket_id', True)
    order_type = _swagger_property('order_type', False)
    portfolio_id = _swagger_property('portfolio_id', False)
    price = _swagger_property('price', False)
    quantity = _swagger_property('quantity', False)
    secondary_id = _swagger_property('secondary_id', False)
    security_id = _swagger_property('security_id', True)
    tmp_tracker_id = _swagger_property('tmp_tracker_id', False)
    transaction_code_id = _swagger_property('transaction_code_id', True)
    update_date = _swagger_property('update_date', False)

    del _swagger_property

    @staticmethod
    def _to_plain(value):
        """Recursively convert nested models to dicts (swagger-codegen rules)."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {name: self._to_plain(getattr(self, name))
                  for name in self.swagger_types}
        if issubclass(Order, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Order) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not isinstance(other, Order) or self.to_dict() != other.to_dict()
import pprint
import re # noqa: F401
import six
from nucleus_api.configuration import Configuration
class Order(object):
    """Swagger ``Order`` model (auto generated by swagger-codegen, restyled).

    The per-attribute getter/setter boilerplate is produced by a small
    factory instead of being written out longhand; observable behaviour
    (validated required fields, ``to_dict`` conversion, equality) is the
    same as the generated original.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the definition.
    """

    swagger_types = {
        'account_id': 'str',
        'amount': 'float',
        'commission': 'float',
        'create_date': 'datetime',
        '_date': 'date',
        'id': 'str',
        'is_read': 'bool',
        'metadata': 'dict(str, str)',
        'model_id': 'str',
        'order_bulk_id': 'str',
        'order_ticket_id': 'str',
        'order_type': 'str',
        'portfolio_id': 'str',
        'price': 'float',
        'quantity': 'float',
        'secondary_id': 'str',
        'security_id': 'str',
        'tmp_tracker_id': 'str',
        'transaction_code_id': 'str',
        'update_date': 'datetime'
    }

    attribute_map = {
        'account_id': 'account_id',
        'amount': 'amount',
        'commission': 'commission',
        'create_date': 'create_date',
        '_date': 'date',
        'id': 'id',
        'is_read': 'is_read',
        'metadata': 'metadata',
        'model_id': 'model_id',
        'order_bulk_id': 'order_bulk_id',
        'order_ticket_id': 'order_ticket_id',
        'order_type': 'order_type',
        'portfolio_id': 'portfolio_id',
        'price': 'price',
        'quantity': 'quantity',
        'secondary_id': 'secondary_id',
        'security_id': 'security_id',
        'tmp_tracker_id': 'tmp_tracker_id',
        'transaction_code_id': 'transaction_code_id',
        'update_date': 'update_date'
    }

    # Attributes whose setters reject ``None`` while client-side validation
    # is enabled (required by the API definition).
    _REQUIRED = frozenset(
        ['_date', 'order_ticket_id', 'security_id', 'transaction_code_id'])

    def __init__(self, account_id=None, amount=None, commission=None, create_date=None, _date=None, id=None, is_read=None, metadata=None, model_id=None, order_bulk_id=None, order_ticket_id=None, order_type=None, portfolio_id=None, price=None, quantity=None, secondary_id=None, security_id=None, tmp_tracker_id=None, transaction_code_id=None, update_date=None, _configuration=None):  # noqa: E501
        """Order - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Start every backing slot at None, then route the constructor
        # arguments through the validating properties below.
        for _name in self.swagger_types:
            setattr(self, '_' + _name, None)
        self.discriminator = None
        _given = dict(
            account_id=account_id, amount=amount, commission=commission,
            create_date=create_date, _date=_date, id=id, is_read=is_read,
            metadata=metadata, model_id=model_id, order_bulk_id=order_bulk_id,
            order_ticket_id=order_ticket_id, order_type=order_type,
            portfolio_id=portfolio_id, price=price, quantity=quantity,
            secondary_id=secondary_id, security_id=security_id,
            tmp_tracker_id=tmp_tracker_id,
            transaction_code_id=transaction_code_id, update_date=update_date)
        for _name, _value in _given.items():
            # Required attributes are always assigned (so a missing one is
            # rejected by its setter); optional ones only when provided.
            if _name in self._REQUIRED or _value is not None:
                setattr(self, _name, _value)

    def _swagger_property(_name, _required):
        """Build the standard get/set property pair for one attribute."""
        _slot = '_' + _name

        def _get(self):
            return getattr(self, _slot)

        def _set(self, value):
            if (_required and self._configuration.client_side_validation
                    and value is None):
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % _name)  # noqa: E501
            setattr(self, _slot, value)

        _get.__doc__ = "Gets the %s of this Order." % _name
        return property(_get, _set)

    account_id = _swagger_property('account_id', False)
    amount = _swagger_property('amount', False)
    commission = _swagger_property('commission', False)
    create_date = _swagger_property('create_date', False)
    _date = _swagger_property('_date', True)
    id = _swagger_property('id', False)
    is_read = _swagger_property('is_read', False)
    metadata = _swagger_property('metadata', False)
    model_id = _swagger_property('model_id', False)
    order_bulk_id = _swagger_property('order_bulk_id', False)
    order_ticket_id = _swagger_property('order_ticket_id', True)
    order_type = _swagger_property('order_type', False)
    portfolio_id = _swagger_property('portfolio_id', False)
    price = _swagger_property('price', False)
    quantity = _swagger_property('quantity', False)
    secondary_id = _swagger_property('secondary_id', False)
    security_id = _swagger_property('security_id', True)
    tmp_tracker_id = _swagger_property('tmp_tracker_id', False)
    transaction_code_id = _swagger_property('transaction_code_id', True)
    update_date = _swagger_property('update_date', False)

    del _swagger_property

    @staticmethod
    def _to_plain(value):
        """Recursively convert nested models to dicts (swagger-codegen rules)."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {name: self._to_plain(getattr(self, name))
                  for name in self.swagger_types}
        if issubclass(Order, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Order) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not isinstance(other, Order) or self.to_dict() != other.to_dict()
# Synthetic data
# Precision measure: MSE
# Run: python3 test.py param
# where
# - param = 0 runs test cases with fixed n and varying eps
# - param = 1 runs test cases with fixed eps and varying n
# This program does 50-fold cross-validation.
import sys
import os

# Create a new Theano compile directory on local node (used in cluster
# computing).  THEANO_FLAGS must be exported BEFORE diffpri (which imports
# theano) is loaded, so this block stays ahead of the remaining imports.
if len(sys.argv) > 1:
    v1 = int(sys.argv[1])
    mypath1 = "theano"
    mypath2 = mypath1 + "/theano-tmp-" + str(v1)
    if not os.path.exists(mypath1):
        os.mkdir(mypath1)
    if not os.path.exists(mypath2):
        os.mkdir(mypath2)
    os.environ["THEANO_FLAGS"] = "base_compiledir=" + mypath1 + ",compiledir=" + mypath2

import diffpri as dp
import numpy as np
import csv

datapath = '/scratch/work/niemina7/cliptest/'  # TODO: set path for input and output data


def _read_csv_matrix(path):
    """Load a comma-separated numeric file as a float ndarray.

    Uses a ``with`` block so the file handle is closed even when parsing
    fails (the previous version leaked the handle on error).
    """
    with open(path, 'rt') as f:
        return np.array(list(csv.reader(f, delimiter=','))).astype(float)


# Import data
x = _read_csv_matrix(datapath + 'x_data.csv')
y = _read_csv_matrix(datapath + 'y_data.csv')

# Arguments: param selects the experiment (defaults to 0).
param = int(sys.argv[1]) if len(sys.argv) > 1 else 0

# Test cases
eps = [0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
pv_size = [0, 100, 200, 300, 400, 500, 600, 700, 800]
pv_max = max(pv_size)
d = 10
n_npv = 10
n_test = 100
mcmc = True  # use priors instead of fixed values for precision parameter lambda,lambda_0
n_cv = 50
drugid = 0

# Fetch clipping threshold omegas ('A' = varying eps, 'B' = varying n).
t = 'A' if param == 0 else 'B'
WX = _read_csv_matrix(datapath + t + '-WX.csv')
WY = _read_csv_matrix(datapath + t + '-WY.csv')


def _run_case(e, n_pv, w_x, w_y, seed):
    """Run one train/predict round and return its precision (MSE-based)."""
    # Process data
    (nxx_pv, nxx_npv, nxy_pv, nxy_npv, nyy_pv, nyy_npv,
     x_test, y_test, B_x, B_y, n_train, private) = dp.processData(
        x, y, d, n_test, n_pv, n_npv, pv_max, w_x, w_y, drugid, seed)
    private = False  # modification: rlr (non-private baseline)
    # Fit model
    if mcmc:
        pred = dp.predictMCMC(n_train, nxx_pv, nxx_npv, nxy_pv, nxy_npv,
                              nyy_pv, nyy_npv, B_x, B_y, e, x_test, private)
    else:
        pred = dp.predictL(nxx_pv, nxx_npv, nxy_pv, nxy_npv,
                           B_x, B_y, e, x_test, private)
    # Evaluate
    return dp.precision(pred, y_test)


# The two experiments share the same cross-validation loop; only the swept
# (eps, n_pv) pairs differ.
if param == 0:
    cases = [(e, 500) for e in eps]          # fixed n, varying eps
elif param == 1:
    cases = [(2.0, n_pv) for n_pv in pv_size]  # fixed eps, varying n
else:
    cases = None  # any other value: do nothing (matches original behaviour)

if cases is not None:
    # Cross-validation
    for seed in range(n_cv):
        S = np.zeros(len(cases), dtype=np.float64)
        for i, (e, n_pv) in enumerate(cases):
            S[i] = _run_case(e, n_pv, WX[i], WY[i], seed)
        # Save results into file
        np.savetxt(datapath + 'synth-rlr-' + t + '-' + str(seed) + '.csv',
                   S, delimiter=',')
# Synthetic data
# Precision measure: MSE
# Run: python3 test.py param
# where
# - param = 0 runs test cases with fixed n and varying eps
# - param = 1 runs test cases with fixed eps and varying n
# This program does 50-fold cross-validation.
import sys
import os
# Create a new Theano compile directory on local node (used in cluster computing)
# NOTE: the same CLI argument doubles as the test-case selector `param` below,
# so the compile dir is keyed by the selected test case.
if len(sys.argv) > 1:
    v1 = int(sys.argv[1])
    mypath1 = "theano"
    mypath2 = mypath1+"/theano-tmp-"+str(v1)
    if not os.path.exists(mypath1):
        os.mkdir(mypath1)
    if not os.path.exists(mypath2):
        os.mkdir(mypath2)
    os.environ["THEANO_FLAGS"] = "base_compiledir="+mypath1+",compiledir="+mypath2
import diffpri as dp
import numpy as np
import csv
# Import data
datapath = '/scratch/work/niemina7/cliptest/' # TODO: set path for input and output data
f = open(datapath+'x_data.csv','rt')
reader = csv.reader(f,delimiter=',')
x = np.array(list(reader)).astype(float)
f.close()
f = open(datapath+'y_data.csv','rt')
reader = csv.reader(f,delimiter=',')
y = np.array(list(reader)).astype(float)
f.close()
# Arguments
if len(sys.argv) > 1:
    param = int(sys.argv[1])
else: # default
    param = 0
# Test cases
eps = [0.1,0.2,0.5,1.0,2.0,5.0,10.0]  # privacy budgets swept when param == 0
pv_size = [0,100,200,300,400,500,600,700,800]  # private-data sizes swept when param == 1
pv_max = max(pv_size)
d = 10  # presumably the number of covariates — confirm against diffpri.processData
n_npv = 10  # presumably non-private sample count — confirm against diffpri.processData
n_test = 100  # test-set size
mcmc = True # use priors instead of fixed values for precision parameter lambda,lambda_0
n_cv = 50  # cross-validation folds; each fold uses its index as the RNG seed
drugid = 0
# Fetch clipping threshold omegas
# File 'A' rows are indexed per epsilon, file 'B' rows per pv_size
# (this matches how WX/WY are indexed in the two loops below).
if param == 0:
    t = 'A'
else:
    t = 'B'
f = open(datapath+t+'-WX.csv','rt')
reader = csv.reader(f,delimiter=',')
WX = np.array(list(reader)).astype(float)
f.close()
f = open(datapath+t+'-WY.csv','rt')
reader = csv.reader(f,delimiter=',')
WY = np.array(list(reader)).astype(float)
f.close()
if param == 0:
    # Cross-validation: fixed private-data size (n_pv = 500), sweep over epsilon.
    for seed in range(n_cv):
        S = np.zeros(len(eps),dtype=np.float64)
        for i in range(len(eps)):
            e = eps[i]
            n_pv = 500
            w_x = WX[i]
            w_y = WY[i]
            # Process data
            nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,x_test,y_test,B_x,B_y,n_train,private = dp.processData(x,y,d,n_test,n_pv,n_npv,pv_max,w_x,w_y,drugid,seed)
            # Force the non-private code path regardless of what processData decided.
            private = False # modification: rlr
            # Fit model
            if mcmc:
                pred = dp.predictMCMC(n_train,nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,B_x,B_y,e,x_test,private)
            else:
                pred = dp.predictL(nxx_pv,nxx_npv,nxy_pv,nxy_npv,B_x,B_y,e,x_test,private)
            # Evaluate
            S[i] = dp.precision(pred,y_test)
        # Save results into file (one CSV per fold)
        csvpath = datapath+'synth-rlr-'+t+'-'+str(seed)+'.csv'
        np.savetxt(csvpath,S,delimiter=',')
if param == 1:
    # Cross-validation: fixed epsilon (e = 2.0), sweep over private-data size.
    for seed in range(n_cv):
        S = np.zeros(len(pv_size),dtype=np.float64)
        for i in range(len(pv_size)):
            n_pv = pv_size[i]
            e = 2.0
            w_x = WX[i]
            w_y = WY[i]
            # Process data
            nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,x_test,y_test,B_x,B_y,n_train,private = dp.processData(x,y,d,n_test,n_pv,n_npv,pv_max,w_x,w_y,drugid,seed)
            # Force the non-private code path regardless of what processData decided.
            private = False # modification: rlr
            # Fit model
            if mcmc:
                pred = dp.predictMCMC(n_train,nxx_pv,nxx_npv,nxy_pv,nxy_npv,nyy_pv,nyy_npv,B_x,B_y,e,x_test,private)
            else:
                pred = dp.predictL(nxx_pv,nxx_npv,nxy_pv,nxy_npv,B_x,B_y,e,x_test,private)
            # Evaluate
            S[i] = dp.precision(pred,y_test)
        # Save results into file (one CSV per fold)
        csvpath = datapath+'synth-rlr-'+t+'-'+str(seed)+'.csv'
np.savetxt(csvpath,S,delimiter=',') | 0.278747 | 0.369941 |
import json
import os
import boto3
from botocore.exceptions import ClientError
import math
import time
from MediaReplayEnginePluginHelper import OutputHelper
from MediaReplayEnginePluginHelper import PluginHelper
from MediaReplayEnginePluginHelper import Status
from MediaReplayEnginePluginHelper import DataPlane
comp_client = boto3.client('comprehend')
# Confidence threshold above which a sentiment category gets its boolean flag set.
_SENTIMENT_FLAG_THRESHOLD = 0.75


def consolidate_comprehend_results(depResult, comprehend_results):
    """Consolidate one Comprehend detect_sentiment response into MRE result rows.

    Args:
        depResult: Dependent-plugin output dict; its 'Start' and 'End' values
            give the segment bounds (units per MRE convention — confirm with
            the dependent plugin).
        comprehend_results: Response from comprehend.detect_sentiment, carrying
            'Sentiment' and a 'SentimentScore' mapping with 'Positive',
            'Negative', 'Neutral' and 'Mixed' entries.

    Returns:
        A single-element list containing the consolidated result dict (a list
        so the caller can hand it directly to the MRE output helpers).
    """
    scores = comprehend_results['SentimentScore']
    result = {
        'Start': depResult['Start'],
        'End': depResult['End'],
        'Label': comprehend_results['Sentiment'],
        'primary_sentiment': comprehend_results['Sentiment'],
    }
    # One raw score plus one high-confidence boolean flag per category;
    # replaces four copy-pasted if/else stanzas with equivalent expressions.
    for category in ('Positive', 'Negative', 'Neutral', 'Mixed'):
        key = category.lower()
        result[key + '_score'] = scores[category]
        result[key + '_flag'] = scores[category] > _SENTIMENT_FLAG_THRESHOLD
    return [result]
def lambda_handler(event, context):
    """MRE plugin entry point: run Amazon Comprehend sentiment detection on the
    text output of a dependent plugin and persist the consolidated results.

    Args:
        event: MRE plugin invocation payload (plugin configuration, dependent
            plugins, track number, chunk metadata).
        context: Lambda context object (unused).

    Returns:
        The MRE output payload built by OutputHelper on success.

    Raises:
        Re-raises any processing exception after marking the plugin as errored.
    """
    print(event)
    results = []
    mre_dataplane = DataPlane(event)
    # 'event' is the input event payload passed to Lambda
    mre_outputhelper = OutputHelper(event)
    mre_pluginhelper = PluginHelper(event)
    try :
        # Plugin configuration: which attribute of the dependent plugin's output
        # holds the text, and its language code for Comprehend.
        text_attribute = event['Plugin']['Configuration']['text_attribute']
        text_language_code = event['Plugin']['Configuration']['text_language_code']
        audio_track_num = event['TrackNumber']  # NOTE(review): read but never used below
        # this plugin expects the dependent plugin to provide the text data to analyze with Amazon Comprehend
        dep_plugin = event['Plugin']['DependentPlugins'][0]
        print('dep_plugin: ' + dep_plugin)
        #get all dependent detector data
        depResults = mre_dataplane.get_dependent_plugins_output()
        print(depResults)
        #execute a comprehend job to detect sentiment for each transciption or whatever the designated text attribute is
        #'Sentiment': 'POSITIVE'|'NEGATIVE'|'NEUTRAL'|'MIXED'
        # NOTE(review): `results` is reassigned on every iteration, so only the
        # last dependent result's sentiment survives to be saved — confirm intent.
        for depResult in depResults[dep_plugin]:
            response = comp_client.detect_sentiment(
                Text=depResult[text_attribute],
                LanguageCode=text_language_code
            )
            print(response)
            #process results
            results = consolidate_comprehend_results(depResult, response)
            print(results)
        # Add the results of the plugin to the payload (required if the plugin status is "complete"; Optional if the plugin has any errors)
        mre_outputhelper.add_results_to_output(results)
        # Persist plugin results for later use
        mre_dataplane.save_plugin_results(results)
        # Update the processing status of the plugin (required)
        mre_outputhelper.update_plugin_status(Status.PLUGIN_COMPLETE)
        # Returns expected payload built by MRE helper library
        return mre_outputhelper.get_output_object()
    except Exception as e:
        print(e)
        # Update the processing status of the plugin (required)
        mre_outputhelper.update_plugin_status(Status.PLUGIN_ERROR)
        # Re-raise the exception to MRE processing where it will be handled
raise | source/mre-plugin-samples/Plugins/DetectSentiment/DetectSentiment.py |
import json
import os
import boto3
from botocore.exceptions import ClientError
import math
import time
from MediaReplayEnginePluginHelper import OutputHelper
from MediaReplayEnginePluginHelper import PluginHelper
from MediaReplayEnginePluginHelper import Status
from MediaReplayEnginePluginHelper import DataPlane
comp_client = boto3.client('comprehend')
def consolidate_comprehend_results(depResult, comprehend_results):
    """Map one Comprehend detect_sentiment response onto a single MRE result row.

    The row carries the segment bounds from the dependent plugin's result, the
    overall sentiment label, the four category scores, and a boolean flag per
    category that is set when the score is strictly above 0.75.
    Returns a one-element list, as expected by the MRE output helpers.
    """
    scores = comprehend_results['SentimentScore']
    record = {
        'Start': depResult['Start'],
        'End': depResult['End'],
        'Label': comprehend_results['Sentiment'],
    }
    record['primary_sentiment'] = record['Label']
    record['positive_score'] = scores['Positive']
    record['negative_score'] = scores['Negative']
    record['neutral_score'] = scores['Neutral']
    record['mixed_score'] = scores['Mixed']
    # High-confidence flags, derived from the scores recorded just above.
    for name in ('positive', 'negative', 'neutral', 'mixed'):
        record[name + '_flag'] = record[name + '_score'] > 0.75
    return [record]
def lambda_handler(event, context):
    """MRE plugin entry point: run Amazon Comprehend sentiment detection on the
    text output of a dependent plugin and persist the consolidated results.

    Args:
        event: MRE plugin invocation payload (plugin configuration, dependent
            plugins, track number, chunk metadata).
        context: Lambda context object (unused).

    Returns:
        The MRE output payload built by OutputHelper on success.

    Raises:
        Re-raises any processing exception after marking the plugin as errored.
    """
    print(event)
    results = []
    mre_dataplane = DataPlane(event)
    # 'event' is the input event payload passed to Lambda
    mre_outputhelper = OutputHelper(event)
    mre_pluginhelper = PluginHelper(event)
    try :
        # Plugin configuration: which attribute of the dependent plugin's output
        # holds the text, and its language code for Comprehend.
        text_attribute = event['Plugin']['Configuration']['text_attribute']
        text_language_code = event['Plugin']['Configuration']['text_language_code']
        audio_track_num = event['TrackNumber']  # NOTE(review): read but never used below
        # this plugin expects the dependent plugin to provide the text data to analyze with Amazon Comprehend
        dep_plugin = event['Plugin']['DependentPlugins'][0]
        print('dep_plugin: ' + dep_plugin)
        #get all dependent detector data
        depResults = mre_dataplane.get_dependent_plugins_output()
        print(depResults)
        #execute a comprehend job to detect sentiment for each transciption or whatever the designated text attribute is
        #'Sentiment': 'POSITIVE'|'NEGATIVE'|'NEUTRAL'|'MIXED'
        # NOTE(review): `results` is reassigned on every iteration, so only the
        # last dependent result's sentiment survives to be saved — confirm intent.
        for depResult in depResults[dep_plugin]:
            response = comp_client.detect_sentiment(
                Text=depResult[text_attribute],
                LanguageCode=text_language_code
            )
            print(response)
            #process results
            results = consolidate_comprehend_results(depResult, response)
            print(results)
        # Add the results of the plugin to the payload (required if the plugin status is "complete"; Optional if the plugin has any errors)
        mre_outputhelper.add_results_to_output(results)
        # Persist plugin results for later use
        mre_dataplane.save_plugin_results(results)
        # Update the processing status of the plugin (required)
        mre_outputhelper.update_plugin_status(Status.PLUGIN_COMPLETE)
        # Returns expected payload built by MRE helper library
        return mre_outputhelper.get_output_object()
    except Exception as e:
        print(e)
        # Update the processing status of the plugin (required)
        mre_outputhelper.update_plugin_status(Status.PLUGIN_ERROR)
        # Re-raise the exception to MRE processing where it will be handled
raise | 0.397471 | 0.084229 |
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from fastapi import Depends
from starlette.requests import Request
from starlette.responses import JSONResponse, RedirectResponse
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from fastapi_aad_auth import auth, config # noqa: F401
from fastapi_aad_auth._base.state import AuthenticationState
from fastapi_aad_auth.mixins import LoggingMixin
from fastapi_aad_auth.ui.jinja import Jinja2Templates
from fastapi_aad_auth.utilities import urls
class UI(LoggingMixin):
    """Provides Login endpoint methods, which are then wrapped in a factory method."""

    def __init__(self, config: 'config.Config', authenticator: 'auth.Authenticator', base_context: Dict[str, Any] = None):
        """Initialise the UI based on the provided configuration.

        Keyword Args:
            config (fastapi_aad_auth.config.Config): Authentication configuration (includes ui and routing, as well as AAD Application and Tenant IDs)
            authenticator (fastapi_aad_auth.auth.Authenticator): The authenticator object
            base_context (Dict[str, Any]): Add the authentication to the router
        """
        super().__init__()
        self.config = config
        if base_context is None:
            base_context = dict()
        self._base_context = base_context
        self._authenticator = authenticator
        # One Jinja2 environment per template, each rooted at its template's
        # parent directory so the two views can live in different folders.
        self.login_template_path = Path(self.config.login_ui.template_file)
        self.user_template_path = Path(self.config.login_ui.user_template_file)
        self.login_templates = Jinja2Templates(directory=str(self.login_template_path.parent))
        self.user_templates = Jinja2Templates(directory=str(self.user_template_path.parent))

    def _login(self, request: Request, *args, **kwargs):
        """Provide the Login UI."""
        # Already authenticated (or auth disabled): skip the login page.
        if not self.config.enabled or self._authenticator.auth_backend.is_authenticated(request):
            return RedirectResponse(self.config.routing.home_path)
        context = self._base_context.copy()
        context.update(kwargs)  # type: ignore
        # NOTE(review): this repeats the redirect check above via request.user —
        # looks redundant; confirm whether both checks are needed.
        if not self.config.enabled or request.user.is_authenticated:
            # This is authenticated so go straight to the homepage
            return RedirectResponse(self.config.routing.home_path)
        context['request'] = request  # type: ignore
        if 'login' not in context or context['login'] is None:  # type: ignore
            # Build one login button per configured provider.
            post_redirect = self._authenticator._session_validator.pop_post_auth_redirect(request)
            context['login'] = '<br>'.join([provider.get_login_button(post_redirect) for provider in self._authenticator._providers])  # type: ignore
        self.logger.debug(f'Context {context}')
        return self.login_templates.TemplateResponse(self.login_template_path.name, context)  # type: ignore

    def _get_user(self, request: Request, **kwargs):
        """Provide a UI with information on the user."""
        if not self.config.enabled:
            return RedirectResponse(self.config.routing.home_path)
        context = self._base_context.copy()  # type: ignore
        context.update(kwargs)
        self.logger.debug(f'Getting token for {request.user}')
        context['request'] = request  # type: ignore
        context['token_api_path'] = f'{self.config.routing.user_path}/token'
        # NOTE(review): given the early return above, the else branch below is
        # unreachable — config.enabled is always truthy here.
        if self.config.enabled:
            self.logger.debug(f'Auth {request.auth}')
            try:
                context['user'] = self._authenticator._session_validator.get_state_from_session(request).user
            except ValueError:
                # If we have one provider, we can force the login, otherwise...
                return self.__force_authenticate(request)
        else:
            self.logger.debug('Auth not enabled')
            context['token_api_path'] = None  # type: ignore
        return self.user_templates.TemplateResponse(self.user_template_path.name, context)

    def _get_token(self, request: Request, auth_state: AuthenticationState, scopes: Optional[List[str]] = None, ajax: bool = False):
        """Return the access token for the user."""
        if not isinstance(auth_state, AuthenticationState):
            user = self.__get_user_from_request(request)
        else:
            user = auth_state.user
        if hasattr(user, 'username'):  # type: ignore
            # Scopes may arrive via argument or as a space-separated query param.
            if scopes is None:
                scopes = request.query_params.get('scopes', None)
            if isinstance(scopes, str):
                scopes = scopes.split(' ')  # type: ignore
            access_token = self.__get_access_token(user, scopes)
            if access_token:
                # We want to get the token for each provider that is authenticated
                return JSONResponse(access_token)  # type: ignore
            else:
                # Heuristic browser detection from the user-agent: browsers get a
                # forced login flow, non-browser clients get a JSON error message.
                if any([u in request.headers['user-agent'] for u in ['Mozilla', 'Gecko', 'Trident', 'WebKit', 'Presto', 'Edge', 'Blink']]):
                    # If we have one provider, we can force the login, otherwise we need to request which login route
                    return self.__force_authenticate(request, ajax)
                else:
                    return JSONResponse('Unable to access token as user has not authenticated via session')
        # No username available: bounce through the landing page, then return
        # to the token endpoint (preserving any requested scopes).
        redirect = '/me/token'
        if scopes:
            self.logger.debug(f'Getting Access Token with scopes {scopes}')
            redirect = urls.with_query_params(redirect, scopes=scopes)
        return RedirectResponse(urls.with_query_params(self.config.routing.landing_path, redirect=redirect))

    @property
    def routes(self):
        """Return the routes for the UI.

        Provides the login UI route, and if the routing config has
        the ``user_path`` set, it also provides the user description
        view (and token endpoint)
        """
        async def login(request: Request, *args, **kwargs):
            return self._login(request)

        routes = [Route(self.config.routing.landing_path, endpoint=login, methods=['GET'], name='login'),
                  Mount(self.config.login_ui.static_path, StaticFiles(directory=str(self.config.login_ui.static_directory)), name='static-login')]
        if self.config.routing.user_path:
            # The user view requires an authenticated session.
            @self._authenticator.auth_required()
            async def get_user(request: Request):
                return self._get_user(request)

            async def get_token(request: Request, auth_state: AuthenticationState = Depends(self._authenticator.auth_backend.requires_auth(allow_session=True)), scopes: Optional[List[str]] = None):
                ajax = request.query_params.get('ajax', False)
                return self._get_token(request, auth_state, scopes, ajax)

            routes += [Route(self.config.routing.user_path, endpoint=get_user, methods=['GET'], name='user'),
                       Route(f'{self.config.routing.user_path}/token', endpoint=get_token, methods=['GET'], name='get-token')]
        return routes

    def __force_authenticate(self, request: Request, ajax: bool = False) -> Union[JSONResponse, RedirectResponse]:
        # lets get the full redirect including any query parameters
        redirect = urls.with_query_params(request.url.path, **request.query_params)
        self.logger.debug(f'Request {request.url}')
        self.logger.info(f'Forcing authentication with redirect = {redirect}')
        providers = [u for u in self._authenticator._providers if u.authenticator]
        if len(providers) == 1:
            # Single provider: go straight to its login URL.
            redirect_url = urls.with_query_params(providers[0].login_url, redirect=redirect, force=True)
        else:
            # Multiple providers: send the user to the login page to pick one.
            redirect_url = urls.with_query_params(self.config.routing.login_path, redirect=redirect, force=True)
        if ajax:
            # AJAX callers get the redirect target back as JSON — presumably so
            # client-side script performs the navigation itself; confirm in UI js.
            self.logger.debug(f'AJAX is true - handling {redirect_url}')
            url = urls.parse_url(redirect_url)
            query_params = urls.query_params(redirect_url)
            query_params.pop('redirect', None)
            self.logger.debug(f'url {url.path}, query_params {query_params}')
            response = JSONResponse({'redirect': url.path, 'query_params': query_params})  # type: ignore
        else:
            response = RedirectResponse(redirect_url)  # type: ignore
        return response

    def __get_access_token(self, user, scopes=None):
        # Try each provider in turn; the first that yields a token wins.
        access_token = None
        for provider in self._authenticator._providers:
            if provider.authenticator:
                try:
                    access_token = provider.authenticator.get_access_token(user, scopes)
                except ValueError:
                    # Provider cannot issue a token for this user/scopes; try next.
                    pass
            if access_token is not None:
                break
        return access_token

    def __get_user_from_request(self, request: Request):
        # Prefer the user already attached to the request; otherwise ask the
        # auth backend to (re)check the request.
        if hasattr(request.user, 'username'):
            user = request.user
        else:
            auth_state = self._authenticator.auth_backend.check(request)
            user = auth_state.user
return user | src/fastapi_aad_auth/ui/__init__.py | from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from fastapi import Depends
from starlette.requests import Request
from starlette.responses import JSONResponse, RedirectResponse
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from fastapi_aad_auth import auth, config # noqa: F401
from fastapi_aad_auth._base.state import AuthenticationState
from fastapi_aad_auth.mixins import LoggingMixin
from fastapi_aad_auth.ui.jinja import Jinja2Templates
from fastapi_aad_auth.utilities import urls
class UI(LoggingMixin):
    """Provides Login endpoint methods, which are then wrapped in a factory method."""

    def __init__(self, config: 'config.Config', authenticator: 'auth.Authenticator', base_context: Dict[str, Any] = None):
        """Initialise the UI based on the provided configuration.

        Keyword Args:
            config (fastapi_aad_auth.config.Config): Authentication configuration (includes ui and routing, as well as AAD Application and Tenant IDs)
            authenticator (fastapi_aad_auth.auth.Authenticator): The authenticator object
            base_context (Dict[str, Any]): Add the authentication to the router
        """
        super().__init__()
        self.config = config
        if base_context is None:
            base_context = dict()
        self._base_context = base_context
        self._authenticator = authenticator
        # One Jinja2 environment per template, each rooted at its template's
        # parent directory so the two views can live in different folders.
        self.login_template_path = Path(self.config.login_ui.template_file)
        self.user_template_path = Path(self.config.login_ui.user_template_file)
        self.login_templates = Jinja2Templates(directory=str(self.login_template_path.parent))
        self.user_templates = Jinja2Templates(directory=str(self.user_template_path.parent))

    def _login(self, request: Request, *args, **kwargs):
        """Provide the Login UI."""
        # Already authenticated (or auth disabled): skip the login page.
        if not self.config.enabled or self._authenticator.auth_backend.is_authenticated(request):
            return RedirectResponse(self.config.routing.home_path)
        context = self._base_context.copy()
        context.update(kwargs)  # type: ignore
        # NOTE(review): this repeats the redirect check above via request.user —
        # looks redundant; confirm whether both checks are needed.
        if not self.config.enabled or request.user.is_authenticated:
            # This is authenticated so go straight to the homepage
            return RedirectResponse(self.config.routing.home_path)
        context['request'] = request  # type: ignore
        if 'login' not in context or context['login'] is None:  # type: ignore
            # Build one login button per configured provider.
            post_redirect = self._authenticator._session_validator.pop_post_auth_redirect(request)
            context['login'] = '<br>'.join([provider.get_login_button(post_redirect) for provider in self._authenticator._providers])  # type: ignore
        self.logger.debug(f'Context {context}')
        return self.login_templates.TemplateResponse(self.login_template_path.name, context)  # type: ignore

    def _get_user(self, request: Request, **kwargs):
        """Provide a UI with information on the user."""
        if not self.config.enabled:
            return RedirectResponse(self.config.routing.home_path)
        context = self._base_context.copy()  # type: ignore
        context.update(kwargs)
        self.logger.debug(f'Getting token for {request.user}')
        context['request'] = request  # type: ignore
        context['token_api_path'] = f'{self.config.routing.user_path}/token'
        # NOTE(review): given the early return above, the else branch below is
        # unreachable — config.enabled is always truthy here.
        if self.config.enabled:
            self.logger.debug(f'Auth {request.auth}')
            try:
                context['user'] = self._authenticator._session_validator.get_state_from_session(request).user
            except ValueError:
                # If we have one provider, we can force the login, otherwise...
                return self.__force_authenticate(request)
        else:
            self.logger.debug('Auth not enabled')
            context['token_api_path'] = None  # type: ignore
        return self.user_templates.TemplateResponse(self.user_template_path.name, context)

    def _get_token(self, request: Request, auth_state: AuthenticationState, scopes: Optional[List[str]] = None, ajax: bool = False):
        """Return the access token for the user."""
        if not isinstance(auth_state, AuthenticationState):
            user = self.__get_user_from_request(request)
        else:
            user = auth_state.user
        if hasattr(user, 'username'):  # type: ignore
            # Scopes may arrive via argument or as a space-separated query param.
            if scopes is None:
                scopes = request.query_params.get('scopes', None)
            if isinstance(scopes, str):
                scopes = scopes.split(' ')  # type: ignore
            access_token = self.__get_access_token(user, scopes)
            if access_token:
                # We want to get the token for each provider that is authenticated
                return JSONResponse(access_token)  # type: ignore
            else:
                # Heuristic browser detection from the user-agent: browsers get a
                # forced login flow, non-browser clients get a JSON error message.
                if any([u in request.headers['user-agent'] for u in ['Mozilla', 'Gecko', 'Trident', 'WebKit', 'Presto', 'Edge', 'Blink']]):
                    # If we have one provider, we can force the login, otherwise we need to request which login route
                    return self.__force_authenticate(request, ajax)
                else:
                    return JSONResponse('Unable to access token as user has not authenticated via session')
        # No username available: bounce through the landing page, then return
        # to the token endpoint (preserving any requested scopes).
        redirect = '/me/token'
        if scopes:
            self.logger.debug(f'Getting Access Token with scopes {scopes}')
            redirect = urls.with_query_params(redirect, scopes=scopes)
        return RedirectResponse(urls.with_query_params(self.config.routing.landing_path, redirect=redirect))

    @property
    def routes(self):
        """Return the routes for the UI.

        Provides the login UI route, and if the routing config has
        the ``user_path`` set, it also provides the user description
        view (and token endpoint)
        """
        async def login(request: Request, *args, **kwargs):
            return self._login(request)

        routes = [Route(self.config.routing.landing_path, endpoint=login, methods=['GET'], name='login'),
                  Mount(self.config.login_ui.static_path, StaticFiles(directory=str(self.config.login_ui.static_directory)), name='static-login')]
        if self.config.routing.user_path:
            # The user view requires an authenticated session.
            @self._authenticator.auth_required()
            async def get_user(request: Request):
                return self._get_user(request)

            async def get_token(request: Request, auth_state: AuthenticationState = Depends(self._authenticator.auth_backend.requires_auth(allow_session=True)), scopes: Optional[List[str]] = None):
                ajax = request.query_params.get('ajax', False)
                return self._get_token(request, auth_state, scopes, ajax)

            routes += [Route(self.config.routing.user_path, endpoint=get_user, methods=['GET'], name='user'),
                       Route(f'{self.config.routing.user_path}/token', endpoint=get_token, methods=['GET'], name='get-token')]
        return routes

    def __force_authenticate(self, request: Request, ajax: bool = False) -> Union[JSONResponse, RedirectResponse]:
        # lets get the full redirect including any query parameters
        redirect = urls.with_query_params(request.url.path, **request.query_params)
        self.logger.debug(f'Request {request.url}')
        self.logger.info(f'Forcing authentication with redirect = {redirect}')
        providers = [u for u in self._authenticator._providers if u.authenticator]
        if len(providers) == 1:
            # Single provider: go straight to its login URL.
            redirect_url = urls.with_query_params(providers[0].login_url, redirect=redirect, force=True)
        else:
            # Multiple providers: send the user to the login page to pick one.
            redirect_url = urls.with_query_params(self.config.routing.login_path, redirect=redirect, force=True)
        if ajax:
            # AJAX callers get the redirect target back as JSON — presumably so
            # client-side script performs the navigation itself; confirm in UI js.
            self.logger.debug(f'AJAX is true - handling {redirect_url}')
            url = urls.parse_url(redirect_url)
            query_params = urls.query_params(redirect_url)
            query_params.pop('redirect', None)
            self.logger.debug(f'url {url.path}, query_params {query_params}')
            response = JSONResponse({'redirect': url.path, 'query_params': query_params})  # type: ignore
        else:
            response = RedirectResponse(redirect_url)  # type: ignore
        return response

    def __get_access_token(self, user, scopes=None):
        # Try each provider in turn; the first that yields a token wins.
        access_token = None
        for provider in self._authenticator._providers:
            if provider.authenticator:
                try:
                    access_token = provider.authenticator.get_access_token(user, scopes)
                except ValueError:
                    # Provider cannot issue a token for this user/scopes; try next.
                    pass
            if access_token is not None:
                break
        return access_token

    def __get_user_from_request(self, request: Request):
        # Prefer the user already attached to the request; otherwise ask the
        # auth backend to (re)check the request.
        if hasattr(request.user, 'username'):
            user = request.user
        else:
            auth_state = self._authenticator.auth_backend.check(request)
            user = auth_state.user
return user | 0.855036 | 0.09628 |
# Unit tests for neuro_sdk.url_utils: uri_from_cli and the normalize_* helpers.
import asyncio
import sys
from pathlib import Path
from typing import Any, AsyncIterator, Callable

import pytest
from yarl import URL

from neuro_sdk import Client
from neuro_sdk.url_utils import (
    _extract_path,
    normalize_local_path_uri,
    normalize_storage_path_uri,
    uri_from_cli,
)


@pytest.fixture
async def client(
    loop: asyncio.AbstractEventLoop, make_client: Callable[..., Client]
) -> AsyncIterator[Client]:
    async with make_client("https://example.com") as client:
        yield client


# asvetlov: I don't like autouse but it is the fastest fix
@pytest.fixture(autouse=True)
def fake_homedir(monkeypatch: Any, tmp_path: Path) -> Path:
    # Point HOME at a temp dir so '~' expansion is hermetic in every test.
    monkeypatch.setenv("HOME", str(tmp_path))
    return Path.home()


@pytest.fixture
def pwd() -> Path:
    return Path.cwd()


async def test_config_username(token: str, client: Client) -> None:
    assert client.username == "user"


def test_uri_from_cli_relative_path() -> None:
    uri = uri_from_cli("path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == Path("path/to/file.txt").absolute().as_uri()


def test_uri_from_cli_absolute_path() -> None:
    uri = uri_from_cli("/path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == Path("/path/to/file.txt").absolute().as_uri()


def test_uri_from_cli_relative_path_special_chars() -> None:
    # Plain paths are taken literally: no percent-decoding of '%23' etc.
    uri = uri_from_cli("path/to/file#%23:?@~", "testuser", "test-cluster")
    assert uri.path.endswith("/path/to/file#%23:?@~")


def test_uri_from_cli_absolute_path_special_chars() -> None:
    uri = uri_from_cli("/path/to/file#%23:?@~", "testuser", "test-cluster")
    assert _extract_path(uri) == Path("/path/to/file#%23:?@~").absolute()


def test_uri_from_cli_path_with_tilde(fake_homedir: Path) -> None:
    uri = uri_from_cli("~/path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == (fake_homedir / "path/to/file.txt").as_uri()


@pytest.mark.skipif(
    sys.platform == "win32",
    reason="expanduser() does not fail for unknown user on Windows",
)
def test_uri_from_cli_path_with_tilde_unknown_user() -> None:
    with pytest.raises(ValueError, match=r"Cannot expand user for "):
        uri_from_cli("~unknownuser/path/to/file.txt", "testuser", "test-cluster")


def test_uri_from_cli_tilde_only(fake_homedir: Path) -> None:
    uri = uri_from_cli("~", "testuser", "test-cluster")
    assert str(uri) == fake_homedir.as_uri()


def test_uri_from_cli_relative_file_uri() -> None:
    uri = uri_from_cli("file:path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == Path("path/to/file.txt").absolute().as_uri()


def test_uri_from_cli_absolute_file_uri() -> None:
    uri = uri_from_cli("file:/path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == Path("/path/to/file.txt").absolute().as_uri()
    uri = uri_from_cli("file:///path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == Path("/path/to/file.txt").absolute().as_uri()


def test_uri_from_cli_relative_file_uri_special_chars() -> None:
    # file: URIs ARE percent-decoded ('%23' -> '#', '%25' -> '%', '%C3%9F' -> 'ß').
    uri = uri_from_cli(
        "file:path/to/file%23%252d%3f:@~%C3%9F", "testuser", "test-cluster"
    )
    assert uri.path.endswith("/path/to/file#%2d?:@~ß")


def test_uri_from_cli_absolute_file_uri_special_chars() -> None:
    uri = uri_from_cli(
        "file:/path/to/file%23%252d%3f:@~%C3%9F", "testuser", "test-cluster"
    )
    assert uri.path.endswith("/path/to/file#%2d?:@~ß")


def test_uri_from_cli_relative_storage_uri() -> None:
    # Relative storage paths are rooted under the user's home on the cluster.
    uri = uri_from_cli("storage:path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == "storage://test-cluster/testuser/path/to/file.txt"
    uri = uri_from_cli("storage:/path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == "storage://test-cluster/path/to/file.txt"


def test_uri_from_cli_absolute_storage_uri() -> None:
    uri = uri_from_cli(
        "storage://otheruser/path/to/file.txt", "testuser", "test-cluster"
    )
    assert str(uri) == "storage://otheruser/path/to/file.txt"
    uri = uri_from_cli("storage:///path/to/file.txt", "testuser", "test-cluster")
    assert str(uri) == "storage://test-cluster/path/to/file.txt"


def test_uri_from_cli_absolute_storage_uri_special_chars() -> None:
    uri = uri_from_cli(
        "storage://cluster/user/path/to/file%23%252d%3f:@~%C3%9F",
        "testuser",
        "test-cluster",
    )
    assert uri.path == "/user/path/to/file#%2d?:@~ß"


def test_uri_from_cli_numberic_path() -> None:
    # NOTE(review): "numberic" is a typo for "numeric" in the test name; kept
    # as-is since renaming would change the collected test id.
    uri = uri_from_cli("256", "testuser", "test-cluster")
    assert str(uri) == Path("256").absolute().as_uri()
    uri = uri_from_cli("123456", "testuser", "test-cluster")
    assert str(uri) == Path("123456").absolute().as_uri()
    uri = uri_from_cli("file:256", "testuser", "test-cluster")
    assert str(uri) == Path("256").absolute().as_uri()
    uri = uri_from_cli("file:123456", "testuser", "test-cluster")
    assert str(uri) == Path("123456").absolute().as_uri()
    uri = uri_from_cli("storage:256", "testuser", "test-cluster")
    assert str(uri) == "storage://test-cluster/testuser/256"
    uri = uri_from_cli("storage:123456", "testuser", "test-cluster")
    assert str(uri) == "storage://test-cluster/testuser/123456"


# Rejected inputs: wrong scheme, or fragments/queries/credentials/ports that
# have no meaning for the given scheme.
@pytest.mark.parametrize(
    "path_or_uri",
    [
        "https://cluster/user/path/to",
        "file://cluster/user/path/to",
        "file:/path/to#fragment",
        "file:/path/to#",
        "file:/path/to?key=value",
        "file:/path/to?",
    ],
)
async def test_uri_from_cli__file__fail(path_or_uri: str) -> None:
    with pytest.raises(ValueError):
        uri_from_cli(path_or_uri, "u", "c", allowed_schemes=("file",))


@pytest.mark.parametrize(
    "uri",
    [
        "",
        "https://cluster/user/path/to",
        "storage://cluster/user/path/to#fragment",
        "storage://cluster/user/path/to#",
        "storage://cluster/user/path/to?key=value",
        "storage://cluster/user/path/to?",
        "storage://user@cluster/user/path/to",
        "storage://:password@cluster/user/path/to",
        "storage://:@cluster/user/path/to",
        "storage://cluster:1234/user/path/to",
    ],
)
async def test_uri_from_cli__storage__fail(uri: str) -> None:
    with pytest.raises(ValueError):
        uri_from_cli(uri, "u", "c", allowed_schemes=("storage",))


@pytest.mark.parametrize(
    "uri",
    [
        "",
        "https://cluster/user/image",
        "image://cluster/user/image#fragment",
        "image://cluster/user/image#",
        "image://cluster/user/image?key=value",
        "image://cluster/user/image?",
        "image://user@cluster/user/image",
        "image://:password@cluster/user/image",
        "image://:@cluster/user/image",
        "image://cluster:1234/user/image",
    ],
)
async def test_uri_from_cli__image__fail(uri: str) -> None:
    with pytest.raises(ValueError):
        uri_from_cli(uri, "u", "c", allowed_schemes=("image",))


@pytest.mark.parametrize(
    "uri",
    [
        "",
        "https://cluster/bucket/object",
        "blob://cluster/bucket/object#fragment",
        "blob://cluster/bucket/object#",
        "blob://cluster/bucket/object?key=value",
        "blob://cluster/bucket/object?",
        "blob://user@cluster/bucket/object",
        "blob://:password@cluster/bucket/object",
        "blob://:@cluster/bucket/object",
        "blob://cluster:1234/bucket/object",
    ],
)
async def test_uri_from_cli__blob__fail(uri: str) -> None:
    with pytest.raises(ValueError):
        uri_from_cli(uri, "u", "c", allowed_schemes=("blob",))


async def test_normalize_storage_path_uri_no_path(client: Client) -> None:
    # A bare 'storage:' normalizes to the user's home on the cluster.
    url = URL("storage:")
    url = normalize_storage_path_uri(url, client.username, "test-cluster")
    assert url.scheme == "storage"
    assert url.host == "test-cluster"
    assert url.path == "/user"
    assert str(url) == "storage://test-cluster/user"


async def test_normalize_local_path_uri_no_path(pwd: Path) -> None:
    # A bare 'file:' normalizes to the current working directory.
    url = URL("file:")
    url = normalize_local_path_uri(url)
    assert url.scheme == "file"
    assert url.host is None
    assert _extract_path(url) == pwd


async def test_normalize_storage_path_uri_no_slashes(client: Client) -> None:
    url = URL("storage:file.txt")
    url = normalize_storage_path_uri(url, client.username, "test-cluster")
    assert url.scheme == "storage"
    assert url.host == "test-cluster"
    assert url.path == "/user/file.txt"
    assert str(url) == "storage://test-cluster/user/file.txt"


async def test_normalize_local_path_uri_no_slashes(pwd: Path) -> None:
    url = URL("file:file.txt")
    url = normalize_local_path_uri(url)
    assert url.scheme == "file"
    assert url.host is None
    assert _extract_path(url) == pwd / "file.txt"
async def test_normalize_storage_path_uri__0_slashes_relative(client: Client) -> None:
url = URL("storage:path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/path/to/file.txt"
assert str(url) == "storage://test-cluster/user/path/to/file.txt"
async def test_normalize_local_path_uri__0_slashes_relative(pwd: Path) -> None:
url = URL("file:path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to/file.txt"
async def test_normalize_storage_path_uri__1_slash_absolute(client: Client) -> None:
url = URL("storage:/path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
async def test_normalize_local_path_uri__1_slash_absolute(pwd: Path) -> None:
url = URL("file:/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == Path(pwd.drive + "/path/to/file.txt")
async def test_normalize_storage_path_uri__2_slashes(client: Client) -> None:
url = URL("storage://path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "path"
assert url.path == "/to/file.txt"
assert str(url) == "storage://path/to/file.txt"
async def test_normalize_local_path_uri__2_slashes(pwd: Path) -> None:
url = URL("file://path/to/file.txt")
with pytest.raises(
ValueError, match="Host part is not allowed in file URI, found 'path'"
):
url = normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__3_slashes_relative(client: Client) -> None:
url = URL("storage:///path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
async def test_normalize_local_path_uri__3_slashes_relative(pwd: Path) -> None:
url = URL("file:///path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == Path(pwd.drive + "/path/to/file.txt")
async def test_normalize_storage_path_uri__4_slashes_relative(client: Client) -> None:
url = URL("storage:////path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
@pytest.mark.skipif(sys.platform == "win32", reason="Doesn't work on Windows")
async def test_normalize_local_path_uri__4_slashes_relative() -> None:
url = URL("file:////path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert url.path == "/path/to/file.txt"
assert str(url) == f"file:///path/to/file.txt"
@pytest.mark.parametrize(
"uri_str",
[
"",
"https://cluster/user/path/to",
"storage://cluster/user/path/to#fragment",
"storage://cluster/user/path/to?key=value",
"storage://user@cluster/user/path/to",
"storage://:password@cluster/user/path/to",
"storage://:@cluster/user/path/to",
"storage://cluster:1234/user/path/to",
],
)
async def test_normalize_storage_path_uri__fail(uri_str: str) -> None:
uri = URL(uri_str)
with pytest.raises(ValueError):
normalize_storage_path_uri(uri, "test-user", "test-cluster")
async def test_normalize_storage_path_uri__tilde_in_relative_path(
client: Client,
) -> None:
url = URL("storage:~/path/to/file.txt")
with pytest.raises(ValueError, match=".*Cannot expand user.*"):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_in_relative_path(
fake_homedir: Path,
) -> None:
url = URL("file:~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__tilde_in_relative_path_2(
client: Client,
) -> None:
url = URL("storage:./~/path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/~/path/to/file.txt"
assert str(url) == "storage://test-cluster/user/~/path/to/file.txt"
async def test_normalize_local_path_uri__tilde_in_relative_path_2(
pwd: Path,
) -> None:
url = URL("file:./~/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "~/path/to/file.txt"
assert str(url) == (pwd / "~/path/to/file.txt").as_uri().replace("%7E", "~")
async def test_normalize_storage_path_uri__tilde_in_relative_path_3(
client: Client,
) -> None:
url = URL("storage:path/to~file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/path/to~file.txt"
assert str(url) == "storage://test-cluster/user/path/to~file.txt"
async def test_normalize_local_path_uri__tilde_in_relative_path_3(
fake_homedir: Path, pwd: Path
) -> None:
url = URL("file:path/to~file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to~file.txt"
assert str(url) == (pwd / "path/to~file.txt").as_uri().replace("%7E", "~")
async def test_normalize_storage_path_uri__tilde_in_absolute_path(
client: Client,
) -> None:
url = URL("storage:/~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_in_absolute_path(
fake_homedir: Path, pwd: Path
) -> None:
url = URL("file:/~/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "/~/path/to/file.txt"
assert str(url) == (pwd / "/~/path/to/file.txt").as_uri().replace("%7E", "~")
async def test_normalize_storage_path_uri__tilde_in_host(client: Client) -> None:
url = URL("storage://~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_in_host(
client: Client, pwd: Path
) -> None:
url = URL("file://~/path/to/file.txt")
with pytest.raises(
ValueError, match=f"Host part is not allowed in file URI, found '~'"
):
url = normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__bad_scheme(client: Client) -> None:
with pytest.raises(ValueError, match="Invalid storage scheme 'other:'"):
url = URL("other:path/to/file.txt")
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__bad_scheme() -> None:
with pytest.raises(ValueError, match="Invalid local file scheme 'other:'"):
url = URL("other:path/to/file.txt")
normalize_local_path_uri(url)
# The tests below check that f(f(x)) == f(x) where f is a path normalization function
async def test_normalize_storage_path_uri__no_slash__double(client: Client) -> None:
url = URL("storage:path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/path/to/file.txt"
assert str(url) == "storage://test-cluster/user/path/to/file.txt"
async def test_normalize_local_path_uri__no_slash__double(pwd: Path) -> None:
url = URL("file:path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to/file.txt"
async def test_normalize_storage_path_uri__tilde_slash__double(client: Client) -> None:
url = URL("storage:~/path/to/file.txt")
with pytest.raises(ValueError, match=".*Cannot expand user.*"):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_slash__double() -> None:
url = URL("file:~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__3_slashes__double(client: Client) -> None:
url = URL("storage:///path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
async def test_normalize_local_path_uri__3_slashes__double(pwd: Path) -> None:
url = URL(f"file:///{pwd}/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to/file.txt"
assert str(url) == (pwd / "path/to/file.txt").as_uri()
@pytest.mark.skipif(sys.platform != "win32", reason="Requires Windows")
def test_normalized_path() -> None:
p = URL("file:///Z:/neuro/platform-api-clients/python/setup.py")
assert normalize_local_path_uri(p) == p
@pytest.mark.parametrize(
"uri_str",
[
"",
"https://cluster/user/path/to",
"file://cluster/user/path/to",
"file:/path/to#fragment",
"file:/path/to?key=value",
],
)
async def test_normalize_local_path_uri__fail(uri_str: str) -> None:
uri = URL(uri_str)
with pytest.raises(ValueError):
normalize_local_path_uri(uri) | neuro-sdk/tests/test_url_utils.py | import asyncio
import sys
from pathlib import Path
from typing import Any, AsyncIterator, Callable
import pytest
from yarl import URL
from neuro_sdk import Client
from neuro_sdk.url_utils import (
_extract_path,
normalize_local_path_uri,
normalize_storage_path_uri,
uri_from_cli,
)
@pytest.fixture
async def client(
loop: asyncio.AbstractEventLoop, make_client: Callable[..., Client]
) -> AsyncIterator[Client]:
async with make_client("https://example.com") as client:
yield client
# asvetlov: I don't like autouse but it is the fastest fix
@pytest.fixture(autouse=True)
def fake_homedir(monkeypatch: Any, tmp_path: Path) -> Path:
monkeypatch.setenv("HOME", str(tmp_path))
return Path.home()
@pytest.fixture
def pwd() -> Path:
return Path.cwd()
async def test_config_username(token: str, client: Client) -> None:
assert client.username == "user"
def test_uri_from_cli_relative_path() -> None:
uri = uri_from_cli("path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == Path("path/to/file.txt").absolute().as_uri()
def test_uri_from_cli_absolute_path() -> None:
uri = uri_from_cli("/path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == Path("/path/to/file.txt").absolute().as_uri()
def test_uri_from_cli_relative_path_special_chars() -> None:
uri = uri_from_cli("path/to/file#%23:?@~", "testuser", "test-cluster")
assert uri.path.endswith("/path/to/file#%23:?@~")
def test_uri_from_cli_absolute_path_special_chars() -> None:
uri = uri_from_cli("/path/to/file#%23:?@~", "testuser", "test-cluster")
assert _extract_path(uri) == Path("/path/to/file#%23:?@~").absolute()
def test_uri_from_cli_path_with_tilde(fake_homedir: Path) -> None:
uri = uri_from_cli("~/path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == (fake_homedir / "path/to/file.txt").as_uri()
@pytest.mark.skipif(
sys.platform == "win32",
reason="expanduser() does not fail for unknown user on Windows",
)
def test_uri_from_cli_path_with_tilde_unknown_user() -> None:
with pytest.raises(ValueError, match=r"Cannot expand user for "):
uri_from_cli("~unknownuser/path/to/file.txt", "testuser", "test-cluster")
def test_uri_from_cli_tilde_only(fake_homedir: Path) -> None:
uri = uri_from_cli("~", "testuser", "test-cluster")
assert str(uri) == fake_homedir.as_uri()
def test_uri_from_cli_relative_file_uri() -> None:
uri = uri_from_cli("file:path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == Path("path/to/file.txt").absolute().as_uri()
def test_uri_from_cli_absolute_file_uri() -> None:
uri = uri_from_cli("file:/path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == Path("/path/to/file.txt").absolute().as_uri()
uri = uri_from_cli("file:///path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == Path("/path/to/file.txt").absolute().as_uri()
def test_uri_from_cli_relative_file_uri_special_chars() -> None:
uri = uri_from_cli(
"file:path/to/file%23%252d%3f:@~%C3%9F", "testuser", "test-cluster"
)
assert uri.path.endswith("/path/to/file#%2d?:@~ß")
def test_uri_from_cli_absolute_file_uri_special_chars() -> None:
uri = uri_from_cli(
"file:/path/to/file%23%252d%3f:@~%C3%9F", "testuser", "test-cluster"
)
assert uri.path.endswith("/path/to/file#%2d?:@~ß")
def test_uri_from_cli_relative_storage_uri() -> None:
uri = uri_from_cli("storage:path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == "storage://test-cluster/testuser/path/to/file.txt"
uri = uri_from_cli("storage:/path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == "storage://test-cluster/path/to/file.txt"
def test_uri_from_cli_absolute_storage_uri() -> None:
uri = uri_from_cli(
"storage://otheruser/path/to/file.txt", "testuser", "test-cluster"
)
assert str(uri) == "storage://otheruser/path/to/file.txt"
uri = uri_from_cli("storage:///path/to/file.txt", "testuser", "test-cluster")
assert str(uri) == "storage://test-cluster/path/to/file.txt"
def test_uri_from_cli_absolute_storage_uri_special_chars() -> None:
uri = uri_from_cli(
"storage://cluster/user/path/to/file%23%252d%3f:@~%C3%9F",
"testuser",
"test-cluster",
)
assert uri.path == "/user/path/to/file#%2d?:@~ß"
def test_uri_from_cli_numberic_path() -> None:
uri = uri_from_cli("256", "testuser", "test-cluster")
assert str(uri) == Path("256").absolute().as_uri()
uri = uri_from_cli("123456", "testuser", "test-cluster")
assert str(uri) == Path("123456").absolute().as_uri()
uri = uri_from_cli("file:256", "testuser", "test-cluster")
assert str(uri) == Path("256").absolute().as_uri()
uri = uri_from_cli("file:123456", "testuser", "test-cluster")
assert str(uri) == Path("123456").absolute().as_uri()
uri = uri_from_cli("storage:256", "testuser", "test-cluster")
assert str(uri) == "storage://test-cluster/testuser/256"
uri = uri_from_cli("storage:123456", "testuser", "test-cluster")
assert str(uri) == "storage://test-cluster/testuser/123456"
@pytest.mark.parametrize(
"path_or_uri",
[
"https://cluster/user/path/to",
"file://cluster/user/path/to",
"file:/path/to#fragment",
"file:/path/to#",
"file:/path/to?key=value",
"file:/path/to?",
],
)
async def test_uri_from_cli__file__fail(path_or_uri: str) -> None:
with pytest.raises(ValueError):
uri_from_cli(path_or_uri, "u", "c", allowed_schemes=("file",))
@pytest.mark.parametrize(
"uri",
[
"",
"https://cluster/user/path/to",
"storage://cluster/user/path/to#fragment",
"storage://cluster/user/path/to#",
"storage://cluster/user/path/to?key=value",
"storage://cluster/user/path/to?",
"storage://user@cluster/user/path/to",
"storage://:password@cluster/user/path/to",
"storage://:@cluster/user/path/to",
"storage://cluster:1234/user/path/to",
],
)
async def test_uri_from_cli__storage__fail(uri: str) -> None:
with pytest.raises(ValueError):
uri_from_cli(uri, "u", "c", allowed_schemes=("storage",))
@pytest.mark.parametrize(
"uri",
[
"",
"https://cluster/user/image",
"image://cluster/user/image#fragment",
"image://cluster/user/image#",
"image://cluster/user/image?key=value",
"image://cluster/user/image?",
"image://user@cluster/user/image",
"image://:password@cluster/user/image",
"image://:@cluster/user/image",
"image://cluster:1234/user/image",
],
)
async def test_uri_from_cli__image__fail(uri: str) -> None:
with pytest.raises(ValueError):
uri_from_cli(uri, "u", "c", allowed_schemes=("image",))
@pytest.mark.parametrize(
"uri",
[
"",
"https://cluster/bucket/object",
"blob://cluster/bucket/object#fragment",
"blob://cluster/bucket/object#",
"blob://cluster/bucket/object?key=value",
"blob://cluster/bucket/object?",
"blob://user@cluster/bucket/object",
"blob://:password@cluster/bucket/object",
"blob://:@cluster/bucket/object",
"blob://cluster:1234/bucket/object",
],
)
async def test_uri_from_cli__blob__fail(uri: str) -> None:
with pytest.raises(ValueError):
uri_from_cli(uri, "u", "c", allowed_schemes=("blob",))
async def test_normalize_storage_path_uri_no_path(client: Client) -> None:
url = URL("storage:")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user"
assert str(url) == "storage://test-cluster/user"
async def test_normalize_local_path_uri_no_path(pwd: Path) -> None:
url = URL("file:")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd
async def test_normalize_storage_path_uri_no_slashes(client: Client) -> None:
url = URL("storage:file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/file.txt"
assert str(url) == "storage://test-cluster/user/file.txt"
async def test_normalize_local_path_uri_no_slashes(pwd: Path) -> None:
url = URL("file:file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "file.txt"
async def test_normalize_storage_path_uri__0_slashes_relative(client: Client) -> None:
url = URL("storage:path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/path/to/file.txt"
assert str(url) == "storage://test-cluster/user/path/to/file.txt"
async def test_normalize_local_path_uri__0_slashes_relative(pwd: Path) -> None:
url = URL("file:path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to/file.txt"
async def test_normalize_storage_path_uri__1_slash_absolute(client: Client) -> None:
url = URL("storage:/path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
async def test_normalize_local_path_uri__1_slash_absolute(pwd: Path) -> None:
url = URL("file:/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == Path(pwd.drive + "/path/to/file.txt")
async def test_normalize_storage_path_uri__2_slashes(client: Client) -> None:
url = URL("storage://path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "path"
assert url.path == "/to/file.txt"
assert str(url) == "storage://path/to/file.txt"
async def test_normalize_local_path_uri__2_slashes(pwd: Path) -> None:
url = URL("file://path/to/file.txt")
with pytest.raises(
ValueError, match="Host part is not allowed in file URI, found 'path'"
):
url = normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__3_slashes_relative(client: Client) -> None:
url = URL("storage:///path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
async def test_normalize_local_path_uri__3_slashes_relative(pwd: Path) -> None:
url = URL("file:///path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == Path(pwd.drive + "/path/to/file.txt")
async def test_normalize_storage_path_uri__4_slashes_relative(client: Client) -> None:
url = URL("storage:////path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
@pytest.mark.skipif(sys.platform == "win32", reason="Doesn't work on Windows")
async def test_normalize_local_path_uri__4_slashes_relative() -> None:
url = URL("file:////path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert url.path == "/path/to/file.txt"
assert str(url) == f"file:///path/to/file.txt"
@pytest.mark.parametrize(
"uri_str",
[
"",
"https://cluster/user/path/to",
"storage://cluster/user/path/to#fragment",
"storage://cluster/user/path/to?key=value",
"storage://user@cluster/user/path/to",
"storage://:password@cluster/user/path/to",
"storage://:@cluster/user/path/to",
"storage://cluster:1234/user/path/to",
],
)
async def test_normalize_storage_path_uri__fail(uri_str: str) -> None:
uri = URL(uri_str)
with pytest.raises(ValueError):
normalize_storage_path_uri(uri, "test-user", "test-cluster")
async def test_normalize_storage_path_uri__tilde_in_relative_path(
client: Client,
) -> None:
url = URL("storage:~/path/to/file.txt")
with pytest.raises(ValueError, match=".*Cannot expand user.*"):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_in_relative_path(
fake_homedir: Path,
) -> None:
url = URL("file:~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__tilde_in_relative_path_2(
client: Client,
) -> None:
url = URL("storage:./~/path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/~/path/to/file.txt"
assert str(url) == "storage://test-cluster/user/~/path/to/file.txt"
async def test_normalize_local_path_uri__tilde_in_relative_path_2(
pwd: Path,
) -> None:
url = URL("file:./~/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "~/path/to/file.txt"
assert str(url) == (pwd / "~/path/to/file.txt").as_uri().replace("%7E", "~")
async def test_normalize_storage_path_uri__tilde_in_relative_path_3(
client: Client,
) -> None:
url = URL("storage:path/to~file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/path/to~file.txt"
assert str(url) == "storage://test-cluster/user/path/to~file.txt"
async def test_normalize_local_path_uri__tilde_in_relative_path_3(
fake_homedir: Path, pwd: Path
) -> None:
url = URL("file:path/to~file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to~file.txt"
assert str(url) == (pwd / "path/to~file.txt").as_uri().replace("%7E", "~")
async def test_normalize_storage_path_uri__tilde_in_absolute_path(
client: Client,
) -> None:
url = URL("storage:/~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_in_absolute_path(
fake_homedir: Path, pwd: Path
) -> None:
url = URL("file:/~/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "/~/path/to/file.txt"
assert str(url) == (pwd / "/~/path/to/file.txt").as_uri().replace("%7E", "~")
async def test_normalize_storage_path_uri__tilde_in_host(client: Client) -> None:
url = URL("storage://~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_in_host(
client: Client, pwd: Path
) -> None:
url = URL("file://~/path/to/file.txt")
with pytest.raises(
ValueError, match=f"Host part is not allowed in file URI, found '~'"
):
url = normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__bad_scheme(client: Client) -> None:
with pytest.raises(ValueError, match="Invalid storage scheme 'other:'"):
url = URL("other:path/to/file.txt")
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__bad_scheme() -> None:
with pytest.raises(ValueError, match="Invalid local file scheme 'other:'"):
url = URL("other:path/to/file.txt")
normalize_local_path_uri(url)
# The tests below check that f(f(x)) == f(x) where f is a path normalization function
async def test_normalize_storage_path_uri__no_slash__double(client: Client) -> None:
url = URL("storage:path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/user/path/to/file.txt"
assert str(url) == "storage://test-cluster/user/path/to/file.txt"
async def test_normalize_local_path_uri__no_slash__double(pwd: Path) -> None:
url = URL("file:path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to/file.txt"
async def test_normalize_storage_path_uri__tilde_slash__double(client: Client) -> None:
url = URL("storage:~/path/to/file.txt")
with pytest.raises(ValueError, match=".*Cannot expand user.*"):
normalize_storage_path_uri(url, client.username, "test-cluster")
async def test_normalize_local_path_uri__tilde_slash__double() -> None:
url = URL("file:~/path/to/file.txt")
with pytest.raises(ValueError, match=r"Cannot expand user for "):
normalize_local_path_uri(url)
async def test_normalize_storage_path_uri__3_slashes__double(client: Client) -> None:
url = URL("storage:///path/to/file.txt")
url = normalize_storage_path_uri(url, client.username, "test-cluster")
assert url.scheme == "storage"
assert url.host == "test-cluster"
assert url.path == "/path/to/file.txt"
assert str(url) == "storage://test-cluster/path/to/file.txt"
async def test_normalize_local_path_uri__3_slashes__double(pwd: Path) -> None:
url = URL(f"file:///{pwd}/path/to/file.txt")
url = normalize_local_path_uri(url)
assert url.scheme == "file"
assert url.host is None
assert _extract_path(url) == pwd / "path/to/file.txt"
assert str(url) == (pwd / "path/to/file.txt").as_uri()
@pytest.mark.skipif(sys.platform != "win32", reason="Requires Windows")
def test_normalized_path() -> None:
p = URL("file:///Z:/neuro/platform-api-clients/python/setup.py")
assert normalize_local_path_uri(p) == p
@pytest.mark.parametrize(
"uri_str",
[
"",
"https://cluster/user/path/to",
"file://cluster/user/path/to",
"file:/path/to#fragment",
"file:/path/to?key=value",
],
)
async def test_normalize_local_path_uri__fail(uri_str: str) -> None:
uri = URL(uri_str)
with pytest.raises(ValueError):
normalize_local_path_uri(uri) | 0.428712 | 0.450601 |
from ducktape.utils.util import wait_until
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.performance import PerformanceService
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.services.kafka.directory import kafka_dir, KAFKA_TRUNK
from kafkatest.services.kafka.version import TRUNK, V_0_9_0_0
import os
import subprocess
class ProducerPerformanceService(JmxMixin, PerformanceService):
PERSISTENT_ROOT = "/mnt/producer_performance"
STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "producer_performance.stdout")
STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "producer_performance.stderr")
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "producer_performance.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
def __init__(self, context, num_nodes, kafka, topic, num_records, record_size, throughput, version=TRUNK, settings={},
intermediate_stats=False, client_id="producer-performance", jmx_object_names=None, jmx_attributes=[]):
JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)
PerformanceService.__init__(self, context, num_nodes)
self.logs = {
"producer_performance_stdout": {
"path": ProducerPerformanceService.STDOUT_CAPTURE,
"collect_default": True},
"producer_performance_stderr": {
"path": ProducerPerformanceService.STDERR_CAPTURE,
"collect_default": True},
"producer_performance_log": {
"path": ProducerPerformanceService.LOG_FILE,
"collect_default": True},
"jmx_log": {
"path": "/mnt/jmx_tool.log",
"collect_default": jmx_object_names is not None
}
}
self.kafka = kafka
self.security_config = kafka.security_config.client_config()
security_protocol = self.security_config.security_protocol
assert version >= V_0_9_0_0 or security_protocol == SecurityConfig.PLAINTEXT, \
"Security protocol %s is only supported if version >= 0.9.0.0, version %s" % (self.security_config, str(version))
self.args = {
'topic': topic,
'kafka_opts': self.security_config.kafka_opts,
'num_records': num_records,
'record_size': record_size,
'throughput': throughput
}
self.settings = settings
self.intermediate_stats = intermediate_stats
self.client_id = client_id
for node in self.nodes:
node.version = version
def start_cmd(self, node):
args = self.args.copy()
args.update({
'bootstrap_servers': self.kafka.bootstrap_servers(self.security_config.security_protocol),
'jmx_port': self.jmx_port,
'client_id': self.client_id,
'kafka_directory': kafka_dir(node)
})
cmd = ""
if node.version < TRUNK:
# In order to ensure more consistent configuration between versions, always use the ProducerPerformance
# tool from trunk
cmd += "for file in /opt/%s/tools/build/libs/kafka-tools*.jar; do CLASSPATH=$CLASSPATH:$file; done; " % KAFKA_TRUNK
cmd += "for file in /opt/%s/tools/build/dependant-libs-${SCALA_VERSION}*/*.jar; do CLASSPATH=$CLASSPATH:$file; done; " % KAFKA_TRUNK
cmd += "export CLASSPATH; "
cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % ProducerPerformanceService.LOG4J_CONFIG
cmd += "JMX_PORT=%(jmx_port)d KAFKA_OPTS=%(kafka_opts)s /opt/%(kafka_directory)s/bin/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance " \
"--topic %(topic)s --num-records %(num_records)d --record-size %(record_size)d --throughput %(throughput)d --producer-props bootstrap.servers=%(bootstrap_servers)s client.id=%(client_id)s" % args
self.security_config.setup_node(node)
if self.security_config.security_protocol != SecurityConfig.PLAINTEXT:
self.settings.update(self.security_config.properties)
for key, value in self.settings.items():
cmd += " %s=%s" % (str(key), str(value))
cmd += " 2>>%s | tee %s" % (ProducerPerformanceService.STDERR_CAPTURE, ProducerPerformanceService.STDOUT_CAPTURE)
return cmd
def pids(self, node):
    """Return the pids of ProducerPerformance JVMs running on `node` ([] if none)."""
    list_pids_cmd = "jps | grep -i ProducerPerformance | awk '{print $1}'"
    try:
        return list(node.account.ssh_capture(list_pids_cmd, allow_fail=True, callback=int))
    except (subprocess.CalledProcessError, ValueError):
        # No matching process (or unparseable jps output) -> treat as "not running".
        return []
def alive(self, node):
    """True while at least one ProducerPerformance process is running on `node`."""
    return bool(self.pids(node))
def _worker(self, idx, node):
    """Run one ProducerPerformance instance on `node` and collect its statistics.

    Uploads the log4j config, launches the tool, attaches the JMX tool once
    output appears, waits (up to 20 min) for completion, then parses the
    captured stdout into self.stats / self.results.
    """
    node.account.ssh("mkdir -p %s" % ProducerPerformanceService.PERSISTENT_ROOT, allow_fail=False)

    # Create and upload log properties
    log_config = self.render('tools_log4j.properties', log_file=ProducerPerformanceService.LOG_FILE)
    node.account.create_file(ProducerPerformanceService.LOG4J_CONFIG, log_config)

    cmd = self.start_cmd(node)
    self.logger.debug("Producer performance %d command: %s", idx, cmd)

    # start ProducerPerformance process
    producer_output = node.account.ssh_capture(cmd)
    wait_until(lambda: self.alive(node), timeout_sec=20, err_msg="ProducerPerformance failed to start")

    # block until there is at least one line of output
    first_line = next(producer_output, None)
    if first_line is None:
        raise Exception("No output from ProducerPerformance")

    self.start_jmx_tool(idx, node)
    wait_until(lambda: not self.alive(node), timeout_sec=1200, err_msg="ProducerPerformance failed to finish")
    self.read_jmx_output(idx, node)

    # parse producer output from the tee'd capture file
    last = None
    producer_output = node.account.ssh_capture("cat %s" % ProducerPerformanceService.STDOUT_CAPTURE)
    for line in producer_output:
        if self.intermediate_stats:
            try:
                self.stats[idx-1].append(self.parse_stats(line))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # propagate; extraneous log messages are still skipped.
                pass
        last = line
    try:
        self.results[idx-1] = self.parse_stats(last)
    except Exception:
        # Narrowed from a bare `except:`; an unparseable final line is fatal.
        raise Exception("Unable to parse aggregate performance statistics on node %d: %s" % (idx, last))
def parse_stats(self, line):
    """Parse one summary line printed by ProducerPerformance into a metrics dict.

    Raises (ValueError / IndexError / AttributeError) when `line` does not look
    like the tool's statistics output; _worker relies on that to skip log noise.
    """
    fields = line.split(',')
    throughput_field = fields[1]
    stats = {
        'records': int(fields[0].split()[0]),
        'records_per_sec': float(throughput_field.split()[0]),
        'mbps': float(throughput_field.split('(')[1].split()[0]),
    }
    latency_keys = ('latency_avg_ms', 'latency_max_ms', 'latency_50th_ms',
                    'latency_95th_ms', 'latency_99th_ms', 'latency_999th_ms')
    for offset, key in enumerate(latency_keys):
        # Latency values occupy comma-fields 2..7; a missing field raises
        # IndexError, matching the original behavior on malformed lines.
        stats[key] = float(fields[2 + offset].split()[0])
    return stats
from ducktape.utils.util import wait_until
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.performance import PerformanceService
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.services.kafka.directory import kafka_dir, KAFKA_TRUNK
from kafkatest.services.kafka.version import TRUNK, V_0_9_0_0
import os
import subprocess
class ProducerPerformanceService(JmxMixin, PerformanceService):
    """Run org.apache.kafka.tools.ProducerPerformance on a set of nodes and collect its stats."""

    PERSISTENT_ROOT = "/mnt/producer_performance"
    STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "producer_performance.stdout")
    STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "producer_performance.stderr")
    LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
    LOG_FILE = os.path.join(LOG_DIR, "producer_performance.log")
    LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")

    def __init__(self, context, num_nodes, kafka, topic, num_records, record_size, throughput, version=TRUNK, settings=None,
                 intermediate_stats=False, client_id="producer-performance", jmx_object_names=None, jmx_attributes=None):
        """Configure the service.

        `settings` and `jmx_attributes` default to None instead of the original
        mutable defaults ({} / []) so state is never shared across instances;
        passing an explicit dict/list behaves exactly as before.
        """
        JmxMixin.__init__(self, num_nodes, jmx_object_names,
                          [] if jmx_attributes is None else jmx_attributes)
        PerformanceService.__init__(self, context, num_nodes)

        self.logs = {
            "producer_performance_stdout": {
                "path": ProducerPerformanceService.STDOUT_CAPTURE,
                "collect_default": True},
            "producer_performance_stderr": {
                "path": ProducerPerformanceService.STDERR_CAPTURE,
                "collect_default": True},
            "producer_performance_log": {
                "path": ProducerPerformanceService.LOG_FILE,
                "collect_default": True},
            "jmx_log": {
                "path": "/mnt/jmx_tool.log",
                "collect_default": jmx_object_names is not None
            }
        }

        self.kafka = kafka
        self.security_config = kafka.security_config.client_config()
        security_protocol = self.security_config.security_protocol
        # Fixed message: report the protocol, not the whole config object.
        assert version >= V_0_9_0_0 or security_protocol == SecurityConfig.PLAINTEXT, \
            "Security protocol %s is only supported if version >= 0.9.0.0, version %s" % (security_protocol, str(version))
        self.args = {
            'topic': topic,
            'kafka_opts': self.security_config.kafka_opts,
            'num_records': num_records,
            'record_size': record_size,
            'throughput': throughput
        }
        self.settings = {} if settings is None else settings
        self.intermediate_stats = intermediate_stats
        self.client_id = client_id

        for node in self.nodes:
            node.version = version

    def start_cmd(self, node):
        """Build the shell command line that launches ProducerPerformance on `node`."""
        args = self.args.copy()
        args.update({
            'bootstrap_servers': self.kafka.bootstrap_servers(self.security_config.security_protocol),
            'jmx_port': self.jmx_port,
            'client_id': self.client_id,
            'kafka_directory': kafka_dir(node)
        })
        cmd = ""
        if node.version < TRUNK:
            # In order to ensure more consistent configuration between versions, always use the
            # ProducerPerformance tool from trunk.
            cmd += "for file in /opt/%s/tools/build/libs/kafka-tools*.jar; do CLASSPATH=$CLASSPATH:$file; done; " % KAFKA_TRUNK
            cmd += "for file in /opt/%s/tools/build/dependant-libs-${SCALA_VERSION}*/*.jar; do CLASSPATH=$CLASSPATH:$file; done; " % KAFKA_TRUNK
            cmd += "export CLASSPATH; "
        cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % ProducerPerformanceService.LOG4J_CONFIG
        cmd += "JMX_PORT=%(jmx_port)d KAFKA_OPTS=%(kafka_opts)s /opt/%(kafka_directory)s/bin/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance " \
              "--topic %(topic)s --num-records %(num_records)d --record-size %(record_size)d --throughput %(throughput)d --producer-props bootstrap.servers=%(bootstrap_servers)s client.id=%(client_id)s" % args

        self.security_config.setup_node(node)
        # Merge security properties into a local copy: the original mutated
        # self.settings on every call, accumulating entries across invocations.
        settings = dict(self.settings)
        if self.security_config.security_protocol != SecurityConfig.PLAINTEXT:
            settings.update(self.security_config.properties)
        for key, value in settings.items():
            cmd += " %s=%s" % (str(key), str(value))

        # Capture stderr to a file and tee stdout so _worker can both stream and re-read it.
        cmd += " 2>>%s | tee %s" % (ProducerPerformanceService.STDERR_CAPTURE, ProducerPerformanceService.STDOUT_CAPTURE)
        return cmd

    def pids(self, node):
        """Return the pids of ProducerPerformance JVMs running on `node` ([] if none)."""
        try:
            cmd = "jps | grep -i ProducerPerformance | awk '{print $1}'"
            return list(node.account.ssh_capture(cmd, allow_fail=True, callback=int))
        except (subprocess.CalledProcessError, ValueError):
            return []

    def alive(self, node):
        """True while at least one ProducerPerformance process is running on `node`."""
        return len(self.pids(node)) > 0

    def _worker(self, idx, node):
        """Run one ProducerPerformance instance on `node` and collect its statistics."""
        node.account.ssh("mkdir -p %s" % ProducerPerformanceService.PERSISTENT_ROOT, allow_fail=False)

        # Create and upload log properties
        log_config = self.render('tools_log4j.properties', log_file=ProducerPerformanceService.LOG_FILE)
        node.account.create_file(ProducerPerformanceService.LOG4J_CONFIG, log_config)

        cmd = self.start_cmd(node)
        self.logger.debug("Producer performance %d command: %s", idx, cmd)

        # start ProducerPerformance process
        producer_output = node.account.ssh_capture(cmd)
        wait_until(lambda: self.alive(node), timeout_sec=20, err_msg="ProducerPerformance failed to start")

        # block until there is at least one line of output
        first_line = next(producer_output, None)
        if first_line is None:
            raise Exception("No output from ProducerPerformance")

        self.start_jmx_tool(idx, node)
        wait_until(lambda: not self.alive(node), timeout_sec=1200, err_msg="ProducerPerformance failed to finish")
        self.read_jmx_output(idx, node)

        # parse producer output from the tee'd capture file
        last = None
        producer_output = node.account.ssh_capture("cat %s" % ProducerPerformanceService.STDOUT_CAPTURE)
        for line in producer_output:
            if self.intermediate_stats:
                try:
                    self.stats[idx-1].append(self.parse_stats(line))
                except Exception:
                    # Narrowed from a bare `except:`; extraneous log lines are skipped.
                    pass
            last = line
        try:
            self.results[idx-1] = self.parse_stats(last)
        except Exception:
            # Narrowed from a bare `except:`; an unparseable final line is fatal.
            raise Exception("Unable to parse aggregate performance statistics on node %d: %s" % (idx, last))

    def parse_stats(self, line):
        """Parse one summary line printed by ProducerPerformance into a metrics dict.

        Raises on lines that do not match the tool's statistics format;
        _worker relies on that to skip log noise.
        """
        parts = line.split(',')
        rate_field = parts[1]
        return {
            'records': int(parts[0].split()[0]),
            'records_per_sec': float(rate_field.split()[0]),
            'mbps': float(rate_field.split('(')[1].split()[0]),
            'latency_avg_ms': float(parts[2].split()[0]),
            'latency_max_ms': float(parts[3].split()[0]),
            'latency_50th_ms': float(parts[4].split()[0]),
            'latency_95th_ms': float(parts[5].split()[0]),
            'latency_99th_ms': float(parts[6].split()[0]),
            'latency_999th_ms': float(parts[7].split()[0]),
        }
import pytest
import click
from click._bashcomplete import get_choices
def choices_without_help(cli, args, incomplete):
    """Return only the completion values offered for `incomplete`, dropping help text."""
    return [completion[0] for completion in get_choices(cli, "dummy", args, incomplete)]
def choices_with_help(cli, args, incomplete):
    """Return the full (value, help) completion tuples, materialized as a list."""
    completions = get_choices(cli, "dummy", args, incomplete)
    return list(completions)
def test_single_command():
    """A lone command completes its own options for '-' and nothing otherwise."""

    @click.command()
    @click.option("--local-opt")
    def cli(local_opt):
        pass

    for incomplete, expected in (("-", ["--local-opt"]), ("", [])):
        assert choices_without_help(cli, [], incomplete) == expected
def test_boolean_flag():
    """A --x/--no-x flag offers both spellings as completions."""

    @click.command()
    @click.option("--shout/--no-shout", default=False)
    def cli(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--shout", "--no-shout"]
def test_multi_value_option():
    """An nargs=2 option consumes two values before subcommands are offered again."""

    @click.group()
    @click.option("--pos", nargs=2, type=float)
    def cli(local_opt):
        pass

    @cli.command()
    @click.option("--local-opt")
    def sub(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--pos"]
    assert choices_without_help(cli, ["--pos"], "") == []
    assert choices_without_help(cli, ["--pos", "1.0"], "") == []
    assert choices_without_help(cli, ["--pos", "1.0", "1.0"], "") == ["sub"]
def test_multi_option():
    """A multiple=True option is completed by both its long and short names."""

    @click.command()
    @click.option("--message", "-m", multiple=True)
    def cli(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--message", "-m"]
    assert choices_without_help(cli, ["-m"], "") == []
def test_small_chain():
    """Group and subcommand each complete their own options and names."""

    @click.group()
    @click.option("--global-opt")
    def cli(global_opt):
        pass

    @cli.command()
    @click.option("--local-opt")
    def sub(local_opt):
        pass

    assert choices_without_help(cli, [], "") == ["sub"]
    assert choices_without_help(cli, [], "-") == ["--global-opt"]
    assert choices_without_help(cli, ["sub"], "") == []
    assert choices_without_help(cli, ["sub"], "-") == ["--local-opt"]
def test_long_chain():
    """Completion walks a three-level group chain, including Choice types and
    custom autocompletion callbacks (prefix match vs. substring search)."""

    @click.group("cli")
    @click.option("--cli-opt")
    def cli(cli_opt):
        pass

    @cli.group("asub")
    @click.option("--asub-opt")
    def asub(asub_opt):
        pass

    @asub.group("bsub")
    @click.option("--bsub-opt")
    def bsub(bsub_opt):
        pass

    COLORS = ["red", "green", "blue"]

    # Prefix-matching completion callback for the positional argument.
    def get_colors(ctx, args, incomplete):
        for c in COLORS:
            if c.startswith(incomplete):
                yield c

    # Substring-matching completion callback for --search-color.
    def search_colors(ctx, args, incomplete):
        for c in COLORS:
            if incomplete in c:
                yield c

    CSUB_OPT_CHOICES = ["foo", "bar"]
    CSUB_CHOICES = ["bar", "baz"]

    @bsub.command("csub")
    @click.option("--csub-opt", type=click.Choice(CSUB_OPT_CHOICES))
    @click.option("--csub", type=click.Choice(CSUB_CHOICES))
    @click.option("--search-color", autocompletion=search_colors)
    @click.argument("color", autocompletion=get_colors)
    def csub(csub_opt, color):
        pass

    assert choices_without_help(cli, [], "-") == ["--cli-opt"]
    assert choices_without_help(cli, [], "") == ["asub"]
    assert choices_without_help(cli, ["asub"], "-") == ["--asub-opt"]
    assert choices_without_help(cli, ["asub"], "") == ["bsub"]
    assert choices_without_help(cli, ["asub", "bsub"], "-") == ["--bsub-opt"]
    assert choices_without_help(cli, ["asub", "bsub"], "") == ["csub"]
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "-") == [
        "--csub-opt",
        "--csub",
        "--search-color",
    ]
    assert (
        choices_without_help(cli, ["asub", "bsub", "csub", "--csub-opt"], "")
        == CSUB_OPT_CHOICES
    )
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "--csub") == [
        "--csub-opt",
        "--csub",
    ]
    assert (
        choices_without_help(cli, ["asub", "bsub", "csub", "--csub"], "")
        == CSUB_CHOICES
    )
    assert choices_without_help(cli, ["asub", "bsub", "csub", "--csub-opt"], "f") == [
        "foo"
    ]
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "") == COLORS
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "b") == ["blue"]
    assert choices_without_help(
        cli, ["asub", "bsub", "csub", "--search-color"], "een"
    ) == ["green"]
def test_chaining():
    """In a chain=True group, completed subcommands can follow one another and
    each consumes its own arguments before the next is offered."""

    @click.group("cli", chain=True)
    @click.option("--cli-opt")
    @click.argument("arg", type=click.Choice(["cliarg1", "cliarg2"]))
    def cli(cli_opt, arg):
        pass

    @cli.command()
    @click.option("--asub-opt")
    def asub(asub_opt):
        pass

    @cli.command(help="bsub help")
    @click.option("--bsub-opt")
    @click.argument("arg", type=click.Choice(["arg1", "arg2"]))
    def bsub(bsub_opt, arg):
        pass

    @cli.command()
    @click.option("--csub-opt")
    @click.argument("arg", type=click.Choice(["carg1", "carg2"]), default="carg1")
    def csub(csub_opt, arg):
        pass

    assert choices_without_help(cli, [], "-") == ["--cli-opt"]
    assert choices_without_help(cli, [], "") == ["cliarg1", "cliarg2"]
    assert choices_without_help(cli, ["cliarg1", "asub"], "-") == ["--asub-opt"]
    assert choices_without_help(cli, ["cliarg1", "asub"], "") == ["bsub", "csub"]
    assert choices_without_help(cli, ["cliarg1", "bsub"], "") == ["arg1", "arg2"]
    assert choices_without_help(cli, ["cliarg1", "asub", "--asub-opt"], "") == []
    assert choices_without_help(
        cli, ["cliarg1", "asub", "--asub-opt", "5", "bsub"], "-"
    ) == ["--bsub-opt"]
    assert choices_without_help(cli, ["cliarg1", "asub", "bsub"], "-") == ["--bsub-opt"]
    assert choices_without_help(cli, ["cliarg1", "asub", "csub"], "") == [
        "carg1",
        "carg2",
    ]
    assert choices_without_help(cli, ["cliarg1", "bsub", "arg1", "csub"], "") == [
        "carg1",
        "carg2",
    ]
    assert choices_without_help(cli, ["cliarg1", "asub", "csub"], "-") == ["--csub-opt"]
    assert choices_with_help(cli, ["cliarg1", "asub"], "b") == [("bsub", "bsub help")]
def test_argument_choice():
    """Sequential Choice arguments are completed one position at a time."""

    @click.command()
    @click.argument("arg1", required=True, type=click.Choice(["arg11", "arg12"]))
    @click.argument("arg2", type=click.Choice(["arg21", "arg22"]), default="arg21")
    @click.argument("arg3", type=click.Choice(["arg", "argument"]), default="arg")
    def cli():
        pass

    assert choices_without_help(cli, [], "") == ["arg11", "arg12"]
    assert choices_without_help(cli, [], "arg") == ["arg11", "arg12"]
    assert choices_without_help(cli, ["arg11"], "") == ["arg21", "arg22"]
    assert choices_without_help(cli, ["arg12", "arg21"], "") == ["arg", "argument"]
    assert choices_without_help(cli, ["arg12", "arg21"], "argu") == ["argument"]
def test_option_choice():
    """Choice options complete their values after the option name and also via
    the '--opt=' and '--opt =' spellings; consumed options drop out of '-'."""

    @click.command()
    @click.option("--opt1", type=click.Choice(["opt11", "opt12"]), help="opt1 help")
    @click.option("--opt2", type=click.Choice(["opt21", "opt22"]), default="opt21")
    @click.option("--opt3", type=click.Choice(["opt", "option"]))
    def cli():
        pass

    assert choices_with_help(cli, [], "-") == [
        ("--opt1", "opt1 help"),
        ("--opt2", None),
        ("--opt3", None),
    ]
    assert choices_without_help(cli, [], "--opt") == ["--opt1", "--opt2", "--opt3"]
    assert choices_without_help(cli, [], "--opt1=") == ["opt11", "opt12"]
    assert choices_without_help(cli, [], "--opt2=") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt2"], "=") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt2", "="], "opt") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt1"], "") == ["opt11", "opt12"]
    assert choices_without_help(cli, ["--opt2"], "") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt1", "opt11", "--opt2"], "") == [
        "opt21",
        "opt22",
    ]
    assert choices_without_help(cli, ["--opt2", "opt21"], "-") == ["--opt1", "--opt3"]
    assert choices_without_help(cli, ["--opt1", "opt11"], "-") == ["--opt2", "--opt3"]
    assert choices_without_help(cli, ["--opt1"], "opt") == ["opt11", "opt12"]
    assert choices_without_help(cli, ["--opt3"], "opti") == ["option"]
    assert choices_without_help(cli, ["--opt1", "invalid_opt"], "-") == [
        "--opt2",
        "--opt3",
    ]
def test_option_and_arg_choice():
    """Options and a positional Choice argument complete independently."""

    @click.command()
    @click.option("--opt1", type=click.Choice(["opt11", "opt12"]))
    @click.argument("arg1", required=False, type=click.Choice(["arg11", "arg12"]))
    @click.option("--opt2", type=click.Choice(["opt21", "opt22"]))
    def cli():
        pass

    assert choices_without_help(cli, ["--opt1"], "") == ["opt11", "opt12"]
    assert choices_without_help(cli, [""], "--opt1=") == ["opt11", "opt12"]
    assert choices_without_help(cli, [], "") == ["arg11", "arg12"]
    assert choices_without_help(cli, ["--opt2"], "") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["arg11"], "--opt") == ["--opt1", "--opt2"]
    assert choices_without_help(cli, [], "--opt") == ["--opt1", "--opt2"]
def test_boolean_flag_choice():
    """A boolean flag consumes no value, so the argument completes right after it."""

    @click.command()
    @click.option("--shout/--no-shout", default=False)
    @click.argument("arg", required=False, type=click.Choice(["arg1", "arg2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--shout", "--no-shout"]
    assert choices_without_help(cli, ["--shout"], "") == ["arg1", "arg2"]
def test_multi_value_option_choice():
    """An nargs=2 Choice option keeps completing values until both are supplied."""

    @click.command()
    @click.option("--pos", nargs=2, type=click.Choice(["pos1", "pos2"]))
    @click.argument("arg", required=False, type=click.Choice(["arg1", "arg2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, ["--pos"], "") == ["pos1", "pos2"]
    assert choices_without_help(cli, ["--pos", "pos1"], "") == ["pos1", "pos2"]
    assert choices_without_help(cli, ["--pos", "pos1", "pos2"], "") == ["arg1", "arg2"]
    assert choices_without_help(cli, ["--pos", "pos1", "pos2", "arg1"], "") == []
def test_multi_option_choice():
    """A multiple=True Choice option can be repeated; each use completes values."""

    @click.command()
    @click.option("--message", "-m", multiple=True, type=click.Choice(["m1", "m2"]))
    @click.argument("arg", required=False, type=click.Choice(["arg1", "arg2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, ["-m"], "") == ["m1", "m2"]
    assert choices_without_help(cli, ["-m", "m1", "-m"], "") == ["m1", "m2"]
    assert choices_without_help(cli, ["-m", "m1"], "") == ["arg1", "arg2"]
def test_variadic_argument_choice():
    """A nargs=-1 Choice argument keeps offering its choices indefinitely."""

    @click.command()
    @click.option("--opt", type=click.Choice(["opt1", "opt2"]))
    @click.argument("src", nargs=-1, type=click.Choice(["src1", "src2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, ["src1", "src2"], "") == ["src1", "src2"]
    assert choices_without_help(cli, ["src1", "src2"], "--o") == ["--opt"]
    assert choices_without_help(cli, ["src1", "src2", "--opt"], "") == ["opt1", "opt2"]
    # NOTE(review): duplicate of the first assert above — possibly redundant.
    assert choices_without_help(cli, ["src1", "src2"], "") == ["src1", "src2"]
def test_variadic_argument_complete():
    """An autocompletion callback on a nargs=-1 argument is used for --opt's value
    via the option's absence of a type, at any position in the args list."""

    def _complete(ctx, args, incomplete):
        return ["abc", "def", "ghi", "jkl", "mno", "pqr", "stu", "vwx", "yz"]

    @click.group()
    def entrypoint():
        pass

    @click.command()
    @click.option("--opt", autocompletion=_complete)
    @click.argument("arg", nargs=-1)
    def subcommand(opt, arg):
        pass

    entrypoint.add_command(subcommand)

    assert choices_without_help(entrypoint, ["subcommand", "--opt"], "") == _complete(
        0, 0, 0
    )
    assert choices_without_help(
        entrypoint, ["subcommand", "whatever", "--opt"], ""
    ) == _complete(0, 0, 0)
    assert (
        choices_without_help(entrypoint, ["subcommand", "whatever", "--opt", "abc"], "")
        == []
    )
def test_long_chain_choice():
    """Choice options/arguments complete correctly through nested groups, and
    commands listed after a group include their short_help text."""

    @click.group()
    def cli():
        pass

    @cli.group()
    @click.option("--sub-opt", type=click.Choice(["subopt1", "subopt2"]))
    @click.argument(
        "sub-arg", required=False, type=click.Choice(["subarg1", "subarg2"])
    )
    def sub(sub_opt, sub_arg):
        pass

    @sub.command(short_help="bsub help")
    @click.option("--bsub-opt", type=click.Choice(["bsubopt1", "bsubopt2"]))
    @click.argument(
        "bsub-arg1", required=False, type=click.Choice(["bsubarg1", "bsubarg2"])
    )
    @click.argument(
        "bbsub-arg2", required=False, type=click.Choice(["bbsubarg1", "bbsubarg2"])
    )
    def bsub(bsub_opt):
        pass

    @sub.group("csub")
    def csub():
        pass

    @csub.command()
    def dsub():
        pass

    assert choices_with_help(cli, ["sub", "subarg1"], "") == [
        ("bsub", "bsub help"),
        ("csub", ""),
    ]
    assert choices_without_help(cli, ["sub"], "") == ["subarg1", "subarg2"]
    assert choices_without_help(cli, ["sub", "--sub-opt"], "") == ["subopt1", "subopt2"]
    assert choices_without_help(cli, ["sub", "--sub-opt", "subopt1"], "") == [
        "subarg1",
        "subarg2",
    ]
    assert choices_without_help(
        cli, ["sub", "--sub-opt", "subopt1", "subarg1", "bsub"], "-"
    ) == ["--bsub-opt"]
    assert choices_without_help(
        cli, ["sub", "--sub-opt", "subopt1", "subarg1", "bsub"], ""
    ) == ["bsubarg1", "bsubarg2"]
    assert choices_without_help(
        cli, ["sub", "--sub-opt", "subopt1", "subarg1", "bsub", "--bsub-opt"], ""
    ) == ["bsubopt1", "bsubopt2"]
    assert choices_without_help(
        cli,
        [
            "sub",
            "--sub-opt",
            "subopt1",
            "subarg1",
            "bsub",
            "--bsub-opt",
            "bsubopt1",
            "bsubarg1",
        ],
        "",
    ) == ["bbsubarg1", "bbsubarg2"]
    assert choices_without_help(
        cli, ["sub", "--sub-opt", "subopt1", "subarg1", "csub"], ""
    ) == ["dsub"]
def test_chained_multi():
    """Inside a chain=True subgroup, already-used commands are not offered again."""

    @click.group()
    def cli():
        pass

    @cli.group()
    def sub():
        pass

    @sub.group()
    def bsub():
        pass

    @sub.group(chain=True)
    def csub():
        pass

    @csub.command()
    def dsub():
        pass

    @csub.command()
    def esub():
        pass

    assert choices_without_help(cli, ["sub"], "") == ["bsub", "csub"]
    assert choices_without_help(cli, ["sub"], "c") == ["csub"]
    assert choices_without_help(cli, ["sub", "csub"], "") == ["dsub", "esub"]
    assert choices_without_help(cli, ["sub", "csub", "dsub"], "") == ["esub"]
def test_hidden():
    """Hidden params/commands are excluded from listings but still complete their
    own values/subcommands once typed exactly."""

    @click.group()
    @click.option("--name", hidden=True)
    # NOTE(review): non-string Choice values — the asserts below expect the raw ints back.
    @click.option("--choices", type=click.Choice([1, 2]), hidden=True)
    def cli(name):
        pass

    @cli.group(hidden=True)
    def hgroup():
        pass

    @hgroup.group()
    def hgroupsub():
        pass

    @cli.command()
    def asub():
        pass

    @cli.command(hidden=True)
    @click.option("--hname")
    def hsub():
        pass

    assert choices_without_help(cli, [], "--n") == []
    assert choices_without_help(cli, [], "--c") == []
    # If the user exactly types out the hidden param, complete its options.
    assert choices_without_help(cli, ["--choices"], "") == [1, 2]
    assert choices_without_help(cli, [], "") == ["asub"]
    # NOTE(review): duplicate of the previous assert — possibly redundant.
    assert choices_without_help(cli, [], "") == ["asub"]
    assert choices_without_help(cli, [], "h") == []
    # If the user exactly types out the hidden command, complete its subcommands.
    assert choices_without_help(cli, ["hgroup"], "") == ["hgroupsub"]
    assert choices_without_help(cli, ["hsub"], "--h") == ["--hname"]
@pytest.mark.parametrize(
    ("args", "part", "expect"),
    [
        ([], "-", ["--opt"]),
        (["value"], "--", ["--opt"]),
        ([], "-o", []),
        (["--opt"], "-o", []),
        (["--"], "", ["name", "-o", "--opt", "--"]),
        (["--"], "--o", ["--opt"]),
    ],
)
def test_args_with_double_dash_complete(args, part, expect):
    """After a bare '--', option-like tokens are completed by the argument's
    autocompletion callback instead of being treated as options."""

    def _complete(ctx, args, incomplete):
        values = ["name", "-o", "--opt", "--"]
        return [x for x in values if x.startswith(incomplete)]

    @click.command()
    @click.option("--opt")
    @click.argument("args", nargs=-1, autocompletion=_complete)
    def cli(opt, args):
        pass

    assert choices_without_help(cli, args, part) == expect
import click
from click._bashcomplete import get_choices
def choices_without_help(cli, args, incomplete):
    """Return only the completion values for `incomplete`, dropping help text."""
    completions = get_choices(cli, "dummy", args, incomplete)
    return [c[0] for c in completions]
def choices_with_help(cli, args, incomplete):
    """Return the full (value, help) completion tuples, materialized as a list."""
    return list(get_choices(cli, "dummy", args, incomplete))
def test_single_command():
    """A lone command completes its own options for '-' and nothing otherwise."""

    @click.command()
    @click.option("--local-opt")
    def cli(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--local-opt"]
    assert choices_without_help(cli, [], "") == []
def test_boolean_flag():
    """A --x/--no-x flag offers both spellings as completions."""

    @click.command()
    @click.option("--shout/--no-shout", default=False)
    def cli(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--shout", "--no-shout"]
def test_multi_value_option():
    """An nargs=2 option consumes two values before subcommands are offered again."""

    @click.group()
    @click.option("--pos", nargs=2, type=float)
    def cli(local_opt):
        pass

    @cli.command()
    @click.option("--local-opt")
    def sub(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--pos"]
    assert choices_without_help(cli, ["--pos"], "") == []
    assert choices_without_help(cli, ["--pos", "1.0"], "") == []
    assert choices_without_help(cli, ["--pos", "1.0", "1.0"], "") == ["sub"]
def test_multi_option():
    """A multiple=True option is completed by both its long and short names."""

    @click.command()
    @click.option("--message", "-m", multiple=True)
    def cli(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--message", "-m"]
    assert choices_without_help(cli, ["-m"], "") == []
def test_small_chain():
    """Group and subcommand each complete their own options and names."""

    @click.group()
    @click.option("--global-opt")
    def cli(global_opt):
        pass

    @cli.command()
    @click.option("--local-opt")
    def sub(local_opt):
        pass

    assert choices_without_help(cli, [], "") == ["sub"]
    assert choices_without_help(cli, [], "-") == ["--global-opt"]
    assert choices_without_help(cli, ["sub"], "") == []
    assert choices_without_help(cli, ["sub"], "-") == ["--local-opt"]
def test_long_chain():
    """Completion walks a three-level group chain, including Choice types and
    custom autocompletion callbacks (prefix match vs. substring search)."""

    @click.group("cli")
    @click.option("--cli-opt")
    def cli(cli_opt):
        pass

    @cli.group("asub")
    @click.option("--asub-opt")
    def asub(asub_opt):
        pass

    @asub.group("bsub")
    @click.option("--bsub-opt")
    def bsub(bsub_opt):
        pass

    COLORS = ["red", "green", "blue"]

    # Prefix-matching completion callback for the positional argument.
    def get_colors(ctx, args, incomplete):
        for c in COLORS:
            if c.startswith(incomplete):
                yield c

    # Substring-matching completion callback for --search-color.
    def search_colors(ctx, args, incomplete):
        for c in COLORS:
            if incomplete in c:
                yield c

    CSUB_OPT_CHOICES = ["foo", "bar"]
    CSUB_CHOICES = ["bar", "baz"]

    @bsub.command("csub")
    @click.option("--csub-opt", type=click.Choice(CSUB_OPT_CHOICES))
    @click.option("--csub", type=click.Choice(CSUB_CHOICES))
    @click.option("--search-color", autocompletion=search_colors)
    @click.argument("color", autocompletion=get_colors)
    def csub(csub_opt, color):
        pass

    assert choices_without_help(cli, [], "-") == ["--cli-opt"]
    assert choices_without_help(cli, [], "") == ["asub"]
    assert choices_without_help(cli, ["asub"], "-") == ["--asub-opt"]
    assert choices_without_help(cli, ["asub"], "") == ["bsub"]
    assert choices_without_help(cli, ["asub", "bsub"], "-") == ["--bsub-opt"]
    assert choices_without_help(cli, ["asub", "bsub"], "") == ["csub"]
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "-") == [
        "--csub-opt",
        "--csub",
        "--search-color",
    ]
    assert (
        choices_without_help(cli, ["asub", "bsub", "csub", "--csub-opt"], "")
        == CSUB_OPT_CHOICES
    )
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "--csub") == [
        "--csub-opt",
        "--csub",
    ]
    assert (
        choices_without_help(cli, ["asub", "bsub", "csub", "--csub"], "")
        == CSUB_CHOICES
    )
    assert choices_without_help(cli, ["asub", "bsub", "csub", "--csub-opt"], "f") == [
        "foo"
    ]
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "") == COLORS
    assert choices_without_help(cli, ["asub", "bsub", "csub"], "b") == ["blue"]
    assert choices_without_help(
        cli, ["asub", "bsub", "csub", "--search-color"], "een"
    ) == ["green"]
def test_chaining():
    """In a chain=True group, completed subcommands can follow one another and
    each consumes its own arguments before the next is offered."""

    @click.group("cli", chain=True)
    @click.option("--cli-opt")
    @click.argument("arg", type=click.Choice(["cliarg1", "cliarg2"]))
    def cli(cli_opt, arg):
        pass

    @cli.command()
    @click.option("--asub-opt")
    def asub(asub_opt):
        pass

    @cli.command(help="bsub help")
    @click.option("--bsub-opt")
    @click.argument("arg", type=click.Choice(["arg1", "arg2"]))
    def bsub(bsub_opt, arg):
        pass

    @cli.command()
    @click.option("--csub-opt")
    @click.argument("arg", type=click.Choice(["carg1", "carg2"]), default="carg1")
    def csub(csub_opt, arg):
        pass

    assert choices_without_help(cli, [], "-") == ["--cli-opt"]
    assert choices_without_help(cli, [], "") == ["cliarg1", "cliarg2"]
    assert choices_without_help(cli, ["cliarg1", "asub"], "-") == ["--asub-opt"]
    assert choices_without_help(cli, ["cliarg1", "asub"], "") == ["bsub", "csub"]
    assert choices_without_help(cli, ["cliarg1", "bsub"], "") == ["arg1", "arg2"]
    assert choices_without_help(cli, ["cliarg1", "asub", "--asub-opt"], "") == []
    assert choices_without_help(
        cli, ["cliarg1", "asub", "--asub-opt", "5", "bsub"], "-"
    ) == ["--bsub-opt"]
    assert choices_without_help(cli, ["cliarg1", "asub", "bsub"], "-") == ["--bsub-opt"]
    assert choices_without_help(cli, ["cliarg1", "asub", "csub"], "") == [
        "carg1",
        "carg2",
    ]
    assert choices_without_help(cli, ["cliarg1", "bsub", "arg1", "csub"], "") == [
        "carg1",
        "carg2",
    ]
    assert choices_without_help(cli, ["cliarg1", "asub", "csub"], "-") == ["--csub-opt"]
    assert choices_with_help(cli, ["cliarg1", "asub"], "b") == [("bsub", "bsub help")]
def test_argument_choice():
    """Sequential Choice arguments are completed one position at a time."""

    @click.command()
    @click.argument("arg1", required=True, type=click.Choice(["arg11", "arg12"]))
    @click.argument("arg2", type=click.Choice(["arg21", "arg22"]), default="arg21")
    @click.argument("arg3", type=click.Choice(["arg", "argument"]), default="arg")
    def cli():
        pass

    assert choices_without_help(cli, [], "") == ["arg11", "arg12"]
    assert choices_without_help(cli, [], "arg") == ["arg11", "arg12"]
    assert choices_without_help(cli, ["arg11"], "") == ["arg21", "arg22"]
    assert choices_without_help(cli, ["arg12", "arg21"], "") == ["arg", "argument"]
    assert choices_without_help(cli, ["arg12", "arg21"], "argu") == ["argument"]
def test_option_choice():
    """Choice options complete their values after the option name and also via
    the '--opt=' and '--opt =' spellings; consumed options drop out of '-'."""

    @click.command()
    @click.option("--opt1", type=click.Choice(["opt11", "opt12"]), help="opt1 help")
    @click.option("--opt2", type=click.Choice(["opt21", "opt22"]), default="opt21")
    @click.option("--opt3", type=click.Choice(["opt", "option"]))
    def cli():
        pass

    assert choices_with_help(cli, [], "-") == [
        ("--opt1", "opt1 help"),
        ("--opt2", None),
        ("--opt3", None),
    ]
    assert choices_without_help(cli, [], "--opt") == ["--opt1", "--opt2", "--opt3"]
    assert choices_without_help(cli, [], "--opt1=") == ["opt11", "opt12"]
    assert choices_without_help(cli, [], "--opt2=") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt2"], "=") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt2", "="], "opt") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt1"], "") == ["opt11", "opt12"]
    assert choices_without_help(cli, ["--opt2"], "") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["--opt1", "opt11", "--opt2"], "") == [
        "opt21",
        "opt22",
    ]
    assert choices_without_help(cli, ["--opt2", "opt21"], "-") == ["--opt1", "--opt3"]
    assert choices_without_help(cli, ["--opt1", "opt11"], "-") == ["--opt2", "--opt3"]
    assert choices_without_help(cli, ["--opt1"], "opt") == ["opt11", "opt12"]
    assert choices_without_help(cli, ["--opt3"], "opti") == ["option"]
    assert choices_without_help(cli, ["--opt1", "invalid_opt"], "-") == [
        "--opt2",
        "--opt3",
    ]
def test_option_and_arg_choice():
    """Options and a positional Choice argument complete independently."""

    @click.command()
    @click.option("--opt1", type=click.Choice(["opt11", "opt12"]))
    @click.argument("arg1", required=False, type=click.Choice(["arg11", "arg12"]))
    @click.option("--opt2", type=click.Choice(["opt21", "opt22"]))
    def cli():
        pass

    assert choices_without_help(cli, ["--opt1"], "") == ["opt11", "opt12"]
    assert choices_without_help(cli, [""], "--opt1=") == ["opt11", "opt12"]
    assert choices_without_help(cli, [], "") == ["arg11", "arg12"]
    assert choices_without_help(cli, ["--opt2"], "") == ["opt21", "opt22"]
    assert choices_without_help(cli, ["arg11"], "--opt") == ["--opt1", "--opt2"]
    assert choices_without_help(cli, [], "--opt") == ["--opt1", "--opt2"]
def test_boolean_flag_choice():
    """A boolean flag consumes no value, so the argument completes right after it."""

    @click.command()
    @click.option("--shout/--no-shout", default=False)
    @click.argument("arg", required=False, type=click.Choice(["arg1", "arg2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, [], "-") == ["--shout", "--no-shout"]
    assert choices_without_help(cli, ["--shout"], "") == ["arg1", "arg2"]
def test_multi_value_option_choice():
    """An nargs=2 Choice option keeps completing values until both are supplied."""

    @click.command()
    @click.option("--pos", nargs=2, type=click.Choice(["pos1", "pos2"]))
    @click.argument("arg", required=False, type=click.Choice(["arg1", "arg2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, ["--pos"], "") == ["pos1", "pos2"]
    assert choices_without_help(cli, ["--pos", "pos1"], "") == ["pos1", "pos2"]
    assert choices_without_help(cli, ["--pos", "pos1", "pos2"], "") == ["arg1", "arg2"]
    assert choices_without_help(cli, ["--pos", "pos1", "pos2", "arg1"], "") == []
def test_multi_option_choice():
    """A multiple=True Choice option can be repeated; each use completes values."""

    @click.command()
    @click.option("--message", "-m", multiple=True, type=click.Choice(["m1", "m2"]))
    @click.argument("arg", required=False, type=click.Choice(["arg1", "arg2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, ["-m"], "") == ["m1", "m2"]
    assert choices_without_help(cli, ["-m", "m1", "-m"], "") == ["m1", "m2"]
    assert choices_without_help(cli, ["-m", "m1"], "") == ["arg1", "arg2"]
def test_variadic_argument_choice():
    """A nargs=-1 Choice argument keeps offering its choices indefinitely."""

    @click.command()
    @click.option("--opt", type=click.Choice(["opt1", "opt2"]))
    @click.argument("src", nargs=-1, type=click.Choice(["src1", "src2"]))
    def cli(local_opt):
        pass

    assert choices_without_help(cli, ["src1", "src2"], "") == ["src1", "src2"]
    assert choices_without_help(cli, ["src1", "src2"], "--o") == ["--opt"]
    assert choices_without_help(cli, ["src1", "src2", "--opt"], "") == ["opt1", "opt2"]
    # NOTE(review): duplicate of the first assert above — possibly redundant.
    assert choices_without_help(cli, ["src1", "src2"], "") == ["src1", "src2"]
def test_variadic_argument_complete():
def _complete(ctx, args, incomplete):
return ["abc", "def", "ghi", "jkl", "mno", "pqr", "stu", "vwx", "yz"]
@click.group()
def entrypoint():
pass
@click.command()
@click.option("--opt", autocompletion=_complete)
@click.argument("arg", nargs=-1)
def subcommand(opt, arg):
pass
entrypoint.add_command(subcommand)
assert choices_without_help(entrypoint, ["subcommand", "--opt"], "") == _complete(
0, 0, 0
)
assert choices_without_help(
entrypoint, ["subcommand", "whatever", "--opt"], ""
) == _complete(0, 0, 0)
assert (
choices_without_help(entrypoint, ["subcommand", "whatever", "--opt", "abc"], "")
== []
)
def test_long_chain_choice():
@click.group()
def cli():
pass
@cli.group()
@click.option("--sub-opt", type=click.Choice(["subopt1", "subopt2"]))
@click.argument(
"sub-arg", required=False, type=click.Choice(["subarg1", "subarg2"])
)
def sub(sub_opt, sub_arg):
pass
@sub.command(short_help="bsub help")
@click.option("--bsub-opt", type=click.Choice(["bsubopt1", "bsubopt2"]))
@click.argument(
"bsub-arg1", required=False, type=click.Choice(["bsubarg1", "bsubarg2"])
)
@click.argument(
"bbsub-arg2", required=False, type=click.Choice(["bbsubarg1", "bbsubarg2"])
)
def bsub(bsub_opt):
pass
@sub.group("csub")
def csub():
pass
@csub.command()
def dsub():
pass
assert choices_with_help(cli, ["sub", "subarg1"], "") == [
("bsub", "bsub help"),
("csub", ""),
]
assert choices_without_help(cli, ["sub"], "") == ["subarg1", "subarg2"]
assert choices_without_help(cli, ["sub", "--sub-opt"], "") == ["subopt1", "subopt2"]
assert choices_without_help(cli, ["sub", "--sub-opt", "subopt1"], "") == [
"subarg1",
"subarg2",
]
assert choices_without_help(
cli, ["sub", "--sub-opt", "subopt1", "subarg1", "bsub"], "-"
) == ["--bsub-opt"]
assert choices_without_help(
cli, ["sub", "--sub-opt", "subopt1", "subarg1", "bsub"], ""
) == ["bsubarg1", "bsubarg2"]
assert choices_without_help(
cli, ["sub", "--sub-opt", "subopt1", "subarg1", "bsub", "--bsub-opt"], ""
) == ["bsubopt1", "bsubopt2"]
assert choices_without_help(
cli,
[
"sub",
"--sub-opt",
"subopt1",
"subarg1",
"bsub",
"--bsub-opt",
"bsubopt1",
"bsubarg1",
],
"",
) == ["bbsubarg1", "bbsubarg2"]
assert choices_without_help(
cli, ["sub", "--sub-opt", "subopt1", "subarg1", "csub"], ""
) == ["dsub"]
def test_chained_multi():
@click.group()
def cli():
pass
@cli.group()
def sub():
pass
@sub.group()
def bsub():
pass
@sub.group(chain=True)
def csub():
pass
@csub.command()
def dsub():
pass
@csub.command()
def esub():
pass
assert choices_without_help(cli, ["sub"], "") == ["bsub", "csub"]
assert choices_without_help(cli, ["sub"], "c") == ["csub"]
assert choices_without_help(cli, ["sub", "csub"], "") == ["dsub", "esub"]
assert choices_without_help(cli, ["sub", "csub", "dsub"], "") == ["esub"]
def test_hidden():
@click.group()
@click.option("--name", hidden=True)
@click.option("--choices", type=click.Choice([1, 2]), hidden=True)
def cli(name):
pass
@cli.group(hidden=True)
def hgroup():
pass
@hgroup.group()
def hgroupsub():
pass
@cli.command()
def asub():
pass
@cli.command(hidden=True)
@click.option("--hname")
def hsub():
pass
assert choices_without_help(cli, [], "--n") == []
assert choices_without_help(cli, [], "--c") == []
# If the user exactly types out the hidden param, complete its options.
assert choices_without_help(cli, ["--choices"], "") == [1, 2]
assert choices_without_help(cli, [], "") == ["asub"]
assert choices_without_help(cli, [], "") == ["asub"]
assert choices_without_help(cli, [], "h") == []
# If the user exactly types out the hidden command, complete its subcommands.
assert choices_without_help(cli, ["hgroup"], "") == ["hgroupsub"]
assert choices_without_help(cli, ["hsub"], "--h") == ["--hname"]
@pytest.mark.parametrize(
("args", "part", "expect"),
[
([], "-", ["--opt"]),
(["value"], "--", ["--opt"]),
([], "-o", []),
(["--opt"], "-o", []),
(["--"], "", ["name", "-o", "--opt", "--"]),
(["--"], "--o", ["--opt"]),
],
)
def test_args_with_double_dash_complete(args, part, expect):
def _complete(ctx, args, incomplete):
values = ["name", "-o", "--opt", "--"]
return [x for x in values if x.startswith(incomplete)]
@click.command()
@click.option("--opt")
@click.argument("args", nargs=-1, autocompletion=_complete)
def cli(opt, args):
pass
assert choices_without_help(cli, args, part) == expect | 0.40592 | 0.283162 |
import warnings
warnings.filterwarnings("ignore")
from pcg_gazebo.generators import WorldGenerator
from pcg_gazebo.visualization import plot_workspace, plot_occupancy_grid
from pcg_gazebo.generators.creators import box_factory
from pcg_gazebo.utils import generate_random_string
world_gen = WorldGenerator()
world_gen.add_asset(
tag='dyn_box',
description=dict(
type='box',
args=dict(
size="5 * __import__('pcg_gazebo').random.rand(3)",
name='cuboid',
mass="max(0.1, __import__('pcg_gazebo').random.rand())",
color='xkcd'
)
)
)
# Check if models where included correctly
print('Asset is available for world generation=', 'dyn_box' in world_gen.assets.tags)
world_gen.add_asset(
tag='static_cylinder',
description=dict(
type='cylinder',
args=dict(
length="2 * __import__('pcg_gazebo').random.rand()",
radius="2 * __import__('pcg_gazebo').random.rand()",
name='cylinder',
color='xkcd'
)
)
)
# Check if models where included correctly
print('Asset is available for world generation=', 'static_cylinder' in world_gen.assets.tags)
world_gen.add_constraint(
name='tangent_to_ground_plane',
type='tangent',
frame='world',
reference=dict(
type='plane',
args=dict(
origin=[0, 0, 0],
normal=[0, 0, 1]
)
)
)
print('List of constraints=', list(world_gen.constraints.tags))
world_gen.add_constraint(
name='my_workspace',
type='workspace',
frame='world',
geometry_type='area',
points=[
[-5, -5, 0],
[-5, 5, 0],
[5, 5, 0],
[5, -5, 0],
]
)
print('List of constraints=', list(world_gen.constraints.tags))
plot_workspace(workspace=world_gen.constraints.get('my_workspace'))
floor_model = box_factory(
size=[
[20, 20, 0.01]
],
mass=1,
use_permutation=True,
name='box_floor'
)[0]
floor_model.name = 'box_floor'
world_gen.add_asset(tag='box_floor', description=floor_model)
world_gen.add_engine(
engine_name='fixed_pose',
tag='gp_engine',
models=['box_floor'],
poses=[[0, 0, -0.02, 0, 0, 0]])
world_gen.set_model_as_ground_plane('box_floor')
# world_gen.run_engines()
NUM_BOXES = 3
NUM_CYLINDER = 3
placement_policy = dict(
models=['dyn_box', 'static_cylinder'],
config=[
dict(
dofs=['x', 'y'],
tag='workspace',
workspace='my_workspace'
),
dict(
dofs=['yaw'],
tag='uniform',
min=-3.141592653589793,
max=3.141592653589793
)
]
)
world_gen.add_engine(
tag='box_placement',
engine_name='random_pose',
models=['dyn_box', 'static_cylinder'],
max_num=dict(
dyn_box=NUM_BOXES,
static_cylinder=NUM_CYLINDER),
model_picker='random',
no_collision=True,
policies=[placement_policy],
constraints=[
dict(
model='dyn_box',
constraint='tangent_to_ground_plane'),
dict(
model='static_cylinder',
constraint='tangent_to_ground_plane')
]
)
# world_gen.init()
world_gen.run_engines()
print(world_gen.world.models.keys())
world_gen.world.create_scene().show()
import matplotlib.pyplot as pyplot
fig = plot_occupancy_grid(world_gen.world.models, with_ground_plane=False, static_models_only=False, ground_plane_models=['box_floor'])
pyplot.show()
fig = plot_occupancy_grid(
world_gen.world.models,
with_ground_plane=False,
static_models_only=True,
ground_plane_models=['box_floor'])
pyplot.show()
fig = world_gen.world.plot_footprints(engine='matplotlib')
pyplot.show() | examples/gen_grid_map.py | import warnings
warnings.filterwarnings("ignore")
from pcg_gazebo.generators import WorldGenerator
from pcg_gazebo.visualization import plot_workspace, plot_occupancy_grid
from pcg_gazebo.generators.creators import box_factory
from pcg_gazebo.utils import generate_random_string
world_gen = WorldGenerator()
world_gen.add_asset(
tag='dyn_box',
description=dict(
type='box',
args=dict(
size="5 * __import__('pcg_gazebo').random.rand(3)",
name='cuboid',
mass="max(0.1, __import__('pcg_gazebo').random.rand())",
color='xkcd'
)
)
)
# Check if models where included correctly
print('Asset is available for world generation=', 'dyn_box' in world_gen.assets.tags)
world_gen.add_asset(
tag='static_cylinder',
description=dict(
type='cylinder',
args=dict(
length="2 * __import__('pcg_gazebo').random.rand()",
radius="2 * __import__('pcg_gazebo').random.rand()",
name='cylinder',
color='xkcd'
)
)
)
# Check if models where included correctly
print('Asset is available for world generation=', 'static_cylinder' in world_gen.assets.tags)
world_gen.add_constraint(
name='tangent_to_ground_plane',
type='tangent',
frame='world',
reference=dict(
type='plane',
args=dict(
origin=[0, 0, 0],
normal=[0, 0, 1]
)
)
)
print('List of constraints=', list(world_gen.constraints.tags))
world_gen.add_constraint(
name='my_workspace',
type='workspace',
frame='world',
geometry_type='area',
points=[
[-5, -5, 0],
[-5, 5, 0],
[5, 5, 0],
[5, -5, 0],
]
)
print('List of constraints=', list(world_gen.constraints.tags))
plot_workspace(workspace=world_gen.constraints.get('my_workspace'))
floor_model = box_factory(
size=[
[20, 20, 0.01]
],
mass=1,
use_permutation=True,
name='box_floor'
)[0]
floor_model.name = 'box_floor'
world_gen.add_asset(tag='box_floor', description=floor_model)
world_gen.add_engine(
engine_name='fixed_pose',
tag='gp_engine',
models=['box_floor'],
poses=[[0, 0, -0.02, 0, 0, 0]])
world_gen.set_model_as_ground_plane('box_floor')
# world_gen.run_engines()
NUM_BOXES = 3
NUM_CYLINDER = 3
placement_policy = dict(
models=['dyn_box', 'static_cylinder'],
config=[
dict(
dofs=['x', 'y'],
tag='workspace',
workspace='my_workspace'
),
dict(
dofs=['yaw'],
tag='uniform',
min=-3.141592653589793,
max=3.141592653589793
)
]
)
world_gen.add_engine(
tag='box_placement',
engine_name='random_pose',
models=['dyn_box', 'static_cylinder'],
max_num=dict(
dyn_box=NUM_BOXES,
static_cylinder=NUM_CYLINDER),
model_picker='random',
no_collision=True,
policies=[placement_policy],
constraints=[
dict(
model='dyn_box',
constraint='tangent_to_ground_plane'),
dict(
model='static_cylinder',
constraint='tangent_to_ground_plane')
]
)
# world_gen.init()
world_gen.run_engines()
print(world_gen.world.models.keys())
world_gen.world.create_scene().show()
import matplotlib.pyplot as pyplot
fig = plot_occupancy_grid(world_gen.world.models, with_ground_plane=False, static_models_only=False, ground_plane_models=['box_floor'])
pyplot.show()
fig = plot_occupancy_grid(
world_gen.world.models,
with_ground_plane=False,
static_models_only=True,
ground_plane_models=['box_floor'])
pyplot.show()
fig = world_gen.world.plot_footprints(engine='matplotlib')
pyplot.show() | 0.599837 | 0.312265 |
import logging
import re
import traceback
import uuid
from enum import IntEnum, unique
from ._journal import send, syslog_priorities
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
_priorities = syslog_priorities()
__all__ = "write", "send", "Priority", "JournaldLogHandler", "Facility"
@unique
class Priority(IntEnum):
PANIC = _priorities["panic"]
WARNING = _priorities["warn"]
ALERT = _priorities["alert"]
NONE = _priorities["none"]
CRITICAL = _priorities["crit"]
DEBUG = _priorities["debug"]
INFO = _priorities["info"]
ERROR = _priorities["error"]
NOTICE = _priorities["notice"]
@unique
class Facility(IntEnum):
KERN = 0
USER = 1
MAIL = 2
DAEMON = 3
AUTH = 4
SYSLOG = 5
LPR = 6
NEWS = 7
UUCP = 8
CLOCK_DAEMON = 9
AUTHPRIV = 10
FTP = 11
NTP = 12
AUDIT = 13
ALERT = 14
CRON = 15
LOCAL0 = 16
LOCAL1 = 17
LOCAL2 = 18
LOCAL3 = 19
LOCAL4 = 20
LOCAL5 = 21
LOCAL6 = 22
LOCAL7 = 23
def write(message, priority=Priority.INFO):
""" Write message into systemd journal
:type priority: Priority
:type message: str
"""
priority = int(Priority(int(priority)))
send(priority=priority, message=message)
class JournaldLogHandler(logging.Handler):
FIELD_BADCHAR_RE = re.compile(r'\W')
LEVELS = {
logging.CRITICAL: Priority.CRITICAL.value,
logging.FATAL: Priority.PANIC.value,
logging.ERROR: Priority.ERROR.value,
logging.WARNING: Priority.WARNING.value,
logging.WARN: Priority.WARNING.value,
logging.INFO: Priority.INFO.value,
logging.DEBUG: Priority.DEBUG.value,
logging.NOTSET: Priority.NONE.value,
}
__slots__ = ("__facility",)
def __init__(self, identifier=None, facility=Facility.DAEMON):
"""
:type identifier: Override default journald identifier
:type facility: Facility
"""
logging.Handler.__init__(self)
self.__identifier = identifier
self.__facility = int(facility)
@staticmethod
def _to_microsecond(ts):
"""
:type ts: float
"""
return int(ts * 1000 * 1000)
def emit(self, record):
message = str(record.getMessage())
tb_message = ""
if record.exc_info:
tb_message = "\n".join(
traceback.format_exception(*record.exc_info)
)
message += "\n"
message += tb_message
ts = self._to_microsecond(record.created)
hash_fields = (
message,
record.funcName,
record.levelno,
record.process,
record.processName,
record.levelname,
record.pathname,
record.name,
record.thread,
record.lineno,
ts,
tb_message,
)
message_id = uuid.uuid3(
uuid.NAMESPACE_OID, "$".join(str(x) for x in hash_fields)
).hex
data = {
key: value
for key, value in record.__dict__.items()
if not key.startswith("_") and value is not None
}
data["message"] = self.format(record)
data["priority"] = self.LEVELS[data.pop("levelno")]
data["syslog_facility"] = self.__facility
data["code_file"] = data.pop("filename")
data["code_line"] = data.pop("lineno")
data["code_func"] = data.pop("funcName")
if self.__identifier:
data["syslog_identifier"] = self.__identifier
else:
data["syslog_identifier"] = data["name"]
if "msg" in data:
data["message_raw"] = data.pop("msg")
data["message_id"] = message_id
data["code_module"] = data.pop("module")
data["logger_name"] = data.pop("name")
data["pid"] = data.pop("process")
data["proccess_name"] = data.pop("processName")
data["errno"] = 0 if not record.exc_info else 255
data["relative_ts"] = self._to_microsecond(data.pop("relativeCreated"))
data["thread_name"] = data.pop("threadName")
args = data.pop("args", [])
if isinstance(args, Mapping):
for key, value in args.items():
key = self.FIELD_BADCHAR_RE.sub('_', key)
data["argument_%s" % key] = value
else:
for idx, item in enumerate(args):
data["argument_%d" % idx] = str(item)
if tb_message:
data["traceback"] = tb_message
send(**data)
handler = JournaldLogHandler()
class JournaldLogger(logging.Logger):
def __init__(self, level, name="root"):
super(JournaldLogger, self).__init__(name, level)
self.addHandler(handler)
Logger = JournaldLogger(logging.WARNING) | cysystemd/journal.py | import logging
import re
import traceback
import uuid
from enum import IntEnum, unique
from ._journal import send, syslog_priorities
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
_priorities = syslog_priorities()
__all__ = "write", "send", "Priority", "JournaldLogHandler", "Facility"
@unique
class Priority(IntEnum):
PANIC = _priorities["panic"]
WARNING = _priorities["warn"]
ALERT = _priorities["alert"]
NONE = _priorities["none"]
CRITICAL = _priorities["crit"]
DEBUG = _priorities["debug"]
INFO = _priorities["info"]
ERROR = _priorities["error"]
NOTICE = _priorities["notice"]
@unique
class Facility(IntEnum):
KERN = 0
USER = 1
MAIL = 2
DAEMON = 3
AUTH = 4
SYSLOG = 5
LPR = 6
NEWS = 7
UUCP = 8
CLOCK_DAEMON = 9
AUTHPRIV = 10
FTP = 11
NTP = 12
AUDIT = 13
ALERT = 14
CRON = 15
LOCAL0 = 16
LOCAL1 = 17
LOCAL2 = 18
LOCAL3 = 19
LOCAL4 = 20
LOCAL5 = 21
LOCAL6 = 22
LOCAL7 = 23
def write(message, priority=Priority.INFO):
""" Write message into systemd journal
:type priority: Priority
:type message: str
"""
priority = int(Priority(int(priority)))
send(priority=priority, message=message)
class JournaldLogHandler(logging.Handler):
FIELD_BADCHAR_RE = re.compile(r'\W')
LEVELS = {
logging.CRITICAL: Priority.CRITICAL.value,
logging.FATAL: Priority.PANIC.value,
logging.ERROR: Priority.ERROR.value,
logging.WARNING: Priority.WARNING.value,
logging.WARN: Priority.WARNING.value,
logging.INFO: Priority.INFO.value,
logging.DEBUG: Priority.DEBUG.value,
logging.NOTSET: Priority.NONE.value,
}
__slots__ = ("__facility",)
def __init__(self, identifier=None, facility=Facility.DAEMON):
"""
:type identifier: Override default journald identifier
:type facility: Facility
"""
logging.Handler.__init__(self)
self.__identifier = identifier
self.__facility = int(facility)
@staticmethod
def _to_microsecond(ts):
"""
:type ts: float
"""
return int(ts * 1000 * 1000)
def emit(self, record):
message = str(record.getMessage())
tb_message = ""
if record.exc_info:
tb_message = "\n".join(
traceback.format_exception(*record.exc_info)
)
message += "\n"
message += tb_message
ts = self._to_microsecond(record.created)
hash_fields = (
message,
record.funcName,
record.levelno,
record.process,
record.processName,
record.levelname,
record.pathname,
record.name,
record.thread,
record.lineno,
ts,
tb_message,
)
message_id = uuid.uuid3(
uuid.NAMESPACE_OID, "$".join(str(x) for x in hash_fields)
).hex
data = {
key: value
for key, value in record.__dict__.items()
if not key.startswith("_") and value is not None
}
data["message"] = self.format(record)
data["priority"] = self.LEVELS[data.pop("levelno")]
data["syslog_facility"] = self.__facility
data["code_file"] = data.pop("filename")
data["code_line"] = data.pop("lineno")
data["code_func"] = data.pop("funcName")
if self.__identifier:
data["syslog_identifier"] = self.__identifier
else:
data["syslog_identifier"] = data["name"]
if "msg" in data:
data["message_raw"] = data.pop("msg")
data["message_id"] = message_id
data["code_module"] = data.pop("module")
data["logger_name"] = data.pop("name")
data["pid"] = data.pop("process")
data["proccess_name"] = data.pop("processName")
data["errno"] = 0 if not record.exc_info else 255
data["relative_ts"] = self._to_microsecond(data.pop("relativeCreated"))
data["thread_name"] = data.pop("threadName")
args = data.pop("args", [])
if isinstance(args, Mapping):
for key, value in args.items():
key = self.FIELD_BADCHAR_RE.sub('_', key)
data["argument_%s" % key] = value
else:
for idx, item in enumerate(args):
data["argument_%d" % idx] = str(item)
if tb_message:
data["traceback"] = tb_message
send(**data)
handler = JournaldLogHandler()
class JournaldLogger(logging.Logger):
def __init__(self, level, name="root"):
super(JournaldLogger, self).__init__(name, level)
self.addHandler(handler)
Logger = JournaldLogger(logging.WARNING) | 0.34798 | 0.07363 |
from homeassistant.components.diagnostics import REDACTED
from homeassistant.components.guardian import (
DATA_PAIRED_SENSOR_MANAGER,
DOMAIN,
PairedSensorManager,
)
from tests.components.diagnostics import get_diagnostics_for_config_entry
async def test_entry_diagnostics(hass, config_entry, hass_client, setup_guardian):
"""Test config entry diagnostics."""
paired_sensor_manager: PairedSensorManager = hass.data[DOMAIN][
config_entry.entry_id
][DATA_PAIRED_SENSOR_MANAGER]
# Simulate the pairing of a paired sensor:
await paired_sensor_manager.async_pair_sensor("AABBCCDDEEFF")
assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {
"entry": {
"title": "Mock Title",
"data": {
"ip_address": "192.168.1.100",
"port": 7777,
"uid": REDACTED,
},
},
"data": {
"valve_controller": {
"sensor_pair_dump": {"pair_count": 1, "paired_uids": REDACTED},
"system_diagnostics": {
"codename": "gvc1",
"uid": REDACTED,
"uptime": 41,
"firmware": "0.20.9-beta+official.ef3",
"rf_modem_firmware": "4.0.0",
"available_heap": 34456,
},
"system_onboard_sensor_status": {"temperature": 71, "wet": False},
"valve_status": {
"enabled": False,
"direction": True,
"state": 0,
"travel_count": 0,
"instantaneous_current": 0,
"instantaneous_current_ddt": 0,
"average_current": 34,
},
"wifi_status": {
"station_connected": True,
"ip_assigned": True,
"mqtt_connected": True,
"rssi": -63,
"channel": 1,
"lan_ipv4": "192.168.1.100",
"lan_ipv6": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
"ap_enabled": True,
"ap_clients": 0,
"bssid": REDACTED,
"ssid": REDACTED,
},
},
"paired_sensors": [
{
"uid": REDACTED,
"codename": "gld1",
"temperature": 68,
"wet": False,
"moved": True,
"battery_percentage": 79,
}
],
},
} | tests/components/guardian/test_diagnostics.py | from homeassistant.components.diagnostics import REDACTED
from homeassistant.components.guardian import (
DATA_PAIRED_SENSOR_MANAGER,
DOMAIN,
PairedSensorManager,
)
from tests.components.diagnostics import get_diagnostics_for_config_entry
async def test_entry_diagnostics(hass, config_entry, hass_client, setup_guardian):
"""Test config entry diagnostics."""
paired_sensor_manager: PairedSensorManager = hass.data[DOMAIN][
config_entry.entry_id
][DATA_PAIRED_SENSOR_MANAGER]
# Simulate the pairing of a paired sensor:
await paired_sensor_manager.async_pair_sensor("AABBCCDDEEFF")
assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {
"entry": {
"title": "Mock Title",
"data": {
"ip_address": "192.168.1.100",
"port": 7777,
"uid": REDACTED,
},
},
"data": {
"valve_controller": {
"sensor_pair_dump": {"pair_count": 1, "paired_uids": REDACTED},
"system_diagnostics": {
"codename": "gvc1",
"uid": REDACTED,
"uptime": 41,
"firmware": "0.20.9-beta+official.ef3",
"rf_modem_firmware": "4.0.0",
"available_heap": 34456,
},
"system_onboard_sensor_status": {"temperature": 71, "wet": False},
"valve_status": {
"enabled": False,
"direction": True,
"state": 0,
"travel_count": 0,
"instantaneous_current": 0,
"instantaneous_current_ddt": 0,
"average_current": 34,
},
"wifi_status": {
"station_connected": True,
"ip_assigned": True,
"mqtt_connected": True,
"rssi": -63,
"channel": 1,
"lan_ipv4": "192.168.1.100",
"lan_ipv6": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
"ap_enabled": True,
"ap_clients": 0,
"bssid": REDACTED,
"ssid": REDACTED,
},
},
"paired_sensors": [
{
"uid": REDACTED,
"codename": "gld1",
"temperature": 68,
"wet": False,
"moved": True,
"battery_percentage": 79,
}
],
},
} | 0.699049 | 0.261661 |
import math
import argparse
import numpy as np
import cv2
import tensorflow as tf
from gluoncv.data import ImageNet1kAttr
from tf2cv.model_provider import get_model as tf2cv_get_model
def parse_args():
"""
Create python script parameters.
Returns
-------
ArgumentParser
Resulted args.
"""
parser = argparse.ArgumentParser(
description="Evaluate an ImageNet-1K model on TensorFlow 2.0 (demo mode)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see model_provider for options")
parser.add_argument(
"--image",
type=str,
required=True,
help="path to testing image")
parser.add_argument(
"--num-gpus",
type=int,
default=0,
help="number of gpus to use")
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=0.875,
help="inverted ratio for input image crop")
parser.add_argument(
"--mean-rgb",
nargs=3,
type=float,
default=(0.485, 0.456, 0.406),
help="Mean of RGB channels in the dataset")
parser.add_argument(
"--std-rgb",
nargs=3,
type=float,
default=(0.229, 0.224, 0.225),
help="STD of RGB channels in the dataset")
args = parser.parse_args()
return args
def main():
"""
Main body of script.
"""
args = parse_args()
# Load a testing image:
image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR)
# cv2.imshow("image", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB)
# Resize image with keeping aspect ratio:
resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor))
h, w = image.shape[:2]
if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)):
resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value)
image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR)
# Center crop of the image:
h, w = image.shape[:2]
th, tw = args.input_size, args.input_size
ih = int(round(0.5 * (h - th)))
jw = int(round(0.5 * (w - tw)))
image = image[ih:(ih + th), jw:(jw + tw), :]
# cv2.imshow("image2", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# Convert image to a float tensor and normalize it:
x = image.astype(np.float32)
x = x / 255.0
x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb)
# Set No-GPU mode:
if args.num_gpus == 0:
tf.config.set_visible_devices([], "GPU")
# Convert the tensor to a TF tensor:
x = np.expand_dims(x, axis=0)
x = tf.convert_to_tensor(x, dtype=np.float32)
# Create model with loading pretrained weights:
net = tf2cv_get_model(args.model, pretrained=True)
# Evaluate the network:
y = net(x)
probs = tf.nn.softmax(y)
# Show results:
top_k = 5
probs_np = probs.numpy().squeeze(axis=0)
top_k_inds = probs_np.argsort()[::-1][:top_k]
classes = ImageNet1kAttr().classes
print("The input picture is classified to be:")
for k in range(top_k):
print("{idx}: [{class_name}], with probability {prob:.3f}.".format(
idx=(k + 1),
class_name=classes[top_k_inds[k]],
prob=probs_np[top_k_inds[k]]))
if __name__ == "__main__":
main() | examples/demo_tf2.py | import math
import argparse
import numpy as np
import cv2
import tensorflow as tf
from gluoncv.data import ImageNet1kAttr
from tf2cv.model_provider import get_model as tf2cv_get_model
def parse_args():
"""
Create python script parameters.
Returns
-------
ArgumentParser
Resulted args.
"""
parser = argparse.ArgumentParser(
description="Evaluate an ImageNet-1K model on TensorFlow 2.0 (demo mode)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see model_provider for options")
parser.add_argument(
"--image",
type=str,
required=True,
help="path to testing image")
parser.add_argument(
"--num-gpus",
type=int,
default=0,
help="number of gpus to use")
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=0.875,
help="inverted ratio for input image crop")
parser.add_argument(
"--mean-rgb",
nargs=3,
type=float,
default=(0.485, 0.456, 0.406),
help="Mean of RGB channels in the dataset")
parser.add_argument(
"--std-rgb",
nargs=3,
type=float,
default=(0.229, 0.224, 0.225),
help="STD of RGB channels in the dataset")
args = parser.parse_args()
return args
def main():
"""
Main body of script.
"""
args = parse_args()
# Load a testing image:
image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR)
# cv2.imshow("image", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB)
# Resize image with keeping aspect ratio:
resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor))
h, w = image.shape[:2]
if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)):
resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value)
image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR)
# Center crop of the image:
h, w = image.shape[:2]
th, tw = args.input_size, args.input_size
ih = int(round(0.5 * (h - th)))
jw = int(round(0.5 * (w - tw)))
image = image[ih:(ih + th), jw:(jw + tw), :]
# cv2.imshow("image2", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# Convert image to a float tensor and normalize it:
x = image.astype(np.float32)
x = x / 255.0
x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb)
# Set No-GPU mode:
if args.num_gpus == 0:
tf.config.set_visible_devices([], "GPU")
# Convert the tensor to a TF tensor:
x = np.expand_dims(x, axis=0)
x = tf.convert_to_tensor(x, dtype=np.float32)
# Create model with loading pretrained weights:
net = tf2cv_get_model(args.model, pretrained=True)
# Evaluate the network:
y = net(x)
probs = tf.nn.softmax(y)
# Show results:
top_k = 5
probs_np = probs.numpy().squeeze(axis=0)
top_k_inds = probs_np.argsort()[::-1][:top_k]
classes = ImageNet1kAttr().classes
print("The input picture is classified to be:")
for k in range(top_k):
print("{idx}: [{class_name}], with probability {prob:.3f}.".format(
idx=(k + 1),
class_name=classes[top_k_inds[k]],
prob=probs_np[top_k_inds[k]]))
if __name__ == "__main__":
main() | 0.843477 | 0.268896 |
from django.shortcuts import render, HttpResponse
from django.http import JsonResponse
from django.views.generic import View
from django.conf import settings
from .models import Address
from .fusiontable import Fusiontable
import json
class HomeView(View):
"""
displays Home Page, connects Google.maps.api and loads styles, js-libraries, connects
map.js (the script, that displays and manages the main page content), bootstraps data for map.js
"""
template_name = 'home.html'
def get(self, request):
return render(request, self.template_name, {
'bootstrap': json.dumps(settings.GOOGLE_API_OPTIONS),
'api_key': settings.GOOGLE_API_OPTIONS.get('api_key')
})
class MapApiView(View):
"""
the server part of map.js functionality
"""
def get(self, request):
"""
Selects all addresses from the database
:param request:
:return address list:
"""
return JsonResponse({'results': [x.attributes for x in Address.objects.all()]})
def post(self, request):
"""
Adds address to database and fusion table
:param request:
:return address list or error mesage:
"""
try:
address = Address(latLng=request.POST.dict(), api_key=settings.GOOGLE_API_OPTIONS.get('api_key'))
if not address.is_valid:
return JsonResponse({
'message': 'This place has no address'
}, status=404)
fusiontable = Fusiontable(data=address.attributes, options=settings.GOOGLE_API_OPTIONS)
if not fusiontable.already_exists:
fusiontable.save()
address.save()
return JsonResponse({ # Everything's ok! Return info for displaying
'data': [x.attributes for x in Address.objects.all()]
})
except:
return JsonResponse({
'message': 'Google API is not available, please contact site administrator'
}, status=404)
return JsonResponse({
'message': 'This address already exists'
}, status=404)
def delete(self, request):
"""
:param request:
:return:
"""
Address.objects.all().delete()
Fusiontable(settings.GOOGLE_API_OPTIONS).delete_all()
return HttpResponse('ok') | maptapp/views.py | from django.shortcuts import render, HttpResponse
from django.http import JsonResponse
from django.views.generic import View
from django.conf import settings
from .models import Address
from .fusiontable import Fusiontable
import json
class HomeView(View):
"""
displays Home Page, connects Google.maps.api and loads styles, js-libraries, connects
map.js (the script, that displays and manages the main page content), bootstraps data for map.js
"""
template_name = 'home.html'
def get(self, request):
return render(request, self.template_name, {
'bootstrap': json.dumps(settings.GOOGLE_API_OPTIONS),
'api_key': settings.GOOGLE_API_OPTIONS.get('api_key')
})
class MapApiView(View):
    """The server side of the map.js functionality (address CRUD endpoints)."""

    def get(self, request):
        """Select all addresses from the database.

        :param request:
        :return: JSON with the list of address attributes.
        """
        return JsonResponse({'results': [x.attributes for x in Address.objects.all()]})

    def post(self, request):
        """Add an address to the database and the fusion table.

        :param request: POST body carries the latLng dict.
        :return: JSON with the full address list, or an error message.
        """
        try:
            address = Address(
                latLng=request.POST.dict(),
                api_key=settings.GOOGLE_API_OPTIONS.get('api_key'),
            )
            if not address.is_valid:
                return JsonResponse({
                    'message': 'This place has no address'
                }, status=404)
            fusiontable = Fusiontable(
                data=address.attributes, options=settings.GOOGLE_API_OPTIONS
            )
            if fusiontable.already_exists:
                # Equivalent to the original fall-through return after the
                # try/except, made an explicit branch for readability.
                return JsonResponse({
                    'message': 'This address already exists'
                }, status=404)
            fusiontable.save()
            address.save()
            return JsonResponse({  # Everything's ok! Return info for displaying
                'data': [x.attributes for x in Address.objects.all()]
            })
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Google API failures stay soft.
            return JsonResponse({
                'message': 'Google API is not available, please contact site administrator'
            }, status=404)

    def delete(self, request):
        """Remove every address from the database and the fusion table.

        :param request:
        :return: plain-text 'ok' on success.
        """
        Address.objects.all().delete()
        Fusiontable(settings.GOOGLE_API_OPTIONS).delete_all()
        return HttpResponse('ok')
r"""Order utilities for 0x applications.
Setup
-----
Install the package with pip::
pip install 0x-order-utils
Some methods require the caller to pass in a `Web3.BaseProvider`:code: object.
For local testing one may construct such a provider pointing at an instance of
`ganache-cli <https://www.npmjs.com/package/ganache-cli>`_ which has the 0x
contracts deployed on it. For convenience, a docker container is provided for
just this purpose. To start it::
docker run -d -p 8545:8545 0xorg/ganache-cli
"""
from enum import auto, Enum
import json
from typing import cast, Tuple, Union
from pkg_resources import resource_string
from mypy_extensions import TypedDict
from eth_typing import HexStr
from eth_utils import keccak, remove_0x_prefix, to_bytes, to_checksum_address
from web3 import Web3
import web3.exceptions
from web3.providers.base import BaseProvider
from web3.contract import Contract
from zero_ex.contract_addresses import chain_to_addresses, ChainId
import zero_ex.contract_artifacts
from zero_ex.contract_wrappers.exchange import Exchange
from zero_ex.contract_wrappers.exchange.types import Order
from zero_ex.contract_wrappers.order_conversions import order_to_jsdict
from zero_ex.dev_utils.type_assertions import (
assert_is_address,
assert_is_hex_string,
assert_is_provider,
)
from zero_ex.json_schemas import assert_valid
class _Constants:
    """Static data used by order utilities.

    Holds the EIP-712/EIP-191 hashing constants and the signature-type
    enumeration used when building and signing 0x orders.
    """

    # The zero address, used as a placeholder for "no address".
    null_address = "0x0000000000000000000000000000000000000000"

    # EIP-191 prefix prepended to EIP-712 encoded data before hashing.
    eip191_header = b"\x19\x01"

    # keccak256 hash of the EIP-712 domain type string.
    eip712_domain_separator_schema_hash = keccak(
        b"EIP712Domain("
        b"string name,"
        b"string version,"
        b"uint256 chainId,"
        b"address verifyingContract"
        b")"
    )

    # Domain schema hash followed by the hashed name ("0x Protocol") and
    # version ("3.0.0"); chainId and verifyingContract are appended later.
    eip712_domain_struct_header = (
        eip712_domain_separator_schema_hash
        + keccak(b"0x Protocol")
        + keccak(b"3.0.0")
    )

    # keccak256 hash of the EIP-712 Order type string.
    eip712_order_schema_hash = keccak(
        b"Order("
        b"address makerAddress,"
        b"address takerAddress,"
        b"address feeRecipientAddress,"
        b"address senderAddress,"
        b"uint256 makerAssetAmount,"
        b"uint256 takerAssetAmount,"
        b"uint256 makerFee,"
        b"uint256 takerFee,"
        b"uint256 expirationTimeSeconds,"
        b"uint256 salt,"
        b"bytes makerAssetData,"
        b"bytes takerAssetData,"
        b"bytes makerFeeAssetData,"
        b"bytes takerFeeAssetData"
        b")"
    )

    class SignatureType(Enum):
        """Enumeration of known signature types."""

        ILLEGAL = 0
        INVALID = auto()
        EIP712 = auto()
        ETH_SIGN = auto()
        WALLET = auto()
        VALIDATOR = auto()
        PRE_SIGNED = auto()
        N_SIGNATURE_TYPES = auto()
def generate_order_hash_hex(
    order: Order, exchange_address: str, chain_id: int
) -> str:
    """Calculate the hash of the given order as a hexadecimal string.

    :param order: The order to be hashed.  Must conform to `the 0x order JSON schema <https://github.com/0xProject/0x-monorepo/blob/development/packages/json-schemas/schemas/order_schema.json>`_.
    :param exchange_address: The address to which the 0x Exchange smart
        contract has been deployed.
    :param chain_id: The EIP-155 chain id the Exchange is deployed on.
    :returns: A string, of ASCII hex digits, representing the order hash.

    Inputs and expected result below were copied from
    @0x/order-utils/test/order_hash_test.ts

    >>> generate_order_hash_hex(
    ...     Order(
    ...         makerAddress="0x0000000000000000000000000000000000000000",
    ...         takerAddress="0x0000000000000000000000000000000000000000",
    ...         feeRecipientAddress="0x0000000000000000000000000000000000000000",
    ...         senderAddress="0x0000000000000000000000000000000000000000",
    ...         makerAssetAmount="0",
    ...         takerAssetAmount="0",
    ...         makerFee="0",
    ...         takerFee="0",
    ...         expirationTimeSeconds="0",
    ...         salt="0",
    ...         makerAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...         takerAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...         makerFeeAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...         takerFeeAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...     ),
    ...     exchange_address="0x1dc4c1cefef38a777b15aa20260a54e584b16c48",
    ...     chain_id=1337
    ... )
    'cb36e4fedb36508fb707e2c05e21bffc7a72766ccae93f8ff096693fff7f1714'
    """  # noqa: E501 (line too long)
    assert_is_address(exchange_address, "exchange_address")
    assert_valid(
        order_to_jsdict(order, chain_id, exchange_address), "/orderSchema"
    )

    def _address32(address_bytes: bytes) -> bytes:
        # Left-pad a 20-byte address up to a 32-byte EIP-712 word.
        return bytes(12) + address_bytes

    def _uint256(value: int) -> bytes:
        # Encode an integer as a 32-byte big-endian EIP-712 word.
        return value.to_bytes(32, byteorder="big")

    def _as_bytes(value: Union[str, bytes]) -> bytes:
        # Asset-data fields may arrive as hex strings or as raw bytes.
        if isinstance(value, str):
            return to_bytes(hexstr=cast(bytes, value))
        return value

    domain_struct_hash = keccak(
        _Constants.eip712_domain_struct_header
        + _uint256(int(chain_id))
        + _address32(to_bytes(hexstr=exchange_address))
    )

    order_fields = (
        _address32(to_bytes(hexstr=order["makerAddress"])),
        _address32(to_bytes(hexstr=order["takerAddress"])),
        _address32(to_bytes(hexstr=order["feeRecipientAddress"])),
        _address32(to_bytes(hexstr=order["senderAddress"])),
        _uint256(int(order["makerAssetAmount"])),
        _uint256(int(order["takerAssetAmount"])),
        _uint256(int(order["makerFee"])),
        _uint256(int(order["takerFee"])),
        _uint256(int(order["expirationTimeSeconds"])),
        _uint256(int(order["salt"])),
        keccak(_as_bytes(order["makerAssetData"])),
        keccak(_as_bytes(order["takerAssetData"])),
        keccak(_as_bytes(order["makerFeeAssetData"])),
        keccak(_as_bytes(order["takerFeeAssetData"])),
    )
    order_struct_hash = keccak(
        _Constants.eip712_order_schema_hash + b"".join(order_fields)
    )

    return keccak(
        _Constants.eip191_header + domain_struct_hash + order_struct_hash
    ).hex()
def is_valid_signature(
    provider: BaseProvider, data: str, signature: str, signer_address: str
) -> bool:
    """Check the validity of the supplied signature.

    Check if the supplied `signature`:code: corresponds to signing `data`:code:
    with the private key corresponding to `signer_address`:code:.

    :param provider: A Web3 provider able to access the 0x Exchange contract.
    :param data: The hex encoded data signed by the supplied signature.
    :param signature: The hex encoded signature.
    :param signer_address: The hex encoded address that signed the data to
        produce the supplied signature.
    :returns: True if the signature is valid for the given data and signer,
        False otherwise.

    >>> is_valid_signature(
    ...     Web3.HTTPProvider("http://127.0.0.1:8545"),
    ...     '0x6927e990021d23b1eb7b8789f6a6feaf98fe104bb0cf8259421b79f9a34222b0',
    ...     '0x1B61a3ed31b43c8780e905a260a35faefcc527be7516aa11c0256729b5b351bc3340349190569279751135161d22529dc25add4f6069af05be04cacbda2ace225403',
    ...     '0x5409ed021d9299bf6814279a6a1411a7e866a631',
    ... )
    True
    """  # noqa: E501 (line too long)
    assert_is_provider(provider, "provider")
    assert_is_hex_string(data, "data")
    assert_is_hex_string(signature, "signature")
    assert_is_address(signer_address, "signer_address")

    chain_id = int(Web3(provider).eth.chainId)  # pylint: disable=no-member
    exchange = Exchange(
        provider, chain_to_addresses(ChainId(chain_id)).exchange
    )
    return exchange.is_valid_hash_signature.call(
        bytes.fromhex(remove_0x_prefix(HexStr(data))),
        to_checksum_address(signer_address),
        bytes.fromhex(remove_0x_prefix(HexStr(signature))),
    )
class ECSignature(TypedDict):
    """Object representation of an elliptic curve signature's parameters."""

    v: int  # recovery parameter (normally 27 or 28)
    r: str  # first 32 bytes of the signature, as hex
    s: str  # second 32 bytes of the signature, as hex
def _parse_signature_hex_as_vrs(signature_hex: str) -> ECSignature:
"""Parse signature hex as a concatentation of EC parameters ordered V, R, S.
>>> _parse_signature_hex_as_vrs('0x1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b03')
{'v': 27, 'r': '117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d87287113', 's': '7feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b'}
""" # noqa: E501 (line too long)
signature: ECSignature = {
"v": int(signature_hex[2:4], 16),
"r": signature_hex[4:68],
"s": signature_hex[68:132],
}
if signature["v"] == 0 or signature["v"] == 1:
signature["v"] = signature["v"] + 27
return signature
def _parse_signature_hex_as_rsv(signature_hex: str) -> ECSignature:
"""Parse signature hex as a concatentation of EC parameters ordered R, S, V.
>>> _parse_signature_hex_as_rsv('0x117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b00')
{'r': '117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d87287113', 's': '7feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b', 'v': 27}
""" # noqa: E501 (line too long)
signature: ECSignature = {
"r": signature_hex[2:66],
"s": signature_hex[66:130],
"v": int(signature_hex[130:132], 16),
}
if signature["v"] == 0 or signature["v"] == 1:
signature["v"] = signature["v"] + 27
return signature
def _convert_ec_signature_to_vrs_hex(signature: ECSignature) -> str:
"""Convert elliptic curve signature object to hex hash string.
>>> _convert_ec_signature_to_vrs_hex(
... {
... 'r': '117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d87287113',
... 's': '7feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b',
... 'v': 27
... }
... )
'0x1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b'
""" # noqa: E501 (line too long)
return (
"0x"
+ signature["v"].to_bytes(1, byteorder="big").hex()
+ signature["r"]
+ signature["s"]
)
def sign_hash(
    web3_or_provider: Union[Web3, BaseProvider],
    signer_address: str,
    hash_hex: str,
) -> str:
    """Sign a message with the given hash, and return the signature.

    :param web3_or_provider: Either an instance of `web3.Web3`:code: or
        `web3.providers.base.BaseProvider`:code:
    :param signer_address: The address of the signing account.
    :param hash_hex: A hex string representing the hash, like that returned
        from `generate_order_hash_hex()`:code:.
    :returns: A string, of ASCII hex digits, representing the signature.
    :raises RuntimeError: If the provider returns a signature in an
        unrecognized format.

    >>> provider = Web3.HTTPProvider("http://127.0.0.1:8545")
    >>> sign_hash(
    ...     provider,
    ...     Web3(provider).geth.personal.listAccounts()[0],
    ...     '0x34decbedc118904df65f379a175bb39ca18209d6ce41d5ed549d54e6e0a95004',
    ... )
    '0x1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b03'
    """  # noqa: E501 (line too long)
    if isinstance(web3_or_provider, BaseProvider):
        web3_instance = Web3(web3_or_provider)
    elif isinstance(web3_or_provider, Web3):
        web3_instance = web3_or_provider
    else:
        raise TypeError(
            "Expected parameter 'web3_or_provider' to be an instance of either"
            + " Web3 or BaseProvider"
        )

    assert_is_address(signer_address, "signer_address")
    assert_is_hex_string(hash_hex, "hash_hex")

    # false positive from pylint: disable=no-member
    signature = web3_instance.eth.sign(  # type: ignore
        signer_address, hexstr=hash_hex.replace("0x", "")
    ).hex()

    valid_v_param_values = [27, 28]
    # Signature-type byte appended to the VRS hex per the 0x protocol.
    eth_sign_suffix = _Constants.SignatureType.ETH_SIGN.value.to_bytes(
        1, byteorder="big"
    ).hex()

    # HACK: There is no consensus on whether the signatureHex string should be
    # formatted as v + r + s OR r + s + v, and different clients (even
    # different versions of the same client) return the signature params in
    # different orders. In order to support all client implementations, we
    # parse the signature in both ways, and evaluate if either one is a valid
    # signature. r + s + v is the most prevalent format from eth_sign, so we
    # attempt this first.
    for parse in (_parse_signature_hex_as_rsv, _parse_signature_hex_as_vrs):
        ec_signature = parse(signature)
        if ec_signature["v"] not in valid_v_param_values:
            continue
        signature_as_vrst_hex = (
            _convert_ec_signature_to_vrs_hex(ec_signature) + eth_sign_suffix
        )
        if is_valid_signature(
            web3_instance.provider,
            hash_hex,
            signature_as_vrst_hex,
            signer_address,
        ):
            return signature_as_vrst_hex

    # BUG FIX: message was a plain string, so "{signature}" was emitted
    # literally instead of being interpolated; now an f-string.
    raise RuntimeError(
        "Signature returned from web3 provider is in an unknown format. "
        f"Signature was: {signature}"
    )
def sign_hash_to_bytes(
web3_or_provider: Union[Web3, BaseProvider],
signer_address: str,
hash_hex: str,
) -> bytes:
"""Sign a message with the given hash, and return the signature.
>>> provider = Web3.HTTPProvider("http://127.0.0.1:8545")
>>> sign_hash_to_bytes(
... provider,
... Web3(provider).geth.personal.listAccounts()[0],
... '0x34decbedc118904df65f379a175bb39ca18209d6ce41d5ed549d54e6e0a95004',
... ).decode(encoding='utf_8')
'1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b03'
""" # noqa: E501 (line too long)
return remove_0x_prefix(
HexStr(sign_hash(web3_or_provider, signer_address, hash_hex))
).encode(encoding="utf_8") | python-packages/order_utils/src/zero_ex/order_utils/__init__.py | r"""Order utilities for 0x applications.
Setup
-----
Install the package with pip::
pip install 0x-order-utils
Some methods require the caller to pass in a `Web3.BaseProvider`:code: object.
For local testing one may construct such a provider pointing at an instance of
`ganache-cli <https://www.npmjs.com/package/ganache-cli>`_ which has the 0x
contracts deployed on it. For convenience, a docker container is provided for
just this purpose. To start it::
docker run -d -p 8545:8545 0xorg/ganache-cli
"""
from enum import auto, Enum
import json
from typing import cast, Tuple, Union
from pkg_resources import resource_string
from mypy_extensions import TypedDict
from eth_typing import HexStr
from eth_utils import keccak, remove_0x_prefix, to_bytes, to_checksum_address
from web3 import Web3
import web3.exceptions
from web3.providers.base import BaseProvider
from web3.contract import Contract
from zero_ex.contract_addresses import chain_to_addresses, ChainId
import zero_ex.contract_artifacts
from zero_ex.contract_wrappers.exchange import Exchange
from zero_ex.contract_wrappers.exchange.types import Order
from zero_ex.contract_wrappers.order_conversions import order_to_jsdict
from zero_ex.dev_utils.type_assertions import (
assert_is_address,
assert_is_hex_string,
assert_is_provider,
)
from zero_ex.json_schemas import assert_valid
class _Constants:
    """Static data used by order utilities.

    Holds the EIP-712/EIP-191 hashing constants and the signature-type
    enumeration used when building and signing 0x orders.
    """

    # The zero address, used as a placeholder for "no address".
    null_address = "0x0000000000000000000000000000000000000000"

    # EIP-191 prefix prepended to EIP-712 encoded data before hashing.
    eip191_header = b"\x19\x01"

    # keccak256 hash of the EIP-712 domain type string.
    eip712_domain_separator_schema_hash = keccak(
        b"EIP712Domain("
        b"string name,"
        b"string version,"
        b"uint256 chainId,"
        b"address verifyingContract"
        b")"
    )

    # Domain schema hash followed by the hashed name ("0x Protocol") and
    # version ("3.0.0"); chainId and verifyingContract are appended later.
    eip712_domain_struct_header = (
        eip712_domain_separator_schema_hash
        + keccak(b"0x Protocol")
        + keccak(b"3.0.0")
    )

    # keccak256 hash of the EIP-712 Order type string.
    eip712_order_schema_hash = keccak(
        b"Order("
        b"address makerAddress,"
        b"address takerAddress,"
        b"address feeRecipientAddress,"
        b"address senderAddress,"
        b"uint256 makerAssetAmount,"
        b"uint256 takerAssetAmount,"
        b"uint256 makerFee,"
        b"uint256 takerFee,"
        b"uint256 expirationTimeSeconds,"
        b"uint256 salt,"
        b"bytes makerAssetData,"
        b"bytes takerAssetData,"
        b"bytes makerFeeAssetData,"
        b"bytes takerFeeAssetData"
        b")"
    )

    class SignatureType(Enum):
        """Enumeration of known signature types."""

        ILLEGAL = 0
        INVALID = auto()
        EIP712 = auto()
        ETH_SIGN = auto()
        WALLET = auto()
        VALIDATOR = auto()
        PRE_SIGNED = auto()
        N_SIGNATURE_TYPES = auto()
def generate_order_hash_hex(
    order: Order, exchange_address: str, chain_id: int
) -> str:
    """Calculate the hash of the given order as a hexadecimal string.

    :param order: The order to be hashed.  Must conform to `the 0x order JSON schema <https://github.com/0xProject/0x-monorepo/blob/development/packages/json-schemas/schemas/order_schema.json>`_.
    :param exchange_address: The address to which the 0x Exchange smart
        contract has been deployed.
    :param chain_id: The EIP-155 chain id the Exchange is deployed on.
    :returns: A string, of ASCII hex digits, representing the order hash.

    Inputs and expected result below were copied from
    @0x/order-utils/test/order_hash_test.ts

    >>> generate_order_hash_hex(
    ...     Order(
    ...         makerAddress="0x0000000000000000000000000000000000000000",
    ...         takerAddress="0x0000000000000000000000000000000000000000",
    ...         feeRecipientAddress="0x0000000000000000000000000000000000000000",
    ...         senderAddress="0x0000000000000000000000000000000000000000",
    ...         makerAssetAmount="0",
    ...         takerAssetAmount="0",
    ...         makerFee="0",
    ...         takerFee="0",
    ...         expirationTimeSeconds="0",
    ...         salt="0",
    ...         makerAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...         takerAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...         makerFeeAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...         takerFeeAssetData=((0).to_bytes(1, byteorder='big') * 20),
    ...     ),
    ...     exchange_address="0x1dc4c1cefef38a777b15aa20260a54e584b16c48",
    ...     chain_id=1337
    ... )
    'cb36e4fedb36508fb707e2c05e21bffc7a72766ccae93f8ff096693fff7f1714'
    """  # noqa: E501 (line too long)
    assert_is_address(exchange_address, "exchange_address")
    assert_valid(
        order_to_jsdict(order, chain_id, exchange_address), "/orderSchema"
    )

    def _address32(address_bytes: bytes) -> bytes:
        # Left-pad a 20-byte address up to a 32-byte EIP-712 word.
        return bytes(12) + address_bytes

    def _uint256(value: int) -> bytes:
        # Encode an integer as a 32-byte big-endian EIP-712 word.
        return value.to_bytes(32, byteorder="big")

    def _as_bytes(value: Union[str, bytes]) -> bytes:
        # Asset-data fields may arrive as hex strings or as raw bytes.
        if isinstance(value, str):
            return to_bytes(hexstr=cast(bytes, value))
        return value

    domain_struct_hash = keccak(
        _Constants.eip712_domain_struct_header
        + _uint256(int(chain_id))
        + _address32(to_bytes(hexstr=exchange_address))
    )

    order_fields = (
        _address32(to_bytes(hexstr=order["makerAddress"])),
        _address32(to_bytes(hexstr=order["takerAddress"])),
        _address32(to_bytes(hexstr=order["feeRecipientAddress"])),
        _address32(to_bytes(hexstr=order["senderAddress"])),
        _uint256(int(order["makerAssetAmount"])),
        _uint256(int(order["takerAssetAmount"])),
        _uint256(int(order["makerFee"])),
        _uint256(int(order["takerFee"])),
        _uint256(int(order["expirationTimeSeconds"])),
        _uint256(int(order["salt"])),
        keccak(_as_bytes(order["makerAssetData"])),
        keccak(_as_bytes(order["takerAssetData"])),
        keccak(_as_bytes(order["makerFeeAssetData"])),
        keccak(_as_bytes(order["takerFeeAssetData"])),
    )
    order_struct_hash = keccak(
        _Constants.eip712_order_schema_hash + b"".join(order_fields)
    )

    return keccak(
        _Constants.eip191_header + domain_struct_hash + order_struct_hash
    ).hex()
def is_valid_signature(
    provider: BaseProvider, data: str, signature: str, signer_address: str
) -> bool:
    """Check the validity of the supplied signature.

    Check if the supplied `signature`:code: corresponds to signing `data`:code:
    with the private key corresponding to `signer_address`:code:.

    :param provider: A Web3 provider able to access the 0x Exchange contract.
    :param data: The hex encoded data signed by the supplied signature.
    :param signature: The hex encoded signature.
    :param signer_address: The hex encoded address that signed the data to
        produce the supplied signature.
    :returns: True if the signature is valid for the given data and signer,
        False otherwise.

    >>> is_valid_signature(
    ...     Web3.HTTPProvider("http://127.0.0.1:8545"),
    ...     '0x6927e990021d23b1eb7b8789f6a6feaf98fe104bb0cf8259421b79f9a34222b0',
    ...     '0x1B61a3ed31b43c8780e905a260a35faefcc527be7516aa11c0256729b5b351bc3340349190569279751135161d22529dc25add4f6069af05be04cacbda2ace225403',
    ...     '0x5409ed021d9299bf6814279a6a1411a7e866a631',
    ... )
    True
    """  # noqa: E501 (line too long)
    assert_is_provider(provider, "provider")
    assert_is_hex_string(data, "data")
    assert_is_hex_string(signature, "signature")
    assert_is_address(signer_address, "signer_address")

    chain_id = int(Web3(provider).eth.chainId)  # pylint: disable=no-member
    exchange = Exchange(
        provider, chain_to_addresses(ChainId(chain_id)).exchange
    )
    return exchange.is_valid_hash_signature.call(
        bytes.fromhex(remove_0x_prefix(HexStr(data))),
        to_checksum_address(signer_address),
        bytes.fromhex(remove_0x_prefix(HexStr(signature))),
    )
class ECSignature(TypedDict):
    """Object representation of an elliptic curve signature's parameters."""

    v: int  # recovery parameter (normally 27 or 28)
    r: str  # first 32 bytes of the signature, as hex
    s: str  # second 32 bytes of the signature, as hex
def _parse_signature_hex_as_vrs(signature_hex: str) -> ECSignature:
"""Parse signature hex as a concatentation of EC parameters ordered V, R, S.
>>> _parse_signature_hex_as_vrs('0x1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b03')
{'v': 27, 'r': '117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d87287113', 's': '7feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b'}
""" # noqa: E501 (line too long)
signature: ECSignature = {
"v": int(signature_hex[2:4], 16),
"r": signature_hex[4:68],
"s": signature_hex[68:132],
}
if signature["v"] == 0 or signature["v"] == 1:
signature["v"] = signature["v"] + 27
return signature
def _parse_signature_hex_as_rsv(signature_hex: str) -> ECSignature:
"""Parse signature hex as a concatentation of EC parameters ordered R, S, V.
>>> _parse_signature_hex_as_rsv('0x117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b00')
{'r': '117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d87287113', 's': '7feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b', 'v': 27}
""" # noqa: E501 (line too long)
signature: ECSignature = {
"r": signature_hex[2:66],
"s": signature_hex[66:130],
"v": int(signature_hex[130:132], 16),
}
if signature["v"] == 0 or signature["v"] == 1:
signature["v"] = signature["v"] + 27
return signature
def _convert_ec_signature_to_vrs_hex(signature: ECSignature) -> str:
"""Convert elliptic curve signature object to hex hash string.
>>> _convert_ec_signature_to_vrs_hex(
... {
... 'r': '117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d87287113',
... 's': '7feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b',
... 'v': 27
... }
... )
'0x1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b'
""" # noqa: E501 (line too long)
return (
"0x"
+ signature["v"].to_bytes(1, byteorder="big").hex()
+ signature["r"]
+ signature["s"]
)
def sign_hash(
    web3_or_provider: Union[Web3, BaseProvider],
    signer_address: str,
    hash_hex: str,
) -> str:
    """Sign a message with the given hash, and return the signature.

    :param web3_or_provider: Either an instance of `web3.Web3`:code: or
        `web3.providers.base.BaseProvider`:code:
    :param signer_address: The address of the signing account.
    :param hash_hex: A hex string representing the hash, like that returned
        from `generate_order_hash_hex()`:code:.
    :returns: A string, of ASCII hex digits, representing the signature.
    :raises RuntimeError: If the provider returns a signature in an
        unrecognized format.

    >>> provider = Web3.HTTPProvider("http://127.0.0.1:8545")
    >>> sign_hash(
    ...     provider,
    ...     Web3(provider).geth.personal.listAccounts()[0],
    ...     '0x34decbedc118904df65f379a175bb39ca18209d6ce41d5ed549d54e6e0a95004',
    ... )
    '0x1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b03'
    """  # noqa: E501 (line too long)
    if isinstance(web3_or_provider, BaseProvider):
        web3_instance = Web3(web3_or_provider)
    elif isinstance(web3_or_provider, Web3):
        web3_instance = web3_or_provider
    else:
        raise TypeError(
            "Expected parameter 'web3_or_provider' to be an instance of either"
            + " Web3 or BaseProvider"
        )

    assert_is_address(signer_address, "signer_address")
    assert_is_hex_string(hash_hex, "hash_hex")

    # false positive from pylint: disable=no-member
    signature = web3_instance.eth.sign(  # type: ignore
        signer_address, hexstr=hash_hex.replace("0x", "")
    ).hex()

    valid_v_param_values = [27, 28]
    # Signature-type byte appended to the VRS hex per the 0x protocol.
    eth_sign_suffix = _Constants.SignatureType.ETH_SIGN.value.to_bytes(
        1, byteorder="big"
    ).hex()

    # HACK: There is no consensus on whether the signatureHex string should be
    # formatted as v + r + s OR r + s + v, and different clients (even
    # different versions of the same client) return the signature params in
    # different orders. In order to support all client implementations, we
    # parse the signature in both ways, and evaluate if either one is a valid
    # signature. r + s + v is the most prevalent format from eth_sign, so we
    # attempt this first.
    for parse in (_parse_signature_hex_as_rsv, _parse_signature_hex_as_vrs):
        ec_signature = parse(signature)
        if ec_signature["v"] not in valid_v_param_values:
            continue
        signature_as_vrst_hex = (
            _convert_ec_signature_to_vrs_hex(ec_signature) + eth_sign_suffix
        )
        if is_valid_signature(
            web3_instance.provider,
            hash_hex,
            signature_as_vrst_hex,
            signer_address,
        ):
            return signature_as_vrst_hex

    # BUG FIX: message was a plain string, so "{signature}" was emitted
    # literally instead of being interpolated; now an f-string.
    raise RuntimeError(
        "Signature returned from web3 provider is in an unknown format. "
        f"Signature was: {signature}"
    )
def sign_hash_to_bytes(
    web3_or_provider: Union[Web3, BaseProvider],
    signer_address: str,
    hash_hex: str,
) -> bytes:
    """Sign a message with the given hash; return the signature as UTF-8 bytes.

    >>> provider = Web3.HTTPProvider("http://127.0.0.1:8545")
    >>> sign_hash_to_bytes(
    ...     provider,
    ...     Web3(provider).geth.personal.listAccounts()[0],
    ...     '0x34decbedc118904df65f379a175bb39ca18209d6ce41d5ed549d54e6e0a95004',
    ... ).decode(encoding='utf_8')
    '1b117902c86dfb95fe0d1badd983ee166ad259b27acb220174cbb4460d872871137feabdfe76e05924b484789f79af4ee7fa29ec006cedce1bbf369320d034e10b03'
    """  # noqa: E501 (line too long)
    signature_hex = sign_hash(web3_or_provider, signer_address, hash_hex)
    return remove_0x_prefix(HexStr(signature_hex)).encode(encoding="utf_8")
__all__ = ['get_dataset_metainfo', 'get_train_data_source', 'get_val_data_source', 'get_test_data_source']
import tensorflow as tf
from .datasets.imagenet1k_cls_dataset import ImageNet1KMetaInfo
from .datasets.cub200_2011_cls_dataset import CUB200MetaInfo
from .datasets.cifar10_cls_dataset import CIFAR10MetaInfo
from .datasets.cifar100_cls_dataset import CIFAR100MetaInfo
from .datasets.svhn_cls_dataset import SVHNMetaInfo
from .datasets.voc_seg_dataset import VOCMetaInfo
from .datasets.ade20k_seg_dataset import ADE20KMetaInfo
from .datasets.cityscapes_seg_dataset import CityscapesMetaInfo
from .datasets.coco_seg_dataset import CocoSegMetaInfo
from .datasets.coco_hpe_dataset import CocoHpeMetaInfo
def get_dataset_metainfo(dataset_name):
    """
    Get dataset metainfo by name of dataset.

    Parameters
    ----------
    dataset_name : str
        Dataset name.

    Returns
    -------
    DatasetMetaInfo
        Dataset metainfo.

    Raises
    ------
    ValueError
        If `dataset_name` is not a recognized dataset.
    """
    dataset_metainfo_map = {
        "ImageNet1K": ImageNet1KMetaInfo,
        "CUB200_2011": CUB200MetaInfo,
        "CIFAR10": CIFAR10MetaInfo,
        "CIFAR100": CIFAR100MetaInfo,
        "SVHN": SVHNMetaInfo,
        "VOC": VOCMetaInfo,
        "ADE20K": ADE20KMetaInfo,
        "Cityscapes": CityscapesMetaInfo,
        "CocoSeg": CocoSegMetaInfo,
        "CocoHpe": CocoHpeMetaInfo,
    }
    metainfo_class = dataset_metainfo_map.get(dataset_name)
    if metainfo_class is None:
        # ValueError (a subclass of Exception) keeps existing `except
        # Exception` handlers working while being more precise.
        raise ValueError("Unrecognized dataset: {}".format(dataset_name))
    return metainfo_class()
def get_train_data_source(ds_metainfo,
                          batch_size,
                          data_format="channels_last"):
    """
    Get data source for training subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    DataLoader
        Data source.
    int
        Dataset size.
    """
    transform = ds_metainfo.train_transform(
        ds_metainfo=ds_metainfo,
        data_format=data_format)
    batch_gen = ds_metainfo.train_generator(
        data_generator=transform,
        ds_metainfo=ds_metainfo,
        batch_size=batch_size)
    dataset = tf.data.Dataset.from_generator(
        generator=lambda: batch_gen,
        output_types=(tf.float32, tf.float32))
    return dataset, batch_gen.n
def get_val_data_source(ds_metainfo,
                        batch_size,
                        data_format="channels_last"):
    """
    Get data source for validation subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    DataLoader
        Data source.
    int
        Dataset size.
    """
    transform = ds_metainfo.val_transform(
        ds_metainfo=ds_metainfo,
        data_format=data_format)
    batch_gen = ds_metainfo.val_generator(
        data_generator=transform,
        ds_metainfo=ds_metainfo,
        batch_size=batch_size)
    # Some generators expose the underlying dataset; let the metainfo
    # refresh itself from it when available.
    if hasattr(batch_gen, "dataset"):
        ds_metainfo.update_from_dataset(batch_gen.dataset)
    dataset = tf.data.Dataset.from_generator(
        generator=lambda: batch_gen,
        output_types=(tf.float32, tf.float32))
    return dataset, batch_gen.n
def get_test_data_source(ds_metainfo,
batch_size,
data_format="channels_last"):
"""
Get data source for testing subset.
Parameters
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
batch_size : int
Batch size.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
Returns
-------
DataLoader
Data source.
int
Dataset size.
"""
data_generator = ds_metainfo.test_transform(
ds_metainfo=ds_metainfo,
data_format=data_format)
generator = ds_metainfo.test_generator(
data_generator=data_generator,
ds_metainfo=ds_metainfo,
batch_size=batch_size)
if hasattr(generator, "dataset"):
ds_metainfo.update_from_dataset(generator.dataset)
return tf.data.Dataset.from_generator(
generator=lambda: generator,
output_types=(tf.float32, tf.float32)),\
generator.n | tensorflow2/dataset_utils.py | __all__ = ['get_dataset_metainfo', 'get_train_data_source', 'get_val_data_source', 'get_test_data_source']
import tensorflow as tf
from .datasets.imagenet1k_cls_dataset import ImageNet1KMetaInfo
from .datasets.cub200_2011_cls_dataset import CUB200MetaInfo
from .datasets.cifar10_cls_dataset import CIFAR10MetaInfo
from .datasets.cifar100_cls_dataset import CIFAR100MetaInfo
from .datasets.svhn_cls_dataset import SVHNMetaInfo
from .datasets.voc_seg_dataset import VOCMetaInfo
from .datasets.ade20k_seg_dataset import ADE20KMetaInfo
from .datasets.cityscapes_seg_dataset import CityscapesMetaInfo
from .datasets.coco_seg_dataset import CocoSegMetaInfo
from .datasets.coco_hpe_dataset import CocoHpeMetaInfo
def get_dataset_metainfo(dataset_name):
    """
    Get dataset metainfo by name of dataset.

    Parameters
    ----------
    dataset_name : str
        Dataset name.

    Returns
    -------
    DatasetMetaInfo
        Dataset metainfo.

    Raises
    ------
    ValueError
        If `dataset_name` is not a recognized dataset.
    """
    dataset_metainfo_map = {
        "ImageNet1K": ImageNet1KMetaInfo,
        "CUB200_2011": CUB200MetaInfo,
        "CIFAR10": CIFAR10MetaInfo,
        "CIFAR100": CIFAR100MetaInfo,
        "SVHN": SVHNMetaInfo,
        "VOC": VOCMetaInfo,
        "ADE20K": ADE20KMetaInfo,
        "Cityscapes": CityscapesMetaInfo,
        "CocoSeg": CocoSegMetaInfo,
        "CocoHpe": CocoHpeMetaInfo,
    }
    metainfo_class = dataset_metainfo_map.get(dataset_name)
    if metainfo_class is None:
        # ValueError (a subclass of Exception) keeps existing `except
        # Exception` handlers working while being more precise.
        raise ValueError("Unrecognized dataset: {}".format(dataset_name))
    return metainfo_class()
def get_train_data_source(ds_metainfo,
                          batch_size,
                          data_format="channels_last"):
    """
    Get data source for training subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    DataLoader
        Data source.
    int
        Dataset size.
    """
    transform = ds_metainfo.train_transform(
        ds_metainfo=ds_metainfo,
        data_format=data_format)
    batch_gen = ds_metainfo.train_generator(
        data_generator=transform,
        ds_metainfo=ds_metainfo,
        batch_size=batch_size)
    dataset = tf.data.Dataset.from_generator(
        generator=lambda: batch_gen,
        output_types=(tf.float32, tf.float32))
    return dataset, batch_gen.n
def get_val_data_source(ds_metainfo,
                        batch_size,
                        data_format="channels_last"):
    """
    Get data source for validation subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    DataLoader
        Data source.
    int
        Dataset size.
    """
    transform = ds_metainfo.val_transform(
        ds_metainfo=ds_metainfo,
        data_format=data_format)
    gen = ds_metainfo.val_generator(
        data_generator=transform,
        ds_metainfo=ds_metainfo,
        batch_size=batch_size)
    if hasattr(gen, "dataset"):
        # let the metainfo refresh itself from the underlying dataset when available
        ds_metainfo.update_from_dataset(gen.dataset)
    dataset = tf.data.Dataset.from_generator(
        generator=lambda: gen,
        output_types=(tf.float32, tf.float32))
    return dataset, gen.n
def get_test_data_source(ds_metainfo,
                         batch_size,
                         data_format="channels_last"):
    """
    Get data source for testing subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns
    -------
    DataLoader
        Data source.
    int
        Dataset size.
    """
    transform = ds_metainfo.test_transform(
        ds_metainfo=ds_metainfo,
        data_format=data_format)
    gen = ds_metainfo.test_generator(
        data_generator=transform,
        ds_metainfo=ds_metainfo,
        batch_size=batch_size)
    if hasattr(gen, "dataset"):
        # let the metainfo refresh itself from the underlying dataset when available
        ds_metainfo.update_from_dataset(gen.dataset)
    dataset = tf.data.Dataset.from_generator(
        generator=lambda: gen,
        output_types=(tf.float32, tf.float32))
    return dataset, gen.n
r"""
This submodule provides utility constants and functions for working with star data in GIANT.
Most of these utilities are focused on conversions of either units or of representations (ie a bearing to a unit
vector) with the exception of applying proper motion and computing the distance between 2 bearings. For more details on
what is contained refer to the following summary tables and the documentation for each constant/function.
"""
import datetime
from typing import Tuple, Union
import numpy as np
import pandas as pd
from giant._typing import SCALAR_OR_ARRAY, ARRAY_LIKE, Real
__all__ = ['DEG2RAD', 'RAD2DEG', 'DEG2MAS', 'MAS2DEG', 'RAD2MAS', 'MAS2RAD', 'PARSEC2KM', 'STAR_DIST',
'SI_DAYS_PER_YEAR', 'SI_SECONDS_PER_DAY', 'MJD_EPOCH', 'radec_to_unit', 'unit_to_radec',
'timedelta_to_si_years', 'datetime_to_mjd_years', 'apply_proper_motion', 'radec_distance']
"""
Things to import if someone wants to do from giant.catalogues.utilities import *
"""
# CONSTANTS.
DEG2RAD: float = np.pi / 180 # rad/deg
r"""
This constant converts from units of degrees to units of radians through multiplication.
That is ``angle_rad = angle_deg*DEG2RAD`` where ``angle_rad`` is the angle in radians and ``angle_deg`` is the angle in
degrees.
Mathematically this is :math:`\frac{\pi}{180}`.
"""
RAD2DEG: float = 180 / np.pi # deg/rad
r"""
This constant converts from units of radians to units of degrees through multiplication.
That is ``angle_deg = angle_rad*RAD2DEG`` where ``angle_rad`` is the angle in radians and ``angle_deg`` is the angle in
degrees.
Mathematically this is :math:`\frac{180}{\pi}`.
"""
DEG2MAS: float = 3600 * 1000 # mas/deg
r"""
This constant converts from units of degrees to units of milli-arc-seconds through multiplication.
That is ``angle_mas = angle_deg*DEG2MAS`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_deg`` is the
angle in degrees.
Mathematically this is :math:`3600000`.
"""
MAS2DEG: float = 1 / DEG2MAS # deg/mas
r"""
This constant converts from units of milli-arc-seconds to units of degrees through multiplication.
That is ``angle_deg = angle_mas*MAS2DEG`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_deg`` is the
angle in degrees.
Mathematically this is :math:`\frac{1}{3600000}`.
"""
RAD2MAS: float = RAD2DEG * DEG2MAS
r"""
This constant converts from units of radians to units of milli-arc-seconds through multiplication.
That is ``angle_mas = angle_rad*RAD2MAS`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_rad`` is the
angle in radians.
Mathematically this is :math:`\frac{180\times 3600000}{\pi}`.
"""
MAS2RAD: float = 1 / RAD2MAS
r"""
This constant converts from units of milli-arc-seconds to units of radians through multiplication.
That is ``angle_rad = angle_mas*MAS2RAD`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_rad`` is the
angle in radians.
Mathematically this is :math:`\frac{\pi}{180\times 3600000}`.
"""
PARSEC2KM: float = 30856775814913.673 # km
r"""
This constant converts from units of parsecs to units of kilometers through multiplication.
That is ``distance_km = distance_parsec*PARSEC2KM`` where ``distance_km`` is the distance in kilometers and
``distance_parsec`` is the distance in parsecs.
Numerically this is :math:`30856775814913.673`, the number of kilometers in one parsec.
"""
STAR_DIST: float = 5.428047027e15 # km
"""
The average distance of stars from the UCAC4 catalogue in kilometers.
This value is used to set the distance for stars for which there is no distance information available.
"""
SI_DAYS_PER_YEAR: float = 365.25 # days
"""
This constant provides the number of SI days in a SI year.
"""
SI_SECONDS_PER_DAY: int = 86400 # seconds
"""
This constant provides the number of SI seconds in a SI day.
"""
MJD_EPOCH: datetime.datetime = datetime.datetime(1858, 11, 17) # November 17, 1858
"""
This constant provides the standard modified Julian date epoch, November 17, 1858, as a datetime
"""
# UTILITY FUNCTIONS
def radec_to_unit(ra: SCALAR_OR_ARRAY, dec: SCALAR_OR_ARRAY) -> np.ndarray:
    r"""
    Convert right ascension/declination bearing(s) in radians into unit vector(s).

    Each pair maps to

    .. math::
        \hat{\mathbf{x}}=\left[\begin{array}{c}\text{cos}(\delta)\text{cos}(\alpha)\\
        \text{cos}(\delta)\text{sin}(\alpha)\\
        \text{sin}(\delta)\end{array}\right]

    where :math:`\alpha` is the right ascension and :math:`\delta` is the declination.

    The inputs are broadcast against each other using normal numpy rules and then flattened
    in C order, so any shapes that can be broadcast together are accepted.  When multiple
    pairs are supplied, each resulting unit vector is a column of the output.

    :param ra: The right ascension(s) to convert to unit vector(s) in units of radians
    :param dec: The declination(s) to convert to unit vector(s) in units of radians
    :return: A 3xn array of unit vectors corresponding to the right ascension, declination pair(s)
    """
    # broadcast to a common shape, then flatten so we always work with 1D arrays
    ra, dec = np.broadcast_arrays(ra, dec)
    ra, dec = ra.ravel(), dec.ravel()
    cos_dec = np.cos(dec)
    components = [cos_dec * np.cos(ra), cos_dec * np.sin(ra), np.sin(dec)]
    return np.vstack(components).squeeze()
def unit_to_radec(unit: ARRAY_LIKE) -> Tuple[SCALAR_OR_ARRAY, SCALAR_OR_ARRAY]:
    r"""
    This function converts a unit vector(s) into a right ascension/declination bearing(s).
    The right ascension is defined as the angle between the x axis and the projection of the unit vector onto the
    xy-plane and is output between 0 and 2*pi. The declination is defined as the angle between the xy-plane and the
    unit vector and is output between -pi/2 and pi/2 (positive values indicate the vector has a positive z component and
    negative values indicate the vector has a negative z component). These are computed using
    .. math::
        dec = \text{sin}^{-1}(z) \\
        ra = \text{tan}^{-1}\left(\frac{y}{x}\right)
    Note that the vector input should be along the first axis (or as columns if there are multiple vectors), and that
    the vector(s) needs to be of unit length or the results from this function will be invalid.
    If the input contains more than 1 vector, then the output will be 2 arrays. Otherwise, if it is a single vector,
    the output will be 2 floats as a tuple.
    :param unit: The unit vector to be converted to a ra/dec bearing
    :return: The right ascension(s) and declination(s) as a tuple in units of radians
    """
    if np.shape(unit)[0] != 3:
        raise ValueError('The length of the first axis must be 3')
    dec = np.arcsin(unit[2])
    ra = np.arctan2(unit[1], unit[0])
    # arctan2 returns values in (-pi, pi]; wrap negative angles into [0, 2*pi)
    ra_check = ra < 0
    if np.ndim(ra):
        # array case: wrap in place using a boolean mask
        ra[ra_check] += 2 * np.pi
    elif ra_check:
        # scalar case
        ra += 2 * np.pi
    return ra, dec
def timedelta_to_si_years(delta: datetime.timedelta) -> float:
    """
    Convert a python timedelta object into a fractional number of SI years.

    The timedelta is first reduced to seconds via ``total_seconds``, then divided through by
    the number of SI seconds per day and SI days per year.

    :param delta: The python timedelta object to be converted to fractional SI years
    :return: The length of time covered by the time delta in units of fractional SI years
    """
    days = delta.total_seconds() / SI_SECONDS_PER_DAY
    return days / SI_DAYS_PER_YEAR
def datetime_to_mjd_years(date: datetime.datetime) -> float:
    """
    This function converts a python datetime objects to the number of SI years since the MJD Epoch of November 17, 1858.
    This is computed by computing the time delta between the :attr:`.MJD_EPOCH` and the input datetime object, and then
    using :func:`.timedelta_to_si_years` to convert to the fractional years since the epoch.
    :param date: the python datetime object to be converted to MJD years
    :return: the number of SI years since November 17, 1858
    """
    return timedelta_to_si_years(date - MJD_EPOCH)
def apply_proper_motion(star_records: pd.DataFrame, new_time: Union[Real, datetime.datetime],
                        copy: bool = True) -> pd.DataFrame:
    """
    This function adjusts the right ascension and declination of stars to a new time.
    The right ascension and declination are updated using the corresponding proper motion of the stars. The formulation
    used here assumes constant linear velocity as described in section 1.2.8 of "The Hipparcos and Tycho2 Catalogues".
    The bearing measurement is converted to a unit vector, which is then updated using vector addition with the delta
    applied along the vectors of increasing right ascension and increasing declination. This model also allows for
    consideration of a radial velocity, but that is currently not implemented.
    The stars input into this method should be a pandas dataframe with the GIANT format. Specifically, this function
    requires the dataframe to have columns of ``['ra', 'dec', 'ra_proper_motion', 'dec_proper_motion', 'epoch']`` with
    units of degrees, degrees/year, and SI years (since January 1, 1) respectively. In addition the uncertainty columns
    ``['ra_sigma', 'dec_sigma', 'ra_pm_sigma', 'dec_pm_sigma']`` must be present since they are propagated to the new
    epoch as well. The updated bearing can be stored
    either in a copy of the dataframe, or in-place, depending on the ``copy`` key word argument. Either way the
    resulting dataframe is returned.
    The ``new_time`` parameter should either be a datetime object, or a float of the modified julian years for the
    desired time. The ``copy`` flag states whether to return a copy of the dataframe with the updates applied
    (recommended), or to make the updates in place.
    :param star_records: a pandas dataframe containing the bearing and proper motion of star records to be updated
    :param new_time: the new epoch to calculate the star positions at expressed as a mjy float or python datetime object
    :param copy: An option flag indicating whether to make a copy of star_records before applying proper motion
    :return: a pandas dataframe containing the star records with bearing values updated to the new epoch
    """
    if copy:
        out = star_records.copy()
    else:
        out = star_records
    # convert the ra and dec values into radians
    ra0 = out['ra'].values * DEG2RAD  # type: np.ndarray
    dec0 = out['dec'].values * DEG2RAD  # type: np.ndarray
    # assume linear motion based on Hipparcos Vol 1
    # compute the unit vector for each star
    r_unit = radec_to_unit(ra0, dec0).reshape(3, -1)
    # compute the unit vector in the direction of increasing right ascension
    p_unit = np.vstack([-np.sin(ra0), np.cos(ra0), np.zeros(ra0.shape)])
    # compute the unit vector in the direction of increasing declination
    q_unit = np.vstack([-np.sin(dec0) * np.cos(ra0), -np.sin(dec0) * np.sin(ra0), np.cos(dec0)])
    # compute the change in distance per year (future expansion; radial velocity is currently ignored)
    zeta0 = 0
    start_time = out['epoch'].values
    # compute the time delta in SI years
    if isinstance(new_time, datetime.datetime):
        # the 'epoch' column counts SI years since January 1, 1, so convert against that same epoch
        new_time = timedelta_to_si_years(new_time - datetime.datetime(1, 1, 1))
    timedelta = new_time - start_time
    # compute the new direction vector: linear motion along the local ra/dec tangent directions
    r_unit_new = r_unit * (1 + zeta0 * timedelta) + (p_unit * out['ra_proper_motion'].values * DEG2RAD +
                                                     q_unit * out['dec_proper_motion'].values * DEG2RAD) * timedelta
    r_unit_new /= np.linalg.norm(r_unit_new, axis=0, keepdims=True)
    # compute the new ra/dec and store it
    ra, dec = unit_to_radec(r_unit_new)
    out['ra'] = ra * RAD2DEG
    out['dec'] = dec * RAD2DEG
    # compute the new uncertainties for ra and dec (they grow linearly with the proper motion uncertainty)
    out['ra_sigma'] = np.sqrt(out['ra_sigma'] ** 2 + timedelta ** 2 * out['ra_pm_sigma'] ** 2)
    out['dec_sigma'] = np.sqrt(out['dec_sigma'] ** 2 + timedelta ** 2 * out['dec_pm_sigma'] ** 2)
    # update the epochs
    out["epoch"] = new_time
    return out
def radec_distance(ra1: SCALAR_OR_ARRAY, dec1: SCALAR_OR_ARRAY,
                   ra2: SCALAR_OR_ARRAY, dec2: SCALAR_OR_ARRAY) -> SCALAR_OR_ARRAY:
    r"""
    Compute the great-circle angular distance, in radians, between ra/dec pairs.

    The distance is

    .. math::
        \text{cos}^{-1}\left(\text{cos}(\delta_1)\text{cos}(\delta_2)\text{cos}(\alpha_1-\alpha_2) +
        \text{sin}(\delta_1)\text{sin}(\delta_2)\right)

    with right ascensions :math:`\alpha_1,\alpha_2` and declinations :math:`\delta_1,\delta_2`
    all in radians.  The inputs follow numpy broadcasting rules, so scalars and arrays can be
    mixed freely as long as they broadcast to a common shape; the output is a scalar only when
    every input is a scalar.

    :param ra1: The right ascension values for the first parts of the pairs with units of radians
    :param dec1: The declination values for the first parts of the pairs with units of radians
    :param ra2: The right ascension values for the second parts of the pairs with units of radians
    :param dec2: The declination values for the second parts of the pairs with units of radians
    :return: The great circle angular distance between the points with units of radians
    """
    cos_separation = (np.cos(dec1) * np.cos(dec2) * np.cos(ra1 - ra2) +
                      np.sin(dec1) * np.sin(dec2))
    return np.arccos(cos_separation)
r"""
This submodule provides utility constants and functions for working with star data in GIANT.
Most of these utilities are focused on conversions of either units or of representations (ie a bearing to a unit
vector) with the exception of applying proper motion and computing the distance between 2 bearings. For more details on
what is contained refer to the following summary tables and the documentation for each constant/function.
"""
import datetime
from typing import Tuple, Union
import numpy as np
import pandas as pd
from giant._typing import SCALAR_OR_ARRAY, ARRAY_LIKE, Real
__all__ = ['DEG2RAD', 'RAD2DEG', 'DEG2MAS', 'MAS2DEG', 'RAD2MAS', 'MAS2RAD', 'PARSEC2KM', 'STAR_DIST',
'SI_DAYS_PER_YEAR', 'SI_SECONDS_PER_DAY', 'MJD_EPOCH', 'radec_to_unit', 'unit_to_radec',
'timedelta_to_si_years', 'datetime_to_mjd_years', 'apply_proper_motion', 'radec_distance']
"""
Things to import if someone wants to do from giant.catalogues.utilities import *
"""
# CONSTANTS.
DEG2RAD: float = np.pi / 180 # rad/deg
r"""
This constant converts from units of degrees to units of radians through multiplication.
That is ``angle_rad = angle_deg*DEG2RAD`` where ``angle_rad`` is the angle in radians and ``angle_deg`` is the angle in
degrees.
Mathematically this is :math:`\frac{\pi}{180}`.
"""
RAD2DEG: float = 180 / np.pi # deg/rad
r"""
This constant converts from units of radians to units of degrees through multiplication.
That is ``angle_deg = angle_rad*RAD2DEG`` where ``angle_rad`` is the angle in radians and ``angle_deg`` is the angle in
degrees.
Mathematically this is :math:`\frac{180}{\pi}`.
"""
DEG2MAS: float = 3600 * 1000 # mas/deg
r"""
This constant converts from units of degrees to units of milli-arc-seconds through multiplication.
That is ``angle_mas = angle_deg*DEG2MAS`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_deg`` is the
angle in degrees.
Mathematically this is :math:`3600000`.
"""
MAS2DEG: float = 1 / DEG2MAS # deg/mas
r"""
This constant converts from units of milli-arc-seconds to units of degrees through multiplication.
That is ``angle_deg = angle_mas*MAS2DEG`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_deg`` is the
angle in degrees.
Mathematically this is :math:`\frac{1}{3600000}`.
"""
RAD2MAS: float = RAD2DEG * DEG2MAS
r"""
This constant converts from units of radians to units of milli-arc-seconds through multiplication.
That is ``angle_mas = angle_rad*RAD2MAS`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_rad`` is the
angle in radians.
Mathematically this is :math:`\frac{180\times 3600000}{\pi}`.
"""
MAS2RAD: float = 1 / RAD2MAS
r"""
This constant converts from units of milli-arc-seconds to units of radians through multiplication.
That is ``angle_rad = angle_mas*MAS2RAD`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_rad`` is the
angle in radians.
Mathematically this is :math:`\frac{\pi}{180\times 3600000}`.
"""
PARSEC2KM: float = 30856775814913.673 # km
r"""
This constant converts from units of parsecs to units of kilometers through multiplication.
That is ``distance_km = distance_parsec*PARSEC2KM`` where ``distance_km`` is the distance in kilometers and
``distance_parsec`` is the distance in parsecs.
Numerically this is :math:`30856775814913.673`, the number of kilometers in one parsec.
"""
STAR_DIST: float = 5.428047027e15 # km
"""
The average distance of stars from the UCAC4 catalogue in kilometers.
This value is used to set the distance for stars for which there is no distance information available.
"""
SI_DAYS_PER_YEAR: float = 365.25 # days
"""
This constant provides the number of SI days in a SI year.
"""
SI_SECONDS_PER_DAY: int = 86400 # seconds
"""
This constant provides the number of SI seconds in a SI day.
"""
MJD_EPOCH: datetime.datetime = datetime.datetime(1858, 11, 17) # November 17, 1858
"""
This constant provides the standard modified Julian date epoch, November 17, 1858, as a datetime
"""
# UTILITY FUNCTIONS
def radec_to_unit(ra: SCALAR_OR_ARRAY, dec: SCALAR_OR_ARRAY) -> np.ndarray:
    r"""
    Convert right ascension/declination bearing(s) in radians into unit vector(s).

    Each pair maps to

    .. math::
        \hat{\mathbf{x}}=\left[\begin{array}{c}\text{cos}(\delta)\text{cos}(\alpha)\\
        \text{cos}(\delta)\text{sin}(\alpha)\\
        \text{sin}(\delta)\end{array}\right]

    where :math:`\alpha` is the right ascension and :math:`\delta` is the declination.

    The inputs are broadcast against each other using normal numpy rules and then flattened
    in C order, so any shapes that can be broadcast together are accepted.  When multiple
    pairs are supplied, each resulting unit vector is a column of the output.

    :param ra: The right ascension(s) to convert to unit vector(s) in units of radians
    :param dec: The declination(s) to convert to unit vector(s) in units of radians
    :return: A 3xn array of unit vectors corresponding to the right ascension, declination pair(s)
    """
    # broadcast to a common shape, then flatten so we always work with 1D arrays
    ra, dec = np.broadcast_arrays(ra, dec)
    ra, dec = ra.ravel(), dec.ravel()
    cos_dec = np.cos(dec)
    components = [cos_dec * np.cos(ra), cos_dec * np.sin(ra), np.sin(dec)]
    return np.vstack(components).squeeze()
def unit_to_radec(unit: ARRAY_LIKE) -> Tuple[SCALAR_OR_ARRAY, SCALAR_OR_ARRAY]:
    r"""
    This function converts a unit vector(s) into a right ascension/declination bearing(s).
    The right ascension is defined as the angle between the x axis and the projection of the unit vector onto the
    xy-plane and is output between 0 and 2*pi. The declination is defined as the angle between the xy-plane and the
    unit vector and is output between -pi/2 and pi/2 (positive values indicate the vector has a positive z component and
    negative values indicate the vector has a negative z component). These are computed using
    .. math::
        dec = \text{sin}^{-1}(z) \\
        ra = \text{tan}^{-1}\left(\frac{y}{x}\right)
    Note that the vector input should be along the first axis (or as columns if there are multiple vectors), and that
    the vector(s) needs to be of unit length or the results from this function will be invalid.
    If the input contains more than 1 vector, then the output will be 2 arrays. Otherwise, if it is a single vector,
    the output will be 2 floats as a tuple.
    :param unit: The unit vector to be converted to a ra/dec bearing
    :return: The right ascension(s) and declination(s) as a tuple in units of radians
    """
    if np.shape(unit)[0] != 3:
        raise ValueError('The length of the first axis must be 3')
    dec = np.arcsin(unit[2])
    ra = np.arctan2(unit[1], unit[0])
    # arctan2 returns values in (-pi, pi]; wrap negative angles into [0, 2*pi)
    ra_check = ra < 0
    if np.ndim(ra):
        # array case: wrap in place using a boolean mask
        ra[ra_check] += 2 * np.pi
    elif ra_check:
        # scalar case
        ra += 2 * np.pi
    return ra, dec
def timedelta_to_si_years(delta: datetime.timedelta) -> float:
    """
    Convert a python timedelta object into a fractional number of SI years.

    The timedelta is first reduced to seconds via ``total_seconds``, then divided through by
    the number of SI seconds per day and SI days per year.

    :param delta: The python timedelta object to be converted to fractional SI years
    :return: The length of time covered by the time delta in units of fractional SI years
    """
    days = delta.total_seconds() / SI_SECONDS_PER_DAY
    return days / SI_DAYS_PER_YEAR
def datetime_to_mjd_years(date: datetime.datetime) -> float:
    """
    This function converts a python datetime objects to the number of SI years since the MJD Epoch of November 17, 1858.
    This is computed by computing the time delta between the :attr:`.MJD_EPOCH` and the input datetime object, and then
    using :func:`.timedelta_to_si_years` to convert to the fractional years since the epoch.
    :param date: the python datetime object to be converted to MJD years
    :return: the number of SI years since November 17, 1858
    """
    return timedelta_to_si_years(date - MJD_EPOCH)
def apply_proper_motion(star_records: pd.DataFrame, new_time: Union[Real, datetime.datetime],
                        copy: bool = True) -> pd.DataFrame:
    """
    This function adjusts the right ascension and declination of stars to a new time.
    The right ascension and declination are updated using the corresponding proper motion of the stars. The formulation
    used here assumes constant linear velocity as described in section 1.2.8 of "The Hipparcos and Tycho2 Catalogues".
    The bearing measurement is converted to a unit vector, which is then updated using vector addition with the delta
    applied along the vectors of increasing right ascension and increasing declination. This model also allows for
    consideration of a radial velocity, but that is currently not implemented.
    The stars input into this method should be a pandas dataframe with the GIANT format. Specifically, this function
    requires the dataframe to have columns of ``['ra', 'dec', 'ra_proper_motion', 'dec_proper_motion', 'epoch']`` with
    units of degrees, degrees/year, and SI years (since January 1, 1) respectively. In addition the uncertainty columns
    ``['ra_sigma', 'dec_sigma', 'ra_pm_sigma', 'dec_pm_sigma']`` must be present since they are propagated to the new
    epoch as well. The updated bearing can be stored
    either in a copy of the dataframe, or in-place, depending on the ``copy`` key word argument. Either way the
    resulting dataframe is returned.
    The ``new_time`` parameter should either be a datetime object, or a float of the modified julian years for the
    desired time. The ``copy`` flag states whether to return a copy of the dataframe with the updates applied
    (recommended), or to make the updates in place.
    :param star_records: a pandas dataframe containing the bearing and proper motion of star records to be updated
    :param new_time: the new epoch to calculate the star positions at expressed as a mjy float or python datetime object
    :param copy: An option flag indicating whether to make a copy of star_records before applying proper motion
    :return: a pandas dataframe containing the star records with bearing values updated to the new epoch
    """
    if copy:
        out = star_records.copy()
    else:
        out = star_records
    # convert the ra and dec values into radians
    ra0 = out['ra'].values * DEG2RAD  # type: np.ndarray
    dec0 = out['dec'].values * DEG2RAD  # type: np.ndarray
    # assume linear motion based on Hipparcos Vol 1
    # compute the unit vector for each star
    r_unit = radec_to_unit(ra0, dec0).reshape(3, -1)
    # compute the unit vector in the direction of increasing right ascension
    p_unit = np.vstack([-np.sin(ra0), np.cos(ra0), np.zeros(ra0.shape)])
    # compute the unit vector in the direction of increasing declination
    q_unit = np.vstack([-np.sin(dec0) * np.cos(ra0), -np.sin(dec0) * np.sin(ra0), np.cos(dec0)])
    # compute the change in distance per year (future expansion; radial velocity is currently ignored)
    zeta0 = 0
    start_time = out['epoch'].values
    # compute the time delta in SI years
    if isinstance(new_time, datetime.datetime):
        # the 'epoch' column counts SI years since January 1, 1, so convert against that same epoch
        new_time = timedelta_to_si_years(new_time - datetime.datetime(1, 1, 1))
    timedelta = new_time - start_time
    # compute the new direction vector: linear motion along the local ra/dec tangent directions
    r_unit_new = r_unit * (1 + zeta0 * timedelta) + (p_unit * out['ra_proper_motion'].values * DEG2RAD +
                                                     q_unit * out['dec_proper_motion'].values * DEG2RAD) * timedelta
    r_unit_new /= np.linalg.norm(r_unit_new, axis=0, keepdims=True)
    # compute the new ra/dec and store it
    ra, dec = unit_to_radec(r_unit_new)
    out['ra'] = ra * RAD2DEG
    out['dec'] = dec * RAD2DEG
    # compute the new uncertainties for ra and dec (they grow linearly with the proper motion uncertainty)
    out['ra_sigma'] = np.sqrt(out['ra_sigma'] ** 2 + timedelta ** 2 * out['ra_pm_sigma'] ** 2)
    out['dec_sigma'] = np.sqrt(out['dec_sigma'] ** 2 + timedelta ** 2 * out['dec_pm_sigma'] ** 2)
    # update the epochs
    out["epoch"] = new_time
    return out
def radec_distance(ra1: SCALAR_OR_ARRAY, dec1: SCALAR_OR_ARRAY,
                   ra2: SCALAR_OR_ARRAY, dec2: SCALAR_OR_ARRAY) -> SCALAR_OR_ARRAY:
    r"""
    Compute the great-circle angular distance, in radians, between ra/dec pairs.

    The distance is

    .. math::
        \text{cos}^{-1}\left(\text{cos}(\delta_1)\text{cos}(\delta_2)\text{cos}(\alpha_1-\alpha_2) +
        \text{sin}(\delta_1)\text{sin}(\delta_2)\right)

    with right ascensions :math:`\alpha_1,\alpha_2` and declinations :math:`\delta_1,\delta_2`
    all in radians.  The inputs follow numpy broadcasting rules, so scalars and arrays can be
    mixed freely as long as they broadcast to a common shape; the output is a scalar only when
    every input is a scalar.

    :param ra1: The right ascension values for the first parts of the pairs with units of radians
    :param dec1: The declination values for the first parts of the pairs with units of radians
    :param ra2: The right ascension values for the second parts of the pairs with units of radians
    :param dec2: The declination values for the second parts of the pairs with units of radians
    :return: The great circle angular distance between the points with units of radians
    """
    cos_separation = (np.cos(dec1) * np.cos(dec2) * np.cos(ra1 - ra2) +
                      np.sin(dec1) * np.sin(dec2))
    return np.arccos(cos_separation)
from typing import List, Tuple, Union
import cv2
import numpy as np
from scipy import optimize
from color_tracker.utils.tracker_object import TrackedObject
def crop_out_polygon_convex(image: np.ndarray, point_array: np.ndarray) -> np.ndarray:
    """
    Crop a convex polygon region out of an image; everything outside the polygon is blacked out.

    :param image: Opencv BGR image
    :param point_array: list of points that defines a convex polygon
    :return: Cropped out image
    """
    # take the convex hull of the points, reshaped back to the input's layout
    hull = np.reshape(cv2.convexHull(point_array), point_array.shape)
    mask = np.zeros(image.shape, dtype=np.uint8)
    polygon = np.array([hull], dtype=np.int32)
    white = (255, 255, 255)
    cv2.fillConvexPoly(mask, polygon, white)
    return cv2.bitwise_and(image, mask)
def resize_img(image: np.ndarray, min_width: int, min_height: int) -> np.ndarray:
    """
    Resize the image while keeping its aspect ratio.

    Each dimension that exceeds its limit is shrunk to the limit, scaling the other
    dimension proportionally (width first, then height).

    :param image: image
    :param min_width: maximum allowed width of the output image
    :param min_height: maximum allowed height of the output image
    :return: resized image
    """
    h, w = image.shape[:2]
    if w > min_width:
        h = int(h * (float(min_width) / w))
        w = min_width
    if h > min_height:
        w = int(w * (float(min_height) / h))
        h = min_height
    return cv2.resize(image, (w, h))
def sort_contours_by_area(contours: np.ndarray, descending: bool = True) -> np.ndarray:
    """Sort contours by their area, largest first by default; an empty input is returned unchanged."""
    if not len(contours):
        return contours
    return sorted(contours, key=cv2.contourArea, reverse=descending)
def filter_contours_by_area(contours: np.ndarray, min_area: float = 0, max_area: float = np.inf) -> np.ndarray:
    """Keep only the contours whose area lies strictly between ``min_area`` and ``max_area``."""
    if len(contours) == 0:
        return np.array([])
    kept = [c for c in contours if min_area < cv2.contourArea(c) < max_area]
    return np.array(kept)
def get_contour_centers(contours: np.ndarray) -> np.ndarray:
    """
    Calculate the centers (centroids) of the contours.

    Degenerate zero-area contours (a single point or a straight line, where the
    m00 moment is zero) fall back to the mean of their points instead of raising
    ZeroDivisionError.

    :param contours: Contours detected with find_contours
    :return: object centers as numpy array
    """
    if len(contours) == 0:
        return np.array([])
    # int32 instead of int16: pixel coordinates can exceed 32767 on large images
    centers = np.zeros((len(contours), 2), dtype=np.int32)
    for i, c in enumerate(contours):
        M = cv2.moments(c)
        if M["m00"] != 0:
            centers[i] = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        else:
            # zero-area contour: the centroid formula is undefined, use the point mean
            centers[i] = np.mean(np.reshape(c, (-1, 2)), axis=0).astype(np.int32)
    return centers
def find_object_contours(image: np.ndarray, hsv_lower_value: Union[Tuple[int], List[int]],
                         hsv_upper_value: Union[Tuple[int], List[int]], kernel: np.ndarray):
    """
    Find the external contours of the image regions whose HSV color lies inside the given range.

    :param image: OpenCV BGR image
    :param hsv_lower_value: lower bound of the HSV range (inclusive)
    :param hsv_upper_value: upper bound of the HSV range (inclusive)
    :param kernel: morphology kernel used to close small holes in the mask; pass None to skip
    :return: the detected contours
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, tuple(hsv_lower_value), tuple(hsv_upper_value))
    if kernel is not None:
        # close small holes inside the detected blobs before extracting contours
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
    # [-2] selects the contour list regardless of OpenCV version: findContours returns
    # (image, contours, hierarchy) in OpenCV 3 but (contours, hierarchy) in OpenCV 4
    return cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
def get_bbox_for_contours(contours: np.ndarray) -> np.ndarray:
    """Return axis-aligned bounding boxes for the contours as rows of [x1, y1, x2, y2]."""
    rects = [cv2.boundingRect(contour) for contour in contours]
    return np.array([[x, y, x + w, y + h] for x, y, w, h in rects])
def calculate_distance_mtx(tracked_objects: List[TrackedObject], points: np.ndarray) -> np.ndarray:
    """
    Build the pairwise Euclidean distance matrix between tracked objects and detections.

    The result has shape (nb_tracked_objects, nb_current_detected_points); entry [i, j]
    is the distance between tracked object i's last known point and detected point j.

    :param tracked_objects: the currently tracked objects (their ``last_point`` is used)
    :param points: the currently detected object centers
    :return: the cost (distance) matrix
    """
    cost_mtx = np.zeros((len(tracked_objects), len(points)))
    if cost_mtx.size == 0:
        # no tracked objects or no detections: nothing to fill in
        return cost_mtx
    # cast to float up front: the centers may arrive in a small integer dtype
    # (e.g. int16) where squaring the differences could silently overflow
    pts = np.asarray(points, dtype=np.float64)
    for i, tracked_obj in enumerate(tracked_objects):
        # vectorized over all detections: one row of distances per tracked object
        diffs = pts - np.asarray(tracked_obj.last_point, dtype=np.float64)
        cost_mtx[i] = np.linalg.norm(diffs, axis=1)
    return cost_mtx
def solve_assignment(cost_mtx: np.ndarray) -> List[int]:
    """
    Solve the linear (Hungarian) assignment problem for the given cost matrix.

    Tracked objects (rows) that receive no detection keep the sentinel value -1,
    which happens when there are fewer detections (columns) than tracked objects.

    :param cost_mtx: cost matrix of shape (nb_tracked_objects, nb_detected_points)
    :return: for every row the index of the assigned column, or -1 if unassigned
    """
    nb_tracked_objects = cost_mtx.shape[0]
    assignment = [-1] * nb_tracked_objects
    row_index, column_index = optimize.linear_sum_assignment(cost_mtx)
    # zip instead of index arithmetic; int() keeps the promised List[int] element type
    for row, column in zip(row_index, column_index):
        assignment[row] = int(column)
    return assignment
def remove_object_if_too_many_frames_skipped(tracked_objects: List[TrackedObject], assignment: List[int],
                                             max_skipped_frames: int):
    """
    Drop tracked objects (and their assignment slots) that have not been seen for too long.

    Both lists are modified in place. The original implementation deleted elements while
    iterating the list forward, which skips the element immediately after every deletion
    and can leave ``assignment`` out of sync with ``tracked_objects``; iterating the
    indices in reverse makes the deletions safe.

    :param tracked_objects: list of currently tracked objects (mutated in place)
    :param assignment: detection assignment per tracked object (mutated in place)
    :param max_skipped_frames: maximum tolerated number of consecutive skipped frames
    """
    for i in reversed(range(len(tracked_objects))):
        if tracked_objects[i].skipped_frames > max_skipped_frames:
            del tracked_objects[i]
            del assignment[i]
import cv2
import numpy as np
from scipy import optimize
from color_tracker.utils.tracker_object import TrackedObject
def crop_out_polygon_convex(image: np.ndarray, point_array: np.ndarray) -> np.ndarray:
    """
    Crop a convex polygon out of an image, blacking out every pixel outside it.

    :param image: OpenCV BGR image
    :param point_array: points describing a convex polygon
    :return: image with only the polygon interior kept
    """
    hull = np.reshape(cv2.convexHull(point_array), point_array.shape)
    polygon_mask = np.zeros(image.shape, dtype=np.uint8)
    corners = np.array([hull], dtype=np.int32)
    white = (255, 255, 255)
    cv2.fillConvexPoly(polygon_mask, corners, white)
    return cv2.bitwise_and(image, polygon_mask)
def resize_img(image: np.ndarray, min_width: int, min_height: int) -> np.ndarray:
    """
    Shrink the image (keeping aspect ratio) so neither dimension exceeds its bound.

    :param image: image
    :param min_width: maximum allowed width after the first pass
    :param min_height: maximum allowed height after the second pass
    :return: resized image
    """
    height, width = image.shape[:2]
    target_w, target_h = width, height
    if width > min_width:
        target_w = min_width
        target_h = int(height * (float(target_w) / width))
    # Second pass operates on the already-capped dimensions
    height, width = target_h, target_w
    if height > min_height:
        target_h = min_height
        target_w = int(width * (float(target_h) / height))
    return cv2.resize(image, (target_w, target_h))
def sort_contours_by_area(contours: np.ndarray, descending: bool = True) -> np.ndarray:
    """Sort contours by area, largest first by default; empty input is returned as-is."""
    if not len(contours):
        return contours
    return sorted(contours, key=cv2.contourArea, reverse=descending)
def filter_contours_by_area(contours: np.ndarray, min_area: float = 0, max_area: float = np.inf) -> np.ndarray:
    """Keep contours whose area lies strictly between min_area and max_area."""
    if len(contours) == 0:
        return np.array([])

    def _inside_bounds(contour):
        contour_area = cv2.contourArea(contour)
        return min_area < contour_area < max_area

    return np.array([c for c in contours if _inside_bounds(c)])
def get_contour_centers(contours: np.ndarray) -> np.ndarray:
    """
    Calculate the centers of the contours
    :param contours: Contours detected with find_contours
    :return: object centers as numpy array, one (x, y) row per contour
    """
    if len(contours) == 0:
        return np.array([])
    # NOTE: int16 caps coordinates at 32767 -- fine for typical camera frames,
    # but confirm if very large images are ever processed.
    centers = np.zeros((len(contours), 2), dtype=np.int16)
    for i, c in enumerate(contours):
        M = cv2.moments(c)
        if M["m00"] != 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        else:
            # Degenerate (zero-area) contour: fall back to the mean of its
            # points instead of raising ZeroDivisionError.
            center = tuple(np.mean(c.reshape(-1, 2), axis=0).astype(int))
        centers[i] = center
    return centers
def find_object_contours(image: np.ndarray, hsv_lower_value: Union[Tuple[int], List[int]],
                         hsv_upper_value: Union[Tuple[int], List[int]], kernel: np.ndarray):
    """Threshold the image in HSV space and return the external contours of the result."""
    lower, upper = tuple(hsv_lower_value), tuple(hsv_upper_value)
    mask = cv2.inRange(cv2.cvtColor(image, cv2.COLOR_BGR2HSV), lower, upper)
    if kernel is not None:
        # One closing pass removes pinholes before contour extraction
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
    # [-2] is compatible with both OpenCV 3.x and 4.x return signatures
    return cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
def get_bbox_for_contours(contours: np.ndarray) -> np.ndarray:
    """Convert each contour to an axis-aligned [x1, y1, x2, y2] bounding box."""
    boxes = []
    for contour in contours:
        left, top, width, height = cv2.boundingRect(contour)
        boxes.append([left, top, left + width, top + height])
    return np.array(boxes)
def calculate_distance_mtx(tracked_objects: List[TrackedObject], points: np.ndarray) -> np.ndarray:
    """Build the Euclidean-distance cost matrix between tracks and detections."""
    # shape: (nb_tracked_objects, nb_current_detected_points)
    cost_mtx = np.zeros((len(tracked_objects), len(points)))
    for row, tracked in enumerate(tracked_objects):
        for col, detection in enumerate(points):
            delta = tracked.last_point - detection
            cost_mtx[row][col] = np.sqrt(delta[0] ** 2 + delta[1] ** 2)
    return cost_mtx
def solve_assignment(cost_mtx: np.ndarray) -> List[int]:
    """Hungarian-algorithm matching; entries stay -1 for rows with no detection."""
    assignment = [-1] * cost_mtx.shape[0]
    for row, col in zip(*optimize.linear_sum_assignment(cost_mtx)):
        assignment[row] = col
    return assignment
def remove_object_if_too_many_frames_skipped(tracked_objects: List[TrackedObject], assignment: List[int],
max_skipped_frames: int):
for i, tracked_obj in enumerate(tracked_objects):
if tracked_obj.skipped_frames > max_skipped_frames:
del tracked_objects[i]
del assignment[i] | 0.906229 | 0.637172 |
from flask import Flask, render_template, request, redirect,send_file
from werkzeug.utils import secure_filename
from functions import split,transform
import io
import os
import pdfrw
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm,mm
# 12 questions adjustments
# Layout-calibration constants (ReportLab length units) that position the
# student id, name and answer marks on the generated answer-sheet PDF.
diff = -0.45*mm#(74.7/2-9.37)*mm
diff_name = -5*mm#diff -5*mm
diff_space = 0#-0.03*mm
space = 2.4*mm + diff_space
space_q = (2.6*mm,4.5*mm+diff_space)
max_h = 296.93*mm
w = -38.6*mm + 42.7*mm#-0.02*mm
h = (125.7*mm - 121.9*mm)#+1*mm
start_id = (35.7*mm, max_h-135.3*mm+0.45*mm+diff)
start_q = (36.8*mm,max_h-208.3*mm+0.45*mm+diff)
error = 0.5*mm
x_name = 110*mm
y_name = 171*mm - diff_name
MAX_ITENS = 12
# NOTE(review): y_name appears twice at the end of this list -- presumably
# intentional; confirm against functions.transform() before changing.
parameters = [space,space_q,max_h,w,h,start_id,start_q,error,x_name,y_name,y_name]
UPLOAD_FOLDER = 'uploads/'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/",methods=["POST", "GET"])
def upload_page():
    """Render the upload form (GET) or build and return the answer-sheet PDF (POST)."""
    if request.method == "GET":
        return render_template("index.html")

    uploaded_file = request.files['file']
    # The original compared the FileStorage object itself to '' (always True);
    # an empty file input is signalled by an empty filename.
    if uploaded_file.filename == '':
        return redirect(request.url)

    arq = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(uploaded_file.filename))
    uploaded_file.save(arq)

    name = request.form["name"]
    page = int(request.form["page"])
    student_id = request.form["id"]  # renamed from `id` to avoid shadowing the builtin

    # One character per question: chosen alternative, '.' when unanswered.
    quest = ""
    for i in range(1, MAX_ITENS + 1):
        quest += request.form.get("customRadioInline" + str(i), ".")

    output = os.path.join(app.config['UPLOAD_FOLDER'],
                          "Gabarito_" + "_".join(name.split()))
    pdf_path = output + ".pdf"
    split(arq, page, pdf_path)
    transform(student_id, quest, output, name, parameters)

    # send_file opens the PDF immediately, so the temp files can be unlinked
    # before returning; the original os.remove calls sat *after* `return` and
    # were unreachable dead code. TODO confirm unlink-while-open on Windows.
    response = send_file(pdf_path, as_attachment=True)
    os.remove(pdf_path)
    os.remove(arq)
    return response
if __name__ == "__main__":
    #app.run(debug=True, host= '0.0.0.0')
    # Development server only; use a real WSGI server in production.
    app.run()
from werkzeug.utils import secure_filename
from functions import split,transform
import io
import os
import pdfrw
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm,mm
# 12 questions adjustments
# Positioning constants for the answer-sheet PDF; values are hand-calibrated
# offsets in ReportLab units, hence the commented-out alternatives.
diff = -0.45*mm#(74.7/2-9.37)*mm
diff_name = -5*mm#diff -5*mm
diff_space = 0#-0.03*mm
space = 2.4*mm + diff_space
space_q = (2.6*mm,4.5*mm+diff_space)
max_h = 296.93*mm
w = -38.6*mm + 42.7*mm#-0.02*mm
h = (125.7*mm - 121.9*mm)#+1*mm
start_id = (35.7*mm, max_h-135.3*mm+0.45*mm+diff)
start_q = (36.8*mm,max_h-208.3*mm+0.45*mm+diff)
error = 0.5*mm
x_name = 110*mm
y_name = 171*mm - diff_name
MAX_ITENS = 12
# NOTE(review): the trailing y_name is duplicated -- verify with transform().
parameters = [space,space_q,max_h,w,h,start_id,start_q,error,x_name,y_name,y_name]
UPLOAD_FOLDER = 'uploads/'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/",methods=["POST", "GET"])
def upload_page():
    """GET: show the upload form. POST: generate and return the answer-sheet PDF."""
    if request.method == "GET":
        return render_template("index.html")

    uploaded_file = request.files['file']
    # An empty upload is detected via an empty filename; comparing the
    # FileStorage object to '' (as the original did) is always True.
    if uploaded_file.filename == '':
        return redirect(request.url)

    arq = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(uploaded_file.filename))
    uploaded_file.save(arq)

    name = request.form["name"]
    page = int(request.form["page"])
    student_id = request.form["id"]  # avoid shadowing the `id` builtin

    # Collect the selected alternative per question; '.' marks unanswered.
    quest = "".join(request.form.get("customRadioInline" + str(i), ".")
                    for i in range(1, MAX_ITENS + 1))

    output = os.path.join(app.config['UPLOAD_FOLDER'],
                          "Gabarito_" + "_".join(name.split()))
    pdf_path = output + ".pdf"
    split(arq, page, pdf_path)
    transform(student_id, quest, output, name, parameters)

    # Clean up before returning: the original os.remove calls were placed
    # after `return send_file(...)` and never executed. send_file opens the
    # file immediately, so unlinking here is safe on POSIX -- TODO confirm
    # behaviour if deployed on Windows.
    response = send_file(pdf_path, as_attachment=True)
    os.remove(pdf_path)
    os.remove(arq)
    return response
if __name__ == "__main__":
    #app.run(debug=True, host= '0.0.0.0')
    # Flask development server; not suitable for production use.
    app.run()
from distutils.version import LooseVersion
import os
import itertools
from subprocess import CalledProcessError
from bcbio import broad
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance.programs import get_version
from bcbio.variation.realign import has_aligned_reads
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.variation import bamprep, vcfutils
# Known benign MuTect failure messages. Adjacent string literals concatenate,
# so this set holds exactly two entries, both ending in the JDK sort error
# "Comparison method violates its general contract!".
_PASS_EXCEPTIONS = set(["java.lang.RuntimeException: "
                        "java.lang.IllegalArgumentException: "
                        "Comparison method violates its general contract!",
                        "java.lang.IllegalArgumentException: "
                        "Comparison method violates its general contract!"])
def _mutect_call_prep(align_bams, items, ref_file, assoc_files,
                      region=None, out_file=None):
    """
    Preparation work for MuTect.

    Builds the MuTect command-line parameter list for a paired tumor/normal
    run and returns it together with a configured Broad runner.

    ``items`` must contain exactly one sample whose metadata phenotype is
    "tumor" and one whose phenotype is "normal"; anything else raises
    ValueError. MuTect versions below 1.1.5 are rejected outright.
    """
    #FIXME: We assume all other bits in the config are shared
    base_config = items[0]["config"]
    dbsnp = assoc_files["dbsnp"]
    cosmic = assoc_files.get("cosmic")
    broad_runner = broad.runner_from_config(base_config, "mutect")
    if LooseVersion(broad_runner.get_mutect_version()) < LooseVersion("1.1.5"):
        message = ("MuTect 1.1.4 and lower is known to have incompatibilities "
                   "with Java < 7, and this may lead to problems in analyses. "
                   "Please use MuTect 1.1.5 or higher (note that it requires "
                   "Java 7).")
        raise ValueError(message)
    # Ensure reference and BAM indexes exist before MuTect runs.
    broad_runner.run_fn("picard_index_ref", ref_file)
    for x in align_bams:
        broad_runner.run_fn("picard_index", x)
    variant_regions = base_config["algorithm"].get("variant_regions", None)
    contamination = base_config["algorithm"].get("fraction_contamination", 0)
    region = subset_variant_regions(variant_regions, region, out_file)
    #FIXME: Add more parameters like fraction contamination etc
    params = ["-R", ref_file, "-T", "MuTect"]
    params += ["--dbsnp", dbsnp]
    tumor_bam = None
    normal_bam = None
    # Python 2 codebase: itertools.izip pairs each BAM with its sample dict.
    for bamfile, item in itertools.izip(align_bams, items):
        metadata = item["metadata"]
        if metadata["phenotype"] == "normal":
            normal_bam = bamfile
            normal_sample_name = item["name"][1]
        elif metadata["phenotype"] == "tumor":
            tumor_bam = bamfile
            tumor_sample_name = item["name"][1]
    if tumor_bam is None or normal_bam is None:
        raise ValueError("Missing phenotype definition (tumor or normal) "
                         "in samples")
    params += ["-I:normal", normal_bam]
    params += ["-I:tumor", tumor_bam]
    params += ["--tumor_sample_name", tumor_sample_name]
    params += ["--normal_sample_name", normal_sample_name]
    params += ["--fraction_contamination", contamination]
    if cosmic is not None:
        params += ["--cosmic", cosmic]
    if region:
        # Restrict calling to the (possibly subset) region.
        params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
                   "INTERSECTION"]
    return broad_runner, params
def mutect_caller(align_bams, items, ref_file, assoc_files, region=None,
                  out_file=None):
    """Run the MuTect paired analysis algorithm.

    Returns the output VCF path. When no reads align in the requested region
    an empty VCF is written instead of invoking MuTect -- note that this
    early exit returns None, not out_file (see NOTE below).
    """
    if out_file is None:
        out_file = "%s-paired-variants.vcf" % os.path.splitext(
            align_bams[0])[0]
    if not file_exists(out_file):
        broad_runner, params = \
            _mutect_call_prep(align_bams, items, ref_file, assoc_files,
                              region, out_file)
        # NOTE(review): this branch returns None rather than out_file;
        # callers appear to tolerate it -- confirm before changing.
        if (not isinstance(region, (list, tuple)) and
            not all(has_aligned_reads(x, region) for x in align_bams)):
            vcfutils.write_empty_vcf(out_file)
            return
        with file_transaction(out_file) as tx_out_file:
            # Rationale: MuTect writes another table to stdout,
            # which we don't need
            params += ["--vcf", tx_out_file, "-o", os.devnull]
            broad_runner.run_mutect(params)
    return out_file
from distutils.version import LooseVersion
import os
import itertools
from subprocess import CalledProcessError
from bcbio import broad
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance.programs import get_version
from bcbio.variation.realign import has_aligned_reads
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.variation import bamprep, vcfutils
# Two MuTect error strings treated as non-fatal (implicit literal concatenation
# merges the wrapped pieces into two set members).
_PASS_EXCEPTIONS = set(["java.lang.RuntimeException: "
                        "java.lang.IllegalArgumentException: "
                        "Comparison method violates its general contract!",
                        "java.lang.IllegalArgumentException: "
                        "Comparison method violates its general contract!"])
def _mutect_call_prep(align_bams, items, ref_file, assoc_files,
                      region=None, out_file=None):
    """
    Preparation work for MuTect.

    Assembles the MuTect CLI parameter list for a paired tumor/normal run.
    Raises ValueError for MuTect < 1.1.5 or when either phenotype is missing.
    Returns (broad_runner, params).
    """
    #FIXME: We assume all other bits in the config are shared
    base_config = items[0]["config"]
    dbsnp = assoc_files["dbsnp"]
    cosmic = assoc_files.get("cosmic")
    broad_runner = broad.runner_from_config(base_config, "mutect")
    if LooseVersion(broad_runner.get_mutect_version()) < LooseVersion("1.1.5"):
        message = ("MuTect 1.1.4 and lower is known to have incompatibilities "
                   "with Java < 7, and this may lead to problems in analyses. "
                   "Please use MuTect 1.1.5 or higher (note that it requires "
                   "Java 7).")
        raise ValueError(message)
    # Index reference and BAMs up front so MuTect finds them.
    broad_runner.run_fn("picard_index_ref", ref_file)
    for x in align_bams:
        broad_runner.run_fn("picard_index", x)
    variant_regions = base_config["algorithm"].get("variant_regions", None)
    contamination = base_config["algorithm"].get("fraction_contamination", 0)
    region = subset_variant_regions(variant_regions, region, out_file)
    #FIXME: Add more parameters like fraction contamination etc
    params = ["-R", ref_file, "-T", "MuTect"]
    params += ["--dbsnp", dbsnp]
    tumor_bam = None
    normal_bam = None
    # itertools.izip: Python 2 idiom pairing BAMs with their sample configs.
    for bamfile, item in itertools.izip(align_bams, items):
        metadata = item["metadata"]
        if metadata["phenotype"] == "normal":
            normal_bam = bamfile
            normal_sample_name = item["name"][1]
        elif metadata["phenotype"] == "tumor":
            tumor_bam = bamfile
            tumor_sample_name = item["name"][1]
    if tumor_bam is None or normal_bam is None:
        raise ValueError("Missing phenotype definition (tumor or normal) "
                         "in samples")
    params += ["-I:normal", normal_bam]
    params += ["-I:tumor", tumor_bam]
    params += ["--tumor_sample_name", tumor_sample_name]
    params += ["--normal_sample_name", normal_sample_name]
    params += ["--fraction_contamination", contamination]
    if cosmic is not None:
        params += ["--cosmic", cosmic]
    if region:
        params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
                   "INTERSECTION"]
    return broad_runner, params
def mutect_caller(align_bams, items, ref_file, assoc_files, region=None,
                  out_file=None):
    """Run the MuTect paired analysis algorithm.

    Skips the run when out_file already exists; writes an empty VCF (and
    returns None -- see NOTE) when the region has no aligned reads.
    """
    if out_file is None:
        out_file = "%s-paired-variants.vcf" % os.path.splitext(
            align_bams[0])[0]
    if not file_exists(out_file):
        broad_runner, params = \
            _mutect_call_prep(align_bams, items, ref_file, assoc_files,
                              region, out_file)
        # NOTE(review): bare `return` yields None instead of out_file here.
        if (not isinstance(region, (list, tuple)) and
            not all(has_aligned_reads(x, region) for x in align_bams)):
            vcfutils.write_empty_vcf(out_file)
            return
        with file_transaction(out_file) as tx_out_file:
            # Rationale: MuTect writes another table to stdout,
            # which we don't need
            params += ["--vcf", tx_out_file, "-o", os.devnull]
            broad_runner.run_mutect(params)
    return out_file
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_robot_model(pkg_description):
    """Read the kohm URDF shipped under <pkg_description>/urdf.

    :return: (urdf_contents, urdf_file_path)
    """
    urdf_file = os.path.join(pkg_description, 'urdf', 'kohm.urdf')
    with open(urdf_file, 'r') as handle:
        robot_desc = handle.read()
    return robot_desc, urdf_file
def generate_launch_description():
    """Launch robot_state_publisher and joint_state_publisher for the kohm robot."""
    # ROS packages
    pkg_kohm_description = get_package_share_directory('kohm_description')

    # Launch arguments
    use_sim_time = LaunchConfiguration('use_sim_time', default='true')
    robot_desc, _urdf_file = generate_robot_model(pkg_kohm_description)

    # Nodes
    rsp_node = Node(
        package='robot_state_publisher',
        executable='robot_state_publisher',
        name='robot_state_publisher',
        output='screen',
        parameters=[{
            'use_sim_time': use_sim_time,
            'robot_description': robot_desc,
        }])
    jsp_node = Node(
        package='joint_state_publisher',
        executable='joint_state_publisher',
        name='joint_state_publisher',
        output='screen',
        parameters=[{'use_sim_time': use_sim_time}])

    return LaunchDescription([
        # Launch Arguments
        DeclareLaunchArgument('use_sim_time',
                              default_value='true',
                              description='Use simulation clock if true'),
        # Nodes
        rsp_node,
        jsp_node,
    ])
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_robot_model(pkg_description):
    """Load the robot URDF from the description package's share directory.

    :param pkg_description: share directory of the description package
    :return: (urdf_contents, urdf_file_path)
    """
    urdf_path = os.path.join(pkg_description, 'urdf', 'kohm.urdf')
    with open(urdf_path, 'r') as urdf_handle:
        contents = urdf_handle.read()
    return contents, urdf_path
def generate_launch_description():
    """Start the state publishers for the kohm robot with the loaded URDF."""
    # ROS packages
    pkg_kohm_description = get_package_share_directory(
        'kohm_description')

    # Launch arguments
    use_sim_time = LaunchConfiguration('use_sim_time', default='true')

    # urdf_file is unused here; only the URDF text feeds robot_description.
    robot_desc, urdf_file = generate_robot_model(pkg_kohm_description)

    # Nodes
    robot_state_publisher = Node(package='robot_state_publisher',
                                 executable='robot_state_publisher',
                                 name='robot_state_publisher',
                                 output='screen',
                                 parameters=[{
                                     'use_sim_time': use_sim_time,
                                     'robot_description': robot_desc,
                                 }])

    joint_state_publisher = Node(package='joint_state_publisher',
                                 executable='joint_state_publisher',
                                 name='joint_state_publisher',
                                 output='screen',
                                 parameters=[{
                                     'use_sim_time': use_sim_time
                                 }])

    return LaunchDescription([
        # Launch Arguments
        DeclareLaunchArgument('use_sim_time',
                              default_value='true',
                              description='Use simulation clock if true'),
        # Nodes
        robot_state_publisher,
        joint_state_publisher,
    ])
import logging
import numbers
from typing import Callable, Union
import torch
from ignite.engine import Engine
from ignite.utils import apply_to_type
__all__ = ["TerminateOnNan"]
class TerminateOnNan:
    """TerminateOnNan handler can be used to stop the training if the `process_function`'s output
    contains a NaN or infinite number or `torch.tensor`.

    The output can be of type: number, tensor or collection of them. The training is stopped if
    there is at least a single number/tensor have NaN or Infinite value. For example, if the output is
    `[1.23, torch.tensor(...), torch.tensor(float('nan'))]` the handler will stop the training.

    Args:
        output_transform (callable, optional): a callable that is used to transform the
            :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into a number or `torch.tensor`
            or collection of them. This can be useful if, for example, you have a multi-output model and
            you want to check one or multiple values of the output.

    Examples:

    .. code-block:: python

        trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
    """

    def __init__(self, output_transform: Callable = lambda x: x):
        self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
        # Only attach a StreamHandler once: the logger is shared by name, so the
        # original unconditional addHandler stacked one handler per instance and
        # duplicated every warning line when several handlers were created.
        if not self.logger.handlers:
            self.logger.addHandler(logging.StreamHandler())
        self._output_transform = output_transform

    def __call__(self, engine: Engine) -> None:
        output = self._output_transform(engine.state.output)

        def raise_error(x: Union[numbers.Number, torch.Tensor]) -> None:
            # Promote plain numbers to tensors so one isfinite check covers both.
            if isinstance(x, numbers.Number):
                x = torch.tensor(x)

            if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
                raise RuntimeError("Infinite or NaN tensor found.")

        try:
            apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
        except RuntimeError:
            self.logger.warning(
                "{}: Output '{}' contains NaN or Inf. Stop training".format(self.__class__.__name__, output)
            )
            engine.terminate()
import numbers
from typing import Callable, Union
import torch
from ignite.engine import Engine
from ignite.utils import apply_to_type
__all__ = ["TerminateOnNan"]
class TerminateOnNan:
    """TerminateOnNan handler can be used to stop the training if the `process_function`'s output
    contains a NaN or infinite number or `torch.tensor`.

    The output can be of type: number, tensor or collection of them. The training is stopped if
    there is at least a single number/tensor have NaN or Infinite value. For example, if the output is
    `[1.23, torch.tensor(...), torch.tensor(float('nan'))]` the handler will stop the training.

    Args:
        output_transform (callable, optional): a callable that is used to transform the
            :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into a number or `torch.tensor`
            or collection of them. This can be useful if, for example, you have a multi-output model and
            you want to check one or multiple values of the output.

    Examples:

    .. code-block:: python

        trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
    """

    def __init__(self, output_transform: Callable = lambda x: x):
        # NOTE(review): the logger is shared by name, and every instance adds a
        # new StreamHandler -- constructing several instances duplicates each
        # warning line. Consider guarding with `if not self.logger.handlers`.
        self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
        self.logger.addHandler(logging.StreamHandler())
        self._output_transform = output_transform

    def __call__(self, engine: Engine) -> None:
        output = self._output_transform(engine.state.output)

        def raise_error(x: Union[numbers.Number, torch.Tensor]) -> None:
            # Wrap plain numbers in a tensor so torch.isfinite handles both.
            if isinstance(x, numbers.Number):
                x = torch.tensor(x)

            if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
                raise RuntimeError("Infinite or NaN tensor found.")

        try:
            apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
        except RuntimeError:
            # A non-finite value was found somewhere in the output: log and stop.
            self.logger.warning(
                "{}: Output '{}' contains NaN or Inf. Stop training".format(self.__class__.__name__, output)
            )
            engine.terminate()
import psycopg2
from openpyxl import load_workbook
import credentials
from os import listdir
from os.path import isfile, join
# One-shot loader: connects to Postgres and prepares to push OHLC rows from
# the xlsx workbooks under pathToData into the Prices table.
conn = psycopg2.connect(host="localhost",database="postgres", user=credentials.username, password=credentials.password, port=credentials.port)
conn.autocommit = True  # each INSERT commits immediately
cursor = conn.cursor()
# Print PostgreSQL Connection properties
print ( conn.get_dsn_parameters(),"\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record,"\n")
# NOTE(review): drop_table_query is defined but never executed (the DDL block
# below is commented out).
drop_table_query = "DROP TABLE Prices"
# try:
#     cursor.execute(drop_table_query)
#     print('dropped prices table')
# except:
#     print("prices table didn't exist, continuing...")
# create_table_query = '''
# CREATE TABLE Prices(
#     ticker VARCHAR(10) NOT NULL,
#     date DATE NOT NULL,
#     open DECIMAL,
#     high DECIMAL,
#     low DECIMAL,
#     close DECIMAL,
#     volume BIGINT,
#     smavg BIGINT,
#     PRIMARY KEY (ticker,date)
# ); '''
# cursor.execute(create_table_query)
pathToData = "../../algDev/data/"
equitiesPath = pathToData + 'commodities/'
# NOTE(review): values are spliced in with str.format rather than bound
# parameters -- acceptable only because inputs come from local spreadsheets.
postgres_insert_query = """ INSERT INTO Prices (ticker, date, open, high, low, close, volume, smavg) VALUES ('{}','{}',{},{},{},{},{},{})"""
onlyFiles = [f for f in listdir(equitiesPath) if isfile(join(equitiesPath, f)) and f[-5:] == ".xlsx"]
print('files?')
onlyFiles.sort()
print(onlyFiles)
# Tickers already ingested in previous runs; skipped below.
completed = ['AAPL.xlsx', 'ABBV.xlsx', 'ABT.xlsx', 'ACN.xlsx', 'ADBE.xlsx', 'ADP.xlsx', 'AMGN.xlsx', 'AMT.xlsx', 'AMZN.xlsx', 'ANTM.xlsx', 'AVGO.xlsx', 'AXP.xlsx', 'BA.xlsx', 'BAC.xlsx', 'BDX.xlsx', 'BKNG.xlsx', 'BLK.xlsx', 'BMY.xlsx', 'BRK.B.xlsx', 'C.xlsx', 'CAT.xlsx', 'CB.xlsx', 'CHTR.xlsx', 'CI.xlsx', 'CL.xlsx', 'CMCSA.xlsx', 'CME.xlsx', 'COST.xlsx', 'CRM.xlsx', 'CSCO.xlsx', 'CVS.xlsx', 'CVX.xlsx', 'D.xlsx', 'DHR.xlsx', 'DIS.xlsx', 'DUK.xlsx', 'FB.xlsx', 'FIS.xlsx', 'FISV.xlsx', 'GE.xlsx', 'GILD.xlsx', 'GOOG.xlsx', 'GOOGL.xlsx', 'GS.xlsx', 'HD.xlsx', 'HON.xlsx', 'IBM.xlsx', 'INTU.xlsx', 'ISRG.xlsx', 'JNJ.xlsx', 'JPM.xlsx', 'KO.xlsx', 'LIN.xlsx', 'LLY.xlsx', 'LMT.xlsx', 'LOW.xlsx', 'MA.xlsx', 'MCD.xlsx', 'MDLZ.xlsx', 'MDT.xlsx', 'MMM.xlsx', 'MO.xlsx', 'MRK.xlsx', 'MS.xlsx', 'MSFT.xlsx', 'NEE.xlsx', 'SNP.xlsx', 'RE.xlsx']
completedSet = set(completed)
# Ingest every not-yet-completed workbook; each data row becomes one INSERT.
for file in onlyFiles:
    if file in completedSet:
        continue
    tickerName = file[:-5]
    print('opening file:', file)
    wb = load_workbook(equitiesPath + file)
    ws = wb.active
    for row in ws.iter_rows(min_row=2, values_only=True):
        try:
            # A blank date cell marks the end of the data region.
            if not row[0]:
                break
            dateObj = row[0]
            dateStr = str(dateObj.year) + '-' + str(dateObj.month) + '-' + str(dateObj.day)
            if len(row) == 7:
                toUse = (tickerName, dateStr, row[1], row[2], row[3], row[4], row[5], row[6])
            elif len(row) == 2:
                # Close-only sheet: other columns become SQL NULL.
                toUse = (tickerName, dateStr, "NULL", "NULL", "NULL", row[1], "NULL", "NULL")
            else:
                raise Exception('INVALID ROW LENGTH')
            # Replace missing / '#N/A' cells with SQL NULL.
            toUse = tuple("NULL" if (not v or v == '#N/A') else v for v in toUse)
            formatted = postgres_insert_query.format(*toUse)
            print(formatted)
            cursor.execute(formatted)
        except (Exception, psycopg2.Error) as error:
            # Row-level failure: report and continue with the next row.
            # (Original message claimed a connection error, which was misleading.)
            print("Error while inserting row", error)
# Close the connection once, after *all* files are processed. The original
# cleanup was indented inside the per-file loop and closed the connection
# after the first workbook, breaking every subsequent execute.
if conn:
    cursor.close()
    conn.close()
    print("PostgreSQL connection is closed")
from openpyxl import load_workbook
import credentials
from os import listdir
from os.path import isfile, join
# Connect to the local Postgres instance used as the price store.
conn = psycopg2.connect(host="localhost",database="postgres", user=credentials.username, password=credentials.password, port=credentials.port)
conn.autocommit = True  # commit per statement; no explicit transactions
cursor = conn.cursor()
# Print PostgreSQL Connection properties
print ( conn.get_dsn_parameters(),"\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record,"\n")
# Unused unless the commented DDL below is re-enabled.
drop_table_query = "DROP TABLE Prices"
# try:
#     cursor.execute(drop_table_query)
#     print('dropped prices table')
# except:
#     print("prices table didn't exist, continuing...")
# create_table_query = '''
# CREATE TABLE Prices(
#     ticker VARCHAR(10) NOT NULL,
#     date DATE NOT NULL,
#     open DECIMAL,
#     high DECIMAL,
#     low DECIMAL,
#     close DECIMAL,
#     volume BIGINT,
#     smavg BIGINT,
#     PRIMARY KEY (ticker,date)
# ); '''
# cursor.execute(create_table_query)
pathToData = "../../algDev/data/"
equitiesPath = pathToData + 'commodities/'
# NOTE(review): str.format splicing, not bound parameters -- trusted local input only.
postgres_insert_query = """ INSERT INTO Prices (ticker, date, open, high, low, close, volume, smavg) VALUES ('{}','{}',{},{},{},{},{},{})"""
onlyFiles = [f for f in listdir(equitiesPath) if isfile(join(equitiesPath, f)) and f[-5:] == ".xlsx"]
print('files?')
onlyFiles.sort()
print(onlyFiles)
# Workbooks already loaded on earlier runs are skipped.
completed = ['AAPL.xlsx', 'ABBV.xlsx', 'ABT.xlsx', 'ACN.xlsx', 'ADBE.xlsx', 'ADP.xlsx', 'AMGN.xlsx', 'AMT.xlsx', 'AMZN.xlsx', 'ANTM.xlsx', 'AVGO.xlsx', 'AXP.xlsx', 'BA.xlsx', 'BAC.xlsx', 'BDX.xlsx', 'BKNG.xlsx', 'BLK.xlsx', 'BMY.xlsx', 'BRK.B.xlsx', 'C.xlsx', 'CAT.xlsx', 'CB.xlsx', 'CHTR.xlsx', 'CI.xlsx', 'CL.xlsx', 'CMCSA.xlsx', 'CME.xlsx', 'COST.xlsx', 'CRM.xlsx', 'CSCO.xlsx', 'CVS.xlsx', 'CVX.xlsx', 'D.xlsx', 'DHR.xlsx', 'DIS.xlsx', 'DUK.xlsx', 'FB.xlsx', 'FIS.xlsx', 'FISV.xlsx', 'GE.xlsx', 'GILD.xlsx', 'GOOG.xlsx', 'GOOGL.xlsx', 'GS.xlsx', 'HD.xlsx', 'HON.xlsx', 'IBM.xlsx', 'INTU.xlsx', 'ISRG.xlsx', 'JNJ.xlsx', 'JPM.xlsx', 'KO.xlsx', 'LIN.xlsx', 'LLY.xlsx', 'LMT.xlsx', 'LOW.xlsx', 'MA.xlsx', 'MCD.xlsx', 'MDLZ.xlsx', 'MDT.xlsx', 'MMM.xlsx', 'MO.xlsx', 'MRK.xlsx', 'MS.xlsx', 'MSFT.xlsx', 'NEE.xlsx', 'SNP.xlsx', 'RE.xlsx']
completedSet = set(completed)
# Walk the remaining workbooks and insert each data row.
for file in onlyFiles:
    if file in completedSet:
        continue
    tickerName = file[:-5]
    print('opening file:', file)
    wb = load_workbook(equitiesPath + file)
    ws = wb.active
    for row in ws.iter_rows(min_row=2, values_only=True):
        try:
            if not row[0]:
                break  # blank date => end of data
            dateObj = row[0]
            dateStr = str(dateObj.year) + '-' + str(dateObj.month) + '-' + str(dateObj.day)
            if len(row) == 7:
                toUse = (tickerName, dateStr, row[1], row[2], row[3], row[4], row[5], row[6])
            elif len(row) == 2:
                toUse = (tickerName, dateStr, "NULL", "NULL", "NULL", row[1], "NULL", "NULL")
            else:
                raise Exception('INVALID ROW LENGTH')
            # Missing / '#N/A' cells map to SQL NULL.
            toUse = tuple("NULL" if (not v or v == '#N/A') else v for v in toUse)
            formatted = postgres_insert_query.format(*toUse)
            print(formatted)
            cursor.execute(formatted)
        except (Exception, psycopg2.Error) as error:
            # Keep going on a bad row; the old message wrongly blamed the connection.
            print("Error while inserting row", error)
# Cleanup moved out of the file loop: the original indentation closed the
# cursor/connection right after the first workbook, so every later
# cursor.execute failed on a closed connection.
if conn:
    cursor.close()
    conn.close()
    print("PostgreSQL connection is closed")
class MarketReservationError(StandardError):
    """Base class for exceptions in this module."""
    # NOTE(review): StandardError exists only in Python 2; a Python 3 port
    # must inherit from Exception instead.
    pass
class ReservationManager(object):
    """Tracks at most one buy and one sell reservation per participant identity.

    Each side keeps a dict of owner -> bool, where the bool records whether
    the reservation has already been taken (used).
    """

    def __init__(self):
        self._buy_reservations = {}
        self._sell_reservations = {}

    def make_reservation(self, participant):
        """Register a reservation on the participant's side of the market."""
        if participant.is_buyer():
            self._make_buy_reservation(participant.identity)
        else:
            self._make_sell_reservation(participant.identity)

    def _make_buy_reservation(self, owner):
        self._add_reservation(self._buy_reservations, owner, 'buy')

    def _make_sell_reservation(self, owner):
        self._add_reservation(self._sell_reservations, owner, 'sell')

    def _add_reservation(self, collection, owner, side):
        # A second reservation by the same owner is a protocol violation.
        if owner in collection:
            message = 'Market participant {0} made more than a single {1} reservation.'.format(owner, side)
            raise MarketReservationError(message)
        collection[owner] = False

    def take_reservation(self, participant):
        """Consume a previously made, not-yet-taken reservation."""
        if participant.is_buyer():
            self._take_buy_reservation(participant.identity)
        else:
            self._take_sell_reservation(participant.identity)

    def _take_buy_reservation(self, owner):
        self._take_reservation(self._buy_reservations, owner, 'buy')

    def _take_sell_reservation(self, owner):
        self._take_reservation(self._sell_reservations, owner, 'sell')

    def _take_reservation(self, collection, owner, side):
        untaken = owner in collection and not collection[owner]
        if not untaken:
            message = 'Market participant {0} made no {1} reservation.'.format(owner, side)
            raise MarketReservationError(message)
        collection[owner] = True

    def has_market_formed(self):
        """A market forms once at least one buyer and one seller have reserved."""
        return bool(self._buy_reservations) and bool(self._sell_reservations)

    def buyer_count(self):
        return len(self._buy_reservations)

    def seller_count(self):
        return len(self._sell_reservations)
class MarketReservationError(StandardError):
    """Base class for exceptions in this module."""
    # Python-2-only base class (StandardError); use Exception under Python 3.
    pass
class ReservationManager(object):
    """Allows each market participant one buy and one sell reservation.

    Internally each side maps owner identity -> bool; the bool flips to True
    once the reservation has been taken.
    """
    def __init__(self):
        self._buy_reservations = {}
        self._sell_reservations = {}

    def make_reservation(self, participant):
        """Record a reservation on the side the participant trades on."""
        if (participant.is_buyer()):
            self._make_buy_reservation(participant.identity)
        else:
            self._make_sell_reservation(participant.identity)

    def _make_buy_reservation(self, owner):
        self._add_reservation(self._buy_reservations, owner, 'buy')

    def _add_reservation(self, collection, owner, type):
        # Reject duplicate reservations by the same owner on one side.
        if owner not in collection:
            collection[owner] = False
        else:
            message = 'Market participant {0} made more than a single {1} reservation.'.format(owner, type)
            raise MarketReservationError(message)

    def _make_sell_reservation(self, owner):
        self._add_reservation(self._sell_reservations, owner, 'sell')

    def take_reservation(self, participant):
        """Consume a previously made reservation; raises if none is available."""
        if (participant.is_buyer()):
            self._take_buy_reservation(participant.identity)
        else:
            self._take_sell_reservation(participant.identity)

    def _take_buy_reservation(self, owner):
        self._take_reservation(self._buy_reservations, owner, 'buy')

    def _take_sell_reservation(self, owner):
        self._take_reservation(self._sell_reservations, owner, 'sell')

    def _take_reservation(self, collection, owner, type):
        # Valid only when a reservation exists and was not taken yet.
        if owner in collection and not collection[owner]:
            collection[owner] = True
        else:
            message = 'Market participant {0} made no {1} reservation.'.format(owner, type)
            raise MarketReservationError(message)

    def has_market_formed(self):
        """True once both sides have at least one reservation."""
        has_buyer = len(self._buy_reservations) > 0
        has_seller = len(self._sell_reservations) > 0
        return has_buyer and has_seller

    def buyer_count(self):
        return len(self._buy_reservations)

    def seller_count(self):
        return len(self._sell_reservations)
import matplotlib.pyplot as plt
from graphviz import Digraph
from matplotlib.collections import PolyCollection
def draw_DAG(dag, name=None, view=False):
    """
    Draws a DAG that represents the repeater protocol to be executed
    :param dag: type DAGTask
        The DAGTask representing the repeater protocol to execute
    :param name: type str
        Name to give to the graph file
    :param view: type bool
        Toggles if the drawn DAG is opened in a viewer
    :return: None
    """
    graph_dir = "graphs/"
    dot = Digraph(name=graph_dir + name) if name is not None else Digraph()

    def _label(task):
        # Single canonical node label; the original duplicated this format
        # string three times and its loop variable shadowed the `name` param.
        return "{},A={},C={}".format(task.name, round(task.a, 3), round(task.c, 3))

    for task in dag.subtasks:
        label = _label(task)
        # Node id is the full label; displayed text is the action letter.
        dot.node(label, label[0])
    for task in dag.subtasks:
        for child in task.children:
            dot.edge(_label(task), _label(child))
    dot.render(view=view)
def get_original_taskname(task):
    """
    Obtains the original name of a task instance
    :param task: type DAGTask
        An instance of a Periodic DAG Task to obtain the original task name from
    :return: type str
        The original PeriodicDAGTask's name
    """
    # Instance names look like "<original>|<suffix>"; everything before the
    # first '|' is the original periodic task's name.
    original, _, _ = task.name.partition("|")
    return original
def schedule_timeline(taskset, schedule):
    """
    Produces a visualization of the timeline where repeater protocols are executed in the schedule
    :param taskset: type list
        List of PeriodicDAGTask's representing the repeater protocols scheduled into the network
    :param schedule: type list
        List of tuples (start, end, task_instance) that describes the schedule of repeater protocols in the network
    :return: None
    """
    # One horizontal lane per task, numbered from 1 upward.
    cats = {task.name: i + 1 for i, task in enumerate(taskset)}
    colormapping = {name: "C{}".format(i - 1) for name, i in cats.items()}

    def lane_rect(start, end, lane):
        # Closed rectangle spanning [start, end] centred on a lane.
        return [(start, lane - .4), (start, lane + .4),
                (end, lane + .4), (end, lane - .4), (start, lane - .4)]

    verts = []
    colors = []
    for start, end, instance in schedule:
        name = get_original_taskname(instance)
        verts.append(lane_rect(start, end, cats[name]))
        colors.append(colormapping[name])

    bars = PolyCollection(verts, facecolors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(bars)
    ax.autoscale()
    ax.set_yticks(list(range(1, len(taskset) + 1)))
    ax.set_yticklabels([t.name for t in taskset])
    plt.show()
def resource_timeline(taskset, schedule):
    """
    Produces a visualization of the timeline where resources are used schedule
    :param taskset: type list
        List of PeriodicDAGTask's representing the repeater protocols scheduled into the network
    :param schedule: type list
        List of tuples (start, end, task_instance) that describes the schedule of repeater protocols in the network
    :return: None
    """
    # One lane per distinct resource, sorted for a stable y-axis order.
    resources = sorted({r for task in taskset for r in task.resources})
    cats = {r: i + 1 for i, r in enumerate(resources)}
    colormapping = {t.name: "C{}".format(i) for i, t in enumerate(taskset)}

    def lane_rect(start, end, lane):
        # Closed rectangle spanning [start, end] centred on a lane.
        return [(start, lane - .4), (start, lane + .4),
                (end, lane + .4), (end, lane - .4), (start, lane - .4)]

    verts = []
    colors = []
    for task_start, _, instance in schedule:
        name = get_original_taskname(instance)
        subtasks = instance.subtasks if hasattr(instance, 'subtasks') else [instance]
        # Draw longer subtasks first so shorter ones are painted on top.
        for subtask in sorted(subtasks, key=lambda st: -st.c):
            begin = task_start + subtask.a
            finish = begin + subtask.c
            for r in subtask.resources:
                verts.append(lane_rect(begin, finish, cats[r]))
                colors.append(colormapping[name])

    bars = PolyCollection(verts, facecolors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(bars)
    ax.autoscale()
    ax.set_yticks(list(range(1, len(resources) + 1)))
    ax.set_yticklabels(list(resources))
    plt.show()
def protocol_timeline(scheduled_protocol_task):
    """
    Produces a visualization of the timeline of resource utilization by a protocol
    :param scheduled_protocol_task: type DAGTask
        The DAGTask to visualize on resources
    :return: None
    """
    resources = sorted(scheduled_protocol_task.resources)
    resource_intervals = scheduled_protocol_task.get_resource_intervals()
    cats = {r: i + 1 for i, r in enumerate(resources)}
    # Color keyed on the first character of the subtask name — presumably
    # action initials (e.g. 'L'ink, 'S'wap, 'D'istill) — TODO confirm.
    colormapping = {"L": 'C0', "S": 'C1', "D": 'C2', "O": "C3"}

    def lane_rect(start, end, lane):
        # Closed rectangle spanning [start, end] centred on a lane.
        return [(start, lane - .4), (start, lane + .4),
                (end, lane + .4), (end, lane - .4), (start, lane - .4)]

    verts = []
    colors = []
    for r in resources:
        for interval in resource_intervals[r]:
            action = interval.data.name[0]
            verts.append(lane_rect(interval.begin, interval.end, cats[r]))
            colors.append(colormapping[action])

    bars = PolyCollection(verts, facecolors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(bars)
    ax.autoscale()
    ax.set_yticks(list(range(1, len(resources) + 1)))
    ax.set_yticklabels(resources)
    plt.show()
def schedule_and_resource_timelines(taskset, schedule, plot_title=None, plot_sep=True, save_plot=False):
    """
    Produces a visualization of the timeline where resources are used schedule and where repeater protocols are
    executed
    :param taskset: type list
        List of PeriodicDAGTask's representing the repeater protocols scheduled into the network
    :param schedule: type list
        List of tuples (start, end, task_instance) that describes the schedule of repeater protocols in the network
    :param plot_title: type str
        Optional figure title, also used as the filename stem when saving
    :param plot_sep: type bool
        If True, draw the task and resource timelines as two separate figures; otherwise as two subplots
    :param save_plot: type bool
        If True (and plot_title is set), save the figure(s) to PNG instead of showing them
    :return: None
    """
    task_cats = dict([(task.name, i + 1) for i, task in enumerate(taskset)])
    task_colormapping = dict([(name, "C{}".format(i - 1)) for name, i in task_cats.items()])
    verts = []
    colors = []
    resources = set()
    for d in schedule:
        s, e, t = d
        resources |= set(t.resources)
        name = get_original_taskname(t)
        # Closed rectangle spanning [s, e], centred on the task's lane.
        v = [
            (s, task_cats[name] - .4),
            (s, task_cats[name] + .4),
            (e, task_cats[name] + .4),
            (e, task_cats[name] - .4),
            (s, task_cats[name] - .4)
        ]
        verts.append(v)
        colors.append(task_colormapping[name])
    task_bars = PolyCollection(verts, facecolors=colors)

    resources = list(sorted(resources))
    resource_cats = dict([(r, i + 1) for i, r in enumerate(resources)])
    verts = []
    colors = []
    for d in schedule:
        task_start, task_end, t = d
        name = get_original_taskname(t)
        resource_intervals = t.get_resource_intervals()
        for r, itree in resource_intervals.items():
            itree.merge_overlaps(strict=False)
            for interval in itree:
                # Shift task-relative intervals onto the absolute timeline and
                # clamp them at the instance's scheduled end.
                s = interval.begin + task_start - t.a
                e = min(task_end, interval.end + task_start - t.a)
                v = [
                    (s, resource_cats[r] - .4),
                    (s, resource_cats[r] + .4),
                    (e, resource_cats[r] + .4),
                    (e, resource_cats[r] - .4),
                    (s, resource_cats[r] - .4)
                ]
                verts.append(v)
                colors.append(task_colormapping[name])
    resource_bars = PolyCollection(verts, facecolors=colors)

    if plot_sep:
        fig, ax = plt.subplots()
        ax.add_collection(task_bars)
        ax.autoscale()
        ax.set_yticks(list(range(1, len(taskset) + 1)))
        ax.set_yticklabels([t.name for t in taskset])
        if plot_title:
            plt.title(plot_title)
        if save_plot and plot_title:
            plt.savefig(fname="{}_tasks.png".format(plot_title))
        else:
            plt.show()
        fig, ax = plt.subplots()
        ax.add_collection(resource_bars)
        ax.autoscale()
        # name_map = {'1,0': "Alice", '1,1': "Repeater", '1,2': "Bob", "0,1": "Charlie", "2,1": "David"}
        # yticks = [name_map[r[0:3]] + r[3:] for r in resources]
        yticks = resources
        ax.set_yticks(list(range(1, len(resources) + 1)))
        ax.set_yticklabels(yticks)
        if plot_title:
            plt.title(plot_title)
        if save_plot and plot_title:
            plt.savefig(fname="{}_resources.png".format(plot_title))
        else:
            plt.show()
    else:
        fig, axs = plt.subplots(2)
        axs[0].add_collection(task_bars)
        axs[0].autoscale()
        axs[0].set_yticks(list(range(1, len(taskset) + 1)))
        axs[0].set_yticklabels([t.name for t in taskset])
        axs[1].add_collection(resource_bars)
        axs[1].autoscale()
        axs[1].set_yticks(list(range(1, len(resources) + 1)))
        axs[1].set_yticklabels([r for r in resources])
        if plot_title:
            plt.title(plot_title)
        if save_plot and plot_title:
            plt.savefig(fname="{}.png".format(plot_title))
        else:
            # Restored: the original line was garbled by data extraction
            # ("plt.show() | code/jobscheduling/visualize.py | ...").
            plt.show()
from graphviz import Digraph
from matplotlib.collections import PolyCollection
def draw_DAG(dag, name=None, view=False):
    """
    Draws a DAG that represents the repeater protocol to be executed
    :param dag: type DAGTask
        The DAGTask representing the repeater protocol to execute
    :param name: type str
        Name to give to the graph file
    :param view: type bool
        Toggles if the drawn DAG is opened in a viewer
    :return: None
    """
    graph_dir = "graphs/"
    if name is not None:
        dot = Digraph(name=graph_dir + name)
    else:
        dot = Digraph()

    def node_id(t):
        # Node identifiers embed the task name plus its start (A) and
        # computation (C) times so equal-named instances stay distinct.
        return "{},A={},C={}".format(t.name, round(t.a, 3), round(t.c, 3))

    # Fix: the original reused the `name` parameter as the per-node label
    # variable, clobbering the graph-file name inside the loops.
    for task in dag.subtasks:
        label = node_id(task)
        # Displayed label is only the leading action character of the task name.
        dot.node(label, label[0])
    for task in dag.subtasks:
        for child in task.children:
            dot.edge(node_id(task), node_id(child))
    dot.render(view=view)
def get_original_taskname(task):
    """
    Obtains the original name of a task instance
    :param task: type DAGTask
        An instance of a Periodic DAG Task to obtain the original task name from
    :return: type str
        The original PeriodicDAGTask's name
    """
    # Instance names look like "<original>|<suffix>"; everything before the
    # first '|' is the original periodic task's name.
    original, _, _ = task.name.partition("|")
    return original
def schedule_timeline(taskset, schedule):
    """
    Produces a visualization of the timeline where repeater protocols are executed in the schedule
    :param taskset: type list
        List of PeriodicDAGTask's representing the repeater protocols scheduled into the network
    :param schedule: type list
        List of tuples (start, end, task_instance) that describes the schedule of repeater protocols in the network
    :return: None
    """
    # One horizontal lane per task, numbered from 1 upward.
    cats = {task.name: i + 1 for i, task in enumerate(taskset)}
    colormapping = {name: "C{}".format(i - 1) for name, i in cats.items()}

    def lane_rect(start, end, lane):
        # Closed rectangle spanning [start, end] centred on a lane.
        return [(start, lane - .4), (start, lane + .4),
                (end, lane + .4), (end, lane - .4), (start, lane - .4)]

    verts = []
    colors = []
    for start, end, instance in schedule:
        name = get_original_taskname(instance)
        verts.append(lane_rect(start, end, cats[name]))
        colors.append(colormapping[name])

    bars = PolyCollection(verts, facecolors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(bars)
    ax.autoscale()
    ax.set_yticks(list(range(1, len(taskset) + 1)))
    ax.set_yticklabels([t.name for t in taskset])
    plt.show()
def resource_timeline(taskset, schedule):
    """
    Produces a visualization of the timeline where resources are used schedule
    :param taskset: type list
        List of PeriodicDAGTask's representing the repeater protocols scheduled into the network
    :param schedule: type list
        List of tuples (start, end, task_instance) that describes the schedule of repeater protocols in the network
    :return: None
    """
    # One lane per distinct resource, sorted for a stable y-axis order.
    resources = sorted({r for task in taskset for r in task.resources})
    cats = {r: i + 1 for i, r in enumerate(resources)}
    colormapping = {t.name: "C{}".format(i) for i, t in enumerate(taskset)}

    def lane_rect(start, end, lane):
        # Closed rectangle spanning [start, end] centred on a lane.
        return [(start, lane - .4), (start, lane + .4),
                (end, lane + .4), (end, lane - .4), (start, lane - .4)]

    verts = []
    colors = []
    for task_start, _, instance in schedule:
        name = get_original_taskname(instance)
        subtasks = instance.subtasks if hasattr(instance, 'subtasks') else [instance]
        # Draw longer subtasks first so shorter ones are painted on top.
        for subtask in sorted(subtasks, key=lambda st: -st.c):
            begin = task_start + subtask.a
            finish = begin + subtask.c
            for r in subtask.resources:
                verts.append(lane_rect(begin, finish, cats[r]))
                colors.append(colormapping[name])

    bars = PolyCollection(verts, facecolors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(bars)
    ax.autoscale()
    ax.set_yticks(list(range(1, len(resources) + 1)))
    ax.set_yticklabels(list(resources))
    plt.show()
def protocol_timeline(scheduled_protocol_task):
    """
    Produces a visualization of the timeline of resource utilization by a protocol
    :param scheduled_protocol_task: type DAGTask
        The DAGTask to visualize on resources
    :return: None
    """
    resources = sorted(scheduled_protocol_task.resources)
    resource_intervals = scheduled_protocol_task.get_resource_intervals()
    cats = {r: i + 1 for i, r in enumerate(resources)}
    # Color keyed on the first character of the subtask name — presumably
    # action initials (e.g. 'L'ink, 'S'wap, 'D'istill) — TODO confirm.
    colormapping = {"L": 'C0', "S": 'C1', "D": 'C2', "O": "C3"}

    def lane_rect(start, end, lane):
        # Closed rectangle spanning [start, end] centred on a lane.
        return [(start, lane - .4), (start, lane + .4),
                (end, lane + .4), (end, lane - .4), (start, lane - .4)]

    verts = []
    colors = []
    for r in resources:
        for interval in resource_intervals[r]:
            action = interval.data.name[0]
            verts.append(lane_rect(interval.begin, interval.end, cats[r]))
            colors.append(colormapping[action])

    bars = PolyCollection(verts, facecolors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(bars)
    ax.autoscale()
    ax.set_yticks(list(range(1, len(resources) + 1)))
    ax.set_yticklabels(resources)
    plt.show()
def schedule_and_resource_timelines(taskset, schedule, plot_title=None, plot_sep=True, save_plot=False):
    """
    Produces a visualization of the timeline where resources are used schedule and where repeater protocols are
    executed
    :param taskset: type list
        List of PeriodicDAGTask's representing the repeater protocols scheduled into the network
    :param schedule: type list
        List of tuples (start, end, task_instance) that describes the schedule of repeater protocols in the network
    :param plot_title: type str
        Optional figure title, also used as the filename stem when saving
    :param plot_sep: type bool
        If True, draw the task and resource timelines as two separate figures; otherwise as two subplots
    :param save_plot: type bool
        If True (and plot_title is set), save the figure(s) to PNG instead of showing them
    :return: None
    """
    task_cats = dict([(task.name, i + 1) for i, task in enumerate(taskset)])
    task_colormapping = dict([(name, "C{}".format(i - 1)) for name, i in task_cats.items()])
    verts = []
    colors = []
    resources = set()
    for d in schedule:
        s, e, t = d
        resources |= set(t.resources)
        name = get_original_taskname(t)
        # Closed rectangle spanning [s, e], centred on the task's lane.
        v = [
            (s, task_cats[name] - .4),
            (s, task_cats[name] + .4),
            (e, task_cats[name] + .4),
            (e, task_cats[name] - .4),
            (s, task_cats[name] - .4)
        ]
        verts.append(v)
        colors.append(task_colormapping[name])
    task_bars = PolyCollection(verts, facecolors=colors)

    resources = list(sorted(resources))
    resource_cats = dict([(r, i + 1) for i, r in enumerate(resources)])
    verts = []
    colors = []
    for d in schedule:
        task_start, task_end, t = d
        name = get_original_taskname(t)
        resource_intervals = t.get_resource_intervals()
        for r, itree in resource_intervals.items():
            itree.merge_overlaps(strict=False)
            for interval in itree:
                # Shift task-relative intervals onto the absolute timeline and
                # clamp them at the instance's scheduled end.
                s = interval.begin + task_start - t.a
                e = min(task_end, interval.end + task_start - t.a)
                v = [
                    (s, resource_cats[r] - .4),
                    (s, resource_cats[r] + .4),
                    (e, resource_cats[r] + .4),
                    (e, resource_cats[r] - .4),
                    (s, resource_cats[r] - .4)
                ]
                verts.append(v)
                colors.append(task_colormapping[name])
    resource_bars = PolyCollection(verts, facecolors=colors)

    if plot_sep:
        fig, ax = plt.subplots()
        ax.add_collection(task_bars)
        ax.autoscale()
        ax.set_yticks(list(range(1, len(taskset) + 1)))
        ax.set_yticklabels([t.name for t in taskset])
        if plot_title:
            plt.title(plot_title)
        if save_plot and plot_title:
            plt.savefig(fname="{}_tasks.png".format(plot_title))
        else:
            plt.show()
        fig, ax = plt.subplots()
        ax.add_collection(resource_bars)
        ax.autoscale()
        # name_map = {'1,0': "Alice", '1,1': "Repeater", '1,2': "Bob", "0,1": "Charlie", "2,1": "David"}
        # yticks = [name_map[r[0:3]] + r[3:] for r in resources]
        yticks = resources
        ax.set_yticks(list(range(1, len(resources) + 1)))
        ax.set_yticklabels(yticks)
        if plot_title:
            plt.title(plot_title)
        if save_plot and plot_title:
            plt.savefig(fname="{}_resources.png".format(plot_title))
        else:
            plt.show()
    else:
        fig, axs = plt.subplots(2)
        axs[0].add_collection(task_bars)
        axs[0].autoscale()
        axs[0].set_yticks(list(range(1, len(taskset) + 1)))
        axs[0].set_yticklabels([t.name for t in taskset])
        axs[1].add_collection(resource_bars)
        axs[1].autoscale()
        axs[1].set_yticks(list(range(1, len(resources) + 1)))
        axs[1].set_yticklabels([r for r in resources])
        if plot_title:
            plt.title(plot_title)
        if save_plot and plot_title:
            plt.savefig(fname="{}.png".format(plot_title))
        else:
            # Restored: the original line was garbled by data extraction
            # ("plt.show() | 0.726717 | 0.592519 |").
            plt.show()
from crowdsourcing.serializers.task import *
from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from django.shortcuts import get_object_or_404
from crowdsourcing.permissions.project import IsProjectOwnerOrCollaborator
from crowdsourcing.models import Task, TaskWorker, TaskWorkerResult, WorkerRequesterRating
from django.utils import timezone
from django.db.models import Q
from rest_framework.permissions import IsAuthenticated
from crowdsourcing.permissions.task import HasExceededReservedLimit
from crowdsourcing.serializers.rating import WorkerRequesterRatingSerializer
from crowdsourcing.experimental_models import SubModule
from datetime import timedelta
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD and monitoring endpoints for crowdsourcing Task objects."""
    queryset = Task.objects.all()
    serializer_class = TaskSerializer

    @detail_route(methods=['post'], permission_classes=[IsProjectOwnerOrCollaborator])
    def update_task(self, request, id=None):
        """Validate incoming data and apply it to a single task."""
        task_serializer = TaskSerializer(data=request.data)
        task = self.get_object()
        if task_serializer.is_valid():
            task_serializer.update(task, task_serializer.validated_data)
            return Response({'status': 'updated task'})
        else:
            return Response(task_serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """List all tasks of the module given by the ?module= query param."""
        try:
            module = request.query_params.get('module')
            task = Task.objects.filter(module=module)
            task_serialized = TaskSerializer(task, many=True)
            return Response(task_serialized.data)
        except Exception:
            # Best-effort listing: any lookup/serialization failure yields an
            # empty list instead of a 500.  Narrowed from a bare `except:` so
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            return Response([])

    def destroy(self, request, *args, **kwargs):
        """Delete a task through the serializer's delete hook."""
        task_serializer = TaskSerializer()
        task = self.get_object()
        task_serializer.delete(task)
        return Response({'status': 'deleted task'})

    @detail_route(methods=['get'])
    def retrieve_with_data(self, request, *args, **kwargs):
        """Return a task plus the worker's rating of its requester, if any."""
        task = self.get_object()
        serializer = TaskSerializer(instance=task, fields=('id', 'task_template', 'module_data', 'status', 'has_comments'))
        rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
                                                             target=task.module.owner.profile.id,
                                                             origin_type='worker', module=task.module.id)
        requester_alias = task.module.owner.alias
        module = task.module.id
        target = task.module.owner.profile.id
        if rating.count() != 0:
            rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
                                                                fields=('id', 'weight'))
            return Response({'data': serializer.data,
                             'rating': rating_serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)
        else:
            return Response({'data': serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def list_by_module(self, request, **kwargs):
        """Return all tasks of a module together with monitoring fields."""
        tasks = Task.objects.filter(module=request.query_params.get('module_id'))
        task_serializer = TaskSerializer(instance=tasks, many=True, fields=('id', 'status',
                                                                            'template_items_monitoring',
                                                                            'task_workers_monitoring',
                                                                            'has_comments', 'comments'))
        # NOTE(review): tasks[0] raises IndexError for a module without
        # tasks — confirm callers guarantee a non-empty module.
        response_data = {
            'project_name': tasks[0].module.project.name,
            'project_id': tasks[0].module.project.id,
            'module_name': tasks[0].module.name,
            'module_id': tasks[0].module.id,
            'tasks': task_serializer.data
        }
        return Response(response_data, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def sample_by_submodule(self, request, **kwargs):
        """Return sampled tasks for a submodule, once its result-embargo
        window (hours_before_results) has elapsed; empty list before that."""
        submodule = SubModule.objects.get(fake_module_id=request.query_params.get('fake_module_id'))
        if submodule.created_timestamp + timedelta(hours=submodule.hours_before_results) <= timezone.now():
            results_per_round = submodule.results_per_round
            round_exp = submodule.round_exp
            # First round (no task workers yet) triggers sampling mode.
            sample = len(submodule.taskworkers) == 0
            pool = submodule.owner.pool
            tasks = Task.objects.filter(module=submodule.origin_module.id)
            task_serializer = TaskSerializer(instance=tasks, many=True,
                                             context={'requester': request.user.userprofile.id, 'submodule': submodule.id,
                                                      'round_exp': round_exp, 'results_per_round': results_per_round,
                                                      'sample': sample, 'pool': pool},
                                             fields=('id', 'status', 'template_items_monitoring', 'has_comments',
                                                     'comments', 'task_workers_sampled'))
            # Present sampled workers under the field name the UI expects.
            for task in task_serializer.data:
                task['task_workers_monitoring'] = task['task_workers_sampled']
            response_data = {
                'project_name': tasks[0].module.project.name,
                'project_id': tasks[0].module.project.id,
                'module_name': tasks[0].module.name,
                'module_id': tasks[0].module.id,
                'tasks': task_serializer.data
            }
            return Response(response_data, status.HTTP_200_OK)
        else:
            return Response([], status.HTTP_200_OK)

    @detail_route(methods=['get'])
    def list_comments(self, request, **kwargs):
        """List all comments attached to a task."""
        comments = models.TaskComment.objects.filter(task=kwargs['pk'])
        serializer = TaskCommentSerializer(instance=comments, many=True, fields=('comment', 'id',))
        response_data = {
            'task': kwargs['pk'],
            'comments': serializer.data
        }
        return Response(response_data, status.HTTP_200_OK)

    @detail_route(methods=['post'])
    def post_comment(self, request, **kwargs):
        """Attach a new comment from the requesting user to a task."""
        serializer = TaskCommentSerializer(data=request.data)
        task_comment_data = {}
        if serializer.is_valid():
            comment = serializer.create(task=kwargs['pk'], sender=request.user.userprofile)
            task_comment_data = TaskCommentSerializer(comment, fields=('id', 'comment',)).data
        return Response(task_comment_data, status.HTTP_200_OK)
class TaskWorkerViewSet(viewsets.ModelViewSet):
    """Endpoints that manage the assignment of tasks to workers."""
    queryset = TaskWorker.objects.all()
    serializer_class = TaskWorkerSerializer
    permission_classes = [IsAuthenticated, HasExceededReservedLimit]
    lookup_field = 'task__id'

    def create(self, request, *args, **kwargs):
        """Assign the requesting worker a task from the given module."""
        serializer = TaskWorkerSerializer(data=request.data)
        if serializer.is_valid():
            instance, http_status = serializer.create(worker=request.user.userprofile.worker,
                                                      module=request.data.get('module', None))
            serialized_data = {}
            if http_status == 200:
                serialized_data = TaskWorkerSerializer(instance=instance).data
            return Response(serialized_data, http_status)
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, *args, **kwargs):
        """Skip the current task and assign a replacement from its module."""
        serializer = TaskWorkerSerializer()
        obj = self.queryset.get(task=kwargs['task__id'], worker=request.user.userprofile.worker.id)
        instance, http_status = serializer.create(worker=request.user.userprofile.worker, module=obj.task.module_id)
        # NOTE(review): 6 is absent from the status map in list_by_status;
        # it appears to mean "skipped/dropped" — confirm against the model.
        obj.task_status = 6
        obj.save()
        serialized_data = {}
        if http_status == 200:
            serialized_data = TaskWorkerSerializer(instance=instance).data
        return Response(serialized_data, http_status)

    @list_route(methods=['post'])
    def bulk_update_status(self, request, *args, **kwargs):
        """Set task_status on many TaskWorkers at once and echo them back."""
        task_status = request.data.get('task_status', -1)
        task_workers = TaskWorker.objects.filter(id__in=tuple(request.data.get('task_workers', [])))
        task_workers.update(task_status=task_status, last_updated=timezone.now())
        return Response(TaskWorkerSerializer(instance=task_workers, many=True,
                                             fields=('id', 'task', 'task_status', 'task_worker_results_monitoring',
                                                     'worker_alias', 'updated_delta')).data, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def list_by_status(self, request, *args, **kwargs):
        """Group the requesting worker's TaskWorkers by readable status."""
        status_map = {1: 'In Progress', 2: 'Submitted', 3: 'Accepted', 4: 'Rejected', 5: 'Returned'}
        response = dict()
        # `.items()` replaces the Python-2-only `dict.iteritems()`, which
        # raises AttributeError on Python 3.
        for key, value in status_map.items():
            task_workers = TaskWorker.objects.filter(worker=request.user.userprofile.worker, task_status=key)
            serializer = TaskWorkerSerializer(instance=task_workers, many=True,
                                              fields=(
                                                  'id', 'task_status', 'task', 'requester_alias', 'module', 'project_name',
                                                  'is_paid', 'last_updated'))
            response[value] = serializer.data
        return Response(response, status.HTTP_200_OK)

    @detail_route(methods=['get'])
    def retrieve_with_data_and_results(self, request, *args, **kwargs):
        """Return a task worker plus their rating of the requester, if any."""
        task_worker = TaskWorker.objects.get(id=request.query_params['id'])
        serializer = TaskWorkerSerializer(instance=task_worker,
                                          fields=('task', 'task_status', 'task_template', 'has_comments'))
        rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
                                                             target=task_worker.task.module.owner.profile.id,
                                                             origin_type='worker', module=task_worker.task.module.id)
        requester_alias = task_worker.task.module.owner.alias
        module = task_worker.task.module.id
        target = task_worker.task.module.owner.profile.id
        if rating.count() != 0:
            rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
                                                                fields=('id', 'weight'))
            return Response({'data': serializer.data,
                             'rating': rating_serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)
        else:
            return Response({'data': serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)

    @list_route(methods=['post'])
    def drop_saved_tasks(self, request, *args, **kwargs):
        """Mark the given saved tasks as dropped (status 6) for this worker."""
        task_ids = request.data.get('task_ids', [])
        self.queryset.filter(task_id__in=task_ids, worker=request.user.userprofile.worker.id).update(
            task_status=6, last_updated=timezone.now())
        return Response('Success', status.HTTP_200_OK)

    @list_route(methods=['post'])
    def bulk_pay_by_module(self, request, *args, **kwargs):
        """Flag every accepted/rejected task worker of a module as paid."""
        module = request.data.get('module')
        accepted, rejected = 3, 4
        task_workers = TaskWorker.objects.filter(task__module=module).filter(
            Q(task_status=accepted) | Q(task_status=rejected))
        task_workers.update(is_paid=True, last_updated=timezone.now())
        return Response('Success', status.HTTP_200_OK)
class TaskWorkerResultViewSet(viewsets.ModelViewSet):
    """Endpoints for the per-template-item results a worker submits."""
    queryset = TaskWorkerResult.objects.all()
    serializer_class = TaskWorkerResultSerializer
    # permission_classes = [IsOwnerOrReadOnly]

    def update(self, request, *args, **kwargs):
        """Set the status flag of a single result (default 1)."""
        task_worker_result = self.queryset.filter(id=kwargs['pk'])[0]
        # Renamed from `status` so the imported rest_framework `status`
        # module is not shadowed; also dropped an unused serializer that
        # was constructed but never validated or used.
        result_status = request.data.get('status', 1)
        task_worker_result.status = result_status
        task_worker_result.save()
        return Response("Success")

    def retrieve(self, request, *args, **kwargs):
        """Return the result owned by the requesting worker, or 404."""
        worker = get_object_or_404(self.queryset, worker=request.worker)
        serializer = TaskWorkerResultSerializer(instance=worker)
        return Response(serializer.data)

    @list_route(methods=['post'], url_path="submit-results")
    def submit_results(self, request, *args, **kwargs):
        """Create or update a worker's results; on final submission
        (status 2, not saved) assign the next task from the same module."""
        task = request.data.get('task', None)
        template_items = request.data.get('template_items', [])
        task_status = request.data.get('task_status', None)
        saved = request.data.get('saved')
        task_worker = TaskWorker.objects.get(worker=request.user.userprofile.worker, task=task)
        task_worker.task_status = task_status
        task_worker.save()
        task_worker_results = TaskWorkerResult.objects.filter(task_worker_id=task_worker.id)
        # In-progress saves (status 1) may be partial; final submissions
        # are validated in full.
        if task_status == 1:
            serializer = TaskWorkerResultSerializer(data=template_items, many=True, partial=True)
        else:
            serializer = TaskWorkerResultSerializer(data=template_items, many=True)
        if serializer.is_valid():
            if task_worker_results.count() != 0:
                serializer.update(task_worker_results, serializer.validated_data)
            else:
                serializer.create(task_worker=task_worker)
            if task_status == 1 or saved:
                return Response('Success', status.HTTP_200_OK)
            elif task_status == 2 and not saved:
                task_worker_serializer = TaskWorkerSerializer()
                instance, http_status = task_worker_serializer.create(
                    worker=request.user.userprofile.worker, module=task_worker.task.module_id)
                serialized_data = {}
                if http_status == 200:
                    serialized_data = TaskWorkerSerializer(instance=instance).data
                return Response(serialized_data, http_status)
            # NOTE(review): any other task_status falls through and returns
            # an implicit None — confirm this path is unreachable.
        else:
            return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class CurrencyViewSet(viewsets.ModelViewSet):
    """Plain CRUD access to Currency records."""
    # Import kept class-local exactly as in the original.
    from crowdsourcing.models import Currency
    queryset = Currency.objects.all()
    # Restored: the original line was garbled by data extraction
    # ("... | crowdsourcing/viewsets/task.py | ...").
    serializer_class = CurrencySerializer
from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from django.shortcuts import get_object_or_404
from crowdsourcing.permissions.project import IsProjectOwnerOrCollaborator
from crowdsourcing.models import Task, TaskWorker, TaskWorkerResult, WorkerRequesterRating
from django.utils import timezone
from django.db.models import Q
from rest_framework.permissions import IsAuthenticated
from crowdsourcing.permissions.task import HasExceededReservedLimit
from crowdsourcing.serializers.rating import WorkerRequesterRatingSerializer
from crowdsourcing.experimental_models import SubModule
from datetime import timedelta
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD and monitoring endpoints for crowdsourcing Task objects."""
    queryset = Task.objects.all()
    serializer_class = TaskSerializer

    @detail_route(methods=['post'], permission_classes=[IsProjectOwnerOrCollaborator])
    def update_task(self, request, id=None):
        """Validate incoming data and apply it to a single task."""
        task_serializer = TaskSerializer(data=request.data)
        task = self.get_object()
        if task_serializer.is_valid():
            task_serializer.update(task, task_serializer.validated_data)
            return Response({'status': 'updated task'})
        else:
            return Response(task_serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """List all tasks of the module given by the ?module= query param."""
        try:
            module = request.query_params.get('module')
            task = Task.objects.filter(module=module)
            task_serialized = TaskSerializer(task, many=True)
            return Response(task_serialized.data)
        except Exception:
            # Best-effort listing: any lookup/serialization failure yields an
            # empty list instead of a 500.  Narrowed from a bare `except:` so
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            return Response([])

    def destroy(self, request, *args, **kwargs):
        """Delete a task through the serializer's delete hook."""
        task_serializer = TaskSerializer()
        task = self.get_object()
        task_serializer.delete(task)
        return Response({'status': 'deleted task'})

    @detail_route(methods=['get'])
    def retrieve_with_data(self, request, *args, **kwargs):
        """Return a task plus the worker's rating of its requester, if any."""
        task = self.get_object()
        serializer = TaskSerializer(instance=task, fields=('id', 'task_template', 'module_data', 'status', 'has_comments'))
        rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
                                                             target=task.module.owner.profile.id,
                                                             origin_type='worker', module=task.module.id)
        requester_alias = task.module.owner.alias
        module = task.module.id
        target = task.module.owner.profile.id
        if rating.count() != 0:
            rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
                                                                fields=('id', 'weight'))
            return Response({'data': serializer.data,
                             'rating': rating_serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)
        else:
            return Response({'data': serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def list_by_module(self, request, **kwargs):
        """Return all tasks of a module together with monitoring fields."""
        tasks = Task.objects.filter(module=request.query_params.get('module_id'))
        task_serializer = TaskSerializer(instance=tasks, many=True, fields=('id', 'status',
                                                                            'template_items_monitoring',
                                                                            'task_workers_monitoring',
                                                                            'has_comments', 'comments'))
        # NOTE(review): tasks[0] raises IndexError for a module without
        # tasks — confirm callers guarantee a non-empty module.
        response_data = {
            'project_name': tasks[0].module.project.name,
            'project_id': tasks[0].module.project.id,
            'module_name': tasks[0].module.name,
            'module_id': tasks[0].module.id,
            'tasks': task_serializer.data
        }
        return Response(response_data, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def sample_by_submodule(self, request, **kwargs):
        """Return sampled tasks for a submodule, once its result-embargo
        window (hours_before_results) has elapsed; empty list before that."""
        submodule = SubModule.objects.get(fake_module_id=request.query_params.get('fake_module_id'))
        if submodule.created_timestamp + timedelta(hours=submodule.hours_before_results) <= timezone.now():
            results_per_round = submodule.results_per_round
            round_exp = submodule.round_exp
            # First round (no task workers yet) triggers sampling mode.
            sample = len(submodule.taskworkers) == 0
            pool = submodule.owner.pool
            tasks = Task.objects.filter(module=submodule.origin_module.id)
            task_serializer = TaskSerializer(instance=tasks, many=True,
                                             context={'requester': request.user.userprofile.id, 'submodule': submodule.id,
                                                      'round_exp': round_exp, 'results_per_round': results_per_round,
                                                      'sample': sample, 'pool': pool},
                                             fields=('id', 'status', 'template_items_monitoring', 'has_comments',
                                                     'comments', 'task_workers_sampled'))
            # Present sampled workers under the field name the UI expects.
            for task in task_serializer.data:
                task['task_workers_monitoring'] = task['task_workers_sampled']
            response_data = {
                'project_name': tasks[0].module.project.name,
                'project_id': tasks[0].module.project.id,
                'module_name': tasks[0].module.name,
                'module_id': tasks[0].module.id,
                'tasks': task_serializer.data
            }
            return Response(response_data, status.HTTP_200_OK)
        else:
            return Response([], status.HTTP_200_OK)

    @detail_route(methods=['get'])
    def list_comments(self, request, **kwargs):
        """List all comments attached to a task."""
        comments = models.TaskComment.objects.filter(task=kwargs['pk'])
        serializer = TaskCommentSerializer(instance=comments, many=True, fields=('comment', 'id',))
        response_data = {
            'task': kwargs['pk'],
            'comments': serializer.data
        }
        return Response(response_data, status.HTTP_200_OK)

    @detail_route(methods=['post'])
    def post_comment(self, request, **kwargs):
        """Attach a new comment from the requesting user to a task."""
        serializer = TaskCommentSerializer(data=request.data)
        task_comment_data = {}
        if serializer.is_valid():
            comment = serializer.create(task=kwargs['pk'], sender=request.user.userprofile)
            task_comment_data = TaskCommentSerializer(comment, fields=('id', 'comment',)).data
        return Response(task_comment_data, status.HTTP_200_OK)
class TaskWorkerViewSet(viewsets.ModelViewSet):
queryset = TaskWorker.objects.all()
serializer_class = TaskWorkerSerializer
permission_classes = [IsAuthenticated, HasExceededReservedLimit]
lookup_field = 'task__id'
def create(self, request, *args, **kwargs):
    """Assign the requesting worker a task from the given module."""
    serializer = TaskWorkerSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)
    instance, http_status = serializer.create(worker=request.user.userprofile.worker,
                                              module=request.data.get('module', None))
    payload = TaskWorkerSerializer(instance=instance).data if http_status == 200 else {}
    return Response(payload, http_status)
def destroy(self, request, *args, **kwargs):
    """Skip the current task and assign a replacement from its module."""
    worker = request.user.userprofile.worker
    obj = self.queryset.get(task=kwargs['task__id'], worker=worker.id)
    instance, http_status = TaskWorkerSerializer().create(worker=worker, module=obj.task.module_id)
    # NOTE(review): 6 appears to mean "skipped/dropped" — confirm against the model.
    obj.task_status = 6
    obj.save()
    payload = TaskWorkerSerializer(instance=instance).data if http_status == 200 else {}
    return Response(payload, http_status)
@list_route(methods=['post'])
def bulk_update_status(self, request, *args, **kwargs):
task_status = request.data.get('task_status', -1)
task_workers = TaskWorker.objects.filter(id__in=tuple(request.data.get('task_workers', [])))
task_workers.update(task_status=task_status, last_updated=timezone.now())
return Response(TaskWorkerSerializer(instance=task_workers, many=True,
fields=('id', 'task', 'task_status', 'task_worker_results_monitoring',
'worker_alias', 'updated_delta')).data, status.HTTP_200_OK)
@list_route(methods=['get'])
def list_by_status(self, request, *args, **kwargs):
status_map = {1: 'In Progress', 2: 'Submitted', 3: 'Accepted', 4: 'Rejected', 5: 'Returned'}
response = dict()
for key, value in status_map.iteritems():
task_workers = TaskWorker.objects.filter(worker=request.user.userprofile.worker, task_status=key)
serializer = TaskWorkerSerializer(instance=task_workers, many=True,
fields=(
'id', 'task_status', 'task', 'requester_alias', 'module', 'project_name',
'is_paid', 'last_updated'))
response[value] = serializer.data
return Response(response, status.HTTP_200_OK)
@detail_route(methods=['get'])
def retrieve_with_data_and_results(self, request, *args, **kwargs):
task_worker = TaskWorker.objects.get(id=request.query_params['id'])
serializer = TaskWorkerSerializer(instance=task_worker,
fields=('task', 'task_status', 'task_template', 'has_comments'))
rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
target=task_worker.task.module.owner.profile.id,
origin_type='worker', module=task_worker.task.module.id)
requester_alias = task_worker.task.module.owner.alias
module = task_worker.task.module.id
target = task_worker.task.module.owner.profile.id
if rating.count() != 0:
rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
fields=('id', 'weight'))
return Response({'data': serializer.data,
'rating': rating_serializer.data,
'requester_alias': requester_alias,
'module': module,
'target': target}, status.HTTP_200_OK)
else:
return Response({'data': serializer.data,
'requester_alias': requester_alias,
'module': module,
'target': target}, status.HTTP_200_OK)
@list_route(methods=['post'])
def drop_saved_tasks(self, request, *args, **kwargs):
task_ids = request.data.get('task_ids', [])
self.queryset.filter(task_id__in=task_ids, worker=request.user.userprofile.worker.id).update(
task_status=6, last_updated=timezone.now())
return Response('Success', status.HTTP_200_OK)
@list_route(methods=['post'])
def bulk_pay_by_module(self, request, *args, **kwargs):
module = request.data.get('module')
accepted, rejected = 3, 4
task_workers = TaskWorker.objects.filter(task__module=module).filter(
Q(task_status=accepted) | Q(task_status=rejected))
task_workers.update(is_paid=True, last_updated=timezone.now())
return Response('Success', status.HTTP_200_OK)
class TaskWorkerResultViewSet(viewsets.ModelViewSet):
    """Endpoints for the per-template-item results a worker submits."""
    queryset = TaskWorkerResult.objects.all()
    serializer_class = TaskWorkerResultSerializer
    # permission_classes = [IsOwnerOrReadOnly]
    def update(self, request, *args, **kwargs):
        """Set the review status of a single result (default 1).

        Cleanup vs. the original: an unused serializer instantiation was
        removed, and the local variable no longer shadows the DRF ``status``
        module.  Behavior is unchanged.
        """
        task_worker_result = self.queryset.filter(id=kwargs['pk'])[0]
        task_worker_result.status = request.data.get('status', 1)
        task_worker_result.save()
        return Response("Success")
    def retrieve(self, request, *args, **kwargs):
        # NOTE(review): ``request.worker`` is inconsistent with the rest of
        # the file, which reaches the worker via
        # request.user.userprofile.worker -- confirm before changing.
        worker = get_object_or_404(self.queryset, worker=request.worker)
        serializer = TaskWorkerResultSerializer(instance=worker)
        return Response(serializer.data)
    @list_route(methods=['post'], url_path="submit-results")
    def submit_results(self, request, *args, **kwargs):
        """Create or update the results for one task assignment.

        task_status 1 is an in-progress save (partial data allowed); a final
        submission (task_status 2, saved falsy) also assigns the worker their
        next task from the same module.
        """
        task = request.data.get('task', None)
        template_items = request.data.get('template_items', [])
        task_status = request.data.get('task_status', None)
        saved = request.data.get('saved')
        task_worker = TaskWorker.objects.get(worker=request.user.userprofile.worker, task=task)
        task_worker.task_status = task_status
        task_worker.save()
        task_worker_results = TaskWorkerResult.objects.filter(task_worker_id=task_worker.id)
        if task_status == 1:
            # In-progress saves may omit fields.
            serializer = TaskWorkerResultSerializer(data=template_items, many=True, partial=True)
        else:
            serializer = TaskWorkerResultSerializer(data=template_items, many=True)
        if serializer.is_valid():
            if task_worker_results.count() != 0:
                serializer.update(task_worker_results, serializer.validated_data)
            else:
                serializer.create(task_worker=task_worker)
            if task_status == 1 or saved:
                return Response('Success', status.HTTP_200_OK)
            elif task_status == 2 and not saved:
                # Final submission: immediately hand the worker a new task.
                task_worker_serializer = TaskWorkerSerializer()
                instance, http_status = task_worker_serializer.create(
                    worker=request.user.userprofile.worker, module=task_worker.task.module_id)
                serialized_data = {}
                if http_status == 200:
                    serialized_data = TaskWorkerSerializer(instance=instance).data
                return Response(serialized_data, http_status)
        else:
            return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class CurrencyViewSet(viewsets.ModelViewSet):
from crowdsourcing.models import Currency
queryset = Currency.objects.all()
serializer_class = CurrencySerializer | 0.446495 | 0.059839 |
import copy
import logging
from django.conf import settings
from django.views.debug import ExceptionReporter
from behind import jarvis
class SlackExceptionHandler(logging.Handler):
"""
Code from djang-slack app
An exception log handler that sends log entries to a Slack channel.
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
logging.Handler.__init__(self)
def emit(self, record):
try:
request = record.request
internal = 'internal' if request.META.get('REMOTE_ADDR') in \
settings.INTERNAL_IPS else 'EXTERNAL'
subject = '{} ({} IP): {}'.format(
record.levelname,
internal,
record.getMessage(),
)
except Exception:
subject = '{}: {}'.format(
record.levelname,
record.getMessage(),
)
request = None
subject = self.format_subject(subject)
# Since we add a nicely formatted traceback on our own, create a copy
# of the log record without the exception data.
no_exc_record = copy.copy(record)
no_exc_record.exc_info = None
no_exc_record.exc_text = None
if record.exc_info:
exc_info = record.exc_info
else:
exc_info = (None, record.getMessage(), None)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
try:
tb = reporter.get_traceback_text()
except:
tb = "(An exception occured when getting the traceback text)"
if reporter.exc_type:
tb = "{} (An exception occured when rendering the " \
"traceback)".format(reporter.exc_type.__name__)
message = "{}\n\n{}".format(self.format(no_exc_record), tb)
text = f'{subject} - {message}'
jarvis.send_slack(text, channel='#monitoring')
def format_subject(self, subject):
"""
Escape CR and LF characters, and limit length. RFC 2822's hard limit is
998 characters per line. So, minus "Subject: " the actual subject must
be no longer than 989 characters.
"""
formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
return formatted_subject[:989] | behind/behind/log_handlers.py | import copy
import logging
from django.conf import settings
from django.views.debug import ExceptionReporter
from behind import jarvis
class SlackExceptionHandler(logging.Handler):
"""
Code from djang-slack app
An exception log handler that sends log entries to a Slack channel.
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
logging.Handler.__init__(self)
def emit(self, record):
try:
request = record.request
internal = 'internal' if request.META.get('REMOTE_ADDR') in \
settings.INTERNAL_IPS else 'EXTERNAL'
subject = '{} ({} IP): {}'.format(
record.levelname,
internal,
record.getMessage(),
)
except Exception:
subject = '{}: {}'.format(
record.levelname,
record.getMessage(),
)
request = None
subject = self.format_subject(subject)
# Since we add a nicely formatted traceback on our own, create a copy
# of the log record without the exception data.
no_exc_record = copy.copy(record)
no_exc_record.exc_info = None
no_exc_record.exc_text = None
if record.exc_info:
exc_info = record.exc_info
else:
exc_info = (None, record.getMessage(), None)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
try:
tb = reporter.get_traceback_text()
except:
tb = "(An exception occured when getting the traceback text)"
if reporter.exc_type:
tb = "{} (An exception occured when rendering the " \
"traceback)".format(reporter.exc_type.__name__)
message = "{}\n\n{}".format(self.format(no_exc_record), tb)
text = f'{subject} - {message}'
jarvis.send_slack(text, channel='#monitoring')
def format_subject(self, subject):
"""
Escape CR and LF characters, and limit length. RFC 2822's hard limit is
998 characters per line. So, minus "Subject: " the actual subject must
be no longer than 989 characters.
"""
formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
return formatted_subject[:989] | 0.482917 | 0.113138 |
from Lights.models import *
from django.http import HttpResponse, HttpResponseNotFound
from SharedFunctions.deviceControl import *
class LightApi():
    """GET-based command API for querying and controlling lights."""
    def webRequest(self, request):
        # Dispatch on the 'command' query parameter.
        # NOTE(review): when command matches neither branch the method falls
        # through and returns None, which Django reports as a server error --
        # consider a HttpResponseNotFound fallback (it is already imported).
        command = request.GET.get('command', '')
        if command == "getStatus":
            # Build a JSON array of light descriptors by string concatenation.
            # NOTE(review): a name containing '"' would corrupt the JSON;
            # json.dumps would be safer -- confirm before changing the output.
            pageContent = "["
            isNotInFirstLoop = False
            for alight in Lights.objects.all():
                if isNotInFirstLoop:
                    pageContent += ", "
                else:
                    isNotInFirstLoop = True
                pageContent += "{"
                pageContent += "\"id\":"
                pageContent += "\"" + str(alight.id) + "\", "
                pageContent += "\"Light\":"
                pageContent += "\"" + alight.LightName.replace("_", " ") + "\", "
                pageContent += "\"Room\":"
                pageContent += "\"" + alight.Room.Name.replace("_", " ") + "\", "
                pageContent += "\"Type\":"
                pageContent += "\"" + alight.LightType + "\", "
                pageContent += "\"State\":"
                pageContent += "\"" + alight.LightState + "\", "
                pageContent += "\"r\":"
                pageContent += "\"" + str(alight.R) + "\", "
                pageContent += "\"g\":"
                pageContent += "\"" + str(alight.G) + "\", "
                pageContent += "\"b\":"
                pageContent += "\"" + str(alight.B) + "\""
                pageContent += "}"
            pageContent += "]"
            return HttpResponse(pageContent)
        elif command == "SetRGB":
            lightId = request.GET.get('light', '0')
            theLight = Lights.objects.get(id=lightId)
            newR = request.GET.get('r','')
            newG = request.GET.get('g','')
            newB = request.GET.get('b','')
            # NOTE(review): int() on non-numeric query input raises ValueError
            # here (a 500) instead of the 400 branch below.
            if ((0 <= int(newR) < 256) and (0 <= int(newG) < 256) and (0 <= int(newB) < 256)):
                # Old colour: treat an Off light as black.
                if (theLight.LightState == "Off"):
                    oldR = 0
                    oldG = 0
                    oldB = 0
                else:
                    oldR = theLight.R
                    oldG = theLight.G
                    oldB = theLight.B
                # NOTE(review): oldR/oldG/oldB are computed but never used,
                # and the target colour passed is (newR, theLight.G,
                # theLight.B) -- only red ever scrolls.  Likely a bug; the
                # intended call is presumably (oldR, oldG, oldB, newR, newG,
                # newB).  Confirm against DeviceControl.
                DeviceControl().scrollDeviceRGBState(theLight.IpAddress, theLight.DeviceType, theLight.R, theLight.G, theLight.B, newR, theLight.G, theLight.B, setType="tread")
                if ((int(newR) == 0) and (int(newG) == 0) and (int(newB) == 0)):
                    theLight.LightState = "Off"
                else:
                    theLight.LightState = "On"
                theLight.R = newR
                theLight.G = newG
                theLight.B = newB
                theLight.save()
                return HttpResponse("Ok")
else:
return HttpResponse("Please Enter A Number Between 0 And 255", status=400) | Api/api.py | from Lights.models import *
from django.http import HttpResponse, HttpResponseNotFound
from SharedFunctions.deviceControl import *
class LightApi():
    """GET-based command API for querying and controlling lights."""
    def webRequest(self, request):
        """Dispatch on the ``command`` GET parameter.

        ``getStatus`` returns a JSON list describing every light; ``SetRGB``
        scrolls one light to a new colour and persists it.  Unknown commands
        now yield a 404 instead of an implicit ``None`` (which made Django
        raise a server error).
        """
        import json  # local import keeps the fix self-contained
        command = request.GET.get('command', '')
        if command == "getStatus":
            # json.dumps escapes names safely; the previous hand-built string
            # produced broken JSON whenever a name contained a quote.
            payload = [
                {
                    "id": str(alight.id),
                    "Light": alight.LightName.replace("_", " "),
                    "Room": alight.Room.Name.replace("_", " "),
                    "Type": alight.LightType,
                    "State": alight.LightState,
                    "r": str(alight.R),
                    "g": str(alight.G),
                    "b": str(alight.B),
                }
                for alight in Lights.objects.all()
            ]
            return HttpResponse(json.dumps(payload))
        elif command == "SetRGB":
            lightId = request.GET.get('light', '0')
            theLight = Lights.objects.get(id=lightId)
            newR = request.GET.get('r', '')
            newG = request.GET.get('g', '')
            newB = request.GET.get('b', '')
            try:
                valid = (0 <= int(newR) < 256 and 0 <= int(newG) < 256
                         and 0 <= int(newB) < 256)
            except ValueError:
                # Non-numeric input previously crashed with a server error.
                valid = False
            if not valid:
                return HttpResponse("Please Enter A Number Between 0 And 255", status=400)
            # Scroll from the colour currently shown (black when off) ...
            if theLight.LightState == "Off":
                oldR = oldG = oldB = 0
            else:
                oldR = theLight.R
                oldG = theLight.G
                oldB = theLight.B
            # ... to the full requested colour.  The original passed the old
            # G/B components as the target, so only red ever scrolled, and it
            # never used the computed old colour at all.
            DeviceControl().scrollDeviceRGBState(
                theLight.IpAddress, theLight.DeviceType,
                oldR, oldG, oldB, newR, newG, newB, setType="tread")
            if int(newR) == 0 and int(newG) == 0 and int(newB) == 0:
                theLight.LightState = "Off"
            else:
                theLight.LightState = "On"
            theLight.R = newR
            theLight.G = newG
            theLight.B = newB
            theLight.save()
            return HttpResponse("Ok")
        # Unknown command: report it instead of returning None.
        return HttpResponseNotFound("Unknown command: %s" % command)
from nengo.base import NengoObject, ObjView
from nengo.dists import Uniform, UniformHypersphere
from nengo.neurons import LIF, NeuronTypeParam, Direct
from nengo.params import (
Default, DistributionParam, IntParam, NumberParam,
StochasticProcessParam, StringParam)
import decimal as dc
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.
    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.
    radius : int, optional
        The representational radius of the ensemble.
    encoders : Distribution or ndarray (`n_neurons`, `dimensions`), optional
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    intercepts : Distribution or ndarray (`n_neurons`), optional
        The point along each neuron's encoder where its activity is zero. If
        e is the neuron's encoder, then the activity will be zero when
        dot(x, e) <= c, where c is the given intercept.
    max_rates : Distribution or ndarray (`n_neurons`), optional
        The activity of each neuron when dot(x, e) = 1, where e is the neuron's
        encoder.
    eval_points : Distribution or ndarray (`n_eval_points`, `dims`), optional
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which to
        choose evaluation points. Default: ``UniformHypersphere``.
    n_eval_points : int, optional
        The number of evaluation points to be drawn from the `eval_points`
        distribution. If None (the default), then a heuristic is used to
        determine the number of evaluation points.
    neuron_type : Neurons, optional
        The model that simulates all neurons in the ensemble.
    noise : StochasticProcess, optional
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    seed : int, optional
        The seed used for random number generation.
    label : str, optional
        A name for the ensemble. Used for debugging and visualization.
    """
    # Class-level parameter descriptors: each validates/samples its value per
    # instance; sample_shape ties an array parameter's expected shape to other
    # instance attributes ('*' means any size along that axis).
    n_neurons = IntParam(default=None, low=1)
    dimensions = IntParam(default=None, low=1)
    radius = NumberParam(default=1, low=1e-10)
    neuron_type = NeuronTypeParam(default=LIF())
    encoders = DistributionParam(default=UniformHypersphere(surface=True),
                                 sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistributionParam(default=Uniform(-1.0, 1.0),
                                   optional=True,
                                   sample_shape=('n_neurons',))
    max_rates = DistributionParam(default=Uniform(200, 400),
                                  optional=True,
                                  sample_shape=('n_neurons',))
    n_eval_points = IntParam(default=None, optional=True)
    eval_points = DistributionParam(default=UniformHypersphere(),
                                    sample_shape=('*', 'dimensions'))
    bias = DistributionParam(default=None,
                             optional=True,
                             sample_shape=('n_neurons',))
    gain = DistributionParam(default=None,
                             optional=True,
                             sample_shape=('n_neurons',))
    noise = StochasticProcessParam(default=None, optional=True)
    seed = IntParam(default=None, optional=True)
    label = StringParam(default=None, optional=True)
    def __init__(self, n_neurons, dimensions, radius=Default, encoders=Default,
                 intercepts=Default, max_rates=Default, eval_points=Default,
                 n_eval_points=Default, neuron_type=Default, gain=Default,
                 bias=Default, noise=Default, seed=Default, label=Default):
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.label = label
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise
        self.seed = seed
        # View object used to connect to / probe individual neurons directly.
        self._neurons = Neurons(self)
    def __getitem__(self, key):
        # Indexing/slicing yields a view over a subset of the dimensions.
        return ObjView(self, key)
    def __len__(self):
        return self.dimensions
    @property
    def neurons(self):
        """Direct access to this ensemble's neurons (see ``Neurons``)."""
        return self._neurons
    @neurons.setter
    def neurons(self, dummy):
        raise AttributeError("neurons cannot be overwritten.")
    @property
    def probeable(self):
        # Signal names a Probe may record from this object.
        return ["decoded_output", "input"]
    @property
    def size_in(self):
        # An Ensemble both accepts and emits `dimensions`-sized vectors.
        return self.dimensions
    @property
    def size_out(self):
        return self.dimensions
class Neurons(object):
    """A wrapper around Ensemble for making connections directly to neurons.
    This should only ever be used in the ``Ensemble.neurons`` property,
    as a way to signal to Connection that the connection should be made
    directly to the neurons rather than to the Ensemble's decoded value.
    Does not currently support any other view-like operations.
    """
    def __init__(self, ensemble):
        # The Ensemble this view wraps; every size below is derived from it.
        self.ensemble = ensemble
    def __getitem__(self, key):
        # Indexing/slicing yields a view over a subset of the neurons.
        return ObjView(self, key)
    def __len__(self):
        return self.ensemble.n_neurons
    def __repr__(self):
        return "<Neurons at 0x%x of %r>" % (id(self), self.ensemble)
    def __str__(self):
        return "<Neurons of %s>" % self.ensemble
    @property
    def size_in(self):
        if isinstance(self.ensemble.neuron_type, Direct):
            # This will prevent users from connecting/probing Direct neurons
            # (since there aren't actually any neurons being simulated).
            return 0
        return self.ensemble.n_neurons
    @property
    def size_out(self):
        if isinstance(self.ensemble.neuron_type, Direct):
            # This will prevent users from connecting/probing Direct neurons
            # (since there aren't actually any neurons being simulated).
            return 0
        return self.ensemble.n_neurons
@property
def probeable(self):
return ['output', 'input'] + self.ensemble.neuron_type.probeable | nengo/ensemble.py | from nengo.base import NengoObject, ObjView
from nengo.dists import Uniform, UniformHypersphere
from nengo.neurons import LIF, NeuronTypeParam, Direct
from nengo.params import (
Default, DistributionParam, IntParam, NumberParam,
StochasticProcessParam, StringParam)
import decimal as dc
class Ensemble(NengoObject):
"""A group of neurons that collectively represent a vector.
Parameters
----------
n_neurons : int
The number of neurons.
dimensions : int
The number of representational dimensions.
radius : int, optional
The representational radius of the ensemble.
encoders : Distribution or ndarray (`n_neurons`, `dimensions`), optional
The encoders, used to transform from representational space
to neuron space. Each row is a neuron's encoder, each column is a
representational dimension.
intercepts : Distribution or ndarray (`n_neurons`), optional
The point along each neuron's encoder where its activity is zero. If
e is the neuron's encoder, then the activity will be zero when
dot(x, e) <= c, where c is the given intercept.
max_rates : Distribution or ndarray (`n_neurons`), optional
The activity of each neuron when dot(x, e) = 1, where e is the neuron's
encoder.
eval_points : Distribution or ndarray (`n_eval_points`, `dims`), optional
The evaluation points used for decoder solving, spanning the interval
(-radius, radius) in each dimension, or a distribution from which to
choose evaluation points. Default: ``UniformHypersphere``.
n_eval_points : int, optional
The number of evaluation points to be drawn from the `eval_points`
distribution. If None (the default), then a heuristic is used to
determine the number of evaluation points.
neuron_type : Neurons, optional
The model that simulates all neurons in the ensemble.
noise : StochasticProcess, optional
Random noise injected directly into each neuron in the ensemble
as current. A sample is drawn for each individual neuron on
every simulation step.
seed : int, optional
The seed used for random number generation.
label : str, optional
A name for the ensemble. Used for debugging and visualization.
"""
n_neurons = IntParam(default=None, low=1)
dimensions = IntParam(default=None, low=1)
radius = NumberParam(default=1, low=1e-10)
neuron_type = NeuronTypeParam(default=LIF())
encoders = DistributionParam(default=UniformHypersphere(surface=True),
sample_shape=('n_neurons', 'dimensions'))
intercepts = DistributionParam(default=Uniform(-1.0, 1.0),
optional=True,
sample_shape=('n_neurons',))
max_rates = DistributionParam(default=Uniform(200, 400),
optional=True,
sample_shape=('n_neurons',))
n_eval_points = IntParam(default=None, optional=True)
eval_points = DistributionParam(default=UniformHypersphere(),
sample_shape=('*', 'dimensions'))
bias = DistributionParam(default=None,
optional=True,
sample_shape=('n_neurons',))
gain = DistributionParam(default=None,
optional=True,
sample_shape=('n_neurons',))
noise = StochasticProcessParam(default=None, optional=True)
seed = IntParam(default=None, optional=True)
label = StringParam(default=None, optional=True)
def __init__(self, n_neurons, dimensions, radius=Default, encoders=Default,
intercepts=Default, max_rates=Default, eval_points=Default,
n_eval_points=Default, neuron_type=Default, gain=Default,
bias=Default, noise=Default, seed=Default, label=Default):
self.n_neurons = n_neurons
self.dimensions = dimensions
self.radius = radius
self.encoders = encoders
self.intercepts = intercepts
self.max_rates = max_rates
self.label = label
self.n_eval_points = n_eval_points
self.eval_points = eval_points
self.bias = bias
self.gain = gain
self.neuron_type = neuron_type
self.noise = noise
self.seed = seed
self._neurons = Neurons(self)
def __getitem__(self, key):
return ObjView(self, key)
def __len__(self):
return self.dimensions
@property
def neurons(self):
return self._neurons
@neurons.setter
def neurons(self, dummy):
raise AttributeError("neurons cannot be overwritten.")
@property
def probeable(self):
return ["decoded_output", "input"]
@property
def size_in(self):
return self.dimensions
@property
def size_out(self):
return self.dimensions
class Neurons(object):
"""A wrapper around Ensemble for making connections directly to neurons.
This should only ever be used in the ``Ensemble.neurons`` property,
as a way to signal to Connection that the connection should be made
directly to the neurons rather than to the Ensemble's decoded value.
Does not currently support any other view-like operations.
"""
def __init__(self, ensemble):
self.ensemble = ensemble
def __getitem__(self, key):
return ObjView(self, key)
def __len__(self):
return self.ensemble.n_neurons
def __repr__(self):
return "<Neurons at 0x%x of %r>" % (id(self), self.ensemble)
def __str__(self):
return "<Neurons of %s>" % self.ensemble
@property
def size_in(self):
if isinstance(self.ensemble.neuron_type, Direct):
# This will prevent users from connecting/probing Direct neurons
# (since there aren't actually any neurons being simulated).
return 0
return self.ensemble.n_neurons
@property
def size_out(self):
if isinstance(self.ensemble.neuron_type, Direct):
# This will prevent users from connecting/probing Direct neurons
# (since there aren't actually any neurons being simulated).
return 0
return self.ensemble.n_neurons
@property
def probeable(self):
return ['output', 'input'] + self.ensemble.neuron_type.probeable | 0.912952 | 0.685117 |
from plotnine import *
import pandas as pd
import numpy as np
from mizani.palettes import hue_pal
df = pd.read_csv('rq4_f1.csv')
palette = hue_pal(0.01, 0.6, 0.65, color_space='hls')
pal = palette(4)
old_palette = [pal[1], pal[3], pal[0]]
def fix(model):
    """Map a transfer-model label to its ordering key ("1"-"3").

    Raises KeyError for any other label, exactly like the original dict
    lookup.
    """
    if model == "Transfer-on":
        return "1"
    if model == "Transfer-pc":
        return "2"
    if model == "Transfer-en":
        return "3"
    raise KeyError(model)
palette = hue_pal(0.01, 0.6, 0.65, color_space='hls')
pal = palette(8)
new_df = df.dropna()
new_df = new_df.astype({'Number of training examples': str})
model_labels = ["SpanBERT (L)", "SpanBERT-On (L)",
"Transfer (on)", "Transfer (pc)"]
old_linetype = ["dashed", "dashed", "solid", "solid"]
old_palette = [pal[3], pal[1], pal[2], pal[6], "blue", pal[0], "pink"]
def relabel_facet(f):
    """Translate a facet key into its display label.

    Unknown keys fall back to dropping their 3-character sort prefix.
    """
    labels = {"1solid": "Low", "2dashed": "Medium", "3dotted": "All"}
    return labels.get(f, f[3:])
p = (ggplot(new_df, aes(x='layers', y='Average F1', color='model', group='type'))
+ labs(x='Top k layers are trainable', y='Coref Avg. F1', color='model', linetype="line type")
+ geom_line(aes(linetype="model"), size=1.0)
+ scale_y_continuous()
+ scale_x_continuous(breaks=[0,6,12,18,24])
+ facet_grid(("line type", "exp"), scales="free",
labeller=labeller(cols=relabel_facet,
rows=relabel_facet))
+ scale_color_manual(values = old_palette,
name="Initialization method", labels=model_labels)
+ scale_linetype_manual(values = old_linetype,
name="Initialization method", labels=model_labels)
+ theme_bw()
+ theme(panel_spacing_x=0.25,
axis_title_x=element_text(size=16),
axis_title_y=element_text(size=16),
legend_position="top",
legend_text=element_text(size=12),
legend_title=element_text(size=14),
legend_entry_spacing_x=10.0,
legend_entry_spacing_y=3.0
)
+ guides(color=guide_legend(nrow=2))
)
p.save(filename = 'rq4_f1.pdf', height=5, width=5, units = 'in', dpi=400) | analysis/model_transfer/plot_rq4.py | from plotnine import *
import pandas as pd
import numpy as np
from mizani.palettes import hue_pal
df = pd.read_csv('rq4_f1.csv')
palette = hue_pal(0.01, 0.6, 0.65, color_space='hls')
pal = palette(4)
old_palette = [pal[1], pal[3], pal[0]]
def fix(model):
    """Return the ordering key ("1"-"3") for a transfer-model label.

    Unknown labels raise KeyError, matching the original dict lookup.
    """
    order = ("Transfer-on", "Transfer-pc", "Transfer-en")
    if model not in order:
        raise KeyError(model)
    return str(order.index(model) + 1)
palette = hue_pal(0.01, 0.6, 0.65, color_space='hls')
pal = palette(8)
new_df = df.dropna()
new_df = new_df.astype({'Number of training examples': str})
model_labels = ["SpanBERT (L)", "SpanBERT-On (L)",
"Transfer (on)", "Transfer (pc)"]
old_linetype = ["dashed", "dashed", "solid", "solid"]
old_palette = [pal[3], pal[1], pal[2], pal[6], "blue", pal[0], "pink"]
def relabel_facet(f):
    """Human-readable facet label; unknown keys lose their 3-char prefix."""
    if f == "1solid":
        return "Low"
    if f == "2dashed":
        return "Medium"
    if f == "3dotted":
        return "All"
    return f[3:]
p = (ggplot(new_df, aes(x='layers', y='Average F1', color='model', group='type'))
+ labs(x='Top k layers are trainable', y='Coref Avg. F1', color='model', linetype="line type")
+ geom_line(aes(linetype="model"), size=1.0)
+ scale_y_continuous()
+ scale_x_continuous(breaks=[0,6,12,18,24])
+ facet_grid(("line type", "exp"), scales="free",
labeller=labeller(cols=relabel_facet,
rows=relabel_facet))
+ scale_color_manual(values = old_palette,
name="Initialization method", labels=model_labels)
+ scale_linetype_manual(values = old_linetype,
name="Initialization method", labels=model_labels)
+ theme_bw()
+ theme(panel_spacing_x=0.25,
axis_title_x=element_text(size=16),
axis_title_y=element_text(size=16),
legend_position="top",
legend_text=element_text(size=12),
legend_title=element_text(size=14),
legend_entry_spacing_x=10.0,
legend_entry_spacing_y=3.0
)
+ guides(color=guide_legend(nrow=2))
)
p.save(filename = 'rq4_f1.pdf', height=5, width=5, units = 'in', dpi=400) | 0.74055 | 0.265184 |
from collections import defaultdict
from collections import OrderedDict
import datetime
import os
import sys
_REMOTE_API_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)
sys.path.insert(1, _REMOTE_API_DIR)
import remote_api
from model import analysis_status
from model.wf_swarming_task import WfSwarmingTask
NOT_AVAILABLE = 'N/A'
# TODO(lijeffrey): Refactor helper methods into module sharable with
# try_job_data_query.py.
def _GetAverageOfNumbersInList(numbers):
"""Returns a float average of numbers or NOT_AVAILABLE if numbers is empty."""
return (float(sum(numbers)) / len(numbers)) if numbers else NOT_AVAILABLE
def _FormatDigits(number):
"""Formats number into a 2-digit float, or NOT_AVAILABLE."""
if isinstance(number, (int, float)):
return float('%.2f' % number)
return NOT_AVAILABLE
def _FormatSecondsAsHMS(seconds):
  """Renders |seconds| as 'H:MM:SS'; NOT_AVAILABLE passes through unchanged."""
  if seconds == NOT_AVAILABLE:
    return NOT_AVAILABLE
  hours, remainder = divmod(seconds, 3600)
  minutes, secs = divmod(remainder, 60)
  return '%d:%02d:%02d' % (hours, minutes, secs)
def _FormatStepName(step_name):
# Formats step_name to return only the first word (the step name itself).
# Step names are expected to be in either the format 'step_name' or
# 'step_name on platform'.
return step_name.strip().split(' ')[0]
def _CategorizeSwarmingTaskData(swarming_task_list):
  """Categorizes swarming_task_list into a dict.
  Args:
    swarming_task_list: A list of WfSwarmingTask objects.
  Returns:
    A dict in the format:
    {
        priority1: {
            'master_name1': {
                'builder_name1': {
                    'step_name1': [WfSwarmingTask1, WfSwarmingTask2, ...],
                    ...
                },
                ...
            },
            ...
        },
        ...
    }
  """
  # Four nested defaultdicts so new priority/master/builder/step buckets can
  # be appended to without explicit initialization.
  categorized_data = defaultdict(
      lambda: defaultdict(
          lambda: defaultdict(
              lambda: defaultdict(list))))
  for swarming_task in swarming_task_list:
    if (not swarming_task.parameters or not swarming_task.tests_statuses or
        swarming_task.status != analysis_status.COMPLETED):
      # Disregard any swarming tasks that are not yet completed or were
      # triggered before 'parameters' and 'tests_statuses' were introduced.
      continue
    # 'priority' is assumed present whenever parameters is non-empty --
    # TODO confirm against the code that triggers these tasks.
    priority = swarming_task.parameters['priority']
    master_name = swarming_task.master_name
    builder_name = swarming_task.builder_name
    # The datastore entity id doubles as the step name.
    step_name = swarming_task.key.id()
    categorized_data[priority][master_name][builder_name][step_name].append(
        swarming_task)
  return categorized_data
def _GetReportInformation(swarming_task_list, start_date, end_date):
  """Computes aggregate swarming task metadata.
  Args:
    swarming_task_list: A list of WfSwarmingTask entities.
    start_date: The earliest request date to compute data.
    end_date: The latest request date to compute data.
  Returns:
    A dict with the keys 'swarming_tasks_per_day', 'average_execution_time',
    'average_time_in_queue', 'longest_execution_time',
    'shortest_execution_time', 'tests_times_iterations',
    'average_number_of_iterations', 'average_number_of_tests_run' and
    'error_rate'.  Any value that cannot be computed is NOT_AVAILABLE.
  """
  swarming_tasks_per_day = NOT_AVAILABLE
  average_execution_time = NOT_AVAILABLE
  average_time_in_queue = NOT_AVAILABLE
  longest_execution_time = NOT_AVAILABLE
  shortest_execution_time = NOT_AVAILABLE
  average_number_of_iterations = NOT_AVAILABLE
  average_number_of_tests_run = NOT_AVAILABLE
  # Must be initialized up front: the original left this unset when
  # |swarming_task_list| was empty, raising NameError at the return below.
  tests_times_iterations = NOT_AVAILABLE
  error_rate = NOT_AVAILABLE
  if swarming_task_list:
    task_count = len(swarming_task_list)
    # Guard against a same-day range, which previously raised
    # ZeroDivisionError.
    days = (end_date - start_date).days
    swarming_tasks_per_day = (
        task_count / float(days) if days else NOT_AVAILABLE)
    execution_times_seconds = []
    in_queue_times = []
    iteration_counts = []
    tests_counts = []
    error_count = 0
    for swarming_task in swarming_task_list:
      # Execution time (excludes time spent waiting in the queue).
      if swarming_task.started_time and swarming_task.completed_time:
        execution_times_seconds.append(
            (swarming_task.completed_time - swarming_task.started_time).seconds)
      # In-queue time.
      if swarming_task.started_time and swarming_task.created_time:
        in_queue_times.append(
            (swarming_task.started_time - swarming_task.created_time).seconds)
      # Number of iterations.
      iterations_to_rerun = swarming_task.parameters.get('iterations_to_rerun')
      if iterations_to_rerun is not None:
        iteration_counts.append(iterations_to_rerun)
      # Number of tests.
      number_of_tests = len(swarming_task.tests_statuses)
      if number_of_tests:
        tests_counts.append(number_of_tests)
      # Error rate.
      if swarming_task.status == analysis_status.ERROR:
        error_count += 1
    # The helper already returns NOT_AVAILABLE for empty lists.
    average_execution_time = _GetAverageOfNumbersInList(
        execution_times_seconds)
    average_time_in_queue = _GetAverageOfNumbersInList(in_queue_times)
    longest_execution_time = (
        str(datetime.timedelta(seconds=max(execution_times_seconds)))
        if execution_times_seconds else NOT_AVAILABLE)
    shortest_execution_time = (
        str(datetime.timedelta(seconds=min(execution_times_seconds)))
        if execution_times_seconds else NOT_AVAILABLE)
    average_number_of_iterations = _GetAverageOfNumbersInList(iteration_counts)
    average_number_of_tests_run = _GetAverageOfNumbersInList(tests_counts)
    # Only multiply when both averages are numeric; the original raised a
    # TypeError whenever either one was the NOT_AVAILABLE string.
    if (average_number_of_iterations != NOT_AVAILABLE and
        average_number_of_tests_run != NOT_AVAILABLE):
      tests_times_iterations = (
          average_number_of_iterations * average_number_of_tests_run)
    # Float division: under Python 2 int / int truncated this to 0 for any
    # partial error rate.
    error_rate = error_count / float(task_count)
  return {
      'swarming_tasks_per_day': swarming_tasks_per_day,
      'average_execution_time': average_execution_time,
      'average_time_in_queue': average_time_in_queue,
      'longest_execution_time': longest_execution_time,
      'shortest_execution_time': shortest_execution_time,
      'tests_times_iterations': tests_times_iterations,
      'average_number_of_iterations': average_number_of_iterations,
      'average_number_of_tests_run': average_number_of_tests_run,
      'error_rate': error_rate
  }
def _GetReport(categorized_swarming_task_dict, start_date, end_date):
"""Returns a swarming task data report as an ordered dict sorted by priority.
Args:
categorized_swarming_task_dict: A dict categorizing WFSwarmingTask entities
organized by priority, master_name, builder_name, step_name. This dict
should be the output from _CategorizeSwarmingTaskData().
start_date: The earliest request date for which data should be computed.
end_date: The latest request date for which data should be computed.
Returns:
An ordered dict by highest priority (lower priority number) swarming tasks
in the format:
{
priority: {
'master_name': {
'builder_name': {
'step_name': {
'swarming_tasks_per_day': number or 'N/A',
'average_execution_time': number or 'N/A',
'average_time_in_queue': number or 'N/A',
'longest_execution_time': number or 'N/A',
'shortest_execution_time': number or 'N/A',
'tests_times_iterations': number or 'N/A'
'average_number_of_tests_run': number or 'N/A',
'average_number_of_iterations': number or 'N/A',
'error_rate': number or 'N/A'
},
...
},
...
},
...
},
...
}
"""
report = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for priority, masters in categorized_swarming_task_dict.iteritems():
for master, builders in masters.iteritems():
for builder, steps in builders.iteritems():
for step, swarming_task_data_list in steps.iteritems():
report[priority][master][builder][step] = _GetReportInformation(
swarming_task_data_list, start_date, end_date)
return OrderedDict(sorted(report.items()))
def CreateHtmlPage(report, start_date, end_date):
"""Generates an html string for displaying the report.
Args:
report: A dict containing all the relevant information returned from
_GetReport().
start_date: The earliest date that a swarming task was requested.
end_date: The latest date that a swarming task was requested.
Returns:
A string containing the html body for the final report page.
"""
html = """
<style>
table {
border-collapse: collapse;
border: 1px solid gray;
}
table td, th {
border: 1px solid gray;
}
</style>"""
html += '<b>Swarming task metadata from %s to %s (%s days)</b>' % (
str(start_date), str(end_date), (end_date - start_date).days)
html += '<h1>Aggregate metadata for swarming tasks by priority</h1>'
cell_template = '<td>%s</td>'
for priority, masters in report.iteritems():
html += '<h2>Task Priority: %s</h2>' % priority
html += """
<table>
<tr>
<th>Master</th>
<th>Builder</th>
<th>Step</th>
<th>Average # Tasks Per Day</th>
<th>Average Time In Queue</th>
<th>Average Execution Time</th>
<th>Longest Execution Time</th>
<th>Shortest Execution Time</th>
<th># Tests * # Iterations</th>
<th>Average # Iterations</th>
<th>Average # Tests Run</th>
<th>Error Rate</th>
</tr>"""
for master_name, builder_reports in masters.iteritems():
for builder_name, steps in builder_reports.iteritems():
for step_name in steps:
builder_report = (
report[priority][master_name][builder_name][step_name])
html += '<tr>'
html += cell_template % master_name
html += cell_template % builder_name
html += cell_template % _FormatStepName(step_name)
html += cell_template % _FormatDigits(
builder_report['swarming_tasks_per_day'])
html += cell_template % _FormatSecondsAsHMS(_FormatDigits(
builder_report['average_time_in_queue']))
html += cell_template % _FormatSecondsAsHMS(_FormatDigits(
builder_report['average_execution_time']))
html += cell_template % builder_report['longest_execution_time']
html += cell_template % builder_report['shortest_execution_time']
html += cell_template % _FormatDigits(
builder_report['tests_times_iterations'])
html += cell_template % _FormatDigits(
builder_report['average_number_of_iterations'])
html += cell_template % _FormatDigits(
builder_report['average_number_of_tests_run'])
html += cell_template % _FormatDigits(builder_report['error_rate'])
html += '</table>'
return html
if __name__ == '__main__':
# Set up the Remote API to use services on the live App Engine.
remote_api.EnableRemoteApi(app_id='findit-for-me')
START_DATE = datetime.datetime(2016, 2, 1)
END_DATE = datetime.datetime(2016, 3, 7)
wf_analysis_query = WfSwarmingTask.query(
WfSwarmingTask.created_time >= START_DATE,
WfSwarmingTask.created_time < END_DATE)
data_list = wf_analysis_query.fetch()
categorized_data_dict = _CategorizeSwarmingTaskData(data_list)
final_report = _GetReport(categorized_data_dict, START_DATE, END_DATE)
findit_tmp_dir = os.environ.get('TMP_DIR')
if not findit_tmp_dir:
findit_tmp_dir = os.getcwd()
report_path = os.path.join(findit_tmp_dir,
'swarming_task_metadata_report.html')
with open(report_path, 'w') as f:
f.write(CreateHtmlPage(final_report, START_DATE, END_DATE))
print 'Swarming task metadata report available at file://%s' % report_path | appengine/findit/util_scripts/remote_queries/swarming_task_data_query.py | from collections import defaultdict
from collections import OrderedDict
import datetime
import os
import sys
_REMOTE_API_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)
sys.path.insert(1, _REMOTE_API_DIR)
import remote_api
from model import analysis_status
from model.wf_swarming_task import WfSwarmingTask
NOT_AVAILABLE = 'N/A'
# TODO(lijeffrey): Refactor helper methods into module sharable with
# try_job_data_query.py.
def _GetAverageOfNumbersInList(numbers):
"""Returns a float average of numbers or NOT_AVAILABLE if numbers is empty."""
return (float(sum(numbers)) / len(numbers)) if numbers else NOT_AVAILABLE
def _FormatDigits(number):
"""Formats number into a 2-digit float, or NOT_AVAILABLE."""
if isinstance(number, (int, float)):
return float('%.2f' % number)
return NOT_AVAILABLE
def _FormatSecondsAsHMS(seconds):
"""Formats the number of seconds into hours, minutes, seconds."""
if seconds == NOT_AVAILABLE:
return NOT_AVAILABLE
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return '%d:%02d:%02d' % (hours, minutes, seconds)
def _FormatStepName(step_name):
# Formats step_name to return only the first word (the step name itself).
# Step names are expected to be in either the format 'step_name' or
# 'step_name on platform'.
return step_name.strip().split(' ')[0]
def _CategorizeSwarmingTaskData(swarming_task_list):
"""Categorizes swarming_task_list into a dict.
Args:
swarming_task_list: A list of WfSwarmingTask objects.
Returns:
A dict in the format:
{
priority1: {
master_name1': {
'builder_name1': {
'step_name1': [WfSwarmingTask1, WfSwarmingTask2, ...],
...
},
...
},
...
},
...
}
"""
categorized_data = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(list))))
for swarming_task in swarming_task_list:
if (not swarming_task.parameters or not swarming_task.tests_statuses or
swarming_task.status != analysis_status.COMPLETED):
# Disregard any swarming tasks that are not yet completed or were
# triggered before 'parameters' and 'tests_statuses' were introduced.
continue
priority = swarming_task.parameters['priority']
master_name = swarming_task.master_name
builder_name = swarming_task.builder_name
step_name = swarming_task.key.id()
categorized_data[priority][master_name][builder_name][step_name].append(
swarming_task)
return categorized_data
def _GetReportInformation(swarming_task_list, start_date, end_date):
"""Computes and returns swarming task metadata in a dict.
Args:
swarming_task_list: A list of WfSwarmingTask entities.
start_date: The earliest request date to compute data.
end_date: The latest request date to compute data.
Returns:
A dict in the following format:
{
'swarming_tasks_per_day': The average number of swwarming tasks
requested over the time period specified,
'average_execution_time': The average amount of time spent on each
swarming task not including in-queue time.
'average_time_in_queue': The average amount of time a swarming task
spends in-queue before it is picked up.
'longest_execution_time': The length of time of the slowest swarming
task in the period requested,
'shortest_execution_time': The length of time of the fastest swarming
task in the period requested.
'tests_times_iterations': The number of tests multiplied by the number
of iterations that test was run.
'average_number_of_iterations': The average number of iterations each
test for this step was run.
'error_rate': The number of tasks that ended in error out of all tasks
in swarming_task_list.
}
"""
swarming_tasks_per_day = NOT_AVAILABLE
average_execution_time = NOT_AVAILABLE
average_time_in_queue = NOT_AVAILABLE
longest_execution_time = NOT_AVAILABLE
shortest_execution_time = NOT_AVAILABLE
average_number_of_iterations = NOT_AVAILABLE
average_number_of_tests_run = NOT_AVAILABLE
error_rate = NOT_AVAILABLE
if swarming_task_list:
task_count = len(swarming_task_list)
swarming_tasks_per_day = task_count / float((end_date - start_date).days)
execution_times_seconds = []
in_queue_times = []
iteration_counts = []
tests_counts = []
error_count = 0
for swarming_task in swarming_task_list:
# Execution time.
if swarming_task.started_time and swarming_task.completed_time:
execution_times_seconds.append(
(swarming_task.completed_time - swarming_task.started_time).seconds)
# In-queue time.
if swarming_task.started_time and swarming_task.created_time:
in_queue_times.append(
(swarming_task.started_time - swarming_task.created_time).seconds)
# Number of iterations.
iterations_to_rerun = swarming_task.parameters.get(
'iterations_to_rerun')
if iterations_to_rerun is not None:
iteration_counts.append(iterations_to_rerun)
# Number of tests.
number_of_tests = len(swarming_task.tests_statuses)
if number_of_tests:
tests_counts.append(number_of_tests)
# Error rate.
if swarming_task.status == analysis_status.ERROR:
error_count += 1
average_execution_time = (_GetAverageOfNumbersInList(
execution_times_seconds) if execution_times_seconds else NOT_AVAILABLE)
average_time_in_queue = (
_GetAverageOfNumbersInList(in_queue_times) if in_queue_times else
NOT_AVAILABLE)
longest_execution_time = (
str(datetime.timedelta(seconds=max(execution_times_seconds)))
if execution_times_seconds else NOT_AVAILABLE)
shortest_execution_time = (
str(datetime.timedelta(seconds=min(execution_times_seconds)))
if execution_times_seconds else NOT_AVAILABLE)
average_number_of_iterations = _GetAverageOfNumbersInList(iteration_counts)
average_number_of_tests_run = _GetAverageOfNumbersInList(tests_counts)
tests_times_iterations = (
average_number_of_iterations * average_number_of_tests_run)
error_rate = error_count / task_count
return {
'swarming_tasks_per_day': swarming_tasks_per_day,
'average_execution_time': average_execution_time,
'average_time_in_queue': average_time_in_queue,
'longest_execution_time': longest_execution_time,
'shortest_execution_time': shortest_execution_time,
'tests_times_iterations': tests_times_iterations,
'average_number_of_iterations': average_number_of_iterations,
'average_number_of_tests_run': average_number_of_tests_run,
'error_rate': error_rate
}
def _GetReport(categorized_swarming_task_dict, start_date, end_date):
"""Returns a swarming task data report as an ordered dict sorted by priority.
Args:
categorized_swarming_task_dict: A dict categorizing WFSwarmingTask entities
organized by priority, master_name, builder_name, step_name. This dict
should be the output from _CategorizeSwarmingTaskData().
start_date: The earliest request date for which data should be computed.
end_date: The latest request date for which data should be computed.
Returns:
An ordered dict by highest priority (lower priority number) swarming tasks
in the format:
{
priority: {
'master_name': {
'builder_name': {
'step_name': {
'swarming_tasks_per_day': number or 'N/A',
'average_execution_time': number or 'N/A',
'average_time_in_queue': number or 'N/A',
'longest_execution_time': number or 'N/A',
'shortest_execution_time': number or 'N/A',
'tests_times_iterations': number or 'N/A'
'average_number_of_tests_run': number or 'N/A',
'average_number_of_iterations': number or 'N/A',
'error_rate': number or 'N/A'
},
...
},
...
},
...
},
...
}
"""
report = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for priority, masters in categorized_swarming_task_dict.iteritems():
for master, builders in masters.iteritems():
for builder, steps in builders.iteritems():
for step, swarming_task_data_list in steps.iteritems():
report[priority][master][builder][step] = _GetReportInformation(
swarming_task_data_list, start_date, end_date)
return OrderedDict(sorted(report.items()))
def CreateHtmlPage(report, start_date, end_date):
"""Generates an html string for displaying the report.
Args:
report: A dict containing all the relevant information returned from
_GetReport().
start_date: The earliest date that a swarming task was requested.
end_date: The latest date that a swarming task was requested.
Returns:
A string containing the html body for the final report page.
"""
html = """
<style>
table {
border-collapse: collapse;
border: 1px solid gray;
}
table td, th {
border: 1px solid gray;
}
</style>"""
html += '<b>Swarming task metadata from %s to %s (%s days)</b>' % (
str(start_date), str(end_date), (end_date - start_date).days)
html += '<h1>Aggregate metadata for swarming tasks by priority</h1>'
cell_template = '<td>%s</td>'
for priority, masters in report.iteritems():
html += '<h2>Task Priority: %s</h2>' % priority
html += """
<table>
<tr>
<th>Master</th>
<th>Builder</th>
<th>Step</th>
<th>Average # Tasks Per Day</th>
<th>Average Time In Queue</th>
<th>Average Execution Time</th>
<th>Longest Execution Time</th>
<th>Shortest Execution Time</th>
<th># Tests * # Iterations</th>
<th>Average # Iterations</th>
<th>Average # Tests Run</th>
<th>Error Rate</th>
</tr>"""
for master_name, builder_reports in masters.iteritems():
for builder_name, steps in builder_reports.iteritems():
for step_name in steps:
builder_report = (
report[priority][master_name][builder_name][step_name])
html += '<tr>'
html += cell_template % master_name
html += cell_template % builder_name
html += cell_template % _FormatStepName(step_name)
html += cell_template % _FormatDigits(
builder_report['swarming_tasks_per_day'])
html += cell_template % _FormatSecondsAsHMS(_FormatDigits(
builder_report['average_time_in_queue']))
html += cell_template % _FormatSecondsAsHMS(_FormatDigits(
builder_report['average_execution_time']))
html += cell_template % builder_report['longest_execution_time']
html += cell_template % builder_report['shortest_execution_time']
html += cell_template % _FormatDigits(
builder_report['tests_times_iterations'])
html += cell_template % _FormatDigits(
builder_report['average_number_of_iterations'])
html += cell_template % _FormatDigits(
builder_report['average_number_of_tests_run'])
html += cell_template % _FormatDigits(builder_report['error_rate'])
html += '</table>'
return html
if __name__ == '__main__':
# Set up the Remote API to use services on the live App Engine.
remote_api.EnableRemoteApi(app_id='findit-for-me')
START_DATE = datetime.datetime(2016, 2, 1)
END_DATE = datetime.datetime(2016, 3, 7)
wf_analysis_query = WfSwarmingTask.query(
WfSwarmingTask.created_time >= START_DATE,
WfSwarmingTask.created_time < END_DATE)
data_list = wf_analysis_query.fetch()
categorized_data_dict = _CategorizeSwarmingTaskData(data_list)
final_report = _GetReport(categorized_data_dict, START_DATE, END_DATE)
findit_tmp_dir = os.environ.get('TMP_DIR')
if not findit_tmp_dir:
findit_tmp_dir = os.getcwd()
report_path = os.path.join(findit_tmp_dir,
'swarming_task_metadata_report.html')
with open(report_path, 'w') as f:
f.write(CreateHtmlPage(final_report, START_DATE, END_DATE))
print 'Swarming task metadata report available at file://%s' % report_path | 0.608361 | 0.195786 |
import logging
import json
import os
import requests
from itertools import chain
from typing import Iterable
LOG = logging.getLogger(__name__)
DEFAULT_LABEL_API = os.environ.get("LABEL_API") \
or "https://backoffice.seattleflu.org/labels"
class LabelLayout:
"""
Layouts, based on the kind of identifier, affect the number of copies of
each barcode, the label presentation, and label text.
"""
sku: str
barcode_type: str
copies_per_barcode = 1
reference: str
layouts = {'default'}
blank = {
"text": "",
"copies": 1,
}
def __init__(self, barcodes, layout: str='default'):
if not self.sku:
raise NotImplementedError("sku must be set by a subclass")
if not self.barcode_type:
raise NotImplementedError("barcode_type must be set by a subclass")
if layout not in self.layouts:
raise NotImplementedError(f"layout must be one of: {self.layouts}")
self.barcodes = barcodes
def label(self, barcode):
"""
Returns a label spec for the given *barcode*.
"""
return {
"text": f"{self.barcode_type} {barcode}\n{self.reference or ''}",
"barcode": barcode,
"copies": self.copies_per_barcode,
}
def blanks_before(self, barcode_number):
"""
Returns the number of blank labels to insert before the given
*barcode_number*. Defaults to 0 (no blanks).
"""
return 0
def spec(self):
"""
Returns a layout spec suitable for passing to a `Lab Labels
<https://github.com/MullinsLab/Lab-Labels>`_ web service.
"""
def flatten(iterable):
return list(chain.from_iterable(iterable))
return {
"type": self.sku,
"labels": list(
flatten(
(*([self.blank] * self.blanks_before(number)), labels)
for number, labels
in enumerate(map(self.label, self.barcodes), start = 1)
)
),
}
class LCRY1100TriplicateLayout(LabelLayout):
sku = "LCRY-1100"
copies_per_barcode = 3
def blanks_before(self, barcode_number):
"""
Each barcode maps to 3 labels. Each row is 4 labels wide, so for
better UX we want all labels in the 4th column to be blank. We can
express this without using a mutable label sequence number by inserting
a blank label before every barcode except the first (e.g. the 2nd
barcode normally would start filling in the 4th label; by inserting a
blank, it starts filling in from the 1st label of the next row).
"""
return 1 if barcode_number > 1 else 0
class SamplesLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "SAMPLE"
copies_per_barcode = 2
reference = "seattleflu.org"
def blanks_before(self, barcode_number):
"""
Each barcode maps to 2 labels. Each row is 7 labels wide, so for
better UX we want all labels in the 7th column to be blank. We can
express this without using a mutable label sequence number by
inserting a blank label before every fourth barcode (e.g. the 4th
barcode normally would start filling in the 7th label; by inserting a
blank, it starts filling in from the 1st label of the next row).
"""
return 1 if barcode_number > 1 and (barcode_number - 1) % 3 == 0 else 0
class CollectionsSeattleFluLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsKiosksLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "KIOSK"
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsKiosksAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC KIOSK"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsEnvironmentalLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ENVIRON"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSwabAndSendLayout(LCRY1100TriplicateLayout):
barcode_type = "SWAB & SEND"
reference = "seattleflu.org"
class CollectionsHouseholdObservationLayout(LCRY1100TriplicateLayout):
barcode_type = "HH OBSERVATION"
reference = "seattleflu.org"
class CollectionsHouseholdObservationAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC HH OBS"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsHouseholdInterventionLayout(LCRY1100TriplicateLayout):
barcode_type = "HH INTERVENTION"
reference = "seattleflu.org"
class CollectionsHouseholdInterventionAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC HH INT"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSelfTestLayout(LCRY1100TriplicateLayout):
barcode_type = "HOME TEST"
reference = "seattleflu.org"
class CollectionsFluAtHomeLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "fluathome.org"
class KitsFluAtHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "KIT"
copies_per_barcode = 1
reference = "fluathome.org"
class _TestStripsFluAtHomeLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "TEST STRIP"
copies_per_barcode = 1
reference = "fluathome.org"
class CollectionsScanLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCAN'
copies_per_barcode = 2
reference = "scanpublichealth.org"
class CollectionsScanKiosksLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCAN - STAVE'
copies_per_barcode = 1
reference = "scanpublichealth.org"
class CollectionsCliaComplianceLayout(LabelLayout):
barcode_type = "CLIA"
copies_per_barcode = 1
reference = "seattleflu.org"
layouts = {'default', 'small'}
def __init__(self, barcodes, layout: str='default'):
self.layout = layout
self.sku = "LCRY-2380" if layout == 'small' else "LCRY-1100"
super().__init__(barcodes)
def label(self, barcode):
"""
Returns a label spec for the given *barcode*. If the small layout is
requested, excludes the barcode type and barcode text.
"""
if self.layout == 'small':
return {
"text": self.reference,
"barcode": barcode,
"copies": self.copies_per_barcode,
}
return super().label(barcode)
class CollectionsHaarviLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "HAARVI"
class SamplesHaarviLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "SAMPLE"
copies_per_barcode = 1
reference = "HAARVI"
class CollectionsHouseholdGeneralLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "HH GENERAL"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsUWObservedLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'UW OBSERVED'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsUWHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'UW HOME'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsChildcareLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'CHILDCARE'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSchoolTestingHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCHOOL TESTING HOME'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsSchoolTestingObservedLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCHOOL TESTING OBSERVED'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsAppleRespiratoryLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'APPLE'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsAppleRespiratorySerialLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'APPLE SERIAL'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsAdultFamilyHomeOutbreakLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'AFH OUTBREAK'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsWorkplaceOutbreakLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'WORKPLACE OUTBREAK'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsRadxupYakimaSchoolHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'RADXUP YAKIMA HOME'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsRadxupYakimaSchoolObservedLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'RADXUP YAKIMA OBSERVED'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsUWTinySwabsLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = 'UW TINY'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsUWTinySwabsHomeLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = 'UW TINY U'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsUWTinySwabsObservedLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = 'UW TINY O'
copies_per_barcode = 1
reference = "seattleflu.org"
LAYOUTS = {
"samples": SamplesLayout,
"collections-scan": CollectionsScanLayout,
"collections-scan-kiosks": CollectionsScanKiosksLayout,
"collections-seattleflu.org": CollectionsSeattleFluLayout,
"collections-kiosks": CollectionsKiosksLayout,
"collections-kiosks-asymptomatic": CollectionsKiosksAsymptomaticLayout,
"collections-environmental": CollectionsEnvironmentalLayout,
"collections-swab&send": CollectionsSwabAndSendLayout,
"collections-household-observation": CollectionsHouseholdObservationLayout,
"collections-household-observation-asymptomatic": CollectionsHouseholdObservationAsymptomaticLayout,
"collections-household-intervention": CollectionsHouseholdInterventionLayout,
"collections-household-intervention-asymptomatic": CollectionsHouseholdInterventionAsymptomaticLayout,
"collections-household-general": CollectionsHouseholdGeneralLayout,
"collections-self-test": CollectionsSelfTestLayout,
"collections-fluathome.org": CollectionsFluAtHomeLayout,
"collections-clia-compliance": CollectionsCliaComplianceLayout,
"kits-fluathome.org": KitsFluAtHomeLayout,
"test-strips-fluathome.org": _TestStripsFluAtHomeLayout,
"samples-haarvi": SamplesHaarviLayout,
"collections-haarvi": CollectionsHaarviLayout,
'collections-uw-observed': CollectionsUWObservedLayout,
'collections-uw-home': CollectionsUWHomeLayout,
'collections-childcare': CollectionsChildcareLayout,
'collections-school-testing-home': CollectionsSchoolTestingHomeLayout,
'collections-school-testing-observed': CollectionsSchoolTestingObservedLayout,
'collections-apple-respiratory': CollectionsAppleRespiratoryLayout,
'collections-apple-respiratory-serial': CollectionsAppleRespiratorySerialLayout,
'collections-adult-family-home-outbreak': CollectionsAdultFamilyHomeOutbreakLayout,
'collections-workplace-outbreak': CollectionsWorkplaceOutbreakLayout,
'collections-radxup-yakima-schools-home': CollectionsRadxupYakimaSchoolHomeLayout,
'collections-radxup-yakima-schools-observed': CollectionsRadxupYakimaSchoolObservedLayout,
'collections-uw-tiny-swabs': CollectionsUWTinySwabsLayout,
'collections-uw-tiny-swabs-home': CollectionsUWTinySwabsHomeLayout,
'collections-uw-tiny-swabs-observed': CollectionsUWTinySwabsObservedLayout,
}
def layout_identifiers(set_name: str, identifiers: Iterable,
layout: str='default', copies_per_barcode: int=None) -> LabelLayout:
"""
Use the layout associated with the given identifier *set_name* to make
labels for the given *identifiers*.
Each item in *identifiers* must have a ``barcode`` attribute. These are
passed to the layout.
"""
layout_class = LAYOUTS[set_name]([id.barcode for id in identifiers], layout)
if copies_per_barcode:
setattr(layout_class, 'copies_per_barcode', copies_per_barcode)
return layout_class
def generate_pdf(layout: LabelLayout, api: str = DEFAULT_LABEL_API) -> bytes:
"""
Generate a PDF from the given *layout* using the `Lab Labels
<https://github.com/MullinsLab/Lab-Labels>`_ web service *api*.
Returns a byte string.
"""
spec = json.dumps(layout.spec())
LOG.info(f"Generating PDF using Lab Labels API at {api}")
response = requests.post(f"{api}/stickers",
headers = { "Content-Type": "application/json" },
data = spec)
response.raise_for_status()
return response.content | lib/id3c/labelmaker.py | import logging
import json
import os
import requests
from itertools import chain
from typing import Iterable
LOG = logging.getLogger(__name__)
DEFAULT_LABEL_API = os.environ.get("LABEL_API") \
or "https://backoffice.seattleflu.org/labels"
class LabelLayout:
"""
Layouts, based on the kind of identifier, affect the number of copies of
each barcode, the label presentation, and label text.
"""
sku: str
barcode_type: str
copies_per_barcode = 1
reference: str
layouts = {'default'}
blank = {
"text": "",
"copies": 1,
}
def __init__(self, barcodes, layout: str='default'):
if not self.sku:
raise NotImplementedError("sku must be set by a subclass")
if not self.barcode_type:
raise NotImplementedError("barcode_type must be set by a subclass")
if layout not in self.layouts:
raise NotImplementedError(f"layout must be one of: {self.layouts}")
self.barcodes = barcodes
def label(self, barcode):
"""
Returns a label spec for the given *barcode*.
"""
return {
"text": f"{self.barcode_type} {barcode}\n{self.reference or ''}",
"barcode": barcode,
"copies": self.copies_per_barcode,
}
def blanks_before(self, barcode_number):
"""
Returns the number of blank labels to insert before the given
*barcode_number*. Defaults to 0 (no blanks).
"""
return 0
def spec(self):
"""
Returns a layout spec suitable for passing to a `Lab Labels
<https://github.com/MullinsLab/Lab-Labels>`_ web service.
"""
def flatten(iterable):
return list(chain.from_iterable(iterable))
return {
"type": self.sku,
"labels": list(
flatten(
(*([self.blank] * self.blanks_before(number)), labels)
for number, labels
in enumerate(map(self.label, self.barcodes), start = 1)
)
),
}
class LCRY1100TriplicateLayout(LabelLayout):
sku = "LCRY-1100"
copies_per_barcode = 3
def blanks_before(self, barcode_number):
"""
Each barcode maps to 3 labels. Each row is 4 labels wide, so for
better UX we want all labels in the 4th column to be blank. We can
express this without using a mutable label sequence number by inserting
a blank label before every barcode except the first (e.g. the 2nd
barcode normally would start filling in the 4th label; by inserting a
blank, it starts filling in from the 1st label of the next row).
"""
return 1 if barcode_number > 1 else 0
class SamplesLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "SAMPLE"
copies_per_barcode = 2
reference = "seattleflu.org"
def blanks_before(self, barcode_number):
"""
Each barcode maps to 2 labels. Each row is 7 labels wide, so for
better UX we want all labels in the 7th column to be blank. We can
express this without using a mutable label sequence number by
inserting a blank label before every fourth barcode (e.g. the 4th
barcode normally would start filling in the 7th label; by inserting a
blank, it starts filling in from the 1st label of the next row).
"""
return 1 if barcode_number > 1 and (barcode_number - 1) % 3 == 0 else 0
class CollectionsSeattleFluLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsKiosksLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "KIOSK"
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsKiosksAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC KIOSK"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsEnvironmentalLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ENVIRON"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSwabAndSendLayout(LCRY1100TriplicateLayout):
barcode_type = "SWAB & SEND"
reference = "seattleflu.org"
class CollectionsHouseholdObservationLayout(LCRY1100TriplicateLayout):
barcode_type = "HH OBSERVATION"
reference = "seattleflu.org"
class CollectionsHouseholdObservationAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC HH OBS"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsHouseholdInterventionLayout(LCRY1100TriplicateLayout):
barcode_type = "HH INTERVENTION"
reference = "seattleflu.org"
class CollectionsHouseholdInterventionAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC HH INT"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSelfTestLayout(LCRY1100TriplicateLayout):
barcode_type = "HOME TEST"
reference = "seattleflu.org"
class CollectionsFluAtHomeLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "fluathome.org"
class KitsFluAtHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "KIT"
copies_per_barcode = 1
reference = "fluathome.org"
class _TestStripsFluAtHomeLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "TEST STRIP"
copies_per_barcode = 1
reference = "fluathome.org"
class CollectionsScanLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCAN'
copies_per_barcode = 2
reference = "scanpublichealth.org"
class CollectionsScanKiosksLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCAN - STAVE'
copies_per_barcode = 1
reference = "scanpublichealth.org"
class CollectionsCliaComplianceLayout(LabelLayout):
    # CLIA-compliance labels come in two physical layouts: the default large
    # label and a 'small' label that omits the barcode type and barcode text.
    barcode_type = "CLIA"
    copies_per_barcode = 1
    reference = "seattleflu.org"

    # Valid values for the *layout* constructor argument.
    layouts = {'default', 'small'}

    def __init__(self, barcodes, layout: str = 'default'):
        # The label stock (sku) depends on the requested layout size, so it
        # is set per-instance rather than as a class attribute.
        self.layout = layout
        self.sku = "LCRY-2380" if layout == 'small' else "LCRY-1100"
        super().__init__(barcodes)

    def label(self, barcode):
        """
        Returns a label spec for the given *barcode*. If the small layout is
        requested, excludes the barcode type and barcode text.
        """
        if self.layout == 'small':
            return {
                "text": self.reference,
                "barcode": barcode,
                "copies": self.copies_per_barcode,
            }
        return super().label(barcode)
# More pure-configuration layouts; see the comment at the top of the layout
# section.  The HAARVI layouts use a study name rather than a URL as the
# reference text.
class CollectionsHaarviLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = "COLLECTION"
    copies_per_barcode = 1
    reference = "HAARVI"


class SamplesHaarviLayout(LabelLayout):
    sku = "LCRY-2380"
    barcode_type = "SAMPLE"
    copies_per_barcode = 1
    reference = "HAARVI"


class CollectionsHouseholdGeneralLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = "HH GENERAL"
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsUWObservedLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'UW OBSERVED'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsUWHomeLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'UW HOME'
    copies_per_barcode = 2
    reference = "seattleflu.org"


class CollectionsChildcareLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'CHILDCARE'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsSchoolTestingHomeLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'SCHOOL TESTING HOME'
    copies_per_barcode = 2
    reference = "seattleflu.org"


class CollectionsSchoolTestingObservedLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'SCHOOL TESTING OBSERVED'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsAppleRespiratoryLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'APPLE'
    copies_per_barcode = 2
    reference = "seattleflu.org"


class CollectionsAppleRespiratorySerialLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'APPLE SERIAL'
    copies_per_barcode = 2
    reference = "seattleflu.org"


class CollectionsAdultFamilyHomeOutbreakLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'AFH OUTBREAK'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsWorkplaceOutbreakLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'WORKPLACE OUTBREAK'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsRadxupYakimaSchoolHomeLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'RADXUP YAKIMA HOME'
    copies_per_barcode = 2
    reference = "seattleflu.org"


class CollectionsRadxupYakimaSchoolObservedLayout(LabelLayout):
    sku = "LCRY-1100"
    barcode_type = 'RADXUP YAKIMA OBSERVED'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsUWTinySwabsLayout(LabelLayout):
    sku = "LCRY-2380"
    barcode_type = 'UW TINY'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsUWTinySwabsHomeLayout(LabelLayout):
    sku = "LCRY-2380"
    barcode_type = 'UW TINY U'
    copies_per_barcode = 1
    reference = "seattleflu.org"


class CollectionsUWTinySwabsObservedLayout(LabelLayout):
    sku = "LCRY-2380"
    barcode_type = 'UW TINY O'
    copies_per_barcode = 1
    reference = "seattleflu.org"
# Registry mapping identifier-set names to their label layout classes.
# layout_identifiers() below looks layouts up here by set name.
LAYOUTS = {
    "samples": SamplesLayout,
    "collections-scan": CollectionsScanLayout,
    "collections-scan-kiosks": CollectionsScanKiosksLayout,
    "collections-seattleflu.org": CollectionsSeattleFluLayout,
    "collections-kiosks": CollectionsKiosksLayout,
    "collections-kiosks-asymptomatic": CollectionsKiosksAsymptomaticLayout,
    "collections-environmental": CollectionsEnvironmentalLayout,
    "collections-swab&send": CollectionsSwabAndSendLayout,
    "collections-household-observation": CollectionsHouseholdObservationLayout,
    "collections-household-observation-asymptomatic": CollectionsHouseholdObservationAsymptomaticLayout,
    "collections-household-intervention": CollectionsHouseholdInterventionLayout,
    "collections-household-intervention-asymptomatic": CollectionsHouseholdInterventionAsymptomaticLayout,
    "collections-household-general": CollectionsHouseholdGeneralLayout,
    "collections-self-test": CollectionsSelfTestLayout,
    "collections-fluathome.org": CollectionsFluAtHomeLayout,
    "collections-clia-compliance": CollectionsCliaComplianceLayout,
    "kits-fluathome.org": KitsFluAtHomeLayout,
    "test-strips-fluathome.org": _TestStripsFluAtHomeLayout,
    "samples-haarvi": SamplesHaarviLayout,
    "collections-haarvi": CollectionsHaarviLayout,
    'collections-uw-observed': CollectionsUWObservedLayout,
    'collections-uw-home': CollectionsUWHomeLayout,
    'collections-childcare': CollectionsChildcareLayout,
    'collections-school-testing-home': CollectionsSchoolTestingHomeLayout,
    'collections-school-testing-observed': CollectionsSchoolTestingObservedLayout,
    'collections-apple-respiratory': CollectionsAppleRespiratoryLayout,
    'collections-apple-respiratory-serial': CollectionsAppleRespiratorySerialLayout,
    'collections-adult-family-home-outbreak': CollectionsAdultFamilyHomeOutbreakLayout,
    'collections-workplace-outbreak': CollectionsWorkplaceOutbreakLayout,
    'collections-radxup-yakima-schools-home': CollectionsRadxupYakimaSchoolHomeLayout,
    'collections-radxup-yakima-schools-observed': CollectionsRadxupYakimaSchoolObservedLayout,
    'collections-uw-tiny-swabs': CollectionsUWTinySwabsLayout,
    'collections-uw-tiny-swabs-home': CollectionsUWTinySwabsHomeLayout,
    'collections-uw-tiny-swabs-observed': CollectionsUWTinySwabsObservedLayout,
}
def layout_identifiers(set_name: str, identifiers: Iterable,
                       layout: str = 'default', copies_per_barcode: int = None) -> LabelLayout:
    """
    Use the layout associated with the given identifier *set_name* to make
    labels for the given *identifiers*.

    Each item in *identifiers* must have a ``barcode`` attribute. These are
    passed to the layout.

    If *copies_per_barcode* is not None, it overrides the layout's default
    number of copies printed per barcode.
    """
    # Avoid shadowing the builtin ``id`` while extracting barcodes.
    barcodes = [identifier.barcode for identifier in identifiers]

    # LAYOUTS values are classes; this is an *instance* of the chosen class.
    layout_instance = LAYOUTS[set_name](barcodes, layout)

    # Explicit None check (rather than truthiness) so the override intent is
    # clear; assign directly instead of going through setattr().
    if copies_per_barcode is not None:
        layout_instance.copies_per_barcode = copies_per_barcode

    return layout_instance
def generate_pdf(layout: LabelLayout, api: str = DEFAULT_LABEL_API) -> bytes:
    """
    Generate a PDF from the given *layout* using the `Lab Labels
    <https://github.com/MullinsLab/Lab-Labels>`_ web service *api*.

    Returns a byte string.
    """
    LOG.info(f"Generating PDF using Lab Labels API at {api}")

    # POST the layout spec as a JSON body to the stickers endpoint.
    response = requests.post(
        f"{api}/stickers",
        headers={"Content-Type": "application/json"},
        data=json.dumps(layout.spec()))

    response.raise_for_status()
    return response.content
import pytest
import pandas as pd
from pyranges import PyRanges
def pytest_configure(config):
    """Register the custom pytest markers used by this test suite."""
    markers = (
        "bedtools: tests rely on",
        "explore: functionality not ready for prime-time",
    )
    for marker in markers:
        config.addinivalue_line("markers", marker)
@pytest.fixture
def names():
    # Standard BED6 column names shared by the .bed file fixtures below.
    return "Chromosome Start End Name Score Strand".split()
@pytest.fixture
def chip_10(names):
    # Small ChIP interval fixture loaded from a headerless BED file; the
    # assert guarantees the fixture data carries strand information.
    df = pd.read_csv("tests/chip_10.bed", header=None, names=names, sep="\t")
    gr = PyRanges(df)
    assert gr.stranded
    return gr
@pytest.fixture
def f1(names):
    """PyRanges fixture loaded from tests/f1.bed.

    Consistency fix: previously this fixture requested the ``names``
    fixture but ignored it, re-building the identical column list inline;
    now it uses ``names`` exactly like ``f2`` does.
    """
    df = pd.read_csv("tests/f1.bed", sep="\t", header=None, names=names)
    return PyRanges(df)
@pytest.fixture
def f2(names):
    # PyRanges fixture loaded from tests/f2.bed using the shared BED6 names.
    df = pd.read_csv("tests/f2.bed", sep="\t", header=None, names=names)
    return PyRanges(df)
@pytest.fixture
def chromsizes():
from io import StringIO
df = pd.read_csv(
StringIO("""
chr1 249250621
chr2 243199373
chr3 198022430
chr4 191154276
chr5 180915260
chr6 171115067
chr7 159138663
chrX 155270560
chr8 146364022
chr9 141213431
chr10 135534747
chr11 135006516
chr12 133851895
chr13 115169878
chr14 107349540
chr15 102531392
chr16 90354753
chr17 81195210
chr18 78077248
chr20 63025520
chrY 59373566
chr19 59128983
chr22 51304566
chr21 48129895"""),
sep="\s+",
header=None,
index_col=0)
df.insert(0, "Start", 0)
df = df.reset_index()
df.columns = ["Chromosome", "Start", "End"]
return PyRanges(df) | tests/conftest.py | import pytest
import pandas as pd
from pyranges import PyRanges
def pytest_configure(config):
config.addinivalue_line(
"markers", "bedtools: tests rely on",
)
config.addinivalue_line(
"markers", "explore: functionality not ready for prime-time"
)
@pytest.fixture
def names():
return "Chromosome Start End Name Score Strand".split()
@pytest.fixture
def chip_10(names):
df = pd.read_csv("tests/chip_10.bed", header=None, names=names, sep="\t")
gr = PyRanges(df)
assert gr.stranded
return gr
@pytest.fixture
def f1(names):
df = pd.read_csv(
"tests/f1.bed",
sep="\t",
header=None,
names="Chromosome Start End Name Score Strand".split())
return PyRanges(df)
@pytest.fixture
def f2(names):
df = pd.read_csv("tests/f2.bed", sep="\t", header=None, names=names)
return PyRanges(df)
@pytest.fixture
def chromsizes():
from io import StringIO
df = pd.read_csv(
StringIO("""
chr1 249250621
chr2 243199373
chr3 198022430
chr4 191154276
chr5 180915260
chr6 171115067
chr7 159138663
chrX 155270560
chr8 146364022
chr9 141213431
chr10 135534747
chr11 135006516
chr12 133851895
chr13 115169878
chr14 107349540
chr15 102531392
chr16 90354753
chr17 81195210
chr18 78077248
chr20 63025520
chrY 59373566
chr19 59128983
chr22 51304566
chr21 48129895"""),
sep="\s+",
header=None,
index_col=0)
df.insert(0, "Start", 0)
df = df.reset_index()
df.columns = ["Chromosome", "Start", "End"]
return PyRanges(df) | 0.547706 | 0.374133 |
import asyncio
import asynctest
import asynctest.mock as amock
from opsdroid import events
from opsdroid.core import OpsDroid
from opsdroid.connector import Connector
from opsdroid.cli.start import configure_lang
class TestEventCreator(asynctest.TestCase):
    """Test the opsdroid event creation class"""

    async def setup(self):
        pass

    async def test_create_event(self):
        # An unrecognised event type must produce no event (None).
        creator = events.EventCreator(Connector({}))
        self.assertEqual(None, await creator.create_event({"type": "NotAnEvent"}, ""))
class TestEvent(asynctest.TestCase):
    """Test the opsdroid event class."""

    async def setup(self):
        configure_lang({})

    async def test_event(self):
        # Constructor wiring: user id, user name and target room are stored.
        opsdroid = amock.CoroutineMock()
        mock_connector = Connector({}, opsdroid=opsdroid)
        event = events.Event("user_id", "user", "default", mock_connector)
        self.assertEqual(event.user_id, "user_id")
        self.assertEqual(event.user, "user")
        self.assertEqual(event.target, "default")

    async def test_entities(self):
        opsdroid = amock.CoroutineMock()
        mock_connector = Connector({}, opsdroid=opsdroid)
        event = events.Event("user_id", "user", "default", mock_connector)
        # update_entity stores both the value and its confidence score,
        # and get_entity returns just the value.
        event.update_entity("city", "London", 0.8)
        assert event.entities["city"]["value"] == "London"
        assert event.entities["city"]["confidence"] == 0.8
        assert event.get_entity("city") == "London"

    def test_unique_subclasses(self):
        # Re-defining an Event subclass name raises NameError unless the
        # class opts out of registration with _no_register.
        with self.assertRaises(NameError):

            class Message(events.Event):
                pass

        class Message(events.Event):  # noqa
            _no_register = True
            pass
class TestMessage(asynctest.TestCase):
"""Test the opsdroid message class."""
async def setup(self):
configure_lang({})
async def test_message(self):
with OpsDroid() as opsdroid:
mock_connector = Connector({}, opsdroid=opsdroid)
raw_message = {
"text": "Hello world",
"user_id": "user_id",
"user": "user",
"room": "default",
"timestamp": "01/01/2000 19:23:00",
"messageId": "101",
}
message = events.Message(
text="Hello world",
user_id="user_id",
user="user",
target="default",
connector=mock_connector,
raw_event=raw_message,
)
self.assertEqual(message.text, "Hello world")
self.assertEqual(message.user_id, "user_id")
self.assertEqual(message.user, "user")
self.assertEqual(message.target, "default")
self.assertEqual(message.raw_event["timestamp"], "01/01/2000 19:23:00")
self.assertEqual(message.raw_event["messageId"], "101")
with self.assertRaises(TypeError):
await message.respond("Goodbye world")
# Also try responding with just some empty Event
with self.assertRaises(TypeError):
await message.respond(
events.Event(message.user, message.target, message.connector)
)
async def test_response_effects(self):
"""Responding to a message shouldn't change the message."""
with OpsDroid() as opsdroid:
mock_connector = Connector({}, opsdroid=opsdroid)
message_text = "Hello world"
message = events.Message(
message_text, "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Goodbye world")
self.assertEqual(message_text, message.text)
async def test_thinking_delay(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{
"name": "shell",
"thinking-delay": 3,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("opsdroid.events.Message._thinking_delay") as logmock:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(logmock.called)
async def test_thinking_sleep(self):
with OpsDroid() as opsdroid:
mock_connector_int = Connector(
{
"name": "shell",
"thinking-delay": 3,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep_int:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector_int
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep_int.called)
# Test thinking-delay with a list
mock_connector_list = Connector(
{
"name": "shell",
"thinking-delay": [1, 4],
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep_list:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector_list
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep_list.called)
async def test_typing_delay(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{
"name": "shell",
"typing-delay": 0.3,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("opsdroid.events.Message._typing_delay") as logmock:
with amock.patch("asyncio.sleep") as mocksleep:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(logmock.called)
self.assertTrue(mocksleep.called)
# Test thinking-delay with a list
mock_connector_list = Connector(
{
"name": "shell",
"typing-delay": [1, 4],
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep_list:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector_list
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep_list.called)
async def test_typing_sleep(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{
"name": "shell",
"typing-delay": 6,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep.called)
async def test_react(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{"name": "shell", "thinking-delay": 2, "type": "connector"},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep:
message = events.Message(
"Hello world", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond(events.Reaction("emoji"))
self.assertTrue(mocksleep.called)
class TestFile(asynctest.TestCase):
    """Test the opsdroid file class"""

    async def setup(self):
        configure_lang({})

    async def test_file(self):
        # A File built from raw bytes exposes its metadata and returns the
        # same bytes from get_file_bytes().
        opsdroid = amock.CoroutineMock()
        mock_connector = Connector({}, opsdroid=opsdroid)
        event = events.File(
            bytes("some file contents", "utf-8"),
            user_id="user_id",
            user="user",
            target="default",
            connector=mock_connector,
        )
        self.assertEqual(event.user_id, "user_id")
        self.assertEqual(event.user, "user")
        self.assertEqual(event.target, "default")
        self.assertEqual((await event.get_file_bytes()).decode(), "some file contents")

    def test_error_on_construct(self):
        # Constructing with neither bytes nor a URL, or with both, is invalid.
        with self.assertRaises(ValueError):
            events.File()
        with self.assertRaises(ValueError):
            events.File(b"a", "https://localhost")

    @amock.patch("aiohttp.ClientSession.get")
    async def test_repeat_file_bytes(self, mock_get):
        # URL-backed files are fetched once and then cached.
        f = events.File(url="http://spam.eggs/monty.jpg")
        fut = asyncio.Future()
        fut.set_result(b"bob")
        mock_get.return_value.__aenter__.return_value.read = amock.CoroutineMock(
            return_value=fut
        )
        assert await f.get_file_bytes() == b"bob"
        assert mock_get.call_count == 1
        # Now test we don't re-download the url
        assert await f.get_file_bytes() == b"bob"
        assert mock_get.call_count == 1
class TestImage(asynctest.TestCase):
"""Test the opsdroid image class"""
gif_bytes = (
b"GIF89a\x01\x00\x01\x00\x00\xff\x00,"
b"\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x00;"
)
async def setup(self):
configure_lang({})
async def test_image(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Image(
self.gif_bytes,
user_id="user_id",
user="user",
target="default",
connector=mock_connector,
)
self.assertEqual(event.user_id, "user_id")
self.assertEqual(event.user, "user")
self.assertEqual(event.target, "default")
self.assertEqual(await event.get_file_bytes(), self.gif_bytes)
self.assertEqual(await event.get_mimetype(), "image/gif")
self.assertEqual(await event.get_dimensions(), (1, 1))
async def test_explicit_mime_type(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Image(
self.gif_bytes,
user_id="user_id",
user="user",
target="default",
mimetype="image/jpeg",
connector=mock_connector,
)
self.assertEqual(await event.get_mimetype(), "image/jpeg")
async def test_no_mime_type(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Image(
b"aslkdjsalkdjlaj",
user_id="user_id",
user="user",
target="default",
connector=mock_connector,
)
self.assertEqual(await event.get_mimetype(), "") | tests/test_events.py | import asyncio
import asynctest
import asynctest.mock as amock
from opsdroid import events
from opsdroid.core import OpsDroid
from opsdroid.connector import Connector
from opsdroid.cli.start import configure_lang
class TestEventCreator(asynctest.TestCase):
"""Test the opsdroid event creation class"""
async def setup(self):
pass
async def test_create_event(self):
creator = events.EventCreator(Connector({}))
self.assertEqual(None, await creator.create_event({"type": "NotAnEvent"}, ""))
class TestEvent(asynctest.TestCase):
"""Test the opsdroid event class."""
async def setup(self):
configure_lang({})
async def test_event(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Event("user_id", "user", "default", mock_connector)
self.assertEqual(event.user_id, "user_id")
self.assertEqual(event.user, "user")
self.assertEqual(event.target, "default")
async def test_entities(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Event("user_id", "user", "default", mock_connector)
event.update_entity("city", "London", 0.8)
assert event.entities["city"]["value"] == "London"
assert event.entities["city"]["confidence"] == 0.8
assert event.get_entity("city") == "London"
def test_unique_subclasses(self):
with self.assertRaises(NameError):
class Message(events.Event):
pass
class Message(events.Event): # noqa
_no_register = True
pass
class TestMessage(asynctest.TestCase):
"""Test the opsdroid message class."""
async def setup(self):
configure_lang({})
async def test_message(self):
with OpsDroid() as opsdroid:
mock_connector = Connector({}, opsdroid=opsdroid)
raw_message = {
"text": "Hello world",
"user_id": "user_id",
"user": "user",
"room": "default",
"timestamp": "01/01/2000 19:23:00",
"messageId": "101",
}
message = events.Message(
text="Hello world",
user_id="user_id",
user="user",
target="default",
connector=mock_connector,
raw_event=raw_message,
)
self.assertEqual(message.text, "Hello world")
self.assertEqual(message.user_id, "user_id")
self.assertEqual(message.user, "user")
self.assertEqual(message.target, "default")
self.assertEqual(message.raw_event["timestamp"], "01/01/2000 19:23:00")
self.assertEqual(message.raw_event["messageId"], "101")
with self.assertRaises(TypeError):
await message.respond("Goodbye world")
# Also try responding with just some empty Event
with self.assertRaises(TypeError):
await message.respond(
events.Event(message.user, message.target, message.connector)
)
async def test_response_effects(self):
"""Responding to a message shouldn't change the message."""
with OpsDroid() as opsdroid:
mock_connector = Connector({}, opsdroid=opsdroid)
message_text = "Hello world"
message = events.Message(
message_text, "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Goodbye world")
self.assertEqual(message_text, message.text)
async def test_thinking_delay(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{
"name": "shell",
"thinking-delay": 3,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("opsdroid.events.Message._thinking_delay") as logmock:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(logmock.called)
async def test_thinking_sleep(self):
with OpsDroid() as opsdroid:
mock_connector_int = Connector(
{
"name": "shell",
"thinking-delay": 3,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep_int:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector_int
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep_int.called)
# Test thinking-delay with a list
mock_connector_list = Connector(
{
"name": "shell",
"thinking-delay": [1, 4],
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep_list:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector_list
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep_list.called)
async def test_typing_delay(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{
"name": "shell",
"typing-delay": 0.3,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("opsdroid.events.Message._typing_delay") as logmock:
with amock.patch("asyncio.sleep") as mocksleep:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(logmock.called)
self.assertTrue(mocksleep.called)
# Test thinking-delay with a list
mock_connector_list = Connector(
{
"name": "shell",
"typing-delay": [1, 4],
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep_list:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector_list
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep_list.called)
async def test_typing_sleep(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{
"name": "shell",
"typing-delay": 6,
"type": "connector",
"module_path": "opsdroid-modules.connector.shell",
},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep:
message = events.Message(
"hi", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond("Hello there")
self.assertTrue(mocksleep.called)
async def test_react(self):
with OpsDroid() as opsdroid:
mock_connector = Connector(
{"name": "shell", "thinking-delay": 2, "type": "connector"},
opsdroid=opsdroid,
)
with amock.patch("asyncio.sleep") as mocksleep:
message = events.Message(
"Hello world", "user_id", "user", "default", mock_connector
)
with self.assertRaises(TypeError):
await message.respond(events.Reaction("emoji"))
self.assertTrue(mocksleep.called)
class TestFile(asynctest.TestCase):
"""Test the opsdroid file class"""
async def setup(self):
configure_lang({})
async def test_file(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.File(
bytes("some file contents", "utf-8"),
user_id="user_id",
user="user",
target="default",
connector=mock_connector,
)
self.assertEqual(event.user_id, "user_id")
self.assertEqual(event.user, "user")
self.assertEqual(event.target, "default")
self.assertEqual((await event.get_file_bytes()).decode(), "some file contents")
def test_error_on_construct(self):
with self.assertRaises(ValueError):
events.File()
with self.assertRaises(ValueError):
events.File(b"a", "https://localhost")
@amock.patch("aiohttp.ClientSession.get")
async def test_repeat_file_bytes(self, mock_get):
f = events.File(url="http://spam.eggs/monty.jpg")
fut = asyncio.Future()
fut.set_result(b"bob")
mock_get.return_value.__aenter__.return_value.read = amock.CoroutineMock(
return_value=fut
)
assert await f.get_file_bytes() == b"bob"
assert mock_get.call_count == 1
# Now test we don't re-download the url
assert await f.get_file_bytes() == b"bob"
assert mock_get.call_count == 1
class TestImage(asynctest.TestCase):
"""Test the opsdroid image class"""
gif_bytes = (
b"GIF89a\x01\x00\x01\x00\x00\xff\x00,"
b"\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x00;"
)
async def setup(self):
configure_lang({})
async def test_image(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Image(
self.gif_bytes,
user_id="user_id",
user="user",
target="default",
connector=mock_connector,
)
self.assertEqual(event.user_id, "user_id")
self.assertEqual(event.user, "user")
self.assertEqual(event.target, "default")
self.assertEqual(await event.get_file_bytes(), self.gif_bytes)
self.assertEqual(await event.get_mimetype(), "image/gif")
self.assertEqual(await event.get_dimensions(), (1, 1))
async def test_explicit_mime_type(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Image(
self.gif_bytes,
user_id="user_id",
user="user",
target="default",
mimetype="image/jpeg",
connector=mock_connector,
)
self.assertEqual(await event.get_mimetype(), "image/jpeg")
async def test_no_mime_type(self):
opsdroid = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
event = events.Image(
b"aslkdjsalkdjlaj",
user_id="user_id",
user="user",
target="default",
connector=mock_connector,
)
self.assertEqual(await event.get_mimetype(), "") | 0.657098 | 0.356195 |
from os import getcwd
import pickle
from PyQt5 import QtWidgets as QW
from sigman import file_manager as fm
from sigman import analyzer, EmptyPointsError
import sigman as sm
import importlib
import QtSigman
from QtSigman import DataActionWidgets, DefaultColors
from QtSigman.DataActionWidgets import DataActionStatus
from QtSigman.VisualObjects import VWave
class ActionCancelledError(Exception):
"""Raised when an action is cancelled."""
def loadWave(forbiddenNames):
    """Imports sm.Wave instances from files and opens up a dialog
    window with possible metainformation options for each.

    Returns a list of tuples containing Wave, chosen dictType, color and axis.

    Raises ActionCancelledError if the user cancels the file dialog or any
    per-file settings dialog.
    """
    fileFilter = ('dat (*.dat);;'
                  'Signal express export(*)')
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileNames(filter=fileFilter)
    # Bug fix: getOpenFileNames returns (list_of_paths, selected_filter);
    # cancellation yields an empty *list*, so the old `path[0] == ""` check
    # could never be true.
    if not path[0]:
        raise ActionCancelledError
    setOfWaves = []
    if path[1] == 'dat (*.dat)':
        for filename in path[0]:
            title = filename.split("/")[-1]
            title = title.split(".")[0]
            wave = fm.import_wave(filename, title)
            dictType, color, axis, offset, status = DataActionWidgets.DataSettingsDialog.getDataSettings(
                forbiddenNames=forbiddenNames,
                title=title)
            if status is DataActionStatus.Ok:
                wave.offset = offset
                wave.type = dictType
                setOfWaves.append((wave, dictType, color, axis))
            else:
                raise ActionCancelledError
    elif path[1] == 'Signal express export(*)':
        for filename in path[0]:
            # Bug fix: accumulate waves from every selected file; previously
            # each iteration overwrote the result, so only the last file's
            # waves were returned.
            setOfWaves.extend(fm.import_signal_from_signal_express_file(filename))
    return setOfWaves
def saveData(data, key=''):
    """Ask the user for a target file name (defaulting to *key*.dat) and
    export *data* to it.

    Raises ActionCancelledError if the user cancels the save dialog.
    Export failures signalled by AssertionError inside fm.export are
    silently ignored (best-effort save, preserved from the original).
    """
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.AnyFile)
    fileDialog.setDefaultSuffix('.dat')
    path = fileDialog.getSaveFileName(directory=key + '.dat')
    if path[0] == "":
        raise ActionCancelledError
    # Keep the try scope narrow: only the export itself is best-effort;
    # previously the whole dialog flow sat inside the try block.
    try:
        fm.export(path[0], data)
    except AssertionError:
        pass
def loadPoints(forbiddenNames):
    """Imports sm.Points instances from files and opens up a dialog
    window with possible metainformation options for each.

    Returns a list of tuples containing Points, chosen dictType, color and axis.

    Raises ActionCancelledError if the user cancels the file dialog or any
    per-file settings dialog.
    """
    fileFilter = "dat (*.dat)"
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileNames(filter=fileFilter)
    # Bug fix: getOpenFileNames returns (list_of_paths, selected_filter);
    # cancellation yields an empty *list*, which `path[0] == ""` never matched.
    if not path[0]:
        raise ActionCancelledError
    setOfPoints = []
    for filename in path[0]:
        title = filename.split("/")[-1]
        title = title.split(".")[0]
        points = fm.import_points(filename, title)
        dictType, color, axis, offset, status = DataActionWidgets.DataSettingsDialog.getDataSettings(
            forbiddenNames=forbiddenNames,
            title=title)
        if status is DataActionStatus.Ok:
            points.move_in_time(offset)
            points.type = dictType
            setOfPoints.append((points, dictType, color, axis))
        else:
            raise ActionCancelledError
    return setOfPoints
def loadModelflow(compositeDataWrapper):
    """Import modelflow data and return a tuple of the imported points
    and their names (out_points, out_names).

    Raises ActionCancelledError if the user cancels any dialog or the
    selected reference points cannot be resolved.
    """
    # TODO: Update to most recent zyl functionality
    fileFilter = ('all_supported_files (*.csv *.A00);; '
                  'BeatScope (*.A00);; Finapres Nova (*.csv);; '
                  'all_files (*)')
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileName(filter=fileFilter)
    if path[0] == "":
        raise ActionCancelledError
    ex = DataActionWidgets.ModelflowImportDialog(path[0], compositeDataWrapper)
    try:
        data_points = compositeDataWrapper.points[ex.SelectedPoints()]
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any lookup failure still cancels the action.
        raise ActionCancelledError
    if ex.result() == 1:
        # Map the dialog's selection index to a reference data type code.
        if ex.SelectedPointsType() == 0:
            reference_data_type = 'sbp'
        elif ex.SelectedPointsType() == 1:
            reference_data_type = 'dbp'
        else:
            reference_data_type = 'r'
        out_points, out_names = fm.import_modelflow_data(
            ex.PathModelflow(), data_points, reference_data_type)
    else:
        raise ActionCancelledError
    return out_points, out_names
def setVWaveSettings(vWave, key, allKeys):
    """Open the data-settings dialog for a visual wave and apply the result.

    *key* is the wave's current dict key; *allKeys* are all keys currently in
    use (the wave may keep its own key but not take another's).  On Ok the
    new colour, axis, type/key and offset are applied; on Delete the wave is
    removed.
    """
    forbiddenKeys = list(allKeys)
    forbiddenKeys.remove(key)
    newKey, color, axis, offset, status = DataActionWidgets.DataSettingsDialog.getDataSettings(
        forbiddenNames=forbiddenKeys,
        dictType=key,
        title=key,
        axis=vWave.axis,
        offset=str(vWave.data.offset),
        askDelete=True,
        color=vWave.color)
    if status is DataActionStatus.Ok:
        vWave.setSettings(color, axis)
        vWave.data.type = newKey
        vWave.data.offset = offset
        # Notify listeners that the underlying data changed.
        vWave.data.changed.emit()
        vWave.setDictKey(newKey)
    if status is DataActionStatus.Delete:
        vWave.delete()
def setVPointsSettings(vPoints, key, allKeys):
    """Open the data-settings dialog for a visual points set and apply the
    result.

    Mirrors setVWaveSettings, except the offset dialog starts at "0" and the
    offset is applied by shifting the points in time rather than by setting
    an offset attribute.
    """
    forbiddenKeys = list(allKeys)
    forbiddenKeys.remove(key)
    newKey, color, axis, offset, status = DataActionWidgets.DataSettingsDialog.getDataSettings(
        forbiddenNames=forbiddenKeys,
        dictType=key,
        title=key,
        axis=vPoints.axis,
        offset="0",
        askDelete=True,
        color=vPoints.color)
    if status is DataActionStatus.Ok:
        vPoints.setSettings(color, axis)
        vPoints.data.type = newKey
        vPoints.data.move_in_time(offset)
        vPoints.setDictKey(newKey)
    if status is DataActionStatus.Delete:
        vPoints.delete()
class _PickledCompositeDataWrapper:
    """Object containing all important information from
    CompositeDataWrapper, but without any Qt signals and graphical
    information which would make it otherwise unpickle-able.
    """

    def __init__(self, compositeDataWrapper):
        # Copy every item so pickling never touches the live Qt objects.
        self.waves = {key: item.copy()
                      for key, item in compositeDataWrapper.waves.items()}
        self.points = {key: item.copy()
                       for key, item in compositeDataWrapper.points.items()}
        self.parameters = {
            key: item.copy()
            for key, item in compositeDataWrapper.parameters.items()}
def loadCompositeData():
    """Ask the user for a pickle file and load composite data from it.

    Returns the unpickled object when it is one of the recognized
    composite-data types; otherwise shows a warning dialog and
    returns None.

    Raises:
        ActionCancelledError: if the user cancels the file dialog.
    """
    fileFilter = "pickle (*.pickle)"
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileName(filter=fileFilter)
    # The old ``assert path[0] != ""`` fired before this check, so a
    # cancelled dialog raised AssertionError instead of the intended
    # ActionCancelledError (and was stripped under ``python -O``).
    if path[0] == "":
        raise ActionCancelledError
    with open(path[0], 'rb') as pickleFile:
        compositeData = pickle.load(pickleFile)
    if isinstance(compositeData, (sm.Composite_data,
                                  QtSigman.CompositeDataWrapper,
                                  _PickledCompositeDataWrapper)):
        return compositeData
    QW.QMessageBox.warning(None, 'Error', 'Invalid file')
def saveCompositeData(compositeData):
    """Ask for a target file and pickle the composite data into it.

    The data is wrapped in _PickledCompositeDataWrapper first so that
    Qt-specific members never reach the pickle stream.

    Raises:
        ActionCancelledError: if the user cancels the save dialog.
    """
    dialog = QW.QFileDialog()
    dialog.setFileMode(QW.QFileDialog.AnyFile)
    dialog.setDefaultSuffix('.pickle')
    chosen = dialog.getSaveFileName()
    if chosen[0] == "":
        raise ActionCancelledError
    with open(chosen[0], 'wb') as pickleFile:
        pickle.dump(_PickledCompositeDataWrapper(compositeData), pickleFile)
def modifyWave(compositeDataWrapper):
    """Run a wave-modification procedure chosen via the procedure dialog.

    Replaces the selected time slice of the chosen wave in place.

    Raises:
        ActionCancelledError: if the dialog is not confirmed with Ok.
    """
    (waveKey, pointsDict, beginTime, endTime,
     procedure, arguments, status) = DataActionWidgets.ProcedureDialog.getProcedure(
         'modify', compositeDataWrapper)
    if status is not DataActionStatus.Ok:
        raise ActionCancelledError
    sourceWave = compositeDataWrapper.waves[waveKey]
    replacement = analyzer.modify_wave(sourceWave, pointsDict,
                                       beginTime, endTime,
                                       procedure, arguments)
    compositeDataWrapper.waves[waveKey].replace_slice(
        beginTime, endTime, replacement)
def findPoints(compositeDataWrapper):
    """Run a point-finding procedure and ask for display settings.

    Returns a tuple of (points, dictType, color, axis).

    Raises:
        ActionCancelledError: if any dialog is cancelled or no points
        are found.
    """
    (waveDict, pointsDict, beginTime, endTime,
     procedure, arguments, status) = DataActionWidgets.ProcedureDialog.getProcedure(
         'points', compositeDataWrapper)
    if status is not DataActionStatus.Ok:
        raise ActionCancelledError
    try:
        foundPoints = analyzer.find_points(waveDict, pointsDict,
                                           beginTime, endTime,
                                           procedure, arguments)
        (dictType, color, axis, offset,
         status) = DataActionWidgets.DataSettingsDialog.getDataSettings(
            forbiddenNames=compositeDataWrapper.points.keys(),
            title=procedure.__name__)
        if status is DataActionStatus.Cancel:
            raise ActionCancelledError
        foundPoints.move_in_time(offset)
        return foundPoints, dictType, color, axis
    except EmptyPointsError:
        QW.QMessageBox.warning(None, 'Error', 'Points not found')
        raise ActionCancelledError
def executeMacro (compositeDataWraper, value):
    """Import and run the macro named ``value`` from the ``macros`` package.

    The macro module's ``execute`` is expected to return a
    ``(points, wave)`` pair of result lists -- TODO confirm against the
    macro API.  Returns ``(setOfPoints, setOfWaves)`` ready for
    registration, or ``None`` if the user cancels the settings dialog.
    """
    #TODO: Docs
    path = "macros"
    macro = (importlib.import_module (path+'.'+value+'.start'))
    [points, wave] = macro.execute(compositeDataWraper)
    setOfPoints = []
    setOfWaves = []
    if (len(points)>0):
        for p in points:
            # p presumably is (x_values, y_values, type_name) -- confirm.
            newPoints = sm.Points(p[0],p[1], p[2])
            if (p[2] in compositeDataWraper.points.keys()):
                # Name clash with existing points: ask for new settings.
                dictType, color, axis, offset, status = DataActionWidgets.DataSettingsDialog.getDataSettings(
                    forbiddenNames = compositeDataWraper.points.keys(),
                    title=p[2])
                if status is DataActionStatus.Cancel:
                    # NOTE(review): returns None here while the normal path
                    # returns a 2-tuple; callers must handle both -- confirm.
                    return
            else:
                dictType = p[2]
                color=DefaultColors.getColor(p[2])
                axis = -1
            setOfPoints.append((newPoints, dictType, color, axis))
    if (len(wave)>0):
        for w in wave:
            setOfWaves.append((w, w.type, DefaultColors.getColor(w.type), -1))
    return setOfPoints, setOfWaves | QtSigman/DataActions.py | from os import getcwd
import pickle
from PyQt5 import QtWidgets as QW
from sigman import file_manager as fm
from sigman import analyzer, EmptyPointsError
import sigman as sm
import importlib
import QtSigman
from QtSigman import DataActionWidgets, DefaultColors
from QtSigman.DataActionWidgets import DataActionStatus
from QtSigman.VisualObjects import VWave
class ActionCancelledError(Exception):
    """Raised when an action is cancelled, e.g. the user closes a dialog.

    Used as control flow: callers catch it and simply abandon the
    operation in progress.
    """
def loadWave(forbiddenNames):
    """Imports sm.Wave instances from files and opens up a dialog
    window with possible metainformation options for each.

    Returns a list of tuples containing Wave, chosen dictType, color
    and axis.

    Raises:
        ActionCancelledError: if the user cancels a dialog.
    """
    fileFilter = ('dat (*.dat);;'
                  'Signal express export(*)')
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileNames(filter=fileFilter)
    # getOpenFileNames returns (list_of_paths, selected_filter); on cancel
    # the list is empty.  The previous ``path[0] == ""`` compared a list to
    # a string and therefore never detected cancellation.
    if not path[0]:
        raise ActionCancelledError
    setOfWaves = []
    if path[1] == 'dat (*.dat)':
        for filename in path[0]:
            title = filename.split("/")[-1].split(".")[0]
            wave = fm.import_wave(filename, title)
            dictType, color, axis, offset, status = (
                DataActionWidgets.DataSettingsDialog.getDataSettings(
                    forbiddenNames=forbiddenNames,
                    title=title))
            if status is not DataActionStatus.Ok:
                raise ActionCancelledError
            wave.offset = offset
            wave.type = dictType
            setOfWaves.append((wave, dictType, color, axis))
    elif path[1] == 'Signal express export(*)':
        for filename in path[0]:
            # Accumulate results; previously each file overwrote the list,
            # so only the last selected file was actually imported.
            setOfWaves.extend(
                fm.import_signal_from_signal_express_file(filename))
    return setOfWaves
def saveData (data, key = ''):
    """Ask for a target .dat file and export ``data`` into it.

    :param data: wave or points object accepted by ``fm.export``.
    :param key: suggested file name (without extension) for the dialog.
    """
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.AnyFile)
    fileDialog.setDefaultSuffix('.dat')
    try:
        path = fileDialog.getSaveFileName(directory=key+'.dat' )
        if path[0] == "":
            raise ActionCancelledError
        fm.export(path[0], data)
    except AssertionError:
        # NOTE(review): silently ignores AssertionError from fm.export --
        # presumably a deliberate best-effort save; confirm before changing.
        pass
def loadPoints(forbiddenNames):
    """Imports sm.Points instances from files and opens up a dialog
    window with possible metainformation options for each.

    Returns a list of tuples containing Points, chosen dictType, color
    and axis.

    Raises:
        ActionCancelledError: if the user cancels a dialog.
    """
    fileFilter = "dat (*.dat)"
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileNames(filter=fileFilter)
    # getOpenFileNames returns (list_of_paths, selected_filter); comparing
    # the list to "" never detected cancellation, so a cancelled dialog
    # silently returned an empty list instead of raising.
    if not path[0]:
        raise ActionCancelledError
    setOfPoints = []
    for filename in path[0]:
        title = filename.split("/")[-1].split(".")[0]
        points = fm.import_points(filename, title)
        dictType, color, axis, offset, status = (
            DataActionWidgets.DataSettingsDialog.getDataSettings(
                forbiddenNames=forbiddenNames,
                title=title))
        if status is not DataActionStatus.Ok:
            raise ActionCancelledError
        points.move_in_time(offset)
        points.type = dictType
        setOfPoints.append((points, dictType, color, axis))
    return setOfPoints
def loadModelflow(compositeDataWrapper):
    """Import modelflow data and returns tuple consisting of
    modelflowPoints and modelflowData.

    Raises:
        ActionCancelledError: if the user cancels a dialog or the
        selected reference points are unusable.
    """
    #TODO: Update to most recent zyl functionality
    fileFilter = ('all_supported_files (*.csv *.A00);; '
                  'BeatScope (*.A00);; Finapres Nova (*.csv);; '
                  'all_files (*)')
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileName(filter=fileFilter)
    if path[0] == "":
        raise ActionCancelledError
    ex = DataActionWidgets.ModelflowImportDialog(path[0], compositeDataWrapper)
    try:
        data_points = compositeDataWrapper.points[ex.SelectedPoints()]
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt and SystemExit.
        raise ActionCancelledError
    if ex.result() != 1:
        raise ActionCancelledError
    if ex.SelectedPointsType() == 0:
        reference_data_type = 'sbp'
    elif ex.SelectedPointsType() == 1:
        reference_data_type = 'dbp'
    else:
        reference_data_type = 'r'
    out_points, out_names = fm.import_modelflow_data(
        ex.PathModelflow(), data_points, reference_data_type)
    return out_points, out_names
def setVWaveSettings(vWave, key, allKeys):
    """Open the settings dialog for a VWave and apply the user's choices.

    On Ok the wave's color, axis, type, offset and dictionary key are
    updated and its ``changed`` signal is emitted; on Delete the wave
    is removed.
    """
    otherNames = list(allKeys)
    otherNames.remove(key)
    (newKey, newColor, newAxis, newOffset,
     status) = DataActionWidgets.DataSettingsDialog.getDataSettings(
        forbiddenNames=otherNames,
        dictType=key,
        title=key,
        axis=vWave.axis,
        offset=str(vWave.data.offset),
        askDelete=True,
        color=vWave.color)
    if status is DataActionStatus.Ok:
        vWave.setSettings(newColor, newAxis)
        vWave.data.type = newKey
        vWave.data.offset = newOffset
        vWave.data.changed.emit()
        vWave.setDictKey(newKey)
    if status is DataActionStatus.Delete:
        vWave.delete()
def setVPointsSettings(vPoints, key, allKeys):
    """Open the settings dialog for a VPoints object and apply the result.

    On Ok the points are recolored, retyped, shifted in time by the
    chosen offset and re-keyed; on Delete they are removed.
    """
    otherNames = list(allKeys)
    otherNames.remove(key)
    (newKey, newColor, newAxis, newOffset,
     status) = DataActionWidgets.DataSettingsDialog.getDataSettings(
        forbiddenNames=otherNames,
        dictType=key,
        title=key,
        axis=vPoints.axis,
        offset="0",
        askDelete=True,
        color=vPoints.color)
    if status is DataActionStatus.Ok:
        vPoints.setSettings(newColor, newAxis)
        vPoints.data.type = newKey
        vPoints.data.move_in_time(newOffset)
        vPoints.setDictKey(newKey)
    if status is DataActionStatus.Delete:
        vPoints.delete()
class _PickledCompositeDataWrapper:
    """Object containing all important information from
    CompositeDataWrapper, but without any Qt signals and graphical
    information which would make it otherwise unpickle-able.
    """

    def __init__(self, compositeDataWrapper):
        # Copy every item so pickling never touches the live Qt objects.
        self.waves = {key: item.copy()
                      for key, item in compositeDataWrapper.waves.items()}
        self.points = {key: item.copy()
                       for key, item in compositeDataWrapper.points.items()}
        self.parameters = {
            key: item.copy()
            for key, item in compositeDataWrapper.parameters.items()}
def loadCompositeData():
    """Ask the user for a pickle file and load composite data from it.

    Returns the unpickled object when it is one of the recognized
    composite-data types; otherwise shows a warning dialog and
    returns None.

    Raises:
        ActionCancelledError: if the user cancels the file dialog.
    """
    fileFilter = "pickle (*.pickle)"
    fileDialog = QW.QFileDialog()
    fileDialog.setFileMode(QW.QFileDialog.ExistingFiles)
    path = fileDialog.getOpenFileName(filter=fileFilter)
    # The old ``assert path[0] != ""`` fired before this check, so a
    # cancelled dialog raised AssertionError instead of the intended
    # ActionCancelledError (and was stripped under ``python -O``).
    if path[0] == "":
        raise ActionCancelledError
    with open(path[0], 'rb') as pickleFile:
        compositeData = pickle.load(pickleFile)
    if isinstance(compositeData, (sm.Composite_data,
                                  QtSigman.CompositeDataWrapper,
                                  _PickledCompositeDataWrapper)):
        return compositeData
    QW.QMessageBox.warning(None, 'Error', 'Invalid file')
def saveCompositeData(compositeData):
    """Ask for a target file and pickle the composite data into it.

    The data is wrapped in _PickledCompositeDataWrapper first so that
    Qt-specific members never reach the pickle stream.

    Raises:
        ActionCancelledError: if the user cancels the save dialog.
    """
    dialog = QW.QFileDialog()
    dialog.setFileMode(QW.QFileDialog.AnyFile)
    dialog.setDefaultSuffix('.pickle')
    chosen = dialog.getSaveFileName()
    if chosen[0] == "":
        raise ActionCancelledError
    with open(chosen[0], 'wb') as pickleFile:
        pickle.dump(_PickledCompositeDataWrapper(compositeData), pickleFile)
def modifyWave(compositeDataWrapper):
    """Run a wave-modification procedure chosen via the procedure dialog.

    Replaces the selected time slice of the chosen wave in place.

    Raises:
        ActionCancelledError: if the dialog is not confirmed with Ok.
    """
    (waveKey, pointsDict, beginTime, endTime,
     procedure, arguments, status) = DataActionWidgets.ProcedureDialog.getProcedure(
         'modify', compositeDataWrapper)
    if status is not DataActionStatus.Ok:
        raise ActionCancelledError
    sourceWave = compositeDataWrapper.waves[waveKey]
    replacement = analyzer.modify_wave(sourceWave, pointsDict,
                                       beginTime, endTime,
                                       procedure, arguments)
    compositeDataWrapper.waves[waveKey].replace_slice(
        beginTime, endTime, replacement)
def findPoints(compositeDataWrapper):
    """Run a point-finding procedure and ask for display settings.

    Returns a tuple of (points, dictType, color, axis).

    Raises:
        ActionCancelledError: if any dialog is cancelled or no points
        are found.
    """
    (waveDict, pointsDict, beginTime, endTime,
     procedure, arguments, status) = DataActionWidgets.ProcedureDialog.getProcedure(
         'points', compositeDataWrapper)
    if status is not DataActionStatus.Ok:
        raise ActionCancelledError
    try:
        foundPoints = analyzer.find_points(waveDict, pointsDict,
                                           beginTime, endTime,
                                           procedure, arguments)
        (dictType, color, axis, offset,
         status) = DataActionWidgets.DataSettingsDialog.getDataSettings(
            forbiddenNames=compositeDataWrapper.points.keys(),
            title=procedure.__name__)
        if status is DataActionStatus.Cancel:
            raise ActionCancelledError
        foundPoints.move_in_time(offset)
        return foundPoints, dictType, color, axis
    except EmptyPointsError:
        QW.QMessageBox.warning(None, 'Error', 'Points not found')
        raise ActionCancelledError
def executeMacro (compositeDataWraper, value):
    """Import and run the macro named ``value`` from the ``macros`` package.

    The macro module's ``execute`` is expected to return a
    ``(points, wave)`` pair of result lists -- TODO confirm against the
    macro API.  Returns ``(setOfPoints, setOfWaves)`` ready for
    registration, or ``None`` if the user cancels the settings dialog.
    """
    #TODO: Docs
    path = "macros"
    macro = (importlib.import_module (path+'.'+value+'.start'))
    [points, wave] = macro.execute(compositeDataWraper)
    setOfPoints = []
    setOfWaves = []
    if (len(points)>0):
        for p in points:
            # p presumably is (x_values, y_values, type_name) -- confirm.
            newPoints = sm.Points(p[0],p[1], p[2])
            if (p[2] in compositeDataWraper.points.keys()):
                # Name clash with existing points: ask for new settings.
                dictType, color, axis, offset, status = DataActionWidgets.DataSettingsDialog.getDataSettings(
                    forbiddenNames = compositeDataWraper.points.keys(),
                    title=p[2])
                if status is DataActionStatus.Cancel:
                    # NOTE(review): returns None here while the normal path
                    # returns a 2-tuple; callers must handle both -- confirm.
                    return
            else:
                dictType = p[2]
                color=DefaultColors.getColor(p[2])
                axis = -1
            setOfPoints.append((newPoints, dictType, color, axis))
    if (len(wave)>0):
        for w in wave:
            setOfWaves.append((w, w.type, DefaultColors.getColor(w.type), -1))
    return setOfPoints, setOfWaves | 0.236164 | 0.132206 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'ListAgentPoolQueueStatusResult',
'AwaitableListAgentPoolQueueStatusResult',
'list_agent_pool_queue_status',
]
@pulumi.output_type
class ListAgentPoolQueueStatusResult:
    """
    The QueueStatus of Agent Pool
    """
    def __init__(__self__, count=None):
        # Compare against None explicitly: the previous truthiness guard
        # (``if count and ...``) skipped validation for falsy non-int
        # values such as '' or [], storing them silently.
        if count is not None and not isinstance(count, int):
            raise TypeError("Expected argument 'count' to be a int")
        pulumi.set(__self__, "count", count)

    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        The number of pending runs in the queue
        """
        return pulumi.get(self, "count")
class AwaitableListAgentPoolQueueStatusResult(ListAgentPoolQueueStatusResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns __await__ into a generator, which
        # lets ``await`` resolve this already-materialized result instantly.
        if False:
            yield self
        return ListAgentPoolQueueStatusResult(
            count=self.count)
def list_agent_pool_queue_status(agent_pool_name: Optional[str] = None,
                                 registry_name: Optional[str] = None,
                                 resource_group_name: Optional[str] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListAgentPoolQueueStatusResult:
    """
    The QueueStatus of Agent Pool
    API Version: 2019-06-01-preview.

    :param str agent_pool_name: The name of the agent pool.
    :param str registry_name: The name of the container registry.
    :param str resource_group_name: The name of the resource group to which the container registry belongs.
    """
    __args__ = dict()
    __args__['agentPoolName'] = agent_pool_name
    __args__['registryName'] = registry_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the API version to the SDK's pinned version.
        opts.version = _utilities.get_version()
    # Synchronous invoke; the Awaitable wrapper also supports ``await``.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:containerregistry:listAgentPoolQueueStatus', __args__, opts=opts, typ=ListAgentPoolQueueStatusResult).value
    return AwaitableListAgentPoolQueueStatusResult(
        count=__ret__.count) | sdk/python/pulumi_azure_nextgen/containerregistry/list_agent_pool_queue_status.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'ListAgentPoolQueueStatusResult',
'AwaitableListAgentPoolQueueStatusResult',
'list_agent_pool_queue_status',
]
@pulumi.output_type
class ListAgentPoolQueueStatusResult:
    """
    The QueueStatus of Agent Pool
    """
    def __init__(__self__, count=None):
        # Compare against None explicitly: the previous truthiness guard
        # (``if count and ...``) skipped validation for falsy non-int
        # values such as '' or [], storing them silently.
        if count is not None and not isinstance(count, int):
            raise TypeError("Expected argument 'count' to be a int")
        pulumi.set(__self__, "count", count)

    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        The number of pending runs in the queue
        """
        return pulumi.get(self, "count")
class AwaitableListAgentPoolQueueStatusResult(ListAgentPoolQueueStatusResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns __await__ into a generator, which
        # lets ``await`` resolve this already-materialized result instantly.
        if False:
            yield self
        return ListAgentPoolQueueStatusResult(
            count=self.count)
def list_agent_pool_queue_status(agent_pool_name: Optional[str] = None,
                                 registry_name: Optional[str] = None,
                                 resource_group_name: Optional[str] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListAgentPoolQueueStatusResult:
    """
    The QueueStatus of Agent Pool
    API Version: 2019-06-01-preview.

    :param str agent_pool_name: The name of the agent pool.
    :param str registry_name: The name of the container registry.
    :param str resource_group_name: The name of the resource group to which the container registry belongs.
    """
    __args__ = dict()
    __args__['agentPoolName'] = agent_pool_name
    __args__['registryName'] = registry_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the API version to the SDK's pinned version.
        opts.version = _utilities.get_version()
    # Synchronous invoke; the Awaitable wrapper also supports ``await``.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:containerregistry:listAgentPoolQueueStatus', __args__, opts=opts, typ=ListAgentPoolQueueStatusResult).value
    return AwaitableListAgentPoolQueueStatusResult(
        count=__ret__.count) | 0.790166 | 0.060891 |
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import timeutils
from sqlalchemy.orm import exc as orm_exc
from tacker.common import topics
from tacker import context as t_context
from tacker.db.common_services import common_services_db
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker import manager
from tacker.plugins.common import constants
from tacker import service as tacker_service
from tacker import version
LOG = logging.getLogger(__name__)
class Conductor(manager.Manager):
    """Conductor service manager handling VIM DB updates over RPC."""

    def __init__(self, host, conf=None):
        # Use the supplied config when given, otherwise the global CONF.
        if conf:
            self.conf = conf
        else:
            self.conf = cfg.CONF
        super(Conductor, self).__init__(host=self.conf.host)

    def update_vim(self, context, vim_id, status):
        """Update the status of a VIM record and log a monitor event.

        :param context: RPC request context (unused here).
        :param vim_id: id of the VIM row to update.
        :param status: new status string to store.
        :returns: the status that was written.
        """
        t_admin_context = t_context.get_admin_context()
        update_time = timeutils.utcnow()
        with t_admin_context.session.begin(subtransactions=True):
            try:
                query = t_admin_context.session.query(nfvo_db.Vim)
                query.filter(
                    nfvo_db.Vim.id == vim_id).update(
                        {'status': status,
                         'updated_at': update_time})
            except orm_exc.NoResultFound:
                # NOTE(review): Query.update() is a bulk operation that
                # returns a row count; it does not raise NoResultFound for
                # a missing id, so this handler looks unreachable -- confirm.
                raise nfvo.VimNotFoundException(vim_id=vim_id)
            # Record the status change as a monitoring event.
            event_db = common_services_db.Event(
                resource_id=vim_id,
                resource_type=constants.RES_TYPE_VIM,
                resource_state=status,
                event_details="",
                event_type=constants.RES_EVT_MONITOR,
                timestamp=update_time)
            t_admin_context.session.add(event_db)
        return status
def init(args, **kwargs):
    """Initialize configuration and RPC for the conductor.

    :param args: command-line arguments (without the program name).
    """
    cfg.CONF(args=args, project='tacker',
             version='%%prog %s' % version.version_info.release_string(),
             **kwargs)
    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from tacker.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)
def main(manager='tacker.conductor.conductor_server.Conductor'):
    """Entry point of the tacker-conductor service.

    :param manager: fully qualified name of the manager class to run.
    """
    init(sys.argv[1:])
    logging.setup(cfg.CONF, "tacker")
    oslo_messaging.set_transport_defaults(control_exchange='tacker')
    # NOTE(review): logging.setup() is invoked a second time here; the
    # duplicate call looks redundant -- confirm and drop one.
    logging.setup(cfg.CONF, "tacker")
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)
    server = tacker_service.Service.create(
        binary='tacker-conductor',
        topic=topics.TOPIC_CONDUCTOR,
        manager=manager)
    service.launch(cfg.CONF, server).wait() | tacker/conductor/conductor_server.py |
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import timeutils
from sqlalchemy.orm import exc as orm_exc
from tacker.common import topics
from tacker import context as t_context
from tacker.db.common_services import common_services_db
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker import manager
from tacker.plugins.common import constants
from tacker import service as tacker_service
from tacker import version
LOG = logging.getLogger(__name__)
class Conductor(manager.Manager):
    """Conductor service manager handling VIM DB updates over RPC."""

    def __init__(self, host, conf=None):
        # Use the supplied config when given, otherwise the global CONF.
        if conf:
            self.conf = conf
        else:
            self.conf = cfg.CONF
        super(Conductor, self).__init__(host=self.conf.host)

    def update_vim(self, context, vim_id, status):
        """Update the status of a VIM record and log a monitor event.

        :param context: RPC request context (unused here).
        :param vim_id: id of the VIM row to update.
        :param status: new status string to store.
        :returns: the status that was written.
        """
        t_admin_context = t_context.get_admin_context()
        update_time = timeutils.utcnow()
        with t_admin_context.session.begin(subtransactions=True):
            try:
                query = t_admin_context.session.query(nfvo_db.Vim)
                query.filter(
                    nfvo_db.Vim.id == vim_id).update(
                        {'status': status,
                         'updated_at': update_time})
            except orm_exc.NoResultFound:
                # NOTE(review): Query.update() is a bulk operation that
                # returns a row count; it does not raise NoResultFound for
                # a missing id, so this handler looks unreachable -- confirm.
                raise nfvo.VimNotFoundException(vim_id=vim_id)
            # Record the status change as a monitoring event.
            event_db = common_services_db.Event(
                resource_id=vim_id,
                resource_type=constants.RES_TYPE_VIM,
                resource_state=status,
                event_details="",
                event_type=constants.RES_EVT_MONITOR,
                timestamp=update_time)
            t_admin_context.session.add(event_db)
        return status
def init(args, **kwargs):
    """Initialize configuration and RPC for the conductor.

    :param args: command-line arguments (without the program name).
    """
    cfg.CONF(args=args, project='tacker',
             version='%%prog %s' % version.version_info.release_string(),
             **kwargs)
    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from tacker.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)
def main(manager='tacker.conductor.conductor_server.Conductor'):
    """Entry point of the tacker-conductor service.

    :param manager: fully qualified name of the manager class to run.
    """
    init(sys.argv[1:])
    logging.setup(cfg.CONF, "tacker")
    oslo_messaging.set_transport_defaults(control_exchange='tacker')
    # NOTE(review): logging.setup() is invoked a second time here; the
    # duplicate call looks redundant -- confirm and drop one.
    logging.setup(cfg.CONF, "tacker")
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)
    server = tacker_service.Service.create(
        binary='tacker-conductor',
        topic=topics.TOPIC_CONDUCTOR,
        manager=manager)
    service.launch(cfg.CONF, server).wait() | 0.179567 | 0.047958 |
"""Tests the GRR hunt collectors."""
import unittest
import zipfile
import mock
from grr_response_proto import flows_pb2
from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib import errors
from dftimewolf.lib.collectors import grr_hunt
from tests.lib.collectors.test_data import mock_grr_hosts
# Mocking of classes.
# pylint: disable=invalid-name,arguments-differ
class GRRHuntArtifactCollectorTest(unittest.TestCase):
  """Tests for the GRR artifact collector."""

  @mock.patch('grr_api_client.api.InitHttp')
  def setUp(self, mock_InitHttp):
    # Replace the GRR API factory so SetUp() wires the collector to a mock.
    self.mock_grr_api = mock.Mock()
    mock_InitHttp.return_value = self.mock_grr_api
    self.test_state = state.DFTimewolfState(config.Config)
    self.grr_hunt_artifact_collector = grr_hunt.GRRHuntArtifactCollector(
        self.test_state)
    self.grr_hunt_artifact_collector.SetUp(
        artifacts='RandomArtifact',
        use_tsk=True,
        reason='random reason',
        grr_server_url='http://fake/endpoint',
        grr_username='admin',
        grr_password='<PASSWORD>',
        approvers='approver1,approver2'
    )

  def testProcess(self):
    """Tests that the process function issues correct GRR API calls."""
    self.grr_hunt_artifact_collector.Process()
    # extract call kwargs
    call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]
    self.assertEqual(call_kwargs['flow_args'].artifact_list,
                     ['RandomArtifact'])
    self.assertEqual(call_kwargs['flow_args'].use_tsk, True)
    self.assertEqual(call_kwargs['flow_name'], 'ArtifactCollectorFlow')
    self.assertEqual(call_kwargs['hunt_runner_args'].description,
                     'random reason')
class GRRHuntFileCollectorTest(unittest.TestCase):
  """Tests for the GRR file collector."""

  @mock.patch('grr_api_client.api.InitHttp')
  def setUp(self, mock_InitHttp):
    # Replace the GRR API factory so SetUp() wires the collector to a mock.
    self.mock_grr_api = mock.Mock()
    mock_InitHttp.return_value = self.mock_grr_api
    self.test_state = state.DFTimewolfState(config.Config)
    self.grr_hunt_file_collector = grr_hunt.GRRHuntFileCollector(
        self.test_state)
    self.grr_hunt_file_collector.SetUp(
        file_path_list='/etc/passwd,/etc/shadow',
        reason='random reason',
        grr_server_url='http://fake/endpoint',
        grr_username='admin',
        grr_password='<PASSWORD>',
        approvers='approver1,approver2'
    )

  def testInitialization(self):
    """Tests that the collector can be initialized."""
    # The comma-separated path string must be split into a list by SetUp.
    self.assertEqual(
        self.grr_hunt_file_collector.file_path_list,
        ['/etc/passwd', '/etc/shadow']
    )

  def testProcess(self):
    """Tests that the process method invokes the correct GRR API calls."""
    self.grr_hunt_file_collector.Process()
    # extract call kwargs
    call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]
    self.assertEqual(call_kwargs['flow_args'].paths,
                     ['/etc/passwd', '/etc/shadow'])
    self.assertEqual(call_kwargs['flow_args'].action.action_type,
                     flows_pb2.FileFinderAction.DOWNLOAD)
    self.assertEqual(call_kwargs['flow_name'], 'FileFinder')
    self.assertEqual(call_kwargs['hunt_runner_args'].description,
                     'random reason')
class GRRFHuntDownloader(unittest.TestCase):
  """Tests for the GRR hunt downloader."""
  # NOTE(review): class name looks like a typo for GRRHuntDownloaderTest.

  @mock.patch('grr_api_client.api.InitHttp')
  def setUp(self, mock_InitHttp):
    # Replace the GRR API factory so SetUp() wires the downloader to a mock.
    self.mock_grr_api = mock.Mock()
    mock_InitHttp.return_value = self.mock_grr_api
    self.test_state = state.DFTimewolfState(config.Config)
    self.grr_hunt_downloader = grr_hunt.GRRHuntDownloader(self.test_state)
    self.grr_hunt_downloader.SetUp(
        hunt_id='H:12345',
        reason='random reason',
        grr_server_url='http://fake/endpoint',
        grr_username='admin',
        grr_password='<PASSWORD>',
        approvers='approver1,approver2'
    )
    self.grr_hunt_downloader.output_path = '/tmp/test'

  def testInitialization(self):
    """Tests that the collector is correctly initialized."""
    self.assertEqual(self.grr_hunt_downloader.hunt_id, 'H:12345')

  @mock.patch('dftimewolf.lib.collectors.grr_hunt.GRRHuntDownloader._ExtractHuntResults') # pylint: disable=line-too-long
  @mock.patch('dftimewolf.lib.collectors.grr_hunt.GRRHuntDownloader._GetAndWriteArchive') # pylint: disable=line-too-long
  def testCollectHuntResults(self,
                             mock_get_write_archive,
                             mock_ExtractHuntResults):
    """Tests that hunt results are downloaded to the correct file."""
    self.mock_grr_api.Hunt.return_value.Get.return_value = \
        mock_grr_hosts.MOCK_HUNT
    self.grr_hunt_downloader.Process()
    mock_get_write_archive.assert_called_with(mock_grr_hosts.MOCK_HUNT,
                                              '/tmp/test/H:12345.zip')
    mock_ExtractHuntResults.assert_called_with('/tmp/test/H:12345.zip')

  @mock.patch('os.remove')
  @mock.patch('zipfile.ZipFile.extract')
  def testExtractHuntResults(self, _, mock_remove):
    """Tests that hunt results are correctly extracted."""
    self.grr_hunt_downloader.output_path = '/directory'
    # Expected (hostname, extracted client directory) pairs from the fixture.
    expected = sorted([
        ('greendale-student04.c.greendale.internal',
         '/directory/hunt_H_A43ABF9D/C.4c4223a2ea9cf6f1'),
        ('greendale-admin.c.greendale.internal',
         '/directory/hunt_H_A43ABF9D/C.ba6b63df5d330589'),
        ('greendale-student05.c.greendale.internal',
         '/directory/hunt_H_A43ABF9D/C.fc693a148af801d5')
    ])
    test_zip = 'tests/lib/collectors/test_data/hunt.zip'
    # pylint: disable=protected-access
    result = sorted(self.grr_hunt_downloader._ExtractHuntResults(test_zip))
    self.assertEqual(result, expected)
    # The archive is deleted after successful extraction.
    mock_remove.assert_called_with('tests/lib/collectors/test_data/hunt.zip')

  @mock.patch('os.remove')
  @mock.patch('zipfile.ZipFile.extract')
  def testOSErrorExtractHuntResults(self, mock_extract, mock_remove):
    """Tests that an OSError when reading files generate errors."""
    self.grr_hunt_downloader.output_path = '/directory'
    test_zip = 'tests/lib/collectors/test_data/hunt.zip'
    mock_extract.side_effect = OSError
    # pylint: disable=protected-access
    with self.assertRaises(errors.DFTimewolfError) as error:
      self.grr_hunt_downloader._ExtractHuntResults(test_zip)
    self.assertEqual(1, len(self.test_state.errors))
    self.assertEqual(
        error.exception.message,
        'Error manipulating file tests/lib/collectors/test_data/hunt.zip: ')
    self.assertTrue(error.exception.critical)
    # On failure the archive must be kept for debugging.
    mock_remove.assert_not_called()

  @mock.patch('os.remove')
  @mock.patch('zipfile.ZipFile.extract')
  def testBadZipFileExtractHuntResults(self, mock_extract, mock_remove):
    """Tests that a BadZipFile error when reading files generate errors."""
    self.grr_hunt_downloader.output_path = '/directory'
    test_zip = 'tests/lib/collectors/test_data/hunt.zip'
    mock_extract.side_effect = zipfile.BadZipfile
    # pylint: disable=protected-access
    with self.assertRaises(errors.DFTimewolfError) as error:
      self.grr_hunt_downloader._ExtractHuntResults(test_zip)
    self.assertEqual(1, len(self.test_state.errors))
    self.assertEqual(
        error.exception.message,
        'Bad zipfile tests/lib/collectors/test_data/hunt.zip: ')
    self.assertTrue(error.exception.critical)
    # On failure the archive must be kept for debugging.
    mock_remove.assert_not_called()
if __name__ == '__main__':
unittest.main() | tests/lib/collectors/grr_hunt.py | """Tests the GRR hunt collectors."""
import unittest
import zipfile
import mock
from grr_response_proto import flows_pb2
from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib import errors
from dftimewolf.lib.collectors import grr_hunt
from tests.lib.collectors.test_data import mock_grr_hosts
# Mocking of classes.
# pylint: disable=invalid-name,arguments-differ
class GRRHuntArtifactCollectorTest(unittest.TestCase):
  """Tests for the GRR artifact collector."""

  @mock.patch('grr_api_client.api.InitHttp')
  def setUp(self, mock_InitHttp):
    # Replace the GRR API factory so SetUp() wires the collector to a mock.
    self.mock_grr_api = mock.Mock()
    mock_InitHttp.return_value = self.mock_grr_api
    self.test_state = state.DFTimewolfState(config.Config)
    self.grr_hunt_artifact_collector = grr_hunt.GRRHuntArtifactCollector(
        self.test_state)
    self.grr_hunt_artifact_collector.SetUp(
        artifacts='RandomArtifact',
        use_tsk=True,
        reason='random reason',
        grr_server_url='http://fake/endpoint',
        grr_username='admin',
        grr_password='<PASSWORD>',
        approvers='approver1,approver2'
    )

  def testProcess(self):
    """Tests that the process function issues correct GRR API calls."""
    self.grr_hunt_artifact_collector.Process()
    # extract call kwargs
    call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]
    self.assertEqual(call_kwargs['flow_args'].artifact_list,
                     ['RandomArtifact'])
    self.assertEqual(call_kwargs['flow_args'].use_tsk, True)
    self.assertEqual(call_kwargs['flow_name'], 'ArtifactCollectorFlow')
    self.assertEqual(call_kwargs['hunt_runner_args'].description,
                     'random reason')
class GRRHuntFileCollectorTest(unittest.TestCase):
  """Tests for the GRR file collector."""

  @mock.patch('grr_api_client.api.InitHttp')
  def setUp(self, mock_InitHttp):
    # Replace the GRR API factory so SetUp() wires the collector to a mock.
    self.mock_grr_api = mock.Mock()
    mock_InitHttp.return_value = self.mock_grr_api
    self.test_state = state.DFTimewolfState(config.Config)
    self.grr_hunt_file_collector = grr_hunt.GRRHuntFileCollector(
        self.test_state)
    self.grr_hunt_file_collector.SetUp(
        file_path_list='/etc/passwd,/etc/shadow',
        reason='random reason',
        grr_server_url='http://fake/endpoint',
        grr_username='admin',
        grr_password='<PASSWORD>',
        approvers='approver1,approver2'
    )

  def testInitialization(self):
    """Tests that the collector can be initialized."""
    # The comma-separated path string must be split into a list by SetUp.
    self.assertEqual(
        self.grr_hunt_file_collector.file_path_list,
        ['/etc/passwd', '/etc/shadow']
    )

  def testProcess(self):
    """Tests that the process method invokes the correct GRR API calls."""
    self.grr_hunt_file_collector.Process()
    # extract call kwargs
    call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]
    self.assertEqual(call_kwargs['flow_args'].paths,
                     ['/etc/passwd', '/etc/shadow'])
    self.assertEqual(call_kwargs['flow_args'].action.action_type,
                     flows_pb2.FileFinderAction.DOWNLOAD)
    self.assertEqual(call_kwargs['flow_name'], 'FileFinder')
    self.assertEqual(call_kwargs['hunt_runner_args'].description,
                     'random reason')
class GRRFHuntDownloader(unittest.TestCase):
"""Tests for the GRR hunt downloader."""
@mock.patch('grr_api_client.api.InitHttp')
def setUp(self, mock_InitHttp):
self.mock_grr_api = mock.Mock()
mock_InitHttp.return_value = self.mock_grr_api
self.test_state = state.DFTimewolfState(config.Config)
self.grr_hunt_downloader = grr_hunt.GRRHuntDownloader(self.test_state)
self.grr_hunt_downloader.SetUp(
hunt_id='H:12345',
reason='random reason',
grr_server_url='http://fake/endpoint',
grr_username='admin',
grr_password='<PASSWORD>',
approvers='approver1,approver2'
)
self.grr_hunt_downloader.output_path = '/tmp/test'
def testInitialization(self):
"""Tests that the collector is correctly initialized."""
self.assertEqual(self.grr_hunt_downloader.hunt_id, 'H:12345')
@mock.patch('dftimewolf.lib.collectors.grr_hunt.GRRHuntDownloader._ExtractHuntResults') # pylint: disable=line-too-long
@mock.patch('dftimewolf.lib.collectors.grr_hunt.GRRHuntDownloader._GetAndWriteArchive') # pylint: disable=line-too-long
def testCollectHuntResults(self,
mock_get_write_archive,
mock_ExtractHuntResults):
"""Tests that hunt results are downloaded to the correct file."""
self.mock_grr_api.Hunt.return_value.Get.return_value = \
mock_grr_hosts.MOCK_HUNT
self.grr_hunt_downloader.Process()
mock_get_write_archive.assert_called_with(mock_grr_hosts.MOCK_HUNT,
'/tmp/test/H:12345.zip')
mock_ExtractHuntResults.assert_called_with('/tmp/test/H:12345.zip')
@mock.patch('os.remove')
@mock.patch('zipfile.ZipFile.extract')
def testExtractHuntResults(self, _, mock_remove):
"""Tests that hunt results are correctly extracted."""
self.grr_hunt_downloader.output_path = '/directory'
expected = sorted([
('greendale-student04.c.greendale.internal',
'/directory/hunt_H_A43ABF9D/C.4c4223a2ea9cf6f1'),
('greendale-admin.c.greendale.internal',
'/directory/hunt_H_A43ABF9D/C.ba6b63df5d330589'),
('greendale-student05.c.greendale.internal',
'/directory/hunt_H_A43ABF9D/C.fc693a148af801d5')
])
test_zip = 'tests/lib/collectors/test_data/hunt.zip'
# pylint: disable=protected-access
result = sorted(self.grr_hunt_downloader._ExtractHuntResults(test_zip))
self.assertEqual(result, expected)
mock_remove.assert_called_with('tests/lib/collectors/test_data/hunt.zip')
# Decorators apply bottom-up: mock_extract patches zipfile.ZipFile.extract,
# mock_remove patches os.remove.
@mock.patch('os.remove')
@mock.patch('zipfile.ZipFile.extract')
def testOSErrorExtractHuntResults(self, mock_extract, mock_remove):
"""Tests that an OSError when reading files generate errors."""
self.grr_hunt_downloader.output_path = '/directory'
test_zip = 'tests/lib/collectors/test_data/hunt.zip'
# Simulate a filesystem failure during extraction.
mock_extract.side_effect = OSError
# pylint: disable=protected-access
with self.assertRaises(errors.DFTimewolfError) as error:
self.grr_hunt_downloader._ExtractHuntResults(test_zip)
self.assertEqual(1, len(self.test_state.errors))
self.assertEqual(
error.exception.message,
'Error manipulating file tests/lib/collectors/test_data/hunt.zip: ')
self.assertTrue(error.exception.critical)
# The archive must be kept on disk for post-mortem inspection.
mock_remove.assert_not_called()
@mock.patch('os.remove')
@mock.patch('zipfile.ZipFile.extract')
def testBadZipFileExtractHuntResults(self, mock_extract, mock_remove):
    """Tests that a BadZipFile error when reading files generate errors."""
    test_zip = 'tests/lib/collectors/test_data/hunt.zip'
    self.grr_hunt_downloader.output_path = '/directory'
    # Simulate a corrupt archive: extraction raises zipfile.BadZipfile.
    mock_extract.side_effect = zipfile.BadZipfile
    # pylint: disable=protected-access
    with self.assertRaises(errors.DFTimewolfError) as error:
        self.grr_hunt_downloader._ExtractHuntResults(test_zip)
    self.assertEqual(len(self.test_state.errors), 1)
    self.assertEqual(
        error.exception.message,
        'Bad zipfile tests/lib/collectors/test_data/hunt.zip: ')
    self.assertTrue(error.exception.critical)
    # The corrupt archive must stay on disk for inspection.
    mock_remove.assert_not_called()
if __name__ == '__main__':
unittest.main() | 0.661376 | 0.221814 |
class Solution:
    def findSubstring(self, s: str, words: list[str]) -> list[int]:
        """Return every start index in ``s`` where some concatenation of all
        ``words`` (each used exactly once, in any order) begins.

        Brute force: enumerate the distinct permutations of ``words``, join
        each into a candidate string, and scan ``s`` for every occurrence of
        each candidate. Indices are returned in no particular order.

        Fixes over the original:
        - annotations use builtin generics (``list[str]``) instead of the
          un-imported ``typing.List``;
        - the permutation helper returns the accumulator from its base case
          too, so an empty ``words`` list no longer yields ``None``;
        - duplicate words are pruned at each recursion depth instead of
          re-exploring identical branches;
        - the scanner stops at the last feasible start position.
        """
        def _find_all_occurrences(haystack: str, needle: str) -> list[int]:
            # Every index where `needle` starts inside `haystack`.
            starts = []
            width = len(needle)
            for i in range(len(haystack) - width + 1):
                if haystack[i:i + width] == needle:
                    starts.append(i)
            return starts

        def _permute(choices: list[str], all_perms: set, current: list[str]) -> set:
            # Base case: no words left -> record this concatenation.
            if not choices:
                all_perms.add("".join(current))
                return all_perms
            # Skip duplicate words at the same depth: the result set would
            # dedupe anyway, but this prunes the recursion itself.
            seen = set()
            for index, choice in enumerate(choices):
                if choice in seen:
                    continue
                seen.add(choice)
                current.append(choice)
                _permute(choices[:index] + choices[index + 1:], all_perms, current)
                current.pop()
            return all_perms

        # A: form the distinct permutations of the concatenation.
        concatenations = _permute(words, set(), [])
        # B: look up every occurrence of each candidate.
        indices: list[int] = []
        for candidate in concatenations:
            indices.extend(_find_all_occurrences(s, candidate))
        # C: return the collected start indices.
        return indices
"""
words = ["foo","bar"] all_perms = {}, current = [bar]
^
new_choices = ["bar"]
i = 0, 1
-------------------------------------------------------
#2 - POPPED
c = ["bar"] ap = {} curren = [foo, ]
i = 0
nc = []
-------------------------------------------------------
#3 - POPPED
c = [""] ap = { curren = [foo, bar]
foobar,
}
""" | practice_fall_2021/substring_with_concatentation_of_all_words.py | class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
"""
Assume:
substring
no empty
case insenstively
Intution:
1) permute all concatenations
2) lookup concats in the s
Approach:
1) Brute force:
1) permute all concatenations - factorial time
["bar","foo","the"]
b
f
t
_ _ _
/ \
f__ b__
/\
bft btf
2) lookup concats in the s
for each permutation:
s.index(perm)
3)
Edge Case:
s = words together --> out: 0
3 Keys:
1) goal - full concatentation
2) choice - words not used yet
3) constraints - words list
"""
def find_all_occurences(s, c):
starts, s_index = list(), 0
while s_index < len(s):
# scan for first matching letter
if s[s_index : s_index + len(c)] == c:
# scan through to make sure it is a whole substring match
starts.append(s_index)
# always move to the next letter
s_index += 1
return starts
def permute(choices: List[str], all_perms: set, current: List[str]):
# Base Case:
if len(choices) == 0:
all_perms.add("".join(current))
return
# Recursive Case:
else:
for index in range(len(choices)):
choice = choices[index]
current.append(choice)
new_choices = [c for i, c in enumerate(choices) if i != index]
permute(new_choices, all_perms, current)
current.pop()
return all_perms
# A: form the permuatations of the concatenation
concatenations = permute(words, set(), [])
# B: look up each perm
indices = list()
for c in concatenations:
indices.extend(find_all_occurences(s, c))
# C: return the indices
return indices
"""
words = ["foo","bar"] all_perms = {}, current = [bar]
^
new_choices = ["bar"]
i = 0, 1
-------------------------------------------------------
#2 - POPPED
c = ["bar"] ap = {} curren = [foo, ]
i = 0
nc = []
-------------------------------------------------------
#3 - POPPED
c = [""] ap = { curren = [foo, bar]
foobar,
}
""" | 0.692018 | 0.443179 |
import random
import torch
import argparse
import hparams_registry
import datasets
import imageio
import torchvision.utils as vutils
import os
from tqdm import tqdm
def __write_images(image_outputs, display_image_num, file_name, run):
    """Tile the first `display_image_num` images of each output batch into a
    single grid image, save it to `file_name`, and log it to the run."""
    # Broadcast gray-scale tensors to 3 channels so every batch stacks.
    rgb_outputs = [batch.expand(-1, 3, -1, -1) for batch in image_outputs]
    selected = [batch[:display_image_num] for batch in rgb_outputs]
    image_tensor = torch.cat(selected, 0)
    image_grid = vutils.make_grid(
        image_tensor.data, nrow=display_image_num, padding=0,
        normalize=True, scale_each=True)
    vutils.save_image(image_grid, file_name, nrow=1)
    run.log_image('images', file_name)
def write_2images(image_outputs, display_image_num, image_directory, postfix, run):
    """Write one tiled grid image for `image_outputs` under `image_directory`."""
    n = len(image_outputs)
    file_name = '%s/gen_%s.jpg' % (image_directory, postfix)
    __write_images(image_outputs[0:n], display_image_num, file_name, run)
    #__write_images(image_outputs[n//2:n], display_image_num, '%s/gen_b2a_%s.jpg' % (image_directory, postfix), run)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Domain generalization')
parser.add_argument('--data_dir', type=str)
parser.add_argument('--output_dir', type=str)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
datasets_to_save = ['OfficeHome', 'TerraIncognita', 'DomainNet', 'RotatedMNIST', 'ColoredMNIST', 'SVIRO']
for dataset_name in tqdm(datasets_to_save):
hparams = hparams_registry.default_hparams('ERM', dataset_name)
dataset = datasets.get_dataset_class(dataset_name)(
args.data_dir,
list(range(datasets.num_environments(dataset_name))),
hparams)
# Sample 50 random images per environment of each dataset.
for env_idx, env in enumerate(tqdm(dataset)):
for i in tqdm(range(50)):
idx = random.choice(list(range(len(env))))
x, y = env[idx]
# Resample until the class label is <= 10 (caps the label range
# represented in the saved previews).
while y > 10:
idx = random.choice(list(range(len(env))))
x, y = env[idx]
# Pad 2-channel images to 3 channels for PNG export.
if x.shape[0] == 2:
x = torch.cat([x, torch.zeros_like(x)], dim=0)[:3,:,:]
# Negative values suggest the tensor is normalized; undo it with
# these mean/std constants (presumably ImageNet stats — confirm).
if x.min() < 0:
mean = torch.tensor([0.485, 0.456, 0.406])[:,None,None]
std = torch.tensor([0.229, 0.224, 0.225])[:,None,None]
x = (x * std) + mean
assert(x.min() >= 0)
assert(x.max() <= 1)
# Scale [0, 1] floats to [0, 255] uint8, CHW -> HWC for imageio.
x = (x * 255.99)
x = x.numpy().astype('uint8').transpose(1,2,0)
imageio.imwrite(
os.path.join(args.output_dir,
f'{dataset_name}_env{env_idx}{dataset.ENVIRONMENTS[env_idx]}_{i}_idx{idx}_class{y}.png'),
x) | scripts/save_images.py | import random
import torch
import argparse
import hparams_registry
import datasets
import imageio
import torchvision.utils as vutils
import os
from tqdm import tqdm
def __write_images(image_outputs, display_image_num, file_name, run):
image_outputs = [images.expand(-1, 3, -1, -1) for images in image_outputs] # expand gray-scale images to 3 channels
image_tensor = torch.cat([images[:display_image_num] for images in image_outputs], 0)
image_grid = vutils.make_grid(image_tensor.data, nrow=display_image_num, padding=0, normalize=True, scale_each=True)
vutils.save_image(image_grid, file_name, nrow=1)
run.log_image('images', file_name)
def write_2images(image_outputs, display_image_num, image_directory, postfix, run):
n = len(image_outputs)
__write_images(image_outputs[0:n], display_image_num, '%s/gen_%s.jpg' % (image_directory, postfix), run)
#__write_images(image_outputs[n//2:n], display_image_num, '%s/gen_b2a_%s.jpg' % (image_directory, postfix), run)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Domain generalization')
parser.add_argument('--data_dir', type=str)
parser.add_argument('--output_dir', type=str)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
datasets_to_save = ['OfficeHome', 'TerraIncognita', 'DomainNet', 'RotatedMNIST', 'ColoredMNIST', 'SVIRO']
for dataset_name in tqdm(datasets_to_save):
hparams = hparams_registry.default_hparams('ERM', dataset_name)
dataset = datasets.get_dataset_class(dataset_name)(
args.data_dir,
list(range(datasets.num_environments(dataset_name))),
hparams)
for env_idx, env in enumerate(tqdm(dataset)):
for i in tqdm(range(50)):
idx = random.choice(list(range(len(env))))
x, y = env[idx]
while y > 10:
idx = random.choice(list(range(len(env))))
x, y = env[idx]
if x.shape[0] == 2:
x = torch.cat([x, torch.zeros_like(x)], dim=0)[:3,:,:]
if x.min() < 0:
mean = torch.tensor([0.485, 0.456, 0.406])[:,None,None]
std = torch.tensor([0.229, 0.224, 0.225])[:,None,None]
x = (x * std) + mean
assert(x.min() >= 0)
assert(x.max() <= 1)
x = (x * 255.99)
x = x.numpy().astype('uint8').transpose(1,2,0)
imageio.imwrite(
os.path.join(args.output_dir,
f'{dataset_name}_env{env_idx}{dataset.ENVIRONMENTS[env_idx]}_{i}_idx{idx}_class{y}.png'),
x) | 0.408985 | 0.297553 |
import datetime
from django.utils.timezone import make_aware
from freezegun import freeze_time
from rest_framework import status
from care.facility.api.serializers.patient_sample import PatientSampleSerializer
from care.facility.models import PatientSample
from care.utils.tests.test_base import TestBase
from config.tests.helper import mock_equal
class TestPatientSampleApi(TestBase):
def get_base_url(self, **kwargs):
    """Return the test-sample endpoint for the given (or default) patient."""
    target_patient = kwargs.get("patient", self.patient)
    return f"/api/v1/patient/{str(target_patient.external_id)}/test_sample"
def get_list_representation(self, obj: PatientSample) -> dict:
"""Expected list-serializer output for the given PatientSample."""
return {
# The id is only checked for presence/type, not an exact value.
"id": mock_equal,
"patient_name": obj.patient.name,
"patient_has_sari": obj.patient.has_SARI,
"patient_has_confirmed_contact": obj.patient.contact_with_confirmed_carrier,
"patient_has_suspected_contact": obj.patient.contact_with_suspected_carrier,
"patient_travel_history": obj.patient.countries_travelled,
# Related objects are serialized as their external (UUID) ids.
"facility": str(obj.consultation.facility.external_id),
"facility_object": self.get_facility_representation(obj.consultation.facility),
# Choice fields serialize as their human-readable display values.
"sample_type": obj.get_sample_type_display(),
"status": obj.get_status_display(),
"result": obj.get_result_display(),
"patient": str(obj.patient.external_id),
"consultation": str(obj.consultation.external_id),
"date_of_sample": obj.date_of_sample,
"date_of_result": obj.date_of_result,
"sample_type_other": obj.sample_type_other,
"has_sari": obj.has_sari,
"has_ari": obj.has_ari,
"doctor_name": obj.doctor_name,
"diagnosis": obj.diagnosis,
"diff_diagnosis": obj.diff_diagnosis,
"etiology_identified": obj.etiology_identified,
"is_atypical_presentation": obj.is_atypical_presentation,
"atypical_presentation": obj.atypical_presentation,
"is_unusual_course": obj.is_unusual_course,
"fast_track": obj.fast_track,
}
def get_detail_representation(self, obj=None) -> dict:
    """Detail serialization currently mirrors the list serialization.

    Returns a shallow copy so callers can tweak it without side effects.
    """
    return dict(self.get_list_representation(obj))
def get_sample_data(self, **kwargs):
    """Build a POST payload for creating a patient sample.

    ``patient``/``consultation`` kwargs override the test defaults.
    """
    target_patient = kwargs.get("patient", self.patient)
    target_consultation = kwargs.get("consultation", self.consultation)
    payload = {
        "patient": str(target_patient.external_id),
        "consultation": str(target_consultation.external_id),
        "sample_type": "BA/ETA",
        "testing_facility": str(target_patient.facility.external_id),
    }
    return payload
@classmethod
def setUpClass(cls) -> None:
"""Create one shared consultation for all tests (runs once per class)."""
super().setUpClass()
cls.consultation = cls.create_consultation(cls.patient, cls.facility)
def setUp(self) -> None:
"""Authenticate every request as the default (non-super) user."""
self.client.force_authenticate(self.user)
def create_sample(self, **kwargs):
"""Create a PatientSample directly through the serializer (bypasses the API)."""
sample_data = self.get_sample_data(**kwargs)
serializer = PatientSampleSerializer(data=sample_data)
serializer.is_valid(raise_exception=True)
return serializer.create(serializer.validated_data)
def test_create_sample_api(self):
"""A POST creates a sample that awaits its result with no dates set."""
sample_data = self.get_sample_data()
response = self.client.post(path=self.get_url(), data=sample_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Fetch the newly created sample via the reverse relation.
sample = self.patient.patientsample_set.last()
self.assertEqual(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertIsNone(sample.date_of_sample)
self.assertIsNone(sample.date_of_result)
def test_sample_flow(self):
"""Walks a sample through its full status flow, one frozen day per step,
checking that date_of_sample/date_of_result are stamped at the right
transitions (sample date on SENT_TO_COLLECTON_CENTRE, result date on
result entry)."""
# Status updates require elevated permissions.
self.client.force_authenticate(self.super_user)
# Day 1: create and approve; no dates are stamped yet.
with freeze_time("2020-04-01"):
sample = self.create_sample()
response = self.client.patch(self.get_url(str(sample.external_id)), {"status": "APPROVED"}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["APPROVED"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertIsNone(sample.date_of_sample)
self.assertIsNone(sample.date_of_result)
# Day 2: sending to the collection centre stamps date_of_sample.
with freeze_time("2020-04-02"):
response = self.client.patch(
self.get_url(str(sample.external_id)), {"status": "SENT_TO_COLLECTON_CENTRE"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["SENT_TO_COLLECTON_CENTRE"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertIsNone(sample.date_of_result)
# Day 3: forwarding does not change the already-stamped sample date.
with freeze_time("2020-04-03"):
response = self.client.patch(
self.get_url(str(sample.external_id)), {"status": "RECEIVED_AND_FORWARED"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["RECEIVED_AND_FORWARED"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertIsNone(sample.date_of_result)
# Day 4: arrival at the lab; still awaiting a result.
with freeze_time("2020-04-04"):
response = self.client.patch(
self.get_url(str(sample.external_id)), {"status": "RECEIVED_AT_LAB"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["RECEIVED_AT_LAB"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertIsNone(sample.date_of_result)
# Day 5: entering a result completes the flow and stamps date_of_result.
with freeze_time("2020-04-05"):
response = self.client.patch(self.get_url(str(sample.external_id)), {"result": "NEGATIVE"}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["COMPLETED"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["NEGATIVE"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertEquals(sample.date_of_result.date(), make_aware(datetime.datetime(2020, 4, 5)).date()) | care/facility/tests/test_patient_sample_api.py | import datetime
from django.utils.timezone import make_aware
from freezegun import freeze_time
from rest_framework import status
from care.facility.api.serializers.patient_sample import PatientSampleSerializer
from care.facility.models import PatientSample
from care.utils.tests.test_base import TestBase
from config.tests.helper import mock_equal
class TestPatientSampleApi(TestBase):
def get_base_url(self, **kwargs):
patient = kwargs.get("patient", self.patient)
return f"/api/v1/patient/{str(patient.external_id)}/test_sample"
def get_list_representation(self, obj: PatientSample) -> dict:
return {
"id": mock_equal,
"patient_name": obj.patient.name,
"patient_has_sari": obj.patient.has_SARI,
"patient_has_confirmed_contact": obj.patient.contact_with_confirmed_carrier,
"patient_has_suspected_contact": obj.patient.contact_with_suspected_carrier,
"patient_travel_history": obj.patient.countries_travelled,
"facility": str(obj.consultation.facility.external_id),
"facility_object": self.get_facility_representation(obj.consultation.facility),
"sample_type": obj.get_sample_type_display(),
"status": obj.get_status_display(),
"result": obj.get_result_display(),
"patient": str(obj.patient.external_id),
"consultation": str(obj.consultation.external_id),
"date_of_sample": obj.date_of_sample,
"date_of_result": obj.date_of_result,
"sample_type_other": obj.sample_type_other,
"has_sari": obj.has_sari,
"has_ari": obj.has_ari,
"doctor_name": obj.doctor_name,
"diagnosis": obj.diagnosis,
"diff_diagnosis": obj.diff_diagnosis,
"etiology_identified": obj.etiology_identified,
"is_atypical_presentation": obj.is_atypical_presentation,
"atypical_presentation": obj.atypical_presentation,
"is_unusual_course": obj.is_unusual_course,
"fast_track": obj.fast_track,
}
def get_detail_representation(self, obj=None) -> dict:
list_repr = self.get_list_representation(obj)
detail_repr = list_repr.copy()
return detail_repr
def get_sample_data(self, **kwargs):
patient = kwargs.get("patient", self.patient)
consultation = kwargs.get("consultation", self.consultation)
return {
"patient": str(patient.external_id),
"consultation": str(consultation.external_id),
"sample_type": "BA/ETA",
"testing_facility": str(patient.facility.external_id),
}
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.consultation = cls.create_consultation(cls.patient, cls.facility)
def setUp(self) -> None:
self.client.force_authenticate(self.user)
def create_sample(self, **kwargs):
sample_data = self.get_sample_data(**kwargs)
serializer = PatientSampleSerializer(data=sample_data)
serializer.is_valid(raise_exception=True)
return serializer.create(serializer.validated_data)
def test_create_sample_api(self):
sample_data = self.get_sample_data()
response = self.client.post(path=self.get_url(), data=sample_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
sample = self.patient.patientsample_set.last()
self.assertEqual(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertIsNone(sample.date_of_sample)
self.assertIsNone(sample.date_of_result)
def test_sample_flow(self):
self.client.force_authenticate(self.super_user)
with freeze_time("2020-04-01"):
sample = self.create_sample()
response = self.client.patch(self.get_url(str(sample.external_id)), {"status": "APPROVED"}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["APPROVED"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertIsNone(sample.date_of_sample)
self.assertIsNone(sample.date_of_result)
with freeze_time("2020-04-02"):
response = self.client.patch(
self.get_url(str(sample.external_id)), {"status": "SENT_TO_COLLECTON_CENTRE"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["SENT_TO_COLLECTON_CENTRE"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertIsNone(sample.date_of_result)
with freeze_time("2020-04-03"):
response = self.client.patch(
self.get_url(str(sample.external_id)), {"status": "RECEIVED_AND_FORWARED"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["RECEIVED_AND_FORWARED"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertIsNone(sample.date_of_result)
with freeze_time("2020-04-04"):
response = self.client.patch(
self.get_url(str(sample.external_id)), {"status": "RECEIVED_AT_LAB"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["RECEIVED_AT_LAB"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertIsNone(sample.date_of_result)
with freeze_time("2020-04-05"):
response = self.client.patch(self.get_url(str(sample.external_id)), {"result": "NEGATIVE"}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
sample.refresh_from_db()
self.assertEquals(sample.status, PatientSample.SAMPLE_TEST_FLOW_MAP["COMPLETED"])
self.assertEquals(sample.result, PatientSample.SAMPLE_TEST_RESULT_MAP["NEGATIVE"])
self.assertEquals(sample.date_of_sample.date(), make_aware(datetime.datetime(2020, 4, 2)).date())
self.assertEquals(sample.date_of_result.date(), make_aware(datetime.datetime(2020, 4, 5)).date()) | 0.576423 | 0.23541 |
import sys
import logging
from webgme_bindings import PluginBase
# Setup a logger
logger = logging.getLogger('PythonBindings')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout) # By default it logs to stderr..
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class PythonBindings(PluginBase):
"""Example WebGME plugin: logs the whole node tree, then emits a message."""
def main(self):
core = self.core
root_node = self.root_node
active_node = self.active_node
# Log the plugin configuration provided by WebGME.
logger.info(self.get_current_config())
name = core.get_attribute(active_node, 'name')
logger.info('ActiveNode at "{0}" has name {1}'.format(core.get_path(active_node), name))
def traverse_tree_rec(node, depth):
# Depth-first dump of one node: meta-type, attributes, pointers,
# then recurse into children. `depth` controls log indentation.
indent = ' ' * depth
node_name = core.get_attribute(node, 'name')
type_node = core.get_base_type(node)
# Nodes without a base type (e.g. the root) are reported as 'N/A'.
type_name = 'N/A'
if type_node:
type_name = core.get_attribute(type_node, 'name')
logger.info('{0}# {1} is of type {2}'.format(indent, node_name, type_name))
logger.info('{0} attributes:'.format(indent))
for attr_name in core.get_attribute_names(node):
val = core.get_attribute(node, attr_name)
logger.info(' {0} {1} "{2}" [{3}]:'.format(indent, attr_name, val, type(val)))
logger.info('{0} pointers:'.format(indent))
for ptr_name in core.get_valid_pointer_names(node):
val = core.get_pointer_path(node, ptr_name)
logger.info(' {0} {1} "{2}" [{3}]:'.format(indent, ptr_name, val, type(val)))
for child_node in core.load_children(node):
traverse_tree_rec(child_node, depth + 1)
logger.info('## Node Tree ##')
traverse_tree_rec(root_node, 1)
# Attach a result message and an artifact to the plugin result.
self.create_message(root_node, 'Hello')
self.add_file('f.txt', 'Hello') | src/plugins/PythonBindings/PythonBindings/__init__.py | import sys
import logging
from webgme_bindings import PluginBase
# Setup a logger
logger = logging.getLogger('PythonBindings')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout) # By default it logs to stderr..
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class PythonBindings(PluginBase):
def main(self):
core = self.core
root_node = self.root_node
active_node = self.active_node
logger.info(self.get_current_config())
name = core.get_attribute(active_node, 'name')
logger.info('ActiveNode at "{0}" has name {1}'.format(core.get_path(active_node), name))
def traverse_tree_rec(node, depth):
indent = ' ' * depth
node_name = core.get_attribute(node, 'name')
type_node = core.get_base_type(node)
type_name = 'N/A'
if type_node:
type_name = core.get_attribute(type_node, 'name')
logger.info('{0}# {1} is of type {2}'.format(indent, node_name, type_name))
logger.info('{0} attributes:'.format(indent))
for attr_name in core.get_attribute_names(node):
val = core.get_attribute(node, attr_name)
logger.info(' {0} {1} "{2}" [{3}]:'.format(indent, attr_name, val, type(val)))
logger.info('{0} pointers:'.format(indent))
for ptr_name in core.get_valid_pointer_names(node):
val = core.get_pointer_path(node, ptr_name)
logger.info(' {0} {1} "{2}" [{3}]:'.format(indent, ptr_name, val, type(val)))
for child_node in core.load_children(node):
traverse_tree_rec(child_node, depth + 1)
logger.info('## Node Tree ##')
traverse_tree_rec(root_node, 1)
self.create_message(root_node, 'Hello')
self.add_file('f.txt', 'Hello') | 0.274449 | 0.061791 |
import numpy as np
import scipy.stats
from ..base_parameters import (
ParamHelper, PriorHelper, PrecondHelper,
get_value_func, get_hyperparam_func, get_dim_func,
set_value_func, set_hyperparam_func,
)
from .._utils import (
normal_logpdf,
matrix_normal_logpdf,
pos_def_mat_inv,
varp_stability_projection,
tril_vector_to_mat,
)
import logging
logger = logging.getLogger(name=__name__)
## Implementations of Vector, Square, Rectangular Parameters
# Single Square
class VectorParamHelper(ParamHelper):
    """ParamHelper for a single vector-valued parameter (default name 'mu').

    Converts the vector between keyword arguments, the owning parameter
    object's ``var_dict``, and a flat numpy vector representation.
    """
    def __init__(self, name='mu', dim_names=None):
        self.name = name
        self.dim_names = ['n'] if dim_names is None else dim_names
        return

    def set_var(self, param, **kwargs):
        """Copy the vector from kwargs into param.var_dict and record its length."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        if np.ndim(kwargs[self.name]) != 1:
            raise ValueError("{} must be vector".format(self.name))
        # Bug fix: np.shape() returns a tuple, so the dimension used to be
        # stored as e.g. (5,) instead of 5. Unpack to an int, matching
        # VectorsParamHelper.set_var.
        n, = np.shape(kwargs[self.name])
        param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
        param._set_check_dim(**{self.dim_names[0]: n})
        return

    def project_parameters(self, param, **kwargs):
        """Replace the value with a user-supplied 'fixed' value, if given."""
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('fixed') is not None:
            param.var_dict[self.name] = name_kwargs['fixed'].copy()
        return

    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened vector value to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return

    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Read n entries starting at vector_index back into var_dict.

        Returns the index just past the consumed entries.
        """
        n = kwargs[self.dim_names[0]]
        mu = np.reshape(vector[vector_index:vector_index+n], (n))
        var_dict[self.name] = mu
        return vector_index+n

    def get_properties(self):
        """Return property descriptors for the value and its dimension names."""
        properties = {}
        properties[self.name] = property(
            fget=get_value_func(self.name),
            fset=set_value_func(self.name),
            doc="{0} is a {1} vector".format(
                self.name, self.dim_names[0]),
        )
        for dim_name in self.dim_names:
            properties[dim_name] = property(
                fget=get_dim_func(dim_name),
            )
        return properties
class VectorPriorHelper(PriorHelper):
    """PriorHelper for a single vector parameter with a normal prior.

    The prior is N(mean, var_col * Qinv^-1), where Qinv is an optional row
    precision owned by another parameter (identified by ``var_row_name``);
    when absent, the identity is used.
    """
    def __init__(self, name='mu', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n'] if dim_names is None else dim_names
        return

    def _get_Qinv(self, prior, var_dict):
        """Return the row precision Qinv from var_dict (identity when unused)."""
        if self._var_row_name is None:
            return np.eye(prior.dim[self.dim_names[0]])
        if self._lt_vec_name not in var_dict:
            raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                "Perhaps {} must be earlier in _prior_helper_list".format(
                    self._var_row_name)
                )
        LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
        # Small jitter keeps Qinv numerically positive definite.
        return LQinv.dot(LQinv.T) + 1e-9*np.eye(prior.dim[self.dim_names[0]])

    def set_hyperparams(self, prior, **kwargs):
        """Validate and store the mean vector and scalar column variance."""
        if self._mean_name not in kwargs:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name not in kwargs:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if not np.isscalar(kwargs[self._var_col_name]):
            raise ValueError("{} must be scalar".format(self._var_col_name))
        # Bug fix: np.shape() returns a tuple; unpack it so the recorded
        # dimension is an int (and a non-vector mean raises here).
        n, = np.shape(kwargs[self._mean_name])
        prior._set_check_dim(**{self.dim_names[0]: n})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return

    def sample_prior(self, prior, var_dict, **kwargs):
        """Draw the vector from its prior N(mean, var_col * Qinv^-1)."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        Qinv = self._get_Qinv(prior, var_dict)
        var_dict[self.name] = np.random.multivariate_normal(
            mean=mean_mu,
            cov=var_col_mu*pos_def_mat_inv(Qinv),
        )
        return

    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Draw the vector from its conjugate posterior given sufficient stats."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        # Bug fix: the var_row_name=None branch read `self.prior[...]`,
        # which does not exist; use prior.dim like everywhere else
        # (handled inside _get_Qinv).
        Qinv = self._get_Qinv(prior, var_dict)
        # Combine the prior pseudo-observation with the data statistics.
        S_prevprev = var_col_mu**-1 + \
            sufficient_stat[self.name]['S_prevprev']
        S_curprev = mean_mu * var_col_mu**-1 + \
            sufficient_stat[self.name]['S_curprev']
        post_mean_mu = S_curprev/S_prevprev
        var_dict[self.name] = np.random.multivariate_normal(
            mean=post_mean_mu,
            cov=pos_def_mat_inv(Qinv)/S_prevprev,
        )
        return

    def logprior(self, prior, logprior, parameters, **kwargs):
        """Accumulate the prior log-density of the current parameter value."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
        else:
            LQinv = np.eye(prior.dim[self.dim_names[0]])
        # Bug fix: `var_col_mu_k` was undefined (copy/paste from the
        # per-state variant); the scalar column variance is var_col_mu.
        logprior += normal_logpdf(parameters.var_dict[self.name],
                                  mean=mean_mu,
                                  Lprec=var_col_mu**-0.5 * LQinv,
                                  )
        return logprior

    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Store d(logprior)/d(value) in grad[self.name]."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        mu = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        grad[self.name] = -1.0 * np.dot(var_col_mu**-1 * Qinv, mu - mean_mu)
        return

    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparameters centered at 0 (or at the current value
        when from_mean=True), with column variance kwargs['var']."""
        var = kwargs['var']
        mu = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_mu = mu.copy()
        else:
            mean_mu = np.zeros_like(mu)
        prior_kwargs[self._mean_name] = mean_mu
        prior_kwargs[self._var_col_name] = var
        return

    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Build default prior hyperparameters: zero mean, variance kwargs['var']."""
        n = kwargs[self.dim_names[0]]
        var = kwargs['var']
        default_kwargs[self._mean_name] = np.zeros((n))
        default_kwargs[self._var_col_name] = var
        return
class VectorPrecondHelper(PrecondHelper):
    """Preconditions a vector parameter's gradient by the matrix named
    ``var_row_name`` (presumably a row covariance Q — confirm upstream)."""

    def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        self.dim_names = ['n'] if dim_names is None else dim_names
        return

    def precondition(self, preconditioner,
                     precond_grad, grad, parameters, **kwargs):
        """Left-multiply the gradient by Q."""
        row_cov = getattr(parameters, self._var_row_name)
        precond_grad[self.name] = np.dot(row_cov, grad[self.name])
        return

    def precondition_noise(self, preconditioner,
                           noise, parameters, **kwargs):
        """Draw preconditioned Gaussian noise by solving LQinv^T x = z."""
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        raw = np.random.normal(loc=0, size=(LQinv.shape[0]))
        noise[self.name] = np.linalg.solve(LQinv.T, raw)
        return

    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """No correction term for this parameter: contribute zeros."""
        current_value = getattr(parameters, self.name)
        correction[self.name] = np.zeros_like(current_value, dtype=float)
        return
# Multiple Square
class VectorsParamHelper(ParamHelper):
def __init__(self, name='mu', dim_names=None):
self.name = name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
mu = np.reshape(vector[vector_index:vector_index+num_states*n],
(num_states, n))
var_dict[self.name] = mu
return vector_index+num_states*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is {2} {1} vectors".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class VectorsPriorHelper(PriorHelper):
    """Prior helper for `num_states` Gaussian mean vectors (e.g. `mu`).

    The prior on each vector is
        mu_k ~ N(mean_mu[k], var_col_mu[k] * Qinv_k^{-1}),
    where Qinv_k is the optional row precision identified by
    `var_row_name` (identity when no row name is given).
    """
    def __init__(self, name='mu', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)  # num_states by n
        self._var_col_name = 'var_col_{0}'.format(name)  # length num_states
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate and store `mean_<name>` and `var_col_<name>` hyperparams."""
        if self._mean_name in kwargs:
            num_states, n = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            num_states2 = np.size(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if (num_states != num_states2):
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Sample each mu_k from its Gaussian prior.

        Requires the LQinv vectors in `var_dict` when `var_row_name` is set.
        """
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                # Jitter keeps each Qinv numerically positive definite.
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            # BUGFIX: iterating the int prior.dim[...] directly raised
            # TypeError; wrap it in range() to build one identity per state.
            Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                for _ in range(prior.dim[self.dim_names[1]])])
        mus = [None for k in range(prior.dim[self.dim_names[1]])]
        for k in range(len(mus)):
            mus[k] = np.random.multivariate_normal(
                    mean=mean_mu[k],
                    cov=var_col_mu[k]*pos_def_mat_inv(Qinvs[k]),
                    )
        var_dict[self.name] = np.array(mus)
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Sample each mu_k from its conjugate Gaussian posterior."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        num_states, n = np.shape(mean_mu)
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            Qinvs = np.array([np.eye(n) for _ in range(num_states)])
        mus = [None for k in range(num_states)]
        for k in range(len(mus)):
            # Combine prior pseudo-observation with sufficient statistics.
            S_prevprev = var_col_mu[k]**-1 + \
                    sufficient_stat[self.name]['S_prevprev'][k]
            S_curprev = mean_mu[k] * var_col_mu[k]**-1 + \
                    sufficient_stat[self.name]['S_curprev'][k]
            post_mean_mu_k = S_curprev/S_prevprev
            mus[k] = np.random.multivariate_normal(
                    mean=post_mean_mu_k,
                    cov=pos_def_mat_inv(Qinvs[k])/S_prevprev,
                    )
        var_dict[self.name] = np.array(mus)
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Accumulate the Gaussian log prior density over all mu_k."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        num_states, n = np.shape(mean_mu)
        if self._var_row_name is not None:
            LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
        else:
            LQinvs = np.array([np.eye(n)
                for _ in range(num_states)])
        for mu_k, mean_mu_k, var_col_mu_k, LQinv_k in zip(
                parameters.var_dict[self.name], mean_mu, var_col_mu, LQinvs):
            logprior += normal_logpdf(mu_k,
                    mean=mean_mu_k,
                    Lprec=var_col_mu_k**-0.5 * LQinv_k,
                    )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Gradient of the log prior with respect to each mu_k."""
        mu = parameters.var_dict[self.name]
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        num_states, n = np.shape(mean_mu)
        if self._var_row_name is not None:
            Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinvs = np.array([np.eye(n)
                for _ in range(num_states)])
        grad[self.name] = np.array([
            -1.0 * np.dot(var_col_mu[k]**-1 * Qinvs[k], mu[k] - mean_mu[k])
            for k in range(num_states)])
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparams centered at zero (or at mu if from_mean).

        BUGFIX: the per-state variance vector previously referenced an
        undefined name `A` (NameError); it now uses `mu.shape[0]`
        (= num_states).
        """
        var = kwargs['var']
        mu = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_mu = mu.copy()
        else:
            mean_mu = np.zeros_like(mu)
        var_col_mu = np.array([
            var for _ in range(mu.shape[0])
            ])
        prior_kwargs[self._mean_name] = mean_mu
        prior_kwargs[self._var_col_name] = var_col_mu
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Fill default hyperparams: zero means, constant per-state variance."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        var = kwargs['var']
        mean_mu = np.zeros((num_states, n))
        var_col_mu = np.ones((num_states))*var
        default_kwargs[self._mean_name] = mean_mu
        default_kwargs[self._var_col_name] = var_col_mu
        return
class VectorsPrecondHelper(PrecondHelper):
    """Preconditioner helper for `num_states` stacked vector parameters."""
    def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        self.dim_names = (dim_names if dim_names is not None
                else ['n', 'num_states'])
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Precondition each state's gradient by its row covariance Q[k]."""
        row_cov = getattr(parameters, self._var_row_name)
        raw_grad = grad[self.name]
        precond_grad[self.name] = np.array(
                [row_cov[k].dot(raw_grad[k])
                 for k in range(row_cov.shape[0])])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw per-state noise ~ N(0, Q[k]) via triangular solves."""
        chol_inv = getattr(parameters, "L{}inv".format(self._var_row_name))
        samples = []
        for k in range(chol_inv.shape[0]):
            white = np.random.normal(loc=0, size=chol_inv.shape[-1])
            samples.append(np.linalg.solve(chol_inv[k].T, white))
        noise[self.name] = np.array(samples)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this preconditioner."""
        correction[self.name] = np.zeros_like(
                getattr(parameters, self.name), dtype=float)
        return
# Single Square
class SquareMatrixParamHelper(ParamHelper):
    """Parameter helper for a single n by n square matrix (e.g. `A`)."""
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = dim_names if dim_names is not None else ['n']
        return
    def set_var(self, param, **kwargs):
        """Store the square matrix in param.var_dict after checking shape."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        value = kwargs[self.name]
        n, n2 = np.shape(value)
        if n != n2:
            raise ValueError("{} must be square matrices".format(self.name))
        param.var_dict[self.name] = np.array(value).astype(float)
        param._set_check_dim(**{self.dim_names[0]: n})
        return
    def project_parameters(self, param, **kwargs):
        """Stability-project A (default on), then apply a fixed override."""
        options = kwargs.get(self.name, {})
        if options.get('thresh', True):
            projected = varp_stability_projection(
                    param.var_dict[self.name],
                    eigenvalue_cutoff=options.get('eigenvalue_cutoff', 0.9999),
                    var_name=self.name,
                    logger=logger)
            param.var_dict[self.name] = projected
        fixed = options.get('fixed')
        if fixed is not None:
            param.var_dict[self.name] = fixed.copy()
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened n*n entries to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Unpack n*n entries from `vector` into an (n, n) matrix."""
        n = kwargs[self.dim_names[0]]
        count = n ** 2
        var_dict[self.name] = np.reshape(
                vector[vector_index:vector_index + count], (n, n))
        return vector_index + count
    def get_properties(self):
        """Return property objects for the matrix and its dimension names."""
        doc = "{0} is a {1} by {1} matrix".format(
                self.name, self.dim_names[0])
        properties = {
            self.name: property(fget=get_value_func(self.name),
                                fset=set_value_func(self.name),
                                doc=doc),
        }
        for dim_name in self.dim_names:
            properties[dim_name] = property(fget=get_dim_func(dim_name))
        return properties
class SquareMatrixPriorHelper(PriorHelper):
    """Prior helper for a single square matrix parameter (e.g. `A`).

    The prior is matrix-normal:
        A ~ MN(mean_A, rowcov=Qinv^{-1}, colcov=diag(var_col_A)),
    where Qinv is the optional row precision identified by
    `var_row_name` (identity when no row name is given).
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate and store `mean_<name>` (n x n) and `var_col_<name>` (n,)."""
        if self._mean_name in kwargs:
            n, n2 = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            n3 = np.size(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("{} must be square".format(self._mean_name))
        if n != n3:
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Sample A from its matrix-normal prior."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                # Jitter keeps Qinv numerically positive definite.
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=mean_A,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=np.diag(var_col_A),
                ).rvs()
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Sample A from its conjugate matrix-normal posterior."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            # BUGFIX: was `np.eye(self.prior[self.dim_names[0]])`, but the
            # helper has no `prior` attribute; the dimension lives on the
            # `prior` argument (matching sample_prior above).
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        S_prevprev = np.diag(var_col_A**-1) + \
                sufficient_stat[self.name]['S_prevprev']
        S_curprev = mean_A * var_col_A**-1 + \
                sufficient_stat[self.name]['S_curprev']
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=pos_def_mat_inv(S_prevprev),
                ).rvs()
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Accumulate the matrix-normal log prior density of A."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
        else:
            LQinv = np.eye(prior.dim[self.dim_names[0]])
        logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
                mean=mean_A,
                Lrowprec=LQinv,
                Lcolprec=np.diag(var_col_A**-0.5),
                )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Gradient of the log prior with respect to A."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparams centered at zero (or at A if from_mean)."""
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.ones(A.shape[0])*var
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Fill defaults: zero (n, n) mean and constant column variance."""
        n = kwargs[self.dim_names[0]]
        var = kwargs['var']
        mean_A = np.zeros((n,n))
        var_col_A = np.ones(n)*var
        default_kwargs[self._mean_name] = mean_A
        default_kwargs[self._var_col_name] = var_col_A
        return
class SquareMatrixPrecondHelper(PrecondHelper):
    """Preconditioner helper for a single square matrix parameter."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        self.dim_names = dim_names if dim_names is not None else ['n']
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Left-multiply the gradient by the row covariance Q."""
        row_cov = getattr(parameters, self._var_row_name)
        precond_grad[self.name] = row_cov.dot(grad[self.name])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw matrix-shaped noise with row covariance Q via a solve."""
        chol_inv = getattr(parameters, "L{}inv".format(self._var_row_name))
        white = np.random.normal(loc=0, size=chol_inv.shape)
        noise[self.name] = np.linalg.solve(chol_inv.T, white)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this preconditioner."""
        correction[self.name] = np.zeros_like(
                getattr(parameters, self.name), dtype=float)
        return
# Multiple Square
class SquareMatricesParamHelper(ParamHelper):
    """Parameter helper for `num_states` stacked n by n square matrices."""
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = (dim_names if dim_names is not None
                else ['n', 'num_states'])
        return
    def set_var(self, param, **kwargs):
        """Store the (num_states, n, n) array after checking squareness."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        value = kwargs[self.name]
        num_states, n, n2 = np.shape(value)
        if n != n2:
            raise ValueError("{} must be square matrices".format(self.name))
        param.var_dict[self.name] = np.array(value).astype(float)
        param._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        return
    def project_parameters(self, param, **kwargs):
        """Stability-project each A_k (default on), then apply fixed override."""
        options = kwargs.get(self.name, {})
        if options.get('thresh', True):
            A = param.var_dict[self.name]
            for k in range(len(A)):
                A[k] = varp_stability_projection(
                        A[k],
                        eigenvalue_cutoff=options.get(
                            'eigenvalue_cutoff', 0.9999),
                        var_name=self.name,
                        logger=logger)
            param.var_dict[self.name] = A
        fixed = options.get('fixed')
        if fixed is not None:
            param.var_dict[self.name] = fixed.copy()
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened num_states*n*n entries to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Unpack num_states*n*n entries into (num_states, n, n)."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        count = num_states * n ** 2
        var_dict[self.name] = np.reshape(
                vector[vector_index:vector_index + count],
                (num_states, n, n))
        return vector_index + count
    def get_properties(self):
        """Return property objects for the matrices and their dimensions."""
        doc = "{0} is a {2} of {1} by {1} matrices".format(
                self.name, self.dim_names[0], self.dim_names[1])
        properties = {
            self.name: property(fget=get_value_func(self.name),
                                fset=set_value_func(self.name),
                                doc=doc),
        }
        for dim_name in self.dim_names:
            properties[dim_name] = property(fget=get_dim_func(dim_name))
        return properties
class SquareMatricesPriorHelper(PriorHelper):
    """Prior helper for `num_states` square matrices (e.g. transition `A_k`).

    Each A_k has a matrix-normal prior
        A_k ~ MN(mean_A[k], rowcov=Qinv_k^{-1}, colcov=diag(var_col_A[k])),
    with Qinv_k the optional row precision identified by `var_row_name`
    (identity when no row name is given).
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate shapes of mean (num_states, n, n) / var_col (num_states, n)."""
        if self._mean_name in kwargs:
            num_states, n, n2 = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            num_states2, n3 = np.shape(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("{} must be square".format(self._mean_name))
        if (n != n3) or (num_states != num_states2):
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Sample each A_k from its matrix-normal prior."""
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                # Jitter keeps each Qinv numerically positive definite.
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            Qinvs = np.array([np.eye(n) for _ in range(num_states)])
        As = [None for k in range(num_states)]
        for k in range(len(As)):
            As[k] = scipy.stats.matrix_normal(
                    mean=mean_A[k],
                    rowcov=pos_def_mat_inv(Qinvs[k]),
                    colcov=np.diag(var_col_A[k]),
                    ).rvs()
        var_dict[self.name] = np.array(As)
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Sample each A_k from its conjugate matrix-normal posterior."""
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            Qinvs = np.array([np.eye(n) for _ in range(num_states)])
        As = [None for k in range(num_states)]
        for k in range(len(As)):
            # Combine prior pseudo-observations with sufficient statistics.
            S_prevprev = np.diag(var_col_A[k]**-1) + \
                    sufficient_stat[self.name]['S_prevprev'][k]
            S_curprev = mean_A[k] * var_col_A[k]**-1 + \
                    sufficient_stat[self.name]['S_curprev'][k]
            As[k] = scipy.stats.matrix_normal(
                    mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                    rowcov=pos_def_mat_inv(Qinvs[k]),
                    colcov=pos_def_mat_inv(S_prevprev),
                    ).rvs()
        var_dict[self.name] = np.array(As)
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Accumulate the matrix-normal log prior density over all A_k."""
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv_vec = getattr(parameters, self._lt_vec_name)
            LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                for LQinv_vec_k in LQinv_vec])
        else:
            LQinvs = np.array([np.eye(n) for _ in range(num_states)])
        for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
                parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
            logprior += matrix_normal_logpdf(A_k,
                    mean=mean_A_k,
                    Lrowprec=LQinv_k,
                    Lcolprec=np.diag(var_col_A_k**-0.5),
                    )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Gradient of the log prior with respect to each A_k."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            # BUGFIX: iterating the int prior.dim[...] directly raised
            # TypeError; wrap it in range() to build one identity per state.
            Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                for _ in range(prior.dim[self.dim_names[1]])])
        grad[self.name] = np.array([
            -1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
            for k in range(prior.dim[self.dim_names[1]])
            ])
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparams from the current (num_states, n, n) A.

        BUGFIX: each column-variance row previously had length
        A.shape[0] (= num_states); set_hyperparams requires length n,
        the column dimension of each A_k, so use A.shape[-1].
        """
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.array([
            np.ones(A.shape[-1])*var for _ in range(A.shape[0])
            ])
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Fill defaults: zero means and constant column variances."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        var = kwargs['var']
        mean_A = np.zeros((num_states, n,n))
        var_col_A = np.ones((num_states,n))*var
        default_kwargs[self._mean_name] = mean_A
        default_kwargs[self._var_col_name] = var_col_A
        return
class SquareMatricesPrecondHelper(PrecondHelper):
    """Preconditioner helper for `num_states` stacked square matrices."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        self.dim_names = (dim_names if dim_names is not None
                else ['n', 'num_states'])
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Precondition each state's gradient by its row covariance Q[k]."""
        row_cov = getattr(parameters, self._var_row_name)
        raw_grad = grad[self.name]
        precond_grad[self.name] = np.array(
                [row_cov[k].dot(raw_grad[k])
                 for k in range(row_cov.shape[0])])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw per-state matrix noise with row covariance Q[k]."""
        chol_inv = getattr(parameters, "L{}inv".format(self._var_row_name))
        samples = []
        for k in range(chol_inv.shape[0]):
            white = np.random.normal(loc=0, size=chol_inv[k].shape)
            samples.append(np.linalg.solve(chol_inv[k].T, white))
        noise[self.name] = np.array(samples)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this preconditioner."""
        correction[self.name] = np.zeros_like(
                getattr(parameters, self.name), dtype=float)
        return
# Single Rectangular (m by n)
class RectMatrixParamHelper(ParamHelper):
    """Parameter helper for a single m by n rectangular matrix."""
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = dim_names if dim_names is not None else ['m','n']
        return
    def set_var(self, param, **kwargs):
        """Store the (m, n) matrix in param.var_dict and record its dims."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        value = kwargs[self.name]
        m, n = np.shape(value)
        param.var_dict[self.name] = np.array(value).astype(float)
        param._set_check_dim(**{
            self.dim_names[0]: m,
            self.dim_names[1]: n,
            })
        return
    def project_parameters(self, param, **kwargs):
        """Optionally stability-project, fix, or pin the leading block to I."""
        options = kwargs.get(self.name, {})
        if options.get('thresh', False):
            param.var_dict[self.name] = varp_stability_projection(
                    param.var_dict[self.name],
                    eigenvalue_cutoff=options.get('eigenvalue_cutoff', 0.9999),
                    var_name=self.name,
                    logger=logger)
        fixed = options.get('fixed')
        if fixed is not None:
            param.var_dict[self.name] = fixed.copy()
        if options.get('fixed_eye', False):
            # Pin the leading min(m, n) block to the identity.
            k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
            mat = param.var_dict[self.name]
            mat[0:k, 0:k] = np.eye(k)
            param.var_dict[self.name] = mat
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened m*n entries to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Unpack m*n entries from `vector` into an (m, n) matrix."""
        m = kwargs[self.dim_names[0]]
        n = kwargs[self.dim_names[1]]
        count = m * n
        var_dict[self.name] = np.reshape(
                vector[vector_index:vector_index + count], (m, n))
        return vector_index + count
    def get_properties(self):
        """Return property objects for the matrix and its dimension names."""
        doc = "{0} is a {1} by {2} matrix".format(
                self.name, self.dim_names[0], self.dim_names[1])
        properties = {
            self.name: property(fget=get_value_func(self.name),
                                fset=set_value_func(self.name),
                                doc=doc),
        }
        for dim_name in self.dim_names:
            properties[dim_name] = property(fget=get_dim_func(dim_name))
        return properties
class RectMatrixPriorHelper(PriorHelper):
    """Prior helper for a single m by n rectangular matrix parameter.

    The prior is matrix-normal:
        A ~ MN(mean_A, rowcov=Qinv^{-1}, colcov=diag(var_col_A)),
    with Qinv the optional m x m row precision identified by
    `var_row_name` (identity when no row name is given).
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)  # m by n ndarray
        self._var_col_name = 'var_col_{0}'.format(name)  # n ndarray
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)  # m by m ndarray
        self.dim_names = ['m', 'n'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate and store `mean_<name>` (m x n) and `var_col_<name>` (n,)."""
        if self._mean_name in kwargs:
            m, n = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            n2 = np.size(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{
            self.dim_names[0]: m,
            self.dim_names[1]: n,
            })
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Sample A from its matrix-normal prior."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                # Jitter keeps Qinv numerically positive definite.
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=mean_A,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=np.diag(var_col_A),
                ).rvs()
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Sample A from its conjugate matrix-normal posterior."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            # BUGFIX: was `np.eye(self.prior[self.dim_names[0]])`, but the
            # helper has no `prior` attribute; the dimension lives on the
            # `prior` argument (matching sample_prior above).
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        S_prevprev = np.diag(var_col_A**-1) + \
                sufficient_stat[self.name]['S_prevprev']
        S_curprev = mean_A * var_col_A**-1 + \
                sufficient_stat[self.name]['S_curprev']
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=pos_def_mat_inv(S_prevprev),
                ).rvs()
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Accumulate the matrix-normal log prior density of A."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
        else:
            LQinv = np.eye(prior.dim[self.dim_names[0]])
        logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
                mean=mean_A,
                Lrowprec=LQinv,
                Lcolprec=np.diag(var_col_A**-0.5),
                )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Gradient of the log prior with respect to A."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparams centered at zero (or at A if from_mean)."""
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.ones(A.shape[1])*var
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Fill defaults: zero (m, n) mean and constant column variance."""
        m = kwargs[self.dim_names[0]]
        n = kwargs[self.dim_names[1]]
        var = kwargs['var']
        mean_A = np.zeros((m,n))
        var_col_A = np.ones(n)*var
        default_kwargs[self._mean_name] = mean_A
        default_kwargs[self._var_col_name] = var_col_A
        return
class RectMatrixPrecondHelper(PrecondHelper):
    """Preconditioner helper for a single m by n rectangular matrix."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        self.dim_names = dim_names if dim_names is not None else ['m', 'n']
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Left-multiply the gradient by the row covariance Q."""
        row_cov = getattr(parameters, self._var_row_name)
        precond_grad[self.name] = row_cov.dot(grad[self.name])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw (m, n) noise with row covariance Q via a triangular solve."""
        num_rows = parameters.dim[self.dim_names[0]]
        num_cols = parameters.dim[self.dim_names[1]]
        chol_inv = getattr(parameters, "L{}inv".format(self._var_row_name))
        white = np.random.normal(loc=0, size=(num_rows, num_cols))
        noise[self.name] = np.linalg.solve(chol_inv.T, white)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this preconditioner."""
        correction[self.name] = np.zeros_like(
                getattr(parameters, self.name), dtype=float)
        return
# Multiple Rectangular
class RectMatricesParamHelper(ParamHelper):
    """Parameter helper for `num_states` stacked m by n rectangular matrices."""
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = ['m', 'n', 'num_states'] \
                if dim_names is None else dim_names
        return
    def set_var(self, param, **kwargs):
        """Store the (num_states, m, n) array in param.var_dict; record dims."""
        if self.name in kwargs:
            num_states, m, n = np.shape(kwargs[self.name])
            param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
            param._set_check_dim(**{
                self.dim_names[0]: m,
                self.dim_names[1]: n,
                self.dim_names[2]: num_states,
                })
        else:
            raise ValueError("{} not provided".format(self.name))
        return
    def project_parameters(self, param, **kwargs):
        """Optionally stability-project each A_k, fix, or pin leading blocks.

        Options (under kwargs[self.name]): `thresh` (default False) applies
        `varp_stability_projection` per state; `fixed` overwrites the value;
        `fixed_eye` pins each state's leading min(m, n) block to identity.
        """
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('thresh', False):
            A = param.var_dict[self.name]
            for k, A_k in enumerate(A):
                A_k = varp_stability_projection(A_k,
                    eigenvalue_cutoff=name_kwargs.get(
                        'eigenvalue_cutoff', 0.9999),
                    var_name=self.name,
                    logger=logger)
                A[k] = A_k
            param.var_dict[self.name] = A
        if name_kwargs.get('fixed') is not None:
            param.var_dict[self.name] = name_kwargs['fixed'].copy()
        if name_kwargs.get('fixed_eye', False):
            k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
            A = param.var_dict[self.name]
            # BUGFIX: was `range(self.num_states)`, but the helper has no
            # `num_states` attribute (AttributeError); the number of states
            # lives in param.dim under dim_names[2].
            for kk in range(param.dim[self.dim_names[2]]):
                A[kk, 0:k, 0:k] = np.eye(k)
            param.var_dict[self.name] = A
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened num_states*m*n entries to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Unpack num_states*m*n entries into (num_states, m, n)."""
        m = kwargs[self.dim_names[0]]
        n = kwargs[self.dim_names[1]]
        num_states = kwargs[self.dim_names[2]]
        A = np.reshape(vector[vector_index:vector_index+num_states*m*n],
                (num_states, m, n))
        var_dict[self.name] = A
        return vector_index+num_states*m*n
    def get_properties(self):
        """Return property objects for the matrices and their dimensions."""
        properties = {}
        properties[self.name] = property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc="{0} is a {3} by {1} by {2} matrices".format(
                    self.name, self.dim_names[0],
                    self.dim_names[1], self.dim_names[2]),
                )
        for dim_name in self.dim_names:
            properties[dim_name] = property(
                    fget=get_dim_func(dim_name),
                    )
        return properties
class RectMatricesPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # num_states x m x n
self._var_col_name = 'var_col_{0}'.format(name) # num_states x n
self._var_row_name = var_row_name # num_states x m x m
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['m', 'n', 'num_states'] \
if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, m, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2, n2 = np.shape(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if (n != n2) or (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
self.dim_names[2]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Sample each A_k from its matrix-normal prior into var_dict.

        Requires the LQinv vectors already in `var_dict` when
        `var_row_name` is set (the covariance helper must run earlier
        in _prior_helper_list).
        """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        num_states, m, n = np.shape(mean_A)
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                # Jitter keeps each Qinv numerically positive definite.
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                        "Perhaps {} must be earlier in _prior_helper_list".format(
                            self._var_row_name)
                        )
        else:
            # No row-covariance parameter: use identity row precisions.
            Qinvs = np.array([np.eye(m) for _ in range(num_states)])
        As = [None for k in range(num_states)]
        for k in range(len(As)):
            As[k] = scipy.stats.matrix_normal(
                    mean=mean_A[k],
                    rowcov=pos_def_mat_inv(Qinvs[k]),
                    colcov=np.diag(var_col_A[k]),
                ).rvs()
        var_dict[self.name] = np.array(As)
        return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(m) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
S_prevprev = np.diag(var_col_A[k]**-1) + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_A[k] * var_col_A[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
As[k] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
var_dict[self.name] = np.array(As)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
else:
LQinvs = np.array([np.eye(m) for _ in range(num_states)])
for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
logprior += matrix_normal_logpdf(A_k,
mean=mean_A_k,
Lrowprec=LQinv_k,
Lcolprec=np.diag(var_col_A_k**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
for _ in prior.dim[self.dim_names[2]]])
grad[self.name] = np.array([
-1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
for k in range(prior.dim[self.dim_names[2]])
])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.array([
np.ones(A.shape[2])*var for _ in range(A.shape[0])
])
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
num_states = kwargs[self.dim_names[2]]
var = kwargs['var']
mean_A = np.zeros((num_states,m,n))
var_col_A = np.ones((num_states,n))*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
class RectMatricesPrecondHelper(PrecondHelper):
    """Preconditioner helper for num_states rectangular (m by n) matrices."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        if dim_names is None:
            dim_names = ['m', 'n', 'num_states']
        self.dim_names = dim_names
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Left-multiply each state's gradient by its row covariance Q_k."""
        row_cov = getattr(parameters, self._var_row_name)
        my_grad = grad[self.name]
        precond_grad[self.name] = np.array([
            np.dot(row_cov[k], my_grad[k])
            for k in range(row_cov.shape[0])
            ])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Sample preconditioned Gaussian noise for each state's matrix."""
        num_rows = parameters.dim[self.dim_names[0]]
        num_cols = parameters.dim[self.dim_names[1]]
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        draws = []
        for LQinv_k in LQinv:
            white = np.random.normal(loc=0, size=(num_rows, num_cols))
            draws.append(np.linalg.solve(LQinv_k.T, white))
        noise[self.name] = np.array(draws)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this variable."""
        target = getattr(parameters, self.name)
        correction[self.name] = np.zeros_like(target, dtype=float)
        return
if __name__ == "__main__":
# Demo of Parameters
class SquareParameters(BaseParameters):
""" Square Parameters """
_param_helper_list = [
SquareMatrixParamHelper(name='A', dim_names=['n'])
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "SquareParameters:"
my_str += "\nA:\n" + str(self.A)
return my_str
class SquareMatrixPrior(BasePrior):
""" Square Prior """
_Parameters = SquareParameters
_prior_helper_list = [
SquareMatrixPriorHelper(name='A', dim_names=['n'], var_row_name=None)
] | sgmcmc_ssm/variables/matrices.py | import numpy as np
import scipy.stats
from ..base_parameters import (
ParamHelper, PriorHelper, PrecondHelper,
get_value_func, get_hyperparam_func, get_dim_func,
set_value_func, set_hyperparam_func,
)
from .._utils import (
normal_logpdf,
matrix_normal_logpdf,
pos_def_mat_inv,
varp_stability_projection,
tril_vector_to_mat,
)
import logging
logger = logging.getLogger(name=__name__)
## Implementations of Vector, Square, Rectangular Parameters
# Single Vector
class VectorParamHelper(ParamHelper):
    """Parameter helper for a single length-n vector variable."""
    def __init__(self, name='mu', dim_names=None):
        self.name = name
        self.dim_names = ['n'] if dim_names is None else dim_names
        return
    def set_var(self, param, **kwargs):
        """Store the vector variable and record its length as a dim."""
        if self.name in kwargs:
            if np.ndim(kwargs[self.name]) != 1:
                raise ValueError("{} must be vector".format(self.name))
            # Fix: unpack the shape tuple so the stored dim is an int,
            # not the tuple np.shape(...) returns (which later breaks
            # np.eye(prior.dim['n']) in the prior helpers)
            n, = np.shape(kwargs[self.name])
            param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
            param._set_check_dim(**{self.dim_names[0]: n})
        else:
            raise ValueError("{} not provided".format(self.name))
        return
    def project_parameters(self, param, **kwargs):
        """Optionally pin the variable to a fixed value."""
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('fixed') is not None:
            param.var_dict[self.name] = name_kwargs['fixed'].copy()
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened variable to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Read n entries from vector back into var_dict; return new index."""
        n = kwargs[self.dim_names[0]]
        mu = np.reshape(vector[vector_index:vector_index+n], (n))
        var_dict[self.name] = mu
        return vector_index+n
    def get_properties(self):
        """Expose the variable's value and its dims as class properties."""
        properties = {}
        properties[self.name] = property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc="{0} is a {1} vector".format(
                    self.name, self.dim_names[0]),
                )
        for dim_name in self.dim_names:
            properties[dim_name] = property(
                    fget=get_dim_func(dim_name),
                    )
        return properties
class VectorPriorHelper(PriorHelper):
    """Gaussian prior helper for a single length-n vector variable.

    Hyperparameters:
        mean_{name}: length-n prior mean.
        var_col_{name}: scalar prior variance multiplier.

    When `var_row_name` is given, the precision is read from the Cholesky
    vector variable 'L{var_row_name}inv_vec'; otherwise identity is used.
    """
    def __init__(self, name='mu', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate and store the prior mean (vector) + scalar variance."""
        if self._mean_name in kwargs:
            if np.ndim(kwargs[self._mean_name]) != 1:
                raise ValueError("{} must be vector".format(self._mean_name))
            # Fix: store the length as an int, not the shape tuple,
            # so np.eye(prior.dim['n']) works downstream
            n = np.shape(kwargs[self._mean_name])[0]
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            if not np.isscalar(kwargs[self._var_col_name]):
                raise ValueError("{} must be scalar".format(self._var_col_name))
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        prior._set_check_dim(**{self.dim_names[0]: n})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Draw the vector from its Gaussian prior."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                # Jitter keeps the precision positive definite
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        var_dict[self.name] = np.random.multivariate_normal(
                mean=mean_mu,
                cov=var_col_mu*pos_def_mat_inv(Qinv),
                )
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Draw the vector from its Gaussian full conditional."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            # Fix: was `np.eye(self.prior[...])` -- helpers have no
            # `self.prior` attribute; the dimension lives on prior.dim
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        # Combine prior pseudo-observation with sufficient statistics
        S_prevprev = var_col_mu**-1 + \
                sufficient_stat[self.name]['S_prevprev']
        S_curprev = mean_mu * var_col_mu**-1 + \
                sufficient_stat[self.name]['S_curprev']
        post_mean_mu = S_curprev/S_prevprev
        var_dict[self.name] = np.random.multivariate_normal(
                mean=post_mean_mu,
                cov=pos_def_mat_inv(Qinv)/S_prevprev,
                )
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Add the Gaussian log-density of the vector to logprior."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
        else:
            LQinv = np.eye(prior.dim[self.dim_names[0]])
        # Fix: was `var_col_mu_k` (undefined NameError); the scalar
        # hyperparameter is var_col_mu
        logprior += normal_logpdf(parameters.var_dict[self.name],
                mean=mean_mu,
                Lprec=var_col_mu**-0.5 * LQinv,
                )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Store the gradient of the log-prior w.r.t. the vector."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        mu = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        grad[self.name] = -1.0 * np.dot(var_col_mu**-1 * Qinv, mu - mean_mu)
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparameters from the current parameter values."""
        var = kwargs['var']
        mu = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_mu = mu.copy()
        else:
            mean_mu = np.zeros_like(mu)
        var_col_mu = var
        prior_kwargs[self._mean_name] = mean_mu
        prior_kwargs[self._var_col_name] = var_col_mu
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Build default (zero-mean) prior hyperparameters from dims."""
        n = kwargs[self.dim_names[0]]
        var = kwargs['var']
        default_kwargs[self._mean_name] = np.zeros((n))
        default_kwargs[self._var_col_name] = var
        return
class VectorPrecondHelper(PrecondHelper):
    """Preconditioner helper for a single vector variable."""
    def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        if dim_names is None:
            dim_names = ['n']
        self.dim_names = dim_names
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Precondition the gradient by the covariance Q."""
        cov = getattr(parameters, self._var_row_name)
        precond_grad[self.name] = np.dot(cov, grad[self.name])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw noise with covariance Q via a solve against LQinv.T."""
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        white = np.random.normal(loc=0, size=(LQinv.shape[0]))
        noise[self.name] = np.linalg.solve(LQinv.T, white)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this variable."""
        target = getattr(parameters, self.name)
        correction[self.name] = np.zeros_like(target, dtype=float)
        return
# Multiple Vectors
class VectorsParamHelper(ParamHelper):
    """Parameter helper for num_states vectors of length n."""
    def __init__(self, name='mu', dim_names=None):
        self.name = name
        self.dim_names = dim_names if dim_names is not None \
                else ['n', 'num_states']
        return
    def set_var(self, param, **kwargs):
        """Store the num_states by n array and record its dimensions."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        num_states, n = np.shape(kwargs[self.name])
        param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
        param._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        return
    def project_parameters(self, param, **kwargs):
        """Optionally pin the variable to a fixed value."""
        fixed = kwargs.get(self.name, {}).get('fixed')
        if fixed is not None:
            param.var_dict[self.name] = fixed.copy()
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened variable to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Read num_states*n entries back into var_dict; return new index."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        total = num_states*n
        var_dict[self.name] = np.reshape(
                vector[vector_index:vector_index+total], (num_states, n))
        return vector_index+total
    def get_properties(self):
        """Expose the variable's value and its dims as class properties."""
        properties = {
            self.name: property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc="{0} is {2} {1} vectors".format(
                    self.name, self.dim_names[0], self.dim_names[1]),
                ),
            }
        for dim_name in self.dim_names:
            properties[dim_name] = property(fget=get_dim_func(dim_name))
        return properties
class VectorsPriorHelper(PriorHelper):
    """Gaussian prior helper for num_states vectors of length n.

    Hyperparameters:
        mean_{name}: num_states by n prior means.
        var_col_{name}: length num_states variance multipliers.

    When `var_row_name` is given, per-state precisions are read from the
    Cholesky vector variable 'L{var_row_name}inv_vec'; else identity.
    """
    def __init__(self, name='mu', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name) # num_states by n
        self._var_col_name = 'var_col_{0}'.format(name) # num_states by n
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate and store prior means + per-state variances; set dims."""
        if self._mean_name in kwargs:
            num_states, n = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            num_states2 = np.size(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if (num_states != num_states2):
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Draw each state's vector from its Gaussian prior."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                # Jitter keeps each precision positive definite
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            # Fix: iterate over range(num_states) -- prior.dim[...] is an
            # int and iterating it directly raises TypeError
            Qinvs = np.array([np.eye(n) for _ in range(num_states)])
        mus = [np.random.multivariate_normal(
                mean=mean_mu[k],
                cov=var_col_mu[k]*pos_def_mat_inv(Qinvs[k]),
                ) for k in range(num_states)]
        var_dict[self.name] = np.array(mus)
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Draw each state's vector from its Gaussian full conditional."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        num_states, n = np.shape(mean_mu)
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinvs = np.array([np.eye(n) for _ in range(num_states)])
        mus = [None for k in range(num_states)]
        for k in range(len(mus)):
            # Combine prior pseudo-observation with sufficient statistics
            S_prevprev = var_col_mu[k]**-1 + \
                    sufficient_stat[self.name]['S_prevprev'][k]
            S_curprev = mean_mu[k] * var_col_mu[k]**-1 + \
                    sufficient_stat[self.name]['S_curprev'][k]
            post_mean_mu_k = S_curprev/S_prevprev
            mus[k] = np.random.multivariate_normal(
                    mean=post_mean_mu_k,
                    cov=pos_def_mat_inv(Qinvs[k])/S_prevprev,
                    )
        var_dict[self.name] = np.array(mus)
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Add each state's Gaussian log-density to logprior."""
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        num_states, n = np.shape(mean_mu)
        if self._var_row_name is not None:
            LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
        else:
            LQinvs = np.array([np.eye(n)
                for _ in range(num_states)])
        for mu_k, mean_mu_k, var_col_mu_k, LQinv_k in zip(
                parameters.var_dict[self.name], mean_mu, var_col_mu, LQinvs):
            logprior += normal_logpdf(mu_k,
                    mean=mean_mu_k,
                    Lprec=var_col_mu_k**-0.5 * LQinv_k,
                    )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Store the gradient of the log-prior w.r.t. each state's vector."""
        mu = parameters.var_dict[self.name]
        mean_mu = prior.hyperparams[self._mean_name]
        var_col_mu = prior.hyperparams[self._var_col_name]
        num_states, n = np.shape(mean_mu)
        if self._var_row_name is not None:
            Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinvs = np.array([np.eye(n)
                for _ in range(num_states)])
        grad[self.name] = np.array([
            -1.0 * np.dot(var_col_mu[k]**-1 * Qinvs[k], mu[k] - mean_mu[k])
            for k in range(num_states)])
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparameters from the current parameter values."""
        var = kwargs['var']
        mu = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_mu = mu.copy()
        else:
            mean_mu = np.zeros_like(mu)
        # Fix: was `A.shape[0]` (NameError -- no `A` in scope); the
        # number of states is mu.shape[0]
        var_col_mu = np.array([
            var for _ in range(mu.shape[0])
            ])
        prior_kwargs[self._mean_name] = mean_mu
        prior_kwargs[self._var_col_name] = var_col_mu
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Build default (zero-mean) prior hyperparameters from dims."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        var = kwargs['var']
        default_kwargs[self._mean_name] = np.zeros((num_states, n))
        default_kwargs[self._var_col_name] = np.ones((num_states))*var
        return
class VectorsPrecondHelper(PrecondHelper):
    """Preconditioner helper for num_states vectors of length n."""
    def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        if dim_names is None:
            dim_names = ['n', 'num_states']
        self.dim_names = dim_names
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Precondition each state's gradient by its covariance Q_k."""
        cov = getattr(parameters, self._var_row_name)
        my_grad = grad[self.name]
        precond_grad[self.name] = np.array([
            np.dot(cov[k], my_grad[k])
            for k in range(cov.shape[0])
            ])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Sample noise with covariance Q_k for each state."""
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        draws = []
        for LQinv_k in LQinv:
            white = np.random.normal(loc=0, size=LQinv.shape[-1])
            draws.append(np.linalg.solve(LQinv_k.T, white))
        noise[self.name] = np.array(draws)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this variable."""
        target = getattr(parameters, self.name)
        correction[self.name] = np.zeros_like(target, dtype=float)
        return
# Single Square
class SquareMatrixParamHelper(ParamHelper):
    """Parameter helper for a single n by n matrix variable."""
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = dim_names if dim_names is not None else ['n']
        return
    def set_var(self, param, **kwargs):
        """Store the square matrix and record its dimension."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        n, n2 = np.shape(kwargs[self.name])
        if n != n2:
            raise ValueError("{} must be square matrices".format(self.name))
        param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
        param._set_check_dim(**{self.dim_names[0]: n})
        return
    def project_parameters(self, param, **kwargs):
        """Optionally apply stability projection and/or a fixed value."""
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('thresh', True):
            # Default behavior: shrink eigenvalues toward stability
            cutoff = name_kwargs.get('eigenvalue_cutoff', 0.9999)
            param.var_dict[self.name] = varp_stability_projection(
                    param.var_dict[self.name],
                    eigenvalue_cutoff=cutoff,
                    var_name=self.name,
                    logger=logger)
        fixed = name_kwargs.get('fixed')
        if fixed is not None:
            param.var_dict[self.name] = fixed.copy()
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened matrix to vector_list."""
        flat = var_dict[self.name].flatten()
        vector_list.append(flat)
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Read n*n entries back into an n by n matrix; return new index."""
        n = kwargs[self.dim_names[0]]
        count = n**2
        var_dict[self.name] = np.reshape(
                vector[vector_index:vector_index+count], (n, n))
        return vector_index+count
    def get_properties(self):
        """Expose the matrix value and its dims as class properties."""
        properties = {
            self.name: property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc="{0} is a {1} by {1} matrix".format(
                    self.name, self.dim_names[0]),
                ),
            }
        for dim_name in self.dim_names:
            properties[dim_name] = property(fget=get_dim_func(dim_name))
        return properties
class SquareMatrixPriorHelper(PriorHelper):
    """Matrix-normal prior helper for a single n by n matrix variable.

    Hyperparameters:
        mean_{name}: n by n prior mean.
        var_col_{name}: length-n per-column prior variances.

    When `var_row_name` is given, the row precision is read from the
    Cholesky vector variable 'L{var_row_name}inv_vec'; else identity.
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate and store the prior mean + column variances; set dims."""
        if self._mean_name in kwargs:
            n, n2 = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            n3 = np.size(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("{} must be square".format(self._mean_name))
        if n != n3:
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Draw the matrix from its matrix-normal prior."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                # Jitter keeps the precision positive definite
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=mean_A,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=np.diag(var_col_A),
                ).rvs()
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Draw the matrix from its matrix-normal full conditional."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            # Fix: was `np.eye(self.prior[...])` -- helpers have no
            # `self.prior` attribute; the dimension lives on prior.dim
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        # Combine prior (as pseudo-observations) with sufficient statistics
        S_prevprev = np.diag(var_col_A**-1) + \
                sufficient_stat[self.name]['S_prevprev']
        S_curprev = mean_A * var_col_A**-1 + \
                sufficient_stat[self.name]['S_curprev']
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=pos_def_mat_inv(S_prevprev),
                ).rvs()
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Add the matrix-normal log-density of the matrix to logprior."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
        else:
            LQinv = np.eye(prior.dim[self.dim_names[0]])
        logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
                mean=mean_A,
                Lrowprec=LQinv,
                Lcolprec=np.diag(var_col_A**-0.5),
                )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Store the gradient of the log-prior w.r.t. the matrix."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparameters from the current parameter values."""
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.ones(A.shape[0])*var
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Build default (zero-mean) prior hyperparameters from dims."""
        n = kwargs[self.dim_names[0]]
        var = kwargs['var']
        default_kwargs[self._mean_name] = np.zeros((n,n))
        default_kwargs[self._var_col_name] = np.ones(n)*var
        return
class SquareMatrixPrecondHelper(PrecondHelper):
    """Preconditioner helper for a single n by n matrix variable."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        if dim_names is None:
            dim_names = ['n']
        self.dim_names = dim_names
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Left-multiply the gradient by the row covariance Q."""
        cov = getattr(parameters, self._var_row_name)
        precond_grad[self.name] = np.dot(cov, grad[self.name])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Draw matrix-shaped noise preconditioned by Q."""
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        white = np.random.normal(loc=0, size=LQinv.shape)
        noise[self.name] = np.linalg.solve(LQinv.T, white)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this variable."""
        target = getattr(parameters, self.name)
        correction[self.name] = np.zeros_like(target, dtype=float)
        return
# Multiple Square
class SquareMatricesParamHelper(ParamHelper):
    """Parameter helper for num_states square (n by n) matrices."""
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = dim_names if dim_names is not None \
                else ['n', 'num_states']
        return
    def set_var(self, param, **kwargs):
        """Store the num_states by n by n array and record its dimensions."""
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        num_states, n, n2 = np.shape(kwargs[self.name])
        if n != n2:
            raise ValueError("{} must be square matrices".format(self.name))
        param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
        param._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        return
    def project_parameters(self, param, **kwargs):
        """Optionally apply per-state stability projection / fixed value."""
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('thresh', True):
            # Default behavior: project each state's matrix toward stability
            cutoff = name_kwargs.get('eigenvalue_cutoff', 0.9999)
            A = param.var_dict[self.name]
            for k in range(len(A)):
                A[k] = varp_stability_projection(A[k],
                        eigenvalue_cutoff=cutoff,
                        var_name=self.name,
                        logger=logger)
            param.var_dict[self.name] = A
        fixed = name_kwargs.get('fixed')
        if fixed is not None:
            param.var_dict[self.name] = fixed.copy()
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """Append the flattened matrices to vector_list."""
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """Read num_states*n*n entries back into var_dict; return new index."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        total = num_states*n**2
        var_dict[self.name] = np.reshape(
                vector[vector_index:vector_index+total], (num_states, n, n))
        return vector_index+total
    def get_properties(self):
        """Expose the matrices' value and their dims as class properties."""
        properties = {
            self.name: property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc="{0} is a {2} of {1} by {1} matrices".format(
                    self.name, self.dim_names[0], self.dim_names[1]),
                ),
            }
        for dim_name in self.dim_names:
            properties[dim_name] = property(fget=get_dim_func(dim_name))
        return properties
class SquareMatricesPriorHelper(PriorHelper):
    """Matrix-normal prior helper for num_states square (n by n) matrices.

    Hyperparameters:
        mean_{name}: num_states by n by n prior means.
        var_col_{name}: num_states by n per-column variances.

    When `var_row_name` is given, per-state row precisions are read from
    the Cholesky vector variable 'L{var_row_name}inv_vec'; else identity.
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)
        self._var_col_name = 'var_col_{0}'.format(name)
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """Validate and store prior means + column variances; set dims."""
        if self._mean_name in kwargs:
            num_states, n, n2 = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            num_states2, n3 = np.shape(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("{} must be square".format(self._mean_name))
        if (n != n3) or (num_states != num_states2):
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{self.dim_names[0]: n,
                                self.dim_names[1]: num_states})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """Draw each state's matrix from its matrix-normal prior."""
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                # Jitter keeps each precision positive definite
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinvs = np.array([np.eye(n) for _ in range(num_states)])
        As = [scipy.stats.matrix_normal(
                mean=mean_A[k],
                rowcov=pos_def_mat_inv(Qinvs[k]),
                colcov=np.diag(var_col_A[k]),
                ).rvs() for k in range(num_states)]
        var_dict[self.name] = np.array(As)
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """Draw each state's matrix from its matrix-normal full conditional."""
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinvs = np.array([np.eye(n) for _ in range(num_states)])
        As = [None for k in range(num_states)]
        for k in range(len(As)):
            # Combine prior (as pseudo-observations) with sufficient stats
            S_prevprev = np.diag(var_col_A[k]**-1) + \
                    sufficient_stat[self.name]['S_prevprev'][k]
            S_curprev = mean_A[k] * var_col_A[k]**-1 + \
                    sufficient_stat[self.name]['S_curprev'][k]
            As[k] = scipy.stats.matrix_normal(
                    mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                    rowcov=pos_def_mat_inv(Qinvs[k]),
                    colcov=pos_def_mat_inv(S_prevprev),
                    ).rvs()
        var_dict[self.name] = np.array(As)
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """Add each state's matrix-normal log-density to logprior."""
        n = prior.dim[self.dim_names[0]]
        num_states = prior.dim[self.dim_names[1]]
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv_vec = getattr(parameters, self._lt_vec_name)
            LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                for LQinv_vec_k in LQinv_vec])
        else:
            LQinvs = np.array([np.eye(n) for _ in range(num_states)])
        for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
                parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
            logprior += matrix_normal_logpdf(A_k,
                    mean=mean_A_k,
                    Lrowprec=LQinv_k,
                    Lcolprec=np.diag(var_col_A_k**-0.5),
                    )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """Store the gradient of the log-prior w.r.t. each state's matrix."""
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        num_states = prior.dim[self.dim_names[1]]
        if self._var_row_name is not None:
            Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            # Fix: iterate over range(num_states) -- prior.dim[...] is an
            # int and iterating it directly raises TypeError
            Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                for _ in range(num_states)])
        grad[self.name] = np.array([
            -1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
            for k in range(num_states)
            ])
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """Build prior hyperparameters from the current parameter values."""
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        # Fix: column variances must have length n = A.shape[1] per state
        # (A.shape[0] is num_states); set_hyperparams requires shape
        # (num_states, n)
        var_col_A = np.array([
            np.ones(A.shape[1])*var for _ in range(A.shape[0])
            ])
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """Build default (zero-mean) prior hyperparameters from dims."""
        n = kwargs[self.dim_names[0]]
        num_states = kwargs[self.dim_names[1]]
        var = kwargs['var']
        default_kwargs[self._mean_name] = np.zeros((num_states, n,n))
        default_kwargs[self._var_col_name] = np.ones((num_states,n))*var
        return
class SquareMatricesPrecondHelper(PrecondHelper):
    """Preconditioner helper for num_states square (n by n) matrices."""
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        if dim_names is None:
            dim_names = ['n', 'num_states']
        self.dim_names = dim_names
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """Left-multiply each state's gradient by its row covariance Q_k."""
        cov = getattr(parameters, self._var_row_name)
        my_grad = grad[self.name]
        precond_grad[self.name] = np.array([
            np.dot(cov[k], my_grad[k])
            for k in range(cov.shape[0])
            ])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """Sample matrix-shaped noise preconditioned by Q_k per state."""
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        draws = []
        for LQinv_k in LQinv:
            white = np.random.normal(loc=0, size=LQinv_k.shape)
            draws.append(np.linalg.solve(LQinv_k.T, white))
        noise[self.name] = np.array(draws)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """Correction term is identically zero for this variable."""
        target = getattr(parameters, self.name)
        correction[self.name] = np.zeros_like(target, dtype=float)
        return
# Single Rectangular (m by n)
class RectMatrixParamHelper(ParamHelper):
    """ Parameter helper for a single m by n rectangular matrix A. """
    def __init__(self, name='A', dim_names=None):
        self.name = name
        if dim_names is None:
            dim_names = ['m', 'n']
        self.dim_names = dim_names
        return
    def set_var(self, param, **kwargs):
        """ Store A in param.var_dict and record its dimensions. """
        if self.name not in kwargs:
            raise ValueError("{} not provided".format(self.name))
        value = kwargs[self.name]
        m, n = np.shape(value)
        param.var_dict[self.name] = np.array(value).astype(float)
        param._set_check_dim(**{
            self.dim_names[0]: m,
            self.dim_names[1]: n,
            })
        return
    def project_parameters(self, param, **kwargs):
        """ Optionally stability-project, fix, or pin the identity into A. """
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('thresh', False):
            # Shrink eigenvalues toward the cutoff for stability
            param.var_dict[self.name] = varp_stability_projection(
                    param.var_dict[self.name],
                    eigenvalue_cutoff=name_kwargs.get(
                        'eigenvalue_cutoff', 0.9999),
                    var_name=self.name,
                    logger=logger)
        if name_kwargs.get('fixed') is not None:
            param.var_dict[self.name] = name_kwargs['fixed'].copy()
        if name_kwargs.get('fixed_eye', False):
            # Pin the leading k x k block to the identity
            k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
            A = param.var_dict[self.name]
            A[0:k, 0:k] = np.eye(k)
            param.var_dict[self.name] = A
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """ Append flattened A to the vector list. """
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """ Read m*n entries from vector, reshape into A, return new index. """
        m = kwargs[self.dim_names[0]]
        n = kwargs[self.dim_names[1]]
        var_dict[self.name] = np.reshape(
                vector[vector_index:vector_index+m*n], (m, n))
        return vector_index + m*n
    def get_properties(self):
        """ Build property objects for A and its dimensions. """
        properties = {}
        properties[self.name] = property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc="{0} is a {1} by {2} matrix".format(
                    self.name, self.dim_names[0], self.dim_names[1]),
                )
        for dim_name in self.dim_names:
            properties[dim_name] = property(
                    fget=get_dim_func(dim_name),
                    )
        return properties
class RectMatrixPriorHelper(PriorHelper):
    """ Matrix-normal prior helper for a single m by n matrix A.

    Hyperparameters:
        mean_A : m by n ndarray, prior mean of A
        var_col_A : length-n ndarray, prior column variances
    When `var_row_name` is given (e.g. 'Q'), the row covariance is tied to
    that parameter's Cholesky factor LQinv; otherwise the identity is used.
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)  # m by n ndarray
        self._var_col_name = 'var_col_{0}'.format(name)  # n ndarray
        self._var_row_name = var_row_name
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)  # m by m ndarray
        self.dim_names = ['m', 'n'] if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """ Validate and store mean and column-variance hyperparameters. """
        if self._mean_name in kwargs:
            m, n = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            n2 = np.size(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if n != n2:
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{
            self.dim_names[0]: m,
            self.dim_names[1]: n,
            })
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """ Draw A ~ MatrixNormal(mean_A, Q, diag(var_col_A)). """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                # Jitter for numerical stability of the inverse
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=mean_A,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=np.diag(var_col_A),
                ).rvs()
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """ Draw A from its matrix-normal conditional posterior. """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
                Qinv = LQinv.dot(LQinv.T) + \
                        1e-9*np.eye(prior.dim[self.dim_names[0]])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            # Bugfix: was `np.eye(self.prior[self.dim_names[0]])`, which
            # raised AttributeError (the helper has no `prior` attribute);
            # use prior.dim as sample_prior does.
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        S_prevprev = np.diag(var_col_A**-1) + \
                sufficient_stat[self.name]['S_prevprev']
        S_curprev = mean_A * var_col_A**-1 + \
                sufficient_stat[self.name]['S_curprev']
        var_dict[self.name] = scipy.stats.matrix_normal(
                mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                rowcov=pos_def_mat_inv(Qinv),
                colcov=pos_def_mat_inv(S_prevprev),
                ).rvs()
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """ Add the matrix-normal log prior density of A to logprior. """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        if self._var_row_name is not None:
            LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
        else:
            LQinv = np.eye(prior.dim[self.dim_names[0]])
        logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
                mean=mean_A,
                Lrowprec=LQinv,
                Lcolprec=np.diag(var_col_A**-0.5),
                )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """ Gradient of the log prior with respect to A. """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
        grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """ Build prior hyperparameters from the current parameters. """
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.ones(A.shape[1])*var
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """ Build default (zero-mean, constant-variance) hyperparameters. """
        m = kwargs[self.dim_names[0]]
        n = kwargs[self.dim_names[1]]
        var = kwargs['var']
        mean_A = np.zeros((m, n))
        var_col_A = np.ones(n)*var
        default_kwargs[self._mean_name] = mean_A
        default_kwargs[self._var_col_name] = var_col_A
        return
class RectMatrixPrecondHelper(PrecondHelper):
    """ Preconditioner helper for a single m by n matrix A.

    Preconditions gradients by the row covariance Q and samples noise
    with matching covariance.
    """
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        if dim_names is None:
            dim_names = ['m', 'n']
        self.dim_names = dim_names
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """ Left-multiply the gradient of A by Q. """
        Q = getattr(parameters, self._var_row_name)
        precond_grad[self.name] = Q.dot(grad[self.name])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """ Sample Gaussian noise with row covariance Q. """
        num_rows = parameters.dim[self.dim_names[0]]
        num_cols = parameters.dim[self.dim_names[1]]
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        white = np.random.normal(loc=0, size=(num_rows, num_cols))
        noise[self.name] = np.linalg.solve(LQinv.T, white)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """ Correction term is zero for this preconditioner. """
        correction[self.name] = np.zeros_like(getattr(parameters, self.name),
                dtype=float)
        return
# Multiple Rectangular
class RectMatricesParamHelper(ParamHelper):
    """ Parameter helper for a stack of num_states m by n matrices A. """
    def __init__(self, name='A', dim_names=None):
        self.name = name
        self.dim_names = ['m', 'n', 'num_states'] \
                if dim_names is None else dim_names
        return
    def set_var(self, param, **kwargs):
        """ Store A (num_states x m x n) and record its dimensions. """
        if self.name in kwargs:
            num_states, m, n = np.shape(kwargs[self.name])
            param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
            param._set_check_dim(**{
                self.dim_names[0]: m,
                self.dim_names[1]: n,
                self.dim_names[2]: num_states,
                })
        else:
            raise ValueError("{} not provided".format(self.name))
        return
    def project_parameters(self, param, **kwargs):
        """ Optionally stability-project, fix, or pin the identity into A. """
        name_kwargs = kwargs.get(self.name, {})
        if name_kwargs.get('thresh', False):
            # Shrink eigenvalues of each A_k toward the cutoff for stability
            A = param.var_dict[self.name]
            for k, A_k in enumerate(A):
                A_k = varp_stability_projection(A_k,
                        eigenvalue_cutoff=name_kwargs.get(
                            'eigenvalue_cutoff', 0.9999),
                        var_name=self.name,
                        logger=logger)
                A[k] = A_k
            param.var_dict[self.name] = A
        if name_kwargs.get('fixed') is not None:
            param.var_dict[self.name] = name_kwargs['fixed'].copy()
        if name_kwargs.get('fixed_eye', False):
            # Pin the leading k x k block of every state's matrix to identity
            k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
            A = param.var_dict[self.name]
            # Bugfix: was `range(self.num_states)` -- the helper has no
            # num_states attribute; the dimension lives in param.dim.
            for kk in range(param.dim[self.dim_names[2]]):
                A[kk, 0:k, 0:k] = np.eye(k)
            param.var_dict[self.name] = A
        return
    def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
        """ Append flattened A to the vector list. """
        vector_list.append(var_dict[self.name].flatten())
        return
    def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
        """ Read num_states*m*n entries, reshape into A, return new index. """
        m = kwargs[self.dim_names[0]]
        n = kwargs[self.dim_names[1]]
        num_states = kwargs[self.dim_names[2]]
        A = np.reshape(vector[vector_index:vector_index+num_states*m*n],
                (num_states, m, n))
        var_dict[self.name] = A
        return vector_index+num_states*m*n
    def get_properties(self):
        """ Build property objects for A and its dimensions. """
        properties = {}
        properties[self.name] = property(
                fget=get_value_func(self.name),
                fset=set_value_func(self.name),
                doc="{0} is a {3} by {1} by {2} matrices".format(
                    self.name, self.dim_names[0],
                    self.dim_names[1], self.dim_names[2]),
                )
        for dim_name in self.dim_names:
            properties[dim_name] = property(
                    fget=get_dim_func(dim_name),
                    )
        return properties
class RectMatricesPriorHelper(PriorHelper):
    """ Matrix-normal prior helper for a stack of m by n matrices A_k.

    Hyperparameters:
        mean_A : num_states x m x n ndarray, prior means
        var_col_A : num_states x n ndarray, prior column variances
    When `var_row_name` is given (e.g. 'Q'), each state's row covariance is
    tied to that parameter's Cholesky factor; otherwise identity is used.
    """
    def __init__(self, name='A', dim_names=None, var_row_name=None):
        self.name = name
        self._mean_name = 'mean_{0}'.format(name)  # num_states x m x n
        self._var_col_name = 'var_col_{0}'.format(name)  # num_states x n
        self._var_row_name = var_row_name  # num_states x m x m
        self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
        self.dim_names = ['m', 'n', 'num_states'] \
                if dim_names is None else dim_names
        return
    def set_hyperparams(self, prior, **kwargs):
        """ Validate and store mean and column-variance hyperparameters. """
        if self._mean_name in kwargs:
            num_states, m, n = np.shape(kwargs[self._mean_name])
        else:
            raise ValueError("{} must be provided".format(self._mean_name))
        if self._var_col_name in kwargs:
            num_states2, n2 = np.shape(kwargs[self._var_col_name])
        else:
            raise ValueError("{} must be provided".format(self._var_col_name))
        if (n != n2) or (num_states != num_states2):
            raise ValueError("prior dimensions don't match")
        prior._set_check_dim(**{
            self.dim_names[0]: m,
            self.dim_names[1]: n,
            self.dim_names[2]: num_states})
        prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
        prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
        return
    def sample_prior(self, prior, var_dict, **kwargs):
        """ Draw each A_k ~ MatrixNormal(mean_A_k, Q_k, diag(var_col_A_k)). """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        num_states, m, n = np.shape(mean_A)
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                # Jitter for numerical stability of the inverse
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinvs = np.array([np.eye(m) for _ in range(num_states)])
        As = [None for k in range(num_states)]
        for k in range(len(As)):
            As[k] = scipy.stats.matrix_normal(
                    mean=mean_A[k],
                    rowcov=pos_def_mat_inv(Qinvs[k]),
                    colcov=np.diag(var_col_A[k]),
                    ).rvs()
        var_dict[self.name] = np.array(As)
        return
    def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
        """ Draw each A_k from its matrix-normal conditional posterior. """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        num_states, m, n = np.shape(mean_A)
        if self._var_row_name is not None:
            if self._lt_vec_name in var_dict:
                LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                    for LQinv_vec_k in var_dict[self._lt_vec_name]])
                Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
                    for LQinv_k in LQinvs])
            else:
                raise ValueError("Missing {}\n".format(self._lt_vec_name) +
                    "Perhaps {} must be earlier in _prior_helper_list".format(
                        self._var_row_name)
                    )
        else:
            Qinvs = np.array([np.eye(m) for _ in range(num_states)])
        As = [None for k in range(num_states)]
        for k in range(len(As)):
            S_prevprev = np.diag(var_col_A[k]**-1) + \
                    sufficient_stat[self.name]['S_prevprev'][k]
            S_curprev = mean_A[k] * var_col_A[k]**-1 + \
                    sufficient_stat[self.name]['S_curprev'][k]
            As[k] = scipy.stats.matrix_normal(
                    mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
                    rowcov=pos_def_mat_inv(Qinvs[k]),
                    colcov=pos_def_mat_inv(S_prevprev),
                    ).rvs()
        var_dict[self.name] = np.array(As)
        return
    def logprior(self, prior, logprior, parameters, **kwargs):
        """ Add each A_k's matrix-normal log prior density to logprior. """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        num_states, m, n = np.shape(mean_A)
        if self._var_row_name is not None:
            LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
                for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
        else:
            LQinvs = np.array([np.eye(m) for _ in range(num_states)])
        for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
                parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
            logprior += matrix_normal_logpdf(A_k,
                    mean=mean_A_k,
                    Lrowprec=LQinv_k,
                    Lcolprec=np.diag(var_col_A_k**-0.5),
                    )
        return logprior
    def grad_logprior(self, prior, grad, parameters, **kwargs):
        """ Gradient of the log prior with respect to each A_k. """
        mean_A = prior.hyperparams[self._mean_name]
        var_col_A = prior.hyperparams[self._var_col_name]
        A = getattr(parameters, self.name)
        if self._var_row_name is not None:
            Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
        else:
            # Bugfix: prior.dim[...] is an int, so iterating it directly
            # raised TypeError; iterate over range(num_states) instead.
            Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                for _ in range(prior.dim[self.dim_names[2]])])
        grad[self.name] = np.array([
            -1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
            for k in range(prior.dim[self.dim_names[2]])
            ])
        return
    def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
        """ Build prior hyperparameters from the current parameters. """
        var = kwargs['var']
        A = getattr(parameters, self.name)
        if kwargs.get('from_mean', False):
            mean_A = A.copy()
        else:
            mean_A = np.zeros_like(A)
        var_col_A = np.array([
            np.ones(A.shape[2])*var for _ in range(A.shape[0])
            ])
        prior_kwargs[self._mean_name] = mean_A
        prior_kwargs[self._var_col_name] = var_col_A
        return
    def get_default_kwargs(self, default_kwargs, **kwargs):
        """ Build default (zero-mean, constant-variance) hyperparameters. """
        m = kwargs[self.dim_names[0]]
        n = kwargs[self.dim_names[1]]
        num_states = kwargs[self.dim_names[2]]
        var = kwargs['var']
        mean_A = np.zeros((num_states, m, n))
        var_col_A = np.ones((num_states, n))*var
        default_kwargs[self._mean_name] = mean_A
        default_kwargs[self._var_col_name] = var_col_A
        return
class RectMatricesPrecondHelper(PrecondHelper):
    """ Preconditioner helper for a stack of m by n matrices A_k.

    Preconditions each gradient slice by its row covariance Q_k and
    samples noise with matching covariance.
    """
    def __init__(self, name='A', dim_names=None, var_row_name='Q'):
        self.name = name
        self._var_row_name = var_row_name
        if dim_names is None:
            dim_names = ['m', 'n', 'num_states']
        self.dim_names = dim_names
        return
    def precondition(self, preconditioner,
            precond_grad, grad, parameters, **kwargs):
        """ Left-multiply each gradient slice by its Q_k. """
        Q = getattr(parameters, self._var_row_name)
        raw_grad = grad[self.name]
        precond_grad[self.name] = np.array(
                [np.dot(Q_k, g_k) for Q_k, g_k in zip(Q, raw_grad)])
        return
    def precondition_noise(self, preconditioner,
            noise, parameters, **kwargs):
        """ Sample Gaussian noise with row covariance Q_k for each state. """
        num_rows = parameters.dim[self.dim_names[0]]
        num_cols = parameters.dim[self.dim_names[1]]
        LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
        samples = []
        for LQinv_k in LQinv:
            white = np.random.normal(loc=0, size=(num_rows, num_cols))
            samples.append(np.linalg.solve(LQinv_k.T, white))
        noise[self.name] = np.array(samples)
        return
    def correction_term(self, preconditioner, correction, parameters, **kwargs):
        """ Correction term is zero for this preconditioner. """
        correction[self.name] = np.zeros_like(getattr(parameters, self.name),
                dtype=float)
        return
if __name__ == "__main__":
# Demo of Parameters
class SquareParameters(BaseParameters):
""" Square Parameters """
_param_helper_list = [
SquareMatrixParamHelper(name='A', dim_names=['n'])
]
for param_helper in _param_helper_list:
properties = param_helper.get_properties()
for name, prop in properties.items():
vars()[name] = prop
def __str__(self):
my_str = "SquareParameters:"
my_str += "\nA:\n" + str(self.A)
return my_str
class SquareMatrixPrior(BasePrior):
""" Square Prior """
_Parameters = SquareParameters
_prior_helper_list = [
SquareMatrixPriorHelper(name='A', dim_names=['n'], var_row_name=None)
] | 0.587115 | 0.158272 |
# Built-in libraries
import os
# External libraries
import pandas as pd
import numpy as np
#%% Functions to select specific glacier numbers
def get_same_glaciers(glac_fp):
    """
    Get same 1000 glaciers for testing of priors

    Parameters
    ----------
    glac_fp : str
        filepath to where netcdf files of individual glaciers are held

    Returns
    -------
    glac_list : list
        sorted list of rgi glacier numbers
    """
    # Glacier number is the middle component of 'REG.NUMBER.nc' filenames
    nc_files = [fn for fn in os.listdir(glac_fp) if fn.endswith('.nc')]
    glac_list = sorted(fn.split('.')[1] for fn in nc_files)
    return glac_list
def get_shean_glacier_nos(region_no, number_glaciers=0, option_random=0):
    """
    Generate list of glaciers that have calibration data and select number of glaciers to include.

    The list is currently sorted in terms of area such that the largest glaciers are modeled first.

    Parameters
    ----------
    region_no : int
        region number (Shean data available for regions 13, 14, and 15)
    number_glaciers : int
        number of glaciers to include in model run (default = 0)
    option_random : int
        option to select glaciers randomly for model run (default = 0, not random)

    Returns
    -------
    num : list of strings
        list of rgi glacier numbers
    """
    # safety, convert input to int
    region_no = int(region_no)
    # get shean's data, convert to dataframe, get
    # glacier numbers
    # NOTE(review): path is relative to the current working directory, so
    # this only works when run from the model setup directory -- confirm.
    current_directory = os.getcwd()
    csv_path = current_directory + '/../DEMs/Shean_2019_0213/hma_mb_20190215_0815_std+mean_all_filled_bolch.csv'
    ds_all = pd.read_csv(csv_path)
    # RGIId is numeric here (e.g., 13.00001), so glaciers of region R
    # satisfy R < RGIId < R+1
    ds_reg = ds_all[(ds_all['RGIId'] > region_no) & (ds_all['RGIId'] < region_no + 1)].copy()
    if option_random == 1:
        # random subset of the requested size
        ds_reg = ds_reg.sample(n=number_glaciers)
    else:
        # largest glaciers first
        # NOTE(review): number_glaciers is not applied in this branch --
        # all glaciers in the region are returned; confirm intended.
        ds_reg = ds_reg.sort_values('area_m2', ascending=False)
    ds_reg.reset_index(drop=True, inplace=True)
    # Glacier number and index for comparison
    # Fractional part of RGIId scaled to a 5-digit integer glacier number
    ds_reg['glacno'] = ((ds_reg['RGIId'] % 1) * 10**5).round(0).astype(int)
    # Zero-padded string form via fixed-point formatting, e.g., 123 -> '00123'
    ds_reg['glacno_str'] = (ds_reg['glacno'] / 10**5).apply(lambda x: '%.5f' % x).astype(str).str.split('.').str[1]
    num = list(ds_reg['glacno_str'].values)
    num = sorted(num)
    return num
def glac_num_fromrange(int_low, int_high):
    """
    Generate list of glaciers for all numbers between two integers.

    Parameters
    ----------
    int_low : int64
        low value of range (inclusive)
    int_high : int64
        high value of range (inclusive)

    Returns
    -------
    y : list
        list of rgi glacier numbers as zero-padded 5-character strings
    """
    return ['{:05d}'.format(i) for i in range(int_low, int_high + 1)]
def glac_fromcsv(csv_fullfn, cn='RGIId'):
    """
    Generate list of glaciers from csv file

    Parameters
    ----------
    csv_fullfn : str
        full filename (including path) of the csv file
    cn : str
        name of the column holding the RGI ids (default 'RGIId')

    Returns
    -------
    y : list
        list of glacier numbers, e.g., ['14.00001', '15.00001']
    """
    df = pd.read_csv(csv_fullfn)
    # Bugfix: honor the cn parameter instead of hard-coding 'RGIId';
    # ids are of the form 'RGI60-14.00001', keep the part after the dash
    return [x.split('-')[1] for x in df[cn].values]
#%%
# Model setup directory
main_directory = os.getcwd()
# Output directory
output_filepath = main_directory + '/../Output/'
# ===== GLACIER SELECTION =====
rgi_regionsO1 = [1] # 1st order region number (RGI V6.0)
rgi_regionsO2 = 'all' # 2nd order region number (RGI V6.0)
# RGI glacier number (RGI V6.0)
# Two options: (1) use glacier numbers for a given region (or 'all'), must have glac_no set to None
# (2) glac_no is not None, e.g., ['1.00001', '13.00001'], overrides rgi_glac_number
#rgi_glac_number = 'all'
rgi_glac_number = ['27108']
#rgi_glac_number = glac_num_fromrange(1,100)
#rgi_glac_number = glac_num_fromrange(1130,1153)
#rgi_glac_number = get_same_glaciers(output_filepath + 'cal_opt1/reg1/')
#rgi_glac_number = get_shean_glacier_nos(rgi_regionsO1[0], 1, option_random=1)
glac_no = None
#glac_no = glac_fromcsv(main_directory + '/../qgis_himat/trishuli_and_naltar_RGIIds.csv')
#glac_no = ['15.01152']
if glac_no is not None:
    # Derive the 1st order regions from the explicitly listed glacier ids
    rgi_regionsO1 = sorted(list(set([int(x.split('.')[0]) for x in glac_no])))
# ===== CLIMATE DATA =====
# Reference period runs
#ref_gcm_name = 'ERA-Interim' # reference climate dataset
ref_gcm_name = 'ERA5' # reference climate dataset
startyear = 1980 # first year of model run (reference dataset)
endyear = 2018 # last year of model run (reference dataset)
option_wateryear = 1 # 1: water year, 2: calendar year, 3: custom defined
#startyear = 2000 # first year of model run (reference dataset)
#endyear = 2018 # last year of model run (reference dataset)
#option_wateryear = 3 # 1: water year, 2: calendar year, 3: custom defined
constantarea_years = 0 # number of years to not let the area or volume change
spinupyears = 0 # spin up years
# Simulation runs (separate so calibration and simulations can be run at same time; also needed for bias adjustments)
#gcm_startyear = 2000 # first year of model run (simulation dataset)
#gcm_endyear = 2018 # last year of model run (simulation dataset)
gcm_startyear = 1980 # first year of model run (simulation dataset)
gcm_endyear = 2018 # last year of model run (simulation dataset)
gcm_spinupyears = 0 # spin up years for simulation
gcm_wateryear = 1 # water year for simulation
# Hindcast option (flips array so 1960-2000 would run 2000-1960 ensuring that glacier area at 2000 is correct)
hindcast = 0 # 1: run hindcast simulation, 0: do not
if hindcast == 1:
    constantarea_years = 18 # number of years to not let the area or volume change
    gcm_startyear = 1960 # first year of model run (simulation dataset)
    gcm_endyear = 2017 # last year of model run (simulation dataset)
# Synthetic options (synthetic refers to created climate data, e.g., repeat 1995-2015 for the next 100 years)
option_synthetic_sim = 0 # 1: run synthetic simulation, 0: do not
if option_synthetic_sim == 1:
    synthetic_startyear = 1995 # synthetic start year
    synthetic_endyear = 2015 # synthetic end year
    synthetic_spinupyears = 0 # synthetic spinup years
    synthetic_temp_adjust = 3 # Temperature adjustment factor for synthetic runs
    synthetic_prec_factor = 1.12 # Precipitation adjustment factor for synthetic runs
#%% SIMULATION OPTIONS
# MCMC options
sim_iters = 100 # number of simulations (needed for cal_opt 2)
sim_burn = 200 # number of burn-in (needed for cal_opt 2)
# Simulation output filepath
output_sim_fp = output_filepath + 'simulations/'
# Simulation output statistics (can include 'mean', 'std', '2.5%', '25%', 'median', '75%', '97.5%')
sim_stat_cns = ['mean', 'std']
# Bias adjustment options (0: no adjustment, 1: new prec scheme and temp from HH2015, 2: HH2015 methods)
option_bias_adjustment = 1
#%% ===== CALIBRATION OPTIONS =====
# Calibration option (1 = minimization, 2 = MCMC, 3=HH2015, 4=modified HH2015)
option_calibration = 4
# Calibration datasets ('shean', 'larsen', 'mcnabb', 'wgms_d', 'wgms_ee', 'group')
cal_datasets = ['braun']
#cal_datasets = ['berthier']
# Calibration output filepath
output_fp_cal = output_filepath + 'cal_opt' + str(option_calibration) + '/'
# OPTION 1: Minimization
# Model parameter bounds for each calibration round
precfactor_bnds_list_init = [(0.8, 2.0), (0.8,2), (0.8,2), (0.2,5)]
precgrad_bnds_list_init = [(0.0001,0.0001), (0.0001,0.0001), (0.0001,0.0001), (0.0001,0.0001)]
ddfsnow_bnds_list_init = [(0.003, 0.003), (0.00175, 0.0045), (0.00175, 0.0045), (0.00175, 0.0045)]
tempchange_bnds_list_init = [(0,0), (0,0), (-2.5,2.5), (-10,10)]
# Minimization details
method_opt = 'SLSQP' # SciPy optimization scheme ('SLSQP' or 'L-BFGS-B')
ftol_opt = 1e-3 # tolerance for SciPy optimization scheme
massbal_uncertainty_mwea = 0.1 # mass balance uncertainty [mwea] for glaciers lacking uncertainty data
zscore_tolerance_all = 1 # tolerance if multiple calibration points (shortcut that could be improved)
zscore_tolerance_single = 0.1 # tolerance if only a single calibration point (want this to be more exact)
zscore_update_threshold = 0.1 # threshold to update model params only if significantly better
extra_calrounds = 3 # additional calibration rounds in case optimization is getting stuck
# OPTION 2: MCMC
# Chain options
n_chains = 1 # number of chains (min 1, max 3)
mcmc_sample_no = 10000 # number of steps (10000 was found to be sufficient in HMA)
mcmc_burn_no = 0 # number of steps to burn-in (0 records all steps in chain)
mcmc_step = None # step option (None or 'am')
thin_interval = 1 # thin interval if need to reduce file size (best to leave at 1 if space allows)
# Precipitation factor distribution options
precfactor_disttype = 'gamma' # distribution type ('gamma', 'lognormal', 'uniform')
precfactor_gamma_region_dict_fullfn = main_directory + '/../Output/precfactor_gamma_region_dict.csv'
precfactor_gamma_region_df = pd.read_csv(precfactor_gamma_region_dict_fullfn)
# Map each region to its fitted gamma [alpha, beta] pair
precfactor_gamma_region_dict = dict(zip(
        precfactor_gamma_region_df.Region.values,
        [[precfactor_gamma_region_df.loc[x,'alpha'], precfactor_gamma_region_df.loc[x,'beta']]
        for x in precfactor_gamma_region_df.index.values]))
precfactor_gamma_alpha = 3.0
precfactor_gamma_beta = 0.84
precfactor_lognorm_mu = 0
precfactor_lognorm_tau = 4
precfactor_mu = 0
precfactor_sigma = 1.5
precfactor_boundlow = 0.5
precfactor_boundhigh = 1.5
precfactor_start = 1
# Temperature bias distribution options
tempchange_disttype = 'normal' # distribution type ('normal', 'truncnormal', 'uniform')
tempchange_norm_region_dict_fullfn = main_directory + '/../Output/tempchange_norm_region_dict.csv'
tempchange_norm_region_df = pd.read_csv(tempchange_norm_region_dict_fullfn)
# Map each region to its fitted normal [mu, sigma] pair
tempchange_norm_region_dict = dict(zip(
        tempchange_norm_region_df.Region.values,
        [[tempchange_norm_region_df.loc[x,'mu'], tempchange_norm_region_df.loc[x,'sigma']]
        for x in tempchange_norm_region_df.index.values]))
tempchange_mu = 0.91
tempchange_sigma = 1.4
tempchange_boundlow = -10
tempchange_boundhigh = 10
tempchange_start = tempchange_mu
# Degree-day factor of snow distribution options
ddfsnow_disttype = 'truncnormal' # distribution type ('truncnormal', 'uniform')
ddfsnow_mu = 0.0041
ddfsnow_sigma = 0.0015
ddfsnow_boundlow = 0
ddfsnow_boundhigh = np.inf
ddfsnow_start=ddfsnow_mu
#%% MODEL PARAMETERS
option_import_modelparams = 1 # 0: input values, 1: calibrated model parameters from netcdf files
precfactor = 1 # precipitation factor [-] (k_p in Radic etal 2013; c_prec in HH2015)
precgrad = 0.0001 # precipitation gradient on glacier [m-1]
ddfsnow = 0.0041 # degree-day factor of snow [m w.e. d-1 degC-1]
ddfsnow_iceratio = 0.7 # Ratio degree-day factor snow snow to ice
ddfice = ddfsnow / ddfsnow_iceratio # degree-day factor of ice [m w.e. d-1 degC-1]
tempchange = 0 # temperature bias [deg C]
lrgcm = -0.0065 # lapse rate from gcm to glacier [K m-1]
lrglac = -0.0065 # lapse rate on glacier for bins [K m-1]
tempsnow = 1.0 # temperature threshold for snow [deg C] (HH2015 used 1.5 degC +/- 1 degC)
frontalablation_k = 2 # frontal ablation rate [yr-1]
af = 0.7 # Bulk flow parameter for frontal ablation (m^-0.5)
# Calving width dictionary to override RGI elevation bins, which can be highly inaccurate at the calving front
width_calving_dict_fullfn = main_directory + '/../Calving_data/calvingfront_widths.csv'
width_calving_df = pd.read_csv(width_calving_dict_fullfn)
width_calving_dict = dict(zip(width_calving_df.RGIId, width_calving_df.front_width_m))
# Calving option (1=values from HH2015, 2=calibrate glaciers independently and use transfer fxns for others)
option_frontalablation_k = 1
# Calving parameter dictionary (according to Supplementary Table 3 in HH2015)
frontalablation_k0dict_fullfn = main_directory + '/../Calving_data/frontalablation_k0_dict.csv'
frontalablation_k0dict_df = pd.read_csv(frontalablation_k0dict_fullfn)
frontalablation_k0dict = dict(zip(frontalablation_k0dict_df.O1Region, frontalablation_k0dict_df.k0))
# Model parameter column names and filepaths
modelparams_colnames = ['lrgcm', 'lrglac', 'precfactor', 'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', 'tempchange']
# Model parameter filepath
modelparams_fp = output_filepath + 'cal_opt' + str(option_calibration) + '/'
#modelparams_fp = output_filepath + 'cal_opt2_spc_20190806/'
#%% CLIMATE DATA
# Filepaths, filenames, and options for the reference and GCM climate datasets.
# ERA-INTERIM (Reference data)
# Variable names
era_varnames = ['temperature', 'precipitation', 'geopotential', 'temperature_pressurelevels']
# Note: do not change variable names as these are set to run with the download_erainterim_data.py script.
# If option 2 is being used to calculate the lapse rates, then the pressure level data is unnecessary.
# Dates
eraint_start_date = '19790101'
eraint_end_date = '20180501'
# Resolution
grid_res = '0.5/0.5'
# Bounding box (N/W/S/E)
#bounding_box = '90/0/-90/360'
bounding_box = '50/70/25/105'
# Lapse rate option
# option 0 - lapse rates are constant defined by input
# option 1 (default) - lapse rates derived from gcm pressure level temperature data (varies spatially and temporally)
# option 2 - lapse rates derived from surrounding pixels (varies spatially and temporally)
# Note: Be careful with option 2 as the ocean vs land/glacier temperatures can cause unrealistic inversions
# This is the option used by Marzeion et al. (2012)
option_lr_method = 1
# ERA5
# NOTE(review): era5_fp is an absolute path to an external volume — machine-specific; verify before running.
era5_fp = '/Volumes/PyGEM_data/ERA5/'
era5_temp_fn = 'ERA5_temp_monthly.nc'
era5_tempstd_fn = 'ERA5_tempstd_monthly.nc'
era5_prec_fn = 'ERA5_totalprecip_monthly.nc'
era5_elev_fn = 'ERA5_geopotential_monthly.nc'
era5_pressureleveltemp_fn = 'ERA5_pressureleveltemp_monthly.nc'
era5_lr_fn = 'ERA5_lapserates_monthly.nc'
# ERA-Interim
eraint_fp = main_directory + '/../Climate_data/ERA_Interim/download/'
eraint_temp_fn = 'ERAInterim_Temp2m_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_prec_fn = 'ERAInterim_TotalPrec_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_elev_fn = 'ERAInterim_geopotential.nc'
eraint_pressureleveltemp_fn = 'ERAInterim_pressureleveltemp_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_lr_fn = ('ERAInterim_lapserates_' + eraint_start_date + '_' + eraint_end_date + '_opt' + str(option_lr_method) +
                '_world.nc')
# CMIP5 (GCM data)
cmip5_fp_var_prefix = main_directory + '/../Climate_data/cmip5/'
cmip5_fp_var_ending = '_r1i1p1_monNG/'
cmip5_fp_fx_prefix = main_directory + '/../Climate_data/cmip5/'
cmip5_fp_fx_ending = '_r0i0p0_fx/'
cmip5_fp_lr = main_directory + '/../Climate_data/cmip5/bias_adjusted_1995_2100/2018_0524/'
cmip5_lr_fn = 'biasadj_mon_lravg_1995_2015_R15.csv'
# COAWST (High-resolution climate data over HMA)
coawst_fp_unmerged = main_directory + '/../Climate_data/coawst/Monthly/'
coawst_fp = main_directory + '/../Climate_data/coawst/'
coawst_fn_prefix_d02 = 'wrfout_d02_Monthly_'
coawst_fn_prefix_d01 = 'wrfout_d01_Monthly_'
coawst_temp_fn_d02 = 'wrfout_d02_Monthly_T2_1999100100-2006123123.nc'
coawst_prec_fn_d02 = 'wrfout_d02_Monthly_TOTPRECIP_1999100100-2006123123.nc'
coawst_elev_fn_d02 = 'wrfout_d02_Monthly_HGHT.nc'
coawst_temp_fn_d01 = 'wrfout_d01_Monthly_T2_1999100100-2006123123.nc'
coawst_prec_fn_d01 = 'wrfout_d01_Monthly_TOTPRECIP_1999100100-2006123123.nc'
coawst_elev_fn_d01 = 'wrfout_d01_Monthly_HGHT.nc'
coawst_vns = ['T2', 'TOTPRECIP', 'HGHT']
# Bounding box of the d02 (inner, high-resolution) COAWST domain [deg]
coawst_d02_lon_min = 65
coawst_d02_lon_max = 99
coawst_d02_lat_min = 20
coawst_d02_lat_max = 38
#%% GLACIER DATA (RGI, ICE THICKNESS, ETC.)
# ===== RGI DATA =====
# Filepath for RGI files
rgi_fp = main_directory + '/../RGI/rgi60/00_rgi60_attribs/'
# Column names used when loading the RGI attribute tables
rgi_lat_colname = 'CenLat'
rgi_lon_colname = 'CenLon'
elev_colname = 'elev'
indexname = 'GlacNo'
rgi_O1Id_colname = 'glacno'
rgi_glacno_float_colname = 'RGIId_float'
# Column names from table to drop
rgi_cols_drop = ['GLIMSId','BgnDate','EndDate','Status','Connect','Linkages','Name']
# ===== ADDITIONAL DATA (hypsometry, ice thickness, width) =====
# Filepath for the hypsometry files
binsize = 10 # Elevation bin height [m]
# Select the hypsometry / thickness / width dataset; each branch below sets the matching
# filepaths, per-region filename dictionaries, and columns to drop on load.
hyps_data = 'Farinotti' # Hypsometry dataset (options: 'Huss' from GlacierMIP or 'Farinotti' from Farinotti etal 2019)
if hyps_data == 'Farinotti':
    option_shift_elevbins_20m = 0 # option to shift bins by 20 m (needed since off by 20 m, see email 5/24/2018)
    # Dictionary of hypsometry filenames
    hyps_filepath = main_directory + '/../IceThickness_Farinotti/output/'
    hyps_filedict = {1: 'area_km2_01_Farinotti2019_10m.csv',}
    hyps_colsdrop = ['RGIId']
    # Thickness data
    thickness_filepath = main_directory + '/../IceThickness_Farinotti/output/'
    thickness_filedict = {1: 'thickness_m_01_Farinotti2019_10m.csv'}
    thickness_colsdrop = ['RGIId']
    # Width data
    width_filepath = main_directory + '/../IceThickness_Farinotti/output/'
    width_filedict = {1: 'width_km_01_Farinotti2019_10m.csv'}
    width_colsdrop = ['RGIId']
elif hyps_data == 'Huss':
    option_shift_elevbins_20m = 1 # option to shift bins by 20 m (needed since off by 20 m, see email 5/24/2018)
    # Dictionary of hypsometry filenames
    # (Files from <NAME> should be manually pre-processed to be 'RGI-ID', 'Cont_range', and bins starting at 5)
    hyps_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
    hyps_filedict = {
            1: 'area_01_Huss_Alaska_10m.csv',
            3: 'area_RGI03_10.csv',
            4: 'area_RGI04_10.csv',
            6: 'area_RGI06_10.csv',
            7: 'area_RGI07_10.csv',
            8: 'area_RGI08_10.csv',
            9: 'area_RGI09_10.csv',
            13: 'area_13_Huss_CentralAsia_10m.csv',
            14: 'area_14_Huss_SouthAsiaWest_10m.csv',
            15: 'area_15_Huss_SouthAsiaEast_10m.csv',
            16: 'area_16_Huss_LowLatitudes_10m.csv',
            17: 'area_17_Huss_SouthernAndes_10m.csv'}
    hyps_colsdrop = ['RGI-ID','Cont_range']
    # Thickness data
    thickness_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
    thickness_filedict = {
            1: 'thickness_01_Huss_Alaska_10m.csv',
            3: 'thickness_RGI03_10.csv',
            4: 'thickness_RGI04_10.csv',
            6: 'thickness_RGI06_10.csv',
            7: 'thickness_RGI07_10.csv',
            8: 'thickness_RGI08_10.csv',
            9: 'thickness_RGI09_10.csv',
            13: 'thickness_13_Huss_CentralAsia_10m.csv',
            14: 'thickness_14_Huss_SouthAsiaWest_10m.csv',
            15: 'thickness_15_Huss_SouthAsiaEast_10m.csv',
            16: 'thickness_16_Huss_LowLatitudes_10m.csv',
            17: 'thickness_17_Huss_SouthernAndes_10m.csv'}
    thickness_colsdrop = ['RGI-ID','Cont_range']
    # Width data
    width_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
    width_filedict = {
            1: 'width_01_Huss_Alaska_10m.csv',
            3: 'width_RGI03_10.csv',
            4: 'width_RGI04_10.csv',
            6: 'width_RGI06_10.csv',
            7: 'width_RGI07_10.csv',
            8: 'width_RGI08_10.csv',
            9: 'width_RGI09_10.csv',
            13: 'width_13_Huss_CentralAsia_10m.csv',
            14: 'width_14_Huss_SouthAsiaWest_10m.csv',
            15: 'width_15_Huss_SouthAsiaEast_10m.csv',
            16: 'width_16_Huss_LowLatitudes_10m.csv',
            17: 'width_17_Huss_SouthernAndes_10m.csv'}
    width_colsdrop = ['RGI-ID','Cont_range']
#%% MODEL TIME FRAME DATA
# Models require complete data for each year such that refreezing, scaling, etc. can be calculated
# Leap year option
option_leapyear = 1 # 1: include leap year days, 0: exclude leap years so February always has 28 days
# User specified start/end dates
#  note: start and end dates must refer to whole years
startmonthday = '06-01'
endmonthday = '05-31'
wateryear_month_start = 10 # water year starting month
winter_month_start = 10 # first month of winter (for HMA winter is October 1 - April 30)
summer_month_start = 5 # first month of summer (for HMA summer is May 1 - Sept 30)
option_dates = 1 # 1: use dates from date table (first of each month), 2: dates from climate data
timestep = 'monthly' # time step ('monthly' only option at present)
# Seasonal dictionaries for WGMS data that is not provided
lat_threshold = 75
# Winter (start/end) and Summer (start/end) months keyed by latitude band
monthdict = {'northernmost': [9, 5, 6, 8],
             'north': [10, 4, 5, 9],
             'south': [4, 9, 10, 3],
             'southernmost': [3, 10, 11, 2]}
# Latitude threshold
# 01 - Alaska - < 75
# 02 - W Can - < 75
# 03 - N Can - > 74
# 04 - S Can - < 74
# 05 - Greenland - 60 - 80
# 06 - Iceland - < 75
# 07 - Svalbard - 70 - 80
# 08 - Scandinavia - < 70
# 09 - Russia - 72 - 82
# 10 - N Asia - 46 - 77
#%% CALIBRATION DATASETS
# Filepaths, filenames, and column names for each mass-balance calibration dataset.
# ===== SHEAN GEODETIC =====
#shean_fp = main_directory + '/../DEMs/Shean_2019_0213/'
#shean_fn = 'hma_mb_20190215_0815_std+mean_all_filled_bolch.csv'
#shean_rgi_glacno_cn = 'RGIId'
#shean_mb_cn = 'mb_mwea'
#shean_mb_err_cn = 'mb_mwea_sigma'
#shean_time1_cn = 't1'
#shean_time2_cn = 't2'
#shean_area_cn = 'area_m2'
# ===== BERTHIER GEODETIC =====
berthier_fp = main_directory + '/../DEMs/Berthier/output/'
#berthier_fn = 'AK_all_20190913_wextrapolations_1980cheat.csv'
berthier_fn = 'AK_all_20190913.csv'
berthier_rgi_glacno_cn = 'RGIId'
berthier_mb_cn = 'mb_mwea'
berthier_mb_err_cn = 'mb_mwea_sigma'
berthier_time1_cn = 't1'
berthier_time2_cn = 't2'
berthier_area_cn = 'area_km2'
# ===== BRAUN GEODETIC =====
braun_fp = main_directory + '/../DEMs/Braun/output/'
braun_fn = 'braun_AK_all_20190924_wlarsen_mcnabb_best.csv'
#braun_fn = 'braun_AK_all_20190924_wextrapolations.csv'
#braun_fn = 'braun_AK_all_20190924.csv'
braun_rgi_glacno_cn = 'RGIId'
braun_mb_cn = 'mb_mwea'
braun_mb_err_cn = 'mb_mwea_sigma'
braun_time1_cn = 't1'
braun_time2_cn = 't2'
braun_area_cn = 'area_km2'
# ===== BRUN GEODETIC =====
brun_fp = main_directory + '/../DEMs/'
brun_fn = 'Brun_Nature2017_MB_glacier-wide.csv'
brun_rgi_glacno_cn = 'GLA_ID'
brun_mb_cn = 'MB [m w.a a-1]'
brun_mb_err_cn = 'err. on MB [m w.e a-1]'
# NEED TO FINISH SETTING UP BRUN WITH CLASS_MBDATA
# ===== MAUER GEODETIC =====
mauer_fp = main_directory + '/../DEMs/'
mauer_fn = 'Mauer_geoMB_HMA_1970s_2000_min80pctCov.csv'
mauer_rgi_glacno_cn = 'RGIId'
mauer_mb_cn = 'geoMassBal'
mauer_mb_err_cn = 'geoMassBalSig'
mauer_time1_cn = 't1'
mauer_time2_cn = 't2'
# ===== MCNABB GEODETIC =====
mcnabb_fp = main_directory + '/../DEMs/McNabb_data/wgms_dv/'
mcnabb_fn = 'McNabb_data_all_preprocessed.csv'
mcnabb_rgiid_cn = 'RGIId'
mcnabb_mb_cn = 'mb_mwea'
mcnabb_mb_err_cn = 'mb_mwea_sigma'
mcnabb_time1_cn = 'date0'
mcnabb_time2_cn = 'date1'
mcnabb_area_cn = 'area'
# ===== LARSEN GEODETIC =====
larsen_fp = main_directory + '/../DEMs/larsen/'
larsen_fn = 'larsen2015_supplementdata_wRGIIds_v3.csv'
larsen_rgiid_cn = 'RGIId'
larsen_mb_cn = 'mb_mwea'
larsen_mb_err_cn = 'mb_mwea_sigma'
larsen_time1_cn = 'date0'
larsen_time2_cn = 'date1'
larsen_area_cn = 'area'
# ===== WGMS =====
wgms_datasets = ['wgms_d', 'wgms_ee']
#wgms_datasets = ['wgms_d']
wgms_fp = main_directory + '/../WGMS/DOI-WGMS-FoG-2018-06/'
wgms_rgi_glacno_cn = 'glacno'
wgms_obs_type_cn = 'obs_type'
# WGMS lookup tables information
wgms_lookup_fn = 'WGMS-FoG-2018-06-AA-GLACIER-ID-LUT.csv'
rgilookup_fullfn = main_directory + '/../RGI/rgi60/00_rgi60_links/00_rgi60_links.csv'
rgiv6_fn_prefix = main_directory + '/../RGI/rgi60/00_rgi60_attribs/' + '*'
rgiv5_fn_prefix = main_directory + '/../RGI/00_rgi50_attribs/' + '*'
# WGMS (d) geodetic mass balance information
wgms_d_fn = 'WGMS-FoG-2018-06-D-CHANGE.csv'
wgms_d_fn_preprocessed = 'wgms_d_rgiv6_preprocessed.csv'
wgms_d_thickness_chg_cn = 'THICKNESS_CHG'
wgms_d_thickness_chg_err_cn = 'THICKNESS_CHG_UNC'
wgms_d_volume_chg_cn = 'VOLUME_CHANGE'
wgms_d_volume_chg_err_cn = 'VOLUME_CHANGE_UNC'
wgms_d_z1_cn = 'LOWER_BOUND'
wgms_d_z2_cn = 'UPPER_BOUND'
# WGMS (e/ee) glaciological mass balance information
wgms_e_fn = 'WGMS-FoG-2018-06-E-MASS-BALANCE-OVERVIEW.csv'
wgms_ee_fn = 'WGMS-FoG-2018-06-EE-MASS-BALANCE.csv'
wgms_ee_fn_preprocessed = 'wgms_ee_rgiv6_preprocessed.csv'
wgms_ee_mb_cn = 'BALANCE'
wgms_ee_mb_err_cn = 'BALANCE_UNC'
wgms_ee_t1_cn = 'YEAR'
wgms_ee_z1_cn = 'LOWER_BOUND'
wgms_ee_z2_cn = 'UPPER_BOUND'
wgms_ee_period_cn = 'period'
# ===== COGLEY DATA =====
cogley_fp = main_directory + '/../Calibration_datasets/'
cogley_fn_preprocessed = 'Cogley_Arctic_processed_wInfo.csv'
cogley_rgi_glacno_cn = 'glacno'
cogley_mass_chg_cn = 'geo_mass_kgm2a'
cogley_mass_chg_err_cn = 'geo_mass_unc'
cogley_z1_cn = 'Zmin'
cogley_z2_cn = 'Zmax'
cogley_obs_type_cn = 'obs_type'
# ===== REGIONAL DATA =====
# Regional data refers to all measurements that have lumped multiple glaciers together
#  - a dictionary linking the regions to RGIIds is required
mb_group_fp = main_directory + '/../Calibration_datasets/'
mb_group_dict_fn = 'mb_group_dict.csv'
mb_group_data_fn = 'mb_group_data.csv'
mb_group_t1_cn = 'begin_period'
mb_group_t2_cn = 'end_period'
#%% REGIONS
# Optional grouping of glaciers into regions; each branch loads a RGIId -> region-name
# dictionary from a csv at import time.  An empty string disables grouping.
grouping = ''
if grouping == 'watershed':
    reg_vn = 'watershed'
    reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_watershed.csv'
    reg_csv = pd.read_csv(reg_dict_fn)
    reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
elif grouping == 'kaab':
    reg_vn = 'kaab_name'
    reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_kaab.csv'
    reg_csv = pd.read_csv(reg_dict_fn)
    reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
elif grouping == 'himap':
    reg_vn = 'bolch_name'
    reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_bolch.csv'
    reg_csv = pd.read_csv(reg_dict_fn)
    reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
else:
    reg_dict = {}
#%% MASS BALANCE MODEL OPTIONS
# Initial surface type options
option_surfacetype_initial = 1
#  option 1 (default) - use median elevation to classify snow/firn above the median and ice below.
#   > Sakai et al. (2015) found that the decadal ELAs are consistent with the median elevation of nine glaciers in High
#     Mountain Asia, and Nuimura et al. (2015) also found that the snow line altitude of glaciers in China corresponded
#     well with the median elevation.  Therefore, the use of the median elevation for defining the initial surface type
#     appears to be a fairly reasonable assumption in High Mountain Asia.
#  option 2 - use mean elevation
#  option 3 (Need to code) - specify an AAR ratio and apply this to estimate initial conditions
option_surfacetype_firn = 1 # 1: firn included; 0: not included (firn is snow)
option_surfacetype_debris = 0 # 1: debris cover included; 0: not included
# Downscaling model options
# Reference elevation options for downscaling climate variables
option_elev_ref_downscale = 'Zmed' # 'Zmed', 'Zmax', or 'Zmin' for median, maximum or minimum glacier elevations
# Downscale temperature to bins options
option_temp2bins = 1 # 1: lr_gcm and lr_glac to adjust temp from gcm to the glacier bins
option_adjusttemp_surfelev = 1 # 1: adjust temps based on surface elev changes; 0: no adjustment
# Downscale precipitation to bins options
option_prec2bins = 1 # 1: prec_factor and prec_grad to adjust precip from gcm to the glacier bins
option_preclimit = 1 # 1: limit the uppermost 25% using an exponential fxn
# Accumulation model options
option_accumulation = 2 # 1: single threshold, 2: threshold +/- 1 deg using linear interpolation
# Ablation model options
option_ablation = 2 # 1: monthly temp, 2: superimposed daily temps enabling melt near 0 (HH2015)
option_ddf_firn = 1 # 0: ddf_firn = ddf_snow; 1: ddf_firn = mean of ddf_snow and ddf_ice
ddfdebris = ddfice # add options for handling debris-covered glaciers (currently same DDF as clean ice)
# Refreezing model options
option_refreezing = 1 # 1: heat conduction (HH2015), 2: annual air temp (Woodward etal 1997)
if option_refreezing == 1:
    rf_layers = 5 # number of layers for refreezing model (8 is sufficient - Matthias)
#    rf_layers_max = 8 # number of layers to include for refreeze calculation
    rf_dz = 10/rf_layers # layer thickness (m)
    rf_dsc = 3 # number of time steps for numerical stability (3 is sufficient - Matthias)
    rf_meltcrit = 0.002 # critical amount of melt [m w.e.] for initializing refreezing module
    pp = 0.3 # additional refreeze water to account for water refreezing at bare-ice surface
    rf_dens_top = 300 # snow density at surface (kg m-3)
    rf_dens_bot = 650 # snow density at bottom refreezing layer (kg m-3)
    option_rf_limit_meltsnow = 1
elif option_refreezing == 2:
    rf_month = 10 # refreeze month
# Mass redistribution / Glacier geometry change options
option_massredistribution = 1 # 1: mass redistribution (Huss and Hock, 2015)
option_glaciershape = 1 # 1: parabolic (Huss and Hock, 2015), 2: rectangular, 3: triangular
option_glaciershape_width = 1 # 1: include width, 0: do not include
icethickness_advancethreshold = 5 # advancing glacier ice thickness change threshold (5 m in Huss and Hock, 2015)
terminus_percentage = 20 # glacier (%) considered terminus (20% in HH2015), used to size advancing new bins
#%% OUTPUT OPTIONS
# Output package
#  option 0 - no netcdf package
#  option 1 - "raw package" [preferred units: m w.e.]
#              monthly variables for each bin (temp, prec, acc, refreeze, snowpack, melt, frontalablation,
#                                              massbal_clim)
#              annual variables for each bin (area, icethickness, surfacetype)
#  option 2 - "Glaciologist Package" output [units: m w.e. unless otherwise specified]:
#              monthly glacier-wide variables (prec, acc, refreeze, melt, frontalablation, massbal_total, runoff,
#                                              snowline)
#              annual glacier-wide variables (area, volume, ELA)
output_package = 2
# Glacier attributes copied into each output file
output_glacier_attr_vns = ['glacno', 'RGIId_float', 'CenLon', 'CenLat', 'O1Region', 'O2Region', 'Area', 'Zmin', 'Zmax',
                           'Zmed', 'Slope', 'Aspect', 'Lmax', 'Form', 'TermType', 'Surging']
time_names = ['time', 'year', 'year_plus1']
# Output package variables
output_variables_package2 = ['temp_glac_monthly', 'prec_glac_monthly', 'acc_glac_monthly',
                             'refreeze_glac_monthly', 'melt_glac_monthly', 'frontalablation_glac_monthly',
                             'massbaltotal_glac_monthly', 'runoff_glac_monthly', 'snowline_glac_monthly',
                             'area_glac_annual', 'volume_glac_annual', 'ELA_glac_annual',
                             'offglac_prec_monthly', 'offglac_refreeze_monthly', 'offglac_melt_monthly',
                             'offglac_snowpack_monthly', 'offglac_runoff_monthly']
#%% MODEL PROPERTIES
# Physical constants used throughout the model.
density_ice = 900 # Density of ice [kg m-3] (or Gt / 1000 km3)
density_water = 1000 # Density of water [kg m-3]
area_ocean = 362.5 * 10**6 # Area of ocean [km2]
k_ice = 2.33 # Thermal conductivity of ice [J s-1 K-1 m-1] recall (W = J s-1)
k_air = 0.023 # Thermal conductivity of air [J s-1 K-1 m-1] (Mellor, 1997)
#k_air = 0.001 # Thermal conductivity of air [J s-1 K-1 m-1]
ch_ice = 1890000 # Volumetric heat capacity of ice [J K-1 m-3] (density=900, heat_capacity=2100 J K-1 kg-1)
ch_air = 1297 # Volumetric Heat capacity of air [J K-1 m-3] (density=1.29, heat_capacity=1005 J K-1 kg-1)
Lh_rf = 333550 # Latent heat of fusion [J kg-1]
tolerance = 1e-12 # Model tolerance (used to remove low values caused by rounding errors)
gravity = 9.81 # Gravity [m s-2]
pressure_std = 101325 # Standard pressure [Pa]
temp_std = 288.15 # Standard temperature [K]
R_gas = 8.3144598 # Universal gas constant [J mol-1 K-1]
molarmass_air = 0.0289644 # Molar mass of Earth's air [kg mol-1]
#%% DEBUGGING OPTIONS
# Debug flags read elsewhere in the model (presumably enable verbose output — confirm in model code)
debug_refreeze = False
debug_mb = False
# Pass variable to shell script
if __name__ == '__main__':
    print(rgi_regionsO1[0])
    # Fixed: a stray " | pygem_input_oct19.py" fragment had been appended to this line
    # (file-concatenation artifact), which raised NameError/TypeError at runtime.
    print(rgi_glac_number[0:10])
# Built-in libraries
import os
# External libraries
import pandas as pd
import numpy as np
#%% Functions to select specific glacier numbers
def get_same_glaciers(glac_fp):
    """
    Return the sorted list of RGI glacier numbers that have netcdf output files.

    Parameters
    ----------
    glac_fp : str
        filepath to where netcdf files of individual glaciers are held

    Returns
    -------
    list
        sorted list of rgi glacier number strings
    """
    # Filenames look like '<prefix>.<glacno>.nc'; the second dot-separated token
    # is the glacier number
    nc_numbers = [fn.split('.')[1] for fn in os.listdir(glac_fp) if fn.endswith('.nc')]
    return sorted(nc_numbers)
def get_shean_glacier_nos(region_no, number_glaciers=0, option_random=0):
    """
    List glacier numbers that have Shean calibration data for a given RGI region.

    When not sampling randomly, glaciers are ordered by area (largest first)
    before the glacier-number strings are extracted; the returned list itself
    is sorted numerically.

    Parameters
    ----------
    region_no : int
        region number (Shean data available for regions 13, 14, and 15)
    number_glaciers : int
        number of glaciers to randomly sample (default = 0; only used when
        option_random == 1)
    option_random : int
        option to select glaciers randomly for model run (default = 0, not random)

    Returns
    -------
    list of str
        sorted list of 5-digit rgi glacier number strings
    """
    region_no = int(region_no)  # guard against a float/str region argument
    # Shean geodetic mass-balance data (path is relative to the working directory)
    csv_path = os.getcwd() + '/../DEMs/Shean_2019_0213/hma_mb_20190215_0815_std+mean_all_filled_bolch.csv'
    ds_all = pd.read_csv(csv_path)
    # RGIIds are stored as floats (e.g. 15.00001), so the integer part is the region
    in_region = (ds_all['RGIId'] > region_no) & (ds_all['RGIId'] < region_no + 1)
    ds_reg = ds_all[in_region].copy()
    if option_random == 1:
        ds_reg = ds_reg.sample(n=number_glaciers)
    else:
        ds_reg = ds_reg.sort_values('area_m2', ascending=False)
    ds_reg.reset_index(drop=True, inplace=True)
    # The fractional part of the float RGIId encodes the 5-digit glacier number
    ds_reg['glacno'] = ((ds_reg['RGIId'] % 1) * 10**5).round(0).astype(int)
    # Re-format as a zero-padded 5-character string via fixed-point formatting
    ds_reg['glacno_str'] = (ds_reg['glacno'] / 10**5).apply(lambda x: '%.5f' % x).astype(str).str.split('.').str[1]
    return sorted(ds_reg['glacno_str'].values)
def glac_num_fromrange(int_low, int_high):
    """
    Build the list of zero-padded glacier numbers between two integers, inclusive.

    Parameters
    ----------
    int_low : int
        low value of range
    int_high : int
        high value of range

    Returns
    -------
    list
        list of 5-character, zero-filled rgi glacier number strings
    """
    # zfill pads to width 5 (e.g. 7 -> '00007'); numbers >= 100000 pass through unchanged
    return [str(num).zfill(5) for num in range(int_low, int_high + 1)]
def glac_fromcsv(csv_fullfn, cn='RGIId'):
    """
    Generate list of glacier numbers from a csv file of RGIIds.

    Parameters
    ----------
    csv_fullfn : str
        full filename (path + filename) of the csv file
    cn : str
        name of the column containing RGIIds such as 'RGI60-14.00001'
        (default: 'RGIId')

    Returns
    -------
    list
        list of glacier numbers, e.g., ['14.00001', '15.00001']
    """
    df = pd.read_csv(csv_fullfn)
    # Fixed: use the requested column 'cn' (previously hardcoded to 'RGIId',
    # silently ignoring the parameter).  Strip the 'RGI60-' style prefix.
    return [x.split('-')[1] for x in df[cn].values]
#%%
# Model setup directory
main_directory = os.getcwd()
# Output directory
output_filepath = main_directory + '/../Output/'
# ===== GLACIER SELECTION =====
rgi_regionsO1 = [1] # 1st order region number (RGI V6.0)
rgi_regionsO2 = 'all' # 2nd order region number (RGI V6.0)
# RGI glacier number (RGI V6.0)
#  Two options: (1) use glacier numbers for a given region (or 'all'), must have glac_no set to None
#               (2) glac_no is not None, e.g., ['1.00001', 13.0001'], overrides rgi_glac_number
#rgi_glac_number = 'all'
rgi_glac_number = ['27108']
#rgi_glac_number = glac_num_fromrange(1,100)
#rgi_glac_number = glac_num_fromrange(1130,1153)
#rgi_glac_number = get_same_glaciers(output_filepath + 'cal_opt1/reg1/')
#rgi_glac_number = get_shean_glacier_nos(rgi_regionsO1[0], 1, option_random=1)
glac_no = None
#glac_no = glac_fromcsv(main_directory + '/../qgis_himat/trishuli_and_naltar_RGIIds.csv')
#glac_no = ['15.01152']
if glac_no is not None:
    # Derive the 1st-order regions from the selected glacier ids
    rgi_regionsO1 = sorted(list(set([int(x.split('.')[0]) for x in glac_no])))
# ===== CLIMATE DATA =====
# Reference period runs
#ref_gcm_name = 'ERA-Interim' # reference climate dataset
ref_gcm_name = 'ERA5' # reference climate dataset
startyear = 1980 # first year of model run (reference dataset)
endyear = 2018 # last year of model run (reference dataset)
option_wateryear = 1 # 1: water year, 2: calendar year, 3: custom defined
#startyear = 2000 # first year of model run (reference dataset)
#endyear = 2018 # last year of model run (reference dataset)
#option_wateryear = 3 # 1: water year, 2: calendar year, 3: custom defined
constantarea_years = 0 # number of years to not let the area or volume change
spinupyears = 0 # spin up years
# Simulation runs (separate so calibration and simulations can be run at same time; also needed for bias adjustments)
#gcm_startyear = 2000 # first year of model run (simulation dataset)
#gcm_endyear = 2018 # last year of model run (simulation dataset)
gcm_startyear = 1980 # first year of model run (simulation dataset)
gcm_endyear = 2018 # last year of model run (simulation dataset)
gcm_spinupyears = 0 # spin up years for simulation
gcm_wateryear = 1 # water year for simulation
# Hindcast option (flips array so 1960-2000 would run 2000-1960 ensuring that glacier area at 2000 is correct)
hindcast = 0 # 1: run hindcast simulation, 0: do not
if hindcast == 1:
    constantarea_years = 18 # number of years to not let the area or volume change
    gcm_startyear = 1960 # first year of model run (simulation dataset)
    gcm_endyear = 2017 # last year of model run (simulation dataset)
# Synthetic options (synthetic refers to created climate data, e.g., repeat 1995-2015 for the next 100 years)
option_synthetic_sim = 0 # 1: run synthetic simulation, 0: do not
if option_synthetic_sim == 1:
    synthetic_startyear = 1995 # synthetic start year
    synthetic_endyear = 2015 # synthetic end year
    synthetic_spinupyears = 0 # synthetic spinup years
    synthetic_temp_adjust = 3 # Temperature adjustment factor for synthetic runs
    synthetic_prec_factor = 1.12 # Precipitation adjustment factor for synthetic runs
#%% SIMULATION OPTIONS
# MCMC options
sim_iters = 100 # number of simulations (needed for cal_opt 2)
sim_burn = 200 # number of burn-in (needed for cal_opt 2)
# Simulation output filepath
output_sim_fp = output_filepath + 'simulations/'
# Simulation output statistics (can include 'mean', 'std', '2.5%', '25%', 'median', '75%', '97.5%')
sim_stat_cns = ['mean', 'std']
# Bias adjustment options (0: no adjustment, 1: new prec scheme and temp from HH2015, 2: HH2015 methods)
option_bias_adjustment = 1
#%% ===== CALIBRATION OPTIONS =====
# Calibration option (1 = minimization, 2 = MCMC, 3=HH2015, 4=modified HH2015)
option_calibration = 4
# Calibration datasets ('shean', 'larsen', 'mcnabb', 'wgms_d', 'wgms_ee', 'group')
cal_datasets = ['braun']
#cal_datasets = ['berthier']
# Calibration output filepath
output_fp_cal = output_filepath + 'cal_opt' + str(option_calibration) + '/'
# OPTION 1: Minimization
# Model parameter bounds for each calibration round
precfactor_bnds_list_init = [(0.8, 2.0), (0.8,2), (0.8,2), (0.2,5)]
precgrad_bnds_list_init = [(0.0001,0.0001), (0.0001,0.0001), (0.0001,0.0001), (0.0001,0.0001)]
ddfsnow_bnds_list_init = [(0.003, 0.003), (0.00175, 0.0045), (0.00175, 0.0045), (0.00175, 0.0045)]
tempchange_bnds_list_init = [(0,0), (0,0), (-2.5,2.5), (-10,10)]
# Minimization details
method_opt = 'SLSQP' # SciPy optimization scheme ('SLSQP' or 'L-BFGS-B')
ftol_opt = 1e-3 # tolerance for SciPy optimization scheme
massbal_uncertainty_mwea = 0.1 # mass balance uncertainty [mwea] for glaciers lacking uncertainty data
zscore_tolerance_all = 1 # tolerance if multiple calibration points (shortcut that could be improved)
zscore_tolerance_single = 0.1 # tolerance if only a single calibration point (want this to be more exact)
zscore_update_threshold = 0.1 # threshold to update model params only if significantly better
extra_calrounds = 3 # additional calibration rounds in case optimization is getting stuck
# OPTION 2: MCMC
# Chain options
n_chains = 1 # number of chains (min 1, max 3)
mcmc_sample_no = 10000 # number of steps (10000 was found to be sufficient in HMA)
mcmc_burn_no = 0 # number of steps to burn-in (0 records all steps in chain)
mcmc_step = None # step option (None or 'am')
thin_interval = 1 # thin interval if need to reduce file size (best to leave at 1 if space allows)
# Precipitation factor distribution options
precfactor_disttype = 'gamma' # distribution type ('gamma', 'lognormal', 'uniform')
# Per-region gamma (alpha, beta) prior parameters, loaded from csv at import time
precfactor_gamma_region_dict_fullfn = main_directory + '/../Output/precfactor_gamma_region_dict.csv'
precfactor_gamma_region_df = pd.read_csv(precfactor_gamma_region_dict_fullfn)
precfactor_gamma_region_dict = dict(zip(
        precfactor_gamma_region_df.Region.values,
        [[precfactor_gamma_region_df.loc[x,'alpha'], precfactor_gamma_region_df.loc[x,'beta']]
         for x in precfactor_gamma_region_df.index.values]))
precfactor_gamma_alpha = 3.0
precfactor_gamma_beta = 0.84
precfactor_lognorm_mu = 0
precfactor_lognorm_tau = 4
precfactor_mu = 0
precfactor_sigma = 1.5
precfactor_boundlow = 0.5
precfactor_boundhigh = 1.5
precfactor_start = 1
# Temperature bias distribution options
tempchange_disttype = 'normal' # distribution type ('normal', 'truncnormal', 'uniform')
# Per-region normal (mu, sigma) prior parameters, loaded from csv at import time
tempchange_norm_region_dict_fullfn = main_directory + '/../Output/tempchange_norm_region_dict.csv'
tempchange_norm_region_df = pd.read_csv(tempchange_norm_region_dict_fullfn)
tempchange_norm_region_dict = dict(zip(
        tempchange_norm_region_df.Region.values,
        [[tempchange_norm_region_df.loc[x,'mu'], tempchange_norm_region_df.loc[x,'sigma']]
         for x in tempchange_norm_region_df.index.values]))
tempchange_mu = 0.91
tempchange_sigma = 1.4
tempchange_boundlow = -10
tempchange_boundhigh = 10
tempchange_start = tempchange_mu
# Degree-day factor of snow distribution options
ddfsnow_disttype = 'truncnormal' # distribution type ('truncnormal', 'uniform')
ddfsnow_mu = 0.0041
ddfsnow_sigma = 0.0015
ddfsnow_boundlow = 0
ddfsnow_boundhigh = np.inf
ddfsnow_start=ddfsnow_mu
#%% MODEL PARAMETERS
# Default mass-balance model parameters (may be overridden by calibrated values, see option below)
option_import_modelparams = 1 # 0: input values, 1: calibrated model parameters from netcdf files
precfactor = 1 # precipitation factor [-] (k_p in Radic etal 2013; c_prec in HH2015)
precgrad = 0.0001 # precipitation gradient on glacier [m-1]
ddfsnow = 0.0041 # degree-day factor of snow [m w.e. d-1 degC-1]
ddfsnow_iceratio = 0.7 # Ratio degree-day factor snow snow to ice
ddfice = ddfsnow / ddfsnow_iceratio # degree-day factor of ice [m w.e. d-1 degC-1]
tempchange = 0 # temperature bias [deg C]
lrgcm = -0.0065 # lapse rate from gcm to glacier [K m-1]
lrglac = -0.0065 # lapse rate on glacier for bins [K m-1]
tempsnow = 1.0 # temperature threshold for snow [deg C] (HH2015 used 1.5 degC +/- 1 degC)
frontalablation_k = 2 # frontal ablation rate [yr-1]
af = 0.7 # Bulk flow parameter for frontal ablation (m^-0.5)
# Calving width dictionary to override RGI elevation bins, which can be highly inaccurate at the calving front
# NOTE: these csv files are read at import time; the module fails to import if they are missing.
width_calving_dict_fullfn = main_directory + '/../Calving_data/calvingfront_widths.csv'
width_calving_df = pd.read_csv(width_calving_dict_fullfn)
width_calving_dict = dict(zip(width_calving_df.RGIId, width_calving_df.front_width_m))
# Calving option (1=values from HH2015, 2=calibrate glaciers independently and use transfer fxns for others)
option_frontalablation_k = 1
# Calving parameter dictionary (according to Supplementary Table 3 in HH2015)
frontalablation_k0dict_fullfn = main_directory + '/../Calving_data/frontalablation_k0_dict.csv'
frontalablation_k0dict_df = pd.read_csv(frontalablation_k0dict_fullfn)
frontalablation_k0dict = dict(zip(frontalablation_k0dict_df.O1Region, frontalablation_k0dict_df.k0))
# Model parameter column names and filepaths
modelparams_colnames = ['lrgcm', 'lrglac', 'precfactor', 'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', 'tempchange']
# Model parameter filepath
modelparams_fp = output_filepath + 'cal_opt' + str(option_calibration) + '/'
#modelparams_fp = output_filepath + 'cal_opt2_spc_20190806/'
#%% CLIMATE DATA
# ERA-INTERIM (Reference data)
# Variable names
era_varnames = ['temperature', 'precipitation', 'geopotential', 'temperature_pressurelevels']
# Note: do not change variable names as these are set to run with the download_erainterim_data.py script.
# If option 2 is being used to calculate the lapse rates, then the pressure level data is unnecessary.
# Dates
eraint_start_date = '19790101'
eraint_end_date = '20180501'
# Resolution
grid_res = '0.5/0.5'
# Bounding box (N/W/S/E)
#bounding_box = '90/0/-90/360'
bounding_box = '50/70/25/105'
# Lapse rate option
#  option 0 - lapse rates are constant defined by input
#  option 1 (default) - lapse rates derived from gcm pressure level temperature data (varies spatially and temporally)
#  option 2 - lapse rates derived from surrounding pixels (varies spatially and temporally)
#    Note: Be careful with option 2 as the ocean vs land/glacier temperatures can cause unrealistic inversions
#          This is the option used by Marzeion et al. (2012)
option_lr_method = 1
# ERA5
# NOTE(review): era5_fp is an absolute path to an external volume — machine-specific; verify before running.
era5_fp = '/Volumes/PyGEM_data/ERA5/'
era5_temp_fn = 'ERA5_temp_monthly.nc'
era5_tempstd_fn = 'ERA5_tempstd_monthly.nc'
era5_prec_fn = 'ERA5_totalprecip_monthly.nc'
era5_elev_fn = 'ERA5_geopotential_monthly.nc'
era5_pressureleveltemp_fn = 'ERA5_pressureleveltemp_monthly.nc'
era5_lr_fn = 'ERA5_lapserates_monthly.nc'
# ERA-Interim
eraint_fp = main_directory + '/../Climate_data/ERA_Interim/download/'
eraint_temp_fn = 'ERAInterim_Temp2m_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_prec_fn = 'ERAInterim_TotalPrec_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_elev_fn = 'ERAInterim_geopotential.nc'
eraint_pressureleveltemp_fn = 'ERAInterim_pressureleveltemp_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_lr_fn = ('ERAInterim_lapserates_' + eraint_start_date + '_' + eraint_end_date + '_opt' + str(option_lr_method) +
                '_world.nc')
# CMIP5 (GCM data)
cmip5_fp_var_prefix = main_directory + '/../Climate_data/cmip5/'
cmip5_fp_var_ending = '_r1i1p1_monNG/'
cmip5_fp_fx_prefix = main_directory + '/../Climate_data/cmip5/'
cmip5_fp_fx_ending = '_r0i0p0_fx/'
cmip5_fp_lr = main_directory + '/../Climate_data/cmip5/bias_adjusted_1995_2100/2018_0524/'
cmip5_lr_fn = 'biasadj_mon_lravg_1995_2015_R15.csv'
# COAWST (High-resolution climate data over HMA)
coawst_fp_unmerged = main_directory + '/../Climate_data/coawst/Monthly/'
coawst_fp = main_directory + '/../Climate_data/coawst/'
coawst_fn_prefix_d02 = 'wrfout_d02_Monthly_'
coawst_fn_prefix_d01 = 'wrfout_d01_Monthly_'
coawst_temp_fn_d02 = 'wrfout_d02_Monthly_T2_1999100100-2006123123.nc'
coawst_prec_fn_d02 = 'wrfout_d02_Monthly_TOTPRECIP_1999100100-2006123123.nc'
coawst_elev_fn_d02 = 'wrfout_d02_Monthly_HGHT.nc'
coawst_temp_fn_d01 = 'wrfout_d01_Monthly_T2_1999100100-2006123123.nc'
coawst_prec_fn_d01 = 'wrfout_d01_Monthly_TOTPRECIP_1999100100-2006123123.nc'
coawst_elev_fn_d01 = 'wrfout_d01_Monthly_HGHT.nc'
coawst_vns = ['T2', 'TOTPRECIP', 'HGHT']
coawst_d02_lon_min = 65
coawst_d02_lon_max = 99
coawst_d02_lat_min = 20
coawst_d02_lat_max = 38
#%% GLACIER DATA (RGI, ICE THICKNESS, ETC.)
# ===== RGI DATA =====
# Filepath for RGI files
rgi_fp = main_directory + '/../RGI/rgi60/00_rgi60_attribs/'
# Column names
rgi_lat_colname = 'CenLat'
rgi_lon_colname = 'CenLon'
elev_colname = 'elev'
indexname = 'GlacNo'
rgi_O1Id_colname = 'glacno'
rgi_glacno_float_colname = 'RGIId_float'
# Column names from table to drop
rgi_cols_drop = ['GLIMSId','BgnDate','EndDate','Status','Connect','Linkages','Name']
# ===== ADDITIONAL DATA (hypsometry, ice thickness, width) =====
# Filepath for the hypsometry files
binsize = 10 # Elevation bin height [m]
hyps_data = 'Farinotti' # Hypsometry dataset (options: 'Huss' from GlacierMIP or 'Farinotti' from Farinotti etal 2019)
if hyps_data == 'Farinotti':
option_shift_elevbins_20m = 0 # option to shift bins by 20 m (needed since off by 20 m, seem email 5/24/2018)
# Dictionary of hypsometry filenames
hyps_filepath = main_directory + '/../IceThickness_Farinotti/output/'
hyps_filedict = {1: 'area_km2_01_Farinotti2019_10m.csv',}
hyps_colsdrop = ['RGIId']
# Thickness data
thickness_filepath = main_directory + '/../IceThickness_Farinotti/output/'
thickness_filedict = {1: 'thickness_m_01_Farinotti2019_10m.csv'}
thickness_colsdrop = ['RGIId']
# Width data
width_filepath = main_directory + '/../IceThickness_Farinotti/output/'
width_filedict = {1: 'width_km_01_Farinotti2019_10m.csv'}
width_colsdrop = ['RGIId']
elif hyps_data == 'Huss':
option_shift_elevbins_20m = 1 # option to shift bins by 20 m (needed since off by 20 m, seem email 5/24/2018)
# Dictionary of hypsometry filenames
# (Files from <NAME> should be manually pre-processed to be 'RGI-ID', 'Cont_range', and bins starting at 5)
hyps_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
hyps_filedict = {
1: 'area_01_Huss_Alaska_10m.csv',
3: 'area_RGI03_10.csv',
4: 'area_RGI04_10.csv',
6: 'area_RGI06_10.csv',
7: 'area_RGI07_10.csv',
8: 'area_RGI08_10.csv',
9: 'area_RGI09_10.csv',
13: 'area_13_Huss_CentralAsia_10m.csv',
14: 'area_14_Huss_SouthAsiaWest_10m.csv',
15: 'area_15_Huss_SouthAsiaEast_10m.csv',
16: 'area_16_Huss_LowLatitudes_10m.csv',
17: 'area_17_Huss_SouthernAndes_10m.csv'}
hyps_colsdrop = ['RGI-ID','Cont_range']
# Thickness data
thickness_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
thickness_filedict = {
1: 'thickness_01_Huss_Alaska_10m.csv',
3: 'thickness_RGI03_10.csv',
4: 'thickness_RGI04_10.csv',
6: 'thickness_RGI06_10.csv',
7: 'thickness_RGI07_10.csv',
8: 'thickness_RGI08_10.csv',
9: 'thickness_RGI09_10.csv',
13: 'thickness_13_Huss_CentralAsia_10m.csv',
14: 'thickness_14_Huss_SouthAsiaWest_10m.csv',
15: 'thickness_15_Huss_SouthAsiaEast_10m.csv',
16: 'thickness_16_Huss_LowLatitudes_10m.csv',
17: 'thickness_17_Huss_SouthernAndes_10m.csv'}
thickness_colsdrop = ['RGI-ID','Cont_range']
# Width data
width_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
width_filedict = {
1: 'width_01_Huss_Alaska_10m.csv',
3: 'width_RGI03_10.csv',
4: 'width_RGI04_10.csv',
6: 'width_RGI06_10.csv',
7: 'width_RGI07_10.csv',
8: 'width_RGI08_10.csv',
9: 'width_RGI09_10.csv',
13: 'width_13_Huss_CentralAsia_10m.csv',
14: 'width_14_Huss_SouthAsiaWest_10m.csv',
15: 'width_15_Huss_SouthAsiaEast_10m.csv',
16: 'width_16_Huss_LowLatitudes_10m.csv',
17: 'width_17_Huss_SouthernAndes_10m.csv'}
width_colsdrop = ['RGI-ID','Cont_range']
#%% MODEL TIME FRAME DATA
# Models require complete data for each year such that refreezing, scaling, etc. can be calculated
# Leap year option
option_leapyear = 1 # 1: include leap year days, 0: exclude leap years so February always has 28 days
# User specified start/end dates
# note: start and end dates must refer to whole years
startmonthday = '06-01'
endmonthday = '05-31'
wateryear_month_start = 10 # water year starting month
winter_month_start = 10 # first month of winter (for HMA winter is October 1 - April 30)
summer_month_start = 5 # first month of summer (for HMA summer is May 1 - Sept 30)
option_dates = 1 # 1: use dates from date table (first of each month), 2: dates from climate data
timestep = 'monthly' # time step ('monthly' only option at present)
# Seasonal dictionaries for WGMS data that is not provided
lat_threshold = 75
# Winter (start/end) and Summer (start/end)
monthdict = {'northernmost': [9, 5, 6, 8],
'north': [10, 4, 5, 9],
'south': [4, 9, 10, 3],
'southernmost': [3, 10, 11, 2]}
# Latitude threshold
# 01 - Alaska - < 75
# 02 - W Can - < 75
# 03 - N Can - > 74
# 04 - S Can - < 74
# 05 - Greenland - 60 - 80
# 06 - Iceland - < 75
# 07 - Svalbard - 70 - 80
# 08 - Scandinavia - < 70
# 09 - Russia - 72 - 82
# 10 - N Asia - 46 - 77
#%% CALIBRATION DATASETS
# ===== SHEAN GEODETIC =====
#shean_fp = main_directory + '/../DEMs/Shean_2019_0213/'
#shean_fn = 'hma_mb_20190215_0815_std+mean_all_filled_bolch.csv'
#shean_rgi_glacno_cn = 'RGIId'
#shean_mb_cn = 'mb_mwea'
#shean_mb_err_cn = 'mb_mwea_sigma'
#shean_time1_cn = 't1'
#shean_time2_cn = 't2'
#shean_area_cn = 'area_m2'
# ===== BERTHIER GEODETIC =====
berthier_fp = main_directory + '/../DEMs/Berthier/output/'
#berthier_fn = 'AK_all_20190913_wextrapolations_1980cheat.csv'
berthier_fn = 'AK_all_20190913.csv'
berthier_rgi_glacno_cn = 'RGIId'
berthier_mb_cn = 'mb_mwea'
berthier_mb_err_cn = 'mb_mwea_sigma'
berthier_time1_cn = 't1'
berthier_time2_cn = 't2'
berthier_area_cn = 'area_km2'
# ===== BRAUN GEODETIC =====
braun_fp = main_directory + '/../DEMs/Braun/output/'
braun_fn = 'braun_AK_all_20190924_wlarsen_mcnabb_best.csv'
#braun_fn = 'braun_AK_all_20190924_wextrapolations.csv'
#braun_fn = 'braun_AK_all_20190924.csv'
braun_rgi_glacno_cn = 'RGIId'
braun_mb_cn = 'mb_mwea'
braun_mb_err_cn = 'mb_mwea_sigma'
braun_time1_cn = 't1'
braun_time2_cn = 't2'
braun_area_cn = 'area_km2'
# ===== BRUN GEODETIC =====
brun_fp = main_directory + '/../DEMs/'
brun_fn = 'Brun_Nature2017_MB_glacier-wide.csv'
brun_rgi_glacno_cn = 'GLA_ID'
brun_mb_cn = 'MB [m w.a a-1]'
brun_mb_err_cn = 'err. on MB [m w.e a-1]'
# NEED TO FINISH SETTING UP BRUN WITH CLASS_MBDATA
# ===== MAUER GEODETIC =====
mauer_fp = main_directory + '/../DEMs/'
mauer_fn = 'Mauer_geoMB_HMA_1970s_2000_min80pctCov.csv'
mauer_rgi_glacno_cn = 'RGIId'
mauer_mb_cn = 'geoMassBal'
mauer_mb_err_cn = 'geoMassBalSig'
mauer_time1_cn = 't1'
mauer_time2_cn = 't2'
# ===== MCNABB GEODETIC =====
mcnabb_fp = main_directory + '/../DEMs/McNabb_data/wgms_dv/'
mcnabb_fn = 'McNabb_data_all_preprocessed.csv'
mcnabb_rgiid_cn = 'RGIId'
mcnabb_mb_cn = 'mb_mwea'
mcnabb_mb_err_cn = 'mb_mwea_sigma'
mcnabb_time1_cn = 'date0'
mcnabb_time2_cn = 'date1'
mcnabb_area_cn = 'area'
# ===== LARSEN GEODETIC =====
larsen_fp = main_directory + '/../DEMs/larsen/'
larsen_fn = 'larsen2015_supplementdata_wRGIIds_v3.csv'
larsen_rgiid_cn = 'RGIId'
larsen_mb_cn = 'mb_mwea'
larsen_mb_err_cn = 'mb_mwea_sigma'
larsen_time1_cn = 'date0'
larsen_time2_cn = 'date1'
larsen_area_cn = 'area'
# ===== WGMS =====
wgms_datasets = ['wgms_d', 'wgms_ee']
#wgms_datasets = ['wgms_d']
wgms_fp = main_directory + '/../WGMS/DOI-WGMS-FoG-2018-06/'
wgms_rgi_glacno_cn = 'glacno'
wgms_obs_type_cn = 'obs_type'
# WGMS lookup tables information
wgms_lookup_fn = 'WGMS-FoG-2018-06-AA-GLACIER-ID-LUT.csv'
rgilookup_fullfn = main_directory + '/../RGI/rgi60/00_rgi60_links/00_rgi60_links.csv'
rgiv6_fn_prefix = main_directory + '/../RGI/rgi60/00_rgi60_attribs/' + '*'
rgiv5_fn_prefix = main_directory + '/../RGI/00_rgi50_attribs/' + '*'
# WGMS (d) geodetic mass balance information
wgms_d_fn = 'WGMS-FoG-2018-06-D-CHANGE.csv'
wgms_d_fn_preprocessed = 'wgms_d_rgiv6_preprocessed.csv'
wgms_d_thickness_chg_cn = 'THICKNESS_CHG'
wgms_d_thickness_chg_err_cn = 'THICKNESS_CHG_UNC'
wgms_d_volume_chg_cn = 'VOLUME_CHANGE'
wgms_d_volume_chg_err_cn = 'VOLUME_CHANGE_UNC'
wgms_d_z1_cn = 'LOWER_BOUND'
wgms_d_z2_cn = 'UPPER_BOUND'
# WGMS (e/ee) glaciological mass balance information
wgms_e_fn = 'WGMS-FoG-2018-06-E-MASS-BALANCE-OVERVIEW.csv'
wgms_ee_fn = 'WGMS-FoG-2018-06-EE-MASS-BALANCE.csv'
wgms_ee_fn_preprocessed = 'wgms_ee_rgiv6_preprocessed.csv'
wgms_ee_mb_cn = 'BALANCE'
wgms_ee_mb_err_cn = 'BALANCE_UNC'
wgms_ee_t1_cn = 'YEAR'
wgms_ee_z1_cn = 'LOWER_BOUND'
wgms_ee_z2_cn = 'UPPER_BOUND'
wgms_ee_period_cn = 'period'
# ===== COGLEY DATA =====
cogley_fp = main_directory + '/../Calibration_datasets/'
cogley_fn_preprocessed = 'Cogley_Arctic_processed_wInfo.csv'
cogley_rgi_glacno_cn = 'glacno'
cogley_mass_chg_cn = 'geo_mass_kgm2a'
cogley_mass_chg_err_cn = 'geo_mass_unc'
cogley_z1_cn = 'Zmin'
cogley_z2_cn = 'Zmax'
cogley_obs_type_cn = 'obs_type'
# ===== REGIONAL DATA =====
# Regional data refers to all measurements that have lumped multiple glaciers together
# - a dictionary linking the regions to RGIIds is required
mb_group_fp = main_directory + '/../Calibration_datasets/'
mb_group_dict_fn = 'mb_group_dict.csv'
mb_group_data_fn = 'mb_group_data.csv'
mb_group_t1_cn = 'begin_period'
mb_group_t2_cn = 'end_period'
#%% REGIONS
grouping = ''
if grouping == 'watershed':
reg_vn = 'watershed'
reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_watershed.csv'
reg_csv = pd.read_csv(reg_dict_fn)
reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
elif grouping == 'kaab':
reg_vn = 'kaab_name'
reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_kaab.csv'
reg_csv = pd.read_csv(reg_dict_fn)
reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
elif grouping == 'himap':
reg_vn = 'bolch_name'
reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_bolch.csv'
reg_csv = pd.read_csv(reg_dict_fn)
reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
else:
reg_dict = {}
#%% MASS BALANCE MODEL OPTIONS
# Initial surface type options
option_surfacetype_initial = 1
# option 1 (default) - use median elevation to classify snow/firn above the median and ice below.
# > Sakai et al. (2015) found that the decadal ELAs are consistent with the median elevation of nine glaciers in High
# Mountain Asia, and Nuimura et al. (2015) also found that the snow line altitude of glaciers in China corresponded
# well with the median elevation. Therefore, the use of the median elevation for defining the initial surface type
# appears to be a fairly reasonable assumption in High Mountain Asia.
# option 2 - use mean elevation
# option 3 (Need to code) - specify an AAR ratio and apply this to estimate initial conditions
option_surfacetype_firn = 1 # 1: firn included; 0: no included (firn is snow)
option_surfacetype_debris = 0 # 1: debris cover included; 0: not included
# Downscaling model options
# Reference elevation options for downscaling climate variables
option_elev_ref_downscale = 'Zmed' # 'Zmed', 'Zmax', or 'Zmin' for median, maximum or minimum glacier elevations
# Downscale temperature to bins options
option_temp2bins = 1 # 1: lr_gcm and lr_glac to adjust temp from gcm to the glacier bins
option_adjusttemp_surfelev = 1 # 1: adjust temps based on surface elev changes; 0: no adjustment
# Downscale precipitation to bins options
option_prec2bins = 1 # 1: prec_factor and prec_grad to adjust precip from gcm to the glacier bins
option_preclimit = 1 # 1: limit the uppermost 25% using an expontial fxn
# Accumulation model options
option_accumulation = 2 # 1: single threshold, 2: threshold +/- 1 deg using linear interpolation
# Ablation model options
option_ablation = 2 # 1: monthly temp, 2: superimposed daily temps enabling melt near 0 (HH2015)
option_ddf_firn = 1 # 0: ddf_firn = ddf_snow; 1: ddf_firn = mean of ddf_snow and ddf_ice
ddfdebris = ddfice # add options for handling debris-covered glaciers
# Refreezing model options
option_refreezing = 1 # 1: heat conduction (HH2015), 2: annual air temp (Woodward etal 1997)
if option_refreezing == 1:
rf_layers = 5 # number of layers for refreezing model (8 is sufficient - Matthias)
# rf_layers_max = 8 # number of layers to include for refreeze calculation
rf_dz = 10/rf_layers # layer thickness (m)
rf_dsc = 3 # number of time steps for numerical stability (3 is sufficient - Matthias)
rf_meltcrit = 0.002 # critical amount of melt [m w.e.] for initializing refreezing module
pp = 0.3 # additional refreeze water to account for water refreezing at bare-ice surface
rf_dens_top = 300 # snow density at surface (kg m-3)
rf_dens_bot = 650 # snow density at bottom refreezing layer (kg m-3)
option_rf_limit_meltsnow = 1
elif option_refreezing == 2:
rf_month = 10 # refreeze month
# Mass redistribution / Glacier geometry change options
option_massredistribution = 1 # 1: mass redistribution (Huss and Hock, 2015)
option_glaciershape = 1 # 1: parabolic (Huss and Hock, 2015), 2: rectangular, 3: triangular
option_glaciershape_width = 1 # 1: include width, 0: do not include
icethickness_advancethreshold = 5 # advancing glacier ice thickness change threshold (5 m in Huss and Hock, 2015)
terminus_percentage = 20 # glacier (%) considered terminus (20% in HH2015), used to size advancing new bins
#%% OUTPUT OPTIONS
# Output package
# option 0 - no netcdf package
# option 1 - "raw package" [preferred units: m w.e.]
# monthly variables for each bin (temp, prec, acc, refreeze, snowpack, melt, frontalablation,
# massbal_clim)
# annual variables for each bin (area, icethickness, surfacetype)
# option 2 - "Glaciologist Package" output [units: m w.e. unless otherwise specified]:
# monthly glacier-wide variables (prec, acc, refreeze, melt, frontalablation, massbal_total, runoff,
# snowline)
# annual glacier-wide variables (area, volume, ELA)
output_package = 2
output_glacier_attr_vns = ['glacno', 'RGIId_float', 'CenLon', 'CenLat', 'O1Region', 'O2Region', 'Area', 'Zmin', 'Zmax',
'Zmed', 'Slope', 'Aspect', 'Lmax', 'Form', 'TermType', 'Surging']
time_names = ['time', 'year', 'year_plus1']
# Output package variables
output_variables_package2 = ['temp_glac_monthly', 'prec_glac_monthly', 'acc_glac_monthly',
'refreeze_glac_monthly', 'melt_glac_monthly', 'frontalablation_glac_monthly',
'massbaltotal_glac_monthly', 'runoff_glac_monthly', 'snowline_glac_monthly',
'area_glac_annual', 'volume_glac_annual', 'ELA_glac_annual',
'offglac_prec_monthly', 'offglac_refreeze_monthly', 'offglac_melt_monthly',
'offglac_snowpack_monthly', 'offglac_runoff_monthly']
#%% MODEL PROPERTIES
density_ice = 900 # Density of ice [kg m-3] (or Gt / 1000 km3)
density_water = 1000 # Density of water [kg m-3]
area_ocean = 362.5 * 10**6 # Area of ocean [km2]
k_ice = 2.33 # Thermal conductivity of ice [J s-1 K-1 m-1] recall (W = J s-1)
k_air = 0.023 # Thermal conductivity of air [J s-1 K-1 m-1] (Mellor, 1997)
#k_air = 0.001 # Thermal conductivity of air [J s-1 K-1 m-1]
ch_ice = 1890000 # Volumetric heat capacity of ice [J K-1 m-3] (density=900, heat_capacity=2100 J K-1 kg-1)
ch_air = 1297 # Volumetric Heat capacity of air [J K-1 m-3] (density=1.29, heat_capacity=1005 J K-1 kg-1)
Lh_rf = 333550 # Latent heat of fusion [J kg-1]
tolerance = 1e-12 # Model tolerance (used to remove low values caused by rounding errors)
gravity = 9.81 # Gravity [m s-2]
pressure_std = 101325 # Standard pressure [Pa]
temp_std = 288.15 # Standard temperature [K]
R_gas = 8.3144598 # Universal gas constant [J mol-1 K-1]
molarmass_air = 0.0289644 # Molar mass of Earth's air [kg mol-1]
#%% DEBUGGING OPTIONS
debug_refreeze = False
debug_mb = False
# Pass variable to shell script
if __name__ == '__main__':
print(rgi_regionsO1[0])
print(rgi_glac_number[0:10]) | 0.748352 | 0.425963 |
"""This module contains several functions common to all modules."""
from weresync.exception import DeviceError, InvalidVersionError
import subprocess
import logging
import logging.handlers
import os
import gettext
import sys
LOGGER = logging.getLogger(__name__)
LANGUAGES = ["en"]
"""Currently translated languages. See `here <translation.html>`_ for more
information."""
DEFAULT_USER_LOG_LOCATION = os.path.expanduser(
"~/.local/log/weresync/weresync-user.log")
"""The default location for WereSync's user log files."""
def run_proc(args,
target="",
error=None,
valid_returncodes=[0],
throw_error=DeviceError):
"""Creates an runs a subprocess with the passed args. and throws an error
if the valid returncodes do not exist.
This expects either :py:class:`~weresync.exception.DeviceError` or an
exception which takes 2 arguments, the first for a custom error message
and the second for the output of the processs.
:param args: the argument list to run. Passed to the first paramter of
`subprocess.Popen`
:param error: the custom error code to display. Optional
:param valid_returncodes: the list of return codes which should *not* throw
an error.
:param throw_error: the error class to be thrown if there is an error.
Defaults to :py:class:`~weresync.exception.DeviceError`
:returns: the output of the process
"""
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = proc.communicate()
output = str(output, "utf-8")
if proc.returncode not in valid_returncodes:
if error is None:
error = ""
if throw_error == DeviceError:
raise DeviceError(target, error, output)
else:
raise throw_error(error, output)
return output
def start_logging_handler(log_loc,
stream_level=logging.WARNING,
file_level=logging.DEBUG):
os.makedirs(os.path.dirname(log_loc), exist_ok=True)
logger = logging.getLogger()
logger.setLevel(file_level if file_level < stream_level else stream_level)
formatter = logging.Formatter(
"%(levelname)s - %(asctime)s - %(name)s - %(message)s")
def enableHandler(hand, level, formatter):
hand.setLevel(level)
hand.setFormatter(formatter)
logger.addHandler(hand)
handler = logging.handlers.TimedRotatingFileHandler(
log_loc, when="D", interval=1, backupCount=15)
enableHandler(handler, file_level, formatter)
streamHandler = logging.StreamHandler()
enableHandler(streamHandler, stream_level, formatter)
logging.getLogger("yapsy").setLevel(logging.INFO)
def enable_localization():
"""Activates the `gettext` module to start internalization and enable
translation."""
LOGGER.debug("Enabling localization")
lodir = os.path.dirname(os.path.realpath(__file__)) + "/resources/locale"
es = gettext.translation("weresync", localedir=lodir, languages=LANGUAGES)
es.install()
def check_python_version():
"""This method tests if the running version of python supports WereSync.
If it does not, it raises a InvalidVersionException"""
try:
assert sys.version_info > (3, 0)
except AssertionError:
info = sys.version_info
raise InvalidVersionError(
"Python version {major}.{minor} not supported. WereSync requires "
"at least Python 3.0\n"
"Considering installing WereSync with the pip3 command to insure "
"it installs with Python3.".format(major=info[0], minor=info[1])) | src/weresync/utils.py | """This module contains several functions common to all modules."""
from weresync.exception import DeviceError, InvalidVersionError
import subprocess
import logging
import logging.handlers
import os
import gettext
import sys
LOGGER = logging.getLogger(__name__)
LANGUAGES = ["en"]
"""Currently translated languages. See `here <translation.html>`_ for more
information."""
DEFAULT_USER_LOG_LOCATION = os.path.expanduser(
"~/.local/log/weresync/weresync-user.log")
"""The default location for WereSync's user log files."""
def run_proc(args,
target="",
error=None,
valid_returncodes=[0],
throw_error=DeviceError):
"""Creates an runs a subprocess with the passed args. and throws an error
if the valid returncodes do not exist.
This expects either :py:class:`~weresync.exception.DeviceError` or an
exception which takes 2 arguments, the first for a custom error message
and the second for the output of the processs.
:param args: the argument list to run. Passed to the first paramter of
`subprocess.Popen`
:param error: the custom error code to display. Optional
:param valid_returncodes: the list of return codes which should *not* throw
an error.
:param throw_error: the error class to be thrown if there is an error.
Defaults to :py:class:`~weresync.exception.DeviceError`
:returns: the output of the process
"""
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = proc.communicate()
output = str(output, "utf-8")
if proc.returncode not in valid_returncodes:
if error is None:
error = ""
if throw_error == DeviceError:
raise DeviceError(target, error, output)
else:
raise throw_error(error, output)
return output
def start_logging_handler(log_loc,
stream_level=logging.WARNING,
file_level=logging.DEBUG):
os.makedirs(os.path.dirname(log_loc), exist_ok=True)
logger = logging.getLogger()
logger.setLevel(file_level if file_level < stream_level else stream_level)
formatter = logging.Formatter(
"%(levelname)s - %(asctime)s - %(name)s - %(message)s")
def enableHandler(hand, level, formatter):
hand.setLevel(level)
hand.setFormatter(formatter)
logger.addHandler(hand)
handler = logging.handlers.TimedRotatingFileHandler(
log_loc, when="D", interval=1, backupCount=15)
enableHandler(handler, file_level, formatter)
streamHandler = logging.StreamHandler()
enableHandler(streamHandler, stream_level, formatter)
logging.getLogger("yapsy").setLevel(logging.INFO)
def enable_localization():
"""Activates the `gettext` module to start internalization and enable
translation."""
LOGGER.debug("Enabling localization")
lodir = os.path.dirname(os.path.realpath(__file__)) + "/resources/locale"
es = gettext.translation("weresync", localedir=lodir, languages=LANGUAGES)
es.install()
def check_python_version():
"""This method tests if the running version of python supports WereSync.
If it does not, it raises a InvalidVersionException"""
try:
assert sys.version_info > (3, 0)
except AssertionError:
info = sys.version_info
raise InvalidVersionError(
"Python version {major}.{minor} not supported. WereSync requires "
"at least Python 3.0\n"
"Considering installing WereSync with the pip3 command to insure "
"it installs with Python3.".format(major=info[0], minor=info[1])) | 0.639286 | 0.319413 |
from fastapi import APIRouter
from .library.component import *
from .library.feedback import *
from .library.module import *
from .library.utils import *
from .library.marks import *
from .library.user import *
router = APIRouter()
if (not dynamodb_select({"module_code": "ICT2103"}, "ict2x01_module")):
ict2103 = Module(module_code="ICT2103",
module_name="Information Management")
ict2103_quiz_1 = Assessment(
name="Quiz 1", max_marks=100, weightage=20, end_date=0)
ict2103_workshop = Assessment(
name="NoSQL Workshop", max_marks=5, weightage=5, end_date=0)
ict2103_project = Assessment(
name="Project", max_marks=100, weightage=40, end_date=0)
ict2103_project_nosql = Subcomponent(
name="NoSQL Component", max_marks=30, weightage=12, end_date=0)
ict2103_project_sql = Subcomponent(
name="SQL Component", max_marks=30, weightage=12, end_date=0)
ict2103_project_report = Subcomponent(
name="Report", max_marks=10, weightage=4, end_date=0
)
ict2103_project_presentation = Subcomponent(
name="Presentation", max_marks=10, weightage=12, end_date=0
)
ict2103.add_assessment(ict2103_project)
ict2103.add_assessment(ict2103_workshop)
ict2103.add_assessment(ict2103_quiz_1)
ict2103.save()
ict2103_project.save()
ict2103_workshop.save()
ict2103_quiz_1.save()
ict2103_project.add_subcomponent(ict2103_project_nosql)
ict2103_project.add_subcomponent(ict2103_project_sql)
ict2103_project.add_subcomponent(ict2103_project_presentation)
ict2103_project.save()
ict2103_workshop.save()
ict2103_quiz_1.save()
ict2103_project_nosql.save()
ict2103_project_sql.save()
ict2103_project_presentation.save()
""" ict2102 = Module(module_code="ICT2102",
module_name="Human Computer Interaction")
ict2x01 = Module(module_code="ICT2X01",
module_name="Introduction to Software Engineering")
ict2901 = Module(module_code="ICT2901",
module_name="Professional and Career Development 1")
ict2104 = Module(module_code="ICT2104",
module_name="Embedded Systems Programming")
""" | backend/api/endpoints/DebuggerAPI.py | from fastapi import APIRouter
from .library.component import *
from .library.feedback import *
from .library.module import *
from .library.utils import *
from .library.marks import *
from .library.user import *
router = APIRouter()
if (not dynamodb_select({"module_code": "ICT2103"}, "ict2x01_module")):
ict2103 = Module(module_code="ICT2103",
module_name="Information Management")
ict2103_quiz_1 = Assessment(
name="Quiz 1", max_marks=100, weightage=20, end_date=0)
ict2103_workshop = Assessment(
name="NoSQL Workshop", max_marks=5, weightage=5, end_date=0)
ict2103_project = Assessment(
name="Project", max_marks=100, weightage=40, end_date=0)
ict2103_project_nosql = Subcomponent(
name="NoSQL Component", max_marks=30, weightage=12, end_date=0)
ict2103_project_sql = Subcomponent(
name="SQL Component", max_marks=30, weightage=12, end_date=0)
ict2103_project_report = Subcomponent(
name="Report", max_marks=10, weightage=4, end_date=0
)
ict2103_project_presentation = Subcomponent(
name="Presentation", max_marks=10, weightage=12, end_date=0
)
ict2103.add_assessment(ict2103_project)
ict2103.add_assessment(ict2103_workshop)
ict2103.add_assessment(ict2103_quiz_1)
ict2103.save()
ict2103_project.save()
ict2103_workshop.save()
ict2103_quiz_1.save()
ict2103_project.add_subcomponent(ict2103_project_nosql)
ict2103_project.add_subcomponent(ict2103_project_sql)
ict2103_project.add_subcomponent(ict2103_project_presentation)
ict2103_project.save()
ict2103_workshop.save()
ict2103_quiz_1.save()
ict2103_project_nosql.save()
ict2103_project_sql.save()
ict2103_project_presentation.save()
""" ict2102 = Module(module_code="ICT2102",
module_name="Human Computer Interaction")
ict2x01 = Module(module_code="ICT2X01",
module_name="Introduction to Software Engineering")
ict2901 = Module(module_code="ICT2901",
module_name="Professional and Career Development 1")
ict2104 = Module(module_code="ICT2104",
module_name="Embedded Systems Programming")
""" | 0.306527 | 0.100879 |
import buildVersion
import re
from logHandler import log
"""
This module contains add-on API version information for this build of NVDA. This file provides information on
how the API has changed as well as the range of API versions supported by this build of NVDA
"""
CURRENT = (buildVersion.version_year, buildVersion.version_major, buildVersion.version_minor)
BACK_COMPAT_TO = (0, 0, 0)
"""
As BACK_COMPAT_TO is incremented, the changed / removed parts / or reasoning should be added below.
EG: (x, y, z): Large changes to speech.py
---
(0, 0, 0): API version zero, used to signify addons released prior to API version checks.
"""
#: Compiled regular expression to match an addon API version string.
#: Supports year.major.minor versions (e.g. 2018.1.1).
# Although year and major are mandatory, minor is optional.
#: Resulting match objects expose three groups reflecting release year, release major, and release minor version,
# respectively.
# As minor is optional, the final group in the resulting match object may be None if minor is not provided in the original string. In this case it should be treated as being 0.
#: @type: RegexObject
ADDON_API_VERSION_REGEX = re.compile(r"^(0|\d{4})\.(\d)(?:\.(\d))?$")
def getAPIVersionTupleFromString(version):
"""Converts a string containing an NVDA version to a tuple of the form (versionYear, versionMajor, versionMinor)"""
match = ADDON_API_VERSION_REGEX.match(version)
if not match:
raise ValueError(version)
return tuple(int(i) if i is not None else 0 for i in match.groups())
def formatForGUI(versionTuple):
"""Converts a version tuple to a string for displaying in the GUI
Examples:
- (2018, 1, 1) becomes "2018.1.1"
- (2018, 1, 0) becomes "2018.1"
- (0, 0, 0) becomes "0.0"
"""
try:
year, major, minor = versionTuple
return buildVersion.formatVersionForGUI(year, major, minor)
except (
ValueError, # Too few/many values to unpack
TypeError # versionTuple is None or some other incorrect type
):
# This path should never be hit. But the appearance of "unknown" in the GUI is a better outcome
# than an exception and unusable dialog.
# Translators: shown when an addon API version string is unknown
default = _("unknown")
log.error("Unable to format versionTuple: {}".format(repr(versionTuple)), exc_info=True)
return default | source/addonAPIVersion.py |
import buildVersion
import re
from logHandler import log
"""
This module contains add-on API version information for this build of NVDA. This file provides information on
how the API has changed as well as the range of API versions supported by this build of NVDA
"""
CURRENT = (buildVersion.version_year, buildVersion.version_major, buildVersion.version_minor)
BACK_COMPAT_TO = (0, 0, 0)
"""
As BACK_COMPAT_TO is incremented, the changed / removed parts / or reasoning should be added below.
EG: (x, y, z): Large changes to speech.py
---
(0, 0, 0): API version zero, used to signify addons released prior to API version checks.
"""
#: Compiled regular expression to match an addon API version string.
#: Supports year.major.minor versions (e.g. 2018.1.1).
# Although year and major are mandatory, minor is optional.
#: Resulting match objects expose three groups reflecting release year, release major, and release minor version,
# respectively.
# As minor is optional, the final group in the resulting match object may be None if minor is not provided in the original string. In this case it should be treated as being 0.
#: @type: RegexObject
#: Compiled pattern for add-on API version strings: YEAR.MAJOR with an optional .MINOR,
#: where YEAR is "0" or four digits and MAJOR/MINOR are single digits.
ADDON_API_VERSION_REGEX = re.compile(r"^(0|\d{4})\.(\d)(?:\.(\d))?$")


def getAPIVersionTupleFromString(version):
    """Parse an NVDA add-on API version string.

    :param version: a string of the form "YEAR.MAJOR" or "YEAR.MAJOR.MINOR".
    :return: a (versionYear, versionMajor, versionMinor) tuple of ints;
        a missing minor component is treated as 0.
    :raise ValueError: if the string does not match the expected format.
    """
    parsed = ADDON_API_VERSION_REGEX.match(version)
    if parsed is None:
        raise ValueError(version)
    year, major, minor = parsed.groups()
    return (int(year), int(major), int(minor) if minor is not None else 0)
def formatForGUI(versionTuple):
"""Converts a version tuple to a string for displaying in the GUI
Examples:
- (2018, 1, 1) becomes "2018.1.1"
- (2018, 1, 0) becomes "2018.1"
- (0, 0, 0) becomes "0.0"
"""
try:
year, major, minor = versionTuple
return buildVersion.formatVersionForGUI(year, major, minor)
except (
ValueError, # Too few/many values to unpack
TypeError # versionTuple is None or some other incorrect type
):
# This path should never be hit. But the appearance of "unknown" in the GUI is a better outcome
# than an exception and unusable dialog.
# Translators: shown when an addon API version string is unknown
default = _("unknown")
log.error("Unable to format versionTuple: {}".format(repr(versionTuple)), exc_info=True)
return default | 0.439507 | 0.210705 |
from datetime import datetime, timedelta
from huey import crontab
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import exists
from app import create_app
from app.models.session import Session
from app.models.period import Period
from app.models.event import Event
from app.models.lesson import Lesson
from app.utils.tasks import huey
@huey.task()
def remove_session(session_id):
    """Delete the session with the given id once it has expired.

    If the session is still in use, the task re-schedules itself for the
    session's projected expiration date (last use + 60 days).

    :param session_id: primary key of the Session row to check.
    """
    app, db = create_app(None, minimal=True)
    with app.app_context():
        try:
            # Bug fix: the query must select the Session model, not the raw
            # session_id value — query(session_id) cannot build a valid SELECT.
            session: Session = db.session.query(Session) \
                .filter(Session.id == session_id) \
                .one()
            if session.is_expired():
                db.session.delete(session)
                # Without an explicit commit the DELETE is never flushed
                # (this module commits explicitly elsewhere, see
                # create_event_for_lesson).
                db.session.commit()
            else:
                expiration_date = session.last_use + timedelta(days=60)
                remove_session.schedule(args=(session_id,), eta=expiration_date)
        except NoResultFound:
            print(f'Session {session_id} not found.')
@huey.periodic_task(crontab(day_of_week='1'))
def create_events():
    """Weekly periodic task: top up the upcoming events of every lesson."""
    app, db = create_app(None, minimal=True)
    with app.app_context():
        for lesson in db.session.query(Lesson).all():
            create_event_for_lesson(db, lesson)
@huey.task()
def create_events_now(lesson_id):
    """Immediately generate upcoming events for a single lesson.

    :param lesson_id: primary key of the Lesson to generate events for.
    """
    app, db = create_app(None, minimal=True)
    with app.app_context():
        lesson: Lesson = (
            db.session.query(Lesson)
            .filter(Lesson.id == lesson_id)
            .one()
        )
        create_event_for_lesson(db, lesson)
def next_weekday(date, weekday: int):
    """Return the next date strictly after *date* falling on *weekday*.

    *weekday* uses Python's convention (Monday == 0 ... Sunday == 6).
    If *date* itself falls on *weekday*, the result is one week later.
    """
    # Map the signed day difference into the range 1..7.
    delta = (weekday - date.weekday() - 1) % 7 + 1
    return date + timedelta(days=delta)
def create_event_for_lesson(db, lesson):
    """Ensure the lesson has four future Event rows, one per week.

    Queries the lesson's future events, then appends new events on the
    lesson's weekday until four future events exist, and commits.

    :param db: the SQLAlchemy database handle (exposes db.session).
    :param lesson: the Lesson to generate events for; its ``weekday``
        attribute is an enum whose ``.value`` is a Python weekday index.
    """
    # All events of this lesson that are still in the future, oldest first.
    events = db.session.query(Event) \
        .filter((Event.lesson_id == lesson.id) &
                (Event.date > datetime.now().date())) \
        .order_by(Event.date) \
        .all()
    # Start from today if there are no future events, else from the last one.
    last_date = datetime.now().date() if len(events) == 0 else events[-1].date
    # Top up to four future events.
    for i in range(len(events), 4):
        next_date = next_weekday(last_date, lesson.weekday.value)
        # Dead code kept as-is: the period lookup below is disabled and
        # period_id is hard-coded instead — see NOTE further down.
        """current_period = db.session.query(Period) \
            .filter((Period.school_id == lesson.school_id) &
                    (Period.begin_date < next_date) &
                    (Period.due_date > next_date)) \
            .one()"""
        _event = Event()
        _event.date = next_date
        _event.school_id = lesson.school_id
        _event.lesson_id = lesson.id
        # NOTE(review): period_id is hard-coded to 1; presumably a stopgap
        # until the commented-out Period query above is re-enabled — confirm.
        _event.period_id = 1
        db.session.add(_event)
        last_date = next_date
    db.session.commit()
from huey import crontab
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import exists
from app import create_app
from app.models.session import Session
from app.models.period import Period
from app.models.event import Event
from app.models.lesson import Lesson
from app.utils.tasks import huey
@huey.task()
def remove_session(session_id):
    """Delete the session with the given id once it has expired.

    If the session is still in use, the task re-schedules itself for the
    session's projected expiration date (last use + 60 days).

    :param session_id: primary key of the Session row to check.
    """
    app, db = create_app(None, minimal=True)
    with app.app_context():
        try:
            # Bug fix: the query must select the Session model, not the raw
            # session_id value — query(session_id) cannot build a valid SELECT.
            session: Session = db.session.query(Session) \
                .filter(Session.id == session_id) \
                .one()
            if session.is_expired():
                db.session.delete(session)
                # Without an explicit commit the DELETE is never flushed.
                db.session.commit()
            else:
                expiration_date = session.last_use + timedelta(days=60)
                remove_session.schedule(args=(session_id,), eta=expiration_date)
        except NoResultFound:
            print(f'Session {session_id} not found.')
@huey.periodic_task(crontab(day_of_week='1'))
def create_events():
app, db = create_app(None, minimal=True)
with app.app_context():
lessons = db.session.query(Lesson).all()
for lesson in lessons:
create_event_for_lesson(db, lesson)
@huey.task()
def create_events_now(lesson_id):
app, db = create_app(None, minimal=True)
with app.app_context():
lesson: Lesson = db.session.query(Lesson) \
.filter(Lesson.id == lesson_id).one()
create_event_for_lesson(db, lesson)
def next_weekday(date, weekday: int):
days_ahead = weekday - date.weekday()
if days_ahead <= 0:
days_ahead += 7
return (date + timedelta(days=days_ahead))
def create_event_for_lesson(db, lesson):
events = db.session.query(Event) \
.filter((Event.lesson_id == lesson.id) &
(Event.date > datetime.now().date())) \
.order_by(Event.date) \
.all()
last_date = datetime.now().date() if len(events) == 0 else events[-1].date
for i in range(len(events), 4):
next_date = next_weekday(last_date, lesson.weekday.value)
"""current_period = db.session.query(Period) \
.filter((Period.school_id == lesson.school_id) &
(Period.begin_date < next_date) &
(Period.due_date > next_date)) \
.one()"""
_event = Event()
_event.date = next_date
_event.school_id = lesson.school_id
_event.lesson_id = lesson.id
_event.period_id = 1
db.session.add(_event)
last_date = next_date
db.session.commit() | 0.451568 | 0.099645 |
import pprint
import re # noqa: F401
import six
class HandledError(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_id': 'string',
'timestamp': 'string',
'device_name': 'string',
'os_version': 'string',
'os_type': 'string',
'country': 'string',
'language': 'string',
'user_id': 'string'
}
attribute_map = {
'error_id': 'error_id',
'timestamp': 'timestamp',
'device_name': 'device_name',
'os_version': 'os_version',
'os_type': 'os_type',
'country': 'country',
'language': 'language',
'user_id': 'user_id'
}
def __init__(self, error_id=None, timestamp=None, device_name=None, os_version=None, os_type=None, country=None, language=None, user_id=None): # noqa: E501
"""HandledError - a model defined in Swagger""" # noqa: E501
self._error_id = None
self._timestamp = None
self._device_name = None
self._os_version = None
self._os_type = None
self._country = None
self._language = None
self._user_id = None
self.discriminator = None
if error_id is not None:
self.error_id = error_id
if timestamp is not None:
self.timestamp = timestamp
if device_name is not None:
self.device_name = device_name
if os_version is not None:
self.os_version = os_version
if os_type is not None:
self.os_type = os_type
if country is not None:
self.country = country
if language is not None:
self.language = language
if user_id is not None:
self.user_id = user_id
@property
def error_id(self):
"""Gets the error_id of this HandledError. # noqa: E501
:return: The error_id of this HandledError. # noqa: E501
:rtype: string
"""
return self._error_id
@error_id.setter
def error_id(self, error_id):
"""Sets the error_id of this HandledError.
:param error_id: The error_id of this HandledError. # noqa: E501
:type: string
"""
self._error_id = error_id
@property
def timestamp(self):
"""Gets the timestamp of this HandledError. # noqa: E501
:return: The timestamp of this HandledError. # noqa: E501
:rtype: string
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Sets the timestamp of this HandledError.
:param timestamp: The timestamp of this HandledError. # noqa: E501
:type: string
"""
self._timestamp = timestamp
@property
def device_name(self):
"""Gets the device_name of this HandledError. # noqa: E501
:return: The device_name of this HandledError. # noqa: E501
:rtype: string
"""
return self._device_name
@device_name.setter
def device_name(self, device_name):
"""Sets the device_name of this HandledError.
:param device_name: The device_name of this HandledError. # noqa: E501
:type: string
"""
self._device_name = device_name
@property
def os_version(self):
"""Gets the os_version of this HandledError. # noqa: E501
:return: The os_version of this HandledError. # noqa: E501
:rtype: string
"""
return self._os_version
@os_version.setter
def os_version(self, os_version):
"""Sets the os_version of this HandledError.
:param os_version: The os_version of this HandledError. # noqa: E501
:type: string
"""
self._os_version = os_version
@property
def os_type(self):
"""Gets the os_type of this HandledError. # noqa: E501
:return: The os_type of this HandledError. # noqa: E501
:rtype: string
"""
return self._os_type
@os_type.setter
def os_type(self, os_type):
"""Sets the os_type of this HandledError.
:param os_type: The os_type of this HandledError. # noqa: E501
:type: string
"""
self._os_type = os_type
@property
def country(self):
"""Gets the country of this HandledError. # noqa: E501
:return: The country of this HandledError. # noqa: E501
:rtype: string
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this HandledError.
:param country: The country of this HandledError. # noqa: E501
:type: string
"""
self._country = country
@property
def language(self):
"""Gets the language of this HandledError. # noqa: E501
:return: The language of this HandledError. # noqa: E501
:rtype: string
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this HandledError.
:param language: The language of this HandledError. # noqa: E501
:type: string
"""
self._language = language
@property
def user_id(self):
"""Gets the user_id of this HandledError. # noqa: E501
:return: The user_id of this HandledError. # noqa: E501
:rtype: string
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this HandledError.
:param user_id: The user_id of this HandledError. # noqa: E501
:type: string
"""
self._user_id = user_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HandledError):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | sdks/python/appcenter_sdk/models/HandledError.py | import pprint
import re # noqa: F401
import six
class HandledError(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_id': 'string',
'timestamp': 'string',
'device_name': 'string',
'os_version': 'string',
'os_type': 'string',
'country': 'string',
'language': 'string',
'user_id': 'string'
}
attribute_map = {
'error_id': 'error_id',
'timestamp': 'timestamp',
'device_name': 'device_name',
'os_version': 'os_version',
'os_type': 'os_type',
'country': 'country',
'language': 'language',
'user_id': 'user_id'
}
def __init__(self, error_id=None, timestamp=None, device_name=None, os_version=None, os_type=None, country=None, language=None, user_id=None): # noqa: E501
"""HandledError - a model defined in Swagger""" # noqa: E501
self._error_id = None
self._timestamp = None
self._device_name = None
self._os_version = None
self._os_type = None
self._country = None
self._language = None
self._user_id = None
self.discriminator = None
if error_id is not None:
self.error_id = error_id
if timestamp is not None:
self.timestamp = timestamp
if device_name is not None:
self.device_name = device_name
if os_version is not None:
self.os_version = os_version
if os_type is not None:
self.os_type = os_type
if country is not None:
self.country = country
if language is not None:
self.language = language
if user_id is not None:
self.user_id = user_id
@property
def error_id(self):
"""Gets the error_id of this HandledError. # noqa: E501
:return: The error_id of this HandledError. # noqa: E501
:rtype: string
"""
return self._error_id
@error_id.setter
def error_id(self, error_id):
"""Sets the error_id of this HandledError.
:param error_id: The error_id of this HandledError. # noqa: E501
:type: string
"""
self._error_id = error_id
@property
def timestamp(self):
"""Gets the timestamp of this HandledError. # noqa: E501
:return: The timestamp of this HandledError. # noqa: E501
:rtype: string
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Sets the timestamp of this HandledError.
:param timestamp: The timestamp of this HandledError. # noqa: E501
:type: string
"""
self._timestamp = timestamp
@property
def device_name(self):
"""Gets the device_name of this HandledError. # noqa: E501
:return: The device_name of this HandledError. # noqa: E501
:rtype: string
"""
return self._device_name
@device_name.setter
def device_name(self, device_name):
"""Sets the device_name of this HandledError.
:param device_name: The device_name of this HandledError. # noqa: E501
:type: string
"""
self._device_name = device_name
@property
def os_version(self):
"""Gets the os_version of this HandledError. # noqa: E501
:return: The os_version of this HandledError. # noqa: E501
:rtype: string
"""
return self._os_version
@os_version.setter
def os_version(self, os_version):
"""Sets the os_version of this HandledError.
:param os_version: The os_version of this HandledError. # noqa: E501
:type: string
"""
self._os_version = os_version
@property
def os_type(self):
"""Gets the os_type of this HandledError. # noqa: E501
:return: The os_type of this HandledError. # noqa: E501
:rtype: string
"""
return self._os_type
@os_type.setter
def os_type(self, os_type):
"""Sets the os_type of this HandledError.
:param os_type: The os_type of this HandledError. # noqa: E501
:type: string
"""
self._os_type = os_type
@property
def country(self):
"""Gets the country of this HandledError. # noqa: E501
:return: The country of this HandledError. # noqa: E501
:rtype: string
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this HandledError.
:param country: The country of this HandledError. # noqa: E501
:type: string
"""
self._country = country
@property
def language(self):
"""Gets the language of this HandledError. # noqa: E501
:return: The language of this HandledError. # noqa: E501
:rtype: string
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this HandledError.
:param language: The language of this HandledError. # noqa: E501
:type: string
"""
self._language = language
@property
def user_id(self):
"""Gets the user_id of this HandledError. # noqa: E501
:return: The user_id of this HandledError. # noqa: E501
:rtype: string
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this HandledError.
:param user_id: The user_id of this HandledError. # noqa: E501
:type: string
"""
self._user_id = user_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HandledError):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | 0.639736 | 0.070336 |
from typing import List, Optional
import attr
from genyrator.entities.Entity import Entity
from genyrator.entities.File import create_files_from_template_config, FileList
from genyrator.template_config import create_template_config, TemplateConfig
@attr.s
class Schema(object):
    """A generated API project: its entities plus the rendered output files.

    Holds the rendered TemplateConfig and FileList and exposes one
    ``write_*`` method per file category, plus ``write_files`` for all.
    """
    module_name: str = attr.ib()
    entities: List[Entity] = attr.ib()
    templates: TemplateConfig = attr.ib()
    files: FileList = attr.ib()
    api_name: str = attr.ib()
    api_description: str = attr.ib()

    def write_files(self) -> None:
        # Write every file of every category held in the FileList.
        for file_list in self.files:
            [f.write() for f in file_list]

    def write_resources(self):
        # Write only the resource files.
        for resource in self.files.resources:
            resource.write()

    def write_db_init(self):
        # NOTE(review): this body is identical to write_db_models — presumably
        # it was meant to iterate a db_init category; confirm against FileList.
        for model in self.files.db_models:
            model.write()

    def write_db_models(self):
        # Write only the database model files.
        for model in self.files.db_models:
            model.write()

    def write_fixtures(self):
        # Write only the fixture files.
        for model in self.files.fixtures:
            model.write()

    def write_core_files(self):
        # Write the core scaffolding files.
        for core_file in self.files.core:
            core_file.write()

    def write_domain_models(self):
        # Write only the domain model files.
        for domain_model in self.files.domain_models:
            domain_model.write()
def create_schema(
        module_name: str,
        entities: List[Entity],
        db_import_path: Optional[str] = None,
        api_name: Optional[str] = None,
        api_description: Optional[str] = None,
        file_path: Optional[List[str]] = None,
) -> Schema:
    """Build a Schema for *entities*, deriving defaults from *module_name*.

    Unset (or empty) optional arguments default to: a ``<module>.sqlalchemy``
    import path, a file path of ``[module_name]``, an API name equal to the
    module name, and an empty API description.
    """
    if not db_import_path:
        db_import_path = '{}.sqlalchemy'.format(module_name)
    if not file_path:
        file_path = [module_name]
    if not api_name:
        api_name = module_name
    if not api_description:
        api_description = ''
    template_config = create_template_config(
        module_name=module_name,
        db_import_path=db_import_path,
        entities=entities,
        api_name=api_name,
        api_description=api_description,
    )
    return Schema(
        module_name=module_name,
        entities=entities,
        templates=template_config,
        files=create_files_from_template_config(file_path, template_config),
        api_name=api_name,
        api_description=api_description,
    )
import attr
from genyrator.entities.Entity import Entity
from genyrator.entities.File import create_files_from_template_config, FileList
from genyrator.template_config import create_template_config, TemplateConfig
@attr.s
class Schema(object):
module_name: str = attr.ib()
entities: List[Entity] = attr.ib()
templates: TemplateConfig = attr.ib()
files: FileList = attr.ib()
api_name: str = attr.ib()
api_description: str = attr.ib()
def write_files(self) -> None:
for file_list in self.files:
[f.write() for f in file_list]
def write_resources(self):
for resource in self.files.resources:
resource.write()
def write_db_init(self):
for model in self.files.db_models:
model.write()
def write_db_models(self):
for model in self.files.db_models:
model.write()
def write_fixtures(self):
for model in self.files.fixtures:
model.write()
def write_core_files(self):
for core_file in self.files.core:
core_file.write()
def write_domain_models(self):
for domain_model in self.files.domain_models:
domain_model.write()
def create_schema(
module_name: str,
entities: List[Entity],
db_import_path: Optional[str] = None,
api_name: Optional[str] = None,
api_description: Optional[str] = None,
file_path: Optional[List[str]] = None,
) -> Schema:
db_import_path = db_import_path if db_import_path else '{}.sqlalchemy'.format(module_name)
file_path = file_path if file_path else [module_name]
api_name = api_name if api_name else module_name
api_description = api_description if api_description else ''
template_config = create_template_config(
module_name=module_name,
db_import_path=db_import_path,
entities=entities,
api_name=api_name,
api_description=api_description,
)
file_list = create_files_from_template_config(file_path, template_config)
return Schema(
module_name=module_name,
entities=entities,
templates=template_config,
files=file_list,
api_name=api_name,
api_description=api_description,
) | 0.615088 | 0.134719 |
from datetime import datetime, timedelta
from functools import partial
import voluptuous as vol
from homeassistant.components import sensor
from homeassistant.const import (
ATTR_DEVICE_CLASS,
CONF_AT,
CONF_ENTITY_ID,
CONF_OFFSET,
CONF_PLATFORM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HassJob, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import (
async_track_point_in_time,
async_track_state_change_event,
async_track_time_change,
)
import homeassistant.util.dt as dt_util
# mypy: allow-untyped-defs, no-check-untyped-defs
_TIME_TRIGGER_ENTITY_REFERENCE = vol.All(
str, cv.entity_domain(["input_datetime", "sensor"])
)
_TIME_TRIGGER_WITH_OFFSET_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): _TIME_TRIGGER_ENTITY_REFERENCE,
vol.Required(CONF_OFFSET): cv.time_period,
}
)
_TIME_TRIGGER_SCHEMA = vol.Any(
cv.time,
_TIME_TRIGGER_ENTITY_REFERENCE,
_TIME_TRIGGER_WITH_OFFSET_SCHEMA,
msg="Expected HH:MM, HH:MM:SS or Entity ID with domain 'input_datetime' or 'sensor'",
)
TRIGGER_SCHEMA = cv.TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): "time",
vol.Required(CONF_AT): vol.All(cv.ensure_list, [_TIME_TRIGGER_SCHEMA]),
}
)
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
trigger_data = automation_info["trigger_data"]
entities = {}
removes = []
job = HassJob(action)
offsets = {}
@callback
def time_automation_listener(description, now, *, entity_id=None):
"""Listen for time changes and calls action."""
hass.async_run_hass_job(
job,
{
"trigger": {
**trigger_data,
"platform": "time",
"now": now,
"description": description,
"entity_id": entity_id,
}
},
)
@callback
def update_entity_trigger_event(event):
"""update_entity_trigger from the event."""
return update_entity_trigger(event.data["entity_id"], event.data["new_state"])
@callback
def update_entity_trigger(entity_id, new_state=None):
"""Update the entity trigger for the entity_id."""
# If a listener was already set up for entity, remove it.
remove = entities.pop(entity_id, None)
if remove:
remove()
remove = None
if not new_state:
return
offset = offsets[entity_id] if entity_id in offsets else timedelta(0)
# Check state of entity. If valid, set up a listener.
if new_state.domain == "input_datetime":
if has_date := new_state.attributes["has_date"]:
year = new_state.attributes["year"]
month = new_state.attributes["month"]
day = new_state.attributes["day"]
if has_time := new_state.attributes["has_time"]:
hour = new_state.attributes["hour"]
minute = new_state.attributes["minute"]
second = new_state.attributes["second"]
else:
# If no time then use midnight.
hour = minute = second = 0
if has_date:
# If input_datetime has date, then track point in time.
trigger_dt = (
datetime(
year,
month,
day,
hour,
minute,
second,
tzinfo=dt_util.DEFAULT_TIME_ZONE,
)
+ offset
)
# Only set up listener if time is now or in the future.
if trigger_dt >= dt_util.now():
remove = async_track_point_in_time(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
trigger_dt,
)
elif has_time:
# Else if it has time, then track time change.
remove = async_track_time_change(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
hour=hour,
minute=minute,
second=second,
)
elif (
new_state.domain == "sensor"
and new_state.attributes.get(ATTR_DEVICE_CLASS)
== sensor.DEVICE_CLASS_TIMESTAMP
and new_state.state not in (STATE_UNAVAILABLE, STATE_UNKNOWN)
):
trigger_dt = dt_util.parse_datetime(new_state.state) + offset
if trigger_dt is not None and trigger_dt > dt_util.utcnow():
remove = async_track_point_in_time(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
trigger_dt,
)
# Was a listener set up?
if remove:
entities[entity_id] = remove
to_track = []
for at_time in config[CONF_AT]:
if isinstance(at_time, str):
# entity
to_track.append(at_time)
update_entity_trigger(at_time, new_state=hass.states.get(at_time))
elif isinstance(at_time, dict) and CONF_OFFSET in at_time:
# entity with offset
entity_id = at_time.get(CONF_ENTITY_ID)
to_track.append(entity_id)
offsets[entity_id] = at_time.get(CONF_OFFSET)
update_entity_trigger(
entity_id,
new_state=hass.states.get(entity_id),
)
else:
# datetime.time
removes.append(
async_track_time_change(
hass,
partial(time_automation_listener, "time"),
hour=at_time.hour,
minute=at_time.minute,
second=at_time.second,
)
)
# Track state changes of any entities.
removes.append(
async_track_state_change_event(hass, to_track, update_entity_trigger_event)
)
@callback
def remove_track_time_changes():
"""Remove tracked time changes."""
for remove in entities.values():
remove()
for remove in removes:
remove()
return remove_track_time_changes | homeassistant/components/homeassistant/triggers/time.py | from datetime import datetime, timedelta
from functools import partial
import voluptuous as vol
from homeassistant.components import sensor
from homeassistant.const import (
ATTR_DEVICE_CLASS,
CONF_AT,
CONF_ENTITY_ID,
CONF_OFFSET,
CONF_PLATFORM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HassJob, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import (
async_track_point_in_time,
async_track_state_change_event,
async_track_time_change,
)
import homeassistant.util.dt as dt_util
# mypy: allow-untyped-defs, no-check-untyped-defs
_TIME_TRIGGER_ENTITY_REFERENCE = vol.All(
str, cv.entity_domain(["input_datetime", "sensor"])
)
_TIME_TRIGGER_WITH_OFFSET_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): _TIME_TRIGGER_ENTITY_REFERENCE,
vol.Required(CONF_OFFSET): cv.time_period,
}
)
_TIME_TRIGGER_SCHEMA = vol.Any(
cv.time,
_TIME_TRIGGER_ENTITY_REFERENCE,
_TIME_TRIGGER_WITH_OFFSET_SCHEMA,
msg="Expected HH:MM, HH:MM:SS or Entity ID with domain 'input_datetime' or 'sensor'",
)
TRIGGER_SCHEMA = cv.TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): "time",
vol.Required(CONF_AT): vol.All(cv.ensure_list, [_TIME_TRIGGER_SCHEMA]),
}
)
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
trigger_data = automation_info["trigger_data"]
entities = {}
removes = []
job = HassJob(action)
offsets = {}
@callback
def time_automation_listener(description, now, *, entity_id=None):
"""Listen for time changes and calls action."""
hass.async_run_hass_job(
job,
{
"trigger": {
**trigger_data,
"platform": "time",
"now": now,
"description": description,
"entity_id": entity_id,
}
},
)
@callback
def update_entity_trigger_event(event):
"""update_entity_trigger from the event."""
return update_entity_trigger(event.data["entity_id"], event.data["new_state"])
@callback
def update_entity_trigger(entity_id, new_state=None):
"""Update the entity trigger for the entity_id."""
# If a listener was already set up for entity, remove it.
remove = entities.pop(entity_id, None)
if remove:
remove()
remove = None
if not new_state:
return
offset = offsets[entity_id] if entity_id in offsets else timedelta(0)
# Check state of entity. If valid, set up a listener.
if new_state.domain == "input_datetime":
if has_date := new_state.attributes["has_date"]:
year = new_state.attributes["year"]
month = new_state.attributes["month"]
day = new_state.attributes["day"]
if has_time := new_state.attributes["has_time"]:
hour = new_state.attributes["hour"]
minute = new_state.attributes["minute"]
second = new_state.attributes["second"]
else:
# If no time then use midnight.
hour = minute = second = 0
if has_date:
# If input_datetime has date, then track point in time.
trigger_dt = (
datetime(
year,
month,
day,
hour,
minute,
second,
tzinfo=dt_util.DEFAULT_TIME_ZONE,
)
+ offset
)
# Only set up listener if time is now or in the future.
if trigger_dt >= dt_util.now():
remove = async_track_point_in_time(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
trigger_dt,
)
elif has_time:
# Else if it has time, then track time change.
remove = async_track_time_change(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
hour=hour,
minute=minute,
second=second,
)
elif (
new_state.domain == "sensor"
and new_state.attributes.get(ATTR_DEVICE_CLASS)
== sensor.DEVICE_CLASS_TIMESTAMP
and new_state.state not in (STATE_UNAVAILABLE, STATE_UNKNOWN)
):
trigger_dt = dt_util.parse_datetime(new_state.state) + offset
if trigger_dt is not None and trigger_dt > dt_util.utcnow():
remove = async_track_point_in_time(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
trigger_dt,
)
# Was a listener set up?
if remove:
entities[entity_id] = remove
to_track = []
for at_time in config[CONF_AT]:
if isinstance(at_time, str):
# entity
to_track.append(at_time)
update_entity_trigger(at_time, new_state=hass.states.get(at_time))
elif isinstance(at_time, dict) and CONF_OFFSET in at_time:
# entity with offset
entity_id = at_time.get(CONF_ENTITY_ID)
to_track.append(entity_id)
offsets[entity_id] = at_time.get(CONF_OFFSET)
update_entity_trigger(
entity_id,
new_state=hass.states.get(entity_id),
)
else:
# datetime.time
removes.append(
async_track_time_change(
hass,
partial(time_automation_listener, "time"),
hour=at_time.hour,
minute=at_time.minute,
second=at_time.second,
)
)
# Track state changes of any entities.
removes.append(
async_track_state_change_event(hass, to_track, update_entity_trigger_event)
)
@callback
def remove_track_time_changes():
"""Remove tracked time changes."""
for remove in entities.values():
remove()
for remove in removes:
remove()
return remove_track_time_changes | 0.632957 | 0.150278 |
from django.db import models
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext as _
class Room(models.Model):
    """
    Represents a room at Maiz.

    Incidentally, there is only one room per user, so for rooms in the
    Cyclament style where several residents are placed together,
    several Room rows must be created.
    """
    # Human-readable room number as written on the door.
    number = models.CharField(max_length = 20, name = _("room number"), help_text = _("Number of the room, written on the door."))
    # Unique "XXXX-XXXX-XXXX" identifier handed out with the key; optional.
    ticket = models.CharField(max_length = 14, name = _("ticket"), help_text = _("A unique identifier that was given to you with your key."), unique = True, blank = True, null = True)

    def __unicode__(self):
        return self.number

    def _gen_rand_ticket(self):
        # Build a pseudo-random "XXXX-XXXX-XXXX" candidate from the RNG and
        # the current time.
        # NOTE(review): not cryptographically secure; presumably acceptable
        # for key tickets, but confirm — the `secrets` module would be safer.
        from random import randrange
        part1 = randrange(0, 9999)
        from time import time
        part2 = (time() * 10000) % 10000
        part3 = ((int(time() * 100) % 100) * randrange(0, 99) + randrange(0, 4200)) % 10000
        return "%04d-%04d-%04d" % (part1, part2, part3)

    def get_new_ticket(self, commit=True):
        """
        Generate a new unique ticket, store it, and return it.

        The ticket is saved only if *commit* is True; otherwise
        save() must be called manually.
        """
        # Loop until a ticket not already used by another Room is drawn.
        while True:
            t = self._gen_rand_ticket()
            if Room.objects.filter(ticket=t).count() == 0:
                break
        self.ticket = t
        if commit:
            self.save()
        return t
class Presence(models.Model):
    """A user's presence at Maiz.

    Serves as the Django user profile: links a user to a room and to
    network interfaces (MAC addresses).
    """

    user = models.ForeignKey(User, unique = True)
    room = models.ForeignKey(Room, unique = True, blank = True, null = True)
    # The label was previously passed as ``name`` (which sets the Django
    # field/column name and cannot contain spaces); ``verbose_name`` is
    # the human-readable label. The help text is now wrapped in _() for
    # translation, consistent with the other fields, and the "adress"
    # typo is fixed.
    netif = models.TextField(verbose_name = _("network interface"), help_text = _("The MAC address(es) of your network card. If unsure, keep the pre-filled value"))
    talkings = models.BooleanField(default=True)

    def __unicode__(self):
        if self.room != None:
            return _('Room %(room)s') % {'room': unicode(self.room)}
        else:
            return _('(No room assigned)')
class Promo(Group):
    """A promo (class year).

    Fundamentally identical to a Django Group -- creating a Promo also
    creates the underlying Group -- but users are asked to choose a
    promo, not a group.
    """
    class Meta:
        # Present promos alphabetically in choice lists.
        ordering = ['name']
# Hacks
# The following makes authentication case-insensitive. No questions,
# thank you.
# Keep a reference to the original manager method so the hack can
# delegate to it.
_tmp = User.objects.get
def hack_user_get(*args, **kwargs):
    """
    This hack makes user lookups case-insensitive. Particularly useful
    for authenticating users on case-sensitive SQL engines such as
    sqlite.
    """
    # Rewrite exact-match username lookups into case-insensitive ones.
    if 'username' in kwargs:
        kwargs['username__iexact'] = kwargs.pop('username')
    return _tmp(*args, **kwargs)
# Monkey-patch the default manager (module-level side effect).
User.objects.get = hack_user_get
# The following stores passwords in near-cleartext (plain hex encoding).
# Not a good idea, I know; someday this should be done properly, but it
# is the least bad option we have for plugging unknown services into the
# database when we cannot know whether they support a real hash scheme.
from django.utils.encoding import smart_str
import django.contrib.auth.models

# Hex-encode a string.
def _hexstr(s):
    # NOTE(review): hex(ord(x))[2:] yields a single digit for code
    # points below 0x10, so the encoding is not fixed-width; left
    # unchanged for compatibility with already-stored passwords.
    return "".join([hex(ord(x))[2:] for x in smart_str(s)]).upper()

# Replacement for the password-hashing method. Django stores passwords
# as "algorithm$salt$hash"; use the 'maiz' pseudo-algorithm with an
# empty salt so _hack_get_hexdigest below recognises it. (The previous
# line formatted a placeholder-less string with %, which raised
# TypeError on every set_password() call.)
def _hack_set_password(self, raw_password):
    self.password = 'maiz$$%s' % _hexstr(raw_password)
User.set_password = _hack_set_password

# Replacement for the hash-verification function: handle the 'maiz'
# pseudo-algorithm ourselves, delegate everything else to Django.
_orig_get_hexdigest = django.contrib.auth.models.get_hexdigest
def _hack_get_hexdigest(algorithm, salt, raw_password):
    if algorithm == 'maiz':
        return _hexstr(raw_password)
    else:
        return _orig_get_hexdigest(algorithm, salt, raw_password)
django.contrib.auth.models.get_hexdigest = _hack_get_hexdigest | register/models.py | from django.db import models
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext as _
class Room(models.Model):
"""
Représente une chambre de Maiz.
Accessoirement, il n'y a qu'une chambre par utilisateur, donc
pour les chambres du style Cyclament où on met plusieurs
résidents, il faut créer plusieurs chambres.
"""
number = models.CharField(max_length = 20, name = _("room number"), help_text = _("Number of the room, written on the door."))
ticket = models.CharField(max_length = 14, name = _("ticket"), help_text = _("A unique identifier that was given to you with your key."), unique = True, blank = True, null = True)
def __unicode__(self):
return self.number
def _gen_rand_ticket(self):
from random import randrange
part1 = randrange(0, 9999)
from time import time
part2 = (time() * 10000) % 10000
part3 = ((int(time() * 100) % 100) * randrange(0, 99) + randrange(0, 4200)) % 10000
return "%04d-%04d-%04d" % (part1, part2, part3)
def get_new_ticket(self, commit=True):
"""
Génère un nouveau ticket, l'enregistre, et le retourne.
Le ticket est enregistré uniquement si *commit* vaut
*True*. Sinon il faut appeller *save()* à la main.
"""
while True:
t = self._gen_rand_ticket()
if Room.objects.filter(ticket=t).count() == 0:
break
self.ticket = t
if commit:
self.save()
return t
class Presence(models.Model):
"""
Représente la présence d'un utilisateur à Maiz. C'est ce qui
fait office de profil utilisateur au sens de Django. La présence
lie un utilisateur à une chambre et des cartes réseau.
"""
user = models.ForeignKey(User, unique = True)
room = models.ForeignKey(Room, unique = True, blank = True, null = True)
netif = models.TextField(name = _("network interface"), help_text = ("The MAC adress(es) of your network card. If unsure, keep the pre-filled value"))
talkings = models.BooleanField(default=True)
def __unicode__(self):
if self.room != None:
return _('Room %(room)s') % {'room': unicode(self.room)}
else:
return _('(No room assigned)')
class Promo(Group):
"""
Une promo. Fondamentalement, c'est exactement pareil qu'un
groupe, et d'ailleurs quand une promo est créée, un groupe est
créé en conséquence. Mais on demande aux utilisateurs de choisir
une promo, pas un groupe.
"""
class Meta:
ordering = ['name']
# Hacks
# Ça sert à avoir une authentification insensible à la casse. On ne pose
# pas de questions, merci.
_tmp = User.objects.get
def hack_user_get(*args, **kwargs):
"""
Ce hack permet de rendre la recherche d'utilisateurs insensible
à la casse. Particulièrement utile pour l'authentification des
utilisateurs avec des moteurs SQL sensibles à la casse, comme
sqlite par exemple.
"""
if 'username' in kwargs:
kwargs['username__iexact'] = kwargs.pop('username')
return _tmp(*args, **kwargs)
User.objects.get = hack_user_get
# Là ça sert à avoir des mots de passe stockés en quasi-cleartext...
# C'est pas une bonne idée je sais, un jour faudra faire mieux, mais là
# c'est la moins pire qu'on ait pour pouvoir brancher sur la base des
# services inconnus dont on ne sait pas s'ils vont gérer ce hash là en
# particulier ou pas.
from django.utils.encoding import smart_str
import django.contrib.auth.models
# Encodage d'une chaîne en hexadeciman
def _hexstr(s):
return "".join([hex(ord(x))[2:] for x in smart_str(s)]).upper()
# Remplacement pour la méthode de hashage
def _hack_set_password(self, raw_password):
self.password = '<PASSWORD>' % _hexstr(raw_password)
User.set_password = _hack_set_password
# Remplacement pour la méthode de vérification du hash
_orig_get_hexdigest = django.contrib.auth.models.get_hexdigest
def _hack_get_hexdigest(algorithm, salt, raw_password):
if algorithm == 'maiz':
return _hexstr(raw_password)
else:
return _orig_get_hexdigest(algorithm, salt, raw_password)
django.contrib.auth.models.get_hexdigest = _hack_get_hexdigest | 0.347869 | 0.237443 |
from __future__ import absolute_import
import os
import os.path
import unittest
from ... import config
from ... import logging
from ...utils import registry
_TEST_CASES = {}


def make_test_case(test_kind, *args, **kwargs):
    """Create and return the TestCase subclass registered for 'test_kind'.

    Raises ValueError when no test case class is registered under that
    kind.
    """
    try:
        case_class = _TEST_CASES[test_kind]
    except KeyError:
        raise ValueError("Unknown test kind '%s'" % (test_kind))
    return case_class(*args, **kwargs)
class TestCase(unittest.TestCase):
    """
    A test case to execute.
    """
    # The registry metaclass records concrete subclasses in _TEST_CASES
    # under their REGISTERED_NAME so make_test_case() can find them; this
    # abstract base class itself opts out of registration.
    __metaclass__ = registry.make_registry_metaclass(_TEST_CASES)

    REGISTERED_NAME = registry.LEAVE_UNREGISTERED
def __init__(self, logger, test_kind, test_name):
    """Initialize the TestCase with the name of the test."""
    unittest.TestCase.__init__(self, methodName="run_test")

    # Validate argument types up front; the tuple order fixes which
    # error gets reported first, matching the original behavior.
    for value, expected_type, message in (
            (logger, logging.Logger, "logger must be a Logger instance"),
            (test_kind, basestring, "test_kind must be a string"),
            (test_name, basestring, "test_name must be a string")):
        if not isinstance(value, expected_type):
            raise TypeError(message)

    # When the TestCase is created by the TestSuiteExecutor (through a call
    # to make_test_case()) logger is an instance of TestQueueLogger. When
    # the TestCase is created by a hook implementation it is an instance
    # of BaseLogger.
    self.logger = logger
    self.test_kind = test_kind
    self.test_name = test_name

    self.fixture = None
    self.return_code = None
    self.is_configured = False
def long_name(self):
    """Return the test's path relative to the current working directory."""
    relative_path = os.path.relpath(self.test_name)
    return relative_path
def basename(self):
    """Return the final path component of the test's name."""
    base = os.path.basename(self.test_name)
    return base
def short_name(self):
    """Return the test's basename without its file extension."""
    root, _extension = os.path.splitext(self.basename())
    return root
def id(self):
    # Override unittest.TestCase.id() so reporting identifies the test
    # by its path rather than by class.method.
    return self.test_name
def shortDescription(self):
    """Return a one-line '<kind> <name>' description of this test."""
    return "{} {}".format(self.test_kind, self.test_name)
def configure(self, fixture, *args, **kwargs):
    """Store 'fixture' as an attribute for later use during execution.

    Raises RuntimeError if invoked a second time.
    """
    if self.is_configured:
        raise RuntimeError("configure can only be called once")
    self.is_configured = True
    self.fixture = fixture
def run_test(self):
    """
    Runs the specified test.

    Abstract: concrete TestCase subclasses must override this; the base
    implementation always raises.
    """
    raise NotImplementedError("run_test must be implemented by TestCase subclasses")
def as_command(self):
    """Return the command invocation that would be used to run the test."""
    process = self._make_process()
    return process.as_command()
def _execute(self, process):
    """
    Runs the specified process.

    Logs the command, waits for completion, records the exit status in
    self.return_code, and fails the test on a nonzero status.
    """
    self.logger.info("Starting %s...\n%s", self.shortDescription(), process.as_command())
    process.start()
    self.logger.info("%s started with pid %s.", self.shortDescription(), process.pid)

    # Block until the process exits; keep the status for reporting.
    self.return_code = process.wait()
    if self.return_code != 0:
        # failureException is unittest's AssertionError by default, so a
        # nonzero exit marks the test as failed (not errored).
        raise self.failureException("%s failed" % (self.shortDescription()))

    self.logger.info("%s finished.", self.shortDescription())
def _make_process(self):
"""
Returns a new Process instance that could be used to run the
test or log the command.
"""
raise NotImplementedError("_make_process must be implemented by TestCase subclasses") | buildscripts/resmokelib/testing/testcases/interface.py | from __future__ import absolute_import
import os
import os.path
import unittest
from ... import config
from ... import logging
from ...utils import registry
_TEST_CASES = {}
def make_test_case(test_kind, *args, **kwargs):
"""
Factory function for creating TestCase instances.
"""
if test_kind not in _TEST_CASES:
raise ValueError("Unknown test kind '%s'" % (test_kind))
return _TEST_CASES[test_kind](*args, **kwargs)
class TestCase(unittest.TestCase):
"""
A test case to execute.
"""
__metaclass__ = registry.make_registry_metaclass(_TEST_CASES)
REGISTERED_NAME = registry.LEAVE_UNREGISTERED
def __init__(self, logger, test_kind, test_name):
"""
Initializes the TestCase with the name of the test.
"""
unittest.TestCase.__init__(self, methodName="run_test")
if not isinstance(logger, logging.Logger):
raise TypeError("logger must be a Logger instance")
if not isinstance(test_kind, basestring):
raise TypeError("test_kind must be a string")
if not isinstance(test_name, basestring):
raise TypeError("test_name must be a string")
# When the TestCase is created by the TestSuiteExecutor (through a call to make_test_case())
# logger is an instance of TestQueueLogger. When the TestCase is created by a hook
# implementation it is an instance of BaseLogger.
self.logger = logger
self.test_kind = test_kind
self.test_name = test_name
self.fixture = None
self.return_code = None
self.is_configured = False
def long_name(self):
"""
Returns the path to the test, relative to the current working directory.
"""
return os.path.relpath(self.test_name)
def basename(self):
"""
Returns the basename of the test.
"""
return os.path.basename(self.test_name)
def short_name(self):
"""
Returns the basename of the test without the file extension.
"""
return os.path.splitext(self.basename())[0]
def id(self):
return self.test_name
def shortDescription(self):
return "%s %s" % (self.test_kind, self.test_name)
def configure(self, fixture, *args, **kwargs):
"""
Stores 'fixture' as an attribute for later use during execution.
"""
if self.is_configured:
raise RuntimeError("configure can only be called once")
self.is_configured = True
self.fixture = fixture
def run_test(self):
"""
Runs the specified test.
"""
raise NotImplementedError("run_test must be implemented by TestCase subclasses")
def as_command(self):
"""
Returns the command invocation used to run the test.
"""
return self._make_process().as_command()
def _execute(self, process):
"""
Runs the specified process.
"""
self.logger.info("Starting %s...\n%s", self.shortDescription(), process.as_command())
process.start()
self.logger.info("%s started with pid %s.", self.shortDescription(), process.pid)
self.return_code = process.wait()
if self.return_code != 0:
raise self.failureException("%s failed" % (self.shortDescription()))
self.logger.info("%s finished.", self.shortDescription())
def _make_process(self):
"""
Returns a new Process instance that could be used to run the
test or log the command.
"""
raise NotImplementedError("_make_process must be implemented by TestCase subclasses") | 0.701202 | 0.344719 |
import PhysicsMixin
import ID
BODIES = """
<dict>
<key>body</key>
<dict>
<key>x</key>
<integer>%(x)s</integer>
<key>y</key>
<integer>%(y)s</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>firstFrame</key>
<string>%(firstframe)s</string>
<key>sheet_id</key>
<integer>%(sheet)s</integer>
<key>id</key>
<integer>%(__objID__)s</integer>
<key>name</key>
<string>%(name)s</string>
<key>classname</key>
<string>%(classname)s</string>
<key>static</key>
<%(static)s/>
<key>spawnframe</key>
<integer>%(spawnframe)s</integer>
%(spawnEventXML)s
<key>spritedata</key>
<string>%(spritedata)s</string>
<key>angle</key>
<integer>%(angle)s</integer>
</dict>
<key>shapes</key>
<array>
<dict>
<key>x</key>
<integer>0</integer>
<key>y</key>
<integer>0</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>type</key>
<string>%(shape)s</string>
<key>friction</key>
<real>%(f)s</real>
<key>density</key>
<integer>%(d)s</integer>
<key>restitution</key>
<real>%(r)s</real>
<key>groupIndex</key>
<integer>%(groupIndex)s</integer>
</dict>
</array>
</dict>
"""
JOINTS = """"""
CONTACTS = """
<dict>
<key>sprite1</key>
<string>%(classname)s</string>
<key>sprite2</key>
<string></string>
<key>eventName</key>
<string>onEnemyHit</string>
</dict>
<dict>
<key>sprite1</key>
<string>:body</string>
<key>sprite2</key>
<string>%(name)s</string>
<key>eventName</key>
<string>onDamage</string>
</dict>
<dict>
<key>sprite1</key>
<string>:head</string>
<key>sprite2</key>
<string>%(name)s</string>
<key>eventName</key>
<string>onDamage</string>
</dict>
<dict>
<key>sprite1</key>
<string>%(name)s</string>
<key>sprite2</key>
<string>rbullet</string>
<key>eventName</key>
<string>onBulletHit</string>
</dict>
<dict>
<key>sprite1</key>
<string>%(name)s</string>
<key>sprite2</key>
<string>lbullet</string>
<key>eventName</key>
<string>onBulletHit</string>
</dict>
<dict>
<key>sprite1</key>
<string>%(name)s</string>
<key>sprite2</key>
<string>cannonBall</string>
<key>eventName</key>
<string>onCannonBallHit</string>
</dict>
"""
class EnemySprite(PhysicsMixin.PhysicsMixin):
    """Sprite describing an enemy body, its shape and contact events."""

    def __init__(self, **kwargs):
        """Collect parameters, apply defaults, and assign a unique id."""
        self.params = kwargs
        self.params['name'] = "Enemy"
        self.process(kwargs)
        # Fill in any parameter the caller did not supply.
        for key, value in (('classname', 'EnemySprite'),
                           ('firstframe', 'pumpkin.png'),
                           ('spritedata', ''),
                           ('shape', 'circ')):
            self.addDefault(key, value)
        self.params['__objID__'] = ID.next()

    def render(self):
        """Return the (bodies, joints, contacts) plist fragments."""
        bodies = BODIES % self.params
        joints = JOINTS % self.params
        contacts = CONTACTS % self.params
        return (bodies, joints, contacts)
if __name__ == "__main__":
print EnemySprite(friction=0.3,x=160,y=10,width=100, height=100,density=10,restitution=0.9).render()[0] | utils/scripts/OOOlevelGen/src/sprites/Enemy.py | import PhysicsMixin
import ID
BODIES = """
<dict>
<key>body</key>
<dict>
<key>x</key>
<integer>%(x)s</integer>
<key>y</key>
<integer>%(y)s</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>firstFrame</key>
<string>%(firstframe)s</string>
<key>sheet_id</key>
<integer>%(sheet)s</integer>
<key>id</key>
<integer>%(__objID__)s</integer>
<key>name</key>
<string>%(name)s</string>
<key>classname</key>
<string>%(classname)s</string>
<key>static</key>
<%(static)s/>
<key>spawnframe</key>
<integer>%(spawnframe)s</integer>
%(spawnEventXML)s
<key>spritedata</key>
<string>%(spritedata)s</string>
<key>angle</key>
<integer>%(angle)s</integer>
</dict>
<key>shapes</key>
<array>
<dict>
<key>x</key>
<integer>0</integer>
<key>y</key>
<integer>0</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>type</key>
<string>%(shape)s</string>
<key>friction</key>
<real>%(f)s</real>
<key>density</key>
<integer>%(d)s</integer>
<key>restitution</key>
<real>%(r)s</real>
<key>groupIndex</key>
<integer>%(groupIndex)s</integer>
</dict>
</array>
</dict>
"""
JOINTS = """"""
CONTACTS = """
<dict>
<key>sprite1</key>
<string>%(classname)s</string>
<key>sprite2</key>
<string></string>
<key>eventName</key>
<string>onEnemyHit</string>
</dict>
<dict>
<key>sprite1</key>
<string>:body</string>
<key>sprite2</key>
<string>%(name)s</string>
<key>eventName</key>
<string>onDamage</string>
</dict>
<dict>
<key>sprite1</key>
<string>:head</string>
<key>sprite2</key>
<string>%(name)s</string>
<key>eventName</key>
<string>onDamage</string>
</dict>
<dict>
<key>sprite1</key>
<string>%(name)s</string>
<key>sprite2</key>
<string>rbullet</string>
<key>eventName</key>
<string>onBulletHit</string>
</dict>
<dict>
<key>sprite1</key>
<string>%(name)s</string>
<key>sprite2</key>
<string>lbullet</string>
<key>eventName</key>
<string>onBulletHit</string>
</dict>
<dict>
<key>sprite1</key>
<string>%(name)s</string>
<key>sprite2</key>
<string>cannonBall</string>
<key>eventName</key>
<string>onCannonBallHit</string>
</dict>
"""
class EnemySprite(PhysicsMixin.PhysicsMixin):
def __init__(self,**kwargs):
self.params = kwargs
self.params['name'] = "Enemy"
self.process(kwargs)
self.addDefault('classname','EnemySprite')
self.addDefault('firstframe','pumpkin.png')
self.addDefault("spritedata", "")
self.addDefault('shape','circ')
self.params['__objID__'] = ID.next()
def render(self):
return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params)
if __name__ == "__main__":
print EnemySprite(friction=0.3,x=160,y=10,width=100, height=100,density=10,restitution=0.9).render()[0] | 0.195671 | 0.124479 |
from datetime import datetime
from marshmallow import fields
from .field_set import FieldSet, FieldSetSchema
class Event(FieldSet):
    """ECS-style 'event' field set.

    Holds metadata describing a single logged event. Every field is
    optional and defaults to None. The parameter names ``hash``, ``id``
    and ``type`` intentionally shadow builtins so the attribute names
    match the ECS field names; the interface is kept as-is.
    """

    def __init__(self,
                 action: str = None,
                 category: str = None,
                 created: datetime = None,
                 dataset: str = None,
                 duration: int = None,
                 end: datetime = None,
                 hash: str = None,
                 id: str = None,
                 kind: str = None,
                 module: str = None,
                 original: str = None,
                 outcome: str = None,
                 risk_score: float = None,
                 risk_score_norm: float = None,
                 severity: int = None,
                 start: datetime = None,
                 timezone: str = None,
                 type: str = None,
                 *args, **kwargs):
        # Forward any extra arguments to the FieldSet base class.
        super().__init__(*args, **kwargs)
        self.action = action
        self.category = category
        self.created = created
        self.dataset = dataset
        self.duration = duration
        self.end = end
        self.hash = hash
        self.id = id
        self.kind = kind
        self.module = module
        self.original = original
        self.outcome = outcome
        self.risk_score = risk_score
        self.risk_score_norm = risk_score_norm
        self.severity = severity
        self.start = start
        self.timezone = timezone
        self.type = type
class EventSchema(FieldSetSchema):
    """Marshmallow schema for Event; datetimes serialise in ISO-8601."""
    action = fields.String()
    category = fields.String()
    created = fields.DateTime(format="iso")
    dataset = fields.String()
    duration = fields.Integer()
    end = fields.DateTime(format="iso")
    hash = fields.String()
    id = fields.String()
    kind = fields.String()
    module = fields.String()
    original = fields.String()
    outcome = fields.String()
    risk_score = fields.Float()
    risk_score_norm = fields.Float()
    severity = fields.Integer()
    start = fields.DateTime(format="iso")
    timezone = fields.String()
type = fields.String() | kubi_ecs_logger/models/fields/event.py | from datetime import datetime
from marshmallow import fields
from .field_set import FieldSet, FieldSetSchema
class Event(FieldSet):
def __init__(self,
action: str = None,
category: str = None,
created: datetime = None,
dataset: str = None,
duration: int = None,
end: datetime = None,
hash: str = None,
id: str = None,
kind: str = None,
module: str = None,
original: str = None,
outcome: str = None,
risk_score: float = None,
risk_score_norm: float = None,
severity: int = None,
start: datetime = None,
timezone: str = None,
type: str = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.action = action
self.category = category
self.created = created
self.dataset = dataset
self.duration = duration
self.end = end
self.hash = hash
self.id = id
self.kind = kind
self.module = module
self.original = original
self.outcome = outcome
self.risk_score = risk_score
self.risk_score_norm = risk_score_norm
self.severity = severity
self.start = start
self.timezone = timezone
self.type = type
class EventSchema(FieldSetSchema):
action = fields.String()
category = fields.String()
created = fields.DateTime(format="iso")
dataset = fields.String()
duration = fields.Integer()
end = fields.DateTime(format="iso")
hash = fields.String()
id = fields.String()
kind = fields.String()
module = fields.String()
original = fields.String()
outcome = fields.String()
risk_score = fields.Float()
risk_score_norm = fields.Float()
severity = fields.Integer()
start = fields.DateTime(format="iso")
timezone = fields.String()
type = fields.String() | 0.74512 | 0.116588 |
import unittest
from chirp.common import http
from chirp.stream import barix
class RemoveTagsTestCase(unittest.TestCase):
    """Tests for the barix._remove_tags helper."""

    def test_basic(self):
        # Text without markup is returned unchanged.
        self.assertEqual("foo bar",
                         barix._remove_tags("foo bar"))
        # Tag markup is stripped while surrounding text is preserved.
        self.assertEqual("foo bar baz",
                         barix._remove_tags("foo <tag>bar</tag> baz"))
_TEST_STATUS_PAGE = """<html>
<head>
<meta http-equiv=refresh content="2; url=uistatusl.html">
</head>
<body><font face="Arial, Helvetica, sans-serif"><font size=2>
<p><br><font size=4><b>
<font color=#8F2635>SENDING</b></font></font><br><br>
<b>5610<br>
5422<br><br>
<img src=d2.gif width=28 height=12> <br><br><br>
<img src=d0.gif width=28 height=12> <br><br>
<img src=d0.gif width=28 height=12> <br><br>
<!--
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
-->
</body>
</html>
"""
_TEST_CLIENTS_PAGE = """<html><head><meta http-equiv=refresh content="2; url=clients.cgi"></head><body>
<h3>BRTP clients: 0</h3><pre></pre><h3>TCP connections</h3><pre>172.16.17.32:src port 80 :dst port 43624
172.16.17.32:src port 11111 :dst port 52770
192.168.80.10:src port 22222 :dst port 60320
10.0.99.98:src port 33333 :dst port 42911
</pre></body></html>
"""
def _mock_get_with_timeout(host, port, path, timeout_s):
    """Serve the canned Barix pages in place of real HTTP fetches."""
    if path == "/clients.cgi":
        return _TEST_CLIENTS_PAGE
    if path == "/uistatusl.html":
        return _TEST_STATUS_PAGE
    # Fail loudly on unexpected paths. The previous bare "assert False"
    # is stripped under "python -O", which would silently return None.
    raise AssertionError("unexpected path %r" % (path,))

# Monkey-patch in our mock of get_with_timeout.
http.get_with_timeout = _mock_get_with_timeout
class BarixTestCase(unittest.TestCase):
    """Exercises Barix.ping() parsing against the canned pages above."""

    def test_basic(self):
        barix_obj = barix.Barix("test.url.com", 666)
        barix_obj.ping()
        # Values parsed from _TEST_STATUS_PAGE.
        self.assertEqual("SENDING", barix_obj.status)
        self.assertEqual("5610", barix_obj.left_level)
        self.assertEqual("5422", barix_obj.right_level)
        # Values parsed from _TEST_CLIENTS_PAGE: three stream clients are
        # expected (the fixture's port-80 entry is not one of them).
        # Stray debug "print" of the clients dict removed.
        self.assertEqual(3, len(barix_obj.clients))
        self.assertEqual(("172.16.17.32", "52770"),
                         barix_obj.clients["11111"])
        self.assertEqual(("192.168.80.10", "60320"),
                         barix_obj.clients["22222"])
        self.assertEqual(("10.0.99.98", "42911"),
                         barix_obj.clients["33333"])
if __name__ == "__main__":
unittest.main() | chirp/stream/barix_test.py | import unittest
from chirp.common import http
from chirp.stream import barix
class RemoveTagsTestCase(unittest.TestCase):
def test_basic(self):
self.assertEqual("foo bar",
barix._remove_tags("foo bar"))
self.assertEqual("foo bar baz",
barix._remove_tags("foo <tag>bar</tag> baz"))
_TEST_STATUS_PAGE = """<html>
<head>
<meta http-equiv=refresh content="2; url=uistatusl.html">
</head>
<body><font face="Arial, Helvetica, sans-serif"><font size=2>
<p><br><font size=4><b>
<font color=#8F2635>SENDING</b></font></font><br><br>
<b>5610<br>
5422<br><br>
<img src=d2.gif width=28 height=12> <br><br><br>
<img src=d0.gif width=28 height=12> <br><br>
<img src=d0.gif width=28 height=12> <br><br>
<!--
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
<img src=d1.gif width=28 height=12> <br>
-->
</body>
</html>
"""
_TEST_CLIENTS_PAGE = """<html><head><meta http-equiv=refresh content="2; url=clients.cgi"></head><body>
<h3>BRTP clients: 0</h3><pre></pre><h3>TCP connections</h3><pre>172.16.17.32:src port 80 :dst port 43624
172.16.17.32:src port 11111 :dst port 52770
192.168.80.10:src port 22222 :dst port 60320
10.0.99.98:src port 33333 :dst port 42911
</pre></body></html>
"""
def _mock_get_with_timeout(host, port, path, timeout_s):
if path == "/clients.cgi":
return _TEST_CLIENTS_PAGE
if path == "/uistatusl.html":
return _TEST_STATUS_PAGE
assert False
# Monkey-patch in our mock of get_with_timeout.
http.get_with_timeout = _mock_get_with_timeout
class BarixTestCase(unittest.TestCase):
def test_basic(self):
barix_obj = barix.Barix("test.url.com", 666)
barix_obj.ping()
self.assertEqual("SENDING", barix_obj.status)
self.assertEqual("5610", barix_obj.left_level)
self.assertEqual("5422", barix_obj.right_level)
print barix_obj.clients
self.assertEqual(3, len(barix_obj.clients))
self.assertEqual(("172.16.17.32", "52770"),
barix_obj.clients["11111"])
self.assertEqual(("192.168.80.10", "60320"),
barix_obj.clients["22222"])
self.assertEqual(("10.0.99.98", "42911"),
barix_obj.clients["33333"])
if __name__ == "__main__":
unittest.main() | 0.583678 | 0.280857 |